17ac6653aSJeff Kirsher /*******************************************************************************
27ac6653aSJeff Kirsher   This is the driver for the ST MAC 10/100/1000 on-chip Ethernet controllers.
37ac6653aSJeff Kirsher   ST Ethernet IPs are built around a Synopsys IP Core.
47ac6653aSJeff Kirsher 
5286a8372SGiuseppe CAVALLARO 	Copyright(C) 2007-2011 STMicroelectronics Ltd
67ac6653aSJeff Kirsher 
77ac6653aSJeff Kirsher   This program is free software; you can redistribute it and/or modify it
87ac6653aSJeff Kirsher   under the terms and conditions of the GNU General Public License,
97ac6653aSJeff Kirsher   version 2, as published by the Free Software Foundation.
107ac6653aSJeff Kirsher 
117ac6653aSJeff Kirsher   This program is distributed in the hope it will be useful, but WITHOUT
127ac6653aSJeff Kirsher   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
137ac6653aSJeff Kirsher   FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
147ac6653aSJeff Kirsher   more details.
157ac6653aSJeff Kirsher 
167ac6653aSJeff Kirsher   The full GNU General Public License is included in this distribution in
177ac6653aSJeff Kirsher   the file called "COPYING".
187ac6653aSJeff Kirsher 
197ac6653aSJeff Kirsher   Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
207ac6653aSJeff Kirsher 
217ac6653aSJeff Kirsher   Documentation available at:
227ac6653aSJeff Kirsher 	http://www.stlinux.com
237ac6653aSJeff Kirsher   Support available at:
247ac6653aSJeff Kirsher 	https://bugzilla.stlinux.com/
257ac6653aSJeff Kirsher *******************************************************************************/
267ac6653aSJeff Kirsher 
276a81c26fSViresh Kumar #include <linux/clk.h>
287ac6653aSJeff Kirsher #include <linux/kernel.h>
297ac6653aSJeff Kirsher #include <linux/interrupt.h>
307ac6653aSJeff Kirsher #include <linux/ip.h>
317ac6653aSJeff Kirsher #include <linux/tcp.h>
327ac6653aSJeff Kirsher #include <linux/skbuff.h>
337ac6653aSJeff Kirsher #include <linux/ethtool.h>
347ac6653aSJeff Kirsher #include <linux/if_ether.h>
357ac6653aSJeff Kirsher #include <linux/crc32.h>
367ac6653aSJeff Kirsher #include <linux/mii.h>
3701789349SJiri Pirko #include <linux/if.h>
387ac6653aSJeff Kirsher #include <linux/if_vlan.h>
397ac6653aSJeff Kirsher #include <linux/dma-mapping.h>
407ac6653aSJeff Kirsher #include <linux/slab.h>
417ac6653aSJeff Kirsher #include <linux/prefetch.h>
42db88f10aSSrinivas Kandagatla #include <linux/pinctrl/consumer.h>
4350fb4f74SGiuseppe CAVALLARO #ifdef CONFIG_DEBUG_FS
447ac29055SGiuseppe CAVALLARO #include <linux/debugfs.h>
457ac29055SGiuseppe CAVALLARO #include <linux/seq_file.h>
4650fb4f74SGiuseppe CAVALLARO #endif /* CONFIG_DEBUG_FS */
47891434b1SRayagond Kokatanur #include <linux/net_tstamp.h>
484dbbe8ddSJose Abreu #include <net/pkt_cls.h>
49891434b1SRayagond Kokatanur #include "stmmac_ptp.h"
50286a8372SGiuseppe CAVALLARO #include "stmmac.h"
51c5e4ddbdSChen-Yu Tsai #include <linux/reset.h>
525790cf3cSMathieu Olivari #include <linux/of_mdio.h>
5319d857c9SPhil Reid #include "dwmac1000.h"
5442de047dSJose Abreu #include "hwif.h"
557ac6653aSJeff Kirsher 
#define STMMAC_ALIGN(x)	L1_CACHE_ALIGN(x)
#define	TSO_MAX_BUFF_SIZE	(SZ_16K - 1)

/* Module parameters */
/* TX watchdog timeout, in milliseconds. */
#define TX_TIMEO	5000
static int watchdog = TX_TIMEO;
module_param(watchdog, int, 0644);
MODULE_PARM_DESC(watchdog, "Transmit timeout in milliseconds (default 5s)");

/* netif message level bitmap; -1 selects default_msg_level below. */
static int debug = -1;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)");

static int phyaddr = -1;
module_param(phyaddr, int, 0444);
MODULE_PARM_DESC(phyaddr, "Physical device address");

/* Wake/clean thresholds: a quarter of the descriptor ring size. */
#define STMMAC_TX_THRESH	(DMA_TX_SIZE / 4)
#define STMMAC_RX_THRESH	(DMA_RX_SIZE / 4)

static int flow_ctrl = FLOW_OFF;
module_param(flow_ctrl, int, 0644);
MODULE_PARM_DESC(flow_ctrl, "Flow control ability [on/off]");

static int pause = PAUSE_TIME;
module_param(pause, int, 0644);
MODULE_PARM_DESC(pause, "Flow Control Pause Time");

#define TC_DEFAULT 64
static int tc = TC_DEFAULT;
module_param(tc, int, 0644);
MODULE_PARM_DESC(tc, "DMA threshold control value");

#define	DEFAULT_BUFSIZE	1536
static int buf_sz = DEFAULT_BUFSIZE;
module_param(buf_sz, int, 0644);
MODULE_PARM_DESC(buf_sz, "DMA buffer size");

#define	STMMAC_RX_COPYBREAK	256

static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
				      NETIF_MSG_LINK | NETIF_MSG_IFUP |
				      NETIF_MSG_IFDOWN | NETIF_MSG_TIMER);

/* LPI (Low Power Idle) timer expiration, in milliseconds. */
#define STMMAC_DEFAULT_LPI_TIMER	1000
static int eee_timer = STMMAC_DEFAULT_LPI_TIMER;
module_param(eee_timer, int, 0644);
MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec");
/* Absolute jiffies deadline for an LPI timer programmed x ms from now. */
#define STMMAC_LPI_T(x) (jiffies + msecs_to_jiffies(x))

/* By default the driver will use the ring mode to manage tx and rx descriptors,
 * but allow user to force to use the chain instead of the ring
 */
/* NOTE(review): declared unsigned int but registered as a signed int
 * module_param — confirm this mismatch is intentional.
 */
static unsigned int chain_mode;
module_param(chain_mode, int, 0444);
MODULE_PARM_DESC(chain_mode, "To use chain instead of ring mode");

static irqreturn_t stmmac_interrupt(int irq, void *dev_id);

#ifdef CONFIG_DEBUG_FS
static int stmmac_init_fs(struct net_device *dev);
static void stmmac_exit_fs(struct net_device *dev);
#endif

/* Absolute jiffies deadline for a coalescing timer programmed x us from now. */
#define STMMAC_COAL_TIMER(x) (jiffies + usecs_to_jiffies(x))
1219125cdd1SGiuseppe CAVALLARO 
1227ac6653aSJeff Kirsher /**
1237ac6653aSJeff Kirsher  * stmmac_verify_args - verify the driver parameters.
124732fdf0eSGiuseppe CAVALLARO  * Description: it checks the driver parameters and set a default in case of
125732fdf0eSGiuseppe CAVALLARO  * errors.
1267ac6653aSJeff Kirsher  */
1277ac6653aSJeff Kirsher static void stmmac_verify_args(void)
1287ac6653aSJeff Kirsher {
1297ac6653aSJeff Kirsher 	if (unlikely(watchdog < 0))
1307ac6653aSJeff Kirsher 		watchdog = TX_TIMEO;
131d916701cSGiuseppe CAVALLARO 	if (unlikely((buf_sz < DEFAULT_BUFSIZE) || (buf_sz > BUF_SIZE_16KiB)))
132d916701cSGiuseppe CAVALLARO 		buf_sz = DEFAULT_BUFSIZE;
1337ac6653aSJeff Kirsher 	if (unlikely(flow_ctrl > 1))
1347ac6653aSJeff Kirsher 		flow_ctrl = FLOW_AUTO;
1357ac6653aSJeff Kirsher 	else if (likely(flow_ctrl < 0))
1367ac6653aSJeff Kirsher 		flow_ctrl = FLOW_OFF;
1377ac6653aSJeff Kirsher 	if (unlikely((pause < 0) || (pause > 0xffff)))
1387ac6653aSJeff Kirsher 		pause = PAUSE_TIME;
139d765955dSGiuseppe CAVALLARO 	if (eee_timer < 0)
140d765955dSGiuseppe CAVALLARO 		eee_timer = STMMAC_DEFAULT_LPI_TIMER;
1417ac6653aSJeff Kirsher }
1427ac6653aSJeff Kirsher 
14332ceabcaSGiuseppe CAVALLARO /**
144c22a3f48SJoao Pinto  * stmmac_disable_all_queues - Disable all queues
145c22a3f48SJoao Pinto  * @priv: driver private structure
146c22a3f48SJoao Pinto  */
147c22a3f48SJoao Pinto static void stmmac_disable_all_queues(struct stmmac_priv *priv)
148c22a3f48SJoao Pinto {
149c22a3f48SJoao Pinto 	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
150c22a3f48SJoao Pinto 	u32 queue;
151c22a3f48SJoao Pinto 
152c22a3f48SJoao Pinto 	for (queue = 0; queue < rx_queues_cnt; queue++) {
153c22a3f48SJoao Pinto 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
154c22a3f48SJoao Pinto 
155c22a3f48SJoao Pinto 		napi_disable(&rx_q->napi);
156c22a3f48SJoao Pinto 	}
157c22a3f48SJoao Pinto }
158c22a3f48SJoao Pinto 
159c22a3f48SJoao Pinto /**
160c22a3f48SJoao Pinto  * stmmac_enable_all_queues - Enable all queues
161c22a3f48SJoao Pinto  * @priv: driver private structure
162c22a3f48SJoao Pinto  */
163c22a3f48SJoao Pinto static void stmmac_enable_all_queues(struct stmmac_priv *priv)
164c22a3f48SJoao Pinto {
165c22a3f48SJoao Pinto 	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
166c22a3f48SJoao Pinto 	u32 queue;
167c22a3f48SJoao Pinto 
168c22a3f48SJoao Pinto 	for (queue = 0; queue < rx_queues_cnt; queue++) {
169c22a3f48SJoao Pinto 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
170c22a3f48SJoao Pinto 
171c22a3f48SJoao Pinto 		napi_enable(&rx_q->napi);
172c22a3f48SJoao Pinto 	}
173c22a3f48SJoao Pinto }
174c22a3f48SJoao Pinto 
175c22a3f48SJoao Pinto /**
176c22a3f48SJoao Pinto  * stmmac_stop_all_queues - Stop all queues
177c22a3f48SJoao Pinto  * @priv: driver private structure
178c22a3f48SJoao Pinto  */
179c22a3f48SJoao Pinto static void stmmac_stop_all_queues(struct stmmac_priv *priv)
180c22a3f48SJoao Pinto {
181c22a3f48SJoao Pinto 	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
182c22a3f48SJoao Pinto 	u32 queue;
183c22a3f48SJoao Pinto 
184c22a3f48SJoao Pinto 	for (queue = 0; queue < tx_queues_cnt; queue++)
185c22a3f48SJoao Pinto 		netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
186c22a3f48SJoao Pinto }
187c22a3f48SJoao Pinto 
188c22a3f48SJoao Pinto /**
189c22a3f48SJoao Pinto  * stmmac_start_all_queues - Start all queues
190c22a3f48SJoao Pinto  * @priv: driver private structure
191c22a3f48SJoao Pinto  */
192c22a3f48SJoao Pinto static void stmmac_start_all_queues(struct stmmac_priv *priv)
193c22a3f48SJoao Pinto {
194c22a3f48SJoao Pinto 	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
195c22a3f48SJoao Pinto 	u32 queue;
196c22a3f48SJoao Pinto 
197c22a3f48SJoao Pinto 	for (queue = 0; queue < tx_queues_cnt; queue++)
198c22a3f48SJoao Pinto 		netif_tx_start_queue(netdev_get_tx_queue(priv->dev, queue));
199c22a3f48SJoao Pinto }
200c22a3f48SJoao Pinto 
20134877a15SJose Abreu static void stmmac_service_event_schedule(struct stmmac_priv *priv)
20234877a15SJose Abreu {
20334877a15SJose Abreu 	if (!test_bit(STMMAC_DOWN, &priv->state) &&
20434877a15SJose Abreu 	    !test_and_set_bit(STMMAC_SERVICE_SCHED, &priv->state))
20534877a15SJose Abreu 		queue_work(priv->wq, &priv->service_task);
20634877a15SJose Abreu }
20734877a15SJose Abreu 
static void stmmac_global_err(struct stmmac_priv *priv)
{
	/* Fatal-error path: drop the carrier first so the stack stops
	 * queueing packets, flag that a reset is required, then kick the
	 * service task to perform it.
	 */
	netif_carrier_off(priv->dev);
	set_bit(STMMAC_RESET_REQUESTED, &priv->state);
	stmmac_service_event_schedule(priv);
}
21434877a15SJose Abreu 
215c22a3f48SJoao Pinto /**
21632ceabcaSGiuseppe CAVALLARO  * stmmac_clk_csr_set - dynamically set the MDC clock
21732ceabcaSGiuseppe CAVALLARO  * @priv: driver private structure
21832ceabcaSGiuseppe CAVALLARO  * Description: this is to dynamically set the MDC clock according to the csr
21932ceabcaSGiuseppe CAVALLARO  * clock input.
22032ceabcaSGiuseppe CAVALLARO  * Note:
22132ceabcaSGiuseppe CAVALLARO  *	If a specific clk_csr value is passed from the platform
22232ceabcaSGiuseppe CAVALLARO  *	this means that the CSR Clock Range selection cannot be
22332ceabcaSGiuseppe CAVALLARO  *	changed at run-time and it is fixed (as reported in the driver
22432ceabcaSGiuseppe CAVALLARO  *	documentation). Viceversa the driver will try to set the MDC
22532ceabcaSGiuseppe CAVALLARO  *	clock dynamically according to the actual clock input.
22632ceabcaSGiuseppe CAVALLARO  */
static void stmmac_clk_csr_set(struct stmmac_priv *priv)
{
	u32 clk_rate;

	clk_rate = clk_get_rate(priv->plat->stmmac_clk);

	/* Platform provided default clk_csr would be assumed valid
	 * for all other cases except for the below mentioned ones.
	 * For values higher than the IEEE 802.3 specified frequency
	 * we can not estimate the proper divider as it is not known
	 * the frequency of clk_csr_i. So we do not change the default
	 * divider.
	 */
	if (!(priv->clk_csr & MAC_CSR_H_FRQ_MASK)) {
		/* Map the CSR input clock rate to the matching MDC
		 * clock-range selection constant.
		 */
		if (clk_rate < CSR_F_35M)
			priv->clk_csr = STMMAC_CSR_20_35M;
		else if ((clk_rate >= CSR_F_35M) && (clk_rate < CSR_F_60M))
			priv->clk_csr = STMMAC_CSR_35_60M;
		else if ((clk_rate >= CSR_F_60M) && (clk_rate < CSR_F_100M))
			priv->clk_csr = STMMAC_CSR_60_100M;
		else if ((clk_rate >= CSR_F_100M) && (clk_rate < CSR_F_150M))
			priv->clk_csr = STMMAC_CSR_100_150M;
		else if ((clk_rate >= CSR_F_150M) && (clk_rate < CSR_F_250M))
			priv->clk_csr = STMMAC_CSR_150_250M;
		else if ((clk_rate >= CSR_F_250M) && (clk_rate < CSR_F_300M))
			priv->clk_csr = STMMAC_CSR_250_300M;
	}

	/* NOTE(review): the sun8i mapping below is applied even when the
	 * platform passed a fixed clk_csr value (it is outside the
	 * MAC_CSR_H_FRQ_MASK check) — confirm this override is intended
	 * for that glue layer.
	 */
	if (priv->plat->has_sun8i) {
		if (clk_rate > 160000000)
			priv->clk_csr = 0x03;
		else if (clk_rate > 80000000)
			priv->clk_csr = 0x02;
		else if (clk_rate > 40000000)
			priv->clk_csr = 0x01;
		else
			priv->clk_csr = 0;
	}
}
266cd7201f4SGiuseppe CAVALLARO 
/* Debug helper: dump a packet buffer (length, address and hex bytes). */
static void print_pkt(unsigned char *buf, int len)
{
	pr_debug("len = %d byte, buf addr: 0x%p\n", len, buf);
	print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, buf, len);
}
2727ac6653aSJeff Kirsher 
273ce736788SJoao Pinto static inline u32 stmmac_tx_avail(struct stmmac_priv *priv, u32 queue)
2747ac6653aSJeff Kirsher {
275ce736788SJoao Pinto 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
276a6a3e026SLABBE Corentin 	u32 avail;
277e3ad57c9SGiuseppe Cavallaro 
278ce736788SJoao Pinto 	if (tx_q->dirty_tx > tx_q->cur_tx)
279ce736788SJoao Pinto 		avail = tx_q->dirty_tx - tx_q->cur_tx - 1;
280e3ad57c9SGiuseppe Cavallaro 	else
281ce736788SJoao Pinto 		avail = DMA_TX_SIZE - tx_q->cur_tx + tx_q->dirty_tx - 1;
282e3ad57c9SGiuseppe Cavallaro 
283e3ad57c9SGiuseppe Cavallaro 	return avail;
284e3ad57c9SGiuseppe Cavallaro }
285e3ad57c9SGiuseppe Cavallaro 
28654139cf3SJoao Pinto /**
28754139cf3SJoao Pinto  * stmmac_rx_dirty - Get RX queue dirty
28854139cf3SJoao Pinto  * @priv: driver private structure
28954139cf3SJoao Pinto  * @queue: RX queue index
29054139cf3SJoao Pinto  */
29154139cf3SJoao Pinto static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv, u32 queue)
292e3ad57c9SGiuseppe Cavallaro {
29354139cf3SJoao Pinto 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
294a6a3e026SLABBE Corentin 	u32 dirty;
295e3ad57c9SGiuseppe Cavallaro 
29654139cf3SJoao Pinto 	if (rx_q->dirty_rx <= rx_q->cur_rx)
29754139cf3SJoao Pinto 		dirty = rx_q->cur_rx - rx_q->dirty_rx;
298e3ad57c9SGiuseppe Cavallaro 	else
29954139cf3SJoao Pinto 		dirty = DMA_RX_SIZE - rx_q->dirty_rx + rx_q->cur_rx;
300e3ad57c9SGiuseppe Cavallaro 
301e3ad57c9SGiuseppe Cavallaro 	return dirty;
3027ac6653aSJeff Kirsher }
3037ac6653aSJeff Kirsher 
30432ceabcaSGiuseppe CAVALLARO /**
305732fdf0eSGiuseppe CAVALLARO  * stmmac_hw_fix_mac_speed - callback for speed selection
30632ceabcaSGiuseppe CAVALLARO  * @priv: driver private structure
3078d45e42bSLABBE Corentin  * Description: on some platforms (e.g. ST), some HW system configuration
30832ceabcaSGiuseppe CAVALLARO  * registers have to be set according to the link speed negotiated.
3097ac6653aSJeff Kirsher  */
3107ac6653aSJeff Kirsher static inline void stmmac_hw_fix_mac_speed(struct stmmac_priv *priv)
3117ac6653aSJeff Kirsher {
312d6d50c7eSPhilippe Reynes 	struct net_device *ndev = priv->dev;
313d6d50c7eSPhilippe Reynes 	struct phy_device *phydev = ndev->phydev;
3147ac6653aSJeff Kirsher 
3157ac6653aSJeff Kirsher 	if (likely(priv->plat->fix_mac_speed))
316ceb69499SGiuseppe CAVALLARO 		priv->plat->fix_mac_speed(priv->plat->bsp_priv, phydev->speed);
3177ac6653aSJeff Kirsher }
3187ac6653aSJeff Kirsher 
31932ceabcaSGiuseppe CAVALLARO /**
320732fdf0eSGiuseppe CAVALLARO  * stmmac_enable_eee_mode - check and enter in LPI mode
32132ceabcaSGiuseppe CAVALLARO  * @priv: driver private structure
322732fdf0eSGiuseppe CAVALLARO  * Description: this function is to verify and enter in LPI mode in case of
323732fdf0eSGiuseppe CAVALLARO  * EEE.
32432ceabcaSGiuseppe CAVALLARO  */
325d765955dSGiuseppe CAVALLARO static void stmmac_enable_eee_mode(struct stmmac_priv *priv)
326d765955dSGiuseppe CAVALLARO {
327ce736788SJoao Pinto 	u32 tx_cnt = priv->plat->tx_queues_to_use;
328ce736788SJoao Pinto 	u32 queue;
329ce736788SJoao Pinto 
330ce736788SJoao Pinto 	/* check if all TX queues have the work finished */
331ce736788SJoao Pinto 	for (queue = 0; queue < tx_cnt; queue++) {
332ce736788SJoao Pinto 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
333ce736788SJoao Pinto 
334ce736788SJoao Pinto 		if (tx_q->dirty_tx != tx_q->cur_tx)
335ce736788SJoao Pinto 			return; /* still unfinished work */
336ce736788SJoao Pinto 	}
337ce736788SJoao Pinto 
338d765955dSGiuseppe CAVALLARO 	/* Check and enter in LPI mode */
339ce736788SJoao Pinto 	if (!priv->tx_path_in_lpi_mode)
340c10d4c82SJose Abreu 		stmmac_set_eee_mode(priv, priv->hw,
341b4b7b772Sjpinto 				priv->plat->en_tx_lpi_clockgating);
342d765955dSGiuseppe CAVALLARO }
343d765955dSGiuseppe CAVALLARO 
34432ceabcaSGiuseppe CAVALLARO /**
345732fdf0eSGiuseppe CAVALLARO  * stmmac_disable_eee_mode - disable and exit from LPI mode
34632ceabcaSGiuseppe CAVALLARO  * @priv: driver private structure
34732ceabcaSGiuseppe CAVALLARO  * Description: this function is to exit and disable EEE in case of
34832ceabcaSGiuseppe CAVALLARO  * LPI state is true. This is called by the xmit.
34932ceabcaSGiuseppe CAVALLARO  */
void stmmac_disable_eee_mode(struct stmmac_priv *priv)
{
	/* Leave LPI in HW first, then stop the SW timer (synchronously, so
	 * a concurrent expiry cannot re-enter LPI), and finally clear the
	 * SW state flag.
	 */
	stmmac_reset_eee_mode(priv, priv->hw);
	del_timer_sync(&priv->eee_ctrl_timer);
	priv->tx_path_in_lpi_mode = false;
}
356d765955dSGiuseppe CAVALLARO 
357d765955dSGiuseppe CAVALLARO /**
358732fdf0eSGiuseppe CAVALLARO  * stmmac_eee_ctrl_timer - EEE TX SW timer.
359d765955dSGiuseppe CAVALLARO  * @arg : data hook
360d765955dSGiuseppe CAVALLARO  * Description:
36132ceabcaSGiuseppe CAVALLARO  *  if there is no data transfer and if we are not in LPI state,
362d765955dSGiuseppe CAVALLARO  *  then MAC Transmitter can be moved to LPI state.
363d765955dSGiuseppe CAVALLARO  */
static void stmmac_eee_ctrl_timer(struct timer_list *t)
{
	struct stmmac_priv *priv = from_timer(priv, t, eee_ctrl_timer);

	/* Try to enter LPI (a no-op while TX work is pending) and re-arm
	 * the timer so the attempt is repeated every eee_timer msecs.
	 */
	stmmac_enable_eee_mode(priv);
	mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer));
}
371d765955dSGiuseppe CAVALLARO 
372d765955dSGiuseppe CAVALLARO /**
373732fdf0eSGiuseppe CAVALLARO  * stmmac_eee_init - init EEE
37432ceabcaSGiuseppe CAVALLARO  * @priv: driver private structure
375d765955dSGiuseppe CAVALLARO  * Description:
376732fdf0eSGiuseppe CAVALLARO  *  if the GMAC supports the EEE (from the HW cap reg) and the phy device
377732fdf0eSGiuseppe CAVALLARO  *  can also manage EEE, this function enable the LPI state and start related
378732fdf0eSGiuseppe CAVALLARO  *  timer.
379d765955dSGiuseppe CAVALLARO  */
bool stmmac_eee_init(struct stmmac_priv *priv)
{
	struct net_device *ndev = priv->dev;
	int interface = priv->plat->interface;
	unsigned long flags;
	bool ret = false;

	/* EEE is only attempted on MII/GMII/RGMII PHY interfaces. */
	if ((interface != PHY_INTERFACE_MODE_MII) &&
	    (interface != PHY_INTERFACE_MODE_GMII) &&
	    !phy_interface_mode_is_rgmii(interface))
		goto out;

	/* Using PCS we cannot dial with the phy registers at this stage
	 * so we do not support extra feature like EEE.
	 */
	if ((priv->hw->pcs == STMMAC_PCS_RGMII) ||
	    (priv->hw->pcs == STMMAC_PCS_TBI) ||
	    (priv->hw->pcs == STMMAC_PCS_RTBI))
		goto out;

	/* MAC core supports the EEE feature. */
	if (priv->dma_cap.eee) {
		int tx_lpi_timer = priv->tx_lpi_timer;

		/* Check if the PHY supports EEE */
		if (phy_init_eee(ndev->phydev, 1)) {
			/* To manage at run-time if the EEE cannot be supported
			 * anymore (for example because the lp caps have been
			 * changed).
			 * In that case the driver disable own timers.
			 */
			/* NOTE(review): del_timer_sync() is called with
			 * priv->lock held and IRQs off — safe only while the
			 * timer callback never takes this lock; verify.
			 */
			spin_lock_irqsave(&priv->lock, flags);
			if (priv->eee_active) {
				netdev_dbg(priv->dev, "disable EEE\n");
				del_timer_sync(&priv->eee_ctrl_timer);
				stmmac_set_eee_timer(priv, priv->hw, 0,
						tx_lpi_timer);
			}
			priv->eee_active = 0;
			spin_unlock_irqrestore(&priv->lock, flags);
			goto out;
		}
		/* Activate the EEE and start timers */
		spin_lock_irqsave(&priv->lock, flags);
		if (!priv->eee_active) {
			priv->eee_active = 1;
			timer_setup(&priv->eee_ctrl_timer,
				    stmmac_eee_ctrl_timer, 0);
			mod_timer(&priv->eee_ctrl_timer,
				  STMMAC_LPI_T(eee_timer));

			stmmac_set_eee_timer(priv, priv->hw,
					STMMAC_DEFAULT_LIT_LS, tx_lpi_timer);
		}
		/* Set HW EEE according to the speed */
		stmmac_set_eee_pls(priv, priv->hw, ndev->phydev->link);

		ret = true;
		spin_unlock_irqrestore(&priv->lock, flags);

		netdev_dbg(priv->dev, "Energy-Efficient Ethernet initialized\n");
	}
out:
	return ret;
}
445d765955dSGiuseppe CAVALLARO 
446732fdf0eSGiuseppe CAVALLARO /* stmmac_get_tx_hwtstamp - get HW TX timestamps
44732ceabcaSGiuseppe CAVALLARO  * @priv: driver private structure
448ba1ffd74SGiuseppe CAVALLARO  * @p : descriptor pointer
449891434b1SRayagond Kokatanur  * @skb : the socket buffer
450891434b1SRayagond Kokatanur  * Description :
451891434b1SRayagond Kokatanur  * This function will read timestamp from the descriptor & pass it to stack.
452891434b1SRayagond Kokatanur  * and also perform some sanity checks.
453891434b1SRayagond Kokatanur  */
454891434b1SRayagond Kokatanur static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv,
455ba1ffd74SGiuseppe CAVALLARO 				   struct dma_desc *p, struct sk_buff *skb)
456891434b1SRayagond Kokatanur {
457891434b1SRayagond Kokatanur 	struct skb_shared_hwtstamps shhwtstamp;
458891434b1SRayagond Kokatanur 	u64 ns;
459891434b1SRayagond Kokatanur 
460891434b1SRayagond Kokatanur 	if (!priv->hwts_tx_en)
461891434b1SRayagond Kokatanur 		return;
462891434b1SRayagond Kokatanur 
463ceb69499SGiuseppe CAVALLARO 	/* exit if skb doesn't support hw tstamp */
46475e4364fSdamuzi000 	if (likely(!skb || !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)))
465891434b1SRayagond Kokatanur 		return;
466891434b1SRayagond Kokatanur 
467891434b1SRayagond Kokatanur 	/* check tx tstamp status */
46842de047dSJose Abreu 	if (stmmac_get_tx_timestamp_status(priv, p)) {
469891434b1SRayagond Kokatanur 		/* get the valid tstamp */
47042de047dSJose Abreu 		stmmac_get_timestamp(priv, p, priv->adv_ts, &ns);
471891434b1SRayagond Kokatanur 
472891434b1SRayagond Kokatanur 		memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
473891434b1SRayagond Kokatanur 		shhwtstamp.hwtstamp = ns_to_ktime(ns);
474ba1ffd74SGiuseppe CAVALLARO 
47533d4c482SMario Molitor 		netdev_dbg(priv->dev, "get valid TX hw timestamp %llu\n", ns);
476891434b1SRayagond Kokatanur 		/* pass tstamp to stack */
477891434b1SRayagond Kokatanur 		skb_tstamp_tx(skb, &shhwtstamp);
478ba1ffd74SGiuseppe CAVALLARO 	}
479891434b1SRayagond Kokatanur 
480891434b1SRayagond Kokatanur 	return;
481891434b1SRayagond Kokatanur }
482891434b1SRayagond Kokatanur 
483732fdf0eSGiuseppe CAVALLARO /* stmmac_get_rx_hwtstamp - get HW RX timestamps
48432ceabcaSGiuseppe CAVALLARO  * @priv: driver private structure
485ba1ffd74SGiuseppe CAVALLARO  * @p : descriptor pointer
486ba1ffd74SGiuseppe CAVALLARO  * @np : next descriptor pointer
487891434b1SRayagond Kokatanur  * @skb : the socket buffer
488891434b1SRayagond Kokatanur  * Description :
489891434b1SRayagond Kokatanur  * This function will read received packet's timestamp from the descriptor
490891434b1SRayagond Kokatanur  * and pass it to stack. It also perform some sanity checks.
491891434b1SRayagond Kokatanur  */
492ba1ffd74SGiuseppe CAVALLARO static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p,
493ba1ffd74SGiuseppe CAVALLARO 				   struct dma_desc *np, struct sk_buff *skb)
494891434b1SRayagond Kokatanur {
495891434b1SRayagond Kokatanur 	struct skb_shared_hwtstamps *shhwtstamp = NULL;
49698870943SJose Abreu 	struct dma_desc *desc = p;
497891434b1SRayagond Kokatanur 	u64 ns;
498891434b1SRayagond Kokatanur 
499891434b1SRayagond Kokatanur 	if (!priv->hwts_rx_en)
500891434b1SRayagond Kokatanur 		return;
501ba1ffd74SGiuseppe CAVALLARO 	/* For GMAC4, the valid timestamp is from CTX next desc. */
502ba1ffd74SGiuseppe CAVALLARO 	if (priv->plat->has_gmac4)
50398870943SJose Abreu 		desc = np;
504891434b1SRayagond Kokatanur 
50598870943SJose Abreu 	/* Check if timestamp is available */
50642de047dSJose Abreu 	if (stmmac_get_rx_timestamp_status(priv, p, np, priv->adv_ts)) {
50742de047dSJose Abreu 		stmmac_get_timestamp(priv, desc, priv->adv_ts, &ns);
50833d4c482SMario Molitor 		netdev_dbg(priv->dev, "get valid RX hw timestamp %llu\n", ns);
509891434b1SRayagond Kokatanur 		shhwtstamp = skb_hwtstamps(skb);
510891434b1SRayagond Kokatanur 		memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
511891434b1SRayagond Kokatanur 		shhwtstamp->hwtstamp = ns_to_ktime(ns);
512ba1ffd74SGiuseppe CAVALLARO 	} else  {
51333d4c482SMario Molitor 		netdev_dbg(priv->dev, "cannot get RX hw timestamp\n");
514ba1ffd74SGiuseppe CAVALLARO 	}
515891434b1SRayagond Kokatanur }
516891434b1SRayagond Kokatanur 
517891434b1SRayagond Kokatanur /**
518891434b1SRayagond Kokatanur  *  stmmac_hwtstamp_ioctl - control hardware timestamping.
519891434b1SRayagond Kokatanur  *  @dev: device pointer.
5208d45e42bSLABBE Corentin  *  @ifr: An IOCTL specific structure, that can contain a pointer to
521891434b1SRayagond Kokatanur  *  a proprietary structure used to pass information to the driver.
522891434b1SRayagond Kokatanur  *  Description:
523891434b1SRayagond Kokatanur  *  This function configures the MAC to enable/disable both outgoing(TX)
524891434b1SRayagond Kokatanur  *  and incoming(RX) packets time stamping based on user input.
525891434b1SRayagond Kokatanur  *  Return Value:
526891434b1SRayagond Kokatanur  *  0 on success and an appropriate -ve integer on failure.
527891434b1SRayagond Kokatanur  */
static int stmmac_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	struct hwtstamp_config config;
	struct timespec64 now;
	u64 temp = 0;
	/* The locals below accumulate the control bits that will be OR-ed
	 * together into the timestamp control register value at the end.
	 */
	u32 ptp_v2 = 0;
	u32 tstamp_all = 0;
	u32 ptp_over_ipv4_udp = 0;
	u32 ptp_over_ipv6_udp = 0;
	u32 ptp_over_ethernet = 0;
	u32 snap_type_sel = 0;
	u32 ts_master_en = 0;
	u32 ts_event_en = 0;
	u32 value = 0;
	u32 sec_inc;

	/* No HW timestamping at all: force both directions off and bail. */
	if (!(priv->dma_cap.time_stamp || priv->adv_ts)) {
		netdev_alert(priv->dev, "No support for HW time stamping\n");
		priv->hwts_tx_en = 0;
		priv->hwts_rx_en = 0;

		return -EOPNOTSUPP;
	}

	if (copy_from_user(&config, ifr->ifr_data,
			   sizeof(struct hwtstamp_config)))
		return -EFAULT;

	netdev_dbg(priv->dev, "%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n",
		   __func__, config.flags, config.tx_type, config.rx_filter);

	/* reserved for future extensions */
	if (config.flags)
		return -EINVAL;

	if (config.tx_type != HWTSTAMP_TX_OFF &&
	    config.tx_type != HWTSTAMP_TX_ON)
		return -ERANGE;

	if (priv->adv_ts) {
		/* Advanced timestamping engine: honour the full set of RX
		 * filters, mapping each one to its TSCR control bits.
		 */
		switch (config.rx_filter) {
		case HWTSTAMP_FILTER_NONE:
			/* time stamp no incoming packet at all */
			config.rx_filter = HWTSTAMP_FILTER_NONE;
			break;

		case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
			/* PTP v1, UDP, any kind of event packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
			/* take time stamp for all event messages */
			if (priv->plat->has_gmac4)
				snap_type_sel = PTP_GMAC4_TCR_SNAPTYPSEL_1;
			else
				snap_type_sel = PTP_TCR_SNAPTYPSEL_1;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
			/* PTP v1, UDP, Sync packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
			/* take time stamp for SYNC messages only */
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
			/* PTP v1, UDP, Delay_req packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
			/* take time stamp for Delay_Req messages only */
			ts_master_en = PTP_TCR_TSMSTRENA;
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
			/* PTP v2, UDP, any kind of event packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for all event messages */
			if (priv->plat->has_gmac4)
				snap_type_sel = PTP_GMAC4_TCR_SNAPTYPSEL_1;
			else
				snap_type_sel = PTP_TCR_SNAPTYPSEL_1;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
			/* PTP v2, UDP, Sync packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for SYNC messages only */
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
			/* PTP v2, UDP, Delay_req packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for Delay_Req messages only */
			ts_master_en = PTP_TCR_TSMSTRENA;
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_EVENT:
			/* PTP v2/802.AS1 any layer, any kind of event packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for all event messages */
			if (priv->plat->has_gmac4)
				snap_type_sel = PTP_GMAC4_TCR_SNAPTYPSEL_1;
			else
				snap_type_sel = PTP_TCR_SNAPTYPSEL_1;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			ptp_over_ethernet = PTP_TCR_TSIPENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_SYNC:
			/* PTP v2/802.AS1, any layer, Sync packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for SYNC messages only */
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			ptp_over_ethernet = PTP_TCR_TSIPENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
			/* PTP v2/802.AS1, any layer, Delay_req packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for Delay_Req messages only */
			ts_master_en = PTP_TCR_TSMSTRENA;
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			ptp_over_ethernet = PTP_TCR_TSIPENA;
			break;

		case HWTSTAMP_FILTER_NTP_ALL:
		case HWTSTAMP_FILTER_ALL:
			/* time stamp any incoming packet */
			config.rx_filter = HWTSTAMP_FILTER_ALL;
			tstamp_all = PTP_TCR_TSENALL;
			break;

		default:
			return -ERANGE;
		}
	} else {
		/* Basic engine: only "off" and "timestamp all PTPv1 UDP
		 * events" are expressible; anything else is coerced to the
		 * latter (config is copied back so userspace sees the
		 * actual filter in effect).
		 */
		switch (config.rx_filter) {
		case HWTSTAMP_FILTER_NONE:
			config.rx_filter = HWTSTAMP_FILTER_NONE;
			break;
		default:
			/* PTP v1, UDP, any kind of event packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
			break;
		}
	}
	priv->hwts_rx_en = ((config.rx_filter == HWTSTAMP_FILTER_NONE) ? 0 : 1);
	priv->hwts_tx_en = config.tx_type == HWTSTAMP_TX_ON;

	if (!priv->hwts_tx_en && !priv->hwts_rx_en)
		stmmac_config_hw_tstamping(priv, priv->ptpaddr, 0);
	else {
		/* Enable timestamping with the control bits gathered above,
		 * then program the PTP clock: sub-second increment, addend
		 * (frequency correction) and the initial system time.
		 */
		value = (PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | PTP_TCR_TSCTRLSSR |
			 tstamp_all | ptp_v2 | ptp_over_ethernet |
			 ptp_over_ipv6_udp | ptp_over_ipv4_udp | ts_event_en |
			 ts_master_en | snap_type_sel);
		stmmac_config_hw_tstamping(priv, priv->ptpaddr, value);

		/* program Sub Second Increment reg */
		stmmac_config_sub_second_increment(priv,
				priv->ptpaddr, priv->plat->clk_ptp_rate,
				priv->plat->has_gmac4, &sec_inc);
		temp = div_u64(1000000000ULL, sec_inc);

		/* calculate default added value:
		 * formula is :
		 * addend = (2^32)/freq_div_ratio;
		 * where, freq_div_ratio = 1e9ns/sec_inc
		 */
		temp = (u64)(temp << 32);
		priv->default_addend = div_u64(temp, priv->plat->clk_ptp_rate);
		stmmac_config_addend(priv, priv->ptpaddr, priv->default_addend);

		/* initialize system time */
		ktime_get_real_ts64(&now);

		/* lower 32 bits of tv_sec are safe until y2106 */
		stmmac_init_systime(priv, priv->ptpaddr,
				(u32)now.tv_sec, now.tv_nsec);
	}

	/* Report the filter actually programmed back to userspace. */
	return copy_to_user(ifr->ifr_data, &config,
			    sizeof(struct hwtstamp_config)) ? -EFAULT : 0;
}
745891434b1SRayagond Kokatanur 
74632ceabcaSGiuseppe CAVALLARO /**
747732fdf0eSGiuseppe CAVALLARO  * stmmac_init_ptp - init PTP
74832ceabcaSGiuseppe CAVALLARO  * @priv: driver private structure
749732fdf0eSGiuseppe CAVALLARO  * Description: this is to verify if the HW supports the PTPv1 or PTPv2.
75032ceabcaSGiuseppe CAVALLARO  * This is done by looking at the HW cap. register.
751732fdf0eSGiuseppe CAVALLARO  * This function also registers the ptp driver.
75232ceabcaSGiuseppe CAVALLARO  */
75392ba6888SRayagond Kokatanur static int stmmac_init_ptp(struct stmmac_priv *priv)
754891434b1SRayagond Kokatanur {
75592ba6888SRayagond Kokatanur 	if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
75692ba6888SRayagond Kokatanur 		return -EOPNOTSUPP;
75792ba6888SRayagond Kokatanur 
758891434b1SRayagond Kokatanur 	priv->adv_ts = 0;
759be9b3174SGiuseppe CAVALLARO 	/* Check if adv_ts can be enabled for dwmac 4.x core */
760be9b3174SGiuseppe CAVALLARO 	if (priv->plat->has_gmac4 && priv->dma_cap.atime_stamp)
761be9b3174SGiuseppe CAVALLARO 		priv->adv_ts = 1;
762be9b3174SGiuseppe CAVALLARO 	/* Dwmac 3.x core with extend_desc can support adv_ts */
763be9b3174SGiuseppe CAVALLARO 	else if (priv->extend_desc && priv->dma_cap.atime_stamp)
764891434b1SRayagond Kokatanur 		priv->adv_ts = 1;
7657cd01399SVince Bridgers 
766be9b3174SGiuseppe CAVALLARO 	if (priv->dma_cap.time_stamp)
767be9b3174SGiuseppe CAVALLARO 		netdev_info(priv->dev, "IEEE 1588-2002 Timestamp supported\n");
7687cd01399SVince Bridgers 
769be9b3174SGiuseppe CAVALLARO 	if (priv->adv_ts)
770be9b3174SGiuseppe CAVALLARO 		netdev_info(priv->dev,
771be9b3174SGiuseppe CAVALLARO 			    "IEEE 1588-2008 Advanced Timestamp supported\n");
772891434b1SRayagond Kokatanur 
773891434b1SRayagond Kokatanur 	priv->hwts_tx_en = 0;
774891434b1SRayagond Kokatanur 	priv->hwts_rx_en = 0;
77592ba6888SRayagond Kokatanur 
776c30a70d3SGiuseppe CAVALLARO 	stmmac_ptp_register(priv);
777c30a70d3SGiuseppe CAVALLARO 
778c30a70d3SGiuseppe CAVALLARO 	return 0;
77992ba6888SRayagond Kokatanur }
78092ba6888SRayagond Kokatanur 
78192ba6888SRayagond Kokatanur static void stmmac_release_ptp(struct stmmac_priv *priv)
78292ba6888SRayagond Kokatanur {
783f573c0b9Sjpinto 	if (priv->plat->clk_ptp_ref)
784f573c0b9Sjpinto 		clk_disable_unprepare(priv->plat->clk_ptp_ref);
78592ba6888SRayagond Kokatanur 	stmmac_ptp_unregister(priv);
786891434b1SRayagond Kokatanur }
787891434b1SRayagond Kokatanur 
/**
 *  stmmac_mac_flow_ctrl - Configure flow control in all queues
 *  @priv: driver private structure
 *  @duplex: duplex mode reported by the PHY (forwarded to the HW
 *	     flow-control configuration callback)
 *  Description: It is used for configuring the flow control in all queues
 */
static void stmmac_mac_flow_ctrl(struct stmmac_priv *priv, u32 duplex)
{
	u32 tx_cnt = priv->plat->tx_queues_to_use;

	/* Apply the driver-wide pause/flow-control settings across every
	 * TX queue in one HW call.
	 */
	stmmac_flow_ctrl(priv, priv->hw, duplex, priv->flow_ctrl,
			priv->pause, tx_cnt);
}
80029feff39SJoao Pinto 
/**
 * stmmac_adjust_link - adjusts the link parameters
 * @dev: net device structure
 * Description: this is the helper called by the physical abstraction layer
 * drivers to communicate the phy link status. According the speed and duplex
 * this driver can invoke registered glue-logic as well.
 * It also invoke the eee initialization because it could happen when switch
 * on different networks (that are eee capable).
 */
static void stmmac_adjust_link(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	struct phy_device *phydev = dev->phydev;
	unsigned long flags;
	/* Set whenever link/speed/duplex changed, to gate phy_print_status */
	bool new_state = false;

	if (!phydev)
		return;

	/* priv->lock protects the cached link state (oldlink/speed/duplex)
	 * and the read-modify-write of MAC_CTRL_REG below.
	 */
	spin_lock_irqsave(&priv->lock, flags);

	if (phydev->link) {
		u32 ctrl = readl(priv->ioaddr + MAC_CTRL_REG);

		/* Now we make sure that we can be in full duplex mode.
		 * If not, we operate in half-duplex mode. */
		if (phydev->duplex != priv->oldduplex) {
			new_state = true;
			if (!phydev->duplex)
				ctrl &= ~priv->hw->link.duplex;
			else
				ctrl |= priv->hw->link.duplex;
			priv->oldduplex = phydev->duplex;
		}
		/* Flow Control operation */
		if (phydev->pause)
			stmmac_mac_flow_ctrl(priv, phydev->duplex);

		if (phydev->speed != priv->speed) {
			new_state = true;
			ctrl &= ~priv->hw->link.speed_mask;
			switch (phydev->speed) {
			case SPEED_1000:
				ctrl |= priv->hw->link.speed1000;
				break;
			case SPEED_100:
				ctrl |= priv->hw->link.speed100;
				break;
			case SPEED_10:
				ctrl |= priv->hw->link.speed10;
				break;
			default:
				netif_warn(priv, link, priv->dev,
					   "broken speed: %d\n", phydev->speed);
				phydev->speed = SPEED_UNKNOWN;
				break;
			}
			/* Let glue logic adapt (e.g. clocking) to the new
			 * speed, but only if it is a valid one.
			 */
			if (phydev->speed != SPEED_UNKNOWN)
				stmmac_hw_fix_mac_speed(priv);
			priv->speed = phydev->speed;
		}

		writel(ctrl, priv->ioaddr + MAC_CTRL_REG);

		if (!priv->oldlink) {
			new_state = true;
			priv->oldlink = true;
		}
	} else if (priv->oldlink) {
		/* Link just went down: invalidate the cached parameters so
		 * the next link-up reprograms speed and duplex.
		 */
		new_state = true;
		priv->oldlink = false;
		priv->speed = SPEED_UNKNOWN;
		priv->oldduplex = DUPLEX_UNKNOWN;
	}

	if (new_state && netif_msg_link(priv))
		phy_print_status(phydev);

	spin_unlock_irqrestore(&priv->lock, flags);

	if (phydev->is_pseudo_fixed_link)
		/* Stop PHY layer to call the hook to adjust the link in case
		 * of a switch is attached to the stmmac driver.
		 */
		phydev->irq = PHY_IGNORE_INTERRUPT;
	else
		/* At this stage, init the EEE if supported.
		 * Never called in case of fixed_link.
		 */
		priv->eee_enabled = stmmac_eee_init(priv);
}
8927ac6653aSJeff Kirsher 
89332ceabcaSGiuseppe CAVALLARO /**
894732fdf0eSGiuseppe CAVALLARO  * stmmac_check_pcs_mode - verify if RGMII/SGMII is supported
89532ceabcaSGiuseppe CAVALLARO  * @priv: driver private structure
89632ceabcaSGiuseppe CAVALLARO  * Description: this is to verify if the HW supports the PCS.
89732ceabcaSGiuseppe CAVALLARO  * Physical Coding Sublayer (PCS) interface that can be used when the MAC is
89832ceabcaSGiuseppe CAVALLARO  * configured for the TBI, RTBI, or SGMII PHY interface.
89932ceabcaSGiuseppe CAVALLARO  */
900e58bb43fSGiuseppe CAVALLARO static void stmmac_check_pcs_mode(struct stmmac_priv *priv)
901e58bb43fSGiuseppe CAVALLARO {
902e58bb43fSGiuseppe CAVALLARO 	int interface = priv->plat->interface;
903e58bb43fSGiuseppe CAVALLARO 
904e58bb43fSGiuseppe CAVALLARO 	if (priv->dma_cap.pcs) {
9050d909dcdSByungho An 		if ((interface == PHY_INTERFACE_MODE_RGMII) ||
9060d909dcdSByungho An 		    (interface == PHY_INTERFACE_MODE_RGMII_ID) ||
9070d909dcdSByungho An 		    (interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
9080d909dcdSByungho An 		    (interface == PHY_INTERFACE_MODE_RGMII_TXID)) {
90938ddc59dSLABBE Corentin 			netdev_dbg(priv->dev, "PCS RGMII support enabled\n");
9103fe5cadbSGiuseppe CAVALLARO 			priv->hw->pcs = STMMAC_PCS_RGMII;
9110d909dcdSByungho An 		} else if (interface == PHY_INTERFACE_MODE_SGMII) {
91238ddc59dSLABBE Corentin 			netdev_dbg(priv->dev, "PCS SGMII support enabled\n");
9133fe5cadbSGiuseppe CAVALLARO 			priv->hw->pcs = STMMAC_PCS_SGMII;
914e58bb43fSGiuseppe CAVALLARO 		}
915e58bb43fSGiuseppe CAVALLARO 	}
916e58bb43fSGiuseppe CAVALLARO }
917e58bb43fSGiuseppe CAVALLARO 
/**
 * stmmac_init_phy - PHY initialization
 * @dev: net device structure
 * Description: it initializes the driver's PHY state, and attaches the PHY
 * to the mac driver.
 *  Return value:
 *  0 on success
 */
static int stmmac_init_phy(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	struct phy_device *phydev;
	char phy_id_fmt[MII_BUS_ID_SIZE + 3];
	char bus_id[MII_BUS_ID_SIZE];
	int interface = priv->plat->interface;
	int max_speed = priv->plat->max_speed;
	/* Reset the cached link state used by stmmac_adjust_link() */
	priv->oldlink = false;
	priv->speed = SPEED_UNKNOWN;
	priv->oldduplex = DUPLEX_UNKNOWN;

	/* Prefer the device-tree phy handle; fall back to building a
	 * bus_id/addr string for platforms without one.
	 */
	if (priv->plat->phy_node) {
		phydev = of_phy_connect(dev, priv->plat->phy_node,
					&stmmac_adjust_link, 0, interface);
	} else {
		snprintf(bus_id, MII_BUS_ID_SIZE, "stmmac-%x",
			 priv->plat->bus_id);

		snprintf(phy_id_fmt, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, bus_id,
			 priv->plat->phy_addr);
		netdev_dbg(priv->dev, "%s: trying to attach to %s\n", __func__,
			   phy_id_fmt);

		phydev = phy_connect(dev, phy_id_fmt, &stmmac_adjust_link,
				     interface);
	}

	/* of_phy_connect() returns NULL on failure, phy_connect() an
	 * ERR_PTR — handle both shapes here.
	 */
	if (IS_ERR_OR_NULL(phydev)) {
		netdev_err(priv->dev, "Could not attach to PHY\n");
		if (!phydev)
			return -ENODEV;

		return PTR_ERR(phydev);
	}

	/* Stop Advertising 1000BASE Capability if interface is not GMII */
	if ((interface == PHY_INTERFACE_MODE_MII) ||
	    (interface == PHY_INTERFACE_MODE_RMII) ||
		(max_speed < 1000 && max_speed > 0))
		phydev->advertising &= ~(SUPPORTED_1000baseT_Half |
					 SUPPORTED_1000baseT_Full);

	/*
	 * Broken HW is sometimes missing the pull-up resistor on the
	 * MDIO line, which results in reads to non-existent devices returning
	 * 0 rather than 0xffff. Catch this here and treat 0 as a non-existent
	 * device as well.
	 * Note: phydev->phy_id is the result of reading the UID PHY registers.
	 */
	if (!priv->plat->phy_node && phydev->phy_id == 0) {
		phy_disconnect(phydev);
		return -ENODEV;
	}

	/* stmmac_adjust_link will change this to PHY_IGNORE_INTERRUPT to avoid
	 * subsequent PHY polling, make sure we force a link transition if
	 * we have a UP/DOWN/UP transition
	 */
	if (phydev->is_pseudo_fixed_link)
		phydev->irq = PHY_POLL;

	phy_attached_info(phydev);
	return 0;
}
9917ac6653aSJeff Kirsher 
99271fedb01SJoao Pinto static void stmmac_display_rx_rings(struct stmmac_priv *priv)
993c24602efSGiuseppe CAVALLARO {
99454139cf3SJoao Pinto 	u32 rx_cnt = priv->plat->rx_queues_to_use;
99571fedb01SJoao Pinto 	void *head_rx;
99654139cf3SJoao Pinto 	u32 queue;
99754139cf3SJoao Pinto 
99854139cf3SJoao Pinto 	/* Display RX rings */
99954139cf3SJoao Pinto 	for (queue = 0; queue < rx_cnt; queue++) {
100054139cf3SJoao Pinto 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
100154139cf3SJoao Pinto 
100254139cf3SJoao Pinto 		pr_info("\tRX Queue %u rings\n", queue);
1003d0225e7dSAlexandre TORGUE 
100471fedb01SJoao Pinto 		if (priv->extend_desc)
100554139cf3SJoao Pinto 			head_rx = (void *)rx_q->dma_erx;
100671fedb01SJoao Pinto 		else
100754139cf3SJoao Pinto 			head_rx = (void *)rx_q->dma_rx;
100871fedb01SJoao Pinto 
100971fedb01SJoao Pinto 		/* Display RX ring */
101042de047dSJose Abreu 		stmmac_display_ring(priv, head_rx, DMA_RX_SIZE, true);
10115bacd778SLABBE Corentin 	}
101254139cf3SJoao Pinto }
1013d0225e7dSAlexandre TORGUE 
101471fedb01SJoao Pinto static void stmmac_display_tx_rings(struct stmmac_priv *priv)
101571fedb01SJoao Pinto {
1016ce736788SJoao Pinto 	u32 tx_cnt = priv->plat->tx_queues_to_use;
101771fedb01SJoao Pinto 	void *head_tx;
1018ce736788SJoao Pinto 	u32 queue;
1019ce736788SJoao Pinto 
1020ce736788SJoao Pinto 	/* Display TX rings */
1021ce736788SJoao Pinto 	for (queue = 0; queue < tx_cnt; queue++) {
1022ce736788SJoao Pinto 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1023ce736788SJoao Pinto 
1024ce736788SJoao Pinto 		pr_info("\tTX Queue %d rings\n", queue);
102571fedb01SJoao Pinto 
102671fedb01SJoao Pinto 		if (priv->extend_desc)
1027ce736788SJoao Pinto 			head_tx = (void *)tx_q->dma_etx;
102871fedb01SJoao Pinto 		else
1029ce736788SJoao Pinto 			head_tx = (void *)tx_q->dma_tx;
103071fedb01SJoao Pinto 
103142de047dSJose Abreu 		stmmac_display_ring(priv, head_tx, DMA_TX_SIZE, false);
1032c24602efSGiuseppe CAVALLARO 	}
1033ce736788SJoao Pinto }
1034c24602efSGiuseppe CAVALLARO 
/* stmmac_display_rings - dump the RX rings first, then the TX rings */
static void stmmac_display_rings(struct stmmac_priv *priv)
{
	stmmac_display_rx_rings(priv);
	stmmac_display_tx_rings(priv);
}
104371fedb01SJoao Pinto 
1044286a8372SGiuseppe CAVALLARO static int stmmac_set_bfsize(int mtu, int bufsize)
1045286a8372SGiuseppe CAVALLARO {
1046286a8372SGiuseppe CAVALLARO 	int ret = bufsize;
1047286a8372SGiuseppe CAVALLARO 
1048286a8372SGiuseppe CAVALLARO 	if (mtu >= BUF_SIZE_4KiB)
1049286a8372SGiuseppe CAVALLARO 		ret = BUF_SIZE_8KiB;
1050286a8372SGiuseppe CAVALLARO 	else if (mtu >= BUF_SIZE_2KiB)
1051286a8372SGiuseppe CAVALLARO 		ret = BUF_SIZE_4KiB;
1052d916701cSGiuseppe CAVALLARO 	else if (mtu > DEFAULT_BUFSIZE)
1053286a8372SGiuseppe CAVALLARO 		ret = BUF_SIZE_2KiB;
1054286a8372SGiuseppe CAVALLARO 	else
1055d916701cSGiuseppe CAVALLARO 		ret = DEFAULT_BUFSIZE;
1056286a8372SGiuseppe CAVALLARO 
1057286a8372SGiuseppe CAVALLARO 	return ret;
1058286a8372SGiuseppe CAVALLARO }
1059286a8372SGiuseppe CAVALLARO 
106032ceabcaSGiuseppe CAVALLARO /**
106171fedb01SJoao Pinto  * stmmac_clear_rx_descriptors - clear RX descriptors
106232ceabcaSGiuseppe CAVALLARO  * @priv: driver private structure
106354139cf3SJoao Pinto  * @queue: RX queue index
106471fedb01SJoao Pinto  * Description: this function is called to clear the RX descriptors
106532ceabcaSGiuseppe CAVALLARO  * in case of both basic and extended descriptors are used.
106632ceabcaSGiuseppe CAVALLARO  */
106754139cf3SJoao Pinto static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv, u32 queue)
1068c24602efSGiuseppe CAVALLARO {
106954139cf3SJoao Pinto 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
10705bacd778SLABBE Corentin 	int i;
1071c24602efSGiuseppe CAVALLARO 
107271fedb01SJoao Pinto 	/* Clear the RX descriptors */
10735bacd778SLABBE Corentin 	for (i = 0; i < DMA_RX_SIZE; i++)
10745bacd778SLABBE Corentin 		if (priv->extend_desc)
107542de047dSJose Abreu 			stmmac_init_rx_desc(priv, &rx_q->dma_erx[i].basic,
10765bacd778SLABBE Corentin 					priv->use_riwt, priv->mode,
10775bacd778SLABBE Corentin 					(i == DMA_RX_SIZE - 1));
10785bacd778SLABBE Corentin 		else
107942de047dSJose Abreu 			stmmac_init_rx_desc(priv, &rx_q->dma_rx[i],
10805bacd778SLABBE Corentin 					priv->use_riwt, priv->mode,
10815bacd778SLABBE Corentin 					(i == DMA_RX_SIZE - 1));
108271fedb01SJoao Pinto }
108371fedb01SJoao Pinto 
108471fedb01SJoao Pinto /**
108571fedb01SJoao Pinto  * stmmac_clear_tx_descriptors - clear tx descriptors
108671fedb01SJoao Pinto  * @priv: driver private structure
1087ce736788SJoao Pinto  * @queue: TX queue index.
108871fedb01SJoao Pinto  * Description: this function is called to clear the TX descriptors
108971fedb01SJoao Pinto  * in case of both basic and extended descriptors are used.
109071fedb01SJoao Pinto  */
1091ce736788SJoao Pinto static void stmmac_clear_tx_descriptors(struct stmmac_priv *priv, u32 queue)
109271fedb01SJoao Pinto {
1093ce736788SJoao Pinto 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
109471fedb01SJoao Pinto 	int i;
109571fedb01SJoao Pinto 
109671fedb01SJoao Pinto 	/* Clear the TX descriptors */
10975bacd778SLABBE Corentin 	for (i = 0; i < DMA_TX_SIZE; i++)
10985bacd778SLABBE Corentin 		if (priv->extend_desc)
109942de047dSJose Abreu 			stmmac_init_tx_desc(priv, &tx_q->dma_etx[i].basic,
110042de047dSJose Abreu 					priv->mode, (i == DMA_TX_SIZE - 1));
11015bacd778SLABBE Corentin 		else
110242de047dSJose Abreu 			stmmac_init_tx_desc(priv, &tx_q->dma_tx[i],
110342de047dSJose Abreu 					priv->mode, (i == DMA_TX_SIZE - 1));
1104c24602efSGiuseppe CAVALLARO }
1105c24602efSGiuseppe CAVALLARO 
1106732fdf0eSGiuseppe CAVALLARO /**
110771fedb01SJoao Pinto  * stmmac_clear_descriptors - clear descriptors
110871fedb01SJoao Pinto  * @priv: driver private structure
110971fedb01SJoao Pinto  * Description: this function is called to clear the TX and RX descriptors
111071fedb01SJoao Pinto  * in case of both basic and extended descriptors are used.
111171fedb01SJoao Pinto  */
111271fedb01SJoao Pinto static void stmmac_clear_descriptors(struct stmmac_priv *priv)
111371fedb01SJoao Pinto {
111454139cf3SJoao Pinto 	u32 rx_queue_cnt = priv->plat->rx_queues_to_use;
1115ce736788SJoao Pinto 	u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
111654139cf3SJoao Pinto 	u32 queue;
111754139cf3SJoao Pinto 
111871fedb01SJoao Pinto 	/* Clear the RX descriptors */
111954139cf3SJoao Pinto 	for (queue = 0; queue < rx_queue_cnt; queue++)
112054139cf3SJoao Pinto 		stmmac_clear_rx_descriptors(priv, queue);
112171fedb01SJoao Pinto 
112271fedb01SJoao Pinto 	/* Clear the TX descriptors */
1123ce736788SJoao Pinto 	for (queue = 0; queue < tx_queue_cnt; queue++)
1124ce736788SJoao Pinto 		stmmac_clear_tx_descriptors(priv, queue);
112571fedb01SJoao Pinto }
112671fedb01SJoao Pinto 
112771fedb01SJoao Pinto /**
1128732fdf0eSGiuseppe CAVALLARO  * stmmac_init_rx_buffers - init the RX descriptor buffer.
1129732fdf0eSGiuseppe CAVALLARO  * @priv: driver private structure
1130732fdf0eSGiuseppe CAVALLARO  * @p: descriptor pointer
1131732fdf0eSGiuseppe CAVALLARO  * @i: descriptor index
113254139cf3SJoao Pinto  * @flags: gfp flag
113354139cf3SJoao Pinto  * @queue: RX queue index
1134732fdf0eSGiuseppe CAVALLARO  * Description: this function is called to allocate a receive buffer, perform
1135732fdf0eSGiuseppe CAVALLARO  * the DMA mapping and init the descriptor.
1136732fdf0eSGiuseppe CAVALLARO  */
1137c24602efSGiuseppe CAVALLARO static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
113854139cf3SJoao Pinto 				  int i, gfp_t flags, u32 queue)
1139c24602efSGiuseppe CAVALLARO {
114054139cf3SJoao Pinto 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1141c24602efSGiuseppe CAVALLARO 	struct sk_buff *skb;
1142c24602efSGiuseppe CAVALLARO 
11434ec49a37SVineet Gupta 	skb = __netdev_alloc_skb_ip_align(priv->dev, priv->dma_buf_sz, flags);
114456329137SBartlomiej Zolnierkiewicz 	if (!skb) {
114538ddc59dSLABBE Corentin 		netdev_err(priv->dev,
114638ddc59dSLABBE Corentin 			   "%s: Rx init fails; skb is NULL\n", __func__);
114756329137SBartlomiej Zolnierkiewicz 		return -ENOMEM;
1148c24602efSGiuseppe CAVALLARO 	}
114954139cf3SJoao Pinto 	rx_q->rx_skbuff[i] = skb;
115054139cf3SJoao Pinto 	rx_q->rx_skbuff_dma[i] = dma_map_single(priv->device, skb->data,
1151c24602efSGiuseppe CAVALLARO 						priv->dma_buf_sz,
1152c24602efSGiuseppe CAVALLARO 						DMA_FROM_DEVICE);
115354139cf3SJoao Pinto 	if (dma_mapping_error(priv->device, rx_q->rx_skbuff_dma[i])) {
115438ddc59dSLABBE Corentin 		netdev_err(priv->dev, "%s: DMA mapping error\n", __func__);
115556329137SBartlomiej Zolnierkiewicz 		dev_kfree_skb_any(skb);
115656329137SBartlomiej Zolnierkiewicz 		return -EINVAL;
115756329137SBartlomiej Zolnierkiewicz 	}
1158c24602efSGiuseppe CAVALLARO 
1159f748be53SAlexandre TORGUE 	if (priv->synopsys_id >= DWMAC_CORE_4_00)
116054139cf3SJoao Pinto 		p->des0 = cpu_to_le32(rx_q->rx_skbuff_dma[i]);
1161f748be53SAlexandre TORGUE 	else
116254139cf3SJoao Pinto 		p->des2 = cpu_to_le32(rx_q->rx_skbuff_dma[i]);
1163c24602efSGiuseppe CAVALLARO 
11642c520b1cSJose Abreu 	if (priv->dma_buf_sz == BUF_SIZE_16KiB)
11652c520b1cSJose Abreu 		stmmac_init_desc3(priv, p);
1166c24602efSGiuseppe CAVALLARO 
1167c24602efSGiuseppe CAVALLARO 	return 0;
1168c24602efSGiuseppe CAVALLARO }
1169c24602efSGiuseppe CAVALLARO 
117071fedb01SJoao Pinto /**
117171fedb01SJoao Pinto  * stmmac_free_rx_buffer - free RX dma buffers
117271fedb01SJoao Pinto  * @priv: private structure
117354139cf3SJoao Pinto  * @queue: RX queue index
117471fedb01SJoao Pinto  * @i: buffer index.
117571fedb01SJoao Pinto  */
117654139cf3SJoao Pinto static void stmmac_free_rx_buffer(struct stmmac_priv *priv, u32 queue, int i)
117756329137SBartlomiej Zolnierkiewicz {
117854139cf3SJoao Pinto 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
117954139cf3SJoao Pinto 
118054139cf3SJoao Pinto 	if (rx_q->rx_skbuff[i]) {
118154139cf3SJoao Pinto 		dma_unmap_single(priv->device, rx_q->rx_skbuff_dma[i],
118256329137SBartlomiej Zolnierkiewicz 				 priv->dma_buf_sz, DMA_FROM_DEVICE);
118354139cf3SJoao Pinto 		dev_kfree_skb_any(rx_q->rx_skbuff[i]);
118456329137SBartlomiej Zolnierkiewicz 	}
118554139cf3SJoao Pinto 	rx_q->rx_skbuff[i] = NULL;
118656329137SBartlomiej Zolnierkiewicz }
118756329137SBartlomiej Zolnierkiewicz 
/**
 * stmmac_free_tx_buffer - free TX dma buffers
 * @priv: private structure
 * @queue: TX queue index
 * @i: buffer index.
 */
static void stmmac_free_tx_buffer(struct stmmac_priv *priv, u32 queue, int i)
{
	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];

	if (tx_q->tx_skbuff_dma[i].buf) {
		/* Buffers mapped as pages must be unmapped with
		 * dma_unmap_page(); single mappings with dma_unmap_single().
		 */
		if (tx_q->tx_skbuff_dma[i].map_as_page)
			dma_unmap_page(priv->device,
				       tx_q->tx_skbuff_dma[i].buf,
				       tx_q->tx_skbuff_dma[i].len,
				       DMA_TO_DEVICE);
		else
			dma_unmap_single(priv->device,
					 tx_q->tx_skbuff_dma[i].buf,
					 tx_q->tx_skbuff_dma[i].len,
					 DMA_TO_DEVICE);
	}

	/* Drop the skb and reset the slot's bookkeeping */
	if (tx_q->tx_skbuff[i]) {
		dev_kfree_skb_any(tx_q->tx_skbuff[i]);
		tx_q->tx_skbuff[i] = NULL;
		tx_q->tx_skbuff_dma[i].buf = 0;
		tx_q->tx_skbuff_dma[i].map_as_page = false;
	}
}
121871fedb01SJoao Pinto 
/**
 * init_dma_rx_desc_rings - init the RX descriptor rings
 * @dev: net device structure
 * @flags: gfp flag.
 * Description: this function initializes the DMA RX descriptors
 * and allocates the socket buffers. It supports the chained and ring
 * modes.
 * Return: 0 on success, a negative errno from stmmac_init_rx_buffers()
 * otherwise (all buffers allocated so far are freed on failure).
 */
static int init_dma_rx_desc_rings(struct net_device *dev, gfp_t flags)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	u32 rx_count = priv->plat->rx_queues_to_use;
	int ret = -ENOMEM;
	int bfsize = 0;
	/* must stay signed: the unwind loop below tests queue >= 0 */
	int queue;
	int i;

	/* Pick the RX buffer size: 16KiB if the core supports it for this
	 * MTU, otherwise derive it from the MTU.
	 */
	bfsize = stmmac_set_16kib_bfsize(priv, dev->mtu);
	if (bfsize < 0)
		bfsize = 0;

	if (bfsize < BUF_SIZE_16KiB)
		bfsize = stmmac_set_bfsize(dev->mtu, priv->dma_buf_sz);

	priv->dma_buf_sz = bfsize;

	/* RX INITIALIZATION */
	netif_dbg(priv, probe, priv->dev,
		  "SKB addresses:\nskb\t\tskb data\tdma data\n");

	for (queue = 0; queue < rx_count; queue++) {
		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];

		netif_dbg(priv, probe, priv->dev,
			  "(%s) dma_rx_phy=0x%08x\n", __func__,
			  (u32)rx_q->dma_rx_phy);

		/* Allocate and map one skb per descriptor slot */
		for (i = 0; i < DMA_RX_SIZE; i++) {
			struct dma_desc *p;

			if (priv->extend_desc)
				p = &((rx_q->dma_erx + i)->basic);
			else
				p = rx_q->dma_rx + i;

			ret = stmmac_init_rx_buffers(priv, p, i, flags,
						     queue);
			if (ret)
				goto err_init_rx_buffers;

			netif_dbg(priv, probe, priv->dev, "[%p]\t[%p]\t[%x]\n",
				  rx_q->rx_skbuff[i], rx_q->rx_skbuff[i]->data,
				  (unsigned int)rx_q->rx_skbuff_dma[i]);
		}

		rx_q->cur_rx = 0;
		/* i == DMA_RX_SIZE here, so dirty_rx starts at 0 (mod ring) */
		rx_q->dirty_rx = (unsigned int)(i - DMA_RX_SIZE);

		stmmac_clear_rx_descriptors(priv, queue);

		/* Setup the chained descriptor addresses */
		if (priv->mode == STMMAC_CHAIN_MODE) {
			if (priv->extend_desc)
				stmmac_mode_init(priv, rx_q->dma_erx,
						rx_q->dma_rx_phy, DMA_RX_SIZE, 1);
			else
				stmmac_mode_init(priv, rx_q->dma_rx,
						rx_q->dma_rx_phy, DMA_RX_SIZE, 0);
		}
	}

	/* NOTE(review): buf_sz appears to be a file-scope copy of the buffer
	 * size (declared outside this chunk) — confirm against the full file.
	 */
	buf_sz = bfsize;

	return 0;

err_init_rx_buffers:
	/* Unwind: free the i buffers of the failing queue, then all
	 * DMA_RX_SIZE buffers of every earlier queue, down to queue 0.
	 */
	while (queue >= 0) {
		while (--i >= 0)
			stmmac_free_rx_buffer(priv, queue, i);

		if (queue == 0)
			break;

		i = DMA_RX_SIZE;
		queue--;
	}

	return ret;
}
130871fedb01SJoao Pinto 
130971fedb01SJoao Pinto /**
131071fedb01SJoao Pinto  * init_dma_tx_desc_rings - init the TX descriptor rings
131171fedb01SJoao Pinto  * @dev: net device structure.
131271fedb01SJoao Pinto  * Description: this function initializes the DMA TX descriptors
131371fedb01SJoao Pinto  * and allocates the socket buffers. It supports the chained and ring
131471fedb01SJoao Pinto  * modes.
131571fedb01SJoao Pinto  */
131671fedb01SJoao Pinto static int init_dma_tx_desc_rings(struct net_device *dev)
131771fedb01SJoao Pinto {
131871fedb01SJoao Pinto 	struct stmmac_priv *priv = netdev_priv(dev);
1319ce736788SJoao Pinto 	u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1320ce736788SJoao Pinto 	u32 queue;
132171fedb01SJoao Pinto 	int i;
132271fedb01SJoao Pinto 
1323ce736788SJoao Pinto 	for (queue = 0; queue < tx_queue_cnt; queue++) {
1324ce736788SJoao Pinto 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1325ce736788SJoao Pinto 
132671fedb01SJoao Pinto 		netif_dbg(priv, probe, priv->dev,
1327ce736788SJoao Pinto 			  "(%s) dma_tx_phy=0x%08x\n", __func__,
1328ce736788SJoao Pinto 			 (u32)tx_q->dma_tx_phy);
132971fedb01SJoao Pinto 
133071fedb01SJoao Pinto 		/* Setup the chained descriptor addresses */
133171fedb01SJoao Pinto 		if (priv->mode == STMMAC_CHAIN_MODE) {
133271fedb01SJoao Pinto 			if (priv->extend_desc)
13332c520b1cSJose Abreu 				stmmac_mode_init(priv, tx_q->dma_etx,
13342c520b1cSJose Abreu 						tx_q->dma_tx_phy, DMA_TX_SIZE, 1);
133571fedb01SJoao Pinto 			else
13362c520b1cSJose Abreu 				stmmac_mode_init(priv, tx_q->dma_tx,
13372c520b1cSJose Abreu 						tx_q->dma_tx_phy, DMA_TX_SIZE, 0);
1338c24602efSGiuseppe CAVALLARO 		}
1339286a8372SGiuseppe CAVALLARO 
1340e3ad57c9SGiuseppe Cavallaro 		for (i = 0; i < DMA_TX_SIZE; i++) {
1341c24602efSGiuseppe CAVALLARO 			struct dma_desc *p;
1342c24602efSGiuseppe CAVALLARO 			if (priv->extend_desc)
1343ce736788SJoao Pinto 				p = &((tx_q->dma_etx + i)->basic);
1344c24602efSGiuseppe CAVALLARO 			else
1345ce736788SJoao Pinto 				p = tx_q->dma_tx + i;
1346f748be53SAlexandre TORGUE 
1347f748be53SAlexandre TORGUE 			if (priv->synopsys_id >= DWMAC_CORE_4_00) {
1348f748be53SAlexandre TORGUE 				p->des0 = 0;
1349f748be53SAlexandre TORGUE 				p->des1 = 0;
1350c24602efSGiuseppe CAVALLARO 				p->des2 = 0;
1351f748be53SAlexandre TORGUE 				p->des3 = 0;
1352f748be53SAlexandre TORGUE 			} else {
1353f748be53SAlexandre TORGUE 				p->des2 = 0;
1354f748be53SAlexandre TORGUE 			}
1355f748be53SAlexandre TORGUE 
1356ce736788SJoao Pinto 			tx_q->tx_skbuff_dma[i].buf = 0;
1357ce736788SJoao Pinto 			tx_q->tx_skbuff_dma[i].map_as_page = false;
1358ce736788SJoao Pinto 			tx_q->tx_skbuff_dma[i].len = 0;
1359ce736788SJoao Pinto 			tx_q->tx_skbuff_dma[i].last_segment = false;
1360ce736788SJoao Pinto 			tx_q->tx_skbuff[i] = NULL;
13614a7d666aSGiuseppe CAVALLARO 		}
1362c24602efSGiuseppe CAVALLARO 
1363ce736788SJoao Pinto 		tx_q->dirty_tx = 0;
1364ce736788SJoao Pinto 		tx_q->cur_tx = 0;
13658d212a9eSNiklas Cassel 		tx_q->mss = 0;
1366ce736788SJoao Pinto 
1367c22a3f48SJoao Pinto 		netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue));
1368c22a3f48SJoao Pinto 	}
13697ac6653aSJeff Kirsher 
137071fedb01SJoao Pinto 	return 0;
137171fedb01SJoao Pinto }
137271fedb01SJoao Pinto 
/**
 * init_dma_desc_rings - init the RX/TX descriptor rings
 * @dev: net device structure
 * @flags: gfp flag.
 * Description: this function initializes the DMA RX/TX descriptors
 * and allocates the socket buffers. It supports the chained and ring
 * modes.
 * Return: 0 on success, a negative errno from the RX/TX ring init otherwise.
 */
static int init_dma_desc_rings(struct net_device *dev, gfp_t flags)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	int ret;

	ret = init_dma_rx_desc_rings(dev, flags);
	if (ret)
		return ret;

	ret = init_dma_tx_desc_rings(dev);

	/* Put all descriptors into a known clean state before use */
	stmmac_clear_descriptors(priv);

	if (netif_msg_hw(priv))
		stmmac_display_rings(priv);

	return ret;
}
13997ac6653aSJeff Kirsher 
140071fedb01SJoao Pinto /**
140171fedb01SJoao Pinto  * dma_free_rx_skbufs - free RX dma buffers
140271fedb01SJoao Pinto  * @priv: private structure
140354139cf3SJoao Pinto  * @queue: RX queue index
140471fedb01SJoao Pinto  */
140554139cf3SJoao Pinto static void dma_free_rx_skbufs(struct stmmac_priv *priv, u32 queue)
14067ac6653aSJeff Kirsher {
14077ac6653aSJeff Kirsher 	int i;
14087ac6653aSJeff Kirsher 
1409e3ad57c9SGiuseppe Cavallaro 	for (i = 0; i < DMA_RX_SIZE; i++)
141054139cf3SJoao Pinto 		stmmac_free_rx_buffer(priv, queue, i);
14117ac6653aSJeff Kirsher }
14127ac6653aSJeff Kirsher 
141371fedb01SJoao Pinto /**
141471fedb01SJoao Pinto  * dma_free_tx_skbufs - free TX dma buffers
141571fedb01SJoao Pinto  * @priv: private structure
1416ce736788SJoao Pinto  * @queue: TX queue index
141771fedb01SJoao Pinto  */
1418ce736788SJoao Pinto static void dma_free_tx_skbufs(struct stmmac_priv *priv, u32 queue)
14197ac6653aSJeff Kirsher {
14207ac6653aSJeff Kirsher 	int i;
14217ac6653aSJeff Kirsher 
142271fedb01SJoao Pinto 	for (i = 0; i < DMA_TX_SIZE; i++)
1423ce736788SJoao Pinto 		stmmac_free_tx_buffer(priv, queue, i);
14247ac6653aSJeff Kirsher }
14257ac6653aSJeff Kirsher 
1426732fdf0eSGiuseppe CAVALLARO /**
142754139cf3SJoao Pinto  * free_dma_rx_desc_resources - free RX dma desc resources
142854139cf3SJoao Pinto  * @priv: private structure
142954139cf3SJoao Pinto  */
143054139cf3SJoao Pinto static void free_dma_rx_desc_resources(struct stmmac_priv *priv)
143154139cf3SJoao Pinto {
143254139cf3SJoao Pinto 	u32 rx_count = priv->plat->rx_queues_to_use;
143354139cf3SJoao Pinto 	u32 queue;
143454139cf3SJoao Pinto 
143554139cf3SJoao Pinto 	/* Free RX queue resources */
143654139cf3SJoao Pinto 	for (queue = 0; queue < rx_count; queue++) {
143754139cf3SJoao Pinto 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
143854139cf3SJoao Pinto 
143954139cf3SJoao Pinto 		/* Release the DMA RX socket buffers */
144054139cf3SJoao Pinto 		dma_free_rx_skbufs(priv, queue);
144154139cf3SJoao Pinto 
144254139cf3SJoao Pinto 		/* Free DMA regions of consistent memory previously allocated */
144354139cf3SJoao Pinto 		if (!priv->extend_desc)
144454139cf3SJoao Pinto 			dma_free_coherent(priv->device,
144554139cf3SJoao Pinto 					  DMA_RX_SIZE * sizeof(struct dma_desc),
144654139cf3SJoao Pinto 					  rx_q->dma_rx, rx_q->dma_rx_phy);
144754139cf3SJoao Pinto 		else
144854139cf3SJoao Pinto 			dma_free_coherent(priv->device, DMA_RX_SIZE *
144954139cf3SJoao Pinto 					  sizeof(struct dma_extended_desc),
145054139cf3SJoao Pinto 					  rx_q->dma_erx, rx_q->dma_rx_phy);
145154139cf3SJoao Pinto 
145254139cf3SJoao Pinto 		kfree(rx_q->rx_skbuff_dma);
145354139cf3SJoao Pinto 		kfree(rx_q->rx_skbuff);
145454139cf3SJoao Pinto 	}
145554139cf3SJoao Pinto }
145654139cf3SJoao Pinto 
145754139cf3SJoao Pinto /**
1458ce736788SJoao Pinto  * free_dma_tx_desc_resources - free TX dma desc resources
1459ce736788SJoao Pinto  * @priv: private structure
1460ce736788SJoao Pinto  */
1461ce736788SJoao Pinto static void free_dma_tx_desc_resources(struct stmmac_priv *priv)
1462ce736788SJoao Pinto {
1463ce736788SJoao Pinto 	u32 tx_count = priv->plat->tx_queues_to_use;
146462242260SChristophe Jaillet 	u32 queue;
1465ce736788SJoao Pinto 
1466ce736788SJoao Pinto 	/* Free TX queue resources */
1467ce736788SJoao Pinto 	for (queue = 0; queue < tx_count; queue++) {
1468ce736788SJoao Pinto 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1469ce736788SJoao Pinto 
1470ce736788SJoao Pinto 		/* Release the DMA TX socket buffers */
1471ce736788SJoao Pinto 		dma_free_tx_skbufs(priv, queue);
1472ce736788SJoao Pinto 
1473ce736788SJoao Pinto 		/* Free DMA regions of consistent memory previously allocated */
1474ce736788SJoao Pinto 		if (!priv->extend_desc)
1475ce736788SJoao Pinto 			dma_free_coherent(priv->device,
1476ce736788SJoao Pinto 					  DMA_TX_SIZE * sizeof(struct dma_desc),
1477ce736788SJoao Pinto 					  tx_q->dma_tx, tx_q->dma_tx_phy);
1478ce736788SJoao Pinto 		else
1479ce736788SJoao Pinto 			dma_free_coherent(priv->device, DMA_TX_SIZE *
1480ce736788SJoao Pinto 					  sizeof(struct dma_extended_desc),
1481ce736788SJoao Pinto 					  tx_q->dma_etx, tx_q->dma_tx_phy);
1482ce736788SJoao Pinto 
1483ce736788SJoao Pinto 		kfree(tx_q->tx_skbuff_dma);
1484ce736788SJoao Pinto 		kfree(tx_q->tx_skbuff);
1485ce736788SJoao Pinto 	}
1486ce736788SJoao Pinto }
1487ce736788SJoao Pinto 
/**
 * alloc_dma_rx_desc_resources - alloc RX resources.
 * @priv: private structure
 * Description: according to which descriptor can be used (extend or basic)
 * this function allocates the resources for the RX path. It pre-allocates
 * the per-queue bookkeeping arrays and the coherent descriptor memory in
 * order to allow zero-copy reception.
 * Return: 0 on success, -ENOMEM on any allocation failure (everything
 * allocated so far is released via free_dma_rx_desc_resources()).
 */
static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv)
{
	u32 rx_count = priv->plat->rx_queues_to_use;
	int ret = -ENOMEM;
	u32 queue;

	/* RX queues buffers and DMA */
	for (queue = 0; queue < rx_count; queue++) {
		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];

		rx_q->queue_index = queue;
		rx_q->priv_data = priv;

		/* Per-slot DMA handle array */
		rx_q->rx_skbuff_dma = kmalloc_array(DMA_RX_SIZE,
						    sizeof(dma_addr_t),
						    GFP_KERNEL);
		if (!rx_q->rx_skbuff_dma)
			goto err_dma;

		/* Per-slot skb pointer array */
		rx_q->rx_skbuff = kmalloc_array(DMA_RX_SIZE,
						sizeof(struct sk_buff *),
						GFP_KERNEL);
		if (!rx_q->rx_skbuff)
			goto err_dma;

		/* Coherent descriptor ring, extended or basic layout */
		if (priv->extend_desc) {
			rx_q->dma_erx = dma_zalloc_coherent(priv->device,
							    DMA_RX_SIZE *
							    sizeof(struct
							    dma_extended_desc),
							    &rx_q->dma_rx_phy,
							    GFP_KERNEL);
			if (!rx_q->dma_erx)
				goto err_dma;

		} else {
			rx_q->dma_rx = dma_zalloc_coherent(priv->device,
							   DMA_RX_SIZE *
							   sizeof(struct
							   dma_desc),
							   &rx_q->dma_rx_phy,
							   GFP_KERNEL);
			if (!rx_q->dma_rx)
				goto err_dma;
		}
	}

	return 0;

err_dma:
	free_dma_rx_desc_resources(priv);

	return ret;
}
155071fedb01SJoao Pinto 
/**
 * alloc_dma_tx_desc_resources - alloc TX resources.
 * @priv: private structure
 * Description: according to which descriptor can be used (extend or basic)
 * this function allocates the resources for the TX path: the per-queue
 * bookkeeping arrays and the coherent descriptor memory.
 * Return: 0 on success, -ENOMEM on any allocation failure (everything
 * allocated so far is released via free_dma_tx_desc_resources()).
 */
static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv)
{
	u32 tx_count = priv->plat->tx_queues_to_use;
	int ret = -ENOMEM;
	u32 queue;

	/* TX queues buffers and DMA */
	for (queue = 0; queue < tx_count; queue++) {
		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];

		tx_q->queue_index = queue;
		tx_q->priv_data = priv;

		/* Per-slot DMA bookkeeping array */
		tx_q->tx_skbuff_dma = kmalloc_array(DMA_TX_SIZE,
						    sizeof(*tx_q->tx_skbuff_dma),
						    GFP_KERNEL);
		if (!tx_q->tx_skbuff_dma)
			goto err_dma;

		/* Per-slot skb pointer array */
		tx_q->tx_skbuff = kmalloc_array(DMA_TX_SIZE,
						sizeof(struct sk_buff *),
						GFP_KERNEL);
		if (!tx_q->tx_skbuff)
			goto err_dma;

		/* Coherent descriptor ring, extended or basic layout */
		if (priv->extend_desc) {
			tx_q->dma_etx = dma_zalloc_coherent(priv->device,
							    DMA_TX_SIZE *
							    sizeof(struct
							    dma_extended_desc),
							    &tx_q->dma_tx_phy,
							    GFP_KERNEL);
			if (!tx_q->dma_etx)
				goto err_dma;
		} else {
			tx_q->dma_tx = dma_zalloc_coherent(priv->device,
							   DMA_TX_SIZE *
							   sizeof(struct
								  dma_desc),
							   &tx_q->dma_tx_phy,
							   GFP_KERNEL);
			if (!tx_q->dma_tx)
				goto err_dma;
		}
	}

	return 0;

err_dma:
	free_dma_tx_desc_resources(priv);

	return ret;
}
161209f8d696SSrinivas Kandagatla 
/**
 * alloc_dma_desc_resources - alloc TX/RX resources.
 * @priv: private structure
 * Description: according to which descriptor can be used (extend or basic)
 * this function allocates the resources for TX and RX paths. In case of
 * reception, for example, it pre-allocated the RX socket buffer in order to
 * allow zero-copy mechanism.
 */
static int alloc_dma_desc_resources(struct stmmac_priv *priv)
{
	int ret;

	/* RX first, then TX; stop at the first failure */
	ret = alloc_dma_rx_desc_resources(priv);
	if (ret)
		return ret;

	return alloc_dma_tx_desc_resources(priv);
}
163371fedb01SJoao Pinto 
/**
 * free_dma_desc_resources - free dma desc resources
 * @priv: private structure
 */
static void free_dma_desc_resources(struct stmmac_priv *priv)
{
	/* Release RX then TX queue resources */
	free_dma_rx_desc_resources(priv);
	free_dma_tx_desc_resources(priv);
}
164671fedb01SJoao Pinto 
164771fedb01SJoao Pinto /**
16489eb12474Sjpinto  *  stmmac_mac_enable_rx_queues - Enable MAC rx queues
16499eb12474Sjpinto  *  @priv: driver private structure
16509eb12474Sjpinto  *  Description: It is used for enabling the rx queues in the MAC
16519eb12474Sjpinto  */
16529eb12474Sjpinto static void stmmac_mac_enable_rx_queues(struct stmmac_priv *priv)
16539eb12474Sjpinto {
16544f6046f5SJoao Pinto 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
16554f6046f5SJoao Pinto 	int queue;
16564f6046f5SJoao Pinto 	u8 mode;
16579eb12474Sjpinto 
16584f6046f5SJoao Pinto 	for (queue = 0; queue < rx_queues_count; queue++) {
16594f6046f5SJoao Pinto 		mode = priv->plat->rx_queues_cfg[queue].mode_to_use;
1660c10d4c82SJose Abreu 		stmmac_rx_queue_enable(priv, priv->hw, mode, queue);
16614f6046f5SJoao Pinto 	}
16629eb12474Sjpinto }
16639eb12474Sjpinto 
16649eb12474Sjpinto /**
1665ae4f0d46SJoao Pinto  * stmmac_start_rx_dma - start RX DMA channel
1666ae4f0d46SJoao Pinto  * @priv: driver private structure
1667ae4f0d46SJoao Pinto  * @chan: RX channel index
1668ae4f0d46SJoao Pinto  * Description:
1669ae4f0d46SJoao Pinto  * This starts a RX DMA channel
1670ae4f0d46SJoao Pinto  */
1671ae4f0d46SJoao Pinto static void stmmac_start_rx_dma(struct stmmac_priv *priv, u32 chan)
1672ae4f0d46SJoao Pinto {
1673ae4f0d46SJoao Pinto 	netdev_dbg(priv->dev, "DMA RX processes started in channel %d\n", chan);
1674a4e887faSJose Abreu 	stmmac_start_rx(priv, priv->ioaddr, chan);
1675ae4f0d46SJoao Pinto }
1676ae4f0d46SJoao Pinto 
/**
 * stmmac_start_tx_dma - start TX DMA channel
 * @priv: driver private structure
 * @chan: TX channel index
 * Description:
 * This starts a TX DMA channel (thin wrapper over the HW-specific
 * stmmac_start_tx() callback, with a debug trace).
 */
static void stmmac_start_tx_dma(struct stmmac_priv *priv, u32 chan)
{
	netdev_dbg(priv->dev, "DMA TX processes started in channel %d\n", chan);
	stmmac_start_tx(priv, priv->ioaddr, chan);
}
1689ae4f0d46SJoao Pinto 
/**
 * stmmac_stop_rx_dma - stop RX DMA channel
 * @priv: driver private structure
 * @chan: RX channel index
 * Description:
 * This stops a RX DMA channel (thin wrapper over the HW-specific
 * stmmac_stop_rx() callback, with a debug trace).
 */
static void stmmac_stop_rx_dma(struct stmmac_priv *priv, u32 chan)
{
	netdev_dbg(priv->dev, "DMA RX processes stopped in channel %d\n", chan);
	stmmac_stop_rx(priv, priv->ioaddr, chan);
}
1702ae4f0d46SJoao Pinto 
/**
 * stmmac_stop_tx_dma - stop TX DMA channel
 * @priv: driver private structure
 * @chan: TX channel index
 * Description:
 * This stops a TX DMA channel (thin wrapper over the HW-specific
 * stmmac_stop_tx() callback, with a debug trace).
 */
static void stmmac_stop_tx_dma(struct stmmac_priv *priv, u32 chan)
{
	netdev_dbg(priv->dev, "DMA TX processes stopped in channel %d\n", chan);
	stmmac_stop_tx(priv, priv->ioaddr, chan);
}
1715ae4f0d46SJoao Pinto 
1716ae4f0d46SJoao Pinto /**
1717ae4f0d46SJoao Pinto  * stmmac_start_all_dma - start all RX and TX DMA channels
1718ae4f0d46SJoao Pinto  * @priv: driver private structure
1719ae4f0d46SJoao Pinto  * Description:
1720ae4f0d46SJoao Pinto  * This starts all the RX and TX DMA channels
1721ae4f0d46SJoao Pinto  */
1722ae4f0d46SJoao Pinto static void stmmac_start_all_dma(struct stmmac_priv *priv)
1723ae4f0d46SJoao Pinto {
1724ae4f0d46SJoao Pinto 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
1725ae4f0d46SJoao Pinto 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
1726ae4f0d46SJoao Pinto 	u32 chan = 0;
1727ae4f0d46SJoao Pinto 
1728ae4f0d46SJoao Pinto 	for (chan = 0; chan < rx_channels_count; chan++)
1729ae4f0d46SJoao Pinto 		stmmac_start_rx_dma(priv, chan);
1730ae4f0d46SJoao Pinto 
1731ae4f0d46SJoao Pinto 	for (chan = 0; chan < tx_channels_count; chan++)
1732ae4f0d46SJoao Pinto 		stmmac_start_tx_dma(priv, chan);
1733ae4f0d46SJoao Pinto }
1734ae4f0d46SJoao Pinto 
1735ae4f0d46SJoao Pinto /**
1736ae4f0d46SJoao Pinto  * stmmac_stop_all_dma - stop all RX and TX DMA channels
1737ae4f0d46SJoao Pinto  * @priv: driver private structure
1738ae4f0d46SJoao Pinto  * Description:
1739ae4f0d46SJoao Pinto  * This stops the RX and TX DMA channels
1740ae4f0d46SJoao Pinto  */
1741ae4f0d46SJoao Pinto static void stmmac_stop_all_dma(struct stmmac_priv *priv)
1742ae4f0d46SJoao Pinto {
1743ae4f0d46SJoao Pinto 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
1744ae4f0d46SJoao Pinto 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
1745ae4f0d46SJoao Pinto 	u32 chan = 0;
1746ae4f0d46SJoao Pinto 
1747ae4f0d46SJoao Pinto 	for (chan = 0; chan < rx_channels_count; chan++)
1748ae4f0d46SJoao Pinto 		stmmac_stop_rx_dma(priv, chan);
1749ae4f0d46SJoao Pinto 
1750ae4f0d46SJoao Pinto 	for (chan = 0; chan < tx_channels_count; chan++)
1751ae4f0d46SJoao Pinto 		stmmac_stop_tx_dma(priv, chan);
1752ae4f0d46SJoao Pinto }
1753ae4f0d46SJoao Pinto 
1754ae4f0d46SJoao Pinto /**
17557ac6653aSJeff Kirsher  *  stmmac_dma_operation_mode - HW DMA operation mode
175632ceabcaSGiuseppe CAVALLARO  *  @priv: driver private structure
1757732fdf0eSGiuseppe CAVALLARO  *  Description: it is used for configuring the DMA operation mode register in
1758732fdf0eSGiuseppe CAVALLARO  *  order to program the tx/rx DMA thresholds or Store-And-Forward mode.
17597ac6653aSJeff Kirsher  */
17607ac6653aSJeff Kirsher static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
17617ac6653aSJeff Kirsher {
17626deee222SJoao Pinto 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
17636deee222SJoao Pinto 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
1764f88203a2SVince Bridgers 	int rxfifosz = priv->plat->rx_fifo_size;
176552a76235SJose Abreu 	int txfifosz = priv->plat->tx_fifo_size;
17666deee222SJoao Pinto 	u32 txmode = 0;
17676deee222SJoao Pinto 	u32 rxmode = 0;
17686deee222SJoao Pinto 	u32 chan = 0;
1769a0daae13SJose Abreu 	u8 qmode = 0;
1770f88203a2SVince Bridgers 
177111fbf811SThierry Reding 	if (rxfifosz == 0)
177211fbf811SThierry Reding 		rxfifosz = priv->dma_cap.rx_fifo_size;
177352a76235SJose Abreu 	if (txfifosz == 0)
177452a76235SJose Abreu 		txfifosz = priv->dma_cap.tx_fifo_size;
177552a76235SJose Abreu 
177652a76235SJose Abreu 	/* Adjust for real per queue fifo size */
177752a76235SJose Abreu 	rxfifosz /= rx_channels_count;
177852a76235SJose Abreu 	txfifosz /= tx_channels_count;
177911fbf811SThierry Reding 
17806deee222SJoao Pinto 	if (priv->plat->force_thresh_dma_mode) {
17816deee222SJoao Pinto 		txmode = tc;
17826deee222SJoao Pinto 		rxmode = tc;
17836deee222SJoao Pinto 	} else if (priv->plat->force_sf_dma_mode || priv->plat->tx_coe) {
17847ac6653aSJeff Kirsher 		/*
17857ac6653aSJeff Kirsher 		 * In case of GMAC, SF mode can be enabled
17867ac6653aSJeff Kirsher 		 * to perform the TX COE in HW. This depends on:
17877ac6653aSJeff Kirsher 		 * 1) TX COE if actually supported
17887ac6653aSJeff Kirsher 		 * 2) There is no bugged Jumbo frame support
17897ac6653aSJeff Kirsher 		 *    that needs to not insert csum in the TDES.
17907ac6653aSJeff Kirsher 		 */
17916deee222SJoao Pinto 		txmode = SF_DMA_MODE;
17926deee222SJoao Pinto 		rxmode = SF_DMA_MODE;
1793b2dec116SSonic Zhang 		priv->xstats.threshold = SF_DMA_MODE;
17946deee222SJoao Pinto 	} else {
17956deee222SJoao Pinto 		txmode = tc;
17966deee222SJoao Pinto 		rxmode = SF_DMA_MODE;
17976deee222SJoao Pinto 	}
17986deee222SJoao Pinto 
17996deee222SJoao Pinto 	/* configure all channels */
18006deee222SJoao Pinto 	if (priv->synopsys_id >= DWMAC_CORE_4_00) {
1801a0daae13SJose Abreu 		for (chan = 0; chan < rx_channels_count; chan++) {
1802a0daae13SJose Abreu 			qmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
18036deee222SJoao Pinto 
1804a4e887faSJose Abreu 			stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan,
1805a0daae13SJose Abreu 					rxfifosz, qmode);
1806a0daae13SJose Abreu 		}
1807a0daae13SJose Abreu 
1808a0daae13SJose Abreu 		for (chan = 0; chan < tx_channels_count; chan++) {
1809a0daae13SJose Abreu 			qmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
1810a0daae13SJose Abreu 
1811a4e887faSJose Abreu 			stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan,
1812a0daae13SJose Abreu 					txfifosz, qmode);
1813a0daae13SJose Abreu 		}
18146deee222SJoao Pinto 	} else {
1815a4e887faSJose Abreu 		stmmac_dma_mode(priv, priv->ioaddr, txmode, rxmode, rxfifosz);
18166deee222SJoao Pinto 	}
18177ac6653aSJeff Kirsher }
18187ac6653aSJeff Kirsher 
18197ac6653aSJeff Kirsher /**
1820732fdf0eSGiuseppe CAVALLARO  * stmmac_tx_clean - to manage the transmission completion
182132ceabcaSGiuseppe CAVALLARO  * @priv: driver private structure
1822ce736788SJoao Pinto  * @queue: TX queue index
1823732fdf0eSGiuseppe CAVALLARO  * Description: it reclaims the transmit resources after transmission completes.
18247ac6653aSJeff Kirsher  */
1825ce736788SJoao Pinto static void stmmac_tx_clean(struct stmmac_priv *priv, u32 queue)
18267ac6653aSJeff Kirsher {
1827ce736788SJoao Pinto 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
182838979574SBeniamino Galvani 	unsigned int bytes_compl = 0, pkts_compl = 0;
18298d5f4b07SBernd Edlinger 	unsigned int entry;
18307ac6653aSJeff Kirsher 
1831739c8e14SLino Sanfilippo 	netif_tx_lock(priv->dev);
1832a9097a96SGiuseppe CAVALLARO 
18339125cdd1SGiuseppe CAVALLARO 	priv->xstats.tx_clean++;
18349125cdd1SGiuseppe CAVALLARO 
18358d5f4b07SBernd Edlinger 	entry = tx_q->dirty_tx;
1836ce736788SJoao Pinto 	while (entry != tx_q->cur_tx) {
1837ce736788SJoao Pinto 		struct sk_buff *skb = tx_q->tx_skbuff[entry];
1838c24602efSGiuseppe CAVALLARO 		struct dma_desc *p;
1839c363b658SFabrice Gasnier 		int status;
1840c24602efSGiuseppe CAVALLARO 
1841c24602efSGiuseppe CAVALLARO 		if (priv->extend_desc)
1842ce736788SJoao Pinto 			p = (struct dma_desc *)(tx_q->dma_etx + entry);
1843c24602efSGiuseppe CAVALLARO 		else
1844ce736788SJoao Pinto 			p = tx_q->dma_tx + entry;
18457ac6653aSJeff Kirsher 
184642de047dSJose Abreu 		status = stmmac_tx_status(priv, &priv->dev->stats,
184742de047dSJose Abreu 				&priv->xstats, p, priv->ioaddr);
1848c363b658SFabrice Gasnier 		/* Check if the descriptor is owned by the DMA */
1849c363b658SFabrice Gasnier 		if (unlikely(status & tx_dma_own))
1850c363b658SFabrice Gasnier 			break;
1851c363b658SFabrice Gasnier 
1852a6b25da5SNiklas Cassel 		/* Make sure descriptor fields are read after reading
1853a6b25da5SNiklas Cassel 		 * the own bit.
1854a6b25da5SNiklas Cassel 		 */
1855a6b25da5SNiklas Cassel 		dma_rmb();
1856a6b25da5SNiklas Cassel 
1857c363b658SFabrice Gasnier 		/* Just consider the last segment and ...*/
1858c363b658SFabrice Gasnier 		if (likely(!(status & tx_not_ls))) {
1859c363b658SFabrice Gasnier 			/* ... verify the status error condition */
1860c363b658SFabrice Gasnier 			if (unlikely(status & tx_err)) {
1861c363b658SFabrice Gasnier 				priv->dev->stats.tx_errors++;
1862c363b658SFabrice Gasnier 			} else {
18637ac6653aSJeff Kirsher 				priv->dev->stats.tx_packets++;
18647ac6653aSJeff Kirsher 				priv->xstats.tx_pkt_n++;
1865c363b658SFabrice Gasnier 			}
1866ba1ffd74SGiuseppe CAVALLARO 			stmmac_get_tx_hwtstamp(priv, p, skb);
18677ac6653aSJeff Kirsher 		}
18687ac6653aSJeff Kirsher 
1869ce736788SJoao Pinto 		if (likely(tx_q->tx_skbuff_dma[entry].buf)) {
1870ce736788SJoao Pinto 			if (tx_q->tx_skbuff_dma[entry].map_as_page)
1871362b37beSGiuseppe CAVALLARO 				dma_unmap_page(priv->device,
1872ce736788SJoao Pinto 					       tx_q->tx_skbuff_dma[entry].buf,
1873ce736788SJoao Pinto 					       tx_q->tx_skbuff_dma[entry].len,
18747ac6653aSJeff Kirsher 					       DMA_TO_DEVICE);
1875362b37beSGiuseppe CAVALLARO 			else
1876362b37beSGiuseppe CAVALLARO 				dma_unmap_single(priv->device,
1877ce736788SJoao Pinto 						 tx_q->tx_skbuff_dma[entry].buf,
1878ce736788SJoao Pinto 						 tx_q->tx_skbuff_dma[entry].len,
1879362b37beSGiuseppe CAVALLARO 						 DMA_TO_DEVICE);
1880ce736788SJoao Pinto 			tx_q->tx_skbuff_dma[entry].buf = 0;
1881ce736788SJoao Pinto 			tx_q->tx_skbuff_dma[entry].len = 0;
1882ce736788SJoao Pinto 			tx_q->tx_skbuff_dma[entry].map_as_page = false;
1883cf32deecSRayagond Kokatanur 		}
1884f748be53SAlexandre TORGUE 
18852c520b1cSJose Abreu 		stmmac_clean_desc3(priv, tx_q, p);
1886f748be53SAlexandre TORGUE 
1887ce736788SJoao Pinto 		tx_q->tx_skbuff_dma[entry].last_segment = false;
1888ce736788SJoao Pinto 		tx_q->tx_skbuff_dma[entry].is_jumbo = false;
18897ac6653aSJeff Kirsher 
18907ac6653aSJeff Kirsher 		if (likely(skb != NULL)) {
189138979574SBeniamino Galvani 			pkts_compl++;
189238979574SBeniamino Galvani 			bytes_compl += skb->len;
18937c565c33SEric W. Biederman 			dev_consume_skb_any(skb);
1894ce736788SJoao Pinto 			tx_q->tx_skbuff[entry] = NULL;
18957ac6653aSJeff Kirsher 		}
18967ac6653aSJeff Kirsher 
189742de047dSJose Abreu 		stmmac_release_tx_desc(priv, p, priv->mode);
18987ac6653aSJeff Kirsher 
1899e3ad57c9SGiuseppe Cavallaro 		entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
19007ac6653aSJeff Kirsher 	}
1901ce736788SJoao Pinto 	tx_q->dirty_tx = entry;
190238979574SBeniamino Galvani 
1903c22a3f48SJoao Pinto 	netdev_tx_completed_queue(netdev_get_tx_queue(priv->dev, queue),
1904c22a3f48SJoao Pinto 				  pkts_compl, bytes_compl);
190538979574SBeniamino Galvani 
1906c22a3f48SJoao Pinto 	if (unlikely(netif_tx_queue_stopped(netdev_get_tx_queue(priv->dev,
1907c22a3f48SJoao Pinto 								queue))) &&
1908c22a3f48SJoao Pinto 	    stmmac_tx_avail(priv, queue) > STMMAC_TX_THRESH) {
1909c22a3f48SJoao Pinto 
1910b3e51069SLABBE Corentin 		netif_dbg(priv, tx_done, priv->dev,
1911b3e51069SLABBE Corentin 			  "%s: restart transmit\n", __func__);
1912c22a3f48SJoao Pinto 		netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, queue));
19137ac6653aSJeff Kirsher 	}
1914d765955dSGiuseppe CAVALLARO 
1915d765955dSGiuseppe CAVALLARO 	if ((priv->eee_enabled) && (!priv->tx_path_in_lpi_mode)) {
1916d765955dSGiuseppe CAVALLARO 		stmmac_enable_eee_mode(priv);
1917f5351ef7SGiuseppe CAVALLARO 		mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer));
1918d765955dSGiuseppe CAVALLARO 	}
1919739c8e14SLino Sanfilippo 	netif_tx_unlock(priv->dev);
19207ac6653aSJeff Kirsher }
19217ac6653aSJeff Kirsher 
19227ac6653aSJeff Kirsher /**
1923732fdf0eSGiuseppe CAVALLARO  * stmmac_tx_err - to manage the tx error
192432ceabcaSGiuseppe CAVALLARO  * @priv: driver private structure
19255bacd778SLABBE Corentin  * @chan: channel index
19267ac6653aSJeff Kirsher  * Description: it cleans the descriptors and restarts the transmission
1927732fdf0eSGiuseppe CAVALLARO  * in case of transmission errors.
19287ac6653aSJeff Kirsher  */
19295bacd778SLABBE Corentin static void stmmac_tx_err(struct stmmac_priv *priv, u32 chan)
19307ac6653aSJeff Kirsher {
1931ce736788SJoao Pinto 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
1932c24602efSGiuseppe CAVALLARO 	int i;
1933ce736788SJoao Pinto 
1934c22a3f48SJoao Pinto 	netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, chan));
19357ac6653aSJeff Kirsher 
1936ae4f0d46SJoao Pinto 	stmmac_stop_tx_dma(priv, chan);
1937ce736788SJoao Pinto 	dma_free_tx_skbufs(priv, chan);
1938e3ad57c9SGiuseppe Cavallaro 	for (i = 0; i < DMA_TX_SIZE; i++)
1939c24602efSGiuseppe CAVALLARO 		if (priv->extend_desc)
194042de047dSJose Abreu 			stmmac_init_tx_desc(priv, &tx_q->dma_etx[i].basic,
194142de047dSJose Abreu 					priv->mode, (i == DMA_TX_SIZE - 1));
1942c24602efSGiuseppe CAVALLARO 		else
194342de047dSJose Abreu 			stmmac_init_tx_desc(priv, &tx_q->dma_tx[i],
194442de047dSJose Abreu 					priv->mode, (i == DMA_TX_SIZE - 1));
1945ce736788SJoao Pinto 	tx_q->dirty_tx = 0;
1946ce736788SJoao Pinto 	tx_q->cur_tx = 0;
19478d212a9eSNiklas Cassel 	tx_q->mss = 0;
1948c22a3f48SJoao Pinto 	netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, chan));
1949ae4f0d46SJoao Pinto 	stmmac_start_tx_dma(priv, chan);
19507ac6653aSJeff Kirsher 
19517ac6653aSJeff Kirsher 	priv->dev->stats.tx_errors++;
1952c22a3f48SJoao Pinto 	netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, chan));
19537ac6653aSJeff Kirsher }
19547ac6653aSJeff Kirsher 
195532ceabcaSGiuseppe CAVALLARO /**
19566deee222SJoao Pinto  *  stmmac_set_dma_operation_mode - Set DMA operation mode by channel
19576deee222SJoao Pinto  *  @priv: driver private structure
19586deee222SJoao Pinto  *  @txmode: TX operating mode
19596deee222SJoao Pinto  *  @rxmode: RX operating mode
19606deee222SJoao Pinto  *  @chan: channel index
19616deee222SJoao Pinto  *  Description: it is used for configuring of the DMA operation mode in
19626deee222SJoao Pinto  *  runtime in order to program the tx/rx DMA thresholds or Store-And-Forward
19636deee222SJoao Pinto  *  mode.
19646deee222SJoao Pinto  */
19656deee222SJoao Pinto static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
19666deee222SJoao Pinto 					  u32 rxmode, u32 chan)
19676deee222SJoao Pinto {
1968a0daae13SJose Abreu 	u8 rxqmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
1969a0daae13SJose Abreu 	u8 txqmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
197052a76235SJose Abreu 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
197152a76235SJose Abreu 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
19726deee222SJoao Pinto 	int rxfifosz = priv->plat->rx_fifo_size;
197352a76235SJose Abreu 	int txfifosz = priv->plat->tx_fifo_size;
19746deee222SJoao Pinto 
19756deee222SJoao Pinto 	if (rxfifosz == 0)
19766deee222SJoao Pinto 		rxfifosz = priv->dma_cap.rx_fifo_size;
197752a76235SJose Abreu 	if (txfifosz == 0)
197852a76235SJose Abreu 		txfifosz = priv->dma_cap.tx_fifo_size;
197952a76235SJose Abreu 
198052a76235SJose Abreu 	/* Adjust for real per queue fifo size */
198152a76235SJose Abreu 	rxfifosz /= rx_channels_count;
198252a76235SJose Abreu 	txfifosz /= tx_channels_count;
19836deee222SJoao Pinto 
19846deee222SJoao Pinto 	if (priv->synopsys_id >= DWMAC_CORE_4_00) {
1985a4e887faSJose Abreu 		stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan, rxfifosz,
1986a4e887faSJose Abreu 				rxqmode);
1987a4e887faSJose Abreu 		stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan, txfifosz,
1988a4e887faSJose Abreu 				txqmode);
19896deee222SJoao Pinto 	} else {
1990a4e887faSJose Abreu 		stmmac_dma_mode(priv, priv->ioaddr, txmode, rxmode, rxfifosz);
19916deee222SJoao Pinto 	}
19926deee222SJoao Pinto }
19936deee222SJoao Pinto 
19948bf993a5SJose Abreu static bool stmmac_safety_feat_interrupt(struct stmmac_priv *priv)
19958bf993a5SJose Abreu {
1996c10d4c82SJose Abreu 	int ret = false;
19978bf993a5SJose Abreu 
19988bf993a5SJose Abreu 	/* Safety features are only available in cores >= 5.10 */
19998bf993a5SJose Abreu 	if (priv->synopsys_id < DWMAC_CORE_5_10)
20008bf993a5SJose Abreu 		return ret;
2001c10d4c82SJose Abreu 	ret = stmmac_safety_feat_irq_status(priv, priv->dev,
20028bf993a5SJose Abreu 			priv->ioaddr, priv->dma_cap.asp, &priv->sstats);
2003c10d4c82SJose Abreu 	if (ret && (ret != -EINVAL)) {
20048bf993a5SJose Abreu 		stmmac_global_err(priv);
2005c10d4c82SJose Abreu 		return true;
2006c10d4c82SJose Abreu 	}
2007c10d4c82SJose Abreu 
2008c10d4c82SJose Abreu 	return false;
20098bf993a5SJose Abreu }
20108bf993a5SJose Abreu 
20116deee222SJoao Pinto /**
2012732fdf0eSGiuseppe CAVALLARO  * stmmac_dma_interrupt - DMA ISR
201332ceabcaSGiuseppe CAVALLARO  * @priv: driver private structure
201432ceabcaSGiuseppe CAVALLARO  * Description: this is the DMA ISR. It is called by the main ISR.
2015732fdf0eSGiuseppe CAVALLARO  * It calls the dwmac dma routine and schedule poll method in case of some
2016732fdf0eSGiuseppe CAVALLARO  * work can be done.
201732ceabcaSGiuseppe CAVALLARO  */
20187ac6653aSJeff Kirsher static void stmmac_dma_interrupt(struct stmmac_priv *priv)
20197ac6653aSJeff Kirsher {
2020d62a107aSJoao Pinto 	u32 tx_channel_count = priv->plat->tx_queues_to_use;
20215a6a0445SNiklas Cassel 	u32 rx_channel_count = priv->plat->rx_queues_to_use;
20225a6a0445SNiklas Cassel 	u32 channels_to_check = tx_channel_count > rx_channel_count ?
20235a6a0445SNiklas Cassel 				tx_channel_count : rx_channel_count;
2024d62a107aSJoao Pinto 	u32 chan;
20255a6a0445SNiklas Cassel 	bool poll_scheduled = false;
20268ac60ffbSKees Cook 	int status[max_t(u32, MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES)];
20278ac60ffbSKees Cook 
20288ac60ffbSKees Cook 	/* Make sure we never check beyond our status buffer. */
20298ac60ffbSKees Cook 	if (WARN_ON_ONCE(channels_to_check > ARRAY_SIZE(status)))
20308ac60ffbSKees Cook 		channels_to_check = ARRAY_SIZE(status);
203168e5cfafSJoao Pinto 
20325a6a0445SNiklas Cassel 	/* Each DMA channel can be used for rx and tx simultaneously, yet
20335a6a0445SNiklas Cassel 	 * napi_struct is embedded in struct stmmac_rx_queue rather than in a
20345a6a0445SNiklas Cassel 	 * stmmac_channel struct.
20355a6a0445SNiklas Cassel 	 * Because of this, stmmac_poll currently checks (and possibly wakes)
20365a6a0445SNiklas Cassel 	 * all tx queues rather than just a single tx queue.
20375a6a0445SNiklas Cassel 	 */
20385a6a0445SNiklas Cassel 	for (chan = 0; chan < channels_to_check; chan++)
2039a4e887faSJose Abreu 		status[chan] = stmmac_dma_interrupt_status(priv, priv->ioaddr,
2040a4e887faSJose Abreu 				&priv->xstats, chan);
20415a6a0445SNiklas Cassel 
20425a6a0445SNiklas Cassel 	for (chan = 0; chan < rx_channel_count; chan++) {
20435a6a0445SNiklas Cassel 		if (likely(status[chan] & handle_rx)) {
2044c22a3f48SJoao Pinto 			struct stmmac_rx_queue *rx_q = &priv->rx_queue[chan];
2045c22a3f48SJoao Pinto 
20465a6a0445SNiklas Cassel 			if (likely(napi_schedule_prep(&rx_q->napi))) {
2047a4e887faSJose Abreu 				stmmac_disable_dma_irq(priv, priv->ioaddr, chan);
20485a6a0445SNiklas Cassel 				__napi_schedule(&rx_q->napi);
20495a6a0445SNiklas Cassel 				poll_scheduled = true;
20505a6a0445SNiklas Cassel 			}
20515a6a0445SNiklas Cassel 		}
20525a6a0445SNiklas Cassel 	}
20535a6a0445SNiklas Cassel 
20545a6a0445SNiklas Cassel 	/* If we scheduled poll, we already know that tx queues will be checked.
20555a6a0445SNiklas Cassel 	 * If we didn't schedule poll, see if any DMA channel (used by tx) has a
20565a6a0445SNiklas Cassel 	 * completed transmission, if so, call stmmac_poll (once).
20575a6a0445SNiklas Cassel 	 */
20585a6a0445SNiklas Cassel 	if (!poll_scheduled) {
20595a6a0445SNiklas Cassel 		for (chan = 0; chan < tx_channel_count; chan++) {
20605a6a0445SNiklas Cassel 			if (status[chan] & handle_tx) {
20615a6a0445SNiklas Cassel 				/* It doesn't matter what rx queue we choose
20625a6a0445SNiklas Cassel 				 * here. We use 0 since it always exists.
20635a6a0445SNiklas Cassel 				 */
20645a6a0445SNiklas Cassel 				struct stmmac_rx_queue *rx_q =
20655a6a0445SNiklas Cassel 					&priv->rx_queue[0];
20665a6a0445SNiklas Cassel 
2067c22a3f48SJoao Pinto 				if (likely(napi_schedule_prep(&rx_q->napi))) {
2068a4e887faSJose Abreu 					stmmac_disable_dma_irq(priv,
2069a4e887faSJose Abreu 							priv->ioaddr, chan);
2070c22a3f48SJoao Pinto 					__napi_schedule(&rx_q->napi);
20719125cdd1SGiuseppe CAVALLARO 				}
20725a6a0445SNiklas Cassel 				break;
20735a6a0445SNiklas Cassel 			}
20745a6a0445SNiklas Cassel 		}
20759125cdd1SGiuseppe CAVALLARO 	}
2076d62a107aSJoao Pinto 
20775a6a0445SNiklas Cassel 	for (chan = 0; chan < tx_channel_count; chan++) {
20785a6a0445SNiklas Cassel 		if (unlikely(status[chan] & tx_hard_error_bump_tc)) {
20797ac6653aSJeff Kirsher 			/* Try to bump up the dma threshold on this failure */
2080b2dec116SSonic Zhang 			if (unlikely(priv->xstats.threshold != SF_DMA_MODE) &&
2081b2dec116SSonic Zhang 			    (tc <= 256)) {
20827ac6653aSJeff Kirsher 				tc += 64;
2083c405abe2SSonic Zhang 				if (priv->plat->force_thresh_dma_mode)
2084d62a107aSJoao Pinto 					stmmac_set_dma_operation_mode(priv,
2085d62a107aSJoao Pinto 								      tc,
2086d62a107aSJoao Pinto 								      tc,
2087d62a107aSJoao Pinto 								      chan);
2088c405abe2SSonic Zhang 				else
2089d62a107aSJoao Pinto 					stmmac_set_dma_operation_mode(priv,
2090d62a107aSJoao Pinto 								    tc,
2091d62a107aSJoao Pinto 								    SF_DMA_MODE,
2092d62a107aSJoao Pinto 								    chan);
20937ac6653aSJeff Kirsher 				priv->xstats.threshold = tc;
20947ac6653aSJeff Kirsher 			}
20955a6a0445SNiklas Cassel 		} else if (unlikely(status[chan] == tx_hard_error)) {
20964e593262SJoao Pinto 			stmmac_tx_err(priv, chan);
20977ac6653aSJeff Kirsher 		}
2098d62a107aSJoao Pinto 	}
2099d62a107aSJoao Pinto }
21007ac6653aSJeff Kirsher 
210132ceabcaSGiuseppe CAVALLARO /**
210232ceabcaSGiuseppe CAVALLARO  * stmmac_mmc_setup: setup the Mac Management Counters (MMC)
210332ceabcaSGiuseppe CAVALLARO  * @priv: driver private structure
210432ceabcaSGiuseppe CAVALLARO  * Description: this masks the MMC irq, in fact, the counters are managed in SW.
210532ceabcaSGiuseppe CAVALLARO  */
21061c901a46SGiuseppe CAVALLARO static void stmmac_mmc_setup(struct stmmac_priv *priv)
21071c901a46SGiuseppe CAVALLARO {
21081c901a46SGiuseppe CAVALLARO 	unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET |
21091c901a46SGiuseppe CAVALLARO 			    MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET;
21101c901a46SGiuseppe CAVALLARO 
2111ba1ffd74SGiuseppe CAVALLARO 	if (priv->synopsys_id >= DWMAC_CORE_4_00) {
2112ba1ffd74SGiuseppe CAVALLARO 		priv->ptpaddr = priv->ioaddr + PTP_GMAC4_OFFSET;
2113f748be53SAlexandre TORGUE 		priv->mmcaddr = priv->ioaddr + MMC_GMAC4_OFFSET;
2114ba1ffd74SGiuseppe CAVALLARO 	} else {
2115ba1ffd74SGiuseppe CAVALLARO 		priv->ptpaddr = priv->ioaddr + PTP_GMAC3_X_OFFSET;
211636ff7c1eSAlexandre TORGUE 		priv->mmcaddr = priv->ioaddr + MMC_GMAC3_X_OFFSET;
2117ba1ffd74SGiuseppe CAVALLARO 	}
211836ff7c1eSAlexandre TORGUE 
211936ff7c1eSAlexandre TORGUE 	dwmac_mmc_intr_all_mask(priv->mmcaddr);
21204f795b25SGiuseppe CAVALLARO 
21214f795b25SGiuseppe CAVALLARO 	if (priv->dma_cap.rmon) {
212236ff7c1eSAlexandre TORGUE 		dwmac_mmc_ctrl(priv->mmcaddr, mode);
21231c901a46SGiuseppe CAVALLARO 		memset(&priv->mmc, 0, sizeof(struct stmmac_counters));
21244f795b25SGiuseppe CAVALLARO 	} else
212538ddc59dSLABBE Corentin 		netdev_info(priv->dev, "No MAC Management Counters available\n");
21261c901a46SGiuseppe CAVALLARO }
21271c901a46SGiuseppe CAVALLARO 
2128732fdf0eSGiuseppe CAVALLARO /**
2129732fdf0eSGiuseppe CAVALLARO  * stmmac_get_hw_features - get MAC capabilities from the HW cap. register.
213032ceabcaSGiuseppe CAVALLARO  * @priv: driver private structure
213119e30c14SGiuseppe CAVALLARO  * Description:
213219e30c14SGiuseppe CAVALLARO  *  new GMAC chip generations have a new register to indicate the
2133e7434821SGiuseppe CAVALLARO  *  presence of the optional feature/functions.
213419e30c14SGiuseppe CAVALLARO  *  This can be also used to override the value passed through the
213519e30c14SGiuseppe CAVALLARO  *  platform and necessary for old MAC10/100 and GMAC chips.
2136e7434821SGiuseppe CAVALLARO  */
2137e7434821SGiuseppe CAVALLARO static int stmmac_get_hw_features(struct stmmac_priv *priv)
2138e7434821SGiuseppe CAVALLARO {
2139a4e887faSJose Abreu 	return stmmac_get_hw_feature(priv, priv->ioaddr, &priv->dma_cap) == 0;
2140e7434821SGiuseppe CAVALLARO }
2141e7434821SGiuseppe CAVALLARO 
214232ceabcaSGiuseppe CAVALLARO /**
2143732fdf0eSGiuseppe CAVALLARO  * stmmac_check_ether_addr - check if the MAC addr is valid
214432ceabcaSGiuseppe CAVALLARO  * @priv: driver private structure
214532ceabcaSGiuseppe CAVALLARO  * Description:
214632ceabcaSGiuseppe CAVALLARO  * it is to verify if the MAC address is valid, in case of failures it
214732ceabcaSGiuseppe CAVALLARO  * generates a random MAC address
214832ceabcaSGiuseppe CAVALLARO  */
2149bfab27a1SGiuseppe CAVALLARO static void stmmac_check_ether_addr(struct stmmac_priv *priv)
2150bfab27a1SGiuseppe CAVALLARO {
2151bfab27a1SGiuseppe CAVALLARO 	if (!is_valid_ether_addr(priv->dev->dev_addr)) {
2152c10d4c82SJose Abreu 		stmmac_get_umac_addr(priv, priv->hw, priv->dev->dev_addr, 0);
2153bfab27a1SGiuseppe CAVALLARO 		if (!is_valid_ether_addr(priv->dev->dev_addr))
2154f2cedb63SDanny Kukawka 			eth_hw_addr_random(priv->dev);
215538ddc59dSLABBE Corentin 		netdev_info(priv->dev, "device MAC address %pM\n",
2156bfab27a1SGiuseppe CAVALLARO 			    priv->dev->dev_addr);
2157bfab27a1SGiuseppe CAVALLARO 	}
2158c88460b7SHans de Goede }
2159bfab27a1SGiuseppe CAVALLARO 
216032ceabcaSGiuseppe CAVALLARO /**
2161732fdf0eSGiuseppe CAVALLARO  * stmmac_init_dma_engine - DMA init.
216232ceabcaSGiuseppe CAVALLARO  * @priv: driver private structure
216332ceabcaSGiuseppe CAVALLARO  * Description:
216432ceabcaSGiuseppe CAVALLARO  * It inits the DMA invoking the specific MAC/GMAC callback.
216532ceabcaSGiuseppe CAVALLARO  * Some DMA parameters can be passed from the platform;
216632ceabcaSGiuseppe CAVALLARO  * in case of these are not passed a default is kept for the MAC or GMAC.
216732ceabcaSGiuseppe CAVALLARO  */
21680f1f88a8SGiuseppe CAVALLARO static int stmmac_init_dma_engine(struct stmmac_priv *priv)
21690f1f88a8SGiuseppe CAVALLARO {
217047f2a9ceSJoao Pinto 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
217147f2a9ceSJoao Pinto 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
217254139cf3SJoao Pinto 	struct stmmac_rx_queue *rx_q;
2173ce736788SJoao Pinto 	struct stmmac_tx_queue *tx_q;
217447f2a9ceSJoao Pinto 	u32 dummy_dma_rx_phy = 0;
217547f2a9ceSJoao Pinto 	u32 dummy_dma_tx_phy = 0;
217647f2a9ceSJoao Pinto 	u32 chan = 0;
2177c24602efSGiuseppe CAVALLARO 	int atds = 0;
2178495db273SGiuseppe Cavallaro 	int ret = 0;
21790f1f88a8SGiuseppe CAVALLARO 
2180a332e2faSNiklas Cassel 	if (!priv->plat->dma_cfg || !priv->plat->dma_cfg->pbl) {
2181a332e2faSNiklas Cassel 		dev_err(priv->device, "Invalid DMA configuration\n");
218289ab75bfSNiklas Cassel 		return -EINVAL;
21830f1f88a8SGiuseppe CAVALLARO 	}
21840f1f88a8SGiuseppe CAVALLARO 
2185c24602efSGiuseppe CAVALLARO 	if (priv->extend_desc && (priv->mode == STMMAC_RING_MODE))
2186c24602efSGiuseppe CAVALLARO 		atds = 1;
2187c24602efSGiuseppe CAVALLARO 
2188a4e887faSJose Abreu 	ret = stmmac_reset(priv, priv->ioaddr);
2189495db273SGiuseppe Cavallaro 	if (ret) {
2190495db273SGiuseppe Cavallaro 		dev_err(priv->device, "Failed to reset the dma\n");
2191495db273SGiuseppe Cavallaro 		return ret;
2192495db273SGiuseppe Cavallaro 	}
2193495db273SGiuseppe Cavallaro 
2194f748be53SAlexandre TORGUE 	if (priv->synopsys_id >= DWMAC_CORE_4_00) {
219547f2a9ceSJoao Pinto 		/* DMA Configuration */
2196a4e887faSJose Abreu 		stmmac_dma_init(priv, priv->ioaddr, priv->plat->dma_cfg,
219747f2a9ceSJoao Pinto 				dummy_dma_tx_phy, dummy_dma_rx_phy, atds);
219847f2a9ceSJoao Pinto 
219947f2a9ceSJoao Pinto 		/* DMA RX Channel Configuration */
220047f2a9ceSJoao Pinto 		for (chan = 0; chan < rx_channels_count; chan++) {
220154139cf3SJoao Pinto 			rx_q = &priv->rx_queue[chan];
220254139cf3SJoao Pinto 
2203a4e887faSJose Abreu 			stmmac_init_rx_chan(priv, priv->ioaddr,
2204a4e887faSJose Abreu 					priv->plat->dma_cfg, rx_q->dma_rx_phy,
2205a4e887faSJose Abreu 					chan);
220647f2a9ceSJoao Pinto 
220754139cf3SJoao Pinto 			rx_q->rx_tail_addr = rx_q->dma_rx_phy +
2208f748be53SAlexandre TORGUE 				    (DMA_RX_SIZE * sizeof(struct dma_desc));
2209a4e887faSJose Abreu 			stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
2210a4e887faSJose Abreu 					rx_q->rx_tail_addr, chan);
221147f2a9ceSJoao Pinto 		}
221247f2a9ceSJoao Pinto 
221347f2a9ceSJoao Pinto 		/* DMA TX Channel Configuration */
221447f2a9ceSJoao Pinto 		for (chan = 0; chan < tx_channels_count; chan++) {
2215ce736788SJoao Pinto 			tx_q = &priv->tx_queue[chan];
2216ce736788SJoao Pinto 
2217a4e887faSJose Abreu 			stmmac_init_chan(priv, priv->ioaddr,
2218a4e887faSJose Abreu 					priv->plat->dma_cfg, chan);
221947f2a9ceSJoao Pinto 
2220a4e887faSJose Abreu 			stmmac_init_tx_chan(priv, priv->ioaddr,
2221a4e887faSJose Abreu 					priv->plat->dma_cfg, tx_q->dma_tx_phy,
2222a4e887faSJose Abreu 					chan);
2223f748be53SAlexandre TORGUE 
2224ce736788SJoao Pinto 			tx_q->tx_tail_addr = tx_q->dma_tx_phy +
2225f748be53SAlexandre TORGUE 				    (DMA_TX_SIZE * sizeof(struct dma_desc));
2226a4e887faSJose Abreu 			stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
2227a4e887faSJose Abreu 					tx_q->tx_tail_addr, chan);
222847f2a9ceSJoao Pinto 		}
222947f2a9ceSJoao Pinto 	} else {
223054139cf3SJoao Pinto 		rx_q = &priv->rx_queue[chan];
2231ce736788SJoao Pinto 		tx_q = &priv->tx_queue[chan];
2232a4e887faSJose Abreu 		stmmac_dma_init(priv, priv->ioaddr, priv->plat->dma_cfg,
2233ce736788SJoao Pinto 				tx_q->dma_tx_phy, rx_q->dma_rx_phy, atds);
2234f748be53SAlexandre TORGUE 	}
2235f748be53SAlexandre TORGUE 
2236a4e887faSJose Abreu 	if (priv->plat->axi)
2237a4e887faSJose Abreu 		stmmac_axi(priv, priv->ioaddr, priv->plat->axi);
2238afea0365SGiuseppe Cavallaro 
2239495db273SGiuseppe Cavallaro 	return ret;
22400f1f88a8SGiuseppe CAVALLARO }
22410f1f88a8SGiuseppe CAVALLARO 
2242bfab27a1SGiuseppe CAVALLARO /**
2243732fdf0eSGiuseppe CAVALLARO  * stmmac_tx_timer - mitigation sw timer for tx.
22449125cdd1SGiuseppe CAVALLARO  * @data: data pointer
22459125cdd1SGiuseppe CAVALLARO  * Description:
22469125cdd1SGiuseppe CAVALLARO  * This is the timer handler to directly invoke the stmmac_tx_clean.
22479125cdd1SGiuseppe CAVALLARO  */
2248e99e88a9SKees Cook static void stmmac_tx_timer(struct timer_list *t)
22499125cdd1SGiuseppe CAVALLARO {
2250e99e88a9SKees Cook 	struct stmmac_priv *priv = from_timer(priv, t, txtimer);
2251ce736788SJoao Pinto 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
2252ce736788SJoao Pinto 	u32 queue;
22539125cdd1SGiuseppe CAVALLARO 
2254ce736788SJoao Pinto 	/* let's scan all the tx queues */
2255ce736788SJoao Pinto 	for (queue = 0; queue < tx_queues_count; queue++)
2256ce736788SJoao Pinto 		stmmac_tx_clean(priv, queue);
22579125cdd1SGiuseppe CAVALLARO }
22589125cdd1SGiuseppe CAVALLARO 
22599125cdd1SGiuseppe CAVALLARO /**
2260732fdf0eSGiuseppe CAVALLARO  * stmmac_init_tx_coalesce - init tx mitigation options.
226132ceabcaSGiuseppe CAVALLARO  * @priv: driver private structure
22629125cdd1SGiuseppe CAVALLARO  * Description:
22639125cdd1SGiuseppe CAVALLARO  * This inits the transmit coalesce parameters: i.e. timer rate,
22649125cdd1SGiuseppe CAVALLARO  * timer handler and default threshold used for enabling the
22659125cdd1SGiuseppe CAVALLARO  * interrupt on completion bit.
22669125cdd1SGiuseppe CAVALLARO  */
22679125cdd1SGiuseppe CAVALLARO static void stmmac_init_tx_coalesce(struct stmmac_priv *priv)
22689125cdd1SGiuseppe CAVALLARO {
22699125cdd1SGiuseppe CAVALLARO 	priv->tx_coal_frames = STMMAC_TX_FRAMES;
22709125cdd1SGiuseppe CAVALLARO 	priv->tx_coal_timer = STMMAC_COAL_TX_TIMER;
2271e99e88a9SKees Cook 	timer_setup(&priv->txtimer, stmmac_tx_timer, 0);
22729125cdd1SGiuseppe CAVALLARO 	priv->txtimer.expires = STMMAC_COAL_TIMER(priv->tx_coal_timer);
22739125cdd1SGiuseppe CAVALLARO 	add_timer(&priv->txtimer);
22749125cdd1SGiuseppe CAVALLARO }
22759125cdd1SGiuseppe CAVALLARO 
22764854ab99SJoao Pinto static void stmmac_set_rings_length(struct stmmac_priv *priv)
22774854ab99SJoao Pinto {
22784854ab99SJoao Pinto 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
22794854ab99SJoao Pinto 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
22804854ab99SJoao Pinto 	u32 chan;
22814854ab99SJoao Pinto 
22824854ab99SJoao Pinto 	/* set TX ring length */
22834854ab99SJoao Pinto 	for (chan = 0; chan < tx_channels_count; chan++)
2284a4e887faSJose Abreu 		stmmac_set_tx_ring_len(priv, priv->ioaddr,
22854854ab99SJoao Pinto 				(DMA_TX_SIZE - 1), chan);
22864854ab99SJoao Pinto 
22874854ab99SJoao Pinto 	/* set RX ring length */
22884854ab99SJoao Pinto 	for (chan = 0; chan < rx_channels_count; chan++)
2289a4e887faSJose Abreu 		stmmac_set_rx_ring_len(priv, priv->ioaddr,
22904854ab99SJoao Pinto 				(DMA_RX_SIZE - 1), chan);
22914854ab99SJoao Pinto }
22924854ab99SJoao Pinto 
22939125cdd1SGiuseppe CAVALLARO /**
22946a3a7193SJoao Pinto  *  stmmac_set_tx_queue_weight - Set TX queue weight
22956a3a7193SJoao Pinto  *  @priv: driver private structure
22966a3a7193SJoao Pinto  *  Description: It is used for setting TX queues weight
22976a3a7193SJoao Pinto  */
22986a3a7193SJoao Pinto static void stmmac_set_tx_queue_weight(struct stmmac_priv *priv)
22996a3a7193SJoao Pinto {
23006a3a7193SJoao Pinto 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
23016a3a7193SJoao Pinto 	u32 weight;
23026a3a7193SJoao Pinto 	u32 queue;
23036a3a7193SJoao Pinto 
23046a3a7193SJoao Pinto 	for (queue = 0; queue < tx_queues_count; queue++) {
23056a3a7193SJoao Pinto 		weight = priv->plat->tx_queues_cfg[queue].weight;
2306c10d4c82SJose Abreu 		stmmac_set_mtl_tx_queue_weight(priv, priv->hw, weight, queue);
23076a3a7193SJoao Pinto 	}
23086a3a7193SJoao Pinto }
23096a3a7193SJoao Pinto 
23106a3a7193SJoao Pinto /**
231119d91873SJoao Pinto  *  stmmac_configure_cbs - Configure CBS in TX queue
231219d91873SJoao Pinto  *  @priv: driver private structure
231319d91873SJoao Pinto  *  Description: It is used for configuring CBS in AVB TX queues
231419d91873SJoao Pinto  */
231519d91873SJoao Pinto static void stmmac_configure_cbs(struct stmmac_priv *priv)
231619d91873SJoao Pinto {
231719d91873SJoao Pinto 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
231819d91873SJoao Pinto 	u32 mode_to_use;
231919d91873SJoao Pinto 	u32 queue;
232019d91873SJoao Pinto 
232144781fefSJoao Pinto 	/* queue 0 is reserved for legacy traffic */
232244781fefSJoao Pinto 	for (queue = 1; queue < tx_queues_count; queue++) {
232319d91873SJoao Pinto 		mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use;
232419d91873SJoao Pinto 		if (mode_to_use == MTL_QUEUE_DCB)
232519d91873SJoao Pinto 			continue;
232619d91873SJoao Pinto 
2327c10d4c82SJose Abreu 		stmmac_config_cbs(priv, priv->hw,
232819d91873SJoao Pinto 				priv->plat->tx_queues_cfg[queue].send_slope,
232919d91873SJoao Pinto 				priv->plat->tx_queues_cfg[queue].idle_slope,
233019d91873SJoao Pinto 				priv->plat->tx_queues_cfg[queue].high_credit,
233119d91873SJoao Pinto 				priv->plat->tx_queues_cfg[queue].low_credit,
233219d91873SJoao Pinto 				queue);
233319d91873SJoao Pinto 	}
233419d91873SJoao Pinto }
233519d91873SJoao Pinto 
233619d91873SJoao Pinto /**
2337d43042f4SJoao Pinto  *  stmmac_rx_queue_dma_chan_map - Map RX queue to RX dma channel
2338d43042f4SJoao Pinto  *  @priv: driver private structure
2339d43042f4SJoao Pinto  *  Description: It is used for mapping RX queues to RX dma channels
2340d43042f4SJoao Pinto  */
2341d43042f4SJoao Pinto static void stmmac_rx_queue_dma_chan_map(struct stmmac_priv *priv)
2342d43042f4SJoao Pinto {
2343d43042f4SJoao Pinto 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
2344d43042f4SJoao Pinto 	u32 queue;
2345d43042f4SJoao Pinto 	u32 chan;
2346d43042f4SJoao Pinto 
2347d43042f4SJoao Pinto 	for (queue = 0; queue < rx_queues_count; queue++) {
2348d43042f4SJoao Pinto 		chan = priv->plat->rx_queues_cfg[queue].chan;
2349c10d4c82SJose Abreu 		stmmac_map_mtl_to_dma(priv, priv->hw, queue, chan);
2350d43042f4SJoao Pinto 	}
2351d43042f4SJoao Pinto }
2352d43042f4SJoao Pinto 
2353d43042f4SJoao Pinto /**
2354a8f5102aSJoao Pinto  *  stmmac_mac_config_rx_queues_prio - Configure RX Queue priority
2355a8f5102aSJoao Pinto  *  @priv: driver private structure
2356a8f5102aSJoao Pinto  *  Description: It is used for configuring the RX Queue Priority
2357a8f5102aSJoao Pinto  */
2358a8f5102aSJoao Pinto static void stmmac_mac_config_rx_queues_prio(struct stmmac_priv *priv)
2359a8f5102aSJoao Pinto {
2360a8f5102aSJoao Pinto 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
2361a8f5102aSJoao Pinto 	u32 queue;
2362a8f5102aSJoao Pinto 	u32 prio;
2363a8f5102aSJoao Pinto 
2364a8f5102aSJoao Pinto 	for (queue = 0; queue < rx_queues_count; queue++) {
2365a8f5102aSJoao Pinto 		if (!priv->plat->rx_queues_cfg[queue].use_prio)
2366a8f5102aSJoao Pinto 			continue;
2367a8f5102aSJoao Pinto 
2368a8f5102aSJoao Pinto 		prio = priv->plat->rx_queues_cfg[queue].prio;
2369c10d4c82SJose Abreu 		stmmac_rx_queue_prio(priv, priv->hw, prio, queue);
2370a8f5102aSJoao Pinto 	}
2371a8f5102aSJoao Pinto }
2372a8f5102aSJoao Pinto 
2373a8f5102aSJoao Pinto /**
2374a8f5102aSJoao Pinto  *  stmmac_mac_config_tx_queues_prio - Configure TX Queue priority
2375a8f5102aSJoao Pinto  *  @priv: driver private structure
2376a8f5102aSJoao Pinto  *  Description: It is used for configuring the TX Queue Priority
2377a8f5102aSJoao Pinto  */
2378a8f5102aSJoao Pinto static void stmmac_mac_config_tx_queues_prio(struct stmmac_priv *priv)
2379a8f5102aSJoao Pinto {
2380a8f5102aSJoao Pinto 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
2381a8f5102aSJoao Pinto 	u32 queue;
2382a8f5102aSJoao Pinto 	u32 prio;
2383a8f5102aSJoao Pinto 
2384a8f5102aSJoao Pinto 	for (queue = 0; queue < tx_queues_count; queue++) {
2385a8f5102aSJoao Pinto 		if (!priv->plat->tx_queues_cfg[queue].use_prio)
2386a8f5102aSJoao Pinto 			continue;
2387a8f5102aSJoao Pinto 
2388a8f5102aSJoao Pinto 		prio = priv->plat->tx_queues_cfg[queue].prio;
2389c10d4c82SJose Abreu 		stmmac_tx_queue_prio(priv, priv->hw, prio, queue);
2390a8f5102aSJoao Pinto 	}
2391a8f5102aSJoao Pinto }
2392a8f5102aSJoao Pinto 
2393a8f5102aSJoao Pinto /**
2394abe80fdcSJoao Pinto  *  stmmac_mac_config_rx_queues_routing - Configure RX Queue Routing
2395abe80fdcSJoao Pinto  *  @priv: driver private structure
2396abe80fdcSJoao Pinto  *  Description: It is used for configuring the RX queue routing
2397abe80fdcSJoao Pinto  */
2398abe80fdcSJoao Pinto static void stmmac_mac_config_rx_queues_routing(struct stmmac_priv *priv)
2399abe80fdcSJoao Pinto {
2400abe80fdcSJoao Pinto 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
2401abe80fdcSJoao Pinto 	u32 queue;
2402abe80fdcSJoao Pinto 	u8 packet;
2403abe80fdcSJoao Pinto 
2404abe80fdcSJoao Pinto 	for (queue = 0; queue < rx_queues_count; queue++) {
2405abe80fdcSJoao Pinto 		/* no specific packet type routing specified for the queue */
2406abe80fdcSJoao Pinto 		if (priv->plat->rx_queues_cfg[queue].pkt_route == 0x0)
2407abe80fdcSJoao Pinto 			continue;
2408abe80fdcSJoao Pinto 
2409abe80fdcSJoao Pinto 		packet = priv->plat->rx_queues_cfg[queue].pkt_route;
2410c10d4c82SJose Abreu 		stmmac_rx_queue_routing(priv, priv->hw, packet, queue);
2411abe80fdcSJoao Pinto 	}
2412abe80fdcSJoao Pinto }
2413abe80fdcSJoao Pinto 
2414abe80fdcSJoao Pinto /**
2415d0a9c9f9SJoao Pinto  *  stmmac_mtl_configuration - Configure MTL
2416d0a9c9f9SJoao Pinto  *  @priv: driver private structure
2417d0a9c9f9SJoao Pinto  *  Description: It is used for configurring MTL
2418d0a9c9f9SJoao Pinto  */
2419d0a9c9f9SJoao Pinto static void stmmac_mtl_configuration(struct stmmac_priv *priv)
2420d0a9c9f9SJoao Pinto {
2421d0a9c9f9SJoao Pinto 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
2422d0a9c9f9SJoao Pinto 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
2423d0a9c9f9SJoao Pinto 
2424c10d4c82SJose Abreu 	if (tx_queues_count > 1)
24256a3a7193SJoao Pinto 		stmmac_set_tx_queue_weight(priv);
24266a3a7193SJoao Pinto 
2427d0a9c9f9SJoao Pinto 	/* Configure MTL RX algorithms */
2428c10d4c82SJose Abreu 	if (rx_queues_count > 1)
2429c10d4c82SJose Abreu 		stmmac_prog_mtl_rx_algorithms(priv, priv->hw,
2430d0a9c9f9SJoao Pinto 				priv->plat->rx_sched_algorithm);
2431d0a9c9f9SJoao Pinto 
2432d0a9c9f9SJoao Pinto 	/* Configure MTL TX algorithms */
2433c10d4c82SJose Abreu 	if (tx_queues_count > 1)
2434c10d4c82SJose Abreu 		stmmac_prog_mtl_tx_algorithms(priv, priv->hw,
2435d0a9c9f9SJoao Pinto 				priv->plat->tx_sched_algorithm);
2436d0a9c9f9SJoao Pinto 
243719d91873SJoao Pinto 	/* Configure CBS in AVB TX queues */
2438c10d4c82SJose Abreu 	if (tx_queues_count > 1)
243919d91873SJoao Pinto 		stmmac_configure_cbs(priv);
244019d91873SJoao Pinto 
2441d43042f4SJoao Pinto 	/* Map RX MTL to DMA channels */
2442d43042f4SJoao Pinto 	stmmac_rx_queue_dma_chan_map(priv);
2443d43042f4SJoao Pinto 
2444d0a9c9f9SJoao Pinto 	/* Enable MAC RX Queues */
2445d0a9c9f9SJoao Pinto 	stmmac_mac_enable_rx_queues(priv);
24466deee222SJoao Pinto 
2447a8f5102aSJoao Pinto 	/* Set RX priorities */
2448c10d4c82SJose Abreu 	if (rx_queues_count > 1)
2449a8f5102aSJoao Pinto 		stmmac_mac_config_rx_queues_prio(priv);
2450a8f5102aSJoao Pinto 
2451a8f5102aSJoao Pinto 	/* Set TX priorities */
2452c10d4c82SJose Abreu 	if (tx_queues_count > 1)
2453a8f5102aSJoao Pinto 		stmmac_mac_config_tx_queues_prio(priv);
2454abe80fdcSJoao Pinto 
2455abe80fdcSJoao Pinto 	/* Set RX routing */
2456c10d4c82SJose Abreu 	if (rx_queues_count > 1)
2457abe80fdcSJoao Pinto 		stmmac_mac_config_rx_queues_routing(priv);
2458d0a9c9f9SJoao Pinto }
2459d0a9c9f9SJoao Pinto 
24608bf993a5SJose Abreu static void stmmac_safety_feat_configuration(struct stmmac_priv *priv)
24618bf993a5SJose Abreu {
2462c10d4c82SJose Abreu 	if (priv->dma_cap.asp) {
24638bf993a5SJose Abreu 		netdev_info(priv->dev, "Enabling Safety Features\n");
2464c10d4c82SJose Abreu 		stmmac_safety_feat_config(priv, priv->ioaddr, priv->dma_cap.asp);
24658bf993a5SJose Abreu 	} else {
24668bf993a5SJose Abreu 		netdev_info(priv->dev, "No Safety Features support found\n");
24678bf993a5SJose Abreu 	}
24688bf993a5SJose Abreu }
24698bf993a5SJose Abreu 
2470d0a9c9f9SJoao Pinto /**
2471732fdf0eSGiuseppe CAVALLARO  * stmmac_hw_setup - setup mac in a usable state.
2472523f11b5SSrinivas Kandagatla  *  @dev : pointer to the device structure.
2473523f11b5SSrinivas Kandagatla  *  Description:
2474732fdf0eSGiuseppe CAVALLARO  *  this is the main function to setup the HW in a usable state because the
2475732fdf0eSGiuseppe CAVALLARO  *  dma engine is reset, the core registers are configured (e.g. AXI,
2476732fdf0eSGiuseppe CAVALLARO  *  Checksum features, timers). The DMA is ready to start receiving and
2477732fdf0eSGiuseppe CAVALLARO  *  transmitting.
2478523f11b5SSrinivas Kandagatla  *  Return value:
2479523f11b5SSrinivas Kandagatla  *  0 on success and an appropriate (-)ve integer as defined in errno.h
2480523f11b5SSrinivas Kandagatla  *  file on failure.
2481523f11b5SSrinivas Kandagatla  */
2482fe131929SHuacai Chen static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
2483523f11b5SSrinivas Kandagatla {
2484523f11b5SSrinivas Kandagatla 	struct stmmac_priv *priv = netdev_priv(dev);
24853c55d4d0SJoao Pinto 	u32 rx_cnt = priv->plat->rx_queues_to_use;
2486146617b8SJoao Pinto 	u32 tx_cnt = priv->plat->tx_queues_to_use;
2487146617b8SJoao Pinto 	u32 chan;
2488523f11b5SSrinivas Kandagatla 	int ret;
2489523f11b5SSrinivas Kandagatla 
2490523f11b5SSrinivas Kandagatla 	/* DMA initialization and SW reset */
2491523f11b5SSrinivas Kandagatla 	ret = stmmac_init_dma_engine(priv);
2492523f11b5SSrinivas Kandagatla 	if (ret < 0) {
249338ddc59dSLABBE Corentin 		netdev_err(priv->dev, "%s: DMA engine initialization failed\n",
249438ddc59dSLABBE Corentin 			   __func__);
2495523f11b5SSrinivas Kandagatla 		return ret;
2496523f11b5SSrinivas Kandagatla 	}
2497523f11b5SSrinivas Kandagatla 
2498523f11b5SSrinivas Kandagatla 	/* Copy the MAC addr into the HW  */
2499c10d4c82SJose Abreu 	stmmac_set_umac_addr(priv, priv->hw, dev->dev_addr, 0);
2500523f11b5SSrinivas Kandagatla 
250102e57b9dSGiuseppe CAVALLARO 	/* PS and related bits will be programmed according to the speed */
250202e57b9dSGiuseppe CAVALLARO 	if (priv->hw->pcs) {
250302e57b9dSGiuseppe CAVALLARO 		int speed = priv->plat->mac_port_sel_speed;
250402e57b9dSGiuseppe CAVALLARO 
250502e57b9dSGiuseppe CAVALLARO 		if ((speed == SPEED_10) || (speed == SPEED_100) ||
250602e57b9dSGiuseppe CAVALLARO 		    (speed == SPEED_1000)) {
250702e57b9dSGiuseppe CAVALLARO 			priv->hw->ps = speed;
250802e57b9dSGiuseppe CAVALLARO 		} else {
250902e57b9dSGiuseppe CAVALLARO 			dev_warn(priv->device, "invalid port speed\n");
251002e57b9dSGiuseppe CAVALLARO 			priv->hw->ps = 0;
251102e57b9dSGiuseppe CAVALLARO 		}
251202e57b9dSGiuseppe CAVALLARO 	}
251302e57b9dSGiuseppe CAVALLARO 
2514523f11b5SSrinivas Kandagatla 	/* Initialize the MAC Core */
2515c10d4c82SJose Abreu 	stmmac_core_init(priv, priv->hw, dev);
2516523f11b5SSrinivas Kandagatla 
2517d0a9c9f9SJoao Pinto 	/* Initialize MTL*/
2518d0a9c9f9SJoao Pinto 	if (priv->synopsys_id >= DWMAC_CORE_4_00)
2519d0a9c9f9SJoao Pinto 		stmmac_mtl_configuration(priv);
25209eb12474Sjpinto 
25218bf993a5SJose Abreu 	/* Initialize Safety Features */
25228bf993a5SJose Abreu 	if (priv->synopsys_id >= DWMAC_CORE_5_10)
25238bf993a5SJose Abreu 		stmmac_safety_feat_configuration(priv);
25248bf993a5SJose Abreu 
2525c10d4c82SJose Abreu 	ret = stmmac_rx_ipc(priv, priv->hw);
2526978aded4SGiuseppe CAVALLARO 	if (!ret) {
252738ddc59dSLABBE Corentin 		netdev_warn(priv->dev, "RX IPC Checksum Offload disabled\n");
2528978aded4SGiuseppe CAVALLARO 		priv->plat->rx_coe = STMMAC_RX_COE_NONE;
2529d2afb5bdSGiuseppe CAVALLARO 		priv->hw->rx_csum = 0;
2530978aded4SGiuseppe CAVALLARO 	}
2531978aded4SGiuseppe CAVALLARO 
2532523f11b5SSrinivas Kandagatla 	/* Enable the MAC Rx/Tx */
2533c10d4c82SJose Abreu 	stmmac_mac_set(priv, priv->ioaddr, true);
2534523f11b5SSrinivas Kandagatla 
2535b4f0a661SJoao Pinto 	/* Set the HW DMA mode and the COE */
2536b4f0a661SJoao Pinto 	stmmac_dma_operation_mode(priv);
2537b4f0a661SJoao Pinto 
2538523f11b5SSrinivas Kandagatla 	stmmac_mmc_setup(priv);
2539523f11b5SSrinivas Kandagatla 
2540fe131929SHuacai Chen 	if (init_ptp) {
25410ad2be79SThierry Reding 		ret = clk_prepare_enable(priv->plat->clk_ptp_ref);
25420ad2be79SThierry Reding 		if (ret < 0)
25430ad2be79SThierry Reding 			netdev_warn(priv->dev, "failed to enable PTP reference clock: %d\n", ret);
25440ad2be79SThierry Reding 
2545523f11b5SSrinivas Kandagatla 		ret = stmmac_init_ptp(priv);
2546722eef28SHeiner Kallweit 		if (ret == -EOPNOTSUPP)
2547722eef28SHeiner Kallweit 			netdev_warn(priv->dev, "PTP not supported by HW\n");
2548722eef28SHeiner Kallweit 		else if (ret)
2549722eef28SHeiner Kallweit 			netdev_warn(priv->dev, "PTP init failed\n");
2550fe131929SHuacai Chen 	}
2551523f11b5SSrinivas Kandagatla 
255250fb4f74SGiuseppe CAVALLARO #ifdef CONFIG_DEBUG_FS
2553523f11b5SSrinivas Kandagatla 	ret = stmmac_init_fs(dev);
2554523f11b5SSrinivas Kandagatla 	if (ret < 0)
255538ddc59dSLABBE Corentin 		netdev_warn(priv->dev, "%s: failed debugFS registration\n",
255638ddc59dSLABBE Corentin 			    __func__);
2557523f11b5SSrinivas Kandagatla #endif
2558523f11b5SSrinivas Kandagatla 	/* Start the ball rolling... */
2559ae4f0d46SJoao Pinto 	stmmac_start_all_dma(priv);
2560523f11b5SSrinivas Kandagatla 
2561523f11b5SSrinivas Kandagatla 	priv->tx_lpi_timer = STMMAC_DEFAULT_TWT_LS;
2562523f11b5SSrinivas Kandagatla 
2563a4e887faSJose Abreu 	if (priv->use_riwt) {
2564a4e887faSJose Abreu 		ret = stmmac_rx_watchdog(priv, priv->ioaddr, MAX_DMA_RIWT, rx_cnt);
2565a4e887faSJose Abreu 		if (!ret)
2566523f11b5SSrinivas Kandagatla 			priv->rx_riwt = MAX_DMA_RIWT;
2567523f11b5SSrinivas Kandagatla 	}
2568523f11b5SSrinivas Kandagatla 
2569c10d4c82SJose Abreu 	if (priv->hw->pcs)
2570c10d4c82SJose Abreu 		stmmac_pcs_ctrl_ane(priv, priv->hw, 1, priv->hw->ps, 0);
2571523f11b5SSrinivas Kandagatla 
25724854ab99SJoao Pinto 	/* set TX and RX rings length */
25734854ab99SJoao Pinto 	stmmac_set_rings_length(priv);
25744854ab99SJoao Pinto 
2575f748be53SAlexandre TORGUE 	/* Enable TSO */
2576146617b8SJoao Pinto 	if (priv->tso) {
2577146617b8SJoao Pinto 		for (chan = 0; chan < tx_cnt; chan++)
2578a4e887faSJose Abreu 			stmmac_enable_tso(priv, priv->ioaddr, 1, chan);
2579146617b8SJoao Pinto 	}
2580f748be53SAlexandre TORGUE 
2581523f11b5SSrinivas Kandagatla 	return 0;
2582523f11b5SSrinivas Kandagatla }
2583523f11b5SSrinivas Kandagatla 
2584c66f6c37SThierry Reding static void stmmac_hw_teardown(struct net_device *dev)
2585c66f6c37SThierry Reding {
2586c66f6c37SThierry Reding 	struct stmmac_priv *priv = netdev_priv(dev);
2587c66f6c37SThierry Reding 
2588c66f6c37SThierry Reding 	clk_disable_unprepare(priv->plat->clk_ptp_ref);
2589c66f6c37SThierry Reding }
2590c66f6c37SThierry Reding 
2591523f11b5SSrinivas Kandagatla /**
25927ac6653aSJeff Kirsher  *  stmmac_open - open entry point of the driver
25937ac6653aSJeff Kirsher  *  @dev : pointer to the device structure.
25947ac6653aSJeff Kirsher  *  Description:
25957ac6653aSJeff Kirsher  *  This function is the open entry point of the driver.
25967ac6653aSJeff Kirsher  *  Return value:
25977ac6653aSJeff Kirsher  *  0 on success and an appropriate (-)ve integer as defined in errno.h
25987ac6653aSJeff Kirsher  *  file on failure.
25997ac6653aSJeff Kirsher  */
26007ac6653aSJeff Kirsher static int stmmac_open(struct net_device *dev)
26017ac6653aSJeff Kirsher {
26027ac6653aSJeff Kirsher 	struct stmmac_priv *priv = netdev_priv(dev);
26037ac6653aSJeff Kirsher 	int ret;
26047ac6653aSJeff Kirsher 
26054bfcbd7aSFrancesco Virlinzi 	stmmac_check_ether_addr(priv);
26064bfcbd7aSFrancesco Virlinzi 
26073fe5cadbSGiuseppe CAVALLARO 	if (priv->hw->pcs != STMMAC_PCS_RGMII &&
26083fe5cadbSGiuseppe CAVALLARO 	    priv->hw->pcs != STMMAC_PCS_TBI &&
26093fe5cadbSGiuseppe CAVALLARO 	    priv->hw->pcs != STMMAC_PCS_RTBI) {
26107ac6653aSJeff Kirsher 		ret = stmmac_init_phy(dev);
2611e58bb43fSGiuseppe CAVALLARO 		if (ret) {
261238ddc59dSLABBE Corentin 			netdev_err(priv->dev,
261338ddc59dSLABBE Corentin 				   "%s: Cannot attach to PHY (error: %d)\n",
2614e58bb43fSGiuseppe CAVALLARO 				   __func__, ret);
261589df20d9SHans de Goede 			return ret;
26167ac6653aSJeff Kirsher 		}
2617e58bb43fSGiuseppe CAVALLARO 	}
26187ac6653aSJeff Kirsher 
2619523f11b5SSrinivas Kandagatla 	/* Extra statistics */
2620523f11b5SSrinivas Kandagatla 	memset(&priv->xstats, 0, sizeof(struct stmmac_extra_stats));
2621523f11b5SSrinivas Kandagatla 	priv->xstats.threshold = tc;
2622523f11b5SSrinivas Kandagatla 
26235bacd778SLABBE Corentin 	priv->dma_buf_sz = STMMAC_ALIGN(buf_sz);
262422ad3838SGiuseppe Cavallaro 	priv->rx_copybreak = STMMAC_RX_COPYBREAK;
262556329137SBartlomiej Zolnierkiewicz 
26265bacd778SLABBE Corentin 	ret = alloc_dma_desc_resources(priv);
26275bacd778SLABBE Corentin 	if (ret < 0) {
26285bacd778SLABBE Corentin 		netdev_err(priv->dev, "%s: DMA descriptors allocation failed\n",
26295bacd778SLABBE Corentin 			   __func__);
26305bacd778SLABBE Corentin 		goto dma_desc_error;
26315bacd778SLABBE Corentin 	}
26325bacd778SLABBE Corentin 
26335bacd778SLABBE Corentin 	ret = init_dma_desc_rings(dev, GFP_KERNEL);
26345bacd778SLABBE Corentin 	if (ret < 0) {
26355bacd778SLABBE Corentin 		netdev_err(priv->dev, "%s: DMA descriptors initialization failed\n",
26365bacd778SLABBE Corentin 			   __func__);
26375bacd778SLABBE Corentin 		goto init_error;
26385bacd778SLABBE Corentin 	}
26395bacd778SLABBE Corentin 
2640fe131929SHuacai Chen 	ret = stmmac_hw_setup(dev, true);
264156329137SBartlomiej Zolnierkiewicz 	if (ret < 0) {
264238ddc59dSLABBE Corentin 		netdev_err(priv->dev, "%s: Hw setup failed\n", __func__);
2643c9324d18SGiuseppe CAVALLARO 		goto init_error;
26447ac6653aSJeff Kirsher 	}
26457ac6653aSJeff Kirsher 
2646777da230SGiuseppe CAVALLARO 	stmmac_init_tx_coalesce(priv);
2647777da230SGiuseppe CAVALLARO 
2648d6d50c7eSPhilippe Reynes 	if (dev->phydev)
2649d6d50c7eSPhilippe Reynes 		phy_start(dev->phydev);
26507ac6653aSJeff Kirsher 
26517ac6653aSJeff Kirsher 	/* Request the IRQ lines */
26527ac6653aSJeff Kirsher 	ret = request_irq(dev->irq, stmmac_interrupt,
26537ac6653aSJeff Kirsher 			  IRQF_SHARED, dev->name, dev);
26547ac6653aSJeff Kirsher 	if (unlikely(ret < 0)) {
265538ddc59dSLABBE Corentin 		netdev_err(priv->dev,
265638ddc59dSLABBE Corentin 			   "%s: ERROR: allocating the IRQ %d (error: %d)\n",
26577ac6653aSJeff Kirsher 			   __func__, dev->irq, ret);
26586c1e5abeSThierry Reding 		goto irq_error;
26597ac6653aSJeff Kirsher 	}
26607ac6653aSJeff Kirsher 
26617a13f8f5SFrancesco Virlinzi 	/* Request the Wake IRQ in case of another line is used for WoL */
26627a13f8f5SFrancesco Virlinzi 	if (priv->wol_irq != dev->irq) {
26637a13f8f5SFrancesco Virlinzi 		ret = request_irq(priv->wol_irq, stmmac_interrupt,
26647a13f8f5SFrancesco Virlinzi 				  IRQF_SHARED, dev->name, dev);
26657a13f8f5SFrancesco Virlinzi 		if (unlikely(ret < 0)) {
266638ddc59dSLABBE Corentin 			netdev_err(priv->dev,
266738ddc59dSLABBE Corentin 				   "%s: ERROR: allocating the WoL IRQ %d (%d)\n",
2668ceb69499SGiuseppe CAVALLARO 				   __func__, priv->wol_irq, ret);
2669c9324d18SGiuseppe CAVALLARO 			goto wolirq_error;
26707a13f8f5SFrancesco Virlinzi 		}
26717a13f8f5SFrancesco Virlinzi 	}
26727a13f8f5SFrancesco Virlinzi 
2673d765955dSGiuseppe CAVALLARO 	/* Request the IRQ lines */
2674d7ec8584SChen-Yu Tsai 	if (priv->lpi_irq > 0) {
2675d765955dSGiuseppe CAVALLARO 		ret = request_irq(priv->lpi_irq, stmmac_interrupt, IRQF_SHARED,
2676d765955dSGiuseppe CAVALLARO 				  dev->name, dev);
2677d765955dSGiuseppe CAVALLARO 		if (unlikely(ret < 0)) {
267838ddc59dSLABBE Corentin 			netdev_err(priv->dev,
267938ddc59dSLABBE Corentin 				   "%s: ERROR: allocating the LPI IRQ %d (%d)\n",
2680d765955dSGiuseppe CAVALLARO 				   __func__, priv->lpi_irq, ret);
2681c9324d18SGiuseppe CAVALLARO 			goto lpiirq_error;
2682d765955dSGiuseppe CAVALLARO 		}
2683d765955dSGiuseppe CAVALLARO 	}
2684d765955dSGiuseppe CAVALLARO 
2685c22a3f48SJoao Pinto 	stmmac_enable_all_queues(priv);
2686c22a3f48SJoao Pinto 	stmmac_start_all_queues(priv);
26877ac6653aSJeff Kirsher 
26887ac6653aSJeff Kirsher 	return 0;
26897ac6653aSJeff Kirsher 
2690c9324d18SGiuseppe CAVALLARO lpiirq_error:
2691d765955dSGiuseppe CAVALLARO 	if (priv->wol_irq != dev->irq)
2692d765955dSGiuseppe CAVALLARO 		free_irq(priv->wol_irq, dev);
2693c9324d18SGiuseppe CAVALLARO wolirq_error:
26947a13f8f5SFrancesco Virlinzi 	free_irq(dev->irq, dev);
26956c1e5abeSThierry Reding irq_error:
26966c1e5abeSThierry Reding 	if (dev->phydev)
26976c1e5abeSThierry Reding 		phy_stop(dev->phydev);
26987a13f8f5SFrancesco Virlinzi 
26996c1e5abeSThierry Reding 	del_timer_sync(&priv->txtimer);
2700c66f6c37SThierry Reding 	stmmac_hw_teardown(dev);
2701c9324d18SGiuseppe CAVALLARO init_error:
2702c9324d18SGiuseppe CAVALLARO 	free_dma_desc_resources(priv);
27035bacd778SLABBE Corentin dma_desc_error:
2704d6d50c7eSPhilippe Reynes 	if (dev->phydev)
2705d6d50c7eSPhilippe Reynes 		phy_disconnect(dev->phydev);
27064bfcbd7aSFrancesco Virlinzi 
27077ac6653aSJeff Kirsher 	return ret;
27087ac6653aSJeff Kirsher }
27097ac6653aSJeff Kirsher 
27107ac6653aSJeff Kirsher /**
27117ac6653aSJeff Kirsher  *  stmmac_release - close entry point of the driver
27127ac6653aSJeff Kirsher  *  @dev : device pointer.
27137ac6653aSJeff Kirsher  *  Description:
27147ac6653aSJeff Kirsher  *  This is the stop entry point of the driver.
27157ac6653aSJeff Kirsher  */
27167ac6653aSJeff Kirsher static int stmmac_release(struct net_device *dev)
27177ac6653aSJeff Kirsher {
27187ac6653aSJeff Kirsher 	struct stmmac_priv *priv = netdev_priv(dev);
27197ac6653aSJeff Kirsher 
2720d765955dSGiuseppe CAVALLARO 	if (priv->eee_enabled)
2721d765955dSGiuseppe CAVALLARO 		del_timer_sync(&priv->eee_ctrl_timer);
2722d765955dSGiuseppe CAVALLARO 
27237ac6653aSJeff Kirsher 	/* Stop and disconnect the PHY */
2724d6d50c7eSPhilippe Reynes 	if (dev->phydev) {
2725d6d50c7eSPhilippe Reynes 		phy_stop(dev->phydev);
2726d6d50c7eSPhilippe Reynes 		phy_disconnect(dev->phydev);
27277ac6653aSJeff Kirsher 	}
27287ac6653aSJeff Kirsher 
2729c22a3f48SJoao Pinto 	stmmac_stop_all_queues(priv);
27307ac6653aSJeff Kirsher 
2731c22a3f48SJoao Pinto 	stmmac_disable_all_queues(priv);
27327ac6653aSJeff Kirsher 
27339125cdd1SGiuseppe CAVALLARO 	del_timer_sync(&priv->txtimer);
27349125cdd1SGiuseppe CAVALLARO 
27357ac6653aSJeff Kirsher 	/* Free the IRQ lines */
27367ac6653aSJeff Kirsher 	free_irq(dev->irq, dev);
27377a13f8f5SFrancesco Virlinzi 	if (priv->wol_irq != dev->irq)
27387a13f8f5SFrancesco Virlinzi 		free_irq(priv->wol_irq, dev);
2739d7ec8584SChen-Yu Tsai 	if (priv->lpi_irq > 0)
2740d765955dSGiuseppe CAVALLARO 		free_irq(priv->lpi_irq, dev);
27417ac6653aSJeff Kirsher 
27427ac6653aSJeff Kirsher 	/* Stop TX/RX DMA and clear the descriptors */
2743ae4f0d46SJoao Pinto 	stmmac_stop_all_dma(priv);
27447ac6653aSJeff Kirsher 
27457ac6653aSJeff Kirsher 	/* Release and free the Rx/Tx resources */
27467ac6653aSJeff Kirsher 	free_dma_desc_resources(priv);
27477ac6653aSJeff Kirsher 
27487ac6653aSJeff Kirsher 	/* Disable the MAC Rx/Tx */
2749c10d4c82SJose Abreu 	stmmac_mac_set(priv, priv->ioaddr, false);
27507ac6653aSJeff Kirsher 
27517ac6653aSJeff Kirsher 	netif_carrier_off(dev);
27527ac6653aSJeff Kirsher 
275350fb4f74SGiuseppe CAVALLARO #ifdef CONFIG_DEBUG_FS
2754466c5ac8SMathieu Olivari 	stmmac_exit_fs(dev);
2755bfab27a1SGiuseppe CAVALLARO #endif
2756bfab27a1SGiuseppe CAVALLARO 
275792ba6888SRayagond Kokatanur 	stmmac_release_ptp(priv);
275892ba6888SRayagond Kokatanur 
27597ac6653aSJeff Kirsher 	return 0;
27607ac6653aSJeff Kirsher }
27617ac6653aSJeff Kirsher 
27627ac6653aSJeff Kirsher /**
2763f748be53SAlexandre TORGUE  *  stmmac_tso_allocator - close entry point of the driver
2764f748be53SAlexandre TORGUE  *  @priv: driver private structure
2765f748be53SAlexandre TORGUE  *  @des: buffer start address
2766f748be53SAlexandre TORGUE  *  @total_len: total length to fill in descriptors
2767f748be53SAlexandre TORGUE  *  @last_segmant: condition for the last descriptor
2768ce736788SJoao Pinto  *  @queue: TX queue index
2769f748be53SAlexandre TORGUE  *  Description:
2770f748be53SAlexandre TORGUE  *  This function fills descriptor and request new descriptors according to
2771f748be53SAlexandre TORGUE  *  buffer length to fill
2772f748be53SAlexandre TORGUE  */
2773f748be53SAlexandre TORGUE static void stmmac_tso_allocator(struct stmmac_priv *priv, unsigned int des,
2774ce736788SJoao Pinto 				 int total_len, bool last_segment, u32 queue)
2775f748be53SAlexandre TORGUE {
2776ce736788SJoao Pinto 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
2777f748be53SAlexandre TORGUE 	struct dma_desc *desc;
27785bacd778SLABBE Corentin 	u32 buff_size;
2779ce736788SJoao Pinto 	int tmp_len;
2780f748be53SAlexandre TORGUE 
2781f748be53SAlexandre TORGUE 	tmp_len = total_len;
2782f748be53SAlexandre TORGUE 
2783f748be53SAlexandre TORGUE 	while (tmp_len > 0) {
2784ce736788SJoao Pinto 		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
2785b4c9784cSNiklas Cassel 		WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
2786ce736788SJoao Pinto 		desc = tx_q->dma_tx + tx_q->cur_tx;
2787f748be53SAlexandre TORGUE 
2788f8be0d78SMichael Weiser 		desc->des0 = cpu_to_le32(des + (total_len - tmp_len));
2789f748be53SAlexandre TORGUE 		buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ?
2790f748be53SAlexandre TORGUE 			    TSO_MAX_BUFF_SIZE : tmp_len;
2791f748be53SAlexandre TORGUE 
279242de047dSJose Abreu 		stmmac_prepare_tso_tx_desc(priv, desc, 0, buff_size,
2793f748be53SAlexandre TORGUE 				0, 1,
2794426849e6SNiklas Cassel 				(last_segment) && (tmp_len <= TSO_MAX_BUFF_SIZE),
2795f748be53SAlexandre TORGUE 				0, 0);
2796f748be53SAlexandre TORGUE 
2797f748be53SAlexandre TORGUE 		tmp_len -= TSO_MAX_BUFF_SIZE;
2798f748be53SAlexandre TORGUE 	}
2799f748be53SAlexandre TORGUE }
2800f748be53SAlexandre TORGUE 
2801f748be53SAlexandre TORGUE /**
2802f748be53SAlexandre TORGUE  *  stmmac_tso_xmit - Tx entry point of the driver for oversized frames (TSO)
2803f748be53SAlexandre TORGUE  *  @skb : the socket buffer
2804f748be53SAlexandre TORGUE  *  @dev : device pointer
2805f748be53SAlexandre TORGUE  *  Description: this is the transmit function that is called on TSO frames
2806f748be53SAlexandre TORGUE  *  (support available on GMAC4 and newer chips).
2807f748be53SAlexandre TORGUE  *  Diagram below show the ring programming in case of TSO frames:
2808f748be53SAlexandre TORGUE  *
2809f748be53SAlexandre TORGUE  *  First Descriptor
2810f748be53SAlexandre TORGUE  *   --------
2811f748be53SAlexandre TORGUE  *   | DES0 |---> buffer1 = L2/L3/L4 header
2812f748be53SAlexandre TORGUE  *   | DES1 |---> TCP Payload (can continue on next descr...)
2813f748be53SAlexandre TORGUE  *   | DES2 |---> buffer 1 and 2 len
2814f748be53SAlexandre TORGUE  *   | DES3 |---> must set TSE, TCP hdr len-> [22:19]. TCP payload len [17:0]
2815f748be53SAlexandre TORGUE  *   --------
2816f748be53SAlexandre TORGUE  *	|
2817f748be53SAlexandre TORGUE  *     ...
2818f748be53SAlexandre TORGUE  *	|
2819f748be53SAlexandre TORGUE  *   --------
2820f748be53SAlexandre TORGUE  *   | DES0 | --| Split TCP Payload on Buffers 1 and 2
2821f748be53SAlexandre TORGUE  *   | DES1 | --|
2822f748be53SAlexandre TORGUE  *   | DES2 | --> buffer 1 and 2 len
2823f748be53SAlexandre TORGUE  *   | DES3 |
2824f748be53SAlexandre TORGUE  *   --------
2825f748be53SAlexandre TORGUE  *
2826f748be53SAlexandre TORGUE  * mss is fixed when enable tso, so w/o programming the TDES3 ctx field.
2827f748be53SAlexandre TORGUE  */
2828f748be53SAlexandre TORGUE static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
2829f748be53SAlexandre TORGUE {
2830ce736788SJoao Pinto 	struct dma_desc *desc, *first, *mss_desc = NULL;
2831f748be53SAlexandre TORGUE 	struct stmmac_priv *priv = netdev_priv(dev);
2832f748be53SAlexandre TORGUE 	int nfrags = skb_shinfo(skb)->nr_frags;
2833ce736788SJoao Pinto 	u32 queue = skb_get_queue_mapping(skb);
2834f748be53SAlexandre TORGUE 	unsigned int first_entry, des;
2835ce736788SJoao Pinto 	struct stmmac_tx_queue *tx_q;
2836ce736788SJoao Pinto 	int tmp_pay_len = 0;
2837ce736788SJoao Pinto 	u32 pay_len, mss;
2838f748be53SAlexandre TORGUE 	u8 proto_hdr_len;
2839f748be53SAlexandre TORGUE 	int i;
2840f748be53SAlexandre TORGUE 
2841ce736788SJoao Pinto 	tx_q = &priv->tx_queue[queue];
2842ce736788SJoao Pinto 
2843f748be53SAlexandre TORGUE 	/* Compute header lengths */
2844f748be53SAlexandre TORGUE 	proto_hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
2845f748be53SAlexandre TORGUE 
2846f748be53SAlexandre TORGUE 	/* Desc availability based on threshold should be enough safe */
2847ce736788SJoao Pinto 	if (unlikely(stmmac_tx_avail(priv, queue) <
2848f748be53SAlexandre TORGUE 		(((skb->len - proto_hdr_len) / TSO_MAX_BUFF_SIZE + 1)))) {
2849c22a3f48SJoao Pinto 		if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
2850c22a3f48SJoao Pinto 			netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
2851c22a3f48SJoao Pinto 								queue));
2852f748be53SAlexandre TORGUE 			/* This is a hard error, log it. */
285338ddc59dSLABBE Corentin 			netdev_err(priv->dev,
285438ddc59dSLABBE Corentin 				   "%s: Tx Ring full when queue awake\n",
285538ddc59dSLABBE Corentin 				   __func__);
2856f748be53SAlexandre TORGUE 		}
2857f748be53SAlexandre TORGUE 		return NETDEV_TX_BUSY;
2858f748be53SAlexandre TORGUE 	}
2859f748be53SAlexandre TORGUE 
2860f748be53SAlexandre TORGUE 	pay_len = skb_headlen(skb) - proto_hdr_len; /* no frags */
2861f748be53SAlexandre TORGUE 
2862f748be53SAlexandre TORGUE 	mss = skb_shinfo(skb)->gso_size;
2863f748be53SAlexandre TORGUE 
2864f748be53SAlexandre TORGUE 	/* set new MSS value if needed */
28658d212a9eSNiklas Cassel 	if (mss != tx_q->mss) {
2866ce736788SJoao Pinto 		mss_desc = tx_q->dma_tx + tx_q->cur_tx;
286742de047dSJose Abreu 		stmmac_set_mss(priv, mss_desc, mss);
28688d212a9eSNiklas Cassel 		tx_q->mss = mss;
2869ce736788SJoao Pinto 		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
2870b4c9784cSNiklas Cassel 		WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
2871f748be53SAlexandre TORGUE 	}
2872f748be53SAlexandre TORGUE 
2873f748be53SAlexandre TORGUE 	if (netif_msg_tx_queued(priv)) {
2874f748be53SAlexandre TORGUE 		pr_info("%s: tcphdrlen %d, hdr_len %d, pay_len %d, mss %d\n",
2875f748be53SAlexandre TORGUE 			__func__, tcp_hdrlen(skb), proto_hdr_len, pay_len, mss);
2876f748be53SAlexandre TORGUE 		pr_info("\tskb->len %d, skb->data_len %d\n", skb->len,
2877f748be53SAlexandre TORGUE 			skb->data_len);
2878f748be53SAlexandre TORGUE 	}
2879f748be53SAlexandre TORGUE 
2880ce736788SJoao Pinto 	first_entry = tx_q->cur_tx;
2881b4c9784cSNiklas Cassel 	WARN_ON(tx_q->tx_skbuff[first_entry]);
2882f748be53SAlexandre TORGUE 
2883ce736788SJoao Pinto 	desc = tx_q->dma_tx + first_entry;
2884f748be53SAlexandre TORGUE 	first = desc;
2885f748be53SAlexandre TORGUE 
2886f748be53SAlexandre TORGUE 	/* first descriptor: fill Headers on Buf1 */
2887f748be53SAlexandre TORGUE 	des = dma_map_single(priv->device, skb->data, skb_headlen(skb),
2888f748be53SAlexandre TORGUE 			     DMA_TO_DEVICE);
2889f748be53SAlexandre TORGUE 	if (dma_mapping_error(priv->device, des))
2890f748be53SAlexandre TORGUE 		goto dma_map_err;
2891f748be53SAlexandre TORGUE 
2892ce736788SJoao Pinto 	tx_q->tx_skbuff_dma[first_entry].buf = des;
2893ce736788SJoao Pinto 	tx_q->tx_skbuff_dma[first_entry].len = skb_headlen(skb);
2894f748be53SAlexandre TORGUE 
2895f8be0d78SMichael Weiser 	first->des0 = cpu_to_le32(des);
2896f748be53SAlexandre TORGUE 
2897f748be53SAlexandre TORGUE 	/* Fill start of payload in buff2 of first descriptor */
2898f748be53SAlexandre TORGUE 	if (pay_len)
2899f8be0d78SMichael Weiser 		first->des1 = cpu_to_le32(des + proto_hdr_len);
2900f748be53SAlexandre TORGUE 
2901f748be53SAlexandre TORGUE 	/* If needed take extra descriptors to fill the remaining payload */
2902f748be53SAlexandre TORGUE 	tmp_pay_len = pay_len - TSO_MAX_BUFF_SIZE;
2903f748be53SAlexandre TORGUE 
2904ce736788SJoao Pinto 	stmmac_tso_allocator(priv, des, tmp_pay_len, (nfrags == 0), queue);
2905f748be53SAlexandre TORGUE 
2906f748be53SAlexandre TORGUE 	/* Prepare fragments */
2907f748be53SAlexandre TORGUE 	for (i = 0; i < nfrags; i++) {
2908f748be53SAlexandre TORGUE 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2909f748be53SAlexandre TORGUE 
2910f748be53SAlexandre TORGUE 		des = skb_frag_dma_map(priv->device, frag, 0,
2911f748be53SAlexandre TORGUE 				       skb_frag_size(frag),
2912f748be53SAlexandre TORGUE 				       DMA_TO_DEVICE);
2913937071c1SThierry Reding 		if (dma_mapping_error(priv->device, des))
2914937071c1SThierry Reding 			goto dma_map_err;
2915f748be53SAlexandre TORGUE 
2916f748be53SAlexandre TORGUE 		stmmac_tso_allocator(priv, des, skb_frag_size(frag),
2917ce736788SJoao Pinto 				     (i == nfrags - 1), queue);
2918f748be53SAlexandre TORGUE 
2919ce736788SJoao Pinto 		tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des;
2920ce736788SJoao Pinto 		tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_frag_size(frag);
2921ce736788SJoao Pinto 		tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = true;
2922f748be53SAlexandre TORGUE 	}
2923f748be53SAlexandre TORGUE 
2924ce736788SJoao Pinto 	tx_q->tx_skbuff_dma[tx_q->cur_tx].last_segment = true;
2925f748be53SAlexandre TORGUE 
292605cf0d1bSNiklas Cassel 	/* Only the last descriptor gets to point to the skb. */
292705cf0d1bSNiklas Cassel 	tx_q->tx_skbuff[tx_q->cur_tx] = skb;
292805cf0d1bSNiklas Cassel 
292905cf0d1bSNiklas Cassel 	/* We've used all descriptors we need for this skb, however,
293005cf0d1bSNiklas Cassel 	 * advance cur_tx so that it references a fresh descriptor.
293105cf0d1bSNiklas Cassel 	 * ndo_start_xmit will fill this descriptor the next time it's
293205cf0d1bSNiklas Cassel 	 * called and stmmac_tx_clean may clean up to this descriptor.
293305cf0d1bSNiklas Cassel 	 */
2934ce736788SJoao Pinto 	tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
2935f748be53SAlexandre TORGUE 
2936ce736788SJoao Pinto 	if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
2937b3e51069SLABBE Corentin 		netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
293838ddc59dSLABBE Corentin 			  __func__);
2939c22a3f48SJoao Pinto 		netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
2940f748be53SAlexandre TORGUE 	}
2941f748be53SAlexandre TORGUE 
2942f748be53SAlexandre TORGUE 	dev->stats.tx_bytes += skb->len;
2943f748be53SAlexandre TORGUE 	priv->xstats.tx_tso_frames++;
2944f748be53SAlexandre TORGUE 	priv->xstats.tx_tso_nfrags += nfrags;
2945f748be53SAlexandre TORGUE 
2946f748be53SAlexandre TORGUE 	/* Manage tx mitigation */
2947f748be53SAlexandre TORGUE 	priv->tx_count_frames += nfrags + 1;
2948f748be53SAlexandre TORGUE 	if (likely(priv->tx_coal_frames > priv->tx_count_frames)) {
2949f748be53SAlexandre TORGUE 		mod_timer(&priv->txtimer,
2950f748be53SAlexandre TORGUE 			  STMMAC_COAL_TIMER(priv->tx_coal_timer));
2951f748be53SAlexandre TORGUE 	} else {
2952f748be53SAlexandre TORGUE 		priv->tx_count_frames = 0;
295342de047dSJose Abreu 		stmmac_set_tx_ic(priv, desc);
2954f748be53SAlexandre TORGUE 		priv->xstats.tx_set_ic_bit++;
2955f748be53SAlexandre TORGUE 	}
2956f748be53SAlexandre TORGUE 
2957f748be53SAlexandre TORGUE 	skb_tx_timestamp(skb);
2958f748be53SAlexandre TORGUE 
2959f748be53SAlexandre TORGUE 	if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
2960f748be53SAlexandre TORGUE 		     priv->hwts_tx_en)) {
2961f748be53SAlexandre TORGUE 		/* declare that device is doing timestamping */
2962f748be53SAlexandre TORGUE 		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
296342de047dSJose Abreu 		stmmac_enable_tx_timestamp(priv, first);
2964f748be53SAlexandre TORGUE 	}
2965f748be53SAlexandre TORGUE 
2966f748be53SAlexandre TORGUE 	/* Complete the first descriptor before granting the DMA */
296742de047dSJose Abreu 	stmmac_prepare_tso_tx_desc(priv, first, 1,
2968f748be53SAlexandre TORGUE 			proto_hdr_len,
2969f748be53SAlexandre TORGUE 			pay_len,
2970ce736788SJoao Pinto 			1, tx_q->tx_skbuff_dma[first_entry].last_segment,
2971f748be53SAlexandre TORGUE 			tcp_hdrlen(skb) / 4, (skb->len - proto_hdr_len));
2972f748be53SAlexandre TORGUE 
2973f748be53SAlexandre TORGUE 	/* If context desc is used to change MSS */
297415d2ee42SNiklas Cassel 	if (mss_desc) {
297515d2ee42SNiklas Cassel 		/* Make sure that first descriptor has been completely
297615d2ee42SNiklas Cassel 		 * written, including its own bit. This is because MSS is
297715d2ee42SNiklas Cassel 		 * actually before first descriptor, so we need to make
297815d2ee42SNiklas Cassel 		 * sure that MSS's own bit is the last thing written.
297915d2ee42SNiklas Cassel 		 */
298015d2ee42SNiklas Cassel 		dma_wmb();
298142de047dSJose Abreu 		stmmac_set_tx_owner(priv, mss_desc);
298215d2ee42SNiklas Cassel 	}
2983f748be53SAlexandre TORGUE 
2984f748be53SAlexandre TORGUE 	/* The own bit must be the latest setting done when prepare the
2985f748be53SAlexandre TORGUE 	 * descriptor and then barrier is needed to make sure that
2986f748be53SAlexandre TORGUE 	 * all is coherent before granting the DMA engine.
2987f748be53SAlexandre TORGUE 	 */
298895eb930aSNiklas Cassel 	wmb();
2989f748be53SAlexandre TORGUE 
2990f748be53SAlexandre TORGUE 	if (netif_msg_pktdata(priv)) {
2991f748be53SAlexandre TORGUE 		pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n",
2992ce736788SJoao Pinto 			__func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
2993ce736788SJoao Pinto 			tx_q->cur_tx, first, nfrags);
2994f748be53SAlexandre TORGUE 
299542de047dSJose Abreu 		stmmac_display_ring(priv, (void *)tx_q->dma_tx, DMA_TX_SIZE, 0);
2996f748be53SAlexandre TORGUE 
2997f748be53SAlexandre TORGUE 		pr_info(">>> frame to be transmitted: ");
2998f748be53SAlexandre TORGUE 		print_pkt(skb->data, skb_headlen(skb));
2999f748be53SAlexandre TORGUE 	}
3000f748be53SAlexandre TORGUE 
3001c22a3f48SJoao Pinto 	netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
3002f748be53SAlexandre TORGUE 
3003a4e887faSJose Abreu 	stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);
3004f748be53SAlexandre TORGUE 
3005f748be53SAlexandre TORGUE 	return NETDEV_TX_OK;
3006f748be53SAlexandre TORGUE 
3007f748be53SAlexandre TORGUE dma_map_err:
3008f748be53SAlexandre TORGUE 	dev_err(priv->device, "Tx dma map failed\n");
3009f748be53SAlexandre TORGUE 	dev_kfree_skb(skb);
3010f748be53SAlexandre TORGUE 	priv->dev->stats.tx_dropped++;
3011f748be53SAlexandre TORGUE 	return NETDEV_TX_OK;
3012f748be53SAlexandre TORGUE }
3013f748be53SAlexandre TORGUE 
3014f748be53SAlexandre TORGUE /**
3015732fdf0eSGiuseppe CAVALLARO  *  stmmac_xmit - Tx entry point of the driver
30167ac6653aSJeff Kirsher  *  @skb : the socket buffer
30177ac6653aSJeff Kirsher  *  @dev : device pointer
301832ceabcaSGiuseppe CAVALLARO  *  Description : this is the tx entry point of the driver.
301932ceabcaSGiuseppe CAVALLARO  *  It programs the chain or the ring and supports oversized frames
302032ceabcaSGiuseppe CAVALLARO  *  and SG feature.
30217ac6653aSJeff Kirsher  */
static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	unsigned int nopaged_len = skb_headlen(skb);
	int i, csum_insertion = 0, is_jumbo = 0;
	u32 queue = skb_get_queue_mapping(skb);
	int nfrags = skb_shinfo(skb)->nr_frags;
	int entry;
	unsigned int first_entry;
	struct dma_desc *desc, *first;
	struct stmmac_tx_queue *tx_q;
	unsigned int enh_desc;
	unsigned int des;

	tx_q = &priv->tx_queue[queue];

	/* Manage oversized TCP frames for GMAC4 device */
	if (skb_is_gso(skb) && priv->tso) {
		if (skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))
			return stmmac_tso_xmit(skb, dev);
	}

	/* Not enough free descriptors for the head + all fragments: stop
	 * the queue (if not already stopped) and ask the stack to requeue.
	 */
	if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) {
		if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
			netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
								queue));
			/* This is a hard error, log it. */
			netdev_err(priv->dev,
				   "%s: Tx Ring full when queue awake\n",
				   __func__);
		}
		return NETDEV_TX_BUSY;
	}

	/* Leave the EEE low-power state before using the TX path. */
	if (priv->tx_path_in_lpi_mode)
		stmmac_disable_eee_mode(priv);

	entry = tx_q->cur_tx;
	first_entry = entry;
	WARN_ON(tx_q->tx_skbuff[first_entry]);

	/* The stack requests HW checksum insertion per skb. */
	csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL);

	if (likely(priv->extend_desc))
		desc = (struct dma_desc *)(tx_q->dma_etx + entry);
	else
		desc = tx_q->dma_tx + entry;

	first = desc;

	enh_desc = priv->plat->enh_desc;
	/* To program the descriptors according to the size of the frame */
	if (enh_desc)
		is_jumbo = stmmac_is_jumbo_frm(priv, skb->len, enh_desc);

	/* Jumbo handling is only taken on pre-4.00 cores; the helper may
	 * consume several descriptors and returns the last entry used.
	 */
	if (unlikely(is_jumbo) && likely(priv->synopsys_id <
					 DWMAC_CORE_4_00)) {
		entry = stmmac_jumbo_frm(priv, tx_q, skb, csum_insertion);
		if (unlikely(entry < 0))
			goto dma_map_err;
	}

	/* Map and program one descriptor per paged fragment.  The head
	 * (first descriptor) is deliberately filled last, further below,
	 * so its OWN bit is the final one handed to the HW.
	 */
	for (i = 0; i < nfrags; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		int len = skb_frag_size(frag);
		bool last_segment = (i == (nfrags - 1));

		entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
		WARN_ON(tx_q->tx_skbuff[entry]);

		if (likely(priv->extend_desc))
			desc = (struct dma_desc *)(tx_q->dma_etx + entry);
		else
			desc = tx_q->dma_tx + entry;

		des = skb_frag_dma_map(priv->device, frag, 0, len,
				       DMA_TO_DEVICE);
		if (dma_mapping_error(priv->device, des))
			goto dma_map_err; /* should reuse desc w/o issues */

		/* Record the mapping for unmap in stmmac_tx_clean; the
		 * buffer address goes in des0 on GMAC4+, des2 otherwise.
		 */
		tx_q->tx_skbuff_dma[entry].buf = des;
		if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00))
			desc->des0 = cpu_to_le32(des);
		else
			desc->des2 = cpu_to_le32(des);

		tx_q->tx_skbuff_dma[entry].map_as_page = true;
		tx_q->tx_skbuff_dma[entry].len = len;
		tx_q->tx_skbuff_dma[entry].last_segment = last_segment;

		/* Prepare the descriptor and set the own bit too */
		stmmac_prepare_tx_desc(priv, desc, 0, len, csum_insertion,
				priv->mode, 1, last_segment, skb->len);
	}

	/* Only the last descriptor gets to point to the skb. */
	tx_q->tx_skbuff[entry] = skb;

	/* We've used all descriptors we need for this skb, however,
	 * advance cur_tx so that it references a fresh descriptor.
	 * ndo_start_xmit will fill this descriptor the next time it's
	 * called and stmmac_tx_clean may clean up to this descriptor.
	 */
	entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
	tx_q->cur_tx = entry;

	if (netif_msg_pktdata(priv)) {
		void *tx_head;

		netdev_dbg(priv->dev,
			   "%s: curr=%d dirty=%d f=%d, e=%d, first=%p, nfrags=%d",
			   __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
			   entry, first, nfrags);

		if (priv->extend_desc)
			tx_head = (void *)tx_q->dma_etx;
		else
			tx_head = (void *)tx_q->dma_tx;

		stmmac_display_ring(priv, tx_head, DMA_TX_SIZE, false);

		netdev_dbg(priv->dev, ">>> frame to be transmitted: ");
		print_pkt(skb->data, skb->len);
	}

	/* Stop the queue early if a maximally-fragmented skb might no
	 * longer fit; stmmac_tx_clean will wake it up again.
	 */
	if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
		netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
			  __func__);
		netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
	}

	dev->stats.tx_bytes += skb->len;

	/* According to the coalesce parameter the IC bit for the latest
	 * segment is reset and the timer re-started to clean the tx status.
	 * This approach takes care about the fragments: desc is the first
	 * element in case of no SG.
	 */
	priv->tx_count_frames += nfrags + 1;
	if (likely(priv->tx_coal_frames > priv->tx_count_frames) &&
	    !priv->tx_timer_armed) {
		mod_timer(&priv->txtimer,
			  STMMAC_COAL_TIMER(priv->tx_coal_timer));
		priv->tx_timer_armed = true;
	} else {
		priv->tx_count_frames = 0;
		/* Request an interrupt on this descriptor's completion. */
		stmmac_set_tx_ic(priv, desc);
		priv->xstats.tx_set_ic_bit++;
		priv->tx_timer_armed = false;
	}

	skb_tx_timestamp(skb);

	/* Ready to fill the first descriptor and set the OWN bit w/o any
	 * problems because all the descriptors are actually ready to be
	 * passed to the DMA engine.
	 */
	if (likely(!is_jumbo)) {
		bool last_segment = (nfrags == 0);

		des = dma_map_single(priv->device, skb->data,
				     nopaged_len, DMA_TO_DEVICE);
		if (dma_mapping_error(priv->device, des))
			goto dma_map_err;

		tx_q->tx_skbuff_dma[first_entry].buf = des;
		if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00))
			first->des0 = cpu_to_le32(des);
		else
			first->des2 = cpu_to_le32(des);

		tx_q->tx_skbuff_dma[first_entry].len = nopaged_len;
		tx_q->tx_skbuff_dma[first_entry].last_segment = last_segment;

		if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
			     priv->hwts_tx_en)) {
			/* declare that device is doing timestamping */
			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
			stmmac_enable_tx_timestamp(priv, first);
		}

		/* Prepare the first descriptor setting the OWN bit too */
		stmmac_prepare_tx_desc(priv, first, 1, nopaged_len,
				csum_insertion, priv->mode, 1, last_segment,
				skb->len);

		/* The own bit must be the latest setting done when prepare the
		 * descriptor and then barrier is needed to make sure that
		 * all is coherent before granting the DMA engine.
		 */
		wmb();
	}

	netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);

	/* Kick the DMA: pre-4.00 cores use the transmission-enable
	 * register, GMAC4+ use the ring tail pointer.
	 */
	if (priv->synopsys_id < DWMAC_CORE_4_00)
		stmmac_enable_dma_transmission(priv, priv->ioaddr);
	else
		stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr,
				queue);

	return NETDEV_TX_OK;

dma_map_err:
	/* Drop the skb; returning NETDEV_TX_OK because the error is not
	 * recoverable by a stack-level retry.
	 */
	netdev_err(priv->dev, "Tx DMA map failed\n");
	dev_kfree_skb(skb);
	priv->dev->stats.tx_dropped++;
	return NETDEV_TX_OK;
}
32317ac6653aSJeff Kirsher 
3232b9381985SVince Bridgers static void stmmac_rx_vlan(struct net_device *dev, struct sk_buff *skb)
3233b9381985SVince Bridgers {
3234b9381985SVince Bridgers 	struct ethhdr *ehdr;
3235b9381985SVince Bridgers 	u16 vlanid;
3236b9381985SVince Bridgers 
3237b9381985SVince Bridgers 	if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) ==
3238b9381985SVince Bridgers 	    NETIF_F_HW_VLAN_CTAG_RX &&
3239b9381985SVince Bridgers 	    !__vlan_get_tag(skb, &vlanid)) {
3240b9381985SVince Bridgers 		/* pop the vlan tag */
3241b9381985SVince Bridgers 		ehdr = (struct ethhdr *)skb->data;
3242b9381985SVince Bridgers 		memmove(skb->data + VLAN_HLEN, ehdr, ETH_ALEN * 2);
3243b9381985SVince Bridgers 		skb_pull(skb, VLAN_HLEN);
3244b9381985SVince Bridgers 		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlanid);
3245b9381985SVince Bridgers 	}
3246b9381985SVince Bridgers }
3247b9381985SVince Bridgers 
3248b9381985SVince Bridgers 
324954139cf3SJoao Pinto static inline int stmmac_rx_threshold_count(struct stmmac_rx_queue *rx_q)
3250120e87f9SGiuseppe Cavallaro {
325154139cf3SJoao Pinto 	if (rx_q->rx_zeroc_thresh < STMMAC_RX_THRESH)
3252120e87f9SGiuseppe Cavallaro 		return 0;
3253120e87f9SGiuseppe Cavallaro 
3254120e87f9SGiuseppe Cavallaro 	return 1;
3255120e87f9SGiuseppe Cavallaro }
3256120e87f9SGiuseppe Cavallaro 
325732ceabcaSGiuseppe CAVALLARO /**
3258732fdf0eSGiuseppe CAVALLARO  * stmmac_rx_refill - refill used skb preallocated buffers
325932ceabcaSGiuseppe CAVALLARO  * @priv: driver private structure
326054139cf3SJoao Pinto  * @queue: RX queue index
326132ceabcaSGiuseppe CAVALLARO  * Description : this is to reallocate the skb for the reception process
326232ceabcaSGiuseppe CAVALLARO  * that is based on zero-copy.
326332ceabcaSGiuseppe CAVALLARO  */
326454139cf3SJoao Pinto static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
32657ac6653aSJeff Kirsher {
326654139cf3SJoao Pinto 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
326754139cf3SJoao Pinto 	int dirty = stmmac_rx_dirty(priv, queue);
326854139cf3SJoao Pinto 	unsigned int entry = rx_q->dirty_rx;
326954139cf3SJoao Pinto 
32707ac6653aSJeff Kirsher 	int bfsize = priv->dma_buf_sz;
32717ac6653aSJeff Kirsher 
3272e3ad57c9SGiuseppe Cavallaro 	while (dirty-- > 0) {
3273c24602efSGiuseppe CAVALLARO 		struct dma_desc *p;
3274c24602efSGiuseppe CAVALLARO 
3275c24602efSGiuseppe CAVALLARO 		if (priv->extend_desc)
327654139cf3SJoao Pinto 			p = (struct dma_desc *)(rx_q->dma_erx + entry);
3277c24602efSGiuseppe CAVALLARO 		else
327854139cf3SJoao Pinto 			p = rx_q->dma_rx + entry;
3279c24602efSGiuseppe CAVALLARO 
328054139cf3SJoao Pinto 		if (likely(!rx_q->rx_skbuff[entry])) {
32817ac6653aSJeff Kirsher 			struct sk_buff *skb;
32827ac6653aSJeff Kirsher 
3283acb600deSEric Dumazet 			skb = netdev_alloc_skb_ip_align(priv->dev, bfsize);
3284120e87f9SGiuseppe Cavallaro 			if (unlikely(!skb)) {
3285120e87f9SGiuseppe Cavallaro 				/* so for a while no zero-copy! */
328654139cf3SJoao Pinto 				rx_q->rx_zeroc_thresh = STMMAC_RX_THRESH;
3287120e87f9SGiuseppe Cavallaro 				if (unlikely(net_ratelimit()))
3288120e87f9SGiuseppe Cavallaro 					dev_err(priv->device,
3289120e87f9SGiuseppe Cavallaro 						"fail to alloc skb entry %d\n",
3290120e87f9SGiuseppe Cavallaro 						entry);
32917ac6653aSJeff Kirsher 				break;
3292120e87f9SGiuseppe Cavallaro 			}
32937ac6653aSJeff Kirsher 
329454139cf3SJoao Pinto 			rx_q->rx_skbuff[entry] = skb;
329554139cf3SJoao Pinto 			rx_q->rx_skbuff_dma[entry] =
32967ac6653aSJeff Kirsher 			    dma_map_single(priv->device, skb->data, bfsize,
32977ac6653aSJeff Kirsher 					   DMA_FROM_DEVICE);
3298362b37beSGiuseppe CAVALLARO 			if (dma_mapping_error(priv->device,
329954139cf3SJoao Pinto 					      rx_q->rx_skbuff_dma[entry])) {
330038ddc59dSLABBE Corentin 				netdev_err(priv->dev, "Rx DMA map failed\n");
3301362b37beSGiuseppe CAVALLARO 				dev_kfree_skb(skb);
3302362b37beSGiuseppe CAVALLARO 				break;
3303362b37beSGiuseppe CAVALLARO 			}
3304286a8372SGiuseppe CAVALLARO 
3305f748be53SAlexandre TORGUE 			if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00)) {
330654139cf3SJoao Pinto 				p->des0 = cpu_to_le32(rx_q->rx_skbuff_dma[entry]);
3307f748be53SAlexandre TORGUE 				p->des1 = 0;
3308f748be53SAlexandre TORGUE 			} else {
330954139cf3SJoao Pinto 				p->des2 = cpu_to_le32(rx_q->rx_skbuff_dma[entry]);
3310f748be53SAlexandre TORGUE 			}
33112c520b1cSJose Abreu 
33122c520b1cSJose Abreu 			stmmac_refill_desc3(priv, rx_q, p);
3313286a8372SGiuseppe CAVALLARO 
331454139cf3SJoao Pinto 			if (rx_q->rx_zeroc_thresh > 0)
331554139cf3SJoao Pinto 				rx_q->rx_zeroc_thresh--;
3316120e87f9SGiuseppe Cavallaro 
3317b3e51069SLABBE Corentin 			netif_dbg(priv, rx_status, priv->dev,
331838ddc59dSLABBE Corentin 				  "refill entry #%d\n", entry);
33197ac6653aSJeff Kirsher 		}
3320ad688cdbSPavel Machek 		dma_wmb();
3321f748be53SAlexandre TORGUE 
3322f748be53SAlexandre TORGUE 		if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00))
332342de047dSJose Abreu 			stmmac_init_rx_desc(priv, p, priv->use_riwt, 0, 0);
3324f748be53SAlexandre TORGUE 		else
332542de047dSJose Abreu 			stmmac_set_rx_owner(priv, p);
3326f748be53SAlexandre TORGUE 
3327ad688cdbSPavel Machek 		dma_wmb();
3328e3ad57c9SGiuseppe Cavallaro 
3329e3ad57c9SGiuseppe Cavallaro 		entry = STMMAC_GET_ENTRY(entry, DMA_RX_SIZE);
33307ac6653aSJeff Kirsher 	}
333154139cf3SJoao Pinto 	rx_q->dirty_rx = entry;
33327ac6653aSJeff Kirsher }
33337ac6653aSJeff Kirsher 
333432ceabcaSGiuseppe CAVALLARO /**
3335732fdf0eSGiuseppe CAVALLARO  * stmmac_rx - manage the receive process
333632ceabcaSGiuseppe CAVALLARO  * @priv: driver private structure
 * @limit: napi budget (max number of frames to process)
333854139cf3SJoao Pinto  * @queue: RX queue index.
333932ceabcaSGiuseppe CAVALLARO  * Description :  this the function called by the napi poll method.
334032ceabcaSGiuseppe CAVALLARO  * It gets all the frames inside the ring.
334132ceabcaSGiuseppe CAVALLARO  */
334254139cf3SJoao Pinto static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
33437ac6653aSJeff Kirsher {
334454139cf3SJoao Pinto 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
334554139cf3SJoao Pinto 	unsigned int entry = rx_q->cur_rx;
334654139cf3SJoao Pinto 	int coe = priv->hw->rx_csum;
33477ac6653aSJeff Kirsher 	unsigned int next_entry;
33487ac6653aSJeff Kirsher 	unsigned int count = 0;
33497ac6653aSJeff Kirsher 
335083d7af64SGiuseppe CAVALLARO 	if (netif_msg_rx_status(priv)) {
3351d0225e7dSAlexandre TORGUE 		void *rx_head;
3352d0225e7dSAlexandre TORGUE 
335338ddc59dSLABBE Corentin 		netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
3354c24602efSGiuseppe CAVALLARO 		if (priv->extend_desc)
335554139cf3SJoao Pinto 			rx_head = (void *)rx_q->dma_erx;
3356c24602efSGiuseppe CAVALLARO 		else
335754139cf3SJoao Pinto 			rx_head = (void *)rx_q->dma_rx;
3358d0225e7dSAlexandre TORGUE 
335942de047dSJose Abreu 		stmmac_display_ring(priv, rx_head, DMA_RX_SIZE, true);
33607ac6653aSJeff Kirsher 	}
3361c24602efSGiuseppe CAVALLARO 	while (count < limit) {
33627ac6653aSJeff Kirsher 		int status;
33639401bb5cSGiuseppe CAVALLARO 		struct dma_desc *p;
3364ba1ffd74SGiuseppe CAVALLARO 		struct dma_desc *np;
33657ac6653aSJeff Kirsher 
3366c24602efSGiuseppe CAVALLARO 		if (priv->extend_desc)
336754139cf3SJoao Pinto 			p = (struct dma_desc *)(rx_q->dma_erx + entry);
3368c24602efSGiuseppe CAVALLARO 		else
336954139cf3SJoao Pinto 			p = rx_q->dma_rx + entry;
3370c24602efSGiuseppe CAVALLARO 
3371c1fa3212SFabrice Gasnier 		/* read the status of the incoming frame */
337242de047dSJose Abreu 		status = stmmac_rx_status(priv, &priv->dev->stats,
3373c1fa3212SFabrice Gasnier 				&priv->xstats, p);
3374c1fa3212SFabrice Gasnier 		/* check if managed by the DMA otherwise go ahead */
3375c1fa3212SFabrice Gasnier 		if (unlikely(status & dma_own))
33767ac6653aSJeff Kirsher 			break;
33777ac6653aSJeff Kirsher 
33787ac6653aSJeff Kirsher 		count++;
33797ac6653aSJeff Kirsher 
338054139cf3SJoao Pinto 		rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx, DMA_RX_SIZE);
338154139cf3SJoao Pinto 		next_entry = rx_q->cur_rx;
3382e3ad57c9SGiuseppe Cavallaro 
3383c24602efSGiuseppe CAVALLARO 		if (priv->extend_desc)
338454139cf3SJoao Pinto 			np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
3385c24602efSGiuseppe CAVALLARO 		else
338654139cf3SJoao Pinto 			np = rx_q->dma_rx + next_entry;
3387ba1ffd74SGiuseppe CAVALLARO 
3388ba1ffd74SGiuseppe CAVALLARO 		prefetch(np);
33897ac6653aSJeff Kirsher 
339042de047dSJose Abreu 		if (priv->extend_desc)
339142de047dSJose Abreu 			stmmac_rx_extended_status(priv, &priv->dev->stats,
339242de047dSJose Abreu 					&priv->xstats, rx_q->dma_erx + entry);
3393891434b1SRayagond Kokatanur 		if (unlikely(status == discard_frame)) {
33947ac6653aSJeff Kirsher 			priv->dev->stats.rx_errors++;
3395891434b1SRayagond Kokatanur 			if (priv->hwts_rx_en && !priv->extend_desc) {
33968d45e42bSLABBE Corentin 				/* DESC2 & DESC3 will be overwritten by device
3397891434b1SRayagond Kokatanur 				 * with timestamp value, hence reinitialize
3398891434b1SRayagond Kokatanur 				 * them in stmmac_rx_refill() function so that
3399891434b1SRayagond Kokatanur 				 * device can reuse it.
3400891434b1SRayagond Kokatanur 				 */
34019c8080d0SJose Abreu 				dev_kfree_skb_any(rx_q->rx_skbuff[entry]);
340254139cf3SJoao Pinto 				rx_q->rx_skbuff[entry] = NULL;
3403891434b1SRayagond Kokatanur 				dma_unmap_single(priv->device,
340454139cf3SJoao Pinto 						 rx_q->rx_skbuff_dma[entry],
3405ceb69499SGiuseppe CAVALLARO 						 priv->dma_buf_sz,
3406ceb69499SGiuseppe CAVALLARO 						 DMA_FROM_DEVICE);
3407891434b1SRayagond Kokatanur 			}
3408891434b1SRayagond Kokatanur 		} else {
34097ac6653aSJeff Kirsher 			struct sk_buff *skb;
34107ac6653aSJeff Kirsher 			int frame_len;
3411f748be53SAlexandre TORGUE 			unsigned int des;
3412f748be53SAlexandre TORGUE 
3413f748be53SAlexandre TORGUE 			if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00))
3414f8be0d78SMichael Weiser 				des = le32_to_cpu(p->des0);
3415f748be53SAlexandre TORGUE 			else
3416f8be0d78SMichael Weiser 				des = le32_to_cpu(p->des2);
34177ac6653aSJeff Kirsher 
341842de047dSJose Abreu 			frame_len = stmmac_get_rx_frame_len(priv, p, coe);
3419ceb69499SGiuseppe CAVALLARO 
34208d45e42bSLABBE Corentin 			/*  If frame length is greater than skb buffer size
3421f748be53SAlexandre TORGUE 			 *  (preallocated during init) then the packet is
3422f748be53SAlexandre TORGUE 			 *  ignored
3423f748be53SAlexandre TORGUE 			 */
3424e527c4a7SGiuseppe CAVALLARO 			if (frame_len > priv->dma_buf_sz) {
342538ddc59dSLABBE Corentin 				netdev_err(priv->dev,
342638ddc59dSLABBE Corentin 					   "len %d larger than size (%d)\n",
342738ddc59dSLABBE Corentin 					   frame_len, priv->dma_buf_sz);
3428e527c4a7SGiuseppe CAVALLARO 				priv->dev->stats.rx_length_errors++;
3429e527c4a7SGiuseppe CAVALLARO 				break;
3430e527c4a7SGiuseppe CAVALLARO 			}
3431e527c4a7SGiuseppe CAVALLARO 
34327ac6653aSJeff Kirsher 			/* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3
3433ceb69499SGiuseppe CAVALLARO 			 * Type frames (LLC/LLC-SNAP)
3434565020aaSJose Abreu 			 *
3435565020aaSJose Abreu 			 * llc_snap is never checked in GMAC >= 4, so this ACS
3436565020aaSJose Abreu 			 * feature is always disabled and packets need to be
3437565020aaSJose Abreu 			 * stripped manually.
3438ceb69499SGiuseppe CAVALLARO 			 */
3439565020aaSJose Abreu 			if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00) ||
3440565020aaSJose Abreu 			    unlikely(status != llc_snap))
34417ac6653aSJeff Kirsher 				frame_len -= ETH_FCS_LEN;
34427ac6653aSJeff Kirsher 
344383d7af64SGiuseppe CAVALLARO 			if (netif_msg_rx_status(priv)) {
344438ddc59dSLABBE Corentin 				netdev_dbg(priv->dev, "\tdesc: %p [entry %d] buff=0x%x\n",
3445f748be53SAlexandre TORGUE 					   p, entry, des);
344638ddc59dSLABBE Corentin 				netdev_dbg(priv->dev, "frame size %d, COE: %d\n",
344783d7af64SGiuseppe CAVALLARO 					   frame_len, status);
344883d7af64SGiuseppe CAVALLARO 			}
344922ad3838SGiuseppe Cavallaro 
3450f748be53SAlexandre TORGUE 			/* The zero-copy is always used for all the sizes
3451f748be53SAlexandre TORGUE 			 * in case of GMAC4 because it needs
3452f748be53SAlexandre TORGUE 			 * to refill the used descriptors, always.
3453f748be53SAlexandre TORGUE 			 */
3454f748be53SAlexandre TORGUE 			if (unlikely(!priv->plat->has_gmac4 &&
3455f748be53SAlexandre TORGUE 				     ((frame_len < priv->rx_copybreak) ||
345654139cf3SJoao Pinto 				     stmmac_rx_threshold_count(rx_q)))) {
345722ad3838SGiuseppe Cavallaro 				skb = netdev_alloc_skb_ip_align(priv->dev,
345822ad3838SGiuseppe Cavallaro 								frame_len);
345922ad3838SGiuseppe Cavallaro 				if (unlikely(!skb)) {
346022ad3838SGiuseppe Cavallaro 					if (net_ratelimit())
346122ad3838SGiuseppe Cavallaro 						dev_warn(priv->device,
346222ad3838SGiuseppe Cavallaro 							 "packet dropped\n");
346322ad3838SGiuseppe Cavallaro 					priv->dev->stats.rx_dropped++;
346422ad3838SGiuseppe Cavallaro 					break;
346522ad3838SGiuseppe Cavallaro 				}
346622ad3838SGiuseppe Cavallaro 
346722ad3838SGiuseppe Cavallaro 				dma_sync_single_for_cpu(priv->device,
346854139cf3SJoao Pinto 							rx_q->rx_skbuff_dma
346922ad3838SGiuseppe Cavallaro 							[entry], frame_len,
347022ad3838SGiuseppe Cavallaro 							DMA_FROM_DEVICE);
347122ad3838SGiuseppe Cavallaro 				skb_copy_to_linear_data(skb,
347254139cf3SJoao Pinto 							rx_q->
347322ad3838SGiuseppe Cavallaro 							rx_skbuff[entry]->data,
347422ad3838SGiuseppe Cavallaro 							frame_len);
347522ad3838SGiuseppe Cavallaro 
347622ad3838SGiuseppe Cavallaro 				skb_put(skb, frame_len);
347722ad3838SGiuseppe Cavallaro 				dma_sync_single_for_device(priv->device,
347854139cf3SJoao Pinto 							   rx_q->rx_skbuff_dma
347922ad3838SGiuseppe Cavallaro 							   [entry], frame_len,
348022ad3838SGiuseppe Cavallaro 							   DMA_FROM_DEVICE);
348122ad3838SGiuseppe Cavallaro 			} else {
348254139cf3SJoao Pinto 				skb = rx_q->rx_skbuff[entry];
34837ac6653aSJeff Kirsher 				if (unlikely(!skb)) {
348438ddc59dSLABBE Corentin 					netdev_err(priv->dev,
348538ddc59dSLABBE Corentin 						   "%s: Inconsistent Rx chain\n",
34867ac6653aSJeff Kirsher 						   priv->dev->name);
34877ac6653aSJeff Kirsher 					priv->dev->stats.rx_dropped++;
34887ac6653aSJeff Kirsher 					break;
34897ac6653aSJeff Kirsher 				}
34907ac6653aSJeff Kirsher 				prefetch(skb->data - NET_IP_ALIGN);
349154139cf3SJoao Pinto 				rx_q->rx_skbuff[entry] = NULL;
349254139cf3SJoao Pinto 				rx_q->rx_zeroc_thresh++;
34937ac6653aSJeff Kirsher 
34947ac6653aSJeff Kirsher 				skb_put(skb, frame_len);
34957ac6653aSJeff Kirsher 				dma_unmap_single(priv->device,
349654139cf3SJoao Pinto 						 rx_q->rx_skbuff_dma[entry],
349722ad3838SGiuseppe Cavallaro 						 priv->dma_buf_sz,
349822ad3838SGiuseppe Cavallaro 						 DMA_FROM_DEVICE);
349922ad3838SGiuseppe Cavallaro 			}
350022ad3838SGiuseppe Cavallaro 
35017ac6653aSJeff Kirsher 			if (netif_msg_pktdata(priv)) {
350238ddc59dSLABBE Corentin 				netdev_dbg(priv->dev, "frame received (%dbytes)",
350338ddc59dSLABBE Corentin 					   frame_len);
35047ac6653aSJeff Kirsher 				print_pkt(skb->data, frame_len);
35057ac6653aSJeff Kirsher 			}
350683d7af64SGiuseppe CAVALLARO 
3507ba1ffd74SGiuseppe CAVALLARO 			stmmac_get_rx_hwtstamp(priv, p, np, skb);
3508ba1ffd74SGiuseppe CAVALLARO 
3509b9381985SVince Bridgers 			stmmac_rx_vlan(priv->dev, skb);
3510b9381985SVince Bridgers 
35117ac6653aSJeff Kirsher 			skb->protocol = eth_type_trans(skb, priv->dev);
35127ac6653aSJeff Kirsher 
3513ceb69499SGiuseppe CAVALLARO 			if (unlikely(!coe))
35147ac6653aSJeff Kirsher 				skb_checksum_none_assert(skb);
351562a2ab93SGiuseppe CAVALLARO 			else
35167ac6653aSJeff Kirsher 				skb->ip_summed = CHECKSUM_UNNECESSARY;
351762a2ab93SGiuseppe CAVALLARO 
3518c22a3f48SJoao Pinto 			napi_gro_receive(&rx_q->napi, skb);
35197ac6653aSJeff Kirsher 
35207ac6653aSJeff Kirsher 			priv->dev->stats.rx_packets++;
35217ac6653aSJeff Kirsher 			priv->dev->stats.rx_bytes += frame_len;
35227ac6653aSJeff Kirsher 		}
35237ac6653aSJeff Kirsher 		entry = next_entry;
35247ac6653aSJeff Kirsher 	}
35257ac6653aSJeff Kirsher 
352654139cf3SJoao Pinto 	stmmac_rx_refill(priv, queue);
35277ac6653aSJeff Kirsher 
35287ac6653aSJeff Kirsher 	priv->xstats.rx_pkt_n += count;
35297ac6653aSJeff Kirsher 
35307ac6653aSJeff Kirsher 	return count;
35317ac6653aSJeff Kirsher }
35327ac6653aSJeff Kirsher 
35337ac6653aSJeff Kirsher /**
35347ac6653aSJeff Kirsher  *  stmmac_poll - stmmac poll method (NAPI)
35357ac6653aSJeff Kirsher  *  @napi : pointer to the napi structure.
35367ac6653aSJeff Kirsher  *  @budget : maximum number of packets that the current CPU can receive from
35377ac6653aSJeff Kirsher  *	      all interfaces.
35387ac6653aSJeff Kirsher  *  Description :
35399125cdd1SGiuseppe CAVALLARO  *  To look at the incoming frames and clear the tx resources.
35407ac6653aSJeff Kirsher  */
35417ac6653aSJeff Kirsher static int stmmac_poll(struct napi_struct *napi, int budget)
35427ac6653aSJeff Kirsher {
3543c22a3f48SJoao Pinto 	struct stmmac_rx_queue *rx_q =
3544c22a3f48SJoao Pinto 		container_of(napi, struct stmmac_rx_queue, napi);
3545c22a3f48SJoao Pinto 	struct stmmac_priv *priv = rx_q->priv_data;
3546ce736788SJoao Pinto 	u32 tx_count = priv->plat->tx_queues_to_use;
3547c22a3f48SJoao Pinto 	u32 chan = rx_q->queue_index;
354854139cf3SJoao Pinto 	int work_done = 0;
3549c22a3f48SJoao Pinto 	u32 queue;
35507ac6653aSJeff Kirsher 
35519125cdd1SGiuseppe CAVALLARO 	priv->xstats.napi_poll++;
3552ce736788SJoao Pinto 
3553ce736788SJoao Pinto 	/* check all the queues */
3554ce736788SJoao Pinto 	for (queue = 0; queue < tx_count; queue++)
3555ce736788SJoao Pinto 		stmmac_tx_clean(priv, queue);
3556ce736788SJoao Pinto 
3557c22a3f48SJoao Pinto 	work_done = stmmac_rx(priv, budget, rx_q->queue_index);
35587ac6653aSJeff Kirsher 	if (work_done < budget) {
35596ad20165SEric Dumazet 		napi_complete_done(napi, work_done);
3560a4e887faSJose Abreu 		stmmac_enable_dma_irq(priv, priv->ioaddr, chan);
35617ac6653aSJeff Kirsher 	}
35627ac6653aSJeff Kirsher 	return work_done;
35637ac6653aSJeff Kirsher }
35647ac6653aSJeff Kirsher 
35657ac6653aSJeff Kirsher /**
35667ac6653aSJeff Kirsher  *  stmmac_tx_timeout
35677ac6653aSJeff Kirsher  *  @dev : Pointer to net device structure
35687ac6653aSJeff Kirsher  *  Description: this function is called when a packet transmission fails to
35697284a3f1SGiuseppe CAVALLARO  *   complete within a reasonable time. The driver will mark the error in the
35707ac6653aSJeff Kirsher  *   netdev structure and arrange for the device to be reset to a sane state
35717ac6653aSJeff Kirsher  *   in order to transmit a new packet.
35727ac6653aSJeff Kirsher  */
35737ac6653aSJeff Kirsher static void stmmac_tx_timeout(struct net_device *dev)
35747ac6653aSJeff Kirsher {
35757ac6653aSJeff Kirsher 	struct stmmac_priv *priv = netdev_priv(dev);
35767ac6653aSJeff Kirsher 
357734877a15SJose Abreu 	stmmac_global_err(priv);
35787ac6653aSJeff Kirsher }
35797ac6653aSJeff Kirsher 
35807ac6653aSJeff Kirsher /**
358101789349SJiri Pirko  *  stmmac_set_rx_mode - entry point for multicast addressing
35827ac6653aSJeff Kirsher  *  @dev : pointer to the device structure
35837ac6653aSJeff Kirsher  *  Description:
35847ac6653aSJeff Kirsher  *  This function is a driver entry point which gets called by the kernel
35857ac6653aSJeff Kirsher  *  whenever multicast addresses must be enabled/disabled.
35867ac6653aSJeff Kirsher  *  Return value:
35877ac6653aSJeff Kirsher  *  void.
35887ac6653aSJeff Kirsher  */
358901789349SJiri Pirko static void stmmac_set_rx_mode(struct net_device *dev)
35907ac6653aSJeff Kirsher {
35917ac6653aSJeff Kirsher 	struct stmmac_priv *priv = netdev_priv(dev);
35927ac6653aSJeff Kirsher 
3593c10d4c82SJose Abreu 	stmmac_set_filter(priv, priv->hw, dev);
35947ac6653aSJeff Kirsher }
35957ac6653aSJeff Kirsher 
35967ac6653aSJeff Kirsher /**
35977ac6653aSJeff Kirsher  *  stmmac_change_mtu - entry point to change MTU size for the device.
35987ac6653aSJeff Kirsher  *  @dev : device pointer.
35997ac6653aSJeff Kirsher  *  @new_mtu : the new MTU size for the device.
36007ac6653aSJeff Kirsher  *  Description: the Maximum Transfer Unit (MTU) is used by the network layer
36017ac6653aSJeff Kirsher  *  to drive packet transmission. Ethernet has an MTU of 1500 octets
36027ac6653aSJeff Kirsher  *  (ETH_DATA_LEN). This value can be changed with ifconfig.
36037ac6653aSJeff Kirsher  *  Return value:
36047ac6653aSJeff Kirsher  *  0 on success and an appropriate (-)ve integer as defined in errno.h
36057ac6653aSJeff Kirsher  *  file on failure.
36067ac6653aSJeff Kirsher  */
36077ac6653aSJeff Kirsher static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
36087ac6653aSJeff Kirsher {
360938ddc59dSLABBE Corentin 	struct stmmac_priv *priv = netdev_priv(dev);
361038ddc59dSLABBE Corentin 
36117ac6653aSJeff Kirsher 	if (netif_running(dev)) {
361238ddc59dSLABBE Corentin 		netdev_err(priv->dev, "must be stopped to change its MTU\n");
36137ac6653aSJeff Kirsher 		return -EBUSY;
36147ac6653aSJeff Kirsher 	}
36157ac6653aSJeff Kirsher 
36167ac6653aSJeff Kirsher 	dev->mtu = new_mtu;
3617f748be53SAlexandre TORGUE 
36187ac6653aSJeff Kirsher 	netdev_update_features(dev);
36197ac6653aSJeff Kirsher 
36207ac6653aSJeff Kirsher 	return 0;
36217ac6653aSJeff Kirsher }
36227ac6653aSJeff Kirsher 
3623c8f44affSMichał Mirosław static netdev_features_t stmmac_fix_features(struct net_device *dev,
3624c8f44affSMichał Mirosław 					     netdev_features_t features)
36257ac6653aSJeff Kirsher {
36267ac6653aSJeff Kirsher 	struct stmmac_priv *priv = netdev_priv(dev);
36277ac6653aSJeff Kirsher 
362838912bdbSDeepak SIKRI 	if (priv->plat->rx_coe == STMMAC_RX_COE_NONE)
36297ac6653aSJeff Kirsher 		features &= ~NETIF_F_RXCSUM;
3630d2afb5bdSGiuseppe CAVALLARO 
36317ac6653aSJeff Kirsher 	if (!priv->plat->tx_coe)
3632a188222bSTom Herbert 		features &= ~NETIF_F_CSUM_MASK;
36337ac6653aSJeff Kirsher 
36347ac6653aSJeff Kirsher 	/* Some GMAC devices have a bugged Jumbo frame support that
36357ac6653aSJeff Kirsher 	 * needs to have the Tx COE disabled for oversized frames
36367ac6653aSJeff Kirsher 	 * (due to limited buffer sizes). In this case we disable
3637ceb69499SGiuseppe CAVALLARO 	 * the TX csum insertion in the TDES and not use SF.
3638ceb69499SGiuseppe CAVALLARO 	 */
36397ac6653aSJeff Kirsher 	if (priv->plat->bugged_jumbo && (dev->mtu > ETH_DATA_LEN))
3640a188222bSTom Herbert 		features &= ~NETIF_F_CSUM_MASK;
36417ac6653aSJeff Kirsher 
3642f748be53SAlexandre TORGUE 	/* Disable tso if asked by ethtool */
3643f748be53SAlexandre TORGUE 	if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
3644f748be53SAlexandre TORGUE 		if (features & NETIF_F_TSO)
3645f748be53SAlexandre TORGUE 			priv->tso = true;
3646f748be53SAlexandre TORGUE 		else
3647f748be53SAlexandre TORGUE 			priv->tso = false;
3648f748be53SAlexandre TORGUE 	}
3649f748be53SAlexandre TORGUE 
36507ac6653aSJeff Kirsher 	return features;
36517ac6653aSJeff Kirsher }
36527ac6653aSJeff Kirsher 
3653d2afb5bdSGiuseppe CAVALLARO static int stmmac_set_features(struct net_device *netdev,
3654d2afb5bdSGiuseppe CAVALLARO 			       netdev_features_t features)
3655d2afb5bdSGiuseppe CAVALLARO {
3656d2afb5bdSGiuseppe CAVALLARO 	struct stmmac_priv *priv = netdev_priv(netdev);
3657d2afb5bdSGiuseppe CAVALLARO 
3658d2afb5bdSGiuseppe CAVALLARO 	/* Keep the COE Type in case of csum is supporting */
3659d2afb5bdSGiuseppe CAVALLARO 	if (features & NETIF_F_RXCSUM)
3660d2afb5bdSGiuseppe CAVALLARO 		priv->hw->rx_csum = priv->plat->rx_coe;
3661d2afb5bdSGiuseppe CAVALLARO 	else
3662d2afb5bdSGiuseppe CAVALLARO 		priv->hw->rx_csum = 0;
3663d2afb5bdSGiuseppe CAVALLARO 	/* No check needed because rx_coe has been set before and it will be
3664d2afb5bdSGiuseppe CAVALLARO 	 * fixed in case of issue.
3665d2afb5bdSGiuseppe CAVALLARO 	 */
3666c10d4c82SJose Abreu 	stmmac_rx_ipc(priv, priv->hw);
3667d2afb5bdSGiuseppe CAVALLARO 
3668d2afb5bdSGiuseppe CAVALLARO 	return 0;
3669d2afb5bdSGiuseppe CAVALLARO }
3670d2afb5bdSGiuseppe CAVALLARO 
367132ceabcaSGiuseppe CAVALLARO /**
367232ceabcaSGiuseppe CAVALLARO  *  stmmac_interrupt - main ISR
367332ceabcaSGiuseppe CAVALLARO  *  @irq: interrupt number.
367432ceabcaSGiuseppe CAVALLARO  *  @dev_id: to pass the net device pointer.
367532ceabcaSGiuseppe CAVALLARO  *  Description: this is the main driver interrupt service routine.
3676732fdf0eSGiuseppe CAVALLARO  *  It can call:
3677732fdf0eSGiuseppe CAVALLARO  *  o DMA service routine (to manage incoming frame reception and transmission
3678732fdf0eSGiuseppe CAVALLARO  *    status)
3679732fdf0eSGiuseppe CAVALLARO  *  o Core interrupts to manage: remote wake-up, management counter, LPI
368032ceabcaSGiuseppe CAVALLARO  *    interrupts.
368132ceabcaSGiuseppe CAVALLARO  */
36827ac6653aSJeff Kirsher static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
36837ac6653aSJeff Kirsher {
36847ac6653aSJeff Kirsher 	struct net_device *dev = (struct net_device *)dev_id;
36857ac6653aSJeff Kirsher 	struct stmmac_priv *priv = netdev_priv(dev);
36867bac4e1eSJoao Pinto 	u32 rx_cnt = priv->plat->rx_queues_to_use;
36877bac4e1eSJoao Pinto 	u32 tx_cnt = priv->plat->tx_queues_to_use;
36887bac4e1eSJoao Pinto 	u32 queues_count;
36897bac4e1eSJoao Pinto 	u32 queue;
36907bac4e1eSJoao Pinto 
36917bac4e1eSJoao Pinto 	queues_count = (rx_cnt > tx_cnt) ? rx_cnt : tx_cnt;
36927ac6653aSJeff Kirsher 
369389f7f2cfSSrinivas Kandagatla 	if (priv->irq_wake)
369489f7f2cfSSrinivas Kandagatla 		pm_wakeup_event(priv->device, 0);
369589f7f2cfSSrinivas Kandagatla 
36967ac6653aSJeff Kirsher 	if (unlikely(!dev)) {
369738ddc59dSLABBE Corentin 		netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
36987ac6653aSJeff Kirsher 		return IRQ_NONE;
36997ac6653aSJeff Kirsher 	}
37007ac6653aSJeff Kirsher 
370134877a15SJose Abreu 	/* Check if adapter is up */
370234877a15SJose Abreu 	if (test_bit(STMMAC_DOWN, &priv->state))
370334877a15SJose Abreu 		return IRQ_HANDLED;
37048bf993a5SJose Abreu 	/* Check if a fatal error happened */
37058bf993a5SJose Abreu 	if (stmmac_safety_feat_interrupt(priv))
37068bf993a5SJose Abreu 		return IRQ_HANDLED;
370734877a15SJose Abreu 
37087ac6653aSJeff Kirsher 	/* To handle GMAC own interrupts */
3709f748be53SAlexandre TORGUE 	if ((priv->plat->has_gmac) || (priv->plat->has_gmac4)) {
3710c10d4c82SJose Abreu 		int status = stmmac_host_irq_status(priv, priv->hw, &priv->xstats);
37118f71a88dSJoao Pinto 
3712d765955dSGiuseppe CAVALLARO 		if (unlikely(status)) {
3713d765955dSGiuseppe CAVALLARO 			/* For LPI we need to save the tx status */
37140982a0f6SGiuseppe CAVALLARO 			if (status & CORE_IRQ_TX_PATH_IN_LPI_MODE)
3715d765955dSGiuseppe CAVALLARO 				priv->tx_path_in_lpi_mode = true;
37160982a0f6SGiuseppe CAVALLARO 			if (status & CORE_IRQ_TX_PATH_EXIT_LPI_MODE)
3717d765955dSGiuseppe CAVALLARO 				priv->tx_path_in_lpi_mode = false;
37187bac4e1eSJoao Pinto 		}
37197bac4e1eSJoao Pinto 
37207bac4e1eSJoao Pinto 		if (priv->synopsys_id >= DWMAC_CORE_4_00) {
37217bac4e1eSJoao Pinto 			for (queue = 0; queue < queues_count; queue++) {
372254139cf3SJoao Pinto 				struct stmmac_rx_queue *rx_q =
372354139cf3SJoao Pinto 				&priv->rx_queue[queue];
372454139cf3SJoao Pinto 
3725c10d4c82SJose Abreu 				status |= stmmac_host_mtl_irq_status(priv,
3726c10d4c82SJose Abreu 						priv->hw, queue);
37277bac4e1eSJoao Pinto 
3728a4e887faSJose Abreu 				if (status & CORE_IRQ_MTL_RX_OVERFLOW)
3729a4e887faSJose Abreu 					stmmac_set_rx_tail_ptr(priv,
3730a4e887faSJose Abreu 							priv->ioaddr,
373154139cf3SJoao Pinto 							rx_q->rx_tail_addr,
37327bac4e1eSJoao Pinto 							queue);
37337bac4e1eSJoao Pinto 			}
3734d765955dSGiuseppe CAVALLARO 		}
373570523e63SGiuseppe CAVALLARO 
373670523e63SGiuseppe CAVALLARO 		/* PCS link status */
37373fe5cadbSGiuseppe CAVALLARO 		if (priv->hw->pcs) {
373870523e63SGiuseppe CAVALLARO 			if (priv->xstats.pcs_link)
373970523e63SGiuseppe CAVALLARO 				netif_carrier_on(dev);
374070523e63SGiuseppe CAVALLARO 			else
374170523e63SGiuseppe CAVALLARO 				netif_carrier_off(dev);
374270523e63SGiuseppe CAVALLARO 		}
3743d765955dSGiuseppe CAVALLARO 	}
3744d765955dSGiuseppe CAVALLARO 
3745d765955dSGiuseppe CAVALLARO 	/* To handle DMA interrupts */
37467ac6653aSJeff Kirsher 	stmmac_dma_interrupt(priv);
37477ac6653aSJeff Kirsher 
37487ac6653aSJeff Kirsher 	return IRQ_HANDLED;
37497ac6653aSJeff Kirsher }
37507ac6653aSJeff Kirsher 
37517ac6653aSJeff Kirsher #ifdef CONFIG_NET_POLL_CONTROLLER
37527ac6653aSJeff Kirsher /* Polling receive - used by NETCONSOLE and other diagnostic tools
3753ceb69499SGiuseppe CAVALLARO  * to allow network I/O with interrupts disabled.
3754ceb69499SGiuseppe CAVALLARO  */
37557ac6653aSJeff Kirsher static void stmmac_poll_controller(struct net_device *dev)
37567ac6653aSJeff Kirsher {
37577ac6653aSJeff Kirsher 	disable_irq(dev->irq);
37587ac6653aSJeff Kirsher 	stmmac_interrupt(dev->irq, dev);
37597ac6653aSJeff Kirsher 	enable_irq(dev->irq);
37607ac6653aSJeff Kirsher }
37617ac6653aSJeff Kirsher #endif
37627ac6653aSJeff Kirsher 
37637ac6653aSJeff Kirsher /**
37647ac6653aSJeff Kirsher  *  stmmac_ioctl - Entry point for the Ioctl
37657ac6653aSJeff Kirsher  *  @dev: Device pointer.
37667ac6653aSJeff Kirsher  *  @rq: An IOCTL specefic structure, that can contain a pointer to
37677ac6653aSJeff Kirsher  *  a proprietary structure used to pass information to the driver.
37687ac6653aSJeff Kirsher  *  @cmd: IOCTL command
37697ac6653aSJeff Kirsher  *  Description:
377032ceabcaSGiuseppe CAVALLARO  *  Currently it supports the phy_mii_ioctl(...) and HW time stamping.
37717ac6653aSJeff Kirsher  */
37727ac6653aSJeff Kirsher static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
37737ac6653aSJeff Kirsher {
3774891434b1SRayagond Kokatanur 	int ret = -EOPNOTSUPP;
37757ac6653aSJeff Kirsher 
37767ac6653aSJeff Kirsher 	if (!netif_running(dev))
37777ac6653aSJeff Kirsher 		return -EINVAL;
37787ac6653aSJeff Kirsher 
3779891434b1SRayagond Kokatanur 	switch (cmd) {
3780891434b1SRayagond Kokatanur 	case SIOCGMIIPHY:
3781891434b1SRayagond Kokatanur 	case SIOCGMIIREG:
3782891434b1SRayagond Kokatanur 	case SIOCSMIIREG:
3783d6d50c7eSPhilippe Reynes 		if (!dev->phydev)
37847ac6653aSJeff Kirsher 			return -EINVAL;
3785d6d50c7eSPhilippe Reynes 		ret = phy_mii_ioctl(dev->phydev, rq, cmd);
3786891434b1SRayagond Kokatanur 		break;
3787891434b1SRayagond Kokatanur 	case SIOCSHWTSTAMP:
3788891434b1SRayagond Kokatanur 		ret = stmmac_hwtstamp_ioctl(dev, rq);
3789891434b1SRayagond Kokatanur 		break;
3790891434b1SRayagond Kokatanur 	default:
3791891434b1SRayagond Kokatanur 		break;
3792891434b1SRayagond Kokatanur 	}
37937ac6653aSJeff Kirsher 
37947ac6653aSJeff Kirsher 	return ret;
37957ac6653aSJeff Kirsher }
37967ac6653aSJeff Kirsher 
37974dbbe8ddSJose Abreu static int stmmac_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
37984dbbe8ddSJose Abreu 				    void *cb_priv)
37994dbbe8ddSJose Abreu {
38004dbbe8ddSJose Abreu 	struct stmmac_priv *priv = cb_priv;
38014dbbe8ddSJose Abreu 	int ret = -EOPNOTSUPP;
38024dbbe8ddSJose Abreu 
38034dbbe8ddSJose Abreu 	stmmac_disable_all_queues(priv);
38044dbbe8ddSJose Abreu 
38054dbbe8ddSJose Abreu 	switch (type) {
38064dbbe8ddSJose Abreu 	case TC_SETUP_CLSU32:
38074dbbe8ddSJose Abreu 		if (tc_cls_can_offload_and_chain0(priv->dev, type_data))
38084dbbe8ddSJose Abreu 			ret = stmmac_tc_setup_cls_u32(priv, priv, type_data);
38094dbbe8ddSJose Abreu 		break;
38104dbbe8ddSJose Abreu 	default:
38114dbbe8ddSJose Abreu 		break;
38124dbbe8ddSJose Abreu 	}
38134dbbe8ddSJose Abreu 
38144dbbe8ddSJose Abreu 	stmmac_enable_all_queues(priv);
38154dbbe8ddSJose Abreu 	return ret;
38164dbbe8ddSJose Abreu }
38174dbbe8ddSJose Abreu 
38184dbbe8ddSJose Abreu static int stmmac_setup_tc_block(struct stmmac_priv *priv,
38194dbbe8ddSJose Abreu 				 struct tc_block_offload *f)
38204dbbe8ddSJose Abreu {
38214dbbe8ddSJose Abreu 	if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
38224dbbe8ddSJose Abreu 		return -EOPNOTSUPP;
38234dbbe8ddSJose Abreu 
38244dbbe8ddSJose Abreu 	switch (f->command) {
38254dbbe8ddSJose Abreu 	case TC_BLOCK_BIND:
38264dbbe8ddSJose Abreu 		return tcf_block_cb_register(f->block, stmmac_setup_tc_block_cb,
38274dbbe8ddSJose Abreu 				priv, priv);
38284dbbe8ddSJose Abreu 	case TC_BLOCK_UNBIND:
38294dbbe8ddSJose Abreu 		tcf_block_cb_unregister(f->block, stmmac_setup_tc_block_cb, priv);
38304dbbe8ddSJose Abreu 		return 0;
38314dbbe8ddSJose Abreu 	default:
38324dbbe8ddSJose Abreu 		return -EOPNOTSUPP;
38334dbbe8ddSJose Abreu 	}
38344dbbe8ddSJose Abreu }
38354dbbe8ddSJose Abreu 
38364dbbe8ddSJose Abreu static int stmmac_setup_tc(struct net_device *ndev, enum tc_setup_type type,
38374dbbe8ddSJose Abreu 			   void *type_data)
38384dbbe8ddSJose Abreu {
38394dbbe8ddSJose Abreu 	struct stmmac_priv *priv = netdev_priv(ndev);
38404dbbe8ddSJose Abreu 
38414dbbe8ddSJose Abreu 	switch (type) {
38424dbbe8ddSJose Abreu 	case TC_SETUP_BLOCK:
38434dbbe8ddSJose Abreu 		return stmmac_setup_tc_block(priv, type_data);
38444dbbe8ddSJose Abreu 	default:
38454dbbe8ddSJose Abreu 		return -EOPNOTSUPP;
38464dbbe8ddSJose Abreu 	}
38474dbbe8ddSJose Abreu }
38484dbbe8ddSJose Abreu 
3849a830405eSBhadram Varka static int stmmac_set_mac_address(struct net_device *ndev, void *addr)
3850a830405eSBhadram Varka {
3851a830405eSBhadram Varka 	struct stmmac_priv *priv = netdev_priv(ndev);
3852a830405eSBhadram Varka 	int ret = 0;
3853a830405eSBhadram Varka 
3854a830405eSBhadram Varka 	ret = eth_mac_addr(ndev, addr);
3855a830405eSBhadram Varka 	if (ret)
3856a830405eSBhadram Varka 		return ret;
3857a830405eSBhadram Varka 
3858c10d4c82SJose Abreu 	stmmac_set_umac_addr(priv, priv->hw, ndev->dev_addr, 0);
3859a830405eSBhadram Varka 
3860a830405eSBhadram Varka 	return ret;
3861a830405eSBhadram Varka }
3862a830405eSBhadram Varka 
386350fb4f74SGiuseppe CAVALLARO #ifdef CONFIG_DEBUG_FS
38647ac29055SGiuseppe CAVALLARO static struct dentry *stmmac_fs_dir;
38657ac29055SGiuseppe CAVALLARO 
3866c24602efSGiuseppe CAVALLARO static void sysfs_display_ring(void *head, int size, int extend_desc,
3867c24602efSGiuseppe CAVALLARO 			       struct seq_file *seq)
38687ac29055SGiuseppe CAVALLARO {
38697ac29055SGiuseppe CAVALLARO 	int i;
3870c24602efSGiuseppe CAVALLARO 	struct dma_extended_desc *ep = (struct dma_extended_desc *)head;
3871c24602efSGiuseppe CAVALLARO 	struct dma_desc *p = (struct dma_desc *)head;
38727ac29055SGiuseppe CAVALLARO 
3873c24602efSGiuseppe CAVALLARO 	for (i = 0; i < size; i++) {
3874c24602efSGiuseppe CAVALLARO 		if (extend_desc) {
3875c24602efSGiuseppe CAVALLARO 			seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
3876c24602efSGiuseppe CAVALLARO 				   i, (unsigned int)virt_to_phys(ep),
3877f8be0d78SMichael Weiser 				   le32_to_cpu(ep->basic.des0),
3878f8be0d78SMichael Weiser 				   le32_to_cpu(ep->basic.des1),
3879f8be0d78SMichael Weiser 				   le32_to_cpu(ep->basic.des2),
3880f8be0d78SMichael Weiser 				   le32_to_cpu(ep->basic.des3));
3881c24602efSGiuseppe CAVALLARO 			ep++;
3882c24602efSGiuseppe CAVALLARO 		} else {
3883c24602efSGiuseppe CAVALLARO 			seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
388466c25f6eSNiklas Cassel 				   i, (unsigned int)virt_to_phys(p),
3885f8be0d78SMichael Weiser 				   le32_to_cpu(p->des0), le32_to_cpu(p->des1),
3886f8be0d78SMichael Weiser 				   le32_to_cpu(p->des2), le32_to_cpu(p->des3));
3887c24602efSGiuseppe CAVALLARO 			p++;
3888c24602efSGiuseppe CAVALLARO 		}
38897ac29055SGiuseppe CAVALLARO 		seq_printf(seq, "\n");
38907ac29055SGiuseppe CAVALLARO 	}
3891c24602efSGiuseppe CAVALLARO }
38927ac29055SGiuseppe CAVALLARO 
3893c24602efSGiuseppe CAVALLARO static int stmmac_sysfs_ring_read(struct seq_file *seq, void *v)
3894c24602efSGiuseppe CAVALLARO {
3895c24602efSGiuseppe CAVALLARO 	struct net_device *dev = seq->private;
3896c24602efSGiuseppe CAVALLARO 	struct stmmac_priv *priv = netdev_priv(dev);
389754139cf3SJoao Pinto 	u32 rx_count = priv->plat->rx_queues_to_use;
3898ce736788SJoao Pinto 	u32 tx_count = priv->plat->tx_queues_to_use;
389954139cf3SJoao Pinto 	u32 queue;
390054139cf3SJoao Pinto 
390154139cf3SJoao Pinto 	for (queue = 0; queue < rx_count; queue++) {
390254139cf3SJoao Pinto 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
390354139cf3SJoao Pinto 
390454139cf3SJoao Pinto 		seq_printf(seq, "RX Queue %d:\n", queue);
39057ac29055SGiuseppe CAVALLARO 
3906c24602efSGiuseppe CAVALLARO 		if (priv->extend_desc) {
390754139cf3SJoao Pinto 			seq_printf(seq, "Extended descriptor ring:\n");
390854139cf3SJoao Pinto 			sysfs_display_ring((void *)rx_q->dma_erx,
390954139cf3SJoao Pinto 					   DMA_RX_SIZE, 1, seq);
391054139cf3SJoao Pinto 		} else {
391154139cf3SJoao Pinto 			seq_printf(seq, "Descriptor ring:\n");
391254139cf3SJoao Pinto 			sysfs_display_ring((void *)rx_q->dma_rx,
391354139cf3SJoao Pinto 					   DMA_RX_SIZE, 0, seq);
391454139cf3SJoao Pinto 		}
391554139cf3SJoao Pinto 	}
391654139cf3SJoao Pinto 
3917ce736788SJoao Pinto 	for (queue = 0; queue < tx_count; queue++) {
3918ce736788SJoao Pinto 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
3919ce736788SJoao Pinto 
3920ce736788SJoao Pinto 		seq_printf(seq, "TX Queue %d:\n", queue);
3921ce736788SJoao Pinto 
392254139cf3SJoao Pinto 		if (priv->extend_desc) {
3923ce736788SJoao Pinto 			seq_printf(seq, "Extended descriptor ring:\n");
3924ce736788SJoao Pinto 			sysfs_display_ring((void *)tx_q->dma_etx,
3925ce736788SJoao Pinto 					   DMA_TX_SIZE, 1, seq);
3926c24602efSGiuseppe CAVALLARO 		} else {
3927ce736788SJoao Pinto 			seq_printf(seq, "Descriptor ring:\n");
3928ce736788SJoao Pinto 			sysfs_display_ring((void *)tx_q->dma_tx,
3929ce736788SJoao Pinto 					   DMA_TX_SIZE, 0, seq);
3930ce736788SJoao Pinto 		}
39317ac29055SGiuseppe CAVALLARO 	}
39327ac29055SGiuseppe CAVALLARO 
39337ac29055SGiuseppe CAVALLARO 	return 0;
39347ac29055SGiuseppe CAVALLARO }
39357ac29055SGiuseppe CAVALLARO 
39367ac29055SGiuseppe CAVALLARO static int stmmac_sysfs_ring_open(struct inode *inode, struct file *file)
39377ac29055SGiuseppe CAVALLARO {
39387ac29055SGiuseppe CAVALLARO 	return single_open(file, stmmac_sysfs_ring_read, inode->i_private);
39397ac29055SGiuseppe CAVALLARO }
39407ac29055SGiuseppe CAVALLARO 
394122d3efe5SPavel Machek /* Debugfs files, should appear in /sys/kernel/debug/stmmaceth/eth0 */
394222d3efe5SPavel Machek 
39437ac29055SGiuseppe CAVALLARO static const struct file_operations stmmac_rings_status_fops = {
39447ac29055SGiuseppe CAVALLARO 	.owner = THIS_MODULE,
39457ac29055SGiuseppe CAVALLARO 	.open = stmmac_sysfs_ring_open,
39467ac29055SGiuseppe CAVALLARO 	.read = seq_read,
39477ac29055SGiuseppe CAVALLARO 	.llseek = seq_lseek,
394874863948SDjalal Harouni 	.release = single_release,
39497ac29055SGiuseppe CAVALLARO };
39507ac29055SGiuseppe CAVALLARO 
3951e7434821SGiuseppe CAVALLARO static int stmmac_sysfs_dma_cap_read(struct seq_file *seq, void *v)
3952e7434821SGiuseppe CAVALLARO {
3953e7434821SGiuseppe CAVALLARO 	struct net_device *dev = seq->private;
3954e7434821SGiuseppe CAVALLARO 	struct stmmac_priv *priv = netdev_priv(dev);
3955e7434821SGiuseppe CAVALLARO 
395619e30c14SGiuseppe CAVALLARO 	if (!priv->hw_cap_support) {
3957e7434821SGiuseppe CAVALLARO 		seq_printf(seq, "DMA HW features not supported\n");
3958e7434821SGiuseppe CAVALLARO 		return 0;
3959e7434821SGiuseppe CAVALLARO 	}
3960e7434821SGiuseppe CAVALLARO 
3961e7434821SGiuseppe CAVALLARO 	seq_printf(seq, "==============================\n");
3962e7434821SGiuseppe CAVALLARO 	seq_printf(seq, "\tDMA HW features\n");
3963e7434821SGiuseppe CAVALLARO 	seq_printf(seq, "==============================\n");
3964e7434821SGiuseppe CAVALLARO 
396522d3efe5SPavel Machek 	seq_printf(seq, "\t10/100 Mbps: %s\n",
3966e7434821SGiuseppe CAVALLARO 		   (priv->dma_cap.mbps_10_100) ? "Y" : "N");
396722d3efe5SPavel Machek 	seq_printf(seq, "\t1000 Mbps: %s\n",
3968e7434821SGiuseppe CAVALLARO 		   (priv->dma_cap.mbps_1000) ? "Y" : "N");
396922d3efe5SPavel Machek 	seq_printf(seq, "\tHalf duplex: %s\n",
3970e7434821SGiuseppe CAVALLARO 		   (priv->dma_cap.half_duplex) ? "Y" : "N");
3971e7434821SGiuseppe CAVALLARO 	seq_printf(seq, "\tHash Filter: %s\n",
3972e7434821SGiuseppe CAVALLARO 		   (priv->dma_cap.hash_filter) ? "Y" : "N");
3973e7434821SGiuseppe CAVALLARO 	seq_printf(seq, "\tMultiple MAC address registers: %s\n",
3974e7434821SGiuseppe CAVALLARO 		   (priv->dma_cap.multi_addr) ? "Y" : "N");
39758d45e42bSLABBE Corentin 	seq_printf(seq, "\tPCS (TBI/SGMII/RTBI PHY interfaces): %s\n",
3976e7434821SGiuseppe CAVALLARO 		   (priv->dma_cap.pcs) ? "Y" : "N");
3977e7434821SGiuseppe CAVALLARO 	seq_printf(seq, "\tSMA (MDIO) Interface: %s\n",
3978e7434821SGiuseppe CAVALLARO 		   (priv->dma_cap.sma_mdio) ? "Y" : "N");
3979e7434821SGiuseppe CAVALLARO 	seq_printf(seq, "\tPMT Remote wake up: %s\n",
3980e7434821SGiuseppe CAVALLARO 		   (priv->dma_cap.pmt_remote_wake_up) ? "Y" : "N");
3981e7434821SGiuseppe CAVALLARO 	seq_printf(seq, "\tPMT Magic Frame: %s\n",
3982e7434821SGiuseppe CAVALLARO 		   (priv->dma_cap.pmt_magic_frame) ? "Y" : "N");
3983e7434821SGiuseppe CAVALLARO 	seq_printf(seq, "\tRMON module: %s\n",
3984e7434821SGiuseppe CAVALLARO 		   (priv->dma_cap.rmon) ? "Y" : "N");
3985e7434821SGiuseppe CAVALLARO 	seq_printf(seq, "\tIEEE 1588-2002 Time Stamp: %s\n",
3986e7434821SGiuseppe CAVALLARO 		   (priv->dma_cap.time_stamp) ? "Y" : "N");
3987e7434821SGiuseppe CAVALLARO 	seq_printf(seq, "\tIEEE 1588-2008 Advanced Time Stamp: %s\n",
3988e7434821SGiuseppe CAVALLARO 		   (priv->dma_cap.atime_stamp) ? "Y" : "N");
398922d3efe5SPavel Machek 	seq_printf(seq, "\t802.3az - Energy-Efficient Ethernet (EEE): %s\n",
3990e7434821SGiuseppe CAVALLARO 		   (priv->dma_cap.eee) ? "Y" : "N");
3991e7434821SGiuseppe CAVALLARO 	seq_printf(seq, "\tAV features: %s\n", (priv->dma_cap.av) ? "Y" : "N");
3992e7434821SGiuseppe CAVALLARO 	seq_printf(seq, "\tChecksum Offload in TX: %s\n",
3993e7434821SGiuseppe CAVALLARO 		   (priv->dma_cap.tx_coe) ? "Y" : "N");
3994f748be53SAlexandre TORGUE 	if (priv->synopsys_id >= DWMAC_CORE_4_00) {
3995f748be53SAlexandre TORGUE 		seq_printf(seq, "\tIP Checksum Offload in RX: %s\n",
3996f748be53SAlexandre TORGUE 			   (priv->dma_cap.rx_coe) ? "Y" : "N");
3997f748be53SAlexandre TORGUE 	} else {
3998e7434821SGiuseppe CAVALLARO 		seq_printf(seq, "\tIP Checksum Offload (type1) in RX: %s\n",
3999e7434821SGiuseppe CAVALLARO 			   (priv->dma_cap.rx_coe_type1) ? "Y" : "N");
4000e7434821SGiuseppe CAVALLARO 		seq_printf(seq, "\tIP Checksum Offload (type2) in RX: %s\n",
4001e7434821SGiuseppe CAVALLARO 			   (priv->dma_cap.rx_coe_type2) ? "Y" : "N");
4002f748be53SAlexandre TORGUE 	}
4003e7434821SGiuseppe CAVALLARO 	seq_printf(seq, "\tRXFIFO > 2048bytes: %s\n",
4004e7434821SGiuseppe CAVALLARO 		   (priv->dma_cap.rxfifo_over_2048) ? "Y" : "N");
4005e7434821SGiuseppe CAVALLARO 	seq_printf(seq, "\tNumber of Additional RX channel: %d\n",
4006e7434821SGiuseppe CAVALLARO 		   priv->dma_cap.number_rx_channel);
4007e7434821SGiuseppe CAVALLARO 	seq_printf(seq, "\tNumber of Additional TX channel: %d\n",
4008e7434821SGiuseppe CAVALLARO 		   priv->dma_cap.number_tx_channel);
4009e7434821SGiuseppe CAVALLARO 	seq_printf(seq, "\tEnhanced descriptors: %s\n",
4010e7434821SGiuseppe CAVALLARO 		   (priv->dma_cap.enh_desc) ? "Y" : "N");
4011e7434821SGiuseppe CAVALLARO 
4012e7434821SGiuseppe CAVALLARO 	return 0;
4013e7434821SGiuseppe CAVALLARO }
4014e7434821SGiuseppe CAVALLARO 
4015e7434821SGiuseppe CAVALLARO static int stmmac_sysfs_dma_cap_open(struct inode *inode, struct file *file)
4016e7434821SGiuseppe CAVALLARO {
4017e7434821SGiuseppe CAVALLARO 	return single_open(file, stmmac_sysfs_dma_cap_read, inode->i_private);
4018e7434821SGiuseppe CAVALLARO }
4019e7434821SGiuseppe CAVALLARO 
4020e7434821SGiuseppe CAVALLARO static const struct file_operations stmmac_dma_cap_fops = {
4021e7434821SGiuseppe CAVALLARO 	.owner = THIS_MODULE,
4022e7434821SGiuseppe CAVALLARO 	.open = stmmac_sysfs_dma_cap_open,
4023e7434821SGiuseppe CAVALLARO 	.read = seq_read,
4024e7434821SGiuseppe CAVALLARO 	.llseek = seq_lseek,
402574863948SDjalal Harouni 	.release = single_release,
4026e7434821SGiuseppe CAVALLARO };
4027e7434821SGiuseppe CAVALLARO 
40287ac29055SGiuseppe CAVALLARO static int stmmac_init_fs(struct net_device *dev)
40297ac29055SGiuseppe CAVALLARO {
4030466c5ac8SMathieu Olivari 	struct stmmac_priv *priv = netdev_priv(dev);
40317ac29055SGiuseppe CAVALLARO 
4032466c5ac8SMathieu Olivari 	/* Create per netdev entries */
4033466c5ac8SMathieu Olivari 	priv->dbgfs_dir = debugfs_create_dir(dev->name, stmmac_fs_dir);
4034466c5ac8SMathieu Olivari 
4035466c5ac8SMathieu Olivari 	if (!priv->dbgfs_dir || IS_ERR(priv->dbgfs_dir)) {
403638ddc59dSLABBE Corentin 		netdev_err(priv->dev, "ERROR failed to create debugfs directory\n");
40377ac29055SGiuseppe CAVALLARO 
40387ac29055SGiuseppe CAVALLARO 		return -ENOMEM;
40397ac29055SGiuseppe CAVALLARO 	}
40407ac29055SGiuseppe CAVALLARO 
40417ac29055SGiuseppe CAVALLARO 	/* Entry to report DMA RX/TX rings */
4042466c5ac8SMathieu Olivari 	priv->dbgfs_rings_status =
4043d3757ba4SJoe Perches 		debugfs_create_file("descriptors_status", 0444,
4044466c5ac8SMathieu Olivari 				    priv->dbgfs_dir, dev,
40457ac29055SGiuseppe CAVALLARO 				    &stmmac_rings_status_fops);
40467ac29055SGiuseppe CAVALLARO 
4047466c5ac8SMathieu Olivari 	if (!priv->dbgfs_rings_status || IS_ERR(priv->dbgfs_rings_status)) {
404838ddc59dSLABBE Corentin 		netdev_err(priv->dev, "ERROR creating stmmac ring debugfs file\n");
4049466c5ac8SMathieu Olivari 		debugfs_remove_recursive(priv->dbgfs_dir);
40507ac29055SGiuseppe CAVALLARO 
40517ac29055SGiuseppe CAVALLARO 		return -ENOMEM;
40527ac29055SGiuseppe CAVALLARO 	}
40537ac29055SGiuseppe CAVALLARO 
4054e7434821SGiuseppe CAVALLARO 	/* Entry to report the DMA HW features */
4055d3757ba4SJoe Perches 	priv->dbgfs_dma_cap = debugfs_create_file("dma_cap", 0444,
4056466c5ac8SMathieu Olivari 						  priv->dbgfs_dir,
4057e7434821SGiuseppe CAVALLARO 						  dev, &stmmac_dma_cap_fops);
4058e7434821SGiuseppe CAVALLARO 
4059466c5ac8SMathieu Olivari 	if (!priv->dbgfs_dma_cap || IS_ERR(priv->dbgfs_dma_cap)) {
406038ddc59dSLABBE Corentin 		netdev_err(priv->dev, "ERROR creating stmmac MMC debugfs file\n");
4061466c5ac8SMathieu Olivari 		debugfs_remove_recursive(priv->dbgfs_dir);
4062e7434821SGiuseppe CAVALLARO 
4063e7434821SGiuseppe CAVALLARO 		return -ENOMEM;
4064e7434821SGiuseppe CAVALLARO 	}
4065e7434821SGiuseppe CAVALLARO 
40667ac29055SGiuseppe CAVALLARO 	return 0;
40677ac29055SGiuseppe CAVALLARO }
40687ac29055SGiuseppe CAVALLARO 
4069466c5ac8SMathieu Olivari static void stmmac_exit_fs(struct net_device *dev)
40707ac29055SGiuseppe CAVALLARO {
4071466c5ac8SMathieu Olivari 	struct stmmac_priv *priv = netdev_priv(dev);
4072466c5ac8SMathieu Olivari 
4073466c5ac8SMathieu Olivari 	debugfs_remove_recursive(priv->dbgfs_dir);
40747ac29055SGiuseppe CAVALLARO }
407550fb4f74SGiuseppe CAVALLARO #endif /* CONFIG_DEBUG_FS */
40767ac29055SGiuseppe CAVALLARO 
40777ac6653aSJeff Kirsher static const struct net_device_ops stmmac_netdev_ops = {
40787ac6653aSJeff Kirsher 	.ndo_open = stmmac_open,
40797ac6653aSJeff Kirsher 	.ndo_start_xmit = stmmac_xmit,
40807ac6653aSJeff Kirsher 	.ndo_stop = stmmac_release,
40817ac6653aSJeff Kirsher 	.ndo_change_mtu = stmmac_change_mtu,
40827ac6653aSJeff Kirsher 	.ndo_fix_features = stmmac_fix_features,
4083d2afb5bdSGiuseppe CAVALLARO 	.ndo_set_features = stmmac_set_features,
408401789349SJiri Pirko 	.ndo_set_rx_mode = stmmac_set_rx_mode,
40857ac6653aSJeff Kirsher 	.ndo_tx_timeout = stmmac_tx_timeout,
40867ac6653aSJeff Kirsher 	.ndo_do_ioctl = stmmac_ioctl,
40874dbbe8ddSJose Abreu 	.ndo_setup_tc = stmmac_setup_tc,
40887ac6653aSJeff Kirsher #ifdef CONFIG_NET_POLL_CONTROLLER
40897ac6653aSJeff Kirsher 	.ndo_poll_controller = stmmac_poll_controller,
40907ac6653aSJeff Kirsher #endif
4091a830405eSBhadram Varka 	.ndo_set_mac_address = stmmac_set_mac_address,
40927ac6653aSJeff Kirsher };
40937ac6653aSJeff Kirsher 
409434877a15SJose Abreu static void stmmac_reset_subtask(struct stmmac_priv *priv)
409534877a15SJose Abreu {
409634877a15SJose Abreu 	if (!test_and_clear_bit(STMMAC_RESET_REQUESTED, &priv->state))
409734877a15SJose Abreu 		return;
409834877a15SJose Abreu 	if (test_bit(STMMAC_DOWN, &priv->state))
409934877a15SJose Abreu 		return;
410034877a15SJose Abreu 
410134877a15SJose Abreu 	netdev_err(priv->dev, "Reset adapter.\n");
410234877a15SJose Abreu 
410334877a15SJose Abreu 	rtnl_lock();
410434877a15SJose Abreu 	netif_trans_update(priv->dev);
410534877a15SJose Abreu 	while (test_and_set_bit(STMMAC_RESETING, &priv->state))
410634877a15SJose Abreu 		usleep_range(1000, 2000);
410734877a15SJose Abreu 
410834877a15SJose Abreu 	set_bit(STMMAC_DOWN, &priv->state);
410934877a15SJose Abreu 	dev_close(priv->dev);
411034877a15SJose Abreu 	dev_open(priv->dev);
411134877a15SJose Abreu 	clear_bit(STMMAC_DOWN, &priv->state);
411234877a15SJose Abreu 	clear_bit(STMMAC_RESETING, &priv->state);
411334877a15SJose Abreu 	rtnl_unlock();
411434877a15SJose Abreu }
411534877a15SJose Abreu 
411634877a15SJose Abreu static void stmmac_service_task(struct work_struct *work)
411734877a15SJose Abreu {
411834877a15SJose Abreu 	struct stmmac_priv *priv = container_of(work, struct stmmac_priv,
411934877a15SJose Abreu 			service_task);
412034877a15SJose Abreu 
412134877a15SJose Abreu 	stmmac_reset_subtask(priv);
412234877a15SJose Abreu 	clear_bit(STMMAC_SERVICE_SCHED, &priv->state);
412334877a15SJose Abreu }
412434877a15SJose Abreu 
41257ac6653aSJeff Kirsher /**
4126cf3f047bSGiuseppe CAVALLARO  *  stmmac_hw_init - Init the MAC device
412732ceabcaSGiuseppe CAVALLARO  *  @priv: driver private structure
4128732fdf0eSGiuseppe CAVALLARO  *  Description: this function is to configure the MAC device according to
4129732fdf0eSGiuseppe CAVALLARO  *  some platform parameters or the HW capability register. It prepares the
4130732fdf0eSGiuseppe CAVALLARO  *  driver to use either ring or chain modes and to setup either enhanced or
4131732fdf0eSGiuseppe CAVALLARO  *  normal descriptors.
4132cf3f047bSGiuseppe CAVALLARO  */
4133cf3f047bSGiuseppe CAVALLARO static int stmmac_hw_init(struct stmmac_priv *priv)
4134cf3f047bSGiuseppe CAVALLARO {
41355f0456b4SJose Abreu 	int ret;
4136cf3f047bSGiuseppe CAVALLARO 
41379f93ac8dSLABBE Corentin 	/* dwmac-sun8i only work in chain mode */
41389f93ac8dSLABBE Corentin 	if (priv->plat->has_sun8i)
41399f93ac8dSLABBE Corentin 		chain_mode = 1;
41405f0456b4SJose Abreu 	priv->chain_mode = chain_mode;
41419f93ac8dSLABBE Corentin 
41425f0456b4SJose Abreu 	/* Initialize HW Interface */
41435f0456b4SJose Abreu 	ret = stmmac_hwif_init(priv);
41445f0456b4SJose Abreu 	if (ret)
41455f0456b4SJose Abreu 		return ret;
41464a7d666aSGiuseppe CAVALLARO 
4147cf3f047bSGiuseppe CAVALLARO 	/* Get the HW capability (new GMAC newer than 3.50a) */
4148cf3f047bSGiuseppe CAVALLARO 	priv->hw_cap_support = stmmac_get_hw_features(priv);
4149cf3f047bSGiuseppe CAVALLARO 	if (priv->hw_cap_support) {
415038ddc59dSLABBE Corentin 		dev_info(priv->device, "DMA HW capability register supported\n");
4151cf3f047bSGiuseppe CAVALLARO 
4152cf3f047bSGiuseppe CAVALLARO 		/* We can override some gmac/dma configuration fields: e.g.
4153cf3f047bSGiuseppe CAVALLARO 		 * enh_desc, tx_coe (e.g. that are passed through the
4154cf3f047bSGiuseppe CAVALLARO 		 * platform) with the values from the HW capability
4155cf3f047bSGiuseppe CAVALLARO 		 * register (if supported).
4156cf3f047bSGiuseppe CAVALLARO 		 */
4157cf3f047bSGiuseppe CAVALLARO 		priv->plat->enh_desc = priv->dma_cap.enh_desc;
4158cf3f047bSGiuseppe CAVALLARO 		priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up;
41593fe5cadbSGiuseppe CAVALLARO 		priv->hw->pmt = priv->plat->pmt;
416038912bdbSDeepak SIKRI 
4161a8df35d4SEzequiel Garcia 		/* TXCOE doesn't work in thresh DMA mode */
4162a8df35d4SEzequiel Garcia 		if (priv->plat->force_thresh_dma_mode)
4163a8df35d4SEzequiel Garcia 			priv->plat->tx_coe = 0;
4164a8df35d4SEzequiel Garcia 		else
416538912bdbSDeepak SIKRI 			priv->plat->tx_coe = priv->dma_cap.tx_coe;
4166a8df35d4SEzequiel Garcia 
4167f748be53SAlexandre TORGUE 		/* In case of GMAC4 rx_coe is from HW cap register. */
4168f748be53SAlexandre TORGUE 		priv->plat->rx_coe = priv->dma_cap.rx_coe;
416938912bdbSDeepak SIKRI 
417038912bdbSDeepak SIKRI 		if (priv->dma_cap.rx_coe_type2)
417138912bdbSDeepak SIKRI 			priv->plat->rx_coe = STMMAC_RX_COE_TYPE2;
417238912bdbSDeepak SIKRI 		else if (priv->dma_cap.rx_coe_type1)
417338912bdbSDeepak SIKRI 			priv->plat->rx_coe = STMMAC_RX_COE_TYPE1;
417438912bdbSDeepak SIKRI 
417538ddc59dSLABBE Corentin 	} else {
417638ddc59dSLABBE Corentin 		dev_info(priv->device, "No HW DMA feature register supported\n");
417738ddc59dSLABBE Corentin 	}
4178cf3f047bSGiuseppe CAVALLARO 
4179d2afb5bdSGiuseppe CAVALLARO 	if (priv->plat->rx_coe) {
4180d2afb5bdSGiuseppe CAVALLARO 		priv->hw->rx_csum = priv->plat->rx_coe;
418138ddc59dSLABBE Corentin 		dev_info(priv->device, "RX Checksum Offload Engine supported\n");
4182f748be53SAlexandre TORGUE 		if (priv->synopsys_id < DWMAC_CORE_4_00)
418338ddc59dSLABBE Corentin 			dev_info(priv->device, "COE Type %d\n", priv->hw->rx_csum);
4184d2afb5bdSGiuseppe CAVALLARO 	}
4185cf3f047bSGiuseppe CAVALLARO 	if (priv->plat->tx_coe)
418638ddc59dSLABBE Corentin 		dev_info(priv->device, "TX Checksum insertion supported\n");
4187cf3f047bSGiuseppe CAVALLARO 
4188cf3f047bSGiuseppe CAVALLARO 	if (priv->plat->pmt) {
418938ddc59dSLABBE Corentin 		dev_info(priv->device, "Wake-Up On Lan supported\n");
4190cf3f047bSGiuseppe CAVALLARO 		device_set_wakeup_capable(priv->device, 1);
4191cf3f047bSGiuseppe CAVALLARO 	}
4192cf3f047bSGiuseppe CAVALLARO 
4193f748be53SAlexandre TORGUE 	if (priv->dma_cap.tsoen)
419438ddc59dSLABBE Corentin 		dev_info(priv->device, "TSO supported\n");
4195f748be53SAlexandre TORGUE 
4196c24602efSGiuseppe CAVALLARO 	return 0;
4197cf3f047bSGiuseppe CAVALLARO }
4198cf3f047bSGiuseppe CAVALLARO 
4199cf3f047bSGiuseppe CAVALLARO /**
4200bfab27a1SGiuseppe CAVALLARO  * stmmac_dvr_probe
4201bfab27a1SGiuseppe CAVALLARO  * @device: device pointer
4202ff3dd78cSGiuseppe CAVALLARO  * @plat_dat: platform data pointer
4203e56788cfSJoachim Eastwood  * @res: stmmac resource pointer
4204bfab27a1SGiuseppe CAVALLARO  * Description: this is the main probe function used to
4205bfab27a1SGiuseppe CAVALLARO  * call the alloc_etherdev, allocate the priv structure.
42069afec6efSAndy Shevchenko  * Return:
420715ffac73SJoachim Eastwood  * returns 0 on success, otherwise errno.
42087ac6653aSJeff Kirsher  */
420915ffac73SJoachim Eastwood int stmmac_dvr_probe(struct device *device,
4210cf3f047bSGiuseppe CAVALLARO 		     struct plat_stmmacenet_data *plat_dat,
4211e56788cfSJoachim Eastwood 		     struct stmmac_resources *res)
42127ac6653aSJeff Kirsher {
4213bfab27a1SGiuseppe CAVALLARO 	struct net_device *ndev = NULL;
4214bfab27a1SGiuseppe CAVALLARO 	struct stmmac_priv *priv;
4215c22a3f48SJoao Pinto 	int ret = 0;
4216c22a3f48SJoao Pinto 	u32 queue;
42177ac6653aSJeff Kirsher 
4218c22a3f48SJoao Pinto 	ndev = alloc_etherdev_mqs(sizeof(struct stmmac_priv),
4219c22a3f48SJoao Pinto 				  MTL_MAX_TX_QUEUES,
4220c22a3f48SJoao Pinto 				  MTL_MAX_RX_QUEUES);
422141de8d4cSJoe Perches 	if (!ndev)
422215ffac73SJoachim Eastwood 		return -ENOMEM;
42237ac6653aSJeff Kirsher 
4224bfab27a1SGiuseppe CAVALLARO 	SET_NETDEV_DEV(ndev, device);
42257ac6653aSJeff Kirsher 
4226bfab27a1SGiuseppe CAVALLARO 	priv = netdev_priv(ndev);
4227bfab27a1SGiuseppe CAVALLARO 	priv->device = device;
4228bfab27a1SGiuseppe CAVALLARO 	priv->dev = ndev;
4229bfab27a1SGiuseppe CAVALLARO 
4230bfab27a1SGiuseppe CAVALLARO 	stmmac_set_ethtool_ops(ndev);
4231cf3f047bSGiuseppe CAVALLARO 	priv->pause = pause;
4232cf3f047bSGiuseppe CAVALLARO 	priv->plat = plat_dat;
4233e56788cfSJoachim Eastwood 	priv->ioaddr = res->addr;
4234e56788cfSJoachim Eastwood 	priv->dev->base_addr = (unsigned long)res->addr;
4235e56788cfSJoachim Eastwood 
4236e56788cfSJoachim Eastwood 	priv->dev->irq = res->irq;
4237e56788cfSJoachim Eastwood 	priv->wol_irq = res->wol_irq;
4238e56788cfSJoachim Eastwood 	priv->lpi_irq = res->lpi_irq;
4239e56788cfSJoachim Eastwood 
4240e56788cfSJoachim Eastwood 	if (res->mac)
4241e56788cfSJoachim Eastwood 		memcpy(priv->dev->dev_addr, res->mac, ETH_ALEN);
4242bfab27a1SGiuseppe CAVALLARO 
4243a7a62685SJoachim Eastwood 	dev_set_drvdata(device, priv->dev);
4244803f8fc4SJoachim Eastwood 
4245cf3f047bSGiuseppe CAVALLARO 	/* Verify driver arguments */
4246cf3f047bSGiuseppe CAVALLARO 	stmmac_verify_args();
4247cf3f047bSGiuseppe CAVALLARO 
424834877a15SJose Abreu 	/* Allocate workqueue */
424934877a15SJose Abreu 	priv->wq = create_singlethread_workqueue("stmmac_wq");
425034877a15SJose Abreu 	if (!priv->wq) {
425134877a15SJose Abreu 		dev_err(priv->device, "failed to create workqueue\n");
425234877a15SJose Abreu 		goto error_wq;
425334877a15SJose Abreu 	}
425434877a15SJose Abreu 
425534877a15SJose Abreu 	INIT_WORK(&priv->service_task, stmmac_service_task);
425634877a15SJose Abreu 
4257cf3f047bSGiuseppe CAVALLARO 	/* Override with kernel parameters if supplied XXX CRS XXX
4258ceb69499SGiuseppe CAVALLARO 	 * this needs to have multiple instances
4259ceb69499SGiuseppe CAVALLARO 	 */
4260cf3f047bSGiuseppe CAVALLARO 	if ((phyaddr >= 0) && (phyaddr <= 31))
4261cf3f047bSGiuseppe CAVALLARO 		priv->plat->phy_addr = phyaddr;
4262cf3f047bSGiuseppe CAVALLARO 
426390f522a2SEugeniy Paltsev 	if (priv->plat->stmmac_rst) {
426490f522a2SEugeniy Paltsev 		ret = reset_control_assert(priv->plat->stmmac_rst);
4265f573c0b9Sjpinto 		reset_control_deassert(priv->plat->stmmac_rst);
426690f522a2SEugeniy Paltsev 		/* Some reset controllers have only reset callback instead of
426790f522a2SEugeniy Paltsev 		 * assert + deassert callbacks pair.
426890f522a2SEugeniy Paltsev 		 */
426990f522a2SEugeniy Paltsev 		if (ret == -ENOTSUPP)
427090f522a2SEugeniy Paltsev 			reset_control_reset(priv->plat->stmmac_rst);
427190f522a2SEugeniy Paltsev 	}
4272c5e4ddbdSChen-Yu Tsai 
4273cf3f047bSGiuseppe CAVALLARO 	/* Init MAC and get the capabilities */
4274c24602efSGiuseppe CAVALLARO 	ret = stmmac_hw_init(priv);
4275c24602efSGiuseppe CAVALLARO 	if (ret)
427662866e98SChen-Yu Tsai 		goto error_hw_init;
4277cf3f047bSGiuseppe CAVALLARO 
4278c22a3f48SJoao Pinto 	/* Configure real RX and TX queues */
4279c02b7a91SJoao Pinto 	netif_set_real_num_rx_queues(ndev, priv->plat->rx_queues_to_use);
4280c02b7a91SJoao Pinto 	netif_set_real_num_tx_queues(ndev, priv->plat->tx_queues_to_use);
4281c22a3f48SJoao Pinto 
4282cf3f047bSGiuseppe CAVALLARO 	ndev->netdev_ops = &stmmac_netdev_ops;
4283cf3f047bSGiuseppe CAVALLARO 
4284cf3f047bSGiuseppe CAVALLARO 	ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
4285cf3f047bSGiuseppe CAVALLARO 			    NETIF_F_RXCSUM;
4286f748be53SAlexandre TORGUE 
42874dbbe8ddSJose Abreu 	ret = stmmac_tc_init(priv, priv);
42884dbbe8ddSJose Abreu 	if (!ret) {
42894dbbe8ddSJose Abreu 		ndev->hw_features |= NETIF_F_HW_TC;
42904dbbe8ddSJose Abreu 	}
42914dbbe8ddSJose Abreu 
4292f748be53SAlexandre TORGUE 	if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
42939edfa7daSNiklas Cassel 		ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
4294f748be53SAlexandre TORGUE 		priv->tso = true;
429538ddc59dSLABBE Corentin 		dev_info(priv->device, "TSO feature enabled\n");
4296f748be53SAlexandre TORGUE 	}
4297bfab27a1SGiuseppe CAVALLARO 	ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;
4298bfab27a1SGiuseppe CAVALLARO 	ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
42997ac6653aSJeff Kirsher #ifdef STMMAC_VLAN_TAG_USED
43007ac6653aSJeff Kirsher 	/* Both mac100 and gmac support receive VLAN tag detection */
4301f646968fSPatrick McHardy 	ndev->features |= NETIF_F_HW_VLAN_CTAG_RX;
43027ac6653aSJeff Kirsher #endif
43037ac6653aSJeff Kirsher 	priv->msg_enable = netif_msg_init(debug, default_msg_level);
43047ac6653aSJeff Kirsher 
430544770e11SJarod Wilson 	/* MTU range: 46 - hw-specific max */
430644770e11SJarod Wilson 	ndev->min_mtu = ETH_ZLEN - ETH_HLEN;
430744770e11SJarod Wilson 	if ((priv->plat->enh_desc) || (priv->synopsys_id >= DWMAC_CORE_4_00))
430844770e11SJarod Wilson 		ndev->max_mtu = JUMBO_LEN;
430944770e11SJarod Wilson 	else
431044770e11SJarod Wilson 		ndev->max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN);
4311a2cd64f3SKweh, Hock Leong 	/* Will not overwrite ndev->max_mtu if plat->maxmtu > ndev->max_mtu
4312a2cd64f3SKweh, Hock Leong 	 * as well as plat->maxmtu < ndev->min_mtu which is a invalid range.
4313a2cd64f3SKweh, Hock Leong 	 */
4314a2cd64f3SKweh, Hock Leong 	if ((priv->plat->maxmtu < ndev->max_mtu) &&
4315a2cd64f3SKweh, Hock Leong 	    (priv->plat->maxmtu >= ndev->min_mtu))
431644770e11SJarod Wilson 		ndev->max_mtu = priv->plat->maxmtu;
4317a2cd64f3SKweh, Hock Leong 	else if (priv->plat->maxmtu < ndev->min_mtu)
4318b618ab45SHeiner Kallweit 		dev_warn(priv->device,
4319a2cd64f3SKweh, Hock Leong 			 "%s: warning: maxmtu having invalid value (%d)\n",
4320a2cd64f3SKweh, Hock Leong 			 __func__, priv->plat->maxmtu);
432144770e11SJarod Wilson 
43227ac6653aSJeff Kirsher 	if (flow_ctrl)
43237ac6653aSJeff Kirsher 		priv->flow_ctrl = FLOW_AUTO;	/* RX/TX pause on */
43247ac6653aSJeff Kirsher 
432562a2ab93SGiuseppe CAVALLARO 	/* Rx Watchdog is available in the COREs newer than the 3.40.
432662a2ab93SGiuseppe CAVALLARO 	 * In some case, for example on bugged HW this feature
432762a2ab93SGiuseppe CAVALLARO 	 * has to be disable and this can be done by passing the
432862a2ab93SGiuseppe CAVALLARO 	 * riwt_off field from the platform.
432962a2ab93SGiuseppe CAVALLARO 	 */
433062a2ab93SGiuseppe CAVALLARO 	if ((priv->synopsys_id >= DWMAC_CORE_3_50) && (!priv->plat->riwt_off)) {
433162a2ab93SGiuseppe CAVALLARO 		priv->use_riwt = 1;
4332b618ab45SHeiner Kallweit 		dev_info(priv->device,
4333b618ab45SHeiner Kallweit 			 "Enable RX Mitigation via HW Watchdog Timer\n");
433462a2ab93SGiuseppe CAVALLARO 	}
433562a2ab93SGiuseppe CAVALLARO 
4336c22a3f48SJoao Pinto 	for (queue = 0; queue < priv->plat->rx_queues_to_use; queue++) {
4337c22a3f48SJoao Pinto 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
4338c22a3f48SJoao Pinto 
4339c22a3f48SJoao Pinto 		netif_napi_add(ndev, &rx_q->napi, stmmac_poll,
4340c22a3f48SJoao Pinto 			       (8 * priv->plat->rx_queues_to_use));
4341c22a3f48SJoao Pinto 	}
43427ac6653aSJeff Kirsher 
43437ac6653aSJeff Kirsher 	spin_lock_init(&priv->lock);
43447ac6653aSJeff Kirsher 
4345cd7201f4SGiuseppe CAVALLARO 	/* If a specific clk_csr value is passed from the platform
4346cd7201f4SGiuseppe CAVALLARO 	 * this means that the CSR Clock Range selection cannot be
4347cd7201f4SGiuseppe CAVALLARO 	 * changed at run-time and it is fixed. Viceversa the driver'll try to
4348cd7201f4SGiuseppe CAVALLARO 	 * set the MDC clock dynamically according to the csr actual
4349cd7201f4SGiuseppe CAVALLARO 	 * clock input.
4350cd7201f4SGiuseppe CAVALLARO 	 */
4351cd7201f4SGiuseppe CAVALLARO 	if (!priv->plat->clk_csr)
4352cd7201f4SGiuseppe CAVALLARO 		stmmac_clk_csr_set(priv);
4353cd7201f4SGiuseppe CAVALLARO 	else
4354cd7201f4SGiuseppe CAVALLARO 		priv->clk_csr = priv->plat->clk_csr;
4355cd7201f4SGiuseppe CAVALLARO 
4356e58bb43fSGiuseppe CAVALLARO 	stmmac_check_pcs_mode(priv);
4357e58bb43fSGiuseppe CAVALLARO 
43583fe5cadbSGiuseppe CAVALLARO 	if (priv->hw->pcs != STMMAC_PCS_RGMII  &&
43593fe5cadbSGiuseppe CAVALLARO 	    priv->hw->pcs != STMMAC_PCS_TBI &&
43603fe5cadbSGiuseppe CAVALLARO 	    priv->hw->pcs != STMMAC_PCS_RTBI) {
43614bfcbd7aSFrancesco Virlinzi 		/* MDIO bus Registration */
43624bfcbd7aSFrancesco Virlinzi 		ret = stmmac_mdio_register(ndev);
43634bfcbd7aSFrancesco Virlinzi 		if (ret < 0) {
4364b618ab45SHeiner Kallweit 			dev_err(priv->device,
436538ddc59dSLABBE Corentin 				"%s: MDIO bus (id: %d) registration failed",
43664bfcbd7aSFrancesco Virlinzi 				__func__, priv->plat->bus_id);
43676a81c26fSViresh Kumar 			goto error_mdio_register;
43684bfcbd7aSFrancesco Virlinzi 		}
4369e58bb43fSGiuseppe CAVALLARO 	}
43704bfcbd7aSFrancesco Virlinzi 
437157016590SFlorian Fainelli 	ret = register_netdev(ndev);
4372b2eb09afSFlorian Fainelli 	if (ret) {
4373b618ab45SHeiner Kallweit 		dev_err(priv->device, "%s: ERROR %i registering the device\n",
437457016590SFlorian Fainelli 			__func__, ret);
4375b2eb09afSFlorian Fainelli 		goto error_netdev_register;
4376b2eb09afSFlorian Fainelli 	}
43777ac6653aSJeff Kirsher 
437857016590SFlorian Fainelli 	return ret;
43797ac6653aSJeff Kirsher 
43806a81c26fSViresh Kumar error_netdev_register:
4381b2eb09afSFlorian Fainelli 	if (priv->hw->pcs != STMMAC_PCS_RGMII &&
4382b2eb09afSFlorian Fainelli 	    priv->hw->pcs != STMMAC_PCS_TBI &&
4383b2eb09afSFlorian Fainelli 	    priv->hw->pcs != STMMAC_PCS_RTBI)
4384b2eb09afSFlorian Fainelli 		stmmac_mdio_unregister(ndev);
43857ac6653aSJeff Kirsher error_mdio_register:
4386c22a3f48SJoao Pinto 	for (queue = 0; queue < priv->plat->rx_queues_to_use; queue++) {
4387c22a3f48SJoao Pinto 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
4388c22a3f48SJoao Pinto 
4389c22a3f48SJoao Pinto 		netif_napi_del(&rx_q->napi);
4390c22a3f48SJoao Pinto 	}
439162866e98SChen-Yu Tsai error_hw_init:
439234877a15SJose Abreu 	destroy_workqueue(priv->wq);
439334877a15SJose Abreu error_wq:
43947ac6653aSJeff Kirsher 	free_netdev(ndev);
43957ac6653aSJeff Kirsher 
439615ffac73SJoachim Eastwood 	return ret;
43977ac6653aSJeff Kirsher }
4398b2e2f0c7SAndy Shevchenko EXPORT_SYMBOL_GPL(stmmac_dvr_probe);
43997ac6653aSJeff Kirsher 
44007ac6653aSJeff Kirsher /**
44017ac6653aSJeff Kirsher  * stmmac_dvr_remove
4402f4e7bd81SJoachim Eastwood  * @dev: device pointer
44037ac6653aSJeff Kirsher  * Description: this function resets the TX/RX processes, disables the MAC RX/TX
4404bfab27a1SGiuseppe CAVALLARO  * changes the link status, releases the DMA descriptor rings.
44057ac6653aSJeff Kirsher  */
4406f4e7bd81SJoachim Eastwood int stmmac_dvr_remove(struct device *dev)
44077ac6653aSJeff Kirsher {
4408f4e7bd81SJoachim Eastwood 	struct net_device *ndev = dev_get_drvdata(dev);
44097ac6653aSJeff Kirsher 	struct stmmac_priv *priv = netdev_priv(ndev);
44107ac6653aSJeff Kirsher 
441138ddc59dSLABBE Corentin 	netdev_info(priv->dev, "%s: removing driver", __func__);
44127ac6653aSJeff Kirsher 
4413ae4f0d46SJoao Pinto 	stmmac_stop_all_dma(priv);
44147ac6653aSJeff Kirsher 
4415c10d4c82SJose Abreu 	stmmac_mac_set(priv, priv->ioaddr, false);
44167ac6653aSJeff Kirsher 	netif_carrier_off(ndev);
44177ac6653aSJeff Kirsher 	unregister_netdev(ndev);
4418f573c0b9Sjpinto 	if (priv->plat->stmmac_rst)
4419f573c0b9Sjpinto 		reset_control_assert(priv->plat->stmmac_rst);
4420f573c0b9Sjpinto 	clk_disable_unprepare(priv->plat->pclk);
4421f573c0b9Sjpinto 	clk_disable_unprepare(priv->plat->stmmac_clk);
44223fe5cadbSGiuseppe CAVALLARO 	if (priv->hw->pcs != STMMAC_PCS_RGMII &&
44233fe5cadbSGiuseppe CAVALLARO 	    priv->hw->pcs != STMMAC_PCS_TBI &&
44243fe5cadbSGiuseppe CAVALLARO 	    priv->hw->pcs != STMMAC_PCS_RTBI)
4425e743471fSBryan O'Donoghue 		stmmac_mdio_unregister(ndev);
442634877a15SJose Abreu 	destroy_workqueue(priv->wq);
44277ac6653aSJeff Kirsher 	free_netdev(ndev);
44287ac6653aSJeff Kirsher 
44297ac6653aSJeff Kirsher 	return 0;
44307ac6653aSJeff Kirsher }
4431b2e2f0c7SAndy Shevchenko EXPORT_SYMBOL_GPL(stmmac_dvr_remove);
44327ac6653aSJeff Kirsher 
4433732fdf0eSGiuseppe CAVALLARO /**
4434732fdf0eSGiuseppe CAVALLARO  * stmmac_suspend - suspend callback
4435f4e7bd81SJoachim Eastwood  * @dev: device pointer
4436732fdf0eSGiuseppe CAVALLARO  * Description: this is the function to suspend the device and it is called
4437732fdf0eSGiuseppe CAVALLARO  * by the platform driver to stop the network queue, release the resources,
4438732fdf0eSGiuseppe CAVALLARO  * program the PMT register (for WoL), clean and release driver resources.
4439732fdf0eSGiuseppe CAVALLARO  */
4440f4e7bd81SJoachim Eastwood int stmmac_suspend(struct device *dev)
44417ac6653aSJeff Kirsher {
4442f4e7bd81SJoachim Eastwood 	struct net_device *ndev = dev_get_drvdata(dev);
44437ac6653aSJeff Kirsher 	struct stmmac_priv *priv = netdev_priv(ndev);
4444f8c5a875SGiuseppe CAVALLARO 	unsigned long flags;
44457ac6653aSJeff Kirsher 
44467ac6653aSJeff Kirsher 	if (!ndev || !netif_running(ndev))
44477ac6653aSJeff Kirsher 		return 0;
44487ac6653aSJeff Kirsher 
4449d6d50c7eSPhilippe Reynes 	if (ndev->phydev)
4450d6d50c7eSPhilippe Reynes 		phy_stop(ndev->phydev);
4451102463b1SFrancesco Virlinzi 
4452f8c5a875SGiuseppe CAVALLARO 	spin_lock_irqsave(&priv->lock, flags);
44537ac6653aSJeff Kirsher 
44547ac6653aSJeff Kirsher 	netif_device_detach(ndev);
4455c22a3f48SJoao Pinto 	stmmac_stop_all_queues(priv);
44567ac6653aSJeff Kirsher 
4457c22a3f48SJoao Pinto 	stmmac_disable_all_queues(priv);
44587ac6653aSJeff Kirsher 
44597ac6653aSJeff Kirsher 	/* Stop TX/RX DMA */
4460ae4f0d46SJoao Pinto 	stmmac_stop_all_dma(priv);
4461c24602efSGiuseppe CAVALLARO 
44627ac6653aSJeff Kirsher 	/* Enable Power down mode by programming the PMT regs */
446389f7f2cfSSrinivas Kandagatla 	if (device_may_wakeup(priv->device)) {
4464c10d4c82SJose Abreu 		stmmac_pmt(priv, priv->hw, priv->wolopts);
446589f7f2cfSSrinivas Kandagatla 		priv->irq_wake = 1;
446689f7f2cfSSrinivas Kandagatla 	} else {
4467c10d4c82SJose Abreu 		stmmac_mac_set(priv, priv->ioaddr, false);
4468db88f10aSSrinivas Kandagatla 		pinctrl_pm_select_sleep_state(priv->device);
4469ba1377ffSGiuseppe CAVALLARO 		/* Disable clock in case of PWM is off */
4470f573c0b9Sjpinto 		clk_disable(priv->plat->pclk);
4471f573c0b9Sjpinto 		clk_disable(priv->plat->stmmac_clk);
4472ba1377ffSGiuseppe CAVALLARO 	}
4473f8c5a875SGiuseppe CAVALLARO 	spin_unlock_irqrestore(&priv->lock, flags);
44742d871aa0SVince Bridgers 
44754d869b03SLABBE Corentin 	priv->oldlink = false;
4476bd00632cSLABBE Corentin 	priv->speed = SPEED_UNKNOWN;
4477bd00632cSLABBE Corentin 	priv->oldduplex = DUPLEX_UNKNOWN;
44787ac6653aSJeff Kirsher 	return 0;
44797ac6653aSJeff Kirsher }
4480b2e2f0c7SAndy Shevchenko EXPORT_SYMBOL_GPL(stmmac_suspend);
44817ac6653aSJeff Kirsher 
4482732fdf0eSGiuseppe CAVALLARO /**
448354139cf3SJoao Pinto  * stmmac_reset_queues_param - reset queue parameters
448454139cf3SJoao Pinto  * @dev: device pointer
448554139cf3SJoao Pinto  */
448654139cf3SJoao Pinto static void stmmac_reset_queues_param(struct stmmac_priv *priv)
448754139cf3SJoao Pinto {
448854139cf3SJoao Pinto 	u32 rx_cnt = priv->plat->rx_queues_to_use;
4489ce736788SJoao Pinto 	u32 tx_cnt = priv->plat->tx_queues_to_use;
449054139cf3SJoao Pinto 	u32 queue;
449154139cf3SJoao Pinto 
449254139cf3SJoao Pinto 	for (queue = 0; queue < rx_cnt; queue++) {
449354139cf3SJoao Pinto 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
449454139cf3SJoao Pinto 
449554139cf3SJoao Pinto 		rx_q->cur_rx = 0;
449654139cf3SJoao Pinto 		rx_q->dirty_rx = 0;
449754139cf3SJoao Pinto 	}
449854139cf3SJoao Pinto 
4499ce736788SJoao Pinto 	for (queue = 0; queue < tx_cnt; queue++) {
4500ce736788SJoao Pinto 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
4501ce736788SJoao Pinto 
4502ce736788SJoao Pinto 		tx_q->cur_tx = 0;
4503ce736788SJoao Pinto 		tx_q->dirty_tx = 0;
45048d212a9eSNiklas Cassel 		tx_q->mss = 0;
4505ce736788SJoao Pinto 	}
450654139cf3SJoao Pinto }
450754139cf3SJoao Pinto 
450854139cf3SJoao Pinto /**
4509732fdf0eSGiuseppe CAVALLARO  * stmmac_resume - resume callback
4510f4e7bd81SJoachim Eastwood  * @dev: device pointer
4511732fdf0eSGiuseppe CAVALLARO  * Description: when resume this function is invoked to setup the DMA and CORE
4512732fdf0eSGiuseppe CAVALLARO  * in a usable state.
4513732fdf0eSGiuseppe CAVALLARO  */
4514f4e7bd81SJoachim Eastwood int stmmac_resume(struct device *dev)
45157ac6653aSJeff Kirsher {
4516f4e7bd81SJoachim Eastwood 	struct net_device *ndev = dev_get_drvdata(dev);
45177ac6653aSJeff Kirsher 	struct stmmac_priv *priv = netdev_priv(ndev);
4518f8c5a875SGiuseppe CAVALLARO 	unsigned long flags;
45197ac6653aSJeff Kirsher 
45207ac6653aSJeff Kirsher 	if (!netif_running(ndev))
45217ac6653aSJeff Kirsher 		return 0;
45227ac6653aSJeff Kirsher 
45237ac6653aSJeff Kirsher 	/* Power Down bit, into the PM register, is cleared
45247ac6653aSJeff Kirsher 	 * automatically as soon as a magic packet or a Wake-up frame
45257ac6653aSJeff Kirsher 	 * is received. Anyway, it's better to manually clear
45267ac6653aSJeff Kirsher 	 * this bit because it can generate problems while resuming
4527ceb69499SGiuseppe CAVALLARO 	 * from another devices (e.g. serial console).
4528ceb69499SGiuseppe CAVALLARO 	 */
4529623997fbSSrinivas Kandagatla 	if (device_may_wakeup(priv->device)) {
4530f55d84b0SVincent Palatin 		spin_lock_irqsave(&priv->lock, flags);
4531c10d4c82SJose Abreu 		stmmac_pmt(priv, priv->hw, 0);
4532f55d84b0SVincent Palatin 		spin_unlock_irqrestore(&priv->lock, flags);
453389f7f2cfSSrinivas Kandagatla 		priv->irq_wake = 0;
4534623997fbSSrinivas Kandagatla 	} else {
4535db88f10aSSrinivas Kandagatla 		pinctrl_pm_select_default_state(priv->device);
45368d45e42bSLABBE Corentin 		/* enable the clk previously disabled */
4537f573c0b9Sjpinto 		clk_enable(priv->plat->stmmac_clk);
4538f573c0b9Sjpinto 		clk_enable(priv->plat->pclk);
4539623997fbSSrinivas Kandagatla 		/* reset the phy so that it's ready */
4540623997fbSSrinivas Kandagatla 		if (priv->mii)
4541623997fbSSrinivas Kandagatla 			stmmac_mdio_reset(priv->mii);
4542623997fbSSrinivas Kandagatla 	}
45437ac6653aSJeff Kirsher 
45447ac6653aSJeff Kirsher 	netif_device_attach(ndev);
45457ac6653aSJeff Kirsher 
4546f55d84b0SVincent Palatin 	spin_lock_irqsave(&priv->lock, flags);
4547f55d84b0SVincent Palatin 
454854139cf3SJoao Pinto 	stmmac_reset_queues_param(priv);
454954139cf3SJoao Pinto 
4550ae79a639SGiuseppe CAVALLARO 	stmmac_clear_descriptors(priv);
4551ae79a639SGiuseppe CAVALLARO 
4552fe131929SHuacai Chen 	stmmac_hw_setup(ndev, false);
4553777da230SGiuseppe CAVALLARO 	stmmac_init_tx_coalesce(priv);
4554ac316c78SGiuseppe CAVALLARO 	stmmac_set_rx_mode(ndev);
45557ac6653aSJeff Kirsher 
4556c22a3f48SJoao Pinto 	stmmac_enable_all_queues(priv);
45577ac6653aSJeff Kirsher 
4558c22a3f48SJoao Pinto 	stmmac_start_all_queues(priv);
45597ac6653aSJeff Kirsher 
4560f8c5a875SGiuseppe CAVALLARO 	spin_unlock_irqrestore(&priv->lock, flags);
4561102463b1SFrancesco Virlinzi 
4562d6d50c7eSPhilippe Reynes 	if (ndev->phydev)
4563d6d50c7eSPhilippe Reynes 		phy_start(ndev->phydev);
4564102463b1SFrancesco Virlinzi 
45657ac6653aSJeff Kirsher 	return 0;
45667ac6653aSJeff Kirsher }
4567b2e2f0c7SAndy Shevchenko EXPORT_SYMBOL_GPL(stmmac_resume);
4568ba27ec66SGiuseppe CAVALLARO 
45697ac6653aSJeff Kirsher #ifndef MODULE
45707ac6653aSJeff Kirsher static int __init stmmac_cmdline_opt(char *str)
45717ac6653aSJeff Kirsher {
45727ac6653aSJeff Kirsher 	char *opt;
45737ac6653aSJeff Kirsher 
45747ac6653aSJeff Kirsher 	if (!str || !*str)
45757ac6653aSJeff Kirsher 		return -EINVAL;
45767ac6653aSJeff Kirsher 	while ((opt = strsep(&str, ",")) != NULL) {
45777ac6653aSJeff Kirsher 		if (!strncmp(opt, "debug:", 6)) {
4578ea2ab871SGiuseppe CAVALLARO 			if (kstrtoint(opt + 6, 0, &debug))
45797ac6653aSJeff Kirsher 				goto err;
45807ac6653aSJeff Kirsher 		} else if (!strncmp(opt, "phyaddr:", 8)) {
4581ea2ab871SGiuseppe CAVALLARO 			if (kstrtoint(opt + 8, 0, &phyaddr))
45827ac6653aSJeff Kirsher 				goto err;
45837ac6653aSJeff Kirsher 		} else if (!strncmp(opt, "buf_sz:", 7)) {
4584ea2ab871SGiuseppe CAVALLARO 			if (kstrtoint(opt + 7, 0, &buf_sz))
45857ac6653aSJeff Kirsher 				goto err;
45867ac6653aSJeff Kirsher 		} else if (!strncmp(opt, "tc:", 3)) {
4587ea2ab871SGiuseppe CAVALLARO 			if (kstrtoint(opt + 3, 0, &tc))
45887ac6653aSJeff Kirsher 				goto err;
45897ac6653aSJeff Kirsher 		} else if (!strncmp(opt, "watchdog:", 9)) {
4590ea2ab871SGiuseppe CAVALLARO 			if (kstrtoint(opt + 9, 0, &watchdog))
45917ac6653aSJeff Kirsher 				goto err;
45927ac6653aSJeff Kirsher 		} else if (!strncmp(opt, "flow_ctrl:", 10)) {
4593ea2ab871SGiuseppe CAVALLARO 			if (kstrtoint(opt + 10, 0, &flow_ctrl))
45947ac6653aSJeff Kirsher 				goto err;
45957ac6653aSJeff Kirsher 		} else if (!strncmp(opt, "pause:", 6)) {
4596ea2ab871SGiuseppe CAVALLARO 			if (kstrtoint(opt + 6, 0, &pause))
45977ac6653aSJeff Kirsher 				goto err;
4598506f669cSGiuseppe CAVALLARO 		} else if (!strncmp(opt, "eee_timer:", 10)) {
4599d765955dSGiuseppe CAVALLARO 			if (kstrtoint(opt + 10, 0, &eee_timer))
4600d765955dSGiuseppe CAVALLARO 				goto err;
46014a7d666aSGiuseppe CAVALLARO 		} else if (!strncmp(opt, "chain_mode:", 11)) {
46024a7d666aSGiuseppe CAVALLARO 			if (kstrtoint(opt + 11, 0, &chain_mode))
46034a7d666aSGiuseppe CAVALLARO 				goto err;
46047ac6653aSJeff Kirsher 		}
46057ac6653aSJeff Kirsher 	}
46067ac6653aSJeff Kirsher 	return 0;
46077ac6653aSJeff Kirsher 
46087ac6653aSJeff Kirsher err:
46097ac6653aSJeff Kirsher 	pr_err("%s: ERROR broken module parameter conversion", __func__);
46107ac6653aSJeff Kirsher 	return -EINVAL;
46117ac6653aSJeff Kirsher }
46127ac6653aSJeff Kirsher 
46137ac6653aSJeff Kirsher __setup("stmmaceth=", stmmac_cmdline_opt);
4614ceb69499SGiuseppe CAVALLARO #endif /* MODULE */
46156fc0d0f2SGiuseppe Cavallaro 
4616466c5ac8SMathieu Olivari static int __init stmmac_init(void)
4617466c5ac8SMathieu Olivari {
4618466c5ac8SMathieu Olivari #ifdef CONFIG_DEBUG_FS
4619466c5ac8SMathieu Olivari 	/* Create debugfs main directory if it doesn't exist yet */
4620466c5ac8SMathieu Olivari 	if (!stmmac_fs_dir) {
4621466c5ac8SMathieu Olivari 		stmmac_fs_dir = debugfs_create_dir(STMMAC_RESOURCE_NAME, NULL);
4622466c5ac8SMathieu Olivari 
4623466c5ac8SMathieu Olivari 		if (!stmmac_fs_dir || IS_ERR(stmmac_fs_dir)) {
4624466c5ac8SMathieu Olivari 			pr_err("ERROR %s, debugfs create directory failed\n",
4625466c5ac8SMathieu Olivari 			       STMMAC_RESOURCE_NAME);
4626466c5ac8SMathieu Olivari 
4627466c5ac8SMathieu Olivari 			return -ENOMEM;
4628466c5ac8SMathieu Olivari 		}
4629466c5ac8SMathieu Olivari 	}
4630466c5ac8SMathieu Olivari #endif
4631466c5ac8SMathieu Olivari 
4632466c5ac8SMathieu Olivari 	return 0;
4633466c5ac8SMathieu Olivari }
4634466c5ac8SMathieu Olivari 
4635466c5ac8SMathieu Olivari static void __exit stmmac_exit(void)
4636466c5ac8SMathieu Olivari {
4637466c5ac8SMathieu Olivari #ifdef CONFIG_DEBUG_FS
4638466c5ac8SMathieu Olivari 	debugfs_remove_recursive(stmmac_fs_dir);
4639466c5ac8SMathieu Olivari #endif
4640466c5ac8SMathieu Olivari }
4641466c5ac8SMathieu Olivari 
4642466c5ac8SMathieu Olivari module_init(stmmac_init)
4643466c5ac8SMathieu Olivari module_exit(stmmac_exit)
4644466c5ac8SMathieu Olivari 
46456fc0d0f2SGiuseppe Cavallaro MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet device driver");
46466fc0d0f2SGiuseppe Cavallaro MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
46476fc0d0f2SGiuseppe Cavallaro MODULE_LICENSE("GPL");
4648