17ac6653aSJeff Kirsher /*******************************************************************************
27ac6653aSJeff Kirsher   This is the driver for the ST MAC 10/100/1000 on-chip Ethernet controllers.
37ac6653aSJeff Kirsher   ST Ethernet IPs are built around a Synopsys IP Core.
47ac6653aSJeff Kirsher 
5286a8372SGiuseppe CAVALLARO 	Copyright(C) 2007-2011 STMicroelectronics Ltd
67ac6653aSJeff Kirsher 
77ac6653aSJeff Kirsher   This program is free software; you can redistribute it and/or modify it
87ac6653aSJeff Kirsher   under the terms and conditions of the GNU General Public License,
97ac6653aSJeff Kirsher   version 2, as published by the Free Software Foundation.
107ac6653aSJeff Kirsher 
117ac6653aSJeff Kirsher   This program is distributed in the hope it will be useful, but WITHOUT
127ac6653aSJeff Kirsher   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
137ac6653aSJeff Kirsher   FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
147ac6653aSJeff Kirsher   more details.
157ac6653aSJeff Kirsher 
167ac6653aSJeff Kirsher   The full GNU General Public License is included in this distribution in
177ac6653aSJeff Kirsher   the file called "COPYING".
187ac6653aSJeff Kirsher 
197ac6653aSJeff Kirsher   Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
207ac6653aSJeff Kirsher 
217ac6653aSJeff Kirsher   Documentation available at:
227ac6653aSJeff Kirsher 	http://www.stlinux.com
237ac6653aSJeff Kirsher   Support available at:
247ac6653aSJeff Kirsher 	https://bugzilla.stlinux.com/
257ac6653aSJeff Kirsher *******************************************************************************/
267ac6653aSJeff Kirsher 
276a81c26fSViresh Kumar #include <linux/clk.h>
287ac6653aSJeff Kirsher #include <linux/kernel.h>
297ac6653aSJeff Kirsher #include <linux/interrupt.h>
307ac6653aSJeff Kirsher #include <linux/ip.h>
317ac6653aSJeff Kirsher #include <linux/tcp.h>
327ac6653aSJeff Kirsher #include <linux/skbuff.h>
337ac6653aSJeff Kirsher #include <linux/ethtool.h>
347ac6653aSJeff Kirsher #include <linux/if_ether.h>
357ac6653aSJeff Kirsher #include <linux/crc32.h>
367ac6653aSJeff Kirsher #include <linux/mii.h>
3701789349SJiri Pirko #include <linux/if.h>
387ac6653aSJeff Kirsher #include <linux/if_vlan.h>
397ac6653aSJeff Kirsher #include <linux/dma-mapping.h>
407ac6653aSJeff Kirsher #include <linux/slab.h>
417ac6653aSJeff Kirsher #include <linux/prefetch.h>
42db88f10aSSrinivas Kandagatla #include <linux/pinctrl/consumer.h>
4350fb4f74SGiuseppe CAVALLARO #ifdef CONFIG_DEBUG_FS
447ac29055SGiuseppe CAVALLARO #include <linux/debugfs.h>
457ac29055SGiuseppe CAVALLARO #include <linux/seq_file.h>
4650fb4f74SGiuseppe CAVALLARO #endif /* CONFIG_DEBUG_FS */
47891434b1SRayagond Kokatanur #include <linux/net_tstamp.h>
484dbbe8ddSJose Abreu #include <net/pkt_cls.h>
49891434b1SRayagond Kokatanur #include "stmmac_ptp.h"
50286a8372SGiuseppe CAVALLARO #include "stmmac.h"
51c5e4ddbdSChen-Yu Tsai #include <linux/reset.h>
525790cf3cSMathieu Olivari #include <linux/of_mdio.h>
5319d857c9SPhil Reid #include "dwmac1000.h"
5442de047dSJose Abreu #include "hwif.h"
557ac6653aSJeff Kirsher 
567ac6653aSJeff Kirsher #define STMMAC_ALIGN(x)	L1_CACHE_ALIGN(x)
57f748be53SAlexandre TORGUE #define	TSO_MAX_BUFF_SIZE	(SZ_16K - 1)
587ac6653aSJeff Kirsher 
597ac6653aSJeff Kirsher /* Module parameters */
6032ceabcaSGiuseppe CAVALLARO #define TX_TIMEO	5000
617ac6653aSJeff Kirsher static int watchdog = TX_TIMEO;
62d3757ba4SJoe Perches module_param(watchdog, int, 0644);
6332ceabcaSGiuseppe CAVALLARO MODULE_PARM_DESC(watchdog, "Transmit timeout in milliseconds (default 5s)");
647ac6653aSJeff Kirsher 
6532ceabcaSGiuseppe CAVALLARO static int debug = -1;
66d3757ba4SJoe Perches module_param(debug, int, 0644);
6732ceabcaSGiuseppe CAVALLARO MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)");
687ac6653aSJeff Kirsher 
6947d1f71fSstephen hemminger static int phyaddr = -1;
70d3757ba4SJoe Perches module_param(phyaddr, int, 0444);
717ac6653aSJeff Kirsher MODULE_PARM_DESC(phyaddr, "Physical device address");
727ac6653aSJeff Kirsher 
73e3ad57c9SGiuseppe Cavallaro #define STMMAC_TX_THRESH	(DMA_TX_SIZE / 4)
74120e87f9SGiuseppe Cavallaro #define STMMAC_RX_THRESH	(DMA_RX_SIZE / 4)
757ac6653aSJeff Kirsher 
767ac6653aSJeff Kirsher static int flow_ctrl = FLOW_OFF;
77d3757ba4SJoe Perches module_param(flow_ctrl, int, 0644);
787ac6653aSJeff Kirsher MODULE_PARM_DESC(flow_ctrl, "Flow control ability [on/off]");
797ac6653aSJeff Kirsher 
807ac6653aSJeff Kirsher static int pause = PAUSE_TIME;
81d3757ba4SJoe Perches module_param(pause, int, 0644);
827ac6653aSJeff Kirsher MODULE_PARM_DESC(pause, "Flow Control Pause Time");
837ac6653aSJeff Kirsher 
847ac6653aSJeff Kirsher #define TC_DEFAULT 64
857ac6653aSJeff Kirsher static int tc = TC_DEFAULT;
86d3757ba4SJoe Perches module_param(tc, int, 0644);
877ac6653aSJeff Kirsher MODULE_PARM_DESC(tc, "DMA threshold control value");
887ac6653aSJeff Kirsher 
89d916701cSGiuseppe CAVALLARO #define	DEFAULT_BUFSIZE	1536
90d916701cSGiuseppe CAVALLARO static int buf_sz = DEFAULT_BUFSIZE;
91d3757ba4SJoe Perches module_param(buf_sz, int, 0644);
927ac6653aSJeff Kirsher MODULE_PARM_DESC(buf_sz, "DMA buffer size");
937ac6653aSJeff Kirsher 
9422ad3838SGiuseppe Cavallaro #define	STMMAC_RX_COPYBREAK	256
9522ad3838SGiuseppe Cavallaro 
967ac6653aSJeff Kirsher static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
977ac6653aSJeff Kirsher 				      NETIF_MSG_LINK | NETIF_MSG_IFUP |
987ac6653aSJeff Kirsher 				      NETIF_MSG_IFDOWN | NETIF_MSG_TIMER);
997ac6653aSJeff Kirsher 
100d765955dSGiuseppe CAVALLARO #define STMMAC_DEFAULT_LPI_TIMER	1000
101d765955dSGiuseppe CAVALLARO static int eee_timer = STMMAC_DEFAULT_LPI_TIMER;
102d3757ba4SJoe Perches module_param(eee_timer, int, 0644);
103d765955dSGiuseppe CAVALLARO MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec");
104f5351ef7SGiuseppe CAVALLARO #define STMMAC_LPI_T(x) (jiffies + msecs_to_jiffies(x))
105d765955dSGiuseppe CAVALLARO 
10622d3efe5SPavel Machek /* By default the driver will use the ring mode to manage tx and rx descriptors,
10722d3efe5SPavel Machek  * but allow user to force to use the chain instead of the ring
1084a7d666aSGiuseppe CAVALLARO  */
1094a7d666aSGiuseppe CAVALLARO static unsigned int chain_mode;
110d3757ba4SJoe Perches module_param(chain_mode, int, 0444);
1114a7d666aSGiuseppe CAVALLARO MODULE_PARM_DESC(chain_mode, "To use chain instead of ring mode");
1124a7d666aSGiuseppe CAVALLARO 
1137ac6653aSJeff Kirsher static irqreturn_t stmmac_interrupt(int irq, void *dev_id);
1147ac6653aSJeff Kirsher 
11550fb4f74SGiuseppe CAVALLARO #ifdef CONFIG_DEBUG_FS
116bfab27a1SGiuseppe CAVALLARO static int stmmac_init_fs(struct net_device *dev);
117466c5ac8SMathieu Olivari static void stmmac_exit_fs(struct net_device *dev);
118bfab27a1SGiuseppe CAVALLARO #endif
119bfab27a1SGiuseppe CAVALLARO 
1209125cdd1SGiuseppe CAVALLARO #define STMMAC_COAL_TIMER(x) (jiffies + usecs_to_jiffies(x))
1219125cdd1SGiuseppe CAVALLARO 
1227ac6653aSJeff Kirsher /**
1237ac6653aSJeff Kirsher  * stmmac_verify_args - verify the driver parameters.
124732fdf0eSGiuseppe CAVALLARO  * Description: it checks the driver parameters and set a default in case of
125732fdf0eSGiuseppe CAVALLARO  * errors.
1267ac6653aSJeff Kirsher  */
1277ac6653aSJeff Kirsher static void stmmac_verify_args(void)
1287ac6653aSJeff Kirsher {
1297ac6653aSJeff Kirsher 	if (unlikely(watchdog < 0))
1307ac6653aSJeff Kirsher 		watchdog = TX_TIMEO;
131d916701cSGiuseppe CAVALLARO 	if (unlikely((buf_sz < DEFAULT_BUFSIZE) || (buf_sz > BUF_SIZE_16KiB)))
132d916701cSGiuseppe CAVALLARO 		buf_sz = DEFAULT_BUFSIZE;
1337ac6653aSJeff Kirsher 	if (unlikely(flow_ctrl > 1))
1347ac6653aSJeff Kirsher 		flow_ctrl = FLOW_AUTO;
1357ac6653aSJeff Kirsher 	else if (likely(flow_ctrl < 0))
1367ac6653aSJeff Kirsher 		flow_ctrl = FLOW_OFF;
1377ac6653aSJeff Kirsher 	if (unlikely((pause < 0) || (pause > 0xffff)))
1387ac6653aSJeff Kirsher 		pause = PAUSE_TIME;
139d765955dSGiuseppe CAVALLARO 	if (eee_timer < 0)
140d765955dSGiuseppe CAVALLARO 		eee_timer = STMMAC_DEFAULT_LPI_TIMER;
1417ac6653aSJeff Kirsher }
1427ac6653aSJeff Kirsher 
14332ceabcaSGiuseppe CAVALLARO /**
144c22a3f48SJoao Pinto  * stmmac_disable_all_queues - Disable all queues
145c22a3f48SJoao Pinto  * @priv: driver private structure
146c22a3f48SJoao Pinto  */
147c22a3f48SJoao Pinto static void stmmac_disable_all_queues(struct stmmac_priv *priv)
148c22a3f48SJoao Pinto {
149c22a3f48SJoao Pinto 	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
150c22a3f48SJoao Pinto 	u32 queue;
151c22a3f48SJoao Pinto 
152c22a3f48SJoao Pinto 	for (queue = 0; queue < rx_queues_cnt; queue++) {
153c22a3f48SJoao Pinto 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
154c22a3f48SJoao Pinto 
155c22a3f48SJoao Pinto 		napi_disable(&rx_q->napi);
156c22a3f48SJoao Pinto 	}
157c22a3f48SJoao Pinto }
158c22a3f48SJoao Pinto 
159c22a3f48SJoao Pinto /**
160c22a3f48SJoao Pinto  * stmmac_enable_all_queues - Enable all queues
161c22a3f48SJoao Pinto  * @priv: driver private structure
162c22a3f48SJoao Pinto  */
163c22a3f48SJoao Pinto static void stmmac_enable_all_queues(struct stmmac_priv *priv)
164c22a3f48SJoao Pinto {
165c22a3f48SJoao Pinto 	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
166c22a3f48SJoao Pinto 	u32 queue;
167c22a3f48SJoao Pinto 
168c22a3f48SJoao Pinto 	for (queue = 0; queue < rx_queues_cnt; queue++) {
169c22a3f48SJoao Pinto 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
170c22a3f48SJoao Pinto 
171c22a3f48SJoao Pinto 		napi_enable(&rx_q->napi);
172c22a3f48SJoao Pinto 	}
173c22a3f48SJoao Pinto }
174c22a3f48SJoao Pinto 
175c22a3f48SJoao Pinto /**
176c22a3f48SJoao Pinto  * stmmac_stop_all_queues - Stop all queues
177c22a3f48SJoao Pinto  * @priv: driver private structure
178c22a3f48SJoao Pinto  */
179c22a3f48SJoao Pinto static void stmmac_stop_all_queues(struct stmmac_priv *priv)
180c22a3f48SJoao Pinto {
181c22a3f48SJoao Pinto 	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
182c22a3f48SJoao Pinto 	u32 queue;
183c22a3f48SJoao Pinto 
184c22a3f48SJoao Pinto 	for (queue = 0; queue < tx_queues_cnt; queue++)
185c22a3f48SJoao Pinto 		netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
186c22a3f48SJoao Pinto }
187c22a3f48SJoao Pinto 
188c22a3f48SJoao Pinto /**
189c22a3f48SJoao Pinto  * stmmac_start_all_queues - Start all queues
190c22a3f48SJoao Pinto  * @priv: driver private structure
191c22a3f48SJoao Pinto  */
192c22a3f48SJoao Pinto static void stmmac_start_all_queues(struct stmmac_priv *priv)
193c22a3f48SJoao Pinto {
194c22a3f48SJoao Pinto 	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
195c22a3f48SJoao Pinto 	u32 queue;
196c22a3f48SJoao Pinto 
197c22a3f48SJoao Pinto 	for (queue = 0; queue < tx_queues_cnt; queue++)
198c22a3f48SJoao Pinto 		netif_tx_start_queue(netdev_get_tx_queue(priv->dev, queue));
199c22a3f48SJoao Pinto }
200c22a3f48SJoao Pinto 
20134877a15SJose Abreu static void stmmac_service_event_schedule(struct stmmac_priv *priv)
20234877a15SJose Abreu {
20334877a15SJose Abreu 	if (!test_bit(STMMAC_DOWN, &priv->state) &&
20434877a15SJose Abreu 	    !test_and_set_bit(STMMAC_SERVICE_SCHED, &priv->state))
20534877a15SJose Abreu 		queue_work(priv->wq, &priv->service_task);
20634877a15SJose Abreu }
20734877a15SJose Abreu 
20834877a15SJose Abreu static void stmmac_global_err(struct stmmac_priv *priv)
20934877a15SJose Abreu {
21034877a15SJose Abreu 	netif_carrier_off(priv->dev);
21134877a15SJose Abreu 	set_bit(STMMAC_RESET_REQUESTED, &priv->state);
21234877a15SJose Abreu 	stmmac_service_event_schedule(priv);
21334877a15SJose Abreu }
21434877a15SJose Abreu 
215c22a3f48SJoao Pinto /**
21632ceabcaSGiuseppe CAVALLARO  * stmmac_clk_csr_set - dynamically set the MDC clock
21732ceabcaSGiuseppe CAVALLARO  * @priv: driver private structure
21832ceabcaSGiuseppe CAVALLARO  * Description: this is to dynamically set the MDC clock according to the csr
21932ceabcaSGiuseppe CAVALLARO  * clock input.
22032ceabcaSGiuseppe CAVALLARO  * Note:
22132ceabcaSGiuseppe CAVALLARO  *	If a specific clk_csr value is passed from the platform
22232ceabcaSGiuseppe CAVALLARO  *	this means that the CSR Clock Range selection cannot be
22332ceabcaSGiuseppe CAVALLARO  *	changed at run-time and it is fixed (as reported in the driver
22432ceabcaSGiuseppe CAVALLARO  *	documentation). Viceversa the driver will try to set the MDC
22532ceabcaSGiuseppe CAVALLARO  *	clock dynamically according to the actual clock input.
22632ceabcaSGiuseppe CAVALLARO  */
227cd7201f4SGiuseppe CAVALLARO static void stmmac_clk_csr_set(struct stmmac_priv *priv)
228cd7201f4SGiuseppe CAVALLARO {
229cd7201f4SGiuseppe CAVALLARO 	u32 clk_rate;
230cd7201f4SGiuseppe CAVALLARO 
231f573c0b9Sjpinto 	clk_rate = clk_get_rate(priv->plat->stmmac_clk);
232cd7201f4SGiuseppe CAVALLARO 
233cd7201f4SGiuseppe CAVALLARO 	/* Platform provided default clk_csr would be assumed valid
234ceb69499SGiuseppe CAVALLARO 	 * for all other cases except for the below mentioned ones.
235ceb69499SGiuseppe CAVALLARO 	 * For values higher than the IEEE 802.3 specified frequency
236ceb69499SGiuseppe CAVALLARO 	 * we can not estimate the proper divider as it is not known
237ceb69499SGiuseppe CAVALLARO 	 * the frequency of clk_csr_i. So we do not change the default
238ceb69499SGiuseppe CAVALLARO 	 * divider.
239ceb69499SGiuseppe CAVALLARO 	 */
240cd7201f4SGiuseppe CAVALLARO 	if (!(priv->clk_csr & MAC_CSR_H_FRQ_MASK)) {
241cd7201f4SGiuseppe CAVALLARO 		if (clk_rate < CSR_F_35M)
242cd7201f4SGiuseppe CAVALLARO 			priv->clk_csr = STMMAC_CSR_20_35M;
243cd7201f4SGiuseppe CAVALLARO 		else if ((clk_rate >= CSR_F_35M) && (clk_rate < CSR_F_60M))
244cd7201f4SGiuseppe CAVALLARO 			priv->clk_csr = STMMAC_CSR_35_60M;
245cd7201f4SGiuseppe CAVALLARO 		else if ((clk_rate >= CSR_F_60M) && (clk_rate < CSR_F_100M))
246cd7201f4SGiuseppe CAVALLARO 			priv->clk_csr = STMMAC_CSR_60_100M;
247cd7201f4SGiuseppe CAVALLARO 		else if ((clk_rate >= CSR_F_100M) && (clk_rate < CSR_F_150M))
248cd7201f4SGiuseppe CAVALLARO 			priv->clk_csr = STMMAC_CSR_100_150M;
249cd7201f4SGiuseppe CAVALLARO 		else if ((clk_rate >= CSR_F_150M) && (clk_rate < CSR_F_250M))
250cd7201f4SGiuseppe CAVALLARO 			priv->clk_csr = STMMAC_CSR_150_250M;
25119d857c9SPhil Reid 		else if ((clk_rate >= CSR_F_250M) && (clk_rate < CSR_F_300M))
252cd7201f4SGiuseppe CAVALLARO 			priv->clk_csr = STMMAC_CSR_250_300M;
253ceb69499SGiuseppe CAVALLARO 	}
2549f93ac8dSLABBE Corentin 
2559f93ac8dSLABBE Corentin 	if (priv->plat->has_sun8i) {
2569f93ac8dSLABBE Corentin 		if (clk_rate > 160000000)
2579f93ac8dSLABBE Corentin 			priv->clk_csr = 0x03;
2589f93ac8dSLABBE Corentin 		else if (clk_rate > 80000000)
2599f93ac8dSLABBE Corentin 			priv->clk_csr = 0x02;
2609f93ac8dSLABBE Corentin 		else if (clk_rate > 40000000)
2619f93ac8dSLABBE Corentin 			priv->clk_csr = 0x01;
2629f93ac8dSLABBE Corentin 		else
2639f93ac8dSLABBE Corentin 			priv->clk_csr = 0;
2649f93ac8dSLABBE Corentin 	}
265cd7201f4SGiuseppe CAVALLARO }
266cd7201f4SGiuseppe CAVALLARO 
2677ac6653aSJeff Kirsher static void print_pkt(unsigned char *buf, int len)
2687ac6653aSJeff Kirsher {
269424c4f78SAndy Shevchenko 	pr_debug("len = %d byte, buf addr: 0x%p\n", len, buf);
270424c4f78SAndy Shevchenko 	print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, buf, len);
2717ac6653aSJeff Kirsher }
2727ac6653aSJeff Kirsher 
273ce736788SJoao Pinto static inline u32 stmmac_tx_avail(struct stmmac_priv *priv, u32 queue)
2747ac6653aSJeff Kirsher {
275ce736788SJoao Pinto 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
276a6a3e026SLABBE Corentin 	u32 avail;
277e3ad57c9SGiuseppe Cavallaro 
278ce736788SJoao Pinto 	if (tx_q->dirty_tx > tx_q->cur_tx)
279ce736788SJoao Pinto 		avail = tx_q->dirty_tx - tx_q->cur_tx - 1;
280e3ad57c9SGiuseppe Cavallaro 	else
281ce736788SJoao Pinto 		avail = DMA_TX_SIZE - tx_q->cur_tx + tx_q->dirty_tx - 1;
282e3ad57c9SGiuseppe Cavallaro 
283e3ad57c9SGiuseppe Cavallaro 	return avail;
284e3ad57c9SGiuseppe Cavallaro }
285e3ad57c9SGiuseppe Cavallaro 
28654139cf3SJoao Pinto /**
28754139cf3SJoao Pinto  * stmmac_rx_dirty - Get RX queue dirty
28854139cf3SJoao Pinto  * @priv: driver private structure
28954139cf3SJoao Pinto  * @queue: RX queue index
29054139cf3SJoao Pinto  */
29154139cf3SJoao Pinto static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv, u32 queue)
292e3ad57c9SGiuseppe Cavallaro {
29354139cf3SJoao Pinto 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
294a6a3e026SLABBE Corentin 	u32 dirty;
295e3ad57c9SGiuseppe Cavallaro 
29654139cf3SJoao Pinto 	if (rx_q->dirty_rx <= rx_q->cur_rx)
29754139cf3SJoao Pinto 		dirty = rx_q->cur_rx - rx_q->dirty_rx;
298e3ad57c9SGiuseppe Cavallaro 	else
29954139cf3SJoao Pinto 		dirty = DMA_RX_SIZE - rx_q->dirty_rx + rx_q->cur_rx;
300e3ad57c9SGiuseppe Cavallaro 
301e3ad57c9SGiuseppe Cavallaro 	return dirty;
3027ac6653aSJeff Kirsher }
3037ac6653aSJeff Kirsher 
30432ceabcaSGiuseppe CAVALLARO /**
305732fdf0eSGiuseppe CAVALLARO  * stmmac_hw_fix_mac_speed - callback for speed selection
30632ceabcaSGiuseppe CAVALLARO  * @priv: driver private structure
3078d45e42bSLABBE Corentin  * Description: on some platforms (e.g. ST), some HW system configuration
30832ceabcaSGiuseppe CAVALLARO  * registers have to be set according to the link speed negotiated.
3097ac6653aSJeff Kirsher  */
3107ac6653aSJeff Kirsher static inline void stmmac_hw_fix_mac_speed(struct stmmac_priv *priv)
3117ac6653aSJeff Kirsher {
312d6d50c7eSPhilippe Reynes 	struct net_device *ndev = priv->dev;
313d6d50c7eSPhilippe Reynes 	struct phy_device *phydev = ndev->phydev;
3147ac6653aSJeff Kirsher 
3157ac6653aSJeff Kirsher 	if (likely(priv->plat->fix_mac_speed))
316ceb69499SGiuseppe CAVALLARO 		priv->plat->fix_mac_speed(priv->plat->bsp_priv, phydev->speed);
3177ac6653aSJeff Kirsher }
3187ac6653aSJeff Kirsher 
31932ceabcaSGiuseppe CAVALLARO /**
320732fdf0eSGiuseppe CAVALLARO  * stmmac_enable_eee_mode - check and enter in LPI mode
32132ceabcaSGiuseppe CAVALLARO  * @priv: driver private structure
322732fdf0eSGiuseppe CAVALLARO  * Description: this function is to verify and enter in LPI mode in case of
323732fdf0eSGiuseppe CAVALLARO  * EEE.
32432ceabcaSGiuseppe CAVALLARO  */
325d765955dSGiuseppe CAVALLARO static void stmmac_enable_eee_mode(struct stmmac_priv *priv)
326d765955dSGiuseppe CAVALLARO {
327ce736788SJoao Pinto 	u32 tx_cnt = priv->plat->tx_queues_to_use;
328ce736788SJoao Pinto 	u32 queue;
329ce736788SJoao Pinto 
330ce736788SJoao Pinto 	/* check if all TX queues have the work finished */
331ce736788SJoao Pinto 	for (queue = 0; queue < tx_cnt; queue++) {
332ce736788SJoao Pinto 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
333ce736788SJoao Pinto 
334ce736788SJoao Pinto 		if (tx_q->dirty_tx != tx_q->cur_tx)
335ce736788SJoao Pinto 			return; /* still unfinished work */
336ce736788SJoao Pinto 	}
337ce736788SJoao Pinto 
338d765955dSGiuseppe CAVALLARO 	/* Check and enter in LPI mode */
339ce736788SJoao Pinto 	if (!priv->tx_path_in_lpi_mode)
340c10d4c82SJose Abreu 		stmmac_set_eee_mode(priv, priv->hw,
341b4b7b772Sjpinto 				priv->plat->en_tx_lpi_clockgating);
342d765955dSGiuseppe CAVALLARO }
343d765955dSGiuseppe CAVALLARO 
34432ceabcaSGiuseppe CAVALLARO /**
345732fdf0eSGiuseppe CAVALLARO  * stmmac_disable_eee_mode - disable and exit from LPI mode
34632ceabcaSGiuseppe CAVALLARO  * @priv: driver private structure
34732ceabcaSGiuseppe CAVALLARO  * Description: this function is to exit and disable EEE in case of
34832ceabcaSGiuseppe CAVALLARO  * LPI state is true. This is called by the xmit.
34932ceabcaSGiuseppe CAVALLARO  */
350d765955dSGiuseppe CAVALLARO void stmmac_disable_eee_mode(struct stmmac_priv *priv)
351d765955dSGiuseppe CAVALLARO {
352c10d4c82SJose Abreu 	stmmac_reset_eee_mode(priv, priv->hw);
353d765955dSGiuseppe CAVALLARO 	del_timer_sync(&priv->eee_ctrl_timer);
354d765955dSGiuseppe CAVALLARO 	priv->tx_path_in_lpi_mode = false;
355d765955dSGiuseppe CAVALLARO }
356d765955dSGiuseppe CAVALLARO 
357d765955dSGiuseppe CAVALLARO /**
358732fdf0eSGiuseppe CAVALLARO  * stmmac_eee_ctrl_timer - EEE TX SW timer.
359d765955dSGiuseppe CAVALLARO  * @arg : data hook
360d765955dSGiuseppe CAVALLARO  * Description:
36132ceabcaSGiuseppe CAVALLARO  *  if there is no data transfer and if we are not in LPI state,
362d765955dSGiuseppe CAVALLARO  *  then MAC Transmitter can be moved to LPI state.
363d765955dSGiuseppe CAVALLARO  */
364e99e88a9SKees Cook static void stmmac_eee_ctrl_timer(struct timer_list *t)
365d765955dSGiuseppe CAVALLARO {
366e99e88a9SKees Cook 	struct stmmac_priv *priv = from_timer(priv, t, eee_ctrl_timer);
367d765955dSGiuseppe CAVALLARO 
368d765955dSGiuseppe CAVALLARO 	stmmac_enable_eee_mode(priv);
369f5351ef7SGiuseppe CAVALLARO 	mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer));
370d765955dSGiuseppe CAVALLARO }
371d765955dSGiuseppe CAVALLARO 
372d765955dSGiuseppe CAVALLARO /**
373732fdf0eSGiuseppe CAVALLARO  * stmmac_eee_init - init EEE
37432ceabcaSGiuseppe CAVALLARO  * @priv: driver private structure
375d765955dSGiuseppe CAVALLARO  * Description:
376732fdf0eSGiuseppe CAVALLARO  *  if the GMAC supports the EEE (from the HW cap reg) and the phy device
377732fdf0eSGiuseppe CAVALLARO  *  can also manage EEE, this function enable the LPI state and start related
378732fdf0eSGiuseppe CAVALLARO  *  timer.
379d765955dSGiuseppe CAVALLARO  */
380d765955dSGiuseppe CAVALLARO bool stmmac_eee_init(struct stmmac_priv *priv)
381d765955dSGiuseppe CAVALLARO {
382d6d50c7eSPhilippe Reynes 	struct net_device *ndev = priv->dev;
383879626e3SJerome Brunet 	int interface = priv->plat->interface;
384d765955dSGiuseppe CAVALLARO 	bool ret = false;
385d765955dSGiuseppe CAVALLARO 
386879626e3SJerome Brunet 	if ((interface != PHY_INTERFACE_MODE_MII) &&
387879626e3SJerome Brunet 	    (interface != PHY_INTERFACE_MODE_GMII) &&
388879626e3SJerome Brunet 	    !phy_interface_mode_is_rgmii(interface))
389879626e3SJerome Brunet 		goto out;
390879626e3SJerome Brunet 
391f5351ef7SGiuseppe CAVALLARO 	/* Using PCS we cannot dial with the phy registers at this stage
392f5351ef7SGiuseppe CAVALLARO 	 * so we do not support extra feature like EEE.
393f5351ef7SGiuseppe CAVALLARO 	 */
3943fe5cadbSGiuseppe CAVALLARO 	if ((priv->hw->pcs == STMMAC_PCS_RGMII) ||
3953fe5cadbSGiuseppe CAVALLARO 	    (priv->hw->pcs == STMMAC_PCS_TBI) ||
3963fe5cadbSGiuseppe CAVALLARO 	    (priv->hw->pcs == STMMAC_PCS_RTBI))
397f5351ef7SGiuseppe CAVALLARO 		goto out;
398f5351ef7SGiuseppe CAVALLARO 
399d765955dSGiuseppe CAVALLARO 	/* MAC core supports the EEE feature. */
400d765955dSGiuseppe CAVALLARO 	if (priv->dma_cap.eee) {
40183bf79b6SGiuseppe CAVALLARO 		int tx_lpi_timer = priv->tx_lpi_timer;
402d765955dSGiuseppe CAVALLARO 
40383bf79b6SGiuseppe CAVALLARO 		/* Check if the PHY supports EEE */
404d6d50c7eSPhilippe Reynes 		if (phy_init_eee(ndev->phydev, 1)) {
40583bf79b6SGiuseppe CAVALLARO 			/* To manage at run-time if the EEE cannot be supported
40683bf79b6SGiuseppe CAVALLARO 			 * anymore (for example because the lp caps have been
40783bf79b6SGiuseppe CAVALLARO 			 * changed).
40883bf79b6SGiuseppe CAVALLARO 			 * In that case the driver disable own timers.
40983bf79b6SGiuseppe CAVALLARO 			 */
41029555fa3SThierry Reding 			mutex_lock(&priv->lock);
41183bf79b6SGiuseppe CAVALLARO 			if (priv->eee_active) {
41238ddc59dSLABBE Corentin 				netdev_dbg(priv->dev, "disable EEE\n");
41383bf79b6SGiuseppe CAVALLARO 				del_timer_sync(&priv->eee_ctrl_timer);
414c10d4c82SJose Abreu 				stmmac_set_eee_timer(priv, priv->hw, 0,
41583bf79b6SGiuseppe CAVALLARO 						tx_lpi_timer);
41683bf79b6SGiuseppe CAVALLARO 			}
41783bf79b6SGiuseppe CAVALLARO 			priv->eee_active = 0;
41829555fa3SThierry Reding 			mutex_unlock(&priv->lock);
41983bf79b6SGiuseppe CAVALLARO 			goto out;
42083bf79b6SGiuseppe CAVALLARO 		}
42183bf79b6SGiuseppe CAVALLARO 		/* Activate the EEE and start timers */
42229555fa3SThierry Reding 		mutex_lock(&priv->lock);
423f5351ef7SGiuseppe CAVALLARO 		if (!priv->eee_active) {
424d765955dSGiuseppe CAVALLARO 			priv->eee_active = 1;
425e99e88a9SKees Cook 			timer_setup(&priv->eee_ctrl_timer,
426e99e88a9SKees Cook 				    stmmac_eee_ctrl_timer, 0);
427ccb36da1SVaishali Thakkar 			mod_timer(&priv->eee_ctrl_timer,
428ccb36da1SVaishali Thakkar 				  STMMAC_LPI_T(eee_timer));
429d765955dSGiuseppe CAVALLARO 
430c10d4c82SJose Abreu 			stmmac_set_eee_timer(priv, priv->hw,
431c10d4c82SJose Abreu 					STMMAC_DEFAULT_LIT_LS, tx_lpi_timer);
43271965352SGiuseppe CAVALLARO 		}
433f5351ef7SGiuseppe CAVALLARO 		/* Set HW EEE according to the speed */
434c10d4c82SJose Abreu 		stmmac_set_eee_pls(priv, priv->hw, ndev->phydev->link);
435d765955dSGiuseppe CAVALLARO 
436d765955dSGiuseppe CAVALLARO 		ret = true;
43729555fa3SThierry Reding 		mutex_unlock(&priv->lock);
4384741cf9cSGiuseppe CAVALLARO 
43938ddc59dSLABBE Corentin 		netdev_dbg(priv->dev, "Energy-Efficient Ethernet initialized\n");
440d765955dSGiuseppe CAVALLARO 	}
441d765955dSGiuseppe CAVALLARO out:
442d765955dSGiuseppe CAVALLARO 	return ret;
443d765955dSGiuseppe CAVALLARO }
444d765955dSGiuseppe CAVALLARO 
445732fdf0eSGiuseppe CAVALLARO /* stmmac_get_tx_hwtstamp - get HW TX timestamps
44632ceabcaSGiuseppe CAVALLARO  * @priv: driver private structure
447ba1ffd74SGiuseppe CAVALLARO  * @p : descriptor pointer
448891434b1SRayagond Kokatanur  * @skb : the socket buffer
449891434b1SRayagond Kokatanur  * Description :
450891434b1SRayagond Kokatanur  * This function will read timestamp from the descriptor & pass it to stack.
451891434b1SRayagond Kokatanur  * and also perform some sanity checks.
452891434b1SRayagond Kokatanur  */
453891434b1SRayagond Kokatanur static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv,
454ba1ffd74SGiuseppe CAVALLARO 				   struct dma_desc *p, struct sk_buff *skb)
455891434b1SRayagond Kokatanur {
456891434b1SRayagond Kokatanur 	struct skb_shared_hwtstamps shhwtstamp;
457891434b1SRayagond Kokatanur 	u64 ns;
458891434b1SRayagond Kokatanur 
459891434b1SRayagond Kokatanur 	if (!priv->hwts_tx_en)
460891434b1SRayagond Kokatanur 		return;
461891434b1SRayagond Kokatanur 
462ceb69499SGiuseppe CAVALLARO 	/* exit if skb doesn't support hw tstamp */
46375e4364fSdamuzi000 	if (likely(!skb || !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)))
464891434b1SRayagond Kokatanur 		return;
465891434b1SRayagond Kokatanur 
466891434b1SRayagond Kokatanur 	/* check tx tstamp status */
46742de047dSJose Abreu 	if (stmmac_get_tx_timestamp_status(priv, p)) {
468891434b1SRayagond Kokatanur 		/* get the valid tstamp */
46942de047dSJose Abreu 		stmmac_get_timestamp(priv, p, priv->adv_ts, &ns);
470891434b1SRayagond Kokatanur 
471891434b1SRayagond Kokatanur 		memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
472891434b1SRayagond Kokatanur 		shhwtstamp.hwtstamp = ns_to_ktime(ns);
473ba1ffd74SGiuseppe CAVALLARO 
47433d4c482SMario Molitor 		netdev_dbg(priv->dev, "get valid TX hw timestamp %llu\n", ns);
475891434b1SRayagond Kokatanur 		/* pass tstamp to stack */
476891434b1SRayagond Kokatanur 		skb_tstamp_tx(skb, &shhwtstamp);
477ba1ffd74SGiuseppe CAVALLARO 	}
478891434b1SRayagond Kokatanur 
479891434b1SRayagond Kokatanur 	return;
480891434b1SRayagond Kokatanur }
481891434b1SRayagond Kokatanur 
482732fdf0eSGiuseppe CAVALLARO /* stmmac_get_rx_hwtstamp - get HW RX timestamps
48332ceabcaSGiuseppe CAVALLARO  * @priv: driver private structure
484ba1ffd74SGiuseppe CAVALLARO  * @p : descriptor pointer
485ba1ffd74SGiuseppe CAVALLARO  * @np : next descriptor pointer
486891434b1SRayagond Kokatanur  * @skb : the socket buffer
487891434b1SRayagond Kokatanur  * Description :
488891434b1SRayagond Kokatanur  * This function will read received packet's timestamp from the descriptor
489891434b1SRayagond Kokatanur  * and pass it to stack. It also perform some sanity checks.
490891434b1SRayagond Kokatanur  */
491ba1ffd74SGiuseppe CAVALLARO static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p,
492ba1ffd74SGiuseppe CAVALLARO 				   struct dma_desc *np, struct sk_buff *skb)
493891434b1SRayagond Kokatanur {
494891434b1SRayagond Kokatanur 	struct skb_shared_hwtstamps *shhwtstamp = NULL;
49598870943SJose Abreu 	struct dma_desc *desc = p;
496891434b1SRayagond Kokatanur 	u64 ns;
497891434b1SRayagond Kokatanur 
498891434b1SRayagond Kokatanur 	if (!priv->hwts_rx_en)
499891434b1SRayagond Kokatanur 		return;
500ba1ffd74SGiuseppe CAVALLARO 	/* For GMAC4, the valid timestamp is from CTX next desc. */
501ba1ffd74SGiuseppe CAVALLARO 	if (priv->plat->has_gmac4)
50298870943SJose Abreu 		desc = np;
503891434b1SRayagond Kokatanur 
50498870943SJose Abreu 	/* Check if timestamp is available */
50542de047dSJose Abreu 	if (stmmac_get_rx_timestamp_status(priv, p, np, priv->adv_ts)) {
50642de047dSJose Abreu 		stmmac_get_timestamp(priv, desc, priv->adv_ts, &ns);
50733d4c482SMario Molitor 		netdev_dbg(priv->dev, "get valid RX hw timestamp %llu\n", ns);
508891434b1SRayagond Kokatanur 		shhwtstamp = skb_hwtstamps(skb);
509891434b1SRayagond Kokatanur 		memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
510891434b1SRayagond Kokatanur 		shhwtstamp->hwtstamp = ns_to_ktime(ns);
511ba1ffd74SGiuseppe CAVALLARO 	} else  {
51233d4c482SMario Molitor 		netdev_dbg(priv->dev, "cannot get RX hw timestamp\n");
513ba1ffd74SGiuseppe CAVALLARO 	}
514891434b1SRayagond Kokatanur }
515891434b1SRayagond Kokatanur 
516891434b1SRayagond Kokatanur /**
517891434b1SRayagond Kokatanur  *  stmmac_hwtstamp_ioctl - control hardware timestamping.
518891434b1SRayagond Kokatanur  *  @dev: device pointer.
5198d45e42bSLABBE Corentin  *  @ifr: An IOCTL specific structure, that can contain a pointer to
520891434b1SRayagond Kokatanur  *  a proprietary structure used to pass information to the driver.
521891434b1SRayagond Kokatanur  *  Description:
522891434b1SRayagond Kokatanur  *  This function configures the MAC to enable/disable both outgoing(TX)
523891434b1SRayagond Kokatanur  *  and incoming(RX) packets time stamping based on user input.
524891434b1SRayagond Kokatanur  *  Return Value:
525891434b1SRayagond Kokatanur  *  0 on success and an appropriate -ve integer on failure.
526891434b1SRayagond Kokatanur  */
static int stmmac_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	struct hwtstamp_config config;
	struct timespec64 now;
	u64 temp = 0;
	u32 ptp_v2 = 0;
	u32 tstamp_all = 0;
	u32 ptp_over_ipv4_udp = 0;
	u32 ptp_over_ipv6_udp = 0;
	u32 ptp_over_ethernet = 0;
	u32 snap_type_sel = 0;
	u32 ts_master_en = 0;
	u32 ts_event_en = 0;
	u32 value = 0;
	u32 sec_inc;

	/* Bail out early when neither the basic (dma_cap.time_stamp) nor the
	 * advanced (adv_ts) hardware timestamping unit is available.
	 */
	if (!(priv->dma_cap.time_stamp || priv->adv_ts)) {
		netdev_alert(priv->dev, "No support for HW time stamping\n");
		priv->hwts_tx_en = 0;
		priv->hwts_rx_en = 0;

		return -EOPNOTSUPP;
	}

	if (copy_from_user(&config, ifr->ifr_data,
			   sizeof(struct hwtstamp_config)))
		return -EFAULT;

	netdev_dbg(priv->dev, "%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n",
		   __func__, config.flags, config.tx_type, config.rx_filter);

	/* reserved for future extensions */
	if (config.flags)
		return -EINVAL;

	if (config.tx_type != HWTSTAMP_TX_OFF &&
	    config.tx_type != HWTSTAMP_TX_ON)
		return -ERANGE;

	if (priv->adv_ts) {
		/* Advanced timestamping: translate the requested RX filter
		 * into the matching Timestamp Control Register (TCR) bits;
		 * they are OR-ed together into 'value' further below.
		 * config.rx_filter is rewritten to what the HW will really
		 * do, so user space learns about any downgrade.
		 */
		switch (config.rx_filter) {
		case HWTSTAMP_FILTER_NONE:
			/* time stamp no incoming packet at all */
			config.rx_filter = HWTSTAMP_FILTER_NONE;
			break;

		case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
			/* PTP v1, UDP, any kind of event packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
			/* take time stamp for all event messages */
			if (priv->plat->has_gmac4)
				snap_type_sel = PTP_GMAC4_TCR_SNAPTYPSEL_1;
			else
				snap_type_sel = PTP_TCR_SNAPTYPSEL_1;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
			/* PTP v1, UDP, Sync packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
			/* take time stamp for SYNC messages only */
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
			/* PTP v1, UDP, Delay_req packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
			/* take time stamp for Delay_Req messages only */
			ts_master_en = PTP_TCR_TSMSTRENA;
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
			/* PTP v2, UDP, any kind of event packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for all event messages */
			if (priv->plat->has_gmac4)
				snap_type_sel = PTP_GMAC4_TCR_SNAPTYPSEL_1;
			else
				snap_type_sel = PTP_TCR_SNAPTYPSEL_1;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
			/* PTP v2, UDP, Sync packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for SYNC messages only */
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
			/* PTP v2, UDP, Delay_req packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for Delay_Req messages only */
			ts_master_en = PTP_TCR_TSMSTRENA;
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_EVENT:
			/* PTP v2/802.AS1 any layer, any kind of event packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for all event messages */
			if (priv->plat->has_gmac4)
				snap_type_sel = PTP_GMAC4_TCR_SNAPTYPSEL_1;
			else
				snap_type_sel = PTP_TCR_SNAPTYPSEL_1;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			ptp_over_ethernet = PTP_TCR_TSIPENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_SYNC:
			/* PTP v2/802.AS1, any layer, Sync packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for SYNC messages only */
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			ptp_over_ethernet = PTP_TCR_TSIPENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
			/* PTP v2/802.AS1, any layer, Delay_req packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for Delay_Req messages only */
			ts_master_en = PTP_TCR_TSMSTRENA;
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			ptp_over_ethernet = PTP_TCR_TSIPENA;
			break;

		case HWTSTAMP_FILTER_NTP_ALL:
		case HWTSTAMP_FILTER_ALL:
			/* time stamp any incoming packet */
			config.rx_filter = HWTSTAMP_FILTER_ALL;
			tstamp_all = PTP_TCR_TSENALL;
			break;

		default:
			return -ERANGE;
		}
	} else {
		/* Without the advanced unit only two outcomes exist: no RX
		 * timestamping, or (for any other request) PTP v1 over UDP
		 * event filtering is reported back.
		 */
		switch (config.rx_filter) {
		case HWTSTAMP_FILTER_NONE:
			config.rx_filter = HWTSTAMP_FILTER_NONE;
			break;
		default:
			/* PTP v1, UDP, any kind of event packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
			break;
		}
	}
	priv->hwts_rx_en = ((config.rx_filter == HWTSTAMP_FILTER_NONE) ? 0 : 1);
	priv->hwts_tx_en = config.tx_type == HWTSTAMP_TX_ON;

	/* Fully disable HW timestamping when neither direction needs it */
	if (!priv->hwts_tx_en && !priv->hwts_rx_en)
		stmmac_config_hw_tstamping(priv, priv->ptpaddr, 0);
	else {
		value = (PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | PTP_TCR_TSCTRLSSR |
			 tstamp_all | ptp_v2 | ptp_over_ethernet |
			 ptp_over_ipv6_udp | ptp_over_ipv4_udp | ts_event_en |
			 ts_master_en | snap_type_sel);
		stmmac_config_hw_tstamping(priv, priv->ptpaddr, value);

		/* program Sub Second Increment reg */
		stmmac_config_sub_second_increment(priv,
				priv->ptpaddr, priv->plat->clk_ptp_rate,
				priv->plat->has_gmac4, &sec_inc);
		/* temp = sub-second increments per second, i.e. the
		 * freq_div_ratio used in the addend formula below
		 */
		temp = div_u64(1000000000ULL, sec_inc);

		/* Store sub second increment and flags for later use */
		priv->sub_second_inc = sec_inc;
		priv->systime_flags = value;

		/* calculate default added value:
		 * formula is :
		 * addend = (2^32)/freq_div_ratio;
		 * where, freq_div_ratio = 1e9ns/sec_inc
		 */
		temp = (u64)(temp << 32);
		priv->default_addend = div_u64(temp, priv->plat->clk_ptp_rate);
		stmmac_config_addend(priv, priv->ptpaddr, priv->default_addend);

		/* initialize system time */
		ktime_get_real_ts64(&now);

		/* lower 32 bits of tv_sec are safe until y2106 */
		stmmac_init_systime(priv, priv->ptpaddr,
				(u32)now.tv_sec, now.tv_nsec);
	}

	/* Hand the (possibly downgraded) config back to user space */
	return copy_to_user(ifr->ifr_data, &config,
			    sizeof(struct hwtstamp_config)) ? -EFAULT : 0;
}
748891434b1SRayagond Kokatanur 
74932ceabcaSGiuseppe CAVALLARO /**
750732fdf0eSGiuseppe CAVALLARO  * stmmac_init_ptp - init PTP
75132ceabcaSGiuseppe CAVALLARO  * @priv: driver private structure
752732fdf0eSGiuseppe CAVALLARO  * Description: this is to verify if the HW supports the PTPv1 or PTPv2.
75332ceabcaSGiuseppe CAVALLARO  * This is done by looking at the HW cap. register.
754732fdf0eSGiuseppe CAVALLARO  * This function also registers the ptp driver.
75532ceabcaSGiuseppe CAVALLARO  */
75692ba6888SRayagond Kokatanur static int stmmac_init_ptp(struct stmmac_priv *priv)
757891434b1SRayagond Kokatanur {
75892ba6888SRayagond Kokatanur 	if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
75992ba6888SRayagond Kokatanur 		return -EOPNOTSUPP;
76092ba6888SRayagond Kokatanur 
761891434b1SRayagond Kokatanur 	priv->adv_ts = 0;
762be9b3174SGiuseppe CAVALLARO 	/* Check if adv_ts can be enabled for dwmac 4.x core */
763be9b3174SGiuseppe CAVALLARO 	if (priv->plat->has_gmac4 && priv->dma_cap.atime_stamp)
764be9b3174SGiuseppe CAVALLARO 		priv->adv_ts = 1;
765be9b3174SGiuseppe CAVALLARO 	/* Dwmac 3.x core with extend_desc can support adv_ts */
766be9b3174SGiuseppe CAVALLARO 	else if (priv->extend_desc && priv->dma_cap.atime_stamp)
767891434b1SRayagond Kokatanur 		priv->adv_ts = 1;
7687cd01399SVince Bridgers 
769be9b3174SGiuseppe CAVALLARO 	if (priv->dma_cap.time_stamp)
770be9b3174SGiuseppe CAVALLARO 		netdev_info(priv->dev, "IEEE 1588-2002 Timestamp supported\n");
7717cd01399SVince Bridgers 
772be9b3174SGiuseppe CAVALLARO 	if (priv->adv_ts)
773be9b3174SGiuseppe CAVALLARO 		netdev_info(priv->dev,
774be9b3174SGiuseppe CAVALLARO 			    "IEEE 1588-2008 Advanced Timestamp supported\n");
775891434b1SRayagond Kokatanur 
776891434b1SRayagond Kokatanur 	priv->hwts_tx_en = 0;
777891434b1SRayagond Kokatanur 	priv->hwts_rx_en = 0;
77892ba6888SRayagond Kokatanur 
779c30a70d3SGiuseppe CAVALLARO 	stmmac_ptp_register(priv);
780c30a70d3SGiuseppe CAVALLARO 
781c30a70d3SGiuseppe CAVALLARO 	return 0;
78292ba6888SRayagond Kokatanur }
78392ba6888SRayagond Kokatanur 
78492ba6888SRayagond Kokatanur static void stmmac_release_ptp(struct stmmac_priv *priv)
78592ba6888SRayagond Kokatanur {
786f573c0b9Sjpinto 	if (priv->plat->clk_ptp_ref)
787f573c0b9Sjpinto 		clk_disable_unprepare(priv->plat->clk_ptp_ref);
78892ba6888SRayagond Kokatanur 	stmmac_ptp_unregister(priv);
789891434b1SRayagond Kokatanur }
790891434b1SRayagond Kokatanur 
7917ac6653aSJeff Kirsher /**
79229feff39SJoao Pinto  *  stmmac_mac_flow_ctrl - Configure flow control in all queues
79329feff39SJoao Pinto  *  @priv: driver private structure
79429feff39SJoao Pinto  *  Description: It is used for configuring the flow control in all queues
79529feff39SJoao Pinto  */
79629feff39SJoao Pinto static void stmmac_mac_flow_ctrl(struct stmmac_priv *priv, u32 duplex)
79729feff39SJoao Pinto {
79829feff39SJoao Pinto 	u32 tx_cnt = priv->plat->tx_queues_to_use;
79929feff39SJoao Pinto 
800c10d4c82SJose Abreu 	stmmac_flow_ctrl(priv, priv->hw, duplex, priv->flow_ctrl,
80129feff39SJoao Pinto 			priv->pause, tx_cnt);
80229feff39SJoao Pinto }
80329feff39SJoao Pinto 
/**
 * stmmac_adjust_link - adjusts the link parameters
 * @dev: net device structure
 * Description: this is the helper called by the physical abstraction layer
 * drivers to communicate the phy link status. According to the speed and
 * duplex this driver can invoke registered glue-logic as well.
 * It also invokes the eee initialization because it could happen when
 * switching on different networks (that are eee capable).
 */
static void stmmac_adjust_link(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	struct phy_device *phydev = dev->phydev;
	bool new_state = false;

	if (!phydev)
		return;

	/* Serialize updates of the cached link state and MAC_CTRL_REG */
	mutex_lock(&priv->lock);

	if (phydev->link) {
		u32 ctrl = readl(priv->ioaddr + MAC_CTRL_REG);

		/* Now we make sure that we can be in full duplex mode.
		 * If not, we operate in half-duplex mode. */
		if (phydev->duplex != priv->oldduplex) {
			new_state = true;
			if (!phydev->duplex)
				ctrl &= ~priv->hw->link.duplex;
			else
				ctrl |= priv->hw->link.duplex;
			priv->oldduplex = phydev->duplex;
		}
		/* Flow Control operation */
		if (phydev->pause)
			stmmac_mac_flow_ctrl(priv, phydev->duplex);

		/* Reprogram the speed bits only when the PHY reports a
		 * speed different from the cached one.
		 */
		if (phydev->speed != priv->speed) {
			new_state = true;
			ctrl &= ~priv->hw->link.speed_mask;
			switch (phydev->speed) {
			case SPEED_1000:
				ctrl |= priv->hw->link.speed1000;
				break;
			case SPEED_100:
				ctrl |= priv->hw->link.speed100;
				break;
			case SPEED_10:
				ctrl |= priv->hw->link.speed10;
				break;
			default:
				netif_warn(priv, link, priv->dev,
					   "broken speed: %d\n", phydev->speed);
				phydev->speed = SPEED_UNKNOWN;
				break;
			}
			/* Let the platform glue adapt to the new speed, but
			 * only if it is a valid one.
			 */
			if (phydev->speed != SPEED_UNKNOWN)
				stmmac_hw_fix_mac_speed(priv);
			priv->speed = phydev->speed;
		}

		writel(ctrl, priv->ioaddr + MAC_CTRL_REG);

		if (!priv->oldlink) {
			new_state = true;
			priv->oldlink = true;
		}
	} else if (priv->oldlink) {
		/* Link went down: invalidate the cached state so the next
		 * link-up reprograms duplex and speed from scratch.
		 */
		new_state = true;
		priv->oldlink = false;
		priv->speed = SPEED_UNKNOWN;
		priv->oldduplex = DUPLEX_UNKNOWN;
	}

	if (new_state && netif_msg_link(priv))
		phy_print_status(phydev);

	mutex_unlock(&priv->lock);

	if (phydev->is_pseudo_fixed_link)
		/* Stop PHY layer to call the hook to adjust the link in case
		 * of a switch is attached to the stmmac driver.
		 */
		phydev->irq = PHY_IGNORE_INTERRUPT;
	else
		/* At this stage, init the EEE if supported.
		 * Never called in case of fixed_link.
		 */
		priv->eee_enabled = stmmac_eee_init(priv);
}
8947ac6653aSJeff Kirsher 
89532ceabcaSGiuseppe CAVALLARO /**
896732fdf0eSGiuseppe CAVALLARO  * stmmac_check_pcs_mode - verify if RGMII/SGMII is supported
89732ceabcaSGiuseppe CAVALLARO  * @priv: driver private structure
89832ceabcaSGiuseppe CAVALLARO  * Description: this is to verify if the HW supports the PCS.
89932ceabcaSGiuseppe CAVALLARO  * Physical Coding Sublayer (PCS) interface that can be used when the MAC is
90032ceabcaSGiuseppe CAVALLARO  * configured for the TBI, RTBI, or SGMII PHY interface.
90132ceabcaSGiuseppe CAVALLARO  */
902e58bb43fSGiuseppe CAVALLARO static void stmmac_check_pcs_mode(struct stmmac_priv *priv)
903e58bb43fSGiuseppe CAVALLARO {
904e58bb43fSGiuseppe CAVALLARO 	int interface = priv->plat->interface;
905e58bb43fSGiuseppe CAVALLARO 
906e58bb43fSGiuseppe CAVALLARO 	if (priv->dma_cap.pcs) {
9070d909dcdSByungho An 		if ((interface == PHY_INTERFACE_MODE_RGMII) ||
9080d909dcdSByungho An 		    (interface == PHY_INTERFACE_MODE_RGMII_ID) ||
9090d909dcdSByungho An 		    (interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
9100d909dcdSByungho An 		    (interface == PHY_INTERFACE_MODE_RGMII_TXID)) {
91138ddc59dSLABBE Corentin 			netdev_dbg(priv->dev, "PCS RGMII support enabled\n");
9123fe5cadbSGiuseppe CAVALLARO 			priv->hw->pcs = STMMAC_PCS_RGMII;
9130d909dcdSByungho An 		} else if (interface == PHY_INTERFACE_MODE_SGMII) {
91438ddc59dSLABBE Corentin 			netdev_dbg(priv->dev, "PCS SGMII support enabled\n");
9153fe5cadbSGiuseppe CAVALLARO 			priv->hw->pcs = STMMAC_PCS_SGMII;
916e58bb43fSGiuseppe CAVALLARO 		}
917e58bb43fSGiuseppe CAVALLARO 	}
918e58bb43fSGiuseppe CAVALLARO }
919e58bb43fSGiuseppe CAVALLARO 
9207ac6653aSJeff Kirsher /**
9217ac6653aSJeff Kirsher  * stmmac_init_phy - PHY initialization
9227ac6653aSJeff Kirsher  * @dev: net device structure
9237ac6653aSJeff Kirsher  * Description: it initializes the driver's PHY state, and attaches the PHY
9247ac6653aSJeff Kirsher  * to the mac driver.
9257ac6653aSJeff Kirsher  *  Return value:
9267ac6653aSJeff Kirsher  *  0 on success
9277ac6653aSJeff Kirsher  */
9287ac6653aSJeff Kirsher static int stmmac_init_phy(struct net_device *dev)
9297ac6653aSJeff Kirsher {
9307ac6653aSJeff Kirsher 	struct stmmac_priv *priv = netdev_priv(dev);
9317ac6653aSJeff Kirsher 	struct phy_device *phydev;
932d765955dSGiuseppe CAVALLARO 	char phy_id_fmt[MII_BUS_ID_SIZE + 3];
9337ac6653aSJeff Kirsher 	char bus_id[MII_BUS_ID_SIZE];
93479ee1dc3SSrinivas Kandagatla 	int interface = priv->plat->interface;
9359cbadf09SSrinivas Kandagatla 	int max_speed = priv->plat->max_speed;
9364d869b03SLABBE Corentin 	priv->oldlink = false;
937bd00632cSLABBE Corentin 	priv->speed = SPEED_UNKNOWN;
938bd00632cSLABBE Corentin 	priv->oldduplex = DUPLEX_UNKNOWN;
9397ac6653aSJeff Kirsher 
9405790cf3cSMathieu Olivari 	if (priv->plat->phy_node) {
9415790cf3cSMathieu Olivari 		phydev = of_phy_connect(dev, priv->plat->phy_node,
9425790cf3cSMathieu Olivari 					&stmmac_adjust_link, 0, interface);
9435790cf3cSMathieu Olivari 	} else {
944f142af2eSSrinivas Kandagatla 		snprintf(bus_id, MII_BUS_ID_SIZE, "stmmac-%x",
945f142af2eSSrinivas Kandagatla 			 priv->plat->bus_id);
946f142af2eSSrinivas Kandagatla 
947d765955dSGiuseppe CAVALLARO 		snprintf(phy_id_fmt, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, bus_id,
9487ac6653aSJeff Kirsher 			 priv->plat->phy_addr);
949de9a2165SLABBE Corentin 		netdev_dbg(priv->dev, "%s: trying to attach to %s\n", __func__,
9505790cf3cSMathieu Olivari 			   phy_id_fmt);
9517ac6653aSJeff Kirsher 
9525790cf3cSMathieu Olivari 		phydev = phy_connect(dev, phy_id_fmt, &stmmac_adjust_link,
9535790cf3cSMathieu Olivari 				     interface);
9545790cf3cSMathieu Olivari 	}
9557ac6653aSJeff Kirsher 
956dfc50fcaSAlexey Brodkin 	if (IS_ERR_OR_NULL(phydev)) {
95738ddc59dSLABBE Corentin 		netdev_err(priv->dev, "Could not attach to PHY\n");
958dfc50fcaSAlexey Brodkin 		if (!phydev)
959dfc50fcaSAlexey Brodkin 			return -ENODEV;
960dfc50fcaSAlexey Brodkin 
9617ac6653aSJeff Kirsher 		return PTR_ERR(phydev);
9627ac6653aSJeff Kirsher 	}
9637ac6653aSJeff Kirsher 
96479ee1dc3SSrinivas Kandagatla 	/* Stop Advertising 1000BASE Capability if interface is not GMII */
965c5b9b4e4SSrinivas Kandagatla 	if ((interface == PHY_INTERFACE_MODE_MII) ||
9669cbadf09SSrinivas Kandagatla 	    (interface == PHY_INTERFACE_MODE_RMII) ||
9679cbadf09SSrinivas Kandagatla 		(max_speed < 1000 && max_speed > 0))
968c5b9b4e4SSrinivas Kandagatla 		phydev->advertising &= ~(SUPPORTED_1000baseT_Half |
969c5b9b4e4SSrinivas Kandagatla 					 SUPPORTED_1000baseT_Full);
97079ee1dc3SSrinivas Kandagatla 
9717ac6653aSJeff Kirsher 	/*
9727ac6653aSJeff Kirsher 	 * Broken HW is sometimes missing the pull-up resistor on the
9737ac6653aSJeff Kirsher 	 * MDIO line, which results in reads to non-existent devices returning
9747ac6653aSJeff Kirsher 	 * 0 rather than 0xffff. Catch this here and treat 0 as a non-existent
9757ac6653aSJeff Kirsher 	 * device as well.
9767ac6653aSJeff Kirsher 	 * Note: phydev->phy_id is the result of reading the UID PHY registers.
9777ac6653aSJeff Kirsher 	 */
97827732381SMathieu Olivari 	if (!priv->plat->phy_node && phydev->phy_id == 0) {
9797ac6653aSJeff Kirsher 		phy_disconnect(phydev);
9807ac6653aSJeff Kirsher 		return -ENODEV;
9817ac6653aSJeff Kirsher 	}
9828e99fc5fSGiuseppe Cavallaro 
983c51e424dSFlorian Fainelli 	/* stmmac_adjust_link will change this to PHY_IGNORE_INTERRUPT to avoid
984c51e424dSFlorian Fainelli 	 * subsequent PHY polling, make sure we force a link transition if
985c51e424dSFlorian Fainelli 	 * we have a UP/DOWN/UP transition
986c51e424dSFlorian Fainelli 	 */
987c51e424dSFlorian Fainelli 	if (phydev->is_pseudo_fixed_link)
988c51e424dSFlorian Fainelli 		phydev->irq = PHY_POLL;
989c51e424dSFlorian Fainelli 
990b05c76a1SLABBE Corentin 	phy_attached_info(phydev);
9917ac6653aSJeff Kirsher 	return 0;
9927ac6653aSJeff Kirsher }
9937ac6653aSJeff Kirsher 
99471fedb01SJoao Pinto static void stmmac_display_rx_rings(struct stmmac_priv *priv)
995c24602efSGiuseppe CAVALLARO {
99654139cf3SJoao Pinto 	u32 rx_cnt = priv->plat->rx_queues_to_use;
99771fedb01SJoao Pinto 	void *head_rx;
99854139cf3SJoao Pinto 	u32 queue;
99954139cf3SJoao Pinto 
100054139cf3SJoao Pinto 	/* Display RX rings */
100154139cf3SJoao Pinto 	for (queue = 0; queue < rx_cnt; queue++) {
100254139cf3SJoao Pinto 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
100354139cf3SJoao Pinto 
100454139cf3SJoao Pinto 		pr_info("\tRX Queue %u rings\n", queue);
1005d0225e7dSAlexandre TORGUE 
100671fedb01SJoao Pinto 		if (priv->extend_desc)
100754139cf3SJoao Pinto 			head_rx = (void *)rx_q->dma_erx;
100871fedb01SJoao Pinto 		else
100954139cf3SJoao Pinto 			head_rx = (void *)rx_q->dma_rx;
101071fedb01SJoao Pinto 
101171fedb01SJoao Pinto 		/* Display RX ring */
101242de047dSJose Abreu 		stmmac_display_ring(priv, head_rx, DMA_RX_SIZE, true);
10135bacd778SLABBE Corentin 	}
101454139cf3SJoao Pinto }
1015d0225e7dSAlexandre TORGUE 
101671fedb01SJoao Pinto static void stmmac_display_tx_rings(struct stmmac_priv *priv)
101771fedb01SJoao Pinto {
1018ce736788SJoao Pinto 	u32 tx_cnt = priv->plat->tx_queues_to_use;
101971fedb01SJoao Pinto 	void *head_tx;
1020ce736788SJoao Pinto 	u32 queue;
1021ce736788SJoao Pinto 
1022ce736788SJoao Pinto 	/* Display TX rings */
1023ce736788SJoao Pinto 	for (queue = 0; queue < tx_cnt; queue++) {
1024ce736788SJoao Pinto 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1025ce736788SJoao Pinto 
1026ce736788SJoao Pinto 		pr_info("\tTX Queue %d rings\n", queue);
102771fedb01SJoao Pinto 
102871fedb01SJoao Pinto 		if (priv->extend_desc)
1029ce736788SJoao Pinto 			head_tx = (void *)tx_q->dma_etx;
103071fedb01SJoao Pinto 		else
1031ce736788SJoao Pinto 			head_tx = (void *)tx_q->dma_tx;
103271fedb01SJoao Pinto 
103342de047dSJose Abreu 		stmmac_display_ring(priv, head_tx, DMA_TX_SIZE, false);
1034c24602efSGiuseppe CAVALLARO 	}
1035ce736788SJoao Pinto }
1036c24602efSGiuseppe CAVALLARO 
/* Dump all RX rings first, then all TX rings */
static void stmmac_display_rings(struct stmmac_priv *priv)
{
	stmmac_display_rx_rings(priv);
	stmmac_display_tx_rings(priv);
}
104571fedb01SJoao Pinto 
1046286a8372SGiuseppe CAVALLARO static int stmmac_set_bfsize(int mtu, int bufsize)
1047286a8372SGiuseppe CAVALLARO {
1048286a8372SGiuseppe CAVALLARO 	int ret = bufsize;
1049286a8372SGiuseppe CAVALLARO 
1050286a8372SGiuseppe CAVALLARO 	if (mtu >= BUF_SIZE_4KiB)
1051286a8372SGiuseppe CAVALLARO 		ret = BUF_SIZE_8KiB;
1052286a8372SGiuseppe CAVALLARO 	else if (mtu >= BUF_SIZE_2KiB)
1053286a8372SGiuseppe CAVALLARO 		ret = BUF_SIZE_4KiB;
1054d916701cSGiuseppe CAVALLARO 	else if (mtu > DEFAULT_BUFSIZE)
1055286a8372SGiuseppe CAVALLARO 		ret = BUF_SIZE_2KiB;
1056286a8372SGiuseppe CAVALLARO 	else
1057d916701cSGiuseppe CAVALLARO 		ret = DEFAULT_BUFSIZE;
1058286a8372SGiuseppe CAVALLARO 
1059286a8372SGiuseppe CAVALLARO 	return ret;
1060286a8372SGiuseppe CAVALLARO }
1061286a8372SGiuseppe CAVALLARO 
106232ceabcaSGiuseppe CAVALLARO /**
106371fedb01SJoao Pinto  * stmmac_clear_rx_descriptors - clear RX descriptors
106432ceabcaSGiuseppe CAVALLARO  * @priv: driver private structure
106554139cf3SJoao Pinto  * @queue: RX queue index
106671fedb01SJoao Pinto  * Description: this function is called to clear the RX descriptors
106732ceabcaSGiuseppe CAVALLARO  * in case of both basic and extended descriptors are used.
106832ceabcaSGiuseppe CAVALLARO  */
106954139cf3SJoao Pinto static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv, u32 queue)
1070c24602efSGiuseppe CAVALLARO {
107154139cf3SJoao Pinto 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
10725bacd778SLABBE Corentin 	int i;
1073c24602efSGiuseppe CAVALLARO 
107471fedb01SJoao Pinto 	/* Clear the RX descriptors */
10755bacd778SLABBE Corentin 	for (i = 0; i < DMA_RX_SIZE; i++)
10765bacd778SLABBE Corentin 		if (priv->extend_desc)
107742de047dSJose Abreu 			stmmac_init_rx_desc(priv, &rx_q->dma_erx[i].basic,
10785bacd778SLABBE Corentin 					priv->use_riwt, priv->mode,
10795bacd778SLABBE Corentin 					(i == DMA_RX_SIZE - 1));
10805bacd778SLABBE Corentin 		else
108142de047dSJose Abreu 			stmmac_init_rx_desc(priv, &rx_q->dma_rx[i],
10825bacd778SLABBE Corentin 					priv->use_riwt, priv->mode,
10835bacd778SLABBE Corentin 					(i == DMA_RX_SIZE - 1));
108471fedb01SJoao Pinto }
108571fedb01SJoao Pinto 
108671fedb01SJoao Pinto /**
108771fedb01SJoao Pinto  * stmmac_clear_tx_descriptors - clear tx descriptors
108871fedb01SJoao Pinto  * @priv: driver private structure
1089ce736788SJoao Pinto  * @queue: TX queue index.
109071fedb01SJoao Pinto  * Description: this function is called to clear the TX descriptors
109171fedb01SJoao Pinto  * in case of both basic and extended descriptors are used.
109271fedb01SJoao Pinto  */
1093ce736788SJoao Pinto static void stmmac_clear_tx_descriptors(struct stmmac_priv *priv, u32 queue)
109471fedb01SJoao Pinto {
1095ce736788SJoao Pinto 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
109671fedb01SJoao Pinto 	int i;
109771fedb01SJoao Pinto 
109871fedb01SJoao Pinto 	/* Clear the TX descriptors */
10995bacd778SLABBE Corentin 	for (i = 0; i < DMA_TX_SIZE; i++)
11005bacd778SLABBE Corentin 		if (priv->extend_desc)
110142de047dSJose Abreu 			stmmac_init_tx_desc(priv, &tx_q->dma_etx[i].basic,
110242de047dSJose Abreu 					priv->mode, (i == DMA_TX_SIZE - 1));
11035bacd778SLABBE Corentin 		else
110442de047dSJose Abreu 			stmmac_init_tx_desc(priv, &tx_q->dma_tx[i],
110542de047dSJose Abreu 					priv->mode, (i == DMA_TX_SIZE - 1));
1106c24602efSGiuseppe CAVALLARO }
1107c24602efSGiuseppe CAVALLARO 
1108732fdf0eSGiuseppe CAVALLARO /**
110971fedb01SJoao Pinto  * stmmac_clear_descriptors - clear descriptors
111071fedb01SJoao Pinto  * @priv: driver private structure
111171fedb01SJoao Pinto  * Description: this function is called to clear the TX and RX descriptors
111271fedb01SJoao Pinto  * in case of both basic and extended descriptors are used.
111371fedb01SJoao Pinto  */
111471fedb01SJoao Pinto static void stmmac_clear_descriptors(struct stmmac_priv *priv)
111571fedb01SJoao Pinto {
111654139cf3SJoao Pinto 	u32 rx_queue_cnt = priv->plat->rx_queues_to_use;
1117ce736788SJoao Pinto 	u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
111854139cf3SJoao Pinto 	u32 queue;
111954139cf3SJoao Pinto 
112071fedb01SJoao Pinto 	/* Clear the RX descriptors */
112154139cf3SJoao Pinto 	for (queue = 0; queue < rx_queue_cnt; queue++)
112254139cf3SJoao Pinto 		stmmac_clear_rx_descriptors(priv, queue);
112371fedb01SJoao Pinto 
112471fedb01SJoao Pinto 	/* Clear the TX descriptors */
1125ce736788SJoao Pinto 	for (queue = 0; queue < tx_queue_cnt; queue++)
1126ce736788SJoao Pinto 		stmmac_clear_tx_descriptors(priv, queue);
112771fedb01SJoao Pinto }
112871fedb01SJoao Pinto 
/**
 * stmmac_init_rx_buffers - init the RX descriptor buffer.
 * @priv: driver private structure
 * @p: descriptor pointer
 * @i: descriptor index
 * @flags: gfp flag
 * @queue: RX queue index
 * Description: this function is called to allocate a receive buffer, perform
 * the DMA mapping and init the descriptor.
 * Return: 0 on success, -ENOMEM if the skb allocation fails, -EINVAL if the
 * DMA mapping fails (the skb is freed before returning).
 */
static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
				  int i, gfp_t flags, u32 queue)
{
	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
	struct sk_buff *skb;

	/* Allocate an IP-aligned skb big enough for one RX DMA buffer */
	skb = __netdev_alloc_skb_ip_align(priv->dev, priv->dma_buf_sz, flags);
	if (!skb) {
		netdev_err(priv->dev,
			   "%s: Rx init fails; skb is NULL\n", __func__);
		return -ENOMEM;
	}
	rx_q->rx_skbuff[i] = skb;
	rx_q->rx_skbuff_dma[i] = dma_map_single(priv->device, skb->data,
						priv->dma_buf_sz,
						DMA_FROM_DEVICE);
	if (dma_mapping_error(priv->device, rx_q->rx_skbuff_dma[i])) {
		/* NOTE(review): rx_skbuff[i] still points at the freed skb
		 * here; the caller's unwind only frees indexes < i, so it is
		 * not double-freed on this path.
		 */
		netdev_err(priv->dev, "%s: DMA mapping error\n", __func__);
		dev_kfree_skb_any(skb);
		return -EINVAL;
	}

	/* Program the mapped buffer address into the descriptor */
	stmmac_set_desc_addr(priv, p, rx_q->rx_skbuff_dma[i]);

	/* 16KiB buffers need the extra desc3 setup */
	if (priv->dma_buf_sz == BUF_SIZE_16KiB)
		stmmac_init_desc3(priv, p);

	return 0;
}
1168c24602efSGiuseppe CAVALLARO 
116971fedb01SJoao Pinto /**
117071fedb01SJoao Pinto  * stmmac_free_rx_buffer - free RX dma buffers
117171fedb01SJoao Pinto  * @priv: private structure
117254139cf3SJoao Pinto  * @queue: RX queue index
117371fedb01SJoao Pinto  * @i: buffer index.
117471fedb01SJoao Pinto  */
117554139cf3SJoao Pinto static void stmmac_free_rx_buffer(struct stmmac_priv *priv, u32 queue, int i)
117656329137SBartlomiej Zolnierkiewicz {
117754139cf3SJoao Pinto 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
117854139cf3SJoao Pinto 
117954139cf3SJoao Pinto 	if (rx_q->rx_skbuff[i]) {
118054139cf3SJoao Pinto 		dma_unmap_single(priv->device, rx_q->rx_skbuff_dma[i],
118156329137SBartlomiej Zolnierkiewicz 				 priv->dma_buf_sz, DMA_FROM_DEVICE);
118254139cf3SJoao Pinto 		dev_kfree_skb_any(rx_q->rx_skbuff[i]);
118356329137SBartlomiej Zolnierkiewicz 	}
118454139cf3SJoao Pinto 	rx_q->rx_skbuff[i] = NULL;
118556329137SBartlomiej Zolnierkiewicz }
118656329137SBartlomiej Zolnierkiewicz 
/**
 * stmmac_free_tx_buffer - free TX dma buffers
 * @priv: private structure
 * @queue: TX queue index
 * @i: buffer index.
 */
static void stmmac_free_tx_buffer(struct stmmac_priv *priv, u32 queue, int i)
{
	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];

	if (tx_q->tx_skbuff_dma[i].buf) {
		/* Unmap with the same API that created the mapping, as
		 * recorded in map_as_page.
		 */
		if (tx_q->tx_skbuff_dma[i].map_as_page)
			dma_unmap_page(priv->device,
				       tx_q->tx_skbuff_dma[i].buf,
				       tx_q->tx_skbuff_dma[i].len,
				       DMA_TO_DEVICE);
		else
			dma_unmap_single(priv->device,
					 tx_q->tx_skbuff_dma[i].buf,
					 tx_q->tx_skbuff_dma[i].len,
					 DMA_TO_DEVICE);
	}

	if (tx_q->tx_skbuff[i]) {
		dev_kfree_skb_any(tx_q->tx_skbuff[i]);
		tx_q->tx_skbuff[i] = NULL;
		tx_q->tx_skbuff_dma[i].buf = 0;
		tx_q->tx_skbuff_dma[i].map_as_page = false;
	}
}
121771fedb01SJoao Pinto 
/**
 * init_dma_rx_desc_rings - init the RX descriptor rings
 * @dev: net device structure
 * @flags: gfp flag.
 * Description: this function initializes the DMA RX descriptors
 * and allocates the socket buffers. It supports the chained and ring
 * modes.
 * Return: 0 on success, a negative errno from stmmac_init_rx_buffers()
 * on failure; on failure every buffer allocated so far is freed again.
 */
static int init_dma_rx_desc_rings(struct net_device *dev, gfp_t flags)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	u32 rx_count = priv->plat->rx_queues_to_use;
	int ret = -ENOMEM;
	int bfsize = 0;
	int queue;
	int i;

	/* Choose the RX buffer size: a negative result from the 16KiB
	 * helper is treated as "not applicable", then fall back to an
	 * MTU-based size.
	 */
	bfsize = stmmac_set_16kib_bfsize(priv, dev->mtu);
	if (bfsize < 0)
		bfsize = 0;

	if (bfsize < BUF_SIZE_16KiB)
		bfsize = stmmac_set_bfsize(dev->mtu, priv->dma_buf_sz);

	priv->dma_buf_sz = bfsize;

	/* RX INITIALIZATION */
	netif_dbg(priv, probe, priv->dev,
		  "SKB addresses:\nskb\t\tskb data\tdma data\n");

	for (queue = 0; queue < rx_count; queue++) {
		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];

		netif_dbg(priv, probe, priv->dev,
			  "(%s) dma_rx_phy=0x%08x\n", __func__,
			  (u32)rx_q->dma_rx_phy);

		/* Allocate and DMA-map one skb per descriptor of this ring */
		for (i = 0; i < DMA_RX_SIZE; i++) {
			struct dma_desc *p;

			if (priv->extend_desc)
				p = &((rx_q->dma_erx + i)->basic);
			else
				p = rx_q->dma_rx + i;

			ret = stmmac_init_rx_buffers(priv, p, i, flags,
						     queue);
			if (ret)
				goto err_init_rx_buffers;

			netif_dbg(priv, probe, priv->dev, "[%p]\t[%p]\t[%x]\n",
				  rx_q->rx_skbuff[i], rx_q->rx_skbuff[i]->data,
				  (unsigned int)rx_q->rx_skbuff_dma[i]);
		}

		/* i == DMA_RX_SIZE after the loop, so dirty_rx starts at 0 */
		rx_q->cur_rx = 0;
		rx_q->dirty_rx = (unsigned int)(i - DMA_RX_SIZE);

		stmmac_clear_rx_descriptors(priv, queue);

		/* Setup the chained descriptor addresses */
		if (priv->mode == STMMAC_CHAIN_MODE) {
			if (priv->extend_desc)
				stmmac_mode_init(priv, rx_q->dma_erx,
						rx_q->dma_rx_phy, DMA_RX_SIZE, 1);
			else
				stmmac_mode_init(priv, rx_q->dma_rx,
						rx_q->dma_rx_phy, DMA_RX_SIZE, 0);
		}
	}

	/* Publish the chosen size in the file-scope buf_sz */
	buf_sz = bfsize;

	return 0;

err_init_rx_buffers:
	/* Unwind: free buffers [0, i) of the failing queue, then all
	 * DMA_RX_SIZE buffers of each previously completed queue.
	 */
	while (queue >= 0) {
		while (--i >= 0)
			stmmac_free_rx_buffer(priv, queue, i);

		if (queue == 0)
			break;

		i = DMA_RX_SIZE;
		queue--;
	}

	return ret;
}
130771fedb01SJoao Pinto 
130871fedb01SJoao Pinto /**
130971fedb01SJoao Pinto  * init_dma_tx_desc_rings - init the TX descriptor rings
131071fedb01SJoao Pinto  * @dev: net device structure.
131171fedb01SJoao Pinto  * Description: this function initializes the DMA TX descriptors
131271fedb01SJoao Pinto  * and allocates the socket buffers. It supports the chained and ring
131371fedb01SJoao Pinto  * modes.
131471fedb01SJoao Pinto  */
131571fedb01SJoao Pinto static int init_dma_tx_desc_rings(struct net_device *dev)
131671fedb01SJoao Pinto {
131771fedb01SJoao Pinto 	struct stmmac_priv *priv = netdev_priv(dev);
1318ce736788SJoao Pinto 	u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1319ce736788SJoao Pinto 	u32 queue;
132071fedb01SJoao Pinto 	int i;
132171fedb01SJoao Pinto 
1322ce736788SJoao Pinto 	for (queue = 0; queue < tx_queue_cnt; queue++) {
1323ce736788SJoao Pinto 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1324ce736788SJoao Pinto 
132571fedb01SJoao Pinto 		netif_dbg(priv, probe, priv->dev,
1326ce736788SJoao Pinto 			  "(%s) dma_tx_phy=0x%08x\n", __func__,
1327ce736788SJoao Pinto 			 (u32)tx_q->dma_tx_phy);
132871fedb01SJoao Pinto 
132971fedb01SJoao Pinto 		/* Setup the chained descriptor addresses */
133071fedb01SJoao Pinto 		if (priv->mode == STMMAC_CHAIN_MODE) {
133171fedb01SJoao Pinto 			if (priv->extend_desc)
13322c520b1cSJose Abreu 				stmmac_mode_init(priv, tx_q->dma_etx,
13332c520b1cSJose Abreu 						tx_q->dma_tx_phy, DMA_TX_SIZE, 1);
133471fedb01SJoao Pinto 			else
13352c520b1cSJose Abreu 				stmmac_mode_init(priv, tx_q->dma_tx,
13362c520b1cSJose Abreu 						tx_q->dma_tx_phy, DMA_TX_SIZE, 0);
1337c24602efSGiuseppe CAVALLARO 		}
1338286a8372SGiuseppe CAVALLARO 
1339e3ad57c9SGiuseppe Cavallaro 		for (i = 0; i < DMA_TX_SIZE; i++) {
1340c24602efSGiuseppe CAVALLARO 			struct dma_desc *p;
1341c24602efSGiuseppe CAVALLARO 			if (priv->extend_desc)
1342ce736788SJoao Pinto 				p = &((tx_q->dma_etx + i)->basic);
1343c24602efSGiuseppe CAVALLARO 			else
1344ce736788SJoao Pinto 				p = tx_q->dma_tx + i;
1345f748be53SAlexandre TORGUE 
134644c67f85SJose Abreu 			stmmac_clear_desc(priv, p);
1347f748be53SAlexandre TORGUE 
1348ce736788SJoao Pinto 			tx_q->tx_skbuff_dma[i].buf = 0;
1349ce736788SJoao Pinto 			tx_q->tx_skbuff_dma[i].map_as_page = false;
1350ce736788SJoao Pinto 			tx_q->tx_skbuff_dma[i].len = 0;
1351ce736788SJoao Pinto 			tx_q->tx_skbuff_dma[i].last_segment = false;
1352ce736788SJoao Pinto 			tx_q->tx_skbuff[i] = NULL;
13534a7d666aSGiuseppe CAVALLARO 		}
1354c24602efSGiuseppe CAVALLARO 
1355ce736788SJoao Pinto 		tx_q->dirty_tx = 0;
1356ce736788SJoao Pinto 		tx_q->cur_tx = 0;
13578d212a9eSNiklas Cassel 		tx_q->mss = 0;
1358ce736788SJoao Pinto 
1359c22a3f48SJoao Pinto 		netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue));
1360c22a3f48SJoao Pinto 	}
13617ac6653aSJeff Kirsher 
136271fedb01SJoao Pinto 	return 0;
136371fedb01SJoao Pinto }
136471fedb01SJoao Pinto 
136571fedb01SJoao Pinto /**
136671fedb01SJoao Pinto  * init_dma_desc_rings - init the RX/TX descriptor rings
136771fedb01SJoao Pinto  * @dev: net device structure
136871fedb01SJoao Pinto  * @flags: gfp flag.
136971fedb01SJoao Pinto  * Description: this function initializes the DMA RX/TX descriptors
137071fedb01SJoao Pinto  * and allocates the socket buffers. It supports the chained and ring
137171fedb01SJoao Pinto  * modes.
137271fedb01SJoao Pinto  */
137371fedb01SJoao Pinto static int init_dma_desc_rings(struct net_device *dev, gfp_t flags)
137471fedb01SJoao Pinto {
137571fedb01SJoao Pinto 	struct stmmac_priv *priv = netdev_priv(dev);
137671fedb01SJoao Pinto 	int ret;
137771fedb01SJoao Pinto 
137871fedb01SJoao Pinto 	ret = init_dma_rx_desc_rings(dev, flags);
137971fedb01SJoao Pinto 	if (ret)
138071fedb01SJoao Pinto 		return ret;
138171fedb01SJoao Pinto 
138271fedb01SJoao Pinto 	ret = init_dma_tx_desc_rings(dev);
138371fedb01SJoao Pinto 
13845bacd778SLABBE Corentin 	stmmac_clear_descriptors(priv);
13857ac6653aSJeff Kirsher 
1386c24602efSGiuseppe CAVALLARO 	if (netif_msg_hw(priv))
1387c24602efSGiuseppe CAVALLARO 		stmmac_display_rings(priv);
138856329137SBartlomiej Zolnierkiewicz 
138956329137SBartlomiej Zolnierkiewicz 	return ret;
13907ac6653aSJeff Kirsher }
13917ac6653aSJeff Kirsher 
139271fedb01SJoao Pinto /**
139371fedb01SJoao Pinto  * dma_free_rx_skbufs - free RX dma buffers
139471fedb01SJoao Pinto  * @priv: private structure
139554139cf3SJoao Pinto  * @queue: RX queue index
139671fedb01SJoao Pinto  */
139754139cf3SJoao Pinto static void dma_free_rx_skbufs(struct stmmac_priv *priv, u32 queue)
13987ac6653aSJeff Kirsher {
13997ac6653aSJeff Kirsher 	int i;
14007ac6653aSJeff Kirsher 
1401e3ad57c9SGiuseppe Cavallaro 	for (i = 0; i < DMA_RX_SIZE; i++)
140254139cf3SJoao Pinto 		stmmac_free_rx_buffer(priv, queue, i);
14037ac6653aSJeff Kirsher }
14047ac6653aSJeff Kirsher 
140571fedb01SJoao Pinto /**
140671fedb01SJoao Pinto  * dma_free_tx_skbufs - free TX dma buffers
140771fedb01SJoao Pinto  * @priv: private structure
1408ce736788SJoao Pinto  * @queue: TX queue index
140971fedb01SJoao Pinto  */
1410ce736788SJoao Pinto static void dma_free_tx_skbufs(struct stmmac_priv *priv, u32 queue)
14117ac6653aSJeff Kirsher {
14127ac6653aSJeff Kirsher 	int i;
14137ac6653aSJeff Kirsher 
141471fedb01SJoao Pinto 	for (i = 0; i < DMA_TX_SIZE; i++)
1415ce736788SJoao Pinto 		stmmac_free_tx_buffer(priv, queue, i);
14167ac6653aSJeff Kirsher }
14177ac6653aSJeff Kirsher 
1418732fdf0eSGiuseppe CAVALLARO /**
141954139cf3SJoao Pinto  * free_dma_rx_desc_resources - free RX dma desc resources
142054139cf3SJoao Pinto  * @priv: private structure
142154139cf3SJoao Pinto  */
142254139cf3SJoao Pinto static void free_dma_rx_desc_resources(struct stmmac_priv *priv)
142354139cf3SJoao Pinto {
142454139cf3SJoao Pinto 	u32 rx_count = priv->plat->rx_queues_to_use;
142554139cf3SJoao Pinto 	u32 queue;
142654139cf3SJoao Pinto 
142754139cf3SJoao Pinto 	/* Free RX queue resources */
142854139cf3SJoao Pinto 	for (queue = 0; queue < rx_count; queue++) {
142954139cf3SJoao Pinto 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
143054139cf3SJoao Pinto 
143154139cf3SJoao Pinto 		/* Release the DMA RX socket buffers */
143254139cf3SJoao Pinto 		dma_free_rx_skbufs(priv, queue);
143354139cf3SJoao Pinto 
143454139cf3SJoao Pinto 		/* Free DMA regions of consistent memory previously allocated */
143554139cf3SJoao Pinto 		if (!priv->extend_desc)
143654139cf3SJoao Pinto 			dma_free_coherent(priv->device,
143754139cf3SJoao Pinto 					  DMA_RX_SIZE * sizeof(struct dma_desc),
143854139cf3SJoao Pinto 					  rx_q->dma_rx, rx_q->dma_rx_phy);
143954139cf3SJoao Pinto 		else
144054139cf3SJoao Pinto 			dma_free_coherent(priv->device, DMA_RX_SIZE *
144154139cf3SJoao Pinto 					  sizeof(struct dma_extended_desc),
144254139cf3SJoao Pinto 					  rx_q->dma_erx, rx_q->dma_rx_phy);
144354139cf3SJoao Pinto 
144454139cf3SJoao Pinto 		kfree(rx_q->rx_skbuff_dma);
144554139cf3SJoao Pinto 		kfree(rx_q->rx_skbuff);
144654139cf3SJoao Pinto 	}
144754139cf3SJoao Pinto }
144854139cf3SJoao Pinto 
144954139cf3SJoao Pinto /**
1450ce736788SJoao Pinto  * free_dma_tx_desc_resources - free TX dma desc resources
1451ce736788SJoao Pinto  * @priv: private structure
1452ce736788SJoao Pinto  */
1453ce736788SJoao Pinto static void free_dma_tx_desc_resources(struct stmmac_priv *priv)
1454ce736788SJoao Pinto {
1455ce736788SJoao Pinto 	u32 tx_count = priv->plat->tx_queues_to_use;
145662242260SChristophe Jaillet 	u32 queue;
1457ce736788SJoao Pinto 
1458ce736788SJoao Pinto 	/* Free TX queue resources */
1459ce736788SJoao Pinto 	for (queue = 0; queue < tx_count; queue++) {
1460ce736788SJoao Pinto 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1461ce736788SJoao Pinto 
1462ce736788SJoao Pinto 		/* Release the DMA TX socket buffers */
1463ce736788SJoao Pinto 		dma_free_tx_skbufs(priv, queue);
1464ce736788SJoao Pinto 
1465ce736788SJoao Pinto 		/* Free DMA regions of consistent memory previously allocated */
1466ce736788SJoao Pinto 		if (!priv->extend_desc)
1467ce736788SJoao Pinto 			dma_free_coherent(priv->device,
1468ce736788SJoao Pinto 					  DMA_TX_SIZE * sizeof(struct dma_desc),
1469ce736788SJoao Pinto 					  tx_q->dma_tx, tx_q->dma_tx_phy);
1470ce736788SJoao Pinto 		else
1471ce736788SJoao Pinto 			dma_free_coherent(priv->device, DMA_TX_SIZE *
1472ce736788SJoao Pinto 					  sizeof(struct dma_extended_desc),
1473ce736788SJoao Pinto 					  tx_q->dma_etx, tx_q->dma_tx_phy);
1474ce736788SJoao Pinto 
1475ce736788SJoao Pinto 		kfree(tx_q->tx_skbuff_dma);
1476ce736788SJoao Pinto 		kfree(tx_q->tx_skbuff);
1477ce736788SJoao Pinto 	}
1478ce736788SJoao Pinto }
1479ce736788SJoao Pinto 
1480ce736788SJoao Pinto /**
148171fedb01SJoao Pinto  * alloc_dma_rx_desc_resources - alloc RX resources.
1482732fdf0eSGiuseppe CAVALLARO  * @priv: private structure
1483732fdf0eSGiuseppe CAVALLARO  * Description: according to which descriptor can be used (extend or basic)
1484732fdf0eSGiuseppe CAVALLARO  * this function allocates the resources for TX and RX paths. In case of
1485732fdf0eSGiuseppe CAVALLARO  * reception, for example, it pre-allocated the RX socket buffer in order to
1486732fdf0eSGiuseppe CAVALLARO  * allow zero-copy mechanism.
1487732fdf0eSGiuseppe CAVALLARO  */
148871fedb01SJoao Pinto static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv)
148909f8d696SSrinivas Kandagatla {
149054139cf3SJoao Pinto 	u32 rx_count = priv->plat->rx_queues_to_use;
14915bacd778SLABBE Corentin 	int ret = -ENOMEM;
149254139cf3SJoao Pinto 	u32 queue;
149309f8d696SSrinivas Kandagatla 
149454139cf3SJoao Pinto 	/* RX queues buffers and DMA */
149554139cf3SJoao Pinto 	for (queue = 0; queue < rx_count; queue++) {
149654139cf3SJoao Pinto 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
149754139cf3SJoao Pinto 
149854139cf3SJoao Pinto 		rx_q->queue_index = queue;
149954139cf3SJoao Pinto 		rx_q->priv_data = priv;
150054139cf3SJoao Pinto 
150154139cf3SJoao Pinto 		rx_q->rx_skbuff_dma = kmalloc_array(DMA_RX_SIZE,
150254139cf3SJoao Pinto 						    sizeof(dma_addr_t),
15035bacd778SLABBE Corentin 						    GFP_KERNEL);
150454139cf3SJoao Pinto 		if (!rx_q->rx_skbuff_dma)
150563c3aa6bSChristophe Jaillet 			goto err_dma;
15065bacd778SLABBE Corentin 
150754139cf3SJoao Pinto 		rx_q->rx_skbuff = kmalloc_array(DMA_RX_SIZE,
150854139cf3SJoao Pinto 						sizeof(struct sk_buff *),
15095bacd778SLABBE Corentin 						GFP_KERNEL);
151054139cf3SJoao Pinto 		if (!rx_q->rx_skbuff)
151154139cf3SJoao Pinto 			goto err_dma;
15125bacd778SLABBE Corentin 
15135bacd778SLABBE Corentin 		if (priv->extend_desc) {
151454139cf3SJoao Pinto 			rx_q->dma_erx = dma_zalloc_coherent(priv->device,
151554139cf3SJoao Pinto 							    DMA_RX_SIZE *
15165bacd778SLABBE Corentin 							    sizeof(struct
15175bacd778SLABBE Corentin 							    dma_extended_desc),
151854139cf3SJoao Pinto 							    &rx_q->dma_rx_phy,
15195bacd778SLABBE Corentin 							    GFP_KERNEL);
152054139cf3SJoao Pinto 			if (!rx_q->dma_erx)
15215bacd778SLABBE Corentin 				goto err_dma;
15225bacd778SLABBE Corentin 
152371fedb01SJoao Pinto 		} else {
152454139cf3SJoao Pinto 			rx_q->dma_rx = dma_zalloc_coherent(priv->device,
152554139cf3SJoao Pinto 							   DMA_RX_SIZE *
152654139cf3SJoao Pinto 							   sizeof(struct
152754139cf3SJoao Pinto 							   dma_desc),
152854139cf3SJoao Pinto 							   &rx_q->dma_rx_phy,
152971fedb01SJoao Pinto 							   GFP_KERNEL);
153054139cf3SJoao Pinto 			if (!rx_q->dma_rx)
153171fedb01SJoao Pinto 				goto err_dma;
153271fedb01SJoao Pinto 		}
153354139cf3SJoao Pinto 	}
153471fedb01SJoao Pinto 
153571fedb01SJoao Pinto 	return 0;
153671fedb01SJoao Pinto 
153771fedb01SJoao Pinto err_dma:
153854139cf3SJoao Pinto 	free_dma_rx_desc_resources(priv);
153954139cf3SJoao Pinto 
154071fedb01SJoao Pinto 	return ret;
154171fedb01SJoao Pinto }
154271fedb01SJoao Pinto 
154371fedb01SJoao Pinto /**
154471fedb01SJoao Pinto  * alloc_dma_tx_desc_resources - alloc TX resources.
154571fedb01SJoao Pinto  * @priv: private structure
154671fedb01SJoao Pinto  * Description: according to which descriptor can be used (extend or basic)
154771fedb01SJoao Pinto  * this function allocates the resources for TX and RX paths. In case of
154871fedb01SJoao Pinto  * reception, for example, it pre-allocated the RX socket buffer in order to
154971fedb01SJoao Pinto  * allow zero-copy mechanism.
155071fedb01SJoao Pinto  */
155171fedb01SJoao Pinto static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv)
155271fedb01SJoao Pinto {
1553ce736788SJoao Pinto 	u32 tx_count = priv->plat->tx_queues_to_use;
155471fedb01SJoao Pinto 	int ret = -ENOMEM;
1555ce736788SJoao Pinto 	u32 queue;
155671fedb01SJoao Pinto 
1557ce736788SJoao Pinto 	/* TX queues buffers and DMA */
1558ce736788SJoao Pinto 	for (queue = 0; queue < tx_count; queue++) {
1559ce736788SJoao Pinto 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1560ce736788SJoao Pinto 
1561ce736788SJoao Pinto 		tx_q->queue_index = queue;
1562ce736788SJoao Pinto 		tx_q->priv_data = priv;
1563ce736788SJoao Pinto 
1564ce736788SJoao Pinto 		tx_q->tx_skbuff_dma = kmalloc_array(DMA_TX_SIZE,
1565ce736788SJoao Pinto 						    sizeof(*tx_q->tx_skbuff_dma),
156671fedb01SJoao Pinto 						    GFP_KERNEL);
1567ce736788SJoao Pinto 		if (!tx_q->tx_skbuff_dma)
156862242260SChristophe Jaillet 			goto err_dma;
156971fedb01SJoao Pinto 
1570ce736788SJoao Pinto 		tx_q->tx_skbuff = kmalloc_array(DMA_TX_SIZE,
1571ce736788SJoao Pinto 						sizeof(struct sk_buff *),
157271fedb01SJoao Pinto 						GFP_KERNEL);
1573ce736788SJoao Pinto 		if (!tx_q->tx_skbuff)
157462242260SChristophe Jaillet 			goto err_dma;
157571fedb01SJoao Pinto 
157671fedb01SJoao Pinto 		if (priv->extend_desc) {
1577ce736788SJoao Pinto 			tx_q->dma_etx = dma_zalloc_coherent(priv->device,
1578ce736788SJoao Pinto 							    DMA_TX_SIZE *
15795bacd778SLABBE Corentin 							    sizeof(struct
15805bacd778SLABBE Corentin 							    dma_extended_desc),
1581ce736788SJoao Pinto 							    &tx_q->dma_tx_phy,
15825bacd778SLABBE Corentin 							    GFP_KERNEL);
1583ce736788SJoao Pinto 			if (!tx_q->dma_etx)
158462242260SChristophe Jaillet 				goto err_dma;
15855bacd778SLABBE Corentin 		} else {
1586ce736788SJoao Pinto 			tx_q->dma_tx = dma_zalloc_coherent(priv->device,
1587ce736788SJoao Pinto 							   DMA_TX_SIZE *
1588ce736788SJoao Pinto 							   sizeof(struct
1589ce736788SJoao Pinto 								  dma_desc),
1590ce736788SJoao Pinto 							   &tx_q->dma_tx_phy,
15915bacd778SLABBE Corentin 							   GFP_KERNEL);
1592ce736788SJoao Pinto 			if (!tx_q->dma_tx)
159362242260SChristophe Jaillet 				goto err_dma;
1594ce736788SJoao Pinto 		}
15955bacd778SLABBE Corentin 	}
15965bacd778SLABBE Corentin 
15975bacd778SLABBE Corentin 	return 0;
15985bacd778SLABBE Corentin 
159962242260SChristophe Jaillet err_dma:
1600ce736788SJoao Pinto 	free_dma_tx_desc_resources(priv);
1601ce736788SJoao Pinto 
160209f8d696SSrinivas Kandagatla 	return ret;
16035bacd778SLABBE Corentin }
160409f8d696SSrinivas Kandagatla 
/**
 * alloc_dma_desc_resources - alloc TX/RX resources.
 * @priv: private structure
 * Description: according to which descriptor can be used (extend or basic)
 * this function allocates the resources for TX and RX paths. In case of
 * reception, for example, it pre-allocated the RX socket buffer in order to
 * allow zero-copy mechanism.
 */
static int alloc_dma_desc_resources(struct stmmac_priv *priv)
{
	int ret;

	/* RX first; only attempt TX if RX allocation succeeded */
	ret = alloc_dma_rx_desc_resources(priv);
	if (ret)
		return ret;

	return alloc_dma_tx_desc_resources(priv);
}
162571fedb01SJoao Pinto 
/**
 * free_dma_desc_resources - free dma desc resources
 * @priv: private structure
 */
static void free_dma_desc_resources(struct stmmac_priv *priv)
{
	/* Release the DMA RX socket buffers, then the TX ones */
	free_dma_rx_desc_resources(priv);
	free_dma_tx_desc_resources(priv);
}
163871fedb01SJoao Pinto 
163971fedb01SJoao Pinto /**
16409eb12474Sjpinto  *  stmmac_mac_enable_rx_queues - Enable MAC rx queues
16419eb12474Sjpinto  *  @priv: driver private structure
16429eb12474Sjpinto  *  Description: It is used for enabling the rx queues in the MAC
16439eb12474Sjpinto  */
16449eb12474Sjpinto static void stmmac_mac_enable_rx_queues(struct stmmac_priv *priv)
16459eb12474Sjpinto {
16464f6046f5SJoao Pinto 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
16474f6046f5SJoao Pinto 	int queue;
16484f6046f5SJoao Pinto 	u8 mode;
16499eb12474Sjpinto 
16504f6046f5SJoao Pinto 	for (queue = 0; queue < rx_queues_count; queue++) {
16514f6046f5SJoao Pinto 		mode = priv->plat->rx_queues_cfg[queue].mode_to_use;
1652c10d4c82SJose Abreu 		stmmac_rx_queue_enable(priv, priv->hw, mode, queue);
16534f6046f5SJoao Pinto 	}
16549eb12474Sjpinto }
16559eb12474Sjpinto 
16569eb12474Sjpinto /**
1657ae4f0d46SJoao Pinto  * stmmac_start_rx_dma - start RX DMA channel
1658ae4f0d46SJoao Pinto  * @priv: driver private structure
1659ae4f0d46SJoao Pinto  * @chan: RX channel index
1660ae4f0d46SJoao Pinto  * Description:
1661ae4f0d46SJoao Pinto  * This starts a RX DMA channel
1662ae4f0d46SJoao Pinto  */
1663ae4f0d46SJoao Pinto static void stmmac_start_rx_dma(struct stmmac_priv *priv, u32 chan)
1664ae4f0d46SJoao Pinto {
1665ae4f0d46SJoao Pinto 	netdev_dbg(priv->dev, "DMA RX processes started in channel %d\n", chan);
1666a4e887faSJose Abreu 	stmmac_start_rx(priv, priv->ioaddr, chan);
1667ae4f0d46SJoao Pinto }
1668ae4f0d46SJoao Pinto 
/**
 * stmmac_start_tx_dma - start TX DMA channel
 * @priv: driver private structure
 * @chan: TX channel index
 * Description:
 * This starts a TX DMA channel. Thin wrapper: logs, then delegates to the
 * HW-specific callback.
 */
static void stmmac_start_tx_dma(struct stmmac_priv *priv, u32 chan)
{
	netdev_dbg(priv->dev, "DMA TX processes started in channel %d\n", chan);
	stmmac_start_tx(priv, priv->ioaddr, chan);
}
1681ae4f0d46SJoao Pinto 
/**
 * stmmac_stop_rx_dma - stop RX DMA channel
 * @priv: driver private structure
 * @chan: RX channel index
 * Description:
 * This stops a RX DMA channel. Thin wrapper: logs, then delegates to the
 * HW-specific callback.
 */
static void stmmac_stop_rx_dma(struct stmmac_priv *priv, u32 chan)
{
	netdev_dbg(priv->dev, "DMA RX processes stopped in channel %d\n", chan);
	stmmac_stop_rx(priv, priv->ioaddr, chan);
}
1694ae4f0d46SJoao Pinto 
/**
 * stmmac_stop_tx_dma - stop TX DMA channel
 * @priv: driver private structure
 * @chan: TX channel index
 * Description:
 * This stops a TX DMA channel. Thin wrapper: logs, then delegates to the
 * HW-specific callback.
 */
static void stmmac_stop_tx_dma(struct stmmac_priv *priv, u32 chan)
{
	netdev_dbg(priv->dev, "DMA TX processes stopped in channel %d\n", chan);
	stmmac_stop_tx(priv, priv->ioaddr, chan);
}
1707ae4f0d46SJoao Pinto 
1708ae4f0d46SJoao Pinto /**
1709ae4f0d46SJoao Pinto  * stmmac_start_all_dma - start all RX and TX DMA channels
1710ae4f0d46SJoao Pinto  * @priv: driver private structure
1711ae4f0d46SJoao Pinto  * Description:
1712ae4f0d46SJoao Pinto  * This starts all the RX and TX DMA channels
1713ae4f0d46SJoao Pinto  */
1714ae4f0d46SJoao Pinto static void stmmac_start_all_dma(struct stmmac_priv *priv)
1715ae4f0d46SJoao Pinto {
1716ae4f0d46SJoao Pinto 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
1717ae4f0d46SJoao Pinto 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
1718ae4f0d46SJoao Pinto 	u32 chan = 0;
1719ae4f0d46SJoao Pinto 
1720ae4f0d46SJoao Pinto 	for (chan = 0; chan < rx_channels_count; chan++)
1721ae4f0d46SJoao Pinto 		stmmac_start_rx_dma(priv, chan);
1722ae4f0d46SJoao Pinto 
1723ae4f0d46SJoao Pinto 	for (chan = 0; chan < tx_channels_count; chan++)
1724ae4f0d46SJoao Pinto 		stmmac_start_tx_dma(priv, chan);
1725ae4f0d46SJoao Pinto }
1726ae4f0d46SJoao Pinto 
1727ae4f0d46SJoao Pinto /**
1728ae4f0d46SJoao Pinto  * stmmac_stop_all_dma - stop all RX and TX DMA channels
1729ae4f0d46SJoao Pinto  * @priv: driver private structure
1730ae4f0d46SJoao Pinto  * Description:
1731ae4f0d46SJoao Pinto  * This stops the RX and TX DMA channels
1732ae4f0d46SJoao Pinto  */
1733ae4f0d46SJoao Pinto static void stmmac_stop_all_dma(struct stmmac_priv *priv)
1734ae4f0d46SJoao Pinto {
1735ae4f0d46SJoao Pinto 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
1736ae4f0d46SJoao Pinto 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
1737ae4f0d46SJoao Pinto 	u32 chan = 0;
1738ae4f0d46SJoao Pinto 
1739ae4f0d46SJoao Pinto 	for (chan = 0; chan < rx_channels_count; chan++)
1740ae4f0d46SJoao Pinto 		stmmac_stop_rx_dma(priv, chan);
1741ae4f0d46SJoao Pinto 
1742ae4f0d46SJoao Pinto 	for (chan = 0; chan < tx_channels_count; chan++)
1743ae4f0d46SJoao Pinto 		stmmac_stop_tx_dma(priv, chan);
1744ae4f0d46SJoao Pinto }
1745ae4f0d46SJoao Pinto 
1746ae4f0d46SJoao Pinto /**
17477ac6653aSJeff Kirsher  *  stmmac_dma_operation_mode - HW DMA operation mode
174832ceabcaSGiuseppe CAVALLARO  *  @priv: driver private structure
1749732fdf0eSGiuseppe CAVALLARO  *  Description: it is used for configuring the DMA operation mode register in
1750732fdf0eSGiuseppe CAVALLARO  *  order to program the tx/rx DMA thresholds or Store-And-Forward mode.
17517ac6653aSJeff Kirsher  */
17527ac6653aSJeff Kirsher static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
17537ac6653aSJeff Kirsher {
17546deee222SJoao Pinto 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
17556deee222SJoao Pinto 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
1756f88203a2SVince Bridgers 	int rxfifosz = priv->plat->rx_fifo_size;
175752a76235SJose Abreu 	int txfifosz = priv->plat->tx_fifo_size;
17586deee222SJoao Pinto 	u32 txmode = 0;
17596deee222SJoao Pinto 	u32 rxmode = 0;
17606deee222SJoao Pinto 	u32 chan = 0;
1761a0daae13SJose Abreu 	u8 qmode = 0;
1762f88203a2SVince Bridgers 
176311fbf811SThierry Reding 	if (rxfifosz == 0)
176411fbf811SThierry Reding 		rxfifosz = priv->dma_cap.rx_fifo_size;
176552a76235SJose Abreu 	if (txfifosz == 0)
176652a76235SJose Abreu 		txfifosz = priv->dma_cap.tx_fifo_size;
176752a76235SJose Abreu 
176852a76235SJose Abreu 	/* Adjust for real per queue fifo size */
176952a76235SJose Abreu 	rxfifosz /= rx_channels_count;
177052a76235SJose Abreu 	txfifosz /= tx_channels_count;
177111fbf811SThierry Reding 
17726deee222SJoao Pinto 	if (priv->plat->force_thresh_dma_mode) {
17736deee222SJoao Pinto 		txmode = tc;
17746deee222SJoao Pinto 		rxmode = tc;
17756deee222SJoao Pinto 	} else if (priv->plat->force_sf_dma_mode || priv->plat->tx_coe) {
17767ac6653aSJeff Kirsher 		/*
17777ac6653aSJeff Kirsher 		 * In case of GMAC, SF mode can be enabled
17787ac6653aSJeff Kirsher 		 * to perform the TX COE in HW. This depends on:
17797ac6653aSJeff Kirsher 		 * 1) TX COE if actually supported
17807ac6653aSJeff Kirsher 		 * 2) There is no bugged Jumbo frame support
17817ac6653aSJeff Kirsher 		 *    that needs to not insert csum in the TDES.
17827ac6653aSJeff Kirsher 		 */
17836deee222SJoao Pinto 		txmode = SF_DMA_MODE;
17846deee222SJoao Pinto 		rxmode = SF_DMA_MODE;
1785b2dec116SSonic Zhang 		priv->xstats.threshold = SF_DMA_MODE;
17866deee222SJoao Pinto 	} else {
17876deee222SJoao Pinto 		txmode = tc;
17886deee222SJoao Pinto 		rxmode = SF_DMA_MODE;
17896deee222SJoao Pinto 	}
17906deee222SJoao Pinto 
17916deee222SJoao Pinto 	/* configure all channels */
1792a0daae13SJose Abreu 	for (chan = 0; chan < rx_channels_count; chan++) {
1793a0daae13SJose Abreu 		qmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
17946deee222SJoao Pinto 
1795a4e887faSJose Abreu 		stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan,
1796a0daae13SJose Abreu 				rxfifosz, qmode);
1797a0daae13SJose Abreu 	}
1798a0daae13SJose Abreu 
1799a0daae13SJose Abreu 	for (chan = 0; chan < tx_channels_count; chan++) {
1800a0daae13SJose Abreu 		qmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
1801a0daae13SJose Abreu 
1802a4e887faSJose Abreu 		stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan,
1803a0daae13SJose Abreu 				txfifosz, qmode);
1804a0daae13SJose Abreu 	}
18057ac6653aSJeff Kirsher }
18067ac6653aSJeff Kirsher 
/**
 * stmmac_tx_clean - to manage the transmission completion
 * @priv: driver private structure
 * @queue: TX queue index
 * Description: it reclaims the transmit resources after transmission completes.
 */
static void stmmac_tx_clean(struct stmmac_priv *priv, u32 queue)
{
	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
	unsigned int bytes_compl = 0, pkts_compl = 0;
	unsigned int entry;

	/* Serialize against the xmit path on this device */
	netif_tx_lock(priv->dev);

	priv->xstats.tx_clean++;

	/* Walk the ring from the oldest unreclaimed descriptor (dirty_tx)
	 * towards the producer index (cur_tx).
	 */
	entry = tx_q->dirty_tx;
	while (entry != tx_q->cur_tx) {
		struct sk_buff *skb = tx_q->tx_skbuff[entry];
		struct dma_desc *p;
		int status;

		/* Extended descriptors embed the basic descriptor first,
		 * so the cast below is valid for status parsing.
		 */
		if (priv->extend_desc)
			p = (struct dma_desc *)(tx_q->dma_etx + entry);
		else
			p = tx_q->dma_tx + entry;

		status = stmmac_tx_status(priv, &priv->dev->stats,
				&priv->xstats, p, priv->ioaddr);
		/* Check if the descriptor is owned by the DMA */
		if (unlikely(status & tx_dma_own))
			break;

		/* Make sure descriptor fields are read after reading
		 * the own bit.
		 */
		dma_rmb();

		/* Just consider the last segment and ...*/
		if (likely(!(status & tx_not_ls))) {
			/* ... verify the status error condition */
			if (unlikely(status & tx_err)) {
				priv->dev->stats.tx_errors++;
			} else {
				priv->dev->stats.tx_packets++;
				priv->xstats.tx_pkt_n++;
			}
			stmmac_get_tx_hwtstamp(priv, p, skb);
		}

		/* Release the DMA mapping for this fragment; buf == 0 means
		 * there was nothing mapped for this entry.
		 */
		if (likely(tx_q->tx_skbuff_dma[entry].buf)) {
			if (tx_q->tx_skbuff_dma[entry].map_as_page)
				dma_unmap_page(priv->device,
					       tx_q->tx_skbuff_dma[entry].buf,
					       tx_q->tx_skbuff_dma[entry].len,
					       DMA_TO_DEVICE);
			else
				dma_unmap_single(priv->device,
						 tx_q->tx_skbuff_dma[entry].buf,
						 tx_q->tx_skbuff_dma[entry].len,
						 DMA_TO_DEVICE);
			tx_q->tx_skbuff_dma[entry].buf = 0;
			tx_q->tx_skbuff_dma[entry].len = 0;
			tx_q->tx_skbuff_dma[entry].map_as_page = false;
		}

		stmmac_clean_desc3(priv, tx_q, p);

		tx_q->tx_skbuff_dma[entry].last_segment = false;
		tx_q->tx_skbuff_dma[entry].is_jumbo = false;

		/* Only the entry carrying the skb (last segment) frees it */
		if (likely(skb != NULL)) {
			pkts_compl++;
			bytes_compl += skb->len;
			dev_consume_skb_any(skb);
			tx_q->tx_skbuff[entry] = NULL;
		}

		stmmac_release_tx_desc(priv, p, priv->mode);

		entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
	}
	tx_q->dirty_tx = entry;

	/* BQL accounting for the reclaimed packets/bytes */
	netdev_tx_completed_queue(netdev_get_tx_queue(priv->dev, queue),
				  pkts_compl, bytes_compl);

	/* Restart the queue if it was stopped and enough descriptors freed */
	if (unlikely(netif_tx_queue_stopped(netdev_get_tx_queue(priv->dev,
								queue))) &&
	    stmmac_tx_avail(priv, queue) > STMMAC_TX_THRESH) {

		netif_dbg(priv, tx_done, priv->dev,
			  "%s: restart transmit\n", __func__);
		netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, queue));
	}

	/* TX path idle again: re-arm entry into the EEE low-power state */
	if ((priv->eee_enabled) && (!priv->tx_path_in_lpi_mode)) {
		stmmac_enable_eee_mode(priv);
		mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer));
	}
	netif_tx_unlock(priv->dev);
}
19097ac6653aSJeff Kirsher 
19107ac6653aSJeff Kirsher /**
1911732fdf0eSGiuseppe CAVALLARO  * stmmac_tx_err - to manage the tx error
191232ceabcaSGiuseppe CAVALLARO  * @priv: driver private structure
19135bacd778SLABBE Corentin  * @chan: channel index
19147ac6653aSJeff Kirsher  * Description: it cleans the descriptors and restarts the transmission
1915732fdf0eSGiuseppe CAVALLARO  * in case of transmission errors.
19167ac6653aSJeff Kirsher  */
19175bacd778SLABBE Corentin static void stmmac_tx_err(struct stmmac_priv *priv, u32 chan)
19187ac6653aSJeff Kirsher {
1919ce736788SJoao Pinto 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
1920c24602efSGiuseppe CAVALLARO 	int i;
1921ce736788SJoao Pinto 
1922c22a3f48SJoao Pinto 	netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, chan));
19237ac6653aSJeff Kirsher 
1924ae4f0d46SJoao Pinto 	stmmac_stop_tx_dma(priv, chan);
1925ce736788SJoao Pinto 	dma_free_tx_skbufs(priv, chan);
1926e3ad57c9SGiuseppe Cavallaro 	for (i = 0; i < DMA_TX_SIZE; i++)
1927c24602efSGiuseppe CAVALLARO 		if (priv->extend_desc)
192842de047dSJose Abreu 			stmmac_init_tx_desc(priv, &tx_q->dma_etx[i].basic,
192942de047dSJose Abreu 					priv->mode, (i == DMA_TX_SIZE - 1));
1930c24602efSGiuseppe CAVALLARO 		else
193142de047dSJose Abreu 			stmmac_init_tx_desc(priv, &tx_q->dma_tx[i],
193242de047dSJose Abreu 					priv->mode, (i == DMA_TX_SIZE - 1));
1933ce736788SJoao Pinto 	tx_q->dirty_tx = 0;
1934ce736788SJoao Pinto 	tx_q->cur_tx = 0;
19358d212a9eSNiklas Cassel 	tx_q->mss = 0;
1936c22a3f48SJoao Pinto 	netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, chan));
1937ae4f0d46SJoao Pinto 	stmmac_start_tx_dma(priv, chan);
19387ac6653aSJeff Kirsher 
19397ac6653aSJeff Kirsher 	priv->dev->stats.tx_errors++;
1940c22a3f48SJoao Pinto 	netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, chan));
19417ac6653aSJeff Kirsher }
19427ac6653aSJeff Kirsher 
194332ceabcaSGiuseppe CAVALLARO /**
19446deee222SJoao Pinto  *  stmmac_set_dma_operation_mode - Set DMA operation mode by channel
19456deee222SJoao Pinto  *  @priv: driver private structure
19466deee222SJoao Pinto  *  @txmode: TX operating mode
19476deee222SJoao Pinto  *  @rxmode: RX operating mode
19486deee222SJoao Pinto  *  @chan: channel index
19496deee222SJoao Pinto  *  Description: it is used for configuring of the DMA operation mode in
19506deee222SJoao Pinto  *  runtime in order to program the tx/rx DMA thresholds or Store-And-Forward
19516deee222SJoao Pinto  *  mode.
19526deee222SJoao Pinto  */
19536deee222SJoao Pinto static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
19546deee222SJoao Pinto 					  u32 rxmode, u32 chan)
19556deee222SJoao Pinto {
1956a0daae13SJose Abreu 	u8 rxqmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
1957a0daae13SJose Abreu 	u8 txqmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
195852a76235SJose Abreu 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
195952a76235SJose Abreu 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
19606deee222SJoao Pinto 	int rxfifosz = priv->plat->rx_fifo_size;
196152a76235SJose Abreu 	int txfifosz = priv->plat->tx_fifo_size;
19626deee222SJoao Pinto 
19636deee222SJoao Pinto 	if (rxfifosz == 0)
19646deee222SJoao Pinto 		rxfifosz = priv->dma_cap.rx_fifo_size;
196552a76235SJose Abreu 	if (txfifosz == 0)
196652a76235SJose Abreu 		txfifosz = priv->dma_cap.tx_fifo_size;
196752a76235SJose Abreu 
196852a76235SJose Abreu 	/* Adjust for real per queue fifo size */
196952a76235SJose Abreu 	rxfifosz /= rx_channels_count;
197052a76235SJose Abreu 	txfifosz /= tx_channels_count;
19716deee222SJoao Pinto 
1972ab0204e3SJose Abreu 	stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan, rxfifosz, rxqmode);
1973ab0204e3SJose Abreu 	stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan, txfifosz, txqmode);
19746deee222SJoao Pinto }
19756deee222SJoao Pinto 
19768bf993a5SJose Abreu static bool stmmac_safety_feat_interrupt(struct stmmac_priv *priv)
19778bf993a5SJose Abreu {
197863a550fcSJose Abreu 	int ret;
19798bf993a5SJose Abreu 
1980c10d4c82SJose Abreu 	ret = stmmac_safety_feat_irq_status(priv, priv->dev,
19818bf993a5SJose Abreu 			priv->ioaddr, priv->dma_cap.asp, &priv->sstats);
1982c10d4c82SJose Abreu 	if (ret && (ret != -EINVAL)) {
19838bf993a5SJose Abreu 		stmmac_global_err(priv);
1984c10d4c82SJose Abreu 		return true;
1985c10d4c82SJose Abreu 	}
1986c10d4c82SJose Abreu 
1987c10d4c82SJose Abreu 	return false;
19888bf993a5SJose Abreu }
19898bf993a5SJose Abreu 
/**
 * stmmac_dma_interrupt - DMA ISR
 * @priv: driver private structure
 * Description: this is the DMA ISR. It is called by the main ISR.
 * It calls the dwmac dma routine and schedule poll method in case of some
 * work can be done.
 */
static void stmmac_dma_interrupt(struct stmmac_priv *priv)
{
	u32 tx_channel_count = priv->plat->tx_queues_to_use;
	u32 rx_channel_count = priv->plat->rx_queues_to_use;
	u32 channels_to_check = tx_channel_count > rx_channel_count ?
				tx_channel_count : rx_channel_count;
	u32 chan;
	bool poll_scheduled = false;
	/* One status slot per possible channel (max of TX/RX queue limits) */
	int status[max_t(u32, MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES)];

	/* Make sure we never check beyond our status buffer. */
	if (WARN_ON_ONCE(channels_to_check > ARRAY_SIZE(status)))
		channels_to_check = ARRAY_SIZE(status);

	/* Each DMA channel can be used for rx and tx simultaneously, yet
	 * napi_struct is embedded in struct stmmac_rx_queue rather than in a
	 * stmmac_channel struct.
	 * Because of this, stmmac_poll currently checks (and possibly wakes)
	 * all tx queues rather than just a single tx queue.
	 */
	for (chan = 0; chan < channels_to_check; chan++)
		status[chan] = stmmac_dma_interrupt_status(priv, priv->ioaddr,
				&priv->xstats, chan);

	/* Schedule NAPI for every RX channel with pending work, masking the
	 * channel's DMA interrupts until poll re-enables them.
	 */
	for (chan = 0; chan < rx_channel_count; chan++) {
		if (likely(status[chan] & handle_rx)) {
			struct stmmac_rx_queue *rx_q = &priv->rx_queue[chan];

			if (likely(napi_schedule_prep(&rx_q->napi))) {
				stmmac_disable_dma_irq(priv, priv->ioaddr, chan);
				__napi_schedule(&rx_q->napi);
				poll_scheduled = true;
			}
		}
	}

	/* If we scheduled poll, we already know that tx queues will be checked.
	 * If we didn't schedule poll, see if any DMA channel (used by tx) has a
	 * completed transmission, if so, call stmmac_poll (once).
	 */
	if (!poll_scheduled) {
		for (chan = 0; chan < tx_channel_count; chan++) {
			if (status[chan] & handle_tx) {
				/* It doesn't matter what rx queue we choose
				 * here. We use 0 since it always exists.
				 */
				struct stmmac_rx_queue *rx_q =
					&priv->rx_queue[0];

				if (likely(napi_schedule_prep(&rx_q->napi))) {
					stmmac_disable_dma_irq(priv,
							priv->ioaddr, chan);
					__napi_schedule(&rx_q->napi);
				}
				break;
			}
		}
	}

	/* Handle TX error conditions reported per channel */
	for (chan = 0; chan < tx_channel_count; chan++) {
		if (unlikely(status[chan] & tx_hard_error_bump_tc)) {
			/* Try to bump up the dma threshold on this failure */
			if (unlikely(priv->xstats.threshold != SF_DMA_MODE) &&
			    (tc <= 256)) {
				tc += 64;
				if (priv->plat->force_thresh_dma_mode)
					stmmac_set_dma_operation_mode(priv,
								      tc,
								      tc,
								      chan);
				else
					stmmac_set_dma_operation_mode(priv,
								    tc,
								    SF_DMA_MODE,
								    chan);
				priv->xstats.threshold = tc;
			}
		} else if (unlikely(status[chan] == tx_hard_error)) {
			stmmac_tx_err(priv, chan);
		}
	}
}
20797ac6653aSJeff Kirsher 
208032ceabcaSGiuseppe CAVALLARO /**
208132ceabcaSGiuseppe CAVALLARO  * stmmac_mmc_setup: setup the Mac Management Counters (MMC)
208232ceabcaSGiuseppe CAVALLARO  * @priv: driver private structure
208332ceabcaSGiuseppe CAVALLARO  * Description: this masks the MMC irq, in fact, the counters are managed in SW.
208432ceabcaSGiuseppe CAVALLARO  */
20851c901a46SGiuseppe CAVALLARO static void stmmac_mmc_setup(struct stmmac_priv *priv)
20861c901a46SGiuseppe CAVALLARO {
20871c901a46SGiuseppe CAVALLARO 	unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET |
20881c901a46SGiuseppe CAVALLARO 			    MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET;
20891c901a46SGiuseppe CAVALLARO 
209036ff7c1eSAlexandre TORGUE 	dwmac_mmc_intr_all_mask(priv->mmcaddr);
20914f795b25SGiuseppe CAVALLARO 
20924f795b25SGiuseppe CAVALLARO 	if (priv->dma_cap.rmon) {
209336ff7c1eSAlexandre TORGUE 		dwmac_mmc_ctrl(priv->mmcaddr, mode);
20941c901a46SGiuseppe CAVALLARO 		memset(&priv->mmc, 0, sizeof(struct stmmac_counters));
20954f795b25SGiuseppe CAVALLARO 	} else
209638ddc59dSLABBE Corentin 		netdev_info(priv->dev, "No MAC Management Counters available\n");
20971c901a46SGiuseppe CAVALLARO }
20981c901a46SGiuseppe CAVALLARO 
2099732fdf0eSGiuseppe CAVALLARO /**
2100732fdf0eSGiuseppe CAVALLARO  * stmmac_get_hw_features - get MAC capabilities from the HW cap. register.
210132ceabcaSGiuseppe CAVALLARO  * @priv: driver private structure
210219e30c14SGiuseppe CAVALLARO  * Description:
210319e30c14SGiuseppe CAVALLARO  *  new GMAC chip generations have a new register to indicate the
2104e7434821SGiuseppe CAVALLARO  *  presence of the optional feature/functions.
210519e30c14SGiuseppe CAVALLARO  *  This can be also used to override the value passed through the
210619e30c14SGiuseppe CAVALLARO  *  platform and necessary for old MAC10/100 and GMAC chips.
2107e7434821SGiuseppe CAVALLARO  */
2108e7434821SGiuseppe CAVALLARO static int stmmac_get_hw_features(struct stmmac_priv *priv)
2109e7434821SGiuseppe CAVALLARO {
2110a4e887faSJose Abreu 	return stmmac_get_hw_feature(priv, priv->ioaddr, &priv->dma_cap) == 0;
2111e7434821SGiuseppe CAVALLARO }
2112e7434821SGiuseppe CAVALLARO 
211332ceabcaSGiuseppe CAVALLARO /**
2114732fdf0eSGiuseppe CAVALLARO  * stmmac_check_ether_addr - check if the MAC addr is valid
211532ceabcaSGiuseppe CAVALLARO  * @priv: driver private structure
211632ceabcaSGiuseppe CAVALLARO  * Description:
211732ceabcaSGiuseppe CAVALLARO  * it is to verify if the MAC address is valid, in case of failures it
211832ceabcaSGiuseppe CAVALLARO  * generates a random MAC address
211932ceabcaSGiuseppe CAVALLARO  */
2120bfab27a1SGiuseppe CAVALLARO static void stmmac_check_ether_addr(struct stmmac_priv *priv)
2121bfab27a1SGiuseppe CAVALLARO {
2122bfab27a1SGiuseppe CAVALLARO 	if (!is_valid_ether_addr(priv->dev->dev_addr)) {
2123c10d4c82SJose Abreu 		stmmac_get_umac_addr(priv, priv->hw, priv->dev->dev_addr, 0);
2124bfab27a1SGiuseppe CAVALLARO 		if (!is_valid_ether_addr(priv->dev->dev_addr))
2125f2cedb63SDanny Kukawka 			eth_hw_addr_random(priv->dev);
212638ddc59dSLABBE Corentin 		netdev_info(priv->dev, "device MAC address %pM\n",
2127bfab27a1SGiuseppe CAVALLARO 			    priv->dev->dev_addr);
2128bfab27a1SGiuseppe CAVALLARO 	}
2129c88460b7SHans de Goede }
2130bfab27a1SGiuseppe CAVALLARO 
213132ceabcaSGiuseppe CAVALLARO /**
2132732fdf0eSGiuseppe CAVALLARO  * stmmac_init_dma_engine - DMA init.
213332ceabcaSGiuseppe CAVALLARO  * @priv: driver private structure
213432ceabcaSGiuseppe CAVALLARO  * Description:
213532ceabcaSGiuseppe CAVALLARO  * It inits the DMA invoking the specific MAC/GMAC callback.
213632ceabcaSGiuseppe CAVALLARO  * Some DMA parameters can be passed from the platform;
213732ceabcaSGiuseppe CAVALLARO  * in case of these are not passed a default is kept for the MAC or GMAC.
213832ceabcaSGiuseppe CAVALLARO  */
21390f1f88a8SGiuseppe CAVALLARO static int stmmac_init_dma_engine(struct stmmac_priv *priv)
21400f1f88a8SGiuseppe CAVALLARO {
214147f2a9ceSJoao Pinto 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
214247f2a9ceSJoao Pinto 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
214324aaed0cSJose Abreu 	u32 dma_csr_ch = max(rx_channels_count, tx_channels_count);
214454139cf3SJoao Pinto 	struct stmmac_rx_queue *rx_q;
2145ce736788SJoao Pinto 	struct stmmac_tx_queue *tx_q;
214647f2a9ceSJoao Pinto 	u32 chan = 0;
2147c24602efSGiuseppe CAVALLARO 	int atds = 0;
2148495db273SGiuseppe Cavallaro 	int ret = 0;
21490f1f88a8SGiuseppe CAVALLARO 
2150a332e2faSNiklas Cassel 	if (!priv->plat->dma_cfg || !priv->plat->dma_cfg->pbl) {
2151a332e2faSNiklas Cassel 		dev_err(priv->device, "Invalid DMA configuration\n");
215289ab75bfSNiklas Cassel 		return -EINVAL;
21530f1f88a8SGiuseppe CAVALLARO 	}
21540f1f88a8SGiuseppe CAVALLARO 
2155c24602efSGiuseppe CAVALLARO 	if (priv->extend_desc && (priv->mode == STMMAC_RING_MODE))
2156c24602efSGiuseppe CAVALLARO 		atds = 1;
2157c24602efSGiuseppe CAVALLARO 
2158a4e887faSJose Abreu 	ret = stmmac_reset(priv, priv->ioaddr);
2159495db273SGiuseppe Cavallaro 	if (ret) {
2160495db273SGiuseppe Cavallaro 		dev_err(priv->device, "Failed to reset the dma\n");
2161495db273SGiuseppe Cavallaro 		return ret;
2162495db273SGiuseppe Cavallaro 	}
2163495db273SGiuseppe Cavallaro 
216447f2a9ceSJoao Pinto 	/* DMA RX Channel Configuration */
216547f2a9ceSJoao Pinto 	for (chan = 0; chan < rx_channels_count; chan++) {
216654139cf3SJoao Pinto 		rx_q = &priv->rx_queue[chan];
216754139cf3SJoao Pinto 
216824aaed0cSJose Abreu 		stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
216924aaed0cSJose Abreu 				    rx_q->dma_rx_phy, chan);
217047f2a9ceSJoao Pinto 
217154139cf3SJoao Pinto 		rx_q->rx_tail_addr = rx_q->dma_rx_phy +
2172f748be53SAlexandre TORGUE 			    (DMA_RX_SIZE * sizeof(struct dma_desc));
2173a4e887faSJose Abreu 		stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
2174a4e887faSJose Abreu 				       rx_q->rx_tail_addr, chan);
217547f2a9ceSJoao Pinto 	}
217647f2a9ceSJoao Pinto 
217747f2a9ceSJoao Pinto 	/* DMA TX Channel Configuration */
217847f2a9ceSJoao Pinto 	for (chan = 0; chan < tx_channels_count; chan++) {
2179ce736788SJoao Pinto 		tx_q = &priv->tx_queue[chan];
2180ce736788SJoao Pinto 
218124aaed0cSJose Abreu 		stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
218224aaed0cSJose Abreu 				    tx_q->dma_tx_phy, chan);
2183f748be53SAlexandre TORGUE 
2184ce736788SJoao Pinto 		tx_q->tx_tail_addr = tx_q->dma_tx_phy +
2185f748be53SAlexandre TORGUE 			    (DMA_TX_SIZE * sizeof(struct dma_desc));
2186a4e887faSJose Abreu 		stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
2187a4e887faSJose Abreu 				       tx_q->tx_tail_addr, chan);
218847f2a9ceSJoao Pinto 	}
218924aaed0cSJose Abreu 
219024aaed0cSJose Abreu 	/* DMA CSR Channel configuration */
219124aaed0cSJose Abreu 	for (chan = 0; chan < dma_csr_ch; chan++)
219224aaed0cSJose Abreu 		stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
219324aaed0cSJose Abreu 
219424aaed0cSJose Abreu 	/* DMA Configuration */
219524aaed0cSJose Abreu 	stmmac_dma_init(priv, priv->ioaddr, priv->plat->dma_cfg, atds);
2196f748be53SAlexandre TORGUE 
2197a4e887faSJose Abreu 	if (priv->plat->axi)
2198a4e887faSJose Abreu 		stmmac_axi(priv, priv->ioaddr, priv->plat->axi);
2199afea0365SGiuseppe Cavallaro 
2200495db273SGiuseppe Cavallaro 	return ret;
22010f1f88a8SGiuseppe CAVALLARO }
22020f1f88a8SGiuseppe CAVALLARO 
2203bfab27a1SGiuseppe CAVALLARO /**
2204732fdf0eSGiuseppe CAVALLARO  * stmmac_tx_timer - mitigation sw timer for tx.
22059125cdd1SGiuseppe CAVALLARO  * @data: data pointer
22069125cdd1SGiuseppe CAVALLARO  * Description:
22079125cdd1SGiuseppe CAVALLARO  * This is the timer handler to directly invoke the stmmac_tx_clean.
22089125cdd1SGiuseppe CAVALLARO  */
2209e99e88a9SKees Cook static void stmmac_tx_timer(struct timer_list *t)
22109125cdd1SGiuseppe CAVALLARO {
2211e99e88a9SKees Cook 	struct stmmac_priv *priv = from_timer(priv, t, txtimer);
2212ce736788SJoao Pinto 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
2213ce736788SJoao Pinto 	u32 queue;
22149125cdd1SGiuseppe CAVALLARO 
2215ce736788SJoao Pinto 	/* let's scan all the tx queues */
2216ce736788SJoao Pinto 	for (queue = 0; queue < tx_queues_count; queue++)
2217ce736788SJoao Pinto 		stmmac_tx_clean(priv, queue);
22189125cdd1SGiuseppe CAVALLARO }
22199125cdd1SGiuseppe CAVALLARO 
22209125cdd1SGiuseppe CAVALLARO /**
2221732fdf0eSGiuseppe CAVALLARO  * stmmac_init_tx_coalesce - init tx mitigation options.
222232ceabcaSGiuseppe CAVALLARO  * @priv: driver private structure
22239125cdd1SGiuseppe CAVALLARO  * Description:
22249125cdd1SGiuseppe CAVALLARO  * This inits the transmit coalesce parameters: i.e. timer rate,
22259125cdd1SGiuseppe CAVALLARO  * timer handler and default threshold used for enabling the
22269125cdd1SGiuseppe CAVALLARO  * interrupt on completion bit.
22279125cdd1SGiuseppe CAVALLARO  */
22289125cdd1SGiuseppe CAVALLARO static void stmmac_init_tx_coalesce(struct stmmac_priv *priv)
22299125cdd1SGiuseppe CAVALLARO {
22309125cdd1SGiuseppe CAVALLARO 	priv->tx_coal_frames = STMMAC_TX_FRAMES;
22319125cdd1SGiuseppe CAVALLARO 	priv->tx_coal_timer = STMMAC_COAL_TX_TIMER;
2232e99e88a9SKees Cook 	timer_setup(&priv->txtimer, stmmac_tx_timer, 0);
22339125cdd1SGiuseppe CAVALLARO 	priv->txtimer.expires = STMMAC_COAL_TIMER(priv->tx_coal_timer);
22349125cdd1SGiuseppe CAVALLARO 	add_timer(&priv->txtimer);
22359125cdd1SGiuseppe CAVALLARO }
22369125cdd1SGiuseppe CAVALLARO 
22374854ab99SJoao Pinto static void stmmac_set_rings_length(struct stmmac_priv *priv)
22384854ab99SJoao Pinto {
22394854ab99SJoao Pinto 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
22404854ab99SJoao Pinto 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
22414854ab99SJoao Pinto 	u32 chan;
22424854ab99SJoao Pinto 
22434854ab99SJoao Pinto 	/* set TX ring length */
22444854ab99SJoao Pinto 	for (chan = 0; chan < tx_channels_count; chan++)
2245a4e887faSJose Abreu 		stmmac_set_tx_ring_len(priv, priv->ioaddr,
22464854ab99SJoao Pinto 				(DMA_TX_SIZE - 1), chan);
22474854ab99SJoao Pinto 
22484854ab99SJoao Pinto 	/* set RX ring length */
22494854ab99SJoao Pinto 	for (chan = 0; chan < rx_channels_count; chan++)
2250a4e887faSJose Abreu 		stmmac_set_rx_ring_len(priv, priv->ioaddr,
22514854ab99SJoao Pinto 				(DMA_RX_SIZE - 1), chan);
22524854ab99SJoao Pinto }
22534854ab99SJoao Pinto 
22549125cdd1SGiuseppe CAVALLARO /**
22556a3a7193SJoao Pinto  *  stmmac_set_tx_queue_weight - Set TX queue weight
22566a3a7193SJoao Pinto  *  @priv: driver private structure
22576a3a7193SJoao Pinto  *  Description: It is used for setting TX queues weight
22586a3a7193SJoao Pinto  */
22596a3a7193SJoao Pinto static void stmmac_set_tx_queue_weight(struct stmmac_priv *priv)
22606a3a7193SJoao Pinto {
22616a3a7193SJoao Pinto 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
22626a3a7193SJoao Pinto 	u32 weight;
22636a3a7193SJoao Pinto 	u32 queue;
22646a3a7193SJoao Pinto 
22656a3a7193SJoao Pinto 	for (queue = 0; queue < tx_queues_count; queue++) {
22666a3a7193SJoao Pinto 		weight = priv->plat->tx_queues_cfg[queue].weight;
2267c10d4c82SJose Abreu 		stmmac_set_mtl_tx_queue_weight(priv, priv->hw, weight, queue);
22686a3a7193SJoao Pinto 	}
22696a3a7193SJoao Pinto }
22706a3a7193SJoao Pinto 
22716a3a7193SJoao Pinto /**
227219d91873SJoao Pinto  *  stmmac_configure_cbs - Configure CBS in TX queue
227319d91873SJoao Pinto  *  @priv: driver private structure
227419d91873SJoao Pinto  *  Description: It is used for configuring CBS in AVB TX queues
227519d91873SJoao Pinto  */
227619d91873SJoao Pinto static void stmmac_configure_cbs(struct stmmac_priv *priv)
227719d91873SJoao Pinto {
227819d91873SJoao Pinto 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
227919d91873SJoao Pinto 	u32 mode_to_use;
228019d91873SJoao Pinto 	u32 queue;
228119d91873SJoao Pinto 
228244781fefSJoao Pinto 	/* queue 0 is reserved for legacy traffic */
228344781fefSJoao Pinto 	for (queue = 1; queue < tx_queues_count; queue++) {
228419d91873SJoao Pinto 		mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use;
228519d91873SJoao Pinto 		if (mode_to_use == MTL_QUEUE_DCB)
228619d91873SJoao Pinto 			continue;
228719d91873SJoao Pinto 
2288c10d4c82SJose Abreu 		stmmac_config_cbs(priv, priv->hw,
228919d91873SJoao Pinto 				priv->plat->tx_queues_cfg[queue].send_slope,
229019d91873SJoao Pinto 				priv->plat->tx_queues_cfg[queue].idle_slope,
229119d91873SJoao Pinto 				priv->plat->tx_queues_cfg[queue].high_credit,
229219d91873SJoao Pinto 				priv->plat->tx_queues_cfg[queue].low_credit,
229319d91873SJoao Pinto 				queue);
229419d91873SJoao Pinto 	}
229519d91873SJoao Pinto }
229619d91873SJoao Pinto 
229719d91873SJoao Pinto /**
2298d43042f4SJoao Pinto  *  stmmac_rx_queue_dma_chan_map - Map RX queue to RX dma channel
2299d43042f4SJoao Pinto  *  @priv: driver private structure
2300d43042f4SJoao Pinto  *  Description: It is used for mapping RX queues to RX dma channels
2301d43042f4SJoao Pinto  */
2302d43042f4SJoao Pinto static void stmmac_rx_queue_dma_chan_map(struct stmmac_priv *priv)
2303d43042f4SJoao Pinto {
2304d43042f4SJoao Pinto 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
2305d43042f4SJoao Pinto 	u32 queue;
2306d43042f4SJoao Pinto 	u32 chan;
2307d43042f4SJoao Pinto 
2308d43042f4SJoao Pinto 	for (queue = 0; queue < rx_queues_count; queue++) {
2309d43042f4SJoao Pinto 		chan = priv->plat->rx_queues_cfg[queue].chan;
2310c10d4c82SJose Abreu 		stmmac_map_mtl_to_dma(priv, priv->hw, queue, chan);
2311d43042f4SJoao Pinto 	}
2312d43042f4SJoao Pinto }
2313d43042f4SJoao Pinto 
2314d43042f4SJoao Pinto /**
2315a8f5102aSJoao Pinto  *  stmmac_mac_config_rx_queues_prio - Configure RX Queue priority
2316a8f5102aSJoao Pinto  *  @priv: driver private structure
2317a8f5102aSJoao Pinto  *  Description: It is used for configuring the RX Queue Priority
2318a8f5102aSJoao Pinto  */
2319a8f5102aSJoao Pinto static void stmmac_mac_config_rx_queues_prio(struct stmmac_priv *priv)
2320a8f5102aSJoao Pinto {
2321a8f5102aSJoao Pinto 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
2322a8f5102aSJoao Pinto 	u32 queue;
2323a8f5102aSJoao Pinto 	u32 prio;
2324a8f5102aSJoao Pinto 
2325a8f5102aSJoao Pinto 	for (queue = 0; queue < rx_queues_count; queue++) {
2326a8f5102aSJoao Pinto 		if (!priv->plat->rx_queues_cfg[queue].use_prio)
2327a8f5102aSJoao Pinto 			continue;
2328a8f5102aSJoao Pinto 
2329a8f5102aSJoao Pinto 		prio = priv->plat->rx_queues_cfg[queue].prio;
2330c10d4c82SJose Abreu 		stmmac_rx_queue_prio(priv, priv->hw, prio, queue);
2331a8f5102aSJoao Pinto 	}
2332a8f5102aSJoao Pinto }
2333a8f5102aSJoao Pinto 
2334a8f5102aSJoao Pinto /**
2335a8f5102aSJoao Pinto  *  stmmac_mac_config_tx_queues_prio - Configure TX Queue priority
2336a8f5102aSJoao Pinto  *  @priv: driver private structure
2337a8f5102aSJoao Pinto  *  Description: It is used for configuring the TX Queue Priority
2338a8f5102aSJoao Pinto  */
2339a8f5102aSJoao Pinto static void stmmac_mac_config_tx_queues_prio(struct stmmac_priv *priv)
2340a8f5102aSJoao Pinto {
2341a8f5102aSJoao Pinto 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
2342a8f5102aSJoao Pinto 	u32 queue;
2343a8f5102aSJoao Pinto 	u32 prio;
2344a8f5102aSJoao Pinto 
2345a8f5102aSJoao Pinto 	for (queue = 0; queue < tx_queues_count; queue++) {
2346a8f5102aSJoao Pinto 		if (!priv->plat->tx_queues_cfg[queue].use_prio)
2347a8f5102aSJoao Pinto 			continue;
2348a8f5102aSJoao Pinto 
2349a8f5102aSJoao Pinto 		prio = priv->plat->tx_queues_cfg[queue].prio;
2350c10d4c82SJose Abreu 		stmmac_tx_queue_prio(priv, priv->hw, prio, queue);
2351a8f5102aSJoao Pinto 	}
2352a8f5102aSJoao Pinto }
2353a8f5102aSJoao Pinto 
2354a8f5102aSJoao Pinto /**
2355abe80fdcSJoao Pinto  *  stmmac_mac_config_rx_queues_routing - Configure RX Queue Routing
2356abe80fdcSJoao Pinto  *  @priv: driver private structure
2357abe80fdcSJoao Pinto  *  Description: It is used for configuring the RX queue routing
2358abe80fdcSJoao Pinto  */
2359abe80fdcSJoao Pinto static void stmmac_mac_config_rx_queues_routing(struct stmmac_priv *priv)
2360abe80fdcSJoao Pinto {
2361abe80fdcSJoao Pinto 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
2362abe80fdcSJoao Pinto 	u32 queue;
2363abe80fdcSJoao Pinto 	u8 packet;
2364abe80fdcSJoao Pinto 
2365abe80fdcSJoao Pinto 	for (queue = 0; queue < rx_queues_count; queue++) {
2366abe80fdcSJoao Pinto 		/* no specific packet type routing specified for the queue */
2367abe80fdcSJoao Pinto 		if (priv->plat->rx_queues_cfg[queue].pkt_route == 0x0)
2368abe80fdcSJoao Pinto 			continue;
2369abe80fdcSJoao Pinto 
2370abe80fdcSJoao Pinto 		packet = priv->plat->rx_queues_cfg[queue].pkt_route;
2371c10d4c82SJose Abreu 		stmmac_rx_queue_routing(priv, priv->hw, packet, queue);
2372abe80fdcSJoao Pinto 	}
2373abe80fdcSJoao Pinto }
2374abe80fdcSJoao Pinto 
/**
 *  stmmac_mtl_configuration - Configure MTL
 *  @priv: driver private structure
 *  Description: It is used for configuring the MTL: TX queue weights,
 *  RX/TX scheduling algorithms, CBS on AVB TX queues, RX-queue-to-DMA
 *  channel mapping, RX queue enabling, queue priorities and RX routing.
 *  Multi-queue-only steps are skipped when only one queue is in use.
 */
static void stmmac_mtl_configuration(struct stmmac_priv *priv)
{
	u32 rx_queues_count = priv->plat->rx_queues_to_use;
	u32 tx_queues_count = priv->plat->tx_queues_to_use;

	/* Configure the per-queue TX weights */
	if (tx_queues_count > 1)
		stmmac_set_tx_queue_weight(priv);

	/* Configure MTL RX algorithms */
	if (rx_queues_count > 1)
		stmmac_prog_mtl_rx_algorithms(priv, priv->hw,
				priv->plat->rx_sched_algorithm);

	/* Configure MTL TX algorithms */
	if (tx_queues_count > 1)
		stmmac_prog_mtl_tx_algorithms(priv, priv->hw,
				priv->plat->tx_sched_algorithm);

	/* Configure CBS in AVB TX queues */
	if (tx_queues_count > 1)
		stmmac_configure_cbs(priv);

	/* Map RX MTL to DMA channels */
	stmmac_rx_queue_dma_chan_map(priv);

	/* Enable MAC RX Queues */
	stmmac_mac_enable_rx_queues(priv);

	/* Set RX priorities */
	if (rx_queues_count > 1)
		stmmac_mac_config_rx_queues_prio(priv);

	/* Set TX priorities */
	if (tx_queues_count > 1)
		stmmac_mac_config_tx_queues_prio(priv);

	/* Set RX routing */
	if (rx_queues_count > 1)
		stmmac_mac_config_rx_queues_routing(priv);
}
2420d0a9c9f9SJoao Pinto 
24218bf993a5SJose Abreu static void stmmac_safety_feat_configuration(struct stmmac_priv *priv)
24228bf993a5SJose Abreu {
2423c10d4c82SJose Abreu 	if (priv->dma_cap.asp) {
24248bf993a5SJose Abreu 		netdev_info(priv->dev, "Enabling Safety Features\n");
2425c10d4c82SJose Abreu 		stmmac_safety_feat_config(priv, priv->ioaddr, priv->dma_cap.asp);
24268bf993a5SJose Abreu 	} else {
24278bf993a5SJose Abreu 		netdev_info(priv->dev, "No Safety Features support found\n");
24288bf993a5SJose Abreu 	}
24298bf993a5SJose Abreu }
24308bf993a5SJose Abreu 
/**
 * stmmac_hw_setup - setup mac in a usable state.
 *  @dev : pointer to the device structure.
 *  @init_ptp : also enable the PTP reference clock and initialize PTP
 *  (skipped e.g. on resume paths where PTP is handled separately).
 *  Description:
 *  this is the main function to setup the HW in a usable state because the
 *  dma engine is reset, the core registers are configured (e.g. AXI,
 *  Checksum features, timers). The DMA is ready to start receiving and
 *  transmitting.
 *  Return value:
 *  0 on success and an appropriate (-)ve integer as defined in errno.h
 *  file on failure.
 */
static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	u32 rx_cnt = priv->plat->rx_queues_to_use;
	u32 tx_cnt = priv->plat->tx_queues_to_use;
	u32 chan;
	int ret;

	/* DMA initialization and SW reset */
	ret = stmmac_init_dma_engine(priv);
	if (ret < 0) {
		netdev_err(priv->dev, "%s: DMA engine initialization failed\n",
			   __func__);
		return ret;
	}

	/* Copy the MAC addr into the HW  */
	stmmac_set_umac_addr(priv, priv->hw, dev->dev_addr, 0);

	/* PS and related bits will be programmed according to the speed */
	if (priv->hw->pcs) {
		int speed = priv->plat->mac_port_sel_speed;

		if ((speed == SPEED_10) || (speed == SPEED_100) ||
		    (speed == SPEED_1000)) {
			priv->hw->ps = speed;
		} else {
			dev_warn(priv->device, "invalid port speed\n");
			priv->hw->ps = 0;
		}
	}

	/* Initialize the MAC Core */
	stmmac_core_init(priv, priv->hw, dev);

	/* Initialize MTL*/
	stmmac_mtl_configuration(priv);

	/* Initialize Safety Features */
	stmmac_safety_feat_configuration(priv);

	/* If RX IPC checksum offload cannot be enabled, fall back to
	 * software checksumming rather than failing the setup.
	 */
	ret = stmmac_rx_ipc(priv, priv->hw);
	if (!ret) {
		netdev_warn(priv->dev, "RX IPC Checksum Offload disabled\n");
		priv->plat->rx_coe = STMMAC_RX_COE_NONE;
		priv->hw->rx_csum = 0;
	}

	/* Enable the MAC Rx/Tx */
	stmmac_mac_set(priv, priv->ioaddr, true);

	/* Set the HW DMA mode and the COE */
	stmmac_dma_operation_mode(priv);

	stmmac_mmc_setup(priv);

	/* PTP failures are non-fatal: only warn, keep the interface usable */
	if (init_ptp) {
		ret = clk_prepare_enable(priv->plat->clk_ptp_ref);
		if (ret < 0)
			netdev_warn(priv->dev, "failed to enable PTP reference clock: %d\n", ret);

		ret = stmmac_init_ptp(priv);
		if (ret == -EOPNOTSUPP)
			netdev_warn(priv->dev, "PTP not supported by HW\n");
		else if (ret)
			netdev_warn(priv->dev, "PTP init failed\n");
	}

#ifdef CONFIG_DEBUG_FS
	ret = stmmac_init_fs(dev);
	if (ret < 0)
		netdev_warn(priv->dev, "%s: failed debugFS registration\n",
			    __func__);
#endif
	/* Start the ball rolling... */
	stmmac_start_all_dma(priv);

	priv->tx_lpi_timer = STMMAC_DEFAULT_TWT_LS;

	/* Program the RX interrupt watchdog timer when coalescing is used */
	if (priv->use_riwt) {
		ret = stmmac_rx_watchdog(priv, priv->ioaddr, MAX_DMA_RIWT, rx_cnt);
		if (!ret)
			priv->rx_riwt = MAX_DMA_RIWT;
	}

	if (priv->hw->pcs)
		stmmac_pcs_ctrl_ane(priv, priv->hw, 1, priv->hw->ps, 0);

	/* set TX and RX rings length */
	stmmac_set_rings_length(priv);

	/* Enable TSO */
	if (priv->tso) {
		for (chan = 0; chan < tx_cnt; chan++)
			stmmac_enable_tso(priv, priv->ioaddr, 1, chan);
	}

	return 0;
}
2542523f11b5SSrinivas Kandagatla 
/**
 *  stmmac_hw_teardown - undo the clock setup done by stmmac_hw_setup()
 *  @dev : pointer to the device structure.
 *  Description: disables the PTP reference clock that stmmac_hw_setup()
 *  enabled; used on the stmmac_open() error path.
 */
static void stmmac_hw_teardown(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);

	clk_disable_unprepare(priv->plat->clk_ptp_ref);
}
2549c66f6c37SThierry Reding 
/**
 *  stmmac_open - open entry point of the driver
 *  @dev : pointer to the device structure.
 *  Description:
 *  This function is the open entry point of the driver.
 *  It attaches the PHY (unless a PCS mode handles the link), allocates and
 *  initializes the DMA descriptor rings, brings up the HW, requests the
 *  IRQ lines and finally enables/starts all queues. Error paths unwind in
 *  reverse order via the labels at the bottom.
 *  Return value:
 *  0 on success and an appropriate (-)ve integer as defined in errno.h
 *  file on failure.
 */
static int stmmac_open(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	int ret;

	stmmac_check_ether_addr(priv);

	/* PHY is only attached when no PCS (RGMII/TBI/RTBI) manages the link */
	if (priv->hw->pcs != STMMAC_PCS_RGMII &&
	    priv->hw->pcs != STMMAC_PCS_TBI &&
	    priv->hw->pcs != STMMAC_PCS_RTBI) {
		ret = stmmac_init_phy(dev);
		if (ret) {
			netdev_err(priv->dev,
				   "%s: Cannot attach to PHY (error: %d)\n",
				   __func__, ret);
			return ret;
		}
	}

	/* Extra statistics */
	memset(&priv->xstats, 0, sizeof(struct stmmac_extra_stats));
	priv->xstats.threshold = tc;

	priv->dma_buf_sz = STMMAC_ALIGN(buf_sz);
	priv->rx_copybreak = STMMAC_RX_COPYBREAK;

	ret = alloc_dma_desc_resources(priv);
	if (ret < 0) {
		netdev_err(priv->dev, "%s: DMA descriptors allocation failed\n",
			   __func__);
		goto dma_desc_error;
	}

	ret = init_dma_desc_rings(dev, GFP_KERNEL);
	if (ret < 0) {
		netdev_err(priv->dev, "%s: DMA descriptors initialization failed\n",
			   __func__);
		goto init_error;
	}

	ret = stmmac_hw_setup(dev, true);
	if (ret < 0) {
		netdev_err(priv->dev, "%s: Hw setup failed\n", __func__);
		goto init_error;
	}

	stmmac_init_tx_coalesce(priv);

	if (dev->phydev)
		phy_start(dev->phydev);

	/* Request the IRQ lines */
	ret = request_irq(dev->irq, stmmac_interrupt,
			  IRQF_SHARED, dev->name, dev);
	if (unlikely(ret < 0)) {
		netdev_err(priv->dev,
			   "%s: ERROR: allocating the IRQ %d (error: %d)\n",
			   __func__, dev->irq, ret);
		goto irq_error;
	}

	/* Request the Wake IRQ in case of another line is used for WoL */
	if (priv->wol_irq != dev->irq) {
		ret = request_irq(priv->wol_irq, stmmac_interrupt,
				  IRQF_SHARED, dev->name, dev);
		if (unlikely(ret < 0)) {
			netdev_err(priv->dev,
				   "%s: ERROR: allocating the WoL IRQ %d (%d)\n",
				   __func__, priv->wol_irq, ret);
			goto wolirq_error;
		}
	}

	/* Request the LPI IRQ in case a dedicated line is used for EEE */
	if (priv->lpi_irq > 0) {
		ret = request_irq(priv->lpi_irq, stmmac_interrupt, IRQF_SHARED,
				  dev->name, dev);
		if (unlikely(ret < 0)) {
			netdev_err(priv->dev,
				   "%s: ERROR: allocating the LPI IRQ %d (%d)\n",
				   __func__, priv->lpi_irq, ret);
			goto lpiirq_error;
		}
	}

	stmmac_enable_all_queues(priv);
	stmmac_start_all_queues(priv);

	return 0;

	/* Error unwinding: each label releases what was acquired above it */
lpiirq_error:
	if (priv->wol_irq != dev->irq)
		free_irq(priv->wol_irq, dev);
wolirq_error:
	free_irq(dev->irq, dev);
irq_error:
	if (dev->phydev)
		phy_stop(dev->phydev);

	del_timer_sync(&priv->txtimer);
	stmmac_hw_teardown(dev);
init_error:
	free_dma_desc_resources(priv);
dma_desc_error:
	if (dev->phydev)
		phy_disconnect(dev->phydev);

	return ret;
}
26687ac6653aSJeff Kirsher 
/**
 *  stmmac_release - close entry point of the driver
 *  @dev : device pointer.
 *  Description:
 *  This is the stop entry point of the driver. It stops the PHY, quiesces
 *  and disables all queues, frees the IRQ lines, stops the DMA, releases
 *  the descriptor rings and finally disables the MAC.
 *  Return value: always 0.
 */
static int stmmac_release(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);

	if (priv->eee_enabled)
		del_timer_sync(&priv->eee_ctrl_timer);

	/* Stop and disconnect the PHY */
	if (dev->phydev) {
		phy_stop(dev->phydev);
		phy_disconnect(dev->phydev);
	}

	stmmac_stop_all_queues(priv);

	stmmac_disable_all_queues(priv);

	del_timer_sync(&priv->txtimer);

	/* Free the IRQ lines */
	free_irq(dev->irq, dev);
	if (priv->wol_irq != dev->irq)
		free_irq(priv->wol_irq, dev);
	if (priv->lpi_irq > 0)
		free_irq(priv->lpi_irq, dev);

	/* Stop TX/RX DMA and clear the descriptors */
	stmmac_stop_all_dma(priv);

	/* Release and free the Rx/Tx resources */
	free_dma_desc_resources(priv);

	/* Disable the MAC Rx/Tx */
	stmmac_mac_set(priv, priv->ioaddr, false);

	netif_carrier_off(dev);

#ifdef CONFIG_DEBUG_FS
	stmmac_exit_fs(dev);
#endif

	stmmac_release_ptp(priv);

	return 0;
}
27207ac6653aSJeff Kirsher 
/**
 *  stmmac_tso_allocator - fill TX descriptors for a TSO payload chunk
 *  @priv: driver private structure
 *  @des: buffer start address
 *  @total_len: total length to fill in descriptors
 *  @last_segment: condition for the last descriptor
 *  @queue: TX queue index
 *  Description:
 *  This function fills descriptor and request new descriptors according to
 *  buffer length to fill
 */
static void stmmac_tso_allocator(struct stmmac_priv *priv, unsigned int des,
				 int total_len, bool last_segment, u32 queue)
{
	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
	struct dma_desc *desc;
	u32 buff_size;
	int tmp_len;

	tmp_len = total_len;

	/* Consume one ring descriptor per TSO_MAX_BUFF_SIZE bytes */
	while (tmp_len > 0) {
		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
		WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
		desc = tx_q->dma_tx + tx_q->cur_tx;

		/* Point des0 at the current offset within the mapped buffer */
		desc->des0 = cpu_to_le32(des + (total_len - tmp_len));
		buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ?
			    TSO_MAX_BUFF_SIZE : tmp_len;

		/* Mark "last segment" only on the final descriptor of the run */
		stmmac_prepare_tso_tx_desc(priv, desc, 0, buff_size,
				0, 1,
				(last_segment) && (tmp_len <= TSO_MAX_BUFF_SIZE),
				0, 0);

		tmp_len -= TSO_MAX_BUFF_SIZE;
	}
}
2759f748be53SAlexandre TORGUE 
2760f748be53SAlexandre TORGUE /**
2761f748be53SAlexandre TORGUE  *  stmmac_tso_xmit - Tx entry point of the driver for oversized frames (TSO)
2762f748be53SAlexandre TORGUE  *  @skb : the socket buffer
2763f748be53SAlexandre TORGUE  *  @dev : device pointer
2764f748be53SAlexandre TORGUE  *  Description: this is the transmit function that is called on TSO frames
2765f748be53SAlexandre TORGUE  *  (support available on GMAC4 and newer chips).
2766f748be53SAlexandre TORGUE  *  Diagram below show the ring programming in case of TSO frames:
2767f748be53SAlexandre TORGUE  *
2768f748be53SAlexandre TORGUE  *  First Descriptor
2769f748be53SAlexandre TORGUE  *   --------
2770f748be53SAlexandre TORGUE  *   | DES0 |---> buffer1 = L2/L3/L4 header
2771f748be53SAlexandre TORGUE  *   | DES1 |---> TCP Payload (can continue on next descr...)
2772f748be53SAlexandre TORGUE  *   | DES2 |---> buffer 1 and 2 len
2773f748be53SAlexandre TORGUE  *   | DES3 |---> must set TSE, TCP hdr len-> [22:19]. TCP payload len [17:0]
2774f748be53SAlexandre TORGUE  *   --------
2775f748be53SAlexandre TORGUE  *	|
2776f748be53SAlexandre TORGUE  *     ...
2777f748be53SAlexandre TORGUE  *	|
2778f748be53SAlexandre TORGUE  *   --------
2779f748be53SAlexandre TORGUE  *   | DES0 | --| Split TCP Payload on Buffers 1 and 2
2780f748be53SAlexandre TORGUE  *   | DES1 | --|
2781f748be53SAlexandre TORGUE  *   | DES2 | --> buffer 1 and 2 len
2782f748be53SAlexandre TORGUE  *   | DES3 |
2783f748be53SAlexandre TORGUE  *   --------
2784f748be53SAlexandre TORGUE  *
2785f748be53SAlexandre TORGUE  * mss is fixed when enable tso, so w/o programming the TDES3 ctx field.
2786f748be53SAlexandre TORGUE  */
2787f748be53SAlexandre TORGUE static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
2788f748be53SAlexandre TORGUE {
2789ce736788SJoao Pinto 	struct dma_desc *desc, *first, *mss_desc = NULL;
2790f748be53SAlexandre TORGUE 	struct stmmac_priv *priv = netdev_priv(dev);
2791f748be53SAlexandre TORGUE 	int nfrags = skb_shinfo(skb)->nr_frags;
2792ce736788SJoao Pinto 	u32 queue = skb_get_queue_mapping(skb);
2793f748be53SAlexandre TORGUE 	unsigned int first_entry, des;
2794ce736788SJoao Pinto 	struct stmmac_tx_queue *tx_q;
2795ce736788SJoao Pinto 	int tmp_pay_len = 0;
2796ce736788SJoao Pinto 	u32 pay_len, mss;
2797f748be53SAlexandre TORGUE 	u8 proto_hdr_len;
2798f748be53SAlexandre TORGUE 	int i;
2799f748be53SAlexandre TORGUE 
2800ce736788SJoao Pinto 	tx_q = &priv->tx_queue[queue];
2801ce736788SJoao Pinto 
2802f748be53SAlexandre TORGUE 	/* Compute header lengths */
2803f748be53SAlexandre TORGUE 	proto_hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
2804f748be53SAlexandre TORGUE 
2805f748be53SAlexandre TORGUE 	/* Desc availability based on threshold should be enough safe */
2806ce736788SJoao Pinto 	if (unlikely(stmmac_tx_avail(priv, queue) <
2807f748be53SAlexandre TORGUE 		(((skb->len - proto_hdr_len) / TSO_MAX_BUFF_SIZE + 1)))) {
2808c22a3f48SJoao Pinto 		if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
2809c22a3f48SJoao Pinto 			netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
2810c22a3f48SJoao Pinto 								queue));
2811f748be53SAlexandre TORGUE 			/* This is a hard error, log it. */
281238ddc59dSLABBE Corentin 			netdev_err(priv->dev,
281338ddc59dSLABBE Corentin 				   "%s: Tx Ring full when queue awake\n",
281438ddc59dSLABBE Corentin 				   __func__);
2815f748be53SAlexandre TORGUE 		}
2816f748be53SAlexandre TORGUE 		return NETDEV_TX_BUSY;
2817f748be53SAlexandre TORGUE 	}
2818f748be53SAlexandre TORGUE 
2819f748be53SAlexandre TORGUE 	pay_len = skb_headlen(skb) - proto_hdr_len; /* no frags */
2820f748be53SAlexandre TORGUE 
2821f748be53SAlexandre TORGUE 	mss = skb_shinfo(skb)->gso_size;
2822f748be53SAlexandre TORGUE 
2823f748be53SAlexandre TORGUE 	/* set new MSS value if needed */
28248d212a9eSNiklas Cassel 	if (mss != tx_q->mss) {
2825ce736788SJoao Pinto 		mss_desc = tx_q->dma_tx + tx_q->cur_tx;
282642de047dSJose Abreu 		stmmac_set_mss(priv, mss_desc, mss);
28278d212a9eSNiklas Cassel 		tx_q->mss = mss;
2828ce736788SJoao Pinto 		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
2829b4c9784cSNiklas Cassel 		WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
2830f748be53SAlexandre TORGUE 	}
2831f748be53SAlexandre TORGUE 
2832f748be53SAlexandre TORGUE 	if (netif_msg_tx_queued(priv)) {
2833f748be53SAlexandre TORGUE 		pr_info("%s: tcphdrlen %d, hdr_len %d, pay_len %d, mss %d\n",
2834f748be53SAlexandre TORGUE 			__func__, tcp_hdrlen(skb), proto_hdr_len, pay_len, mss);
2835f748be53SAlexandre TORGUE 		pr_info("\tskb->len %d, skb->data_len %d\n", skb->len,
2836f748be53SAlexandre TORGUE 			skb->data_len);
2837f748be53SAlexandre TORGUE 	}
2838f748be53SAlexandre TORGUE 
2839ce736788SJoao Pinto 	first_entry = tx_q->cur_tx;
2840b4c9784cSNiklas Cassel 	WARN_ON(tx_q->tx_skbuff[first_entry]);
2841f748be53SAlexandre TORGUE 
2842ce736788SJoao Pinto 	desc = tx_q->dma_tx + first_entry;
2843f748be53SAlexandre TORGUE 	first = desc;
2844f748be53SAlexandre TORGUE 
2845f748be53SAlexandre TORGUE 	/* first descriptor: fill Headers on Buf1 */
2846f748be53SAlexandre TORGUE 	des = dma_map_single(priv->device, skb->data, skb_headlen(skb),
2847f748be53SAlexandre TORGUE 			     DMA_TO_DEVICE);
2848f748be53SAlexandre TORGUE 	if (dma_mapping_error(priv->device, des))
2849f748be53SAlexandre TORGUE 		goto dma_map_err;
2850f748be53SAlexandre TORGUE 
2851ce736788SJoao Pinto 	tx_q->tx_skbuff_dma[first_entry].buf = des;
2852ce736788SJoao Pinto 	tx_q->tx_skbuff_dma[first_entry].len = skb_headlen(skb);
2853f748be53SAlexandre TORGUE 
2854f8be0d78SMichael Weiser 	first->des0 = cpu_to_le32(des);
2855f748be53SAlexandre TORGUE 
2856f748be53SAlexandre TORGUE 	/* Fill start of payload in buff2 of first descriptor */
2857f748be53SAlexandre TORGUE 	if (pay_len)
2858f8be0d78SMichael Weiser 		first->des1 = cpu_to_le32(des + proto_hdr_len);
2859f748be53SAlexandre TORGUE 
2860f748be53SAlexandre TORGUE 	/* If needed take extra descriptors to fill the remaining payload */
2861f748be53SAlexandre TORGUE 	tmp_pay_len = pay_len - TSO_MAX_BUFF_SIZE;
2862f748be53SAlexandre TORGUE 
2863ce736788SJoao Pinto 	stmmac_tso_allocator(priv, des, tmp_pay_len, (nfrags == 0), queue);
2864f748be53SAlexandre TORGUE 
2865f748be53SAlexandre TORGUE 	/* Prepare fragments */
2866f748be53SAlexandre TORGUE 	for (i = 0; i < nfrags; i++) {
2867f748be53SAlexandre TORGUE 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2868f748be53SAlexandre TORGUE 
2869f748be53SAlexandre TORGUE 		des = skb_frag_dma_map(priv->device, frag, 0,
2870f748be53SAlexandre TORGUE 				       skb_frag_size(frag),
2871f748be53SAlexandre TORGUE 				       DMA_TO_DEVICE);
2872937071c1SThierry Reding 		if (dma_mapping_error(priv->device, des))
2873937071c1SThierry Reding 			goto dma_map_err;
2874f748be53SAlexandre TORGUE 
2875f748be53SAlexandre TORGUE 		stmmac_tso_allocator(priv, des, skb_frag_size(frag),
2876ce736788SJoao Pinto 				     (i == nfrags - 1), queue);
2877f748be53SAlexandre TORGUE 
2878ce736788SJoao Pinto 		tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des;
2879ce736788SJoao Pinto 		tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_frag_size(frag);
2880ce736788SJoao Pinto 		tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = true;
2881f748be53SAlexandre TORGUE 	}
2882f748be53SAlexandre TORGUE 
2883ce736788SJoao Pinto 	tx_q->tx_skbuff_dma[tx_q->cur_tx].last_segment = true;
2884f748be53SAlexandre TORGUE 
288505cf0d1bSNiklas Cassel 	/* Only the last descriptor gets to point to the skb. */
288605cf0d1bSNiklas Cassel 	tx_q->tx_skbuff[tx_q->cur_tx] = skb;
288705cf0d1bSNiklas Cassel 
288805cf0d1bSNiklas Cassel 	/* We've used all descriptors we need for this skb, however,
288905cf0d1bSNiklas Cassel 	 * advance cur_tx so that it references a fresh descriptor.
289005cf0d1bSNiklas Cassel 	 * ndo_start_xmit will fill this descriptor the next time it's
289105cf0d1bSNiklas Cassel 	 * called and stmmac_tx_clean may clean up to this descriptor.
289205cf0d1bSNiklas Cassel 	 */
2893ce736788SJoao Pinto 	tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
2894f748be53SAlexandre TORGUE 
2895ce736788SJoao Pinto 	if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
2896b3e51069SLABBE Corentin 		netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
289738ddc59dSLABBE Corentin 			  __func__);
2898c22a3f48SJoao Pinto 		netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
2899f748be53SAlexandre TORGUE 	}
2900f748be53SAlexandre TORGUE 
2901f748be53SAlexandre TORGUE 	dev->stats.tx_bytes += skb->len;
2902f748be53SAlexandre TORGUE 	priv->xstats.tx_tso_frames++;
2903f748be53SAlexandre TORGUE 	priv->xstats.tx_tso_nfrags += nfrags;
2904f748be53SAlexandre TORGUE 
2905f748be53SAlexandre TORGUE 	/* Manage tx mitigation */
2906f748be53SAlexandre TORGUE 	priv->tx_count_frames += nfrags + 1;
2907f748be53SAlexandre TORGUE 	if (likely(priv->tx_coal_frames > priv->tx_count_frames)) {
2908f748be53SAlexandre TORGUE 		mod_timer(&priv->txtimer,
2909f748be53SAlexandre TORGUE 			  STMMAC_COAL_TIMER(priv->tx_coal_timer));
2910f748be53SAlexandre TORGUE 	} else {
2911f748be53SAlexandre TORGUE 		priv->tx_count_frames = 0;
291242de047dSJose Abreu 		stmmac_set_tx_ic(priv, desc);
2913f748be53SAlexandre TORGUE 		priv->xstats.tx_set_ic_bit++;
2914f748be53SAlexandre TORGUE 	}
2915f748be53SAlexandre TORGUE 
2916f748be53SAlexandre TORGUE 	skb_tx_timestamp(skb);
2917f748be53SAlexandre TORGUE 
2918f748be53SAlexandre TORGUE 	if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
2919f748be53SAlexandre TORGUE 		     priv->hwts_tx_en)) {
2920f748be53SAlexandre TORGUE 		/* declare that device is doing timestamping */
2921f748be53SAlexandre TORGUE 		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
292242de047dSJose Abreu 		stmmac_enable_tx_timestamp(priv, first);
2923f748be53SAlexandre TORGUE 	}
2924f748be53SAlexandre TORGUE 
2925f748be53SAlexandre TORGUE 	/* Complete the first descriptor before granting the DMA */
292642de047dSJose Abreu 	stmmac_prepare_tso_tx_desc(priv, first, 1,
2927f748be53SAlexandre TORGUE 			proto_hdr_len,
2928f748be53SAlexandre TORGUE 			pay_len,
2929ce736788SJoao Pinto 			1, tx_q->tx_skbuff_dma[first_entry].last_segment,
2930f748be53SAlexandre TORGUE 			tcp_hdrlen(skb) / 4, (skb->len - proto_hdr_len));
2931f748be53SAlexandre TORGUE 
2932f748be53SAlexandre TORGUE 	/* If context desc is used to change MSS */
293315d2ee42SNiklas Cassel 	if (mss_desc) {
293415d2ee42SNiklas Cassel 		/* Make sure that first descriptor has been completely
293515d2ee42SNiklas Cassel 		 * written, including its own bit. This is because MSS is
293615d2ee42SNiklas Cassel 		 * actually before first descriptor, so we need to make
293715d2ee42SNiklas Cassel 		 * sure that MSS's own bit is the last thing written.
293815d2ee42SNiklas Cassel 		 */
293915d2ee42SNiklas Cassel 		dma_wmb();
294042de047dSJose Abreu 		stmmac_set_tx_owner(priv, mss_desc);
294115d2ee42SNiklas Cassel 	}
2942f748be53SAlexandre TORGUE 
2943f748be53SAlexandre TORGUE 	/* The own bit must be the latest setting done when prepare the
2944f748be53SAlexandre TORGUE 	 * descriptor and then barrier is needed to make sure that
2945f748be53SAlexandre TORGUE 	 * all is coherent before granting the DMA engine.
2946f748be53SAlexandre TORGUE 	 */
294795eb930aSNiklas Cassel 	wmb();
2948f748be53SAlexandre TORGUE 
2949f748be53SAlexandre TORGUE 	if (netif_msg_pktdata(priv)) {
2950f748be53SAlexandre TORGUE 		pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n",
2951ce736788SJoao Pinto 			__func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
2952ce736788SJoao Pinto 			tx_q->cur_tx, first, nfrags);
2953f748be53SAlexandre TORGUE 
295442de047dSJose Abreu 		stmmac_display_ring(priv, (void *)tx_q->dma_tx, DMA_TX_SIZE, 0);
2955f748be53SAlexandre TORGUE 
2956f748be53SAlexandre TORGUE 		pr_info(">>> frame to be transmitted: ");
2957f748be53SAlexandre TORGUE 		print_pkt(skb->data, skb_headlen(skb));
2958f748be53SAlexandre TORGUE 	}
2959f748be53SAlexandre TORGUE 
2960c22a3f48SJoao Pinto 	netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
2961f748be53SAlexandre TORGUE 
2962a4e887faSJose Abreu 	stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);
2963f748be53SAlexandre TORGUE 
2964f748be53SAlexandre TORGUE 	return NETDEV_TX_OK;
2965f748be53SAlexandre TORGUE 
2966f748be53SAlexandre TORGUE dma_map_err:
2967f748be53SAlexandre TORGUE 	dev_err(priv->device, "Tx dma map failed\n");
2968f748be53SAlexandre TORGUE 	dev_kfree_skb(skb);
2969f748be53SAlexandre TORGUE 	priv->dev->stats.tx_dropped++;
2970f748be53SAlexandre TORGUE 	return NETDEV_TX_OK;
2971f748be53SAlexandre TORGUE }
2972f748be53SAlexandre TORGUE 
2973f748be53SAlexandre TORGUE /**
2974732fdf0eSGiuseppe CAVALLARO  *  stmmac_xmit - Tx entry point of the driver
29757ac6653aSJeff Kirsher  *  @skb : the socket buffer
29767ac6653aSJeff Kirsher  *  @dev : device pointer
297732ceabcaSGiuseppe CAVALLARO  *  Description : this is the tx entry point of the driver.
297832ceabcaSGiuseppe CAVALLARO  *  It programs the chain or the ring and supports oversized frames
297932ceabcaSGiuseppe CAVALLARO  *  and SG feature.
29807ac6653aSJeff Kirsher  */
29817ac6653aSJeff Kirsher static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
29827ac6653aSJeff Kirsher {
29837ac6653aSJeff Kirsher 	struct stmmac_priv *priv = netdev_priv(dev);
29840e80bdc9SGiuseppe Cavallaro 	unsigned int nopaged_len = skb_headlen(skb);
29854a7d666aSGiuseppe CAVALLARO 	int i, csum_insertion = 0, is_jumbo = 0;
2986ce736788SJoao Pinto 	u32 queue = skb_get_queue_mapping(skb);
29877ac6653aSJeff Kirsher 	int nfrags = skb_shinfo(skb)->nr_frags;
298859423815SColin Ian King 	int entry;
298959423815SColin Ian King 	unsigned int first_entry;
29907ac6653aSJeff Kirsher 	struct dma_desc *desc, *first;
2991ce736788SJoao Pinto 	struct stmmac_tx_queue *tx_q;
29920e80bdc9SGiuseppe Cavallaro 	unsigned int enh_desc;
2993f748be53SAlexandre TORGUE 	unsigned int des;
2994f748be53SAlexandre TORGUE 
2995ce736788SJoao Pinto 	tx_q = &priv->tx_queue[queue];
2996ce736788SJoao Pinto 
2997f748be53SAlexandre TORGUE 	/* Manage oversized TCP frames for GMAC4 device */
2998f748be53SAlexandre TORGUE 	if (skb_is_gso(skb) && priv->tso) {
29999edfa7daSNiklas Cassel 		if (skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))
3000f748be53SAlexandre TORGUE 			return stmmac_tso_xmit(skb, dev);
3001f748be53SAlexandre TORGUE 	}
30027ac6653aSJeff Kirsher 
3003ce736788SJoao Pinto 	if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) {
3004c22a3f48SJoao Pinto 		if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
3005c22a3f48SJoao Pinto 			netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
3006c22a3f48SJoao Pinto 								queue));
30077ac6653aSJeff Kirsher 			/* This is a hard error, log it. */
300838ddc59dSLABBE Corentin 			netdev_err(priv->dev,
300938ddc59dSLABBE Corentin 				   "%s: Tx Ring full when queue awake\n",
301038ddc59dSLABBE Corentin 				   __func__);
30117ac6653aSJeff Kirsher 		}
30127ac6653aSJeff Kirsher 		return NETDEV_TX_BUSY;
30137ac6653aSJeff Kirsher 	}
30147ac6653aSJeff Kirsher 
3015d765955dSGiuseppe CAVALLARO 	if (priv->tx_path_in_lpi_mode)
3016d765955dSGiuseppe CAVALLARO 		stmmac_disable_eee_mode(priv);
3017d765955dSGiuseppe CAVALLARO 
3018ce736788SJoao Pinto 	entry = tx_q->cur_tx;
30190e80bdc9SGiuseppe Cavallaro 	first_entry = entry;
3020b4c9784cSNiklas Cassel 	WARN_ON(tx_q->tx_skbuff[first_entry]);
30217ac6653aSJeff Kirsher 
30227ac6653aSJeff Kirsher 	csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL);
30237ac6653aSJeff Kirsher 
30240e80bdc9SGiuseppe Cavallaro 	if (likely(priv->extend_desc))
3025ce736788SJoao Pinto 		desc = (struct dma_desc *)(tx_q->dma_etx + entry);
3026c24602efSGiuseppe CAVALLARO 	else
3027ce736788SJoao Pinto 		desc = tx_q->dma_tx + entry;
3028c24602efSGiuseppe CAVALLARO 
30297ac6653aSJeff Kirsher 	first = desc;
30307ac6653aSJeff Kirsher 
30310e80bdc9SGiuseppe Cavallaro 	enh_desc = priv->plat->enh_desc;
30324a7d666aSGiuseppe CAVALLARO 	/* To program the descriptors according to the size of the frame */
303329896a67SGiuseppe CAVALLARO 	if (enh_desc)
30342c520b1cSJose Abreu 		is_jumbo = stmmac_is_jumbo_frm(priv, skb->len, enh_desc);
303529896a67SGiuseppe CAVALLARO 
303663a550fcSJose Abreu 	if (unlikely(is_jumbo)) {
30372c520b1cSJose Abreu 		entry = stmmac_jumbo_frm(priv, tx_q, skb, csum_insertion);
303863a550fcSJose Abreu 		if (unlikely(entry < 0) && (entry != -EINVAL))
3039362b37beSGiuseppe CAVALLARO 			goto dma_map_err;
304029896a67SGiuseppe CAVALLARO 	}
30417ac6653aSJeff Kirsher 
30427ac6653aSJeff Kirsher 	for (i = 0; i < nfrags; i++) {
30439e903e08SEric Dumazet 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
30449e903e08SEric Dumazet 		int len = skb_frag_size(frag);
3045be434d50SGiuseppe Cavallaro 		bool last_segment = (i == (nfrags - 1));
30467ac6653aSJeff Kirsher 
3047e3ad57c9SGiuseppe Cavallaro 		entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
3048b4c9784cSNiklas Cassel 		WARN_ON(tx_q->tx_skbuff[entry]);
3049e3ad57c9SGiuseppe Cavallaro 
30500e80bdc9SGiuseppe Cavallaro 		if (likely(priv->extend_desc))
3051ce736788SJoao Pinto 			desc = (struct dma_desc *)(tx_q->dma_etx + entry);
3052c24602efSGiuseppe CAVALLARO 		else
3053ce736788SJoao Pinto 			desc = tx_q->dma_tx + entry;
30547ac6653aSJeff Kirsher 
3055f748be53SAlexandre TORGUE 		des = skb_frag_dma_map(priv->device, frag, 0, len,
3056f722380dSIan Campbell 				       DMA_TO_DEVICE);
3057f748be53SAlexandre TORGUE 		if (dma_mapping_error(priv->device, des))
3058362b37beSGiuseppe CAVALLARO 			goto dma_map_err; /* should reuse desc w/o issues */
3059362b37beSGiuseppe CAVALLARO 
3060ce736788SJoao Pinto 		tx_q->tx_skbuff_dma[entry].buf = des;
30616844171dSJose Abreu 
30626844171dSJose Abreu 		stmmac_set_desc_addr(priv, desc, des);
3063f748be53SAlexandre TORGUE 
3064ce736788SJoao Pinto 		tx_q->tx_skbuff_dma[entry].map_as_page = true;
3065ce736788SJoao Pinto 		tx_q->tx_skbuff_dma[entry].len = len;
3066ce736788SJoao Pinto 		tx_q->tx_skbuff_dma[entry].last_segment = last_segment;
30670e80bdc9SGiuseppe Cavallaro 
30680e80bdc9SGiuseppe Cavallaro 		/* Prepare the descriptor and set the own bit too */
306942de047dSJose Abreu 		stmmac_prepare_tx_desc(priv, desc, 0, len, csum_insertion,
307042de047dSJose Abreu 				priv->mode, 1, last_segment, skb->len);
30717ac6653aSJeff Kirsher 	}
30727ac6653aSJeff Kirsher 
307305cf0d1bSNiklas Cassel 	/* Only the last descriptor gets to point to the skb. */
307405cf0d1bSNiklas Cassel 	tx_q->tx_skbuff[entry] = skb;
3075e3ad57c9SGiuseppe Cavallaro 
307605cf0d1bSNiklas Cassel 	/* We've used all descriptors we need for this skb, however,
307705cf0d1bSNiklas Cassel 	 * advance cur_tx so that it references a fresh descriptor.
307805cf0d1bSNiklas Cassel 	 * ndo_start_xmit will fill this descriptor the next time it's
307905cf0d1bSNiklas Cassel 	 * called and stmmac_tx_clean may clean up to this descriptor.
308005cf0d1bSNiklas Cassel 	 */
308105cf0d1bSNiklas Cassel 	entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
3082ce736788SJoao Pinto 	tx_q->cur_tx = entry;
30837ac6653aSJeff Kirsher 
30847ac6653aSJeff Kirsher 	if (netif_msg_pktdata(priv)) {
3085d0225e7dSAlexandre TORGUE 		void *tx_head;
3086d0225e7dSAlexandre TORGUE 
308738ddc59dSLABBE Corentin 		netdev_dbg(priv->dev,
308838ddc59dSLABBE Corentin 			   "%s: curr=%d dirty=%d f=%d, e=%d, first=%p, nfrags=%d",
3089ce736788SJoao Pinto 			   __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
30900e80bdc9SGiuseppe Cavallaro 			   entry, first, nfrags);
309183d7af64SGiuseppe CAVALLARO 
3092c24602efSGiuseppe CAVALLARO 		if (priv->extend_desc)
3093ce736788SJoao Pinto 			tx_head = (void *)tx_q->dma_etx;
3094c24602efSGiuseppe CAVALLARO 		else
3095ce736788SJoao Pinto 			tx_head = (void *)tx_q->dma_tx;
3096d0225e7dSAlexandre TORGUE 
309742de047dSJose Abreu 		stmmac_display_ring(priv, tx_head, DMA_TX_SIZE, false);
3098c24602efSGiuseppe CAVALLARO 
309938ddc59dSLABBE Corentin 		netdev_dbg(priv->dev, ">>> frame to be transmitted: ");
31007ac6653aSJeff Kirsher 		print_pkt(skb->data, skb->len);
31017ac6653aSJeff Kirsher 	}
31020e80bdc9SGiuseppe Cavallaro 
3103ce736788SJoao Pinto 	if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
3104b3e51069SLABBE Corentin 		netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
3105b3e51069SLABBE Corentin 			  __func__);
3106c22a3f48SJoao Pinto 		netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
31077ac6653aSJeff Kirsher 	}
31087ac6653aSJeff Kirsher 
31097ac6653aSJeff Kirsher 	dev->stats.tx_bytes += skb->len;
31107ac6653aSJeff Kirsher 
31110e80bdc9SGiuseppe Cavallaro 	/* According to the coalesce parameter the IC bit for the latest
31120e80bdc9SGiuseppe Cavallaro 	 * segment is reset and the timer re-started to clean the tx status.
31130e80bdc9SGiuseppe Cavallaro 	 * This approach takes care about the fragments: desc is the first
31140e80bdc9SGiuseppe Cavallaro 	 * element in case of no SG.
31150e80bdc9SGiuseppe Cavallaro 	 */
31160e80bdc9SGiuseppe Cavallaro 	priv->tx_count_frames += nfrags + 1;
31174ae0169fSJose Abreu 	if (likely(priv->tx_coal_frames > priv->tx_count_frames) &&
31184ae0169fSJose Abreu 	    !priv->tx_timer_armed) {
31190e80bdc9SGiuseppe Cavallaro 		mod_timer(&priv->txtimer,
31200e80bdc9SGiuseppe Cavallaro 			  STMMAC_COAL_TIMER(priv->tx_coal_timer));
31214ae0169fSJose Abreu 		priv->tx_timer_armed = true;
31220e80bdc9SGiuseppe Cavallaro 	} else {
31230e80bdc9SGiuseppe Cavallaro 		priv->tx_count_frames = 0;
312442de047dSJose Abreu 		stmmac_set_tx_ic(priv, desc);
31250e80bdc9SGiuseppe Cavallaro 		priv->xstats.tx_set_ic_bit++;
31264ae0169fSJose Abreu 		priv->tx_timer_armed = false;
31270e80bdc9SGiuseppe Cavallaro 	}
31280e80bdc9SGiuseppe Cavallaro 
31290e80bdc9SGiuseppe Cavallaro 	skb_tx_timestamp(skb);
31300e80bdc9SGiuseppe Cavallaro 
31310e80bdc9SGiuseppe Cavallaro 	/* Ready to fill the first descriptor and set the OWN bit w/o any
31320e80bdc9SGiuseppe Cavallaro 	 * problems because all the descriptors are actually ready to be
31330e80bdc9SGiuseppe Cavallaro 	 * passed to the DMA engine.
31340e80bdc9SGiuseppe Cavallaro 	 */
31350e80bdc9SGiuseppe Cavallaro 	if (likely(!is_jumbo)) {
31360e80bdc9SGiuseppe Cavallaro 		bool last_segment = (nfrags == 0);
31370e80bdc9SGiuseppe Cavallaro 
3138f748be53SAlexandre TORGUE 		des = dma_map_single(priv->device, skb->data,
31390e80bdc9SGiuseppe Cavallaro 				     nopaged_len, DMA_TO_DEVICE);
3140f748be53SAlexandre TORGUE 		if (dma_mapping_error(priv->device, des))
31410e80bdc9SGiuseppe Cavallaro 			goto dma_map_err;
31420e80bdc9SGiuseppe Cavallaro 
3143ce736788SJoao Pinto 		tx_q->tx_skbuff_dma[first_entry].buf = des;
31446844171dSJose Abreu 
31456844171dSJose Abreu 		stmmac_set_desc_addr(priv, first, des);
3146f748be53SAlexandre TORGUE 
3147ce736788SJoao Pinto 		tx_q->tx_skbuff_dma[first_entry].len = nopaged_len;
3148ce736788SJoao Pinto 		tx_q->tx_skbuff_dma[first_entry].last_segment = last_segment;
31490e80bdc9SGiuseppe Cavallaro 
3150891434b1SRayagond Kokatanur 		if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
3151891434b1SRayagond Kokatanur 			     priv->hwts_tx_en)) {
3152891434b1SRayagond Kokatanur 			/* declare that device is doing timestamping */
3153891434b1SRayagond Kokatanur 			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
315442de047dSJose Abreu 			stmmac_enable_tx_timestamp(priv, first);
3155891434b1SRayagond Kokatanur 		}
3156891434b1SRayagond Kokatanur 
31570e80bdc9SGiuseppe Cavallaro 		/* Prepare the first descriptor setting the OWN bit too */
315842de047dSJose Abreu 		stmmac_prepare_tx_desc(priv, first, 1, nopaged_len,
315942de047dSJose Abreu 				csum_insertion, priv->mode, 1, last_segment,
316042de047dSJose Abreu 				skb->len);
31610e80bdc9SGiuseppe Cavallaro 
31620e80bdc9SGiuseppe Cavallaro 		/* The own bit must be the latest setting done when prepare the
31630e80bdc9SGiuseppe Cavallaro 		 * descriptor and then barrier is needed to make sure that
31640e80bdc9SGiuseppe Cavallaro 		 * all is coherent before granting the DMA engine.
31650e80bdc9SGiuseppe Cavallaro 		 */
316695eb930aSNiklas Cassel 		wmb();
31670e80bdc9SGiuseppe Cavallaro 	}
31687ac6653aSJeff Kirsher 
3169c22a3f48SJoao Pinto 	netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
3170f748be53SAlexandre TORGUE 
3171a4e887faSJose Abreu 	stmmac_enable_dma_transmission(priv, priv->ioaddr);
3172f1565c60SJose Abreu 	stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);
31737ac6653aSJeff Kirsher 
3174362b37beSGiuseppe CAVALLARO 	return NETDEV_TX_OK;
3175a9097a96SGiuseppe CAVALLARO 
3176362b37beSGiuseppe CAVALLARO dma_map_err:
317738ddc59dSLABBE Corentin 	netdev_err(priv->dev, "Tx DMA map failed\n");
3178362b37beSGiuseppe CAVALLARO 	dev_kfree_skb(skb);
3179362b37beSGiuseppe CAVALLARO 	priv->dev->stats.tx_dropped++;
31807ac6653aSJeff Kirsher 	return NETDEV_TX_OK;
31817ac6653aSJeff Kirsher }
31827ac6653aSJeff Kirsher 
3183b9381985SVince Bridgers static void stmmac_rx_vlan(struct net_device *dev, struct sk_buff *skb)
3184b9381985SVince Bridgers {
3185b9381985SVince Bridgers 	struct ethhdr *ehdr;
3186b9381985SVince Bridgers 	u16 vlanid;
3187b9381985SVince Bridgers 
3188b9381985SVince Bridgers 	if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) ==
3189b9381985SVince Bridgers 	    NETIF_F_HW_VLAN_CTAG_RX &&
3190b9381985SVince Bridgers 	    !__vlan_get_tag(skb, &vlanid)) {
3191b9381985SVince Bridgers 		/* pop the vlan tag */
3192b9381985SVince Bridgers 		ehdr = (struct ethhdr *)skb->data;
3193b9381985SVince Bridgers 		memmove(skb->data + VLAN_HLEN, ehdr, ETH_ALEN * 2);
3194b9381985SVince Bridgers 		skb_pull(skb, VLAN_HLEN);
3195b9381985SVince Bridgers 		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlanid);
3196b9381985SVince Bridgers 	}
3197b9381985SVince Bridgers }
3198b9381985SVince Bridgers 
3199b9381985SVince Bridgers 
320054139cf3SJoao Pinto static inline int stmmac_rx_threshold_count(struct stmmac_rx_queue *rx_q)
3201120e87f9SGiuseppe Cavallaro {
320254139cf3SJoao Pinto 	if (rx_q->rx_zeroc_thresh < STMMAC_RX_THRESH)
3203120e87f9SGiuseppe Cavallaro 		return 0;
3204120e87f9SGiuseppe Cavallaro 
3205120e87f9SGiuseppe Cavallaro 	return 1;
3206120e87f9SGiuseppe Cavallaro }
3207120e87f9SGiuseppe Cavallaro 
320832ceabcaSGiuseppe CAVALLARO /**
3209732fdf0eSGiuseppe CAVALLARO  * stmmac_rx_refill - refill used skb preallocated buffers
321032ceabcaSGiuseppe CAVALLARO  * @priv: driver private structure
321154139cf3SJoao Pinto  * @queue: RX queue index
321232ceabcaSGiuseppe CAVALLARO  * Description : this is to reallocate the skb for the reception process
321332ceabcaSGiuseppe CAVALLARO  * that is based on zero-copy.
321432ceabcaSGiuseppe CAVALLARO  */
321554139cf3SJoao Pinto static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
32167ac6653aSJeff Kirsher {
321754139cf3SJoao Pinto 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
321854139cf3SJoao Pinto 	int dirty = stmmac_rx_dirty(priv, queue);
321954139cf3SJoao Pinto 	unsigned int entry = rx_q->dirty_rx;
322054139cf3SJoao Pinto 
32217ac6653aSJeff Kirsher 	int bfsize = priv->dma_buf_sz;
32227ac6653aSJeff Kirsher 
3223e3ad57c9SGiuseppe Cavallaro 	while (dirty-- > 0) {
3224c24602efSGiuseppe CAVALLARO 		struct dma_desc *p;
3225c24602efSGiuseppe CAVALLARO 
3226c24602efSGiuseppe CAVALLARO 		if (priv->extend_desc)
322754139cf3SJoao Pinto 			p = (struct dma_desc *)(rx_q->dma_erx + entry);
3228c24602efSGiuseppe CAVALLARO 		else
322954139cf3SJoao Pinto 			p = rx_q->dma_rx + entry;
3230c24602efSGiuseppe CAVALLARO 
323154139cf3SJoao Pinto 		if (likely(!rx_q->rx_skbuff[entry])) {
32327ac6653aSJeff Kirsher 			struct sk_buff *skb;
32337ac6653aSJeff Kirsher 
3234acb600deSEric Dumazet 			skb = netdev_alloc_skb_ip_align(priv->dev, bfsize);
3235120e87f9SGiuseppe Cavallaro 			if (unlikely(!skb)) {
3236120e87f9SGiuseppe Cavallaro 				/* so for a while no zero-copy! */
323754139cf3SJoao Pinto 				rx_q->rx_zeroc_thresh = STMMAC_RX_THRESH;
3238120e87f9SGiuseppe Cavallaro 				if (unlikely(net_ratelimit()))
3239120e87f9SGiuseppe Cavallaro 					dev_err(priv->device,
3240120e87f9SGiuseppe Cavallaro 						"fail to alloc skb entry %d\n",
3241120e87f9SGiuseppe Cavallaro 						entry);
32427ac6653aSJeff Kirsher 				break;
3243120e87f9SGiuseppe Cavallaro 			}
32447ac6653aSJeff Kirsher 
324554139cf3SJoao Pinto 			rx_q->rx_skbuff[entry] = skb;
324654139cf3SJoao Pinto 			rx_q->rx_skbuff_dma[entry] =
32477ac6653aSJeff Kirsher 			    dma_map_single(priv->device, skb->data, bfsize,
32487ac6653aSJeff Kirsher 					   DMA_FROM_DEVICE);
3249362b37beSGiuseppe CAVALLARO 			if (dma_mapping_error(priv->device,
325054139cf3SJoao Pinto 					      rx_q->rx_skbuff_dma[entry])) {
325138ddc59dSLABBE Corentin 				netdev_err(priv->dev, "Rx DMA map failed\n");
3252362b37beSGiuseppe CAVALLARO 				dev_kfree_skb(skb);
3253362b37beSGiuseppe CAVALLARO 				break;
3254362b37beSGiuseppe CAVALLARO 			}
3255286a8372SGiuseppe CAVALLARO 
32566844171dSJose Abreu 			stmmac_set_desc_addr(priv, p, rx_q->rx_skbuff_dma[entry]);
32572c520b1cSJose Abreu 			stmmac_refill_desc3(priv, rx_q, p);
3258286a8372SGiuseppe CAVALLARO 
325954139cf3SJoao Pinto 			if (rx_q->rx_zeroc_thresh > 0)
326054139cf3SJoao Pinto 				rx_q->rx_zeroc_thresh--;
3261120e87f9SGiuseppe Cavallaro 
3262b3e51069SLABBE Corentin 			netif_dbg(priv, rx_status, priv->dev,
326338ddc59dSLABBE Corentin 				  "refill entry #%d\n", entry);
32647ac6653aSJeff Kirsher 		}
3265ad688cdbSPavel Machek 		dma_wmb();
3266f748be53SAlexandre TORGUE 
3267357951cdSJose Abreu 		stmmac_set_rx_owner(priv, p, priv->use_riwt);
3268f748be53SAlexandre TORGUE 
3269ad688cdbSPavel Machek 		dma_wmb();
3270e3ad57c9SGiuseppe Cavallaro 
3271e3ad57c9SGiuseppe Cavallaro 		entry = STMMAC_GET_ENTRY(entry, DMA_RX_SIZE);
32727ac6653aSJeff Kirsher 	}
327354139cf3SJoao Pinto 	rx_q->dirty_rx = entry;
32747ac6653aSJeff Kirsher }
32757ac6653aSJeff Kirsher 
327632ceabcaSGiuseppe CAVALLARO /**
3277732fdf0eSGiuseppe CAVALLARO  * stmmac_rx - manage the receive process
327832ceabcaSGiuseppe CAVALLARO  * @priv: driver private structure
327954139cf3SJoao Pinto  * @limit: napi bugget
328054139cf3SJoao Pinto  * @queue: RX queue index.
328132ceabcaSGiuseppe CAVALLARO  * Description :  this the function called by the napi poll method.
328232ceabcaSGiuseppe CAVALLARO  * It gets all the frames inside the ring.
328332ceabcaSGiuseppe CAVALLARO  */
328454139cf3SJoao Pinto static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
32857ac6653aSJeff Kirsher {
328654139cf3SJoao Pinto 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
328754139cf3SJoao Pinto 	unsigned int entry = rx_q->cur_rx;
328854139cf3SJoao Pinto 	int coe = priv->hw->rx_csum;
32897ac6653aSJeff Kirsher 	unsigned int next_entry;
32907ac6653aSJeff Kirsher 	unsigned int count = 0;
32917ac6653aSJeff Kirsher 
329283d7af64SGiuseppe CAVALLARO 	if (netif_msg_rx_status(priv)) {
3293d0225e7dSAlexandre TORGUE 		void *rx_head;
3294d0225e7dSAlexandre TORGUE 
329538ddc59dSLABBE Corentin 		netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
3296c24602efSGiuseppe CAVALLARO 		if (priv->extend_desc)
329754139cf3SJoao Pinto 			rx_head = (void *)rx_q->dma_erx;
3298c24602efSGiuseppe CAVALLARO 		else
329954139cf3SJoao Pinto 			rx_head = (void *)rx_q->dma_rx;
3300d0225e7dSAlexandre TORGUE 
330142de047dSJose Abreu 		stmmac_display_ring(priv, rx_head, DMA_RX_SIZE, true);
33027ac6653aSJeff Kirsher 	}
3303c24602efSGiuseppe CAVALLARO 	while (count < limit) {
33047ac6653aSJeff Kirsher 		int status;
33059401bb5cSGiuseppe CAVALLARO 		struct dma_desc *p;
3306ba1ffd74SGiuseppe CAVALLARO 		struct dma_desc *np;
33077ac6653aSJeff Kirsher 
3308c24602efSGiuseppe CAVALLARO 		if (priv->extend_desc)
330954139cf3SJoao Pinto 			p = (struct dma_desc *)(rx_q->dma_erx + entry);
3310c24602efSGiuseppe CAVALLARO 		else
331154139cf3SJoao Pinto 			p = rx_q->dma_rx + entry;
3312c24602efSGiuseppe CAVALLARO 
3313c1fa3212SFabrice Gasnier 		/* read the status of the incoming frame */
331442de047dSJose Abreu 		status = stmmac_rx_status(priv, &priv->dev->stats,
3315c1fa3212SFabrice Gasnier 				&priv->xstats, p);
3316c1fa3212SFabrice Gasnier 		/* check if managed by the DMA otherwise go ahead */
3317c1fa3212SFabrice Gasnier 		if (unlikely(status & dma_own))
33187ac6653aSJeff Kirsher 			break;
33197ac6653aSJeff Kirsher 
33207ac6653aSJeff Kirsher 		count++;
33217ac6653aSJeff Kirsher 
332254139cf3SJoao Pinto 		rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx, DMA_RX_SIZE);
332354139cf3SJoao Pinto 		next_entry = rx_q->cur_rx;
3324e3ad57c9SGiuseppe Cavallaro 
3325c24602efSGiuseppe CAVALLARO 		if (priv->extend_desc)
332654139cf3SJoao Pinto 			np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
3327c24602efSGiuseppe CAVALLARO 		else
332854139cf3SJoao Pinto 			np = rx_q->dma_rx + next_entry;
3329ba1ffd74SGiuseppe CAVALLARO 
3330ba1ffd74SGiuseppe CAVALLARO 		prefetch(np);
33317ac6653aSJeff Kirsher 
333242de047dSJose Abreu 		if (priv->extend_desc)
333342de047dSJose Abreu 			stmmac_rx_extended_status(priv, &priv->dev->stats,
333442de047dSJose Abreu 					&priv->xstats, rx_q->dma_erx + entry);
3335891434b1SRayagond Kokatanur 		if (unlikely(status == discard_frame)) {
33367ac6653aSJeff Kirsher 			priv->dev->stats.rx_errors++;
3337891434b1SRayagond Kokatanur 			if (priv->hwts_rx_en && !priv->extend_desc) {
33388d45e42bSLABBE Corentin 				/* DESC2 & DESC3 will be overwritten by device
3339891434b1SRayagond Kokatanur 				 * with timestamp value, hence reinitialize
3340891434b1SRayagond Kokatanur 				 * them in stmmac_rx_refill() function so that
3341891434b1SRayagond Kokatanur 				 * device can reuse it.
3342891434b1SRayagond Kokatanur 				 */
33439c8080d0SJose Abreu 				dev_kfree_skb_any(rx_q->rx_skbuff[entry]);
334454139cf3SJoao Pinto 				rx_q->rx_skbuff[entry] = NULL;
3345891434b1SRayagond Kokatanur 				dma_unmap_single(priv->device,
334654139cf3SJoao Pinto 						 rx_q->rx_skbuff_dma[entry],
3347ceb69499SGiuseppe CAVALLARO 						 priv->dma_buf_sz,
3348ceb69499SGiuseppe CAVALLARO 						 DMA_FROM_DEVICE);
3349891434b1SRayagond Kokatanur 			}
3350891434b1SRayagond Kokatanur 		} else {
33517ac6653aSJeff Kirsher 			struct sk_buff *skb;
33527ac6653aSJeff Kirsher 			int frame_len;
3353f748be53SAlexandre TORGUE 			unsigned int des;
3354f748be53SAlexandre TORGUE 
3355d2df9ea0SJose Abreu 			stmmac_get_desc_addr(priv, p, &des);
335642de047dSJose Abreu 			frame_len = stmmac_get_rx_frame_len(priv, p, coe);
3357ceb69499SGiuseppe CAVALLARO 
33588d45e42bSLABBE Corentin 			/*  If frame length is greater than skb buffer size
3359f748be53SAlexandre TORGUE 			 *  (preallocated during init) then the packet is
3360f748be53SAlexandre TORGUE 			 *  ignored
3361f748be53SAlexandre TORGUE 			 */
3362e527c4a7SGiuseppe CAVALLARO 			if (frame_len > priv->dma_buf_sz) {
336338ddc59dSLABBE Corentin 				netdev_err(priv->dev,
336438ddc59dSLABBE Corentin 					   "len %d larger than size (%d)\n",
336538ddc59dSLABBE Corentin 					   frame_len, priv->dma_buf_sz);
3366e527c4a7SGiuseppe CAVALLARO 				priv->dev->stats.rx_length_errors++;
3367e527c4a7SGiuseppe CAVALLARO 				break;
3368e527c4a7SGiuseppe CAVALLARO 			}
3369e527c4a7SGiuseppe CAVALLARO 
33707ac6653aSJeff Kirsher 			/* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3
3371ceb69499SGiuseppe CAVALLARO 			 * Type frames (LLC/LLC-SNAP)
3372565020aaSJose Abreu 			 *
3373565020aaSJose Abreu 			 * llc_snap is never checked in GMAC >= 4, so this ACS
3374565020aaSJose Abreu 			 * feature is always disabled and packets need to be
3375565020aaSJose Abreu 			 * stripped manually.
3376ceb69499SGiuseppe CAVALLARO 			 */
3377565020aaSJose Abreu 			if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00) ||
3378565020aaSJose Abreu 			    unlikely(status != llc_snap))
33797ac6653aSJeff Kirsher 				frame_len -= ETH_FCS_LEN;
33807ac6653aSJeff Kirsher 
338183d7af64SGiuseppe CAVALLARO 			if (netif_msg_rx_status(priv)) {
338238ddc59dSLABBE Corentin 				netdev_dbg(priv->dev, "\tdesc: %p [entry %d] buff=0x%x\n",
3383f748be53SAlexandre TORGUE 					   p, entry, des);
338438ddc59dSLABBE Corentin 				netdev_dbg(priv->dev, "frame size %d, COE: %d\n",
338583d7af64SGiuseppe CAVALLARO 					   frame_len, status);
338683d7af64SGiuseppe CAVALLARO 			}
338722ad3838SGiuseppe Cavallaro 
3388f748be53SAlexandre TORGUE 			/* The zero-copy is always used for all the sizes
3389f748be53SAlexandre TORGUE 			 * in case of GMAC4 because it needs
3390f748be53SAlexandre TORGUE 			 * to refill the used descriptors, always.
3391f748be53SAlexandre TORGUE 			 */
3392f748be53SAlexandre TORGUE 			if (unlikely(!priv->plat->has_gmac4 &&
3393f748be53SAlexandre TORGUE 				     ((frame_len < priv->rx_copybreak) ||
339454139cf3SJoao Pinto 				     stmmac_rx_threshold_count(rx_q)))) {
339522ad3838SGiuseppe Cavallaro 				skb = netdev_alloc_skb_ip_align(priv->dev,
339622ad3838SGiuseppe Cavallaro 								frame_len);
339722ad3838SGiuseppe Cavallaro 				if (unlikely(!skb)) {
339822ad3838SGiuseppe Cavallaro 					if (net_ratelimit())
339922ad3838SGiuseppe Cavallaro 						dev_warn(priv->device,
340022ad3838SGiuseppe Cavallaro 							 "packet dropped\n");
340122ad3838SGiuseppe Cavallaro 					priv->dev->stats.rx_dropped++;
340222ad3838SGiuseppe Cavallaro 					break;
340322ad3838SGiuseppe Cavallaro 				}
340422ad3838SGiuseppe Cavallaro 
340522ad3838SGiuseppe Cavallaro 				dma_sync_single_for_cpu(priv->device,
340654139cf3SJoao Pinto 							rx_q->rx_skbuff_dma
340722ad3838SGiuseppe Cavallaro 							[entry], frame_len,
340822ad3838SGiuseppe Cavallaro 							DMA_FROM_DEVICE);
340922ad3838SGiuseppe Cavallaro 				skb_copy_to_linear_data(skb,
341054139cf3SJoao Pinto 							rx_q->
341122ad3838SGiuseppe Cavallaro 							rx_skbuff[entry]->data,
341222ad3838SGiuseppe Cavallaro 							frame_len);
341322ad3838SGiuseppe Cavallaro 
341422ad3838SGiuseppe Cavallaro 				skb_put(skb, frame_len);
341522ad3838SGiuseppe Cavallaro 				dma_sync_single_for_device(priv->device,
341654139cf3SJoao Pinto 							   rx_q->rx_skbuff_dma
341722ad3838SGiuseppe Cavallaro 							   [entry], frame_len,
341822ad3838SGiuseppe Cavallaro 							   DMA_FROM_DEVICE);
341922ad3838SGiuseppe Cavallaro 			} else {
342054139cf3SJoao Pinto 				skb = rx_q->rx_skbuff[entry];
34217ac6653aSJeff Kirsher 				if (unlikely(!skb)) {
342238ddc59dSLABBE Corentin 					netdev_err(priv->dev,
342338ddc59dSLABBE Corentin 						   "%s: Inconsistent Rx chain\n",
34247ac6653aSJeff Kirsher 						   priv->dev->name);
34257ac6653aSJeff Kirsher 					priv->dev->stats.rx_dropped++;
34267ac6653aSJeff Kirsher 					break;
34277ac6653aSJeff Kirsher 				}
34287ac6653aSJeff Kirsher 				prefetch(skb->data - NET_IP_ALIGN);
342954139cf3SJoao Pinto 				rx_q->rx_skbuff[entry] = NULL;
343054139cf3SJoao Pinto 				rx_q->rx_zeroc_thresh++;
34317ac6653aSJeff Kirsher 
34327ac6653aSJeff Kirsher 				skb_put(skb, frame_len);
34337ac6653aSJeff Kirsher 				dma_unmap_single(priv->device,
343454139cf3SJoao Pinto 						 rx_q->rx_skbuff_dma[entry],
343522ad3838SGiuseppe Cavallaro 						 priv->dma_buf_sz,
343622ad3838SGiuseppe Cavallaro 						 DMA_FROM_DEVICE);
343722ad3838SGiuseppe Cavallaro 			}
343822ad3838SGiuseppe Cavallaro 
34397ac6653aSJeff Kirsher 			if (netif_msg_pktdata(priv)) {
344038ddc59dSLABBE Corentin 				netdev_dbg(priv->dev, "frame received (%dbytes)",
344138ddc59dSLABBE Corentin 					   frame_len);
34427ac6653aSJeff Kirsher 				print_pkt(skb->data, frame_len);
34437ac6653aSJeff Kirsher 			}
344483d7af64SGiuseppe CAVALLARO 
3445ba1ffd74SGiuseppe CAVALLARO 			stmmac_get_rx_hwtstamp(priv, p, np, skb);
3446ba1ffd74SGiuseppe CAVALLARO 
3447b9381985SVince Bridgers 			stmmac_rx_vlan(priv->dev, skb);
3448b9381985SVince Bridgers 
34497ac6653aSJeff Kirsher 			skb->protocol = eth_type_trans(skb, priv->dev);
34507ac6653aSJeff Kirsher 
3451ceb69499SGiuseppe CAVALLARO 			if (unlikely(!coe))
34527ac6653aSJeff Kirsher 				skb_checksum_none_assert(skb);
345362a2ab93SGiuseppe CAVALLARO 			else
34547ac6653aSJeff Kirsher 				skb->ip_summed = CHECKSUM_UNNECESSARY;
345562a2ab93SGiuseppe CAVALLARO 
3456c22a3f48SJoao Pinto 			napi_gro_receive(&rx_q->napi, skb);
34577ac6653aSJeff Kirsher 
34587ac6653aSJeff Kirsher 			priv->dev->stats.rx_packets++;
34597ac6653aSJeff Kirsher 			priv->dev->stats.rx_bytes += frame_len;
34607ac6653aSJeff Kirsher 		}
34617ac6653aSJeff Kirsher 		entry = next_entry;
34627ac6653aSJeff Kirsher 	}
34637ac6653aSJeff Kirsher 
346454139cf3SJoao Pinto 	stmmac_rx_refill(priv, queue);
34657ac6653aSJeff Kirsher 
34667ac6653aSJeff Kirsher 	priv->xstats.rx_pkt_n += count;
34677ac6653aSJeff Kirsher 
34687ac6653aSJeff Kirsher 	return count;
34697ac6653aSJeff Kirsher }
34707ac6653aSJeff Kirsher 
34717ac6653aSJeff Kirsher /**
34727ac6653aSJeff Kirsher  *  stmmac_poll - stmmac poll method (NAPI)
34737ac6653aSJeff Kirsher  *  @napi : pointer to the napi structure.
34747ac6653aSJeff Kirsher  *  @budget : maximum number of packets that the current CPU can receive from
34757ac6653aSJeff Kirsher  *	      all interfaces.
34767ac6653aSJeff Kirsher  *  Description :
34779125cdd1SGiuseppe CAVALLARO  *  To look at the incoming frames and clear the tx resources.
34787ac6653aSJeff Kirsher  */
34797ac6653aSJeff Kirsher static int stmmac_poll(struct napi_struct *napi, int budget)
34807ac6653aSJeff Kirsher {
3481c22a3f48SJoao Pinto 	struct stmmac_rx_queue *rx_q =
3482c22a3f48SJoao Pinto 		container_of(napi, struct stmmac_rx_queue, napi);
3483c22a3f48SJoao Pinto 	struct stmmac_priv *priv = rx_q->priv_data;
3484ce736788SJoao Pinto 	u32 tx_count = priv->plat->tx_queues_to_use;
3485c22a3f48SJoao Pinto 	u32 chan = rx_q->queue_index;
348654139cf3SJoao Pinto 	int work_done = 0;
3487c22a3f48SJoao Pinto 	u32 queue;
34887ac6653aSJeff Kirsher 
34899125cdd1SGiuseppe CAVALLARO 	priv->xstats.napi_poll++;
3490ce736788SJoao Pinto 
3491ce736788SJoao Pinto 	/* check all the queues */
3492ce736788SJoao Pinto 	for (queue = 0; queue < tx_count; queue++)
3493ce736788SJoao Pinto 		stmmac_tx_clean(priv, queue);
3494ce736788SJoao Pinto 
3495c22a3f48SJoao Pinto 	work_done = stmmac_rx(priv, budget, rx_q->queue_index);
34967ac6653aSJeff Kirsher 	if (work_done < budget) {
34976ad20165SEric Dumazet 		napi_complete_done(napi, work_done);
3498a4e887faSJose Abreu 		stmmac_enable_dma_irq(priv, priv->ioaddr, chan);
34997ac6653aSJeff Kirsher 	}
35007ac6653aSJeff Kirsher 	return work_done;
35017ac6653aSJeff Kirsher }
35027ac6653aSJeff Kirsher 
35037ac6653aSJeff Kirsher /**
35047ac6653aSJeff Kirsher  *  stmmac_tx_timeout
35057ac6653aSJeff Kirsher  *  @dev : Pointer to net device structure
35067ac6653aSJeff Kirsher  *  Description: this function is called when a packet transmission fails to
35077284a3f1SGiuseppe CAVALLARO  *   complete within a reasonable time. The driver will mark the error in the
35087ac6653aSJeff Kirsher  *   netdev structure and arrange for the device to be reset to a sane state
35097ac6653aSJeff Kirsher  *   in order to transmit a new packet.
35107ac6653aSJeff Kirsher  */
static void stmmac_tx_timeout(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);

	/* Trigger the driver's global error/reset machinery; the actual
	 * recovery runs from the service task (see stmmac_reset_subtask).
	 */
	stmmac_global_err(priv);
}
35177ac6653aSJeff Kirsher 
35187ac6653aSJeff Kirsher /**
351901789349SJiri Pirko  *  stmmac_set_rx_mode - entry point for multicast addressing
35207ac6653aSJeff Kirsher  *  @dev : pointer to the device structure
35217ac6653aSJeff Kirsher  *  Description:
35227ac6653aSJeff Kirsher  *  This function is a driver entry point which gets called by the kernel
35237ac6653aSJeff Kirsher  *  whenever multicast addresses must be enabled/disabled.
35247ac6653aSJeff Kirsher  *  Return value:
35257ac6653aSJeff Kirsher  *  void.
35267ac6653aSJeff Kirsher  */
static void stmmac_set_rx_mode(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);

	/* Reprogram the HW frame filter from the netdev's address lists. */
	stmmac_set_filter(priv, priv->hw, dev);
}
35337ac6653aSJeff Kirsher 
35347ac6653aSJeff Kirsher /**
35357ac6653aSJeff Kirsher  *  stmmac_change_mtu - entry point to change MTU size for the device.
35367ac6653aSJeff Kirsher  *  @dev : device pointer.
35377ac6653aSJeff Kirsher  *  @new_mtu : the new MTU size for the device.
35387ac6653aSJeff Kirsher  *  Description: the Maximum Transfer Unit (MTU) is used by the network layer
35397ac6653aSJeff Kirsher  *  to drive packet transmission. Ethernet has an MTU of 1500 octets
35407ac6653aSJeff Kirsher  *  (ETH_DATA_LEN). This value can be changed with ifconfig.
35417ac6653aSJeff Kirsher  *  Return value:
35427ac6653aSJeff Kirsher  *  0 on success and an appropriate (-)ve integer as defined in errno.h
35437ac6653aSJeff Kirsher  *  file on failure.
35447ac6653aSJeff Kirsher  */
static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
{
	struct stmmac_priv *priv = netdev_priv(dev);

	/* The MTU may only be changed while the interface is down. */
	if (netif_running(dev)) {
		netdev_err(priv->dev, "must be stopped to change its MTU\n");
		return -EBUSY;
	}

	dev->mtu = new_mtu;

	/* Re-evaluate offload features, some depend on the MTU
	 * (see stmmac_fix_features).
	 */
	netdev_update_features(dev);

	return 0;
}
35607ac6653aSJeff Kirsher 
3561c8f44affSMichał Mirosław static netdev_features_t stmmac_fix_features(struct net_device *dev,
3562c8f44affSMichał Mirosław 					     netdev_features_t features)
35637ac6653aSJeff Kirsher {
35647ac6653aSJeff Kirsher 	struct stmmac_priv *priv = netdev_priv(dev);
35657ac6653aSJeff Kirsher 
356638912bdbSDeepak SIKRI 	if (priv->plat->rx_coe == STMMAC_RX_COE_NONE)
35677ac6653aSJeff Kirsher 		features &= ~NETIF_F_RXCSUM;
3568d2afb5bdSGiuseppe CAVALLARO 
35697ac6653aSJeff Kirsher 	if (!priv->plat->tx_coe)
3570a188222bSTom Herbert 		features &= ~NETIF_F_CSUM_MASK;
35717ac6653aSJeff Kirsher 
35727ac6653aSJeff Kirsher 	/* Some GMAC devices have a bugged Jumbo frame support that
35737ac6653aSJeff Kirsher 	 * needs to have the Tx COE disabled for oversized frames
35747ac6653aSJeff Kirsher 	 * (due to limited buffer sizes). In this case we disable
3575ceb69499SGiuseppe CAVALLARO 	 * the TX csum insertion in the TDES and not use SF.
3576ceb69499SGiuseppe CAVALLARO 	 */
35777ac6653aSJeff Kirsher 	if (priv->plat->bugged_jumbo && (dev->mtu > ETH_DATA_LEN))
3578a188222bSTom Herbert 		features &= ~NETIF_F_CSUM_MASK;
35797ac6653aSJeff Kirsher 
3580f748be53SAlexandre TORGUE 	/* Disable tso if asked by ethtool */
3581f748be53SAlexandre TORGUE 	if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
3582f748be53SAlexandre TORGUE 		if (features & NETIF_F_TSO)
3583f748be53SAlexandre TORGUE 			priv->tso = true;
3584f748be53SAlexandre TORGUE 		else
3585f748be53SAlexandre TORGUE 			priv->tso = false;
3586f748be53SAlexandre TORGUE 	}
3587f748be53SAlexandre TORGUE 
35887ac6653aSJeff Kirsher 	return features;
35897ac6653aSJeff Kirsher }
35907ac6653aSJeff Kirsher 
3591d2afb5bdSGiuseppe CAVALLARO static int stmmac_set_features(struct net_device *netdev,
3592d2afb5bdSGiuseppe CAVALLARO 			       netdev_features_t features)
3593d2afb5bdSGiuseppe CAVALLARO {
3594d2afb5bdSGiuseppe CAVALLARO 	struct stmmac_priv *priv = netdev_priv(netdev);
3595d2afb5bdSGiuseppe CAVALLARO 
3596d2afb5bdSGiuseppe CAVALLARO 	/* Keep the COE Type in case of csum is supporting */
3597d2afb5bdSGiuseppe CAVALLARO 	if (features & NETIF_F_RXCSUM)
3598d2afb5bdSGiuseppe CAVALLARO 		priv->hw->rx_csum = priv->plat->rx_coe;
3599d2afb5bdSGiuseppe CAVALLARO 	else
3600d2afb5bdSGiuseppe CAVALLARO 		priv->hw->rx_csum = 0;
3601d2afb5bdSGiuseppe CAVALLARO 	/* No check needed because rx_coe has been set before and it will be
3602d2afb5bdSGiuseppe CAVALLARO 	 * fixed in case of issue.
3603d2afb5bdSGiuseppe CAVALLARO 	 */
3604c10d4c82SJose Abreu 	stmmac_rx_ipc(priv, priv->hw);
3605d2afb5bdSGiuseppe CAVALLARO 
3606d2afb5bdSGiuseppe CAVALLARO 	return 0;
3607d2afb5bdSGiuseppe CAVALLARO }
3608d2afb5bdSGiuseppe CAVALLARO 
360932ceabcaSGiuseppe CAVALLARO /**
361032ceabcaSGiuseppe CAVALLARO  *  stmmac_interrupt - main ISR
361132ceabcaSGiuseppe CAVALLARO  *  @irq: interrupt number.
361232ceabcaSGiuseppe CAVALLARO  *  @dev_id: to pass the net device pointer.
361332ceabcaSGiuseppe CAVALLARO  *  Description: this is the main driver interrupt service routine.
3614732fdf0eSGiuseppe CAVALLARO  *  It can call:
3615732fdf0eSGiuseppe CAVALLARO  *  o DMA service routine (to manage incoming frame reception and transmission
3616732fdf0eSGiuseppe CAVALLARO  *    status)
3617732fdf0eSGiuseppe CAVALLARO  *  o Core interrupts to manage: remote wake-up, management counter, LPI
361832ceabcaSGiuseppe CAVALLARO  *    interrupts.
361932ceabcaSGiuseppe CAVALLARO  */
36207ac6653aSJeff Kirsher static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
36217ac6653aSJeff Kirsher {
36227ac6653aSJeff Kirsher 	struct net_device *dev = (struct net_device *)dev_id;
36237ac6653aSJeff Kirsher 	struct stmmac_priv *priv = netdev_priv(dev);
36247bac4e1eSJoao Pinto 	u32 rx_cnt = priv->plat->rx_queues_to_use;
36257bac4e1eSJoao Pinto 	u32 tx_cnt = priv->plat->tx_queues_to_use;
36267bac4e1eSJoao Pinto 	u32 queues_count;
36277bac4e1eSJoao Pinto 	u32 queue;
36287bac4e1eSJoao Pinto 
36297bac4e1eSJoao Pinto 	queues_count = (rx_cnt > tx_cnt) ? rx_cnt : tx_cnt;
36307ac6653aSJeff Kirsher 
363189f7f2cfSSrinivas Kandagatla 	if (priv->irq_wake)
363289f7f2cfSSrinivas Kandagatla 		pm_wakeup_event(priv->device, 0);
363389f7f2cfSSrinivas Kandagatla 
36347ac6653aSJeff Kirsher 	if (unlikely(!dev)) {
363538ddc59dSLABBE Corentin 		netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
36367ac6653aSJeff Kirsher 		return IRQ_NONE;
36377ac6653aSJeff Kirsher 	}
36387ac6653aSJeff Kirsher 
363934877a15SJose Abreu 	/* Check if adapter is up */
364034877a15SJose Abreu 	if (test_bit(STMMAC_DOWN, &priv->state))
364134877a15SJose Abreu 		return IRQ_HANDLED;
36428bf993a5SJose Abreu 	/* Check if a fatal error happened */
36438bf993a5SJose Abreu 	if (stmmac_safety_feat_interrupt(priv))
36448bf993a5SJose Abreu 		return IRQ_HANDLED;
364534877a15SJose Abreu 
36467ac6653aSJeff Kirsher 	/* To handle GMAC own interrupts */
3647f748be53SAlexandre TORGUE 	if ((priv->plat->has_gmac) || (priv->plat->has_gmac4)) {
3648c10d4c82SJose Abreu 		int status = stmmac_host_irq_status(priv, priv->hw, &priv->xstats);
364961fac60aSJose Abreu 		int mtl_status;
36508f71a88dSJoao Pinto 
3651d765955dSGiuseppe CAVALLARO 		if (unlikely(status)) {
3652d765955dSGiuseppe CAVALLARO 			/* For LPI we need to save the tx status */
36530982a0f6SGiuseppe CAVALLARO 			if (status & CORE_IRQ_TX_PATH_IN_LPI_MODE)
3654d765955dSGiuseppe CAVALLARO 				priv->tx_path_in_lpi_mode = true;
36550982a0f6SGiuseppe CAVALLARO 			if (status & CORE_IRQ_TX_PATH_EXIT_LPI_MODE)
3656d765955dSGiuseppe CAVALLARO 				priv->tx_path_in_lpi_mode = false;
36577bac4e1eSJoao Pinto 		}
36587bac4e1eSJoao Pinto 
36597bac4e1eSJoao Pinto 		for (queue = 0; queue < queues_count; queue++) {
366061fac60aSJose Abreu 			struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
366154139cf3SJoao Pinto 
366261fac60aSJose Abreu 			mtl_status = stmmac_host_mtl_irq_status(priv, priv->hw,
366361fac60aSJose Abreu 								queue);
366461fac60aSJose Abreu 			if (mtl_status != -EINVAL)
366561fac60aSJose Abreu 				status |= mtl_status;
36667bac4e1eSJoao Pinto 
3667a4e887faSJose Abreu 			if (status & CORE_IRQ_MTL_RX_OVERFLOW)
366861fac60aSJose Abreu 				stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
366954139cf3SJoao Pinto 						       rx_q->rx_tail_addr,
36707bac4e1eSJoao Pinto 						       queue);
36717bac4e1eSJoao Pinto 		}
367270523e63SGiuseppe CAVALLARO 
367370523e63SGiuseppe CAVALLARO 		/* PCS link status */
36743fe5cadbSGiuseppe CAVALLARO 		if (priv->hw->pcs) {
367570523e63SGiuseppe CAVALLARO 			if (priv->xstats.pcs_link)
367670523e63SGiuseppe CAVALLARO 				netif_carrier_on(dev);
367770523e63SGiuseppe CAVALLARO 			else
367870523e63SGiuseppe CAVALLARO 				netif_carrier_off(dev);
367970523e63SGiuseppe CAVALLARO 		}
3680d765955dSGiuseppe CAVALLARO 	}
3681d765955dSGiuseppe CAVALLARO 
3682d765955dSGiuseppe CAVALLARO 	/* To handle DMA interrupts */
36837ac6653aSJeff Kirsher 	stmmac_dma_interrupt(priv);
36847ac6653aSJeff Kirsher 
36857ac6653aSJeff Kirsher 	return IRQ_HANDLED;
36867ac6653aSJeff Kirsher }
36877ac6653aSJeff Kirsher 
36887ac6653aSJeff Kirsher #ifdef CONFIG_NET_POLL_CONTROLLER
36897ac6653aSJeff Kirsher /* Polling receive - used by NETCONSOLE and other diagnostic tools
3690ceb69499SGiuseppe CAVALLARO  * to allow network I/O with interrupts disabled.
3691ceb69499SGiuseppe CAVALLARO  */
static void stmmac_poll_controller(struct net_device *dev)
{
	/* Mask the line and run the ISR by hand (netpoll context). */
	disable_irq(dev->irq);
	stmmac_interrupt(dev->irq, dev);
	enable_irq(dev->irq);
}
36987ac6653aSJeff Kirsher #endif
36997ac6653aSJeff Kirsher 
37007ac6653aSJeff Kirsher /**
37017ac6653aSJeff Kirsher  *  stmmac_ioctl - Entry point for the Ioctl
37027ac6653aSJeff Kirsher  *  @dev: Device pointer.
37037ac6653aSJeff Kirsher  *  @rq: An IOCTL specefic structure, that can contain a pointer to
37047ac6653aSJeff Kirsher  *  a proprietary structure used to pass information to the driver.
37057ac6653aSJeff Kirsher  *  @cmd: IOCTL command
37067ac6653aSJeff Kirsher  *  Description:
370732ceabcaSGiuseppe CAVALLARO  *  Currently it supports the phy_mii_ioctl(...) and HW time stamping.
37087ac6653aSJeff Kirsher  */
37097ac6653aSJeff Kirsher static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
37107ac6653aSJeff Kirsher {
3711891434b1SRayagond Kokatanur 	int ret = -EOPNOTSUPP;
37127ac6653aSJeff Kirsher 
37137ac6653aSJeff Kirsher 	if (!netif_running(dev))
37147ac6653aSJeff Kirsher 		return -EINVAL;
37157ac6653aSJeff Kirsher 
3716891434b1SRayagond Kokatanur 	switch (cmd) {
3717891434b1SRayagond Kokatanur 	case SIOCGMIIPHY:
3718891434b1SRayagond Kokatanur 	case SIOCGMIIREG:
3719891434b1SRayagond Kokatanur 	case SIOCSMIIREG:
3720d6d50c7eSPhilippe Reynes 		if (!dev->phydev)
37217ac6653aSJeff Kirsher 			return -EINVAL;
3722d6d50c7eSPhilippe Reynes 		ret = phy_mii_ioctl(dev->phydev, rq, cmd);
3723891434b1SRayagond Kokatanur 		break;
3724891434b1SRayagond Kokatanur 	case SIOCSHWTSTAMP:
3725891434b1SRayagond Kokatanur 		ret = stmmac_hwtstamp_ioctl(dev, rq);
3726891434b1SRayagond Kokatanur 		break;
3727891434b1SRayagond Kokatanur 	default:
3728891434b1SRayagond Kokatanur 		break;
3729891434b1SRayagond Kokatanur 	}
37307ac6653aSJeff Kirsher 
37317ac6653aSJeff Kirsher 	return ret;
37327ac6653aSJeff Kirsher }
37337ac6653aSJeff Kirsher 
37344dbbe8ddSJose Abreu static int stmmac_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
37354dbbe8ddSJose Abreu 				    void *cb_priv)
37364dbbe8ddSJose Abreu {
37374dbbe8ddSJose Abreu 	struct stmmac_priv *priv = cb_priv;
37384dbbe8ddSJose Abreu 	int ret = -EOPNOTSUPP;
37394dbbe8ddSJose Abreu 
37404dbbe8ddSJose Abreu 	stmmac_disable_all_queues(priv);
37414dbbe8ddSJose Abreu 
37424dbbe8ddSJose Abreu 	switch (type) {
37434dbbe8ddSJose Abreu 	case TC_SETUP_CLSU32:
37444dbbe8ddSJose Abreu 		if (tc_cls_can_offload_and_chain0(priv->dev, type_data))
37454dbbe8ddSJose Abreu 			ret = stmmac_tc_setup_cls_u32(priv, priv, type_data);
37464dbbe8ddSJose Abreu 		break;
37474dbbe8ddSJose Abreu 	default:
37484dbbe8ddSJose Abreu 		break;
37494dbbe8ddSJose Abreu 	}
37504dbbe8ddSJose Abreu 
37514dbbe8ddSJose Abreu 	stmmac_enable_all_queues(priv);
37524dbbe8ddSJose Abreu 	return ret;
37534dbbe8ddSJose Abreu }
37544dbbe8ddSJose Abreu 
37554dbbe8ddSJose Abreu static int stmmac_setup_tc_block(struct stmmac_priv *priv,
37564dbbe8ddSJose Abreu 				 struct tc_block_offload *f)
37574dbbe8ddSJose Abreu {
37584dbbe8ddSJose Abreu 	if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
37594dbbe8ddSJose Abreu 		return -EOPNOTSUPP;
37604dbbe8ddSJose Abreu 
37614dbbe8ddSJose Abreu 	switch (f->command) {
37624dbbe8ddSJose Abreu 	case TC_BLOCK_BIND:
37634dbbe8ddSJose Abreu 		return tcf_block_cb_register(f->block, stmmac_setup_tc_block_cb,
37644dbbe8ddSJose Abreu 				priv, priv);
37654dbbe8ddSJose Abreu 	case TC_BLOCK_UNBIND:
37664dbbe8ddSJose Abreu 		tcf_block_cb_unregister(f->block, stmmac_setup_tc_block_cb, priv);
37674dbbe8ddSJose Abreu 		return 0;
37684dbbe8ddSJose Abreu 	default:
37694dbbe8ddSJose Abreu 		return -EOPNOTSUPP;
37704dbbe8ddSJose Abreu 	}
37714dbbe8ddSJose Abreu }
37724dbbe8ddSJose Abreu 
37734dbbe8ddSJose Abreu static int stmmac_setup_tc(struct net_device *ndev, enum tc_setup_type type,
37744dbbe8ddSJose Abreu 			   void *type_data)
37754dbbe8ddSJose Abreu {
37764dbbe8ddSJose Abreu 	struct stmmac_priv *priv = netdev_priv(ndev);
37774dbbe8ddSJose Abreu 
37784dbbe8ddSJose Abreu 	switch (type) {
37794dbbe8ddSJose Abreu 	case TC_SETUP_BLOCK:
37804dbbe8ddSJose Abreu 		return stmmac_setup_tc_block(priv, type_data);
37814dbbe8ddSJose Abreu 	default:
37824dbbe8ddSJose Abreu 		return -EOPNOTSUPP;
37834dbbe8ddSJose Abreu 	}
37844dbbe8ddSJose Abreu }
37854dbbe8ddSJose Abreu 
3786a830405eSBhadram Varka static int stmmac_set_mac_address(struct net_device *ndev, void *addr)
3787a830405eSBhadram Varka {
3788a830405eSBhadram Varka 	struct stmmac_priv *priv = netdev_priv(ndev);
3789a830405eSBhadram Varka 	int ret = 0;
3790a830405eSBhadram Varka 
3791a830405eSBhadram Varka 	ret = eth_mac_addr(ndev, addr);
3792a830405eSBhadram Varka 	if (ret)
3793a830405eSBhadram Varka 		return ret;
3794a830405eSBhadram Varka 
3795c10d4c82SJose Abreu 	stmmac_set_umac_addr(priv, priv->hw, ndev->dev_addr, 0);
3796a830405eSBhadram Varka 
3797a830405eSBhadram Varka 	return ret;
3798a830405eSBhadram Varka }
3799a830405eSBhadram Varka 
380050fb4f74SGiuseppe CAVALLARO #ifdef CONFIG_DEBUG_FS
38017ac29055SGiuseppe CAVALLARO static struct dentry *stmmac_fs_dir;
38027ac29055SGiuseppe CAVALLARO 
3803c24602efSGiuseppe CAVALLARO static void sysfs_display_ring(void *head, int size, int extend_desc,
3804c24602efSGiuseppe CAVALLARO 			       struct seq_file *seq)
38057ac29055SGiuseppe CAVALLARO {
38067ac29055SGiuseppe CAVALLARO 	int i;
3807c24602efSGiuseppe CAVALLARO 	struct dma_extended_desc *ep = (struct dma_extended_desc *)head;
3808c24602efSGiuseppe CAVALLARO 	struct dma_desc *p = (struct dma_desc *)head;
38097ac29055SGiuseppe CAVALLARO 
3810c24602efSGiuseppe CAVALLARO 	for (i = 0; i < size; i++) {
3811c24602efSGiuseppe CAVALLARO 		if (extend_desc) {
3812c24602efSGiuseppe CAVALLARO 			seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
3813c24602efSGiuseppe CAVALLARO 				   i, (unsigned int)virt_to_phys(ep),
3814f8be0d78SMichael Weiser 				   le32_to_cpu(ep->basic.des0),
3815f8be0d78SMichael Weiser 				   le32_to_cpu(ep->basic.des1),
3816f8be0d78SMichael Weiser 				   le32_to_cpu(ep->basic.des2),
3817f8be0d78SMichael Weiser 				   le32_to_cpu(ep->basic.des3));
3818c24602efSGiuseppe CAVALLARO 			ep++;
3819c24602efSGiuseppe CAVALLARO 		} else {
3820c24602efSGiuseppe CAVALLARO 			seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
382166c25f6eSNiklas Cassel 				   i, (unsigned int)virt_to_phys(p),
3822f8be0d78SMichael Weiser 				   le32_to_cpu(p->des0), le32_to_cpu(p->des1),
3823f8be0d78SMichael Weiser 				   le32_to_cpu(p->des2), le32_to_cpu(p->des3));
3824c24602efSGiuseppe CAVALLARO 			p++;
3825c24602efSGiuseppe CAVALLARO 		}
38267ac29055SGiuseppe CAVALLARO 		seq_printf(seq, "\n");
38277ac29055SGiuseppe CAVALLARO 	}
3828c24602efSGiuseppe CAVALLARO }
38297ac29055SGiuseppe CAVALLARO 
3830c24602efSGiuseppe CAVALLARO static int stmmac_sysfs_ring_read(struct seq_file *seq, void *v)
3831c24602efSGiuseppe CAVALLARO {
3832c24602efSGiuseppe CAVALLARO 	struct net_device *dev = seq->private;
3833c24602efSGiuseppe CAVALLARO 	struct stmmac_priv *priv = netdev_priv(dev);
383454139cf3SJoao Pinto 	u32 rx_count = priv->plat->rx_queues_to_use;
3835ce736788SJoao Pinto 	u32 tx_count = priv->plat->tx_queues_to_use;
383654139cf3SJoao Pinto 	u32 queue;
383754139cf3SJoao Pinto 
383854139cf3SJoao Pinto 	for (queue = 0; queue < rx_count; queue++) {
383954139cf3SJoao Pinto 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
384054139cf3SJoao Pinto 
384154139cf3SJoao Pinto 		seq_printf(seq, "RX Queue %d:\n", queue);
38427ac29055SGiuseppe CAVALLARO 
3843c24602efSGiuseppe CAVALLARO 		if (priv->extend_desc) {
384454139cf3SJoao Pinto 			seq_printf(seq, "Extended descriptor ring:\n");
384554139cf3SJoao Pinto 			sysfs_display_ring((void *)rx_q->dma_erx,
384654139cf3SJoao Pinto 					   DMA_RX_SIZE, 1, seq);
384754139cf3SJoao Pinto 		} else {
384854139cf3SJoao Pinto 			seq_printf(seq, "Descriptor ring:\n");
384954139cf3SJoao Pinto 			sysfs_display_ring((void *)rx_q->dma_rx,
385054139cf3SJoao Pinto 					   DMA_RX_SIZE, 0, seq);
385154139cf3SJoao Pinto 		}
385254139cf3SJoao Pinto 	}
385354139cf3SJoao Pinto 
3854ce736788SJoao Pinto 	for (queue = 0; queue < tx_count; queue++) {
3855ce736788SJoao Pinto 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
3856ce736788SJoao Pinto 
3857ce736788SJoao Pinto 		seq_printf(seq, "TX Queue %d:\n", queue);
3858ce736788SJoao Pinto 
385954139cf3SJoao Pinto 		if (priv->extend_desc) {
3860ce736788SJoao Pinto 			seq_printf(seq, "Extended descriptor ring:\n");
3861ce736788SJoao Pinto 			sysfs_display_ring((void *)tx_q->dma_etx,
3862ce736788SJoao Pinto 					   DMA_TX_SIZE, 1, seq);
3863c24602efSGiuseppe CAVALLARO 		} else {
3864ce736788SJoao Pinto 			seq_printf(seq, "Descriptor ring:\n");
3865ce736788SJoao Pinto 			sysfs_display_ring((void *)tx_q->dma_tx,
3866ce736788SJoao Pinto 					   DMA_TX_SIZE, 0, seq);
3867ce736788SJoao Pinto 		}
38687ac29055SGiuseppe CAVALLARO 	}
38697ac29055SGiuseppe CAVALLARO 
38707ac29055SGiuseppe CAVALLARO 	return 0;
38717ac29055SGiuseppe CAVALLARO }
38727ac29055SGiuseppe CAVALLARO 
/* seq_file open hook for the descriptors_status debugfs entry. */
static int stmmac_sysfs_ring_open(struct inode *inode, struct file *file)
{
	return single_open(file, stmmac_sysfs_ring_read, inode->i_private);
}
38777ac29055SGiuseppe CAVALLARO 
387822d3efe5SPavel Machek /* Debugfs files, should appear in /sys/kernel/debug/stmmaceth/eth0 */
387922d3efe5SPavel Machek 
/* File operations for the descriptors_status debugfs entry. */
static const struct file_operations stmmac_rings_status_fops = {
	.owner = THIS_MODULE,
	.open = stmmac_sysfs_ring_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
38877ac29055SGiuseppe CAVALLARO 
/* Report the decoded DMA HW capability register via debugfs (dma_cap). */
static int stmmac_sysfs_dma_cap_read(struct seq_file *seq, void *v)
{
	struct net_device *dev = seq->private;
	struct stmmac_priv *priv = netdev_priv(dev);

	/* The capability register only exists on newer cores
	 * (see stmmac_get_hw_features).
	 */
	if (!priv->hw_cap_support) {
		seq_printf(seq, "DMA HW features not supported\n");
		return 0;
	}

	seq_printf(seq, "==============================\n");
	seq_printf(seq, "\tDMA HW features\n");
	seq_printf(seq, "==============================\n");

	seq_printf(seq, "\t10/100 Mbps: %s\n",
		   (priv->dma_cap.mbps_10_100) ? "Y" : "N");
	seq_printf(seq, "\t1000 Mbps: %s\n",
		   (priv->dma_cap.mbps_1000) ? "Y" : "N");
	seq_printf(seq, "\tHalf duplex: %s\n",
		   (priv->dma_cap.half_duplex) ? "Y" : "N");
	seq_printf(seq, "\tHash Filter: %s\n",
		   (priv->dma_cap.hash_filter) ? "Y" : "N");
	seq_printf(seq, "\tMultiple MAC address registers: %s\n",
		   (priv->dma_cap.multi_addr) ? "Y" : "N");
	seq_printf(seq, "\tPCS (TBI/SGMII/RTBI PHY interfaces): %s\n",
		   (priv->dma_cap.pcs) ? "Y" : "N");
	seq_printf(seq, "\tSMA (MDIO) Interface: %s\n",
		   (priv->dma_cap.sma_mdio) ? "Y" : "N");
	seq_printf(seq, "\tPMT Remote wake up: %s\n",
		   (priv->dma_cap.pmt_remote_wake_up) ? "Y" : "N");
	seq_printf(seq, "\tPMT Magic Frame: %s\n",
		   (priv->dma_cap.pmt_magic_frame) ? "Y" : "N");
	seq_printf(seq, "\tRMON module: %s\n",
		   (priv->dma_cap.rmon) ? "Y" : "N");
	seq_printf(seq, "\tIEEE 1588-2002 Time Stamp: %s\n",
		   (priv->dma_cap.time_stamp) ? "Y" : "N");
	seq_printf(seq, "\tIEEE 1588-2008 Advanced Time Stamp: %s\n",
		   (priv->dma_cap.atime_stamp) ? "Y" : "N");
	seq_printf(seq, "\t802.3az - Energy-Efficient Ethernet (EEE): %s\n",
		   (priv->dma_cap.eee) ? "Y" : "N");
	seq_printf(seq, "\tAV features: %s\n", (priv->dma_cap.av) ? "Y" : "N");
	seq_printf(seq, "\tChecksum Offload in TX: %s\n",
		   (priv->dma_cap.tx_coe) ? "Y" : "N");
	/* RX COE is reported differently on GMAC4+ vs older cores. */
	if (priv->synopsys_id >= DWMAC_CORE_4_00) {
		seq_printf(seq, "\tIP Checksum Offload in RX: %s\n",
			   (priv->dma_cap.rx_coe) ? "Y" : "N");
	} else {
		seq_printf(seq, "\tIP Checksum Offload (type1) in RX: %s\n",
			   (priv->dma_cap.rx_coe_type1) ? "Y" : "N");
		seq_printf(seq, "\tIP Checksum Offload (type2) in RX: %s\n",
			   (priv->dma_cap.rx_coe_type2) ? "Y" : "N");
	}
	seq_printf(seq, "\tRXFIFO > 2048bytes: %s\n",
		   (priv->dma_cap.rxfifo_over_2048) ? "Y" : "N");
	seq_printf(seq, "\tNumber of Additional RX channel: %d\n",
		   priv->dma_cap.number_rx_channel);
	seq_printf(seq, "\tNumber of Additional TX channel: %d\n",
		   priv->dma_cap.number_tx_channel);
	seq_printf(seq, "\tEnhanced descriptors: %s\n",
		   (priv->dma_cap.enh_desc) ? "Y" : "N");

	return 0;
}
3951e7434821SGiuseppe CAVALLARO 
/* seq_file open hook for the dma_cap debugfs entry. */
static int stmmac_sysfs_dma_cap_open(struct inode *inode, struct file *file)
{
	return single_open(file, stmmac_sysfs_dma_cap_read, inode->i_private);
}
3956e7434821SGiuseppe CAVALLARO 
/* File operations for the dma_cap debugfs entry. */
static const struct file_operations stmmac_dma_cap_fops = {
	.owner = THIS_MODULE,
	.open = stmmac_sysfs_dma_cap_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
3964e7434821SGiuseppe CAVALLARO 
39657ac29055SGiuseppe CAVALLARO static int stmmac_init_fs(struct net_device *dev)
39667ac29055SGiuseppe CAVALLARO {
3967466c5ac8SMathieu Olivari 	struct stmmac_priv *priv = netdev_priv(dev);
39687ac29055SGiuseppe CAVALLARO 
3969466c5ac8SMathieu Olivari 	/* Create per netdev entries */
3970466c5ac8SMathieu Olivari 	priv->dbgfs_dir = debugfs_create_dir(dev->name, stmmac_fs_dir);
3971466c5ac8SMathieu Olivari 
3972466c5ac8SMathieu Olivari 	if (!priv->dbgfs_dir || IS_ERR(priv->dbgfs_dir)) {
397338ddc59dSLABBE Corentin 		netdev_err(priv->dev, "ERROR failed to create debugfs directory\n");
39747ac29055SGiuseppe CAVALLARO 
39757ac29055SGiuseppe CAVALLARO 		return -ENOMEM;
39767ac29055SGiuseppe CAVALLARO 	}
39777ac29055SGiuseppe CAVALLARO 
39787ac29055SGiuseppe CAVALLARO 	/* Entry to report DMA RX/TX rings */
3979466c5ac8SMathieu Olivari 	priv->dbgfs_rings_status =
3980d3757ba4SJoe Perches 		debugfs_create_file("descriptors_status", 0444,
3981466c5ac8SMathieu Olivari 				    priv->dbgfs_dir, dev,
39827ac29055SGiuseppe CAVALLARO 				    &stmmac_rings_status_fops);
39837ac29055SGiuseppe CAVALLARO 
3984466c5ac8SMathieu Olivari 	if (!priv->dbgfs_rings_status || IS_ERR(priv->dbgfs_rings_status)) {
398538ddc59dSLABBE Corentin 		netdev_err(priv->dev, "ERROR creating stmmac ring debugfs file\n");
3986466c5ac8SMathieu Olivari 		debugfs_remove_recursive(priv->dbgfs_dir);
39877ac29055SGiuseppe CAVALLARO 
39887ac29055SGiuseppe CAVALLARO 		return -ENOMEM;
39897ac29055SGiuseppe CAVALLARO 	}
39907ac29055SGiuseppe CAVALLARO 
3991e7434821SGiuseppe CAVALLARO 	/* Entry to report the DMA HW features */
3992d3757ba4SJoe Perches 	priv->dbgfs_dma_cap = debugfs_create_file("dma_cap", 0444,
3993466c5ac8SMathieu Olivari 						  priv->dbgfs_dir,
3994e7434821SGiuseppe CAVALLARO 						  dev, &stmmac_dma_cap_fops);
3995e7434821SGiuseppe CAVALLARO 
3996466c5ac8SMathieu Olivari 	if (!priv->dbgfs_dma_cap || IS_ERR(priv->dbgfs_dma_cap)) {
399738ddc59dSLABBE Corentin 		netdev_err(priv->dev, "ERROR creating stmmac MMC debugfs file\n");
3998466c5ac8SMathieu Olivari 		debugfs_remove_recursive(priv->dbgfs_dir);
3999e7434821SGiuseppe CAVALLARO 
4000e7434821SGiuseppe CAVALLARO 		return -ENOMEM;
4001e7434821SGiuseppe CAVALLARO 	}
4002e7434821SGiuseppe CAVALLARO 
40037ac29055SGiuseppe CAVALLARO 	return 0;
40047ac29055SGiuseppe CAVALLARO }
40057ac29055SGiuseppe CAVALLARO 
/* Tear down this netdev's debugfs directory and everything under it. */
static void stmmac_exit_fs(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);

	debugfs_remove_recursive(priv->dbgfs_dir);
}
401250fb4f74SGiuseppe CAVALLARO #endif /* CONFIG_DEBUG_FS */
40137ac29055SGiuseppe CAVALLARO 
/* net_device callbacks wired into the network stack for stmmac devices. */
static const struct net_device_ops stmmac_netdev_ops = {
	.ndo_open = stmmac_open,
	.ndo_start_xmit = stmmac_xmit,
	.ndo_stop = stmmac_release,
	.ndo_change_mtu = stmmac_change_mtu,
	.ndo_fix_features = stmmac_fix_features,
	.ndo_set_features = stmmac_set_features,
	.ndo_set_rx_mode = stmmac_set_rx_mode,
	.ndo_tx_timeout = stmmac_tx_timeout,
	.ndo_do_ioctl = stmmac_ioctl,
	.ndo_setup_tc = stmmac_setup_tc,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = stmmac_poll_controller,
#endif
	.ndo_set_mac_address = stmmac_set_mac_address,
};
40307ac6653aSJeff Kirsher 
/* Recover from a fatal error by bouncing the interface (close + open).
 * Runs from the service workqueue, never from irq context.
 */
static void stmmac_reset_subtask(struct stmmac_priv *priv)
{
	/* Only act if a reset was actually requested... */
	if (!test_and_clear_bit(STMMAC_RESET_REQUESTED, &priv->state))
		return;
	/* ...and the device is not already being taken down. */
	if (test_bit(STMMAC_DOWN, &priv->state))
		return;

	netdev_err(priv->dev, "Reset adapter.\n");

	/* rtnl serializes against other netdev operations; the RESETING bit
	 * serializes against concurrent reset attempts.
	 */
	rtnl_lock();
	netif_trans_update(priv->dev);
	while (test_and_set_bit(STMMAC_RESETING, &priv->state))
		usleep_range(1000, 2000);

	set_bit(STMMAC_DOWN, &priv->state);
	dev_close(priv->dev);
	dev_open(priv->dev);
	clear_bit(STMMAC_DOWN, &priv->state);
	clear_bit(STMMAC_RESETING, &priv->state);
	rtnl_unlock();
}
405234877a15SJose Abreu 
405334877a15SJose Abreu static void stmmac_service_task(struct work_struct *work)
405434877a15SJose Abreu {
405534877a15SJose Abreu 	struct stmmac_priv *priv = container_of(work, struct stmmac_priv,
405634877a15SJose Abreu 			service_task);
405734877a15SJose Abreu 
405834877a15SJose Abreu 	stmmac_reset_subtask(priv);
405934877a15SJose Abreu 	clear_bit(STMMAC_SERVICE_SCHED, &priv->state);
406034877a15SJose Abreu }
406134877a15SJose Abreu 
40627ac6653aSJeff Kirsher /**
4063cf3f047bSGiuseppe CAVALLARO  *  stmmac_hw_init - Init the MAC device
406432ceabcaSGiuseppe CAVALLARO  *  @priv: driver private structure
4065732fdf0eSGiuseppe CAVALLARO  *  Description: this function is to configure the MAC device according to
4066732fdf0eSGiuseppe CAVALLARO  *  some platform parameters or the HW capability register. It prepares the
4067732fdf0eSGiuseppe CAVALLARO  *  driver to use either ring or chain modes and to setup either enhanced or
4068732fdf0eSGiuseppe CAVALLARO  *  normal descriptors.
4069cf3f047bSGiuseppe CAVALLARO  */
4070cf3f047bSGiuseppe CAVALLARO static int stmmac_hw_init(struct stmmac_priv *priv)
4071cf3f047bSGiuseppe CAVALLARO {
40725f0456b4SJose Abreu 	int ret;
4073cf3f047bSGiuseppe CAVALLARO 
40749f93ac8dSLABBE Corentin 	/* dwmac-sun8i only work in chain mode */
40759f93ac8dSLABBE Corentin 	if (priv->plat->has_sun8i)
40769f93ac8dSLABBE Corentin 		chain_mode = 1;
40775f0456b4SJose Abreu 	priv->chain_mode = chain_mode;
40789f93ac8dSLABBE Corentin 
40795f0456b4SJose Abreu 	/* Initialize HW Interface */
40805f0456b4SJose Abreu 	ret = stmmac_hwif_init(priv);
40815f0456b4SJose Abreu 	if (ret)
40825f0456b4SJose Abreu 		return ret;
40834a7d666aSGiuseppe CAVALLARO 
4084cf3f047bSGiuseppe CAVALLARO 	/* Get the HW capability (new GMAC newer than 3.50a) */
4085cf3f047bSGiuseppe CAVALLARO 	priv->hw_cap_support = stmmac_get_hw_features(priv);
4086cf3f047bSGiuseppe CAVALLARO 	if (priv->hw_cap_support) {
408738ddc59dSLABBE Corentin 		dev_info(priv->device, "DMA HW capability register supported\n");
4088cf3f047bSGiuseppe CAVALLARO 
4089cf3f047bSGiuseppe CAVALLARO 		/* We can override some gmac/dma configuration fields: e.g.
4090cf3f047bSGiuseppe CAVALLARO 		 * enh_desc, tx_coe (e.g. that are passed through the
4091cf3f047bSGiuseppe CAVALLARO 		 * platform) with the values from the HW capability
4092cf3f047bSGiuseppe CAVALLARO 		 * register (if supported).
4093cf3f047bSGiuseppe CAVALLARO 		 */
4094cf3f047bSGiuseppe CAVALLARO 		priv->plat->enh_desc = priv->dma_cap.enh_desc;
4095cf3f047bSGiuseppe CAVALLARO 		priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up;
40963fe5cadbSGiuseppe CAVALLARO 		priv->hw->pmt = priv->plat->pmt;
409738912bdbSDeepak SIKRI 
4098a8df35d4SEzequiel Garcia 		/* TXCOE doesn't work in thresh DMA mode */
4099a8df35d4SEzequiel Garcia 		if (priv->plat->force_thresh_dma_mode)
4100a8df35d4SEzequiel Garcia 			priv->plat->tx_coe = 0;
4101a8df35d4SEzequiel Garcia 		else
410238912bdbSDeepak SIKRI 			priv->plat->tx_coe = priv->dma_cap.tx_coe;
4103a8df35d4SEzequiel Garcia 
4104f748be53SAlexandre TORGUE 		/* In case of GMAC4 rx_coe is from HW cap register. */
4105f748be53SAlexandre TORGUE 		priv->plat->rx_coe = priv->dma_cap.rx_coe;
410638912bdbSDeepak SIKRI 
410738912bdbSDeepak SIKRI 		if (priv->dma_cap.rx_coe_type2)
410838912bdbSDeepak SIKRI 			priv->plat->rx_coe = STMMAC_RX_COE_TYPE2;
410938912bdbSDeepak SIKRI 		else if (priv->dma_cap.rx_coe_type1)
411038912bdbSDeepak SIKRI 			priv->plat->rx_coe = STMMAC_RX_COE_TYPE1;
411138912bdbSDeepak SIKRI 
411238ddc59dSLABBE Corentin 	} else {
411338ddc59dSLABBE Corentin 		dev_info(priv->device, "No HW DMA feature register supported\n");
411438ddc59dSLABBE Corentin 	}
4115cf3f047bSGiuseppe CAVALLARO 
4116d2afb5bdSGiuseppe CAVALLARO 	if (priv->plat->rx_coe) {
4117d2afb5bdSGiuseppe CAVALLARO 		priv->hw->rx_csum = priv->plat->rx_coe;
411838ddc59dSLABBE Corentin 		dev_info(priv->device, "RX Checksum Offload Engine supported\n");
4119f748be53SAlexandre TORGUE 		if (priv->synopsys_id < DWMAC_CORE_4_00)
412038ddc59dSLABBE Corentin 			dev_info(priv->device, "COE Type %d\n", priv->hw->rx_csum);
4121d2afb5bdSGiuseppe CAVALLARO 	}
4122cf3f047bSGiuseppe CAVALLARO 	if (priv->plat->tx_coe)
412338ddc59dSLABBE Corentin 		dev_info(priv->device, "TX Checksum insertion supported\n");
4124cf3f047bSGiuseppe CAVALLARO 
4125cf3f047bSGiuseppe CAVALLARO 	if (priv->plat->pmt) {
412638ddc59dSLABBE Corentin 		dev_info(priv->device, "Wake-Up On Lan supported\n");
4127cf3f047bSGiuseppe CAVALLARO 		device_set_wakeup_capable(priv->device, 1);
4128cf3f047bSGiuseppe CAVALLARO 	}
4129cf3f047bSGiuseppe CAVALLARO 
4130f748be53SAlexandre TORGUE 	if (priv->dma_cap.tsoen)
413138ddc59dSLABBE Corentin 		dev_info(priv->device, "TSO supported\n");
4132f748be53SAlexandre TORGUE 
4133c24602efSGiuseppe CAVALLARO 	return 0;
4134cf3f047bSGiuseppe CAVALLARO }
4135cf3f047bSGiuseppe CAVALLARO 
4136cf3f047bSGiuseppe CAVALLARO /**
4137bfab27a1SGiuseppe CAVALLARO  * stmmac_dvr_probe
4138bfab27a1SGiuseppe CAVALLARO  * @device: device pointer
4139ff3dd78cSGiuseppe CAVALLARO  * @plat_dat: platform data pointer
4140e56788cfSJoachim Eastwood  * @res: stmmac resource pointer
4141bfab27a1SGiuseppe CAVALLARO  * Description: this is the main probe function used to
4142bfab27a1SGiuseppe CAVALLARO  * call the alloc_etherdev, allocate the priv structure.
41439afec6efSAndy Shevchenko  * Return:
414415ffac73SJoachim Eastwood  * returns 0 on success, otherwise errno.
41457ac6653aSJeff Kirsher  */
414615ffac73SJoachim Eastwood int stmmac_dvr_probe(struct device *device,
4147cf3f047bSGiuseppe CAVALLARO 		     struct plat_stmmacenet_data *plat_dat,
4148e56788cfSJoachim Eastwood 		     struct stmmac_resources *res)
41497ac6653aSJeff Kirsher {
4150bfab27a1SGiuseppe CAVALLARO 	struct net_device *ndev = NULL;
4151bfab27a1SGiuseppe CAVALLARO 	struct stmmac_priv *priv;
4152c22a3f48SJoao Pinto 	int ret = 0;
4153c22a3f48SJoao Pinto 	u32 queue;
41547ac6653aSJeff Kirsher 
4155c22a3f48SJoao Pinto 	ndev = alloc_etherdev_mqs(sizeof(struct stmmac_priv),
4156c22a3f48SJoao Pinto 				  MTL_MAX_TX_QUEUES,
4157c22a3f48SJoao Pinto 				  MTL_MAX_RX_QUEUES);
415841de8d4cSJoe Perches 	if (!ndev)
415915ffac73SJoachim Eastwood 		return -ENOMEM;
41607ac6653aSJeff Kirsher 
4161bfab27a1SGiuseppe CAVALLARO 	SET_NETDEV_DEV(ndev, device);
41627ac6653aSJeff Kirsher 
4163bfab27a1SGiuseppe CAVALLARO 	priv = netdev_priv(ndev);
4164bfab27a1SGiuseppe CAVALLARO 	priv->device = device;
4165bfab27a1SGiuseppe CAVALLARO 	priv->dev = ndev;
4166bfab27a1SGiuseppe CAVALLARO 
4167bfab27a1SGiuseppe CAVALLARO 	stmmac_set_ethtool_ops(ndev);
4168cf3f047bSGiuseppe CAVALLARO 	priv->pause = pause;
4169cf3f047bSGiuseppe CAVALLARO 	priv->plat = plat_dat;
4170e56788cfSJoachim Eastwood 	priv->ioaddr = res->addr;
4171e56788cfSJoachim Eastwood 	priv->dev->base_addr = (unsigned long)res->addr;
4172e56788cfSJoachim Eastwood 
4173e56788cfSJoachim Eastwood 	priv->dev->irq = res->irq;
4174e56788cfSJoachim Eastwood 	priv->wol_irq = res->wol_irq;
4175e56788cfSJoachim Eastwood 	priv->lpi_irq = res->lpi_irq;
4176e56788cfSJoachim Eastwood 
4177e56788cfSJoachim Eastwood 	if (res->mac)
4178e56788cfSJoachim Eastwood 		memcpy(priv->dev->dev_addr, res->mac, ETH_ALEN);
4179bfab27a1SGiuseppe CAVALLARO 
4180a7a62685SJoachim Eastwood 	dev_set_drvdata(device, priv->dev);
4181803f8fc4SJoachim Eastwood 
4182cf3f047bSGiuseppe CAVALLARO 	/* Verify driver arguments */
4183cf3f047bSGiuseppe CAVALLARO 	stmmac_verify_args();
4184cf3f047bSGiuseppe CAVALLARO 
418534877a15SJose Abreu 	/* Allocate workqueue */
418634877a15SJose Abreu 	priv->wq = create_singlethread_workqueue("stmmac_wq");
418734877a15SJose Abreu 	if (!priv->wq) {
418834877a15SJose Abreu 		dev_err(priv->device, "failed to create workqueue\n");
418934877a15SJose Abreu 		goto error_wq;
419034877a15SJose Abreu 	}
419134877a15SJose Abreu 
419234877a15SJose Abreu 	INIT_WORK(&priv->service_task, stmmac_service_task);
419334877a15SJose Abreu 
4194cf3f047bSGiuseppe CAVALLARO 	/* Override with kernel parameters if supplied XXX CRS XXX
4195ceb69499SGiuseppe CAVALLARO 	 * this needs to have multiple instances
4196ceb69499SGiuseppe CAVALLARO 	 */
4197cf3f047bSGiuseppe CAVALLARO 	if ((phyaddr >= 0) && (phyaddr <= 31))
4198cf3f047bSGiuseppe CAVALLARO 		priv->plat->phy_addr = phyaddr;
4199cf3f047bSGiuseppe CAVALLARO 
420090f522a2SEugeniy Paltsev 	if (priv->plat->stmmac_rst) {
420190f522a2SEugeniy Paltsev 		ret = reset_control_assert(priv->plat->stmmac_rst);
4202f573c0b9Sjpinto 		reset_control_deassert(priv->plat->stmmac_rst);
420390f522a2SEugeniy Paltsev 		/* Some reset controllers have only reset callback instead of
420490f522a2SEugeniy Paltsev 		 * assert + deassert callbacks pair.
420590f522a2SEugeniy Paltsev 		 */
420690f522a2SEugeniy Paltsev 		if (ret == -ENOTSUPP)
420790f522a2SEugeniy Paltsev 			reset_control_reset(priv->plat->stmmac_rst);
420890f522a2SEugeniy Paltsev 	}
4209c5e4ddbdSChen-Yu Tsai 
4210cf3f047bSGiuseppe CAVALLARO 	/* Init MAC and get the capabilities */
4211c24602efSGiuseppe CAVALLARO 	ret = stmmac_hw_init(priv);
4212c24602efSGiuseppe CAVALLARO 	if (ret)
421362866e98SChen-Yu Tsai 		goto error_hw_init;
4214cf3f047bSGiuseppe CAVALLARO 
4215c22a3f48SJoao Pinto 	/* Configure real RX and TX queues */
4216c02b7a91SJoao Pinto 	netif_set_real_num_rx_queues(ndev, priv->plat->rx_queues_to_use);
4217c02b7a91SJoao Pinto 	netif_set_real_num_tx_queues(ndev, priv->plat->tx_queues_to_use);
4218c22a3f48SJoao Pinto 
4219cf3f047bSGiuseppe CAVALLARO 	ndev->netdev_ops = &stmmac_netdev_ops;
4220cf3f047bSGiuseppe CAVALLARO 
4221cf3f047bSGiuseppe CAVALLARO 	ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
4222cf3f047bSGiuseppe CAVALLARO 			    NETIF_F_RXCSUM;
4223f748be53SAlexandre TORGUE 
42244dbbe8ddSJose Abreu 	ret = stmmac_tc_init(priv, priv);
42254dbbe8ddSJose Abreu 	if (!ret) {
42264dbbe8ddSJose Abreu 		ndev->hw_features |= NETIF_F_HW_TC;
42274dbbe8ddSJose Abreu 	}
42284dbbe8ddSJose Abreu 
4229f748be53SAlexandre TORGUE 	if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
42309edfa7daSNiklas Cassel 		ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
4231f748be53SAlexandre TORGUE 		priv->tso = true;
423238ddc59dSLABBE Corentin 		dev_info(priv->device, "TSO feature enabled\n");
4233f748be53SAlexandre TORGUE 	}
4234bfab27a1SGiuseppe CAVALLARO 	ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;
4235bfab27a1SGiuseppe CAVALLARO 	ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
42367ac6653aSJeff Kirsher #ifdef STMMAC_VLAN_TAG_USED
42377ac6653aSJeff Kirsher 	/* Both mac100 and gmac support receive VLAN tag detection */
4238f646968fSPatrick McHardy 	ndev->features |= NETIF_F_HW_VLAN_CTAG_RX;
42397ac6653aSJeff Kirsher #endif
42407ac6653aSJeff Kirsher 	priv->msg_enable = netif_msg_init(debug, default_msg_level);
42417ac6653aSJeff Kirsher 
424244770e11SJarod Wilson 	/* MTU range: 46 - hw-specific max */
424344770e11SJarod Wilson 	ndev->min_mtu = ETH_ZLEN - ETH_HLEN;
424444770e11SJarod Wilson 	if ((priv->plat->enh_desc) || (priv->synopsys_id >= DWMAC_CORE_4_00))
424544770e11SJarod Wilson 		ndev->max_mtu = JUMBO_LEN;
424644770e11SJarod Wilson 	else
424744770e11SJarod Wilson 		ndev->max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN);
4248a2cd64f3SKweh, Hock Leong 	/* Will not overwrite ndev->max_mtu if plat->maxmtu > ndev->max_mtu
4249a2cd64f3SKweh, Hock Leong 	 * as well as plat->maxmtu < ndev->min_mtu which is a invalid range.
4250a2cd64f3SKweh, Hock Leong 	 */
4251a2cd64f3SKweh, Hock Leong 	if ((priv->plat->maxmtu < ndev->max_mtu) &&
4252a2cd64f3SKweh, Hock Leong 	    (priv->plat->maxmtu >= ndev->min_mtu))
425344770e11SJarod Wilson 		ndev->max_mtu = priv->plat->maxmtu;
4254a2cd64f3SKweh, Hock Leong 	else if (priv->plat->maxmtu < ndev->min_mtu)
4255b618ab45SHeiner Kallweit 		dev_warn(priv->device,
4256a2cd64f3SKweh, Hock Leong 			 "%s: warning: maxmtu having invalid value (%d)\n",
4257a2cd64f3SKweh, Hock Leong 			 __func__, priv->plat->maxmtu);
425844770e11SJarod Wilson 
42597ac6653aSJeff Kirsher 	if (flow_ctrl)
42607ac6653aSJeff Kirsher 		priv->flow_ctrl = FLOW_AUTO;	/* RX/TX pause on */
42617ac6653aSJeff Kirsher 
426262a2ab93SGiuseppe CAVALLARO 	/* Rx Watchdog is available in the COREs newer than the 3.40.
426362a2ab93SGiuseppe CAVALLARO 	 * In some case, for example on bugged HW this feature
426462a2ab93SGiuseppe CAVALLARO 	 * has to be disable and this can be done by passing the
426562a2ab93SGiuseppe CAVALLARO 	 * riwt_off field from the platform.
426662a2ab93SGiuseppe CAVALLARO 	 */
426762a2ab93SGiuseppe CAVALLARO 	if ((priv->synopsys_id >= DWMAC_CORE_3_50) && (!priv->plat->riwt_off)) {
426862a2ab93SGiuseppe CAVALLARO 		priv->use_riwt = 1;
4269b618ab45SHeiner Kallweit 		dev_info(priv->device,
4270b618ab45SHeiner Kallweit 			 "Enable RX Mitigation via HW Watchdog Timer\n");
427162a2ab93SGiuseppe CAVALLARO 	}
427262a2ab93SGiuseppe CAVALLARO 
4273c22a3f48SJoao Pinto 	for (queue = 0; queue < priv->plat->rx_queues_to_use; queue++) {
4274c22a3f48SJoao Pinto 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
4275c22a3f48SJoao Pinto 
4276c22a3f48SJoao Pinto 		netif_napi_add(ndev, &rx_q->napi, stmmac_poll,
4277c22a3f48SJoao Pinto 			       (8 * priv->plat->rx_queues_to_use));
4278c22a3f48SJoao Pinto 	}
42797ac6653aSJeff Kirsher 
428029555fa3SThierry Reding 	mutex_init(&priv->lock);
42817ac6653aSJeff Kirsher 
4282cd7201f4SGiuseppe CAVALLARO 	/* If a specific clk_csr value is passed from the platform
4283cd7201f4SGiuseppe CAVALLARO 	 * this means that the CSR Clock Range selection cannot be
4284cd7201f4SGiuseppe CAVALLARO 	 * changed at run-time and it is fixed. Viceversa the driver'll try to
4285cd7201f4SGiuseppe CAVALLARO 	 * set the MDC clock dynamically according to the csr actual
4286cd7201f4SGiuseppe CAVALLARO 	 * clock input.
4287cd7201f4SGiuseppe CAVALLARO 	 */
4288cd7201f4SGiuseppe CAVALLARO 	if (!priv->plat->clk_csr)
4289cd7201f4SGiuseppe CAVALLARO 		stmmac_clk_csr_set(priv);
4290cd7201f4SGiuseppe CAVALLARO 	else
4291cd7201f4SGiuseppe CAVALLARO 		priv->clk_csr = priv->plat->clk_csr;
4292cd7201f4SGiuseppe CAVALLARO 
4293e58bb43fSGiuseppe CAVALLARO 	stmmac_check_pcs_mode(priv);
4294e58bb43fSGiuseppe CAVALLARO 
42953fe5cadbSGiuseppe CAVALLARO 	if (priv->hw->pcs != STMMAC_PCS_RGMII  &&
42963fe5cadbSGiuseppe CAVALLARO 	    priv->hw->pcs != STMMAC_PCS_TBI &&
42973fe5cadbSGiuseppe CAVALLARO 	    priv->hw->pcs != STMMAC_PCS_RTBI) {
42984bfcbd7aSFrancesco Virlinzi 		/* MDIO bus Registration */
42994bfcbd7aSFrancesco Virlinzi 		ret = stmmac_mdio_register(ndev);
43004bfcbd7aSFrancesco Virlinzi 		if (ret < 0) {
4301b618ab45SHeiner Kallweit 			dev_err(priv->device,
430238ddc59dSLABBE Corentin 				"%s: MDIO bus (id: %d) registration failed",
43034bfcbd7aSFrancesco Virlinzi 				__func__, priv->plat->bus_id);
43046a81c26fSViresh Kumar 			goto error_mdio_register;
43054bfcbd7aSFrancesco Virlinzi 		}
4306e58bb43fSGiuseppe CAVALLARO 	}
43074bfcbd7aSFrancesco Virlinzi 
430857016590SFlorian Fainelli 	ret = register_netdev(ndev);
4309b2eb09afSFlorian Fainelli 	if (ret) {
4310b618ab45SHeiner Kallweit 		dev_err(priv->device, "%s: ERROR %i registering the device\n",
431157016590SFlorian Fainelli 			__func__, ret);
4312b2eb09afSFlorian Fainelli 		goto error_netdev_register;
4313b2eb09afSFlorian Fainelli 	}
43147ac6653aSJeff Kirsher 
431557016590SFlorian Fainelli 	return ret;
43167ac6653aSJeff Kirsher 
43176a81c26fSViresh Kumar error_netdev_register:
4318b2eb09afSFlorian Fainelli 	if (priv->hw->pcs != STMMAC_PCS_RGMII &&
4319b2eb09afSFlorian Fainelli 	    priv->hw->pcs != STMMAC_PCS_TBI &&
4320b2eb09afSFlorian Fainelli 	    priv->hw->pcs != STMMAC_PCS_RTBI)
4321b2eb09afSFlorian Fainelli 		stmmac_mdio_unregister(ndev);
43227ac6653aSJeff Kirsher error_mdio_register:
4323c22a3f48SJoao Pinto 	for (queue = 0; queue < priv->plat->rx_queues_to_use; queue++) {
4324c22a3f48SJoao Pinto 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
4325c22a3f48SJoao Pinto 
4326c22a3f48SJoao Pinto 		netif_napi_del(&rx_q->napi);
4327c22a3f48SJoao Pinto 	}
432862866e98SChen-Yu Tsai error_hw_init:
432934877a15SJose Abreu 	destroy_workqueue(priv->wq);
433034877a15SJose Abreu error_wq:
43317ac6653aSJeff Kirsher 	free_netdev(ndev);
43327ac6653aSJeff Kirsher 
433315ffac73SJoachim Eastwood 	return ret;
43347ac6653aSJeff Kirsher }
4335b2e2f0c7SAndy Shevchenko EXPORT_SYMBOL_GPL(stmmac_dvr_probe);
43367ac6653aSJeff Kirsher 
43377ac6653aSJeff Kirsher /**
43387ac6653aSJeff Kirsher  * stmmac_dvr_remove
4339f4e7bd81SJoachim Eastwood  * @dev: device pointer
43407ac6653aSJeff Kirsher  * Description: this function resets the TX/RX processes, disables the MAC RX/TX
4341bfab27a1SGiuseppe CAVALLARO  * changes the link status, releases the DMA descriptor rings.
43427ac6653aSJeff Kirsher  */
4343f4e7bd81SJoachim Eastwood int stmmac_dvr_remove(struct device *dev)
43447ac6653aSJeff Kirsher {
4345f4e7bd81SJoachim Eastwood 	struct net_device *ndev = dev_get_drvdata(dev);
43467ac6653aSJeff Kirsher 	struct stmmac_priv *priv = netdev_priv(ndev);
43477ac6653aSJeff Kirsher 
434838ddc59dSLABBE Corentin 	netdev_info(priv->dev, "%s: removing driver", __func__);
43497ac6653aSJeff Kirsher 
4350ae4f0d46SJoao Pinto 	stmmac_stop_all_dma(priv);
43517ac6653aSJeff Kirsher 
4352c10d4c82SJose Abreu 	stmmac_mac_set(priv, priv->ioaddr, false);
43537ac6653aSJeff Kirsher 	netif_carrier_off(ndev);
43547ac6653aSJeff Kirsher 	unregister_netdev(ndev);
4355f573c0b9Sjpinto 	if (priv->plat->stmmac_rst)
4356f573c0b9Sjpinto 		reset_control_assert(priv->plat->stmmac_rst);
4357f573c0b9Sjpinto 	clk_disable_unprepare(priv->plat->pclk);
4358f573c0b9Sjpinto 	clk_disable_unprepare(priv->plat->stmmac_clk);
43593fe5cadbSGiuseppe CAVALLARO 	if (priv->hw->pcs != STMMAC_PCS_RGMII &&
43603fe5cadbSGiuseppe CAVALLARO 	    priv->hw->pcs != STMMAC_PCS_TBI &&
43613fe5cadbSGiuseppe CAVALLARO 	    priv->hw->pcs != STMMAC_PCS_RTBI)
4362e743471fSBryan O'Donoghue 		stmmac_mdio_unregister(ndev);
436334877a15SJose Abreu 	destroy_workqueue(priv->wq);
436429555fa3SThierry Reding 	mutex_destroy(&priv->lock);
43657ac6653aSJeff Kirsher 	free_netdev(ndev);
43667ac6653aSJeff Kirsher 
43677ac6653aSJeff Kirsher 	return 0;
43687ac6653aSJeff Kirsher }
4369b2e2f0c7SAndy Shevchenko EXPORT_SYMBOL_GPL(stmmac_dvr_remove);
43707ac6653aSJeff Kirsher 
4371732fdf0eSGiuseppe CAVALLARO /**
4372732fdf0eSGiuseppe CAVALLARO  * stmmac_suspend - suspend callback
4373f4e7bd81SJoachim Eastwood  * @dev: device pointer
4374732fdf0eSGiuseppe CAVALLARO  * Description: this is the function to suspend the device and it is called
4375732fdf0eSGiuseppe CAVALLARO  * by the platform driver to stop the network queue, release the resources,
4376732fdf0eSGiuseppe CAVALLARO  * program the PMT register (for WoL), clean and release driver resources.
4377732fdf0eSGiuseppe CAVALLARO  */
4378f4e7bd81SJoachim Eastwood int stmmac_suspend(struct device *dev)
43797ac6653aSJeff Kirsher {
4380f4e7bd81SJoachim Eastwood 	struct net_device *ndev = dev_get_drvdata(dev);
43817ac6653aSJeff Kirsher 	struct stmmac_priv *priv = netdev_priv(ndev);
43827ac6653aSJeff Kirsher 
43837ac6653aSJeff Kirsher 	if (!ndev || !netif_running(ndev))
43847ac6653aSJeff Kirsher 		return 0;
43857ac6653aSJeff Kirsher 
4386d6d50c7eSPhilippe Reynes 	if (ndev->phydev)
4387d6d50c7eSPhilippe Reynes 		phy_stop(ndev->phydev);
4388102463b1SFrancesco Virlinzi 
438929555fa3SThierry Reding 	mutex_lock(&priv->lock);
43907ac6653aSJeff Kirsher 
43917ac6653aSJeff Kirsher 	netif_device_detach(ndev);
4392c22a3f48SJoao Pinto 	stmmac_stop_all_queues(priv);
43937ac6653aSJeff Kirsher 
4394c22a3f48SJoao Pinto 	stmmac_disable_all_queues(priv);
43957ac6653aSJeff Kirsher 
43967ac6653aSJeff Kirsher 	/* Stop TX/RX DMA */
4397ae4f0d46SJoao Pinto 	stmmac_stop_all_dma(priv);
4398c24602efSGiuseppe CAVALLARO 
43997ac6653aSJeff Kirsher 	/* Enable Power down mode by programming the PMT regs */
440089f7f2cfSSrinivas Kandagatla 	if (device_may_wakeup(priv->device)) {
4401c10d4c82SJose Abreu 		stmmac_pmt(priv, priv->hw, priv->wolopts);
440289f7f2cfSSrinivas Kandagatla 		priv->irq_wake = 1;
440389f7f2cfSSrinivas Kandagatla 	} else {
4404c10d4c82SJose Abreu 		stmmac_mac_set(priv, priv->ioaddr, false);
4405db88f10aSSrinivas Kandagatla 		pinctrl_pm_select_sleep_state(priv->device);
4406ba1377ffSGiuseppe CAVALLARO 		/* Disable clock in case of PWM is off */
4407f573c0b9Sjpinto 		clk_disable(priv->plat->pclk);
4408f573c0b9Sjpinto 		clk_disable(priv->plat->stmmac_clk);
4409ba1377ffSGiuseppe CAVALLARO 	}
441029555fa3SThierry Reding 	mutex_unlock(&priv->lock);
44112d871aa0SVince Bridgers 
44124d869b03SLABBE Corentin 	priv->oldlink = false;
4413bd00632cSLABBE Corentin 	priv->speed = SPEED_UNKNOWN;
4414bd00632cSLABBE Corentin 	priv->oldduplex = DUPLEX_UNKNOWN;
44157ac6653aSJeff Kirsher 	return 0;
44167ac6653aSJeff Kirsher }
4417b2e2f0c7SAndy Shevchenko EXPORT_SYMBOL_GPL(stmmac_suspend);
44187ac6653aSJeff Kirsher 
4419732fdf0eSGiuseppe CAVALLARO /**
442054139cf3SJoao Pinto  * stmmac_reset_queues_param - reset queue parameters
442154139cf3SJoao Pinto  * @dev: device pointer
442254139cf3SJoao Pinto  */
442354139cf3SJoao Pinto static void stmmac_reset_queues_param(struct stmmac_priv *priv)
442454139cf3SJoao Pinto {
442554139cf3SJoao Pinto 	u32 rx_cnt = priv->plat->rx_queues_to_use;
4426ce736788SJoao Pinto 	u32 tx_cnt = priv->plat->tx_queues_to_use;
442754139cf3SJoao Pinto 	u32 queue;
442854139cf3SJoao Pinto 
442954139cf3SJoao Pinto 	for (queue = 0; queue < rx_cnt; queue++) {
443054139cf3SJoao Pinto 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
443154139cf3SJoao Pinto 
443254139cf3SJoao Pinto 		rx_q->cur_rx = 0;
443354139cf3SJoao Pinto 		rx_q->dirty_rx = 0;
443454139cf3SJoao Pinto 	}
443554139cf3SJoao Pinto 
4436ce736788SJoao Pinto 	for (queue = 0; queue < tx_cnt; queue++) {
4437ce736788SJoao Pinto 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
4438ce736788SJoao Pinto 
4439ce736788SJoao Pinto 		tx_q->cur_tx = 0;
4440ce736788SJoao Pinto 		tx_q->dirty_tx = 0;
44418d212a9eSNiklas Cassel 		tx_q->mss = 0;
4442ce736788SJoao Pinto 	}
444354139cf3SJoao Pinto }
444454139cf3SJoao Pinto 
444554139cf3SJoao Pinto /**
4446732fdf0eSGiuseppe CAVALLARO  * stmmac_resume - resume callback
4447f4e7bd81SJoachim Eastwood  * @dev: device pointer
4448732fdf0eSGiuseppe CAVALLARO  * Description: when resume this function is invoked to setup the DMA and CORE
4449732fdf0eSGiuseppe CAVALLARO  * in a usable state.
4450732fdf0eSGiuseppe CAVALLARO  */
4451f4e7bd81SJoachim Eastwood int stmmac_resume(struct device *dev)
44527ac6653aSJeff Kirsher {
4453f4e7bd81SJoachim Eastwood 	struct net_device *ndev = dev_get_drvdata(dev);
44547ac6653aSJeff Kirsher 	struct stmmac_priv *priv = netdev_priv(ndev);
44557ac6653aSJeff Kirsher 
44567ac6653aSJeff Kirsher 	if (!netif_running(ndev))
44577ac6653aSJeff Kirsher 		return 0;
44587ac6653aSJeff Kirsher 
44597ac6653aSJeff Kirsher 	/* Power Down bit, into the PM register, is cleared
44607ac6653aSJeff Kirsher 	 * automatically as soon as a magic packet or a Wake-up frame
44617ac6653aSJeff Kirsher 	 * is received. Anyway, it's better to manually clear
44627ac6653aSJeff Kirsher 	 * this bit because it can generate problems while resuming
4463ceb69499SGiuseppe CAVALLARO 	 * from another devices (e.g. serial console).
4464ceb69499SGiuseppe CAVALLARO 	 */
4465623997fbSSrinivas Kandagatla 	if (device_may_wakeup(priv->device)) {
446629555fa3SThierry Reding 		mutex_lock(&priv->lock);
4467c10d4c82SJose Abreu 		stmmac_pmt(priv, priv->hw, 0);
446829555fa3SThierry Reding 		mutex_unlock(&priv->lock);
446989f7f2cfSSrinivas Kandagatla 		priv->irq_wake = 0;
4470623997fbSSrinivas Kandagatla 	} else {
4471db88f10aSSrinivas Kandagatla 		pinctrl_pm_select_default_state(priv->device);
44728d45e42bSLABBE Corentin 		/* enable the clk previously disabled */
4473f573c0b9Sjpinto 		clk_enable(priv->plat->stmmac_clk);
4474f573c0b9Sjpinto 		clk_enable(priv->plat->pclk);
4475623997fbSSrinivas Kandagatla 		/* reset the phy so that it's ready */
4476623997fbSSrinivas Kandagatla 		if (priv->mii)
4477623997fbSSrinivas Kandagatla 			stmmac_mdio_reset(priv->mii);
4478623997fbSSrinivas Kandagatla 	}
44797ac6653aSJeff Kirsher 
44807ac6653aSJeff Kirsher 	netif_device_attach(ndev);
44817ac6653aSJeff Kirsher 
448229555fa3SThierry Reding 	mutex_lock(&priv->lock);
4483f55d84b0SVincent Palatin 
448454139cf3SJoao Pinto 	stmmac_reset_queues_param(priv);
448554139cf3SJoao Pinto 
4486ae79a639SGiuseppe CAVALLARO 	stmmac_clear_descriptors(priv);
4487ae79a639SGiuseppe CAVALLARO 
4488fe131929SHuacai Chen 	stmmac_hw_setup(ndev, false);
4489777da230SGiuseppe CAVALLARO 	stmmac_init_tx_coalesce(priv);
4490ac316c78SGiuseppe CAVALLARO 	stmmac_set_rx_mode(ndev);
44917ac6653aSJeff Kirsher 
4492c22a3f48SJoao Pinto 	stmmac_enable_all_queues(priv);
44937ac6653aSJeff Kirsher 
4494c22a3f48SJoao Pinto 	stmmac_start_all_queues(priv);
44957ac6653aSJeff Kirsher 
449629555fa3SThierry Reding 	mutex_unlock(&priv->lock);
4497102463b1SFrancesco Virlinzi 
4498d6d50c7eSPhilippe Reynes 	if (ndev->phydev)
4499d6d50c7eSPhilippe Reynes 		phy_start(ndev->phydev);
4500102463b1SFrancesco Virlinzi 
45017ac6653aSJeff Kirsher 	return 0;
45027ac6653aSJeff Kirsher }
4503b2e2f0c7SAndy Shevchenko EXPORT_SYMBOL_GPL(stmmac_resume);
4504ba27ec66SGiuseppe CAVALLARO 
45057ac6653aSJeff Kirsher #ifndef MODULE
45067ac6653aSJeff Kirsher static int __init stmmac_cmdline_opt(char *str)
45077ac6653aSJeff Kirsher {
45087ac6653aSJeff Kirsher 	char *opt;
45097ac6653aSJeff Kirsher 
45107ac6653aSJeff Kirsher 	if (!str || !*str)
45117ac6653aSJeff Kirsher 		return -EINVAL;
45127ac6653aSJeff Kirsher 	while ((opt = strsep(&str, ",")) != NULL) {
45137ac6653aSJeff Kirsher 		if (!strncmp(opt, "debug:", 6)) {
4514ea2ab871SGiuseppe CAVALLARO 			if (kstrtoint(opt + 6, 0, &debug))
45157ac6653aSJeff Kirsher 				goto err;
45167ac6653aSJeff Kirsher 		} else if (!strncmp(opt, "phyaddr:", 8)) {
4517ea2ab871SGiuseppe CAVALLARO 			if (kstrtoint(opt + 8, 0, &phyaddr))
45187ac6653aSJeff Kirsher 				goto err;
45197ac6653aSJeff Kirsher 		} else if (!strncmp(opt, "buf_sz:", 7)) {
4520ea2ab871SGiuseppe CAVALLARO 			if (kstrtoint(opt + 7, 0, &buf_sz))
45217ac6653aSJeff Kirsher 				goto err;
45227ac6653aSJeff Kirsher 		} else if (!strncmp(opt, "tc:", 3)) {
4523ea2ab871SGiuseppe CAVALLARO 			if (kstrtoint(opt + 3, 0, &tc))
45247ac6653aSJeff Kirsher 				goto err;
45257ac6653aSJeff Kirsher 		} else if (!strncmp(opt, "watchdog:", 9)) {
4526ea2ab871SGiuseppe CAVALLARO 			if (kstrtoint(opt + 9, 0, &watchdog))
45277ac6653aSJeff Kirsher 				goto err;
45287ac6653aSJeff Kirsher 		} else if (!strncmp(opt, "flow_ctrl:", 10)) {
4529ea2ab871SGiuseppe CAVALLARO 			if (kstrtoint(opt + 10, 0, &flow_ctrl))
45307ac6653aSJeff Kirsher 				goto err;
45317ac6653aSJeff Kirsher 		} else if (!strncmp(opt, "pause:", 6)) {
4532ea2ab871SGiuseppe CAVALLARO 			if (kstrtoint(opt + 6, 0, &pause))
45337ac6653aSJeff Kirsher 				goto err;
4534506f669cSGiuseppe CAVALLARO 		} else if (!strncmp(opt, "eee_timer:", 10)) {
4535d765955dSGiuseppe CAVALLARO 			if (kstrtoint(opt + 10, 0, &eee_timer))
4536d765955dSGiuseppe CAVALLARO 				goto err;
45374a7d666aSGiuseppe CAVALLARO 		} else if (!strncmp(opt, "chain_mode:", 11)) {
45384a7d666aSGiuseppe CAVALLARO 			if (kstrtoint(opt + 11, 0, &chain_mode))
45394a7d666aSGiuseppe CAVALLARO 				goto err;
45407ac6653aSJeff Kirsher 		}
45417ac6653aSJeff Kirsher 	}
45427ac6653aSJeff Kirsher 	return 0;
45437ac6653aSJeff Kirsher 
45447ac6653aSJeff Kirsher err:
45457ac6653aSJeff Kirsher 	pr_err("%s: ERROR broken module parameter conversion", __func__);
45467ac6653aSJeff Kirsher 	return -EINVAL;
45477ac6653aSJeff Kirsher }
45487ac6653aSJeff Kirsher 
45497ac6653aSJeff Kirsher __setup("stmmaceth=", stmmac_cmdline_opt);
4550ceb69499SGiuseppe CAVALLARO #endif /* MODULE */
45516fc0d0f2SGiuseppe Cavallaro 
4552466c5ac8SMathieu Olivari static int __init stmmac_init(void)
4553466c5ac8SMathieu Olivari {
4554466c5ac8SMathieu Olivari #ifdef CONFIG_DEBUG_FS
4555466c5ac8SMathieu Olivari 	/* Create debugfs main directory if it doesn't exist yet */
4556466c5ac8SMathieu Olivari 	if (!stmmac_fs_dir) {
4557466c5ac8SMathieu Olivari 		stmmac_fs_dir = debugfs_create_dir(STMMAC_RESOURCE_NAME, NULL);
4558466c5ac8SMathieu Olivari 
4559466c5ac8SMathieu Olivari 		if (!stmmac_fs_dir || IS_ERR(stmmac_fs_dir)) {
4560466c5ac8SMathieu Olivari 			pr_err("ERROR %s, debugfs create directory failed\n",
4561466c5ac8SMathieu Olivari 			       STMMAC_RESOURCE_NAME);
4562466c5ac8SMathieu Olivari 
4563466c5ac8SMathieu Olivari 			return -ENOMEM;
4564466c5ac8SMathieu Olivari 		}
4565466c5ac8SMathieu Olivari 	}
4566466c5ac8SMathieu Olivari #endif
4567466c5ac8SMathieu Olivari 
4568466c5ac8SMathieu Olivari 	return 0;
4569466c5ac8SMathieu Olivari }
4570466c5ac8SMathieu Olivari 
4571466c5ac8SMathieu Olivari static void __exit stmmac_exit(void)
4572466c5ac8SMathieu Olivari {
4573466c5ac8SMathieu Olivari #ifdef CONFIG_DEBUG_FS
4574466c5ac8SMathieu Olivari 	debugfs_remove_recursive(stmmac_fs_dir);
4575466c5ac8SMathieu Olivari #endif
4576466c5ac8SMathieu Olivari }
4577466c5ac8SMathieu Olivari 
4578466c5ac8SMathieu Olivari module_init(stmmac_init)
4579466c5ac8SMathieu Olivari module_exit(stmmac_exit)
4580466c5ac8SMathieu Olivari 
45816fc0d0f2SGiuseppe Cavallaro MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet device driver");
45826fc0d0f2SGiuseppe Cavallaro MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
45836fc0d0f2SGiuseppe Cavallaro MODULE_LICENSE("GPL");
4584