17ac6653aSJeff Kirsher /*******************************************************************************
27ac6653aSJeff Kirsher   This is the driver for the ST MAC 10/100/1000 on-chip Ethernet controllers.
37ac6653aSJeff Kirsher   ST Ethernet IPs are built around a Synopsys IP Core.
47ac6653aSJeff Kirsher 
5286a8372SGiuseppe CAVALLARO 	Copyright(C) 2007-2011 STMicroelectronics Ltd
67ac6653aSJeff Kirsher 
77ac6653aSJeff Kirsher   This program is free software; you can redistribute it and/or modify it
87ac6653aSJeff Kirsher   under the terms and conditions of the GNU General Public License,
97ac6653aSJeff Kirsher   version 2, as published by the Free Software Foundation.
107ac6653aSJeff Kirsher 
117ac6653aSJeff Kirsher   This program is distributed in the hope it will be useful, but WITHOUT
127ac6653aSJeff Kirsher   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
137ac6653aSJeff Kirsher   FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
147ac6653aSJeff Kirsher   more details.
157ac6653aSJeff Kirsher 
167ac6653aSJeff Kirsher   The full GNU General Public License is included in this distribution in
177ac6653aSJeff Kirsher   the file called "COPYING".
187ac6653aSJeff Kirsher 
197ac6653aSJeff Kirsher   Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
207ac6653aSJeff Kirsher 
217ac6653aSJeff Kirsher   Documentation available at:
227ac6653aSJeff Kirsher 	http://www.stlinux.com
237ac6653aSJeff Kirsher   Support available at:
247ac6653aSJeff Kirsher 	https://bugzilla.stlinux.com/
257ac6653aSJeff Kirsher *******************************************************************************/
267ac6653aSJeff Kirsher 
276a81c26fSViresh Kumar #include <linux/clk.h>
287ac6653aSJeff Kirsher #include <linux/kernel.h>
297ac6653aSJeff Kirsher #include <linux/interrupt.h>
307ac6653aSJeff Kirsher #include <linux/ip.h>
317ac6653aSJeff Kirsher #include <linux/tcp.h>
327ac6653aSJeff Kirsher #include <linux/skbuff.h>
337ac6653aSJeff Kirsher #include <linux/ethtool.h>
347ac6653aSJeff Kirsher #include <linux/if_ether.h>
357ac6653aSJeff Kirsher #include <linux/crc32.h>
367ac6653aSJeff Kirsher #include <linux/mii.h>
3701789349SJiri Pirko #include <linux/if.h>
387ac6653aSJeff Kirsher #include <linux/if_vlan.h>
397ac6653aSJeff Kirsher #include <linux/dma-mapping.h>
407ac6653aSJeff Kirsher #include <linux/slab.h>
417ac6653aSJeff Kirsher #include <linux/prefetch.h>
42db88f10aSSrinivas Kandagatla #include <linux/pinctrl/consumer.h>
4350fb4f74SGiuseppe CAVALLARO #ifdef CONFIG_DEBUG_FS
447ac29055SGiuseppe CAVALLARO #include <linux/debugfs.h>
457ac29055SGiuseppe CAVALLARO #include <linux/seq_file.h>
4650fb4f74SGiuseppe CAVALLARO #endif /* CONFIG_DEBUG_FS */
47891434b1SRayagond Kokatanur #include <linux/net_tstamp.h>
484dbbe8ddSJose Abreu #include <net/pkt_cls.h>
49891434b1SRayagond Kokatanur #include "stmmac_ptp.h"
50286a8372SGiuseppe CAVALLARO #include "stmmac.h"
51c5e4ddbdSChen-Yu Tsai #include <linux/reset.h>
525790cf3cSMathieu Olivari #include <linux/of_mdio.h>
5319d857c9SPhil Reid #include "dwmac1000.h"
547d9e6c5aSJose Abreu #include "dwxgmac2.h"
5542de047dSJose Abreu #include "hwif.h"
567ac6653aSJeff Kirsher 
/* Align a length up to the CPU cache-line size. */
#define	STMMAC_ALIGN(x)		__ALIGN_KERNEL(x, SMP_CACHE_BYTES)
/* Largest buffer a single TSO descriptor can carry. */
#define	TSO_MAX_BUFF_SIZE	(SZ_16K - 1)

/* Module parameters */
#define TX_TIMEO	5000
static int watchdog = TX_TIMEO;
module_param(watchdog, int, 0644);
MODULE_PARM_DESC(watchdog, "Transmit timeout in milliseconds (default 5s)");

static int debug = -1;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)");

static int phyaddr = -1;
module_param(phyaddr, int, 0444);
MODULE_PARM_DESC(phyaddr, "Physical device address");

/* Ring occupancy thresholds used to decide when to restart/clean queues. */
#define STMMAC_TX_THRESH	(DMA_TX_SIZE / 4)
#define STMMAC_RX_THRESH	(DMA_RX_SIZE / 4)

static int flow_ctrl = FLOW_OFF;
module_param(flow_ctrl, int, 0644);
MODULE_PARM_DESC(flow_ctrl, "Flow control ability [on/off]");

static int pause = PAUSE_TIME;
module_param(pause, int, 0644);
MODULE_PARM_DESC(pause, "Flow Control Pause Time");

#define TC_DEFAULT 64
static int tc = TC_DEFAULT;
module_param(tc, int, 0644);
MODULE_PARM_DESC(tc, "DMA threshold control value");

#define	DEFAULT_BUFSIZE	1536
static int buf_sz = DEFAULT_BUFSIZE;
module_param(buf_sz, int, 0644);
MODULE_PARM_DESC(buf_sz, "DMA buffer size");

/* Frames shorter than this are copied into a fresh skb on receive. */
#define	STMMAC_RX_COPYBREAK	256

static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
				      NETIF_MSG_LINK | NETIF_MSG_IFUP |
				      NETIF_MSG_IFDOWN | NETIF_MSG_TIMER);

#define STMMAC_DEFAULT_LPI_TIMER	1000
static int eee_timer = STMMAC_DEFAULT_LPI_TIMER;
module_param(eee_timer, int, 0644);
MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec");
#define STMMAC_LPI_T(x) (jiffies + msecs_to_jiffies(x))

/* By default the driver will use the ring mode to manage tx and rx descriptors,
 * but allow user to force to use the chain instead of the ring
 */
/* NOTE(review): declared unsigned int but registered as "int" with
 * module_param() — type mismatch is harmless here but worth confirming.
 */
static unsigned int chain_mode;
module_param(chain_mode, int, 0444);
MODULE_PARM_DESC(chain_mode, "To use chain instead of ring mode");

static irqreturn_t stmmac_interrupt(int irq, void *dev_id);

#ifdef CONFIG_DEBUG_FS
static int stmmac_init_fs(struct net_device *dev);
static void stmmac_exit_fs(struct net_device *dev);
#endif

/* Convert a usec coalesce interval into an absolute jiffies deadline. */
#define STMMAC_COAL_TIMER(x) (jiffies + usecs_to_jiffies(x))
1237ac6653aSJeff Kirsher /**
1247ac6653aSJeff Kirsher  * stmmac_verify_args - verify the driver parameters.
125732fdf0eSGiuseppe CAVALLARO  * Description: it checks the driver parameters and set a default in case of
126732fdf0eSGiuseppe CAVALLARO  * errors.
1277ac6653aSJeff Kirsher  */
1287ac6653aSJeff Kirsher static void stmmac_verify_args(void)
1297ac6653aSJeff Kirsher {
1307ac6653aSJeff Kirsher 	if (unlikely(watchdog < 0))
1317ac6653aSJeff Kirsher 		watchdog = TX_TIMEO;
132d916701cSGiuseppe CAVALLARO 	if (unlikely((buf_sz < DEFAULT_BUFSIZE) || (buf_sz > BUF_SIZE_16KiB)))
133d916701cSGiuseppe CAVALLARO 		buf_sz = DEFAULT_BUFSIZE;
1347ac6653aSJeff Kirsher 	if (unlikely(flow_ctrl > 1))
1357ac6653aSJeff Kirsher 		flow_ctrl = FLOW_AUTO;
1367ac6653aSJeff Kirsher 	else if (likely(flow_ctrl < 0))
1377ac6653aSJeff Kirsher 		flow_ctrl = FLOW_OFF;
1387ac6653aSJeff Kirsher 	if (unlikely((pause < 0) || (pause > 0xffff)))
1397ac6653aSJeff Kirsher 		pause = PAUSE_TIME;
140d765955dSGiuseppe CAVALLARO 	if (eee_timer < 0)
141d765955dSGiuseppe CAVALLARO 		eee_timer = STMMAC_DEFAULT_LPI_TIMER;
1427ac6653aSJeff Kirsher }
1437ac6653aSJeff Kirsher 
14432ceabcaSGiuseppe CAVALLARO /**
145c22a3f48SJoao Pinto  * stmmac_disable_all_queues - Disable all queues
146c22a3f48SJoao Pinto  * @priv: driver private structure
147c22a3f48SJoao Pinto  */
148c22a3f48SJoao Pinto static void stmmac_disable_all_queues(struct stmmac_priv *priv)
149c22a3f48SJoao Pinto {
150c22a3f48SJoao Pinto 	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
1518fce3331SJose Abreu 	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
1528fce3331SJose Abreu 	u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
153c22a3f48SJoao Pinto 	u32 queue;
154c22a3f48SJoao Pinto 
1558fce3331SJose Abreu 	for (queue = 0; queue < maxq; queue++) {
1568fce3331SJose Abreu 		struct stmmac_channel *ch = &priv->channel[queue];
157c22a3f48SJoao Pinto 
1588fce3331SJose Abreu 		napi_disable(&ch->napi);
159c22a3f48SJoao Pinto 	}
160c22a3f48SJoao Pinto }
161c22a3f48SJoao Pinto 
162c22a3f48SJoao Pinto /**
163c22a3f48SJoao Pinto  * stmmac_enable_all_queues - Enable all queues
164c22a3f48SJoao Pinto  * @priv: driver private structure
165c22a3f48SJoao Pinto  */
166c22a3f48SJoao Pinto static void stmmac_enable_all_queues(struct stmmac_priv *priv)
167c22a3f48SJoao Pinto {
168c22a3f48SJoao Pinto 	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
1698fce3331SJose Abreu 	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
1708fce3331SJose Abreu 	u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
171c22a3f48SJoao Pinto 	u32 queue;
172c22a3f48SJoao Pinto 
1738fce3331SJose Abreu 	for (queue = 0; queue < maxq; queue++) {
1748fce3331SJose Abreu 		struct stmmac_channel *ch = &priv->channel[queue];
175c22a3f48SJoao Pinto 
1768fce3331SJose Abreu 		napi_enable(&ch->napi);
177c22a3f48SJoao Pinto 	}
178c22a3f48SJoao Pinto }
179c22a3f48SJoao Pinto 
180c22a3f48SJoao Pinto /**
181c22a3f48SJoao Pinto  * stmmac_stop_all_queues - Stop all queues
182c22a3f48SJoao Pinto  * @priv: driver private structure
183c22a3f48SJoao Pinto  */
184c22a3f48SJoao Pinto static void stmmac_stop_all_queues(struct stmmac_priv *priv)
185c22a3f48SJoao Pinto {
186c22a3f48SJoao Pinto 	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
187c22a3f48SJoao Pinto 	u32 queue;
188c22a3f48SJoao Pinto 
189c22a3f48SJoao Pinto 	for (queue = 0; queue < tx_queues_cnt; queue++)
190c22a3f48SJoao Pinto 		netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
191c22a3f48SJoao Pinto }
192c22a3f48SJoao Pinto 
193c22a3f48SJoao Pinto /**
194c22a3f48SJoao Pinto  * stmmac_start_all_queues - Start all queues
195c22a3f48SJoao Pinto  * @priv: driver private structure
196c22a3f48SJoao Pinto  */
197c22a3f48SJoao Pinto static void stmmac_start_all_queues(struct stmmac_priv *priv)
198c22a3f48SJoao Pinto {
199c22a3f48SJoao Pinto 	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
200c22a3f48SJoao Pinto 	u32 queue;
201c22a3f48SJoao Pinto 
202c22a3f48SJoao Pinto 	for (queue = 0; queue < tx_queues_cnt; queue++)
203c22a3f48SJoao Pinto 		netif_tx_start_queue(netdev_get_tx_queue(priv->dev, queue));
204c22a3f48SJoao Pinto }
205c22a3f48SJoao Pinto 
20634877a15SJose Abreu static void stmmac_service_event_schedule(struct stmmac_priv *priv)
20734877a15SJose Abreu {
20834877a15SJose Abreu 	if (!test_bit(STMMAC_DOWN, &priv->state) &&
20934877a15SJose Abreu 	    !test_and_set_bit(STMMAC_SERVICE_SCHED, &priv->state))
21034877a15SJose Abreu 		queue_work(priv->wq, &priv->service_task);
21134877a15SJose Abreu }
21234877a15SJose Abreu 
/* Fatal-error path: drop the carrier first so traffic stops, then request
 * a full reset and let the service task perform the actual recovery.
 */
static void stmmac_global_err(struct stmmac_priv *priv)
{
	netif_carrier_off(priv->dev);
	set_bit(STMMAC_RESET_REQUESTED, &priv->state);
	stmmac_service_event_schedule(priv);
}
21934877a15SJose Abreu 
220c22a3f48SJoao Pinto /**
22132ceabcaSGiuseppe CAVALLARO  * stmmac_clk_csr_set - dynamically set the MDC clock
22232ceabcaSGiuseppe CAVALLARO  * @priv: driver private structure
22332ceabcaSGiuseppe CAVALLARO  * Description: this is to dynamically set the MDC clock according to the csr
22432ceabcaSGiuseppe CAVALLARO  * clock input.
22532ceabcaSGiuseppe CAVALLARO  * Note:
22632ceabcaSGiuseppe CAVALLARO  *	If a specific clk_csr value is passed from the platform
22732ceabcaSGiuseppe CAVALLARO  *	this means that the CSR Clock Range selection cannot be
22832ceabcaSGiuseppe CAVALLARO  *	changed at run-time and it is fixed (as reported in the driver
22932ceabcaSGiuseppe CAVALLARO  *	documentation). Viceversa the driver will try to set the MDC
23032ceabcaSGiuseppe CAVALLARO  *	clock dynamically according to the actual clock input.
23132ceabcaSGiuseppe CAVALLARO  */
232cd7201f4SGiuseppe CAVALLARO static void stmmac_clk_csr_set(struct stmmac_priv *priv)
233cd7201f4SGiuseppe CAVALLARO {
234cd7201f4SGiuseppe CAVALLARO 	u32 clk_rate;
235cd7201f4SGiuseppe CAVALLARO 
236f573c0b9Sjpinto 	clk_rate = clk_get_rate(priv->plat->stmmac_clk);
237cd7201f4SGiuseppe CAVALLARO 
238cd7201f4SGiuseppe CAVALLARO 	/* Platform provided default clk_csr would be assumed valid
239ceb69499SGiuseppe CAVALLARO 	 * for all other cases except for the below mentioned ones.
240ceb69499SGiuseppe CAVALLARO 	 * For values higher than the IEEE 802.3 specified frequency
241ceb69499SGiuseppe CAVALLARO 	 * we can not estimate the proper divider as it is not known
242ceb69499SGiuseppe CAVALLARO 	 * the frequency of clk_csr_i. So we do not change the default
243ceb69499SGiuseppe CAVALLARO 	 * divider.
244ceb69499SGiuseppe CAVALLARO 	 */
245cd7201f4SGiuseppe CAVALLARO 	if (!(priv->clk_csr & MAC_CSR_H_FRQ_MASK)) {
246cd7201f4SGiuseppe CAVALLARO 		if (clk_rate < CSR_F_35M)
247cd7201f4SGiuseppe CAVALLARO 			priv->clk_csr = STMMAC_CSR_20_35M;
248cd7201f4SGiuseppe CAVALLARO 		else if ((clk_rate >= CSR_F_35M) && (clk_rate < CSR_F_60M))
249cd7201f4SGiuseppe CAVALLARO 			priv->clk_csr = STMMAC_CSR_35_60M;
250cd7201f4SGiuseppe CAVALLARO 		else if ((clk_rate >= CSR_F_60M) && (clk_rate < CSR_F_100M))
251cd7201f4SGiuseppe CAVALLARO 			priv->clk_csr = STMMAC_CSR_60_100M;
252cd7201f4SGiuseppe CAVALLARO 		else if ((clk_rate >= CSR_F_100M) && (clk_rate < CSR_F_150M))
253cd7201f4SGiuseppe CAVALLARO 			priv->clk_csr = STMMAC_CSR_100_150M;
254cd7201f4SGiuseppe CAVALLARO 		else if ((clk_rate >= CSR_F_150M) && (clk_rate < CSR_F_250M))
255cd7201f4SGiuseppe CAVALLARO 			priv->clk_csr = STMMAC_CSR_150_250M;
25619d857c9SPhil Reid 		else if ((clk_rate >= CSR_F_250M) && (clk_rate < CSR_F_300M))
257cd7201f4SGiuseppe CAVALLARO 			priv->clk_csr = STMMAC_CSR_250_300M;
258ceb69499SGiuseppe CAVALLARO 	}
2599f93ac8dSLABBE Corentin 
2609f93ac8dSLABBE Corentin 	if (priv->plat->has_sun8i) {
2619f93ac8dSLABBE Corentin 		if (clk_rate > 160000000)
2629f93ac8dSLABBE Corentin 			priv->clk_csr = 0x03;
2639f93ac8dSLABBE Corentin 		else if (clk_rate > 80000000)
2649f93ac8dSLABBE Corentin 			priv->clk_csr = 0x02;
2659f93ac8dSLABBE Corentin 		else if (clk_rate > 40000000)
2669f93ac8dSLABBE Corentin 			priv->clk_csr = 0x01;
2679f93ac8dSLABBE Corentin 		else
2689f93ac8dSLABBE Corentin 			priv->clk_csr = 0;
2699f93ac8dSLABBE Corentin 	}
2707d9e6c5aSJose Abreu 
2717d9e6c5aSJose Abreu 	if (priv->plat->has_xgmac) {
2727d9e6c5aSJose Abreu 		if (clk_rate > 400000000)
2737d9e6c5aSJose Abreu 			priv->clk_csr = 0x5;
2747d9e6c5aSJose Abreu 		else if (clk_rate > 350000000)
2757d9e6c5aSJose Abreu 			priv->clk_csr = 0x4;
2767d9e6c5aSJose Abreu 		else if (clk_rate > 300000000)
2777d9e6c5aSJose Abreu 			priv->clk_csr = 0x3;
2787d9e6c5aSJose Abreu 		else if (clk_rate > 250000000)
2797d9e6c5aSJose Abreu 			priv->clk_csr = 0x2;
2807d9e6c5aSJose Abreu 		else if (clk_rate > 150000000)
2817d9e6c5aSJose Abreu 			priv->clk_csr = 0x1;
2827d9e6c5aSJose Abreu 		else
2837d9e6c5aSJose Abreu 			priv->clk_csr = 0x0;
2847d9e6c5aSJose Abreu 	}
285cd7201f4SGiuseppe CAVALLARO }
286cd7201f4SGiuseppe CAVALLARO 
/* Dump @len bytes of packet data at @buf through the dynamic-debug facility
 * (only visible when debug output is enabled for this module).
 */
static void print_pkt(unsigned char *buf, int len)
{
	pr_debug("len = %d byte, buf addr: 0x%p\n", len, buf);
	print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, buf, len);
}
2927ac6653aSJeff Kirsher 
293ce736788SJoao Pinto static inline u32 stmmac_tx_avail(struct stmmac_priv *priv, u32 queue)
2947ac6653aSJeff Kirsher {
295ce736788SJoao Pinto 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
296a6a3e026SLABBE Corentin 	u32 avail;
297e3ad57c9SGiuseppe Cavallaro 
298ce736788SJoao Pinto 	if (tx_q->dirty_tx > tx_q->cur_tx)
299ce736788SJoao Pinto 		avail = tx_q->dirty_tx - tx_q->cur_tx - 1;
300e3ad57c9SGiuseppe Cavallaro 	else
301ce736788SJoao Pinto 		avail = DMA_TX_SIZE - tx_q->cur_tx + tx_q->dirty_tx - 1;
302e3ad57c9SGiuseppe Cavallaro 
303e3ad57c9SGiuseppe Cavallaro 	return avail;
304e3ad57c9SGiuseppe Cavallaro }
305e3ad57c9SGiuseppe Cavallaro 
30654139cf3SJoao Pinto /**
30754139cf3SJoao Pinto  * stmmac_rx_dirty - Get RX queue dirty
30854139cf3SJoao Pinto  * @priv: driver private structure
30954139cf3SJoao Pinto  * @queue: RX queue index
31054139cf3SJoao Pinto  */
31154139cf3SJoao Pinto static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv, u32 queue)
312e3ad57c9SGiuseppe Cavallaro {
31354139cf3SJoao Pinto 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
314a6a3e026SLABBE Corentin 	u32 dirty;
315e3ad57c9SGiuseppe Cavallaro 
31654139cf3SJoao Pinto 	if (rx_q->dirty_rx <= rx_q->cur_rx)
31754139cf3SJoao Pinto 		dirty = rx_q->cur_rx - rx_q->dirty_rx;
318e3ad57c9SGiuseppe Cavallaro 	else
31954139cf3SJoao Pinto 		dirty = DMA_RX_SIZE - rx_q->dirty_rx + rx_q->cur_rx;
320e3ad57c9SGiuseppe Cavallaro 
321e3ad57c9SGiuseppe Cavallaro 	return dirty;
3227ac6653aSJeff Kirsher }
3237ac6653aSJeff Kirsher 
32432ceabcaSGiuseppe CAVALLARO /**
325732fdf0eSGiuseppe CAVALLARO  * stmmac_hw_fix_mac_speed - callback for speed selection
32632ceabcaSGiuseppe CAVALLARO  * @priv: driver private structure
3278d45e42bSLABBE Corentin  * Description: on some platforms (e.g. ST), some HW system configuration
32832ceabcaSGiuseppe CAVALLARO  * registers have to be set according to the link speed negotiated.
3297ac6653aSJeff Kirsher  */
3307ac6653aSJeff Kirsher static inline void stmmac_hw_fix_mac_speed(struct stmmac_priv *priv)
3317ac6653aSJeff Kirsher {
332d6d50c7eSPhilippe Reynes 	struct net_device *ndev = priv->dev;
333d6d50c7eSPhilippe Reynes 	struct phy_device *phydev = ndev->phydev;
3347ac6653aSJeff Kirsher 
3357ac6653aSJeff Kirsher 	if (likely(priv->plat->fix_mac_speed))
336ceb69499SGiuseppe CAVALLARO 		priv->plat->fix_mac_speed(priv->plat->bsp_priv, phydev->speed);
3377ac6653aSJeff Kirsher }
3387ac6653aSJeff Kirsher 
33932ceabcaSGiuseppe CAVALLARO /**
340732fdf0eSGiuseppe CAVALLARO  * stmmac_enable_eee_mode - check and enter in LPI mode
34132ceabcaSGiuseppe CAVALLARO  * @priv: driver private structure
342732fdf0eSGiuseppe CAVALLARO  * Description: this function is to verify and enter in LPI mode in case of
343732fdf0eSGiuseppe CAVALLARO  * EEE.
34432ceabcaSGiuseppe CAVALLARO  */
345d765955dSGiuseppe CAVALLARO static void stmmac_enable_eee_mode(struct stmmac_priv *priv)
346d765955dSGiuseppe CAVALLARO {
347ce736788SJoao Pinto 	u32 tx_cnt = priv->plat->tx_queues_to_use;
348ce736788SJoao Pinto 	u32 queue;
349ce736788SJoao Pinto 
350ce736788SJoao Pinto 	/* check if all TX queues have the work finished */
351ce736788SJoao Pinto 	for (queue = 0; queue < tx_cnt; queue++) {
352ce736788SJoao Pinto 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
353ce736788SJoao Pinto 
354ce736788SJoao Pinto 		if (tx_q->dirty_tx != tx_q->cur_tx)
355ce736788SJoao Pinto 			return; /* still unfinished work */
356ce736788SJoao Pinto 	}
357ce736788SJoao Pinto 
358d765955dSGiuseppe CAVALLARO 	/* Check and enter in LPI mode */
359ce736788SJoao Pinto 	if (!priv->tx_path_in_lpi_mode)
360c10d4c82SJose Abreu 		stmmac_set_eee_mode(priv, priv->hw,
361b4b7b772Sjpinto 				priv->plat->en_tx_lpi_clockgating);
362d765955dSGiuseppe CAVALLARO }
363d765955dSGiuseppe CAVALLARO 
/**
 * stmmac_disable_eee_mode - disable and exit from LPI mode
 * @priv: driver private structure
 * Description: this function is to exit and disable EEE in case of
 * LPI state is true. This is called by the xmit.
 * Order matters: take the MAC out of LPI first, then stop the re-arm
 * timer, then clear the software flag.
 */
void stmmac_disable_eee_mode(struct stmmac_priv *priv)
{
	stmmac_reset_eee_mode(priv, priv->hw);
	del_timer_sync(&priv->eee_ctrl_timer);
	priv->tx_path_in_lpi_mode = false;
}
376d765955dSGiuseppe CAVALLARO 
/**
 * stmmac_eee_ctrl_timer - EEE TX SW timer.
 * @t: timer_list hook embedded in the private structure
 * Description:
 *  if there is no data transfer and if we are not in LPI state,
 *  then MAC Transmitter can be moved to LPI state.  The timer re-arms
 *  itself so the check repeats every eee_timer milliseconds.
 */
static void stmmac_eee_ctrl_timer(struct timer_list *t)
{
	struct stmmac_priv *priv = from_timer(priv, t, eee_ctrl_timer);

	stmmac_enable_eee_mode(priv);
	mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer));
}
391d765955dSGiuseppe CAVALLARO 
/**
 * stmmac_eee_init - init EEE
 * @priv: driver private structure
 * Description:
 *  if the GMAC supports the EEE (from the HW cap reg) and the phy device
 *  can also manage EEE, this function enable the LPI state and start related
 *  timer.
 * Return: true if EEE was activated, false otherwise.
 */
bool stmmac_eee_init(struct stmmac_priv *priv)
{
	struct net_device *ndev = priv->dev;
	int interface = priv->plat->interface;
	bool ret = false;

	/* EEE is only meaningful on MII/GMII/RGMII links. */
	if ((interface != PHY_INTERFACE_MODE_MII) &&
	    (interface != PHY_INTERFACE_MODE_GMII) &&
	    !phy_interface_mode_is_rgmii(interface))
		goto out;

	/* Using PCS we cannot dial with the phy registers at this stage
	 * so we do not support extra feature like EEE.
	 */
	if ((priv->hw->pcs == STMMAC_PCS_RGMII) ||
	    (priv->hw->pcs == STMMAC_PCS_TBI) ||
	    (priv->hw->pcs == STMMAC_PCS_RTBI))
		goto out;

	/* MAC core supports the EEE feature. */
	if (priv->dma_cap.eee) {
		int tx_lpi_timer = priv->tx_lpi_timer;

		/* Check if the PHY supports EEE */
		if (phy_init_eee(ndev->phydev, 1)) {
			/* To manage at run-time if the EEE cannot be supported
			 * anymore (for example because the lp caps have been
			 * changed).
			 * In that case the driver disable own timers.
			 */
			mutex_lock(&priv->lock);
			if (priv->eee_active) {
				netdev_dbg(priv->dev, "disable EEE\n");
				del_timer_sync(&priv->eee_ctrl_timer);
				stmmac_set_eee_timer(priv, priv->hw, 0,
						tx_lpi_timer);
			}
			priv->eee_active = 0;
			mutex_unlock(&priv->lock);
			goto out;
		}
		/* Activate the EEE and start timers */
		mutex_lock(&priv->lock);
		if (!priv->eee_active) {
			priv->eee_active = 1;
			timer_setup(&priv->eee_ctrl_timer,
				    stmmac_eee_ctrl_timer, 0);
			mod_timer(&priv->eee_ctrl_timer,
				  STMMAC_LPI_T(eee_timer));

			stmmac_set_eee_timer(priv, priv->hw,
					STMMAC_DEFAULT_LIT_LS, tx_lpi_timer);
		}
		/* Set HW EEE according to the speed */
		stmmac_set_eee_pls(priv, priv->hw, ndev->phydev->link);

		ret = true;
		mutex_unlock(&priv->lock);

		netdev_dbg(priv->dev, "Energy-Efficient Ethernet initialized\n");
	}
out:
	return ret;
}
464d765955dSGiuseppe CAVALLARO 
465732fdf0eSGiuseppe CAVALLARO /* stmmac_get_tx_hwtstamp - get HW TX timestamps
46632ceabcaSGiuseppe CAVALLARO  * @priv: driver private structure
467ba1ffd74SGiuseppe CAVALLARO  * @p : descriptor pointer
468891434b1SRayagond Kokatanur  * @skb : the socket buffer
469891434b1SRayagond Kokatanur  * Description :
470891434b1SRayagond Kokatanur  * This function will read timestamp from the descriptor & pass it to stack.
471891434b1SRayagond Kokatanur  * and also perform some sanity checks.
472891434b1SRayagond Kokatanur  */
473891434b1SRayagond Kokatanur static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv,
474ba1ffd74SGiuseppe CAVALLARO 				   struct dma_desc *p, struct sk_buff *skb)
475891434b1SRayagond Kokatanur {
476891434b1SRayagond Kokatanur 	struct skb_shared_hwtstamps shhwtstamp;
477891434b1SRayagond Kokatanur 	u64 ns;
478891434b1SRayagond Kokatanur 
479891434b1SRayagond Kokatanur 	if (!priv->hwts_tx_en)
480891434b1SRayagond Kokatanur 		return;
481891434b1SRayagond Kokatanur 
482ceb69499SGiuseppe CAVALLARO 	/* exit if skb doesn't support hw tstamp */
48375e4364fSdamuzi000 	if (likely(!skb || !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)))
484891434b1SRayagond Kokatanur 		return;
485891434b1SRayagond Kokatanur 
486891434b1SRayagond Kokatanur 	/* check tx tstamp status */
48742de047dSJose Abreu 	if (stmmac_get_tx_timestamp_status(priv, p)) {
488891434b1SRayagond Kokatanur 		/* get the valid tstamp */
48942de047dSJose Abreu 		stmmac_get_timestamp(priv, p, priv->adv_ts, &ns);
490891434b1SRayagond Kokatanur 
491891434b1SRayagond Kokatanur 		memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
492891434b1SRayagond Kokatanur 		shhwtstamp.hwtstamp = ns_to_ktime(ns);
493ba1ffd74SGiuseppe CAVALLARO 
49433d4c482SMario Molitor 		netdev_dbg(priv->dev, "get valid TX hw timestamp %llu\n", ns);
495891434b1SRayagond Kokatanur 		/* pass tstamp to stack */
496891434b1SRayagond Kokatanur 		skb_tstamp_tx(skb, &shhwtstamp);
497ba1ffd74SGiuseppe CAVALLARO 	}
498891434b1SRayagond Kokatanur 
499891434b1SRayagond Kokatanur 	return;
500891434b1SRayagond Kokatanur }
501891434b1SRayagond Kokatanur 
502732fdf0eSGiuseppe CAVALLARO /* stmmac_get_rx_hwtstamp - get HW RX timestamps
50332ceabcaSGiuseppe CAVALLARO  * @priv: driver private structure
504ba1ffd74SGiuseppe CAVALLARO  * @p : descriptor pointer
505ba1ffd74SGiuseppe CAVALLARO  * @np : next descriptor pointer
506891434b1SRayagond Kokatanur  * @skb : the socket buffer
507891434b1SRayagond Kokatanur  * Description :
508891434b1SRayagond Kokatanur  * This function will read received packet's timestamp from the descriptor
509891434b1SRayagond Kokatanur  * and pass it to stack. It also perform some sanity checks.
510891434b1SRayagond Kokatanur  */
511ba1ffd74SGiuseppe CAVALLARO static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p,
512ba1ffd74SGiuseppe CAVALLARO 				   struct dma_desc *np, struct sk_buff *skb)
513891434b1SRayagond Kokatanur {
514891434b1SRayagond Kokatanur 	struct skb_shared_hwtstamps *shhwtstamp = NULL;
51598870943SJose Abreu 	struct dma_desc *desc = p;
516891434b1SRayagond Kokatanur 	u64 ns;
517891434b1SRayagond Kokatanur 
518891434b1SRayagond Kokatanur 	if (!priv->hwts_rx_en)
519891434b1SRayagond Kokatanur 		return;
520ba1ffd74SGiuseppe CAVALLARO 	/* For GMAC4, the valid timestamp is from CTX next desc. */
5217d9e6c5aSJose Abreu 	if (priv->plat->has_gmac4 || priv->plat->has_xgmac)
52298870943SJose Abreu 		desc = np;
523891434b1SRayagond Kokatanur 
52498870943SJose Abreu 	/* Check if timestamp is available */
52542de047dSJose Abreu 	if (stmmac_get_rx_timestamp_status(priv, p, np, priv->adv_ts)) {
52642de047dSJose Abreu 		stmmac_get_timestamp(priv, desc, priv->adv_ts, &ns);
52733d4c482SMario Molitor 		netdev_dbg(priv->dev, "get valid RX hw timestamp %llu\n", ns);
528891434b1SRayagond Kokatanur 		shhwtstamp = skb_hwtstamps(skb);
529891434b1SRayagond Kokatanur 		memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
530891434b1SRayagond Kokatanur 		shhwtstamp->hwtstamp = ns_to_ktime(ns);
531ba1ffd74SGiuseppe CAVALLARO 	} else  {
53233d4c482SMario Molitor 		netdev_dbg(priv->dev, "cannot get RX hw timestamp\n");
533ba1ffd74SGiuseppe CAVALLARO 	}
534891434b1SRayagond Kokatanur }
535891434b1SRayagond Kokatanur 
536891434b1SRayagond Kokatanur /**
537891434b1SRayagond Kokatanur  *  stmmac_hwtstamp_ioctl - control hardware timestamping.
538891434b1SRayagond Kokatanur  *  @dev: device pointer.
5398d45e42bSLABBE Corentin  *  @ifr: An IOCTL specific structure, that can contain a pointer to
540891434b1SRayagond Kokatanur  *  a proprietary structure used to pass information to the driver.
541891434b1SRayagond Kokatanur  *  Description:
542891434b1SRayagond Kokatanur  *  This function configures the MAC to enable/disable both outgoing(TX)
543891434b1SRayagond Kokatanur  *  and incoming(RX) packets time stamping based on user input.
544891434b1SRayagond Kokatanur  *  Return Value:
545891434b1SRayagond Kokatanur  *  0 on success and an appropriate -ve integer on failure.
546891434b1SRayagond Kokatanur  */
static int stmmac_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	struct hwtstamp_config config;
	struct timespec64 now;
	u64 temp = 0;
	u32 ptp_v2 = 0;
	u32 tstamp_all = 0;
	u32 ptp_over_ipv4_udp = 0;
	u32 ptp_over_ipv6_udp = 0;
	u32 ptp_over_ethernet = 0;
	u32 snap_type_sel = 0;
	u32 ts_master_en = 0;
	u32 ts_event_en = 0;
	u32 value = 0;
	u32 sec_inc;
	bool xmac;

	/* dwmac 4.x and xgmac cores use the GMAC4 flavour of the snapshot
	 * type bits and of the sub-second increment programming below.
	 */
	xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;

	if (!(priv->dma_cap.time_stamp || priv->adv_ts)) {
		netdev_alert(priv->dev, "No support for HW time stamping\n");
		priv->hwts_tx_en = 0;
		priv->hwts_rx_en = 0;

		return -EOPNOTSUPP;
	}

	if (copy_from_user(&config, ifr->ifr_data,
			   sizeof(struct hwtstamp_config)))
		return -EFAULT;

	netdev_dbg(priv->dev, "%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n",
		   __func__, config.flags, config.tx_type, config.rx_filter);

	/* reserved for future extensions */
	if (config.flags)
		return -EINVAL;

	if (config.tx_type != HWTSTAMP_TX_OFF &&
	    config.tx_type != HWTSTAMP_TX_ON)
		return -ERANGE;

	/* Map the requested RX filter to the TCR register bits. With the
	 * advanced (v2-capable) timestamping unit each filter is honoured;
	 * otherwise only "none" vs "PTPv1/UDP events" is possible and the
	 * actual granted filter is written back into config below.
	 */
	if (priv->adv_ts) {
		switch (config.rx_filter) {
		case HWTSTAMP_FILTER_NONE:
			/* time stamp no incoming packet at all */
			config.rx_filter = HWTSTAMP_FILTER_NONE;
			break;

		case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
			/* PTP v1, UDP, any kind of event packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
			/* take time stamp for all event messages */
			if (xmac)
				snap_type_sel = PTP_GMAC4_TCR_SNAPTYPSEL_1;
			else
				snap_type_sel = PTP_TCR_SNAPTYPSEL_1;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
			/* PTP v1, UDP, Sync packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
			/* take time stamp for SYNC messages only */
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
			/* PTP v1, UDP, Delay_req packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
			/* take time stamp for Delay_Req messages only */
			ts_master_en = PTP_TCR_TSMSTRENA;
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
			/* PTP v2, UDP, any kind of event packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for all event messages */
			if (xmac)
				snap_type_sel = PTP_GMAC4_TCR_SNAPTYPSEL_1;
			else
				snap_type_sel = PTP_TCR_SNAPTYPSEL_1;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
			/* PTP v2, UDP, Sync packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for SYNC messages only */
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
			/* PTP v2, UDP, Delay_req packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for Delay_Req messages only */
			ts_master_en = PTP_TCR_TSMSTRENA;
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_EVENT:
			/* PTP v2/802.AS1 any layer, any kind of event packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for all event messages */
			if (xmac)
				snap_type_sel = PTP_GMAC4_TCR_SNAPTYPSEL_1;
			else
				snap_type_sel = PTP_TCR_SNAPTYPSEL_1;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			ptp_over_ethernet = PTP_TCR_TSIPENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_SYNC:
			/* PTP v2/802.AS1, any layer, Sync packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for SYNC messages only */
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			ptp_over_ethernet = PTP_TCR_TSIPENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
			/* PTP v2/802.AS1, any layer, Delay_req packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for Delay_Req messages only */
			ts_master_en = PTP_TCR_TSMSTRENA;
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			ptp_over_ethernet = PTP_TCR_TSIPENA;
			break;

		case HWTSTAMP_FILTER_NTP_ALL:
		case HWTSTAMP_FILTER_ALL:
			/* time stamp any incoming packet */
			config.rx_filter = HWTSTAMP_FILTER_ALL;
			tstamp_all = PTP_TCR_TSENALL;
			break;

		default:
			return -ERANGE;
		}
	} else {
		switch (config.rx_filter) {
		case HWTSTAMP_FILTER_NONE:
			config.rx_filter = HWTSTAMP_FILTER_NONE;
			break;
		default:
			/* PTP v1, UDP, any kind of event packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
			break;
		}
	}
	priv->hwts_rx_en = ((config.rx_filter == HWTSTAMP_FILTER_NONE) ? 0 : 1);
	priv->hwts_tx_en = config.tx_type == HWTSTAMP_TX_ON;

	if (!priv->hwts_tx_en && !priv->hwts_rx_en)
		stmmac_config_hw_tstamping(priv, priv->ptpaddr, 0);
	else {
		value = (PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | PTP_TCR_TSCTRLSSR |
			 tstamp_all | ptp_v2 | ptp_over_ethernet |
			 ptp_over_ipv6_udp | ptp_over_ipv4_udp | ts_event_en |
			 ts_master_en | snap_type_sel);
		stmmac_config_hw_tstamping(priv, priv->ptpaddr, value);

		/* program Sub Second Increment reg */
		stmmac_config_sub_second_increment(priv,
				priv->ptpaddr, priv->plat->clk_ptp_rate,
				xmac, &sec_inc);
		temp = div_u64(1000000000ULL, sec_inc);

		/* Store sub second increment and flags for later use */
		priv->sub_second_inc = sec_inc;
		priv->systime_flags = value;

		/* calculate default added value:
		 * formula is :
		 * addend = (2^32)/freq_div_ratio;
		 * where, freq_div_ratio = 1e9ns/sec_inc
		 */
		temp = (u64)(temp << 32);
		priv->default_addend = div_u64(temp, priv->plat->clk_ptp_rate);
		stmmac_config_addend(priv, priv->ptpaddr, priv->default_addend);

		/* initialize system time */
		ktime_get_real_ts64(&now);

		/* lower 32 bits of tv_sec are safe until y2106 */
		stmmac_init_systime(priv, priv->ptpaddr,
				(u32)now.tv_sec, now.tv_nsec);
	}

	/* Report the granted (possibly downgraded) configuration back to
	 * user space, as required by the SIOCSHWTSTAMP contract.
	 */
	return copy_to_user(ifr->ifr_data, &config,
			    sizeof(struct hwtstamp_config)) ? -EFAULT : 0;
}
771891434b1SRayagond Kokatanur 
77232ceabcaSGiuseppe CAVALLARO /**
773732fdf0eSGiuseppe CAVALLARO  * stmmac_init_ptp - init PTP
77432ceabcaSGiuseppe CAVALLARO  * @priv: driver private structure
775732fdf0eSGiuseppe CAVALLARO  * Description: this is to verify if the HW supports the PTPv1 or PTPv2.
77632ceabcaSGiuseppe CAVALLARO  * This is done by looking at the HW cap. register.
777732fdf0eSGiuseppe CAVALLARO  * This function also registers the ptp driver.
77832ceabcaSGiuseppe CAVALLARO  */
77992ba6888SRayagond Kokatanur static int stmmac_init_ptp(struct stmmac_priv *priv)
780891434b1SRayagond Kokatanur {
7817d9e6c5aSJose Abreu 	bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
7827d9e6c5aSJose Abreu 
78392ba6888SRayagond Kokatanur 	if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
78492ba6888SRayagond Kokatanur 		return -EOPNOTSUPP;
78592ba6888SRayagond Kokatanur 
786891434b1SRayagond Kokatanur 	priv->adv_ts = 0;
7877d9e6c5aSJose Abreu 	/* Check if adv_ts can be enabled for dwmac 4.x / xgmac core */
7887d9e6c5aSJose Abreu 	if (xmac && priv->dma_cap.atime_stamp)
789be9b3174SGiuseppe CAVALLARO 		priv->adv_ts = 1;
790be9b3174SGiuseppe CAVALLARO 	/* Dwmac 3.x core with extend_desc can support adv_ts */
791be9b3174SGiuseppe CAVALLARO 	else if (priv->extend_desc && priv->dma_cap.atime_stamp)
792891434b1SRayagond Kokatanur 		priv->adv_ts = 1;
7937cd01399SVince Bridgers 
794be9b3174SGiuseppe CAVALLARO 	if (priv->dma_cap.time_stamp)
795be9b3174SGiuseppe CAVALLARO 		netdev_info(priv->dev, "IEEE 1588-2002 Timestamp supported\n");
7967cd01399SVince Bridgers 
797be9b3174SGiuseppe CAVALLARO 	if (priv->adv_ts)
798be9b3174SGiuseppe CAVALLARO 		netdev_info(priv->dev,
799be9b3174SGiuseppe CAVALLARO 			    "IEEE 1588-2008 Advanced Timestamp supported\n");
800891434b1SRayagond Kokatanur 
801891434b1SRayagond Kokatanur 	priv->hwts_tx_en = 0;
802891434b1SRayagond Kokatanur 	priv->hwts_rx_en = 0;
80392ba6888SRayagond Kokatanur 
804c30a70d3SGiuseppe CAVALLARO 	stmmac_ptp_register(priv);
805c30a70d3SGiuseppe CAVALLARO 
806c30a70d3SGiuseppe CAVALLARO 	return 0;
80792ba6888SRayagond Kokatanur }
80892ba6888SRayagond Kokatanur 
/* Release PTP resources: gate the (optional) PTP reference clock and
 * unregister the PTP clock device.
 */
static void stmmac_release_ptp(struct stmmac_priv *priv)
{
	/* clk_ptp_ref is optional; skip the clock when it was never provided */
	if (priv->plat->clk_ptp_ref)
		clk_disable_unprepare(priv->plat->clk_ptp_ref);
	stmmac_ptp_unregister(priv);
}
815891434b1SRayagond Kokatanur 
/**
 *  stmmac_mac_flow_ctrl - Configure flow control in all queues
 *  @priv: driver private structure
 *  @duplex: duplex capability
 *  Description: It is used for configuring the flow control in all queues
 */
82129feff39SJoao Pinto static void stmmac_mac_flow_ctrl(struct stmmac_priv *priv, u32 duplex)
82229feff39SJoao Pinto {
82329feff39SJoao Pinto 	u32 tx_cnt = priv->plat->tx_queues_to_use;
82429feff39SJoao Pinto 
825c10d4c82SJose Abreu 	stmmac_flow_ctrl(priv, priv->hw, duplex, priv->flow_ctrl,
82629feff39SJoao Pinto 			priv->pause, tx_cnt);
82729feff39SJoao Pinto }
82829feff39SJoao Pinto 
82929feff39SJoao Pinto /**
830732fdf0eSGiuseppe CAVALLARO  * stmmac_adjust_link - adjusts the link parameters
8317ac6653aSJeff Kirsher  * @dev: net device structure
832732fdf0eSGiuseppe CAVALLARO  * Description: this is the helper called by the physical abstraction layer
833732fdf0eSGiuseppe CAVALLARO  * drivers to communicate the phy link status. According the speed and duplex
834732fdf0eSGiuseppe CAVALLARO  * this driver can invoke registered glue-logic as well.
835732fdf0eSGiuseppe CAVALLARO  * It also invoke the eee initialization because it could happen when switch
836732fdf0eSGiuseppe CAVALLARO  * on different networks (that are eee capable).
8377ac6653aSJeff Kirsher  */
static void stmmac_adjust_link(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	struct phy_device *phydev = dev->phydev;
	bool new_state = false;

	if (!phydev)
		return;

	/* Serialize against other users of the MAC control register and of
	 * the cached link state (oldlink/speed/oldduplex).
	 */
	mutex_lock(&priv->lock);

	if (phydev->link) {
		u32 ctrl = readl(priv->ioaddr + MAC_CTRL_REG);

		/* Now we make sure that we can be in full duplex mode.
		 * If not, we operate in half-duplex mode.
		 */
		if (phydev->duplex != priv->oldduplex) {
			new_state = true;
			if (!phydev->duplex)
				ctrl &= ~priv->hw->link.duplex;
			else
				ctrl |= priv->hw->link.duplex;
			priv->oldduplex = phydev->duplex;
		}
		/* Flow Control operation */
		if (phydev->pause)
			stmmac_mac_flow_ctrl(priv, phydev->duplex);

		/* Re-program the MAC speed bits only on an actual change */
		if (phydev->speed != priv->speed) {
			new_state = true;
			ctrl &= ~priv->hw->link.speed_mask;
			switch (phydev->speed) {
			case SPEED_1000:
				ctrl |= priv->hw->link.speed1000;
				break;
			case SPEED_100:
				ctrl |= priv->hw->link.speed100;
				break;
			case SPEED_10:
				ctrl |= priv->hw->link.speed10;
				break;
			default:
				netif_warn(priv, link, priv->dev,
					   "broken speed: %d\n", phydev->speed);
				phydev->speed = SPEED_UNKNOWN;
				break;
			}
			/* Let glue logic (if any) retune for the new speed */
			if (phydev->speed != SPEED_UNKNOWN)
				stmmac_hw_fix_mac_speed(priv);
			priv->speed = phydev->speed;
		}

		writel(ctrl, priv->ioaddr + MAC_CTRL_REG);

		if (!priv->oldlink) {
			new_state = true;
			priv->oldlink = true;
		}
	} else if (priv->oldlink) {
		/* Link went down: invalidate the cached speed/duplex so the
		 * next link-up forcibly re-programs the MAC.
		 */
		new_state = true;
		priv->oldlink = false;
		priv->speed = SPEED_UNKNOWN;
		priv->oldduplex = DUPLEX_UNKNOWN;
	}

	if (new_state && netif_msg_link(priv))
		phy_print_status(phydev);

	mutex_unlock(&priv->lock);

	if (phydev->is_pseudo_fixed_link)
		/* Stop PHY layer to call the hook to adjust the link in case
		 * of a switch is attached to the stmmac driver.
		 */
		phydev->irq = PHY_IGNORE_INTERRUPT;
	else
		/* At this stage, init the EEE if supported.
		 * Never called in case of fixed_link.
		 */
		priv->eee_enabled = stmmac_eee_init(priv);
}
9197ac6653aSJeff Kirsher 
92032ceabcaSGiuseppe CAVALLARO /**
921732fdf0eSGiuseppe CAVALLARO  * stmmac_check_pcs_mode - verify if RGMII/SGMII is supported
92232ceabcaSGiuseppe CAVALLARO  * @priv: driver private structure
92332ceabcaSGiuseppe CAVALLARO  * Description: this is to verify if the HW supports the PCS.
92432ceabcaSGiuseppe CAVALLARO  * Physical Coding Sublayer (PCS) interface that can be used when the MAC is
92532ceabcaSGiuseppe CAVALLARO  * configured for the TBI, RTBI, or SGMII PHY interface.
92632ceabcaSGiuseppe CAVALLARO  */
927e58bb43fSGiuseppe CAVALLARO static void stmmac_check_pcs_mode(struct stmmac_priv *priv)
928e58bb43fSGiuseppe CAVALLARO {
929e58bb43fSGiuseppe CAVALLARO 	int interface = priv->plat->interface;
930e58bb43fSGiuseppe CAVALLARO 
931e58bb43fSGiuseppe CAVALLARO 	if (priv->dma_cap.pcs) {
9320d909dcdSByungho An 		if ((interface == PHY_INTERFACE_MODE_RGMII) ||
9330d909dcdSByungho An 		    (interface == PHY_INTERFACE_MODE_RGMII_ID) ||
9340d909dcdSByungho An 		    (interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
9350d909dcdSByungho An 		    (interface == PHY_INTERFACE_MODE_RGMII_TXID)) {
93638ddc59dSLABBE Corentin 			netdev_dbg(priv->dev, "PCS RGMII support enabled\n");
9373fe5cadbSGiuseppe CAVALLARO 			priv->hw->pcs = STMMAC_PCS_RGMII;
9380d909dcdSByungho An 		} else if (interface == PHY_INTERFACE_MODE_SGMII) {
93938ddc59dSLABBE Corentin 			netdev_dbg(priv->dev, "PCS SGMII support enabled\n");
9403fe5cadbSGiuseppe CAVALLARO 			priv->hw->pcs = STMMAC_PCS_SGMII;
941e58bb43fSGiuseppe CAVALLARO 		}
942e58bb43fSGiuseppe CAVALLARO 	}
943e58bb43fSGiuseppe CAVALLARO }
944e58bb43fSGiuseppe CAVALLARO 
9457ac6653aSJeff Kirsher /**
9467ac6653aSJeff Kirsher  * stmmac_init_phy - PHY initialization
9477ac6653aSJeff Kirsher  * @dev: net device structure
9487ac6653aSJeff Kirsher  * Description: it initializes the driver's PHY state, and attaches the PHY
9497ac6653aSJeff Kirsher  * to the mac driver.
9507ac6653aSJeff Kirsher  *  Return value:
9517ac6653aSJeff Kirsher  *  0 on success
9527ac6653aSJeff Kirsher  */
static int stmmac_init_phy(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	u32 tx_cnt = priv->plat->tx_queues_to_use;
	struct phy_device *phydev;
	char phy_id_fmt[MII_BUS_ID_SIZE + 3];
	char bus_id[MII_BUS_ID_SIZE];
	int interface = priv->plat->interface;
	int max_speed = priv->plat->max_speed;
	/* Reset cached link state so stmmac_adjust_link starts clean */
	priv->oldlink = false;
	priv->speed = SPEED_UNKNOWN;
	priv->oldduplex = DUPLEX_UNKNOWN;

	/* Prefer the DT phy handle when present; otherwise build the
	 * "bus_id:addr" string and attach via the mdio bus.
	 */
	if (priv->plat->phy_node) {
		phydev = of_phy_connect(dev, priv->plat->phy_node,
					&stmmac_adjust_link, 0, interface);
	} else {
		snprintf(bus_id, MII_BUS_ID_SIZE, "stmmac-%x",
			 priv->plat->bus_id);

		snprintf(phy_id_fmt, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, bus_id,
			 priv->plat->phy_addr);
		netdev_dbg(priv->dev, "%s: trying to attach to %s\n", __func__,
			   phy_id_fmt);

		phydev = phy_connect(dev, phy_id_fmt, &stmmac_adjust_link,
				     interface);
	}

	/* of_phy_connect() returns NULL on failure, phy_connect() returns
	 * an ERR_PTR; normalize both into an errno here.
	 */
	if (IS_ERR_OR_NULL(phydev)) {
		netdev_err(priv->dev, "Could not attach to PHY\n");
		if (!phydev)
			return -ENODEV;

		return PTR_ERR(phydev);
	}

	/* Stop Advertising 1000BASE Capability if interface is not GMII */
	if ((interface == PHY_INTERFACE_MODE_MII) ||
	    (interface == PHY_INTERFACE_MODE_RMII) ||
		(max_speed < 1000 && max_speed > 0))
		phy_set_max_speed(phydev, SPEED_100);

	/*
	 * Half-duplex mode not supported with multiqueue
	 * half-duplex can only works with single queue
	 */
	if (tx_cnt > 1) {
		phy_remove_link_mode(phydev,
				     ETHTOOL_LINK_MODE_10baseT_Half_BIT);
		phy_remove_link_mode(phydev,
				     ETHTOOL_LINK_MODE_100baseT_Half_BIT);
		phy_remove_link_mode(phydev,
				     ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
	}

	/*
	 * Broken HW is sometimes missing the pull-up resistor on the
	 * MDIO line, which results in reads to non-existent devices returning
	 * 0 rather than 0xffff. Catch this here and treat 0 as a non-existent
	 * device as well.
	 * Note: phydev->phy_id is the result of reading the UID PHY registers.
	 */
	if (!priv->plat->phy_node && phydev->phy_id == 0) {
		phy_disconnect(phydev);
		return -ENODEV;
	}

	/* stmmac_adjust_link will change this to PHY_IGNORE_INTERRUPT to avoid
	 * subsequent PHY polling, make sure we force a link transition if
	 * we have a UP/DOWN/UP transition
	 */
	if (phydev->is_pseudo_fixed_link)
		phydev->irq = PHY_POLL;

	phy_attached_info(phydev);
	return 0;
}
10317ac6653aSJeff Kirsher 
103271fedb01SJoao Pinto static void stmmac_display_rx_rings(struct stmmac_priv *priv)
1033c24602efSGiuseppe CAVALLARO {
103454139cf3SJoao Pinto 	u32 rx_cnt = priv->plat->rx_queues_to_use;
103571fedb01SJoao Pinto 	void *head_rx;
103654139cf3SJoao Pinto 	u32 queue;
103754139cf3SJoao Pinto 
103854139cf3SJoao Pinto 	/* Display RX rings */
103954139cf3SJoao Pinto 	for (queue = 0; queue < rx_cnt; queue++) {
104054139cf3SJoao Pinto 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
104154139cf3SJoao Pinto 
104254139cf3SJoao Pinto 		pr_info("\tRX Queue %u rings\n", queue);
1043d0225e7dSAlexandre TORGUE 
104471fedb01SJoao Pinto 		if (priv->extend_desc)
104554139cf3SJoao Pinto 			head_rx = (void *)rx_q->dma_erx;
104671fedb01SJoao Pinto 		else
104754139cf3SJoao Pinto 			head_rx = (void *)rx_q->dma_rx;
104871fedb01SJoao Pinto 
104971fedb01SJoao Pinto 		/* Display RX ring */
105042de047dSJose Abreu 		stmmac_display_ring(priv, head_rx, DMA_RX_SIZE, true);
10515bacd778SLABBE Corentin 	}
105254139cf3SJoao Pinto }
1053d0225e7dSAlexandre TORGUE 
105471fedb01SJoao Pinto static void stmmac_display_tx_rings(struct stmmac_priv *priv)
105571fedb01SJoao Pinto {
1056ce736788SJoao Pinto 	u32 tx_cnt = priv->plat->tx_queues_to_use;
105771fedb01SJoao Pinto 	void *head_tx;
1058ce736788SJoao Pinto 	u32 queue;
1059ce736788SJoao Pinto 
1060ce736788SJoao Pinto 	/* Display TX rings */
1061ce736788SJoao Pinto 	for (queue = 0; queue < tx_cnt; queue++) {
1062ce736788SJoao Pinto 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1063ce736788SJoao Pinto 
1064ce736788SJoao Pinto 		pr_info("\tTX Queue %d rings\n", queue);
106571fedb01SJoao Pinto 
106671fedb01SJoao Pinto 		if (priv->extend_desc)
1067ce736788SJoao Pinto 			head_tx = (void *)tx_q->dma_etx;
106871fedb01SJoao Pinto 		else
1069ce736788SJoao Pinto 			head_tx = (void *)tx_q->dma_tx;
107071fedb01SJoao Pinto 
107142de047dSJose Abreu 		stmmac_display_ring(priv, head_tx, DMA_TX_SIZE, false);
1072c24602efSGiuseppe CAVALLARO 	}
1073ce736788SJoao Pinto }
1074c24602efSGiuseppe CAVALLARO 
/**
 * stmmac_display_rings - display RX and TX descriptor rings
 * @priv: driver private structure
 * Description: dump the content of both the RX and the TX rings.
 */
static void stmmac_display_rings(struct stmmac_priv *priv)
{
	/* Display RX ring */
	stmmac_display_rx_rings(priv);

	/* Display TX ring */
	stmmac_display_tx_rings(priv);
}
108371fedb01SJoao Pinto 
1084286a8372SGiuseppe CAVALLARO static int stmmac_set_bfsize(int mtu, int bufsize)
1085286a8372SGiuseppe CAVALLARO {
1086286a8372SGiuseppe CAVALLARO 	int ret = bufsize;
1087286a8372SGiuseppe CAVALLARO 
1088286a8372SGiuseppe CAVALLARO 	if (mtu >= BUF_SIZE_4KiB)
1089286a8372SGiuseppe CAVALLARO 		ret = BUF_SIZE_8KiB;
1090286a8372SGiuseppe CAVALLARO 	else if (mtu >= BUF_SIZE_2KiB)
1091286a8372SGiuseppe CAVALLARO 		ret = BUF_SIZE_4KiB;
1092d916701cSGiuseppe CAVALLARO 	else if (mtu > DEFAULT_BUFSIZE)
1093286a8372SGiuseppe CAVALLARO 		ret = BUF_SIZE_2KiB;
1094286a8372SGiuseppe CAVALLARO 	else
1095d916701cSGiuseppe CAVALLARO 		ret = DEFAULT_BUFSIZE;
1096286a8372SGiuseppe CAVALLARO 
1097286a8372SGiuseppe CAVALLARO 	return ret;
1098286a8372SGiuseppe CAVALLARO }
1099286a8372SGiuseppe CAVALLARO 
110032ceabcaSGiuseppe CAVALLARO /**
110171fedb01SJoao Pinto  * stmmac_clear_rx_descriptors - clear RX descriptors
110232ceabcaSGiuseppe CAVALLARO  * @priv: driver private structure
110354139cf3SJoao Pinto  * @queue: RX queue index
110471fedb01SJoao Pinto  * Description: this function is called to clear the RX descriptors
110532ceabcaSGiuseppe CAVALLARO  * in case of both basic and extended descriptors are used.
110632ceabcaSGiuseppe CAVALLARO  */
110754139cf3SJoao Pinto static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv, u32 queue)
1108c24602efSGiuseppe CAVALLARO {
110954139cf3SJoao Pinto 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
11105bacd778SLABBE Corentin 	int i;
1111c24602efSGiuseppe CAVALLARO 
111271fedb01SJoao Pinto 	/* Clear the RX descriptors */
11135bacd778SLABBE Corentin 	for (i = 0; i < DMA_RX_SIZE; i++)
11145bacd778SLABBE Corentin 		if (priv->extend_desc)
111542de047dSJose Abreu 			stmmac_init_rx_desc(priv, &rx_q->dma_erx[i].basic,
11165bacd778SLABBE Corentin 					priv->use_riwt, priv->mode,
11175bacd778SLABBE Corentin 					(i == DMA_RX_SIZE - 1));
11185bacd778SLABBE Corentin 		else
111942de047dSJose Abreu 			stmmac_init_rx_desc(priv, &rx_q->dma_rx[i],
11205bacd778SLABBE Corentin 					priv->use_riwt, priv->mode,
11215bacd778SLABBE Corentin 					(i == DMA_RX_SIZE - 1));
112271fedb01SJoao Pinto }
112371fedb01SJoao Pinto 
112471fedb01SJoao Pinto /**
112571fedb01SJoao Pinto  * stmmac_clear_tx_descriptors - clear tx descriptors
112671fedb01SJoao Pinto  * @priv: driver private structure
1127ce736788SJoao Pinto  * @queue: TX queue index.
112871fedb01SJoao Pinto  * Description: this function is called to clear the TX descriptors
112971fedb01SJoao Pinto  * whether basic or extended descriptors are in use.
113071fedb01SJoao Pinto  */
1131ce736788SJoao Pinto static void stmmac_clear_tx_descriptors(struct stmmac_priv *priv, u32 queue)
113271fedb01SJoao Pinto {
1133ce736788SJoao Pinto 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
113471fedb01SJoao Pinto 	int i;
113571fedb01SJoao Pinto 
113671fedb01SJoao Pinto 	/* Re-init every TX descriptor; the last index is flagged as such */
11375bacd778SLABBE Corentin 	for (i = 0; i < DMA_TX_SIZE; i++)
11385bacd778SLABBE Corentin 		if (priv->extend_desc)
113942de047dSJose Abreu 			stmmac_init_tx_desc(priv, &tx_q->dma_etx[i].basic,
114042de047dSJose Abreu 					priv->mode, (i == DMA_TX_SIZE - 1));
11415bacd778SLABBE Corentin 		else
114242de047dSJose Abreu 			stmmac_init_tx_desc(priv, &tx_q->dma_tx[i],
114342de047dSJose Abreu 					priv->mode, (i == DMA_TX_SIZE - 1));
1144c24602efSGiuseppe CAVALLARO }
1145c24602efSGiuseppe CAVALLARO 
1146732fdf0eSGiuseppe CAVALLARO /**
114771fedb01SJoao Pinto  * stmmac_clear_descriptors - clear descriptors
114871fedb01SJoao Pinto  * @priv: driver private structure
114971fedb01SJoao Pinto  * Description: this function is called to clear the TX and RX descriptors
115071fedb01SJoao Pinto  * of every queue, whether basic or extended descriptors are in use.
115171fedb01SJoao Pinto  */
115271fedb01SJoao Pinto static void stmmac_clear_descriptors(struct stmmac_priv *priv)
115371fedb01SJoao Pinto {
115454139cf3SJoao Pinto 	u32 rx_queue_cnt = priv->plat->rx_queues_to_use;
1155ce736788SJoao Pinto 	u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
115654139cf3SJoao Pinto 	u32 queue;
115754139cf3SJoao Pinto 
115871fedb01SJoao Pinto 	/* Clear the RX descriptors */
115954139cf3SJoao Pinto 	for (queue = 0; queue < rx_queue_cnt; queue++)
116054139cf3SJoao Pinto 		stmmac_clear_rx_descriptors(priv, queue);
116171fedb01SJoao Pinto 
116271fedb01SJoao Pinto 	/* Clear the TX descriptors */
1163ce736788SJoao Pinto 	for (queue = 0; queue < tx_queue_cnt; queue++)
1164ce736788SJoao Pinto 		stmmac_clear_tx_descriptors(priv, queue);
116571fedb01SJoao Pinto }
116671fedb01SJoao Pinto 
116771fedb01SJoao Pinto /**
1168732fdf0eSGiuseppe CAVALLARO  * stmmac_init_rx_buffers - init the RX descriptor buffer.
1169732fdf0eSGiuseppe CAVALLARO  * @priv: driver private structure
1170732fdf0eSGiuseppe CAVALLARO  * @p: descriptor pointer
1171732fdf0eSGiuseppe CAVALLARO  * @i: descriptor index
117254139cf3SJoao Pinto  * @flags: gfp flag
117354139cf3SJoao Pinto  * @queue: RX queue index
1174732fdf0eSGiuseppe CAVALLARO  * Description: this function is called to allocate a receive buffer, perform
1175732fdf0eSGiuseppe CAVALLARO  * the DMA mapping and init the descriptor.
 * Return: 0 on success, -ENOMEM if the skb allocation fails or -EINVAL if
 * the DMA mapping fails (the skb is freed in that case).
1176732fdf0eSGiuseppe CAVALLARO  */
1177c24602efSGiuseppe CAVALLARO static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
117854139cf3SJoao Pinto 				  int i, gfp_t flags, u32 queue)
1179c24602efSGiuseppe CAVALLARO {
118054139cf3SJoao Pinto 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1181c24602efSGiuseppe CAVALLARO 	struct sk_buff *skb;
1182c24602efSGiuseppe CAVALLARO 
11834ec49a37SVineet Gupta 	skb = __netdev_alloc_skb_ip_align(priv->dev, priv->dma_buf_sz, flags);
118456329137SBartlomiej Zolnierkiewicz 	if (!skb) {
118538ddc59dSLABBE Corentin 		netdev_err(priv->dev,
118638ddc59dSLABBE Corentin 			   "%s: Rx init fails; skb is NULL\n", __func__);
118756329137SBartlomiej Zolnierkiewicz 		return -ENOMEM;
1188c24602efSGiuseppe CAVALLARO 	}
118954139cf3SJoao Pinto 	rx_q->rx_skbuff[i] = skb;
119054139cf3SJoao Pinto 	rx_q->rx_skbuff_dma[i] = dma_map_single(priv->device, skb->data,
1191c24602efSGiuseppe CAVALLARO 						priv->dma_buf_sz,
1192c24602efSGiuseppe CAVALLARO 						DMA_FROM_DEVICE);
119354139cf3SJoao Pinto 	if (dma_mapping_error(priv->device, rx_q->rx_skbuff_dma[i])) {
119438ddc59dSLABBE Corentin 		netdev_err(priv->dev, "%s: DMA mapping error\n", __func__);
119556329137SBartlomiej Zolnierkiewicz 		dev_kfree_skb_any(skb);
119656329137SBartlomiej Zolnierkiewicz 		return -EINVAL;
119756329137SBartlomiej Zolnierkiewicz 	}
1198c24602efSGiuseppe CAVALLARO 
11996844171dSJose Abreu 	stmmac_set_desc_addr(priv, p, rx_q->rx_skbuff_dma[i]);
1200c24602efSGiuseppe CAVALLARO 
12012c520b1cSJose Abreu 	/* 16KiB buffers need the extra desc3 setup (HW-specific callback) */
12022c520b1cSJose Abreu 	if (priv->dma_buf_sz == BUF_SIZE_16KiB)
12022c520b1cSJose Abreu 		stmmac_init_desc3(priv, p);
1203c24602efSGiuseppe CAVALLARO 
1204c24602efSGiuseppe CAVALLARO 	return 0;
1205c24602efSGiuseppe CAVALLARO }
1206c24602efSGiuseppe CAVALLARO 
120771fedb01SJoao Pinto /**
120871fedb01SJoao Pinto  * stmmac_free_rx_buffer - free RX dma buffers
120971fedb01SJoao Pinto  * @priv: private structure
121054139cf3SJoao Pinto  * @queue: RX queue index
121171fedb01SJoao Pinto  * @i: buffer index.
 * Description: unmap and free the skb attached to RX descriptor @i, if any,
 * and clear the tracking pointer.
121271fedb01SJoao Pinto  */
121354139cf3SJoao Pinto static void stmmac_free_rx_buffer(struct stmmac_priv *priv, u32 queue, int i)
121456329137SBartlomiej Zolnierkiewicz {
121554139cf3SJoao Pinto 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
121654139cf3SJoao Pinto 
121754139cf3SJoao Pinto 	if (rx_q->rx_skbuff[i]) {
121854139cf3SJoao Pinto 		dma_unmap_single(priv->device, rx_q->rx_skbuff_dma[i],
121956329137SBartlomiej Zolnierkiewicz 				 priv->dma_buf_sz, DMA_FROM_DEVICE);
122054139cf3SJoao Pinto 		dev_kfree_skb_any(rx_q->rx_skbuff[i]);
122156329137SBartlomiej Zolnierkiewicz 	}
122254139cf3SJoao Pinto 	rx_q->rx_skbuff[i] = NULL;
122356329137SBartlomiej Zolnierkiewicz }
122456329137SBartlomiej Zolnierkiewicz 
12257ac6653aSJeff Kirsher /**
122671fedb01SJoao Pinto  * stmmac_free_tx_buffer - free TX dma buffers
122771fedb01SJoao Pinto  * @priv: private structure
1228ce736788SJoao Pinto  * @queue: TX queue index
122971fedb01SJoao Pinto  * @i: buffer index.
 * Description: unmap the DMA buffer attached to TX descriptor @i (page or
 * single mapping, as recorded at map time) and free the skb, if any.
123071fedb01SJoao Pinto  */
1231ce736788SJoao Pinto static void stmmac_free_tx_buffer(struct stmmac_priv *priv, u32 queue, int i)
123271fedb01SJoao Pinto {
1233ce736788SJoao Pinto 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1234ce736788SJoao Pinto 
1235ce736788SJoao Pinto 	if (tx_q->tx_skbuff_dma[i].buf) {
1236ce736788SJoao Pinto 		if (tx_q->tx_skbuff_dma[i].map_as_page)
123771fedb01SJoao Pinto 			dma_unmap_page(priv->device,
1238ce736788SJoao Pinto 				       tx_q->tx_skbuff_dma[i].buf,
1239ce736788SJoao Pinto 				       tx_q->tx_skbuff_dma[i].len,
124071fedb01SJoao Pinto 				       DMA_TO_DEVICE);
124171fedb01SJoao Pinto 		else
124271fedb01SJoao Pinto 			dma_unmap_single(priv->device,
1243ce736788SJoao Pinto 					 tx_q->tx_skbuff_dma[i].buf,
1244ce736788SJoao Pinto 					 tx_q->tx_skbuff_dma[i].len,
124571fedb01SJoao Pinto 					 DMA_TO_DEVICE);
124671fedb01SJoao Pinto 	}
124771fedb01SJoao Pinto 
1248ce736788SJoao Pinto 	if (tx_q->tx_skbuff[i]) {
1249ce736788SJoao Pinto 		dev_kfree_skb_any(tx_q->tx_skbuff[i]);
1250ce736788SJoao Pinto 		tx_q->tx_skbuff[i] = NULL;
1251ce736788SJoao Pinto 		tx_q->tx_skbuff_dma[i].buf = 0;
1252ce736788SJoao Pinto 		tx_q->tx_skbuff_dma[i].map_as_page = false;
125371fedb01SJoao Pinto 	}
125471fedb01SJoao Pinto }
125571fedb01SJoao Pinto 
125671fedb01SJoao Pinto /**
125771fedb01SJoao Pinto  * init_dma_rx_desc_rings - init the RX descriptor rings
12587ac6653aSJeff Kirsher  * @dev: net device structure
12595bacd778SLABBE Corentin  * @flags: gfp flag.
126071fedb01SJoao Pinto  * Description: this function initializes the DMA RX descriptors
12615bacd778SLABBE Corentin  * and allocates the socket buffers. It supports the chained and ring
1262286a8372SGiuseppe CAVALLARO  * modes.
 * Return: 0 on success, a negative error code if a buffer allocation or
 * mapping fails (already-allocated buffers are unwound before returning).
12637ac6653aSJeff Kirsher  */
126471fedb01SJoao Pinto static int init_dma_rx_desc_rings(struct net_device *dev, gfp_t flags)
12657ac6653aSJeff Kirsher {
12667ac6653aSJeff Kirsher 	struct stmmac_priv *priv = netdev_priv(dev);
126754139cf3SJoao Pinto 	u32 rx_count = priv->plat->rx_queues_to_use;
12685bacd778SLABBE Corentin 	int ret = -ENOMEM;
12692c520b1cSJose Abreu 	int bfsize = 0;
12701d3028f4SColin Ian King 	int queue;
127154139cf3SJoao Pinto 	int i;
12727ac6653aSJeff Kirsher 
12732c520b1cSJose Abreu 	/* Pick the DMA buffer size from the MTU (HW-specific 16KiB hook
	 * first, generic sizing otherwise) and publish it in dma_buf_sz.
	 */
12732c520b1cSJose Abreu 	bfsize = stmmac_set_16kib_bfsize(priv, dev->mtu);
12742c520b1cSJose Abreu 	if (bfsize < 0)
12752c520b1cSJose Abreu 		bfsize = 0;
12765bacd778SLABBE Corentin 
12775bacd778SLABBE Corentin 	if (bfsize < BUF_SIZE_16KiB)
12785bacd778SLABBE Corentin 		bfsize = stmmac_set_bfsize(dev->mtu, priv->dma_buf_sz);
12795bacd778SLABBE Corentin 
12805bacd778SLABBE Corentin 	priv->dma_buf_sz = bfsize;
12812618abb7SVince Bridgers 
128254139cf3SJoao Pinto 	/* RX INITIALIZATION */
12835bacd778SLABBE Corentin 	netif_dbg(priv, probe, priv->dev,
12845bacd778SLABBE Corentin 		  "SKB addresses:\nskb\t\tskb data\tdma data\n");
12855bacd778SLABBE Corentin 
128654139cf3SJoao Pinto 	for (queue = 0; queue < rx_count; queue++) {
128754139cf3SJoao Pinto 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
128854139cf3SJoao Pinto 
128954139cf3SJoao Pinto 		netif_dbg(priv, probe, priv->dev,
129054139cf3SJoao Pinto 			  "(%s) dma_rx_phy=0x%08x\n", __func__,
129154139cf3SJoao Pinto 			  (u32)rx_q->dma_rx_phy);
129254139cf3SJoao Pinto 
12935bacd778SLABBE Corentin 		for (i = 0; i < DMA_RX_SIZE; i++) {
12945bacd778SLABBE Corentin 			struct dma_desc *p;
12955bacd778SLABBE Corentin 
129654139cf3SJoao Pinto 			if (priv->extend_desc)
129754139cf3SJoao Pinto 				p = &((rx_q->dma_erx + i)->basic);
129854139cf3SJoao Pinto 			else
129954139cf3SJoao Pinto 				p = rx_q->dma_rx + i;
130054139cf3SJoao Pinto 
130154139cf3SJoao Pinto 			ret = stmmac_init_rx_buffers(priv, p, i, flags,
130254139cf3SJoao Pinto 						     queue);
13035bacd778SLABBE Corentin 			if (ret)
13045bacd778SLABBE Corentin 				goto err_init_rx_buffers;
13055bacd778SLABBE Corentin 
13065bacd778SLABBE Corentin 			netif_dbg(priv, probe, priv->dev, "[%p]\t[%p]\t[%x]\n",
130754139cf3SJoao Pinto 				  rx_q->rx_skbuff[i], rx_q->rx_skbuff[i]->data,
130854139cf3SJoao Pinto 				  (unsigned int)rx_q->rx_skbuff_dma[i]);
13095bacd778SLABBE Corentin 		}
131054139cf3SJoao Pinto 
131154139cf3SJoao Pinto 		rx_q->cur_rx = 0;
131254139cf3SJoao Pinto 		rx_q->dirty_rx = (unsigned int)(i - DMA_RX_SIZE);
131354139cf3SJoao Pinto 
131454139cf3SJoao Pinto 		stmmac_clear_rx_descriptors(priv, queue);
13157ac6653aSJeff Kirsher 
1316c24602efSGiuseppe CAVALLARO 		/* Setup the chained descriptor addresses */
1317c24602efSGiuseppe CAVALLARO 		if (priv->mode == STMMAC_CHAIN_MODE) {
131871fedb01SJoao Pinto 			if (priv->extend_desc)
13192c520b1cSJose Abreu 				stmmac_mode_init(priv, rx_q->dma_erx,
13202c520b1cSJose Abreu 						rx_q->dma_rx_phy, DMA_RX_SIZE, 1);
132171fedb01SJoao Pinto 			else
13222c520b1cSJose Abreu 				stmmac_mode_init(priv, rx_q->dma_rx,
13232c520b1cSJose Abreu 						rx_q->dma_rx_phy, DMA_RX_SIZE, 0);
132471fedb01SJoao Pinto 		}
132554139cf3SJoao Pinto 	}
132654139cf3SJoao Pinto 
132754139cf3SJoao Pinto 	buf_sz = bfsize;
132871fedb01SJoao Pinto 
132971fedb01SJoao Pinto 	return 0;
133054139cf3SJoao Pinto 
133171fedb01SJoao Pinto err_init_rx_buffers:
	/* Unwind: free buffers of the failing queue up to index i, then all
	 * DMA_RX_SIZE buffers of every fully-initialized earlier queue.
	 */
133254139cf3SJoao Pinto 	while (queue >= 0) {
133371fedb01SJoao Pinto 		while (--i >= 0)
133454139cf3SJoao Pinto 			stmmac_free_rx_buffer(priv, queue, i);
133554139cf3SJoao Pinto 
133654139cf3SJoao Pinto 		if (queue == 0)
133754139cf3SJoao Pinto 			break;
133854139cf3SJoao Pinto 
133954139cf3SJoao Pinto 		i = DMA_RX_SIZE;
134054139cf3SJoao Pinto 		queue--;
134154139cf3SJoao Pinto 	}
134254139cf3SJoao Pinto 
134371fedb01SJoao Pinto 	return ret;
134471fedb01SJoao Pinto }
134571fedb01SJoao Pinto 
134671fedb01SJoao Pinto /**
134771fedb01SJoao Pinto  * init_dma_tx_desc_rings - init the TX descriptor rings
134871fedb01SJoao Pinto  * @dev: net device structure.
134971fedb01SJoao Pinto  * Description: this function initializes the DMA TX descriptors
135071fedb01SJoao Pinto  * and allocates the socket buffers. It supports the chained and ring
135171fedb01SJoao Pinto  * modes.
 * Return: always 0.
135271fedb01SJoao Pinto  */
135371fedb01SJoao Pinto static int init_dma_tx_desc_rings(struct net_device *dev)
135471fedb01SJoao Pinto {
135571fedb01SJoao Pinto 	struct stmmac_priv *priv = netdev_priv(dev);
1356ce736788SJoao Pinto 	u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1357ce736788SJoao Pinto 	u32 queue;
135871fedb01SJoao Pinto 	int i;
135971fedb01SJoao Pinto 
1360ce736788SJoao Pinto 	for (queue = 0; queue < tx_queue_cnt; queue++) {
1361ce736788SJoao Pinto 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1362ce736788SJoao Pinto 
136371fedb01SJoao Pinto 		netif_dbg(priv, probe, priv->dev,
1364ce736788SJoao Pinto 			  "(%s) dma_tx_phy=0x%08x\n", __func__,
1365ce736788SJoao Pinto 			 (u32)tx_q->dma_tx_phy);
136671fedb01SJoao Pinto 
136771fedb01SJoao Pinto 		/* Setup the chained descriptor addresses */
136871fedb01SJoao Pinto 		if (priv->mode == STMMAC_CHAIN_MODE) {
136971fedb01SJoao Pinto 			if (priv->extend_desc)
13702c520b1cSJose Abreu 				stmmac_mode_init(priv, tx_q->dma_etx,
13712c520b1cSJose Abreu 						tx_q->dma_tx_phy, DMA_TX_SIZE, 1);
137271fedb01SJoao Pinto 			else
13732c520b1cSJose Abreu 				stmmac_mode_init(priv, tx_q->dma_tx,
13742c520b1cSJose Abreu 						tx_q->dma_tx_phy, DMA_TX_SIZE, 0);
1375c24602efSGiuseppe CAVALLARO 		}
1376286a8372SGiuseppe CAVALLARO 
1377e3ad57c9SGiuseppe Cavallaro 		/* Zero every descriptor and reset the per-slot bookkeeping */
1377e3ad57c9SGiuseppe Cavallaro 		for (i = 0; i < DMA_TX_SIZE; i++) {
1378c24602efSGiuseppe CAVALLARO 			struct dma_desc *p;
1379c24602efSGiuseppe CAVALLARO 			if (priv->extend_desc)
1380ce736788SJoao Pinto 				p = &((tx_q->dma_etx + i)->basic);
1381c24602efSGiuseppe CAVALLARO 			else
1382ce736788SJoao Pinto 				p = tx_q->dma_tx + i;
1383f748be53SAlexandre TORGUE 
138444c67f85SJose Abreu 			stmmac_clear_desc(priv, p);
1385f748be53SAlexandre TORGUE 
1386ce736788SJoao Pinto 			tx_q->tx_skbuff_dma[i].buf = 0;
1387ce736788SJoao Pinto 			tx_q->tx_skbuff_dma[i].map_as_page = false;
1388ce736788SJoao Pinto 			tx_q->tx_skbuff_dma[i].len = 0;
1389ce736788SJoao Pinto 			tx_q->tx_skbuff_dma[i].last_segment = false;
1390ce736788SJoao Pinto 			tx_q->tx_skbuff[i] = NULL;
13914a7d666aSGiuseppe CAVALLARO 		}
1392c24602efSGiuseppe CAVALLARO 
1393ce736788SJoao Pinto 		tx_q->dirty_tx = 0;
1394ce736788SJoao Pinto 		tx_q->cur_tx = 0;
13958d212a9eSNiklas Cassel 		tx_q->mss = 0;
1396ce736788SJoao Pinto 
1397c22a3f48SJoao Pinto 		netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue));
1398c22a3f48SJoao Pinto 	}
13997ac6653aSJeff Kirsher 
140071fedb01SJoao Pinto 	return 0;
140171fedb01SJoao Pinto }
140271fedb01SJoao Pinto 
140371fedb01SJoao Pinto /**
140471fedb01SJoao Pinto  * init_dma_desc_rings - init the RX/TX descriptor rings
140571fedb01SJoao Pinto  * @dev: net device structure
140671fedb01SJoao Pinto  * @flags: gfp flag.
140771fedb01SJoao Pinto  * Description: this function initializes the DMA RX/TX descriptors
140871fedb01SJoao Pinto  * and allocates the socket buffers. It supports the chained and ring
140971fedb01SJoao Pinto  * modes.
 * Return: 0 on success, a negative error code otherwise.
141071fedb01SJoao Pinto  */
141171fedb01SJoao Pinto static int init_dma_desc_rings(struct net_device *dev, gfp_t flags)
141271fedb01SJoao Pinto {
141371fedb01SJoao Pinto 	struct stmmac_priv *priv = netdev_priv(dev);
141471fedb01SJoao Pinto 	int ret;
141571fedb01SJoao Pinto 
141671fedb01SJoao Pinto 	ret = init_dma_rx_desc_rings(dev, flags);
141771fedb01SJoao Pinto 	if (ret)
141871fedb01SJoao Pinto 		return ret;
141971fedb01SJoao Pinto 
142071fedb01SJoao Pinto 	ret = init_dma_tx_desc_rings(dev);
142171fedb01SJoao Pinto 
14225bacd778SLABBE Corentin 	stmmac_clear_descriptors(priv);
14237ac6653aSJeff Kirsher 
1424c24602efSGiuseppe CAVALLARO 	if (netif_msg_hw(priv))
1425c24602efSGiuseppe CAVALLARO 		stmmac_display_rings(priv);
142656329137SBartlomiej Zolnierkiewicz 
142756329137SBartlomiej Zolnierkiewicz 	return ret;
14287ac6653aSJeff Kirsher }
14297ac6653aSJeff Kirsher 
143071fedb01SJoao Pinto /**
143171fedb01SJoao Pinto  * dma_free_rx_skbufs - free RX dma buffers
143271fedb01SJoao Pinto  * @priv: private structure
143354139cf3SJoao Pinto  * @queue: RX queue index
 * Description: release every RX socket buffer of @queue.
143471fedb01SJoao Pinto  */
143554139cf3SJoao Pinto static void dma_free_rx_skbufs(struct stmmac_priv *priv, u32 queue)
14367ac6653aSJeff Kirsher {
14377ac6653aSJeff Kirsher 	int i;
14387ac6653aSJeff Kirsher 
1439e3ad57c9SGiuseppe Cavallaro 	for (i = 0; i < DMA_RX_SIZE; i++)
144054139cf3SJoao Pinto 		stmmac_free_rx_buffer(priv, queue, i);
14417ac6653aSJeff Kirsher }
14427ac6653aSJeff Kirsher 
144371fedb01SJoao Pinto /**
144471fedb01SJoao Pinto  * dma_free_tx_skbufs - free TX dma buffers
144571fedb01SJoao Pinto  * @priv: private structure
1446ce736788SJoao Pinto  * @queue: TX queue index
 * Description: release every TX socket buffer of @queue.
144771fedb01SJoao Pinto  */
1448ce736788SJoao Pinto static void dma_free_tx_skbufs(struct stmmac_priv *priv, u32 queue)
14497ac6653aSJeff Kirsher {
14507ac6653aSJeff Kirsher 	int i;
14517ac6653aSJeff Kirsher 
145271fedb01SJoao Pinto 	for (i = 0; i < DMA_TX_SIZE; i++)
1453ce736788SJoao Pinto 		stmmac_free_tx_buffer(priv, queue, i);
14547ac6653aSJeff Kirsher }
14557ac6653aSJeff Kirsher 
1456732fdf0eSGiuseppe CAVALLARO /**
145754139cf3SJoao Pinto  * free_dma_rx_desc_resources - free RX dma desc resources
145854139cf3SJoao Pinto  * @priv: private structure
 * Description: for each RX queue, free the socket buffers, the coherent
 * descriptor ring (basic or extended) and the tracking arrays.
145954139cf3SJoao Pinto  */
146054139cf3SJoao Pinto static void free_dma_rx_desc_resources(struct stmmac_priv *priv)
146154139cf3SJoao Pinto {
146254139cf3SJoao Pinto 	u32 rx_count = priv->plat->rx_queues_to_use;
146354139cf3SJoao Pinto 	u32 queue;
146454139cf3SJoao Pinto 
146554139cf3SJoao Pinto 	/* Free RX queue resources */
146654139cf3SJoao Pinto 	for (queue = 0; queue < rx_count; queue++) {
146754139cf3SJoao Pinto 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
146854139cf3SJoao Pinto 
146954139cf3SJoao Pinto 		/* Release the DMA RX socket buffers */
147054139cf3SJoao Pinto 		dma_free_rx_skbufs(priv, queue);
147154139cf3SJoao Pinto 
147254139cf3SJoao Pinto 		/* Free DMA regions of consistent memory previously allocated */
147354139cf3SJoao Pinto 		if (!priv->extend_desc)
147454139cf3SJoao Pinto 			dma_free_coherent(priv->device,
147554139cf3SJoao Pinto 					  DMA_RX_SIZE * sizeof(struct dma_desc),
147654139cf3SJoao Pinto 					  rx_q->dma_rx, rx_q->dma_rx_phy);
147754139cf3SJoao Pinto 		else
147854139cf3SJoao Pinto 			dma_free_coherent(priv->device, DMA_RX_SIZE *
147954139cf3SJoao Pinto 					  sizeof(struct dma_extended_desc),
148054139cf3SJoao Pinto 					  rx_q->dma_erx, rx_q->dma_rx_phy);
148154139cf3SJoao Pinto 
148254139cf3SJoao Pinto 		kfree(rx_q->rx_skbuff_dma);
148354139cf3SJoao Pinto 		kfree(rx_q->rx_skbuff);
148454139cf3SJoao Pinto 	}
148554139cf3SJoao Pinto }
148654139cf3SJoao Pinto 
148754139cf3SJoao Pinto /**
1488ce736788SJoao Pinto  * free_dma_tx_desc_resources - free TX dma desc resources
1489ce736788SJoao Pinto  * @priv: private structure
 * Description: for each TX queue, free the socket buffers, the coherent
 * descriptor ring (basic or extended) and the tracking arrays.
1490ce736788SJoao Pinto  */
1491ce736788SJoao Pinto static void free_dma_tx_desc_resources(struct stmmac_priv *priv)
1492ce736788SJoao Pinto {
1493ce736788SJoao Pinto 	u32 tx_count = priv->plat->tx_queues_to_use;
149462242260SChristophe Jaillet 	u32 queue;
1495ce736788SJoao Pinto 
1496ce736788SJoao Pinto 	/* Free TX queue resources */
1497ce736788SJoao Pinto 	for (queue = 0; queue < tx_count; queue++) {
1498ce736788SJoao Pinto 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1499ce736788SJoao Pinto 
1500ce736788SJoao Pinto 		/* Release the DMA TX socket buffers */
1501ce736788SJoao Pinto 		dma_free_tx_skbufs(priv, queue);
1502ce736788SJoao Pinto 
1503ce736788SJoao Pinto 		/* Free DMA regions of consistent memory previously allocated */
1504ce736788SJoao Pinto 		if (!priv->extend_desc)
1505ce736788SJoao Pinto 			dma_free_coherent(priv->device,
1506ce736788SJoao Pinto 					  DMA_TX_SIZE * sizeof(struct dma_desc),
1507ce736788SJoao Pinto 					  tx_q->dma_tx, tx_q->dma_tx_phy);
1508ce736788SJoao Pinto 		else
1509ce736788SJoao Pinto 			dma_free_coherent(priv->device, DMA_TX_SIZE *
1510ce736788SJoao Pinto 					  sizeof(struct dma_extended_desc),
1511ce736788SJoao Pinto 					  tx_q->dma_etx, tx_q->dma_tx_phy);
1512ce736788SJoao Pinto 
1513ce736788SJoao Pinto 		kfree(tx_q->tx_skbuff_dma);
1514ce736788SJoao Pinto 		kfree(tx_q->tx_skbuff);
1515ce736788SJoao Pinto 	}
1516ce736788SJoao Pinto }
1517ce736788SJoao Pinto 
1518ce736788SJoao Pinto /**
151971fedb01SJoao Pinto  * alloc_dma_rx_desc_resources - alloc RX resources.
1520732fdf0eSGiuseppe CAVALLARO  * @priv: private structure
1521732fdf0eSGiuseppe CAVALLARO  * Description: according to which descriptor can be used (extend or basic)
1522732fdf0eSGiuseppe CAVALLARO  * this function allocates, for each RX queue, the skb tracking arrays and
1523732fdf0eSGiuseppe CAVALLARO  * the coherent DMA memory holding the descriptor ring. The socket buffers
1524732fdf0eSGiuseppe CAVALLARO  * themselves are allocated later, in init_dma_rx_desc_rings().
 * Return: 0 on success, -ENOMEM otherwise (partial allocations are freed).
1525732fdf0eSGiuseppe CAVALLARO  */
152671fedb01SJoao Pinto static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv)
152709f8d696SSrinivas Kandagatla {
152854139cf3SJoao Pinto 	u32 rx_count = priv->plat->rx_queues_to_use;
15295bacd778SLABBE Corentin 	int ret = -ENOMEM;
153054139cf3SJoao Pinto 	u32 queue;
153109f8d696SSrinivas Kandagatla 
153254139cf3SJoao Pinto 	/* RX queues buffers and DMA */
153354139cf3SJoao Pinto 	for (queue = 0; queue < rx_count; queue++) {
153454139cf3SJoao Pinto 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
153554139cf3SJoao Pinto 
153654139cf3SJoao Pinto 		rx_q->queue_index = queue;
153754139cf3SJoao Pinto 		rx_q->priv_data = priv;
153854139cf3SJoao Pinto 
153954139cf3SJoao Pinto 		rx_q->rx_skbuff_dma = kmalloc_array(DMA_RX_SIZE,
154054139cf3SJoao Pinto 						    sizeof(dma_addr_t),
15415bacd778SLABBE Corentin 						    GFP_KERNEL);
154254139cf3SJoao Pinto 		if (!rx_q->rx_skbuff_dma)
154363c3aa6bSChristophe Jaillet 			goto err_dma;
15445bacd778SLABBE Corentin 
154554139cf3SJoao Pinto 		rx_q->rx_skbuff = kmalloc_array(DMA_RX_SIZE,
154654139cf3SJoao Pinto 						sizeof(struct sk_buff *),
15475bacd778SLABBE Corentin 						GFP_KERNEL);
154854139cf3SJoao Pinto 		if (!rx_q->rx_skbuff)
154954139cf3SJoao Pinto 			goto err_dma;
15505bacd778SLABBE Corentin 
15515bacd778SLABBE Corentin 		if (priv->extend_desc) {
1552750afb08SLuis Chamberlain 			rx_q->dma_erx = dma_alloc_coherent(priv->device,
1553750afb08SLuis Chamberlain 							   DMA_RX_SIZE * sizeof(struct dma_extended_desc),
155454139cf3SJoao Pinto 							   &rx_q->dma_rx_phy,
15555bacd778SLABBE Corentin 							   GFP_KERNEL);
155654139cf3SJoao Pinto 			if (!rx_q->dma_erx)
15575bacd778SLABBE Corentin 				goto err_dma;
15585bacd778SLABBE Corentin 
155971fedb01SJoao Pinto 		} else {
1560750afb08SLuis Chamberlain 			rx_q->dma_rx = dma_alloc_coherent(priv->device,
1561750afb08SLuis Chamberlain 							  DMA_RX_SIZE * sizeof(struct dma_desc),
156254139cf3SJoao Pinto 							  &rx_q->dma_rx_phy,
156371fedb01SJoao Pinto 							  GFP_KERNEL);
156454139cf3SJoao Pinto 			if (!rx_q->dma_rx)
156571fedb01SJoao Pinto 				goto err_dma;
156671fedb01SJoao Pinto 		}
156754139cf3SJoao Pinto 	}
156871fedb01SJoao Pinto 
156971fedb01SJoao Pinto 	return 0;
157071fedb01SJoao Pinto 
157171fedb01SJoao Pinto err_dma:
157254139cf3SJoao Pinto 	free_dma_rx_desc_resources(priv);
157354139cf3SJoao Pinto 
157471fedb01SJoao Pinto 	return ret;
157571fedb01SJoao Pinto }
157671fedb01SJoao Pinto 
157771fedb01SJoao Pinto /**
157871fedb01SJoao Pinto  * alloc_dma_tx_desc_resources - alloc TX resources.
157971fedb01SJoao Pinto  * @priv: private structure
158071fedb01SJoao Pinto  * Description: according to which descriptor can be used (extend or basic)
158171fedb01SJoao Pinto  * this function allocates, for each TX queue, the skb tracking arrays and
158271fedb01SJoao Pinto  * the coherent DMA memory holding the descriptor ring.
 * Return: 0 on success, -ENOMEM otherwise (partial allocations are freed).
158471fedb01SJoao Pinto  */
158571fedb01SJoao Pinto static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv)
158671fedb01SJoao Pinto {
1587ce736788SJoao Pinto 	u32 tx_count = priv->plat->tx_queues_to_use;
158871fedb01SJoao Pinto 	int ret = -ENOMEM;
1589ce736788SJoao Pinto 	u32 queue;
159071fedb01SJoao Pinto 
1591ce736788SJoao Pinto 	/* TX queues buffers and DMA */
1592ce736788SJoao Pinto 	for (queue = 0; queue < tx_count; queue++) {
1593ce736788SJoao Pinto 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1594ce736788SJoao Pinto 
1595ce736788SJoao Pinto 		tx_q->queue_index = queue;
1596ce736788SJoao Pinto 		tx_q->priv_data = priv;
1597ce736788SJoao Pinto 
1598ce736788SJoao Pinto 		tx_q->tx_skbuff_dma = kmalloc_array(DMA_TX_SIZE,
1599ce736788SJoao Pinto 						    sizeof(*tx_q->tx_skbuff_dma),
160071fedb01SJoao Pinto 						    GFP_KERNEL);
1601ce736788SJoao Pinto 		if (!tx_q->tx_skbuff_dma)
160262242260SChristophe Jaillet 			goto err_dma;
160371fedb01SJoao Pinto 
1604ce736788SJoao Pinto 		tx_q->tx_skbuff = kmalloc_array(DMA_TX_SIZE,
1605ce736788SJoao Pinto 						sizeof(struct sk_buff *),
160671fedb01SJoao Pinto 						GFP_KERNEL);
1607ce736788SJoao Pinto 		if (!tx_q->tx_skbuff)
160862242260SChristophe Jaillet 			goto err_dma;
160971fedb01SJoao Pinto 
161071fedb01SJoao Pinto 		if (priv->extend_desc) {
1611750afb08SLuis Chamberlain 			tx_q->dma_etx = dma_alloc_coherent(priv->device,
1612750afb08SLuis Chamberlain 							   DMA_TX_SIZE * sizeof(struct dma_extended_desc),
1613ce736788SJoao Pinto 							   &tx_q->dma_tx_phy,
16145bacd778SLABBE Corentin 							   GFP_KERNEL);
1615ce736788SJoao Pinto 			if (!tx_q->dma_etx)
161662242260SChristophe Jaillet 				goto err_dma;
16175bacd778SLABBE Corentin 		} else {
1618750afb08SLuis Chamberlain 			tx_q->dma_tx = dma_alloc_coherent(priv->device,
1619750afb08SLuis Chamberlain 							  DMA_TX_SIZE * sizeof(struct dma_desc),
1620ce736788SJoao Pinto 							  &tx_q->dma_tx_phy,
16215bacd778SLABBE Corentin 							  GFP_KERNEL);
1622ce736788SJoao Pinto 			if (!tx_q->dma_tx)
162362242260SChristophe Jaillet 				goto err_dma;
1624ce736788SJoao Pinto 		}
16255bacd778SLABBE Corentin 	}
16265bacd778SLABBE Corentin 
16275bacd778SLABBE Corentin 	return 0;
16285bacd778SLABBE Corentin 
162962242260SChristophe Jaillet err_dma:
1630ce736788SJoao Pinto 	free_dma_tx_desc_resources(priv);
1631ce736788SJoao Pinto 
163209f8d696SSrinivas Kandagatla 	return ret;
16335bacd778SLABBE Corentin }
163409f8d696SSrinivas Kandagatla 
163571fedb01SJoao Pinto /**
163671fedb01SJoao Pinto  * alloc_dma_desc_resources - alloc TX/RX resources.
163771fedb01SJoao Pinto  * @priv: private structure
163871fedb01SJoao Pinto  * Description: according to which descriptor can be used (extend or basic)
163971fedb01SJoao Pinto  * this function allocates the resources for TX and RX paths. In case of
164071fedb01SJoao Pinto  * reception, for example, it pre-allocated the RX socket buffer in order to
164171fedb01SJoao Pinto  * allow zero-copy mechanism.
 * Return: 0 on success, a negative error code otherwise.
164271fedb01SJoao Pinto  */
164371fedb01SJoao Pinto static int alloc_dma_desc_resources(struct stmmac_priv *priv)
16445bacd778SLABBE Corentin {
164554139cf3SJoao Pinto 	/* RX Allocation */
164671fedb01SJoao Pinto 	int ret = alloc_dma_rx_desc_resources(priv);
164771fedb01SJoao Pinto 
164871fedb01SJoao Pinto 	if (ret)
164971fedb01SJoao Pinto 		return ret;
165071fedb01SJoao Pinto 
165171fedb01SJoao Pinto 	ret = alloc_dma_tx_desc_resources(priv);
165271fedb01SJoao Pinto 
165371fedb01SJoao Pinto 	return ret;
165471fedb01SJoao Pinto }
165571fedb01SJoao Pinto 
165671fedb01SJoao Pinto /**
165771fedb01SJoao Pinto  * free_dma_desc_resources - free dma desc resources
165871fedb01SJoao Pinto  * @priv: private structure
 * Description: counterpart of alloc_dma_desc_resources(); releases both
 * the RX and TX queue resources.
165971fedb01SJoao Pinto  */
166071fedb01SJoao Pinto static void free_dma_desc_resources(struct stmmac_priv *priv)
166171fedb01SJoao Pinto {
166271fedb01SJoao Pinto 	/* Release the DMA RX socket buffers */
166371fedb01SJoao Pinto 	free_dma_rx_desc_resources(priv);
166471fedb01SJoao Pinto 
166571fedb01SJoao Pinto 	/* Release the DMA TX socket buffers */
166671fedb01SJoao Pinto 	free_dma_tx_desc_resources(priv);
166771fedb01SJoao Pinto }
166871fedb01SJoao Pinto 
166971fedb01SJoao Pinto /**
16709eb12474Sjpinto  *  stmmac_mac_enable_rx_queues - Enable MAC rx queues
16719eb12474Sjpinto  *  @priv: driver private structure
16729eb12474Sjpinto  *  Description: It is used for enabling the rx queues in the MAC,
16739eb12474Sjpinto  *  using the per-queue mode configured in the platform data.
16749eb12474Sjpinto  */
16749eb12474Sjpinto static void stmmac_mac_enable_rx_queues(struct stmmac_priv *priv)
16759eb12474Sjpinto {
16764f6046f5SJoao Pinto 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
16774f6046f5SJoao Pinto 	int queue;
16784f6046f5SJoao Pinto 	u8 mode;
16799eb12474Sjpinto 
16804f6046f5SJoao Pinto 	for (queue = 0; queue < rx_queues_count; queue++) {
16814f6046f5SJoao Pinto 		mode = priv->plat->rx_queues_cfg[queue].mode_to_use;
1682c10d4c82SJose Abreu 		stmmac_rx_queue_enable(priv, priv->hw, mode, queue);
16834f6046f5SJoao Pinto 	}
16849eb12474Sjpinto }
16859eb12474Sjpinto 
16869eb12474Sjpinto /**
1687ae4f0d46SJoao Pinto  * stmmac_start_rx_dma - start RX DMA channel
1688ae4f0d46SJoao Pinto  * @priv: driver private structure
1689ae4f0d46SJoao Pinto  * @chan: RX channel index
1690ae4f0d46SJoao Pinto  * Description:
1691ae4f0d46SJoao Pinto  * This starts a RX DMA channel via the HW-specific callback.
1692ae4f0d46SJoao Pinto  */
1693ae4f0d46SJoao Pinto static void stmmac_start_rx_dma(struct stmmac_priv *priv, u32 chan)
1694ae4f0d46SJoao Pinto {
1695ae4f0d46SJoao Pinto 	netdev_dbg(priv->dev, "DMA RX processes started in channel %d\n", chan);
1696a4e887faSJose Abreu 	stmmac_start_rx(priv, priv->ioaddr, chan);
1697ae4f0d46SJoao Pinto }
1698ae4f0d46SJoao Pinto 
1699ae4f0d46SJoao Pinto /**
1700ae4f0d46SJoao Pinto  * stmmac_start_tx_dma - start TX DMA channel
1701ae4f0d46SJoao Pinto  * @priv: driver private structure
1702ae4f0d46SJoao Pinto  * @chan: TX channel index
1703ae4f0d46SJoao Pinto  * Description:
1704ae4f0d46SJoao Pinto  * This starts a TX DMA channel via the HW-specific callback.
1705ae4f0d46SJoao Pinto  */
1706ae4f0d46SJoao Pinto static void stmmac_start_tx_dma(struct stmmac_priv *priv, u32 chan)
1707ae4f0d46SJoao Pinto {
1708ae4f0d46SJoao Pinto 	netdev_dbg(priv->dev, "DMA TX processes started in channel %d\n", chan);
1709a4e887faSJose Abreu 	stmmac_start_tx(priv, priv->ioaddr, chan);
1710ae4f0d46SJoao Pinto }
1711ae4f0d46SJoao Pinto 
1712ae4f0d46SJoao Pinto /**
1713ae4f0d46SJoao Pinto  * stmmac_stop_rx_dma - stop RX DMA channel
1714ae4f0d46SJoao Pinto  * @priv: driver private structure
1715ae4f0d46SJoao Pinto  * @chan: RX channel index
1716ae4f0d46SJoao Pinto  * Description:
1717ae4f0d46SJoao Pinto  * This stops a RX DMA channel via the HW-specific callback.
1718ae4f0d46SJoao Pinto  */
1719ae4f0d46SJoao Pinto static void stmmac_stop_rx_dma(struct stmmac_priv *priv, u32 chan)
1720ae4f0d46SJoao Pinto {
1721ae4f0d46SJoao Pinto 	netdev_dbg(priv->dev, "DMA RX processes stopped in channel %d\n", chan);
1722a4e887faSJose Abreu 	stmmac_stop_rx(priv, priv->ioaddr, chan);
1723ae4f0d46SJoao Pinto }
1724ae4f0d46SJoao Pinto 
1725ae4f0d46SJoao Pinto /**
1726ae4f0d46SJoao Pinto  * stmmac_stop_tx_dma - stop TX DMA channel
1727ae4f0d46SJoao Pinto  * @priv: driver private structure
1728ae4f0d46SJoao Pinto  * @chan: TX channel index
1729ae4f0d46SJoao Pinto  * Description:
1730ae4f0d46SJoao Pinto  * This stops a TX DMA channel via the HW-specific callback.
1731ae4f0d46SJoao Pinto  */
1732ae4f0d46SJoao Pinto static void stmmac_stop_tx_dma(struct stmmac_priv *priv, u32 chan)
1733ae4f0d46SJoao Pinto {
1734ae4f0d46SJoao Pinto 	netdev_dbg(priv->dev, "DMA TX processes stopped in channel %d\n", chan);
1735a4e887faSJose Abreu 	stmmac_stop_tx(priv, priv->ioaddr, chan);
1736ae4f0d46SJoao Pinto }
1737ae4f0d46SJoao Pinto 
1738ae4f0d46SJoao Pinto /**
1739ae4f0d46SJoao Pinto  * stmmac_start_all_dma - start all RX and TX DMA channels
1740ae4f0d46SJoao Pinto  * @priv: driver private structure
1741ae4f0d46SJoao Pinto  * Description:
1742ae4f0d46SJoao Pinto  * This starts all the RX and TX DMA channels
1743ae4f0d46SJoao Pinto  */
1744ae4f0d46SJoao Pinto static void stmmac_start_all_dma(struct stmmac_priv *priv)
1745ae4f0d46SJoao Pinto {
1746ae4f0d46SJoao Pinto 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
1747ae4f0d46SJoao Pinto 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
1748ae4f0d46SJoao Pinto 	u32 chan = 0;
1749ae4f0d46SJoao Pinto 
1750ae4f0d46SJoao Pinto 	/* RX first, so receive resources are live before TX starts */
1750ae4f0d46SJoao Pinto 	for (chan = 0; chan < rx_channels_count; chan++)
1751ae4f0d46SJoao Pinto 		stmmac_start_rx_dma(priv, chan);
1752ae4f0d46SJoao Pinto 
1753ae4f0d46SJoao Pinto 	for (chan = 0; chan < tx_channels_count; chan++)
1754ae4f0d46SJoao Pinto 		stmmac_start_tx_dma(priv, chan);
1755ae4f0d46SJoao Pinto }
1756ae4f0d46SJoao Pinto 
1757ae4f0d46SJoao Pinto /**
1758ae4f0d46SJoao Pinto  * stmmac_stop_all_dma - stop all RX and TX DMA channels
1759ae4f0d46SJoao Pinto  * @priv: driver private structure
1760ae4f0d46SJoao Pinto  * Description:
1761ae4f0d46SJoao Pinto  * This stops the RX and TX DMA channels
1762ae4f0d46SJoao Pinto  */
1763ae4f0d46SJoao Pinto static void stmmac_stop_all_dma(struct stmmac_priv *priv)
1764ae4f0d46SJoao Pinto {
1765ae4f0d46SJoao Pinto 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
1766ae4f0d46SJoao Pinto 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
1767ae4f0d46SJoao Pinto 	u32 chan = 0;
1768ae4f0d46SJoao Pinto 
1769ae4f0d46SJoao Pinto 	for (chan = 0; chan < rx_channels_count; chan++)
1770ae4f0d46SJoao Pinto 		stmmac_stop_rx_dma(priv, chan);
1771ae4f0d46SJoao Pinto 
1772ae4f0d46SJoao Pinto 	for (chan = 0; chan < tx_channels_count; chan++)
1773ae4f0d46SJoao Pinto 		stmmac_stop_tx_dma(priv, chan);
1774ae4f0d46SJoao Pinto }
1775ae4f0d46SJoao Pinto 
/**
 *  stmmac_dma_operation_mode - HW DMA operation mode
 *  @priv: driver private structure
 *  Description: it is used for configuring the DMA operation mode register in
 *  order to program the tx/rx DMA thresholds or Store-And-Forward mode.
 */
static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
{
	u32 rx_channels_count = priv->plat->rx_queues_to_use;
	u32 tx_channels_count = priv->plat->tx_queues_to_use;
	int rxfifosz = priv->plat->rx_fifo_size;
	int txfifosz = priv->plat->tx_fifo_size;
	u32 txmode = 0;
	u32 rxmode = 0;
	u32 chan = 0;
	u8 qmode = 0;

	/* Fall back to the FIFO sizes read from the HW capability
	 * register when the platform does not provide them.
	 */
	if (rxfifosz == 0)
		rxfifosz = priv->dma_cap.rx_fifo_size;
	if (txfifosz == 0)
		txfifosz = priv->dma_cap.tx_fifo_size;

	/* Adjust for real per queue fifo size */
	rxfifosz /= rx_channels_count;
	txfifosz /= tx_channels_count;

	/* Select threshold vs Store-And-Forward operation; "tc" is the
	 * module-level threshold value.
	 */
	if (priv->plat->force_thresh_dma_mode) {
		txmode = tc;
		rxmode = tc;
	} else if (priv->plat->force_sf_dma_mode || priv->plat->tx_coe) {
		/*
		 * In case of GMAC, SF mode can be enabled
		 * to perform the TX COE in HW. This depends on:
		 * 1) TX COE if actually supported
		 * 2) There is no bugged Jumbo frame support
		 *    that needs to not insert csum in the TDES.
		 */
		txmode = SF_DMA_MODE;
		rxmode = SF_DMA_MODE;
		priv->xstats.threshold = SF_DMA_MODE;
	} else {
		txmode = tc;
		rxmode = SF_DMA_MODE;
	}

	/* configure all channels */
	for (chan = 0; chan < rx_channels_count; chan++) {
		qmode = priv->plat->rx_queues_cfg[chan].mode_to_use;

		stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan,
				rxfifosz, qmode);
		stmmac_set_dma_bfsize(priv, priv->ioaddr, priv->dma_buf_sz,
				chan);
	}

	for (chan = 0; chan < tx_channels_count; chan++) {
		qmode = priv->plat->tx_queues_cfg[chan].mode_to_use;

		stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan,
				txfifosz, qmode);
	}
}
18387ac6653aSJeff Kirsher 
/**
 * stmmac_tx_clean - to manage the transmission completion
 * @priv: driver private structure
 * @budget: maximum number of descriptors to reclaim in this call
 * @queue: TX queue index
 * Description: it reclaims the transmit resources after transmission completes.
 * Returns the number of descriptors processed (at most @budget).
 */
static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue)
{
	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
	unsigned int bytes_compl = 0, pkts_compl = 0;
	unsigned int entry, count = 0;

	/* Serialize against the xmit path for this queue */
	__netif_tx_lock_bh(netdev_get_tx_queue(priv->dev, queue));

	priv->xstats.tx_clean++;

	/* Walk from dirty_tx towards cur_tx, stopping at the budget or at
	 * the first descriptor still owned by the DMA.
	 */
	entry = tx_q->dirty_tx;
	while ((entry != tx_q->cur_tx) && (count < budget)) {
		struct sk_buff *skb = tx_q->tx_skbuff[entry];
		struct dma_desc *p;
		int status;

		if (priv->extend_desc)
			p = (struct dma_desc *)(tx_q->dma_etx + entry);
		else
			p = tx_q->dma_tx + entry;

		status = stmmac_tx_status(priv, &priv->dev->stats,
				&priv->xstats, p, priv->ioaddr);
		/* Check if the descriptor is owned by the DMA */
		if (unlikely(status & tx_dma_own))
			break;

		count++;

		/* Make sure descriptor fields are read after reading
		 * the own bit.
		 */
		dma_rmb();

		/* Just consider the last segment and ...*/
		if (likely(!(status & tx_not_ls))) {
			/* ... verify the status error condition */
			if (unlikely(status & tx_err)) {
				priv->dev->stats.tx_errors++;
			} else {
				priv->dev->stats.tx_packets++;
				priv->xstats.tx_pkt_n++;
			}
			stmmac_get_tx_hwtstamp(priv, p, skb);
		}

		/* Release the DMA mapping created at xmit time */
		if (likely(tx_q->tx_skbuff_dma[entry].buf)) {
			if (tx_q->tx_skbuff_dma[entry].map_as_page)
				dma_unmap_page(priv->device,
					       tx_q->tx_skbuff_dma[entry].buf,
					       tx_q->tx_skbuff_dma[entry].len,
					       DMA_TO_DEVICE);
			else
				dma_unmap_single(priv->device,
						 tx_q->tx_skbuff_dma[entry].buf,
						 tx_q->tx_skbuff_dma[entry].len,
						 DMA_TO_DEVICE);
			tx_q->tx_skbuff_dma[entry].buf = 0;
			tx_q->tx_skbuff_dma[entry].len = 0;
			tx_q->tx_skbuff_dma[entry].map_as_page = false;
		}

		stmmac_clean_desc3(priv, tx_q, p);

		tx_q->tx_skbuff_dma[entry].last_segment = false;
		tx_q->tx_skbuff_dma[entry].is_jumbo = false;

		if (likely(skb != NULL)) {
			pkts_compl++;
			bytes_compl += skb->len;
			dev_consume_skb_any(skb);
			tx_q->tx_skbuff[entry] = NULL;
		}

		stmmac_release_tx_desc(priv, p, priv->mode);

		entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
	}
	tx_q->dirty_tx = entry;

	netdev_tx_completed_queue(netdev_get_tx_queue(priv->dev, queue),
				  pkts_compl, bytes_compl);

	/* Restart the queue if it was stopped and enough room is now free */
	if (unlikely(netif_tx_queue_stopped(netdev_get_tx_queue(priv->dev,
								queue))) &&
	    stmmac_tx_avail(priv, queue) > STMMAC_TX_THRESH) {

		netif_dbg(priv, tx_done, priv->dev,
			  "%s: restart transmit\n", __func__);
		netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, queue));
	}

	/* Arm the LPI timer when EEE is enabled and we are not already
	 * in LPI mode.
	 */
	if ((priv->eee_enabled) && (!priv->tx_path_in_lpi_mode)) {
		stmmac_enable_eee_mode(priv);
		mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer));
	}

	__netif_tx_unlock_bh(netdev_get_tx_queue(priv->dev, queue));

	return count;
}
19467ac6653aSJeff Kirsher 
/**
 * stmmac_tx_err - to manage the tx error
 * @priv: driver private structure
 * @chan: channel index
 * Description: it cleans the descriptors and restarts the transmission
 * in case of transmission errors.
 */
static void stmmac_tx_err(struct stmmac_priv *priv, u32 chan)
{
	struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
	int i;

	/* Keep the stack from queueing while the ring is rebuilt */
	netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, chan));

	/* Stop the DMA, drop all pending skbs, then re-init every
	 * descriptor in the ring before restarting.
	 */
	stmmac_stop_tx_dma(priv, chan);
	dma_free_tx_skbufs(priv, chan);
	for (i = 0; i < DMA_TX_SIZE; i++)
		if (priv->extend_desc)
			stmmac_init_tx_desc(priv, &tx_q->dma_etx[i].basic,
					priv->mode, (i == DMA_TX_SIZE - 1));
		else
			stmmac_init_tx_desc(priv, &tx_q->dma_tx[i],
					priv->mode, (i == DMA_TX_SIZE - 1));
	tx_q->dirty_tx = 0;
	tx_q->cur_tx = 0;
	tx_q->mss = 0;
	netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, chan));
	stmmac_start_tx_dma(priv, chan);

	priv->dev->stats.tx_errors++;
	netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, chan));
}
19797ac6653aSJeff Kirsher 
198032ceabcaSGiuseppe CAVALLARO /**
19816deee222SJoao Pinto  *  stmmac_set_dma_operation_mode - Set DMA operation mode by channel
19826deee222SJoao Pinto  *  @priv: driver private structure
19836deee222SJoao Pinto  *  @txmode: TX operating mode
19846deee222SJoao Pinto  *  @rxmode: RX operating mode
19856deee222SJoao Pinto  *  @chan: channel index
19866deee222SJoao Pinto  *  Description: it is used for configuring of the DMA operation mode in
19876deee222SJoao Pinto  *  runtime in order to program the tx/rx DMA thresholds or Store-And-Forward
19886deee222SJoao Pinto  *  mode.
19896deee222SJoao Pinto  */
19906deee222SJoao Pinto static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
19916deee222SJoao Pinto 					  u32 rxmode, u32 chan)
19926deee222SJoao Pinto {
1993a0daae13SJose Abreu 	u8 rxqmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
1994a0daae13SJose Abreu 	u8 txqmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
199552a76235SJose Abreu 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
199652a76235SJose Abreu 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
19976deee222SJoao Pinto 	int rxfifosz = priv->plat->rx_fifo_size;
199852a76235SJose Abreu 	int txfifosz = priv->plat->tx_fifo_size;
19996deee222SJoao Pinto 
20006deee222SJoao Pinto 	if (rxfifosz == 0)
20016deee222SJoao Pinto 		rxfifosz = priv->dma_cap.rx_fifo_size;
200252a76235SJose Abreu 	if (txfifosz == 0)
200352a76235SJose Abreu 		txfifosz = priv->dma_cap.tx_fifo_size;
200452a76235SJose Abreu 
200552a76235SJose Abreu 	/* Adjust for real per queue fifo size */
200652a76235SJose Abreu 	rxfifosz /= rx_channels_count;
200752a76235SJose Abreu 	txfifosz /= tx_channels_count;
20086deee222SJoao Pinto 
2009ab0204e3SJose Abreu 	stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan, rxfifosz, rxqmode);
2010ab0204e3SJose Abreu 	stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan, txfifosz, txqmode);
20116deee222SJoao Pinto }
20126deee222SJoao Pinto 
20138bf993a5SJose Abreu static bool stmmac_safety_feat_interrupt(struct stmmac_priv *priv)
20148bf993a5SJose Abreu {
201563a550fcSJose Abreu 	int ret;
20168bf993a5SJose Abreu 
2017c10d4c82SJose Abreu 	ret = stmmac_safety_feat_irq_status(priv, priv->dev,
20188bf993a5SJose Abreu 			priv->ioaddr, priv->dma_cap.asp, &priv->sstats);
2019c10d4c82SJose Abreu 	if (ret && (ret != -EINVAL)) {
20208bf993a5SJose Abreu 		stmmac_global_err(priv);
2021c10d4c82SJose Abreu 		return true;
2022c10d4c82SJose Abreu 	}
2023c10d4c82SJose Abreu 
2024c10d4c82SJose Abreu 	return false;
20258bf993a5SJose Abreu }
20268bf993a5SJose Abreu 
20278fce3331SJose Abreu static int stmmac_napi_check(struct stmmac_priv *priv, u32 chan)
20288fce3331SJose Abreu {
20298fce3331SJose Abreu 	int status = stmmac_dma_interrupt_status(priv, priv->ioaddr,
20308fce3331SJose Abreu 						 &priv->xstats, chan);
20318fce3331SJose Abreu 	struct stmmac_channel *ch = &priv->channel[chan];
20328fce3331SJose Abreu 	bool needs_work = false;
20338fce3331SJose Abreu 
20348fce3331SJose Abreu 	if ((status & handle_rx) && ch->has_rx) {
20358fce3331SJose Abreu 		needs_work = true;
20368fce3331SJose Abreu 	} else {
20378fce3331SJose Abreu 		status &= ~handle_rx;
20388fce3331SJose Abreu 	}
20398fce3331SJose Abreu 
20408fce3331SJose Abreu 	if ((status & handle_tx) && ch->has_tx) {
20418fce3331SJose Abreu 		needs_work = true;
20428fce3331SJose Abreu 	} else {
20438fce3331SJose Abreu 		status &= ~handle_tx;
20448fce3331SJose Abreu 	}
20458fce3331SJose Abreu 
20468fce3331SJose Abreu 	if (needs_work && napi_schedule_prep(&ch->napi)) {
20478fce3331SJose Abreu 		stmmac_disable_dma_irq(priv, priv->ioaddr, chan);
20488fce3331SJose Abreu 		__napi_schedule(&ch->napi);
20498fce3331SJose Abreu 	}
20508fce3331SJose Abreu 
20518fce3331SJose Abreu 	return status;
20528fce3331SJose Abreu }
20538fce3331SJose Abreu 
/**
 * stmmac_dma_interrupt - DMA ISR
 * @priv: driver private structure
 * Description: this is the DMA ISR. It is called by the main ISR.
 * It calls the dwmac dma routine and schedule poll method in case of some
 * work can be done.
 */
static void stmmac_dma_interrupt(struct stmmac_priv *priv)
{
	u32 tx_channel_count = priv->plat->tx_queues_to_use;
	u32 rx_channel_count = priv->plat->rx_queues_to_use;
	u32 channels_to_check = tx_channel_count > rx_channel_count ?
				tx_channel_count : rx_channel_count;
	u32 chan;
	int status[max_t(u32, MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES)];

	/* Make sure we never check beyond our status buffer. */
	if (WARN_ON_ONCE(channels_to_check > ARRAY_SIZE(status)))
		channels_to_check = ARRAY_SIZE(status);

	/* Collect per-channel DMA status and schedule NAPI where needed */
	for (chan = 0; chan < channels_to_check; chan++)
		status[chan] = stmmac_napi_check(priv, chan);

	for (chan = 0; chan < tx_channel_count; chan++) {
		if (unlikely(status[chan] & tx_hard_error_bump_tc)) {
			/* Try to bump up the dma threshold on this failure */
			if (unlikely(priv->xstats.threshold != SF_DMA_MODE) &&
			    (tc <= 256)) {
				tc += 64;
				if (priv->plat->force_thresh_dma_mode)
					stmmac_set_dma_operation_mode(priv,
								      tc,
								      tc,
								      chan);
				else
					stmmac_set_dma_operation_mode(priv,
								    tc,
								    SF_DMA_MODE,
								    chan);
				priv->xstats.threshold = tc;
			}
		} else if (unlikely(status[chan] == tx_hard_error)) {
			/* Fatal TX error: clean and restart the channel */
			stmmac_tx_err(priv, chan);
		}
	}
}
21007ac6653aSJeff Kirsher 
210132ceabcaSGiuseppe CAVALLARO /**
210232ceabcaSGiuseppe CAVALLARO  * stmmac_mmc_setup: setup the Mac Management Counters (MMC)
210332ceabcaSGiuseppe CAVALLARO  * @priv: driver private structure
210432ceabcaSGiuseppe CAVALLARO  * Description: this masks the MMC irq, in fact, the counters are managed in SW.
210532ceabcaSGiuseppe CAVALLARO  */
21061c901a46SGiuseppe CAVALLARO static void stmmac_mmc_setup(struct stmmac_priv *priv)
21071c901a46SGiuseppe CAVALLARO {
21081c901a46SGiuseppe CAVALLARO 	unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET |
21091c901a46SGiuseppe CAVALLARO 			    MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET;
21101c901a46SGiuseppe CAVALLARO 
211136ff7c1eSAlexandre TORGUE 	dwmac_mmc_intr_all_mask(priv->mmcaddr);
21124f795b25SGiuseppe CAVALLARO 
21134f795b25SGiuseppe CAVALLARO 	if (priv->dma_cap.rmon) {
211436ff7c1eSAlexandre TORGUE 		dwmac_mmc_ctrl(priv->mmcaddr, mode);
21151c901a46SGiuseppe CAVALLARO 		memset(&priv->mmc, 0, sizeof(struct stmmac_counters));
21164f795b25SGiuseppe CAVALLARO 	} else
211738ddc59dSLABBE Corentin 		netdev_info(priv->dev, "No MAC Management Counters available\n");
21181c901a46SGiuseppe CAVALLARO }
21191c901a46SGiuseppe CAVALLARO 
/**
 * stmmac_get_hw_features - get MAC capabilities from the HW cap. register.
 * @priv: driver private structure
 * Description:
 *  new GMAC chip generations have a new register to indicate the
 *  presence of the optional feature/functions.
 *  This can be also used to override the value passed through the
 *  platform and necessary for old MAC10/100 and GMAC chips.
 *  Returns 1 (true) when the capability register was read successfully.
 */
static int stmmac_get_hw_features(struct stmmac_priv *priv)
{
	return stmmac_get_hw_feature(priv, priv->ioaddr, &priv->dma_cap) == 0;
}
2133e7434821SGiuseppe CAVALLARO 
213432ceabcaSGiuseppe CAVALLARO /**
2135732fdf0eSGiuseppe CAVALLARO  * stmmac_check_ether_addr - check if the MAC addr is valid
213632ceabcaSGiuseppe CAVALLARO  * @priv: driver private structure
213732ceabcaSGiuseppe CAVALLARO  * Description:
213832ceabcaSGiuseppe CAVALLARO  * it is to verify if the MAC address is valid, in case of failures it
213932ceabcaSGiuseppe CAVALLARO  * generates a random MAC address
214032ceabcaSGiuseppe CAVALLARO  */
2141bfab27a1SGiuseppe CAVALLARO static void stmmac_check_ether_addr(struct stmmac_priv *priv)
2142bfab27a1SGiuseppe CAVALLARO {
2143bfab27a1SGiuseppe CAVALLARO 	if (!is_valid_ether_addr(priv->dev->dev_addr)) {
2144c10d4c82SJose Abreu 		stmmac_get_umac_addr(priv, priv->hw, priv->dev->dev_addr, 0);
2145bfab27a1SGiuseppe CAVALLARO 		if (!is_valid_ether_addr(priv->dev->dev_addr))
2146f2cedb63SDanny Kukawka 			eth_hw_addr_random(priv->dev);
214738ddc59dSLABBE Corentin 		netdev_info(priv->dev, "device MAC address %pM\n",
2148bfab27a1SGiuseppe CAVALLARO 			    priv->dev->dev_addr);
2149bfab27a1SGiuseppe CAVALLARO 	}
2150c88460b7SHans de Goede }
2151bfab27a1SGiuseppe CAVALLARO 
/**
 * stmmac_init_dma_engine - DMA init.
 * @priv: driver private structure
 * Description:
 * It inits the DMA invoking the specific MAC/GMAC callback.
 * Some DMA parameters can be passed from the platform;
 * in case of these are not passed a default is kept for the MAC or GMAC.
 * Returns 0 on success, a negative errno otherwise.
 */
static int stmmac_init_dma_engine(struct stmmac_priv *priv)
{
	u32 rx_channels_count = priv->plat->rx_queues_to_use;
	u32 tx_channels_count = priv->plat->tx_queues_to_use;
	u32 dma_csr_ch = max(rx_channels_count, tx_channels_count);
	struct stmmac_rx_queue *rx_q;
	struct stmmac_tx_queue *tx_q;
	u32 chan = 0;
	int atds = 0;
	int ret = 0;

	if (!priv->plat->dma_cfg || !priv->plat->dma_cfg->pbl) {
		dev_err(priv->device, "Invalid DMA configuration\n");
		return -EINVAL;
	}

	/* atds (alternate descriptor size) is only set when extended
	 * descriptors are used in ring mode.
	 */
	if (priv->extend_desc && (priv->mode == STMMAC_RING_MODE))
		atds = 1;

	ret = stmmac_reset(priv, priv->ioaddr);
	if (ret) {
		dev_err(priv->device, "Failed to reset the dma\n");
		return ret;
	}

	/* DMA Configuration */
	stmmac_dma_init(priv, priv->ioaddr, priv->plat->dma_cfg, atds);

	if (priv->plat->axi)
		stmmac_axi(priv, priv->ioaddr, priv->plat->axi);

	/* DMA RX Channel Configuration */
	for (chan = 0; chan < rx_channels_count; chan++) {
		rx_q = &priv->rx_queue[chan];

		stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
				    rx_q->dma_rx_phy, chan);

		/* RX tail pointer starts one full ring past the base */
		rx_q->rx_tail_addr = rx_q->dma_rx_phy +
			    (DMA_RX_SIZE * sizeof(struct dma_desc));
		stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
				       rx_q->rx_tail_addr, chan);
	}

	/* DMA TX Channel Configuration */
	for (chan = 0; chan < tx_channels_count; chan++) {
		tx_q = &priv->tx_queue[chan];

		stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
				    tx_q->dma_tx_phy, chan);

		/* TX tail pointer starts at the ring base */
		tx_q->tx_tail_addr = tx_q->dma_tx_phy;
		stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
				       tx_q->tx_tail_addr, chan);
	}

	/* DMA CSR Channel configuration */
	for (chan = 0; chan < dma_csr_ch; chan++)
		stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);

	return ret;
}
22220f1f88a8SGiuseppe CAVALLARO 
22238fce3331SJose Abreu static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue)
22248fce3331SJose Abreu {
22258fce3331SJose Abreu 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
22268fce3331SJose Abreu 
22278fce3331SJose Abreu 	mod_timer(&tx_q->txtimer, STMMAC_COAL_TIMER(priv->tx_coal_timer));
22288fce3331SJose Abreu }
22298fce3331SJose Abreu 
2230bfab27a1SGiuseppe CAVALLARO /**
2231732fdf0eSGiuseppe CAVALLARO  * stmmac_tx_timer - mitigation sw timer for tx.
22329125cdd1SGiuseppe CAVALLARO  * @data: data pointer
22339125cdd1SGiuseppe CAVALLARO  * Description:
22349125cdd1SGiuseppe CAVALLARO  * This is the timer handler to directly invoke the stmmac_tx_clean.
22359125cdd1SGiuseppe CAVALLARO  */
2236e99e88a9SKees Cook static void stmmac_tx_timer(struct timer_list *t)
22379125cdd1SGiuseppe CAVALLARO {
22388fce3331SJose Abreu 	struct stmmac_tx_queue *tx_q = from_timer(tx_q, t, txtimer);
22398fce3331SJose Abreu 	struct stmmac_priv *priv = tx_q->priv_data;
22408fce3331SJose Abreu 	struct stmmac_channel *ch;
22419125cdd1SGiuseppe CAVALLARO 
22428fce3331SJose Abreu 	ch = &priv->channel[tx_q->queue_index];
22438fce3331SJose Abreu 
22448fce3331SJose Abreu 	if (likely(napi_schedule_prep(&ch->napi)))
22458fce3331SJose Abreu 		__napi_schedule(&ch->napi);
22469125cdd1SGiuseppe CAVALLARO }
22479125cdd1SGiuseppe CAVALLARO 
22489125cdd1SGiuseppe CAVALLARO /**
2249732fdf0eSGiuseppe CAVALLARO  * stmmac_init_tx_coalesce - init tx mitigation options.
225032ceabcaSGiuseppe CAVALLARO  * @priv: driver private structure
22519125cdd1SGiuseppe CAVALLARO  * Description:
22529125cdd1SGiuseppe CAVALLARO  * This inits the transmit coalesce parameters: i.e. timer rate,
22539125cdd1SGiuseppe CAVALLARO  * timer handler and default threshold used for enabling the
22549125cdd1SGiuseppe CAVALLARO  * interrupt on completion bit.
22559125cdd1SGiuseppe CAVALLARO  */
22569125cdd1SGiuseppe CAVALLARO static void stmmac_init_tx_coalesce(struct stmmac_priv *priv)
22579125cdd1SGiuseppe CAVALLARO {
22588fce3331SJose Abreu 	u32 tx_channel_count = priv->plat->tx_queues_to_use;
22598fce3331SJose Abreu 	u32 chan;
22608fce3331SJose Abreu 
22619125cdd1SGiuseppe CAVALLARO 	priv->tx_coal_frames = STMMAC_TX_FRAMES;
22629125cdd1SGiuseppe CAVALLARO 	priv->tx_coal_timer = STMMAC_COAL_TX_TIMER;
22638fce3331SJose Abreu 
22648fce3331SJose Abreu 	for (chan = 0; chan < tx_channel_count; chan++) {
22658fce3331SJose Abreu 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
22668fce3331SJose Abreu 
22678fce3331SJose Abreu 		timer_setup(&tx_q->txtimer, stmmac_tx_timer, 0);
22688fce3331SJose Abreu 	}
22699125cdd1SGiuseppe CAVALLARO }
22709125cdd1SGiuseppe CAVALLARO 
22714854ab99SJoao Pinto static void stmmac_set_rings_length(struct stmmac_priv *priv)
22724854ab99SJoao Pinto {
22734854ab99SJoao Pinto 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
22744854ab99SJoao Pinto 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
22754854ab99SJoao Pinto 	u32 chan;
22764854ab99SJoao Pinto 
22774854ab99SJoao Pinto 	/* set TX ring length */
22784854ab99SJoao Pinto 	for (chan = 0; chan < tx_channels_count; chan++)
2279a4e887faSJose Abreu 		stmmac_set_tx_ring_len(priv, priv->ioaddr,
22804854ab99SJoao Pinto 				(DMA_TX_SIZE - 1), chan);
22814854ab99SJoao Pinto 
22824854ab99SJoao Pinto 	/* set RX ring length */
22834854ab99SJoao Pinto 	for (chan = 0; chan < rx_channels_count; chan++)
2284a4e887faSJose Abreu 		stmmac_set_rx_ring_len(priv, priv->ioaddr,
22854854ab99SJoao Pinto 				(DMA_RX_SIZE - 1), chan);
22864854ab99SJoao Pinto }
22874854ab99SJoao Pinto 
22889125cdd1SGiuseppe CAVALLARO /**
22896a3a7193SJoao Pinto  *  stmmac_set_tx_queue_weight - Set TX queue weight
22906a3a7193SJoao Pinto  *  @priv: driver private structure
22916a3a7193SJoao Pinto  *  Description: It is used for setting TX queues weight
22926a3a7193SJoao Pinto  */
22936a3a7193SJoao Pinto static void stmmac_set_tx_queue_weight(struct stmmac_priv *priv)
22946a3a7193SJoao Pinto {
22956a3a7193SJoao Pinto 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
22966a3a7193SJoao Pinto 	u32 weight;
22976a3a7193SJoao Pinto 	u32 queue;
22986a3a7193SJoao Pinto 
22996a3a7193SJoao Pinto 	for (queue = 0; queue < tx_queues_count; queue++) {
23006a3a7193SJoao Pinto 		weight = priv->plat->tx_queues_cfg[queue].weight;
2301c10d4c82SJose Abreu 		stmmac_set_mtl_tx_queue_weight(priv, priv->hw, weight, queue);
23026a3a7193SJoao Pinto 	}
23036a3a7193SJoao Pinto }
23046a3a7193SJoao Pinto 
23056a3a7193SJoao Pinto /**
230619d91873SJoao Pinto  *  stmmac_configure_cbs - Configure CBS in TX queue
230719d91873SJoao Pinto  *  @priv: driver private structure
230819d91873SJoao Pinto  *  Description: It is used for configuring CBS in AVB TX queues
230919d91873SJoao Pinto  */
231019d91873SJoao Pinto static void stmmac_configure_cbs(struct stmmac_priv *priv)
231119d91873SJoao Pinto {
231219d91873SJoao Pinto 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
231319d91873SJoao Pinto 	u32 mode_to_use;
231419d91873SJoao Pinto 	u32 queue;
231519d91873SJoao Pinto 
231644781fefSJoao Pinto 	/* queue 0 is reserved for legacy traffic */
231744781fefSJoao Pinto 	for (queue = 1; queue < tx_queues_count; queue++) {
231819d91873SJoao Pinto 		mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use;
231919d91873SJoao Pinto 		if (mode_to_use == MTL_QUEUE_DCB)
232019d91873SJoao Pinto 			continue;
232119d91873SJoao Pinto 
2322c10d4c82SJose Abreu 		stmmac_config_cbs(priv, priv->hw,
232319d91873SJoao Pinto 				priv->plat->tx_queues_cfg[queue].send_slope,
232419d91873SJoao Pinto 				priv->plat->tx_queues_cfg[queue].idle_slope,
232519d91873SJoao Pinto 				priv->plat->tx_queues_cfg[queue].high_credit,
232619d91873SJoao Pinto 				priv->plat->tx_queues_cfg[queue].low_credit,
232719d91873SJoao Pinto 				queue);
232819d91873SJoao Pinto 	}
232919d91873SJoao Pinto }
233019d91873SJoao Pinto 
233119d91873SJoao Pinto /**
2332d43042f4SJoao Pinto  *  stmmac_rx_queue_dma_chan_map - Map RX queue to RX dma channel
2333d43042f4SJoao Pinto  *  @priv: driver private structure
2334d43042f4SJoao Pinto  *  Description: It is used for mapping RX queues to RX dma channels
2335d43042f4SJoao Pinto  */
2336d43042f4SJoao Pinto static void stmmac_rx_queue_dma_chan_map(struct stmmac_priv *priv)
2337d43042f4SJoao Pinto {
2338d43042f4SJoao Pinto 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
2339d43042f4SJoao Pinto 	u32 queue;
2340d43042f4SJoao Pinto 	u32 chan;
2341d43042f4SJoao Pinto 
2342d43042f4SJoao Pinto 	for (queue = 0; queue < rx_queues_count; queue++) {
2343d43042f4SJoao Pinto 		chan = priv->plat->rx_queues_cfg[queue].chan;
2344c10d4c82SJose Abreu 		stmmac_map_mtl_to_dma(priv, priv->hw, queue, chan);
2345d43042f4SJoao Pinto 	}
2346d43042f4SJoao Pinto }
2347d43042f4SJoao Pinto 
2348d43042f4SJoao Pinto /**
2349a8f5102aSJoao Pinto  *  stmmac_mac_config_rx_queues_prio - Configure RX Queue priority
2350a8f5102aSJoao Pinto  *  @priv: driver private structure
2351a8f5102aSJoao Pinto  *  Description: It is used for configuring the RX Queue Priority
2352a8f5102aSJoao Pinto  */
2353a8f5102aSJoao Pinto static void stmmac_mac_config_rx_queues_prio(struct stmmac_priv *priv)
2354a8f5102aSJoao Pinto {
2355a8f5102aSJoao Pinto 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
2356a8f5102aSJoao Pinto 	u32 queue;
2357a8f5102aSJoao Pinto 	u32 prio;
2358a8f5102aSJoao Pinto 
2359a8f5102aSJoao Pinto 	for (queue = 0; queue < rx_queues_count; queue++) {
2360a8f5102aSJoao Pinto 		if (!priv->plat->rx_queues_cfg[queue].use_prio)
2361a8f5102aSJoao Pinto 			continue;
2362a8f5102aSJoao Pinto 
2363a8f5102aSJoao Pinto 		prio = priv->plat->rx_queues_cfg[queue].prio;
2364c10d4c82SJose Abreu 		stmmac_rx_queue_prio(priv, priv->hw, prio, queue);
2365a8f5102aSJoao Pinto 	}
2366a8f5102aSJoao Pinto }
2367a8f5102aSJoao Pinto 
2368a8f5102aSJoao Pinto /**
2369a8f5102aSJoao Pinto  *  stmmac_mac_config_tx_queues_prio - Configure TX Queue priority
2370a8f5102aSJoao Pinto  *  @priv: driver private structure
2371a8f5102aSJoao Pinto  *  Description: It is used for configuring the TX Queue Priority
2372a8f5102aSJoao Pinto  */
2373a8f5102aSJoao Pinto static void stmmac_mac_config_tx_queues_prio(struct stmmac_priv *priv)
2374a8f5102aSJoao Pinto {
2375a8f5102aSJoao Pinto 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
2376a8f5102aSJoao Pinto 	u32 queue;
2377a8f5102aSJoao Pinto 	u32 prio;
2378a8f5102aSJoao Pinto 
2379a8f5102aSJoao Pinto 	for (queue = 0; queue < tx_queues_count; queue++) {
2380a8f5102aSJoao Pinto 		if (!priv->plat->tx_queues_cfg[queue].use_prio)
2381a8f5102aSJoao Pinto 			continue;
2382a8f5102aSJoao Pinto 
2383a8f5102aSJoao Pinto 		prio = priv->plat->tx_queues_cfg[queue].prio;
2384c10d4c82SJose Abreu 		stmmac_tx_queue_prio(priv, priv->hw, prio, queue);
2385a8f5102aSJoao Pinto 	}
2386a8f5102aSJoao Pinto }
2387a8f5102aSJoao Pinto 
2388a8f5102aSJoao Pinto /**
2389abe80fdcSJoao Pinto  *  stmmac_mac_config_rx_queues_routing - Configure RX Queue Routing
2390abe80fdcSJoao Pinto  *  @priv: driver private structure
2391abe80fdcSJoao Pinto  *  Description: It is used for configuring the RX queue routing
2392abe80fdcSJoao Pinto  */
2393abe80fdcSJoao Pinto static void stmmac_mac_config_rx_queues_routing(struct stmmac_priv *priv)
2394abe80fdcSJoao Pinto {
2395abe80fdcSJoao Pinto 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
2396abe80fdcSJoao Pinto 	u32 queue;
2397abe80fdcSJoao Pinto 	u8 packet;
2398abe80fdcSJoao Pinto 
2399abe80fdcSJoao Pinto 	for (queue = 0; queue < rx_queues_count; queue++) {
2400abe80fdcSJoao Pinto 		/* no specific packet type routing specified for the queue */
2401abe80fdcSJoao Pinto 		if (priv->plat->rx_queues_cfg[queue].pkt_route == 0x0)
2402abe80fdcSJoao Pinto 			continue;
2403abe80fdcSJoao Pinto 
2404abe80fdcSJoao Pinto 		packet = priv->plat->rx_queues_cfg[queue].pkt_route;
2405c10d4c82SJose Abreu 		stmmac_rx_queue_routing(priv, priv->hw, packet, queue);
2406abe80fdcSJoao Pinto 	}
2407abe80fdcSJoao Pinto }
2408abe80fdcSJoao Pinto 
2409abe80fdcSJoao Pinto /**
2410d0a9c9f9SJoao Pinto  *  stmmac_mtl_configuration - Configure MTL
2411d0a9c9f9SJoao Pinto  *  @priv: driver private structure
2412d0a9c9f9SJoao Pinto  *  Description: It is used for configurring MTL
2413d0a9c9f9SJoao Pinto  */
2414d0a9c9f9SJoao Pinto static void stmmac_mtl_configuration(struct stmmac_priv *priv)
2415d0a9c9f9SJoao Pinto {
2416d0a9c9f9SJoao Pinto 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
2417d0a9c9f9SJoao Pinto 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
2418d0a9c9f9SJoao Pinto 
2419c10d4c82SJose Abreu 	if (tx_queues_count > 1)
24206a3a7193SJoao Pinto 		stmmac_set_tx_queue_weight(priv);
24216a3a7193SJoao Pinto 
2422d0a9c9f9SJoao Pinto 	/* Configure MTL RX algorithms */
2423c10d4c82SJose Abreu 	if (rx_queues_count > 1)
2424c10d4c82SJose Abreu 		stmmac_prog_mtl_rx_algorithms(priv, priv->hw,
2425d0a9c9f9SJoao Pinto 				priv->plat->rx_sched_algorithm);
2426d0a9c9f9SJoao Pinto 
2427d0a9c9f9SJoao Pinto 	/* Configure MTL TX algorithms */
2428c10d4c82SJose Abreu 	if (tx_queues_count > 1)
2429c10d4c82SJose Abreu 		stmmac_prog_mtl_tx_algorithms(priv, priv->hw,
2430d0a9c9f9SJoao Pinto 				priv->plat->tx_sched_algorithm);
2431d0a9c9f9SJoao Pinto 
243219d91873SJoao Pinto 	/* Configure CBS in AVB TX queues */
2433c10d4c82SJose Abreu 	if (tx_queues_count > 1)
243419d91873SJoao Pinto 		stmmac_configure_cbs(priv);
243519d91873SJoao Pinto 
2436d43042f4SJoao Pinto 	/* Map RX MTL to DMA channels */
2437d43042f4SJoao Pinto 	stmmac_rx_queue_dma_chan_map(priv);
2438d43042f4SJoao Pinto 
2439d0a9c9f9SJoao Pinto 	/* Enable MAC RX Queues */
2440d0a9c9f9SJoao Pinto 	stmmac_mac_enable_rx_queues(priv);
24416deee222SJoao Pinto 
2442a8f5102aSJoao Pinto 	/* Set RX priorities */
2443c10d4c82SJose Abreu 	if (rx_queues_count > 1)
2444a8f5102aSJoao Pinto 		stmmac_mac_config_rx_queues_prio(priv);
2445a8f5102aSJoao Pinto 
2446a8f5102aSJoao Pinto 	/* Set TX priorities */
2447c10d4c82SJose Abreu 	if (tx_queues_count > 1)
2448a8f5102aSJoao Pinto 		stmmac_mac_config_tx_queues_prio(priv);
2449abe80fdcSJoao Pinto 
2450abe80fdcSJoao Pinto 	/* Set RX routing */
2451c10d4c82SJose Abreu 	if (rx_queues_count > 1)
2452abe80fdcSJoao Pinto 		stmmac_mac_config_rx_queues_routing(priv);
2453d0a9c9f9SJoao Pinto }
2454d0a9c9f9SJoao Pinto 
24558bf993a5SJose Abreu static void stmmac_safety_feat_configuration(struct stmmac_priv *priv)
24568bf993a5SJose Abreu {
2457c10d4c82SJose Abreu 	if (priv->dma_cap.asp) {
24588bf993a5SJose Abreu 		netdev_info(priv->dev, "Enabling Safety Features\n");
2459c10d4c82SJose Abreu 		stmmac_safety_feat_config(priv, priv->ioaddr, priv->dma_cap.asp);
24608bf993a5SJose Abreu 	} else {
24618bf993a5SJose Abreu 		netdev_info(priv->dev, "No Safety Features support found\n");
24628bf993a5SJose Abreu 	}
24638bf993a5SJose Abreu }
24648bf993a5SJose Abreu 
/**
 * stmmac_hw_setup - setup mac in a usable state.
 *  @dev : pointer to the device structure.
 *  @init_ptp: when true, also enable the PTP reference clock and
 *	       initialize HW timestamping (skipped by callers that
 *	       keep PTP state across the call).
 *  Description:
 *  this is the main function to setup the HW in a usable state because the
 *  dma engine is reset, the core registers are configured (e.g. AXI,
 *  Checksum features, timers). The DMA is ready to start receiving and
 *  transmitting.
 *  Return value:
 *  0 on success and an appropriate (-)ve integer as defined in errno.h
 *  file on failure.
 */
static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	u32 rx_cnt = priv->plat->rx_queues_to_use;
	u32 tx_cnt = priv->plat->tx_queues_to_use;
	u32 chan;
	int ret;

	/* DMA initialization and SW reset */
	ret = stmmac_init_dma_engine(priv);
	if (ret < 0) {
		netdev_err(priv->dev, "%s: DMA engine initialization failed\n",
			   __func__);
		return ret;
	}

	/* Copy the MAC addr into the HW  */
	stmmac_set_umac_addr(priv, priv->hw, dev->dev_addr, 0);

	/* PS and related bits will be programmed according to the speed */
	if (priv->hw->pcs) {
		int speed = priv->plat->mac_port_sel_speed;

		/* Only fixed 10/100/1000 port speeds are accepted here */
		if ((speed == SPEED_10) || (speed == SPEED_100) ||
		    (speed == SPEED_1000)) {
			priv->hw->ps = speed;
		} else {
			dev_warn(priv->device, "invalid port speed\n");
			priv->hw->ps = 0;
		}
	}

	/* Initialize the MAC Core */
	stmmac_core_init(priv, priv->hw, dev);

	/* Initialize MTL*/
	stmmac_mtl_configuration(priv);

	/* Initialize Safety Features */
	stmmac_safety_feat_configuration(priv);

	/* If the core cannot do RX IPC checksum offload, disable it
	 * driver-wide so the stack falls back to SW checksumming.
	 */
	ret = stmmac_rx_ipc(priv, priv->hw);
	if (!ret) {
		netdev_warn(priv->dev, "RX IPC Checksum Offload disabled\n");
		priv->plat->rx_coe = STMMAC_RX_COE_NONE;
		priv->hw->rx_csum = 0;
	}

	/* Enable the MAC Rx/Tx */
	stmmac_mac_set(priv, priv->ioaddr, true);

	/* Set the HW DMA mode and the COE */
	stmmac_dma_operation_mode(priv);

	stmmac_mmc_setup(priv);

	/* PTP bring-up is best-effort: clock/timestamping failures are
	 * reported but do not fail the whole HW setup.
	 */
	if (init_ptp) {
		ret = clk_prepare_enable(priv->plat->clk_ptp_ref);
		if (ret < 0)
			netdev_warn(priv->dev, "failed to enable PTP reference clock: %d\n", ret);

		ret = stmmac_init_ptp(priv);
		if (ret == -EOPNOTSUPP)
			netdev_warn(priv->dev, "PTP not supported by HW\n");
		else if (ret)
			netdev_warn(priv->dev, "PTP init failed\n");
	}

	priv->tx_lpi_timer = STMMAC_DEFAULT_TWT_LS;

	/* Program the RX interrupt watchdog when RX coalescing is in use */
	if (priv->use_riwt) {
		ret = stmmac_rx_watchdog(priv, priv->ioaddr, MAX_DMA_RIWT, rx_cnt);
		if (!ret)
			priv->rx_riwt = MAX_DMA_RIWT;
	}

	/* Configure PCS auto-negotiation, if a PCS is present */
	if (priv->hw->pcs)
		stmmac_pcs_ctrl_ane(priv, priv->hw, 1, priv->hw->ps, 0);

	/* set TX and RX rings length */
	stmmac_set_rings_length(priv);

	/* Enable TSO */
	if (priv->tso) {
		for (chan = 0; chan < tx_cnt; chan++)
			stmmac_enable_tso(priv, priv->ioaddr, 1, chan);
	}

	/* Start the ball rolling... */
	stmmac_start_all_dma(priv);

	return 0;
}
2570523f11b5SSrinivas Kandagatla 
2571c66f6c37SThierry Reding static void stmmac_hw_teardown(struct net_device *dev)
2572c66f6c37SThierry Reding {
2573c66f6c37SThierry Reding 	struct stmmac_priv *priv = netdev_priv(dev);
2574c66f6c37SThierry Reding 
2575c66f6c37SThierry Reding 	clk_disable_unprepare(priv->plat->clk_ptp_ref);
2576c66f6c37SThierry Reding }
2577c66f6c37SThierry Reding 
/**
 *  stmmac_open - open entry point of the driver
 *  @dev : pointer to the device structure.
 *  Description:
 *  This function is the open entry point of the driver.
 *  It allocates and initializes the DMA descriptor rings, brings the
 *  HW up via stmmac_hw_setup(), attaches the PHY (when applicable) and
 *  requests the main/WoL/LPI IRQ lines before starting the queues.
 *  Return value:
 *  0 on success and an appropriate (-)ve integer as defined in errno.h
 *  file on failure.
 */
static int stmmac_open(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	u32 chan;
	int ret;

	stmmac_check_ether_addr(priv);

	/* A PHY is only attached when the link is not driven by an
	 * in-MAC PCS (RGMII/TBI/RTBI modes).
	 */
	if (priv->hw->pcs != STMMAC_PCS_RGMII &&
	    priv->hw->pcs != STMMAC_PCS_TBI &&
	    priv->hw->pcs != STMMAC_PCS_RTBI) {
		ret = stmmac_init_phy(dev);
		if (ret) {
			netdev_err(priv->dev,
				   "%s: Cannot attach to PHY (error: %d)\n",
				   __func__, ret);
			return ret;
		}
	}

	/* Extra statistics */
	memset(&priv->xstats, 0, sizeof(struct stmmac_extra_stats));
	priv->xstats.threshold = tc;

	priv->dma_buf_sz = STMMAC_ALIGN(buf_sz);
	priv->rx_copybreak = STMMAC_RX_COPYBREAK;

	ret = alloc_dma_desc_resources(priv);
	if (ret < 0) {
		netdev_err(priv->dev, "%s: DMA descriptors allocation failed\n",
			   __func__);
		goto dma_desc_error;
	}

	ret = init_dma_desc_rings(dev, GFP_KERNEL);
	if (ret < 0) {
		netdev_err(priv->dev, "%s: DMA descriptors initialization failed\n",
			   __func__);
		goto init_error;
	}

	ret = stmmac_hw_setup(dev, true);
	if (ret < 0) {
		netdev_err(priv->dev, "%s: Hw setup failed\n", __func__);
		goto init_error;
	}

	stmmac_init_tx_coalesce(priv);

	if (dev->phydev)
		phy_start(dev->phydev);

	/* Request the IRQ lines */
	ret = request_irq(dev->irq, stmmac_interrupt,
			  IRQF_SHARED, dev->name, dev);
	if (unlikely(ret < 0)) {
		netdev_err(priv->dev,
			   "%s: ERROR: allocating the IRQ %d (error: %d)\n",
			   __func__, dev->irq, ret);
		goto irq_error;
	}

	/* Request the Wake IRQ in case of another line is used for WoL */
	if (priv->wol_irq != dev->irq) {
		ret = request_irq(priv->wol_irq, stmmac_interrupt,
				  IRQF_SHARED, dev->name, dev);
		if (unlikely(ret < 0)) {
			netdev_err(priv->dev,
				   "%s: ERROR: allocating the WoL IRQ %d (%d)\n",
				   __func__, priv->wol_irq, ret);
			goto wolirq_error;
		}
	}

	/* Request the LPI IRQ in case a separate line is used for LPI */
	if (priv->lpi_irq > 0) {
		ret = request_irq(priv->lpi_irq, stmmac_interrupt, IRQF_SHARED,
				  dev->name, dev);
		if (unlikely(ret < 0)) {
			netdev_err(priv->dev,
				   "%s: ERROR: allocating the LPI IRQ %d (%d)\n",
				   __func__, priv->lpi_irq, ret);
			goto lpiirq_error;
		}
	}

	stmmac_enable_all_queues(priv);
	stmmac_start_all_queues(priv);

	return 0;

	/* Error unwinding: each label releases exactly what was acquired
	 * after the previous label, in reverse order of acquisition.
	 */
lpiirq_error:
	if (priv->wol_irq != dev->irq)
		free_irq(priv->wol_irq, dev);
wolirq_error:
	free_irq(dev->irq, dev);
irq_error:
	if (dev->phydev)
		phy_stop(dev->phydev);

	/* Kill any per-queue TX timers before tearing the HW down */
	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
		del_timer_sync(&priv->tx_queue[chan].txtimer);

	stmmac_hw_teardown(dev);
init_error:
	free_dma_desc_resources(priv);
dma_desc_error:
	if (dev->phydev)
		phy_disconnect(dev->phydev);

	return ret;
}
26997ac6653aSJeff Kirsher 
/**
 *  stmmac_release - close entry point of the driver
 *  @dev : device pointer.
 *  Description:
 *  This is the stop entry point of the driver.
 *  It reverses stmmac_open(): stops the PHY and the queues, frees the
 *  IRQ lines, halts the DMA and releases the descriptor resources.
 *  Return value:
 *  0 in all cases.
 */
static int stmmac_release(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	u32 chan;

	/* Cancel the EEE control timer if EEE was enabled */
	if (priv->eee_enabled)
		del_timer_sync(&priv->eee_ctrl_timer);

	/* Stop and disconnect the PHY */
	if (dev->phydev) {
		phy_stop(dev->phydev);
		phy_disconnect(dev->phydev);
	}

	stmmac_stop_all_queues(priv);

	stmmac_disable_all_queues(priv);

	/* Cancel the per-queue TX coalescing timers */
	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
		del_timer_sync(&priv->tx_queue[chan].txtimer);

	/* Free the IRQ lines */
	free_irq(dev->irq, dev);
	if (priv->wol_irq != dev->irq)
		free_irq(priv->wol_irq, dev);
	if (priv->lpi_irq > 0)
		free_irq(priv->lpi_irq, dev);

	/* Stop TX/RX DMA and clear the descriptors */
	stmmac_stop_all_dma(priv);

	/* Release and free the Rx/Tx resources */
	free_dma_desc_resources(priv);

	/* Disable the MAC Rx/Tx */
	stmmac_mac_set(priv, priv->ioaddr, false);

	netif_carrier_off(dev);

	stmmac_release_ptp(priv);

	return 0;
}
27497ac6653aSJeff Kirsher 
27507ac6653aSJeff Kirsher /**
2751f748be53SAlexandre TORGUE  *  stmmac_tso_allocator - close entry point of the driver
2752f748be53SAlexandre TORGUE  *  @priv: driver private structure
2753f748be53SAlexandre TORGUE  *  @des: buffer start address
2754f748be53SAlexandre TORGUE  *  @total_len: total length to fill in descriptors
2755f748be53SAlexandre TORGUE  *  @last_segmant: condition for the last descriptor
2756ce736788SJoao Pinto  *  @queue: TX queue index
2757f748be53SAlexandre TORGUE  *  Description:
2758f748be53SAlexandre TORGUE  *  This function fills descriptor and request new descriptors according to
2759f748be53SAlexandre TORGUE  *  buffer length to fill
2760f748be53SAlexandre TORGUE  */
2761f748be53SAlexandre TORGUE static void stmmac_tso_allocator(struct stmmac_priv *priv, unsigned int des,
2762ce736788SJoao Pinto 				 int total_len, bool last_segment, u32 queue)
2763f748be53SAlexandre TORGUE {
2764ce736788SJoao Pinto 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
2765f748be53SAlexandre TORGUE 	struct dma_desc *desc;
27665bacd778SLABBE Corentin 	u32 buff_size;
2767ce736788SJoao Pinto 	int tmp_len;
2768f748be53SAlexandre TORGUE 
2769f748be53SAlexandre TORGUE 	tmp_len = total_len;
2770f748be53SAlexandre TORGUE 
2771f748be53SAlexandre TORGUE 	while (tmp_len > 0) {
2772ce736788SJoao Pinto 		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
2773b4c9784cSNiklas Cassel 		WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
2774ce736788SJoao Pinto 		desc = tx_q->dma_tx + tx_q->cur_tx;
2775f748be53SAlexandre TORGUE 
2776f8be0d78SMichael Weiser 		desc->des0 = cpu_to_le32(des + (total_len - tmp_len));
2777f748be53SAlexandre TORGUE 		buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ?
2778f748be53SAlexandre TORGUE 			    TSO_MAX_BUFF_SIZE : tmp_len;
2779f748be53SAlexandre TORGUE 
278042de047dSJose Abreu 		stmmac_prepare_tso_tx_desc(priv, desc, 0, buff_size,
2781f748be53SAlexandre TORGUE 				0, 1,
2782426849e6SNiklas Cassel 				(last_segment) && (tmp_len <= TSO_MAX_BUFF_SIZE),
2783f748be53SAlexandre TORGUE 				0, 0);
2784f748be53SAlexandre TORGUE 
2785f748be53SAlexandre TORGUE 		tmp_len -= TSO_MAX_BUFF_SIZE;
2786f748be53SAlexandre TORGUE 	}
2787f748be53SAlexandre TORGUE }
2788f748be53SAlexandre TORGUE 
2789f748be53SAlexandre TORGUE /**
2790f748be53SAlexandre TORGUE  *  stmmac_tso_xmit - Tx entry point of the driver for oversized frames (TSO)
2791f748be53SAlexandre TORGUE  *  @skb : the socket buffer
2792f748be53SAlexandre TORGUE  *  @dev : device pointer
2793f748be53SAlexandre TORGUE  *  Description: this is the transmit function that is called on TSO frames
2794f748be53SAlexandre TORGUE  *  (support available on GMAC4 and newer chips).
2795f748be53SAlexandre TORGUE  *  Diagram below show the ring programming in case of TSO frames:
2796f748be53SAlexandre TORGUE  *
2797f748be53SAlexandre TORGUE  *  First Descriptor
2798f748be53SAlexandre TORGUE  *   --------
2799f748be53SAlexandre TORGUE  *   | DES0 |---> buffer1 = L2/L3/L4 header
2800f748be53SAlexandre TORGUE  *   | DES1 |---> TCP Payload (can continue on next descr...)
2801f748be53SAlexandre TORGUE  *   | DES2 |---> buffer 1 and 2 len
2802f748be53SAlexandre TORGUE  *   | DES3 |---> must set TSE, TCP hdr len-> [22:19]. TCP payload len [17:0]
2803f748be53SAlexandre TORGUE  *   --------
2804f748be53SAlexandre TORGUE  *	|
2805f748be53SAlexandre TORGUE  *     ...
2806f748be53SAlexandre TORGUE  *	|
2807f748be53SAlexandre TORGUE  *   --------
2808f748be53SAlexandre TORGUE  *   | DES0 | --| Split TCP Payload on Buffers 1 and 2
2809f748be53SAlexandre TORGUE  *   | DES1 | --|
2810f748be53SAlexandre TORGUE  *   | DES2 | --> buffer 1 and 2 len
2811f748be53SAlexandre TORGUE  *   | DES3 |
2812f748be53SAlexandre TORGUE  *   --------
2813f748be53SAlexandre TORGUE  *
2814f748be53SAlexandre TORGUE  * mss is fixed when enable tso, so w/o programming the TDES3 ctx field.
2815f748be53SAlexandre TORGUE  */
2816f748be53SAlexandre TORGUE static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
2817f748be53SAlexandre TORGUE {
2818ce736788SJoao Pinto 	struct dma_desc *desc, *first, *mss_desc = NULL;
2819f748be53SAlexandre TORGUE 	struct stmmac_priv *priv = netdev_priv(dev);
2820f748be53SAlexandre TORGUE 	int nfrags = skb_shinfo(skb)->nr_frags;
2821ce736788SJoao Pinto 	u32 queue = skb_get_queue_mapping(skb);
2822f748be53SAlexandre TORGUE 	unsigned int first_entry, des;
2823ce736788SJoao Pinto 	struct stmmac_tx_queue *tx_q;
2824ce736788SJoao Pinto 	int tmp_pay_len = 0;
2825ce736788SJoao Pinto 	u32 pay_len, mss;
2826f748be53SAlexandre TORGUE 	u8 proto_hdr_len;
2827f748be53SAlexandre TORGUE 	int i;
2828f748be53SAlexandre TORGUE 
2829ce736788SJoao Pinto 	tx_q = &priv->tx_queue[queue];
2830ce736788SJoao Pinto 
2831f748be53SAlexandre TORGUE 	/* Compute header lengths */
2832f748be53SAlexandre TORGUE 	proto_hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
2833f748be53SAlexandre TORGUE 
2834f748be53SAlexandre TORGUE 	/* Desc availability based on threshold should be enough safe */
2835ce736788SJoao Pinto 	if (unlikely(stmmac_tx_avail(priv, queue) <
2836f748be53SAlexandre TORGUE 		(((skb->len - proto_hdr_len) / TSO_MAX_BUFF_SIZE + 1)))) {
2837c22a3f48SJoao Pinto 		if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
2838c22a3f48SJoao Pinto 			netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
2839c22a3f48SJoao Pinto 								queue));
2840f748be53SAlexandre TORGUE 			/* This is a hard error, log it. */
284138ddc59dSLABBE Corentin 			netdev_err(priv->dev,
284238ddc59dSLABBE Corentin 				   "%s: Tx Ring full when queue awake\n",
284338ddc59dSLABBE Corentin 				   __func__);
2844f748be53SAlexandre TORGUE 		}
2845f748be53SAlexandre TORGUE 		return NETDEV_TX_BUSY;
2846f748be53SAlexandre TORGUE 	}
2847f748be53SAlexandre TORGUE 
2848f748be53SAlexandre TORGUE 	pay_len = skb_headlen(skb) - proto_hdr_len; /* no frags */
2849f748be53SAlexandre TORGUE 
2850f748be53SAlexandre TORGUE 	mss = skb_shinfo(skb)->gso_size;
2851f748be53SAlexandre TORGUE 
2852f748be53SAlexandre TORGUE 	/* set new MSS value if needed */
28538d212a9eSNiklas Cassel 	if (mss != tx_q->mss) {
2854ce736788SJoao Pinto 		mss_desc = tx_q->dma_tx + tx_q->cur_tx;
285542de047dSJose Abreu 		stmmac_set_mss(priv, mss_desc, mss);
28568d212a9eSNiklas Cassel 		tx_q->mss = mss;
2857ce736788SJoao Pinto 		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
2858b4c9784cSNiklas Cassel 		WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
2859f748be53SAlexandre TORGUE 	}
2860f748be53SAlexandre TORGUE 
2861f748be53SAlexandre TORGUE 	if (netif_msg_tx_queued(priv)) {
2862f748be53SAlexandre TORGUE 		pr_info("%s: tcphdrlen %d, hdr_len %d, pay_len %d, mss %d\n",
2863f748be53SAlexandre TORGUE 			__func__, tcp_hdrlen(skb), proto_hdr_len, pay_len, mss);
2864f748be53SAlexandre TORGUE 		pr_info("\tskb->len %d, skb->data_len %d\n", skb->len,
2865f748be53SAlexandre TORGUE 			skb->data_len);
2866f748be53SAlexandre TORGUE 	}
2867f748be53SAlexandre TORGUE 
2868ce736788SJoao Pinto 	first_entry = tx_q->cur_tx;
2869b4c9784cSNiklas Cassel 	WARN_ON(tx_q->tx_skbuff[first_entry]);
2870f748be53SAlexandre TORGUE 
2871ce736788SJoao Pinto 	desc = tx_q->dma_tx + first_entry;
2872f748be53SAlexandre TORGUE 	first = desc;
2873f748be53SAlexandre TORGUE 
2874f748be53SAlexandre TORGUE 	/* first descriptor: fill Headers on Buf1 */
2875f748be53SAlexandre TORGUE 	des = dma_map_single(priv->device, skb->data, skb_headlen(skb),
2876f748be53SAlexandre TORGUE 			     DMA_TO_DEVICE);
2877f748be53SAlexandre TORGUE 	if (dma_mapping_error(priv->device, des))
2878f748be53SAlexandre TORGUE 		goto dma_map_err;
2879f748be53SAlexandre TORGUE 
2880ce736788SJoao Pinto 	tx_q->tx_skbuff_dma[first_entry].buf = des;
2881ce736788SJoao Pinto 	tx_q->tx_skbuff_dma[first_entry].len = skb_headlen(skb);
2882f748be53SAlexandre TORGUE 
2883f8be0d78SMichael Weiser 	first->des0 = cpu_to_le32(des);
2884f748be53SAlexandre TORGUE 
2885f748be53SAlexandre TORGUE 	/* Fill start of payload in buff2 of first descriptor */
2886f748be53SAlexandre TORGUE 	if (pay_len)
2887f8be0d78SMichael Weiser 		first->des1 = cpu_to_le32(des + proto_hdr_len);
2888f748be53SAlexandre TORGUE 
2889f748be53SAlexandre TORGUE 	/* If needed take extra descriptors to fill the remaining payload */
2890f748be53SAlexandre TORGUE 	tmp_pay_len = pay_len - TSO_MAX_BUFF_SIZE;
2891f748be53SAlexandre TORGUE 
2892ce736788SJoao Pinto 	stmmac_tso_allocator(priv, des, tmp_pay_len, (nfrags == 0), queue);
2893f748be53SAlexandre TORGUE 
2894f748be53SAlexandre TORGUE 	/* Prepare fragments */
2895f748be53SAlexandre TORGUE 	for (i = 0; i < nfrags; i++) {
2896f748be53SAlexandre TORGUE 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2897f748be53SAlexandre TORGUE 
2898f748be53SAlexandre TORGUE 		des = skb_frag_dma_map(priv->device, frag, 0,
2899f748be53SAlexandre TORGUE 				       skb_frag_size(frag),
2900f748be53SAlexandre TORGUE 				       DMA_TO_DEVICE);
2901937071c1SThierry Reding 		if (dma_mapping_error(priv->device, des))
2902937071c1SThierry Reding 			goto dma_map_err;
2903f748be53SAlexandre TORGUE 
2904f748be53SAlexandre TORGUE 		stmmac_tso_allocator(priv, des, skb_frag_size(frag),
2905ce736788SJoao Pinto 				     (i == nfrags - 1), queue);
2906f748be53SAlexandre TORGUE 
2907ce736788SJoao Pinto 		tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des;
2908ce736788SJoao Pinto 		tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_frag_size(frag);
2909ce736788SJoao Pinto 		tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = true;
2910f748be53SAlexandre TORGUE 	}
2911f748be53SAlexandre TORGUE 
2912ce736788SJoao Pinto 	tx_q->tx_skbuff_dma[tx_q->cur_tx].last_segment = true;
2913f748be53SAlexandre TORGUE 
291405cf0d1bSNiklas Cassel 	/* Only the last descriptor gets to point to the skb. */
291505cf0d1bSNiklas Cassel 	tx_q->tx_skbuff[tx_q->cur_tx] = skb;
291605cf0d1bSNiklas Cassel 
291705cf0d1bSNiklas Cassel 	/* We've used all descriptors we need for this skb, however,
291805cf0d1bSNiklas Cassel 	 * advance cur_tx so that it references a fresh descriptor.
291905cf0d1bSNiklas Cassel 	 * ndo_start_xmit will fill this descriptor the next time it's
292005cf0d1bSNiklas Cassel 	 * called and stmmac_tx_clean may clean up to this descriptor.
292105cf0d1bSNiklas Cassel 	 */
2922ce736788SJoao Pinto 	tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
2923f748be53SAlexandre TORGUE 
2924ce736788SJoao Pinto 	if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
2925b3e51069SLABBE Corentin 		netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
292638ddc59dSLABBE Corentin 			  __func__);
2927c22a3f48SJoao Pinto 		netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
2928f748be53SAlexandre TORGUE 	}
2929f748be53SAlexandre TORGUE 
2930f748be53SAlexandre TORGUE 	dev->stats.tx_bytes += skb->len;
2931f748be53SAlexandre TORGUE 	priv->xstats.tx_tso_frames++;
2932f748be53SAlexandre TORGUE 	priv->xstats.tx_tso_nfrags += nfrags;
2933f748be53SAlexandre TORGUE 
2934f748be53SAlexandre TORGUE 	/* Manage tx mitigation */
29358fce3331SJose Abreu 	tx_q->tx_count_frames += nfrags + 1;
29368fce3331SJose Abreu 	if (priv->tx_coal_frames <= tx_q->tx_count_frames) {
293742de047dSJose Abreu 		stmmac_set_tx_ic(priv, desc);
2938f748be53SAlexandre TORGUE 		priv->xstats.tx_set_ic_bit++;
29398fce3331SJose Abreu 		tx_q->tx_count_frames = 0;
29408fce3331SJose Abreu 	} else {
29418fce3331SJose Abreu 		stmmac_tx_timer_arm(priv, queue);
2942f748be53SAlexandre TORGUE 	}
2943f748be53SAlexandre TORGUE 
2944f748be53SAlexandre TORGUE 	skb_tx_timestamp(skb);
2945f748be53SAlexandre TORGUE 
2946f748be53SAlexandre TORGUE 	if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
2947f748be53SAlexandre TORGUE 		     priv->hwts_tx_en)) {
2948f748be53SAlexandre TORGUE 		/* declare that device is doing timestamping */
2949f748be53SAlexandre TORGUE 		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
295042de047dSJose Abreu 		stmmac_enable_tx_timestamp(priv, first);
2951f748be53SAlexandre TORGUE 	}
2952f748be53SAlexandre TORGUE 
2953f748be53SAlexandre TORGUE 	/* Complete the first descriptor before granting the DMA */
295442de047dSJose Abreu 	stmmac_prepare_tso_tx_desc(priv, first, 1,
2955f748be53SAlexandre TORGUE 			proto_hdr_len,
2956f748be53SAlexandre TORGUE 			pay_len,
2957ce736788SJoao Pinto 			1, tx_q->tx_skbuff_dma[first_entry].last_segment,
2958f748be53SAlexandre TORGUE 			tcp_hdrlen(skb) / 4, (skb->len - proto_hdr_len));
2959f748be53SAlexandre TORGUE 
2960f748be53SAlexandre TORGUE 	/* If context desc is used to change MSS */
296115d2ee42SNiklas Cassel 	if (mss_desc) {
296215d2ee42SNiklas Cassel 		/* Make sure that first descriptor has been completely
296315d2ee42SNiklas Cassel 		 * written, including its own bit. This is because MSS is
296415d2ee42SNiklas Cassel 		 * actually before first descriptor, so we need to make
296515d2ee42SNiklas Cassel 		 * sure that MSS's own bit is the last thing written.
296615d2ee42SNiklas Cassel 		 */
296715d2ee42SNiklas Cassel 		dma_wmb();
296842de047dSJose Abreu 		stmmac_set_tx_owner(priv, mss_desc);
296915d2ee42SNiklas Cassel 	}
2970f748be53SAlexandre TORGUE 
2971f748be53SAlexandre TORGUE 	/* The own bit must be the latest setting done when prepare the
2972f748be53SAlexandre TORGUE 	 * descriptor and then barrier is needed to make sure that
2973f748be53SAlexandre TORGUE 	 * all is coherent before granting the DMA engine.
2974f748be53SAlexandre TORGUE 	 */
297595eb930aSNiklas Cassel 	wmb();
2976f748be53SAlexandre TORGUE 
2977f748be53SAlexandre TORGUE 	if (netif_msg_pktdata(priv)) {
2978f748be53SAlexandre TORGUE 		pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n",
2979ce736788SJoao Pinto 			__func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
2980ce736788SJoao Pinto 			tx_q->cur_tx, first, nfrags);
2981f748be53SAlexandre TORGUE 
298242de047dSJose Abreu 		stmmac_display_ring(priv, (void *)tx_q->dma_tx, DMA_TX_SIZE, 0);
2983f748be53SAlexandre TORGUE 
2984f748be53SAlexandre TORGUE 		pr_info(">>> frame to be transmitted: ");
2985f748be53SAlexandre TORGUE 		print_pkt(skb->data, skb_headlen(skb));
2986f748be53SAlexandre TORGUE 	}
2987f748be53SAlexandre TORGUE 
2988c22a3f48SJoao Pinto 	netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
2989f748be53SAlexandre TORGUE 
29900431100bSJose Abreu 	tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * sizeof(*desc));
2991a4e887faSJose Abreu 	stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);
2992f748be53SAlexandre TORGUE 
2993f748be53SAlexandre TORGUE 	return NETDEV_TX_OK;
2994f748be53SAlexandre TORGUE 
2995f748be53SAlexandre TORGUE dma_map_err:
2996f748be53SAlexandre TORGUE 	dev_err(priv->device, "Tx dma map failed\n");
2997f748be53SAlexandre TORGUE 	dev_kfree_skb(skb);
2998f748be53SAlexandre TORGUE 	priv->dev->stats.tx_dropped++;
2999f748be53SAlexandre TORGUE 	return NETDEV_TX_OK;
3000f748be53SAlexandre TORGUE }
3001f748be53SAlexandre TORGUE 
/**
 *  stmmac_xmit - Tx entry point of the driver
 *  @skb : the socket buffer
 *  @dev : device pointer
 *  Description : this is the tx entry point of the driver.
 *  It programs the chain or the ring and supports oversized frames
 *  and SG feature.
 *  The head descriptor (@first) is deliberately completed last, after all
 *  fragment descriptors are programmed, so the DMA never sees a head whose
 *  fragments are not yet ready.
 *  Return: NETDEV_TX_OK (also on DMA mapping failure, where the skb is
 *  dropped) or NETDEV_TX_BUSY when the ring has too few free descriptors.
 */
static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	unsigned int nopaged_len = skb_headlen(skb);
	int i, csum_insertion = 0, is_jumbo = 0;
	u32 queue = skb_get_queue_mapping(skb);
	int nfrags = skb_shinfo(skb)->nr_frags;
	int entry;
	unsigned int first_entry;
	struct dma_desc *desc, *first;
	struct stmmac_tx_queue *tx_q;
	unsigned int enh_desc;
	unsigned int des;

	tx_q = &priv->tx_queue[queue];

	/* Manage oversized TCP frames for GMAC4 device */
	if (skb_is_gso(skb) && priv->tso) {
		if (skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))
			return stmmac_tso_xmit(skb, dev);
	}

	/* Need one descriptor for the head plus one per fragment. */
	if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) {
		if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
			netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
								queue));
			/* This is a hard error, log it. */
			netdev_err(priv->dev,
				   "%s: Tx Ring full when queue awake\n",
				   __func__);
		}
		return NETDEV_TX_BUSY;
	}

	if (priv->tx_path_in_lpi_mode)
		stmmac_disable_eee_mode(priv);

	entry = tx_q->cur_tx;
	first_entry = entry;
	WARN_ON(tx_q->tx_skbuff[first_entry]);

	csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL);

	if (likely(priv->extend_desc))
		desc = (struct dma_desc *)(tx_q->dma_etx + entry);
	else
		desc = tx_q->dma_tx + entry;

	/* Remember the head descriptor; it is filled at the very end. */
	first = desc;

	enh_desc = priv->plat->enh_desc;
	/* To program the descriptors according to the size of the frame */
	if (enh_desc)
		is_jumbo = stmmac_is_jumbo_frm(priv, skb->len, enh_desc);

	if (unlikely(is_jumbo)) {
		entry = stmmac_jumbo_frm(priv, tx_q, skb, csum_insertion);
		if (unlikely(entry < 0) && (entry != -EINVAL))
			goto dma_map_err;
	}

	/* Map each paged fragment and program its descriptor. */
	for (i = 0; i < nfrags; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		int len = skb_frag_size(frag);
		bool last_segment = (i == (nfrags - 1));

		entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
		WARN_ON(tx_q->tx_skbuff[entry]);

		if (likely(priv->extend_desc))
			desc = (struct dma_desc *)(tx_q->dma_etx + entry);
		else
			desc = tx_q->dma_tx + entry;

		des = skb_frag_dma_map(priv->device, frag, 0, len,
				       DMA_TO_DEVICE);
		if (dma_mapping_error(priv->device, des))
			goto dma_map_err; /* should reuse desc w/o issues */

		/* Record the mapping so stmmac_tx_clean() can unmap it. */
		tx_q->tx_skbuff_dma[entry].buf = des;

		stmmac_set_desc_addr(priv, desc, des);

		tx_q->tx_skbuff_dma[entry].map_as_page = true;
		tx_q->tx_skbuff_dma[entry].len = len;
		tx_q->tx_skbuff_dma[entry].last_segment = last_segment;

		/* Prepare the descriptor and set the own bit too */
		stmmac_prepare_tx_desc(priv, desc, 0, len, csum_insertion,
				priv->mode, 1, last_segment, skb->len);
	}

	/* Only the last descriptor gets to point to the skb. */
	tx_q->tx_skbuff[entry] = skb;

	/* We've used all descriptors we need for this skb, however,
	 * advance cur_tx so that it references a fresh descriptor.
	 * ndo_start_xmit will fill this descriptor the next time it's
	 * called and stmmac_tx_clean may clean up to this descriptor.
	 */
	entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
	tx_q->cur_tx = entry;

	if (netif_msg_pktdata(priv)) {
		void *tx_head;

		netdev_dbg(priv->dev,
			   "%s: curr=%d dirty=%d f=%d, e=%d, first=%p, nfrags=%d",
			   __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
			   entry, first, nfrags);

		if (priv->extend_desc)
			tx_head = (void *)tx_q->dma_etx;
		else
			tx_head = (void *)tx_q->dma_tx;

		stmmac_display_ring(priv, tx_head, DMA_TX_SIZE, false);

		netdev_dbg(priv->dev, ">>> frame to be transmitted: ");
		print_pkt(skb->data, skb->len);
	}

	/* Stop the queue early if a max-fragment skb could no longer fit. */
	if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
		netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
			  __func__);
		netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
	}

	dev->stats.tx_bytes += skb->len;

	/* According to the coalesce parameter the IC bit for the latest
	 * segment is reset and the timer re-started to clean the tx status.
	 * This approach takes care about the fragments: desc is the first
	 * element in case of no SG.
	 */
	tx_q->tx_count_frames += nfrags + 1;
	if (priv->tx_coal_frames <= tx_q->tx_count_frames) {
		stmmac_set_tx_ic(priv, desc);
		priv->xstats.tx_set_ic_bit++;
		tx_q->tx_count_frames = 0;
	} else {
		stmmac_tx_timer_arm(priv, queue);
	}

	skb_tx_timestamp(skb);

	/* Ready to fill the first descriptor and set the OWN bit w/o any
	 * problems because all the descriptors are actually ready to be
	 * passed to the DMA engine.
	 */
	if (likely(!is_jumbo)) {
		bool last_segment = (nfrags == 0);

		des = dma_map_single(priv->device, skb->data,
				     nopaged_len, DMA_TO_DEVICE);
		if (dma_mapping_error(priv->device, des))
			goto dma_map_err;

		tx_q->tx_skbuff_dma[first_entry].buf = des;

		stmmac_set_desc_addr(priv, first, des);

		tx_q->tx_skbuff_dma[first_entry].len = nopaged_len;
		tx_q->tx_skbuff_dma[first_entry].last_segment = last_segment;

		if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
			     priv->hwts_tx_en)) {
			/* declare that device is doing timestamping */
			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
			stmmac_enable_tx_timestamp(priv, first);
		}

		/* Prepare the first descriptor setting the OWN bit too */
		stmmac_prepare_tx_desc(priv, first, 1, nopaged_len,
				csum_insertion, priv->mode, 1, last_segment,
				skb->len);

		/* The own bit must be the latest setting done when prepare the
		 * descriptor and then barrier is needed to make sure that
		 * all is coherent before granting the DMA engine.
		 */
		wmb();
	}

	netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);

	stmmac_enable_dma_transmission(priv, priv->ioaddr);

	/* Ring the doorbell: point the tail at one past the last used slot. */
	tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * sizeof(*desc));
	stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);

	return NETDEV_TX_OK;

dma_map_err:
	/* NOTE(review): fragments already mapped in the loop above are not
	 * unmapped here - presumably reclaimed later via stmmac_tx_clean();
	 * confirm against the cleanup path.
	 */
	netdev_err(priv->dev, "Tx DMA map failed\n");
	dev_kfree_skb(skb);
	priv->dev->stats.tx_dropped++;
	return NETDEV_TX_OK;
}
32097ac6653aSJeff Kirsher 
3210b9381985SVince Bridgers static void stmmac_rx_vlan(struct net_device *dev, struct sk_buff *skb)
3211b9381985SVince Bridgers {
3212ab188e8fSElad Nachman 	struct vlan_ethhdr *veth;
3213ab188e8fSElad Nachman 	__be16 vlan_proto;
3214b9381985SVince Bridgers 	u16 vlanid;
3215b9381985SVince Bridgers 
3216ab188e8fSElad Nachman 	veth = (struct vlan_ethhdr *)skb->data;
3217ab188e8fSElad Nachman 	vlan_proto = veth->h_vlan_proto;
3218ab188e8fSElad Nachman 
3219ab188e8fSElad Nachman 	if ((vlan_proto == htons(ETH_P_8021Q) &&
3220ab188e8fSElad Nachman 	     dev->features & NETIF_F_HW_VLAN_CTAG_RX) ||
3221ab188e8fSElad Nachman 	    (vlan_proto == htons(ETH_P_8021AD) &&
3222ab188e8fSElad Nachman 	     dev->features & NETIF_F_HW_VLAN_STAG_RX)) {
3223b9381985SVince Bridgers 		/* pop the vlan tag */
3224ab188e8fSElad Nachman 		vlanid = ntohs(veth->h_vlan_TCI);
3225ab188e8fSElad Nachman 		memmove(skb->data + VLAN_HLEN, veth, ETH_ALEN * 2);
3226b9381985SVince Bridgers 		skb_pull(skb, VLAN_HLEN);
3227ab188e8fSElad Nachman 		__vlan_hwaccel_put_tag(skb, vlan_proto, vlanid);
3228b9381985SVince Bridgers 	}
3229b9381985SVince Bridgers }
3230b9381985SVince Bridgers 
3231b9381985SVince Bridgers 
323254139cf3SJoao Pinto static inline int stmmac_rx_threshold_count(struct stmmac_rx_queue *rx_q)
3233120e87f9SGiuseppe Cavallaro {
323454139cf3SJoao Pinto 	if (rx_q->rx_zeroc_thresh < STMMAC_RX_THRESH)
3235120e87f9SGiuseppe Cavallaro 		return 0;
3236120e87f9SGiuseppe Cavallaro 
3237120e87f9SGiuseppe Cavallaro 	return 1;
3238120e87f9SGiuseppe Cavallaro }
3239120e87f9SGiuseppe Cavallaro 
324032ceabcaSGiuseppe CAVALLARO /**
3241732fdf0eSGiuseppe CAVALLARO  * stmmac_rx_refill - refill used skb preallocated buffers
324232ceabcaSGiuseppe CAVALLARO  * @priv: driver private structure
324354139cf3SJoao Pinto  * @queue: RX queue index
324432ceabcaSGiuseppe CAVALLARO  * Description : this is to reallocate the skb for the reception process
324532ceabcaSGiuseppe CAVALLARO  * that is based on zero-copy.
324632ceabcaSGiuseppe CAVALLARO  */
324754139cf3SJoao Pinto static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
32487ac6653aSJeff Kirsher {
324954139cf3SJoao Pinto 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
325054139cf3SJoao Pinto 	int dirty = stmmac_rx_dirty(priv, queue);
325154139cf3SJoao Pinto 	unsigned int entry = rx_q->dirty_rx;
325254139cf3SJoao Pinto 
32537ac6653aSJeff Kirsher 	int bfsize = priv->dma_buf_sz;
32547ac6653aSJeff Kirsher 
3255e3ad57c9SGiuseppe Cavallaro 	while (dirty-- > 0) {
3256c24602efSGiuseppe CAVALLARO 		struct dma_desc *p;
3257c24602efSGiuseppe CAVALLARO 
3258c24602efSGiuseppe CAVALLARO 		if (priv->extend_desc)
325954139cf3SJoao Pinto 			p = (struct dma_desc *)(rx_q->dma_erx + entry);
3260c24602efSGiuseppe CAVALLARO 		else
326154139cf3SJoao Pinto 			p = rx_q->dma_rx + entry;
3262c24602efSGiuseppe CAVALLARO 
326354139cf3SJoao Pinto 		if (likely(!rx_q->rx_skbuff[entry])) {
32647ac6653aSJeff Kirsher 			struct sk_buff *skb;
32657ac6653aSJeff Kirsher 
3266acb600deSEric Dumazet 			skb = netdev_alloc_skb_ip_align(priv->dev, bfsize);
3267120e87f9SGiuseppe Cavallaro 			if (unlikely(!skb)) {
3268120e87f9SGiuseppe Cavallaro 				/* so for a while no zero-copy! */
326954139cf3SJoao Pinto 				rx_q->rx_zeroc_thresh = STMMAC_RX_THRESH;
3270120e87f9SGiuseppe Cavallaro 				if (unlikely(net_ratelimit()))
3271120e87f9SGiuseppe Cavallaro 					dev_err(priv->device,
3272120e87f9SGiuseppe Cavallaro 						"fail to alloc skb entry %d\n",
3273120e87f9SGiuseppe Cavallaro 						entry);
32747ac6653aSJeff Kirsher 				break;
3275120e87f9SGiuseppe Cavallaro 			}
32767ac6653aSJeff Kirsher 
327754139cf3SJoao Pinto 			rx_q->rx_skbuff[entry] = skb;
327854139cf3SJoao Pinto 			rx_q->rx_skbuff_dma[entry] =
32797ac6653aSJeff Kirsher 			    dma_map_single(priv->device, skb->data, bfsize,
32807ac6653aSJeff Kirsher 					   DMA_FROM_DEVICE);
3281362b37beSGiuseppe CAVALLARO 			if (dma_mapping_error(priv->device,
328254139cf3SJoao Pinto 					      rx_q->rx_skbuff_dma[entry])) {
328338ddc59dSLABBE Corentin 				netdev_err(priv->dev, "Rx DMA map failed\n");
3284362b37beSGiuseppe CAVALLARO 				dev_kfree_skb(skb);
3285362b37beSGiuseppe CAVALLARO 				break;
3286362b37beSGiuseppe CAVALLARO 			}
3287286a8372SGiuseppe CAVALLARO 
32886844171dSJose Abreu 			stmmac_set_desc_addr(priv, p, rx_q->rx_skbuff_dma[entry]);
32892c520b1cSJose Abreu 			stmmac_refill_desc3(priv, rx_q, p);
3290286a8372SGiuseppe CAVALLARO 
329154139cf3SJoao Pinto 			if (rx_q->rx_zeroc_thresh > 0)
329254139cf3SJoao Pinto 				rx_q->rx_zeroc_thresh--;
3293120e87f9SGiuseppe Cavallaro 
3294b3e51069SLABBE Corentin 			netif_dbg(priv, rx_status, priv->dev,
329538ddc59dSLABBE Corentin 				  "refill entry #%d\n", entry);
32967ac6653aSJeff Kirsher 		}
3297ad688cdbSPavel Machek 		dma_wmb();
3298f748be53SAlexandre TORGUE 
3299357951cdSJose Abreu 		stmmac_set_rx_owner(priv, p, priv->use_riwt);
3300f748be53SAlexandre TORGUE 
3301ad688cdbSPavel Machek 		dma_wmb();
3302e3ad57c9SGiuseppe Cavallaro 
3303e3ad57c9SGiuseppe Cavallaro 		entry = STMMAC_GET_ENTRY(entry, DMA_RX_SIZE);
33047ac6653aSJeff Kirsher 	}
330554139cf3SJoao Pinto 	rx_q->dirty_rx = entry;
33067ac6653aSJeff Kirsher }
33077ac6653aSJeff Kirsher 
/**
 * stmmac_rx - manage the receive process
 * @priv: driver private structure
 * @limit: napi budget
 * @queue: RX queue index.
 * Description :  this the function called by the napi poll method.
 * It gets all the frames inside the ring.
 * Walks CPU-owned descriptors up to @limit, delivers each frame through
 * napi_gro_receive() (either copied below rx_copybreak or via zero-copy),
 * then refills the consumed ring slots.
 * Return: number of descriptors processed (consumed NAPI budget).
 */
static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
{
	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
	struct stmmac_channel *ch = &priv->channel[queue];
	unsigned int entry = rx_q->cur_rx;
	int coe = priv->hw->rx_csum;
	unsigned int next_entry;
	unsigned int count = 0;
	bool xmac;

	xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;

	if (netif_msg_rx_status(priv)) {
		void *rx_head;

		netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
		if (priv->extend_desc)
			rx_head = (void *)rx_q->dma_erx;
		else
			rx_head = (void *)rx_q->dma_rx;

		stmmac_display_ring(priv, rx_head, DMA_RX_SIZE, true);
	}
	while (count < limit) {
		int status;
		struct dma_desc *p;
		struct dma_desc *np;

		if (priv->extend_desc)
			p = (struct dma_desc *)(rx_q->dma_erx + entry);
		else
			p = rx_q->dma_rx + entry;

		/* read the status of the incoming frame */
		status = stmmac_rx_status(priv, &priv->dev->stats,
				&priv->xstats, p);
		/* check if managed by the DMA otherwise go ahead */
		if (unlikely(status & dma_own))
			break;

		count++;

		/* Advance cur_rx now; 'entry' keeps the slot being handled. */
		rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx, DMA_RX_SIZE);
		next_entry = rx_q->cur_rx;

		if (priv->extend_desc)
			np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
		else
			np = rx_q->dma_rx + next_entry;

		prefetch(np);

		if (priv->extend_desc)
			stmmac_rx_extended_status(priv, &priv->dev->stats,
					&priv->xstats, rx_q->dma_erx + entry);
		if (unlikely(status == discard_frame)) {
			priv->dev->stats.rx_errors++;
			if (priv->hwts_rx_en && !priv->extend_desc) {
				/* DESC2 & DESC3 will be overwritten by device
				 * with timestamp value, hence reinitialize
				 * them in stmmac_rx_refill() function so that
				 * device can reuse it.
				 */
				dev_kfree_skb_any(rx_q->rx_skbuff[entry]);
				rx_q->rx_skbuff[entry] = NULL;
				dma_unmap_single(priv->device,
						 rx_q->rx_skbuff_dma[entry],
						 priv->dma_buf_sz,
						 DMA_FROM_DEVICE);
			}
		} else {
			struct sk_buff *skb;
			int frame_len;
			unsigned int des;

			stmmac_get_desc_addr(priv, p, &des);
			frame_len = stmmac_get_rx_frame_len(priv, p, coe);

			/*  If frame length is greater than skb buffer size
			 *  (preallocated during init) then the packet is
			 *  ignored
			 *  NOTE(review): this 'break' (and the ones below)
			 *  aborts the whole poll pass, not just this frame -
			 *  confirm that is the intended error policy.
			 */
			if (frame_len > priv->dma_buf_sz) {
				netdev_err(priv->dev,
					   "len %d larger than size (%d)\n",
					   frame_len, priv->dma_buf_sz);
				priv->dev->stats.rx_length_errors++;
				break;
			}

			/* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3
			 * Type frames (LLC/LLC-SNAP)
			 *
			 * llc_snap is never checked in GMAC >= 4, so this ACS
			 * feature is always disabled and packets need to be
			 * stripped manually.
			 */
			if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00) ||
			    unlikely(status != llc_snap))
				frame_len -= ETH_FCS_LEN;

			if (netif_msg_rx_status(priv)) {
				netdev_dbg(priv->dev, "\tdesc: %p [entry %d] buff=0x%x\n",
					   p, entry, des);
				netdev_dbg(priv->dev, "frame size %d, COE: %d\n",
					   frame_len, status);
			}

			/* The zero-copy is always used for all the sizes
			 * in case of GMAC4 because it needs
			 * to refill the used descriptors, always.
			 */
			if (unlikely(!xmac &&
				     ((frame_len < priv->rx_copybreak) ||
				     stmmac_rx_threshold_count(rx_q)))) {
				/* Copy path: small frame copied into a fresh
				 * skb; the ring buffer stays mapped and is
				 * only synced for CPU access.
				 */
				skb = netdev_alloc_skb_ip_align(priv->dev,
								frame_len);
				if (unlikely(!skb)) {
					if (net_ratelimit())
						dev_warn(priv->device,
							 "packet dropped\n");
					priv->dev->stats.rx_dropped++;
					break;
				}

				dma_sync_single_for_cpu(priv->device,
							rx_q->rx_skbuff_dma
							[entry], frame_len,
							DMA_FROM_DEVICE);
				skb_copy_to_linear_data(skb,
							rx_q->
							rx_skbuff[entry]->data,
							frame_len);

				skb_put(skb, frame_len);
				dma_sync_single_for_device(priv->device,
							   rx_q->rx_skbuff_dma
							   [entry], frame_len,
							   DMA_FROM_DEVICE);
			} else {
				/* Zero-copy path: hand the preallocated skb
				 * up the stack; the slot is cleared so the
				 * refill path allocates a replacement.
				 */
				skb = rx_q->rx_skbuff[entry];
				if (unlikely(!skb)) {
					netdev_err(priv->dev,
						   "%s: Inconsistent Rx chain\n",
						   priv->dev->name);
					priv->dev->stats.rx_dropped++;
					break;
				}
				prefetch(skb->data - NET_IP_ALIGN);
				rx_q->rx_skbuff[entry] = NULL;
				rx_q->rx_zeroc_thresh++;

				skb_put(skb, frame_len);
				dma_unmap_single(priv->device,
						 rx_q->rx_skbuff_dma[entry],
						 priv->dma_buf_sz,
						 DMA_FROM_DEVICE);
			}

			if (netif_msg_pktdata(priv)) {
				netdev_dbg(priv->dev, "frame received (%dbytes)",
					   frame_len);
				print_pkt(skb->data, frame_len);
			}

			stmmac_get_rx_hwtstamp(priv, p, np, skb);

			stmmac_rx_vlan(priv->dev, skb);

			skb->protocol = eth_type_trans(skb, priv->dev);

			if (unlikely(!coe))
				skb_checksum_none_assert(skb);
			else
				skb->ip_summed = CHECKSUM_UNNECESSARY;

			napi_gro_receive(&ch->napi, skb);

			priv->dev->stats.rx_packets++;
			priv->dev->stats.rx_bytes += frame_len;
		}
		entry = next_entry;
	}

	/* Give consumed descriptors back to the hardware. */
	stmmac_rx_refill(priv, queue);

	priv->xstats.rx_pkt_n += count;

	return count;
}
35067ac6653aSJeff Kirsher 
35077ac6653aSJeff Kirsher /**
35087ac6653aSJeff Kirsher  *  stmmac_poll - stmmac poll method (NAPI)
35097ac6653aSJeff Kirsher  *  @napi : pointer to the napi structure.
35107ac6653aSJeff Kirsher  *  @budget : maximum number of packets that the current CPU can receive from
35117ac6653aSJeff Kirsher  *	      all interfaces.
35127ac6653aSJeff Kirsher  *  Description :
35139125cdd1SGiuseppe CAVALLARO  *  To look at the incoming frames and clear the tx resources.
35147ac6653aSJeff Kirsher  */
35158fce3331SJose Abreu static int stmmac_napi_poll(struct napi_struct *napi, int budget)
35167ac6653aSJeff Kirsher {
35178fce3331SJose Abreu 	struct stmmac_channel *ch =
35188fce3331SJose Abreu 		container_of(napi, struct stmmac_channel, napi);
35198fce3331SJose Abreu 	struct stmmac_priv *priv = ch->priv_data;
35208fce3331SJose Abreu 	int work_done = 0, work_rem = budget;
35218fce3331SJose Abreu 	u32 chan = ch->index;
35227ac6653aSJeff Kirsher 
35239125cdd1SGiuseppe CAVALLARO 	priv->xstats.napi_poll++;
3524ce736788SJoao Pinto 
35258fce3331SJose Abreu 	if (ch->has_tx) {
35268fce3331SJose Abreu 		int done = stmmac_tx_clean(priv, work_rem, chan);
3527ce736788SJoao Pinto 
35288fce3331SJose Abreu 		work_done += done;
35298fce3331SJose Abreu 		work_rem -= done;
35307ac6653aSJeff Kirsher 	}
35318fce3331SJose Abreu 
35328fce3331SJose Abreu 	if (ch->has_rx) {
35338fce3331SJose Abreu 		int done = stmmac_rx(priv, work_rem, chan);
35348fce3331SJose Abreu 
35358fce3331SJose Abreu 		work_done += done;
35368fce3331SJose Abreu 		work_rem -= done;
35378fce3331SJose Abreu 	}
35388fce3331SJose Abreu 
35398fce3331SJose Abreu 	if (work_done < budget && napi_complete_done(napi, work_done))
35408fce3331SJose Abreu 		stmmac_enable_dma_irq(priv, priv->ioaddr, chan);
35418fce3331SJose Abreu 
35427ac6653aSJeff Kirsher 	return work_done;
35437ac6653aSJeff Kirsher }
35447ac6653aSJeff Kirsher 
35457ac6653aSJeff Kirsher /**
35467ac6653aSJeff Kirsher  *  stmmac_tx_timeout
35477ac6653aSJeff Kirsher  *  @dev : Pointer to net device structure
35487ac6653aSJeff Kirsher  *  Description: this function is called when a packet transmission fails to
35497284a3f1SGiuseppe CAVALLARO  *   complete within a reasonable time. The driver will mark the error in the
35507ac6653aSJeff Kirsher  *   netdev structure and arrange for the device to be reset to a sane state
35517ac6653aSJeff Kirsher  *   in order to transmit a new packet.
35527ac6653aSJeff Kirsher  */
static void stmmac_tx_timeout(struct net_device *dev)
{
	/* Hand the failure to the driver's global error handling, which
	 * schedules the recovery work (see stmmac_service_task).
	 */
	stmmac_global_err(netdev_priv(dev));
}
35597ac6653aSJeff Kirsher 
35607ac6653aSJeff Kirsher /**
356101789349SJiri Pirko  *  stmmac_set_rx_mode - entry point for multicast addressing
35627ac6653aSJeff Kirsher  *  @dev : pointer to the device structure
35637ac6653aSJeff Kirsher  *  Description:
35647ac6653aSJeff Kirsher  *  This function is a driver entry point which gets called by the kernel
35657ac6653aSJeff Kirsher  *  whenever multicast addresses must be enabled/disabled.
35667ac6653aSJeff Kirsher  *  Return value:
35677ac6653aSJeff Kirsher  *  void.
35687ac6653aSJeff Kirsher  */
static void stmmac_set_rx_mode(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);

	/* Reprogram the MAC frame filter from the current netdev state. */
	stmmac_set_filter(priv, priv->hw, dev);
}
35757ac6653aSJeff Kirsher 
35767ac6653aSJeff Kirsher /**
35777ac6653aSJeff Kirsher  *  stmmac_change_mtu - entry point to change MTU size for the device.
35787ac6653aSJeff Kirsher  *  @dev : device pointer.
35797ac6653aSJeff Kirsher  *  @new_mtu : the new MTU size for the device.
35807ac6653aSJeff Kirsher  *  Description: the Maximum Transfer Unit (MTU) is used by the network layer
35817ac6653aSJeff Kirsher  *  to drive packet transmission. Ethernet has an MTU of 1500 octets
35827ac6653aSJeff Kirsher  *  (ETH_DATA_LEN). This value can be changed with ifconfig.
35837ac6653aSJeff Kirsher  *  Return value:
35847ac6653aSJeff Kirsher  *  0 on success and an appropriate (-)ve integer as defined in errno.h
35857ac6653aSJeff Kirsher  *  file on failure.
35867ac6653aSJeff Kirsher  */
static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
{
	struct stmmac_priv *priv = netdev_priv(dev);

	/* MTU changes are only accepted while the interface is down.
	 * NOTE(review): presumably because the DMA buffers are sized at
	 * open time — confirm against stmmac_open.
	 */
	if (netif_running(dev)) {
		netdev_err(priv->dev, "must be stopped to change its MTU\n");
		return -EBUSY;
	}

	dev->mtu = new_mtu;

	/* Re-evaluate offload features: stmmac_fix_features() drops TX
	 * csum offload for jumbo MTUs on bugged_jumbo platforms.
	 */
	netdev_update_features(dev);

	return 0;
}
36027ac6653aSJeff Kirsher 
3603c8f44affSMichał Mirosław static netdev_features_t stmmac_fix_features(struct net_device *dev,
3604c8f44affSMichał Mirosław 					     netdev_features_t features)
36057ac6653aSJeff Kirsher {
36067ac6653aSJeff Kirsher 	struct stmmac_priv *priv = netdev_priv(dev);
36077ac6653aSJeff Kirsher 
360838912bdbSDeepak SIKRI 	if (priv->plat->rx_coe == STMMAC_RX_COE_NONE)
36097ac6653aSJeff Kirsher 		features &= ~NETIF_F_RXCSUM;
3610d2afb5bdSGiuseppe CAVALLARO 
36117ac6653aSJeff Kirsher 	if (!priv->plat->tx_coe)
3612a188222bSTom Herbert 		features &= ~NETIF_F_CSUM_MASK;
36137ac6653aSJeff Kirsher 
36147ac6653aSJeff Kirsher 	/* Some GMAC devices have a bugged Jumbo frame support that
36157ac6653aSJeff Kirsher 	 * needs to have the Tx COE disabled for oversized frames
36167ac6653aSJeff Kirsher 	 * (due to limited buffer sizes). In this case we disable
3617ceb69499SGiuseppe CAVALLARO 	 * the TX csum insertion in the TDES and not use SF.
3618ceb69499SGiuseppe CAVALLARO 	 */
36197ac6653aSJeff Kirsher 	if (priv->plat->bugged_jumbo && (dev->mtu > ETH_DATA_LEN))
3620a188222bSTom Herbert 		features &= ~NETIF_F_CSUM_MASK;
36217ac6653aSJeff Kirsher 
3622f748be53SAlexandre TORGUE 	/* Disable tso if asked by ethtool */
3623f748be53SAlexandre TORGUE 	if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
3624f748be53SAlexandre TORGUE 		if (features & NETIF_F_TSO)
3625f748be53SAlexandre TORGUE 			priv->tso = true;
3626f748be53SAlexandre TORGUE 		else
3627f748be53SAlexandre TORGUE 			priv->tso = false;
3628f748be53SAlexandre TORGUE 	}
3629f748be53SAlexandre TORGUE 
36307ac6653aSJeff Kirsher 	return features;
36317ac6653aSJeff Kirsher }
36327ac6653aSJeff Kirsher 
3633d2afb5bdSGiuseppe CAVALLARO static int stmmac_set_features(struct net_device *netdev,
3634d2afb5bdSGiuseppe CAVALLARO 			       netdev_features_t features)
3635d2afb5bdSGiuseppe CAVALLARO {
3636d2afb5bdSGiuseppe CAVALLARO 	struct stmmac_priv *priv = netdev_priv(netdev);
3637d2afb5bdSGiuseppe CAVALLARO 
3638d2afb5bdSGiuseppe CAVALLARO 	/* Keep the COE Type in case of csum is supporting */
3639d2afb5bdSGiuseppe CAVALLARO 	if (features & NETIF_F_RXCSUM)
3640d2afb5bdSGiuseppe CAVALLARO 		priv->hw->rx_csum = priv->plat->rx_coe;
3641d2afb5bdSGiuseppe CAVALLARO 	else
3642d2afb5bdSGiuseppe CAVALLARO 		priv->hw->rx_csum = 0;
3643d2afb5bdSGiuseppe CAVALLARO 	/* No check needed because rx_coe has been set before and it will be
3644d2afb5bdSGiuseppe CAVALLARO 	 * fixed in case of issue.
3645d2afb5bdSGiuseppe CAVALLARO 	 */
3646c10d4c82SJose Abreu 	stmmac_rx_ipc(priv, priv->hw);
3647d2afb5bdSGiuseppe CAVALLARO 
3648d2afb5bdSGiuseppe CAVALLARO 	return 0;
3649d2afb5bdSGiuseppe CAVALLARO }
3650d2afb5bdSGiuseppe CAVALLARO 
365132ceabcaSGiuseppe CAVALLARO /**
365232ceabcaSGiuseppe CAVALLARO  *  stmmac_interrupt - main ISR
365332ceabcaSGiuseppe CAVALLARO  *  @irq: interrupt number.
365432ceabcaSGiuseppe CAVALLARO  *  @dev_id: to pass the net device pointer.
365532ceabcaSGiuseppe CAVALLARO  *  Description: this is the main driver interrupt service routine.
3656732fdf0eSGiuseppe CAVALLARO  *  It can call:
3657732fdf0eSGiuseppe CAVALLARO  *  o DMA service routine (to manage incoming frame reception and transmission
3658732fdf0eSGiuseppe CAVALLARO  *    status)
3659732fdf0eSGiuseppe CAVALLARO  *  o Core interrupts to manage: remote wake-up, management counter, LPI
366032ceabcaSGiuseppe CAVALLARO  *    interrupts.
366132ceabcaSGiuseppe CAVALLARO  */
36627ac6653aSJeff Kirsher static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
36637ac6653aSJeff Kirsher {
36647ac6653aSJeff Kirsher 	struct net_device *dev = (struct net_device *)dev_id;
36657ac6653aSJeff Kirsher 	struct stmmac_priv *priv = netdev_priv(dev);
36667bac4e1eSJoao Pinto 	u32 rx_cnt = priv->plat->rx_queues_to_use;
36677bac4e1eSJoao Pinto 	u32 tx_cnt = priv->plat->tx_queues_to_use;
36687bac4e1eSJoao Pinto 	u32 queues_count;
36697bac4e1eSJoao Pinto 	u32 queue;
36707d9e6c5aSJose Abreu 	bool xmac;
36717bac4e1eSJoao Pinto 
36727d9e6c5aSJose Abreu 	xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
36737bac4e1eSJoao Pinto 	queues_count = (rx_cnt > tx_cnt) ? rx_cnt : tx_cnt;
36747ac6653aSJeff Kirsher 
367589f7f2cfSSrinivas Kandagatla 	if (priv->irq_wake)
367689f7f2cfSSrinivas Kandagatla 		pm_wakeup_event(priv->device, 0);
367789f7f2cfSSrinivas Kandagatla 
36787ac6653aSJeff Kirsher 	if (unlikely(!dev)) {
367938ddc59dSLABBE Corentin 		netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
36807ac6653aSJeff Kirsher 		return IRQ_NONE;
36817ac6653aSJeff Kirsher 	}
36827ac6653aSJeff Kirsher 
368334877a15SJose Abreu 	/* Check if adapter is up */
368434877a15SJose Abreu 	if (test_bit(STMMAC_DOWN, &priv->state))
368534877a15SJose Abreu 		return IRQ_HANDLED;
36868bf993a5SJose Abreu 	/* Check if a fatal error happened */
36878bf993a5SJose Abreu 	if (stmmac_safety_feat_interrupt(priv))
36888bf993a5SJose Abreu 		return IRQ_HANDLED;
368934877a15SJose Abreu 
36907ac6653aSJeff Kirsher 	/* To handle GMAC own interrupts */
36917d9e6c5aSJose Abreu 	if ((priv->plat->has_gmac) || xmac) {
3692c10d4c82SJose Abreu 		int status = stmmac_host_irq_status(priv, priv->hw, &priv->xstats);
369361fac60aSJose Abreu 		int mtl_status;
36948f71a88dSJoao Pinto 
3695d765955dSGiuseppe CAVALLARO 		if (unlikely(status)) {
3696d765955dSGiuseppe CAVALLARO 			/* For LPI we need to save the tx status */
36970982a0f6SGiuseppe CAVALLARO 			if (status & CORE_IRQ_TX_PATH_IN_LPI_MODE)
3698d765955dSGiuseppe CAVALLARO 				priv->tx_path_in_lpi_mode = true;
36990982a0f6SGiuseppe CAVALLARO 			if (status & CORE_IRQ_TX_PATH_EXIT_LPI_MODE)
3700d765955dSGiuseppe CAVALLARO 				priv->tx_path_in_lpi_mode = false;
37017bac4e1eSJoao Pinto 		}
37027bac4e1eSJoao Pinto 
37037bac4e1eSJoao Pinto 		for (queue = 0; queue < queues_count; queue++) {
370461fac60aSJose Abreu 			struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
370554139cf3SJoao Pinto 
370661fac60aSJose Abreu 			mtl_status = stmmac_host_mtl_irq_status(priv, priv->hw,
370761fac60aSJose Abreu 								queue);
370861fac60aSJose Abreu 			if (mtl_status != -EINVAL)
370961fac60aSJose Abreu 				status |= mtl_status;
37107bac4e1eSJoao Pinto 
3711a4e887faSJose Abreu 			if (status & CORE_IRQ_MTL_RX_OVERFLOW)
371261fac60aSJose Abreu 				stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
371354139cf3SJoao Pinto 						       rx_q->rx_tail_addr,
37147bac4e1eSJoao Pinto 						       queue);
37157bac4e1eSJoao Pinto 		}
371670523e63SGiuseppe CAVALLARO 
371770523e63SGiuseppe CAVALLARO 		/* PCS link status */
37183fe5cadbSGiuseppe CAVALLARO 		if (priv->hw->pcs) {
371970523e63SGiuseppe CAVALLARO 			if (priv->xstats.pcs_link)
372070523e63SGiuseppe CAVALLARO 				netif_carrier_on(dev);
372170523e63SGiuseppe CAVALLARO 			else
372270523e63SGiuseppe CAVALLARO 				netif_carrier_off(dev);
372370523e63SGiuseppe CAVALLARO 		}
3724d765955dSGiuseppe CAVALLARO 	}
3725d765955dSGiuseppe CAVALLARO 
3726d765955dSGiuseppe CAVALLARO 	/* To handle DMA interrupts */
37277ac6653aSJeff Kirsher 	stmmac_dma_interrupt(priv);
37287ac6653aSJeff Kirsher 
37297ac6653aSJeff Kirsher 	return IRQ_HANDLED;
37307ac6653aSJeff Kirsher }
37317ac6653aSJeff Kirsher 
37327ac6653aSJeff Kirsher #ifdef CONFIG_NET_POLL_CONTROLLER
37337ac6653aSJeff Kirsher /* Polling receive - used by NETCONSOLE and other diagnostic tools
3734ceb69499SGiuseppe CAVALLARO  * to allow network I/O with interrupts disabled.
3735ceb69499SGiuseppe CAVALLARO  */
static void stmmac_poll_controller(struct net_device *dev)
{
	/* Run the ISR by hand with the device IRQ masked so the regular
	 * handler cannot race with this invocation.
	 */
	disable_irq(dev->irq);
	stmmac_interrupt(dev->irq, dev);
	enable_irq(dev->irq);
}
37427ac6653aSJeff Kirsher #endif
37437ac6653aSJeff Kirsher 
37447ac6653aSJeff Kirsher /**
37457ac6653aSJeff Kirsher  *  stmmac_ioctl - Entry point for the Ioctl
37467ac6653aSJeff Kirsher  *  @dev: Device pointer.
 *  @rq: An IOCTL-specific structure, that can contain a pointer to
37487ac6653aSJeff Kirsher  *  a proprietary structure used to pass information to the driver.
37497ac6653aSJeff Kirsher  *  @cmd: IOCTL command
37507ac6653aSJeff Kirsher  *  Description:
375132ceabcaSGiuseppe CAVALLARO  *  Currently it supports the phy_mii_ioctl(...) and HW time stamping.
37527ac6653aSJeff Kirsher  */
37537ac6653aSJeff Kirsher static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
37547ac6653aSJeff Kirsher {
3755891434b1SRayagond Kokatanur 	int ret = -EOPNOTSUPP;
37567ac6653aSJeff Kirsher 
37577ac6653aSJeff Kirsher 	if (!netif_running(dev))
37587ac6653aSJeff Kirsher 		return -EINVAL;
37597ac6653aSJeff Kirsher 
3760891434b1SRayagond Kokatanur 	switch (cmd) {
3761891434b1SRayagond Kokatanur 	case SIOCGMIIPHY:
3762891434b1SRayagond Kokatanur 	case SIOCGMIIREG:
3763891434b1SRayagond Kokatanur 	case SIOCSMIIREG:
3764d6d50c7eSPhilippe Reynes 		if (!dev->phydev)
37657ac6653aSJeff Kirsher 			return -EINVAL;
3766d6d50c7eSPhilippe Reynes 		ret = phy_mii_ioctl(dev->phydev, rq, cmd);
3767891434b1SRayagond Kokatanur 		break;
3768891434b1SRayagond Kokatanur 	case SIOCSHWTSTAMP:
3769891434b1SRayagond Kokatanur 		ret = stmmac_hwtstamp_ioctl(dev, rq);
3770891434b1SRayagond Kokatanur 		break;
3771891434b1SRayagond Kokatanur 	default:
3772891434b1SRayagond Kokatanur 		break;
3773891434b1SRayagond Kokatanur 	}
37747ac6653aSJeff Kirsher 
37757ac6653aSJeff Kirsher 	return ret;
37767ac6653aSJeff Kirsher }
37777ac6653aSJeff Kirsher 
37784dbbe8ddSJose Abreu static int stmmac_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
37794dbbe8ddSJose Abreu 				    void *cb_priv)
37804dbbe8ddSJose Abreu {
37814dbbe8ddSJose Abreu 	struct stmmac_priv *priv = cb_priv;
37824dbbe8ddSJose Abreu 	int ret = -EOPNOTSUPP;
37834dbbe8ddSJose Abreu 
37844dbbe8ddSJose Abreu 	stmmac_disable_all_queues(priv);
37854dbbe8ddSJose Abreu 
37864dbbe8ddSJose Abreu 	switch (type) {
37874dbbe8ddSJose Abreu 	case TC_SETUP_CLSU32:
37884dbbe8ddSJose Abreu 		if (tc_cls_can_offload_and_chain0(priv->dev, type_data))
37894dbbe8ddSJose Abreu 			ret = stmmac_tc_setup_cls_u32(priv, priv, type_data);
37904dbbe8ddSJose Abreu 		break;
37914dbbe8ddSJose Abreu 	default:
37924dbbe8ddSJose Abreu 		break;
37934dbbe8ddSJose Abreu 	}
37944dbbe8ddSJose Abreu 
37954dbbe8ddSJose Abreu 	stmmac_enable_all_queues(priv);
37964dbbe8ddSJose Abreu 	return ret;
37974dbbe8ddSJose Abreu }
37984dbbe8ddSJose Abreu 
37994dbbe8ddSJose Abreu static int stmmac_setup_tc_block(struct stmmac_priv *priv,
38004dbbe8ddSJose Abreu 				 struct tc_block_offload *f)
38014dbbe8ddSJose Abreu {
38024dbbe8ddSJose Abreu 	if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
38034dbbe8ddSJose Abreu 		return -EOPNOTSUPP;
38044dbbe8ddSJose Abreu 
38054dbbe8ddSJose Abreu 	switch (f->command) {
38064dbbe8ddSJose Abreu 	case TC_BLOCK_BIND:
38074dbbe8ddSJose Abreu 		return tcf_block_cb_register(f->block, stmmac_setup_tc_block_cb,
380860513bd8SJohn Hurley 				priv, priv, f->extack);
38094dbbe8ddSJose Abreu 	case TC_BLOCK_UNBIND:
38104dbbe8ddSJose Abreu 		tcf_block_cb_unregister(f->block, stmmac_setup_tc_block_cb, priv);
38114dbbe8ddSJose Abreu 		return 0;
38124dbbe8ddSJose Abreu 	default:
38134dbbe8ddSJose Abreu 		return -EOPNOTSUPP;
38144dbbe8ddSJose Abreu 	}
38154dbbe8ddSJose Abreu }
38164dbbe8ddSJose Abreu 
38174dbbe8ddSJose Abreu static int stmmac_setup_tc(struct net_device *ndev, enum tc_setup_type type,
38184dbbe8ddSJose Abreu 			   void *type_data)
38194dbbe8ddSJose Abreu {
38204dbbe8ddSJose Abreu 	struct stmmac_priv *priv = netdev_priv(ndev);
38214dbbe8ddSJose Abreu 
38224dbbe8ddSJose Abreu 	switch (type) {
38234dbbe8ddSJose Abreu 	case TC_SETUP_BLOCK:
38244dbbe8ddSJose Abreu 		return stmmac_setup_tc_block(priv, type_data);
38251f705bc6SJose Abreu 	case TC_SETUP_QDISC_CBS:
38261f705bc6SJose Abreu 		return stmmac_tc_setup_cbs(priv, priv, type_data);
38274dbbe8ddSJose Abreu 	default:
38284dbbe8ddSJose Abreu 		return -EOPNOTSUPP;
38294dbbe8ddSJose Abreu 	}
38304dbbe8ddSJose Abreu }
38314dbbe8ddSJose Abreu 
3832a830405eSBhadram Varka static int stmmac_set_mac_address(struct net_device *ndev, void *addr)
3833a830405eSBhadram Varka {
3834a830405eSBhadram Varka 	struct stmmac_priv *priv = netdev_priv(ndev);
3835a830405eSBhadram Varka 	int ret = 0;
3836a830405eSBhadram Varka 
3837a830405eSBhadram Varka 	ret = eth_mac_addr(ndev, addr);
3838a830405eSBhadram Varka 	if (ret)
3839a830405eSBhadram Varka 		return ret;
3840a830405eSBhadram Varka 
3841c10d4c82SJose Abreu 	stmmac_set_umac_addr(priv, priv->hw, ndev->dev_addr, 0);
3842a830405eSBhadram Varka 
3843a830405eSBhadram Varka 	return ret;
3844a830405eSBhadram Varka }
3845a830405eSBhadram Varka 
384650fb4f74SGiuseppe CAVALLARO #ifdef CONFIG_DEBUG_FS
38477ac29055SGiuseppe CAVALLARO static struct dentry *stmmac_fs_dir;
38487ac29055SGiuseppe CAVALLARO 
3849c24602efSGiuseppe CAVALLARO static void sysfs_display_ring(void *head, int size, int extend_desc,
3850c24602efSGiuseppe CAVALLARO 			       struct seq_file *seq)
38517ac29055SGiuseppe CAVALLARO {
38527ac29055SGiuseppe CAVALLARO 	int i;
3853c24602efSGiuseppe CAVALLARO 	struct dma_extended_desc *ep = (struct dma_extended_desc *)head;
3854c24602efSGiuseppe CAVALLARO 	struct dma_desc *p = (struct dma_desc *)head;
38557ac29055SGiuseppe CAVALLARO 
3856c24602efSGiuseppe CAVALLARO 	for (i = 0; i < size; i++) {
3857c24602efSGiuseppe CAVALLARO 		if (extend_desc) {
3858c24602efSGiuseppe CAVALLARO 			seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
3859c24602efSGiuseppe CAVALLARO 				   i, (unsigned int)virt_to_phys(ep),
3860f8be0d78SMichael Weiser 				   le32_to_cpu(ep->basic.des0),
3861f8be0d78SMichael Weiser 				   le32_to_cpu(ep->basic.des1),
3862f8be0d78SMichael Weiser 				   le32_to_cpu(ep->basic.des2),
3863f8be0d78SMichael Weiser 				   le32_to_cpu(ep->basic.des3));
3864c24602efSGiuseppe CAVALLARO 			ep++;
3865c24602efSGiuseppe CAVALLARO 		} else {
3866c24602efSGiuseppe CAVALLARO 			seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
386766c25f6eSNiklas Cassel 				   i, (unsigned int)virt_to_phys(p),
3868f8be0d78SMichael Weiser 				   le32_to_cpu(p->des0), le32_to_cpu(p->des1),
3869f8be0d78SMichael Weiser 				   le32_to_cpu(p->des2), le32_to_cpu(p->des3));
3870c24602efSGiuseppe CAVALLARO 			p++;
3871c24602efSGiuseppe CAVALLARO 		}
38727ac29055SGiuseppe CAVALLARO 		seq_printf(seq, "\n");
38737ac29055SGiuseppe CAVALLARO 	}
3874c24602efSGiuseppe CAVALLARO }
38757ac29055SGiuseppe CAVALLARO 
3876fb0d9c63SYangtao Li static int stmmac_rings_status_show(struct seq_file *seq, void *v)
3877c24602efSGiuseppe CAVALLARO {
3878c24602efSGiuseppe CAVALLARO 	struct net_device *dev = seq->private;
3879c24602efSGiuseppe CAVALLARO 	struct stmmac_priv *priv = netdev_priv(dev);
388054139cf3SJoao Pinto 	u32 rx_count = priv->plat->rx_queues_to_use;
3881ce736788SJoao Pinto 	u32 tx_count = priv->plat->tx_queues_to_use;
388254139cf3SJoao Pinto 	u32 queue;
388354139cf3SJoao Pinto 
38845f2b8b62SThierry Reding 	if ((dev->flags & IFF_UP) == 0)
38855f2b8b62SThierry Reding 		return 0;
38865f2b8b62SThierry Reding 
388754139cf3SJoao Pinto 	for (queue = 0; queue < rx_count; queue++) {
388854139cf3SJoao Pinto 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
388954139cf3SJoao Pinto 
389054139cf3SJoao Pinto 		seq_printf(seq, "RX Queue %d:\n", queue);
38917ac29055SGiuseppe CAVALLARO 
3892c24602efSGiuseppe CAVALLARO 		if (priv->extend_desc) {
389354139cf3SJoao Pinto 			seq_printf(seq, "Extended descriptor ring:\n");
389454139cf3SJoao Pinto 			sysfs_display_ring((void *)rx_q->dma_erx,
389554139cf3SJoao Pinto 					   DMA_RX_SIZE, 1, seq);
389654139cf3SJoao Pinto 		} else {
389754139cf3SJoao Pinto 			seq_printf(seq, "Descriptor ring:\n");
389854139cf3SJoao Pinto 			sysfs_display_ring((void *)rx_q->dma_rx,
389954139cf3SJoao Pinto 					   DMA_RX_SIZE, 0, seq);
390054139cf3SJoao Pinto 		}
390154139cf3SJoao Pinto 	}
390254139cf3SJoao Pinto 
3903ce736788SJoao Pinto 	for (queue = 0; queue < tx_count; queue++) {
3904ce736788SJoao Pinto 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
3905ce736788SJoao Pinto 
3906ce736788SJoao Pinto 		seq_printf(seq, "TX Queue %d:\n", queue);
3907ce736788SJoao Pinto 
390854139cf3SJoao Pinto 		if (priv->extend_desc) {
3909ce736788SJoao Pinto 			seq_printf(seq, "Extended descriptor ring:\n");
3910ce736788SJoao Pinto 			sysfs_display_ring((void *)tx_q->dma_etx,
3911ce736788SJoao Pinto 					   DMA_TX_SIZE, 1, seq);
3912c24602efSGiuseppe CAVALLARO 		} else {
3913ce736788SJoao Pinto 			seq_printf(seq, "Descriptor ring:\n");
3914ce736788SJoao Pinto 			sysfs_display_ring((void *)tx_q->dma_tx,
3915ce736788SJoao Pinto 					   DMA_TX_SIZE, 0, seq);
3916ce736788SJoao Pinto 		}
39177ac29055SGiuseppe CAVALLARO 	}
39187ac29055SGiuseppe CAVALLARO 
39197ac29055SGiuseppe CAVALLARO 	return 0;
39207ac29055SGiuseppe CAVALLARO }
3921fb0d9c63SYangtao Li DEFINE_SHOW_ATTRIBUTE(stmmac_rings_status);
39227ac29055SGiuseppe CAVALLARO 
static int stmmac_dma_cap_show(struct seq_file *seq, void *v)
{
	struct net_device *dev = seq->private;
	struct stmmac_priv *priv = netdev_priv(dev);

	/* The DMA HW feature register is optional on older cores. */
	if (!priv->hw_cap_support) {
		seq_printf(seq, "DMA HW features not supported\n");
		return 0;
	}

	seq_printf(seq, "==============================\n");
	seq_printf(seq, "\tDMA HW features\n");
	seq_printf(seq, "==============================\n");

	seq_printf(seq, "\t10/100 Mbps: %s\n",
		   (priv->dma_cap.mbps_10_100) ? "Y" : "N");
	seq_printf(seq, "\t1000 Mbps: %s\n",
		   (priv->dma_cap.mbps_1000) ? "Y" : "N");
	seq_printf(seq, "\tHalf duplex: %s\n",
		   (priv->dma_cap.half_duplex) ? "Y" : "N");
	seq_printf(seq, "\tHash Filter: %s\n",
		   (priv->dma_cap.hash_filter) ? "Y" : "N");
	seq_printf(seq, "\tMultiple MAC address registers: %s\n",
		   (priv->dma_cap.multi_addr) ? "Y" : "N");
	seq_printf(seq, "\tPCS (TBI/SGMII/RTBI PHY interfaces): %s\n",
		   (priv->dma_cap.pcs) ? "Y" : "N");
	seq_printf(seq, "\tSMA (MDIO) Interface: %s\n",
		   (priv->dma_cap.sma_mdio) ? "Y" : "N");
	seq_printf(seq, "\tPMT Remote wake up: %s\n",
		   (priv->dma_cap.pmt_remote_wake_up) ? "Y" : "N");
	seq_printf(seq, "\tPMT Magic Frame: %s\n",
		   (priv->dma_cap.pmt_magic_frame) ? "Y" : "N");
	seq_printf(seq, "\tRMON module: %s\n",
		   (priv->dma_cap.rmon) ? "Y" : "N");
	seq_printf(seq, "\tIEEE 1588-2002 Time Stamp: %s\n",
		   (priv->dma_cap.time_stamp) ? "Y" : "N");
	seq_printf(seq, "\tIEEE 1588-2008 Advanced Time Stamp: %s\n",
		   (priv->dma_cap.atime_stamp) ? "Y" : "N");
	seq_printf(seq, "\t802.3az - Energy-Efficient Ethernet (EEE): %s\n",
		   (priv->dma_cap.eee) ? "Y" : "N");
	seq_printf(seq, "\tAV features: %s\n", (priv->dma_cap.av) ? "Y" : "N");
	seq_printf(seq, "\tChecksum Offload in TX: %s\n",
		   (priv->dma_cap.tx_coe) ? "Y" : "N");
	/* RX COE reporting differs between pre- and post-4.00 cores. */
	if (priv->synopsys_id >= DWMAC_CORE_4_00) {
		seq_printf(seq, "\tIP Checksum Offload in RX: %s\n",
			   (priv->dma_cap.rx_coe) ? "Y" : "N");
	} else {
		seq_printf(seq, "\tIP Checksum Offload (type1) in RX: %s\n",
			   (priv->dma_cap.rx_coe_type1) ? "Y" : "N");
		seq_printf(seq, "\tIP Checksum Offload (type2) in RX: %s\n",
			   (priv->dma_cap.rx_coe_type2) ? "Y" : "N");
	}
	seq_printf(seq, "\tRXFIFO > 2048bytes: %s\n",
		   (priv->dma_cap.rxfifo_over_2048) ? "Y" : "N");
	seq_printf(seq, "\tNumber of Additional RX channel: %d\n",
		   priv->dma_cap.number_rx_channel);
	seq_printf(seq, "\tNumber of Additional TX channel: %d\n",
		   priv->dma_cap.number_tx_channel);
	seq_printf(seq, "\tEnhanced descriptors: %s\n",
		   (priv->dma_cap.enh_desc) ? "Y" : "N");

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(stmmac_dma_cap);
3987e7434821SGiuseppe CAVALLARO 
39887ac29055SGiuseppe CAVALLARO static int stmmac_init_fs(struct net_device *dev)
39897ac29055SGiuseppe CAVALLARO {
3990466c5ac8SMathieu Olivari 	struct stmmac_priv *priv = netdev_priv(dev);
39917ac29055SGiuseppe CAVALLARO 
3992466c5ac8SMathieu Olivari 	/* Create per netdev entries */
3993466c5ac8SMathieu Olivari 	priv->dbgfs_dir = debugfs_create_dir(dev->name, stmmac_fs_dir);
3994466c5ac8SMathieu Olivari 
3995466c5ac8SMathieu Olivari 	if (!priv->dbgfs_dir || IS_ERR(priv->dbgfs_dir)) {
399638ddc59dSLABBE Corentin 		netdev_err(priv->dev, "ERROR failed to create debugfs directory\n");
39977ac29055SGiuseppe CAVALLARO 
39987ac29055SGiuseppe CAVALLARO 		return -ENOMEM;
39997ac29055SGiuseppe CAVALLARO 	}
40007ac29055SGiuseppe CAVALLARO 
40017ac29055SGiuseppe CAVALLARO 	/* Entry to report DMA RX/TX rings */
4002466c5ac8SMathieu Olivari 	priv->dbgfs_rings_status =
4003d3757ba4SJoe Perches 		debugfs_create_file("descriptors_status", 0444,
4004466c5ac8SMathieu Olivari 				    priv->dbgfs_dir, dev,
40057ac29055SGiuseppe CAVALLARO 				    &stmmac_rings_status_fops);
40067ac29055SGiuseppe CAVALLARO 
4007466c5ac8SMathieu Olivari 	if (!priv->dbgfs_rings_status || IS_ERR(priv->dbgfs_rings_status)) {
400838ddc59dSLABBE Corentin 		netdev_err(priv->dev, "ERROR creating stmmac ring debugfs file\n");
4009466c5ac8SMathieu Olivari 		debugfs_remove_recursive(priv->dbgfs_dir);
40107ac29055SGiuseppe CAVALLARO 
40117ac29055SGiuseppe CAVALLARO 		return -ENOMEM;
40127ac29055SGiuseppe CAVALLARO 	}
40137ac29055SGiuseppe CAVALLARO 
4014e7434821SGiuseppe CAVALLARO 	/* Entry to report the DMA HW features */
4015d3757ba4SJoe Perches 	priv->dbgfs_dma_cap = debugfs_create_file("dma_cap", 0444,
4016466c5ac8SMathieu Olivari 						  priv->dbgfs_dir,
4017e7434821SGiuseppe CAVALLARO 						  dev, &stmmac_dma_cap_fops);
4018e7434821SGiuseppe CAVALLARO 
4019466c5ac8SMathieu Olivari 	if (!priv->dbgfs_dma_cap || IS_ERR(priv->dbgfs_dma_cap)) {
402038ddc59dSLABBE Corentin 		netdev_err(priv->dev, "ERROR creating stmmac MMC debugfs file\n");
4021466c5ac8SMathieu Olivari 		debugfs_remove_recursive(priv->dbgfs_dir);
4022e7434821SGiuseppe CAVALLARO 
4023e7434821SGiuseppe CAVALLARO 		return -ENOMEM;
4024e7434821SGiuseppe CAVALLARO 	}
4025e7434821SGiuseppe CAVALLARO 
40267ac29055SGiuseppe CAVALLARO 	return 0;
40277ac29055SGiuseppe CAVALLARO }
40287ac29055SGiuseppe CAVALLARO 
static void stmmac_exit_fs(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);

	/* Removing the directory also drops the files created under it. */
	debugfs_remove_recursive(priv->dbgfs_dir);
}
403550fb4f74SGiuseppe CAVALLARO #endif /* CONFIG_DEBUG_FS */
40367ac29055SGiuseppe CAVALLARO 
/* net_device callbacks implemented by this driver. */
static const struct net_device_ops stmmac_netdev_ops = {
	.ndo_open = stmmac_open,
	.ndo_start_xmit = stmmac_xmit,
	.ndo_stop = stmmac_release,
	.ndo_change_mtu = stmmac_change_mtu,
	.ndo_fix_features = stmmac_fix_features,
	.ndo_set_features = stmmac_set_features,
	.ndo_set_rx_mode = stmmac_set_rx_mode,
	.ndo_tx_timeout = stmmac_tx_timeout,
	.ndo_do_ioctl = stmmac_ioctl,
	.ndo_setup_tc = stmmac_setup_tc,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = stmmac_poll_controller,
#endif
	.ndo_set_mac_address = stmmac_set_mac_address,
};
40537ac6653aSJeff Kirsher 
static void stmmac_reset_subtask(struct stmmac_priv *priv)
{
	/* Run only when a reset was actually requested and the adapter is
	 * not already going down.
	 */
	if (!test_and_clear_bit(STMMAC_RESET_REQUESTED, &priv->state))
		return;
	if (test_bit(STMMAC_DOWN, &priv->state))
		return;

	netdev_err(priv->dev, "Reset adapter.\n");

	rtnl_lock();
	netif_trans_update(priv->dev);
	/* Wait until any concurrent reset in flight has finished. */
	while (test_and_set_bit(STMMAC_RESETING, &priv->state))
		usleep_range(1000, 2000);

	/* Bounce the interface to bring the HW back to a sane state. */
	set_bit(STMMAC_DOWN, &priv->state);
	dev_close(priv->dev);
	dev_open(priv->dev, NULL);
	clear_bit(STMMAC_DOWN, &priv->state);
	clear_bit(STMMAC_RESETING, &priv->state);
	rtnl_unlock();
}
407534877a15SJose Abreu 
407634877a15SJose Abreu static void stmmac_service_task(struct work_struct *work)
407734877a15SJose Abreu {
407834877a15SJose Abreu 	struct stmmac_priv *priv = container_of(work, struct stmmac_priv,
407934877a15SJose Abreu 			service_task);
408034877a15SJose Abreu 
408134877a15SJose Abreu 	stmmac_reset_subtask(priv);
408234877a15SJose Abreu 	clear_bit(STMMAC_SERVICE_SCHED, &priv->state);
408334877a15SJose Abreu }
408434877a15SJose Abreu 
40857ac6653aSJeff Kirsher /**
4086cf3f047bSGiuseppe CAVALLARO  *  stmmac_hw_init - Init the MAC device
408732ceabcaSGiuseppe CAVALLARO  *  @priv: driver private structure
4088732fdf0eSGiuseppe CAVALLARO  *  Description: this function is to configure the MAC device according to
4089732fdf0eSGiuseppe CAVALLARO  *  some platform parameters or the HW capability register. It prepares the
4090732fdf0eSGiuseppe CAVALLARO  *  driver to use either ring or chain modes and to setup either enhanced or
4091732fdf0eSGiuseppe CAVALLARO  *  normal descriptors.
4092cf3f047bSGiuseppe CAVALLARO  */
4093cf3f047bSGiuseppe CAVALLARO static int stmmac_hw_init(struct stmmac_priv *priv)
4094cf3f047bSGiuseppe CAVALLARO {
40955f0456b4SJose Abreu 	int ret;
4096cf3f047bSGiuseppe CAVALLARO 
40979f93ac8dSLABBE Corentin 	/* dwmac-sun8i only work in chain mode */
40989f93ac8dSLABBE Corentin 	if (priv->plat->has_sun8i)
40999f93ac8dSLABBE Corentin 		chain_mode = 1;
41005f0456b4SJose Abreu 	priv->chain_mode = chain_mode;
41019f93ac8dSLABBE Corentin 
41025f0456b4SJose Abreu 	/* Initialize HW Interface */
41035f0456b4SJose Abreu 	ret = stmmac_hwif_init(priv);
41045f0456b4SJose Abreu 	if (ret)
41055f0456b4SJose Abreu 		return ret;
41064a7d666aSGiuseppe CAVALLARO 
4107cf3f047bSGiuseppe CAVALLARO 	/* Get the HW capability (new GMAC newer than 3.50a) */
4108cf3f047bSGiuseppe CAVALLARO 	priv->hw_cap_support = stmmac_get_hw_features(priv);
4109cf3f047bSGiuseppe CAVALLARO 	if (priv->hw_cap_support) {
411038ddc59dSLABBE Corentin 		dev_info(priv->device, "DMA HW capability register supported\n");
4111cf3f047bSGiuseppe CAVALLARO 
4112cf3f047bSGiuseppe CAVALLARO 		/* We can override some gmac/dma configuration fields: e.g.
4113cf3f047bSGiuseppe CAVALLARO 		 * enh_desc, tx_coe (e.g. that are passed through the
4114cf3f047bSGiuseppe CAVALLARO 		 * platform) with the values from the HW capability
4115cf3f047bSGiuseppe CAVALLARO 		 * register (if supported).
4116cf3f047bSGiuseppe CAVALLARO 		 */
4117cf3f047bSGiuseppe CAVALLARO 		priv->plat->enh_desc = priv->dma_cap.enh_desc;
4118cf3f047bSGiuseppe CAVALLARO 		priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up;
41193fe5cadbSGiuseppe CAVALLARO 		priv->hw->pmt = priv->plat->pmt;
412038912bdbSDeepak SIKRI 
4121a8df35d4SEzequiel Garcia 		/* TXCOE doesn't work in thresh DMA mode */
4122a8df35d4SEzequiel Garcia 		if (priv->plat->force_thresh_dma_mode)
4123a8df35d4SEzequiel Garcia 			priv->plat->tx_coe = 0;
4124a8df35d4SEzequiel Garcia 		else
412538912bdbSDeepak SIKRI 			priv->plat->tx_coe = priv->dma_cap.tx_coe;
4126a8df35d4SEzequiel Garcia 
4127f748be53SAlexandre TORGUE 		/* In case of GMAC4 rx_coe is from HW cap register. */
4128f748be53SAlexandre TORGUE 		priv->plat->rx_coe = priv->dma_cap.rx_coe;
412938912bdbSDeepak SIKRI 
413038912bdbSDeepak SIKRI 		if (priv->dma_cap.rx_coe_type2)
413138912bdbSDeepak SIKRI 			priv->plat->rx_coe = STMMAC_RX_COE_TYPE2;
413238912bdbSDeepak SIKRI 		else if (priv->dma_cap.rx_coe_type1)
413338912bdbSDeepak SIKRI 			priv->plat->rx_coe = STMMAC_RX_COE_TYPE1;
413438912bdbSDeepak SIKRI 
413538ddc59dSLABBE Corentin 	} else {
413638ddc59dSLABBE Corentin 		dev_info(priv->device, "No HW DMA feature register supported\n");
413738ddc59dSLABBE Corentin 	}
4138cf3f047bSGiuseppe CAVALLARO 
4139d2afb5bdSGiuseppe CAVALLARO 	if (priv->plat->rx_coe) {
4140d2afb5bdSGiuseppe CAVALLARO 		priv->hw->rx_csum = priv->plat->rx_coe;
414138ddc59dSLABBE Corentin 		dev_info(priv->device, "RX Checksum Offload Engine supported\n");
4142f748be53SAlexandre TORGUE 		if (priv->synopsys_id < DWMAC_CORE_4_00)
414338ddc59dSLABBE Corentin 			dev_info(priv->device, "COE Type %d\n", priv->hw->rx_csum);
4144d2afb5bdSGiuseppe CAVALLARO 	}
4145cf3f047bSGiuseppe CAVALLARO 	if (priv->plat->tx_coe)
414638ddc59dSLABBE Corentin 		dev_info(priv->device, "TX Checksum insertion supported\n");
4147cf3f047bSGiuseppe CAVALLARO 
4148cf3f047bSGiuseppe CAVALLARO 	if (priv->plat->pmt) {
414938ddc59dSLABBE Corentin 		dev_info(priv->device, "Wake-Up On Lan supported\n");
4150cf3f047bSGiuseppe CAVALLARO 		device_set_wakeup_capable(priv->device, 1);
4151cf3f047bSGiuseppe CAVALLARO 	}
4152cf3f047bSGiuseppe CAVALLARO 
4153f748be53SAlexandre TORGUE 	if (priv->dma_cap.tsoen)
415438ddc59dSLABBE Corentin 		dev_info(priv->device, "TSO supported\n");
4155f748be53SAlexandre TORGUE 
41567cfde0afSJose Abreu 	/* Run HW quirks, if any */
41577cfde0afSJose Abreu 	if (priv->hwif_quirks) {
41587cfde0afSJose Abreu 		ret = priv->hwif_quirks(priv);
41597cfde0afSJose Abreu 		if (ret)
41607cfde0afSJose Abreu 			return ret;
41617cfde0afSJose Abreu 	}
41627cfde0afSJose Abreu 
4163c24602efSGiuseppe CAVALLARO 	return 0;
4164cf3f047bSGiuseppe CAVALLARO }
4165cf3f047bSGiuseppe CAVALLARO 
4166cf3f047bSGiuseppe CAVALLARO /**
4167bfab27a1SGiuseppe CAVALLARO  * stmmac_dvr_probe
4168bfab27a1SGiuseppe CAVALLARO  * @device: device pointer
4169ff3dd78cSGiuseppe CAVALLARO  * @plat_dat: platform data pointer
4170e56788cfSJoachim Eastwood  * @res: stmmac resource pointer
4171bfab27a1SGiuseppe CAVALLARO  * Description: this is the main probe function used to
4172bfab27a1SGiuseppe CAVALLARO  * call the alloc_etherdev, allocate the priv structure.
41739afec6efSAndy Shevchenko  * Return:
417415ffac73SJoachim Eastwood  * returns 0 on success, otherwise errno.
41757ac6653aSJeff Kirsher  */
417615ffac73SJoachim Eastwood int stmmac_dvr_probe(struct device *device,
4177cf3f047bSGiuseppe CAVALLARO 		     struct plat_stmmacenet_data *plat_dat,
4178e56788cfSJoachim Eastwood 		     struct stmmac_resources *res)
41797ac6653aSJeff Kirsher {
4180bfab27a1SGiuseppe CAVALLARO 	struct net_device *ndev = NULL;
4181bfab27a1SGiuseppe CAVALLARO 	struct stmmac_priv *priv;
41828fce3331SJose Abreu 	u32 queue, maxq;
4183c22a3f48SJoao Pinto 	int ret = 0;
41847ac6653aSJeff Kirsher 
4185c22a3f48SJoao Pinto 	ndev = alloc_etherdev_mqs(sizeof(struct stmmac_priv),
4186c22a3f48SJoao Pinto 				  MTL_MAX_TX_QUEUES,
4187c22a3f48SJoao Pinto 				  MTL_MAX_RX_QUEUES);
418841de8d4cSJoe Perches 	if (!ndev)
418915ffac73SJoachim Eastwood 		return -ENOMEM;
41907ac6653aSJeff Kirsher 
4191bfab27a1SGiuseppe CAVALLARO 	SET_NETDEV_DEV(ndev, device);
41927ac6653aSJeff Kirsher 
4193bfab27a1SGiuseppe CAVALLARO 	priv = netdev_priv(ndev);
4194bfab27a1SGiuseppe CAVALLARO 	priv->device = device;
4195bfab27a1SGiuseppe CAVALLARO 	priv->dev = ndev;
4196bfab27a1SGiuseppe CAVALLARO 
4197bfab27a1SGiuseppe CAVALLARO 	stmmac_set_ethtool_ops(ndev);
4198cf3f047bSGiuseppe CAVALLARO 	priv->pause = pause;
4199cf3f047bSGiuseppe CAVALLARO 	priv->plat = plat_dat;
4200e56788cfSJoachim Eastwood 	priv->ioaddr = res->addr;
4201e56788cfSJoachim Eastwood 	priv->dev->base_addr = (unsigned long)res->addr;
4202e56788cfSJoachim Eastwood 
4203e56788cfSJoachim Eastwood 	priv->dev->irq = res->irq;
4204e56788cfSJoachim Eastwood 	priv->wol_irq = res->wol_irq;
4205e56788cfSJoachim Eastwood 	priv->lpi_irq = res->lpi_irq;
4206e56788cfSJoachim Eastwood 
4207e56788cfSJoachim Eastwood 	if (res->mac)
4208e56788cfSJoachim Eastwood 		memcpy(priv->dev->dev_addr, res->mac, ETH_ALEN);
4209bfab27a1SGiuseppe CAVALLARO 
4210a7a62685SJoachim Eastwood 	dev_set_drvdata(device, priv->dev);
4211803f8fc4SJoachim Eastwood 
4212cf3f047bSGiuseppe CAVALLARO 	/* Verify driver arguments */
4213cf3f047bSGiuseppe CAVALLARO 	stmmac_verify_args();
4214cf3f047bSGiuseppe CAVALLARO 
421534877a15SJose Abreu 	/* Allocate workqueue */
421634877a15SJose Abreu 	priv->wq = create_singlethread_workqueue("stmmac_wq");
421734877a15SJose Abreu 	if (!priv->wq) {
421834877a15SJose Abreu 		dev_err(priv->device, "failed to create workqueue\n");
4219b26322d2SDan Carpenter 		ret = -ENOMEM;
422034877a15SJose Abreu 		goto error_wq;
422134877a15SJose Abreu 	}
422234877a15SJose Abreu 
422334877a15SJose Abreu 	INIT_WORK(&priv->service_task, stmmac_service_task);
422434877a15SJose Abreu 
4225cf3f047bSGiuseppe CAVALLARO 	/* Override with kernel parameters if supplied XXX CRS XXX
4226ceb69499SGiuseppe CAVALLARO 	 * this needs to have multiple instances
4227ceb69499SGiuseppe CAVALLARO 	 */
4228cf3f047bSGiuseppe CAVALLARO 	if ((phyaddr >= 0) && (phyaddr <= 31))
4229cf3f047bSGiuseppe CAVALLARO 		priv->plat->phy_addr = phyaddr;
4230cf3f047bSGiuseppe CAVALLARO 
423190f522a2SEugeniy Paltsev 	if (priv->plat->stmmac_rst) {
423290f522a2SEugeniy Paltsev 		ret = reset_control_assert(priv->plat->stmmac_rst);
4233f573c0b9Sjpinto 		reset_control_deassert(priv->plat->stmmac_rst);
423490f522a2SEugeniy Paltsev 		/* Some reset controllers have only reset callback instead of
423590f522a2SEugeniy Paltsev 		 * assert + deassert callbacks pair.
423690f522a2SEugeniy Paltsev 		 */
423790f522a2SEugeniy Paltsev 		if (ret == -ENOTSUPP)
423890f522a2SEugeniy Paltsev 			reset_control_reset(priv->plat->stmmac_rst);
423990f522a2SEugeniy Paltsev 	}
4240c5e4ddbdSChen-Yu Tsai 
4241cf3f047bSGiuseppe CAVALLARO 	/* Init MAC and get the capabilities */
4242c24602efSGiuseppe CAVALLARO 	ret = stmmac_hw_init(priv);
4243c24602efSGiuseppe CAVALLARO 	if (ret)
424462866e98SChen-Yu Tsai 		goto error_hw_init;
4245cf3f047bSGiuseppe CAVALLARO 
4246c22a3f48SJoao Pinto 	/* Configure real RX and TX queues */
4247c02b7a91SJoao Pinto 	netif_set_real_num_rx_queues(ndev, priv->plat->rx_queues_to_use);
4248c02b7a91SJoao Pinto 	netif_set_real_num_tx_queues(ndev, priv->plat->tx_queues_to_use);
4249c22a3f48SJoao Pinto 
4250cf3f047bSGiuseppe CAVALLARO 	ndev->netdev_ops = &stmmac_netdev_ops;
4251cf3f047bSGiuseppe CAVALLARO 
4252cf3f047bSGiuseppe CAVALLARO 	ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
4253cf3f047bSGiuseppe CAVALLARO 			    NETIF_F_RXCSUM;
4254f748be53SAlexandre TORGUE 
42554dbbe8ddSJose Abreu 	ret = stmmac_tc_init(priv, priv);
42564dbbe8ddSJose Abreu 	if (!ret) {
42574dbbe8ddSJose Abreu 		ndev->hw_features |= NETIF_F_HW_TC;
42584dbbe8ddSJose Abreu 	}
42594dbbe8ddSJose Abreu 
4260f748be53SAlexandre TORGUE 	if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
42619edfa7daSNiklas Cassel 		ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
4262f748be53SAlexandre TORGUE 		priv->tso = true;
426338ddc59dSLABBE Corentin 		dev_info(priv->device, "TSO feature enabled\n");
4264f748be53SAlexandre TORGUE 	}
4265bfab27a1SGiuseppe CAVALLARO 	ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;
4266bfab27a1SGiuseppe CAVALLARO 	ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
42677ac6653aSJeff Kirsher #ifdef STMMAC_VLAN_TAG_USED
42687ac6653aSJeff Kirsher 	/* Both mac100 and gmac support receive VLAN tag detection */
4269ab188e8fSElad Nachman 	ndev->features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX;
42707ac6653aSJeff Kirsher #endif
42717ac6653aSJeff Kirsher 	priv->msg_enable = netif_msg_init(debug, default_msg_level);
42727ac6653aSJeff Kirsher 
427344770e11SJarod Wilson 	/* MTU range: 46 - hw-specific max */
427444770e11SJarod Wilson 	ndev->min_mtu = ETH_ZLEN - ETH_HLEN;
427544770e11SJarod Wilson 	if ((priv->plat->enh_desc) || (priv->synopsys_id >= DWMAC_CORE_4_00))
427644770e11SJarod Wilson 		ndev->max_mtu = JUMBO_LEN;
42777d9e6c5aSJose Abreu 	else if (priv->plat->has_xgmac)
42787d9e6c5aSJose Abreu 		ndev->max_mtu = XGMAC_JUMBO_LEN;
427944770e11SJarod Wilson 	else
428044770e11SJarod Wilson 		ndev->max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN);
4281a2cd64f3SKweh, Hock Leong 	/* Will not overwrite ndev->max_mtu if plat->maxmtu > ndev->max_mtu
4282a2cd64f3SKweh, Hock Leong 	 * as well as plat->maxmtu < ndev->min_mtu which is a invalid range.
4283a2cd64f3SKweh, Hock Leong 	 */
4284a2cd64f3SKweh, Hock Leong 	if ((priv->plat->maxmtu < ndev->max_mtu) &&
4285a2cd64f3SKweh, Hock Leong 	    (priv->plat->maxmtu >= ndev->min_mtu))
428644770e11SJarod Wilson 		ndev->max_mtu = priv->plat->maxmtu;
4287a2cd64f3SKweh, Hock Leong 	else if (priv->plat->maxmtu < ndev->min_mtu)
4288b618ab45SHeiner Kallweit 		dev_warn(priv->device,
4289a2cd64f3SKweh, Hock Leong 			 "%s: warning: maxmtu having invalid value (%d)\n",
4290a2cd64f3SKweh, Hock Leong 			 __func__, priv->plat->maxmtu);
429144770e11SJarod Wilson 
42927ac6653aSJeff Kirsher 	if (flow_ctrl)
42937ac6653aSJeff Kirsher 		priv->flow_ctrl = FLOW_AUTO;	/* RX/TX pause on */
42947ac6653aSJeff Kirsher 
429562a2ab93SGiuseppe CAVALLARO 	/* Rx Watchdog is available in the COREs newer than the 3.40.
429662a2ab93SGiuseppe CAVALLARO 	 * In some case, for example on bugged HW this feature
429762a2ab93SGiuseppe CAVALLARO 	 * has to be disable and this can be done by passing the
429862a2ab93SGiuseppe CAVALLARO 	 * riwt_off field from the platform.
429962a2ab93SGiuseppe CAVALLARO 	 */
43007d9e6c5aSJose Abreu 	if (((priv->synopsys_id >= DWMAC_CORE_3_50) ||
43017d9e6c5aSJose Abreu 	    (priv->plat->has_xgmac)) && (!priv->plat->riwt_off)) {
430262a2ab93SGiuseppe CAVALLARO 		priv->use_riwt = 1;
4303b618ab45SHeiner Kallweit 		dev_info(priv->device,
4304b618ab45SHeiner Kallweit 			 "Enable RX Mitigation via HW Watchdog Timer\n");
430562a2ab93SGiuseppe CAVALLARO 	}
430662a2ab93SGiuseppe CAVALLARO 
43078fce3331SJose Abreu 	/* Setup channels NAPI */
43088fce3331SJose Abreu 	maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
4309c22a3f48SJoao Pinto 
43108fce3331SJose Abreu 	for (queue = 0; queue < maxq; queue++) {
43118fce3331SJose Abreu 		struct stmmac_channel *ch = &priv->channel[queue];
43128fce3331SJose Abreu 
43138fce3331SJose Abreu 		ch->priv_data = priv;
43148fce3331SJose Abreu 		ch->index = queue;
43158fce3331SJose Abreu 
43168fce3331SJose Abreu 		if (queue < priv->plat->rx_queues_to_use)
43178fce3331SJose Abreu 			ch->has_rx = true;
43188fce3331SJose Abreu 		if (queue < priv->plat->tx_queues_to_use)
43198fce3331SJose Abreu 			ch->has_tx = true;
43208fce3331SJose Abreu 
43218fce3331SJose Abreu 		netif_napi_add(ndev, &ch->napi, stmmac_napi_poll,
43228fce3331SJose Abreu 			       NAPI_POLL_WEIGHT);
4323c22a3f48SJoao Pinto 	}
43247ac6653aSJeff Kirsher 
432529555fa3SThierry Reding 	mutex_init(&priv->lock);
43267ac6653aSJeff Kirsher 
4327cd7201f4SGiuseppe CAVALLARO 	/* If a specific clk_csr value is passed from the platform
4328cd7201f4SGiuseppe CAVALLARO 	 * this means that the CSR Clock Range selection cannot be
4329cd7201f4SGiuseppe CAVALLARO 	 * changed at run-time and it is fixed. Viceversa the driver'll try to
4330cd7201f4SGiuseppe CAVALLARO 	 * set the MDC clock dynamically according to the csr actual
4331cd7201f4SGiuseppe CAVALLARO 	 * clock input.
4332cd7201f4SGiuseppe CAVALLARO 	 */
4333cd7201f4SGiuseppe CAVALLARO 	if (!priv->plat->clk_csr)
4334cd7201f4SGiuseppe CAVALLARO 		stmmac_clk_csr_set(priv);
4335cd7201f4SGiuseppe CAVALLARO 	else
4336cd7201f4SGiuseppe CAVALLARO 		priv->clk_csr = priv->plat->clk_csr;
4337cd7201f4SGiuseppe CAVALLARO 
4338e58bb43fSGiuseppe CAVALLARO 	stmmac_check_pcs_mode(priv);
4339e58bb43fSGiuseppe CAVALLARO 
43403fe5cadbSGiuseppe CAVALLARO 	if (priv->hw->pcs != STMMAC_PCS_RGMII  &&
43413fe5cadbSGiuseppe CAVALLARO 	    priv->hw->pcs != STMMAC_PCS_TBI &&
43423fe5cadbSGiuseppe CAVALLARO 	    priv->hw->pcs != STMMAC_PCS_RTBI) {
43434bfcbd7aSFrancesco Virlinzi 		/* MDIO bus Registration */
43444bfcbd7aSFrancesco Virlinzi 		ret = stmmac_mdio_register(ndev);
43454bfcbd7aSFrancesco Virlinzi 		if (ret < 0) {
4346b618ab45SHeiner Kallweit 			dev_err(priv->device,
434738ddc59dSLABBE Corentin 				"%s: MDIO bus (id: %d) registration failed",
43484bfcbd7aSFrancesco Virlinzi 				__func__, priv->plat->bus_id);
43496a81c26fSViresh Kumar 			goto error_mdio_register;
43504bfcbd7aSFrancesco Virlinzi 		}
4351e58bb43fSGiuseppe CAVALLARO 	}
43524bfcbd7aSFrancesco Virlinzi 
435357016590SFlorian Fainelli 	ret = register_netdev(ndev);
4354b2eb09afSFlorian Fainelli 	if (ret) {
4355b618ab45SHeiner Kallweit 		dev_err(priv->device, "%s: ERROR %i registering the device\n",
435657016590SFlorian Fainelli 			__func__, ret);
4357b2eb09afSFlorian Fainelli 		goto error_netdev_register;
4358b2eb09afSFlorian Fainelli 	}
43597ac6653aSJeff Kirsher 
43605f2b8b62SThierry Reding #ifdef CONFIG_DEBUG_FS
43615f2b8b62SThierry Reding 	ret = stmmac_init_fs(ndev);
43625f2b8b62SThierry Reding 	if (ret < 0)
43635f2b8b62SThierry Reding 		netdev_warn(priv->dev, "%s: failed debugFS registration\n",
43645f2b8b62SThierry Reding 			    __func__);
43655f2b8b62SThierry Reding #endif
43665f2b8b62SThierry Reding 
436757016590SFlorian Fainelli 	return ret;
43687ac6653aSJeff Kirsher 
43696a81c26fSViresh Kumar error_netdev_register:
4370b2eb09afSFlorian Fainelli 	if (priv->hw->pcs != STMMAC_PCS_RGMII &&
4371b2eb09afSFlorian Fainelli 	    priv->hw->pcs != STMMAC_PCS_TBI &&
4372b2eb09afSFlorian Fainelli 	    priv->hw->pcs != STMMAC_PCS_RTBI)
4373b2eb09afSFlorian Fainelli 		stmmac_mdio_unregister(ndev);
43747ac6653aSJeff Kirsher error_mdio_register:
43758fce3331SJose Abreu 	for (queue = 0; queue < maxq; queue++) {
43768fce3331SJose Abreu 		struct stmmac_channel *ch = &priv->channel[queue];
4377c22a3f48SJoao Pinto 
43788fce3331SJose Abreu 		netif_napi_del(&ch->napi);
4379c22a3f48SJoao Pinto 	}
438062866e98SChen-Yu Tsai error_hw_init:
438134877a15SJose Abreu 	destroy_workqueue(priv->wq);
438234877a15SJose Abreu error_wq:
43837ac6653aSJeff Kirsher 	free_netdev(ndev);
43847ac6653aSJeff Kirsher 
438515ffac73SJoachim Eastwood 	return ret;
43867ac6653aSJeff Kirsher }
4387b2e2f0c7SAndy Shevchenko EXPORT_SYMBOL_GPL(stmmac_dvr_probe);
43887ac6653aSJeff Kirsher 
43897ac6653aSJeff Kirsher /**
43907ac6653aSJeff Kirsher  * stmmac_dvr_remove
4391f4e7bd81SJoachim Eastwood  * @dev: device pointer
43927ac6653aSJeff Kirsher  * Description: this function resets the TX/RX processes, disables the MAC RX/TX
4393bfab27a1SGiuseppe CAVALLARO  * changes the link status, releases the DMA descriptor rings.
43947ac6653aSJeff Kirsher  */
4395f4e7bd81SJoachim Eastwood int stmmac_dvr_remove(struct device *dev)
43967ac6653aSJeff Kirsher {
4397f4e7bd81SJoachim Eastwood 	struct net_device *ndev = dev_get_drvdata(dev);
43987ac6653aSJeff Kirsher 	struct stmmac_priv *priv = netdev_priv(ndev);
43997ac6653aSJeff Kirsher 
440038ddc59dSLABBE Corentin 	netdev_info(priv->dev, "%s: removing driver", __func__);
44017ac6653aSJeff Kirsher 
44025f2b8b62SThierry Reding #ifdef CONFIG_DEBUG_FS
44035f2b8b62SThierry Reding 	stmmac_exit_fs(ndev);
44045f2b8b62SThierry Reding #endif
4405ae4f0d46SJoao Pinto 	stmmac_stop_all_dma(priv);
44067ac6653aSJeff Kirsher 
4407c10d4c82SJose Abreu 	stmmac_mac_set(priv, priv->ioaddr, false);
44087ac6653aSJeff Kirsher 	netif_carrier_off(ndev);
44097ac6653aSJeff Kirsher 	unregister_netdev(ndev);
4410f573c0b9Sjpinto 	if (priv->plat->stmmac_rst)
4411f573c0b9Sjpinto 		reset_control_assert(priv->plat->stmmac_rst);
4412f573c0b9Sjpinto 	clk_disable_unprepare(priv->plat->pclk);
4413f573c0b9Sjpinto 	clk_disable_unprepare(priv->plat->stmmac_clk);
44143fe5cadbSGiuseppe CAVALLARO 	if (priv->hw->pcs != STMMAC_PCS_RGMII &&
44153fe5cadbSGiuseppe CAVALLARO 	    priv->hw->pcs != STMMAC_PCS_TBI &&
44163fe5cadbSGiuseppe CAVALLARO 	    priv->hw->pcs != STMMAC_PCS_RTBI)
4417e743471fSBryan O'Donoghue 		stmmac_mdio_unregister(ndev);
441834877a15SJose Abreu 	destroy_workqueue(priv->wq);
441929555fa3SThierry Reding 	mutex_destroy(&priv->lock);
44207ac6653aSJeff Kirsher 	free_netdev(ndev);
44217ac6653aSJeff Kirsher 
44227ac6653aSJeff Kirsher 	return 0;
44237ac6653aSJeff Kirsher }
4424b2e2f0c7SAndy Shevchenko EXPORT_SYMBOL_GPL(stmmac_dvr_remove);
44257ac6653aSJeff Kirsher 
4426732fdf0eSGiuseppe CAVALLARO /**
4427732fdf0eSGiuseppe CAVALLARO  * stmmac_suspend - suspend callback
4428f4e7bd81SJoachim Eastwood  * @dev: device pointer
4429732fdf0eSGiuseppe CAVALLARO  * Description: this is the function to suspend the device and it is called
4430732fdf0eSGiuseppe CAVALLARO  * by the platform driver to stop the network queue, release the resources,
4431732fdf0eSGiuseppe CAVALLARO  * program the PMT register (for WoL), clean and release driver resources.
4432732fdf0eSGiuseppe CAVALLARO  */
4433f4e7bd81SJoachim Eastwood int stmmac_suspend(struct device *dev)
44347ac6653aSJeff Kirsher {
4435f4e7bd81SJoachim Eastwood 	struct net_device *ndev = dev_get_drvdata(dev);
44367ac6653aSJeff Kirsher 	struct stmmac_priv *priv = netdev_priv(ndev);
44377ac6653aSJeff Kirsher 
44387ac6653aSJeff Kirsher 	if (!ndev || !netif_running(ndev))
44397ac6653aSJeff Kirsher 		return 0;
44407ac6653aSJeff Kirsher 
4441d6d50c7eSPhilippe Reynes 	if (ndev->phydev)
4442d6d50c7eSPhilippe Reynes 		phy_stop(ndev->phydev);
4443102463b1SFrancesco Virlinzi 
444429555fa3SThierry Reding 	mutex_lock(&priv->lock);
44457ac6653aSJeff Kirsher 
44467ac6653aSJeff Kirsher 	netif_device_detach(ndev);
4447c22a3f48SJoao Pinto 	stmmac_stop_all_queues(priv);
44487ac6653aSJeff Kirsher 
4449c22a3f48SJoao Pinto 	stmmac_disable_all_queues(priv);
44507ac6653aSJeff Kirsher 
44517ac6653aSJeff Kirsher 	/* Stop TX/RX DMA */
4452ae4f0d46SJoao Pinto 	stmmac_stop_all_dma(priv);
4453c24602efSGiuseppe CAVALLARO 
44547ac6653aSJeff Kirsher 	/* Enable Power down mode by programming the PMT regs */
445589f7f2cfSSrinivas Kandagatla 	if (device_may_wakeup(priv->device)) {
4456c10d4c82SJose Abreu 		stmmac_pmt(priv, priv->hw, priv->wolopts);
445789f7f2cfSSrinivas Kandagatla 		priv->irq_wake = 1;
445889f7f2cfSSrinivas Kandagatla 	} else {
4459c10d4c82SJose Abreu 		stmmac_mac_set(priv, priv->ioaddr, false);
4460db88f10aSSrinivas Kandagatla 		pinctrl_pm_select_sleep_state(priv->device);
4461ba1377ffSGiuseppe CAVALLARO 		/* Disable clock in case of PWM is off */
4462f573c0b9Sjpinto 		clk_disable(priv->plat->pclk);
4463f573c0b9Sjpinto 		clk_disable(priv->plat->stmmac_clk);
4464ba1377ffSGiuseppe CAVALLARO 	}
446529555fa3SThierry Reding 	mutex_unlock(&priv->lock);
44662d871aa0SVince Bridgers 
44674d869b03SLABBE Corentin 	priv->oldlink = false;
4468bd00632cSLABBE Corentin 	priv->speed = SPEED_UNKNOWN;
4469bd00632cSLABBE Corentin 	priv->oldduplex = DUPLEX_UNKNOWN;
44707ac6653aSJeff Kirsher 	return 0;
44717ac6653aSJeff Kirsher }
4472b2e2f0c7SAndy Shevchenko EXPORT_SYMBOL_GPL(stmmac_suspend);
44737ac6653aSJeff Kirsher 
4474732fdf0eSGiuseppe CAVALLARO /**
447554139cf3SJoao Pinto  * stmmac_reset_queues_param - reset queue parameters
447654139cf3SJoao Pinto  * @dev: device pointer
447754139cf3SJoao Pinto  */
447854139cf3SJoao Pinto static void stmmac_reset_queues_param(struct stmmac_priv *priv)
447954139cf3SJoao Pinto {
448054139cf3SJoao Pinto 	u32 rx_cnt = priv->plat->rx_queues_to_use;
4481ce736788SJoao Pinto 	u32 tx_cnt = priv->plat->tx_queues_to_use;
448254139cf3SJoao Pinto 	u32 queue;
448354139cf3SJoao Pinto 
448454139cf3SJoao Pinto 	for (queue = 0; queue < rx_cnt; queue++) {
448554139cf3SJoao Pinto 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
448654139cf3SJoao Pinto 
448754139cf3SJoao Pinto 		rx_q->cur_rx = 0;
448854139cf3SJoao Pinto 		rx_q->dirty_rx = 0;
448954139cf3SJoao Pinto 	}
449054139cf3SJoao Pinto 
4491ce736788SJoao Pinto 	for (queue = 0; queue < tx_cnt; queue++) {
4492ce736788SJoao Pinto 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
4493ce736788SJoao Pinto 
4494ce736788SJoao Pinto 		tx_q->cur_tx = 0;
4495ce736788SJoao Pinto 		tx_q->dirty_tx = 0;
44968d212a9eSNiklas Cassel 		tx_q->mss = 0;
4497ce736788SJoao Pinto 	}
449854139cf3SJoao Pinto }
449954139cf3SJoao Pinto 
450054139cf3SJoao Pinto /**
4501732fdf0eSGiuseppe CAVALLARO  * stmmac_resume - resume callback
4502f4e7bd81SJoachim Eastwood  * @dev: device pointer
4503732fdf0eSGiuseppe CAVALLARO  * Description: when resume this function is invoked to setup the DMA and CORE
4504732fdf0eSGiuseppe CAVALLARO  * in a usable state.
4505732fdf0eSGiuseppe CAVALLARO  */
4506f4e7bd81SJoachim Eastwood int stmmac_resume(struct device *dev)
45077ac6653aSJeff Kirsher {
4508f4e7bd81SJoachim Eastwood 	struct net_device *ndev = dev_get_drvdata(dev);
45097ac6653aSJeff Kirsher 	struct stmmac_priv *priv = netdev_priv(ndev);
45107ac6653aSJeff Kirsher 
45117ac6653aSJeff Kirsher 	if (!netif_running(ndev))
45127ac6653aSJeff Kirsher 		return 0;
45137ac6653aSJeff Kirsher 
45147ac6653aSJeff Kirsher 	/* Power Down bit, into the PM register, is cleared
45157ac6653aSJeff Kirsher 	 * automatically as soon as a magic packet or a Wake-up frame
45167ac6653aSJeff Kirsher 	 * is received. Anyway, it's better to manually clear
45177ac6653aSJeff Kirsher 	 * this bit because it can generate problems while resuming
4518ceb69499SGiuseppe CAVALLARO 	 * from another devices (e.g. serial console).
4519ceb69499SGiuseppe CAVALLARO 	 */
4520623997fbSSrinivas Kandagatla 	if (device_may_wakeup(priv->device)) {
452129555fa3SThierry Reding 		mutex_lock(&priv->lock);
4522c10d4c82SJose Abreu 		stmmac_pmt(priv, priv->hw, 0);
452329555fa3SThierry Reding 		mutex_unlock(&priv->lock);
452489f7f2cfSSrinivas Kandagatla 		priv->irq_wake = 0;
4525623997fbSSrinivas Kandagatla 	} else {
4526db88f10aSSrinivas Kandagatla 		pinctrl_pm_select_default_state(priv->device);
45278d45e42bSLABBE Corentin 		/* enable the clk previously disabled */
4528f573c0b9Sjpinto 		clk_enable(priv->plat->stmmac_clk);
4529f573c0b9Sjpinto 		clk_enable(priv->plat->pclk);
4530623997fbSSrinivas Kandagatla 		/* reset the phy so that it's ready */
4531623997fbSSrinivas Kandagatla 		if (priv->mii)
4532623997fbSSrinivas Kandagatla 			stmmac_mdio_reset(priv->mii);
4533623997fbSSrinivas Kandagatla 	}
45347ac6653aSJeff Kirsher 
45357ac6653aSJeff Kirsher 	netif_device_attach(ndev);
45367ac6653aSJeff Kirsher 
453729555fa3SThierry Reding 	mutex_lock(&priv->lock);
4538f55d84b0SVincent Palatin 
453954139cf3SJoao Pinto 	stmmac_reset_queues_param(priv);
454054139cf3SJoao Pinto 
4541ae79a639SGiuseppe CAVALLARO 	stmmac_clear_descriptors(priv);
4542ae79a639SGiuseppe CAVALLARO 
4543fe131929SHuacai Chen 	stmmac_hw_setup(ndev, false);
4544777da230SGiuseppe CAVALLARO 	stmmac_init_tx_coalesce(priv);
4545ac316c78SGiuseppe CAVALLARO 	stmmac_set_rx_mode(ndev);
45467ac6653aSJeff Kirsher 
4547c22a3f48SJoao Pinto 	stmmac_enable_all_queues(priv);
45487ac6653aSJeff Kirsher 
4549c22a3f48SJoao Pinto 	stmmac_start_all_queues(priv);
45507ac6653aSJeff Kirsher 
455129555fa3SThierry Reding 	mutex_unlock(&priv->lock);
4552102463b1SFrancesco Virlinzi 
4553d6d50c7eSPhilippe Reynes 	if (ndev->phydev)
4554d6d50c7eSPhilippe Reynes 		phy_start(ndev->phydev);
4555102463b1SFrancesco Virlinzi 
45567ac6653aSJeff Kirsher 	return 0;
45577ac6653aSJeff Kirsher }
4558b2e2f0c7SAndy Shevchenko EXPORT_SYMBOL_GPL(stmmac_resume);
4559ba27ec66SGiuseppe CAVALLARO 
45607ac6653aSJeff Kirsher #ifndef MODULE
45617ac6653aSJeff Kirsher static int __init stmmac_cmdline_opt(char *str)
45627ac6653aSJeff Kirsher {
45637ac6653aSJeff Kirsher 	char *opt;
45647ac6653aSJeff Kirsher 
45657ac6653aSJeff Kirsher 	if (!str || !*str)
45667ac6653aSJeff Kirsher 		return -EINVAL;
45677ac6653aSJeff Kirsher 	while ((opt = strsep(&str, ",")) != NULL) {
45687ac6653aSJeff Kirsher 		if (!strncmp(opt, "debug:", 6)) {
4569ea2ab871SGiuseppe CAVALLARO 			if (kstrtoint(opt + 6, 0, &debug))
45707ac6653aSJeff Kirsher 				goto err;
45717ac6653aSJeff Kirsher 		} else if (!strncmp(opt, "phyaddr:", 8)) {
4572ea2ab871SGiuseppe CAVALLARO 			if (kstrtoint(opt + 8, 0, &phyaddr))
45737ac6653aSJeff Kirsher 				goto err;
45747ac6653aSJeff Kirsher 		} else if (!strncmp(opt, "buf_sz:", 7)) {
4575ea2ab871SGiuseppe CAVALLARO 			if (kstrtoint(opt + 7, 0, &buf_sz))
45767ac6653aSJeff Kirsher 				goto err;
45777ac6653aSJeff Kirsher 		} else if (!strncmp(opt, "tc:", 3)) {
4578ea2ab871SGiuseppe CAVALLARO 			if (kstrtoint(opt + 3, 0, &tc))
45797ac6653aSJeff Kirsher 				goto err;
45807ac6653aSJeff Kirsher 		} else if (!strncmp(opt, "watchdog:", 9)) {
4581ea2ab871SGiuseppe CAVALLARO 			if (kstrtoint(opt + 9, 0, &watchdog))
45827ac6653aSJeff Kirsher 				goto err;
45837ac6653aSJeff Kirsher 		} else if (!strncmp(opt, "flow_ctrl:", 10)) {
4584ea2ab871SGiuseppe CAVALLARO 			if (kstrtoint(opt + 10, 0, &flow_ctrl))
45857ac6653aSJeff Kirsher 				goto err;
45867ac6653aSJeff Kirsher 		} else if (!strncmp(opt, "pause:", 6)) {
4587ea2ab871SGiuseppe CAVALLARO 			if (kstrtoint(opt + 6, 0, &pause))
45887ac6653aSJeff Kirsher 				goto err;
4589506f669cSGiuseppe CAVALLARO 		} else if (!strncmp(opt, "eee_timer:", 10)) {
4590d765955dSGiuseppe CAVALLARO 			if (kstrtoint(opt + 10, 0, &eee_timer))
4591d765955dSGiuseppe CAVALLARO 				goto err;
45924a7d666aSGiuseppe CAVALLARO 		} else if (!strncmp(opt, "chain_mode:", 11)) {
45934a7d666aSGiuseppe CAVALLARO 			if (kstrtoint(opt + 11, 0, &chain_mode))
45944a7d666aSGiuseppe CAVALLARO 				goto err;
45957ac6653aSJeff Kirsher 		}
45967ac6653aSJeff Kirsher 	}
45977ac6653aSJeff Kirsher 	return 0;
45987ac6653aSJeff Kirsher 
45997ac6653aSJeff Kirsher err:
46007ac6653aSJeff Kirsher 	pr_err("%s: ERROR broken module parameter conversion", __func__);
46017ac6653aSJeff Kirsher 	return -EINVAL;
46027ac6653aSJeff Kirsher }
46037ac6653aSJeff Kirsher 
46047ac6653aSJeff Kirsher __setup("stmmaceth=", stmmac_cmdline_opt);
4605ceb69499SGiuseppe CAVALLARO #endif /* MODULE */
46066fc0d0f2SGiuseppe Cavallaro 
4607466c5ac8SMathieu Olivari static int __init stmmac_init(void)
4608466c5ac8SMathieu Olivari {
4609466c5ac8SMathieu Olivari #ifdef CONFIG_DEBUG_FS
4610466c5ac8SMathieu Olivari 	/* Create debugfs main directory if it doesn't exist yet */
4611466c5ac8SMathieu Olivari 	if (!stmmac_fs_dir) {
4612466c5ac8SMathieu Olivari 		stmmac_fs_dir = debugfs_create_dir(STMMAC_RESOURCE_NAME, NULL);
4613466c5ac8SMathieu Olivari 
4614466c5ac8SMathieu Olivari 		if (!stmmac_fs_dir || IS_ERR(stmmac_fs_dir)) {
4615466c5ac8SMathieu Olivari 			pr_err("ERROR %s, debugfs create directory failed\n",
4616466c5ac8SMathieu Olivari 			       STMMAC_RESOURCE_NAME);
4617466c5ac8SMathieu Olivari 
4618466c5ac8SMathieu Olivari 			return -ENOMEM;
4619466c5ac8SMathieu Olivari 		}
4620466c5ac8SMathieu Olivari 	}
4621466c5ac8SMathieu Olivari #endif
4622466c5ac8SMathieu Olivari 
4623466c5ac8SMathieu Olivari 	return 0;
4624466c5ac8SMathieu Olivari }
4625466c5ac8SMathieu Olivari 
4626466c5ac8SMathieu Olivari static void __exit stmmac_exit(void)
4627466c5ac8SMathieu Olivari {
4628466c5ac8SMathieu Olivari #ifdef CONFIG_DEBUG_FS
4629466c5ac8SMathieu Olivari 	debugfs_remove_recursive(stmmac_fs_dir);
4630466c5ac8SMathieu Olivari #endif
4631466c5ac8SMathieu Olivari }
4632466c5ac8SMathieu Olivari 
4633466c5ac8SMathieu Olivari module_init(stmmac_init)
4634466c5ac8SMathieu Olivari module_exit(stmmac_exit)
4635466c5ac8SMathieu Olivari 
46366fc0d0f2SGiuseppe Cavallaro MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet device driver");
46376fc0d0f2SGiuseppe Cavallaro MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
46386fc0d0f2SGiuseppe Cavallaro MODULE_LICENSE("GPL");
4639