/*******************************************************************************
  This is the driver for the ST MAC 10/100/1000 on-chip Ethernet controllers.
  ST Ethernet IPs are built around a Synopsys IP Core.

	Copyright(C) 2007-2011 STMicroelectronics Ltd

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>

  Documentation available at:
	http://www.stlinux.com
  Support available at:
	https://bugzilla.stlinux.com/
*******************************************************************************/

#include <linux/clk.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/if_ether.h>
#include <linux/crc32.h>
#include <linux/mii.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/prefetch.h>
#include <linux/pinctrl/consumer.h>
#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#endif /* CONFIG_DEBUG_FS */
#include <linux/net_tstamp.h>
#include "stmmac_ptp.h"
#include "stmmac.h"
#include <linux/reset.h>
#include <linux/of_mdio.h>
#include "dwmac1000.h"

#define STMMAC_ALIGN(x)	L1_CACHE_ALIGN(x)
#define	TSO_MAX_BUFF_SIZE	(SZ_16K - 1)

/* Module parameters */
#define TX_TIMEO	5000
static int watchdog = TX_TIMEO;
module_param(watchdog, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(watchdog, "Transmit timeout in milliseconds (default 5s)");

static int debug = -1;
module_param(debug, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)");

static int phyaddr = -1;
module_param(phyaddr, int, S_IRUGO);
MODULE_PARM_DESC(phyaddr, "Physical device address");

#define STMMAC_TX_THRESH	(DMA_TX_SIZE / 4)
#define STMMAC_RX_THRESH	(DMA_RX_SIZE / 4)

static int flow_ctrl = FLOW_OFF;
module_param(flow_ctrl, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(flow_ctrl, "Flow control ability [on/off]");

static int pause = PAUSE_TIME;
module_param(pause, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(pause, "Flow Control Pause Time");

#define TC_DEFAULT 64
static int tc = TC_DEFAULT;
module_param(tc, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(tc, "DMA threshold control value");

#define	DEFAULT_BUFSIZE	1536
static int buf_sz = DEFAULT_BUFSIZE;
module_param(buf_sz, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(buf_sz, "DMA buffer size");

#define	STMMAC_RX_COPYBREAK	256

static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
				      NETIF_MSG_LINK | NETIF_MSG_IFUP |
				      NETIF_MSG_IFDOWN | NETIF_MSG_TIMER);

#define STMMAC_DEFAULT_LPI_TIMER	1000
static int eee_timer = STMMAC_DEFAULT_LPI_TIMER;
module_param(eee_timer, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec");
#define STMMAC_LPI_T(x) (jiffies + msecs_to_jiffies(x))

/* By default the driver will use the ring mode to manage tx and rx descriptors,
 * but allow the user to force use of the chain mode instead of the ring mode.
 */
static unsigned int chain_mode;
module_param(chain_mode, int, S_IRUGO);
MODULE_PARM_DESC(chain_mode, "To use chain instead of ring mode");

static irqreturn_t stmmac_interrupt(int irq, void *dev_id);

#ifdef CONFIG_DEBUG_FS
static int stmmac_init_fs(struct net_device *dev);
static void stmmac_exit_fs(struct net_device *dev);
#endif

#define STMMAC_COAL_TIMER(x) (jiffies + usecs_to_jiffies(x))

/**
 * stmmac_verify_args - verify the driver parameters.
 * Description: it checks the driver parameters and sets a default in case of
 * errors.
 */
static void stmmac_verify_args(void)
{
	if (unlikely(watchdog < 0))
		watchdog = TX_TIMEO;
	if (unlikely((buf_sz < DEFAULT_BUFSIZE) || (buf_sz > BUF_SIZE_16KiB)))
		buf_sz = DEFAULT_BUFSIZE;
	if (unlikely(flow_ctrl > 1))
		flow_ctrl = FLOW_AUTO;
	else if (likely(flow_ctrl < 0))
		flow_ctrl = FLOW_OFF;
	if (unlikely((pause < 0) || (pause > 0xffff)))
		pause = PAUSE_TIME;
	if (eee_timer < 0)
		eee_timer = STMMAC_DEFAULT_LPI_TIMER;
}

/**
 * stmmac_clk_csr_set - dynamically set the MDC clock
 * @priv: driver private structure
 * Description: this is to dynamically set the MDC clock according to the csr
 * clock input.
 * Note:
 *	If a specific clk_csr value is passed from the platform
 *	this means that the CSR Clock Range selection cannot be
 *	changed at run-time and it is fixed (as reported in the driver
 *	documentation). Otherwise the driver will try to set the MDC
 *	clock dynamically according to the actual clock input.
 */
static void stmmac_clk_csr_set(struct stmmac_priv *priv)
{
	u32 clk_rate;

	clk_rate = clk_get_rate(priv->plat->stmmac_clk);

	/* Platform provided default clk_csr would be assumed valid
	 * for all other cases except for the below mentioned ones.
	 * For values higher than the IEEE 802.3 specified frequency
	 * we cannot estimate the proper divider as the frequency of
	 * clk_csr_i is not known. So we do not change the default
	 * divider.
	 */
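	/* Illustrative example: a 75 MHz csr clock falls into the
	 * 60-100 MHz range below, so STMMAC_CSR_60_100M would be selected.
	 */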
	if (!(priv->clk_csr & MAC_CSR_H_FRQ_MASK)) {
		if (clk_rate < CSR_F_35M)
			priv->clk_csr = STMMAC_CSR_20_35M;
		else if ((clk_rate >= CSR_F_35M) && (clk_rate < CSR_F_60M))
			priv->clk_csr = STMMAC_CSR_35_60M;
		else if ((clk_rate >= CSR_F_60M) && (clk_rate < CSR_F_100M))
			priv->clk_csr = STMMAC_CSR_60_100M;
		else if ((clk_rate >= CSR_F_100M) && (clk_rate < CSR_F_150M))
			priv->clk_csr = STMMAC_CSR_100_150M;
		else if ((clk_rate >= CSR_F_150M) && (clk_rate < CSR_F_250M))
			priv->clk_csr = STMMAC_CSR_150_250M;
		else if ((clk_rate >= CSR_F_250M) && (clk_rate < CSR_F_300M))
			priv->clk_csr = STMMAC_CSR_250_300M;
	}
}

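/**
 * print_pkt - hex-dump a packet buffer
 * @buf: pointer to the packet data
 * @len: number of bytes to dump
 * Description: debug helper used to dump a frame to the kernel log.
 */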
static void print_pkt(unsigned char *buf, int len)
{
	pr_debug("len = %d byte, buf addr: 0x%p\n", len, buf);
	print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, buf, len);
}

/**
 * stmmac_tx_avail - Get tx queue availability
 * @priv: driver private structure
 * @queue: TX queue index
 */
static inline u32 stmmac_tx_avail(struct stmmac_priv *priv, u32 queue)
{
	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
	u32 avail;

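	/* Free slots in the circular ring; one entry is kept unused so that
	 * a full ring can be distinguished from an empty one.
	 */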
	if (tx_q->dirty_tx > tx_q->cur_tx)
		avail = tx_q->dirty_tx - tx_q->cur_tx - 1;
	else
		avail = DMA_TX_SIZE - tx_q->cur_tx + tx_q->dirty_tx - 1;

	return avail;
}

/**
 * stmmac_rx_dirty - Get RX queue dirty
 * @priv: driver private structure
 * @queue: RX queue index
 */
static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv, u32 queue)
{
	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
	u32 dirty;

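	/* Descriptors between dirty_rx and cur_rx have already been used for
	 * reception and still have to be refilled by the driver.
	 */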
	if (rx_q->dirty_rx <= rx_q->cur_rx)
		dirty = rx_q->cur_rx - rx_q->dirty_rx;
	else
		dirty = DMA_RX_SIZE - rx_q->dirty_rx + rx_q->cur_rx;

	return dirty;
}

/**
 * stmmac_hw_fix_mac_speed - callback for speed selection
 * @priv: driver private structure
 * Description: on some platforms (e.g. ST), some HW system configuration
 * registers have to be set according to the link speed negotiated.
 */
static inline void stmmac_hw_fix_mac_speed(struct stmmac_priv *priv)
{
	struct net_device *ndev = priv->dev;
	struct phy_device *phydev = ndev->phydev;

	if (likely(priv->plat->fix_mac_speed))
		priv->plat->fix_mac_speed(priv->plat->bsp_priv, phydev->speed);
}

/**
 * stmmac_enable_eee_mode - check and enter LPI mode
 * @priv: driver private structure
 * Description: this function verifies that the TX path is idle and, if so,
 * enters LPI mode in case of EEE.
 */
static void stmmac_enable_eee_mode(struct stmmac_priv *priv)
{
	u32 tx_cnt = priv->plat->tx_queues_to_use;
	u32 queue;

	/* check if all TX queues have the work finished */
	for (queue = 0; queue < tx_cnt; queue++) {
		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];

		if (tx_q->dirty_tx != tx_q->cur_tx)
			return; /* still unfinished work */
	}

	/* Check and enter in LPI mode */
	if (!priv->tx_path_in_lpi_mode)
		priv->hw->mac->set_eee_mode(priv->hw,
					    priv->plat->en_tx_lpi_clockgating);
}

/**
 * stmmac_disable_eee_mode - disable and exit from LPI mode
 * @priv: driver private structure
 * Description: this function is to exit and disable EEE in case the
 * LPI state is true. This is called by the xmit.
 */
void stmmac_disable_eee_mode(struct stmmac_priv *priv)
{
	priv->hw->mac->reset_eee_mode(priv->hw);
	del_timer_sync(&priv->eee_ctrl_timer);
	priv->tx_path_in_lpi_mode = false;
}

/**
 * stmmac_eee_ctrl_timer - EEE TX SW timer.
 * @arg : data hook
 * Description:
 *  if there is no data transfer and if we are not in LPI state,
 *  then the MAC Transmitter can be moved to the LPI state.
 */
static void stmmac_eee_ctrl_timer(unsigned long arg)
{
	struct stmmac_priv *priv = (struct stmmac_priv *)arg;

	stmmac_enable_eee_mode(priv);
	mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer));
}

/**
 * stmmac_eee_init - init EEE
 * @priv: driver private structure
 * Description:
 *  if the GMAC supports the EEE (from the HW cap reg) and the phy device
 *  can also manage EEE, this function enables the LPI state and starts
 *  the related timer.
 */
bool stmmac_eee_init(struct stmmac_priv *priv)
{
	struct net_device *ndev = priv->dev;
	unsigned long flags;
	bool ret = false;

	/* Using PCS we cannot deal with the PHY registers at this stage
	 * so we do not support extra features like EEE.
	 */
	if ((priv->hw->pcs == STMMAC_PCS_RGMII) ||
	    (priv->hw->pcs == STMMAC_PCS_TBI) ||
	    (priv->hw->pcs == STMMAC_PCS_RTBI))
		goto out;

	/* MAC core supports the EEE feature. */
	if (priv->dma_cap.eee) {
		int tx_lpi_timer = priv->tx_lpi_timer;

		/* Check if the PHY supports EEE */
		if (phy_init_eee(ndev->phydev, 1)) {
			/* To manage at run-time if the EEE cannot be supported
			 * anymore (for example because the lp caps have been
			 * changed).
			 * In that case the driver disables its own timers.
			 */
			spin_lock_irqsave(&priv->lock, flags);
			if (priv->eee_active) {
				netdev_dbg(priv->dev, "disable EEE\n");
				del_timer_sync(&priv->eee_ctrl_timer);
				priv->hw->mac->set_eee_timer(priv->hw, 0,
							     tx_lpi_timer);
			}
			priv->eee_active = 0;
			spin_unlock_irqrestore(&priv->lock, flags);
			goto out;
		}
		/* Activate the EEE and start timers */
		spin_lock_irqsave(&priv->lock, flags);
		if (!priv->eee_active) {
			priv->eee_active = 1;
			setup_timer(&priv->eee_ctrl_timer,
				    stmmac_eee_ctrl_timer,
				    (unsigned long)priv);
			mod_timer(&priv->eee_ctrl_timer,
				  STMMAC_LPI_T(eee_timer));

			priv->hw->mac->set_eee_timer(priv->hw,
						     STMMAC_DEFAULT_LIT_LS,
						     tx_lpi_timer);
		}
		/* Set HW EEE according to the speed */
		priv->hw->mac->set_eee_pls(priv->hw, ndev->phydev->link);

		ret = true;
		spin_unlock_irqrestore(&priv->lock, flags);

		netdev_dbg(priv->dev, "Energy-Efficient Ethernet initialized\n");
	}
out:
	return ret;
}

/* stmmac_get_tx_hwtstamp - get HW TX timestamps
 * @priv: driver private structure
 * @p : descriptor pointer
 * @skb : the socket buffer
 * Description :
 * This function reads the timestamp from the descriptor and passes it to
 * the stack. It also performs some sanity checks.
 */
static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv,
				   struct dma_desc *p, struct sk_buff *skb)
{
	struct skb_shared_hwtstamps shhwtstamp;
	u64 ns;

	if (!priv->hwts_tx_en)
		return;

	/* exit if skb doesn't support hw tstamp */
	if (likely(!skb || !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)))
		return;

	/* check tx tstamp status */
	if (!priv->hw->desc->get_tx_timestamp_status(p)) {
		/* get the valid tstamp */
		ns = priv->hw->desc->get_timestamp(p, priv->adv_ts);

		memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
		shhwtstamp.hwtstamp = ns_to_ktime(ns);

		netdev_info(priv->dev, "get valid TX hw timestamp %llu\n", ns);
		/* pass tstamp to stack */
		skb_tstamp_tx(skb, &shhwtstamp);
	}

	return;
}

/* stmmac_get_rx_hwtstamp - get HW RX timestamps
 * @priv: driver private structure
 * @p : descriptor pointer
 * @np : next descriptor pointer
 * @skb : the socket buffer
 * Description :
 * This function reads the received packet's timestamp from the descriptor
 * and passes it to the stack. It also performs some sanity checks.
 */
static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p,
				   struct dma_desc *np, struct sk_buff *skb)
{
	struct skb_shared_hwtstamps *shhwtstamp = NULL;
	u64 ns;

	if (!priv->hwts_rx_en)
		return;

	/* Check if timestamp is available */
	if (!priv->hw->desc->get_rx_timestamp_status(p, priv->adv_ts)) {
		/* For GMAC4, the valid timestamp is from CTX next desc. */
		if (priv->plat->has_gmac4)
			ns = priv->hw->desc->get_timestamp(np, priv->adv_ts);
		else
			ns = priv->hw->desc->get_timestamp(p, priv->adv_ts);

		netdev_info(priv->dev, "get valid RX hw timestamp %llu\n", ns);
		shhwtstamp = skb_hwtstamps(skb);
		memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
		shhwtstamp->hwtstamp = ns_to_ktime(ns);
	} else {
		netdev_err(priv->dev, "cannot get RX hw timestamp\n");
	}
}

/**
 *  stmmac_hwtstamp_ioctl - control hardware timestamping.
 *  @dev: device pointer.
 *  @ifr: An IOCTL specific structure, that can contain a pointer to
 *  a proprietary structure used to pass information to the driver.
 *  Description:
 *  This function configures the MAC to enable/disable both outgoing(TX)
 *  and incoming(RX) packets time stamping based on user input.
 *  Return Value:
 *  0 on success and an appropriate -ve integer on failure.
 */
static int stmmac_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	struct hwtstamp_config config;
	struct timespec64 now;
	u64 temp = 0;
	u32 ptp_v2 = 0;
	u32 tstamp_all = 0;
	u32 ptp_over_ipv4_udp = 0;
	u32 ptp_over_ipv6_udp = 0;
	u32 ptp_over_ethernet = 0;
	u32 snap_type_sel = 0;
	u32 ts_master_en = 0;
	u32 ts_event_en = 0;
	u32 value = 0;
	u32 sec_inc;

	if (!(priv->dma_cap.time_stamp || priv->adv_ts)) {
		netdev_alert(priv->dev, "No support for HW time stamping\n");
		priv->hwts_tx_en = 0;
		priv->hwts_rx_en = 0;

		return -EOPNOTSUPP;
	}

	if (copy_from_user(&config, ifr->ifr_data,
			   sizeof(struct hwtstamp_config)))
		return -EFAULT;

	netdev_dbg(priv->dev, "%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n",
		   __func__, config.flags, config.tx_type, config.rx_filter);

	/* reserved for future extensions */
	if (config.flags)
		return -EINVAL;

	if (config.tx_type != HWTSTAMP_TX_OFF &&
	    config.tx_type != HWTSTAMP_TX_ON)
		return -ERANGE;

	if (priv->adv_ts) {
		switch (config.rx_filter) {
		case HWTSTAMP_FILTER_NONE:
			/* time stamp no incoming packet at all */
			config.rx_filter = HWTSTAMP_FILTER_NONE;
			break;

		case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
			/* PTP v1, UDP, any kind of event packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
			/* take time stamp for all event messages */
			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
			/* PTP v1, UDP, Sync packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
			/* take time stamp for SYNC messages only */
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
			/* PTP v1, UDP, Delay_req packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
			/* take time stamp for Delay_Req messages only */
			ts_master_en = PTP_TCR_TSMSTRENA;
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
			/* PTP v2, UDP, any kind of event packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for all event messages */
			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
			/* PTP v2, UDP, Sync packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for SYNC messages only */
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
			/* PTP v2, UDP, Delay_req packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for Delay_Req messages only */
			ts_master_en = PTP_TCR_TSMSTRENA;
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_EVENT:
			/* PTP v2/802.1AS, any layer, any kind of event packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for all event messages */
			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			ptp_over_ethernet = PTP_TCR_TSIPENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_SYNC:
			/* PTP v2/802.1AS, any layer, Sync packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for SYNC messages only */
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			ptp_over_ethernet = PTP_TCR_TSIPENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
			/* PTP v2/802.1AS, any layer, Delay_req packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for Delay_Req messages only */
			ts_master_en = PTP_TCR_TSMSTRENA;
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			ptp_over_ethernet = PTP_TCR_TSIPENA;
			break;

		case HWTSTAMP_FILTER_ALL:
			/* time stamp any incoming packet */
			config.rx_filter = HWTSTAMP_FILTER_ALL;
			tstamp_all = PTP_TCR_TSENALL;
			break;

		default:
			return -ERANGE;
		}
	} else {
		switch (config.rx_filter) {
		case HWTSTAMP_FILTER_NONE:
			config.rx_filter = HWTSTAMP_FILTER_NONE;
			break;
		default:
			/* PTP v1, UDP, any kind of event packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
			break;
		}
	}
	priv->hwts_rx_en = ((config.rx_filter == HWTSTAMP_FILTER_NONE) ? 0 : 1);
	priv->hwts_tx_en = config.tx_type == HWTSTAMP_TX_ON;

	if (!priv->hwts_tx_en && !priv->hwts_rx_en)
		priv->hw->ptp->config_hw_tstamping(priv->ptpaddr, 0);
	else {
		value = (PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | PTP_TCR_TSCTRLSSR |
			 tstamp_all | ptp_v2 | ptp_over_ethernet |
			 ptp_over_ipv6_udp | ptp_over_ipv4_udp | ts_event_en |
			 ts_master_en | snap_type_sel);
		priv->hw->ptp->config_hw_tstamping(priv->ptpaddr, value);

		/* program Sub Second Increment reg */
		sec_inc = priv->hw->ptp->config_sub_second_increment(
			priv->ptpaddr, priv->plat->clk_ptp_rate,
			priv->plat->has_gmac4);
		temp = div_u64(1000000000ULL, sec_inc);

		/* Calculate the default addend value:
		 * addend = (2^32) / freq_div_ratio
		 * where freq_div_ratio = clk_ptp_rate / (1e9 / sec_inc),
		 * i.e. addend = 2^32 * (1e9 / sec_inc) / clk_ptp_rate
		 */
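		/* For example, with illustrative values sec_inc = 20 ns and
		 * clk_ptp_rate = 62.5 MHz:
		 * addend = 2^32 * (1e9 / 20) / 62500000 ~= 0.8 * 2^32
		 *        = 0xCCCCCCCC
		 */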
		temp = (u64)(temp << 32);
		priv->default_addend = div_u64(temp, priv->plat->clk_ptp_rate);
		priv->hw->ptp->config_addend(priv->ptpaddr,
					     priv->default_addend);

		/* initialize system time */
		ktime_get_real_ts64(&now);

		/* lower 32 bits of tv_sec are safe until y2106 */
		priv->hw->ptp->init_systime(priv->ptpaddr, (u32)now.tv_sec,
					    now.tv_nsec);
	}

	return copy_to_user(ifr->ifr_data, &config,
			    sizeof(struct hwtstamp_config)) ? -EFAULT : 0;
}

/**
 * stmmac_init_ptp - init PTP
 * @priv: driver private structure
 * Description: this is to verify if the HW supports the PTPv1 or PTPv2.
 * This is done by looking at the HW cap. register.
 * This function also registers the ptp driver.
 */
static int stmmac_init_ptp(struct stmmac_priv *priv)
{
	if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
		return -EOPNOTSUPP;

	priv->adv_ts = 0;
	/* Check if adv_ts can be enabled for dwmac 4.x core */
	if (priv->plat->has_gmac4 && priv->dma_cap.atime_stamp)
		priv->adv_ts = 1;
	/* Dwmac 3.x core with extend_desc can support adv_ts */
	else if (priv->extend_desc && priv->dma_cap.atime_stamp)
		priv->adv_ts = 1;

	if (priv->dma_cap.time_stamp)
		netdev_info(priv->dev, "IEEE 1588-2002 Timestamp supported\n");

	if (priv->adv_ts)
		netdev_info(priv->dev,
			    "IEEE 1588-2008 Advanced Timestamp supported\n");

	priv->hw->ptp = &stmmac_ptp;
	priv->hwts_tx_en = 0;
	priv->hwts_rx_en = 0;

	stmmac_ptp_register(priv);

	return 0;
}

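/**
 * stmmac_release_ptp - release PTP resources
 * @priv: driver private structure
 * Description: disable the PTP reference clock (if one is used) and
 * unregister the PTP clock driver.
 */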
static void stmmac_release_ptp(struct stmmac_priv *priv)
{
	if (priv->plat->clk_ptp_ref)
		clk_disable_unprepare(priv->plat->clk_ptp_ref);
	stmmac_ptp_unregister(priv);
}

/**
 *  stmmac_mac_flow_ctrl - Configure flow control in all queues
 *  @priv: driver private structure
 *  @duplex: duplex mode negotiated by the PHY
 *  Description: It is used for configuring the flow control in all queues
 */
static void stmmac_mac_flow_ctrl(struct stmmac_priv *priv, u32 duplex)
{
	u32 tx_cnt = priv->plat->tx_queues_to_use;

	priv->hw->mac->flow_ctrl(priv->hw, duplex, priv->flow_ctrl,
				 priv->pause, tx_cnt);
}

/**
 * stmmac_adjust_link - adjusts the link parameters
 * @dev: net device structure
 * Description: this is the helper called by the physical abstraction layer
 * drivers to communicate the phy link status. According to the speed and
 * duplex this driver can invoke registered glue-logic as well.
 * It also invokes the EEE initialization because it could happen when
 * switching on different networks (that are eee capable).
 */
static void stmmac_adjust_link(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	struct phy_device *phydev = dev->phydev;
	unsigned long flags;
	int new_state = 0;

	if (!phydev)
		return;

	spin_lock_irqsave(&priv->lock, flags);

	if (phydev->link) {
		u32 ctrl = readl(priv->ioaddr + MAC_CTRL_REG);

		/* Now we make sure that we can be in full duplex mode.
		 * If not, we operate in half-duplex mode.
		 */
		if (phydev->duplex != priv->oldduplex) {
			new_state = 1;
			if (!(phydev->duplex))
				ctrl &= ~priv->hw->link.duplex;
			else
				ctrl |= priv->hw->link.duplex;
			priv->oldduplex = phydev->duplex;
		}
		/* Flow Control operation */
		if (phydev->pause)
			stmmac_mac_flow_ctrl(priv, phydev->duplex);

		if (phydev->speed != priv->speed) {
			new_state = 1;
			switch (phydev->speed) {
			case 1000:
				if (priv->plat->has_gmac ||
				    priv->plat->has_gmac4)
					ctrl &= ~priv->hw->link.port;
				break;
			case 100:
				if (priv->plat->has_gmac ||
				    priv->plat->has_gmac4) {
					ctrl |= priv->hw->link.port;
					ctrl |= priv->hw->link.speed;
				} else {
					ctrl &= ~priv->hw->link.port;
				}
				break;
			case 10:
				if (priv->plat->has_gmac ||
				    priv->plat->has_gmac4) {
					ctrl |= priv->hw->link.port;
					ctrl &= ~(priv->hw->link.speed);
				} else {
					ctrl &= ~priv->hw->link.port;
				}
				break;
			default:
				netif_warn(priv, link, priv->dev,
					   "broken speed: %d\n", phydev->speed);
				phydev->speed = SPEED_UNKNOWN;
				break;
			}
			if (phydev->speed != SPEED_UNKNOWN)
				stmmac_hw_fix_mac_speed(priv);
			priv->speed = phydev->speed;
		}

		writel(ctrl, priv->ioaddr + MAC_CTRL_REG);

		if (!priv->oldlink) {
			new_state = 1;
			priv->oldlink = 1;
		}
	} else if (priv->oldlink) {
		new_state = 1;
		priv->oldlink = 0;
		priv->speed = SPEED_UNKNOWN;
		priv->oldduplex = DUPLEX_UNKNOWN;
	}

	if (new_state && netif_msg_link(priv))
		phy_print_status(phydev);

	spin_unlock_irqrestore(&priv->lock, flags);

	if (phydev->is_pseudo_fixed_link)
		/* Stop the PHY layer from calling the hook to adjust the link
		 * in case a switch is attached to the stmmac driver.
		 */
		phydev->irq = PHY_IGNORE_INTERRUPT;
	else
		/* At this stage, init the EEE if supported.
		 * Never called in case of fixed_link.
		 */
		priv->eee_enabled = stmmac_eee_init(priv);
}

/**
 * stmmac_check_pcs_mode - verify if RGMII/SGMII is supported
 * @priv: driver private structure
 * Description: this is to verify if the HW supports the Physical Coding
 * Sublayer (PCS) interface that can be used when the MAC is configured
 * for the TBI, RTBI, or SGMII PHY interface.
 */
static void stmmac_check_pcs_mode(struct stmmac_priv *priv)
{
	int interface = priv->plat->interface;

	if (priv->dma_cap.pcs) {
		if ((interface == PHY_INTERFACE_MODE_RGMII) ||
		    (interface == PHY_INTERFACE_MODE_RGMII_ID) ||
		    (interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
		    (interface == PHY_INTERFACE_MODE_RGMII_TXID)) {
			netdev_dbg(priv->dev, "PCS RGMII support enabled\n");
			priv->hw->pcs = STMMAC_PCS_RGMII;
		} else if (interface == PHY_INTERFACE_MODE_SGMII) {
			netdev_dbg(priv->dev, "PCS SGMII support enabled\n");
			priv->hw->pcs = STMMAC_PCS_SGMII;
		}
	}
}

/**
 * stmmac_init_phy - PHY initialization
 * @dev: net device structure
 * Description: it initializes the driver's PHY state, and attaches the PHY
 * to the mac driver.
 *  Return value:
 *  0 on success
 */
static int stmmac_init_phy(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	struct phy_device *phydev;
	char phy_id_fmt[MII_BUS_ID_SIZE + 3];
	char bus_id[MII_BUS_ID_SIZE];
	int interface = priv->plat->interface;
	int max_speed = priv->plat->max_speed;

	priv->oldlink = 0;
	priv->speed = SPEED_UNKNOWN;
	priv->oldduplex = DUPLEX_UNKNOWN;

	if (priv->plat->phy_node) {
		phydev = of_phy_connect(dev, priv->plat->phy_node,
					&stmmac_adjust_link, 0, interface);
	} else {
		snprintf(bus_id, MII_BUS_ID_SIZE, "stmmac-%x",
			 priv->plat->bus_id);

		snprintf(phy_id_fmt, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, bus_id,
			 priv->plat->phy_addr);
		netdev_dbg(priv->dev, "%s: trying to attach to %s\n", __func__,
			   phy_id_fmt);

		phydev = phy_connect(dev, phy_id_fmt, &stmmac_adjust_link,
				     interface);
	}

	if (IS_ERR_OR_NULL(phydev)) {
		netdev_err(priv->dev, "Could not attach to PHY\n");
		if (!phydev)
			return -ENODEV;

		return PTR_ERR(phydev);
	}

	/* Stop Advertising 1000BASE Capability if interface is not GMII */
	if ((interface == PHY_INTERFACE_MODE_MII) ||
	    (interface == PHY_INTERFACE_MODE_RMII) ||
	    (max_speed < 1000 && max_speed > 0))
		phydev->advertising &= ~(SUPPORTED_1000baseT_Half |
					 SUPPORTED_1000baseT_Full);

	/*
	 * Broken HW is sometimes missing the pull-up resistor on the
	 * MDIO line, which results in reads to non-existent devices returning
	 * 0 rather than 0xffff. Catch this here and treat 0 as a non-existent
	 * device as well.
	 * Note: phydev->phy_id is the result of reading the UID PHY registers.
	 */
	if (!priv->plat->phy_node && phydev->phy_id == 0) {
		phy_disconnect(phydev);
		return -ENODEV;
	}

	/* stmmac_adjust_link will change this to PHY_IGNORE_INTERRUPT to avoid
	 * subsequent PHY polling; make sure we force a link transition if
	 * we have a UP/DOWN/UP transition
	 */
	if (phydev->is_pseudo_fixed_link)
		phydev->irq = PHY_POLL;

	phy_attached_info(phydev);
	return 0;
}

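/**
 * stmmac_display_rings - display the RX and TX descriptor rings
 * @priv: driver private structure
 * Description: dump the content of every RX and TX descriptor ring,
 * mainly for debug purposes.
 */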
static void stmmac_display_rings(struct stmmac_priv *priv)
{
	u32 rx_cnt = priv->plat->rx_queues_to_use;
	u32 tx_cnt = priv->plat->tx_queues_to_use;
	void *head_rx, *head_tx;
	u32 queue;

	/* Display RX rings */
	for (queue = 0; queue < rx_cnt; queue++) {
		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];

		pr_info("\tRX Queue %d rings\n", queue);

		if (priv->extend_desc)
			head_rx = (void *)rx_q->dma_erx;
		else
			head_rx = (void *)rx_q->dma_rx;

		/* Display Rx ring */
		priv->hw->desc->display_ring(head_rx, DMA_RX_SIZE, true);
	}

	/* Display TX rings */
	for (queue = 0; queue < tx_cnt; queue++) {
		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];

		pr_info("\tTX Queue %d rings\n", queue);

		if (priv->extend_desc)
			head_tx = (void *)tx_q->dma_etx;
		else
			head_tx = (void *)tx_q->dma_tx;

		/* Display Tx ring */
		priv->hw->desc->display_ring(head_tx, DMA_TX_SIZE, false);
	}
}

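/**
 * stmmac_set_bfsize - select the DMA buffer size according to the MTU
 * @mtu: device MTU
 * @bufsize: current buffer size
 * Description: return the DMA buffer size needed to fit the given MTU:
 * 8KiB, 4KiB, 2KiB or DEFAULT_BUFSIZE.
 */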
static int stmmac_set_bfsize(int mtu, int bufsize)
{
	int ret = bufsize;

	if (mtu >= BUF_SIZE_4KiB)
		ret = BUF_SIZE_8KiB;
	else if (mtu >= BUF_SIZE_2KiB)
		ret = BUF_SIZE_4KiB;
	else if (mtu > DEFAULT_BUFSIZE)
		ret = BUF_SIZE_2KiB;
	else
		ret = DEFAULT_BUFSIZE;

	return ret;
}

96832ceabcaSGiuseppe CAVALLARO /**
969aff3d9efSJoao Pinto  * stmmac_clear_rx_descriptors - clear the descriptors of a RX queue
970aff3d9efSJoao Pinto  * @priv: driver private structure
971aff3d9efSJoao Pinto  * @queue: RX queue index
972aff3d9efSJoao Pinto  * Description: this function is called to clear the RX descriptors
973aff3d9efSJoao Pinto  * in case of both basic and extended descriptors are used.
974aff3d9efSJoao Pinto  */
975aff3d9efSJoao Pinto static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv, u32 queue)
976aff3d9efSJoao Pinto {
977aff3d9efSJoao Pinto 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
978aff3d9efSJoao Pinto 	u32 i = 0;
979aff3d9efSJoao Pinto 
980aff3d9efSJoao Pinto 	/* Clear the RX descriptors */
981aff3d9efSJoao Pinto 	for (i = 0; i < DMA_RX_SIZE; i++)
982aff3d9efSJoao Pinto 		if (priv->extend_desc)
983aff3d9efSJoao Pinto 			priv->hw->desc->init_rx_desc(&rx_q->dma_erx[i].basic,
984aff3d9efSJoao Pinto 						     priv->use_riwt, priv->mode,
985aff3d9efSJoao Pinto 						     (i == DMA_RX_SIZE - 1));
986aff3d9efSJoao Pinto 		else
987aff3d9efSJoao Pinto 			priv->hw->desc->init_rx_desc(&rx_q->dma_rx[i],
988aff3d9efSJoao Pinto 						     priv->use_riwt, priv->mode,
989aff3d9efSJoao Pinto 						     (i == DMA_RX_SIZE - 1));
990aff3d9efSJoao Pinto }
991aff3d9efSJoao Pinto 
992aff3d9efSJoao Pinto /**
993aff3d9efSJoao Pinto  * stmmac_clear_tx_descriptors - clear the descriptors of a TX queue
994aff3d9efSJoao Pinto  * @priv: driver private structure
995aff3d9efSJoao Pinto  * @queue: TX queue index
996aff3d9efSJoao Pinto  * Description: this function is called to clear the TX descriptors,
997aff3d9efSJoao Pinto  * whether basic or extended descriptors are in use.
998aff3d9efSJoao Pinto  */
999aff3d9efSJoao Pinto static void stmmac_clear_tx_descriptors(struct stmmac_priv *priv, u32 queue)
1000aff3d9efSJoao Pinto {
1001aff3d9efSJoao Pinto 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1002aff3d9efSJoao Pinto 	u32 i = 0;
1003aff3d9efSJoao Pinto 
1004aff3d9efSJoao Pinto 	/* Clear the TX descriptors */
1005aff3d9efSJoao Pinto 	for (i = 0; i < DMA_TX_SIZE; i++)
1006aff3d9efSJoao Pinto 		if (priv->extend_desc)
1007aff3d9efSJoao Pinto 			priv->hw->desc->init_tx_desc(&tx_q->dma_etx[i].basic,
1008aff3d9efSJoao Pinto 						     priv->mode,
1009aff3d9efSJoao Pinto 						     (i == DMA_TX_SIZE - 1));
1010aff3d9efSJoao Pinto 		else
1011aff3d9efSJoao Pinto 			priv->hw->desc->init_tx_desc(&tx_q->dma_tx[i],
1012aff3d9efSJoao Pinto 						     priv->mode,
1013aff3d9efSJoao Pinto 						     (i == DMA_TX_SIZE - 1));
1014aff3d9efSJoao Pinto }
1015aff3d9efSJoao Pinto 
1016aff3d9efSJoao Pinto /**
1017732fdf0eSGiuseppe CAVALLARO  * stmmac_clear_descriptors - clear descriptors
101832ceabcaSGiuseppe CAVALLARO  * @priv: driver private structure
101932ceabcaSGiuseppe CAVALLARO  * Description: this function is called to clear the TX and RX descriptors,
102032ceabcaSGiuseppe CAVALLARO  * whether basic or extended descriptors are in use.
102132ceabcaSGiuseppe CAVALLARO  */
1022c24602efSGiuseppe CAVALLARO static void stmmac_clear_descriptors(struct stmmac_priv *priv)
1023c24602efSGiuseppe CAVALLARO {
1024aff3d9efSJoao Pinto 	u32 rx_queue_cnt = priv->plat->rx_queues_to_use;
1025aff3d9efSJoao Pinto 	u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1026aff3d9efSJoao Pinto 	u32 queue;
1027c24602efSGiuseppe CAVALLARO 
1028aff3d9efSJoao Pinto 	for (queue = 0; queue < rx_queue_cnt; queue++)
1029aff3d9efSJoao Pinto 		stmmac_clear_rx_descriptors(priv, queue);
1030aff3d9efSJoao Pinto 
1031aff3d9efSJoao Pinto 	for (queue = 0; queue < tx_queue_cnt; queue++)
1032aff3d9efSJoao Pinto 		stmmac_clear_tx_descriptors(priv, queue);
1033c24602efSGiuseppe CAVALLARO }
1034c24602efSGiuseppe CAVALLARO 
1035732fdf0eSGiuseppe CAVALLARO /**
1036732fdf0eSGiuseppe CAVALLARO  * stmmac_init_rx_buffers - init the RX descriptor buffer.
1037732fdf0eSGiuseppe CAVALLARO  * @priv: driver private structure
1038732fdf0eSGiuseppe CAVALLARO  * @p: descriptor pointer
1039732fdf0eSGiuseppe CAVALLARO  * @i: descriptor index
1040732fdf0eSGiuseppe CAVALLARO  * @flags: gfp flag.
1041aff3d9efSJoao Pinto  * @queue: RX queue index
1042732fdf0eSGiuseppe CAVALLARO  * Description: this function is called to allocate a receive buffer, perform
1043732fdf0eSGiuseppe CAVALLARO  * the DMA mapping and init the descriptor.
1044732fdf0eSGiuseppe CAVALLARO  */
1045c24602efSGiuseppe CAVALLARO static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
1046aff3d9efSJoao Pinto 				  int i, gfp_t flags, u32 queue)
1047c24602efSGiuseppe CAVALLARO {
1048aff3d9efSJoao Pinto 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1049c24602efSGiuseppe CAVALLARO 	struct sk_buff *skb;
1050c24602efSGiuseppe CAVALLARO 
10514ec49a37SVineet Gupta 	skb = __netdev_alloc_skb_ip_align(priv->dev, priv->dma_buf_sz, flags);
105256329137SBartlomiej Zolnierkiewicz 	if (!skb) {
105338ddc59dSLABBE Corentin 		netdev_err(priv->dev,
105438ddc59dSLABBE Corentin 			   "%s: Rx init fails; skb is NULL\n", __func__);
105556329137SBartlomiej Zolnierkiewicz 		return -ENOMEM;
1056c24602efSGiuseppe CAVALLARO 	}
1057aff3d9efSJoao Pinto 	rx_q->rx_skbuff[i] = skb;
1058aff3d9efSJoao Pinto 	rx_q->rx_skbuff_dma[i] = dma_map_single(priv->device, skb->data,
1059c24602efSGiuseppe CAVALLARO 						priv->dma_buf_sz,
1060c24602efSGiuseppe CAVALLARO 						DMA_FROM_DEVICE);
1061aff3d9efSJoao Pinto 	if (dma_mapping_error(priv->device, rx_q->rx_skbuff_dma[i])) {
106238ddc59dSLABBE Corentin 		netdev_err(priv->dev, "%s: DMA mapping error\n", __func__);
106356329137SBartlomiej Zolnierkiewicz 		dev_kfree_skb_any(skb);
106456329137SBartlomiej Zolnierkiewicz 		return -EINVAL;
106556329137SBartlomiej Zolnierkiewicz 	}
1066c24602efSGiuseppe CAVALLARO 
1067f748be53SAlexandre TORGUE 	if (priv->synopsys_id >= DWMAC_CORE_4_00)
1068aff3d9efSJoao Pinto 		p->des0 = cpu_to_le32(rx_q->rx_skbuff_dma[i]);
1069f748be53SAlexandre TORGUE 	else
1070aff3d9efSJoao Pinto 		p->des2 = cpu_to_le32(rx_q->rx_skbuff_dma[i]);
1071c24602efSGiuseppe CAVALLARO 
107229896a67SGiuseppe CAVALLARO 	if ((priv->hw->mode->init_desc3) &&
1073c24602efSGiuseppe CAVALLARO 	    (priv->dma_buf_sz == BUF_SIZE_16KiB))
107429896a67SGiuseppe CAVALLARO 		priv->hw->mode->init_desc3(p);
1075c24602efSGiuseppe CAVALLARO 
1076c24602efSGiuseppe CAVALLARO 	return 0;
1077c24602efSGiuseppe CAVALLARO }
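/*
 * Note on the buffer address programming above: GMAC4 and newer cores
 * (synopsys_id >= DWMAC_CORE_4_00) take the RX buffer address in des0,
 * while older cores use des2. The optional mode->init_desc3() hook is
 * only invoked for 16KiB buffers, where it typically points des3 at the
 * second half of the buffer (a hedged note; the exact behaviour lives in
 * the ring/chain mode callbacks).
 */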
1078c24602efSGiuseppe CAVALLARO 
1079aff3d9efSJoao Pinto /**
1080aff3d9efSJoao Pinto  * stmmac_free_rx_buffers - free RX buffers.
1081aff3d9efSJoao Pinto  * @priv: driver private structure
1082aff3d9efSJoao Pinto  * @queue: RX queue index
1083aff3d9efSJoao Pinto  * @i: buffer index
1084aff3d9efSJoao Pinto  */
1085aff3d9efSJoao Pinto static void stmmac_free_rx_buffers(struct stmmac_priv *priv, u32 queue, int i)
108656329137SBartlomiej Zolnierkiewicz {
1087aff3d9efSJoao Pinto 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1088aff3d9efSJoao Pinto 
1089aff3d9efSJoao Pinto 	if (rx_q->rx_skbuff[i]) {
1090aff3d9efSJoao Pinto 		dma_unmap_single(priv->device, rx_q->rx_skbuff_dma[i],
109156329137SBartlomiej Zolnierkiewicz 				 priv->dma_buf_sz, DMA_FROM_DEVICE);
1092aff3d9efSJoao Pinto 		dev_kfree_skb_any(rx_q->rx_skbuff[i]);
109356329137SBartlomiej Zolnierkiewicz 	}
1094aff3d9efSJoao Pinto 	rx_q->rx_skbuff[i] = NULL;
109556329137SBartlomiej Zolnierkiewicz }
109656329137SBartlomiej Zolnierkiewicz 
10977ac6653aSJeff Kirsher /**
1098aff3d9efSJoao Pinto  * stmmac_free_tx_buffers - free TX buffers.
1099aff3d9efSJoao Pinto  * @priv: driver private structure
1100aff3d9efSJoao Pinto  * @queue: TX queue index
1101aff3d9efSJoao Pinto  * @i: buffer index
1102aff3d9efSJoao Pinto  */
1103aff3d9efSJoao Pinto static void stmmac_free_tx_buffers(struct stmmac_priv *priv, u32 queue, u32 i)
1104aff3d9efSJoao Pinto {
1105aff3d9efSJoao Pinto 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1106aff3d9efSJoao Pinto 
1107aff3d9efSJoao Pinto 	if (tx_q->tx_skbuff_dma[i].buf) {
1108aff3d9efSJoao Pinto 		if (tx_q->tx_skbuff_dma[i].map_as_page)
1109aff3d9efSJoao Pinto 			dma_unmap_page(priv->device,
1110aff3d9efSJoao Pinto 				       tx_q->tx_skbuff_dma[i].buf,
1111aff3d9efSJoao Pinto 				       tx_q->tx_skbuff_dma[i].len,
1112aff3d9efSJoao Pinto 				       DMA_TO_DEVICE);
1113aff3d9efSJoao Pinto 		else
1114aff3d9efSJoao Pinto 			dma_unmap_single(priv->device,
1115aff3d9efSJoao Pinto 					 tx_q->tx_skbuff_dma[i].buf,
1116aff3d9efSJoao Pinto 					 tx_q->tx_skbuff_dma[i].len,
1117aff3d9efSJoao Pinto 					 DMA_TO_DEVICE);
1118aff3d9efSJoao Pinto 	}
1119aff3d9efSJoao Pinto 
1120aff3d9efSJoao Pinto 	if (tx_q->tx_skbuff[i]) {
1121aff3d9efSJoao Pinto 		dev_kfree_skb_any(tx_q->tx_skbuff[i]);
1122aff3d9efSJoao Pinto 		tx_q->tx_skbuff[i] = NULL;
1123aff3d9efSJoao Pinto 		tx_q->tx_skbuff_dma[i].buf = 0;
1124aff3d9efSJoao Pinto 		tx_q->tx_skbuff_dma[i].map_as_page = false;
1125aff3d9efSJoao Pinto 	}
1126aff3d9efSJoao Pinto }
1127aff3d9efSJoao Pinto 
1128aff3d9efSJoao Pinto /**
1129aff3d9efSJoao Pinto  * init_tx_dma_desc_rings - init the TX descriptor rings
11307ac6653aSJeff Kirsher  * @dev: net device structure
1131aff3d9efSJoao Pinto  * Description: this function initializes the DMA TX descriptors
1132aff3d9efSJoao Pinto  * and the TX queue bookkeeping. It supports the chained and ring
1133286a8372SGiuseppe CAVALLARO  * modes.
11347ac6653aSJeff Kirsher  */
1135aff3d9efSJoao Pinto static int init_tx_dma_desc_rings(struct net_device *dev)
11367ac6653aSJeff Kirsher {
11377ac6653aSJeff Kirsher 	struct stmmac_priv *priv = netdev_priv(dev);
1138aff3d9efSJoao Pinto 	u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1139aff3d9efSJoao Pinto 	u32 queue;
1140aff3d9efSJoao Pinto 	int i = 0;
11417ac6653aSJeff Kirsher 
1142aff3d9efSJoao Pinto 	for (queue = 0; queue < tx_queue_cnt; queue++) {
1143aff3d9efSJoao Pinto 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
11442618abb7SVince Bridgers 
1145b3e51069SLABBE Corentin 		netif_dbg(priv, probe, priv->dev,
1146aff3d9efSJoao Pinto 			  "(%s) dma_tx_phy=0x%08x\n", __func__,
1147aff3d9efSJoao Pinto 			  (u32)tx_q->dma_tx_phy);
11487ac6653aSJeff Kirsher 
1149c24602efSGiuseppe CAVALLARO 		/* Setup the chained descriptor addresses */
1150c24602efSGiuseppe CAVALLARO 		if (priv->mode == STMMAC_CHAIN_MODE) {
1151aff3d9efSJoao Pinto 			if (priv->extend_desc)
1152aff3d9efSJoao Pinto 				priv->hw->mode->init(tx_q->dma_etx,
1153aff3d9efSJoao Pinto 						     tx_q->dma_tx_phy,
1154e3ad57c9SGiuseppe Cavallaro 						     DMA_TX_SIZE, 1);
1155aff3d9efSJoao Pinto 			else
1156aff3d9efSJoao Pinto 				priv->hw->mode->init(tx_q->dma_tx,
1157aff3d9efSJoao Pinto 						     tx_q->dma_tx_phy,
1158e3ad57c9SGiuseppe Cavallaro 						     DMA_TX_SIZE, 0);
1159c24602efSGiuseppe CAVALLARO 		}
1160286a8372SGiuseppe CAVALLARO 
1161e3ad57c9SGiuseppe Cavallaro 		for (i = 0; i < DMA_TX_SIZE; i++) {
1162c24602efSGiuseppe CAVALLARO 			struct dma_desc *p;
1163aff3d9efSJoao Pinto 
1164c24602efSGiuseppe CAVALLARO 			if (priv->extend_desc)
1165aff3d9efSJoao Pinto 				p = &((tx_q->dma_etx + i)->basic);
1166c24602efSGiuseppe CAVALLARO 			else
1167aff3d9efSJoao Pinto 				p = tx_q->dma_tx + i;
1168f748be53SAlexandre TORGUE 
1169f748be53SAlexandre TORGUE 			if (priv->synopsys_id >= DWMAC_CORE_4_00) {
1170f748be53SAlexandre TORGUE 				p->des0 = 0;
1171f748be53SAlexandre TORGUE 				p->des1 = 0;
1172c24602efSGiuseppe CAVALLARO 				p->des2 = 0;
1173f748be53SAlexandre TORGUE 				p->des3 = 0;
1174f748be53SAlexandre TORGUE 			} else {
1175f748be53SAlexandre TORGUE 				p->des2 = 0;
1176f748be53SAlexandre TORGUE 			}
1177f748be53SAlexandre TORGUE 
1178aff3d9efSJoao Pinto 			tx_q->tx_skbuff_dma[i].buf = 0;
1179aff3d9efSJoao Pinto 			tx_q->tx_skbuff_dma[i].map_as_page = false;
1180aff3d9efSJoao Pinto 			tx_q->tx_skbuff_dma[i].len = 0;
1181aff3d9efSJoao Pinto 			tx_q->tx_skbuff_dma[i].last_segment = false;
1182aff3d9efSJoao Pinto 			tx_q->tx_skbuff[i] = NULL;
11834a7d666aSGiuseppe CAVALLARO 		}
1184c24602efSGiuseppe CAVALLARO 
1185aff3d9efSJoao Pinto 		tx_q->dirty_tx = 0;
1186aff3d9efSJoao Pinto 		tx_q->cur_tx = 0;
1187aff3d9efSJoao Pinto 		netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue));
1188aff3d9efSJoao Pinto 	}
11897ac6653aSJeff Kirsher 
1190aff3d9efSJoao Pinto 	return 0;
1191aff3d9efSJoao Pinto }
1192aff3d9efSJoao Pinto 
1193aff3d9efSJoao Pinto /**
1194aff3d9efSJoao Pinto  * init_rx_dma_desc_rings - init the RX descriptor rings
1195aff3d9efSJoao Pinto  * @dev: net device structure
1196aff3d9efSJoao Pinto  * @flags: gfp flag.
1197aff3d9efSJoao Pinto  * Description: this function initializes the DMA RX descriptors
1198aff3d9efSJoao Pinto  * and allocates the socket buffers. It supports the chained and ring
1199aff3d9efSJoao Pinto  * modes.
1200aff3d9efSJoao Pinto  */
1201aff3d9efSJoao Pinto static int init_rx_dma_desc_rings(struct net_device *dev, gfp_t flags)
1202aff3d9efSJoao Pinto {
1203aff3d9efSJoao Pinto 	struct stmmac_priv *priv = netdev_priv(dev);
1204aff3d9efSJoao Pinto 	u32 rx_count = priv->plat->rx_queues_to_use;
1205aff3d9efSJoao Pinto 	unsigned int bfsize = 0;
1206aff3d9efSJoao Pinto 	int ret = -ENOMEM;
1207aff3d9efSJoao Pinto 	u32 queue;
1208aff3d9efSJoao Pinto 	int i;
1209aff3d9efSJoao Pinto 
1210aff3d9efSJoao Pinto 	if (priv->hw->mode->set_16kib_bfsize)
1211aff3d9efSJoao Pinto 		bfsize = priv->hw->mode->set_16kib_bfsize(dev->mtu);
1212aff3d9efSJoao Pinto 
1213aff3d9efSJoao Pinto 	if (bfsize < BUF_SIZE_16KiB)
1214aff3d9efSJoao Pinto 		bfsize = stmmac_set_bfsize(dev->mtu, priv->dma_buf_sz);
1215aff3d9efSJoao Pinto 
1216aff3d9efSJoao Pinto 	priv->dma_buf_sz = bfsize;
1217aff3d9efSJoao Pinto 
1218aff3d9efSJoao Pinto 	/* RX INITIALIZATION */
1219aff3d9efSJoao Pinto 	netif_dbg(priv, probe, priv->dev,
1220aff3d9efSJoao Pinto 		  "SKB addresses:\nskb\t\tskb data\tdma data\n");
1221aff3d9efSJoao Pinto 
1222aff3d9efSJoao Pinto 	for (queue = 0; queue < rx_count; queue++) {
1223aff3d9efSJoao Pinto 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1224aff3d9efSJoao Pinto 
1225aff3d9efSJoao Pinto 		netif_dbg(priv, probe, priv->dev,
1226aff3d9efSJoao Pinto 			  "(%s) dma_rx_phy=0x%08x\n", __func__,
1227aff3d9efSJoao Pinto 			  (u32)rx_q->dma_rx_phy);
1228aff3d9efSJoao Pinto 
1229aff3d9efSJoao Pinto 		for (i = 0; i < DMA_RX_SIZE; i++) {
1230aff3d9efSJoao Pinto 			struct dma_desc *p;
1231aff3d9efSJoao Pinto 
1232aff3d9efSJoao Pinto 			if (priv->extend_desc)
1233aff3d9efSJoao Pinto 				p = &((rx_q->dma_erx + i)->basic);
1234aff3d9efSJoao Pinto 			else
1235aff3d9efSJoao Pinto 				p = rx_q->dma_rx + i;
1236aff3d9efSJoao Pinto 
1237aff3d9efSJoao Pinto 			ret = stmmac_init_rx_buffers(priv, p, i, flags, queue);
1238aff3d9efSJoao Pinto 			if (ret)
1239aff3d9efSJoao Pinto 				goto err_init_rx_buffers;
1240aff3d9efSJoao Pinto 
1241aff3d9efSJoao Pinto 			netif_dbg(priv, probe, priv->dev, "[%p]\t[%p]\t[%x]\n",
1242aff3d9efSJoao Pinto 				  rx_q->rx_skbuff[i],
1243aff3d9efSJoao Pinto 				  rx_q->rx_skbuff[i]->data,
1244aff3d9efSJoao Pinto 				  (unsigned int)rx_q->rx_skbuff_dma[i]);
1245aff3d9efSJoao Pinto 		}
1246aff3d9efSJoao Pinto 
1247aff3d9efSJoao Pinto 		rx_q->cur_rx = 0;
1248aff3d9efSJoao Pinto 		rx_q->dirty_rx = (unsigned int)(i - DMA_RX_SIZE);
1249aff3d9efSJoao Pinto 
1250aff3d9efSJoao Pinto 		stmmac_clear_rx_descriptors(priv, queue);
1251aff3d9efSJoao Pinto 
1252aff3d9efSJoao Pinto 		if (priv->mode == STMMAC_CHAIN_MODE) {
1253aff3d9efSJoao Pinto 			if (priv->extend_desc)
1254aff3d9efSJoao Pinto 				priv->hw->mode->init(rx_q->dma_erx,
1255aff3d9efSJoao Pinto 						     rx_q->dma_rx_phy,
1256aff3d9efSJoao Pinto 						     DMA_RX_SIZE, 1);
1257aff3d9efSJoao Pinto 			else
1258aff3d9efSJoao Pinto 				priv->hw->mode->init(rx_q->dma_rx,
1259aff3d9efSJoao Pinto 						     rx_q->dma_rx_phy,
1260aff3d9efSJoao Pinto 						     DMA_RX_SIZE, 0);
1261aff3d9efSJoao Pinto 		}
1262aff3d9efSJoao Pinto 	}
1263aff3d9efSJoao Pinto 
1264aff3d9efSJoao Pinto 	buf_sz = bfsize;
1265aff3d9efSJoao Pinto 
1266aff3d9efSJoao Pinto 	return 0;
1267aff3d9efSJoao Pinto 
1268aff3d9efSJoao Pinto err_init_rx_buffers:
1269aff3d9efSJoao Pinto 	do {
1270aff3d9efSJoao Pinto 		while (--i >= 0)
1271aff3d9efSJoao Pinto 			stmmac_free_rx_buffers(priv, queue, i);
1272aff3d9efSJoao Pinto 
1273aff3d9efSJoao Pinto 		i = DMA_RX_SIZE;
1274aff3d9efSJoao Pinto 	} while (queue-- > 0);
1275aff3d9efSJoao Pinto 
1276aff3d9efSJoao Pinto 	return ret;
1277aff3d9efSJoao Pinto }
1278aff3d9efSJoao Pinto 
1279aff3d9efSJoao Pinto /**
1280aff3d9efSJoao Pinto  * init_dma_desc_rings - init the RX/TX descriptor rings
1281aff3d9efSJoao Pinto  * @dev: net device structure
1282aff3d9efSJoao Pinto  * @flags: gfp flag.
1283aff3d9efSJoao Pinto  * Description: this function initializes the DMA RX/TX descriptors
1284aff3d9efSJoao Pinto  * and allocates the socket buffers. It supports the chained and ring
1285aff3d9efSJoao Pinto  * modes.
1286aff3d9efSJoao Pinto  */
1287aff3d9efSJoao Pinto static int init_dma_desc_rings(struct net_device *dev, gfp_t flags)
1288aff3d9efSJoao Pinto {
1289aff3d9efSJoao Pinto 	struct stmmac_priv *priv = netdev_priv(dev);
1290aff3d9efSJoao Pinto 	int ret = init_rx_dma_desc_rings(dev, flags);
1291aff3d9efSJoao Pinto 
1292aff3d9efSJoao Pinto 	if (ret)
1293aff3d9efSJoao Pinto 		return ret;
1294aff3d9efSJoao Pinto 
1295aff3d9efSJoao Pinto 	ret = init_tx_dma_desc_rings(dev);
12967ac6653aSJeff Kirsher 
1297c24602efSGiuseppe CAVALLARO 	if (netif_msg_hw(priv))
1298c24602efSGiuseppe CAVALLARO 		stmmac_display_rings(priv);
129956329137SBartlomiej Zolnierkiewicz 
130056329137SBartlomiej Zolnierkiewicz 	return ret;
13017ac6653aSJeff Kirsher }
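/*
 * Typical usage sketch (hedged; the real call sites are elsewhere in this
 * file): the open/resume paths first allocate the descriptor memory and
 * then seed the rings, roughly:
 *
 *	ret = alloc_dma_desc_resources(priv);
 *	if (!ret)
 *		ret = init_dma_desc_rings(dev, GFP_KERNEL);
 */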
13027ac6653aSJeff Kirsher 
1303aff3d9efSJoao Pinto static void dma_free_rx_skbufs(struct stmmac_priv *priv, u32 queue)
13047ac6653aSJeff Kirsher {
13057ac6653aSJeff Kirsher 	int i;
13067ac6653aSJeff Kirsher 
1307e3ad57c9SGiuseppe Cavallaro 	for (i = 0; i < DMA_RX_SIZE; i++)
1308aff3d9efSJoao Pinto 		stmmac_free_rx_buffers(priv, queue, i);
13097ac6653aSJeff Kirsher }
13107ac6653aSJeff Kirsher 
1311aff3d9efSJoao Pinto static void dma_free_tx_skbufs(struct stmmac_priv *priv, u32 queue)
13127ac6653aSJeff Kirsher {
13137ac6653aSJeff Kirsher 	int i;
13147ac6653aSJeff Kirsher 
1315aff3d9efSJoao Pinto 	for (i = 0; i < DMA_TX_SIZE; i++)
1316aff3d9efSJoao Pinto 		stmmac_free_tx_buffers(priv, queue, i);
131775e4364fSdamuzi000 }
131875e4364fSdamuzi000 
1319aff3d9efSJoao Pinto /**
1320aff3d9efSJoao Pinto  * free_rx_dma_desc_resources - free RX DMA resources
1321aff3d9efSJoao Pinto  * @priv: driver private structure
1322aff3d9efSJoao Pinto  */
1323aff3d9efSJoao Pinto static void free_rx_dma_desc_resources(struct stmmac_priv *priv)
1324aff3d9efSJoao Pinto {
1325aff3d9efSJoao Pinto 	u32 rx_count = priv->plat->rx_queues_to_use;
1326aff3d9efSJoao Pinto 	u32 queue = 0;
1327aff3d9efSJoao Pinto 
1328aff3d9efSJoao Pinto 	if (!priv->rx_queue)
1329aff3d9efSJoao Pinto 		return;
1330aff3d9efSJoao Pinto 
1331aff3d9efSJoao Pinto 	/* Free RX queue resources */
1332aff3d9efSJoao Pinto 	for (queue = 0; queue < rx_count; queue++) {
1333aff3d9efSJoao Pinto 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1334aff3d9efSJoao Pinto 
1335aff3d9efSJoao Pinto 		if (!rx_q)
1336aff3d9efSJoao Pinto 			break;
1337aff3d9efSJoao Pinto 
1338aff3d9efSJoao Pinto 		/* Release the DMA RX socket buffers */
1339aff3d9efSJoao Pinto 		dma_free_rx_skbufs(priv, queue);
1340aff3d9efSJoao Pinto 
1341aff3d9efSJoao Pinto 		kfree(rx_q->rx_skbuff);
1342aff3d9efSJoao Pinto 
1343aff3d9efSJoao Pinto 		kfree(rx_q->rx_skbuff_dma);
1344aff3d9efSJoao Pinto 
1345aff3d9efSJoao Pinto 		if (!priv->extend_desc)
1346aff3d9efSJoao Pinto 			dma_free_coherent(priv->device,
1347aff3d9efSJoao Pinto 					  DMA_RX_SIZE * sizeof(struct dma_desc),
1348aff3d9efSJoao Pinto 					  rx_q->dma_rx,
1349aff3d9efSJoao Pinto 					  rx_q->dma_rx_phy);
1350aff3d9efSJoao Pinto 		else
1351aff3d9efSJoao Pinto 			dma_free_coherent(priv->device, DMA_RX_SIZE *
1352aff3d9efSJoao Pinto 					  sizeof(struct dma_extended_desc),
1353aff3d9efSJoao Pinto 					  rx_q->dma_erx,
1354aff3d9efSJoao Pinto 					  rx_q->dma_rx_phy);
1355aff3d9efSJoao Pinto 	}
1356aff3d9efSJoao Pinto 
1357aff3d9efSJoao Pinto 	kfree(priv->rx_queue);
1358aff3d9efSJoao Pinto }
1359aff3d9efSJoao Pinto 
1360aff3d9efSJoao Pinto /**
1361aff3d9efSJoao Pinto  * free_tx_dma_desc_resources - free TX DMA resources
1362aff3d9efSJoao Pinto  * @priv: driver private structure
1363aff3d9efSJoao Pinto  */
1364aff3d9efSJoao Pinto static void free_tx_dma_desc_resources(struct stmmac_priv *priv)
1365aff3d9efSJoao Pinto {
1366aff3d9efSJoao Pinto 	u32 tx_count = priv->plat->tx_queues_to_use;
1367aff3d9efSJoao Pinto 	u32 queue = 0;
1368aff3d9efSJoao Pinto 
1369aff3d9efSJoao Pinto 	if (!priv->tx_queue)
1370aff3d9efSJoao Pinto 		return;
1371aff3d9efSJoao Pinto 
1372aff3d9efSJoao Pinto 	/* Free TX queue resources */
1373aff3d9efSJoao Pinto 	for (queue = 0; queue < tx_count; queue++) {
1374aff3d9efSJoao Pinto 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1375aff3d9efSJoao Pinto 
1376aff3d9efSJoao Pinto 		if (!tx_q)
1377aff3d9efSJoao Pinto 			break;
1378aff3d9efSJoao Pinto 
1379aff3d9efSJoao Pinto 		/* Release the DMA TX socket buffers */
1380aff3d9efSJoao Pinto 		dma_free_tx_skbufs(priv, queue);
1381aff3d9efSJoao Pinto 
1382aff3d9efSJoao Pinto 		kfree(tx_q->tx_skbuff);
1383aff3d9efSJoao Pinto 
1384aff3d9efSJoao Pinto 		kfree(tx_q->tx_skbuff_dma);
1385aff3d9efSJoao Pinto 
1386aff3d9efSJoao Pinto 		if (!priv->extend_desc)
1387aff3d9efSJoao Pinto 			dma_free_coherent(priv->device,
1388aff3d9efSJoao Pinto 					  DMA_TX_SIZE * sizeof(struct dma_desc),
1389aff3d9efSJoao Pinto 					  tx_q->dma_tx,
1390aff3d9efSJoao Pinto 					  tx_q->dma_tx_phy);
1391aff3d9efSJoao Pinto 		else
1392aff3d9efSJoao Pinto 			dma_free_coherent(priv->device, DMA_TX_SIZE *
1393aff3d9efSJoao Pinto 					  sizeof(struct dma_extended_desc),
1394aff3d9efSJoao Pinto 					  tx_q->dma_etx,
1395aff3d9efSJoao Pinto 					  tx_q->dma_tx_phy);
1396aff3d9efSJoao Pinto 	}
1397aff3d9efSJoao Pinto 
1398aff3d9efSJoao Pinto 	kfree(priv->tx_queue);
1399aff3d9efSJoao Pinto }
1400aff3d9efSJoao Pinto 
1401aff3d9efSJoao Pinto /**
1402aff3d9efSJoao Pinto  * free_dma_desc_resources - free All DMA resources
1403aff3d9efSJoao Pinto  * @priv: driver private structure
1404aff3d9efSJoao Pinto  */
1405aff3d9efSJoao Pinto static void free_dma_desc_resources(struct stmmac_priv *priv)
1406aff3d9efSJoao Pinto {
1407aff3d9efSJoao Pinto 	free_rx_dma_desc_resources(priv);
1408aff3d9efSJoao Pinto 	free_tx_dma_desc_resources(priv);
1409aff3d9efSJoao Pinto }
1410aff3d9efSJoao Pinto 
1411aff3d9efSJoao Pinto /**
1412aff3d9efSJoao Pinto  * alloc_rx_dma_desc_resources - alloc RX resources.
1413aff3d9efSJoao Pinto  * @priv: private structure
1414aff3d9efSJoao Pinto  * Description: according to which descriptor type is used (extended or basic)
1415aff3d9efSJoao Pinto  * this function allocates the resources for the RX paths. The RX socket
1416aff3d9efSJoao Pinto  * buffers are pre-allocated in order to allow the zero-copy mechanism.
1417aff3d9efSJoao Pinto  */
1418aff3d9efSJoao Pinto static int alloc_rx_dma_desc_resources(struct stmmac_priv *priv)
1419aff3d9efSJoao Pinto {
1420aff3d9efSJoao Pinto 	u32 rx_count = priv->plat->rx_queues_to_use;
1421aff3d9efSJoao Pinto 	int ret = -ENOMEM;
1422aff3d9efSJoao Pinto 	u32 queue = 0;
1423aff3d9efSJoao Pinto 
1424aff3d9efSJoao Pinto 	/* Allocate RX queues array */
1425aff3d9efSJoao Pinto 	priv->rx_queue = kmalloc_array(rx_count,
1426aff3d9efSJoao Pinto 				       sizeof(struct stmmac_rx_queue),
1427aff3d9efSJoao Pinto 				       GFP_KERNEL);
1428aff3d9efSJoao Pinto 	if (!priv->rx_queue)
1429aff3d9efSJoao Pinto 		return -ENOMEM;
1432aff3d9efSJoao Pinto 
1433aff3d9efSJoao Pinto 	/* RX queues buffers and DMA */
1434aff3d9efSJoao Pinto 	for (queue = 0; queue < rx_count; queue++) {
1435aff3d9efSJoao Pinto 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1436aff3d9efSJoao Pinto 
1437aff3d9efSJoao Pinto 		rx_q->queue_index = queue;
1438aff3d9efSJoao Pinto 		rx_q->priv_data = priv;
1439aff3d9efSJoao Pinto 
1440aff3d9efSJoao Pinto 		rx_q->rx_skbuff_dma = kmalloc_array(DMA_RX_SIZE,
1441aff3d9efSJoao Pinto 							sizeof(dma_addr_t),
1442aff3d9efSJoao Pinto 							GFP_KERNEL);
1443aff3d9efSJoao Pinto 		if (!rx_q->rx_skbuff_dma)
1444aff3d9efSJoao Pinto 			goto err_dma_buffers;
1445aff3d9efSJoao Pinto 
1446aff3d9efSJoao Pinto 		rx_q->rx_skbuff = kmalloc_array(DMA_RX_SIZE,
1447aff3d9efSJoao Pinto 						    sizeof(struct sk_buff *),
1448aff3d9efSJoao Pinto 						    GFP_KERNEL);
1449aff3d9efSJoao Pinto 		if (!rx_q->rx_skbuff)
1450aff3d9efSJoao Pinto 			goto err_dma_buffers;
1451aff3d9efSJoao Pinto 
1452aff3d9efSJoao Pinto 		if (priv->extend_desc) {
1453aff3d9efSJoao Pinto 			rx_q->dma_erx =	dma_zalloc_coherent(priv->device,
1454aff3d9efSJoao Pinto 			(DMA_RX_SIZE * sizeof(struct dma_extended_desc)),
1455aff3d9efSJoao Pinto 			&rx_q->dma_rx_phy, GFP_KERNEL);
1456aff3d9efSJoao Pinto 
1457aff3d9efSJoao Pinto 			if (!rx_q->dma_erx)
1458aff3d9efSJoao Pinto 				goto err_dma_buffers;
1459aff3d9efSJoao Pinto 		} else {
1460aff3d9efSJoao Pinto 			rx_q->dma_rx = dma_zalloc_coherent(priv->device,
1461aff3d9efSJoao Pinto 			(DMA_RX_SIZE * sizeof(struct dma_desc)),
1462aff3d9efSJoao Pinto 			&rx_q->dma_rx_phy, GFP_KERNEL);
1463aff3d9efSJoao Pinto 
1464aff3d9efSJoao Pinto 			if (!rx_q->dma_rx)
1465aff3d9efSJoao Pinto 				goto err_dma_buffers;
14667ac6653aSJeff Kirsher 		}
14677ac6653aSJeff Kirsher 	}
1468aff3d9efSJoao Pinto 
1469aff3d9efSJoao Pinto 	return 0;
1470aff3d9efSJoao Pinto 
1471aff3d9efSJoao Pinto err_dma_buffers:
1472aff3d9efSJoao Pinto 	free_rx_dma_desc_resources(priv);
1473aff3d9efSJoao Pinto 
1474aff3d9efSJoao Pinto 	return ret;
1475aff3d9efSJoao Pinto }
1476aff3d9efSJoao Pinto 
1477aff3d9efSJoao Pinto /**
1478aff3d9efSJoao Pinto  * alloc_tx_dma_desc_resources - alloc TX resources.
1479aff3d9efSJoao Pinto  * @priv: private structure
1480aff3d9efSJoao Pinto  * Description: according to which descriptor type is used (extended or basic)
1481aff3d9efSJoao Pinto  * this function allocates the resources for the TX paths.
1482aff3d9efSJoao Pinto  */
1483aff3d9efSJoao Pinto static int alloc_tx_dma_desc_resources(struct stmmac_priv *priv)
1484aff3d9efSJoao Pinto {
1485aff3d9efSJoao Pinto 	u32 tx_count = priv->plat->tx_queues_to_use;
1486aff3d9efSJoao Pinto 	int ret = -ENOMEM;
1487aff3d9efSJoao Pinto 	u32 queue = 0;
1488aff3d9efSJoao Pinto 
1489aff3d9efSJoao Pinto 	/* Allocate TX queues array */
1490aff3d9efSJoao Pinto 	priv->tx_queue = kmalloc_array(tx_count,
1491aff3d9efSJoao Pinto 				       sizeof(struct stmmac_tx_queue),
1492aff3d9efSJoao Pinto 				       GFP_KERNEL);
1493aff3d9efSJoao Pinto 	if (!priv->tx_queue)
1494aff3d9efSJoao Pinto 		return -ENOMEM;
1495aff3d9efSJoao Pinto 
1496aff3d9efSJoao Pinto 	/* TX queues buffers and DMA */
1497aff3d9efSJoao Pinto 	for (queue = 0; queue < tx_count; queue++) {
1498aff3d9efSJoao Pinto 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1499aff3d9efSJoao Pinto 
1500aff3d9efSJoao Pinto 		tx_q->queue_index = queue;
1501aff3d9efSJoao Pinto 		tx_q->priv_data = priv;
1502aff3d9efSJoao Pinto 
1503aff3d9efSJoao Pinto 		tx_q->tx_skbuff_dma = kmalloc_array(DMA_TX_SIZE,
1504aff3d9efSJoao Pinto 					  sizeof(struct stmmac_tx_info),
1505aff3d9efSJoao Pinto 					  GFP_KERNEL);
1506aff3d9efSJoao Pinto 
1507aff3d9efSJoao Pinto 		if (!tx_q->tx_skbuff_dma)
1508aff3d9efSJoao Pinto 			goto err_dma_buffers;
1509aff3d9efSJoao Pinto 
1510aff3d9efSJoao Pinto 		tx_q->tx_skbuff = kmalloc_array(DMA_TX_SIZE,
1511aff3d9efSJoao Pinto 						    sizeof(struct sk_buff *),
1512aff3d9efSJoao Pinto 						    GFP_KERNEL);
1513aff3d9efSJoao Pinto 		if (!tx_q->tx_skbuff)
1514aff3d9efSJoao Pinto 			goto err_dma_buffers;
1515aff3d9efSJoao Pinto 
1516aff3d9efSJoao Pinto 		if (priv->extend_desc) {
1517aff3d9efSJoao Pinto 			tx_q->dma_etx =
1518aff3d9efSJoao Pinto 			dma_zalloc_coherent(priv->device,
1519aff3d9efSJoao Pinto 			(DMA_TX_SIZE * sizeof(struct dma_extended_desc)),
1520aff3d9efSJoao Pinto 			&tx_q->dma_tx_phy, GFP_KERNEL);
1521aff3d9efSJoao Pinto 
1522aff3d9efSJoao Pinto 			if (!tx_q->dma_etx)
1523aff3d9efSJoao Pinto 				goto err_dma_buffers;
1524aff3d9efSJoao Pinto 		} else {
1525aff3d9efSJoao Pinto 			tx_q->dma_tx =
1526aff3d9efSJoao Pinto 			dma_zalloc_coherent(priv->device,
1527aff3d9efSJoao Pinto 			(DMA_TX_SIZE * sizeof(struct dma_desc)),
1528aff3d9efSJoao Pinto 			&tx_q->dma_tx_phy, GFP_KERNEL);
1529aff3d9efSJoao Pinto 
1530aff3d9efSJoao Pinto 			if (!tx_q->dma_tx)
1531aff3d9efSJoao Pinto 				goto err_dma_buffers;
1532aff3d9efSJoao Pinto 		}
1533aff3d9efSJoao Pinto 	}
1534aff3d9efSJoao Pinto 
1535aff3d9efSJoao Pinto 	return 0;
1536aff3d9efSJoao Pinto 
1537aff3d9efSJoao Pinto err_dma_buffers:
1538aff3d9efSJoao Pinto 	free_tx_dma_desc_resources(priv);
1539aff3d9efSJoao Pinto 
1540aff3d9efSJoao Pinto 	return ret;
15417ac6653aSJeff Kirsher }
15427ac6653aSJeff Kirsher 
1543732fdf0eSGiuseppe CAVALLARO /**
1544732fdf0eSGiuseppe CAVALLARO  * alloc_dma_desc_resources - alloc TX/RX resources.
1545732fdf0eSGiuseppe CAVALLARO  * @priv: private structure
1546732fdf0eSGiuseppe CAVALLARO  * Description: according to which descriptor type is used (extended or basic)
1547732fdf0eSGiuseppe CAVALLARO  * this function allocates the resources for the TX and RX paths. In case of
1548732fdf0eSGiuseppe CAVALLARO  * reception, for example, the RX socket buffers are pre-allocated in order to
1549732fdf0eSGiuseppe CAVALLARO  * allow the zero-copy mechanism.
1550732fdf0eSGiuseppe CAVALLARO  */
155109f8d696SSrinivas Kandagatla static int alloc_dma_desc_resources(struct stmmac_priv *priv)
155209f8d696SSrinivas Kandagatla {
1553aff3d9efSJoao Pinto 	int ret = 0;
155409f8d696SSrinivas Kandagatla 
1555aff3d9efSJoao Pinto 	ret = alloc_tx_dma_desc_resources(priv);
1556aff3d9efSJoao Pinto 	if (ret)
155709f8d696SSrinivas Kandagatla 		return ret;
155809f8d696SSrinivas Kandagatla 
1559aff3d9efSJoao Pinto 	ret = alloc_rx_dma_desc_resources(priv);
15607ac6653aSJeff Kirsher 
1561aff3d9efSJoao Pinto 	return ret;
15627ac6653aSJeff Kirsher }
15637ac6653aSJeff Kirsher 
15647ac6653aSJeff Kirsher /**
15659eb12474Sjpinto  *  stmmac_mac_enable_rx_queues - Enable MAC rx queues
15669eb12474Sjpinto  *  @priv: driver private structure
15679eb12474Sjpinto  *  Description: It is used for enabling the rx queues in the MAC
15689eb12474Sjpinto  */
15699eb12474Sjpinto static void stmmac_mac_enable_rx_queues(struct stmmac_priv *priv)
15709eb12474Sjpinto {
15714f6046f5SJoao Pinto 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
15724f6046f5SJoao Pinto 	int queue;
15734f6046f5SJoao Pinto 	u8 mode;
15749eb12474Sjpinto 
15754f6046f5SJoao Pinto 	for (queue = 0; queue < rx_queues_count; queue++) {
15764f6046f5SJoao Pinto 		mode = priv->plat->rx_queues_cfg[queue].mode_to_use;
15774f6046f5SJoao Pinto 		priv->hw->mac->rx_queue_enable(priv->hw, mode, queue);
15784f6046f5SJoao Pinto 	}
15799eb12474Sjpinto }
15809eb12474Sjpinto 
15819eb12474Sjpinto /**
1582ae4f0d46SJoao Pinto  * stmmac_start_rx_dma - start RX DMA channel
1583ae4f0d46SJoao Pinto  * @priv: driver private structure
1584ae4f0d46SJoao Pinto  * @chan: RX channel index
1585ae4f0d46SJoao Pinto  * Description:
1586ae4f0d46SJoao Pinto  * This starts an RX DMA channel
1587ae4f0d46SJoao Pinto  */
1588ae4f0d46SJoao Pinto static void stmmac_start_rx_dma(struct stmmac_priv *priv, u32 chan)
1589ae4f0d46SJoao Pinto {
1590ae4f0d46SJoao Pinto 	netdev_dbg(priv->dev, "DMA RX processes started in channel %d\n", chan);
1591ae4f0d46SJoao Pinto 	priv->hw->dma->start_rx(priv->ioaddr, chan);
1592ae4f0d46SJoao Pinto }
1593ae4f0d46SJoao Pinto 
1594ae4f0d46SJoao Pinto /**
1595ae4f0d46SJoao Pinto  * stmmac_start_tx_dma - start TX DMA channel
1596ae4f0d46SJoao Pinto  * @priv: driver private structure
1597ae4f0d46SJoao Pinto  * @chan: TX channel index
1598ae4f0d46SJoao Pinto  * Description:
1599ae4f0d46SJoao Pinto  * This starts a TX DMA channel
1600ae4f0d46SJoao Pinto  */
1601ae4f0d46SJoao Pinto static void stmmac_start_tx_dma(struct stmmac_priv *priv, u32 chan)
1602ae4f0d46SJoao Pinto {
1603ae4f0d46SJoao Pinto 	netdev_dbg(priv->dev, "DMA TX processes started in channel %d\n", chan);
1604ae4f0d46SJoao Pinto 	priv->hw->dma->start_tx(priv->ioaddr, chan);
1605ae4f0d46SJoao Pinto }
1606ae4f0d46SJoao Pinto 
1607ae4f0d46SJoao Pinto /**
1608ae4f0d46SJoao Pinto  * stmmac_stop_rx_dma - stop RX DMA channel
1609ae4f0d46SJoao Pinto  * @priv: driver private structure
1610ae4f0d46SJoao Pinto  * @chan: RX channel index
1611ae4f0d46SJoao Pinto  * Description:
1612ae4f0d46SJoao Pinto  * This stops an RX DMA channel
1613ae4f0d46SJoao Pinto  */
1614ae4f0d46SJoao Pinto static void stmmac_stop_rx_dma(struct stmmac_priv *priv, u32 chan)
1615ae4f0d46SJoao Pinto {
1616ae4f0d46SJoao Pinto 	netdev_dbg(priv->dev, "DMA RX processes stopped in channel %d\n", chan);
1617ae4f0d46SJoao Pinto 	priv->hw->dma->stop_rx(priv->ioaddr, chan);
1618ae4f0d46SJoao Pinto }
1619ae4f0d46SJoao Pinto 
1620ae4f0d46SJoao Pinto /**
1621ae4f0d46SJoao Pinto  * stmmac_stop_tx_dma - stop TX DMA channel
1622ae4f0d46SJoao Pinto  * @priv: driver private structure
1623ae4f0d46SJoao Pinto  * @chan: TX channel index
1624ae4f0d46SJoao Pinto  * Description:
1625ae4f0d46SJoao Pinto  * This stops a TX DMA channel
1626ae4f0d46SJoao Pinto  */
1627ae4f0d46SJoao Pinto static void stmmac_stop_tx_dma(struct stmmac_priv *priv, u32 chan)
1628ae4f0d46SJoao Pinto {
1629ae4f0d46SJoao Pinto 	netdev_dbg(priv->dev, "DMA TX processes stopped in channel %d\n", chan);
1630ae4f0d46SJoao Pinto 	priv->hw->dma->stop_tx(priv->ioaddr, chan);
1631ae4f0d46SJoao Pinto }
1632ae4f0d46SJoao Pinto 
1633ae4f0d46SJoao Pinto /**
1634ae4f0d46SJoao Pinto  * stmmac_start_all_dma - start all RX and TX DMA channels
1635ae4f0d46SJoao Pinto  * @priv: driver private structure
1636ae4f0d46SJoao Pinto  * Description:
1637ae4f0d46SJoao Pinto  * This starts all the RX and TX DMA channels
1638ae4f0d46SJoao Pinto  */
1639ae4f0d46SJoao Pinto static void stmmac_start_all_dma(struct stmmac_priv *priv)
1640ae4f0d46SJoao Pinto {
1641ae4f0d46SJoao Pinto 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
1642ae4f0d46SJoao Pinto 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
1643ae4f0d46SJoao Pinto 	u32 chan = 0;
1644ae4f0d46SJoao Pinto 
1645ae4f0d46SJoao Pinto 	for (chan = 0; chan < rx_channels_count; chan++)
1646ae4f0d46SJoao Pinto 		stmmac_start_rx_dma(priv, chan);
1647ae4f0d46SJoao Pinto 
1648ae4f0d46SJoao Pinto 	for (chan = 0; chan < tx_channels_count; chan++)
1649ae4f0d46SJoao Pinto 		stmmac_start_tx_dma(priv, chan);
1650ae4f0d46SJoao Pinto }
1651ae4f0d46SJoao Pinto 
1652ae4f0d46SJoao Pinto /**
1653ae4f0d46SJoao Pinto  * stmmac_stop_all_dma - stop all RX and TX DMA channels
1654ae4f0d46SJoao Pinto  * @priv: driver private structure
1655ae4f0d46SJoao Pinto  * Description:
1656ae4f0d46SJoao Pinto  * This stops the RX and TX DMA channels
1657ae4f0d46SJoao Pinto  */
1658ae4f0d46SJoao Pinto static void stmmac_stop_all_dma(struct stmmac_priv *priv)
1659ae4f0d46SJoao Pinto {
1660ae4f0d46SJoao Pinto 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
1661ae4f0d46SJoao Pinto 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
1662ae4f0d46SJoao Pinto 	u32 chan = 0;
1663ae4f0d46SJoao Pinto 
1664ae4f0d46SJoao Pinto 	for (chan = 0; chan < rx_channels_count; chan++)
1665ae4f0d46SJoao Pinto 		stmmac_stop_rx_dma(priv, chan);
1666ae4f0d46SJoao Pinto 
1667ae4f0d46SJoao Pinto 	for (chan = 0; chan < tx_channels_count; chan++)
1668ae4f0d46SJoao Pinto 		stmmac_stop_tx_dma(priv, chan);
1669ae4f0d46SJoao Pinto }
1670ae4f0d46SJoao Pinto 
1671ae4f0d46SJoao Pinto /**
16727ac6653aSJeff Kirsher  *  stmmac_dma_operation_mode - HW DMA operation mode
167332ceabcaSGiuseppe CAVALLARO  *  @priv: driver private structure
1674732fdf0eSGiuseppe CAVALLARO  *  Description: it is used for configuring the DMA operation mode register in
1675732fdf0eSGiuseppe CAVALLARO  *  order to program the tx/rx DMA thresholds or Store-And-Forward mode.
16767ac6653aSJeff Kirsher  */
16777ac6653aSJeff Kirsher static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
16787ac6653aSJeff Kirsher {
16796deee222SJoao Pinto 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
16806deee222SJoao Pinto 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
1681f88203a2SVince Bridgers 	int rxfifosz = priv->plat->rx_fifo_size;
16826deee222SJoao Pinto 	u32 txmode = 0;
16836deee222SJoao Pinto 	u32 rxmode = 0;
16846deee222SJoao Pinto 	u32 chan = 0;
1685f88203a2SVince Bridgers 
168611fbf811SThierry Reding 	if (rxfifosz == 0)
168711fbf811SThierry Reding 		rxfifosz = priv->dma_cap.rx_fifo_size;
168811fbf811SThierry Reding 
16896deee222SJoao Pinto 	if (priv->plat->force_thresh_dma_mode) {
16906deee222SJoao Pinto 		txmode = tc;
16916deee222SJoao Pinto 		rxmode = tc;
16926deee222SJoao Pinto 	} else if (priv->plat->force_sf_dma_mode || priv->plat->tx_coe) {
16937ac6653aSJeff Kirsher 		/*
16947ac6653aSJeff Kirsher 		 * In case of GMAC, SF mode can be enabled
16957ac6653aSJeff Kirsher 		 * to perform the TX COE in HW. This depends on:
16967ac6653aSJeff Kirsher 		 * 1) TX COE if actually supported
16977ac6653aSJeff Kirsher 		 * 2) There is no bugged Jumbo frame support
16987ac6653aSJeff Kirsher 		 *    that needs to not insert csum in the TDES.
16997ac6653aSJeff Kirsher 		 */
17006deee222SJoao Pinto 		txmode = SF_DMA_MODE;
17016deee222SJoao Pinto 		rxmode = SF_DMA_MODE;
1702b2dec116SSonic Zhang 		priv->xstats.threshold = SF_DMA_MODE;
17036deee222SJoao Pinto 	} else {
17046deee222SJoao Pinto 		txmode = tc;
17056deee222SJoao Pinto 		rxmode = SF_DMA_MODE;
17066deee222SJoao Pinto 	}
17076deee222SJoao Pinto 
17086deee222SJoao Pinto 	/* configure all channels */
17096deee222SJoao Pinto 	if (priv->synopsys_id >= DWMAC_CORE_4_00) {
17106deee222SJoao Pinto 		for (chan = 0; chan < rx_channels_count; chan++)
17116deee222SJoao Pinto 			priv->hw->dma->dma_rx_mode(priv->ioaddr, rxmode, chan,
1712f88203a2SVince Bridgers 						   rxfifosz);
17136deee222SJoao Pinto 
17146deee222SJoao Pinto 		for (chan = 0; chan < tx_channels_count; chan++)
17156deee222SJoao Pinto 			priv->hw->dma->dma_tx_mode(priv->ioaddr, txmode, chan);
17166deee222SJoao Pinto 	} else {
17176deee222SJoao Pinto 		priv->hw->dma->dma_mode(priv->ioaddr, txmode, rxmode,
17186deee222SJoao Pinto 					rxfifosz);
17196deee222SJoao Pinto 	}
17207ac6653aSJeff Kirsher }
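/*
 * Summary of the policy selected above (descriptive only, no new logic):
 *
 *	force_thresh_dma_mode           -> TX = tc,          RX = tc
 *	force_sf_dma_mode or HW TX COE  -> TX = SF_DMA_MODE,  RX = SF_DMA_MODE
 *	otherwise                       -> TX = tc,          RX = SF_DMA_MODE
 *
 * where "tc" is the threshold module parameter, later bumped at runtime
 * by stmmac_dma_interrupt() on tx_hard_error_bump_tc events.
 */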
17217ac6653aSJeff Kirsher 
17227ac6653aSJeff Kirsher /**
1723732fdf0eSGiuseppe CAVALLARO  * stmmac_tx_clean - to manage the transmission completion
172432ceabcaSGiuseppe CAVALLARO  * @priv: driver private structure
1725aff3d9efSJoao Pinto  * @queue: TX queue index
1726732fdf0eSGiuseppe CAVALLARO  * Description: it reclaims the transmit resources after transmission completes.
17277ac6653aSJeff Kirsher  */
1728aff3d9efSJoao Pinto static void stmmac_tx_clean(struct stmmac_priv *priv, u32 queue)
17297ac6653aSJeff Kirsher {
1730aff3d9efSJoao Pinto 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
173138979574SBeniamino Galvani 	unsigned int bytes_compl = 0, pkts_compl = 0;
1732aff3d9efSJoao Pinto 	unsigned int entry = tx_q->dirty_tx;
17337ac6653aSJeff Kirsher 
1734739c8e14SLino Sanfilippo 	netif_tx_lock(priv->dev);
1735a9097a96SGiuseppe CAVALLARO 
17369125cdd1SGiuseppe CAVALLARO 	priv->xstats.tx_clean++;
17379125cdd1SGiuseppe CAVALLARO 
1738aff3d9efSJoao Pinto 	while (entry != tx_q->cur_tx) {
1739aff3d9efSJoao Pinto 		struct sk_buff *skb = tx_q->tx_skbuff[entry];
1740c24602efSGiuseppe CAVALLARO 		struct dma_desc *p;
1741c363b658SFabrice Gasnier 		int status;
1742c24602efSGiuseppe CAVALLARO 
1743c24602efSGiuseppe CAVALLARO 		if (priv->extend_desc)
1744aff3d9efSJoao Pinto 			p = (struct dma_desc *)(tx_q->dma_etx + entry);
1745c24602efSGiuseppe CAVALLARO 		else
1746aff3d9efSJoao Pinto 			p = tx_q->dma_tx + entry;
17477ac6653aSJeff Kirsher 
1748c363b658SFabrice Gasnier 		status = priv->hw->desc->tx_status(&priv->dev->stats,
17497ac6653aSJeff Kirsher 						      &priv->xstats, p,
17507ac6653aSJeff Kirsher 						      priv->ioaddr);
1751c363b658SFabrice Gasnier 		/* Check if the descriptor is owned by the DMA */
1752c363b658SFabrice Gasnier 		if (unlikely(status & tx_dma_own))
1753c363b658SFabrice Gasnier 			break;
1754c363b658SFabrice Gasnier 
1755c363b658SFabrice Gasnier 		/* Just consider the last segment and ...*/
1756c363b658SFabrice Gasnier 		if (likely(!(status & tx_not_ls))) {
1757c363b658SFabrice Gasnier 			/* ... verify the status error condition */
1758c363b658SFabrice Gasnier 			if (unlikely(status & tx_err)) {
1759c363b658SFabrice Gasnier 				priv->dev->stats.tx_errors++;
1760c363b658SFabrice Gasnier 			} else {
17617ac6653aSJeff Kirsher 				priv->dev->stats.tx_packets++;
17627ac6653aSJeff Kirsher 				priv->xstats.tx_pkt_n++;
1763c363b658SFabrice Gasnier 			}
1764ba1ffd74SGiuseppe CAVALLARO 			stmmac_get_tx_hwtstamp(priv, p, skb);
17657ac6653aSJeff Kirsher 		}
17667ac6653aSJeff Kirsher 
1767aff3d9efSJoao Pinto 		if (likely(tx_q->tx_skbuff_dma[entry].buf)) {
1768aff3d9efSJoao Pinto 			if (tx_q->tx_skbuff_dma[entry].map_as_page)
1769362b37beSGiuseppe CAVALLARO 				dma_unmap_page(priv->device,
1770aff3d9efSJoao Pinto 					       tx_q->tx_skbuff_dma[entry].buf,
1771aff3d9efSJoao Pinto 					       tx_q->tx_skbuff_dma[entry].len,
17727ac6653aSJeff Kirsher 					       DMA_TO_DEVICE);
1773362b37beSGiuseppe CAVALLARO 			else
1774362b37beSGiuseppe CAVALLARO 				dma_unmap_single(priv->device,
1775aff3d9efSJoao Pinto 						 tx_q->tx_skbuff_dma[entry].buf,
1776aff3d9efSJoao Pinto 						 tx_q->tx_skbuff_dma[entry].len,
1777362b37beSGiuseppe CAVALLARO 						 DMA_TO_DEVICE);
1778aff3d9efSJoao Pinto 			tx_q->tx_skbuff_dma[entry].buf = 0;
1779aff3d9efSJoao Pinto 			tx_q->tx_skbuff_dma[entry].len = 0;
1780aff3d9efSJoao Pinto 			tx_q->tx_skbuff_dma[entry].map_as_page = false;
1781cf32deecSRayagond Kokatanur 		}
1782f748be53SAlexandre TORGUE 
1783f748be53SAlexandre TORGUE 		if (priv->hw->mode->clean_desc3)
1784aff3d9efSJoao Pinto 			priv->hw->mode->clean_desc3(tx_q, p);
1785f748be53SAlexandre TORGUE 
1786aff3d9efSJoao Pinto 		tx_q->tx_skbuff_dma[entry].last_segment = false;
1787aff3d9efSJoao Pinto 		tx_q->tx_skbuff_dma[entry].is_jumbo = false;
17887ac6653aSJeff Kirsher 
17897ac6653aSJeff Kirsher 		if (likely(skb != NULL)) {
179038979574SBeniamino Galvani 			pkts_compl++;
179138979574SBeniamino Galvani 			bytes_compl += skb->len;
17927c565c33SEric W. Biederman 			dev_consume_skb_any(skb);
1793aff3d9efSJoao Pinto 			tx_q->tx_skbuff[entry] = NULL;
17947ac6653aSJeff Kirsher 		}
17957ac6653aSJeff Kirsher 
17964a7d666aSGiuseppe CAVALLARO 		priv->hw->desc->release_tx_desc(p, priv->mode);
17977ac6653aSJeff Kirsher 
1798e3ad57c9SGiuseppe Cavallaro 		entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
17997ac6653aSJeff Kirsher 	}
1800aff3d9efSJoao Pinto 	tx_q->dirty_tx = entry;
180138979574SBeniamino Galvani 
1802aff3d9efSJoao Pinto 	netdev_tx_completed_queue(netdev_get_tx_queue(priv->dev, queue),
1803aff3d9efSJoao Pinto 				  pkts_compl, bytes_compl);
180438979574SBeniamino Galvani 
1805aff3d9efSJoao Pinto 	if (unlikely(netif_tx_queue_stopped(netdev_get_tx_queue(priv->dev,
1806aff3d9efSJoao Pinto 							       queue))) &&
1807aff3d9efSJoao Pinto 	    stmmac_tx_avail(priv, queue) > STMMAC_TX_THRESH) {
1808b3e51069SLABBE Corentin 		netif_dbg(priv, tx_done, priv->dev,
1809b3e51069SLABBE Corentin 			  "%s: restart transmit\n", __func__);
1810aff3d9efSJoao Pinto 		netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, queue));
18117ac6653aSJeff Kirsher 	}
1812d765955dSGiuseppe CAVALLARO 
1813d765955dSGiuseppe CAVALLARO 	if ((priv->eee_enabled) && (!priv->tx_path_in_lpi_mode)) {
1814d765955dSGiuseppe CAVALLARO 		stmmac_enable_eee_mode(priv);
1815f5351ef7SGiuseppe CAVALLARO 		mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer));
1816d765955dSGiuseppe CAVALLARO 	}
1817739c8e14SLino Sanfilippo 	netif_tx_unlock(priv->dev);
18187ac6653aSJeff Kirsher }
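/*
 * Ring bookkeeping sketch for the cleanup above (hypothetical indexes,
 * assuming STMMAC_GET_ENTRY() advances an index modulo DMA_TX_SIZE):
 * descriptors between dirty_tx and cur_tx are pending; stmmac_tx_clean()
 * walks dirty_tx forward, stops early if a descriptor is still owned by
 * the DMA (tx_dma_own) and wakes the queue once more than
 * STMMAC_TX_THRESH entries are free again.
 */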
18197ac6653aSJeff Kirsher 
18204f513ecdSJoao Pinto static inline void stmmac_enable_dma_irq(struct stmmac_priv *priv, u32 chan)
18217ac6653aSJeff Kirsher {
18224f513ecdSJoao Pinto 	priv->hw->dma->enable_dma_irq(priv->ioaddr, chan);
18237ac6653aSJeff Kirsher }
18247ac6653aSJeff Kirsher 
18254f513ecdSJoao Pinto static inline void stmmac_disable_dma_irq(struct stmmac_priv *priv, u32 chan)
18267ac6653aSJeff Kirsher {
18274f513ecdSJoao Pinto 	priv->hw->dma->disable_dma_irq(priv->ioaddr, chan);
18287ac6653aSJeff Kirsher }
18297ac6653aSJeff Kirsher 
18307ac6653aSJeff Kirsher /**
1831732fdf0eSGiuseppe CAVALLARO  * stmmac_tx_err - to manage the tx error
183232ceabcaSGiuseppe CAVALLARO  * @priv: driver private structure
1833aff3d9efSJoao Pinto  * @queue: queue index
18347ac6653aSJeff Kirsher  * Description: it cleans the descriptors and restarts the transmission
1835732fdf0eSGiuseppe CAVALLARO  * in case of transmission errors.
18367ac6653aSJeff Kirsher  */
1837aff3d9efSJoao Pinto static void stmmac_tx_err(struct stmmac_priv *priv, u32 queue)
18387ac6653aSJeff Kirsher {
1839aff3d9efSJoao Pinto 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1840aff3d9efSJoao Pinto 	u32 chan = queue;
1841c24602efSGiuseppe CAVALLARO 	int i;
1842aff3d9efSJoao Pinto 
1843aff3d9efSJoao Pinto 	netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
18447ac6653aSJeff Kirsher 
1845ae4f0d46SJoao Pinto 	stmmac_stop_tx_dma(priv, chan);
1846aff3d9efSJoao Pinto 	dma_free_tx_skbufs(priv, queue);
1847e3ad57c9SGiuseppe Cavallaro 	for (i = 0; i < DMA_TX_SIZE; i++)
1848c24602efSGiuseppe CAVALLARO 		if (priv->extend_desc)
1849aff3d9efSJoao Pinto 			priv->hw->desc->init_tx_desc(&tx_q->dma_etx[i].basic,
1850c24602efSGiuseppe CAVALLARO 						     priv->mode,
1851e3ad57c9SGiuseppe Cavallaro 						     (i == DMA_TX_SIZE - 1));
1852c24602efSGiuseppe CAVALLARO 		else
1853aff3d9efSJoao Pinto 			priv->hw->desc->init_tx_desc(&tx_q->dma_tx[i],
1854c24602efSGiuseppe CAVALLARO 						     priv->mode,
1855e3ad57c9SGiuseppe Cavallaro 						     (i == DMA_TX_SIZE - 1));
1856aff3d9efSJoao Pinto 	tx_q->dirty_tx = 0;
1857aff3d9efSJoao Pinto 	tx_q->cur_tx = 0;
1858aff3d9efSJoao Pinto 	netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue));
1859ae4f0d46SJoao Pinto 	stmmac_start_tx_dma(priv, chan);
18607ac6653aSJeff Kirsher 
18617ac6653aSJeff Kirsher 	priv->dev->stats.tx_errors++;
1862aff3d9efSJoao Pinto 	netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, queue));
18637ac6653aSJeff Kirsher }
18647ac6653aSJeff Kirsher 
186532ceabcaSGiuseppe CAVALLARO /**
18666deee222SJoao Pinto  *  stmmac_set_dma_operation_mode - Set DMA operation mode by channel
18676deee222SJoao Pinto  *  @priv: driver private structure
18686deee222SJoao Pinto  *  @txmode: TX operating mode
18696deee222SJoao Pinto  *  @rxmode: RX operating mode
18706deee222SJoao Pinto  *  @chan: channel index
18716deee222SJoao Pinto  *  Description: it is used for configuring the DMA operation mode at
18726deee222SJoao Pinto  *  runtime in order to program the tx/rx DMA thresholds or Store-And-Forward
18736deee222SJoao Pinto  *  mode.
18746deee222SJoao Pinto  */
18756deee222SJoao Pinto static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
18766deee222SJoao Pinto 					  u32 rxmode, u32 chan)
18776deee222SJoao Pinto {
18786deee222SJoao Pinto 	int rxfifosz = priv->plat->rx_fifo_size;
18796deee222SJoao Pinto 
18806deee222SJoao Pinto 	if (rxfifosz == 0)
18816deee222SJoao Pinto 		rxfifosz = priv->dma_cap.rx_fifo_size;
18826deee222SJoao Pinto 
18836deee222SJoao Pinto 	if (priv->synopsys_id >= DWMAC_CORE_4_00) {
18846deee222SJoao Pinto 		priv->hw->dma->dma_rx_mode(priv->ioaddr, rxmode, chan,
18856deee222SJoao Pinto 					   rxfifosz);
18866deee222SJoao Pinto 		priv->hw->dma->dma_tx_mode(priv->ioaddr, txmode, chan);
18876deee222SJoao Pinto 	} else {
18886deee222SJoao Pinto 		priv->hw->dma->dma_mode(priv->ioaddr, txmode, rxmode,
18896deee222SJoao Pinto 					rxfifosz);
18906deee222SJoao Pinto 	}
18916deee222SJoao Pinto }
18926deee222SJoao Pinto 
18936deee222SJoao Pinto /**
1894732fdf0eSGiuseppe CAVALLARO  * stmmac_dma_interrupt - DMA ISR
189532ceabcaSGiuseppe CAVALLARO  * @priv: driver private structure
189632ceabcaSGiuseppe CAVALLARO  * Description: this is the DMA ISR. It is called by the main ISR.
1897732fdf0eSGiuseppe CAVALLARO  * It calls the dwmac dma routine and schedules the poll method when some
1898732fdf0eSGiuseppe CAVALLARO  * work can be done.
189932ceabcaSGiuseppe CAVALLARO  */
19007ac6653aSJeff Kirsher static void stmmac_dma_interrupt(struct stmmac_priv *priv)
19017ac6653aSJeff Kirsher {
1902d62a107aSJoao Pinto 	u32 tx_channel_count = priv->plat->tx_queues_to_use;
19037ac6653aSJeff Kirsher 	int status;
1904d62a107aSJoao Pinto 	u32 chan;
190568e5cfafSJoao Pinto 
1906d62a107aSJoao Pinto 	for (chan = 0; chan < tx_channel_count; chan++) {
1907aff3d9efSJoao Pinto 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[chan];
1908aff3d9efSJoao Pinto 
1909d62a107aSJoao Pinto 		status = priv->hw->dma->dma_interrupt(priv->ioaddr,
1910d62a107aSJoao Pinto 						      &priv->xstats, chan);
19119125cdd1SGiuseppe CAVALLARO 		if (likely((status & handle_rx)) || (status & handle_tx)) {
1912aff3d9efSJoao Pinto 			if (likely(napi_schedule_prep(&rx_q->napi))) {
19134f513ecdSJoao Pinto 				stmmac_disable_dma_irq(priv, chan);
1914aff3d9efSJoao Pinto 				__napi_schedule(&rx_q->napi);
19159125cdd1SGiuseppe CAVALLARO 			}
19169125cdd1SGiuseppe CAVALLARO 		}
1917d62a107aSJoao Pinto 
19189125cdd1SGiuseppe CAVALLARO 		if (unlikely(status & tx_hard_error_bump_tc)) {
19197ac6653aSJeff Kirsher 			/* Try to bump up the dma threshold on this failure */
1920b2dec116SSonic Zhang 			if (unlikely(priv->xstats.threshold != SF_DMA_MODE) &&
1921b2dec116SSonic Zhang 			    (tc <= 256)) {
19227ac6653aSJeff Kirsher 				tc += 64;
1923c405abe2SSonic Zhang 				if (priv->plat->force_thresh_dma_mode)
1924d62a107aSJoao Pinto 					stmmac_set_dma_operation_mode(priv,
1925d62a107aSJoao Pinto 								      tc,
1926d62a107aSJoao Pinto 								      tc,
1927d62a107aSJoao Pinto 								      chan);
1928c405abe2SSonic Zhang 				else
1929d62a107aSJoao Pinto 					stmmac_set_dma_operation_mode(priv,
1930d62a107aSJoao Pinto 								    tc,
1931d62a107aSJoao Pinto 								    SF_DMA_MODE,
1932d62a107aSJoao Pinto 								    chan);
19337ac6653aSJeff Kirsher 				priv->xstats.threshold = tc;
19347ac6653aSJeff Kirsher 			}
1935d62a107aSJoao Pinto 		} else if (unlikely(status == tx_hard_error)) {
19364e593262SJoao Pinto 			stmmac_tx_err(priv, chan);
19377ac6653aSJeff Kirsher 		}
1938d62a107aSJoao Pinto 	}
1939d62a107aSJoao Pinto }
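/*
 * Worked example of the threshold bump above (hypothetical starting
 * value; the tc module parameter usually defaults to 64): each
 * tx_hard_error_bump_tc event adds 64 while tc <= 256, so the TX
 * threshold walks 64 -> 128 -> 192 -> 256 -> 320 and then stops growing.
 * No bump is attempted once the driver operates in SF_DMA_MODE.
 */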
19407ac6653aSJeff Kirsher 
194132ceabcaSGiuseppe CAVALLARO /**
194232ceabcaSGiuseppe CAVALLARO  * stmmac_mmc_setup: setup the Mac Management Counters (MMC)
194332ceabcaSGiuseppe CAVALLARO  * @priv: driver private structure
194432ceabcaSGiuseppe CAVALLARO  * Description: this masks the MMC irq; in fact, the counters are managed in SW.
194532ceabcaSGiuseppe CAVALLARO  */
19461c901a46SGiuseppe CAVALLARO static void stmmac_mmc_setup(struct stmmac_priv *priv)
19471c901a46SGiuseppe CAVALLARO {
19481c901a46SGiuseppe CAVALLARO 	unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET |
19491c901a46SGiuseppe CAVALLARO 			    MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET;
19501c901a46SGiuseppe CAVALLARO 
1951ba1ffd74SGiuseppe CAVALLARO 	if (priv->synopsys_id >= DWMAC_CORE_4_00) {
1952ba1ffd74SGiuseppe CAVALLARO 		priv->ptpaddr = priv->ioaddr + PTP_GMAC4_OFFSET;
1953f748be53SAlexandre TORGUE 		priv->mmcaddr = priv->ioaddr + MMC_GMAC4_OFFSET;
1954ba1ffd74SGiuseppe CAVALLARO 	} else {
1955ba1ffd74SGiuseppe CAVALLARO 		priv->ptpaddr = priv->ioaddr + PTP_GMAC3_X_OFFSET;
195636ff7c1eSAlexandre TORGUE 		priv->mmcaddr = priv->ioaddr + MMC_GMAC3_X_OFFSET;
1957ba1ffd74SGiuseppe CAVALLARO 	}
195836ff7c1eSAlexandre TORGUE 
195936ff7c1eSAlexandre TORGUE 	dwmac_mmc_intr_all_mask(priv->mmcaddr);
19604f795b25SGiuseppe CAVALLARO 
19614f795b25SGiuseppe CAVALLARO 	if (priv->dma_cap.rmon) {
196236ff7c1eSAlexandre TORGUE 		dwmac_mmc_ctrl(priv->mmcaddr, mode);
19631c901a46SGiuseppe CAVALLARO 		memset(&priv->mmc, 0, sizeof(struct stmmac_counters));
19644f795b25SGiuseppe CAVALLARO 	} else
196538ddc59dSLABBE Corentin 		netdev_info(priv->dev, "No MAC Management Counters available\n");
19661c901a46SGiuseppe CAVALLARO }
19671c901a46SGiuseppe CAVALLARO 
1968732fdf0eSGiuseppe CAVALLARO /**
1969732fdf0eSGiuseppe CAVALLARO  * stmmac_selec_desc_mode - select among normal/alternate/extended descriptors
197032ceabcaSGiuseppe CAVALLARO  * @priv: driver private structure
197132ceabcaSGiuseppe CAVALLARO  * Description: select the Enhanced/Alternate or Normal descriptors.
1972732fdf0eSGiuseppe CAVALLARO  * In case of Enhanced/Alternate, it checks if the extended descriptors are
1973732fdf0eSGiuseppe CAVALLARO  * supported by the HW capability register.
1974ff3dd78cSGiuseppe CAVALLARO  */
197519e30c14SGiuseppe CAVALLARO static void stmmac_selec_desc_mode(struct stmmac_priv *priv)
197619e30c14SGiuseppe CAVALLARO {
197719e30c14SGiuseppe CAVALLARO 	if (priv->plat->enh_desc) {
197838ddc59dSLABBE Corentin 		dev_info(priv->device, "Enhanced/Alternate descriptors\n");
1979c24602efSGiuseppe CAVALLARO 
1980c24602efSGiuseppe CAVALLARO 		/* GMAC older than 3.50 has no extended descriptors */
1981c24602efSGiuseppe CAVALLARO 		if (priv->synopsys_id >= DWMAC_CORE_3_50) {
198238ddc59dSLABBE Corentin 			dev_info(priv->device, "Enabled extended descriptors\n");
1983c24602efSGiuseppe CAVALLARO 			priv->extend_desc = 1;
1984c24602efSGiuseppe CAVALLARO 		} else
198538ddc59dSLABBE Corentin 			dev_warn(priv->device, "Extended descriptors not supported\n");
1986c24602efSGiuseppe CAVALLARO 
198719e30c14SGiuseppe CAVALLARO 		priv->hw->desc = &enh_desc_ops;
198819e30c14SGiuseppe CAVALLARO 	} else {
198938ddc59dSLABBE Corentin 		dev_info(priv->device, "Normal descriptors\n");
199019e30c14SGiuseppe CAVALLARO 		priv->hw->desc = &ndesc_ops;
199119e30c14SGiuseppe CAVALLARO 	}
199219e30c14SGiuseppe CAVALLARO }
199319e30c14SGiuseppe CAVALLARO 
199419e30c14SGiuseppe CAVALLARO /**
1995732fdf0eSGiuseppe CAVALLARO  * stmmac_get_hw_features - get MAC capabilities from the HW cap. register.
199632ceabcaSGiuseppe CAVALLARO  * @priv: driver private structure
199719e30c14SGiuseppe CAVALLARO  * Description:
199819e30c14SGiuseppe CAVALLARO  *  new GMAC chip generations have a new register to indicate the
1999e7434821SGiuseppe CAVALLARO  *  presence of the optional feature/functions.
200019e30c14SGiuseppe CAVALLARO  *  This can also be used to override the value passed through the
200119e30c14SGiuseppe CAVALLARO  *  platform, which is necessary for old MAC10/100 and GMAC chips.
2002e7434821SGiuseppe CAVALLARO  */
2003e7434821SGiuseppe CAVALLARO static int stmmac_get_hw_features(struct stmmac_priv *priv)
2004e7434821SGiuseppe CAVALLARO {
2005f10a6a35SAlexandre TORGUE 	u32 ret = 0;
20063c20f72fSGiuseppe CAVALLARO 
20075e6efe88SGiuseppe CAVALLARO 	if (priv->hw->dma->get_hw_feature) {
2008f10a6a35SAlexandre TORGUE 		priv->hw->dma->get_hw_feature(priv->ioaddr,
2009f10a6a35SAlexandre TORGUE 					      &priv->dma_cap);
2010f10a6a35SAlexandre TORGUE 		ret = 1;
201119e30c14SGiuseppe CAVALLARO 	}
2012e7434821SGiuseppe CAVALLARO 
2013f10a6a35SAlexandre TORGUE 	return ret;
2014e7434821SGiuseppe CAVALLARO }
2015e7434821SGiuseppe CAVALLARO 
201632ceabcaSGiuseppe CAVALLARO /**
2017732fdf0eSGiuseppe CAVALLARO  * stmmac_check_ether_addr - check if the MAC addr is valid
201832ceabcaSGiuseppe CAVALLARO  * @priv: driver private structure
201932ceabcaSGiuseppe CAVALLARO  * Description:
202032ceabcaSGiuseppe CAVALLARO  * it verifies that the MAC address is valid; in case of failure it
202132ceabcaSGiuseppe CAVALLARO  * generates a random MAC address
202232ceabcaSGiuseppe CAVALLARO  */
2023bfab27a1SGiuseppe CAVALLARO static void stmmac_check_ether_addr(struct stmmac_priv *priv)
2024bfab27a1SGiuseppe CAVALLARO {
2025bfab27a1SGiuseppe CAVALLARO 	if (!is_valid_ether_addr(priv->dev->dev_addr)) {
20267ed24bbeSVince Bridgers 		priv->hw->mac->get_umac_addr(priv->hw,
2027bfab27a1SGiuseppe CAVALLARO 					     priv->dev->dev_addr, 0);
2028bfab27a1SGiuseppe CAVALLARO 		if (!is_valid_ether_addr(priv->dev->dev_addr))
2029f2cedb63SDanny Kukawka 			eth_hw_addr_random(priv->dev);
203038ddc59dSLABBE Corentin 		netdev_info(priv->dev, "device MAC address %pM\n",
2031bfab27a1SGiuseppe CAVALLARO 			    priv->dev->dev_addr);
2032bfab27a1SGiuseppe CAVALLARO 	}
2033c88460b7SHans de Goede }
2034bfab27a1SGiuseppe CAVALLARO 
203532ceabcaSGiuseppe CAVALLARO /**
2036732fdf0eSGiuseppe CAVALLARO  * stmmac_init_dma_engine - DMA init.
203732ceabcaSGiuseppe CAVALLARO  * @priv: driver private structure
203832ceabcaSGiuseppe CAVALLARO  * Description:
203932ceabcaSGiuseppe CAVALLARO  * It inits the DMA invoking the specific MAC/GMAC callback.
204032ceabcaSGiuseppe CAVALLARO  * Some DMA parameters can be passed from the platform;
204132ceabcaSGiuseppe CAVALLARO  * in case these are not passed, a default is kept for the MAC or GMAC.
204232ceabcaSGiuseppe CAVALLARO  */
20430f1f88a8SGiuseppe CAVALLARO static int stmmac_init_dma_engine(struct stmmac_priv *priv)
20440f1f88a8SGiuseppe CAVALLARO {
204547f2a9ceSJoao Pinto 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
204647f2a9ceSJoao Pinto 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2047aff3d9efSJoao Pinto 	struct stmmac_rx_queue *rx_q;
2048aff3d9efSJoao Pinto 	struct stmmac_tx_queue *tx_q;
204947f2a9ceSJoao Pinto 	u32 dummy_dma_rx_phy = 0;
205047f2a9ceSJoao Pinto 	u32 dummy_dma_tx_phy = 0;
205147f2a9ceSJoao Pinto 	u32 chan = 0;
2052c24602efSGiuseppe CAVALLARO 	int atds = 0;
2053495db273SGiuseppe Cavallaro 	int ret = 0;
20540f1f88a8SGiuseppe CAVALLARO 
2055a332e2faSNiklas Cassel 	if (!priv->plat->dma_cfg || !priv->plat->dma_cfg->pbl) {
2056a332e2faSNiklas Cassel 		dev_err(priv->device, "Invalid DMA configuration\n");
205789ab75bfSNiklas Cassel 		return -EINVAL;
20580f1f88a8SGiuseppe CAVALLARO 	}
20590f1f88a8SGiuseppe CAVALLARO 
2060c24602efSGiuseppe CAVALLARO 	if (priv->extend_desc && (priv->mode == STMMAC_RING_MODE))
2061c24602efSGiuseppe CAVALLARO 		atds = 1;
2062c24602efSGiuseppe CAVALLARO 
2063495db273SGiuseppe Cavallaro 	ret = priv->hw->dma->reset(priv->ioaddr);
2064495db273SGiuseppe Cavallaro 	if (ret) {
2065495db273SGiuseppe Cavallaro 		dev_err(priv->device, "Failed to reset the dma\n");
2066495db273SGiuseppe Cavallaro 		return ret;
2067495db273SGiuseppe Cavallaro 	}
2068495db273SGiuseppe Cavallaro 
2069f748be53SAlexandre TORGUE 	if (priv->synopsys_id >= DWMAC_CORE_4_00) {
207047f2a9ceSJoao Pinto 		/* DMA Configuration */
207147f2a9ceSJoao Pinto 		priv->hw->dma->init(priv->ioaddr, priv->plat->dma_cfg,
207247f2a9ceSJoao Pinto 				    dummy_dma_tx_phy, dummy_dma_rx_phy, atds);
207347f2a9ceSJoao Pinto 
207447f2a9ceSJoao Pinto 		/* DMA RX Channel Configuration */
207547f2a9ceSJoao Pinto 		for (chan = 0; chan < rx_channels_count; chan++) {
2076aff3d9efSJoao Pinto 			rx_q = &priv->rx_queue[chan];
2077aff3d9efSJoao Pinto 
207847f2a9ceSJoao Pinto 			priv->hw->dma->init_rx_chan(priv->ioaddr,
207947f2a9ceSJoao Pinto 						    priv->plat->dma_cfg,
2080aff3d9efSJoao Pinto 						    rx_q->dma_rx_phy, chan);
208147f2a9ceSJoao Pinto 
2082aff3d9efSJoao Pinto 			rx_q->rx_tail_addr = rx_q->dma_rx_phy +
2083f748be53SAlexandre TORGUE 				    (DMA_RX_SIZE * sizeof(struct dma_desc));
208447f2a9ceSJoao Pinto 			priv->hw->dma->set_rx_tail_ptr(priv->ioaddr,
2085aff3d9efSJoao Pinto 						       rx_q->rx_tail_addr,
208647f2a9ceSJoao Pinto 						       chan);
208747f2a9ceSJoao Pinto 		}
208847f2a9ceSJoao Pinto 
208947f2a9ceSJoao Pinto 		/* DMA TX Channel Configuration */
209047f2a9ceSJoao Pinto 		for (chan = 0; chan < tx_channels_count; chan++) {
2091aff3d9efSJoao Pinto 			tx_q = &priv->tx_queue[chan];
2092aff3d9efSJoao Pinto 
209347f2a9ceSJoao Pinto 			priv->hw->dma->init_chan(priv->ioaddr,
209447f2a9ceSJoao Pinto 						 priv->plat->dma_cfg,
209547f2a9ceSJoao Pinto 						 chan);
209647f2a9ceSJoao Pinto 
209747f2a9ceSJoao Pinto 			priv->hw->dma->init_tx_chan(priv->ioaddr,
209847f2a9ceSJoao Pinto 						    priv->plat->dma_cfg,
2099aff3d9efSJoao Pinto 						    tx_q->dma_tx_phy, chan);
2100f748be53SAlexandre TORGUE 
2101aff3d9efSJoao Pinto 			tx_q->tx_tail_addr = tx_q->dma_tx_phy +
2102f748be53SAlexandre TORGUE 				    (DMA_TX_SIZE * sizeof(struct dma_desc));
210347f2a9ceSJoao Pinto 			priv->hw->dma->set_tx_tail_ptr(priv->ioaddr,
2104aff3d9efSJoao Pinto 						       tx_q->tx_tail_addr,
210547f2a9ceSJoao Pinto 						       chan);
210647f2a9ceSJoao Pinto 		}
210747f2a9ceSJoao Pinto 	} else {
2108aff3d9efSJoao Pinto 		rx_q = &priv->rx_queue[chan];
2109aff3d9efSJoao Pinto 		tx_q = &priv->tx_queue[chan];
2110aff3d9efSJoao Pinto 
211147f2a9ceSJoao Pinto 		priv->hw->dma->init(priv->ioaddr, priv->plat->dma_cfg,
2112aff3d9efSJoao Pinto 				    tx_q->dma_tx_phy, rx_q->dma_rx_phy, atds);
2113f748be53SAlexandre TORGUE 	}
2114f748be53SAlexandre TORGUE 
2115f748be53SAlexandre TORGUE 	if (priv->plat->axi && priv->hw->dma->axi)
2116afea0365SGiuseppe Cavallaro 		priv->hw->dma->axi(priv->ioaddr, priv->plat->axi);
2117afea0365SGiuseppe Cavallaro 
2118495db273SGiuseppe Cavallaro 	return ret;
21190f1f88a8SGiuseppe CAVALLARO }
21200f1f88a8SGiuseppe CAVALLARO 
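/*
 * Illustrative note (not part of the original driver sources): on GMAC4 and
 * newer cores each DMA channel gets its own ring and tail pointer, set one
 * full ring past the ring base, as done above:
 *
 *	rx_tail_addr = dma_rx_phy + DMA_RX_SIZE * sizeof(struct dma_desc)
 *
 * e.g. assuming the default DMA_RX_SIZE of 512 entries and normal 16-byte
 * descriptors, the tail sits 8192 bytes past dma_rx_phy.  Older cores fall
 * back to the single init() call that programs the channel 0 rings only.
 */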
2121bfab27a1SGiuseppe CAVALLARO /**
2122732fdf0eSGiuseppe CAVALLARO  * stmmac_tx_timer - mitigation sw timer for tx.
21239125cdd1SGiuseppe CAVALLARO  * @data: data pointer
21249125cdd1SGiuseppe CAVALLARO  * Description:
21259125cdd1SGiuseppe CAVALLARO  * This is the timer handler used to directly invoke stmmac_tx_clean.
21269125cdd1SGiuseppe CAVALLARO  */
21279125cdd1SGiuseppe CAVALLARO static void stmmac_tx_timer(unsigned long data)
21289125cdd1SGiuseppe CAVALLARO {
21299125cdd1SGiuseppe CAVALLARO 	struct stmmac_priv *priv = (struct stmmac_priv *)data;
2130aff3d9efSJoao Pinto 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
2131aff3d9efSJoao Pinto 	u32 queue;
21329125cdd1SGiuseppe CAVALLARO 
2133aff3d9efSJoao Pinto 	/* let's scan all the tx queues */
2134aff3d9efSJoao Pinto 	for (queue = 0; queue < tx_queues_count; queue++)
2135aff3d9efSJoao Pinto 		stmmac_tx_clean(priv, queue);
2136aff3d9efSJoao Pinto }
2137aff3d9efSJoao Pinto 
2138aff3d9efSJoao Pinto /**
2139aff3d9efSJoao Pinto  * stmmac_stop_all_queues - Stop all queues
2140aff3d9efSJoao Pinto  * @priv: driver private structure
2141aff3d9efSJoao Pinto  */
2142aff3d9efSJoao Pinto static void stmmac_stop_all_queues(struct stmmac_priv *priv)
2143aff3d9efSJoao Pinto {
2144aff3d9efSJoao Pinto 	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
2145aff3d9efSJoao Pinto 	u32 queue;
2146aff3d9efSJoao Pinto 
2147aff3d9efSJoao Pinto 	for (queue = 0; queue < tx_queues_cnt; queue++)
2148aff3d9efSJoao Pinto 		netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
2149aff3d9efSJoao Pinto }
2150aff3d9efSJoao Pinto 
2151aff3d9efSJoao Pinto /**
2152aff3d9efSJoao Pinto  * stmmac_start_all_queues - Start all queues
2153aff3d9efSJoao Pinto  * @priv: driver private structure
2154aff3d9efSJoao Pinto  */
2155aff3d9efSJoao Pinto static void stmmac_start_all_queues(struct stmmac_priv *priv)
2156aff3d9efSJoao Pinto {
2157aff3d9efSJoao Pinto 	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
2158aff3d9efSJoao Pinto 	u32 queue;
2159aff3d9efSJoao Pinto 
2160aff3d9efSJoao Pinto 	for (queue = 0; queue < tx_queues_cnt; queue++)
2161aff3d9efSJoao Pinto 		netif_tx_start_queue(netdev_get_tx_queue(priv->dev, queue));
2162aff3d9efSJoao Pinto }
2163aff3d9efSJoao Pinto 
2164aff3d9efSJoao Pinto /**
2165aff3d9efSJoao Pinto  * stmmac_disable_all_queues - Disable all queues
2166aff3d9efSJoao Pinto  * @priv: driver private structure
2167aff3d9efSJoao Pinto  */
2168aff3d9efSJoao Pinto static void stmmac_disable_all_queues(struct stmmac_priv *priv)
2169aff3d9efSJoao Pinto {
2170aff3d9efSJoao Pinto 	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
2171aff3d9efSJoao Pinto 	u32 queue;
2172aff3d9efSJoao Pinto 
2173aff3d9efSJoao Pinto 	for (queue = 0; queue < rx_queues_cnt; queue++) {
2174aff3d9efSJoao Pinto 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
2175aff3d9efSJoao Pinto 
2176aff3d9efSJoao Pinto 		napi_disable(&rx_q->napi);
2177aff3d9efSJoao Pinto 	}
2178aff3d9efSJoao Pinto }
2179aff3d9efSJoao Pinto 
2180aff3d9efSJoao Pinto /**
2181aff3d9efSJoao Pinto  * stmmac_enable_all_queues - Enable all queues
2182aff3d9efSJoao Pinto  * @priv: driver private structure
2183aff3d9efSJoao Pinto  */
2184aff3d9efSJoao Pinto static void stmmac_enable_all_queues(struct stmmac_priv *priv)
2185aff3d9efSJoao Pinto {
2186aff3d9efSJoao Pinto 	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
2187aff3d9efSJoao Pinto 	u32 queue;
2188aff3d9efSJoao Pinto 
2189aff3d9efSJoao Pinto 	for (queue = 0; queue < rx_queues_cnt; queue++) {
2190aff3d9efSJoao Pinto 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
2191aff3d9efSJoao Pinto 
2192aff3d9efSJoao Pinto 		napi_enable(&rx_q->napi);
2193aff3d9efSJoao Pinto 	}
21949125cdd1SGiuseppe CAVALLARO }
21959125cdd1SGiuseppe CAVALLARO 
21969125cdd1SGiuseppe CAVALLARO /**
2197732fdf0eSGiuseppe CAVALLARO  * stmmac_init_tx_coalesce - init tx mitigation options.
219832ceabcaSGiuseppe CAVALLARO  * @priv: driver private structure
21999125cdd1SGiuseppe CAVALLARO  * Description:
22009125cdd1SGiuseppe CAVALLARO  * This inits the transmit coalesce parameters: i.e. timer rate,
22019125cdd1SGiuseppe CAVALLARO  * timer handler and default threshold used for enabling the
22029125cdd1SGiuseppe CAVALLARO  * interrupt on completion bit.
22039125cdd1SGiuseppe CAVALLARO  */
22049125cdd1SGiuseppe CAVALLARO static void stmmac_init_tx_coalesce(struct stmmac_priv *priv)
22059125cdd1SGiuseppe CAVALLARO {
22069125cdd1SGiuseppe CAVALLARO 	priv->tx_coal_frames = STMMAC_TX_FRAMES;
22079125cdd1SGiuseppe CAVALLARO 	priv->tx_coal_timer = STMMAC_COAL_TX_TIMER;
22089125cdd1SGiuseppe CAVALLARO 	init_timer(&priv->txtimer);
22099125cdd1SGiuseppe CAVALLARO 	priv->txtimer.expires = STMMAC_COAL_TIMER(priv->tx_coal_timer);
22109125cdd1SGiuseppe CAVALLARO 	priv->txtimer.data = (unsigned long)priv;
22119125cdd1SGiuseppe CAVALLARO 	priv->txtimer.function = stmmac_tx_timer;
22129125cdd1SGiuseppe CAVALLARO 	add_timer(&priv->txtimer);
22139125cdd1SGiuseppe CAVALLARO }
22149125cdd1SGiuseppe CAVALLARO 
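/*
 * Illustrative note (not part of the original driver sources): the
 * parameters set above drive the TX mitigation logic in stmmac_xmit() and
 * stmmac_tso_xmit().  Assuming the default STMMAC_TX_FRAMES of 25, the IC
 * (interrupt on completion) bit is requested roughly once every 25 queued
 * frames (fragments included); frames in between are reclaimed when the
 * txtimer armed here expires and stmmac_tx_timer() runs stmmac_tx_clean()
 * on every TX queue.
 */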
22154854ab99SJoao Pinto static void stmmac_set_rings_length(struct stmmac_priv *priv)
22164854ab99SJoao Pinto {
22174854ab99SJoao Pinto 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
22184854ab99SJoao Pinto 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
22194854ab99SJoao Pinto 	u32 chan;
22204854ab99SJoao Pinto 
22214854ab99SJoao Pinto 	/* set TX ring length */
22224854ab99SJoao Pinto 	if (priv->hw->dma->set_tx_ring_len) {
22234854ab99SJoao Pinto 		for (chan = 0; chan < tx_channels_count; chan++)
22244854ab99SJoao Pinto 			priv->hw->dma->set_tx_ring_len(priv->ioaddr,
22254854ab99SJoao Pinto 						       (DMA_TX_SIZE - 1), chan);
22264854ab99SJoao Pinto 	}
22274854ab99SJoao Pinto 
22284854ab99SJoao Pinto 	/* set RX ring length */
22294854ab99SJoao Pinto 	if (priv->hw->dma->set_rx_ring_len) {
22304854ab99SJoao Pinto 		for (chan = 0; chan < rx_channels_count; chan++)
22314854ab99SJoao Pinto 			priv->hw->dma->set_rx_ring_len(priv->ioaddr,
22324854ab99SJoao Pinto 						       (DMA_RX_SIZE - 1), chan);
22334854ab99SJoao Pinto 	}
22344854ab99SJoao Pinto }
22354854ab99SJoao Pinto 
22369125cdd1SGiuseppe CAVALLARO /**
22376a3a7193SJoao Pinto  *  stmmac_set_tx_queue_weight - Set TX queue weight
22386a3a7193SJoao Pinto  *  @priv: driver private structure
22396a3a7193SJoao Pinto  *  Description: It is used for setting the TX queue weights
22406a3a7193SJoao Pinto  */
22416a3a7193SJoao Pinto static void stmmac_set_tx_queue_weight(struct stmmac_priv *priv)
22426a3a7193SJoao Pinto {
22436a3a7193SJoao Pinto 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
22446a3a7193SJoao Pinto 	u32 weight;
22456a3a7193SJoao Pinto 	u32 queue;
22466a3a7193SJoao Pinto 
22476a3a7193SJoao Pinto 	for (queue = 0; queue < tx_queues_count; queue++) {
22486a3a7193SJoao Pinto 		weight = priv->plat->tx_queues_cfg[queue].weight;
22496a3a7193SJoao Pinto 		priv->hw->mac->set_mtl_tx_queue_weight(priv->hw, weight, queue);
22506a3a7193SJoao Pinto 	}
22516a3a7193SJoao Pinto }
22526a3a7193SJoao Pinto 
22536a3a7193SJoao Pinto /**
225419d91873SJoao Pinto  *  stmmac_configure_cbs - Configure CBS in TX queue
225519d91873SJoao Pinto  *  @priv: driver private structure
225619d91873SJoao Pinto  *  Description: It is used for configuring CBS in AVB TX queues
225719d91873SJoao Pinto  */
225819d91873SJoao Pinto static void stmmac_configure_cbs(struct stmmac_priv *priv)
225919d91873SJoao Pinto {
226019d91873SJoao Pinto 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
226119d91873SJoao Pinto 	u32 mode_to_use;
226219d91873SJoao Pinto 	u32 queue;
226319d91873SJoao Pinto 
226419d91873SJoao Pinto 	for (queue = 0; queue < tx_queues_count; queue++) {
226519d91873SJoao Pinto 		mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use;
226619d91873SJoao Pinto 		if (mode_to_use == MTL_QUEUE_DCB)
226719d91873SJoao Pinto 			continue;
226819d91873SJoao Pinto 
226919d91873SJoao Pinto 		priv->hw->mac->config_cbs(priv->hw,
227019d91873SJoao Pinto 				priv->plat->tx_queues_cfg[queue].send_slope,
227119d91873SJoao Pinto 				priv->plat->tx_queues_cfg[queue].idle_slope,
227219d91873SJoao Pinto 				priv->plat->tx_queues_cfg[queue].high_credit,
227319d91873SJoao Pinto 				priv->plat->tx_queues_cfg[queue].low_credit,
227419d91873SJoao Pinto 				queue);
227519d91873SJoao Pinto 	}
227619d91873SJoao Pinto }
227719d91873SJoao Pinto 
227819d91873SJoao Pinto /**
2279d43042f4SJoao Pinto  *  stmmac_rx_queue_dma_chan_map - Map RX queue to RX dma channel
2280d43042f4SJoao Pinto  *  @priv: driver private structure
2281d43042f4SJoao Pinto  *  Description: It is used for mapping RX queues to RX dma channels
2282d43042f4SJoao Pinto  */
2283d43042f4SJoao Pinto static void stmmac_rx_queue_dma_chan_map(struct stmmac_priv *priv)
2284d43042f4SJoao Pinto {
2285d43042f4SJoao Pinto 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
2286d43042f4SJoao Pinto 	u32 queue;
2287d43042f4SJoao Pinto 	u32 chan;
2288d43042f4SJoao Pinto 
2289d43042f4SJoao Pinto 	for (queue = 0; queue < rx_queues_count; queue++) {
2290d43042f4SJoao Pinto 		chan = priv->plat->rx_queues_cfg[queue].chan;
2291d43042f4SJoao Pinto 		priv->hw->mac->map_mtl_to_dma(priv->hw, queue, chan);
2292d43042f4SJoao Pinto 	}
2293d43042f4SJoao Pinto }
2294d43042f4SJoao Pinto 
2295d43042f4SJoao Pinto /**
2296a8f5102aSJoao Pinto  *  stmmac_mac_config_rx_queues_prio - Configure RX Queue priority
2297a8f5102aSJoao Pinto  *  @priv: driver private structure
2298a8f5102aSJoao Pinto  *  Description: It is used for configuring the RX Queue Priority
2299a8f5102aSJoao Pinto  */
2300a8f5102aSJoao Pinto static void stmmac_mac_config_rx_queues_prio(struct stmmac_priv *priv)
2301a8f5102aSJoao Pinto {
2302a8f5102aSJoao Pinto 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
2303a8f5102aSJoao Pinto 	u32 queue;
2304a8f5102aSJoao Pinto 	u32 prio;
2305a8f5102aSJoao Pinto 
2306a8f5102aSJoao Pinto 	for (queue = 0; queue < rx_queues_count; queue++) {
2307a8f5102aSJoao Pinto 		if (!priv->plat->rx_queues_cfg[queue].use_prio)
2308a8f5102aSJoao Pinto 			continue;
2309a8f5102aSJoao Pinto 
2310a8f5102aSJoao Pinto 		prio = priv->plat->rx_queues_cfg[queue].prio;
2311a8f5102aSJoao Pinto 		priv->hw->mac->rx_queue_prio(priv->hw, prio, queue);
2312a8f5102aSJoao Pinto 	}
2313a8f5102aSJoao Pinto }
2314a8f5102aSJoao Pinto 
2315a8f5102aSJoao Pinto /**
2316a8f5102aSJoao Pinto  *  stmmac_mac_config_tx_queues_prio - Configure TX Queue priority
2317a8f5102aSJoao Pinto  *  @priv: driver private structure
2318a8f5102aSJoao Pinto  *  Description: It is used for configuring the TX Queue Priority
2319a8f5102aSJoao Pinto  */
2320a8f5102aSJoao Pinto static void stmmac_mac_config_tx_queues_prio(struct stmmac_priv *priv)
2321a8f5102aSJoao Pinto {
2322a8f5102aSJoao Pinto 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
2323a8f5102aSJoao Pinto 	u32 queue;
2324a8f5102aSJoao Pinto 	u32 prio;
2325a8f5102aSJoao Pinto 
2326a8f5102aSJoao Pinto 	for (queue = 0; queue < tx_queues_count; queue++) {
2327a8f5102aSJoao Pinto 		if (!priv->plat->tx_queues_cfg[queue].use_prio)
2328a8f5102aSJoao Pinto 			continue;
2329a8f5102aSJoao Pinto 
2330a8f5102aSJoao Pinto 		prio = priv->plat->tx_queues_cfg[queue].prio;
2331a8f5102aSJoao Pinto 		priv->hw->mac->tx_queue_prio(priv->hw, prio, queue);
2332a8f5102aSJoao Pinto 	}
2333a8f5102aSJoao Pinto }
2334a8f5102aSJoao Pinto 
2335a8f5102aSJoao Pinto /**
2336abe80fdcSJoao Pinto  *  stmmac_mac_config_rx_queues_routing - Configure RX Queue Routing
2337abe80fdcSJoao Pinto  *  @priv: driver private structure
2338abe80fdcSJoao Pinto  *  Description: It is used for configuring the RX queue routing
2339abe80fdcSJoao Pinto  */
2340abe80fdcSJoao Pinto static void stmmac_mac_config_rx_queues_routing(struct stmmac_priv *priv)
2341abe80fdcSJoao Pinto {
2342abe80fdcSJoao Pinto 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
2343abe80fdcSJoao Pinto 	u32 queue;
2344abe80fdcSJoao Pinto 	u8 packet;
2345abe80fdcSJoao Pinto 
2346abe80fdcSJoao Pinto 	for (queue = 0; queue < rx_queues_count; queue++) {
2347abe80fdcSJoao Pinto 		/* no specific packet type routing specified for the queue */
2348abe80fdcSJoao Pinto 		if (priv->plat->rx_queues_cfg[queue].pkt_route == 0x0)
2349abe80fdcSJoao Pinto 			continue;
2350abe80fdcSJoao Pinto 
2351abe80fdcSJoao Pinto 		packet = priv->plat->rx_queues_cfg[queue].pkt_route;
2352abe80fdcSJoao Pinto 		priv->hw->mac->rx_queue_routing(priv->hw, packet, queue);
2353abe80fdcSJoao Pinto 	}
2354abe80fdcSJoao Pinto }
2355abe80fdcSJoao Pinto 
2356abe80fdcSJoao Pinto /**
2357d0a9c9f9SJoao Pinto  *  stmmac_mtl_configuration - Configure MTL
2358d0a9c9f9SJoao Pinto  *  @priv: driver private structure
2359d0a9c9f9SJoao Pinto  *  Description: It is used for configuring MTL
2360d0a9c9f9SJoao Pinto  */
2361d0a9c9f9SJoao Pinto static void stmmac_mtl_configuration(struct stmmac_priv *priv)
2362d0a9c9f9SJoao Pinto {
2363d0a9c9f9SJoao Pinto 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
2364d0a9c9f9SJoao Pinto 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
2365d0a9c9f9SJoao Pinto 
23666a3a7193SJoao Pinto 	if (tx_queues_count > 1 && priv->hw->mac->set_mtl_tx_queue_weight)
23676a3a7193SJoao Pinto 		stmmac_set_tx_queue_weight(priv);
23686a3a7193SJoao Pinto 
2369d0a9c9f9SJoao Pinto 	/* Configure MTL RX algorithms */
2370d0a9c9f9SJoao Pinto 	if (rx_queues_count > 1 && priv->hw->mac->prog_mtl_rx_algorithms)
2371d0a9c9f9SJoao Pinto 		priv->hw->mac->prog_mtl_rx_algorithms(priv->hw,
2372d0a9c9f9SJoao Pinto 						priv->plat->rx_sched_algorithm);
2373d0a9c9f9SJoao Pinto 
2374d0a9c9f9SJoao Pinto 	/* Configure MTL TX algorithms */
2375d0a9c9f9SJoao Pinto 	if (tx_queues_count > 1 && priv->hw->mac->prog_mtl_tx_algorithms)
2376d0a9c9f9SJoao Pinto 		priv->hw->mac->prog_mtl_tx_algorithms(priv->hw,
2377d0a9c9f9SJoao Pinto 						priv->plat->tx_sched_algorithm);
2378d0a9c9f9SJoao Pinto 
237919d91873SJoao Pinto 	/* Configure CBS in AVB TX queues */
238019d91873SJoao Pinto 	if (tx_queues_count > 1 && priv->hw->mac->config_cbs)
238119d91873SJoao Pinto 		stmmac_configure_cbs(priv);
238219d91873SJoao Pinto 
2383d43042f4SJoao Pinto 	/* Map RX MTL to DMA channels */
2384d43042f4SJoao Pinto 	if (rx_queues_count > 1 && priv->hw->mac->map_mtl_to_dma)
2385d43042f4SJoao Pinto 		stmmac_rx_queue_dma_chan_map(priv);
2386d43042f4SJoao Pinto 
2387d0a9c9f9SJoao Pinto 	/* Enable MAC RX Queues */
2388f3976874SThierry Reding 	if (priv->hw->mac->rx_queue_enable)
2389d0a9c9f9SJoao Pinto 		stmmac_mac_enable_rx_queues(priv);
23906deee222SJoao Pinto 
2391a8f5102aSJoao Pinto 	/* Set RX priorities */
2392a8f5102aSJoao Pinto 	if (rx_queues_count > 1 && priv->hw->mac->rx_queue_prio)
2393a8f5102aSJoao Pinto 		stmmac_mac_config_rx_queues_prio(priv);
2394a8f5102aSJoao Pinto 
2395a8f5102aSJoao Pinto 	/* Set TX priorities */
2396a8f5102aSJoao Pinto 	if (tx_queues_count > 1 && priv->hw->mac->tx_queue_prio)
2397a8f5102aSJoao Pinto 		stmmac_mac_config_tx_queues_prio(priv);
2398abe80fdcSJoao Pinto 
2399abe80fdcSJoao Pinto 	/* Set RX routing */
2400abe80fdcSJoao Pinto 	if (rx_queues_count > 1 && priv->hw->mac->rx_queue_routing)
2401abe80fdcSJoao Pinto 		stmmac_mac_config_rx_queues_routing(priv);
2402d0a9c9f9SJoao Pinto }
2403d0a9c9f9SJoao Pinto 
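/*
 * Illustrative note (not part of the original driver sources): most of the
 * MTL knobs above are only programmed when more than one queue is
 * configured in that direction; with a single RX and a single TX queue the
 * function effectively reduces to the "Enable MAC RX Queues" step (when
 * the core provides rx_queue_enable).
 */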
2404d0a9c9f9SJoao Pinto /**
2405732fdf0eSGiuseppe CAVALLARO  * stmmac_hw_setup - setup mac in a usable state.
2406523f11b5SSrinivas Kandagatla  *  @dev : pointer to the device structure.
2407523f11b5SSrinivas Kandagatla  *  Description:
2408732fdf0eSGiuseppe CAVALLARO  *  this is the main function to setup the HW in a usable state: the DMA
2409732fdf0eSGiuseppe CAVALLARO  *  engine is reset, the core registers are configured (e.g. AXI,
2410732fdf0eSGiuseppe CAVALLARO  *  checksum features, timers) and the DMA is ready to start receiving and
2411732fdf0eSGiuseppe CAVALLARO  *  transmitting.
2412523f11b5SSrinivas Kandagatla  *  Return value:
2413523f11b5SSrinivas Kandagatla  *  0 on success and an appropriate (-)ve integer as defined in errno.h
2414523f11b5SSrinivas Kandagatla  *  file on failure.
2415523f11b5SSrinivas Kandagatla  */
2416fe131929SHuacai Chen static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
2417523f11b5SSrinivas Kandagatla {
2418523f11b5SSrinivas Kandagatla 	struct stmmac_priv *priv = netdev_priv(dev);
24193c55d4d0SJoao Pinto 	u32 rx_cnt = priv->plat->rx_queues_to_use;
2420146617b8SJoao Pinto 	u32 tx_cnt = priv->plat->tx_queues_to_use;
2421146617b8SJoao Pinto 	u32 chan;
2422523f11b5SSrinivas Kandagatla 	int ret;
2423523f11b5SSrinivas Kandagatla 
2424523f11b5SSrinivas Kandagatla 	/* DMA initialization and SW reset */
2425523f11b5SSrinivas Kandagatla 	ret = stmmac_init_dma_engine(priv);
2426523f11b5SSrinivas Kandagatla 	if (ret < 0) {
242738ddc59dSLABBE Corentin 		netdev_err(priv->dev, "%s: DMA engine initialization failed\n",
242838ddc59dSLABBE Corentin 			   __func__);
2429523f11b5SSrinivas Kandagatla 		return ret;
2430523f11b5SSrinivas Kandagatla 	}
2431523f11b5SSrinivas Kandagatla 
2432523f11b5SSrinivas Kandagatla 	/* Copy the MAC addr into the HW  */
24337ed24bbeSVince Bridgers 	priv->hw->mac->set_umac_addr(priv->hw, dev->dev_addr, 0);
2434523f11b5SSrinivas Kandagatla 
243502e57b9dSGiuseppe CAVALLARO 	/* PS and related bits will be programmed according to the speed */
243602e57b9dSGiuseppe CAVALLARO 	if (priv->hw->pcs) {
243702e57b9dSGiuseppe CAVALLARO 		int speed = priv->plat->mac_port_sel_speed;
243802e57b9dSGiuseppe CAVALLARO 
243902e57b9dSGiuseppe CAVALLARO 		if ((speed == SPEED_10) || (speed == SPEED_100) ||
244002e57b9dSGiuseppe CAVALLARO 		    (speed == SPEED_1000)) {
244102e57b9dSGiuseppe CAVALLARO 			priv->hw->ps = speed;
244202e57b9dSGiuseppe CAVALLARO 		} else {
244302e57b9dSGiuseppe CAVALLARO 			dev_warn(priv->device, "invalid port speed\n");
244402e57b9dSGiuseppe CAVALLARO 			priv->hw->ps = 0;
244502e57b9dSGiuseppe CAVALLARO 		}
244602e57b9dSGiuseppe CAVALLARO 	}
244702e57b9dSGiuseppe CAVALLARO 
2448523f11b5SSrinivas Kandagatla 	/* Initialize the MAC Core */
24497ed24bbeSVince Bridgers 	priv->hw->mac->core_init(priv->hw, dev->mtu);
2450523f11b5SSrinivas Kandagatla 
2451d0a9c9f9SJoao Pinto 	/* Initialize MTL */
2452d0a9c9f9SJoao Pinto 	if (priv->synopsys_id >= DWMAC_CORE_4_00)
2453d0a9c9f9SJoao Pinto 		stmmac_mtl_configuration(priv);
24549eb12474Sjpinto 
2455978aded4SGiuseppe CAVALLARO 	ret = priv->hw->mac->rx_ipc(priv->hw);
2456978aded4SGiuseppe CAVALLARO 	if (!ret) {
245738ddc59dSLABBE Corentin 		netdev_warn(priv->dev, "RX IPC Checksum Offload disabled\n");
2458978aded4SGiuseppe CAVALLARO 		priv->plat->rx_coe = STMMAC_RX_COE_NONE;
2459d2afb5bdSGiuseppe CAVALLARO 		priv->hw->rx_csum = 0;
2460978aded4SGiuseppe CAVALLARO 	}
2461978aded4SGiuseppe CAVALLARO 
2462523f11b5SSrinivas Kandagatla 	/* Enable the MAC Rx/Tx */
2463270c7759SLABBE Corentin 	priv->hw->mac->set_mac(priv->ioaddr, true);
2464523f11b5SSrinivas Kandagatla 
2465b4f0a661SJoao Pinto 	/* Set the HW DMA mode and the COE */
2466b4f0a661SJoao Pinto 	stmmac_dma_operation_mode(priv);
2467b4f0a661SJoao Pinto 
2468523f11b5SSrinivas Kandagatla 	stmmac_mmc_setup(priv);
2469523f11b5SSrinivas Kandagatla 
2470fe131929SHuacai Chen 	if (init_ptp) {
24710ad2be79SThierry Reding 		ret = clk_prepare_enable(priv->plat->clk_ptp_ref);
24720ad2be79SThierry Reding 		if (ret < 0)
24730ad2be79SThierry Reding 			netdev_warn(priv->dev, "failed to enable PTP reference clock: %d\n", ret);
24740ad2be79SThierry Reding 
2475523f11b5SSrinivas Kandagatla 		ret = stmmac_init_ptp(priv);
2476722eef28SHeiner Kallweit 		if (ret == -EOPNOTSUPP)
2477722eef28SHeiner Kallweit 			netdev_warn(priv->dev, "PTP not supported by HW\n");
2478722eef28SHeiner Kallweit 		else if (ret)
2479722eef28SHeiner Kallweit 			netdev_warn(priv->dev, "PTP init failed\n");
2480fe131929SHuacai Chen 	}
2481523f11b5SSrinivas Kandagatla 
248250fb4f74SGiuseppe CAVALLARO #ifdef CONFIG_DEBUG_FS
2483523f11b5SSrinivas Kandagatla 	ret = stmmac_init_fs(dev);
2484523f11b5SSrinivas Kandagatla 	if (ret < 0)
248538ddc59dSLABBE Corentin 		netdev_warn(priv->dev, "%s: failed debugFS registration\n",
248638ddc59dSLABBE Corentin 			    __func__);
2487523f11b5SSrinivas Kandagatla #endif
2488523f11b5SSrinivas Kandagatla 	/* Start the ball rolling... */
2489ae4f0d46SJoao Pinto 	stmmac_start_all_dma(priv);
2490523f11b5SSrinivas Kandagatla 
2491523f11b5SSrinivas Kandagatla 	priv->tx_lpi_timer = STMMAC_DEFAULT_TWT_LS;
2492523f11b5SSrinivas Kandagatla 
2493523f11b5SSrinivas Kandagatla 	if ((priv->use_riwt) && (priv->hw->dma->rx_watchdog)) {
2494523f11b5SSrinivas Kandagatla 		priv->rx_riwt = MAX_DMA_RIWT;
24953c55d4d0SJoao Pinto 		priv->hw->dma->rx_watchdog(priv->ioaddr, MAX_DMA_RIWT, rx_cnt);
2496523f11b5SSrinivas Kandagatla 	}
2497523f11b5SSrinivas Kandagatla 
24983fe5cadbSGiuseppe CAVALLARO 	if (priv->hw->pcs && priv->hw->mac->pcs_ctrl_ane)
249902e57b9dSGiuseppe CAVALLARO 		priv->hw->mac->pcs_ctrl_ane(priv->hw, 1, priv->hw->ps, 0);
2500523f11b5SSrinivas Kandagatla 
25014854ab99SJoao Pinto 	/* set TX and RX rings length */
25024854ab99SJoao Pinto 	stmmac_set_rings_length(priv);
25034854ab99SJoao Pinto 
2504f748be53SAlexandre TORGUE 	/* Enable TSO */
2505146617b8SJoao Pinto 	if (priv->tso) {
2506146617b8SJoao Pinto 		for (chan = 0; chan < tx_cnt; chan++)
2507146617b8SJoao Pinto 			priv->hw->dma->enable_tso(priv->ioaddr, 1, chan);
2508146617b8SJoao Pinto 	}
2509f748be53SAlexandre TORGUE 
2510523f11b5SSrinivas Kandagatla 	return 0;
2511523f11b5SSrinivas Kandagatla }
2512523f11b5SSrinivas Kandagatla 
2513c66f6c37SThierry Reding static void stmmac_hw_teardown(struct net_device *dev)
2514c66f6c37SThierry Reding {
2515c66f6c37SThierry Reding 	struct stmmac_priv *priv = netdev_priv(dev);
2516c66f6c37SThierry Reding 
2517c66f6c37SThierry Reding 	clk_disable_unprepare(priv->plat->clk_ptp_ref);
2518c66f6c37SThierry Reding }
2519c66f6c37SThierry Reding 
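/*
 * Illustrative note (not part of the original driver sources):
 * stmmac_hw_teardown() is the (partial) counterpart of stmmac_hw_setup();
 * it only releases the PTP reference clock enabled there, and is used on
 * the stmmac_open() error path before the DMA resources are freed.
 */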
2520523f11b5SSrinivas Kandagatla /**
25217ac6653aSJeff Kirsher  *  stmmac_open - open entry point of the driver
25227ac6653aSJeff Kirsher  *  @dev : pointer to the device structure.
25237ac6653aSJeff Kirsher  *  Description:
25247ac6653aSJeff Kirsher  *  This function is the open entry point of the driver.
25257ac6653aSJeff Kirsher  *  Return value:
25267ac6653aSJeff Kirsher  *  0 on success and an appropriate (-)ve integer as defined in errno.h
25277ac6653aSJeff Kirsher  *  file on failure.
25287ac6653aSJeff Kirsher  */
25297ac6653aSJeff Kirsher static int stmmac_open(struct net_device *dev)
25307ac6653aSJeff Kirsher {
25317ac6653aSJeff Kirsher 	struct stmmac_priv *priv = netdev_priv(dev);
25327ac6653aSJeff Kirsher 	int ret;
25337ac6653aSJeff Kirsher 
25344bfcbd7aSFrancesco Virlinzi 	stmmac_check_ether_addr(priv);
25354bfcbd7aSFrancesco Virlinzi 
25363fe5cadbSGiuseppe CAVALLARO 	if (priv->hw->pcs != STMMAC_PCS_RGMII &&
25373fe5cadbSGiuseppe CAVALLARO 	    priv->hw->pcs != STMMAC_PCS_TBI &&
25383fe5cadbSGiuseppe CAVALLARO 	    priv->hw->pcs != STMMAC_PCS_RTBI) {
25397ac6653aSJeff Kirsher 		ret = stmmac_init_phy(dev);
2540e58bb43fSGiuseppe CAVALLARO 		if (ret) {
254138ddc59dSLABBE Corentin 			netdev_err(priv->dev,
254238ddc59dSLABBE Corentin 				   "%s: Cannot attach to PHY (error: %d)\n",
2543e58bb43fSGiuseppe CAVALLARO 				   __func__, ret);
254489df20d9SHans de Goede 			return ret;
25457ac6653aSJeff Kirsher 		}
2546e58bb43fSGiuseppe CAVALLARO 	}
25477ac6653aSJeff Kirsher 
2548523f11b5SSrinivas Kandagatla 	/* Extra statistics */
2549523f11b5SSrinivas Kandagatla 	memset(&priv->xstats, 0, sizeof(struct stmmac_extra_stats));
2550523f11b5SSrinivas Kandagatla 	priv->xstats.threshold = tc;
2551523f11b5SSrinivas Kandagatla 
255222ad3838SGiuseppe Cavallaro 	priv->rx_copybreak = STMMAC_RX_COPYBREAK;
255356329137SBartlomiej Zolnierkiewicz 
2554fe131929SHuacai Chen 	ret = stmmac_hw_setup(dev, true);
255556329137SBartlomiej Zolnierkiewicz 	if (ret < 0) {
255638ddc59dSLABBE Corentin 		netdev_err(priv->dev, "%s: Hw setup failed\n", __func__);
2557c9324d18SGiuseppe CAVALLARO 		goto init_error;
25587ac6653aSJeff Kirsher 	}
25597ac6653aSJeff Kirsher 
2560777da230SGiuseppe CAVALLARO 	stmmac_init_tx_coalesce(priv);
2561777da230SGiuseppe CAVALLARO 
2562d6d50c7eSPhilippe Reynes 	if (dev->phydev)
2563d6d50c7eSPhilippe Reynes 		phy_start(dev->phydev);
25647ac6653aSJeff Kirsher 
25657ac6653aSJeff Kirsher 	/* Request the IRQ lines */
25667ac6653aSJeff Kirsher 	ret = request_irq(dev->irq, stmmac_interrupt,
25677ac6653aSJeff Kirsher 			  IRQF_SHARED, dev->name, dev);
25687ac6653aSJeff Kirsher 	if (unlikely(ret < 0)) {
256938ddc59dSLABBE Corentin 		netdev_err(priv->dev,
257038ddc59dSLABBE Corentin 			   "%s: ERROR: allocating the IRQ %d (error: %d)\n",
25717ac6653aSJeff Kirsher 			   __func__, dev->irq, ret);
25726c1e5abeSThierry Reding 		goto irq_error;
25737ac6653aSJeff Kirsher 	}
25747ac6653aSJeff Kirsher 
25757a13f8f5SFrancesco Virlinzi 	/* Request the Wake IRQ in case of another line is used for WoL */
25767a13f8f5SFrancesco Virlinzi 	if (priv->wol_irq != dev->irq) {
25777a13f8f5SFrancesco Virlinzi 		ret = request_irq(priv->wol_irq, stmmac_interrupt,
25787a13f8f5SFrancesco Virlinzi 				  IRQF_SHARED, dev->name, dev);
25797a13f8f5SFrancesco Virlinzi 		if (unlikely(ret < 0)) {
258038ddc59dSLABBE Corentin 			netdev_err(priv->dev,
258138ddc59dSLABBE Corentin 				   "%s: ERROR: allocating the WoL IRQ %d (%d)\n",
2582ceb69499SGiuseppe CAVALLARO 				   __func__, priv->wol_irq, ret);
2583c9324d18SGiuseppe CAVALLARO 			goto wolirq_error;
25847a13f8f5SFrancesco Virlinzi 		}
25857a13f8f5SFrancesco Virlinzi 	}
25867a13f8f5SFrancesco Virlinzi 
2587d765955dSGiuseppe CAVALLARO 	/* Request the IRQ lines */
2588d7ec8584SChen-Yu Tsai 	if (priv->lpi_irq > 0) {
2589d765955dSGiuseppe CAVALLARO 		ret = request_irq(priv->lpi_irq, stmmac_interrupt, IRQF_SHARED,
2590d765955dSGiuseppe CAVALLARO 				  dev->name, dev);
2591d765955dSGiuseppe CAVALLARO 		if (unlikely(ret < 0)) {
259238ddc59dSLABBE Corentin 			netdev_err(priv->dev,
259338ddc59dSLABBE Corentin 				   "%s: ERROR: allocating the LPI IRQ %d (%d)\n",
2594d765955dSGiuseppe CAVALLARO 				   __func__, priv->lpi_irq, ret);
2595c9324d18SGiuseppe CAVALLARO 			goto lpiirq_error;
2596d765955dSGiuseppe CAVALLARO 		}
2597d765955dSGiuseppe CAVALLARO 	}
2598d765955dSGiuseppe CAVALLARO 
2599aff3d9efSJoao Pinto 	stmmac_enable_all_queues(priv);
2600aff3d9efSJoao Pinto 	stmmac_start_all_queues(priv);
26017ac6653aSJeff Kirsher 
26027ac6653aSJeff Kirsher 	return 0;
26037ac6653aSJeff Kirsher 
2604c9324d18SGiuseppe CAVALLARO lpiirq_error:
2605d765955dSGiuseppe CAVALLARO 	if (priv->wol_irq != dev->irq)
2606d765955dSGiuseppe CAVALLARO 		free_irq(priv->wol_irq, dev);
2607c9324d18SGiuseppe CAVALLARO wolirq_error:
26087a13f8f5SFrancesco Virlinzi 	free_irq(dev->irq, dev);
26096c1e5abeSThierry Reding irq_error:
26106c1e5abeSThierry Reding 	if (dev->phydev)
26116c1e5abeSThierry Reding 		phy_stop(dev->phydev);
26127a13f8f5SFrancesco Virlinzi 
26136c1e5abeSThierry Reding 	del_timer_sync(&priv->txtimer);
2614c66f6c37SThierry Reding 	stmmac_hw_teardown(dev);
2615c9324d18SGiuseppe CAVALLARO init_error:
2616c9324d18SGiuseppe CAVALLARO 	free_dma_desc_resources(priv);
2617aff3d9efSJoao Pinto 
2618d6d50c7eSPhilippe Reynes 	if (dev->phydev)
2619d6d50c7eSPhilippe Reynes 		phy_disconnect(dev->phydev);
26204bfcbd7aSFrancesco Virlinzi 
26217ac6653aSJeff Kirsher 	return ret;
26227ac6653aSJeff Kirsher }
26237ac6653aSJeff Kirsher 
26247ac6653aSJeff Kirsher /**
26257ac6653aSJeff Kirsher  *  stmmac_release - close entry point of the driver
26267ac6653aSJeff Kirsher  *  @dev : device pointer.
26277ac6653aSJeff Kirsher  *  Description:
26287ac6653aSJeff Kirsher  *  This is the stop entry point of the driver.
26297ac6653aSJeff Kirsher  */
26307ac6653aSJeff Kirsher static int stmmac_release(struct net_device *dev)
26317ac6653aSJeff Kirsher {
26327ac6653aSJeff Kirsher 	struct stmmac_priv *priv = netdev_priv(dev);
26337ac6653aSJeff Kirsher 
2634d765955dSGiuseppe CAVALLARO 	if (priv->eee_enabled)
2635d765955dSGiuseppe CAVALLARO 		del_timer_sync(&priv->eee_ctrl_timer);
2636d765955dSGiuseppe CAVALLARO 
26377ac6653aSJeff Kirsher 	/* Stop and disconnect the PHY */
2638d6d50c7eSPhilippe Reynes 	if (dev->phydev) {
2639d6d50c7eSPhilippe Reynes 		phy_stop(dev->phydev);
2640d6d50c7eSPhilippe Reynes 		phy_disconnect(dev->phydev);
26417ac6653aSJeff Kirsher 	}
26427ac6653aSJeff Kirsher 
2643aff3d9efSJoao Pinto 	stmmac_stop_all_queues(priv);
26447ac6653aSJeff Kirsher 
2645aff3d9efSJoao Pinto 	stmmac_disable_all_queues(priv);
26467ac6653aSJeff Kirsher 
26479125cdd1SGiuseppe CAVALLARO 	del_timer_sync(&priv->txtimer);
26489125cdd1SGiuseppe CAVALLARO 
26497ac6653aSJeff Kirsher 	/* Free the IRQ lines */
26507ac6653aSJeff Kirsher 	free_irq(dev->irq, dev);
26517a13f8f5SFrancesco Virlinzi 	if (priv->wol_irq != dev->irq)
26527a13f8f5SFrancesco Virlinzi 		free_irq(priv->wol_irq, dev);
2653d7ec8584SChen-Yu Tsai 	if (priv->lpi_irq > 0)
2654d765955dSGiuseppe CAVALLARO 		free_irq(priv->lpi_irq, dev);
26557ac6653aSJeff Kirsher 
26567ac6653aSJeff Kirsher 	/* Stop TX/RX DMA and clear the descriptors */
2657ae4f0d46SJoao Pinto 	stmmac_stop_all_dma(priv);
26587ac6653aSJeff Kirsher 
26597ac6653aSJeff Kirsher 	/* Release and free the Rx/Tx resources */
26607ac6653aSJeff Kirsher 	free_dma_desc_resources(priv);
26617ac6653aSJeff Kirsher 
26627ac6653aSJeff Kirsher 	/* Disable the MAC Rx/Tx */
2663270c7759SLABBE Corentin 	priv->hw->mac->set_mac(priv->ioaddr, false);
26647ac6653aSJeff Kirsher 
26657ac6653aSJeff Kirsher 	netif_carrier_off(dev);
26667ac6653aSJeff Kirsher 
266750fb4f74SGiuseppe CAVALLARO #ifdef CONFIG_DEBUG_FS
2668466c5ac8SMathieu Olivari 	stmmac_exit_fs(dev);
2669bfab27a1SGiuseppe CAVALLARO #endif
2670bfab27a1SGiuseppe CAVALLARO 
267192ba6888SRayagond Kokatanur 	stmmac_release_ptp(priv);
267292ba6888SRayagond Kokatanur 
26737ac6653aSJeff Kirsher 	return 0;
26747ac6653aSJeff Kirsher }
26757ac6653aSJeff Kirsher 
26767ac6653aSJeff Kirsher /**
2677f748be53SAlexandre TORGUE  *  stmmac_tso_allocator - fill TSO descriptors for a mapped buffer
2678f748be53SAlexandre TORGUE  *  @priv: driver private structure
2679f748be53SAlexandre TORGUE  *  @des: buffer start address
2680f748be53SAlexandre TORGUE  *  @total_len: total length to fill in descriptors
2681f748be53SAlexandre TORGUE  *  @last_segment: condition for the last descriptor
2682aff3d9efSJoao Pinto  *  @queue: TX queue index
2683f748be53SAlexandre TORGUE  *  Description:
2684f748be53SAlexandre TORGUE  *  This function fills the descriptors and requests new ones according to
2685f748be53SAlexandre TORGUE  *  the buffer length to fill
2686f748be53SAlexandre TORGUE  */
2687f748be53SAlexandre TORGUE static void stmmac_tso_allocator(struct stmmac_priv *priv, unsigned int des,
2688aff3d9efSJoao Pinto 				 int total_len, bool last_segment, u32 queue)
2689f748be53SAlexandre TORGUE {
2690aff3d9efSJoao Pinto 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
2691f748be53SAlexandre TORGUE 	struct dma_desc *desc;
2692f748be53SAlexandre TORGUE 	u32 buff_size;
2693aff3d9efSJoao Pinto 	int tmp_len;
2694f748be53SAlexandre TORGUE 
2695f748be53SAlexandre TORGUE 	tmp_len = total_len;
2696f748be53SAlexandre TORGUE 
2697f748be53SAlexandre TORGUE 	while (tmp_len > 0) {
2698aff3d9efSJoao Pinto 		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
2699aff3d9efSJoao Pinto 		desc = tx_q->dma_tx + tx_q->cur_tx;
2700f748be53SAlexandre TORGUE 
2701f8be0d78SMichael Weiser 		desc->des0 = cpu_to_le32(des + (total_len - tmp_len));
2702f748be53SAlexandre TORGUE 		buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ?
2703f748be53SAlexandre TORGUE 			    TSO_MAX_BUFF_SIZE : tmp_len;
2704f748be53SAlexandre TORGUE 
2705f748be53SAlexandre TORGUE 		priv->hw->desc->prepare_tso_tx_desc(desc, 0, buff_size,
2706f748be53SAlexandre TORGUE 			0, 1,
2707f748be53SAlexandre TORGUE 			(last_segment) && (buff_size < TSO_MAX_BUFF_SIZE),
2708f748be53SAlexandre TORGUE 			0, 0);
2709f748be53SAlexandre TORGUE 
2710f748be53SAlexandre TORGUE 		tmp_len -= TSO_MAX_BUFF_SIZE;
2711f748be53SAlexandre TORGUE 	}
2712f748be53SAlexandre TORGUE }
2713f748be53SAlexandre TORGUE 
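/*
 * Worked example (illustrative only, not from the original driver sources):
 * with TSO_MAX_BUFF_SIZE = SZ_16K - 1 = 16383 bytes, a remaining payload of
 * 40000 bytes is spread over three descriptors:
 *
 *	stmmac_tso_allocator(priv, des, 40000, true, queue);
 *		-> chunk 1: 16383 bytes (LS not set, chunk equals max size)
 *		-> chunk 2: 16383 bytes (LS not set)
 *		-> chunk 3:  7234 bytes (LS set, last chunk of the last segment)
 */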
2714f748be53SAlexandre TORGUE /**
2715f748be53SAlexandre TORGUE  *  stmmac_tso_xmit - Tx entry point of the driver for oversized frames (TSO)
2716f748be53SAlexandre TORGUE  *  @skb : the socket buffer
2717f748be53SAlexandre TORGUE  *  @dev : device pointer
2718f748be53SAlexandre TORGUE  *  Description: this is the transmit function that is called on TSO frames
2719f748be53SAlexandre TORGUE  *  (support available on GMAC4 and newer chips).
2720f748be53SAlexandre TORGUE  *  Diagram below show the ring programming in case of TSO frames:
2721f748be53SAlexandre TORGUE  *
2722f748be53SAlexandre TORGUE  *  First Descriptor
2723f748be53SAlexandre TORGUE  *   --------
2724f748be53SAlexandre TORGUE  *   | DES0 |---> buffer1 = L2/L3/L4 header
2725f748be53SAlexandre TORGUE  *   | DES1 |---> TCP Payload (can continue on next descr...)
2726f748be53SAlexandre TORGUE  *   | DES2 |---> buffer 1 and 2 len
2727f748be53SAlexandre TORGUE  *   | DES3 |---> must set TSE, TCP hdr len-> [22:19]. TCP payload len [17:0]
2728f748be53SAlexandre TORGUE  *   --------
2729f748be53SAlexandre TORGUE  *	|
2730f748be53SAlexandre TORGUE  *     ...
2731f748be53SAlexandre TORGUE  *	|
2732f748be53SAlexandre TORGUE  *   --------
2733f748be53SAlexandre TORGUE  *   | DES0 | --| Split TCP Payload on Buffers 1 and 2
2734f748be53SAlexandre TORGUE  *   | DES1 | --|
2735f748be53SAlexandre TORGUE  *   | DES2 | --> buffer 1 and 2 len
2736f748be53SAlexandre TORGUE  *   | DES3 |
2737f748be53SAlexandre TORGUE  *   --------
2738f748be53SAlexandre TORGUE  *
2739f748be53SAlexandre TORGUE  * mss is fixed while TSO is enabled, so the TDES3 ctx field is programmed only when it changes.
2740f748be53SAlexandre TORGUE  */
2741f748be53SAlexandre TORGUE static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
2742f748be53SAlexandre TORGUE {
2743aff3d9efSJoao Pinto 	struct dma_desc *desc, *first, *mss_desc = NULL;
2744f748be53SAlexandre TORGUE 	struct stmmac_priv *priv = netdev_priv(dev);
2745aff3d9efSJoao Pinto 	u32 queue = skb_get_queue_mapping(skb);
2746f748be53SAlexandre TORGUE 	int nfrags = skb_shinfo(skb)->nr_frags;
2747f748be53SAlexandre TORGUE 	unsigned int first_entry, des;
2748aff3d9efSJoao Pinto 	struct stmmac_tx_queue *tx_q;
2749aff3d9efSJoao Pinto 	int tmp_pay_len = 0;
2750aff3d9efSJoao Pinto 	u32 pay_len, mss;
2751f748be53SAlexandre TORGUE 	u8 proto_hdr_len;
2752f748be53SAlexandre TORGUE 	int i;
2753f748be53SAlexandre TORGUE 
2754aff3d9efSJoao Pinto 	tx_q = &priv->tx_queue[queue];
2755aff3d9efSJoao Pinto 
2756f748be53SAlexandre TORGUE 	/* Compute header lengths */
2757f748be53SAlexandre TORGUE 	proto_hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
2758f748be53SAlexandre TORGUE 
2759f748be53SAlexandre TORGUE 	/* Desc availability based on threshold should be enough safe */
2760aff3d9efSJoao Pinto 	if (unlikely(stmmac_tx_avail(priv, queue) <
2761f748be53SAlexandre TORGUE 		(((skb->len - proto_hdr_len) / TSO_MAX_BUFF_SIZE + 1)))) {
2762aff3d9efSJoao Pinto 		if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
2763aff3d9efSJoao Pinto 			netif_tx_stop_queue(netdev_get_tx_queue(dev, queue));
2764f748be53SAlexandre TORGUE 			/* This is a hard error, log it. */
276538ddc59dSLABBE Corentin 			netdev_err(priv->dev,
276638ddc59dSLABBE Corentin 				   "%s: Tx Ring full when queue awake\n",
276738ddc59dSLABBE Corentin 				   __func__);
2768f748be53SAlexandre TORGUE 		}
2769f748be53SAlexandre TORGUE 		return NETDEV_TX_BUSY;
2770f748be53SAlexandre TORGUE 	}
2771f748be53SAlexandre TORGUE 
2772f748be53SAlexandre TORGUE 	pay_len = skb_headlen(skb) - proto_hdr_len; /* no frags */
2773f748be53SAlexandre TORGUE 
2774f748be53SAlexandre TORGUE 	mss = skb_shinfo(skb)->gso_size;
2775f748be53SAlexandre TORGUE 
2776f748be53SAlexandre TORGUE 	/* set new MSS value if needed */
2777f748be53SAlexandre TORGUE 	if (mss != priv->mss) {
2778aff3d9efSJoao Pinto 		mss_desc = tx_q->dma_tx + tx_q->cur_tx;
2779f748be53SAlexandre TORGUE 		priv->hw->desc->set_mss(mss_desc, mss);
2780f748be53SAlexandre TORGUE 		priv->mss = mss;
2781aff3d9efSJoao Pinto 		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
2782f748be53SAlexandre TORGUE 	}
2783f748be53SAlexandre TORGUE 
2784f748be53SAlexandre TORGUE 	if (netif_msg_tx_queued(priv)) {
2785f748be53SAlexandre TORGUE 		pr_info("%s: tcphdrlen %d, hdr_len %d, pay_len %d, mss %d\n",
2786f748be53SAlexandre TORGUE 			__func__, tcp_hdrlen(skb), proto_hdr_len, pay_len, mss);
2787f748be53SAlexandre TORGUE 		pr_info("\tskb->len %d, skb->data_len %d\n", skb->len,
2788f748be53SAlexandre TORGUE 			skb->data_len);
2789f748be53SAlexandre TORGUE 	}
2790f748be53SAlexandre TORGUE 
2791aff3d9efSJoao Pinto 	first_entry = tx_q->cur_tx;
2792f748be53SAlexandre TORGUE 
2793aff3d9efSJoao Pinto 	desc = tx_q->dma_tx + first_entry;
2794f748be53SAlexandre TORGUE 	first = desc;
2795f748be53SAlexandre TORGUE 
2796f748be53SAlexandre TORGUE 	/* first descriptor: fill Headers on Buf1 */
2797f748be53SAlexandre TORGUE 	des = dma_map_single(priv->device, skb->data, skb_headlen(skb),
2798f748be53SAlexandre TORGUE 			     DMA_TO_DEVICE);
2799f748be53SAlexandre TORGUE 	if (dma_mapping_error(priv->device, des))
2800f748be53SAlexandre TORGUE 		goto dma_map_err;
2801f748be53SAlexandre TORGUE 
2802aff3d9efSJoao Pinto 	tx_q->tx_skbuff_dma[first_entry].buf = des;
2803aff3d9efSJoao Pinto 	tx_q->tx_skbuff_dma[first_entry].len = skb_headlen(skb);
2804aff3d9efSJoao Pinto 	tx_q->tx_skbuff[first_entry] = skb;
2805f748be53SAlexandre TORGUE 
2806f8be0d78SMichael Weiser 	first->des0 = cpu_to_le32(des);
2807f748be53SAlexandre TORGUE 
2808f748be53SAlexandre TORGUE 	/* Fill start of payload in buff2 of first descriptor */
2809f748be53SAlexandre TORGUE 	if (pay_len)
2810f8be0d78SMichael Weiser 		first->des1 = cpu_to_le32(des + proto_hdr_len);
2811f748be53SAlexandre TORGUE 
2812f748be53SAlexandre TORGUE 	/* If needed take extra descriptors to fill the remaining payload */
2813f748be53SAlexandre TORGUE 	tmp_pay_len = pay_len - TSO_MAX_BUFF_SIZE;
2814f748be53SAlexandre TORGUE 
2815aff3d9efSJoao Pinto 	stmmac_tso_allocator(priv, des, tmp_pay_len, (nfrags == 0), queue);
2816f748be53SAlexandre TORGUE 
2817f748be53SAlexandre TORGUE 	/* Prepare fragments */
2818f748be53SAlexandre TORGUE 	for (i = 0; i < nfrags; i++) {
2819f748be53SAlexandre TORGUE 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2820f748be53SAlexandre TORGUE 
2821f748be53SAlexandre TORGUE 		des = skb_frag_dma_map(priv->device, frag, 0,
2822f748be53SAlexandre TORGUE 				       skb_frag_size(frag),
2823f748be53SAlexandre TORGUE 				       DMA_TO_DEVICE);
2824937071c1SThierry Reding 		if (dma_mapping_error(priv->device, des))
2825937071c1SThierry Reding 			goto dma_map_err;
2826f748be53SAlexandre TORGUE 
2827f748be53SAlexandre TORGUE 		stmmac_tso_allocator(priv, des, skb_frag_size(frag),
2828aff3d9efSJoao Pinto 				     (i == nfrags - 1), queue);
2829f748be53SAlexandre TORGUE 
2830aff3d9efSJoao Pinto 		tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des;
2831aff3d9efSJoao Pinto 		tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_frag_size(frag);
2832aff3d9efSJoao Pinto 		tx_q->tx_skbuff[tx_q->cur_tx] = NULL;
2833aff3d9efSJoao Pinto 		tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = true;
2834f748be53SAlexandre TORGUE 	}
2835f748be53SAlexandre TORGUE 
2836aff3d9efSJoao Pinto 	tx_q->tx_skbuff_dma[tx_q->cur_tx].last_segment = true;
2837f748be53SAlexandre TORGUE 
2838aff3d9efSJoao Pinto 	tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
2839f748be53SAlexandre TORGUE 
2840aff3d9efSJoao Pinto 	if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
2841b3e51069SLABBE Corentin 		netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
284238ddc59dSLABBE Corentin 			  __func__);
2843aff3d9efSJoao Pinto 		netif_tx_stop_queue(netdev_get_tx_queue(dev, queue));
2844f748be53SAlexandre TORGUE 	}
2845f748be53SAlexandre TORGUE 
2846f748be53SAlexandre TORGUE 	dev->stats.tx_bytes += skb->len;
2847f748be53SAlexandre TORGUE 	priv->xstats.tx_tso_frames++;
2848f748be53SAlexandre TORGUE 	priv->xstats.tx_tso_nfrags += nfrags;
2849f748be53SAlexandre TORGUE 
2850f748be53SAlexandre TORGUE 	/* Manage tx mitigation */
2851f748be53SAlexandre TORGUE 	priv->tx_count_frames += nfrags + 1;
2852f748be53SAlexandre TORGUE 	if (likely(priv->tx_coal_frames > priv->tx_count_frames)) {
2853f748be53SAlexandre TORGUE 		mod_timer(&priv->txtimer,
2854f748be53SAlexandre TORGUE 			  STMMAC_COAL_TIMER(priv->tx_coal_timer));
2855f748be53SAlexandre TORGUE 	} else {
2856f748be53SAlexandre TORGUE 		priv->tx_count_frames = 0;
2857f748be53SAlexandre TORGUE 		priv->hw->desc->set_tx_ic(desc);
2858f748be53SAlexandre TORGUE 		priv->xstats.tx_set_ic_bit++;
2859f748be53SAlexandre TORGUE 	}
2860f748be53SAlexandre TORGUE 
2861f748be53SAlexandre TORGUE 	if (!priv->hwts_tx_en)
2862f748be53SAlexandre TORGUE 		skb_tx_timestamp(skb);
2863f748be53SAlexandre TORGUE 
2864f748be53SAlexandre TORGUE 	if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
2865f748be53SAlexandre TORGUE 		     priv->hwts_tx_en)) {
2866f748be53SAlexandre TORGUE 		/* declare that device is doing timestamping */
2867f748be53SAlexandre TORGUE 		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
2868f748be53SAlexandre TORGUE 		priv->hw->desc->enable_tx_timestamp(first);
2869f748be53SAlexandre TORGUE 	}
2870f748be53SAlexandre TORGUE 
2871f748be53SAlexandre TORGUE 	/* Complete the first descriptor before granting the DMA */
2872f748be53SAlexandre TORGUE 	priv->hw->desc->prepare_tso_tx_desc(first, 1,
2873f748be53SAlexandre TORGUE 			proto_hdr_len,
2874f748be53SAlexandre TORGUE 			pay_len,
2875aff3d9efSJoao Pinto 			1, tx_q->tx_skbuff_dma[first_entry].last_segment,
2876f748be53SAlexandre TORGUE 			tcp_hdrlen(skb) / 4, (skb->len - proto_hdr_len));
2877f748be53SAlexandre TORGUE 
2878f748be53SAlexandre TORGUE 	/* If context desc is used to change MSS */
2879f748be53SAlexandre TORGUE 	if (mss_desc)
2880f748be53SAlexandre TORGUE 		priv->hw->desc->set_tx_owner(mss_desc);
2881f748be53SAlexandre TORGUE 
2882f748be53SAlexandre TORGUE 	/* The own bit must be the latest setting done when prepare the
2883f748be53SAlexandre TORGUE 	 * descriptor and then barrier is needed to make sure that
2884f748be53SAlexandre TORGUE 	 * all is coherent before granting the DMA engine.
2885f748be53SAlexandre TORGUE 	 */
2886ad688cdbSPavel Machek 	dma_wmb();
2887f748be53SAlexandre TORGUE 
2888f748be53SAlexandre TORGUE 	if (netif_msg_pktdata(priv)) {
2889f748be53SAlexandre TORGUE 		pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n",
2890aff3d9efSJoao Pinto 			__func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
2891aff3d9efSJoao Pinto 			tx_q->cur_tx, first, nfrags);
2892f748be53SAlexandre TORGUE 
2893aff3d9efSJoao Pinto 		priv->hw->desc->display_ring((void *)tx_q->dma_tx, DMA_TX_SIZE,
2894f748be53SAlexandre TORGUE 					     0);
2895f748be53SAlexandre TORGUE 
2896f748be53SAlexandre TORGUE 		pr_info(">>> frame to be transmitted: ");
2897f748be53SAlexandre TORGUE 		print_pkt(skb->data, skb_headlen(skb));
2898f748be53SAlexandre TORGUE 	}
2899f748be53SAlexandre TORGUE 
2900aff3d9efSJoao Pinto 	netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
2901f748be53SAlexandre TORGUE 
2902aff3d9efSJoao Pinto 	priv->hw->dma->set_tx_tail_ptr(priv->ioaddr, tx_q->tx_tail_addr,
2903aff3d9efSJoao Pinto 				       queue);
2904f748be53SAlexandre TORGUE 
2905f748be53SAlexandre TORGUE 	return NETDEV_TX_OK;
2906f748be53SAlexandre TORGUE 
2907f748be53SAlexandre TORGUE dma_map_err:
2908f748be53SAlexandre TORGUE 	dev_err(priv->device, "Tx dma map failed\n");
2909f748be53SAlexandre TORGUE 	dev_kfree_skb(skb);
2910f748be53SAlexandre TORGUE 	priv->dev->stats.tx_dropped++;
2911f748be53SAlexandre TORGUE 	return NETDEV_TX_OK;
2912f748be53SAlexandre TORGUE }
2913f748be53SAlexandre TORGUE 
2914f748be53SAlexandre TORGUE /**
2915732fdf0eSGiuseppe CAVALLARO  *  stmmac_xmit - Tx entry point of the driver
29167ac6653aSJeff Kirsher  *  @skb : the socket buffer
29177ac6653aSJeff Kirsher  *  @dev : device pointer
291832ceabcaSGiuseppe CAVALLARO  *  Description : this is the tx entry point of the driver.
291932ceabcaSGiuseppe CAVALLARO  *  It programs the chain or the ring and supports oversized frames
292032ceabcaSGiuseppe CAVALLARO  *  and the SG feature.
29217ac6653aSJeff Kirsher  */
29227ac6653aSJeff Kirsher static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
29237ac6653aSJeff Kirsher {
29247ac6653aSJeff Kirsher 	struct stmmac_priv *priv = netdev_priv(dev);
29250e80bdc9SGiuseppe Cavallaro 	unsigned int nopaged_len = skb_headlen(skb);
29264a7d666aSGiuseppe CAVALLARO 	int i, csum_insertion = 0, is_jumbo = 0;
2927aff3d9efSJoao Pinto 	u32 queue = skb_get_queue_mapping(skb);
29287ac6653aSJeff Kirsher 	int nfrags = skb_shinfo(skb)->nr_frags;
29290e80bdc9SGiuseppe Cavallaro 	unsigned int entry, first_entry;
29307ac6653aSJeff Kirsher 	struct dma_desc *desc, *first;
2931aff3d9efSJoao Pinto 	struct stmmac_tx_queue *tx_q;
29320e80bdc9SGiuseppe Cavallaro 	unsigned int enh_desc;
2933f748be53SAlexandre TORGUE 	unsigned int des;
2934f748be53SAlexandre TORGUE 
2935aff3d9efSJoao Pinto 	tx_q = &priv->tx_queue[queue];
2936aff3d9efSJoao Pinto 
2937f748be53SAlexandre TORGUE 	/* Manage oversized TCP frames for GMAC4 device */
2938f748be53SAlexandre TORGUE 	if (skb_is_gso(skb) && priv->tso) {
2939f748be53SAlexandre TORGUE 		if (ip_hdr(skb)->protocol == IPPROTO_TCP)
2940f748be53SAlexandre TORGUE 			return stmmac_tso_xmit(skb, dev);
2941f748be53SAlexandre TORGUE 	}
29427ac6653aSJeff Kirsher 
2943aff3d9efSJoao Pinto 	if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) {
2944aff3d9efSJoao Pinto 		if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
2945aff3d9efSJoao Pinto 			netif_tx_stop_queue(netdev_get_tx_queue(dev, queue));
29467ac6653aSJeff Kirsher 			/* This is a hard error, log it. */
294738ddc59dSLABBE Corentin 			netdev_err(priv->dev,
294838ddc59dSLABBE Corentin 				   "%s: Tx Ring full when queue awake\n",
294938ddc59dSLABBE Corentin 				   __func__);
29507ac6653aSJeff Kirsher 		}
29517ac6653aSJeff Kirsher 		return NETDEV_TX_BUSY;
29527ac6653aSJeff Kirsher 	}
29537ac6653aSJeff Kirsher 
2954d765955dSGiuseppe CAVALLARO 	if (priv->tx_path_in_lpi_mode)
2955d765955dSGiuseppe CAVALLARO 		stmmac_disable_eee_mode(priv);
2956d765955dSGiuseppe CAVALLARO 
2957aff3d9efSJoao Pinto 	entry = tx_q->cur_tx;
29580e80bdc9SGiuseppe Cavallaro 	first_entry = entry;
29597ac6653aSJeff Kirsher 
29607ac6653aSJeff Kirsher 	csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL);
29617ac6653aSJeff Kirsher 
29620e80bdc9SGiuseppe Cavallaro 	if (likely(priv->extend_desc))
2963aff3d9efSJoao Pinto 		desc = (struct dma_desc *)(tx_q->dma_etx + entry);
2964c24602efSGiuseppe CAVALLARO 	else
2965aff3d9efSJoao Pinto 		desc = tx_q->dma_tx + entry;
2966c24602efSGiuseppe CAVALLARO 
29677ac6653aSJeff Kirsher 	first = desc;
29687ac6653aSJeff Kirsher 
2969aff3d9efSJoao Pinto 	tx_q->tx_skbuff[first_entry] = skb;
29700e80bdc9SGiuseppe Cavallaro 
29710e80bdc9SGiuseppe Cavallaro 	enh_desc = priv->plat->enh_desc;
29724a7d666aSGiuseppe CAVALLARO 	/* To program the descriptors according to the size of the frame */
297329896a67SGiuseppe CAVALLARO 	if (enh_desc)
297429896a67SGiuseppe CAVALLARO 		is_jumbo = priv->hw->mode->is_jumbo_frm(skb->len, enh_desc);
297529896a67SGiuseppe CAVALLARO 
2976f748be53SAlexandre TORGUE 	if (unlikely(is_jumbo) && likely(priv->synopsys_id <
2977f748be53SAlexandre TORGUE 					 DWMAC_CORE_4_00)) {
2978aff3d9efSJoao Pinto 		entry = priv->hw->mode->jumbo_frm(tx_q, skb, csum_insertion);
2979362b37beSGiuseppe CAVALLARO 		if (unlikely(entry < 0))
2980362b37beSGiuseppe CAVALLARO 			goto dma_map_err;
298129896a67SGiuseppe CAVALLARO 	}
29827ac6653aSJeff Kirsher 
29837ac6653aSJeff Kirsher 	for (i = 0; i < nfrags; i++) {
29849e903e08SEric Dumazet 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
29859e903e08SEric Dumazet 		int len = skb_frag_size(frag);
2986be434d50SGiuseppe Cavallaro 		bool last_segment = (i == (nfrags - 1));
29877ac6653aSJeff Kirsher 
2988e3ad57c9SGiuseppe Cavallaro 		entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
2989e3ad57c9SGiuseppe Cavallaro 
29900e80bdc9SGiuseppe Cavallaro 		if (likely(priv->extend_desc))
2991aff3d9efSJoao Pinto 			desc = (struct dma_desc *)(tx_q->dma_etx + entry);
2992c24602efSGiuseppe CAVALLARO 		else
2993aff3d9efSJoao Pinto 			desc = tx_q->dma_tx + entry;
29947ac6653aSJeff Kirsher 
2995f748be53SAlexandre TORGUE 		des = skb_frag_dma_map(priv->device, frag, 0, len,
2996f722380dSIan Campbell 				       DMA_TO_DEVICE);
2997f748be53SAlexandre TORGUE 		if (dma_mapping_error(priv->device, des))
2998362b37beSGiuseppe CAVALLARO 			goto dma_map_err; /* should reuse desc w/o issues */
2999362b37beSGiuseppe CAVALLARO 
3000aff3d9efSJoao Pinto 		tx_q->tx_skbuff[entry] = NULL;
3001f748be53SAlexandre TORGUE 
3002aff3d9efSJoao Pinto 		tx_q->tx_skbuff_dma[entry].buf = des;
3003f8be0d78SMichael Weiser 		if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00))
3004f8be0d78SMichael Weiser 			desc->des0 = cpu_to_le32(des);
3005f8be0d78SMichael Weiser 		else
3006f8be0d78SMichael Weiser 			desc->des2 = cpu_to_le32(des);
3007f748be53SAlexandre TORGUE 
3008aff3d9efSJoao Pinto 		tx_q->tx_skbuff_dma[entry].map_as_page = true;
3009aff3d9efSJoao Pinto 		tx_q->tx_skbuff_dma[entry].len = len;
3010aff3d9efSJoao Pinto 		tx_q->tx_skbuff_dma[entry].last_segment = last_segment;
30110e80bdc9SGiuseppe Cavallaro 
30120e80bdc9SGiuseppe Cavallaro 		/* Prepare the descriptor and set the own bit too */
30134a7d666aSGiuseppe CAVALLARO 		priv->hw->desc->prepare_tx_desc(desc, 0, len, csum_insertion,
3014be434d50SGiuseppe Cavallaro 						priv->mode, 1, last_segment);
30157ac6653aSJeff Kirsher 	}
30167ac6653aSJeff Kirsher 
3017e3ad57c9SGiuseppe Cavallaro 	entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
3018e3ad57c9SGiuseppe Cavallaro 
3019aff3d9efSJoao Pinto 	tx_q->cur_tx = entry;
30207ac6653aSJeff Kirsher 
30217ac6653aSJeff Kirsher 	if (netif_msg_pktdata(priv)) {
3022d0225e7dSAlexandre TORGUE 		void *tx_head;
3023d0225e7dSAlexandre TORGUE 
302438ddc59dSLABBE Corentin 		netdev_dbg(priv->dev,
302538ddc59dSLABBE Corentin 			   "%s: curr=%d dirty=%d f=%d, e=%d, first=%p, nfrags=%d",
3026aff3d9efSJoao Pinto 			   __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
30270e80bdc9SGiuseppe Cavallaro 			   entry, first, nfrags);
302883d7af64SGiuseppe CAVALLARO 
3029c24602efSGiuseppe CAVALLARO 		if (priv->extend_desc)
3030aff3d9efSJoao Pinto 			tx_head = (void *)tx_q->dma_etx;
3031c24602efSGiuseppe CAVALLARO 		else
3032aff3d9efSJoao Pinto 			tx_head = (void *)tx_q->dma_tx;
3033d0225e7dSAlexandre TORGUE 
3034d0225e7dSAlexandre TORGUE 		priv->hw->desc->display_ring(tx_head, DMA_TX_SIZE, false);
3035c24602efSGiuseppe CAVALLARO 
303638ddc59dSLABBE Corentin 		netdev_dbg(priv->dev, ">>> frame to be transmitted: ");
30377ac6653aSJeff Kirsher 		print_pkt(skb->data, skb->len);
30387ac6653aSJeff Kirsher 	}
30390e80bdc9SGiuseppe Cavallaro 
3040aff3d9efSJoao Pinto 	if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
3041b3e51069SLABBE Corentin 		netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
3042b3e51069SLABBE Corentin 			  __func__);
3043aff3d9efSJoao Pinto 		netif_tx_stop_queue(netdev_get_tx_queue(dev, queue));
30447ac6653aSJeff Kirsher 	}
30457ac6653aSJeff Kirsher 
30467ac6653aSJeff Kirsher 	dev->stats.tx_bytes += skb->len;
30477ac6653aSJeff Kirsher 
30480e80bdc9SGiuseppe Cavallaro 	/* According to the coalesce parameter the IC bit for the latest
30490e80bdc9SGiuseppe Cavallaro 	 * segment is reset and the timer re-started to clean the tx status.
30500e80bdc9SGiuseppe Cavallaro 	 * This approach takes care about the fragments: desc is the first
30510e80bdc9SGiuseppe Cavallaro 	 * element in case of no SG.
30520e80bdc9SGiuseppe Cavallaro 	 */
30530e80bdc9SGiuseppe Cavallaro 	priv->tx_count_frames += nfrags + 1;
30540e80bdc9SGiuseppe Cavallaro 	if (likely(priv->tx_coal_frames > priv->tx_count_frames)) {
30550e80bdc9SGiuseppe Cavallaro 		mod_timer(&priv->txtimer,
30560e80bdc9SGiuseppe Cavallaro 			  STMMAC_COAL_TIMER(priv->tx_coal_timer));
30570e80bdc9SGiuseppe Cavallaro 	} else {
30580e80bdc9SGiuseppe Cavallaro 		priv->tx_count_frames = 0;
30590e80bdc9SGiuseppe Cavallaro 		priv->hw->desc->set_tx_ic(desc);
30600e80bdc9SGiuseppe Cavallaro 		priv->xstats.tx_set_ic_bit++;
30610e80bdc9SGiuseppe Cavallaro 	}
30620e80bdc9SGiuseppe Cavallaro 
30630e80bdc9SGiuseppe Cavallaro 	if (!priv->hwts_tx_en)
30640e80bdc9SGiuseppe Cavallaro 		skb_tx_timestamp(skb);
30650e80bdc9SGiuseppe Cavallaro 
30660e80bdc9SGiuseppe Cavallaro 	/* Ready to fill the first descriptor and set the OWN bit w/o any
30670e80bdc9SGiuseppe Cavallaro 	 * problems because all the descriptors are actually ready to be
30680e80bdc9SGiuseppe Cavallaro 	 * passed to the DMA engine.
30690e80bdc9SGiuseppe Cavallaro 	 */
30700e80bdc9SGiuseppe Cavallaro 	if (likely(!is_jumbo)) {
30710e80bdc9SGiuseppe Cavallaro 		bool last_segment = (nfrags == 0);
30720e80bdc9SGiuseppe Cavallaro 
3073f748be53SAlexandre TORGUE 		des = dma_map_single(priv->device, skb->data,
30740e80bdc9SGiuseppe Cavallaro 				     nopaged_len, DMA_TO_DEVICE);
3075f748be53SAlexandre TORGUE 		if (dma_mapping_error(priv->device, des))
30760e80bdc9SGiuseppe Cavallaro 			goto dma_map_err;
30770e80bdc9SGiuseppe Cavallaro 
3078aff3d9efSJoao Pinto 		tx_q->tx_skbuff_dma[first_entry].buf = des;
3079f8be0d78SMichael Weiser 		if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00))
3080f8be0d78SMichael Weiser 			first->des0 = cpu_to_le32(des);
3081f8be0d78SMichael Weiser 		else
3082f8be0d78SMichael Weiser 			first->des2 = cpu_to_le32(des);
3083f748be53SAlexandre TORGUE 
3084aff3d9efSJoao Pinto 		tx_q->tx_skbuff_dma[first_entry].len = nopaged_len;
3085aff3d9efSJoao Pinto 		tx_q->tx_skbuff_dma[first_entry].last_segment = last_segment;
30860e80bdc9SGiuseppe Cavallaro 
3087891434b1SRayagond Kokatanur 		if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
3088891434b1SRayagond Kokatanur 			     priv->hwts_tx_en)) {
3089891434b1SRayagond Kokatanur 			/* declare that device is doing timestamping */
3090891434b1SRayagond Kokatanur 			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
3091891434b1SRayagond Kokatanur 			priv->hw->desc->enable_tx_timestamp(first);
3092891434b1SRayagond Kokatanur 		}
3093891434b1SRayagond Kokatanur 
30940e80bdc9SGiuseppe Cavallaro 		/* Prepare the first descriptor setting the OWN bit too */
30950e80bdc9SGiuseppe Cavallaro 		priv->hw->desc->prepare_tx_desc(first, 1, nopaged_len,
30960e80bdc9SGiuseppe Cavallaro 						csum_insertion, priv->mode, 1,
30970e80bdc9SGiuseppe Cavallaro 						last_segment);
30980e80bdc9SGiuseppe Cavallaro 
30990e80bdc9SGiuseppe Cavallaro 		/* The own bit must be the latest setting done when preparing the
31000e80bdc9SGiuseppe Cavallaro 		 * descriptor, and then a barrier is needed to make sure that
31010e80bdc9SGiuseppe Cavallaro 		 * all is coherent before granting the DMA engine.
31020e80bdc9SGiuseppe Cavallaro 		 */
3103ad688cdbSPavel Machek 		dma_wmb();
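		/* Once the DMA is started below, the first descriptor belongs
		 * to the hardware and must not be touched by the CPU until
		 * stmmac_tx_clean() reclaims it.
		 */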
31040e80bdc9SGiuseppe Cavallaro 	}
31057ac6653aSJeff Kirsher 
3106aff3d9efSJoao Pinto 	netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
3107f748be53SAlexandre TORGUE 
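	/* Kick the DMA: pre-4.00 cores use the transmit poll demand register,
	 * whereas GMAC4 and newer advance the ring by writing the tail
	 * pointer of the queue.
	 */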
3108f748be53SAlexandre TORGUE 	if (priv->synopsys_id < DWMAC_CORE_4_00)
31097ac6653aSJeff Kirsher 		priv->hw->dma->enable_dma_transmission(priv->ioaddr);
3110f748be53SAlexandre TORGUE 	else
3111aff3d9efSJoao Pinto 		priv->hw->dma->set_tx_tail_ptr(priv->ioaddr, tx_q->tx_tail_addr,
3112aff3d9efSJoao Pinto 					       queue);
31137ac6653aSJeff Kirsher 
3114362b37beSGiuseppe CAVALLARO 	return NETDEV_TX_OK;
3115a9097a96SGiuseppe CAVALLARO 
3116362b37beSGiuseppe CAVALLARO dma_map_err:
311738ddc59dSLABBE Corentin 	netdev_err(priv->dev, "Tx DMA map failed\n");
3118362b37beSGiuseppe CAVALLARO 	dev_kfree_skb(skb);
3119362b37beSGiuseppe CAVALLARO 	priv->dev->stats.tx_dropped++;
31207ac6653aSJeff Kirsher 	return NETDEV_TX_OK;
31217ac6653aSJeff Kirsher }
31227ac6653aSJeff Kirsher 
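/* stmmac_rx_vlan - strip the 802.1Q header from a received frame and pass
 * the VLAN id to the stack through the HW-accelerated tag, so the payload
 * looks like an untagged Ethernet frame.
 */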
3123b9381985SVince Bridgers static void stmmac_rx_vlan(struct net_device *dev, struct sk_buff *skb)
3124b9381985SVince Bridgers {
3125b9381985SVince Bridgers 	struct ethhdr *ehdr;
3126b9381985SVince Bridgers 	u16 vlanid;
3127b9381985SVince Bridgers 
3128b9381985SVince Bridgers 	if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) ==
3129b9381985SVince Bridgers 	    NETIF_F_HW_VLAN_CTAG_RX &&
3130b9381985SVince Bridgers 	    !__vlan_get_tag(skb, &vlanid)) {
3131b9381985SVince Bridgers 		/* pop the vlan tag */
3132b9381985SVince Bridgers 		ehdr = (struct ethhdr *)skb->data;
3133b9381985SVince Bridgers 		memmove(skb->data + VLAN_HLEN, ehdr, ETH_ALEN * 2);
3134b9381985SVince Bridgers 		skb_pull(skb, VLAN_HLEN);
3135b9381985SVince Bridgers 		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlanid);
3136b9381985SVince Bridgers 	}
3137b9381985SVince Bridgers }
3138b9381985SVince Bridgers 
3139b9381985SVince Bridgers 
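/* Returns 1 when the queue's zero-copy threshold counter indicates that
 * zero-copy reception should be avoided (e.g. after an skb allocation
 * failure in stmmac_rx_refill()); stmmac_rx() then copies the frames into
 * freshly allocated skbs instead.
 */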
3140aff3d9efSJoao Pinto static inline int stmmac_rx_threshold_count(struct stmmac_rx_queue *rx_q)
3141120e87f9SGiuseppe Cavallaro {
3142aff3d9efSJoao Pinto 	if (rx_q->rx_zeroc_thresh < STMMAC_RX_THRESH)
3143120e87f9SGiuseppe Cavallaro 		return 0;
3144120e87f9SGiuseppe Cavallaro 
3145120e87f9SGiuseppe Cavallaro 	return 1;
3146120e87f9SGiuseppe Cavallaro }
3147120e87f9SGiuseppe Cavallaro 
314832ceabcaSGiuseppe CAVALLARO /**
3149732fdf0eSGiuseppe CAVALLARO  * stmmac_rx_refill - refill used skb preallocated buffers
315032ceabcaSGiuseppe CAVALLARO  * @priv: driver private structure
3151aff3d9efSJoao Pinto  * @queue: RX queue index
315232ceabcaSGiuseppe CAVALLARO  * Description : this is to reallocate the skb for the reception process
315332ceabcaSGiuseppe CAVALLARO  * that is based on zero-copy.
315432ceabcaSGiuseppe CAVALLARO  */
3155aff3d9efSJoao Pinto static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
31567ac6653aSJeff Kirsher {
3157aff3d9efSJoao Pinto 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
3158aff3d9efSJoao Pinto 	int dirty = stmmac_rx_dirty(priv, queue);
3159aff3d9efSJoao Pinto 	unsigned int entry = rx_q->dirty_rx;
31607ac6653aSJeff Kirsher 	int bfsize = priv->dma_buf_sz;
31617ac6653aSJeff Kirsher 
3162e3ad57c9SGiuseppe Cavallaro 	while (dirty-- > 0) {
3163c24602efSGiuseppe CAVALLARO 		struct dma_desc *p;
3164c24602efSGiuseppe CAVALLARO 
3165c24602efSGiuseppe CAVALLARO 		if (priv->extend_desc)
3166aff3d9efSJoao Pinto 			p = (struct dma_desc *)(rx_q->dma_erx + entry);
3167c24602efSGiuseppe CAVALLARO 		else
3168aff3d9efSJoao Pinto 			p = rx_q->dma_rx + entry;
3169c24602efSGiuseppe CAVALLARO 
3170aff3d9efSJoao Pinto 		if (!rx_q->rx_skbuff[entry]) {
31717ac6653aSJeff Kirsher 			struct sk_buff *skb;
31727ac6653aSJeff Kirsher 
3173acb600deSEric Dumazet 			skb = netdev_alloc_skb_ip_align(priv->dev, bfsize);
3174120e87f9SGiuseppe Cavallaro 			if (unlikely(!skb)) {
3175120e87f9SGiuseppe Cavallaro 				/* so for a while no zero-copy! */
3176aff3d9efSJoao Pinto 				rx_q->rx_zeroc_thresh = STMMAC_RX_THRESH;
3177120e87f9SGiuseppe Cavallaro 				if (unlikely(net_ratelimit()))
3178120e87f9SGiuseppe Cavallaro 					dev_err(priv->device,
3179120e87f9SGiuseppe Cavallaro 						"fail to alloc skb entry %d\n",
3180120e87f9SGiuseppe Cavallaro 						entry);
31817ac6653aSJeff Kirsher 				break;
3182120e87f9SGiuseppe Cavallaro 			}
31837ac6653aSJeff Kirsher 
3184aff3d9efSJoao Pinto 			rx_q->rx_skbuff[entry] = skb;
3185aff3d9efSJoao Pinto 			rx_q->rx_skbuff_dma[entry] =
31867ac6653aSJeff Kirsher 			    dma_map_single(priv->device, skb->data, bfsize,
31877ac6653aSJeff Kirsher 					   DMA_FROM_DEVICE);
3188362b37beSGiuseppe CAVALLARO 			if (dma_mapping_error(priv->device,
3189aff3d9efSJoao Pinto 					      rx_q->rx_skbuff_dma[entry])) {
319038ddc59dSLABBE Corentin 				netdev_err(priv->dev, "Rx DMA map failed\n");
3191362b37beSGiuseppe CAVALLARO 				dev_kfree_skb(skb);
3192362b37beSGiuseppe CAVALLARO 				break;
3193362b37beSGiuseppe CAVALLARO 			}
3194286a8372SGiuseppe CAVALLARO 
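			/* Program the new buffer address into the descriptor:
			 * des0/des1 on GMAC4 and newer, des2 on older cores.
			 */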
3195f748be53SAlexandre TORGUE 			if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00)) {
3196aff3d9efSJoao Pinto 				p->des0 = cpu_to_le32(rx_q->rx_skbuff_dma[entry]);
3197f748be53SAlexandre TORGUE 				p->des1 = 0;
3198f748be53SAlexandre TORGUE 			} else {
3199aff3d9efSJoao Pinto 				p->des2 = cpu_to_le32(rx_q->rx_skbuff_dma[entry]);
3200f748be53SAlexandre TORGUE 			}
3201f748be53SAlexandre TORGUE 			if (priv->hw->mode->refill_desc3)
3202aff3d9efSJoao Pinto 				priv->hw->mode->refill_desc3(rx_q, p);
3203286a8372SGiuseppe CAVALLARO 
3204aff3d9efSJoao Pinto 			if (rx_q->rx_zeroc_thresh > 0)
3205aff3d9efSJoao Pinto 				rx_q->rx_zeroc_thresh--;
3206120e87f9SGiuseppe Cavallaro 
3207b3e51069SLABBE Corentin 			netif_dbg(priv, rx_status, priv->dev,
320838ddc59dSLABBE Corentin 				  "refill entry #%d\n", entry);
32097ac6653aSJeff Kirsher 		}
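		/* Ensure all descriptor updates are visible to the device
		 * before ownership is handed back (OWN bit set) below.
		 */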
3210ad688cdbSPavel Machek 		dma_wmb();
3211f748be53SAlexandre TORGUE 
3212f748be53SAlexandre TORGUE 		if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00))
3213f748be53SAlexandre TORGUE 			priv->hw->desc->init_rx_desc(p, priv->use_riwt, 0, 0);
3214f748be53SAlexandre TORGUE 		else
3215c24602efSGiuseppe CAVALLARO 			priv->hw->desc->set_rx_owner(p);
3216f748be53SAlexandre TORGUE 
3217ad688cdbSPavel Machek 		dma_wmb();
3218e3ad57c9SGiuseppe Cavallaro 
3219e3ad57c9SGiuseppe Cavallaro 		entry = STMMAC_GET_ENTRY(entry, DMA_RX_SIZE);
32207ac6653aSJeff Kirsher 	}
3221aff3d9efSJoao Pinto 	rx_q->dirty_rx = entry;
32227ac6653aSJeff Kirsher }
32237ac6653aSJeff Kirsher 
322432ceabcaSGiuseppe CAVALLARO /**
3225732fdf0eSGiuseppe CAVALLARO  * stmmac_rx - manage the receive process
322632ceabcaSGiuseppe CAVALLARO  * @priv: driver private structure
322732ceabcaSGiuseppe CAVALLARO  * @limit: napi budget.
322832ceabcaSGiuseppe CAVALLARO  * Description : this is the function called by the napi poll method.
322932ceabcaSGiuseppe CAVALLARO  * It gets all the frames inside the ring.
323032ceabcaSGiuseppe CAVALLARO  */
3231aff3d9efSJoao Pinto static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
32327ac6653aSJeff Kirsher {
3233aff3d9efSJoao Pinto 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
3234aff3d9efSJoao Pinto 	unsigned int entry = rx_q->cur_rx;
3235aff3d9efSJoao Pinto 	int coe = priv->hw->rx_csum;
32367ac6653aSJeff Kirsher 	unsigned int next_entry;
32377ac6653aSJeff Kirsher 	unsigned int count = 0;
32387ac6653aSJeff Kirsher 
323983d7af64SGiuseppe CAVALLARO 	if (netif_msg_rx_status(priv)) {
3240d0225e7dSAlexandre TORGUE 		void *rx_head;
3241d0225e7dSAlexandre TORGUE 
324238ddc59dSLABBE Corentin 		netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
3243c24602efSGiuseppe CAVALLARO 		if (priv->extend_desc)
3244aff3d9efSJoao Pinto 			rx_head = (void *)rx_q->dma_erx;
3245c24602efSGiuseppe CAVALLARO 		else
3246aff3d9efSJoao Pinto 			rx_head = (void *)rx_q->dma_rx;
3247d0225e7dSAlexandre TORGUE 
3248d0225e7dSAlexandre TORGUE 		priv->hw->desc->display_ring(rx_head, DMA_RX_SIZE, true);
32497ac6653aSJeff Kirsher 	}
3250c24602efSGiuseppe CAVALLARO 	while (count < limit) {
32517ac6653aSJeff Kirsher 		int status;
32529401bb5cSGiuseppe CAVALLARO 		struct dma_desc *p;
3253ba1ffd74SGiuseppe CAVALLARO 		struct dma_desc *np;
32547ac6653aSJeff Kirsher 
3255c24602efSGiuseppe CAVALLARO 		if (priv->extend_desc)
3256aff3d9efSJoao Pinto 			p = (struct dma_desc *)(rx_q->dma_erx + entry);
3257c24602efSGiuseppe CAVALLARO 		else
3258aff3d9efSJoao Pinto 			p = rx_q->dma_rx + entry;
3259c24602efSGiuseppe CAVALLARO 
3260c1fa3212SFabrice Gasnier 		/* read the status of the incoming frame */
3261c1fa3212SFabrice Gasnier 		status = priv->hw->desc->rx_status(&priv->dev->stats,
3262c1fa3212SFabrice Gasnier 						   &priv->xstats, p);
3263c1fa3212SFabrice Gasnier 		/* check if managed by the DMA otherwise go ahead */
3264c1fa3212SFabrice Gasnier 		if (unlikely(status & dma_own))
32657ac6653aSJeff Kirsher 			break;
32667ac6653aSJeff Kirsher 
32677ac6653aSJeff Kirsher 		count++;
32687ac6653aSJeff Kirsher 
3269aff3d9efSJoao Pinto 		rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx, DMA_RX_SIZE);
3270aff3d9efSJoao Pinto 		next_entry = rx_q->cur_rx;
3271e3ad57c9SGiuseppe Cavallaro 
3272c24602efSGiuseppe CAVALLARO 		if (priv->extend_desc)
3273aff3d9efSJoao Pinto 			np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
3274c24602efSGiuseppe CAVALLARO 		else
3275aff3d9efSJoao Pinto 			np = rx_q->dma_rx + next_entry;
3276ba1ffd74SGiuseppe CAVALLARO 
3277ba1ffd74SGiuseppe CAVALLARO 		prefetch(np);
32787ac6653aSJeff Kirsher 
3279c24602efSGiuseppe CAVALLARO 		if ((priv->extend_desc) && (priv->hw->desc->rx_extended_status))
3280c24602efSGiuseppe CAVALLARO 			priv->hw->desc->rx_extended_status(&priv->dev->stats,
3281c24602efSGiuseppe CAVALLARO 							   &priv->xstats,
3282aff3d9efSJoao Pinto 							   rx_q->dma_erx +
3283c24602efSGiuseppe CAVALLARO 							   entry);
3284891434b1SRayagond Kokatanur 		if (unlikely(status == discard_frame)) {
32857ac6653aSJeff Kirsher 			priv->dev->stats.rx_errors++;
3286891434b1SRayagond Kokatanur 			if (priv->hwts_rx_en && !priv->extend_desc) {
32878d45e42bSLABBE Corentin 				/* DESC2 & DESC3 will be overwritten by device
3288891434b1SRayagond Kokatanur 				 * with timestamp value, hence reinitialize
3289891434b1SRayagond Kokatanur 				 * them in stmmac_rx_refill() function so that
3290891434b1SRayagond Kokatanur 				 * them in the stmmac_rx_refill() function so that
3291891434b1SRayagond Kokatanur 				 * the device can reuse them.
3292aff3d9efSJoao Pinto 				rx_q->rx_skbuff[entry] = NULL;
3293891434b1SRayagond Kokatanur 				dma_unmap_single(priv->device,
3294aff3d9efSJoao Pinto 						 rx_q->rx_skbuff_dma[entry],
3295ceb69499SGiuseppe CAVALLARO 						 priv->dma_buf_sz,
3296ceb69499SGiuseppe CAVALLARO 						 DMA_FROM_DEVICE);
3297891434b1SRayagond Kokatanur 			}
3298891434b1SRayagond Kokatanur 		} else {
32997ac6653aSJeff Kirsher 			struct sk_buff *skb;
33007ac6653aSJeff Kirsher 			int frame_len;
3301f748be53SAlexandre TORGUE 			unsigned int des;
3302f748be53SAlexandre TORGUE 
3303f748be53SAlexandre TORGUE 			if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00))
3304f8be0d78SMichael Weiser 				des = le32_to_cpu(p->des0);
3305f748be53SAlexandre TORGUE 			else
3306f8be0d78SMichael Weiser 				des = le32_to_cpu(p->des2);
33077ac6653aSJeff Kirsher 
3308ceb69499SGiuseppe CAVALLARO 			frame_len = priv->hw->desc->get_rx_frame_len(p, coe);
3309ceb69499SGiuseppe CAVALLARO 
33108d45e42bSLABBE Corentin 			/*  If frame length is greater than skb buffer size
3311f748be53SAlexandre TORGUE 			 *  (preallocated during init) then the packet is
3312f748be53SAlexandre TORGUE 			 *  ignored
3313f748be53SAlexandre TORGUE 			 */
3314e527c4a7SGiuseppe CAVALLARO 			if (frame_len > priv->dma_buf_sz) {
331538ddc59dSLABBE Corentin 				netdev_err(priv->dev,
331638ddc59dSLABBE Corentin 					   "len %d larger than size (%d)\n",
331738ddc59dSLABBE Corentin 					   frame_len, priv->dma_buf_sz);
3318e527c4a7SGiuseppe CAVALLARO 				priv->dev->stats.rx_length_errors++;
3319e527c4a7SGiuseppe CAVALLARO 				break;
3320e527c4a7SGiuseppe CAVALLARO 			}
3321e527c4a7SGiuseppe CAVALLARO 
33227ac6653aSJeff Kirsher 			/* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3
3323ceb69499SGiuseppe CAVALLARO 			 * Type frames (LLC/LLC-SNAP)
3324ceb69499SGiuseppe CAVALLARO 			 */
33257ac6653aSJeff Kirsher 			if (unlikely(status != llc_snap))
33267ac6653aSJeff Kirsher 				frame_len -= ETH_FCS_LEN;
33277ac6653aSJeff Kirsher 
332883d7af64SGiuseppe CAVALLARO 			if (netif_msg_rx_status(priv)) {
332938ddc59dSLABBE Corentin 				netdev_dbg(priv->dev, "\tdesc: %p [entry %d] buff=0x%x\n",
3330f748be53SAlexandre TORGUE 					   p, entry, des);
333183d7af64SGiuseppe CAVALLARO 				if (frame_len > ETH_FRAME_LEN)
333238ddc59dSLABBE Corentin 					netdev_dbg(priv->dev, "frame size %d, COE: %d\n",
333383d7af64SGiuseppe CAVALLARO 						   frame_len, status);
333483d7af64SGiuseppe CAVALLARO 			}
333522ad3838SGiuseppe Cavallaro 
3336f748be53SAlexandre TORGUE 			/* Zero-copy is always used, whatever the frame size,
3337f748be53SAlexandre TORGUE 			 * on GMAC4 because the used descriptors always have
3338f748be53SAlexandre TORGUE 			 * to be refilled.
3339f748be53SAlexandre TORGUE 			 */
3340f748be53SAlexandre TORGUE 			if (unlikely(!priv->plat->has_gmac4 &&
3341f748be53SAlexandre TORGUE 				     ((frame_len < priv->rx_copybreak) ||
3342aff3d9efSJoao Pinto 				     stmmac_rx_threshold_count(rx_q)))) {
334322ad3838SGiuseppe Cavallaro 				skb = netdev_alloc_skb_ip_align(priv->dev,
334422ad3838SGiuseppe Cavallaro 								frame_len);
334522ad3838SGiuseppe Cavallaro 				if (unlikely(!skb)) {
334622ad3838SGiuseppe Cavallaro 					if (net_ratelimit())
334722ad3838SGiuseppe Cavallaro 						dev_warn(priv->device,
334822ad3838SGiuseppe Cavallaro 							 "packet dropped\n");
334922ad3838SGiuseppe Cavallaro 					priv->dev->stats.rx_dropped++;
335022ad3838SGiuseppe Cavallaro 					break;
335122ad3838SGiuseppe Cavallaro 				}
335222ad3838SGiuseppe Cavallaro 
335322ad3838SGiuseppe Cavallaro 				dma_sync_single_for_cpu(priv->device,
3354aff3d9efSJoao Pinto 							rx_q->rx_skbuff_dma
335522ad3838SGiuseppe Cavallaro 							[entry], frame_len,
335622ad3838SGiuseppe Cavallaro 							DMA_FROM_DEVICE);
335722ad3838SGiuseppe Cavallaro 				skb_copy_to_linear_data(skb,
3358aff3d9efSJoao Pinto 							rx_q->
335922ad3838SGiuseppe Cavallaro 							rx_skbuff[entry]->data,
336022ad3838SGiuseppe Cavallaro 							frame_len);
336122ad3838SGiuseppe Cavallaro 
336222ad3838SGiuseppe Cavallaro 				skb_put(skb, frame_len);
336322ad3838SGiuseppe Cavallaro 				dma_sync_single_for_device(priv->device,
3364aff3d9efSJoao Pinto 							   rx_q->rx_skbuff_dma
336522ad3838SGiuseppe Cavallaro 							   [entry], frame_len,
336622ad3838SGiuseppe Cavallaro 							   DMA_FROM_DEVICE);
336722ad3838SGiuseppe Cavallaro 			} else {
3368aff3d9efSJoao Pinto 				skb = rx_q->rx_skbuff[entry];
33697ac6653aSJeff Kirsher 				if (unlikely(!skb)) {
337038ddc59dSLABBE Corentin 					netdev_err(priv->dev,
337138ddc59dSLABBE Corentin 						   "%s: Inconsistent Rx chain\n",
33727ac6653aSJeff Kirsher 						   priv->dev->name);
33737ac6653aSJeff Kirsher 					priv->dev->stats.rx_dropped++;
33747ac6653aSJeff Kirsher 					break;
33757ac6653aSJeff Kirsher 				}
33767ac6653aSJeff Kirsher 				prefetch(skb->data - NET_IP_ALIGN);
3377aff3d9efSJoao Pinto 				rx_q->rx_skbuff[entry] = NULL;
3378aff3d9efSJoao Pinto 				rx_q->rx_zeroc_thresh++;
33797ac6653aSJeff Kirsher 
33807ac6653aSJeff Kirsher 				skb_put(skb, frame_len);
33817ac6653aSJeff Kirsher 				dma_unmap_single(priv->device,
3382aff3d9efSJoao Pinto 						 rx_q->rx_skbuff_dma[entry],
338322ad3838SGiuseppe Cavallaro 						 priv->dma_buf_sz,
338422ad3838SGiuseppe Cavallaro 						 DMA_FROM_DEVICE);
338522ad3838SGiuseppe Cavallaro 			}
338622ad3838SGiuseppe Cavallaro 
33877ac6653aSJeff Kirsher 			if (netif_msg_pktdata(priv)) {
338838ddc59dSLABBE Corentin 				netdev_dbg(priv->dev, "frame received (%dbytes)",
338938ddc59dSLABBE Corentin 					   frame_len);
33907ac6653aSJeff Kirsher 				print_pkt(skb->data, frame_len);
33917ac6653aSJeff Kirsher 			}
339283d7af64SGiuseppe CAVALLARO 
3393ba1ffd74SGiuseppe CAVALLARO 			stmmac_get_rx_hwtstamp(priv, p, np, skb);
3394ba1ffd74SGiuseppe CAVALLARO 
3395b9381985SVince Bridgers 			stmmac_rx_vlan(priv->dev, skb);
3396b9381985SVince Bridgers 
33977ac6653aSJeff Kirsher 			skb->protocol = eth_type_trans(skb, priv->dev);
33987ac6653aSJeff Kirsher 
3399ceb69499SGiuseppe CAVALLARO 			if (unlikely(!coe))
34007ac6653aSJeff Kirsher 				skb_checksum_none_assert(skb);
340162a2ab93SGiuseppe CAVALLARO 			else
34027ac6653aSJeff Kirsher 				skb->ip_summed = CHECKSUM_UNNECESSARY;
340362a2ab93SGiuseppe CAVALLARO 
3404aff3d9efSJoao Pinto 			napi_gro_receive(&rx_q->napi, skb);
34057ac6653aSJeff Kirsher 
34067ac6653aSJeff Kirsher 			priv->dev->stats.rx_packets++;
34077ac6653aSJeff Kirsher 			priv->dev->stats.rx_bytes += frame_len;
34087ac6653aSJeff Kirsher 		}
34097ac6653aSJeff Kirsher 		entry = next_entry;
34107ac6653aSJeff Kirsher 	}
34117ac6653aSJeff Kirsher 
3412aff3d9efSJoao Pinto 	stmmac_rx_refill(priv, queue);
34137ac6653aSJeff Kirsher 
34147ac6653aSJeff Kirsher 	priv->xstats.rx_pkt_n += count;
34157ac6653aSJeff Kirsher 
34167ac6653aSJeff Kirsher 	return count;
34177ac6653aSJeff Kirsher }
34187ac6653aSJeff Kirsher 
34197ac6653aSJeff Kirsher /**
34207ac6653aSJeff Kirsher  *  stmmac_poll - stmmac poll method (NAPI)
34217ac6653aSJeff Kirsher  *  @napi : pointer to the napi structure.
34227ac6653aSJeff Kirsher  *  @budget : maximum number of packets that the current CPU can receive from
34237ac6653aSJeff Kirsher  *	      all interfaces.
34247ac6653aSJeff Kirsher  *  Description :
34259125cdd1SGiuseppe CAVALLARO  *  To look at the incoming frames and clear the tx resources.
34267ac6653aSJeff Kirsher  */
34277ac6653aSJeff Kirsher static int stmmac_poll(struct napi_struct *napi, int budget)
34287ac6653aSJeff Kirsher {
3429aff3d9efSJoao Pinto 	struct stmmac_rx_queue *rx_q =
3430aff3d9efSJoao Pinto 		container_of(napi, struct stmmac_rx_queue, napi);
3431aff3d9efSJoao Pinto 	struct stmmac_priv *priv = rx_q->priv_data;
3432aff3d9efSJoao Pinto 	u32 tx_count = priv->dma_cap.number_tx_queues;
3433aff3d9efSJoao Pinto 	u32 chan = rx_q->queue_index;
3434aff3d9efSJoao Pinto 	u32 work_done = 0;
3435aff3d9efSJoao Pinto 	u32 queue = 0;
34367ac6653aSJeff Kirsher 
34379125cdd1SGiuseppe CAVALLARO 	priv->xstats.napi_poll++;
3438aff3d9efSJoao Pinto 	/* check all the queues */
3439aff3d9efSJoao Pinto 	for (queue = 0; queue < tx_count; queue++)
3440aff3d9efSJoao Pinto 		stmmac_tx_clean(priv, queue);
34417ac6653aSJeff Kirsher 
3442aff3d9efSJoao Pinto 	/* Process RX packets from this queue */
3443aff3d9efSJoao Pinto 	work_done = stmmac_rx(priv, budget, rx_q->queue_index);
3444aff3d9efSJoao Pinto 
34457ac6653aSJeff Kirsher 	if (work_done < budget) {
34466ad20165SEric Dumazet 		napi_complete_done(napi, work_done);
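		/* All work done within budget: re-enable the per-channel DMA
		 * interrupts that were masked when this NAPI poll was
		 * scheduled.
		 */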
34474f513ecdSJoao Pinto 		stmmac_enable_dma_irq(priv, chan);
34487ac6653aSJeff Kirsher 	}
34497ac6653aSJeff Kirsher 	return work_done;
34507ac6653aSJeff Kirsher }
34517ac6653aSJeff Kirsher 
34527ac6653aSJeff Kirsher /**
34537ac6653aSJeff Kirsher  *  stmmac_tx_timeout
34547ac6653aSJeff Kirsher  *  @dev : Pointer to net device structure
34557ac6653aSJeff Kirsher  *  Description: this function is called when a packet transmission fails to
34567284a3f1SGiuseppe CAVALLARO  *   complete within a reasonable time. The driver will mark the error in the
34577ac6653aSJeff Kirsher  *   netdev structure and arrange for the device to be reset to a sane state
34587ac6653aSJeff Kirsher  *   in order to transmit a new packet.
34597ac6653aSJeff Kirsher  */
34607ac6653aSJeff Kirsher static void stmmac_tx_timeout(struct net_device *dev)
34617ac6653aSJeff Kirsher {
34627ac6653aSJeff Kirsher 	struct stmmac_priv *priv = netdev_priv(dev);
3463aff3d9efSJoao Pinto 	u32 tx_count = priv->plat->tx_queues_to_use;
3464aff3d9efSJoao Pinto 	u32 chan;
34657ac6653aSJeff Kirsher 
34667ac6653aSJeff Kirsher 	/* Clear Tx resources and restart transmitting again */
3467aff3d9efSJoao Pinto 	for (chan = 0; chan < tx_count; chan++)
34684e593262SJoao Pinto 		stmmac_tx_err(priv, chan);
34697ac6653aSJeff Kirsher }
34707ac6653aSJeff Kirsher 
34717ac6653aSJeff Kirsher /**
347201789349SJiri Pirko  *  stmmac_set_rx_mode - entry point for multicast addressing
34737ac6653aSJeff Kirsher  *  @dev : pointer to the device structure
34747ac6653aSJeff Kirsher  *  Description:
34757ac6653aSJeff Kirsher  *  This function is a driver entry point which gets called by the kernel
34767ac6653aSJeff Kirsher  *  whenever multicast addresses must be enabled/disabled.
34777ac6653aSJeff Kirsher  *  Return value:
34787ac6653aSJeff Kirsher  *  void.
34797ac6653aSJeff Kirsher  */
348001789349SJiri Pirko static void stmmac_set_rx_mode(struct net_device *dev)
34817ac6653aSJeff Kirsher {
34827ac6653aSJeff Kirsher 	struct stmmac_priv *priv = netdev_priv(dev);
34837ac6653aSJeff Kirsher 
34843b57de95SVince Bridgers 	priv->hw->mac->set_filter(priv->hw, dev);
34857ac6653aSJeff Kirsher }
34867ac6653aSJeff Kirsher 
34877ac6653aSJeff Kirsher /**
34887ac6653aSJeff Kirsher  *  stmmac_change_mtu - entry point to change MTU size for the device.
34897ac6653aSJeff Kirsher  *  @dev : device pointer.
34907ac6653aSJeff Kirsher  *  @new_mtu : the new MTU size for the device.
34917ac6653aSJeff Kirsher  *  Description: the Maximum Transfer Unit (MTU) is used by the network layer
34927ac6653aSJeff Kirsher  *  to drive packet transmission. Ethernet has an MTU of 1500 octets
34937ac6653aSJeff Kirsher  *  (ETH_DATA_LEN). This value can be changed with ifconfig.
34947ac6653aSJeff Kirsher  *  Return value:
34957ac6653aSJeff Kirsher  *  0 on success and an appropriate (-)ve integer as defined in errno.h
34967ac6653aSJeff Kirsher  *  file on failure.
34977ac6653aSJeff Kirsher  */
34987ac6653aSJeff Kirsher static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
34997ac6653aSJeff Kirsher {
350038ddc59dSLABBE Corentin 	struct stmmac_priv *priv = netdev_priv(dev);
350138ddc59dSLABBE Corentin 
35027ac6653aSJeff Kirsher 	if (netif_running(dev)) {
350338ddc59dSLABBE Corentin 		netdev_err(priv->dev, "must be stopped to change its MTU\n");
35047ac6653aSJeff Kirsher 		return -EBUSY;
35057ac6653aSJeff Kirsher 	}
35067ac6653aSJeff Kirsher 
35077ac6653aSJeff Kirsher 	dev->mtu = new_mtu;
3508f748be53SAlexandre TORGUE 
35097ac6653aSJeff Kirsher 	netdev_update_features(dev);
35107ac6653aSJeff Kirsher 
35117ac6653aSJeff Kirsher 	return 0;
35127ac6653aSJeff Kirsher }
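/* Typical usage from user space (hypothetical interface name; the accepted
 * range depends on the core, see stmmac_dvr_probe()): the interface has to
 * be brought down first, e.g.
 *
 *	ip link set dev eth0 down
 *	ip link set dev eth0 mtu 7000
 *	ip link set dev eth0 up
 */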
35137ac6653aSJeff Kirsher 
3514c8f44affSMichał Mirosław static netdev_features_t stmmac_fix_features(struct net_device *dev,
3515c8f44affSMichał Mirosław 					     netdev_features_t features)
35167ac6653aSJeff Kirsher {
35177ac6653aSJeff Kirsher 	struct stmmac_priv *priv = netdev_priv(dev);
35187ac6653aSJeff Kirsher 
351938912bdbSDeepak SIKRI 	if (priv->plat->rx_coe == STMMAC_RX_COE_NONE)
35207ac6653aSJeff Kirsher 		features &= ~NETIF_F_RXCSUM;
3521d2afb5bdSGiuseppe CAVALLARO 
35227ac6653aSJeff Kirsher 	if (!priv->plat->tx_coe)
3523a188222bSTom Herbert 		features &= ~NETIF_F_CSUM_MASK;
35247ac6653aSJeff Kirsher 
35257ac6653aSJeff Kirsher 	/* Some GMAC devices have bugged Jumbo frame support that
35267ac6653aSJeff Kirsher 	 * needs to have the Tx COE disabled for oversized frames
35277ac6653aSJeff Kirsher 	 * (due to limited buffer sizes). In this case we disable
3528ceb69499SGiuseppe CAVALLARO 	 * the TX csum insertion in the TDES and not use SF.
3529ceb69499SGiuseppe CAVALLARO 	 */
35307ac6653aSJeff Kirsher 	if (priv->plat->bugged_jumbo && (dev->mtu > ETH_DATA_LEN))
3531a188222bSTom Herbert 		features &= ~NETIF_F_CSUM_MASK;
35327ac6653aSJeff Kirsher 
3533f748be53SAlexandre TORGUE 	/* Disable tso if asked by ethtool */
3534f748be53SAlexandre TORGUE 	if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
3535f748be53SAlexandre TORGUE 		if (features & NETIF_F_TSO)
3536f748be53SAlexandre TORGUE 			priv->tso = true;
3537f748be53SAlexandre TORGUE 		else
3538f748be53SAlexandre TORGUE 			priv->tso = false;
3539f748be53SAlexandre TORGUE 	}
3540f748be53SAlexandre TORGUE 
35417ac6653aSJeff Kirsher 	return features;
35427ac6653aSJeff Kirsher }
35437ac6653aSJeff Kirsher 
3544d2afb5bdSGiuseppe CAVALLARO static int stmmac_set_features(struct net_device *netdev,
3545d2afb5bdSGiuseppe CAVALLARO 			       netdev_features_t features)
3546d2afb5bdSGiuseppe CAVALLARO {
3547d2afb5bdSGiuseppe CAVALLARO 	struct stmmac_priv *priv = netdev_priv(netdev);
3548d2afb5bdSGiuseppe CAVALLARO 
3549d2afb5bdSGiuseppe CAVALLARO 	/* Keep the COE Type if checksum offload is supported */
3550d2afb5bdSGiuseppe CAVALLARO 	if (features & NETIF_F_RXCSUM)
3551d2afb5bdSGiuseppe CAVALLARO 		priv->hw->rx_csum = priv->plat->rx_coe;
3552d2afb5bdSGiuseppe CAVALLARO 	else
3553d2afb5bdSGiuseppe CAVALLARO 		priv->hw->rx_csum = 0;
3554d2afb5bdSGiuseppe CAVALLARO 	/* No check needed because rx_coe has been set before and it will be
3555d2afb5bdSGiuseppe CAVALLARO 	 * fixed up in case of an issue.
3556d2afb5bdSGiuseppe CAVALLARO 	 */
3557d2afb5bdSGiuseppe CAVALLARO 	priv->hw->mac->rx_ipc(priv->hw);
3558d2afb5bdSGiuseppe CAVALLARO 
3559d2afb5bdSGiuseppe CAVALLARO 	return 0;
3560d2afb5bdSGiuseppe CAVALLARO }
3561d2afb5bdSGiuseppe CAVALLARO 
356232ceabcaSGiuseppe CAVALLARO /**
356332ceabcaSGiuseppe CAVALLARO  *  stmmac_interrupt - main ISR
356432ceabcaSGiuseppe CAVALLARO  *  @irq: interrupt number.
356532ceabcaSGiuseppe CAVALLARO  *  @dev_id: to pass the net device pointer.
356632ceabcaSGiuseppe CAVALLARO  *  Description: this is the main driver interrupt service routine.
3567732fdf0eSGiuseppe CAVALLARO  *  It can call:
3568732fdf0eSGiuseppe CAVALLARO  *  o DMA service routine (to manage incoming frame reception and transmission
3569732fdf0eSGiuseppe CAVALLARO  *    status)
3570732fdf0eSGiuseppe CAVALLARO  *  o Core interrupts to manage: remote wake-up, management counter, LPI
357132ceabcaSGiuseppe CAVALLARO  *    interrupts.
357232ceabcaSGiuseppe CAVALLARO  */
35737ac6653aSJeff Kirsher static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
35747ac6653aSJeff Kirsher {
35757ac6653aSJeff Kirsher 	struct net_device *dev = (struct net_device *)dev_id;
35767ac6653aSJeff Kirsher 	struct stmmac_priv *priv = netdev_priv(dev);
35777bac4e1eSJoao Pinto 	u32 rx_cnt = priv->plat->rx_queues_to_use;
35787bac4e1eSJoao Pinto 	u32 tx_cnt = priv->plat->tx_queues_to_use;
35797bac4e1eSJoao Pinto 	u32 queues_count;
35807bac4e1eSJoao Pinto 	u32 queue;
35817bac4e1eSJoao Pinto 
35827bac4e1eSJoao Pinto 	queues_count = (rx_cnt > tx_cnt) ? rx_cnt : tx_cnt;
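	/* Service as many MTL queues as the larger of the RX and TX counts */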
35837ac6653aSJeff Kirsher 
358489f7f2cfSSrinivas Kandagatla 	if (priv->irq_wake)
358589f7f2cfSSrinivas Kandagatla 		pm_wakeup_event(priv->device, 0);
358689f7f2cfSSrinivas Kandagatla 
35877ac6653aSJeff Kirsher 	if (unlikely(!dev)) {
358838ddc59dSLABBE Corentin 		netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
35897ac6653aSJeff Kirsher 		return IRQ_NONE;
35907ac6653aSJeff Kirsher 	}
35917ac6653aSJeff Kirsher 
35927ac6653aSJeff Kirsher 	/* To handle GMAC own interrupts */
3593f748be53SAlexandre TORGUE 	if ((priv->plat->has_gmac) || (priv->plat->has_gmac4)) {
35947ed24bbeSVince Bridgers 		int status = priv->hw->mac->host_irq_status(priv->hw,
35950982a0f6SGiuseppe CAVALLARO 							    &priv->xstats);
35968f71a88dSJoao Pinto 
3597d765955dSGiuseppe CAVALLARO 		if (unlikely(status)) {
3598d765955dSGiuseppe CAVALLARO 			/* For LPI we need to save the tx status */
35990982a0f6SGiuseppe CAVALLARO 			if (status & CORE_IRQ_TX_PATH_IN_LPI_MODE)
3600d765955dSGiuseppe CAVALLARO 				priv->tx_path_in_lpi_mode = true;
36010982a0f6SGiuseppe CAVALLARO 			if (status & CORE_IRQ_TX_PATH_EXIT_LPI_MODE)
3602d765955dSGiuseppe CAVALLARO 				priv->tx_path_in_lpi_mode = false;
36037bac4e1eSJoao Pinto 		}
36047bac4e1eSJoao Pinto 
36057bac4e1eSJoao Pinto 		if (priv->synopsys_id >= DWMAC_CORE_4_00) {
36067bac4e1eSJoao Pinto 			for (queue = 0; queue < queues_count; queue++) {
3607aff3d9efSJoao Pinto 				struct stmmac_rx_queue *rx_q =
3608aff3d9efSJoao Pinto 				&priv->rx_queue[queue];
3609aff3d9efSJoao Pinto 
36107bac4e1eSJoao Pinto 				status |=
36117bac4e1eSJoao Pinto 				priv->hw->mac->host_mtl_irq_status(priv->hw,
36127bac4e1eSJoao Pinto 								   queue);
36137bac4e1eSJoao Pinto 
36147bac4e1eSJoao Pinto 				if (status & CORE_IRQ_MTL_RX_OVERFLOW &&
36157bac4e1eSJoao Pinto 				    priv->hw->dma->set_rx_tail_ptr)
3616f748be53SAlexandre TORGUE 					priv->hw->dma->set_rx_tail_ptr(priv->ioaddr,
3617aff3d9efSJoao Pinto 								rx_q->rx_tail_addr,
36187bac4e1eSJoao Pinto 								queue);
36197bac4e1eSJoao Pinto 			}
3620d765955dSGiuseppe CAVALLARO 		}
362170523e63SGiuseppe CAVALLARO 
362270523e63SGiuseppe CAVALLARO 		/* PCS link status */
36233fe5cadbSGiuseppe CAVALLARO 		if (priv->hw->pcs) {
362470523e63SGiuseppe CAVALLARO 			if (priv->xstats.pcs_link)
362570523e63SGiuseppe CAVALLARO 				netif_carrier_on(dev);
362670523e63SGiuseppe CAVALLARO 			else
362770523e63SGiuseppe CAVALLARO 				netif_carrier_off(dev);
362870523e63SGiuseppe CAVALLARO 		}
3629d765955dSGiuseppe CAVALLARO 	}
3630d765955dSGiuseppe CAVALLARO 
3631d765955dSGiuseppe CAVALLARO 	/* To handle DMA interrupts */
36327ac6653aSJeff Kirsher 	stmmac_dma_interrupt(priv);
36337ac6653aSJeff Kirsher 
36347ac6653aSJeff Kirsher 	return IRQ_HANDLED;
36357ac6653aSJeff Kirsher }
36367ac6653aSJeff Kirsher 
36377ac6653aSJeff Kirsher #ifdef CONFIG_NET_POLL_CONTROLLER
36387ac6653aSJeff Kirsher /* Polling receive - used by NETCONSOLE and other diagnostic tools
3639ceb69499SGiuseppe CAVALLARO  * to allow network I/O with interrupts disabled.
3640ceb69499SGiuseppe CAVALLARO  */
36417ac6653aSJeff Kirsher static void stmmac_poll_controller(struct net_device *dev)
36427ac6653aSJeff Kirsher {
36437ac6653aSJeff Kirsher 	disable_irq(dev->irq);
36447ac6653aSJeff Kirsher 	stmmac_interrupt(dev->irq, dev);
36457ac6653aSJeff Kirsher 	enable_irq(dev->irq);
36467ac6653aSJeff Kirsher }
36477ac6653aSJeff Kirsher #endif
36487ac6653aSJeff Kirsher 
36497ac6653aSJeff Kirsher /**
36507ac6653aSJeff Kirsher  *  stmmac_ioctl - Entry point for the Ioctl
36517ac6653aSJeff Kirsher  *  @dev: Device pointer.
36527ac6653aSJeff Kirsher  *  @rq: An IOCTL-specific structure that can contain a pointer to
36537ac6653aSJeff Kirsher  *  a proprietary structure used to pass information to the driver.
36547ac6653aSJeff Kirsher  *  @cmd: IOCTL command
36557ac6653aSJeff Kirsher  *  Description:
365632ceabcaSGiuseppe CAVALLARO  *  Currently it supports the phy_mii_ioctl(...) and HW time stamping.
36577ac6653aSJeff Kirsher  */
36587ac6653aSJeff Kirsher static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
36597ac6653aSJeff Kirsher {
3660891434b1SRayagond Kokatanur 	int ret = -EOPNOTSUPP;
36617ac6653aSJeff Kirsher 
36627ac6653aSJeff Kirsher 	if (!netif_running(dev))
36637ac6653aSJeff Kirsher 		return -EINVAL;
36647ac6653aSJeff Kirsher 
3665891434b1SRayagond Kokatanur 	switch (cmd) {
3666891434b1SRayagond Kokatanur 	case SIOCGMIIPHY:
3667891434b1SRayagond Kokatanur 	case SIOCGMIIREG:
3668891434b1SRayagond Kokatanur 	case SIOCSMIIREG:
3669d6d50c7eSPhilippe Reynes 		if (!dev->phydev)
36707ac6653aSJeff Kirsher 			return -EINVAL;
3671d6d50c7eSPhilippe Reynes 		ret = phy_mii_ioctl(dev->phydev, rq, cmd);
3672891434b1SRayagond Kokatanur 		break;
3673891434b1SRayagond Kokatanur 	case SIOCSHWTSTAMP:
3674891434b1SRayagond Kokatanur 		ret = stmmac_hwtstamp_ioctl(dev, rq);
3675891434b1SRayagond Kokatanur 		break;
3676891434b1SRayagond Kokatanur 	default:
3677891434b1SRayagond Kokatanur 		break;
3678891434b1SRayagond Kokatanur 	}
36797ac6653aSJeff Kirsher 
36807ac6653aSJeff Kirsher 	return ret;
36817ac6653aSJeff Kirsher }
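/* A minimal user-space sketch (hypothetical, for illustration only) of how
 * the SIOCSHWTSTAMP case above is typically exercised:
 *
 *	struct hwtstamp_config cfg = {
 *		.tx_type   = HWTSTAMP_TX_ON,
 *		.rx_filter = HWTSTAMP_FILTER_ALL,
 *	};
 *	struct ifreq ifr;
 *
 *	memset(&ifr, 0, sizeof(ifr));
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *	ifr.ifr_data = (void *)&cfg;
 *	ioctl(sock_fd, SIOCSHWTSTAMP, &ifr);
 *
 * The request is then handled by stmmac_hwtstamp_ioctl() via the switch
 * statement above.
 */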
36827ac6653aSJeff Kirsher 
368350fb4f74SGiuseppe CAVALLARO #ifdef CONFIG_DEBUG_FS
36847ac29055SGiuseppe CAVALLARO static struct dentry *stmmac_fs_dir;
36857ac29055SGiuseppe CAVALLARO 
3686c24602efSGiuseppe CAVALLARO static void sysfs_display_ring(void *head, int size, int extend_desc,
3687c24602efSGiuseppe CAVALLARO 			       struct seq_file *seq)
36887ac29055SGiuseppe CAVALLARO {
36897ac29055SGiuseppe CAVALLARO 	int i;
3690c24602efSGiuseppe CAVALLARO 	struct dma_extended_desc *ep = (struct dma_extended_desc *)head;
3691c24602efSGiuseppe CAVALLARO 	struct dma_desc *p = (struct dma_desc *)head;
36927ac29055SGiuseppe CAVALLARO 
3693c24602efSGiuseppe CAVALLARO 	for (i = 0; i < size; i++) {
3694c24602efSGiuseppe CAVALLARO 		if (extend_desc) {
3695c24602efSGiuseppe CAVALLARO 			seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
3696c24602efSGiuseppe CAVALLARO 				   i, (unsigned int)virt_to_phys(ep),
3697f8be0d78SMichael Weiser 				   le32_to_cpu(ep->basic.des0),
3698f8be0d78SMichael Weiser 				   le32_to_cpu(ep->basic.des1),
3699f8be0d78SMichael Weiser 				   le32_to_cpu(ep->basic.des2),
3700f8be0d78SMichael Weiser 				   le32_to_cpu(ep->basic.des3));
3701c24602efSGiuseppe CAVALLARO 			ep++;
3702c24602efSGiuseppe CAVALLARO 		} else {
3703c24602efSGiuseppe CAVALLARO 			seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
3704c24602efSGiuseppe CAVALLARO 				   i, (unsigned int)virt_to_phys(p),
3705f8be0d78SMichael Weiser 				   le32_to_cpu(p->des0), le32_to_cpu(p->des1),
3706f8be0d78SMichael Weiser 				   le32_to_cpu(p->des2), le32_to_cpu(p->des3));
3707c24602efSGiuseppe CAVALLARO 			p++;
3708c24602efSGiuseppe CAVALLARO 		}
37097ac29055SGiuseppe CAVALLARO 		seq_printf(seq, "\n");
37107ac29055SGiuseppe CAVALLARO 	}
3711c24602efSGiuseppe CAVALLARO }
37127ac29055SGiuseppe CAVALLARO 
3713c24602efSGiuseppe CAVALLARO static int stmmac_sysfs_ring_read(struct seq_file *seq, void *v)
3714c24602efSGiuseppe CAVALLARO {
3715c24602efSGiuseppe CAVALLARO 	struct net_device *dev = seq->private;
3716c24602efSGiuseppe CAVALLARO 	struct stmmac_priv *priv = netdev_priv(dev);
3717aff3d9efSJoao Pinto 	u32 rx_count = priv->plat->rx_queues_to_use;
3718aff3d9efSJoao Pinto 	u32 tx_count = priv->plat->tx_queues_to_use;
3719aff3d9efSJoao Pinto 	u32 queue;
3720aff3d9efSJoao Pinto 
3721aff3d9efSJoao Pinto 	for (queue = 0; queue < rx_count; queue++) {
3722aff3d9efSJoao Pinto 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
3723aff3d9efSJoao Pinto 
3724aff3d9efSJoao Pinto 		seq_printf(seq, "RX Queue %d:\n", queue);
37257ac29055SGiuseppe CAVALLARO 
3726c24602efSGiuseppe CAVALLARO 		if (priv->extend_desc) {
3727aff3d9efSJoao Pinto 			seq_printf(seq, "Extended descriptor ring:\n");
3728aff3d9efSJoao Pinto 			sysfs_display_ring((void *)rx_q->dma_erx,
3729aff3d9efSJoao Pinto 					   DMA_RX_SIZE, 1, seq);
3730c24602efSGiuseppe CAVALLARO 		} else {
3731aff3d9efSJoao Pinto 			seq_printf(seq, "Descriptor ring:\n");
3732aff3d9efSJoao Pinto 			sysfs_display_ring((void *)rx_q->dma_rx,
3733aff3d9efSJoao Pinto 					   DMA_RX_SIZE, 0, seq);
3734aff3d9efSJoao Pinto 		}
3735aff3d9efSJoao Pinto 	}
3736aff3d9efSJoao Pinto 
3737aff3d9efSJoao Pinto 	for (queue = 0; queue < tx_count; queue++) {
3738aff3d9efSJoao Pinto 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
3739aff3d9efSJoao Pinto 
3740aff3d9efSJoao Pinto 		seq_printf(seq, "TX Queue %d:\n", queue);
3741aff3d9efSJoao Pinto 
3742aff3d9efSJoao Pinto 		if (priv->extend_desc) {
3743aff3d9efSJoao Pinto 			seq_printf(seq, "Extended descriptor ring:\n");
3744aff3d9efSJoao Pinto 			sysfs_display_ring((void *)tx_q->dma_etx,
3745aff3d9efSJoao Pinto 					   DMA_TX_SIZE, 1, seq);
3746aff3d9efSJoao Pinto 		} else {
3747aff3d9efSJoao Pinto 			seq_printf(seq, "Descriptor ring:\n");
3748aff3d9efSJoao Pinto 			sysfs_display_ring((void *)tx_q->dma_tx,
3749aff3d9efSJoao Pinto 					   DMA_TX_SIZE, 0, seq);
3750aff3d9efSJoao Pinto 		}
37517ac29055SGiuseppe CAVALLARO 	}
37527ac29055SGiuseppe CAVALLARO 
37537ac29055SGiuseppe CAVALLARO 	return 0;
37547ac29055SGiuseppe CAVALLARO }
37557ac29055SGiuseppe CAVALLARO 
37567ac29055SGiuseppe CAVALLARO static int stmmac_sysfs_ring_open(struct inode *inode, struct file *file)
37577ac29055SGiuseppe CAVALLARO {
37587ac29055SGiuseppe CAVALLARO 	return single_open(file, stmmac_sysfs_ring_read, inode->i_private);
37597ac29055SGiuseppe CAVALLARO }
37607ac29055SGiuseppe CAVALLARO 
376122d3efe5SPavel Machek /* Debugfs files, should appear in /sys/kernel/debug/stmmaceth/eth0 */
376222d3efe5SPavel Machek 
37637ac29055SGiuseppe CAVALLARO static const struct file_operations stmmac_rings_status_fops = {
37647ac29055SGiuseppe CAVALLARO 	.owner = THIS_MODULE,
37657ac29055SGiuseppe CAVALLARO 	.open = stmmac_sysfs_ring_open,
37667ac29055SGiuseppe CAVALLARO 	.read = seq_read,
37677ac29055SGiuseppe CAVALLARO 	.llseek = seq_lseek,
376874863948SDjalal Harouni 	.release = single_release,
37697ac29055SGiuseppe CAVALLARO };
37707ac29055SGiuseppe CAVALLARO 
3771e7434821SGiuseppe CAVALLARO static int stmmac_sysfs_dma_cap_read(struct seq_file *seq, void *v)
3772e7434821SGiuseppe CAVALLARO {
3773e7434821SGiuseppe CAVALLARO 	struct net_device *dev = seq->private;
3774e7434821SGiuseppe CAVALLARO 	struct stmmac_priv *priv = netdev_priv(dev);
3775e7434821SGiuseppe CAVALLARO 
377619e30c14SGiuseppe CAVALLARO 	if (!priv->hw_cap_support) {
3777e7434821SGiuseppe CAVALLARO 		seq_printf(seq, "DMA HW features not supported\n");
3778e7434821SGiuseppe CAVALLARO 		return 0;
3779e7434821SGiuseppe CAVALLARO 	}
3780e7434821SGiuseppe CAVALLARO 
3781e7434821SGiuseppe CAVALLARO 	seq_printf(seq, "==============================\n");
3782e7434821SGiuseppe CAVALLARO 	seq_printf(seq, "\tDMA HW features\n");
3783e7434821SGiuseppe CAVALLARO 	seq_printf(seq, "==============================\n");
3784e7434821SGiuseppe CAVALLARO 
378522d3efe5SPavel Machek 	seq_printf(seq, "\t10/100 Mbps: %s\n",
3786e7434821SGiuseppe CAVALLARO 		   (priv->dma_cap.mbps_10_100) ? "Y" : "N");
378722d3efe5SPavel Machek 	seq_printf(seq, "\t1000 Mbps: %s\n",
3788e7434821SGiuseppe CAVALLARO 		   (priv->dma_cap.mbps_1000) ? "Y" : "N");
378922d3efe5SPavel Machek 	seq_printf(seq, "\tHalf duplex: %s\n",
3790e7434821SGiuseppe CAVALLARO 		   (priv->dma_cap.half_duplex) ? "Y" : "N");
3791e7434821SGiuseppe CAVALLARO 	seq_printf(seq, "\tHash Filter: %s\n",
3792e7434821SGiuseppe CAVALLARO 		   (priv->dma_cap.hash_filter) ? "Y" : "N");
3793e7434821SGiuseppe CAVALLARO 	seq_printf(seq, "\tMultiple MAC address registers: %s\n",
3794e7434821SGiuseppe CAVALLARO 		   (priv->dma_cap.multi_addr) ? "Y" : "N");
37958d45e42bSLABBE Corentin 	seq_printf(seq, "\tPCS (TBI/SGMII/RTBI PHY interfaces): %s\n",
3796e7434821SGiuseppe CAVALLARO 		   (priv->dma_cap.pcs) ? "Y" : "N");
3797e7434821SGiuseppe CAVALLARO 	seq_printf(seq, "\tSMA (MDIO) Interface: %s\n",
3798e7434821SGiuseppe CAVALLARO 		   (priv->dma_cap.sma_mdio) ? "Y" : "N");
3799e7434821SGiuseppe CAVALLARO 	seq_printf(seq, "\tPMT Remote wake up: %s\n",
3800e7434821SGiuseppe CAVALLARO 		   (priv->dma_cap.pmt_remote_wake_up) ? "Y" : "N");
3801e7434821SGiuseppe CAVALLARO 	seq_printf(seq, "\tPMT Magic Frame: %s\n",
3802e7434821SGiuseppe CAVALLARO 		   (priv->dma_cap.pmt_magic_frame) ? "Y" : "N");
3803e7434821SGiuseppe CAVALLARO 	seq_printf(seq, "\tRMON module: %s\n",
3804e7434821SGiuseppe CAVALLARO 		   (priv->dma_cap.rmon) ? "Y" : "N");
3805e7434821SGiuseppe CAVALLARO 	seq_printf(seq, "\tIEEE 1588-2002 Time Stamp: %s\n",
3806e7434821SGiuseppe CAVALLARO 		   (priv->dma_cap.time_stamp) ? "Y" : "N");
3807e7434821SGiuseppe CAVALLARO 	seq_printf(seq, "\tIEEE 1588-2008 Advanced Time Stamp: %s\n",
3808e7434821SGiuseppe CAVALLARO 		   (priv->dma_cap.atime_stamp) ? "Y" : "N");
380922d3efe5SPavel Machek 	seq_printf(seq, "\t802.3az - Energy-Efficient Ethernet (EEE): %s\n",
3810e7434821SGiuseppe CAVALLARO 		   (priv->dma_cap.eee) ? "Y" : "N");
3811e7434821SGiuseppe CAVALLARO 	seq_printf(seq, "\tAV features: %s\n", (priv->dma_cap.av) ? "Y" : "N");
3812e7434821SGiuseppe CAVALLARO 	seq_printf(seq, "\tChecksum Offload in TX: %s\n",
3813e7434821SGiuseppe CAVALLARO 		   (priv->dma_cap.tx_coe) ? "Y" : "N");
3814f748be53SAlexandre TORGUE 	if (priv->synopsys_id >= DWMAC_CORE_4_00) {
3815f748be53SAlexandre TORGUE 		seq_printf(seq, "\tIP Checksum Offload in RX: %s\n",
3816f748be53SAlexandre TORGUE 			   (priv->dma_cap.rx_coe) ? "Y" : "N");
3817f748be53SAlexandre TORGUE 	} else {
3818e7434821SGiuseppe CAVALLARO 		seq_printf(seq, "\tIP Checksum Offload (type1) in RX: %s\n",
3819e7434821SGiuseppe CAVALLARO 			   (priv->dma_cap.rx_coe_type1) ? "Y" : "N");
3820e7434821SGiuseppe CAVALLARO 		seq_printf(seq, "\tIP Checksum Offload (type2) in RX: %s\n",
3821e7434821SGiuseppe CAVALLARO 			   (priv->dma_cap.rx_coe_type2) ? "Y" : "N");
3822f748be53SAlexandre TORGUE 	}
3823e7434821SGiuseppe CAVALLARO 	seq_printf(seq, "\tRXFIFO > 2048bytes: %s\n",
3824e7434821SGiuseppe CAVALLARO 		   (priv->dma_cap.rxfifo_over_2048) ? "Y" : "N");
3825e7434821SGiuseppe CAVALLARO 	seq_printf(seq, "\tNumber of Additional RX channel: %d\n",
3826e7434821SGiuseppe CAVALLARO 		   priv->dma_cap.number_rx_channel);
3827e7434821SGiuseppe CAVALLARO 	seq_printf(seq, "\tNumber of Additional TX channel: %d\n",
3828e7434821SGiuseppe CAVALLARO 		   priv->dma_cap.number_tx_channel);
3829e7434821SGiuseppe CAVALLARO 	seq_printf(seq, "\tEnhanced descriptors: %s\n",
3830e7434821SGiuseppe CAVALLARO 		   (priv->dma_cap.enh_desc) ? "Y" : "N");
3831e7434821SGiuseppe CAVALLARO 
3832e7434821SGiuseppe CAVALLARO 	return 0;
3833e7434821SGiuseppe CAVALLARO }
3834e7434821SGiuseppe CAVALLARO 
3835e7434821SGiuseppe CAVALLARO static int stmmac_sysfs_dma_cap_open(struct inode *inode, struct file *file)
3836e7434821SGiuseppe CAVALLARO {
3837e7434821SGiuseppe CAVALLARO 	return single_open(file, stmmac_sysfs_dma_cap_read, inode->i_private);
3838e7434821SGiuseppe CAVALLARO }
3839e7434821SGiuseppe CAVALLARO 
3840e7434821SGiuseppe CAVALLARO static const struct file_operations stmmac_dma_cap_fops = {
3841e7434821SGiuseppe CAVALLARO 	.owner = THIS_MODULE,
3842e7434821SGiuseppe CAVALLARO 	.open = stmmac_sysfs_dma_cap_open,
3843e7434821SGiuseppe CAVALLARO 	.read = seq_read,
3844e7434821SGiuseppe CAVALLARO 	.llseek = seq_lseek,
384574863948SDjalal Harouni 	.release = single_release,
3846e7434821SGiuseppe CAVALLARO };
3847e7434821SGiuseppe CAVALLARO 
38487ac29055SGiuseppe CAVALLARO static int stmmac_init_fs(struct net_device *dev)
38497ac29055SGiuseppe CAVALLARO {
3850466c5ac8SMathieu Olivari 	struct stmmac_priv *priv = netdev_priv(dev);
38517ac29055SGiuseppe CAVALLARO 
3852466c5ac8SMathieu Olivari 	/* Create per netdev entries */
3853466c5ac8SMathieu Olivari 	priv->dbgfs_dir = debugfs_create_dir(dev->name, stmmac_fs_dir);
3854466c5ac8SMathieu Olivari 
3855466c5ac8SMathieu Olivari 	if (!priv->dbgfs_dir || IS_ERR(priv->dbgfs_dir)) {
385638ddc59dSLABBE Corentin 		netdev_err(priv->dev, "ERROR failed to create debugfs directory\n");
38577ac29055SGiuseppe CAVALLARO 
38587ac29055SGiuseppe CAVALLARO 		return -ENOMEM;
38597ac29055SGiuseppe CAVALLARO 	}
38607ac29055SGiuseppe CAVALLARO 
38617ac29055SGiuseppe CAVALLARO 	/* Entry to report DMA RX/TX rings */
3862466c5ac8SMathieu Olivari 	priv->dbgfs_rings_status =
3863466c5ac8SMathieu Olivari 		debugfs_create_file("descriptors_status", S_IRUGO,
3864466c5ac8SMathieu Olivari 				    priv->dbgfs_dir, dev,
38657ac29055SGiuseppe CAVALLARO 				    &stmmac_rings_status_fops);
38667ac29055SGiuseppe CAVALLARO 
3867466c5ac8SMathieu Olivari 	if (!priv->dbgfs_rings_status || IS_ERR(priv->dbgfs_rings_status)) {
386838ddc59dSLABBE Corentin 		netdev_err(priv->dev, "ERROR creating stmmac ring debugfs file\n");
3869466c5ac8SMathieu Olivari 		debugfs_remove_recursive(priv->dbgfs_dir);
38707ac29055SGiuseppe CAVALLARO 
38717ac29055SGiuseppe CAVALLARO 		return -ENOMEM;
38727ac29055SGiuseppe CAVALLARO 	}
38737ac29055SGiuseppe CAVALLARO 
3874e7434821SGiuseppe CAVALLARO 	/* Entry to report the DMA HW features */
3875466c5ac8SMathieu Olivari 	priv->dbgfs_dma_cap = debugfs_create_file("dma_cap", S_IRUGO,
3876466c5ac8SMathieu Olivari 					    priv->dbgfs_dir,
3877e7434821SGiuseppe CAVALLARO 					    dev, &stmmac_dma_cap_fops);
3878e7434821SGiuseppe CAVALLARO 
3879466c5ac8SMathieu Olivari 	if (!priv->dbgfs_dma_cap || IS_ERR(priv->dbgfs_dma_cap)) {
388038ddc59dSLABBE Corentin 		netdev_err(priv->dev, "ERROR creating stmmac MMC debugfs file\n");
3881466c5ac8SMathieu Olivari 		debugfs_remove_recursive(priv->dbgfs_dir);
3882e7434821SGiuseppe CAVALLARO 
3883e7434821SGiuseppe CAVALLARO 		return -ENOMEM;
3884e7434821SGiuseppe CAVALLARO 	}
3885e7434821SGiuseppe CAVALLARO 
38867ac29055SGiuseppe CAVALLARO 	return 0;
38877ac29055SGiuseppe CAVALLARO }
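/* Example (assuming debugfs is mounted at /sys/kernel/debug and the
 * interface is named eth0), matching the entries created above:
 *
 *	cat /sys/kernel/debug/stmmaceth/eth0/descriptors_status
 *	cat /sys/kernel/debug/stmmaceth/eth0/dma_cap
 */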
38887ac29055SGiuseppe CAVALLARO 
3889466c5ac8SMathieu Olivari static void stmmac_exit_fs(struct net_device *dev)
38907ac29055SGiuseppe CAVALLARO {
3891466c5ac8SMathieu Olivari 	struct stmmac_priv *priv = netdev_priv(dev);
3892466c5ac8SMathieu Olivari 
3893466c5ac8SMathieu Olivari 	debugfs_remove_recursive(priv->dbgfs_dir);
38947ac29055SGiuseppe CAVALLARO }
389550fb4f74SGiuseppe CAVALLARO #endif /* CONFIG_DEBUG_FS */
38967ac29055SGiuseppe CAVALLARO 
38977ac6653aSJeff Kirsher static const struct net_device_ops stmmac_netdev_ops = {
38987ac6653aSJeff Kirsher 	.ndo_open = stmmac_open,
38997ac6653aSJeff Kirsher 	.ndo_start_xmit = stmmac_xmit,
39007ac6653aSJeff Kirsher 	.ndo_stop = stmmac_release,
39017ac6653aSJeff Kirsher 	.ndo_change_mtu = stmmac_change_mtu,
39027ac6653aSJeff Kirsher 	.ndo_fix_features = stmmac_fix_features,
3903d2afb5bdSGiuseppe CAVALLARO 	.ndo_set_features = stmmac_set_features,
390401789349SJiri Pirko 	.ndo_set_rx_mode = stmmac_set_rx_mode,
39057ac6653aSJeff Kirsher 	.ndo_tx_timeout = stmmac_tx_timeout,
39067ac6653aSJeff Kirsher 	.ndo_do_ioctl = stmmac_ioctl,
39077ac6653aSJeff Kirsher #ifdef CONFIG_NET_POLL_CONTROLLER
39087ac6653aSJeff Kirsher 	.ndo_poll_controller = stmmac_poll_controller,
39097ac6653aSJeff Kirsher #endif
39107ac6653aSJeff Kirsher 	.ndo_set_mac_address = eth_mac_addr,
39117ac6653aSJeff Kirsher };
39127ac6653aSJeff Kirsher 
39137ac6653aSJeff Kirsher /**
3914cf3f047bSGiuseppe CAVALLARO  *  stmmac_hw_init - Init the MAC device
391532ceabcaSGiuseppe CAVALLARO  *  @priv: driver private structure
3916732fdf0eSGiuseppe CAVALLARO  *  Description: this function is to configure the MAC device according to
3917732fdf0eSGiuseppe CAVALLARO  *  some platform parameters or the HW capability register. It prepares the
3918732fdf0eSGiuseppe CAVALLARO  *  driver to use either ring or chain modes and to setup either enhanced or
3919732fdf0eSGiuseppe CAVALLARO  *  normal descriptors.
3920cf3f047bSGiuseppe CAVALLARO  */
3921cf3f047bSGiuseppe CAVALLARO static int stmmac_hw_init(struct stmmac_priv *priv)
3922cf3f047bSGiuseppe CAVALLARO {
3923cf3f047bSGiuseppe CAVALLARO 	struct mac_device_info *mac;
3924cf3f047bSGiuseppe CAVALLARO 
3925cf3f047bSGiuseppe CAVALLARO 	/* Identify the MAC HW device */
392603f2eecdSMarc Kleine-Budde 	if (priv->plat->has_gmac) {
392703f2eecdSMarc Kleine-Budde 		priv->dev->priv_flags |= IFF_UNICAST_FLT;
39283b57de95SVince Bridgers 		mac = dwmac1000_setup(priv->ioaddr,
39293b57de95SVince Bridgers 				      priv->plat->multicast_filter_bins,
3930c623d149SAlexandre TORGUE 				      priv->plat->unicast_filter_entries,
3931c623d149SAlexandre TORGUE 				      &priv->synopsys_id);
3932f748be53SAlexandre TORGUE 	} else if (priv->plat->has_gmac4) {
3933f748be53SAlexandre TORGUE 		priv->dev->priv_flags |= IFF_UNICAST_FLT;
3934f748be53SAlexandre TORGUE 		mac = dwmac4_setup(priv->ioaddr,
3935f748be53SAlexandre TORGUE 				   priv->plat->multicast_filter_bins,
3936f748be53SAlexandre TORGUE 				   priv->plat->unicast_filter_entries,
3937f748be53SAlexandre TORGUE 				   &priv->synopsys_id);
393803f2eecdSMarc Kleine-Budde 	} else {
3939c623d149SAlexandre TORGUE 		mac = dwmac100_setup(priv->ioaddr, &priv->synopsys_id);
394003f2eecdSMarc Kleine-Budde 	}
3941cf3f047bSGiuseppe CAVALLARO 	if (!mac)
3942cf3f047bSGiuseppe CAVALLARO 		return -ENOMEM;
3943cf3f047bSGiuseppe CAVALLARO 
3944cf3f047bSGiuseppe CAVALLARO 	priv->hw = mac;
3945cf3f047bSGiuseppe CAVALLARO 
39464a7d666aSGiuseppe CAVALLARO 	/* To use the chained or ring mode */
3947f748be53SAlexandre TORGUE 	if (priv->synopsys_id >= DWMAC_CORE_4_00) {
3948f748be53SAlexandre TORGUE 		priv->hw->mode = &dwmac4_ring_mode_ops;
3949f748be53SAlexandre TORGUE 	} else {
39504a7d666aSGiuseppe CAVALLARO 		if (chain_mode) {
395129896a67SGiuseppe CAVALLARO 			priv->hw->mode = &chain_mode_ops;
395238ddc59dSLABBE Corentin 			dev_info(priv->device, "Chain mode enabled\n");
39534a7d666aSGiuseppe CAVALLARO 			priv->mode = STMMAC_CHAIN_MODE;
39544a7d666aSGiuseppe CAVALLARO 		} else {
395529896a67SGiuseppe CAVALLARO 			priv->hw->mode = &ring_mode_ops;
395638ddc59dSLABBE Corentin 			dev_info(priv->device, "Ring mode enabled\n");
39574a7d666aSGiuseppe CAVALLARO 			priv->mode = STMMAC_RING_MODE;
39584a7d666aSGiuseppe CAVALLARO 		}
3959f748be53SAlexandre TORGUE 	}
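	/* Ring mode keeps the descriptors contiguous in memory, while chain
	 * mode links them through their next-descriptor pointer; on pre-4.00
	 * cores the choice is driven by the chain_mode module parameter.
	 */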
39604a7d666aSGiuseppe CAVALLARO 
3961cf3f047bSGiuseppe CAVALLARO 	/* Get the HW capability (new GMAC newer than 3.50a) */
3962cf3f047bSGiuseppe CAVALLARO 	priv->hw_cap_support = stmmac_get_hw_features(priv);
3963cf3f047bSGiuseppe CAVALLARO 	if (priv->hw_cap_support) {
396438ddc59dSLABBE Corentin 		dev_info(priv->device, "DMA HW capability register supported\n");
3965cf3f047bSGiuseppe CAVALLARO 
3966cf3f047bSGiuseppe CAVALLARO 		/* We can override some gmac/dma configuration fields: e.g.
3967cf3f047bSGiuseppe CAVALLARO 		 * enh_desc, tx_coe (i.e. those passed through the
3968cf3f047bSGiuseppe CAVALLARO 		 * platform) with the values from the HW capability
3969cf3f047bSGiuseppe CAVALLARO 		 * register (if supported).
3970cf3f047bSGiuseppe CAVALLARO 		 */
3971cf3f047bSGiuseppe CAVALLARO 		priv->plat->enh_desc = priv->dma_cap.enh_desc;
3972cf3f047bSGiuseppe CAVALLARO 		priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up;
39733fe5cadbSGiuseppe CAVALLARO 		priv->hw->pmt = priv->plat->pmt;
397438912bdbSDeepak SIKRI 
3975a8df35d4SEzequiel Garcia 		/* TXCOE doesn't work in thresh DMA mode */
3976a8df35d4SEzequiel Garcia 		if (priv->plat->force_thresh_dma_mode)
3977a8df35d4SEzequiel Garcia 			priv->plat->tx_coe = 0;
3978a8df35d4SEzequiel Garcia 		else
397938912bdbSDeepak SIKRI 			priv->plat->tx_coe = priv->dma_cap.tx_coe;
3980a8df35d4SEzequiel Garcia 
3981f748be53SAlexandre TORGUE 		/* In case of GMAC4 rx_coe is from HW cap register. */
3982f748be53SAlexandre TORGUE 		priv->plat->rx_coe = priv->dma_cap.rx_coe;
398338912bdbSDeepak SIKRI 
398438912bdbSDeepak SIKRI 		if (priv->dma_cap.rx_coe_type2)
398538912bdbSDeepak SIKRI 			priv->plat->rx_coe = STMMAC_RX_COE_TYPE2;
398638912bdbSDeepak SIKRI 		else if (priv->dma_cap.rx_coe_type1)
398738912bdbSDeepak SIKRI 			priv->plat->rx_coe = STMMAC_RX_COE_TYPE1;
398838912bdbSDeepak SIKRI 
398938ddc59dSLABBE Corentin 	} else {
399038ddc59dSLABBE Corentin 		dev_info(priv->device, "No HW DMA feature register supported\n");
399138ddc59dSLABBE Corentin 	}
3992cf3f047bSGiuseppe CAVALLARO 
3993f748be53SAlexandre TORGUE 	/* To use alternate (extended), normal or GMAC4 descriptor structures */
3994f748be53SAlexandre TORGUE 	if (priv->synopsys_id >= DWMAC_CORE_4_00)
3995f748be53SAlexandre TORGUE 		priv->hw->desc = &dwmac4_desc_ops;
3996f748be53SAlexandre TORGUE 	else
399761369d02SByungho An 		stmmac_selec_desc_mode(priv);
399861369d02SByungho An 
3999d2afb5bdSGiuseppe CAVALLARO 	if (priv->plat->rx_coe) {
4000d2afb5bdSGiuseppe CAVALLARO 		priv->hw->rx_csum = priv->plat->rx_coe;
400138ddc59dSLABBE Corentin 		dev_info(priv->device, "RX Checksum Offload Engine supported\n");
4002f748be53SAlexandre TORGUE 		if (priv->synopsys_id < DWMAC_CORE_4_00)
400338ddc59dSLABBE Corentin 			dev_info(priv->device, "COE Type %d\n", priv->hw->rx_csum);
4004d2afb5bdSGiuseppe CAVALLARO 	}
4005cf3f047bSGiuseppe CAVALLARO 	if (priv->plat->tx_coe)
400638ddc59dSLABBE Corentin 		dev_info(priv->device, "TX Checksum insertion supported\n");
4007cf3f047bSGiuseppe CAVALLARO 
4008cf3f047bSGiuseppe CAVALLARO 	if (priv->plat->pmt) {
400938ddc59dSLABBE Corentin 		dev_info(priv->device, "Wake-Up On Lan supported\n");
4010cf3f047bSGiuseppe CAVALLARO 		device_set_wakeup_capable(priv->device, 1);
4011cf3f047bSGiuseppe CAVALLARO 	}
4012cf3f047bSGiuseppe CAVALLARO 
4013f748be53SAlexandre TORGUE 	if (priv->dma_cap.tsoen)
401438ddc59dSLABBE Corentin 		dev_info(priv->device, "TSO supported\n");
4015f748be53SAlexandre TORGUE 
4016c24602efSGiuseppe CAVALLARO 	return 0;
4017cf3f047bSGiuseppe CAVALLARO }
4018cf3f047bSGiuseppe CAVALLARO 
4019cf3f047bSGiuseppe CAVALLARO /**
4020bfab27a1SGiuseppe CAVALLARO  * stmmac_dvr_probe
4021bfab27a1SGiuseppe CAVALLARO  * @device: device pointer
4022ff3dd78cSGiuseppe CAVALLARO  * @plat_dat: platform data pointer
4023e56788cfSJoachim Eastwood  * @res: stmmac resource pointer
4024bfab27a1SGiuseppe CAVALLARO  * Description: this is the main probe function used to
4025bfab27a1SGiuseppe CAVALLARO  * call alloc_etherdev and allocate the priv structure.
40269afec6efSAndy Shevchenko  * Return:
402715ffac73SJoachim Eastwood  * returns 0 on success, otherwise errno.
40287ac6653aSJeff Kirsher  */
402915ffac73SJoachim Eastwood int stmmac_dvr_probe(struct device *device,
4030cf3f047bSGiuseppe CAVALLARO 		     struct plat_stmmacenet_data *plat_dat,
4031e56788cfSJoachim Eastwood 		     struct stmmac_resources *res)
40327ac6653aSJeff Kirsher {
4033bfab27a1SGiuseppe CAVALLARO 	struct net_device *ndev = NULL;
4034bfab27a1SGiuseppe CAVALLARO 	struct stmmac_priv *priv;
4035aff3d9efSJoao Pinto 	int ret = 0;
4036aff3d9efSJoao Pinto 	u32 queue;
40377ac6653aSJeff Kirsher 
4038aff3d9efSJoao Pinto 	ndev = alloc_etherdev_mqs(sizeof(struct stmmac_priv),
4039aff3d9efSJoao Pinto 				  MTL_MAX_TX_QUEUES,
4040aff3d9efSJoao Pinto 				  MTL_MAX_RX_QUEUES);
404141de8d4cSJoe Perches 	if (!ndev)
404215ffac73SJoachim Eastwood 		return -ENOMEM;
40437ac6653aSJeff Kirsher 
4044bfab27a1SGiuseppe CAVALLARO 	SET_NETDEV_DEV(ndev, device);
40457ac6653aSJeff Kirsher 
4046bfab27a1SGiuseppe CAVALLARO 	priv = netdev_priv(ndev);
4047bfab27a1SGiuseppe CAVALLARO 	priv->device = device;
4048bfab27a1SGiuseppe CAVALLARO 	priv->dev = ndev;
4049bfab27a1SGiuseppe CAVALLARO 
4050bfab27a1SGiuseppe CAVALLARO 	stmmac_set_ethtool_ops(ndev);
4051cf3f047bSGiuseppe CAVALLARO 	priv->pause = pause;
4052cf3f047bSGiuseppe CAVALLARO 	priv->plat = plat_dat;
4053e56788cfSJoachim Eastwood 	priv->ioaddr = res->addr;
4054e56788cfSJoachim Eastwood 	priv->dev->base_addr = (unsigned long)res->addr;
4055e56788cfSJoachim Eastwood 
4056e56788cfSJoachim Eastwood 	priv->dev->irq = res->irq;
4057e56788cfSJoachim Eastwood 	priv->wol_irq = res->wol_irq;
4058e56788cfSJoachim Eastwood 	priv->lpi_irq = res->lpi_irq;
4059e56788cfSJoachim Eastwood 
4060e56788cfSJoachim Eastwood 	if (res->mac)
4061e56788cfSJoachim Eastwood 		memcpy(priv->dev->dev_addr, res->mac, ETH_ALEN);
4062bfab27a1SGiuseppe CAVALLARO 
4063a7a62685SJoachim Eastwood 	dev_set_drvdata(device, priv->dev);
4064803f8fc4SJoachim Eastwood 
4065cf3f047bSGiuseppe CAVALLARO 	/* Verify driver arguments */
4066cf3f047bSGiuseppe CAVALLARO 	stmmac_verify_args();
4067cf3f047bSGiuseppe CAVALLARO 
4068cf3f047bSGiuseppe CAVALLARO 	/* Override with kernel parameters if supplied XXX CRS XXX
4069ceb69499SGiuseppe CAVALLARO 	 * this needs to have multiple instances
4070ceb69499SGiuseppe CAVALLARO 	 */
4071cf3f047bSGiuseppe CAVALLARO 	if ((phyaddr >= 0) && (phyaddr <= 31))
4072cf3f047bSGiuseppe CAVALLARO 		priv->plat->phy_addr = phyaddr;
4073cf3f047bSGiuseppe CAVALLARO 
4074f573c0b9Sjpinto 	if (priv->plat->stmmac_rst)
4075f573c0b9Sjpinto 		reset_control_deassert(priv->plat->stmmac_rst);
4076c5e4ddbdSChen-Yu Tsai 
4077cf3f047bSGiuseppe CAVALLARO 	/* Init MAC and get the capabilities */
4078c24602efSGiuseppe CAVALLARO 	ret = stmmac_hw_init(priv);
4079c24602efSGiuseppe CAVALLARO 	if (ret)
408062866e98SChen-Yu Tsai 		goto error_hw_init;
4081cf3f047bSGiuseppe CAVALLARO 
4082aff3d9efSJoao Pinto 	/* Configure real RX and TX queues */
4083aff3d9efSJoao Pinto 	ndev->real_num_rx_queues = priv->plat->rx_queues_to_use;
4084aff3d9efSJoao Pinto 	ndev->real_num_tx_queues = priv->plat->tx_queues_to_use;
4085aff3d9efSJoao Pinto 
4086aff3d9efSJoao Pinto 	priv->dma_buf_sz = STMMAC_ALIGN(buf_sz);
4087aff3d9efSJoao Pinto 
4088cf3f047bSGiuseppe CAVALLARO 	ndev->netdev_ops = &stmmac_netdev_ops;
4089cf3f047bSGiuseppe CAVALLARO 
4090cf3f047bSGiuseppe CAVALLARO 	ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
4091cf3f047bSGiuseppe CAVALLARO 			    NETIF_F_RXCSUM;
4092f748be53SAlexandre TORGUE 
4093f748be53SAlexandre TORGUE 	if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
4094f748be53SAlexandre TORGUE 		ndev->hw_features |= NETIF_F_TSO;
4095f748be53SAlexandre TORGUE 		priv->tso = true;
409638ddc59dSLABBE Corentin 		dev_info(priv->device, "TSO feature enabled\n");
4097f748be53SAlexandre TORGUE 	}
4098bfab27a1SGiuseppe CAVALLARO 	ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;
4099bfab27a1SGiuseppe CAVALLARO 	ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
41007ac6653aSJeff Kirsher #ifdef STMMAC_VLAN_TAG_USED
41017ac6653aSJeff Kirsher 	/* Both mac100 and gmac support receive VLAN tag detection */
4102f646968fSPatrick McHardy 	ndev->features |= NETIF_F_HW_VLAN_CTAG_RX;
41037ac6653aSJeff Kirsher #endif
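	/* The initial message level comes from the "debug" parameter
	 * (see stmmac_cmdline_opt() below).
	 */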
41047ac6653aSJeff Kirsher 	priv->msg_enable = netif_msg_init(debug, default_msg_level);
41057ac6653aSJeff Kirsher 
410644770e11SJarod Wilson 	/* MTU range: 46 - hw-specific max */
410744770e11SJarod Wilson 	ndev->min_mtu = ETH_ZLEN - ETH_HLEN;
410844770e11SJarod Wilson 	if ((priv->plat->enh_desc) || (priv->synopsys_id >= DWMAC_CORE_4_00))
410944770e11SJarod Wilson 		ndev->max_mtu = JUMBO_LEN;
411044770e11SJarod Wilson 	else
411144770e11SJarod Wilson 		ndev->max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN);
4112a2cd64f3SKweh, Hock Leong 	/* Do not overwrite ndev->max_mtu if plat->maxmtu is greater than
4113a2cd64f3SKweh, Hock Leong 	 * ndev->max_mtu or lower than ndev->min_mtu, which is an invalid range.
4114a2cd64f3SKweh, Hock Leong 	 */
4115a2cd64f3SKweh, Hock Leong 	if ((priv->plat->maxmtu < ndev->max_mtu) &&
4116a2cd64f3SKweh, Hock Leong 	    (priv->plat->maxmtu >= ndev->min_mtu))
411744770e11SJarod Wilson 		ndev->max_mtu = priv->plat->maxmtu;
4118a2cd64f3SKweh, Hock Leong 	else if (priv->plat->maxmtu < ndev->min_mtu)
4119b618ab45SHeiner Kallweit 		dev_warn(priv->device,
4120a2cd64f3SKweh, Hock Leong 			 "%s: warning: maxmtu has an invalid value (%d)\n",
4121a2cd64f3SKweh, Hock Leong 			 __func__, priv->plat->maxmtu);
412244770e11SJarod Wilson 
41237ac6653aSJeff Kirsher 	if (flow_ctrl)
41247ac6653aSJeff Kirsher 		priv->flow_ctrl = FLOW_AUTO;	/* RX/TX pause on */
41257ac6653aSJeff Kirsher 
412662a2ab93SGiuseppe CAVALLARO 	/* The Rx Watchdog is available in cores newer than 3.40.
412762a2ab93SGiuseppe CAVALLARO 	 * In some cases, for example on buggy HW, this feature
412862a2ab93SGiuseppe CAVALLARO 	 * has to be disabled; this can be done by setting the
412962a2ab93SGiuseppe CAVALLARO 	 * riwt_off field in the platform data.
413062a2ab93SGiuseppe CAVALLARO 	 */
413162a2ab93SGiuseppe CAVALLARO 	if ((priv->synopsys_id >= DWMAC_CORE_3_50) && (!priv->plat->riwt_off)) {
413262a2ab93SGiuseppe CAVALLARO 		priv->use_riwt = 1;
4133b618ab45SHeiner Kallweit 		dev_info(priv->device,
4134b618ab45SHeiner Kallweit 			 "Enable RX Mitigation via HW Watchdog Timer\n");
413562a2ab93SGiuseppe CAVALLARO 	}
413662a2ab93SGiuseppe CAVALLARO 
4137aff3d9efSJoao Pinto 	ret = alloc_dma_desc_resources(priv);
4138aff3d9efSJoao Pinto 	if (ret < 0) {
4139aff3d9efSJoao Pinto 		netdev_err(priv->dev, "%s: DMA descriptors allocation failed\n",
4140aff3d9efSJoao Pinto 			   __func__);
4141aff3d9efSJoao Pinto 		goto init_dma_error;
4142aff3d9efSJoao Pinto 	}
4143aff3d9efSJoao Pinto 
4144aff3d9efSJoao Pinto 	ret = init_dma_desc_rings(priv->dev, GFP_KERNEL);
4145aff3d9efSJoao Pinto 	if (ret < 0) {
4146aff3d9efSJoao Pinto 		netdev_err(priv->dev, "%s: DMA descriptors initialization failed\n",
4147aff3d9efSJoao Pinto 			   __func__);
4148aff3d9efSJoao Pinto 		goto init_dma_error;
4149aff3d9efSJoao Pinto 	}
4150aff3d9efSJoao Pinto 
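	/* Register one NAPI context per RX queue; the NAPI weight passed
	 * here scales with the number of RX queues in use.
	 */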
4151aff3d9efSJoao Pinto 	for (queue = 0; queue < priv->plat->rx_queues_to_use; queue++) {
4152aff3d9efSJoao Pinto 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
4153aff3d9efSJoao Pinto 
4154aff3d9efSJoao Pinto 		netif_napi_add(ndev, &rx_q->napi, stmmac_poll,
4155aff3d9efSJoao Pinto 			       (64 * priv->plat->rx_queues_to_use));
4156aff3d9efSJoao Pinto 	}
41577ac6653aSJeff Kirsher 
41587ac6653aSJeff Kirsher 	spin_lock_init(&priv->lock);
41597ac6653aSJeff Kirsher 
4160cd7201f4SGiuseppe CAVALLARO 	/* If a specific clk_csr value is passed from the platform,
4161cd7201f4SGiuseppe CAVALLARO 	 * the CSR Clock Range selection cannot be changed at
4162cd7201f4SGiuseppe CAVALLARO 	 * run-time and is fixed. Otherwise the driver will try to
4163cd7201f4SGiuseppe CAVALLARO 	 * set the MDC clock dynamically according to the actual
4164cd7201f4SGiuseppe CAVALLARO 	 * clock input.
4165cd7201f4SGiuseppe CAVALLARO 	 */
4166cd7201f4SGiuseppe CAVALLARO 	if (!priv->plat->clk_csr)
4167cd7201f4SGiuseppe CAVALLARO 		stmmac_clk_csr_set(priv);
4168cd7201f4SGiuseppe CAVALLARO 	else
4169cd7201f4SGiuseppe CAVALLARO 		priv->clk_csr = priv->plat->clk_csr;
4170cd7201f4SGiuseppe CAVALLARO 
4171e58bb43fSGiuseppe CAVALLARO 	stmmac_check_pcs_mode(priv);
4172e58bb43fSGiuseppe CAVALLARO 
41733fe5cadbSGiuseppe CAVALLARO 	if (priv->hw->pcs != STMMAC_PCS_RGMII  &&
41743fe5cadbSGiuseppe CAVALLARO 	    priv->hw->pcs != STMMAC_PCS_TBI &&
41753fe5cadbSGiuseppe CAVALLARO 	    priv->hw->pcs != STMMAC_PCS_RTBI) {
41764bfcbd7aSFrancesco Virlinzi 		/* MDIO bus Registration */
41774bfcbd7aSFrancesco Virlinzi 		ret = stmmac_mdio_register(ndev);
41784bfcbd7aSFrancesco Virlinzi 		if (ret < 0) {
4179b618ab45SHeiner Kallweit 			dev_err(priv->device,
418038ddc59dSLABBE Corentin 				"%s: MDIO bus (id: %d) registration failed\n",
41814bfcbd7aSFrancesco Virlinzi 				__func__, priv->plat->bus_id);
41826a81c26fSViresh Kumar 			goto error_mdio_register;
41834bfcbd7aSFrancesco Virlinzi 		}
4184e58bb43fSGiuseppe CAVALLARO 	}
41854bfcbd7aSFrancesco Virlinzi 
418657016590SFlorian Fainelli 	ret = register_netdev(ndev);
4187b2eb09afSFlorian Fainelli 	if (ret) {
4188b618ab45SHeiner Kallweit 		dev_err(priv->device, "%s: ERROR %i registering the device\n",
418957016590SFlorian Fainelli 			__func__, ret);
4190b2eb09afSFlorian Fainelli 		goto error_netdev_register;
4191b2eb09afSFlorian Fainelli 	}
41927ac6653aSJeff Kirsher 
419357016590SFlorian Fainelli 	return ret;
41947ac6653aSJeff Kirsher 
41956a81c26fSViresh Kumar error_netdev_register:
4196b2eb09afSFlorian Fainelli 	if (priv->hw->pcs != STMMAC_PCS_RGMII &&
4197b2eb09afSFlorian Fainelli 	    priv->hw->pcs != STMMAC_PCS_TBI &&
4198b2eb09afSFlorian Fainelli 	    priv->hw->pcs != STMMAC_PCS_RTBI)
4199b2eb09afSFlorian Fainelli 		stmmac_mdio_unregister(ndev);
42007ac6653aSJeff Kirsher error_mdio_register:
4201aff3d9efSJoao Pinto 	for (queue = 0; queue < priv->plat->rx_queues_to_use; queue++) {
4202aff3d9efSJoao Pinto 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
4203aff3d9efSJoao Pinto 
4204aff3d9efSJoao Pinto 		netif_napi_del(&rx_q->napi);
4205aff3d9efSJoao Pinto 	}
4206aff3d9efSJoao Pinto init_dma_error:
4207aff3d9efSJoao Pinto 	free_dma_desc_resources(priv);
420862866e98SChen-Yu Tsai error_hw_init:
42097ac6653aSJeff Kirsher 	free_netdev(ndev);
42107ac6653aSJeff Kirsher 
421115ffac73SJoachim Eastwood 	return ret;
42127ac6653aSJeff Kirsher }
4213b2e2f0c7SAndy Shevchenko EXPORT_SYMBOL_GPL(stmmac_dvr_probe);
42147ac6653aSJeff Kirsher 
42157ac6653aSJeff Kirsher /**
42167ac6653aSJeff Kirsher  * stmmac_dvr_remove
4217f4e7bd81SJoachim Eastwood  * @dev: device pointer
42187ac6653aSJeff Kirsher  * Description: this function resets the TX/RX processes, disables the MAC RX/TX,
4219bfab27a1SGiuseppe CAVALLARO  * changes the link status and releases the DMA descriptor rings.
42207ac6653aSJeff Kirsher  */
4221f4e7bd81SJoachim Eastwood int stmmac_dvr_remove(struct device *dev)
42227ac6653aSJeff Kirsher {
4223f4e7bd81SJoachim Eastwood 	struct net_device *ndev = dev_get_drvdata(dev);
42247ac6653aSJeff Kirsher 	struct stmmac_priv *priv = netdev_priv(ndev);
42257ac6653aSJeff Kirsher 
422638ddc59dSLABBE Corentin 	netdev_info(priv->dev, "%s: removing driver\n", __func__);
42277ac6653aSJeff Kirsher 
4228ae4f0d46SJoao Pinto 	stmmac_stop_all_dma(priv);
42297ac6653aSJeff Kirsher 
4230270c7759SLABBE Corentin 	priv->hw->mac->set_mac(priv->ioaddr, false);
42317ac6653aSJeff Kirsher 	netif_carrier_off(ndev);
42327ac6653aSJeff Kirsher 	unregister_netdev(ndev);
4233f573c0b9Sjpinto 	if (priv->plat->stmmac_rst)
4234f573c0b9Sjpinto 		reset_control_assert(priv->plat->stmmac_rst);
4235f573c0b9Sjpinto 	clk_disable_unprepare(priv->plat->pclk);
4236f573c0b9Sjpinto 	clk_disable_unprepare(priv->plat->stmmac_clk);
42373fe5cadbSGiuseppe CAVALLARO 	if (priv->hw->pcs != STMMAC_PCS_RGMII &&
42383fe5cadbSGiuseppe CAVALLARO 	    priv->hw->pcs != STMMAC_PCS_TBI &&
42393fe5cadbSGiuseppe CAVALLARO 	    priv->hw->pcs != STMMAC_PCS_RTBI)
4240e743471fSBryan O'Donoghue 		stmmac_mdio_unregister(ndev);
42417ac6653aSJeff Kirsher 	free_netdev(ndev);
42427ac6653aSJeff Kirsher 
42437ac6653aSJeff Kirsher 	return 0;
42447ac6653aSJeff Kirsher }
4245b2e2f0c7SAndy Shevchenko EXPORT_SYMBOL_GPL(stmmac_dvr_remove);
42467ac6653aSJeff Kirsher 
4247732fdf0eSGiuseppe CAVALLARO /**
4248732fdf0eSGiuseppe CAVALLARO  * stmmac_suspend - suspend callback
4249f4e7bd81SJoachim Eastwood  * @dev: device pointer
4250732fdf0eSGiuseppe CAVALLARO  * Description: this is the function to suspend the device; it is called
4251732fdf0eSGiuseppe CAVALLARO  * by the platform driver to stop the network queues, stop the DMA,
4252732fdf0eSGiuseppe CAVALLARO  * program the PMT register (for WoL) and release the driver resources.
4253732fdf0eSGiuseppe CAVALLARO  */
4254f4e7bd81SJoachim Eastwood int stmmac_suspend(struct device *dev)
42557ac6653aSJeff Kirsher {
4256f4e7bd81SJoachim Eastwood 	struct net_device *ndev = dev_get_drvdata(dev);
42577ac6653aSJeff Kirsher 	struct stmmac_priv *priv = netdev_priv(ndev);
4258f8c5a875SGiuseppe CAVALLARO 	unsigned long flags;
42597ac6653aSJeff Kirsher 
42607ac6653aSJeff Kirsher 	if (!ndev || !netif_running(ndev))
42617ac6653aSJeff Kirsher 		return 0;
42627ac6653aSJeff Kirsher 
4263d6d50c7eSPhilippe Reynes 	if (ndev->phydev)
4264d6d50c7eSPhilippe Reynes 		phy_stop(ndev->phydev);
4265102463b1SFrancesco Virlinzi 
4266f8c5a875SGiuseppe CAVALLARO 	spin_lock_irqsave(&priv->lock, flags);
42677ac6653aSJeff Kirsher 
42687ac6653aSJeff Kirsher 	netif_device_detach(ndev);
4269aff3d9efSJoao Pinto 	stmmac_stop_all_queues(priv);
42707ac6653aSJeff Kirsher 
4271aff3d9efSJoao Pinto 	stmmac_disable_all_queues(priv);
42727ac6653aSJeff Kirsher 
42737ac6653aSJeff Kirsher 	/* Stop TX/RX DMA */
4274ae4f0d46SJoao Pinto 	stmmac_stop_all_dma(priv);
4275c24602efSGiuseppe CAVALLARO 
42767ac6653aSJeff Kirsher 	/* Enable Power down mode by programming the PMT regs */
427789f7f2cfSSrinivas Kandagatla 	if (device_may_wakeup(priv->device)) {
42787ed24bbeSVince Bridgers 		priv->hw->mac->pmt(priv->hw, priv->wolopts);
427989f7f2cfSSrinivas Kandagatla 		priv->irq_wake = 1;
428089f7f2cfSSrinivas Kandagatla 	} else {
4281270c7759SLABBE Corentin 		priv->hw->mac->set_mac(priv->ioaddr, false);
4282db88f10aSSrinivas Kandagatla 		pinctrl_pm_select_sleep_state(priv->device);
4283ba1377ffSGiuseppe CAVALLARO 		/* Disable the clocks in case PMT-based wakeup is off */
4284f573c0b9Sjpinto 		clk_disable(priv->plat->pclk);
4285f573c0b9Sjpinto 		clk_disable(priv->plat->stmmac_clk);
4286ba1377ffSGiuseppe CAVALLARO 	}
4287f8c5a875SGiuseppe CAVALLARO 	spin_unlock_irqrestore(&priv->lock, flags);
42882d871aa0SVince Bridgers 
42892d871aa0SVince Bridgers 	priv->oldlink = 0;
4290bd00632cSLABBE Corentin 	priv->speed = SPEED_UNKNOWN;
4291bd00632cSLABBE Corentin 	priv->oldduplex = DUPLEX_UNKNOWN;
42927ac6653aSJeff Kirsher 	return 0;
42937ac6653aSJeff Kirsher }
4294b2e2f0c7SAndy Shevchenko EXPORT_SYMBOL_GPL(stmmac_suspend);
42957ac6653aSJeff Kirsher 
4296732fdf0eSGiuseppe CAVALLARO /**
4297aff3d9efSJoao Pinto  * stmmac_reset_queues_param - reset queue parameters
4298aff3d9efSJoao Pinto  * @priv: driver private structure
4299aff3d9efSJoao Pinto  */
4300aff3d9efSJoao Pinto static void stmmac_reset_queues_param(struct stmmac_priv *priv)
4301aff3d9efSJoao Pinto {
4302aff3d9efSJoao Pinto 	u32 rx_cnt = priv->plat->rx_queues_to_use;
4303aff3d9efSJoao Pinto 	u32 tx_cnt = priv->plat->tx_queues_to_use;
4304aff3d9efSJoao Pinto 	u32 queue;
4305aff3d9efSJoao Pinto 
4306aff3d9efSJoao Pinto 	for (queue = 0; queue < rx_cnt; queue++) {
4307aff3d9efSJoao Pinto 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
4308aff3d9efSJoao Pinto 
4309aff3d9efSJoao Pinto 		rx_q->cur_rx = 0;
4310aff3d9efSJoao Pinto 		rx_q->dirty_rx = 0;
4311aff3d9efSJoao Pinto 	}
4312aff3d9efSJoao Pinto 
4313aff3d9efSJoao Pinto 	for (queue = 0; queue < tx_cnt; queue++) {
4314aff3d9efSJoao Pinto 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
4315aff3d9efSJoao Pinto 
4316aff3d9efSJoao Pinto 		tx_q->cur_tx = 0;
4317aff3d9efSJoao Pinto 		tx_q->dirty_tx = 0;
4318aff3d9efSJoao Pinto 	}
4319aff3d9efSJoao Pinto }
4320aff3d9efSJoao Pinto 
4321aff3d9efSJoao Pinto /**
4322732fdf0eSGiuseppe CAVALLARO  * stmmac_resume - resume callback
4323f4e7bd81SJoachim Eastwood  * @dev: device pointer
4324732fdf0eSGiuseppe CAVALLARO  * Description: this function is invoked on resume to set up the DMA and CORE
4325732fdf0eSGiuseppe CAVALLARO  * in a usable state.
4326732fdf0eSGiuseppe CAVALLARO  */
4327f4e7bd81SJoachim Eastwood int stmmac_resume(struct device *dev)
43287ac6653aSJeff Kirsher {
4329f4e7bd81SJoachim Eastwood 	struct net_device *ndev = dev_get_drvdata(dev);
43307ac6653aSJeff Kirsher 	struct stmmac_priv *priv = netdev_priv(ndev);
4331f8c5a875SGiuseppe CAVALLARO 	unsigned long flags;
43327ac6653aSJeff Kirsher 
43337ac6653aSJeff Kirsher 	if (!netif_running(ndev))
43347ac6653aSJeff Kirsher 		return 0;
43357ac6653aSJeff Kirsher 
43367ac6653aSJeff Kirsher 	/* The Power Down bit in the PM register is cleared
43377ac6653aSJeff Kirsher 	 * automatically as soon as a magic packet or a Wake-up frame
43387ac6653aSJeff Kirsher 	 * is received. Anyway, it's better to manually clear
43397ac6653aSJeff Kirsher 	 * this bit because it can generate problems while resuming
4340ceb69499SGiuseppe CAVALLARO 	 * from other devices (e.g. serial console).
4341ceb69499SGiuseppe CAVALLARO 	 */
4342623997fbSSrinivas Kandagatla 	if (device_may_wakeup(priv->device)) {
4343f55d84b0SVincent Palatin 		spin_lock_irqsave(&priv->lock, flags);
43447ed24bbeSVince Bridgers 		priv->hw->mac->pmt(priv->hw, 0);
4345f55d84b0SVincent Palatin 		spin_unlock_irqrestore(&priv->lock, flags);
434689f7f2cfSSrinivas Kandagatla 		priv->irq_wake = 0;
4347623997fbSSrinivas Kandagatla 	} else {
4348db88f10aSSrinivas Kandagatla 		pinctrl_pm_select_default_state(priv->device);
43498d45e42bSLABBE Corentin 		/* Re-enable the clocks that were disabled on suspend */
4350f573c0b9Sjpinto 		clk_enable(priv->plat->stmmac_clk);
4351f573c0b9Sjpinto 		clk_enable(priv->plat->pclk);
4352623997fbSSrinivas Kandagatla 		/* reset the phy so that it's ready */
4353623997fbSSrinivas Kandagatla 		if (priv->mii)
4354623997fbSSrinivas Kandagatla 			stmmac_mdio_reset(priv->mii);
4355623997fbSSrinivas Kandagatla 	}
43567ac6653aSJeff Kirsher 
43577ac6653aSJeff Kirsher 	netif_device_attach(ndev);
43587ac6653aSJeff Kirsher 
4359f55d84b0SVincent Palatin 	spin_lock_irqsave(&priv->lock, flags);
4360f55d84b0SVincent Palatin 
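	/* Bring the software queue indices back to a known state before the
	 * descriptors are cleared and the HW is reprogrammed below.
	 */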
4361aff3d9efSJoao Pinto 	stmmac_reset_queues_param(priv);
4362aff3d9efSJoao Pinto 
4363f748be53SAlexandre TORGUE 	/* reset private mss value to force mss context settings at
4364f748be53SAlexandre TORGUE 	 * next tso xmit (only used for gmac4).
4365f748be53SAlexandre TORGUE 	 */
4366f748be53SAlexandre TORGUE 	priv->mss = 0;
4367f748be53SAlexandre TORGUE 
4368ae79a639SGiuseppe CAVALLARO 	stmmac_clear_descriptors(priv);
4369ae79a639SGiuseppe CAVALLARO 
4370fe131929SHuacai Chen 	stmmac_hw_setup(ndev, false);
4371777da230SGiuseppe CAVALLARO 	stmmac_init_tx_coalesce(priv);
4372ac316c78SGiuseppe CAVALLARO 	stmmac_set_rx_mode(ndev);
43737ac6653aSJeff Kirsher 
4374aff3d9efSJoao Pinto 	stmmac_enable_all_queues(priv);
43757ac6653aSJeff Kirsher 
4376aff3d9efSJoao Pinto 	stmmac_start_all_queues(priv);
43777ac6653aSJeff Kirsher 
4378f8c5a875SGiuseppe CAVALLARO 	spin_unlock_irqrestore(&priv->lock, flags);
4379102463b1SFrancesco Virlinzi 
4380d6d50c7eSPhilippe Reynes 	if (ndev->phydev)
4381d6d50c7eSPhilippe Reynes 		phy_start(ndev->phydev);
4382102463b1SFrancesco Virlinzi 
43837ac6653aSJeff Kirsher 	return 0;
43847ac6653aSJeff Kirsher }
4385b2e2f0c7SAndy Shevchenko EXPORT_SYMBOL_GPL(stmmac_resume);
4386ba27ec66SGiuseppe CAVALLARO 
43877ac6653aSJeff Kirsher #ifndef MODULE
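/* Parse the built-in "stmmaceth=" boot parameter. Options are given as a
 * comma-separated list of "name:value" pairs, for example (values are
 * illustrative only):
 *
 *	stmmaceth=debug:16,phyaddr:1,watchdog:4000
 */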
43887ac6653aSJeff Kirsher static int __init stmmac_cmdline_opt(char *str)
43897ac6653aSJeff Kirsher {
43907ac6653aSJeff Kirsher 	char *opt;
43917ac6653aSJeff Kirsher 
43927ac6653aSJeff Kirsher 	if (!str || !*str)
43937ac6653aSJeff Kirsher 		return -EINVAL;
43947ac6653aSJeff Kirsher 	while ((opt = strsep(&str, ",")) != NULL) {
43957ac6653aSJeff Kirsher 		if (!strncmp(opt, "debug:", 6)) {
4396ea2ab871SGiuseppe CAVALLARO 			if (kstrtoint(opt + 6, 0, &debug))
43977ac6653aSJeff Kirsher 				goto err;
43987ac6653aSJeff Kirsher 		} else if (!strncmp(opt, "phyaddr:", 8)) {
4399ea2ab871SGiuseppe CAVALLARO 			if (kstrtoint(opt + 8, 0, &phyaddr))
44007ac6653aSJeff Kirsher 				goto err;
44017ac6653aSJeff Kirsher 		} else if (!strncmp(opt, "buf_sz:", 7)) {
4402ea2ab871SGiuseppe CAVALLARO 			if (kstrtoint(opt + 7, 0, &buf_sz))
44037ac6653aSJeff Kirsher 				goto err;
44047ac6653aSJeff Kirsher 		} else if (!strncmp(opt, "tc:", 3)) {
4405ea2ab871SGiuseppe CAVALLARO 			if (kstrtoint(opt + 3, 0, &tc))
44067ac6653aSJeff Kirsher 				goto err;
44077ac6653aSJeff Kirsher 		} else if (!strncmp(opt, "watchdog:", 9)) {
4408ea2ab871SGiuseppe CAVALLARO 			if (kstrtoint(opt + 9, 0, &watchdog))
44097ac6653aSJeff Kirsher 				goto err;
44107ac6653aSJeff Kirsher 		} else if (!strncmp(opt, "flow_ctrl:", 10)) {
4411ea2ab871SGiuseppe CAVALLARO 			if (kstrtoint(opt + 10, 0, &flow_ctrl))
44127ac6653aSJeff Kirsher 				goto err;
44137ac6653aSJeff Kirsher 		} else if (!strncmp(opt, "pause:", 6)) {
4414ea2ab871SGiuseppe CAVALLARO 			if (kstrtoint(opt + 6, 0, &pause))
44157ac6653aSJeff Kirsher 				goto err;
4416506f669cSGiuseppe CAVALLARO 		} else if (!strncmp(opt, "eee_timer:", 10)) {
4417d765955dSGiuseppe CAVALLARO 			if (kstrtoint(opt + 10, 0, &eee_timer))
4418d765955dSGiuseppe CAVALLARO 				goto err;
44194a7d666aSGiuseppe CAVALLARO 		} else if (!strncmp(opt, "chain_mode:", 11)) {
44204a7d666aSGiuseppe CAVALLARO 			if (kstrtoint(opt + 11, 0, &chain_mode))
44214a7d666aSGiuseppe CAVALLARO 				goto err;
44227ac6653aSJeff Kirsher 		}
44237ac6653aSJeff Kirsher 	}
44247ac6653aSJeff Kirsher 	return 0;
44257ac6653aSJeff Kirsher 
44267ac6653aSJeff Kirsher err:
44277ac6653aSJeff Kirsher 	pr_err("%s: ERROR broken module parameter conversion\n", __func__);
44287ac6653aSJeff Kirsher 	return -EINVAL;
44297ac6653aSJeff Kirsher }
44307ac6653aSJeff Kirsher 
44317ac6653aSJeff Kirsher __setup("stmmaceth=", stmmac_cmdline_opt);
4432ceb69499SGiuseppe CAVALLARO #endif /* MODULE */
44336fc0d0f2SGiuseppe Cavallaro 
4434466c5ac8SMathieu Olivari static int __init stmmac_init(void)
4435466c5ac8SMathieu Olivari {
4436466c5ac8SMathieu Olivari #ifdef CONFIG_DEBUG_FS
4437466c5ac8SMathieu Olivari 	/* Create debugfs main directory if it doesn't exist yet */
4438466c5ac8SMathieu Olivari 	if (!stmmac_fs_dir) {
4439466c5ac8SMathieu Olivari 		stmmac_fs_dir = debugfs_create_dir(STMMAC_RESOURCE_NAME, NULL);
4440466c5ac8SMathieu Olivari 
4441466c5ac8SMathieu Olivari 		if (!stmmac_fs_dir || IS_ERR(stmmac_fs_dir)) {
4442466c5ac8SMathieu Olivari 			pr_err("ERROR %s, debugfs create directory failed\n",
4443466c5ac8SMathieu Olivari 			       STMMAC_RESOURCE_NAME);
4444466c5ac8SMathieu Olivari 
4445466c5ac8SMathieu Olivari 			return -ENOMEM;
4446466c5ac8SMathieu Olivari 		}
4447466c5ac8SMathieu Olivari 	}
4448466c5ac8SMathieu Olivari #endif
4449466c5ac8SMathieu Olivari 
4450466c5ac8SMathieu Olivari 	return 0;
4451466c5ac8SMathieu Olivari }
4452466c5ac8SMathieu Olivari 
4453466c5ac8SMathieu Olivari static void __exit stmmac_exit(void)
4454466c5ac8SMathieu Olivari {
4455466c5ac8SMathieu Olivari #ifdef CONFIG_DEBUG_FS
4456466c5ac8SMathieu Olivari 	debugfs_remove_recursive(stmmac_fs_dir);
4457466c5ac8SMathieu Olivari #endif
4458466c5ac8SMathieu Olivari }
4459466c5ac8SMathieu Olivari 
4460466c5ac8SMathieu Olivari module_init(stmmac_init)
4461466c5ac8SMathieu Olivari module_exit(stmmac_exit)
4462466c5ac8SMathieu Olivari 
44636fc0d0f2SGiuseppe Cavallaro MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet device driver");
44646fc0d0f2SGiuseppe Cavallaro MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
44656fc0d0f2SGiuseppe Cavallaro MODULE_LICENSE("GPL");
4466