xref: /openbmc/linux/drivers/net/ethernet/xilinx/xilinx_axienet_main.c (revision e50e86dbcabda570fc8a1435fe2fca97e9ab7312)
109c434b8SThomas Gleixner // SPDX-License-Identifier: GPL-2.0-only
28a3b7a25Sdanborkmann@iogearbox.net /*
38a3b7a25Sdanborkmann@iogearbox.net  * Xilinx Axi Ethernet device driver
48a3b7a25Sdanborkmann@iogearbox.net  *
58a3b7a25Sdanborkmann@iogearbox.net  * Copyright (c) 2008 Nissin Systems Co., Ltd.,  Yoshio Kashiwagi
68a3b7a25Sdanborkmann@iogearbox.net  * Copyright (c) 2005-2008 DLA Systems,  David H. Lynch Jr. <dhlii@dlasys.net>
78a3b7a25Sdanborkmann@iogearbox.net  * Copyright (c) 2008-2009 Secret Lab Technologies Ltd.
859a54f30SMichal Simek  * Copyright (c) 2010 - 2011 Michal Simek <monstr@monstr.eu>
959a54f30SMichal Simek  * Copyright (c) 2010 - 2011 PetaLogix
10cc37610cSRobert Hancock  * Copyright (c) 2019 - 2022 Calian Advanced Technologies
1159a54f30SMichal Simek  * Copyright (c) 2010 - 2012 Xilinx, Inc. All rights reserved.
128a3b7a25Sdanborkmann@iogearbox.net  *
138a3b7a25Sdanborkmann@iogearbox.net  * This is a driver for the Xilinx Axi Ethernet, which is used in the Virtex-6
148a3b7a25Sdanborkmann@iogearbox.net  * and Spartan-6.
158a3b7a25Sdanborkmann@iogearbox.net  *
168a3b7a25Sdanborkmann@iogearbox.net  * TODO:
178a3b7a25Sdanborkmann@iogearbox.net  *  - Add Axi Fifo support.
188a3b7a25Sdanborkmann@iogearbox.net  *  - Factor out Axi DMA code into separate driver.
198a3b7a25Sdanborkmann@iogearbox.net  *  - Test and fix basic multicast filtering.
208a3b7a25Sdanborkmann@iogearbox.net  *  - Add support for extended multicast filtering.
218a3b7a25Sdanborkmann@iogearbox.net  *  - Test basic VLAN support.
228a3b7a25Sdanborkmann@iogearbox.net  *  - Add support for extended VLAN support.
238a3b7a25Sdanborkmann@iogearbox.net  */
248a3b7a25Sdanborkmann@iogearbox.net 
2509a0354cSRobert Hancock #include <linux/clk.h>
268a3b7a25Sdanborkmann@iogearbox.net #include <linux/delay.h>
278a3b7a25Sdanborkmann@iogearbox.net #include <linux/etherdevice.h>
288a3b7a25Sdanborkmann@iogearbox.net #include <linux/module.h>
298a3b7a25Sdanborkmann@iogearbox.net #include <linux/netdevice.h>
303d40aed8SRob Herring #include <linux/of.h>
318a3b7a25Sdanborkmann@iogearbox.net #include <linux/of_mdio.h>
32da90e380STobias Klauser #include <linux/of_net.h>
339d5e8ec6SMichal Simek #include <linux/of_irq.h>
348a3b7a25Sdanborkmann@iogearbox.net #include <linux/of_address.h>
353d40aed8SRob Herring #include <linux/platform_device.h>
368a3b7a25Sdanborkmann@iogearbox.net #include <linux/skbuff.h>
370b79b8dcSRobert Hancock #include <linux/math64.h>
388a3b7a25Sdanborkmann@iogearbox.net #include <linux/phy.h>
398a3b7a25Sdanborkmann@iogearbox.net #include <linux/mii.h>
408a3b7a25Sdanborkmann@iogearbox.net #include <linux/ethtool.h>
418a3b7a25Sdanborkmann@iogearbox.net 
428a3b7a25Sdanborkmann@iogearbox.net #include "xilinx_axienet.h"
438a3b7a25Sdanborkmann@iogearbox.net 
448b09ca82SRobert Hancock /* Descriptors defines for Tx and Rx DMA */
452d19c3fdSRobert Hancock #define TX_BD_NUM_DEFAULT		128
468b09ca82SRobert Hancock #define RX_BD_NUM_DEFAULT		1024
4770f5817dSRobert Hancock #define TX_BD_NUM_MIN			(MAX_SKB_FRAGS + 1)
488b09ca82SRobert Hancock #define TX_BD_NUM_MAX			4096
498b09ca82SRobert Hancock #define RX_BD_NUM_MAX			4096
508a3b7a25Sdanborkmann@iogearbox.net 
518a3b7a25Sdanborkmann@iogearbox.net /* Must be shorter than length of ethtool_drvinfo.driver field to fit */
528a3b7a25Sdanborkmann@iogearbox.net #define DRIVER_NAME		"xaxienet"
538a3b7a25Sdanborkmann@iogearbox.net #define DRIVER_DESCRIPTION	"Xilinx Axi Ethernet driver"
548a3b7a25Sdanborkmann@iogearbox.net #define DRIVER_VERSION		"1.00a"
558a3b7a25Sdanborkmann@iogearbox.net 
56867d03bcSRobert Hancock #define AXIENET_REGS_N		40
578a3b7a25Sdanborkmann@iogearbox.net 
588a3b7a25Sdanborkmann@iogearbox.net /* Match table for of_platform binding */
5974847f23SFabian Frederick static const struct of_device_id axienet_of_match[] = {
608a3b7a25Sdanborkmann@iogearbox.net 	{ .compatible = "xlnx,axi-ethernet-1.00.a", },
618a3b7a25Sdanborkmann@iogearbox.net 	{ .compatible = "xlnx,axi-ethernet-1.01.a", },
628a3b7a25Sdanborkmann@iogearbox.net 	{ .compatible = "xlnx,axi-ethernet-2.01.a", },
638a3b7a25Sdanborkmann@iogearbox.net 	{},
648a3b7a25Sdanborkmann@iogearbox.net };
658a3b7a25Sdanborkmann@iogearbox.net 
668a3b7a25Sdanborkmann@iogearbox.net MODULE_DEVICE_TABLE(of, axienet_of_match);
678a3b7a25Sdanborkmann@iogearbox.net 
688a3b7a25Sdanborkmann@iogearbox.net /* Option table for setting up Axi Ethernet hardware options */
698a3b7a25Sdanborkmann@iogearbox.net static struct axienet_option axienet_options[] = {
708a3b7a25Sdanborkmann@iogearbox.net 	/* Turn on jumbo packet support for both Rx and Tx */
718a3b7a25Sdanborkmann@iogearbox.net 	{
728a3b7a25Sdanborkmann@iogearbox.net 		.opt = XAE_OPTION_JUMBO,
738a3b7a25Sdanborkmann@iogearbox.net 		.reg = XAE_TC_OFFSET,
748a3b7a25Sdanborkmann@iogearbox.net 		.m_or = XAE_TC_JUM_MASK,
758a3b7a25Sdanborkmann@iogearbox.net 	}, {
768a3b7a25Sdanborkmann@iogearbox.net 		.opt = XAE_OPTION_JUMBO,
778a3b7a25Sdanborkmann@iogearbox.net 		.reg = XAE_RCW1_OFFSET,
788a3b7a25Sdanborkmann@iogearbox.net 		.m_or = XAE_RCW1_JUM_MASK,
798a3b7a25Sdanborkmann@iogearbox.net 	}, { /* Turn on VLAN packet support for both Rx and Tx */
808a3b7a25Sdanborkmann@iogearbox.net 		.opt = XAE_OPTION_VLAN,
818a3b7a25Sdanborkmann@iogearbox.net 		.reg = XAE_TC_OFFSET,
828a3b7a25Sdanborkmann@iogearbox.net 		.m_or = XAE_TC_VLAN_MASK,
838a3b7a25Sdanborkmann@iogearbox.net 	}, {
848a3b7a25Sdanborkmann@iogearbox.net 		.opt = XAE_OPTION_VLAN,
858a3b7a25Sdanborkmann@iogearbox.net 		.reg = XAE_RCW1_OFFSET,
868a3b7a25Sdanborkmann@iogearbox.net 		.m_or = XAE_RCW1_VLAN_MASK,
878a3b7a25Sdanborkmann@iogearbox.net 	}, { /* Turn on FCS stripping on receive packets */
888a3b7a25Sdanborkmann@iogearbox.net 		.opt = XAE_OPTION_FCS_STRIP,
898a3b7a25Sdanborkmann@iogearbox.net 		.reg = XAE_RCW1_OFFSET,
908a3b7a25Sdanborkmann@iogearbox.net 		.m_or = XAE_RCW1_FCS_MASK,
918a3b7a25Sdanborkmann@iogearbox.net 	}, { /* Turn on FCS insertion on transmit packets */
928a3b7a25Sdanborkmann@iogearbox.net 		.opt = XAE_OPTION_FCS_INSERT,
938a3b7a25Sdanborkmann@iogearbox.net 		.reg = XAE_TC_OFFSET,
948a3b7a25Sdanborkmann@iogearbox.net 		.m_or = XAE_TC_FCS_MASK,
958a3b7a25Sdanborkmann@iogearbox.net 	}, { /* Turn off length/type field checking on receive packets */
968a3b7a25Sdanborkmann@iogearbox.net 		.opt = XAE_OPTION_LENTYPE_ERR,
978a3b7a25Sdanborkmann@iogearbox.net 		.reg = XAE_RCW1_OFFSET,
988a3b7a25Sdanborkmann@iogearbox.net 		.m_or = XAE_RCW1_LT_DIS_MASK,
998a3b7a25Sdanborkmann@iogearbox.net 	}, { /* Turn on Rx flow control */
1008a3b7a25Sdanborkmann@iogearbox.net 		.opt = XAE_OPTION_FLOW_CONTROL,
1018a3b7a25Sdanborkmann@iogearbox.net 		.reg = XAE_FCC_OFFSET,
1028a3b7a25Sdanborkmann@iogearbox.net 		.m_or = XAE_FCC_FCRX_MASK,
1038a3b7a25Sdanborkmann@iogearbox.net 	}, { /* Turn on Tx flow control */
1048a3b7a25Sdanborkmann@iogearbox.net 		.opt = XAE_OPTION_FLOW_CONTROL,
1058a3b7a25Sdanborkmann@iogearbox.net 		.reg = XAE_FCC_OFFSET,
1068a3b7a25Sdanborkmann@iogearbox.net 		.m_or = XAE_FCC_FCTX_MASK,
1078a3b7a25Sdanborkmann@iogearbox.net 	}, { /* Turn on promiscuous frame filtering */
1088a3b7a25Sdanborkmann@iogearbox.net 		.opt = XAE_OPTION_PROMISC,
1098a3b7a25Sdanborkmann@iogearbox.net 		.reg = XAE_FMI_OFFSET,
1108a3b7a25Sdanborkmann@iogearbox.net 		.m_or = XAE_FMI_PM_MASK,
1118a3b7a25Sdanborkmann@iogearbox.net 	}, { /* Enable transmitter */
1128a3b7a25Sdanborkmann@iogearbox.net 		.opt = XAE_OPTION_TXEN,
1138a3b7a25Sdanborkmann@iogearbox.net 		.reg = XAE_TC_OFFSET,
1148a3b7a25Sdanborkmann@iogearbox.net 		.m_or = XAE_TC_TX_MASK,
1158a3b7a25Sdanborkmann@iogearbox.net 	}, { /* Enable receiver */
1168a3b7a25Sdanborkmann@iogearbox.net 		.opt = XAE_OPTION_RXEN,
1178a3b7a25Sdanborkmann@iogearbox.net 		.reg = XAE_RCW1_OFFSET,
1188a3b7a25Sdanborkmann@iogearbox.net 		.m_or = XAE_RCW1_RX_MASK,
1198a3b7a25Sdanborkmann@iogearbox.net 	},
1208a3b7a25Sdanborkmann@iogearbox.net 	{}
1218a3b7a25Sdanborkmann@iogearbox.net };
1228a3b7a25Sdanborkmann@iogearbox.net 
1238a3b7a25Sdanborkmann@iogearbox.net /**
1248a3b7a25Sdanborkmann@iogearbox.net  * axienet_dma_in32 - Memory mapped Axi DMA register read
1258a3b7a25Sdanborkmann@iogearbox.net  * @lp:		Pointer to axienet local structure
1268a3b7a25Sdanborkmann@iogearbox.net  * @reg:	Address offset from the base address of the Axi DMA core
1278a3b7a25Sdanborkmann@iogearbox.net  *
128b0d081c5SMichal Simek  * Return: The contents of the Axi DMA register
1298a3b7a25Sdanborkmann@iogearbox.net  *
1308a3b7a25Sdanborkmann@iogearbox.net  * This function returns the contents of the corresponding Axi DMA register.
1318a3b7a25Sdanborkmann@iogearbox.net  */
1328a3b7a25Sdanborkmann@iogearbox.net static inline u32 axienet_dma_in32(struct axienet_local *lp, off_t reg)
1338a3b7a25Sdanborkmann@iogearbox.net {
134d85f5f3eSRobert Hancock 	return ioread32(lp->dma_regs + reg);
1358a3b7a25Sdanborkmann@iogearbox.net }
1368a3b7a25Sdanborkmann@iogearbox.net 
1374e958f33SAndre Przywara static void desc_set_phys_addr(struct axienet_local *lp, dma_addr_t addr,
1384e958f33SAndre Przywara 			       struct axidma_bd *desc)
1394e958f33SAndre Przywara {
1404e958f33SAndre Przywara 	desc->phys = lower_32_bits(addr);
1414e958f33SAndre Przywara 	if (lp->features & XAE_FEATURE_DMA_64BIT)
1424e958f33SAndre Przywara 		desc->phys_msb = upper_32_bits(addr);
1434e958f33SAndre Przywara }
1444e958f33SAndre Przywara 
1454e958f33SAndre Przywara static dma_addr_t desc_get_phys_addr(struct axienet_local *lp,
1464e958f33SAndre Przywara 				     struct axidma_bd *desc)
1474e958f33SAndre Przywara {
1484e958f33SAndre Przywara 	dma_addr_t ret = desc->phys;
1494e958f33SAndre Przywara 
1504e958f33SAndre Przywara 	if (lp->features & XAE_FEATURE_DMA_64BIT)
1514e958f33SAndre Przywara 		ret |= ((dma_addr_t)desc->phys_msb << 16) << 16;
1524e958f33SAndre Przywara 
1534e958f33SAndre Przywara 	return ret;
1544e958f33SAndre Przywara }
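
/*
 * Editorial note (not in the original source): the upper half is combined
 * with two 16-bit shifts rather than a single "<< 32" so the expression
 * stays well defined even when dma_addr_t is only 32 bits wide, where a
 * direct 32-bit shift would be undefined behaviour; on 64-bit dma_addr_t
 * configurations the compiler folds the two shifts into one.
 */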
1554e958f33SAndre Przywara 
1568a3b7a25Sdanborkmann@iogearbox.net /**
1578a3b7a25Sdanborkmann@iogearbox.net  * axienet_dma_bd_release - Release buffer descriptor rings
1588a3b7a25Sdanborkmann@iogearbox.net  * @ndev:	Pointer to the net_device structure
1598a3b7a25Sdanborkmann@iogearbox.net  *
1608a3b7a25Sdanborkmann@iogearbox.net  * This function is used to release the descriptors allocated in
1618a3b7a25Sdanborkmann@iogearbox.net  * axienet_dma_bd_init. axienet_dma_bd_release is called when Axi Ethernet
1628a3b7a25Sdanborkmann@iogearbox.net  * driver stop api is called.
1638a3b7a25Sdanborkmann@iogearbox.net  */
1648a3b7a25Sdanborkmann@iogearbox.net static void axienet_dma_bd_release(struct net_device *ndev)
1658a3b7a25Sdanborkmann@iogearbox.net {
1668a3b7a25Sdanborkmann@iogearbox.net 	int i;
1678a3b7a25Sdanborkmann@iogearbox.net 	struct axienet_local *lp = netdev_priv(ndev);
1688a3b7a25Sdanborkmann@iogearbox.net 
169f26667a3SAndre Przywara 	/* If we end up here, tx_bd_v must have been DMA allocated. */
17017882fd4SRobert Hancock 	dma_free_coherent(lp->dev,
1718b09ca82SRobert Hancock 			  sizeof(*lp->tx_bd_v) * lp->tx_bd_num,
1728a3b7a25Sdanborkmann@iogearbox.net 			  lp->tx_bd_v,
1738a3b7a25Sdanborkmann@iogearbox.net 			  lp->tx_bd_p);
174f26667a3SAndre Przywara 
175f26667a3SAndre Przywara 	if (!lp->rx_bd_v)
176f26667a3SAndre Przywara 		return;
177f26667a3SAndre Przywara 
178f26667a3SAndre Przywara 	for (i = 0; i < lp->rx_bd_num; i++) {
1794e958f33SAndre Przywara 		dma_addr_t phys;
1804e958f33SAndre Przywara 
181f26667a3SAndre Przywara 		/* A NULL skb means this descriptor has not been initialised
182f26667a3SAndre Przywara 		 * at all.
183f26667a3SAndre Przywara 		 */
184f26667a3SAndre Przywara 		if (!lp->rx_bd_v[i].skb)
185f26667a3SAndre Przywara 			break;
186f26667a3SAndre Przywara 
187f26667a3SAndre Przywara 		dev_kfree_skb(lp->rx_bd_v[i].skb);
188f26667a3SAndre Przywara 
189f26667a3SAndre Przywara 		/* For each descriptor, we programmed cntrl with the (non-zero)
190f26667a3SAndre Przywara 		 * descriptor size, after it had been successfully allocated.
191f26667a3SAndre Przywara 		 * So a non-zero value in there means we need to unmap it.
192f26667a3SAndre Przywara 		 */
1934e958f33SAndre Przywara 		if (lp->rx_bd_v[i].cntrl) {
1944e958f33SAndre Przywara 			phys = desc_get_phys_addr(lp, &lp->rx_bd_v[i]);
19517882fd4SRobert Hancock 			dma_unmap_single(lp->dev, phys,
196f26667a3SAndre Przywara 					 lp->max_frm_size, DMA_FROM_DEVICE);
1978a3b7a25Sdanborkmann@iogearbox.net 		}
1984e958f33SAndre Przywara 	}
199f26667a3SAndre Przywara 
20017882fd4SRobert Hancock 	dma_free_coherent(lp->dev,
201f26667a3SAndre Przywara 			  sizeof(*lp->rx_bd_v) * lp->rx_bd_num,
202f26667a3SAndre Przywara 			  lp->rx_bd_v,
203f26667a3SAndre Przywara 			  lp->rx_bd_p);
2048a3b7a25Sdanborkmann@iogearbox.net }
2058a3b7a25Sdanborkmann@iogearbox.net 
2068a3b7a25Sdanborkmann@iogearbox.net /**
2070b79b8dcSRobert Hancock  * axienet_usec_to_timer - Calculate IRQ delay timer value
2080b79b8dcSRobert Hancock  * @lp:		Pointer to the axienet_local structure
2090b79b8dcSRobert Hancock  * @coalesce_usec: Microseconds to convert into timer value
2100b79b8dcSRobert Hancock  */
2110b79b8dcSRobert Hancock static u32 axienet_usec_to_timer(struct axienet_local *lp, u32 coalesce_usec)
2120b79b8dcSRobert Hancock {
2130b79b8dcSRobert Hancock 	u32 result;
2140b79b8dcSRobert Hancock 	u64 clk_rate = 125000000; /* arbitrary guess if no clock rate set */
2150b79b8dcSRobert Hancock 
2160b79b8dcSRobert Hancock 	if (lp->axi_clk)
2170b79b8dcSRobert Hancock 		clk_rate = clk_get_rate(lp->axi_clk);
2180b79b8dcSRobert Hancock 
2190b79b8dcSRobert Hancock 	/* 1 Timeout Interval = 125 * (clock period of SG clock) */
2200b79b8dcSRobert Hancock 	result = DIV64_U64_ROUND_CLOSEST((u64)coalesce_usec * clk_rate,
2210b79b8dcSRobert Hancock 					 (u64)125000000);
2220b79b8dcSRobert Hancock 	if (result > 255)
2230b79b8dcSRobert Hancock 		result = 255;
2240b79b8dcSRobert Hancock 
2250b79b8dcSRobert Hancock 	return result;
2260b79b8dcSRobert Hancock }
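
/*
 * Worked example (editorial, illustrative only): with the assumed default
 * SG clock of 125 MHz, one timeout interval is 125 clock periods =
 * 125 / 125,000,000 s = 1 us, so coalesce_usec = 50 programs a timer value
 * of 50; any request that would exceed the 8-bit field saturates at 255.
 */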
2270b79b8dcSRobert Hancock 
2280b79b8dcSRobert Hancock /**
22984b9ccc0SRobert Hancock  * axienet_dma_start - Set up DMA registers and start DMA operation
23084b9ccc0SRobert Hancock  * @lp:		Pointer to the axienet_local structure
23184b9ccc0SRobert Hancock  */
23284b9ccc0SRobert Hancock static void axienet_dma_start(struct axienet_local *lp)
23384b9ccc0SRobert Hancock {
23484b9ccc0SRobert Hancock 	/* Start updating the Rx channel control register */
235cc37610cSRobert Hancock 	lp->rx_dma_cr = (lp->coalesce_count_rx << XAXIDMA_COALESCE_SHIFT) |
2360155ae6eSRobert Hancock 			XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_ERROR_MASK;
2370155ae6eSRobert Hancock 	/* Only set interrupt delay timer if not generating an interrupt on
2380155ae6eSRobert Hancock 	 * the first RX packet. Otherwise leave at 0 to disable delay interrupt.
2390155ae6eSRobert Hancock 	 */
2400155ae6eSRobert Hancock 	if (lp->coalesce_count_rx > 1)
2410b79b8dcSRobert Hancock 		lp->rx_dma_cr |= (axienet_usec_to_timer(lp, lp->coalesce_usec_rx)
2420b79b8dcSRobert Hancock 					<< XAXIDMA_DELAY_SHIFT) |
2430155ae6eSRobert Hancock 				 XAXIDMA_IRQ_DELAY_MASK;
244cc37610cSRobert Hancock 	axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, lp->rx_dma_cr);
24584b9ccc0SRobert Hancock 
24684b9ccc0SRobert Hancock 	/* Start updating the Tx channel control register */
2479e2bc267SRobert Hancock 	lp->tx_dma_cr = (lp->coalesce_count_tx << XAXIDMA_COALESCE_SHIFT) |
2480155ae6eSRobert Hancock 			XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_ERROR_MASK;
2490155ae6eSRobert Hancock 	/* Only set interrupt delay timer if not generating an interrupt on
2500155ae6eSRobert Hancock 	 * the first TX packet. Otherwise leave at 0 to disable delay interrupt.
2510155ae6eSRobert Hancock 	 */
2520155ae6eSRobert Hancock 	if (lp->coalesce_count_tx > 1)
2539e2bc267SRobert Hancock 		lp->tx_dma_cr |= (axienet_usec_to_timer(lp, lp->coalesce_usec_tx)
2540b79b8dcSRobert Hancock 					<< XAXIDMA_DELAY_SHIFT) |
2550155ae6eSRobert Hancock 				 XAXIDMA_IRQ_DELAY_MASK;
2569e2bc267SRobert Hancock 	axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, lp->tx_dma_cr);
25784b9ccc0SRobert Hancock 
25884b9ccc0SRobert Hancock 	/* Populate the tail pointer and bring the Rx Axi DMA engine out of
25984b9ccc0SRobert Hancock 	 * halted state. This will make the Rx side ready for reception.
26084b9ccc0SRobert Hancock 	 */
26184b9ccc0SRobert Hancock 	axienet_dma_out_addr(lp, XAXIDMA_RX_CDESC_OFFSET, lp->rx_bd_p);
262cc37610cSRobert Hancock 	lp->rx_dma_cr |= XAXIDMA_CR_RUNSTOP_MASK;
263cc37610cSRobert Hancock 	axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, lp->rx_dma_cr);
26484b9ccc0SRobert Hancock 	axienet_dma_out_addr(lp, XAXIDMA_RX_TDESC_OFFSET, lp->rx_bd_p +
26584b9ccc0SRobert Hancock 			     (sizeof(*lp->rx_bd_v) * (lp->rx_bd_num - 1)));
26684b9ccc0SRobert Hancock 
26784b9ccc0SRobert Hancock 	/* Write to the RS (Run-stop) bit in the Tx channel control register.
26884b9ccc0SRobert Hancock 	 * Tx channel is now ready to run, but it will only start transmitting
26984b9ccc0SRobert Hancock 	 * after we write to the tail pointer register.
27084b9ccc0SRobert Hancock 	 */
27184b9ccc0SRobert Hancock 	axienet_dma_out_addr(lp, XAXIDMA_TX_CDESC_OFFSET, lp->tx_bd_p);
2729e2bc267SRobert Hancock 	lp->tx_dma_cr |= XAXIDMA_CR_RUNSTOP_MASK;
2739e2bc267SRobert Hancock 	axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, lp->tx_dma_cr);
27484b9ccc0SRobert Hancock }
27584b9ccc0SRobert Hancock 
27684b9ccc0SRobert Hancock /**
2778a3b7a25Sdanborkmann@iogearbox.net  * axienet_dma_bd_init - Setup buffer descriptor rings for Axi DMA
2788a3b7a25Sdanborkmann@iogearbox.net  * @ndev:	Pointer to the net_device structure
2798a3b7a25Sdanborkmann@iogearbox.net  *
280b0d081c5SMichal Simek  * Return: 0 on success, -ENOMEM on failure
2818a3b7a25Sdanborkmann@iogearbox.net  *
2828a3b7a25Sdanborkmann@iogearbox.net  * This function is called to initialize the Rx and Tx DMA descriptor
2838a3b7a25Sdanborkmann@iogearbox.net  * rings. This initializes the descriptors with required default values
2848a3b7a25Sdanborkmann@iogearbox.net  * and is called when Axi Ethernet driver reset is called.
2858a3b7a25Sdanborkmann@iogearbox.net  */
2868a3b7a25Sdanborkmann@iogearbox.net static int axienet_dma_bd_init(struct net_device *ndev)
2878a3b7a25Sdanborkmann@iogearbox.net {
2888a3b7a25Sdanborkmann@iogearbox.net 	int i;
2898a3b7a25Sdanborkmann@iogearbox.net 	struct sk_buff *skb;
2908a3b7a25Sdanborkmann@iogearbox.net 	struct axienet_local *lp = netdev_priv(ndev);
2918a3b7a25Sdanborkmann@iogearbox.net 
2928a3b7a25Sdanborkmann@iogearbox.net 	/* Reset the indexes which are used for accessing the BDs */
2938a3b7a25Sdanborkmann@iogearbox.net 	lp->tx_bd_ci = 0;
2948a3b7a25Sdanborkmann@iogearbox.net 	lp->tx_bd_tail = 0;
2958a3b7a25Sdanborkmann@iogearbox.net 	lp->rx_bd_ci = 0;
2968a3b7a25Sdanborkmann@iogearbox.net 
297850a7503SMichal Simek 	/* Allocate the Tx and Rx buffer descriptors. */
29817882fd4SRobert Hancock 	lp->tx_bd_v = dma_alloc_coherent(lp->dev,
2998b09ca82SRobert Hancock 					 sizeof(*lp->tx_bd_v) * lp->tx_bd_num,
300ede23fa8SJoe Perches 					 &lp->tx_bd_p, GFP_KERNEL);
301d0320f75SJoe Perches 	if (!lp->tx_bd_v)
302f26667a3SAndre Przywara 		return -ENOMEM;
3038a3b7a25Sdanborkmann@iogearbox.net 
30417882fd4SRobert Hancock 	lp->rx_bd_v = dma_alloc_coherent(lp->dev,
3058b09ca82SRobert Hancock 					 sizeof(*lp->rx_bd_v) * lp->rx_bd_num,
306ede23fa8SJoe Perches 					 &lp->rx_bd_p, GFP_KERNEL);
307d0320f75SJoe Perches 	if (!lp->rx_bd_v)
3088a3b7a25Sdanborkmann@iogearbox.net 		goto out;
3098a3b7a25Sdanborkmann@iogearbox.net 
3108b09ca82SRobert Hancock 	for (i = 0; i < lp->tx_bd_num; i++) {
3114e958f33SAndre Przywara 		dma_addr_t addr = lp->tx_bd_p +
3128a3b7a25Sdanborkmann@iogearbox.net 				  sizeof(*lp->tx_bd_v) *
3138b09ca82SRobert Hancock 				  ((i + 1) % lp->tx_bd_num);
3144e958f33SAndre Przywara 
3154e958f33SAndre Przywara 		lp->tx_bd_v[i].next = lower_32_bits(addr);
3164e958f33SAndre Przywara 		if (lp->features & XAE_FEATURE_DMA_64BIT)
3174e958f33SAndre Przywara 			lp->tx_bd_v[i].next_msb = upper_32_bits(addr);
3188a3b7a25Sdanborkmann@iogearbox.net 	}
3198a3b7a25Sdanborkmann@iogearbox.net 
3208b09ca82SRobert Hancock 	for (i = 0; i < lp->rx_bd_num; i++) {
3214e958f33SAndre Przywara 		dma_addr_t addr;
3224e958f33SAndre Przywara 
3234e958f33SAndre Przywara 		addr = lp->rx_bd_p + sizeof(*lp->rx_bd_v) *
3248b09ca82SRobert Hancock 			((i + 1) % lp->rx_bd_num);
3254e958f33SAndre Przywara 		lp->rx_bd_v[i].next = lower_32_bits(addr);
3264e958f33SAndre Przywara 		if (lp->features & XAE_FEATURE_DMA_64BIT)
3274e958f33SAndre Przywara 			lp->rx_bd_v[i].next_msb = upper_32_bits(addr);
3288a3b7a25Sdanborkmann@iogearbox.net 
3298a3b7a25Sdanborkmann@iogearbox.net 		skb = netdev_alloc_skb_ip_align(ndev, lp->max_frm_size);
330720a43efSJoe Perches 		if (!skb)
3318a3b7a25Sdanborkmann@iogearbox.net 			goto out;
3328a3b7a25Sdanborkmann@iogearbox.net 
33323e6b2dcSRobert Hancock 		lp->rx_bd_v[i].skb = skb;
33417882fd4SRobert Hancock 		addr = dma_map_single(lp->dev, skb->data,
3354e958f33SAndre Przywara 				      lp->max_frm_size, DMA_FROM_DEVICE);
33617882fd4SRobert Hancock 		if (dma_mapping_error(lp->dev, addr)) {
33771791dc8SAndre Przywara 			netdev_err(ndev, "DMA mapping error\n");
33871791dc8SAndre Przywara 			goto out;
33971791dc8SAndre Przywara 		}
3404e958f33SAndre Przywara 		desc_set_phys_addr(lp, addr, &lp->rx_bd_v[i]);
34171791dc8SAndre Przywara 
3428a3b7a25Sdanborkmann@iogearbox.net 		lp->rx_bd_v[i].cntrl = lp->max_frm_size;
3438a3b7a25Sdanborkmann@iogearbox.net 	}
3448a3b7a25Sdanborkmann@iogearbox.net 
34584b9ccc0SRobert Hancock 	axienet_dma_start(lp);
3468a3b7a25Sdanborkmann@iogearbox.net 
3478a3b7a25Sdanborkmann@iogearbox.net 	return 0;
3488a3b7a25Sdanborkmann@iogearbox.net out:
3498a3b7a25Sdanborkmann@iogearbox.net 	axienet_dma_bd_release(ndev);
3508a3b7a25Sdanborkmann@iogearbox.net 	return -ENOMEM;
3518a3b7a25Sdanborkmann@iogearbox.net }
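
/*
 * Editorial illustration: with a hypothetical tx_bd_num of 4, the loop
 * above links the descriptors into a circular chain,
 *   bd[0].next -> bd[1], bd[1].next -> bd[2],
 *   bd[2].next -> bd[3], bd[3].next -> bd[0],
 * so the DMA engine can keep walking the ring without software rewriting
 * the next pointers. The Rx ring is linked the same way.
 */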
3528a3b7a25Sdanborkmann@iogearbox.net 
3538a3b7a25Sdanborkmann@iogearbox.net /**
3548a3b7a25Sdanborkmann@iogearbox.net  * axienet_set_mac_address - Write the MAC address
3558a3b7a25Sdanborkmann@iogearbox.net  * @ndev:	Pointer to the net_device structure
3568a3b7a25Sdanborkmann@iogearbox.net  * @address:	6 byte Address to be written as MAC address
3578a3b7a25Sdanborkmann@iogearbox.net  *
3588a3b7a25Sdanborkmann@iogearbox.net  * This function is called to initialize the MAC address of the Axi Ethernet
3598a3b7a25Sdanborkmann@iogearbox.net  * core. It writes to the UAW0 and UAW1 registers of the core.
3608a3b7a25Sdanborkmann@iogearbox.net  */
361da90e380STobias Klauser static void axienet_set_mac_address(struct net_device *ndev,
362da90e380STobias Klauser 				    const void *address)
3638a3b7a25Sdanborkmann@iogearbox.net {
3648a3b7a25Sdanborkmann@iogearbox.net 	struct axienet_local *lp = netdev_priv(ndev);
3658a3b7a25Sdanborkmann@iogearbox.net 
3668a3b7a25Sdanborkmann@iogearbox.net 	if (address)
367a96d317fSJakub Kicinski 		eth_hw_addr_set(ndev, address);
3688a3b7a25Sdanborkmann@iogearbox.net 	if (!is_valid_ether_addr(ndev->dev_addr))
369452349c3STobias Klauser 		eth_hw_addr_random(ndev);
3708a3b7a25Sdanborkmann@iogearbox.net 
3718a3b7a25Sdanborkmann@iogearbox.net 	/* Set up the unicast MAC address filter with the given MAC address */
3728a3b7a25Sdanborkmann@iogearbox.net 	axienet_iow(lp, XAE_UAW0_OFFSET,
3738a3b7a25Sdanborkmann@iogearbox.net 		    (ndev->dev_addr[0]) |
3748a3b7a25Sdanborkmann@iogearbox.net 		    (ndev->dev_addr[1] << 8) |
3758a3b7a25Sdanborkmann@iogearbox.net 		    (ndev->dev_addr[2] << 16) |
3768a3b7a25Sdanborkmann@iogearbox.net 		    (ndev->dev_addr[3] << 24));
3778a3b7a25Sdanborkmann@iogearbox.net 	axienet_iow(lp, XAE_UAW1_OFFSET,
3788a3b7a25Sdanborkmann@iogearbox.net 		    (((axienet_ior(lp, XAE_UAW1_OFFSET)) &
3798a3b7a25Sdanborkmann@iogearbox.net 		      ~XAE_UAW1_UNICASTADDR_MASK) |
3808a3b7a25Sdanborkmann@iogearbox.net 		     (ndev->dev_addr[4] |
3818a3b7a25Sdanborkmann@iogearbox.net 		     (ndev->dev_addr[5] << 8))));
3828a3b7a25Sdanborkmann@iogearbox.net }
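
/*
 * Packing example (editorial, using the hypothetical address
 * 00:0a:35:01:02:03): UAW0 is written with 0x01350a00 (address bytes 0-3,
 * byte 0 in the least significant byte) and the low 16 bits of UAW1 with
 * 0x0302 (bytes 4-5); the remaining UAW1 bits are preserved by the
 * read-modify-write above.
 */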
3838a3b7a25Sdanborkmann@iogearbox.net 
3848a3b7a25Sdanborkmann@iogearbox.net /**
3858a3b7a25Sdanborkmann@iogearbox.net  * netdev_set_mac_address - Write the MAC address (from outside the driver)
3868a3b7a25Sdanborkmann@iogearbox.net  * @ndev:	Pointer to the net_device structure
3878a3b7a25Sdanborkmann@iogearbox.net  * @p:		6 byte Address to be written as MAC address
3888a3b7a25Sdanborkmann@iogearbox.net  *
389b0d081c5SMichal Simek  * Return: 0 for all conditions. Presently, there is no failure case.
3908a3b7a25Sdanborkmann@iogearbox.net  *
3918a3b7a25Sdanborkmann@iogearbox.net  * This function is called to initialize the MAC address of the Axi Ethernet
3928a3b7a25Sdanborkmann@iogearbox.net  * core. It calls the core specific axienet_set_mac_address. This is the
3938a3b7a25Sdanborkmann@iogearbox.net  * function that goes into net_device_ops structure entry ndo_set_mac_address.
3948a3b7a25Sdanborkmann@iogearbox.net  */
3958a3b7a25Sdanborkmann@iogearbox.net static int netdev_set_mac_address(struct net_device *ndev, void *p)
3968a3b7a25Sdanborkmann@iogearbox.net {
3978a3b7a25Sdanborkmann@iogearbox.net 	struct sockaddr *addr = p;
3988a3b7a25Sdanborkmann@iogearbox.net 	axienet_set_mac_address(ndev, addr->sa_data);
3998a3b7a25Sdanborkmann@iogearbox.net 	return 0;
4008a3b7a25Sdanborkmann@iogearbox.net }
4018a3b7a25Sdanborkmann@iogearbox.net 
4028a3b7a25Sdanborkmann@iogearbox.net /**
4038a3b7a25Sdanborkmann@iogearbox.net  * axienet_set_multicast_list - Prepare the multicast table
4048a3b7a25Sdanborkmann@iogearbox.net  * @ndev:	Pointer to the net_device structure
4058a3b7a25Sdanborkmann@iogearbox.net  *
4068a3b7a25Sdanborkmann@iogearbox.net  * This function is called to initialize the multicast table during
4078a3b7a25Sdanborkmann@iogearbox.net  * initialization. The Axi Ethernet basic multicast support has a four-entry
4088a3b7a25Sdanborkmann@iogearbox.net  * multicast table which is initialized here. Additionally this function
4098a3b7a25Sdanborkmann@iogearbox.net  * goes into the net_device_ops structure entry ndo_set_multicast_list. This
4108a3b7a25Sdanborkmann@iogearbox.net  * means whenever the multicast table entries need to be updated this
4118a3b7a25Sdanborkmann@iogearbox.net  * function gets called.
4128a3b7a25Sdanborkmann@iogearbox.net  */
4138a3b7a25Sdanborkmann@iogearbox.net static void axienet_set_multicast_list(struct net_device *ndev)
4148a3b7a25Sdanborkmann@iogearbox.net {
4154bf322e5SSean Anderson 	int i = 0;
4168a3b7a25Sdanborkmann@iogearbox.net 	u32 reg, af0reg, af1reg;
4178a3b7a25Sdanborkmann@iogearbox.net 	struct axienet_local *lp = netdev_priv(ndev);
4188a3b7a25Sdanborkmann@iogearbox.net 
4198a3b7a25Sdanborkmann@iogearbox.net 	if (ndev->flags & (IFF_ALLMULTI | IFF_PROMISC) ||
4208a3b7a25Sdanborkmann@iogearbox.net 	    netdev_mc_count(ndev) > XAE_MULTICAST_CAM_TABLE_NUM) {
4218a3b7a25Sdanborkmann@iogearbox.net 		/* We must make the kernel realize we had to move into
4228a3b7a25Sdanborkmann@iogearbox.net 		 * promiscuous mode. If it was a promiscuous mode request,
423850a7503SMichal Simek 		 * the flag is already set. If not, we set it.
424850a7503SMichal Simek 		 */
4258a3b7a25Sdanborkmann@iogearbox.net 		ndev->flags |= IFF_PROMISC;
4268a3b7a25Sdanborkmann@iogearbox.net 		reg = axienet_ior(lp, XAE_FMI_OFFSET);
4278a3b7a25Sdanborkmann@iogearbox.net 		reg |= XAE_FMI_PM_MASK;
4288a3b7a25Sdanborkmann@iogearbox.net 		axienet_iow(lp, XAE_FMI_OFFSET, reg);
4298a3b7a25Sdanborkmann@iogearbox.net 		dev_info(&ndev->dev, "Promiscuous mode enabled.\n");
4308a3b7a25Sdanborkmann@iogearbox.net 	} else if (!netdev_mc_empty(ndev)) {
4318a3b7a25Sdanborkmann@iogearbox.net 		struct netdev_hw_addr *ha;
4328a3b7a25Sdanborkmann@iogearbox.net 
43314ebcb4aSSean Anderson 		reg = axienet_ior(lp, XAE_FMI_OFFSET);
43414ebcb4aSSean Anderson 		reg &= ~XAE_FMI_PM_MASK;
43514ebcb4aSSean Anderson 		axienet_iow(lp, XAE_FMI_OFFSET, reg);
43614ebcb4aSSean Anderson 
4378a3b7a25Sdanborkmann@iogearbox.net 		netdev_for_each_mc_addr(ha, ndev) {
4388a3b7a25Sdanborkmann@iogearbox.net 			if (i >= XAE_MULTICAST_CAM_TABLE_NUM)
4398a3b7a25Sdanborkmann@iogearbox.net 				break;
4408a3b7a25Sdanborkmann@iogearbox.net 
4418a3b7a25Sdanborkmann@iogearbox.net 			af0reg = (ha->addr[0]);
4428a3b7a25Sdanborkmann@iogearbox.net 			af0reg |= (ha->addr[1] << 8);
4438a3b7a25Sdanborkmann@iogearbox.net 			af0reg |= (ha->addr[2] << 16);
4448a3b7a25Sdanborkmann@iogearbox.net 			af0reg |= (ha->addr[3] << 24);
4458a3b7a25Sdanborkmann@iogearbox.net 
4468a3b7a25Sdanborkmann@iogearbox.net 			af1reg = (ha->addr[4]);
4478a3b7a25Sdanborkmann@iogearbox.net 			af1reg |= (ha->addr[5] << 8);
4488a3b7a25Sdanborkmann@iogearbox.net 
4498a3b7a25Sdanborkmann@iogearbox.net 			reg = axienet_ior(lp, XAE_FMI_OFFSET) & 0xFFFFFF00;
4508a3b7a25Sdanborkmann@iogearbox.net 			reg |= i;
4518a3b7a25Sdanborkmann@iogearbox.net 
4528a3b7a25Sdanborkmann@iogearbox.net 			axienet_iow(lp, XAE_FMI_OFFSET, reg);
4538a3b7a25Sdanborkmann@iogearbox.net 			axienet_iow(lp, XAE_AF0_OFFSET, af0reg);
4548a3b7a25Sdanborkmann@iogearbox.net 			axienet_iow(lp, XAE_AF1_OFFSET, af1reg);
4554bf322e5SSean Anderson 			axienet_iow(lp, XAE_FFE_OFFSET, 1);
4568a3b7a25Sdanborkmann@iogearbox.net 			i++;
4578a3b7a25Sdanborkmann@iogearbox.net 		}
4588a3b7a25Sdanborkmann@iogearbox.net 	} else {
4598a3b7a25Sdanborkmann@iogearbox.net 		reg = axienet_ior(lp, XAE_FMI_OFFSET);
4608a3b7a25Sdanborkmann@iogearbox.net 		reg &= ~XAE_FMI_PM_MASK;
4618a3b7a25Sdanborkmann@iogearbox.net 
4628a3b7a25Sdanborkmann@iogearbox.net 		axienet_iow(lp, XAE_FMI_OFFSET, reg);
4634bf322e5SSean Anderson 		dev_info(&ndev->dev, "Promiscuous mode disabled.\n");
4648a3b7a25Sdanborkmann@iogearbox.net 	}
4658a3b7a25Sdanborkmann@iogearbox.net 
4664bf322e5SSean Anderson 	for (; i < XAE_MULTICAST_CAM_TABLE_NUM; i++) {
4674bf322e5SSean Anderson 		reg = axienet_ior(lp, XAE_FMI_OFFSET) & 0xFFFFFF00;
4684bf322e5SSean Anderson 		reg |= i;
4694bf322e5SSean Anderson 		axienet_iow(lp, XAE_FMI_OFFSET, reg);
4704bf322e5SSean Anderson 		axienet_iow(lp, XAE_FFE_OFFSET, 0);
4718a3b7a25Sdanborkmann@iogearbox.net 	}
4728a3b7a25Sdanborkmann@iogearbox.net }
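
/*
 * Editorial sketch of the CAM programming model assumed by the code above:
 * the low byte written to FMI selects which of the
 * XAE_MULTICAST_CAM_TABLE_NUM filter entries the AF0/AF1 and FFE accesses
 * refer to. Each loop iteration therefore selects an entry, loads the
 * 6-byte address into AF0/AF1 and enables it via FFE; the trailing loop
 * disables any entries left over from a previously larger filter list.
 */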
4738a3b7a25Sdanborkmann@iogearbox.net 
4748a3b7a25Sdanborkmann@iogearbox.net /**
4758a3b7a25Sdanborkmann@iogearbox.net  * axienet_setoptions - Set an Axi Ethernet option
4768a3b7a25Sdanborkmann@iogearbox.net  * @ndev:	Pointer to the net_device structure
4778a3b7a25Sdanborkmann@iogearbox.net  * @options:	Option to be enabled/disabled
4788a3b7a25Sdanborkmann@iogearbox.net  *
4798a3b7a25Sdanborkmann@iogearbox.net  * The Axi Ethernet core has multiple features which can be selectively turned
4808a3b7a25Sdanborkmann@iogearbox.net  * on or off. Typical options include the jumbo frame option, basic VLAN
4818a3b7a25Sdanborkmann@iogearbox.net  * option, promiscuous mode option, etc. This function is used to set or clear
4828a3b7a25Sdanborkmann@iogearbox.net  * these options in the Axi Ethernet hardware. This is done through the
4838a3b7a25Sdanborkmann@iogearbox.net  * axienet_option structure.
4848a3b7a25Sdanborkmann@iogearbox.net  */
4858a3b7a25Sdanborkmann@iogearbox.net static void axienet_setoptions(struct net_device *ndev, u32 options)
4868a3b7a25Sdanborkmann@iogearbox.net {
4878a3b7a25Sdanborkmann@iogearbox.net 	int reg;
4888a3b7a25Sdanborkmann@iogearbox.net 	struct axienet_local *lp = netdev_priv(ndev);
4898a3b7a25Sdanborkmann@iogearbox.net 	struct axienet_option *tp = &axienet_options[0];
4908a3b7a25Sdanborkmann@iogearbox.net 
4918a3b7a25Sdanborkmann@iogearbox.net 	while (tp->opt) {
4928a3b7a25Sdanborkmann@iogearbox.net 		reg = ((axienet_ior(lp, tp->reg)) & ~(tp->m_or));
4938a3b7a25Sdanborkmann@iogearbox.net 		if (options & tp->opt)
4948a3b7a25Sdanborkmann@iogearbox.net 			reg |= tp->m_or;
4958a3b7a25Sdanborkmann@iogearbox.net 		axienet_iow(lp, tp->reg, reg);
4968a3b7a25Sdanborkmann@iogearbox.net 		tp++;
4978a3b7a25Sdanborkmann@iogearbox.net 	}
4988a3b7a25Sdanborkmann@iogearbox.net 
4998a3b7a25Sdanborkmann@iogearbox.net 	lp->options |= options;
5008a3b7a25Sdanborkmann@iogearbox.net }
5018a3b7a25Sdanborkmann@iogearbox.net 
502ee44d0b7SAndre Przywara static int __axienet_device_reset(struct axienet_local *lp)
5038a3b7a25Sdanborkmann@iogearbox.net {
5042e5644b1SRobert Hancock 	u32 value;
5052e5644b1SRobert Hancock 	int ret;
506ee44d0b7SAndre Przywara 
5078a3b7a25Sdanborkmann@iogearbox.net 	/* Reset Axi DMA. This would reset Axi Ethernet core as well. The reset
5088a3b7a25Sdanborkmann@iogearbox.net 	 * process of Axi DMA takes a while to complete as all pending
5098a3b7a25Sdanborkmann@iogearbox.net 	 * commands/transfers will be flushed or completed during this
510850a7503SMichal Simek 	 * reset process.
511489d4d77SRobert Hancock 	 * Note that even though both TX and RX have their own reset register,
512489d4d77SRobert Hancock 	 * they both reset the entire DMA core, so only one needs to be used.
513850a7503SMichal Simek 	 */
514489d4d77SRobert Hancock 	axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, XAXIDMA_CR_RESET_MASK);
5152e5644b1SRobert Hancock 	ret = read_poll_timeout(axienet_dma_in32, value,
5162e5644b1SRobert Hancock 				!(value & XAXIDMA_CR_RESET_MASK),
5172e5644b1SRobert Hancock 				DELAY_OF_ONE_MILLISEC, 50000, false, lp,
5182e5644b1SRobert Hancock 				XAXIDMA_TX_CR_OFFSET);
5192e5644b1SRobert Hancock 	if (ret) {
5202e5644b1SRobert Hancock 		dev_err(lp->dev, "%s: DMA reset timeout!\n", __func__);
5212e5644b1SRobert Hancock 		return ret;
5228a3b7a25Sdanborkmann@iogearbox.net 	}
523ee44d0b7SAndre Przywara 
524b400c2f4SRobert Hancock 	/* Wait for PhyRstCmplt bit to be set, indicating the PHY reset has finished */
525b400c2f4SRobert Hancock 	ret = read_poll_timeout(axienet_ior, value,
526b400c2f4SRobert Hancock 				value & XAE_INT_PHYRSTCMPLT_MASK,
527b400c2f4SRobert Hancock 				DELAY_OF_ONE_MILLISEC, 50000, false, lp,
528b400c2f4SRobert Hancock 				XAE_IS_OFFSET);
529b400c2f4SRobert Hancock 	if (ret) {
530b400c2f4SRobert Hancock 		dev_err(lp->dev, "%s: timeout waiting for PhyRstCmplt\n", __func__);
531b400c2f4SRobert Hancock 		return ret;
532b400c2f4SRobert Hancock 	}
533b400c2f4SRobert Hancock 
534ee44d0b7SAndre Przywara 	return 0;
5358a3b7a25Sdanborkmann@iogearbox.net }
5368a3b7a25Sdanborkmann@iogearbox.net 
5378a3b7a25Sdanborkmann@iogearbox.net /**
53884b9ccc0SRobert Hancock  * axienet_dma_stop - Stop DMA operation
53984b9ccc0SRobert Hancock  * @lp:		Pointer to the axienet_local structure
54084b9ccc0SRobert Hancock  */
54184b9ccc0SRobert Hancock static void axienet_dma_stop(struct axienet_local *lp)
54284b9ccc0SRobert Hancock {
54384b9ccc0SRobert Hancock 	int count;
54484b9ccc0SRobert Hancock 	u32 cr, sr;
54584b9ccc0SRobert Hancock 
54684b9ccc0SRobert Hancock 	cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
54784b9ccc0SRobert Hancock 	cr &= ~(XAXIDMA_CR_RUNSTOP_MASK | XAXIDMA_IRQ_ALL_MASK);
54884b9ccc0SRobert Hancock 	axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);
54984b9ccc0SRobert Hancock 	synchronize_irq(lp->rx_irq);
55084b9ccc0SRobert Hancock 
55184b9ccc0SRobert Hancock 	cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
55284b9ccc0SRobert Hancock 	cr &= ~(XAXIDMA_CR_RUNSTOP_MASK | XAXIDMA_IRQ_ALL_MASK);
55384b9ccc0SRobert Hancock 	axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);
55484b9ccc0SRobert Hancock 	synchronize_irq(lp->tx_irq);
55584b9ccc0SRobert Hancock 
55684b9ccc0SRobert Hancock 	/* Give DMAs a chance to halt gracefully */
55784b9ccc0SRobert Hancock 	sr = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET);
55884b9ccc0SRobert Hancock 	for (count = 0; !(sr & XAXIDMA_SR_HALT_MASK) && count < 5; ++count) {
55984b9ccc0SRobert Hancock 		msleep(20);
56084b9ccc0SRobert Hancock 		sr = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET);
56184b9ccc0SRobert Hancock 	}
56284b9ccc0SRobert Hancock 
56384b9ccc0SRobert Hancock 	sr = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET);
56484b9ccc0SRobert Hancock 	for (count = 0; !(sr & XAXIDMA_SR_HALT_MASK) && count < 5; ++count) {
56584b9ccc0SRobert Hancock 		msleep(20);
56684b9ccc0SRobert Hancock 		sr = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET);
56784b9ccc0SRobert Hancock 	}
56884b9ccc0SRobert Hancock 
56984b9ccc0SRobert Hancock 	/* Do a reset to ensure DMA is really stopped */
57084b9ccc0SRobert Hancock 	axienet_lock_mii(lp);
57184b9ccc0SRobert Hancock 	__axienet_device_reset(lp);
57284b9ccc0SRobert Hancock 	axienet_unlock_mii(lp);
57384b9ccc0SRobert Hancock }
57484b9ccc0SRobert Hancock 
57584b9ccc0SRobert Hancock /**
5768a3b7a25Sdanborkmann@iogearbox.net  * axienet_device_reset - Reset and initialize the Axi Ethernet hardware.
5778a3b7a25Sdanborkmann@iogearbox.net  * @ndev:	Pointer to the net_device structure
5788a3b7a25Sdanborkmann@iogearbox.net  *
5798a3b7a25Sdanborkmann@iogearbox.net  * This function is called to reset and initialize the Axi Ethernet core. This
5808a3b7a25Sdanborkmann@iogearbox.net  * is typically called during initialization. It does a reset of the Axi DMA
5818a3b7a25Sdanborkmann@iogearbox.net  * Rx/Tx channels and initializes the Axi DMA BDs. Since Axi DMA reset lines
5828a3b7a25Sdanborkmann@iogearbox.net  * are connected to Axi Ethernet reset lines, this in turn resets the Axi
5838a3b7a25Sdanborkmann@iogearbox.net  * Ethernet core. No separate hardware reset is done for the Axi Ethernet
5848a3b7a25Sdanborkmann@iogearbox.net  * core.
585ee44d0b7SAndre Przywara  * Returns 0 on success or a negative error number otherwise.
5868a3b7a25Sdanborkmann@iogearbox.net  */
587ee44d0b7SAndre Przywara static int axienet_device_reset(struct net_device *ndev)
5888a3b7a25Sdanborkmann@iogearbox.net {
5898a3b7a25Sdanborkmann@iogearbox.net 	u32 axienet_status;
5908a3b7a25Sdanborkmann@iogearbox.net 	struct axienet_local *lp = netdev_priv(ndev);
591ee44d0b7SAndre Przywara 	int ret;
5928a3b7a25Sdanborkmann@iogearbox.net 
593ee44d0b7SAndre Przywara 	ret = __axienet_device_reset(lp);
594ee44d0b7SAndre Przywara 	if (ret)
595ee44d0b7SAndre Przywara 		return ret;
5968a3b7a25Sdanborkmann@iogearbox.net 
5978a3b7a25Sdanborkmann@iogearbox.net 	lp->max_frm_size = XAE_MAX_VLAN_FRAME_SIZE;
598f080a8c3SSrikanth Thokala 	lp->options |= XAE_OPTION_VLAN;
5998a3b7a25Sdanborkmann@iogearbox.net 	lp->options &= (~XAE_OPTION_JUMBO);
6008a3b7a25Sdanborkmann@iogearbox.net 
6018a3b7a25Sdanborkmann@iogearbox.net 	if ((ndev->mtu > XAE_MTU) &&
602f080a8c3SSrikanth Thokala 	    (ndev->mtu <= XAE_JUMBO_MTU)) {
603f080a8c3SSrikanth Thokala 		lp->max_frm_size = ndev->mtu + VLAN_ETH_HLEN +
6048a3b7a25Sdanborkmann@iogearbox.net 					XAE_TRL_SIZE;
605f080a8c3SSrikanth Thokala 
606f080a8c3SSrikanth Thokala 		if (lp->max_frm_size <= lp->rxmem)
6078a3b7a25Sdanborkmann@iogearbox.net 			lp->options |= XAE_OPTION_JUMBO;
6088a3b7a25Sdanborkmann@iogearbox.net 	}
6098a3b7a25Sdanborkmann@iogearbox.net 
610ee44d0b7SAndre Przywara 	ret = axienet_dma_bd_init(ndev);
611ee44d0b7SAndre Przywara 	if (ret) {
612c81a97b5SSrikanth Thokala 		netdev_err(ndev, "%s: descriptor allocation failed\n",
613c81a97b5SSrikanth Thokala 			   __func__);
614ee44d0b7SAndre Przywara 		return ret;
6158a3b7a25Sdanborkmann@iogearbox.net 	}
6168a3b7a25Sdanborkmann@iogearbox.net 
6178a3b7a25Sdanborkmann@iogearbox.net 	axienet_status = axienet_ior(lp, XAE_RCW1_OFFSET);
6188a3b7a25Sdanborkmann@iogearbox.net 	axienet_status &= ~XAE_RCW1_RX_MASK;
6198a3b7a25Sdanborkmann@iogearbox.net 	axienet_iow(lp, XAE_RCW1_OFFSET, axienet_status);
6208a3b7a25Sdanborkmann@iogearbox.net 
6218a3b7a25Sdanborkmann@iogearbox.net 	axienet_status = axienet_ior(lp, XAE_IP_OFFSET);
6228a3b7a25Sdanborkmann@iogearbox.net 	if (axienet_status & XAE_INT_RXRJECT_MASK)
6238a3b7a25Sdanborkmann@iogearbox.net 		axienet_iow(lp, XAE_IS_OFFSET, XAE_INT_RXRJECT_MASK);
624522856ceSRobert Hancock 	axienet_iow(lp, XAE_IE_OFFSET, lp->eth_irq > 0 ?
625522856ceSRobert Hancock 		    XAE_INT_RECV_ERROR_MASK : 0);
6268a3b7a25Sdanborkmann@iogearbox.net 
6278a3b7a25Sdanborkmann@iogearbox.net 	axienet_iow(lp, XAE_FCC_OFFSET, XAE_FCC_FCRX_MASK);
6288a3b7a25Sdanborkmann@iogearbox.net 
6298a3b7a25Sdanborkmann@iogearbox.net 	/* Sync default options with HW but leave receiver and
630850a7503SMichal Simek 	 * transmitter disabled.
631850a7503SMichal Simek 	 */
6328a3b7a25Sdanborkmann@iogearbox.net 	axienet_setoptions(ndev, lp->options &
6338a3b7a25Sdanborkmann@iogearbox.net 			   ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
6348a3b7a25Sdanborkmann@iogearbox.net 	axienet_set_mac_address(ndev, NULL);
6358a3b7a25Sdanborkmann@iogearbox.net 	axienet_set_multicast_list(ndev);
6368a3b7a25Sdanborkmann@iogearbox.net 	axienet_setoptions(ndev, lp->options);
6378a3b7a25Sdanborkmann@iogearbox.net 
638860e9538SFlorian Westphal 	netif_trans_update(ndev);
639ee44d0b7SAndre Przywara 
640ee44d0b7SAndre Przywara 	return 0;
6418a3b7a25Sdanborkmann@iogearbox.net }
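
/*
 * Worked example (editorial, assuming VLAN_ETH_HLEN = 18 and
 * XAE_TRL_SIZE = 4): a hypothetical jumbo MTU of 9000 gives max_frm_size =
 * 9000 + 18 + 4 = 9022 bytes, and XAE_OPTION_JUMBO is set only if that
 * fits within the configured rxmem buffer size.
 */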
6428a3b7a25Sdanborkmann@iogearbox.net 
6438a3b7a25Sdanborkmann@iogearbox.net /**
644ab365c33SAndre Przywara  * axienet_free_tx_chain - Clean up a series of linked TX descriptors.
6459e2bc267SRobert Hancock  * @lp:		Pointer to the axienet_local structure
646ab365c33SAndre Przywara  * @first_bd:	Index of first descriptor to clean up
6479e2bc267SRobert Hancock  * @nr_bds:	Max number of descriptors to clean up
6489e2bc267SRobert Hancock  * @force:	Whether to clean descriptors even if not complete
649ab365c33SAndre Przywara  * @sizep:	Pointer to a u32 filled with the total sum of all bytes
650ab365c33SAndre Przywara  *		in all cleaned-up descriptors. Ignored if NULL.
6519e2bc267SRobert Hancock  * @budget:	NAPI budget (use 0 when not called from NAPI poll)
652ab365c33SAndre Przywara  *
653ab365c33SAndre Przywara  * Called either after a successful transmit operation or after an error
654ab365c33SAndre Przywara  * when setting up the chain.
65589bab831SSean Anderson  * Returns the number of packets handled.
656ab365c33SAndre Przywara  */
6579e2bc267SRobert Hancock static int axienet_free_tx_chain(struct axienet_local *lp, u32 first_bd,
6589e2bc267SRobert Hancock 				 int nr_bds, bool force, u32 *sizep, int budget)
659ab365c33SAndre Przywara {
660ab365c33SAndre Przywara 	struct axidma_bd *cur_p;
661ab365c33SAndre Przywara 	unsigned int status;
66289bab831SSean Anderson 	int i, packets = 0;
6634e958f33SAndre Przywara 	dma_addr_t phys;
664ab365c33SAndre Przywara 
6659e2bc267SRobert Hancock 	for (i = 0; i < nr_bds; i++) {
666ab365c33SAndre Przywara 		cur_p = &lp->tx_bd_v[(first_bd + i) % lp->tx_bd_num];
667ab365c33SAndre Przywara 		status = cur_p->status;
668ab365c33SAndre Przywara 
6699e2bc267SRobert Hancock 		/* If force is not specified, clean up only descriptors
6709e2bc267SRobert Hancock 		 * that have been completed by the MAC.
671ab365c33SAndre Przywara 		 */
6729e2bc267SRobert Hancock 		if (!force && !(status & XAXIDMA_BD_STS_COMPLETE_MASK))
673ab365c33SAndre Przywara 			break;
674ab365c33SAndre Przywara 
67595978df6SRobert Hancock 		/* Ensure we see complete descriptor update */
67695978df6SRobert Hancock 		dma_rmb();
6774e958f33SAndre Przywara 		phys = desc_get_phys_addr(lp, cur_p);
67817882fd4SRobert Hancock 		dma_unmap_single(lp->dev, phys,
679ab365c33SAndre Przywara 				 (cur_p->cntrl & XAXIDMA_BD_CTRL_LENGTH_MASK),
680ab365c33SAndre Przywara 				 DMA_TO_DEVICE);
681ab365c33SAndre Przywara 
68289bab831SSean Anderson 		if (cur_p->skb && (status & XAXIDMA_BD_STS_COMPLETE_MASK)) {
6839e2bc267SRobert Hancock 			napi_consume_skb(cur_p->skb, budget);
68489bab831SSean Anderson 			packets++;
68589bab831SSean Anderson 		}
686ab365c33SAndre Przywara 
687ab365c33SAndre Przywara 		cur_p->app0 = 0;
688ab365c33SAndre Przywara 		cur_p->app1 = 0;
689ab365c33SAndre Przywara 		cur_p->app2 = 0;
690ab365c33SAndre Przywara 		cur_p->app4 = 0;
691ab365c33SAndre Przywara 		cur_p->skb = NULL;
69295978df6SRobert Hancock 		/* ensure our transmit path and device don't prematurely see status cleared */
69395978df6SRobert Hancock 		wmb();
694996defd7SRobert Hancock 		cur_p->cntrl = 0;
69595978df6SRobert Hancock 		cur_p->status = 0;
696ab365c33SAndre Przywara 
697ab365c33SAndre Przywara 		if (sizep)
698ab365c33SAndre Przywara 			*sizep += status & XAXIDMA_BD_STS_ACTUAL_LEN_MASK;
699ab365c33SAndre Przywara 	}
700ab365c33SAndre Przywara 
70189bab831SSean Anderson 	if (!force) {
70289bab831SSean Anderson 		lp->tx_bd_ci += i;
70389bab831SSean Anderson 		if (lp->tx_bd_ci >= lp->tx_bd_num)
70489bab831SSean Anderson 			lp->tx_bd_ci %= lp->tx_bd_num;
70589bab831SSean Anderson 	}
70689bab831SSean Anderson 
70789bab831SSean Anderson 	return packets;
708ab365c33SAndre Przywara }
709ab365c33SAndre Przywara 
710ab365c33SAndre Przywara /**
711bb193e3dSRobert Hancock  * axienet_check_tx_bd_space - Checks if a BD or group of BDs is currently busy
712bb193e3dSRobert Hancock  * @lp:		Pointer to the axienet_local structure
713bb193e3dSRobert Hancock  * @num_frag:	The number of BDs to check for
714bb193e3dSRobert Hancock  *
715bb193e3dSRobert Hancock  * Return: 0, on success
716bb193e3dSRobert Hancock  *	    NETDEV_TX_BUSY, if any of the descriptors are not free
717bb193e3dSRobert Hancock  *
718bb193e3dSRobert Hancock  * This function is invoked before BDs are allocated and transmission starts.
719bb193e3dSRobert Hancock  * This function returns 0 if a BD or group of BDs can be allocated for
720bb193e3dSRobert Hancock  * transmission. If the BD or any of the BDs are not free the function
7219e2bc267SRobert Hancock  * returns a busy status.
722bb193e3dSRobert Hancock  */
723bb193e3dSRobert Hancock static inline int axienet_check_tx_bd_space(struct axienet_local *lp,
724bb193e3dSRobert Hancock 					    int num_frag)
725bb193e3dSRobert Hancock {
726bb193e3dSRobert Hancock 	struct axidma_bd *cur_p;
727bb193e3dSRobert Hancock 
7289e2bc267SRobert Hancock 	/* Ensure we see all descriptor updates from device or TX polling */
729bb193e3dSRobert Hancock 	rmb();
730f0cf4000SRobert Hancock 	cur_p = &lp->tx_bd_v[(READ_ONCE(lp->tx_bd_tail) + num_frag) %
731f0cf4000SRobert Hancock 			     lp->tx_bd_num];
732bb193e3dSRobert Hancock 	if (cur_p->cntrl)
733bb193e3dSRobert Hancock 		return NETDEV_TX_BUSY;
734bb193e3dSRobert Hancock 	return 0;
735bb193e3dSRobert Hancock }
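
/*
 * Editorial note: only the furthest descriptor the next transmit would
 * need is inspected. Because descriptors are produced and cleaned strictly
 * in ring order, a free (cntrl == 0) descriptor num_frag slots ahead of
 * the tail implies that every descriptor before it is free as well.
 */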
736bb193e3dSRobert Hancock 
737bb193e3dSRobert Hancock /**
7389e2bc267SRobert Hancock  * axienet_tx_poll - Invoked once a transmit is completed by the
7398a3b7a25Sdanborkmann@iogearbox.net  * Axi DMA Tx channel.
7409e2bc267SRobert Hancock  * @napi:	Pointer to NAPI structure.
7419e2bc267SRobert Hancock  * @budget:	Max number of TX packets to process.
7428a3b7a25Sdanborkmann@iogearbox.net  *
7439e2bc267SRobert Hancock  * Return: Number of TX packets processed.
7449e2bc267SRobert Hancock  *
7459e2bc267SRobert Hancock  * This function is invoked from the NAPI processing to notify the completion
7468a3b7a25Sdanborkmann@iogearbox.net  * of transmit operation. It clears fields in the corresponding Tx BDs and
7478a3b7a25Sdanborkmann@iogearbox.net  * unmaps the corresponding buffer so that CPU can regain ownership of the
7488a3b7a25Sdanborkmann@iogearbox.net  * buffer. It finally invokes "netif_wake_queue" to restart transmission if
7498a3b7a25Sdanborkmann@iogearbox.net  * required.
7508a3b7a25Sdanborkmann@iogearbox.net  */
7519e2bc267SRobert Hancock static int axienet_tx_poll(struct napi_struct *napi, int budget)
7528a3b7a25Sdanborkmann@iogearbox.net {
7539e2bc267SRobert Hancock 	struct axienet_local *lp = container_of(napi, struct axienet_local, napi_tx);
7549e2bc267SRobert Hancock 	struct net_device *ndev = lp->ndev;
755ab365c33SAndre Przywara 	u32 size = 0;
7569e2bc267SRobert Hancock 	int packets;
7578a3b7a25Sdanborkmann@iogearbox.net 
75889bab831SSean Anderson 	packets = axienet_free_tx_chain(lp, lp->tx_bd_ci, lp->tx_bd_num, false,
75989bab831SSean Anderson 					&size, budget);
7608a3b7a25Sdanborkmann@iogearbox.net 
7619e2bc267SRobert Hancock 	if (packets) {
762cb45a8bfSRobert Hancock 		u64_stats_update_begin(&lp->tx_stat_sync);
763cb45a8bfSRobert Hancock 		u64_stats_add(&lp->tx_packets, packets);
764cb45a8bfSRobert Hancock 		u64_stats_add(&lp->tx_bytes, size);
765cb45a8bfSRobert Hancock 		u64_stats_update_end(&lp->tx_stat_sync);
7667de44285SRobert Hancock 
7677de44285SRobert Hancock 		/* Matches barrier in axienet_start_xmit */
7687de44285SRobert Hancock 		smp_mb();
7697de44285SRobert Hancock 
770bb193e3dSRobert Hancock 		if (!axienet_check_tx_bd_space(lp, MAX_SKB_FRAGS + 1))
7718a3b7a25Sdanborkmann@iogearbox.net 			netif_wake_queue(ndev);
7728a3b7a25Sdanborkmann@iogearbox.net 	}
7738a3b7a25Sdanborkmann@iogearbox.net 
7749e2bc267SRobert Hancock 	if (packets < budget && napi_complete_done(napi, packets)) {
7759e2bc267SRobert Hancock 		/* Re-enable TX completion interrupts. This should
7769e2bc267SRobert Hancock 		 * cause an immediate interrupt if any TX packets are
7779e2bc267SRobert Hancock 		 * already pending.
7789e2bc267SRobert Hancock 		 */
7799e2bc267SRobert Hancock 		axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, lp->tx_dma_cr);
7809e2bc267SRobert Hancock 	}
7819e2bc267SRobert Hancock 	return packets;
7829e2bc267SRobert Hancock }
7839e2bc267SRobert Hancock 
7848a3b7a25Sdanborkmann@iogearbox.net /**
7858a3b7a25Sdanborkmann@iogearbox.net  * axienet_start_xmit - Starts the transmission.
7868a3b7a25Sdanborkmann@iogearbox.net  * @skb:	sk_buff pointer that contains data to be Txed.
7878a3b7a25Sdanborkmann@iogearbox.net  * @ndev:	Pointer to net_device structure.
7888a3b7a25Sdanborkmann@iogearbox.net  *
789b0d081c5SMichal Simek  * Return: NETDEV_TX_OK, on success
7908a3b7a25Sdanborkmann@iogearbox.net  *	    NETDEV_TX_BUSY, if any of the descriptors are not free
7918a3b7a25Sdanborkmann@iogearbox.net  *
7928a3b7a25Sdanborkmann@iogearbox.net  * This function is invoked from upper layers to initiate transmission. The
7938a3b7a25Sdanborkmann@iogearbox.net  * function uses the next available free BDs and populates their fields to
7948a3b7a25Sdanborkmann@iogearbox.net  * start the transmission. Additionally if checksum offloading is supported,
7958a3b7a25Sdanborkmann@iogearbox.net  * it populates AXI Stream Control fields with appropriate values.
7968a3b7a25Sdanborkmann@iogearbox.net  */
79781255af8SYueHaibing static netdev_tx_t
79881255af8SYueHaibing axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
7998a3b7a25Sdanborkmann@iogearbox.net {
8008a3b7a25Sdanborkmann@iogearbox.net 	u32 ii;
8018a3b7a25Sdanborkmann@iogearbox.net 	u32 num_frag;
8028a3b7a25Sdanborkmann@iogearbox.net 	u32 csum_start_off;
8038a3b7a25Sdanborkmann@iogearbox.net 	u32 csum_index_off;
8048a3b7a25Sdanborkmann@iogearbox.net 	skb_frag_t *frag;
8054e958f33SAndre Przywara 	dma_addr_t tail_p, phys;
806f0cf4000SRobert Hancock 	u32 orig_tail_ptr, new_tail_ptr;
8078a3b7a25Sdanborkmann@iogearbox.net 	struct axienet_local *lp = netdev_priv(ndev);
8088a3b7a25Sdanborkmann@iogearbox.net 	struct axidma_bd *cur_p;
809f0cf4000SRobert Hancock 
810f0cf4000SRobert Hancock 	orig_tail_ptr = lp->tx_bd_tail;
811f0cf4000SRobert Hancock 	new_tail_ptr = orig_tail_ptr;
8128a3b7a25Sdanborkmann@iogearbox.net 
8138a3b7a25Sdanborkmann@iogearbox.net 	num_frag = skb_shinfo(skb)->nr_frags;
814f0cf4000SRobert Hancock 	cur_p = &lp->tx_bd_v[orig_tail_ptr];
8158a3b7a25Sdanborkmann@iogearbox.net 
816aba57a82SRobert Hancock 	if (axienet_check_tx_bd_space(lp, num_frag + 1)) {
817bb193e3dSRobert Hancock 		/* Should not happen as last start_xmit call should have
818bb193e3dSRobert Hancock 		 * checked for sufficient space and queue should only be
819bb193e3dSRobert Hancock 		 * woken when sufficient space is available.
820bb193e3dSRobert Hancock 		 */
8217de44285SRobert Hancock 		netif_stop_queue(ndev);
822bb193e3dSRobert Hancock 		if (net_ratelimit())
823bb193e3dSRobert Hancock 			netdev_warn(ndev, "TX ring unexpectedly full\n");
8247de44285SRobert Hancock 		return NETDEV_TX_BUSY;
8258a3b7a25Sdanborkmann@iogearbox.net 	}
8268a3b7a25Sdanborkmann@iogearbox.net 
8278a3b7a25Sdanborkmann@iogearbox.net 	if (skb->ip_summed == CHECKSUM_PARTIAL) {
8288a3b7a25Sdanborkmann@iogearbox.net 		if (lp->features & XAE_FEATURE_FULL_TX_CSUM) {
8298a3b7a25Sdanborkmann@iogearbox.net 			/* Tx Full Checksum Offload Enabled */
8308a3b7a25Sdanborkmann@iogearbox.net 			cur_p->app0 |= 2;
8312ea6b18aSSamuel Holland 		} else if (lp->features & XAE_FEATURE_PARTIAL_TX_CSUM) {
8328a3b7a25Sdanborkmann@iogearbox.net 			csum_start_off = skb_transport_offset(skb);
8338a3b7a25Sdanborkmann@iogearbox.net 			csum_index_off = csum_start_off + skb->csum_offset;
8348a3b7a25Sdanborkmann@iogearbox.net 			/* Tx Partial Checksum Offload Enabled */
8358a3b7a25Sdanborkmann@iogearbox.net 			cur_p->app0 |= 1;
8368a3b7a25Sdanborkmann@iogearbox.net 			cur_p->app1 = (csum_start_off << 16) | csum_index_off;
8378a3b7a25Sdanborkmann@iogearbox.net 		}
8388a3b7a25Sdanborkmann@iogearbox.net 	} else if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
8398a3b7a25Sdanborkmann@iogearbox.net 		cur_p->app0 |= 2; /* Tx Full Checksum Offload Enabled */
8408a3b7a25Sdanborkmann@iogearbox.net 	}
8418a3b7a25Sdanborkmann@iogearbox.net 
84217882fd4SRobert Hancock 	phys = dma_map_single(lp->dev, skb->data,
8438a3b7a25Sdanborkmann@iogearbox.net 			      skb_headlen(skb), DMA_TO_DEVICE);
84417882fd4SRobert Hancock 	if (unlikely(dma_mapping_error(lp->dev, phys))) {
84571791dc8SAndre Przywara 		if (net_ratelimit())
84671791dc8SAndre Przywara 			netdev_err(ndev, "TX DMA mapping error\n");
84771791dc8SAndre Przywara 		ndev->stats.tx_dropped++;
848*b01fbbf3SWang Hai 		dev_kfree_skb_any(skb);
84971791dc8SAndre Przywara 		return NETDEV_TX_OK;
85071791dc8SAndre Przywara 	}
8514e958f33SAndre Przywara 	desc_set_phys_addr(lp, phys, cur_p);
85271791dc8SAndre Przywara 	cur_p->cntrl = skb_headlen(skb) | XAXIDMA_BD_CTRL_TXSOF_MASK;
8538a3b7a25Sdanborkmann@iogearbox.net 
8548a3b7a25Sdanborkmann@iogearbox.net 	for (ii = 0; ii < num_frag; ii++) {
855f0cf4000SRobert Hancock 		if (++new_tail_ptr >= lp->tx_bd_num)
856f0cf4000SRobert Hancock 			new_tail_ptr = 0;
857f0cf4000SRobert Hancock 		cur_p = &lp->tx_bd_v[new_tail_ptr];
8588a3b7a25Sdanborkmann@iogearbox.net 		frag = &skb_shinfo(skb)->frags[ii];
85917882fd4SRobert Hancock 		phys = dma_map_single(lp->dev,
8608a3b7a25Sdanborkmann@iogearbox.net 				      skb_frag_address(frag),
8618a3b7a25Sdanborkmann@iogearbox.net 				      skb_frag_size(frag),
8628a3b7a25Sdanborkmann@iogearbox.net 				      DMA_TO_DEVICE);
86317882fd4SRobert Hancock 		if (unlikely(dma_mapping_error(lp->dev, phys))) {
86471791dc8SAndre Przywara 			if (net_ratelimit())
86571791dc8SAndre Przywara 				netdev_err(ndev, "TX DMA mapping error\n");
86671791dc8SAndre Przywara 			ndev->stats.tx_dropped++;
8679e2bc267SRobert Hancock 			axienet_free_tx_chain(lp, orig_tail_ptr, ii + 1,
8689e2bc267SRobert Hancock 					      true, NULL, 0);
869*b01fbbf3SWang Hai 			dev_kfree_skb_any(skb);
87071791dc8SAndre Przywara 			return NETDEV_TX_OK;
87171791dc8SAndre Przywara 		}
8724e958f33SAndre Przywara 		desc_set_phys_addr(lp, phys, cur_p);
8738a3b7a25Sdanborkmann@iogearbox.net 		cur_p->cntrl = skb_frag_size(frag);
8748a3b7a25Sdanborkmann@iogearbox.net 	}
8758a3b7a25Sdanborkmann@iogearbox.net 
8768a3b7a25Sdanborkmann@iogearbox.net 	cur_p->cntrl |= XAXIDMA_BD_CTRL_TXEOF_MASK;
87723e6b2dcSRobert Hancock 	cur_p->skb = skb;
8788a3b7a25Sdanborkmann@iogearbox.net 
879f0cf4000SRobert Hancock 	tail_p = lp->tx_bd_p + sizeof(*lp->tx_bd_v) * new_tail_ptr;
880f0cf4000SRobert Hancock 	if (++new_tail_ptr >= lp->tx_bd_num)
881f0cf4000SRobert Hancock 		new_tail_ptr = 0;
882f0cf4000SRobert Hancock 	WRITE_ONCE(lp->tx_bd_tail, new_tail_ptr);
883f0cf4000SRobert Hancock 
8848a3b7a25Sdanborkmann@iogearbox.net 	/* Start the transfer */
8856a00d0ddSAndre Przywara 	axienet_dma_out_addr(lp, XAXIDMA_TX_TDESC_OFFSET, tail_p);
8868a3b7a25Sdanborkmann@iogearbox.net 
887bb193e3dSRobert Hancock 	/* Stop queue if next transmit may not have space */
888bb193e3dSRobert Hancock 	if (axienet_check_tx_bd_space(lp, MAX_SKB_FRAGS + 1)) {
889bb193e3dSRobert Hancock 		netif_stop_queue(ndev);
890bb193e3dSRobert Hancock 
8919e2bc267SRobert Hancock 		/* Matches barrier in axienet_tx_poll */
892bb193e3dSRobert Hancock 		smp_mb();
893bb193e3dSRobert Hancock 
894bb193e3dSRobert Hancock 		/* Space might have just been freed - check again */
895bb193e3dSRobert Hancock 		if (!axienet_check_tx_bd_space(lp, MAX_SKB_FRAGS + 1))
896bb193e3dSRobert Hancock 			netif_wake_queue(ndev);
897bb193e3dSRobert Hancock 	}
898bb193e3dSRobert Hancock 
8998a3b7a25Sdanborkmann@iogearbox.net 	return NETDEV_TX_OK;
9008a3b7a25Sdanborkmann@iogearbox.net }
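/* The stop/wake logic above relies on axienet_check_tx_bd_space() (defined
 * earlier in this file) to report whether enough descriptors are free for a
 * worst-case skb (head plus MAX_SKB_FRAGS fragments). A minimal sketch of
 * such a free-space check for a single-producer/single-consumer ring,
 * assuming tx_bd_tail and tx_bd_ci are the producer and consumer indices of
 * a ring with lp->tx_bd_num entries, could look like the helper below. This
 * is an illustration only, not the driver's actual implementation.
 */
#if 0	/* illustrative sketch, not built */
static inline int example_tx_ring_free(struct axienet_local *lp)
{
	int used = READ_ONCE(lp->tx_bd_tail) - READ_ONCE(lp->tx_bd_ci);

	if (used < 0)
		used += lp->tx_bd_num;

	/* keep one slot unused so a full ring is distinguishable from empty */
	return lp->tx_bd_num - 1 - used;
}
#endif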
9018a3b7a25Sdanborkmann@iogearbox.net 
9028a3b7a25Sdanborkmann@iogearbox.net /**
9039e2bc267SRobert Hancock  * axienet_rx_poll - Triggered by RX ISR to complete the BD processing.
904cc37610cSRobert Hancock  * @napi:	Pointer to NAPI structure.
9059e2bc267SRobert Hancock  * @budget:	Max number of RX packets to process.
9068a3b7a25Sdanborkmann@iogearbox.net  *
907cc37610cSRobert Hancock  * Return: Number of RX packets processed.
9088a3b7a25Sdanborkmann@iogearbox.net  */
9099e2bc267SRobert Hancock static int axienet_rx_poll(struct napi_struct *napi, int budget)
9108a3b7a25Sdanborkmann@iogearbox.net {
9118a3b7a25Sdanborkmann@iogearbox.net 	u32 length;
9128a3b7a25Sdanborkmann@iogearbox.net 	u32 csumstatus;
9138a3b7a25Sdanborkmann@iogearbox.net 	u32 size = 0;
914cc37610cSRobert Hancock 	int packets = 0;
91538e96b35SPeter Crosthwaite 	dma_addr_t tail_p = 0;
9168a3b7a25Sdanborkmann@iogearbox.net 	struct axidma_bd *cur_p;
917cc37610cSRobert Hancock 	struct sk_buff *skb, *new_skb;
9189e2bc267SRobert Hancock 	struct axienet_local *lp = container_of(napi, struct axienet_local, napi_rx);
9198a3b7a25Sdanborkmann@iogearbox.net 
9208a3b7a25Sdanborkmann@iogearbox.net 	cur_p = &lp->rx_bd_v[lp->rx_bd_ci];
9218a3b7a25Sdanborkmann@iogearbox.net 
922cc37610cSRobert Hancock 	while (packets < budget && (cur_p->status & XAXIDMA_BD_STS_COMPLETE_MASK)) {
9234e958f33SAndre Przywara 		dma_addr_t phys;
9244e958f33SAndre Przywara 
92595978df6SRobert Hancock 		/* Ensure we see complete descriptor update */
92695978df6SRobert Hancock 		dma_rmb();
9278a3b7a25Sdanborkmann@iogearbox.net 
92823e6b2dcSRobert Hancock 		skb = cur_p->skb;
92923e6b2dcSRobert Hancock 		cur_p->skb = NULL;
9307a7d340bSRobert Hancock 
9317a7d340bSRobert Hancock 		/* skb could be NULL if a previous pass already received the
9327a7d340bSRobert Hancock 		 * packet for this slot in the ring, but failed to refill it
9337a7d340bSRobert Hancock 		 * with a newly allocated buffer. In this case, don't try to
9347a7d340bSRobert Hancock 		 * receive it again.
9357a7d340bSRobert Hancock 		 */
9367a7d340bSRobert Hancock 		if (likely(skb)) {
93723e6b2dcSRobert Hancock 			length = cur_p->app4 & 0x0000FFFF;
93823e6b2dcSRobert Hancock 
9397a7d340bSRobert Hancock 			phys = desc_get_phys_addr(lp, cur_p);
94017882fd4SRobert Hancock 			dma_unmap_single(lp->dev, phys, lp->max_frm_size,
9417a7d340bSRobert Hancock 					 DMA_FROM_DEVICE);
9427a7d340bSRobert Hancock 
9438a3b7a25Sdanborkmann@iogearbox.net 			skb_put(skb, length);
944cc37610cSRobert Hancock 			skb->protocol = eth_type_trans(skb, lp->ndev);
9458a3b7a25Sdanborkmann@iogearbox.net 			/*skb_checksum_none_assert(skb);*/
9468a3b7a25Sdanborkmann@iogearbox.net 			skb->ip_summed = CHECKSUM_NONE;
9478a3b7a25Sdanborkmann@iogearbox.net 
9488a3b7a25Sdanborkmann@iogearbox.net 			/* if we're doing Rx csum offload, set it up */
9498a3b7a25Sdanborkmann@iogearbox.net 			if (lp->features & XAE_FEATURE_FULL_RX_CSUM) {
9508a3b7a25Sdanborkmann@iogearbox.net 				csumstatus = (cur_p->app2 &
9518a3b7a25Sdanborkmann@iogearbox.net 					      XAE_FULL_CSUM_STATUS_MASK) >> 3;
9527a7d340bSRobert Hancock 				if (csumstatus == XAE_IP_TCP_CSUM_VALIDATED ||
9537a7d340bSRobert Hancock 				    csumstatus == XAE_IP_UDP_CSUM_VALIDATED) {
9548a3b7a25Sdanborkmann@iogearbox.net 					skb->ip_summed = CHECKSUM_UNNECESSARY;
9558a3b7a25Sdanborkmann@iogearbox.net 				}
9568a3b7a25Sdanborkmann@iogearbox.net 			} else if ((lp->features & XAE_FEATURE_PARTIAL_RX_CSUM) != 0 &&
957ceffc4acSJoe Perches 				   skb->protocol == htons(ETH_P_IP) &&
9588a3b7a25Sdanborkmann@iogearbox.net 				   skb->len > 64) {
9598a3b7a25Sdanborkmann@iogearbox.net 				skb->csum = be32_to_cpu(cur_p->app3 & 0xFFFF);
9608a3b7a25Sdanborkmann@iogearbox.net 				skb->ip_summed = CHECKSUM_COMPLETE;
9618a3b7a25Sdanborkmann@iogearbox.net 			}
9628a3b7a25Sdanborkmann@iogearbox.net 
963cc37610cSRobert Hancock 			napi_gro_receive(napi, skb);
9648a3b7a25Sdanborkmann@iogearbox.net 
9658a3b7a25Sdanborkmann@iogearbox.net 			size += length;
9668a3b7a25Sdanborkmann@iogearbox.net 			packets++;
9677a7d340bSRobert Hancock 		}
9688a3b7a25Sdanborkmann@iogearbox.net 
9696c7e7da2SRobert Hancock 		new_skb = napi_alloc_skb(napi, lp->max_frm_size);
970720a43efSJoe Perches 		if (!new_skb)
9717a7d340bSRobert Hancock 			break;
972720a43efSJoe Perches 
97317882fd4SRobert Hancock 		phys = dma_map_single(lp->dev, new_skb->data,
9748a3b7a25Sdanborkmann@iogearbox.net 				      lp->max_frm_size,
9758a3b7a25Sdanborkmann@iogearbox.net 				      DMA_FROM_DEVICE);
97617882fd4SRobert Hancock 		if (unlikely(dma_mapping_error(lp->dev, phys))) {
97771791dc8SAndre Przywara 			if (net_ratelimit())
978cc37610cSRobert Hancock 				netdev_err(lp->ndev, "RX DMA mapping error\n");
97971791dc8SAndre Przywara 			dev_kfree_skb(new_skb);
9807a7d340bSRobert Hancock 			break;
98171791dc8SAndre Przywara 		}
9824e958f33SAndre Przywara 		desc_set_phys_addr(lp, phys, cur_p);
98371791dc8SAndre Przywara 
9848a3b7a25Sdanborkmann@iogearbox.net 		cur_p->cntrl = lp->max_frm_size;
9858a3b7a25Sdanborkmann@iogearbox.net 		cur_p->status = 0;
98623e6b2dcSRobert Hancock 		cur_p->skb = new_skb;
9878a3b7a25Sdanborkmann@iogearbox.net 
9887a7d340bSRobert Hancock 		/* Only update tail_p to mark this slot as usable after it has
9897a7d340bSRobert Hancock 		 * been successfully refilled.
9907a7d340bSRobert Hancock 		 */
9917a7d340bSRobert Hancock 		tail_p = lp->rx_bd_p + sizeof(*lp->rx_bd_v) * lp->rx_bd_ci;
9927a7d340bSRobert Hancock 
9938b09ca82SRobert Hancock 		if (++lp->rx_bd_ci >= lp->rx_bd_num)
9948b09ca82SRobert Hancock 			lp->rx_bd_ci = 0;
9958a3b7a25Sdanborkmann@iogearbox.net 		cur_p = &lp->rx_bd_v[lp->rx_bd_ci];
9968a3b7a25Sdanborkmann@iogearbox.net 	}
9978a3b7a25Sdanborkmann@iogearbox.net 
998cb45a8bfSRobert Hancock 	u64_stats_update_begin(&lp->rx_stat_sync);
999cb45a8bfSRobert Hancock 	u64_stats_add(&lp->rx_packets, packets);
1000cb45a8bfSRobert Hancock 	u64_stats_add(&lp->rx_bytes, size);
1001cb45a8bfSRobert Hancock 	u64_stats_update_end(&lp->rx_stat_sync);
10028a3b7a25Sdanborkmann@iogearbox.net 
100338e96b35SPeter Crosthwaite 	if (tail_p)
10046a00d0ddSAndre Przywara 		axienet_dma_out_addr(lp, XAXIDMA_RX_TDESC_OFFSET, tail_p);
1005cc37610cSRobert Hancock 
1006cc37610cSRobert Hancock 	if (packets < budget && napi_complete_done(napi, packets)) {
1007cc37610cSRobert Hancock 		/* Re-enable RX completion interrupts. This should
1008cc37610cSRobert Hancock 		 * cause an immediate interrupt if any RX packets are
1009cc37610cSRobert Hancock 		 * already pending.
1010cc37610cSRobert Hancock 		 */
1011cc37610cSRobert Hancock 		axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, lp->rx_dma_cr);
1012cc37610cSRobert Hancock 	}
1013cc37610cSRobert Hancock 	return packets;
10148a3b7a25Sdanborkmann@iogearbox.net }
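/* axienet_rx_poll() (and the TX counterpart earlier in this file) are plain
 * NAPI poll callbacks registered against the net_device in the probe path,
 * which is outside this excerpt. A hookup along the following lines is
 * assumed and shown only for illustration:
 *
 *	netif_napi_add(ndev, &lp->napi_rx, axienet_rx_poll);
 *	netif_napi_add(ndev, &lp->napi_tx, axienet_tx_poll);
 *
 * Completing with fewer packets than the budget tells the NAPI core that RX
 * work is drained, which is why completion interrupts are only re-enabled on
 * the napi_complete_done() path above.
 */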
10158a3b7a25Sdanborkmann@iogearbox.net 
10168a3b7a25Sdanborkmann@iogearbox.net /**
10178a3b7a25Sdanborkmann@iogearbox.net  * axienet_tx_irq - Tx Done Isr.
10188a3b7a25Sdanborkmann@iogearbox.net  * @irq:	irq number
10198a3b7a25Sdanborkmann@iogearbox.net  * @_ndev:	net_device pointer
10208a3b7a25Sdanborkmann@iogearbox.net  *
10219cbc1b68SRobert Hancock  * Return: IRQ_HANDLED if device generated a TX interrupt, IRQ_NONE otherwise.
10228a3b7a25Sdanborkmann@iogearbox.net  *
10239e2bc267SRobert Hancock  * This is the Axi DMA Tx done Isr. It invokes NAPI polling to complete the
10249e2bc267SRobert Hancock  * TX BD processing.
10258a3b7a25Sdanborkmann@iogearbox.net  */
10268a3b7a25Sdanborkmann@iogearbox.net static irqreturn_t axienet_tx_irq(int irq, void *_ndev)
10278a3b7a25Sdanborkmann@iogearbox.net {
10288a3b7a25Sdanborkmann@iogearbox.net 	unsigned int status;
10298a3b7a25Sdanborkmann@iogearbox.net 	struct net_device *ndev = _ndev;
10308a3b7a25Sdanborkmann@iogearbox.net 	struct axienet_local *lp = netdev_priv(ndev);
10318a3b7a25Sdanborkmann@iogearbox.net 
10328a3b7a25Sdanborkmann@iogearbox.net 	status = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET);
103384b9ccc0SRobert Hancock 
10348a3b7a25Sdanborkmann@iogearbox.net 	if (!(status & XAXIDMA_IRQ_ALL_MASK))
10359cbc1b68SRobert Hancock 		return IRQ_NONE;
103684b9ccc0SRobert Hancock 
103784b9ccc0SRobert Hancock 	axienet_dma_out32(lp, XAXIDMA_TX_SR_OFFSET, status);
103884b9ccc0SRobert Hancock 
103984b9ccc0SRobert Hancock 	if (unlikely(status & XAXIDMA_IRQ_ERROR_MASK)) {
104084b9ccc0SRobert Hancock 		netdev_err(ndev, "DMA Tx error 0x%x\n", status);
104184b9ccc0SRobert Hancock 		netdev_err(ndev, "Current BD is at: 0x%x%08x\n",
10424e958f33SAndre Przywara 			   (lp->tx_bd_v[lp->tx_bd_ci]).phys_msb,
10438a3b7a25Sdanborkmann@iogearbox.net 			   (lp->tx_bd_v[lp->tx_bd_ci]).phys);
104424201a64SAndre Przywara 		schedule_work(&lp->dma_err_task);
104584b9ccc0SRobert Hancock 	} else {
10469e2bc267SRobert Hancock 		/* Disable further TX completion interrupts and schedule
10479e2bc267SRobert Hancock 		 * NAPI to handle the completions.
10489e2bc267SRobert Hancock 		 */
10499e2bc267SRobert Hancock 		u32 cr = lp->tx_dma_cr;
10509e2bc267SRobert Hancock 
10519e2bc267SRobert Hancock 		cr &= ~(XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK);
1052bcce1393SSean Anderson 		if (napi_schedule_prep(&lp->napi_tx)) {
10539e2bc267SRobert Hancock 			axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);
1054bcce1393SSean Anderson 			__napi_schedule(&lp->napi_tx);
1055bcce1393SSean Anderson 		}
10568a3b7a25Sdanborkmann@iogearbox.net 	}
105784b9ccc0SRobert Hancock 
10588a3b7a25Sdanborkmann@iogearbox.net 	return IRQ_HANDLED;
10598a3b7a25Sdanborkmann@iogearbox.net }
10608a3b7a25Sdanborkmann@iogearbox.net 
10618a3b7a25Sdanborkmann@iogearbox.net /**
10628a3b7a25Sdanborkmann@iogearbox.net  * axienet_rx_irq - Rx Isr.
10638a3b7a25Sdanborkmann@iogearbox.net  * @irq:	irq number
10648a3b7a25Sdanborkmann@iogearbox.net  * @_ndev:	net_device pointer
10658a3b7a25Sdanborkmann@iogearbox.net  *
10669cbc1b68SRobert Hancock  * Return: IRQ_HANDLED if device generated a RX interrupt, IRQ_NONE otherwise.
10678a3b7a25Sdanborkmann@iogearbox.net  *
1068cc37610cSRobert Hancock  * This is the Axi DMA Rx Isr. It invokes NAPI polling to complete the RX BD
10698a3b7a25Sdanborkmann@iogearbox.net  * processing.
10708a3b7a25Sdanborkmann@iogearbox.net  */
10718a3b7a25Sdanborkmann@iogearbox.net static irqreturn_t axienet_rx_irq(int irq, void *_ndev)
10728a3b7a25Sdanborkmann@iogearbox.net {
10738a3b7a25Sdanborkmann@iogearbox.net 	unsigned int status;
10748a3b7a25Sdanborkmann@iogearbox.net 	struct net_device *ndev = _ndev;
10758a3b7a25Sdanborkmann@iogearbox.net 	struct axienet_local *lp = netdev_priv(ndev);
10768a3b7a25Sdanborkmann@iogearbox.net 
10778a3b7a25Sdanborkmann@iogearbox.net 	status = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET);
107884b9ccc0SRobert Hancock 
10798a3b7a25Sdanborkmann@iogearbox.net 	if (!(status & XAXIDMA_IRQ_ALL_MASK))
10809cbc1b68SRobert Hancock 		return IRQ_NONE;
108184b9ccc0SRobert Hancock 
108284b9ccc0SRobert Hancock 	axienet_dma_out32(lp, XAXIDMA_RX_SR_OFFSET, status);
108384b9ccc0SRobert Hancock 
108484b9ccc0SRobert Hancock 	if (unlikely(status & XAXIDMA_IRQ_ERROR_MASK)) {
108584b9ccc0SRobert Hancock 		netdev_err(ndev, "DMA Rx error 0x%x\n", status);
108684b9ccc0SRobert Hancock 		netdev_err(ndev, "Current BD is at: 0x%x%08x\n",
10874e958f33SAndre Przywara 			   (lp->rx_bd_v[lp->rx_bd_ci]).phys_msb,
10888a3b7a25Sdanborkmann@iogearbox.net 			   (lp->rx_bd_v[lp->rx_bd_ci]).phys);
108924201a64SAndre Przywara 		schedule_work(&lp->dma_err_task);
109084b9ccc0SRobert Hancock 	} else {
1091cc37610cSRobert Hancock 		/* Disable further RX completion interrupts and schedule
1092cc37610cSRobert Hancock 		 * NAPI receive.
1093cc37610cSRobert Hancock 		 */
1094cc37610cSRobert Hancock 		u32 cr = lp->rx_dma_cr;
1095cc37610cSRobert Hancock 
1096cc37610cSRobert Hancock 		cr &= ~(XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK);
1097bcce1393SSean Anderson 		if (napi_schedule_prep(&lp->napi_rx)) {
1098cc37610cSRobert Hancock 			axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);
1099bcce1393SSean Anderson 			__napi_schedule(&lp->napi_rx);
1100bcce1393SSean Anderson 		}
11018a3b7a25Sdanborkmann@iogearbox.net 	}
110284b9ccc0SRobert Hancock 
11038a3b7a25Sdanborkmann@iogearbox.net 	return IRQ_HANDLED;
11048a3b7a25Sdanborkmann@iogearbox.net }
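/* Both DMA interrupt handlers above follow the same pattern: acknowledge the
 * status, mask the IOC/DELAY sources in the channel control register, and
 * defer the real work to NAPI. The matching re-enable is done by the poll
 * routines once completions are drained (see the napi_complete_done() path
 * in axienet_rx_poll() above), so completion interrupts stay masked while
 * polling is in progress.
 */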
11058a3b7a25Sdanborkmann@iogearbox.net 
1106522856ceSRobert Hancock /**
1107522856ceSRobert Hancock  * axienet_eth_irq - Ethernet core Isr.
1108522856ceSRobert Hancock  * @irq:	irq number
1109522856ceSRobert Hancock  * @_ndev:	net_device pointer
1110522856ceSRobert Hancock  *
1111522856ceSRobert Hancock  * Return: IRQ_HANDLED if device generated a core interrupt, IRQ_NONE otherwise.
1112522856ceSRobert Hancock  *
1113522856ceSRobert Hancock  * Handle miscellaneous conditions indicated by Ethernet core IRQ.
1114522856ceSRobert Hancock  */
1115522856ceSRobert Hancock static irqreturn_t axienet_eth_irq(int irq, void *_ndev)
1116522856ceSRobert Hancock {
1117522856ceSRobert Hancock 	struct net_device *ndev = _ndev;
1118522856ceSRobert Hancock 	struct axienet_local *lp = netdev_priv(ndev);
1119522856ceSRobert Hancock 	unsigned int pending;
1120522856ceSRobert Hancock 
1121522856ceSRobert Hancock 	pending = axienet_ior(lp, XAE_IP_OFFSET);
1122522856ceSRobert Hancock 	if (!pending)
1123522856ceSRobert Hancock 		return IRQ_NONE;
1124522856ceSRobert Hancock 
1125522856ceSRobert Hancock 	if (pending & XAE_INT_RXFIFOOVR_MASK)
1126522856ceSRobert Hancock 		ndev->stats.rx_missed_errors++;
1127522856ceSRobert Hancock 
1128522856ceSRobert Hancock 	if (pending & XAE_INT_RXRJECT_MASK)
1129522856ceSRobert Hancock 		ndev->stats.rx_frame_errors++;
1130522856ceSRobert Hancock 
1131522856ceSRobert Hancock 	axienet_iow(lp, XAE_IS_OFFSET, pending);
1132522856ceSRobert Hancock 	return IRQ_HANDLED;
1133522856ceSRobert Hancock }
1134522856ceSRobert Hancock 
113524201a64SAndre Przywara static void axienet_dma_err_handler(struct work_struct *work);
1136aecb55beSJeff Mahoney 
11378a3b7a25Sdanborkmann@iogearbox.net /**
11388a3b7a25Sdanborkmann@iogearbox.net  * axienet_open - Driver open routine.
11398a3b7a25Sdanborkmann@iogearbox.net  * @ndev:	Pointer to net_device structure
11408a3b7a25Sdanborkmann@iogearbox.net  *
1141b0d081c5SMichal Simek  * Return: 0 on success, or a
11428a3b7a25Sdanborkmann@iogearbox.net  *	    non-zero error value on failure
11438a3b7a25Sdanborkmann@iogearbox.net  *
1144f5203a3dSRobert Hancock  * This is the driver open routine. It calls phylink_start to start the
1145f5203a3dSRobert Hancock  * PHY device.
11468a3b7a25Sdanborkmann@iogearbox.net  * It also registers the interrupt service routines, enables the interrupt lines
11478a3b7a25Sdanborkmann@iogearbox.net  * and ISR handling. Axi Ethernet core is reset through Axi DMA core. Buffer
11488a3b7a25Sdanborkmann@iogearbox.net  * descriptors are initialized.
11498a3b7a25Sdanborkmann@iogearbox.net  */
11508a3b7a25Sdanborkmann@iogearbox.net static int axienet_open(struct net_device *ndev)
11518a3b7a25Sdanborkmann@iogearbox.net {
11527789e9edSRobert Hancock 	int ret;
11538a3b7a25Sdanborkmann@iogearbox.net 	struct axienet_local *lp = netdev_priv(ndev);
11548a3b7a25Sdanborkmann@iogearbox.net 
11558a3b7a25Sdanborkmann@iogearbox.net 	dev_dbg(&ndev->dev, "axienet_open()\n");
11568a3b7a25Sdanborkmann@iogearbox.net 
1157253761a0SClayton Rayment 	/* When we do an Axi Ethernet reset, it resets the complete core
1158253761a0SClayton Rayment 	 * including the MDIO. MDIO must be disabled before resetting.
11597789e9edSRobert Hancock 	 * Hold MDIO bus lock to avoid MDIO accesses during the reset.
1160850a7503SMichal Simek 	 */
1161de9c7854SDaniel Mack 	axienet_lock_mii(lp);
1162ee44d0b7SAndre Przywara 	ret = axienet_device_reset(ndev);
1163de9c7854SDaniel Mack 	axienet_unlock_mii(lp);
11648a3b7a25Sdanborkmann@iogearbox.net 
1165f5203a3dSRobert Hancock 	ret = phylink_of_phy_connect(lp->phylink, lp->dev->of_node, 0);
1166f5203a3dSRobert Hancock 	if (ret) {
1167f5203a3dSRobert Hancock 		dev_err(lp->dev, "phylink_of_phy_connect() failed: %d\n", ret);
1168f5203a3dSRobert Hancock 		return ret;
11698a3b7a25Sdanborkmann@iogearbox.net 	}
11708a3b7a25Sdanborkmann@iogearbox.net 
1171f5203a3dSRobert Hancock 	phylink_start(lp->phylink);
1172f5203a3dSRobert Hancock 
117324201a64SAndre Przywara 	/* Enable worker thread for Axi DMA error handling */
1174b1e1daf0SSean Anderson 	lp->stopping = false;
117524201a64SAndre Przywara 	INIT_WORK(&lp->dma_err_task, axienet_dma_err_handler);
117671c6c837SXiaotian Feng 
11779e2bc267SRobert Hancock 	napi_enable(&lp->napi_rx);
11789e2bc267SRobert Hancock 	napi_enable(&lp->napi_tx);
1179cc37610cSRobert Hancock 
11808a3b7a25Sdanborkmann@iogearbox.net 	/* Enable interrupts for Axi DMA Tx */
11819cbc1b68SRobert Hancock 	ret = request_irq(lp->tx_irq, axienet_tx_irq, IRQF_SHARED,
11829cbc1b68SRobert Hancock 			  ndev->name, ndev);
11838a3b7a25Sdanborkmann@iogearbox.net 	if (ret)
11848a3b7a25Sdanborkmann@iogearbox.net 		goto err_tx_irq;
11858a3b7a25Sdanborkmann@iogearbox.net 	/* Enable interrupts for Axi DMA Rx */
11869cbc1b68SRobert Hancock 	ret = request_irq(lp->rx_irq, axienet_rx_irq, IRQF_SHARED,
11879cbc1b68SRobert Hancock 			  ndev->name, ndev);
11888a3b7a25Sdanborkmann@iogearbox.net 	if (ret)
11898a3b7a25Sdanborkmann@iogearbox.net 		goto err_rx_irq;
1190522856ceSRobert Hancock 	/* Enable interrupts for Axi Ethernet core (if defined) */
1191522856ceSRobert Hancock 	if (lp->eth_irq > 0) {
1192522856ceSRobert Hancock 		ret = request_irq(lp->eth_irq, axienet_eth_irq, IRQF_SHARED,
1193522856ceSRobert Hancock 				  ndev->name, ndev);
1194522856ceSRobert Hancock 		if (ret)
1195522856ceSRobert Hancock 			goto err_eth_irq;
1196522856ceSRobert Hancock 	}
119771c6c837SXiaotian Feng 
11988a3b7a25Sdanborkmann@iogearbox.net 	return 0;
11998a3b7a25Sdanborkmann@iogearbox.net 
1200522856ceSRobert Hancock err_eth_irq:
1201522856ceSRobert Hancock 	free_irq(lp->rx_irq, ndev);
12028a3b7a25Sdanborkmann@iogearbox.net err_rx_irq:
12038a3b7a25Sdanborkmann@iogearbox.net 	free_irq(lp->tx_irq, ndev);
12048a3b7a25Sdanborkmann@iogearbox.net err_tx_irq:
12059e2bc267SRobert Hancock 	napi_disable(&lp->napi_tx);
12069e2bc267SRobert Hancock 	napi_disable(&lp->napi_rx);
1207f5203a3dSRobert Hancock 	phylink_stop(lp->phylink);
1208f5203a3dSRobert Hancock 	phylink_disconnect_phy(lp->phylink);
120924201a64SAndre Przywara 	cancel_work_sync(&lp->dma_err_task);
12108a3b7a25Sdanborkmann@iogearbox.net 	dev_err(lp->dev, "request_irq() failed\n");
12118a3b7a25Sdanborkmann@iogearbox.net 	return ret;
12128a3b7a25Sdanborkmann@iogearbox.net }
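/* Note that the Ethernet core IRQ is optional: it is only requested when the
 * platform actually provides one (lp->eth_irq > 0), and axienet_stop() below
 * mirrors that check when freeing it. The error unwinding above releases
 * resources in the reverse order of their setup.
 */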
12138a3b7a25Sdanborkmann@iogearbox.net 
12148a3b7a25Sdanborkmann@iogearbox.net /**
12158a3b7a25Sdanborkmann@iogearbox.net  * axienet_stop - Driver stop routine.
12168a3b7a25Sdanborkmann@iogearbox.net  * @ndev:	Pointer to net_device structure
12178a3b7a25Sdanborkmann@iogearbox.net  *
1218b0d081c5SMichal Simek  * Return: 0 always.
12198a3b7a25Sdanborkmann@iogearbox.net  *
1220f5203a3dSRobert Hancock  * This is the driver stop routine. It calls phylink_stop() to stop the PHY
12218a3b7a25Sdanborkmann@iogearbox.net  * device. It also removes the interrupt handlers and disables the interrupts.
12228a3b7a25Sdanborkmann@iogearbox.net  * The Axi DMA Tx/Rx BDs are released.
12238a3b7a25Sdanborkmann@iogearbox.net  */
12248a3b7a25Sdanborkmann@iogearbox.net static int axienet_stop(struct net_device *ndev)
12258a3b7a25Sdanborkmann@iogearbox.net {
12268a3b7a25Sdanborkmann@iogearbox.net 	struct axienet_local *lp = netdev_priv(ndev);
12278a3b7a25Sdanborkmann@iogearbox.net 
12288a3b7a25Sdanborkmann@iogearbox.net 	dev_dbg(&ndev->dev, "axienet_close()\n");
12298a3b7a25Sdanborkmann@iogearbox.net 
1230b1e1daf0SSean Anderson 	WRITE_ONCE(lp->stopping, true);
1231b1e1daf0SSean Anderson 	flush_work(&lp->dma_err_task);
1232b1e1daf0SSean Anderson 
12339e2bc267SRobert Hancock 	napi_disable(&lp->napi_tx);
12349e2bc267SRobert Hancock 	napi_disable(&lp->napi_rx);
1235cc37610cSRobert Hancock 
1236f5203a3dSRobert Hancock 	phylink_stop(lp->phylink);
1237f5203a3dSRobert Hancock 	phylink_disconnect_phy(lp->phylink);
1238f5203a3dSRobert Hancock 
12398a3b7a25Sdanborkmann@iogearbox.net 	axienet_setoptions(ndev, lp->options &
12408a3b7a25Sdanborkmann@iogearbox.net 			   ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
12418a3b7a25Sdanborkmann@iogearbox.net 
124284b9ccc0SRobert Hancock 	axienet_dma_stop(lp);
1243489d4d77SRobert Hancock 
1244489d4d77SRobert Hancock 	axienet_iow(lp, XAE_IE_OFFSET, 0);
1245489d4d77SRobert Hancock 
124624201a64SAndre Przywara 	cancel_work_sync(&lp->dma_err_task);
12478a3b7a25Sdanborkmann@iogearbox.net 
1248522856ceSRobert Hancock 	if (lp->eth_irq > 0)
1249522856ceSRobert Hancock 		free_irq(lp->eth_irq, ndev);
12508a3b7a25Sdanborkmann@iogearbox.net 	free_irq(lp->tx_irq, ndev);
12518a3b7a25Sdanborkmann@iogearbox.net 	free_irq(lp->rx_irq, ndev);
12528a3b7a25Sdanborkmann@iogearbox.net 
12538a3b7a25Sdanborkmann@iogearbox.net 	axienet_dma_bd_release(ndev);
12548a3b7a25Sdanborkmann@iogearbox.net 	return 0;
12558a3b7a25Sdanborkmann@iogearbox.net }
12568a3b7a25Sdanborkmann@iogearbox.net 
12578a3b7a25Sdanborkmann@iogearbox.net /**
12588a3b7a25Sdanborkmann@iogearbox.net  * axienet_change_mtu - Driver change mtu routine.
12598a3b7a25Sdanborkmann@iogearbox.net  * @ndev:	Pointer to net_device structure
12608a3b7a25Sdanborkmann@iogearbox.net  * @new_mtu:	New mtu value to be applied
12618a3b7a25Sdanborkmann@iogearbox.net  *
1262b0d081c5SMichal Simek  * Return: 0 on success, -EBUSY if the device is running, or -EINVAL if
1262b0d081c5SMichal Simek  * the requested MTU does not fit in the configured receive memory.
12638a3b7a25Sdanborkmann@iogearbox.net  *
12648a3b7a25Sdanborkmann@iogearbox.net  * This is the change mtu driver routine. It checks if the Axi Ethernet
12658a3b7a25Sdanborkmann@iogearbox.net  * hardware supports jumbo frames before changing the mtu. This can be
12668a3b7a25Sdanborkmann@iogearbox.net  * called only when the device is not up.
12678a3b7a25Sdanborkmann@iogearbox.net  */
12688a3b7a25Sdanborkmann@iogearbox.net static int axienet_change_mtu(struct net_device *ndev, int new_mtu)
12698a3b7a25Sdanborkmann@iogearbox.net {
12708a3b7a25Sdanborkmann@iogearbox.net 	struct axienet_local *lp = netdev_priv(ndev);
12718a3b7a25Sdanborkmann@iogearbox.net 
12728a3b7a25Sdanborkmann@iogearbox.net 	if (netif_running(ndev))
12738a3b7a25Sdanborkmann@iogearbox.net 		return -EBUSY;
1274f080a8c3SSrikanth Thokala 
1275f080a8c3SSrikanth Thokala 	if ((new_mtu + VLAN_ETH_HLEN +
1276f080a8c3SSrikanth Thokala 		XAE_TRL_SIZE) > lp->rxmem)
1277f080a8c3SSrikanth Thokala 		return -EINVAL;
1278f080a8c3SSrikanth Thokala 
12798a3b7a25Sdanborkmann@iogearbox.net 	ndev->mtu = new_mtu;
12808a3b7a25Sdanborkmann@iogearbox.net 
12818a3b7a25Sdanborkmann@iogearbox.net 	return 0;
12828a3b7a25Sdanborkmann@iogearbox.net }
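/* Worked example of the bound above, assuming XAE_TRL_SIZE is the 4-byte
 * frame trailer and VLAN_ETH_HLEN is 18 bytes: with 32 KiB of receive
 * memory configured in the IP ("xlnx,rxmem" = 0x8000), the largest MTU
 * accepted here would be 0x8000 - 18 - 4 = 32746 bytes.
 */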
12838a3b7a25Sdanborkmann@iogearbox.net 
12848a3b7a25Sdanborkmann@iogearbox.net #ifdef CONFIG_NET_POLL_CONTROLLER
12858a3b7a25Sdanborkmann@iogearbox.net /**
12868a3b7a25Sdanborkmann@iogearbox.net  * axienet_poll_controller - Axi Ethernet poll mechanism.
12878a3b7a25Sdanborkmann@iogearbox.net  * @ndev:	Pointer to net_device structure
12888a3b7a25Sdanborkmann@iogearbox.net  *
12898a3b7a25Sdanborkmann@iogearbox.net  * This implements Rx/Tx ISR poll mechanisms. The interrupts are disabled prior
12908a3b7a25Sdanborkmann@iogearbox.net  * to polling the ISRs and are re-enabled after the polling is done.
12918a3b7a25Sdanborkmann@iogearbox.net  */
12928a3b7a25Sdanborkmann@iogearbox.net static void axienet_poll_controller(struct net_device *ndev)
12938a3b7a25Sdanborkmann@iogearbox.net {
12948a3b7a25Sdanborkmann@iogearbox.net 	struct axienet_local *lp = netdev_priv(ndev);
12958a3b7a25Sdanborkmann@iogearbox.net 	disable_irq(lp->tx_irq);
12968a3b7a25Sdanborkmann@iogearbox.net 	disable_irq(lp->rx_irq);
12978a3b7a25Sdanborkmann@iogearbox.net 	axienet_rx_irq(lp->tx_irq, ndev);
12988a3b7a25Sdanborkmann@iogearbox.net 	axienet_tx_irq(lp->rx_irq, ndev);
12998a3b7a25Sdanborkmann@iogearbox.net 	enable_irq(lp->tx_irq);
13008a3b7a25Sdanborkmann@iogearbox.net 	enable_irq(lp->rx_irq);
13018a3b7a25Sdanborkmann@iogearbox.net }
13028a3b7a25Sdanborkmann@iogearbox.net #endif
13038a3b7a25Sdanborkmann@iogearbox.net 
13042a9b65eaSAndre Przywara static int axienet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
13052a9b65eaSAndre Przywara {
13062a9b65eaSAndre Przywara 	struct axienet_local *lp = netdev_priv(dev);
13072a9b65eaSAndre Przywara 
13082a9b65eaSAndre Przywara 	if (!netif_running(dev))
13092a9b65eaSAndre Przywara 		return -EINVAL;
13102a9b65eaSAndre Przywara 
13112a9b65eaSAndre Przywara 	return phylink_mii_ioctl(lp->phylink, rq, cmd);
13122a9b65eaSAndre Przywara }
13132a9b65eaSAndre Przywara 
1314cb45a8bfSRobert Hancock static void
1315cb45a8bfSRobert Hancock axienet_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
1316cb45a8bfSRobert Hancock {
1317cb45a8bfSRobert Hancock 	struct axienet_local *lp = netdev_priv(dev);
1318cb45a8bfSRobert Hancock 	unsigned int start;
1319cb45a8bfSRobert Hancock 
1320cb45a8bfSRobert Hancock 	netdev_stats_to_stats64(stats, &dev->stats);
1321cb45a8bfSRobert Hancock 
1322cb45a8bfSRobert Hancock 	do {
1323068c38adSThomas Gleixner 		start = u64_stats_fetch_begin(&lp->rx_stat_sync);
1324cb45a8bfSRobert Hancock 		stats->rx_packets = u64_stats_read(&lp->rx_packets);
1325cb45a8bfSRobert Hancock 		stats->rx_bytes = u64_stats_read(&lp->rx_bytes);
1326068c38adSThomas Gleixner 	} while (u64_stats_fetch_retry(&lp->rx_stat_sync, start));
1327cb45a8bfSRobert Hancock 
1328cb45a8bfSRobert Hancock 	do {
1329068c38adSThomas Gleixner 		start = u64_stats_fetch_begin(&lp->tx_stat_sync);
1330cb45a8bfSRobert Hancock 		stats->tx_packets = u64_stats_read(&lp->tx_packets);
1331cb45a8bfSRobert Hancock 		stats->tx_bytes = u64_stats_read(&lp->tx_bytes);
1332068c38adSThomas Gleixner 	} while (u64_stats_fetch_retry(&lp->tx_stat_sync, start));
1333cb45a8bfSRobert Hancock }
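/* The fetch/retry loops above pair with the u64_stats_update_begin()/
 * u64_stats_update_end() writer sections in the RX/TX poll routines, so a
 * reader racing with an update simply retries and always reports a
 * consistent packets/bytes pair, even on 32-bit systems where the 64-bit
 * counters cannot be read atomically.
 */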
1334cb45a8bfSRobert Hancock 
13358a3b7a25Sdanborkmann@iogearbox.net static const struct net_device_ops axienet_netdev_ops = {
13368a3b7a25Sdanborkmann@iogearbox.net 	.ndo_open = axienet_open,
13378a3b7a25Sdanborkmann@iogearbox.net 	.ndo_stop = axienet_stop,
13388a3b7a25Sdanborkmann@iogearbox.net 	.ndo_start_xmit = axienet_start_xmit,
1339cb45a8bfSRobert Hancock 	.ndo_get_stats64 = axienet_get_stats64,
13408a3b7a25Sdanborkmann@iogearbox.net 	.ndo_change_mtu	= axienet_change_mtu,
13418a3b7a25Sdanborkmann@iogearbox.net 	.ndo_set_mac_address = netdev_set_mac_address,
13428a3b7a25Sdanborkmann@iogearbox.net 	.ndo_validate_addr = eth_validate_addr,
1343a7605370SArnd Bergmann 	.ndo_eth_ioctl = axienet_ioctl,
13448a3b7a25Sdanborkmann@iogearbox.net 	.ndo_set_rx_mode = axienet_set_multicast_list,
13458a3b7a25Sdanborkmann@iogearbox.net #ifdef CONFIG_NET_POLL_CONTROLLER
13468a3b7a25Sdanborkmann@iogearbox.net 	.ndo_poll_controller = axienet_poll_controller,
13478a3b7a25Sdanborkmann@iogearbox.net #endif
13488a3b7a25Sdanborkmann@iogearbox.net };
13498a3b7a25Sdanborkmann@iogearbox.net 
13508a3b7a25Sdanborkmann@iogearbox.net /**
13518a3b7a25Sdanborkmann@iogearbox.net  * axienet_ethtools_get_drvinfo - Get various Axi Ethernet driver information.
13528a3b7a25Sdanborkmann@iogearbox.net  * @ndev:	Pointer to net_device structure
13538a3b7a25Sdanborkmann@iogearbox.net  * @ed:		Pointer to ethtool_drvinfo structure
13548a3b7a25Sdanborkmann@iogearbox.net  *
13558a3b7a25Sdanborkmann@iogearbox.net  * This implements ethtool command for getting the driver information.
13568a3b7a25Sdanborkmann@iogearbox.net  * Issue "ethtool -i ethX" under linux prompt to execute this function.
13578a3b7a25Sdanborkmann@iogearbox.net  */
13588a3b7a25Sdanborkmann@iogearbox.net static void axienet_ethtools_get_drvinfo(struct net_device *ndev,
13598a3b7a25Sdanborkmann@iogearbox.net 					 struct ethtool_drvinfo *ed)
13608a3b7a25Sdanborkmann@iogearbox.net {
1361f029c781SWolfram Sang 	strscpy(ed->driver, DRIVER_NAME, sizeof(ed->driver));
1362f029c781SWolfram Sang 	strscpy(ed->version, DRIVER_VERSION, sizeof(ed->version));
13638a3b7a25Sdanborkmann@iogearbox.net }
13648a3b7a25Sdanborkmann@iogearbox.net 
13658a3b7a25Sdanborkmann@iogearbox.net /**
13668a3b7a25Sdanborkmann@iogearbox.net  * axienet_ethtools_get_regs_len - Get the total regs length present in the
13678a3b7a25Sdanborkmann@iogearbox.net  *				   AxiEthernet core.
13688a3b7a25Sdanborkmann@iogearbox.net  * @ndev:	Pointer to net_device structure
13698a3b7a25Sdanborkmann@iogearbox.net  *
13708a3b7a25Sdanborkmann@iogearbox.net  * This implements ethtool command for getting the total register length
13718a3b7a25Sdanborkmann@iogearbox.net  * information.
1372b0d081c5SMichal Simek  *
1373b0d081c5SMichal Simek  * Return: the total regs length
13748a3b7a25Sdanborkmann@iogearbox.net  */
13758a3b7a25Sdanborkmann@iogearbox.net static int axienet_ethtools_get_regs_len(struct net_device *ndev)
13768a3b7a25Sdanborkmann@iogearbox.net {
13778a3b7a25Sdanborkmann@iogearbox.net 	return sizeof(u32) * AXIENET_REGS_N;
13788a3b7a25Sdanborkmann@iogearbox.net }
13798a3b7a25Sdanborkmann@iogearbox.net 
13808a3b7a25Sdanborkmann@iogearbox.net /**
13818a3b7a25Sdanborkmann@iogearbox.net  * axienet_ethtools_get_regs - Dump the contents of all registers present
13828a3b7a25Sdanborkmann@iogearbox.net  *			       in AxiEthernet core.
13838a3b7a25Sdanborkmann@iogearbox.net  * @ndev:	Pointer to net_device structure
13848a3b7a25Sdanborkmann@iogearbox.net  * @regs:	Pointer to ethtool_regs structure
13858a3b7a25Sdanborkmann@iogearbox.net  * @ret:	Void pointer used to return the contents of the registers.
13868a3b7a25Sdanborkmann@iogearbox.net  *
13878a3b7a25Sdanborkmann@iogearbox.net  * This implements ethtool command for getting the Axi Ethernet register dump.
13888a3b7a25Sdanborkmann@iogearbox.net  * Issue "ethtool -d ethX" to execute this function.
13898a3b7a25Sdanborkmann@iogearbox.net  */
13908a3b7a25Sdanborkmann@iogearbox.net static void axienet_ethtools_get_regs(struct net_device *ndev,
13918a3b7a25Sdanborkmann@iogearbox.net 				      struct ethtool_regs *regs, void *ret)
13928a3b7a25Sdanborkmann@iogearbox.net {
13938a3b7a25Sdanborkmann@iogearbox.net 	u32 *data = (u32 *)ret;
13948a3b7a25Sdanborkmann@iogearbox.net 	size_t len = sizeof(u32) * AXIENET_REGS_N;
13958a3b7a25Sdanborkmann@iogearbox.net 	struct axienet_local *lp = netdev_priv(ndev);
13968a3b7a25Sdanborkmann@iogearbox.net 
13978a3b7a25Sdanborkmann@iogearbox.net 	regs->version = 0;
13988a3b7a25Sdanborkmann@iogearbox.net 	regs->len = len;
13998a3b7a25Sdanborkmann@iogearbox.net 
14008a3b7a25Sdanborkmann@iogearbox.net 	memset(data, 0, len);
14018a3b7a25Sdanborkmann@iogearbox.net 	data[0] = axienet_ior(lp, XAE_RAF_OFFSET);
14028a3b7a25Sdanborkmann@iogearbox.net 	data[1] = axienet_ior(lp, XAE_TPF_OFFSET);
14038a3b7a25Sdanborkmann@iogearbox.net 	data[2] = axienet_ior(lp, XAE_IFGP_OFFSET);
14048a3b7a25Sdanborkmann@iogearbox.net 	data[3] = axienet_ior(lp, XAE_IS_OFFSET);
14058a3b7a25Sdanborkmann@iogearbox.net 	data[4] = axienet_ior(lp, XAE_IP_OFFSET);
14068a3b7a25Sdanborkmann@iogearbox.net 	data[5] = axienet_ior(lp, XAE_IE_OFFSET);
14078a3b7a25Sdanborkmann@iogearbox.net 	data[6] = axienet_ior(lp, XAE_TTAG_OFFSET);
14088a3b7a25Sdanborkmann@iogearbox.net 	data[7] = axienet_ior(lp, XAE_RTAG_OFFSET);
14098a3b7a25Sdanborkmann@iogearbox.net 	data[8] = axienet_ior(lp, XAE_UAWL_OFFSET);
14108a3b7a25Sdanborkmann@iogearbox.net 	data[9] = axienet_ior(lp, XAE_UAWU_OFFSET);
14118a3b7a25Sdanborkmann@iogearbox.net 	data[10] = axienet_ior(lp, XAE_TPID0_OFFSET);
14128a3b7a25Sdanborkmann@iogearbox.net 	data[11] = axienet_ior(lp, XAE_TPID1_OFFSET);
14138a3b7a25Sdanborkmann@iogearbox.net 	data[12] = axienet_ior(lp, XAE_PPST_OFFSET);
14148a3b7a25Sdanborkmann@iogearbox.net 	data[13] = axienet_ior(lp, XAE_RCW0_OFFSET);
14158a3b7a25Sdanborkmann@iogearbox.net 	data[14] = axienet_ior(lp, XAE_RCW1_OFFSET);
14168a3b7a25Sdanborkmann@iogearbox.net 	data[15] = axienet_ior(lp, XAE_TC_OFFSET);
14178a3b7a25Sdanborkmann@iogearbox.net 	data[16] = axienet_ior(lp, XAE_FCC_OFFSET);
14188a3b7a25Sdanborkmann@iogearbox.net 	data[17] = axienet_ior(lp, XAE_EMMC_OFFSET);
14198a3b7a25Sdanborkmann@iogearbox.net 	data[18] = axienet_ior(lp, XAE_PHYC_OFFSET);
14208a3b7a25Sdanborkmann@iogearbox.net 	data[19] = axienet_ior(lp, XAE_MDIO_MC_OFFSET);
14218a3b7a25Sdanborkmann@iogearbox.net 	data[20] = axienet_ior(lp, XAE_MDIO_MCR_OFFSET);
14228a3b7a25Sdanborkmann@iogearbox.net 	data[21] = axienet_ior(lp, XAE_MDIO_MWD_OFFSET);
14238a3b7a25Sdanborkmann@iogearbox.net 	data[22] = axienet_ior(lp, XAE_MDIO_MRD_OFFSET);
14248a3b7a25Sdanborkmann@iogearbox.net 	data[27] = axienet_ior(lp, XAE_UAW0_OFFSET);
14258a3b7a25Sdanborkmann@iogearbox.net 	data[28] = axienet_ior(lp, XAE_UAW1_OFFSET);
14268a3b7a25Sdanborkmann@iogearbox.net 	data[29] = axienet_ior(lp, XAE_FMI_OFFSET);
14278a3b7a25Sdanborkmann@iogearbox.net 	data[30] = axienet_ior(lp, XAE_AF0_OFFSET);
14288a3b7a25Sdanborkmann@iogearbox.net 	data[31] = axienet_ior(lp, XAE_AF1_OFFSET);
1429867d03bcSRobert Hancock 	data[32] = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
1430867d03bcSRobert Hancock 	data[33] = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET);
1431867d03bcSRobert Hancock 	data[34] = axienet_dma_in32(lp, XAXIDMA_TX_CDESC_OFFSET);
1432867d03bcSRobert Hancock 	data[35] = axienet_dma_in32(lp, XAXIDMA_TX_TDESC_OFFSET);
1433867d03bcSRobert Hancock 	data[36] = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
1434867d03bcSRobert Hancock 	data[37] = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET);
1435867d03bcSRobert Hancock 	data[38] = axienet_dma_in32(lp, XAXIDMA_RX_CDESC_OFFSET);
1436867d03bcSRobert Hancock 	data[39] = axienet_dma_in32(lp, XAXIDMA_RX_TDESC_OFFSET);
14378a3b7a25Sdanborkmann@iogearbox.net }
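/* Indices 23-26 of the dump are left at zero by the memset() above;
 * presumably the layout keeps a fixed position for each register so that the
 * AXIENET_REGS_N-word buffer lines up with the same offsets even where the
 * register block has gaps.
 */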
14388a3b7a25Sdanborkmann@iogearbox.net 
143974624944SHao Chen static void
144074624944SHao Chen axienet_ethtools_get_ringparam(struct net_device *ndev,
144174624944SHao Chen 			       struct ethtool_ringparam *ering,
144274624944SHao Chen 			       struct kernel_ethtool_ringparam *kernel_ering,
144374624944SHao Chen 			       struct netlink_ext_ack *extack)
14448b09ca82SRobert Hancock {
14458b09ca82SRobert Hancock 	struct axienet_local *lp = netdev_priv(ndev);
14468b09ca82SRobert Hancock 
14478b09ca82SRobert Hancock 	ering->rx_max_pending = RX_BD_NUM_MAX;
14488b09ca82SRobert Hancock 	ering->rx_mini_max_pending = 0;
14498b09ca82SRobert Hancock 	ering->rx_jumbo_max_pending = 0;
14508b09ca82SRobert Hancock 	ering->tx_max_pending = TX_BD_NUM_MAX;
14518b09ca82SRobert Hancock 	ering->rx_pending = lp->rx_bd_num;
14528b09ca82SRobert Hancock 	ering->rx_mini_pending = 0;
14538b09ca82SRobert Hancock 	ering->rx_jumbo_pending = 0;
14548b09ca82SRobert Hancock 	ering->tx_pending = lp->tx_bd_num;
14558b09ca82SRobert Hancock }
14568b09ca82SRobert Hancock 
145774624944SHao Chen static int
145874624944SHao Chen axienet_ethtools_set_ringparam(struct net_device *ndev,
145974624944SHao Chen 			       struct ethtool_ringparam *ering,
146074624944SHao Chen 			       struct kernel_ethtool_ringparam *kernel_ering,
146174624944SHao Chen 			       struct netlink_ext_ack *extack)
14628b09ca82SRobert Hancock {
14638b09ca82SRobert Hancock 	struct axienet_local *lp = netdev_priv(ndev);
14648b09ca82SRobert Hancock 
14658b09ca82SRobert Hancock 	if (ering->rx_pending > RX_BD_NUM_MAX ||
14668b09ca82SRobert Hancock 	    ering->rx_mini_pending ||
14678b09ca82SRobert Hancock 	    ering->rx_jumbo_pending ||
146870f5817dSRobert Hancock 	    ering->tx_pending < TX_BD_NUM_MIN ||
146970f5817dSRobert Hancock 	    ering->tx_pending > TX_BD_NUM_MAX)
14708b09ca82SRobert Hancock 		return -EINVAL;
14718b09ca82SRobert Hancock 
14728b09ca82SRobert Hancock 	if (netif_running(ndev))
14738b09ca82SRobert Hancock 		return -EBUSY;
14748b09ca82SRobert Hancock 
14758b09ca82SRobert Hancock 	lp->rx_bd_num = ering->rx_pending;
14768b09ca82SRobert Hancock 	lp->tx_bd_num = ering->tx_pending;
14778b09ca82SRobert Hancock 	return 0;
14788b09ca82SRobert Hancock }
14798b09ca82SRobert Hancock 
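/* The lower bound on tx_pending enforced above ensures the TX ring can hold
 * at least one maximally fragmented skb (head plus MAX_SKB_FRAGS fragments),
 * as required by axienet_start_xmit(). New ring sizes only take effect on
 * the next open, since changes are rejected while the interface is running.
 */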
14808a3b7a25Sdanborkmann@iogearbox.net /**
14818a3b7a25Sdanborkmann@iogearbox.net  * axienet_ethtools_get_pauseparam - Get the pause parameter setting for
14828a3b7a25Sdanborkmann@iogearbox.net  *				     Tx and Rx paths.
14838a3b7a25Sdanborkmann@iogearbox.net  * @ndev:	Pointer to net_device structure
14848a3b7a25Sdanborkmann@iogearbox.net  * @epauseparm:	Pointer to ethtool_pauseparam structure.
14858a3b7a25Sdanborkmann@iogearbox.net  *
14868a3b7a25Sdanborkmann@iogearbox.net  * This implements ethtool command for getting axi ethernet pause frame
14878a3b7a25Sdanborkmann@iogearbox.net  * setting. Issue "ethtool -a ethX" to execute this function.
14888a3b7a25Sdanborkmann@iogearbox.net  */
14898a3b7a25Sdanborkmann@iogearbox.net static void
14908a3b7a25Sdanborkmann@iogearbox.net axienet_ethtools_get_pauseparam(struct net_device *ndev,
14918a3b7a25Sdanborkmann@iogearbox.net 				struct ethtool_pauseparam *epauseparm)
14928a3b7a25Sdanborkmann@iogearbox.net {
14938a3b7a25Sdanborkmann@iogearbox.net 	struct axienet_local *lp = netdev_priv(ndev);
1494f5203a3dSRobert Hancock 
1495f5203a3dSRobert Hancock 	phylink_ethtool_get_pauseparam(lp->phylink, epauseparm);
14968a3b7a25Sdanborkmann@iogearbox.net }
14978a3b7a25Sdanborkmann@iogearbox.net 
14988a3b7a25Sdanborkmann@iogearbox.net /**
14998a3b7a25Sdanborkmann@iogearbox.net  * axienet_ethtools_set_pauseparam - Set device pause parameter(flow control)
15008a3b7a25Sdanborkmann@iogearbox.net  *				     settings.
15018a3b7a25Sdanborkmann@iogearbox.net  * @ndev:	Pointer to net_device structure
1502b0d081c5SMichal Simek  * @epauseparm:	Pointer to ethtool_pauseparam structure
15038a3b7a25Sdanborkmann@iogearbox.net  *
15048a3b7a25Sdanborkmann@iogearbox.net  * This implements ethtool command for enabling flow control on Rx and Tx
15058a3b7a25Sdanborkmann@iogearbox.net  * paths. Issue "ethtool -A ethX tx on|off" under linux prompt to execute this
15068a3b7a25Sdanborkmann@iogearbox.net  * function.
1507b0d081c5SMichal Simek  *
1508b0d081c5SMichal Simek  * Return: 0 on success, or a negative error value from phylink on failure
15098a3b7a25Sdanborkmann@iogearbox.net  */
15108a3b7a25Sdanborkmann@iogearbox.net static int
15118a3b7a25Sdanborkmann@iogearbox.net axienet_ethtools_set_pauseparam(struct net_device *ndev,
15128a3b7a25Sdanborkmann@iogearbox.net 				struct ethtool_pauseparam *epauseparm)
15138a3b7a25Sdanborkmann@iogearbox.net {
15148a3b7a25Sdanborkmann@iogearbox.net 	struct axienet_local *lp = netdev_priv(ndev);
15158a3b7a25Sdanborkmann@iogearbox.net 
1516f5203a3dSRobert Hancock 	return phylink_ethtool_set_pauseparam(lp->phylink, epauseparm);
15178a3b7a25Sdanborkmann@iogearbox.net }
15188a3b7a25Sdanborkmann@iogearbox.net 
15198a3b7a25Sdanborkmann@iogearbox.net /**
15208a3b7a25Sdanborkmann@iogearbox.net  * axienet_ethtools_get_coalesce - Get DMA interrupt coalescing count.
15218a3b7a25Sdanborkmann@iogearbox.net  * @ndev:	Pointer to net_device structure
15228a3b7a25Sdanborkmann@iogearbox.net  * @ecoalesce:	Pointer to ethtool_coalesce structure
1523f3ccfda1SYufeng Mo  * @kernel_coal: ethtool CQE mode setting structure
1524f3ccfda1SYufeng Mo  * @extack:	extack for reporting error messages
15258a3b7a25Sdanborkmann@iogearbox.net  *
15268a3b7a25Sdanborkmann@iogearbox.net  * This implements ethtool command for getting the DMA interrupt coalescing
15278a3b7a25Sdanborkmann@iogearbox.net  * count on Tx and Rx paths. Issue "ethtool -c ethX" under linux prompt to
15288a3b7a25Sdanborkmann@iogearbox.net  * execute this function.
1529b0d081c5SMichal Simek  *
1530b0d081c5SMichal Simek  * Return: 0 always
15318a3b7a25Sdanborkmann@iogearbox.net  */
1532f3ccfda1SYufeng Mo static int
1533f3ccfda1SYufeng Mo axienet_ethtools_get_coalesce(struct net_device *ndev,
1534f3ccfda1SYufeng Mo 			      struct ethtool_coalesce *ecoalesce,
1535f3ccfda1SYufeng Mo 			      struct kernel_ethtool_coalesce *kernel_coal,
1536f3ccfda1SYufeng Mo 			      struct netlink_ext_ack *extack)
15378a3b7a25Sdanborkmann@iogearbox.net {
15388a3b7a25Sdanborkmann@iogearbox.net 	struct axienet_local *lp = netdev_priv(ndev);
15390b79b8dcSRobert Hancock 
15400b79b8dcSRobert Hancock 	ecoalesce->rx_max_coalesced_frames = lp->coalesce_count_rx;
15410b79b8dcSRobert Hancock 	ecoalesce->rx_coalesce_usecs = lp->coalesce_usec_rx;
15420b79b8dcSRobert Hancock 	ecoalesce->tx_max_coalesced_frames = lp->coalesce_count_tx;
15430b79b8dcSRobert Hancock 	ecoalesce->tx_coalesce_usecs = lp->coalesce_usec_tx;
15448a3b7a25Sdanborkmann@iogearbox.net 	return 0;
15458a3b7a25Sdanborkmann@iogearbox.net }
15468a3b7a25Sdanborkmann@iogearbox.net 
15478a3b7a25Sdanborkmann@iogearbox.net /**
15488a3b7a25Sdanborkmann@iogearbox.net  * axienet_ethtools_set_coalesce - Set DMA interrupt coalescing count.
15498a3b7a25Sdanborkmann@iogearbox.net  * @ndev:	Pointer to net_device structure
15508a3b7a25Sdanborkmann@iogearbox.net  * @ecoalesce:	Pointer to ethtool_coalesce structure
1551f3ccfda1SYufeng Mo  * @kernel_coal: ethtool CQE mode setting structure
1552f3ccfda1SYufeng Mo  * @extack:	extack for reporting error messages
15538a3b7a25Sdanborkmann@iogearbox.net  *
15548a3b7a25Sdanborkmann@iogearbox.net  * This implements ethtool command for setting the DMA interrupt coalescing
15558a3b7a25Sdanborkmann@iogearbox.net  * count on Tx and Rx paths. Issue "ethtool -C ethX rx-frames 5" under linux
15568a3b7a25Sdanborkmann@iogearbox.net  * prompt to execute this function.
1557b0d081c5SMichal Simek  *
1558b0d081c5SMichal Simek  * Return: 0 on success, non-zero error value on failure.
15598a3b7a25Sdanborkmann@iogearbox.net  */
1560f3ccfda1SYufeng Mo static int
1561f3ccfda1SYufeng Mo axienet_ethtools_set_coalesce(struct net_device *ndev,
1562f3ccfda1SYufeng Mo 			      struct ethtool_coalesce *ecoalesce,
1563f3ccfda1SYufeng Mo 			      struct kernel_ethtool_coalesce *kernel_coal,
1564f3ccfda1SYufeng Mo 			      struct netlink_ext_ack *extack)
15658a3b7a25Sdanborkmann@iogearbox.net {
15668a3b7a25Sdanborkmann@iogearbox.net 	struct axienet_local *lp = netdev_priv(ndev);
15678a3b7a25Sdanborkmann@iogearbox.net 
15688a3b7a25Sdanborkmann@iogearbox.net 	if (netif_running(ndev)) {
1569c81a97b5SSrikanth Thokala 		netdev_err(ndev,
1570c81a97b5SSrikanth Thokala 			   "Please stop netif before applying configuration\n");
15718a3b7a25Sdanborkmann@iogearbox.net 		return -EFAULT;
15728a3b7a25Sdanborkmann@iogearbox.net 	}
15738a3b7a25Sdanborkmann@iogearbox.net 
15748a3b7a25Sdanborkmann@iogearbox.net 	if (ecoalesce->rx_max_coalesced_frames)
15758a3b7a25Sdanborkmann@iogearbox.net 		lp->coalesce_count_rx = ecoalesce->rx_max_coalesced_frames;
15760b79b8dcSRobert Hancock 	if (ecoalesce->rx_coalesce_usecs)
15770b79b8dcSRobert Hancock 		lp->coalesce_usec_rx = ecoalesce->rx_coalesce_usecs;
15788a3b7a25Sdanborkmann@iogearbox.net 	if (ecoalesce->tx_max_coalesced_frames)
15798a3b7a25Sdanborkmann@iogearbox.net 		lp->coalesce_count_tx = ecoalesce->tx_max_coalesced_frames;
15800b79b8dcSRobert Hancock 	if (ecoalesce->tx_coalesce_usecs)
15810b79b8dcSRobert Hancock 		lp->coalesce_usec_tx = ecoalesce->tx_coalesce_usecs;
15828a3b7a25Sdanborkmann@iogearbox.net 
15838a3b7a25Sdanborkmann@iogearbox.net 	return 0;
15848a3b7a25Sdanborkmann@iogearbox.net }
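/* A value of 0 for any of the four parameters above leaves the corresponding
 * setting unchanged (the zero checks skip the assignment), so coalescing
 * cannot be disabled from ethtool. The new values are programmed into the
 * DMA when the interface is next brought up.
 */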
15858a3b7a25Sdanborkmann@iogearbox.net 
1586f5203a3dSRobert Hancock static int
1587f5203a3dSRobert Hancock axienet_ethtools_get_link_ksettings(struct net_device *ndev,
1588f5203a3dSRobert Hancock 				    struct ethtool_link_ksettings *cmd)
1589f5203a3dSRobert Hancock {
1590f5203a3dSRobert Hancock 	struct axienet_local *lp = netdev_priv(ndev);
1591f5203a3dSRobert Hancock 
1592f5203a3dSRobert Hancock 	return phylink_ethtool_ksettings_get(lp->phylink, cmd);
1593f5203a3dSRobert Hancock }
1594f5203a3dSRobert Hancock 
1595f5203a3dSRobert Hancock static int
1596f5203a3dSRobert Hancock axienet_ethtools_set_link_ksettings(struct net_device *ndev,
1597f5203a3dSRobert Hancock 				    const struct ethtool_link_ksettings *cmd)
1598f5203a3dSRobert Hancock {
1599f5203a3dSRobert Hancock 	struct axienet_local *lp = netdev_priv(ndev);
1600f5203a3dSRobert Hancock 
1601f5203a3dSRobert Hancock 	return phylink_ethtool_ksettings_set(lp->phylink, cmd);
1602f5203a3dSRobert Hancock }
1603f5203a3dSRobert Hancock 
160466b51663SRobert Hancock static int axienet_ethtools_nway_reset(struct net_device *dev)
160566b51663SRobert Hancock {
160666b51663SRobert Hancock 	struct axienet_local *lp = netdev_priv(dev);
160766b51663SRobert Hancock 
160866b51663SRobert Hancock 	return phylink_ethtool_nway_reset(lp->phylink);
160966b51663SRobert Hancock }
161066b51663SRobert Hancock 
1611c7735f1bSJulia Lawall static const struct ethtool_ops axienet_ethtool_ops = {
16120b79b8dcSRobert Hancock 	.supported_coalesce_params = ETHTOOL_COALESCE_MAX_FRAMES |
16130b79b8dcSRobert Hancock 				     ETHTOOL_COALESCE_USECS,
16148a3b7a25Sdanborkmann@iogearbox.net 	.get_drvinfo    = axienet_ethtools_get_drvinfo,
16158a3b7a25Sdanborkmann@iogearbox.net 	.get_regs_len   = axienet_ethtools_get_regs_len,
16168a3b7a25Sdanborkmann@iogearbox.net 	.get_regs       = axienet_ethtools_get_regs,
16178a3b7a25Sdanborkmann@iogearbox.net 	.get_link       = ethtool_op_get_link,
16188b09ca82SRobert Hancock 	.get_ringparam	= axienet_ethtools_get_ringparam,
16198b09ca82SRobert Hancock 	.set_ringparam	= axienet_ethtools_set_ringparam,
16208a3b7a25Sdanborkmann@iogearbox.net 	.get_pauseparam = axienet_ethtools_get_pauseparam,
16218a3b7a25Sdanborkmann@iogearbox.net 	.set_pauseparam = axienet_ethtools_set_pauseparam,
16228a3b7a25Sdanborkmann@iogearbox.net 	.get_coalesce   = axienet_ethtools_get_coalesce,
16238a3b7a25Sdanborkmann@iogearbox.net 	.set_coalesce   = axienet_ethtools_set_coalesce,
1624f5203a3dSRobert Hancock 	.get_link_ksettings = axienet_ethtools_get_link_ksettings,
1625f5203a3dSRobert Hancock 	.set_link_ksettings = axienet_ethtools_set_link_ksettings,
162666b51663SRobert Hancock 	.nway_reset	= axienet_ethtools_nway_reset,
1627f5203a3dSRobert Hancock };
1628f5203a3dSRobert Hancock 
16297a86be6aSRussell King (Oracle) static struct axienet_local *pcs_to_axienet_local(struct phylink_pcs *pcs)
16307a86be6aSRussell King (Oracle) {
16317a86be6aSRussell King (Oracle) 	return container_of(pcs, struct axienet_local, pcs);
16327a86be6aSRussell King (Oracle) }
16337a86be6aSRussell King (Oracle) 
16347a86be6aSRussell King (Oracle) static void axienet_pcs_get_state(struct phylink_pcs *pcs,
1635f5203a3dSRobert Hancock 				  struct phylink_link_state *state)
1636f5203a3dSRobert Hancock {
16377a86be6aSRussell King (Oracle) 	struct mdio_device *pcs_phy = pcs_to_axienet_local(pcs)->pcs_phy;
1638f5203a3dSRobert Hancock 
16397a86be6aSRussell King (Oracle) 	phylink_mii_c22_pcs_get_state(pcs_phy, state);
1640f5203a3dSRobert Hancock }
1641f5203a3dSRobert Hancock 
16427a86be6aSRussell King (Oracle) static void axienet_pcs_an_restart(struct phylink_pcs *pcs)
1643f5203a3dSRobert Hancock {
16447a86be6aSRussell King (Oracle) 	struct mdio_device *pcs_phy = pcs_to_axienet_local(pcs)->pcs_phy;
16451a025560SRobert Hancock 
16467a86be6aSRussell King (Oracle) 	phylink_mii_c22_pcs_an_restart(pcs_phy);
1647f5203a3dSRobert Hancock }
1648f5203a3dSRobert Hancock 
1649febf2aafSRussell King (Oracle) static int axienet_pcs_config(struct phylink_pcs *pcs, unsigned int neg_mode,
16507a86be6aSRussell King (Oracle) 			      phy_interface_t interface,
16517a86be6aSRussell King (Oracle) 			      const unsigned long *advertising,
16527a86be6aSRussell King (Oracle) 			      bool permit_pause_to_mac)
16536c8f06bbSRobert Hancock {
16547a86be6aSRussell King (Oracle) 	struct mdio_device *pcs_phy = pcs_to_axienet_local(pcs)->pcs_phy;
16557a86be6aSRussell King (Oracle) 	struct net_device *ndev = pcs_to_axienet_local(pcs)->ndev;
16566c8f06bbSRobert Hancock 	struct axienet_local *lp = netdev_priv(ndev);
16576c8f06bbSRobert Hancock 	int ret;
16586c8f06bbSRobert Hancock 
16597a86be6aSRussell King (Oracle) 	if (lp->switch_x_sgmii) {
166003854d8aSRussell King (Oracle) 		ret = mdiodev_write(pcs_phy, XLNX_MII_STD_SELECT_REG,
16617a86be6aSRussell King (Oracle) 				    interface == PHY_INTERFACE_MODE_SGMII ?
16626c8f06bbSRobert Hancock 					XLNX_MII_STD_SELECT_SGMII : 0);
16637a86be6aSRussell King (Oracle) 		if (ret < 0) {
16647a86be6aSRussell King (Oracle) 			netdev_warn(ndev,
16657a86be6aSRussell King (Oracle) 				    "Failed to switch PHY interface: %d\n",
16666c8f06bbSRobert Hancock 				    ret);
16676c8f06bbSRobert Hancock 			return ret;
16686c8f06bbSRobert Hancock 		}
16696c8f06bbSRobert Hancock 	}
16706c8f06bbSRobert Hancock 
1671febf2aafSRussell King (Oracle) 	ret = phylink_mii_c22_pcs_config(pcs_phy, interface, advertising,
1672febf2aafSRussell King (Oracle) 					 neg_mode);
16737a86be6aSRussell King (Oracle) 	if (ret < 0)
16747a86be6aSRussell King (Oracle) 		netdev_warn(ndev, "Failed to configure PCS: %d\n", ret);
16757a86be6aSRussell King (Oracle) 
16767a86be6aSRussell King (Oracle) 	return ret;
16777a86be6aSRussell King (Oracle) }
16787a86be6aSRussell King (Oracle) 
16797a86be6aSRussell King (Oracle) static const struct phylink_pcs_ops axienet_pcs_ops = {
16807a86be6aSRussell King (Oracle) 	.pcs_get_state = axienet_pcs_get_state,
16817a86be6aSRussell King (Oracle) 	.pcs_config = axienet_pcs_config,
16827a86be6aSRussell King (Oracle) 	.pcs_an_restart = axienet_pcs_an_restart,
16837a86be6aSRussell King (Oracle) };
16847a86be6aSRussell King (Oracle) 
16857a86be6aSRussell King (Oracle) static struct phylink_pcs *axienet_mac_select_pcs(struct phylink_config *config,
16867a86be6aSRussell King (Oracle) 						  phy_interface_t interface)
16877a86be6aSRussell King (Oracle) {
16887a86be6aSRussell King (Oracle) 	struct net_device *ndev = to_net_dev(config->dev);
16897a86be6aSRussell King (Oracle) 	struct axienet_local *lp = netdev_priv(ndev);
16907a86be6aSRussell King (Oracle) 
16917a86be6aSRussell King (Oracle) 	if (interface == PHY_INTERFACE_MODE_1000BASEX ||
16927a86be6aSRussell King (Oracle) 	    interface ==  PHY_INTERFACE_MODE_SGMII)
16937a86be6aSRussell King (Oracle) 		return &lp->pcs;
16947a86be6aSRussell King (Oracle) 
16957a86be6aSRussell King (Oracle) 	return NULL;
16967a86be6aSRussell King (Oracle) }
16977a86be6aSRussell King (Oracle) 
1698f5203a3dSRobert Hancock static void axienet_mac_config(struct phylink_config *config, unsigned int mode,
1699f5203a3dSRobert Hancock 			       const struct phylink_link_state *state)
1700f5203a3dSRobert Hancock {
17017a86be6aSRussell King (Oracle) 	/* nothing meaningful to do */
170295347842SRussell King }
170395347842SRussell King 
170495347842SRussell King static void axienet_mac_link_down(struct phylink_config *config,
170595347842SRussell King 				  unsigned int mode,
170695347842SRussell King 				  phy_interface_t interface)
170795347842SRussell King {
170895347842SRussell King 	/* nothing meaningful to do */
170995347842SRussell King }
171095347842SRussell King 
171195347842SRussell King static void axienet_mac_link_up(struct phylink_config *config,
171295347842SRussell King 				struct phy_device *phy,
171395347842SRussell King 				unsigned int mode, phy_interface_t interface,
171495347842SRussell King 				int speed, int duplex,
171595347842SRussell King 				bool tx_pause, bool rx_pause)
171695347842SRussell King {
1717f5203a3dSRobert Hancock 	struct net_device *ndev = to_net_dev(config->dev);
1718f5203a3dSRobert Hancock 	struct axienet_local *lp = netdev_priv(ndev);
1719f5203a3dSRobert Hancock 	u32 emmc_reg, fcc_reg;
1720f5203a3dSRobert Hancock 
1721f5203a3dSRobert Hancock 	emmc_reg = axienet_ior(lp, XAE_EMMC_OFFSET);
1722f5203a3dSRobert Hancock 	emmc_reg &= ~XAE_EMMC_LINKSPEED_MASK;
1723f5203a3dSRobert Hancock 
172495347842SRussell King 	switch (speed) {
1725f5203a3dSRobert Hancock 	case SPEED_1000:
1726f5203a3dSRobert Hancock 		emmc_reg |= XAE_EMMC_LINKSPD_1000;
1727f5203a3dSRobert Hancock 		break;
1728f5203a3dSRobert Hancock 	case SPEED_100:
1729f5203a3dSRobert Hancock 		emmc_reg |= XAE_EMMC_LINKSPD_100;
1730f5203a3dSRobert Hancock 		break;
1731f5203a3dSRobert Hancock 	case SPEED_10:
1732f5203a3dSRobert Hancock 		emmc_reg |= XAE_EMMC_LINKSPD_10;
1733f5203a3dSRobert Hancock 		break;
1734f5203a3dSRobert Hancock 	default:
1735f5203a3dSRobert Hancock 		dev_err(&ndev->dev,
1736f5203a3dSRobert Hancock 			"Speed other than 10, 100 or 1Gbps is not supported\n");
1737f5203a3dSRobert Hancock 		break;
1738f5203a3dSRobert Hancock 	}
1739f5203a3dSRobert Hancock 
1740f5203a3dSRobert Hancock 	axienet_iow(lp, XAE_EMMC_OFFSET, emmc_reg);
1741f5203a3dSRobert Hancock 
1742f5203a3dSRobert Hancock 	fcc_reg = axienet_ior(lp, XAE_FCC_OFFSET);
174395347842SRussell King 	if (tx_pause)
1744f5203a3dSRobert Hancock 		fcc_reg |= XAE_FCC_FCTX_MASK;
1745f5203a3dSRobert Hancock 	else
1746f5203a3dSRobert Hancock 		fcc_reg &= ~XAE_FCC_FCTX_MASK;
174795347842SRussell King 	if (rx_pause)
1748f5203a3dSRobert Hancock 		fcc_reg |= XAE_FCC_FCRX_MASK;
1749f5203a3dSRobert Hancock 	else
1750f5203a3dSRobert Hancock 		fcc_reg &= ~XAE_FCC_FCRX_MASK;
1751f5203a3dSRobert Hancock 	axienet_iow(lp, XAE_FCC_OFFSET, fcc_reg);
1752f5203a3dSRobert Hancock }
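/* Speed and flow-control are programmed here rather than in mac_config()
 * because phylink only passes the resolved link parameters (speed, duplex,
 * pause) to mac_link_up(); mac_config() and mac_link_down() are left empty
 * since this MAC needs no mode-specific setup beyond what the PCS handles.
 */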
1753f5203a3dSRobert Hancock 
1754f5203a3dSRobert Hancock static const struct phylink_mac_ops axienet_phylink_ops = {
17557a86be6aSRussell King (Oracle) 	.mac_select_pcs = axienet_mac_select_pcs,
1756f5203a3dSRobert Hancock 	.mac_config = axienet_mac_config,
1757f5203a3dSRobert Hancock 	.mac_link_down = axienet_mac_link_down,
1758f5203a3dSRobert Hancock 	.mac_link_up = axienet_mac_link_up,
17598a3b7a25Sdanborkmann@iogearbox.net };
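/* These MAC operations, together with axienet_pcs_ops above, are handed to
 * phylink when the driver probes; the registration itself is outside this
 * excerpt. A call along the following lines is assumed and shown only as an
 * illustration:
 *
 *	lp->phylink = phylink_create(&lp->phylink_config, lp->dev->fwnode,
 *				     lp->phy_mode, &axienet_phylink_ops);
 *
 * phylink then invokes mac_config()/mac_link_up()/mac_link_down() and, for
 * 1000BASE-X and SGMII, the PCS callbacks chosen by mac_select_pcs().
 */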
17608a3b7a25Sdanborkmann@iogearbox.net 
17618a3b7a25Sdanborkmann@iogearbox.net /**
176224201a64SAndre Przywara  * axienet_dma_err_handler - Work queue task for Axi DMA Error
176324201a64SAndre Przywara  * @work:	pointer to work_struct
17648a3b7a25Sdanborkmann@iogearbox.net  *
17658a3b7a25Sdanborkmann@iogearbox.net  * Resets the Axi DMA and Axi Ethernet devices, and reconfigures the
17668a3b7a25Sdanborkmann@iogearbox.net  * Tx/Rx BDs.
17678a3b7a25Sdanborkmann@iogearbox.net  */
176824201a64SAndre Przywara static void axienet_dma_err_handler(struct work_struct *work)
17698a3b7a25Sdanborkmann@iogearbox.net {
177084b9ccc0SRobert Hancock 	u32 i;
17718a3b7a25Sdanborkmann@iogearbox.net 	u32 axienet_status;
177284b9ccc0SRobert Hancock 	struct axidma_bd *cur_p;
177324201a64SAndre Przywara 	struct axienet_local *lp = container_of(work, struct axienet_local,
177424201a64SAndre Przywara 						dma_err_task);
17758a3b7a25Sdanborkmann@iogearbox.net 	struct net_device *ndev = lp->ndev;
17768a3b7a25Sdanborkmann@iogearbox.net 
1777b1e1daf0SSean Anderson 	/* Don't bother if we are going to stop anyway */
1778b1e1daf0SSean Anderson 	if (READ_ONCE(lp->stopping))
1779b1e1daf0SSean Anderson 		return;
1780b1e1daf0SSean Anderson 
17819e2bc267SRobert Hancock 	napi_disable(&lp->napi_tx);
17829e2bc267SRobert Hancock 	napi_disable(&lp->napi_rx);
1783cc37610cSRobert Hancock 
17848a3b7a25Sdanborkmann@iogearbox.net 	axienet_setoptions(ndev, lp->options &
17858a3b7a25Sdanborkmann@iogearbox.net 			   ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
178684b9ccc0SRobert Hancock 
178784b9ccc0SRobert Hancock 	axienet_dma_stop(lp);
17888a3b7a25Sdanborkmann@iogearbox.net 
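	/* Unmap and free any skbs still attached to the Tx ring, then
	 * clear every Tx descriptor back to its reset state.
	 */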
17898b09ca82SRobert Hancock 	for (i = 0; i < lp->tx_bd_num; i++) {
17908a3b7a25Sdanborkmann@iogearbox.net 		cur_p = &lp->tx_bd_v[i];
17914e958f33SAndre Przywara 		if (cur_p->cntrl) {
17924e958f33SAndre Przywara 			dma_addr_t addr = desc_get_phys_addr(lp, cur_p);
17934e958f33SAndre Przywara 
179417882fd4SRobert Hancock 			dma_unmap_single(lp->dev, addr,
17958a3b7a25Sdanborkmann@iogearbox.net 					 (cur_p->cntrl &
17968a3b7a25Sdanborkmann@iogearbox.net 					  XAXIDMA_BD_CTRL_LENGTH_MASK),
17978a3b7a25Sdanborkmann@iogearbox.net 					 DMA_TO_DEVICE);
17984e958f33SAndre Przywara 		}
179923e6b2dcSRobert Hancock 		if (cur_p->skb)
180023e6b2dcSRobert Hancock 			dev_kfree_skb_irq(cur_p->skb);
18018a3b7a25Sdanborkmann@iogearbox.net 		cur_p->phys = 0;
18024e958f33SAndre Przywara 		cur_p->phys_msb = 0;
18038a3b7a25Sdanborkmann@iogearbox.net 		cur_p->cntrl = 0;
18048a3b7a25Sdanborkmann@iogearbox.net 		cur_p->status = 0;
18058a3b7a25Sdanborkmann@iogearbox.net 		cur_p->app0 = 0;
18068a3b7a25Sdanborkmann@iogearbox.net 		cur_p->app1 = 0;
18078a3b7a25Sdanborkmann@iogearbox.net 		cur_p->app2 = 0;
18088a3b7a25Sdanborkmann@iogearbox.net 		cur_p->app3 = 0;
18098a3b7a25Sdanborkmann@iogearbox.net 		cur_p->app4 = 0;
181023e6b2dcSRobert Hancock 		cur_p->skb = NULL;
18118a3b7a25Sdanborkmann@iogearbox.net 	}
18128a3b7a25Sdanborkmann@iogearbox.net 
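	/* The Rx descriptors keep their buffer mappings; only the status
	 * and application words need to be cleared before restarting.
	 */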
18138b09ca82SRobert Hancock 	for (i = 0; i < lp->rx_bd_num; i++) {
18148a3b7a25Sdanborkmann@iogearbox.net 		cur_p = &lp->rx_bd_v[i];
18158a3b7a25Sdanborkmann@iogearbox.net 		cur_p->status = 0;
18168a3b7a25Sdanborkmann@iogearbox.net 		cur_p->app0 = 0;
18178a3b7a25Sdanborkmann@iogearbox.net 		cur_p->app1 = 0;
18188a3b7a25Sdanborkmann@iogearbox.net 		cur_p->app2 = 0;
18198a3b7a25Sdanborkmann@iogearbox.net 		cur_p->app3 = 0;
18208a3b7a25Sdanborkmann@iogearbox.net 		cur_p->app4 = 0;
18218a3b7a25Sdanborkmann@iogearbox.net 	}
18228a3b7a25Sdanborkmann@iogearbox.net 
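	/* Both rings are now empty, so restart from the first descriptor */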
18238a3b7a25Sdanborkmann@iogearbox.net 	lp->tx_bd_ci = 0;
18248a3b7a25Sdanborkmann@iogearbox.net 	lp->tx_bd_tail = 0;
18258a3b7a25Sdanborkmann@iogearbox.net 	lp->rx_bd_ci = 0;
18268a3b7a25Sdanborkmann@iogearbox.net 
182784b9ccc0SRobert Hancock 	axienet_dma_start(lp);
18288a3b7a25Sdanborkmann@iogearbox.net 
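	/* Re-initialize the MAC: disable the receiver, acknowledge any
	 * pending frame-reject interrupt, and re-enable the receive error
	 * interrupt and Rx flow control.
	 */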
18298a3b7a25Sdanborkmann@iogearbox.net 	axienet_status = axienet_ior(lp, XAE_RCW1_OFFSET);
18308a3b7a25Sdanborkmann@iogearbox.net 	axienet_status &= ~XAE_RCW1_RX_MASK;
18318a3b7a25Sdanborkmann@iogearbox.net 	axienet_iow(lp, XAE_RCW1_OFFSET, axienet_status);
18328a3b7a25Sdanborkmann@iogearbox.net 
18338a3b7a25Sdanborkmann@iogearbox.net 	axienet_status = axienet_ior(lp, XAE_IP_OFFSET);
18348a3b7a25Sdanborkmann@iogearbox.net 	if (axienet_status & XAE_INT_RXRJECT_MASK)
18358a3b7a25Sdanborkmann@iogearbox.net 		axienet_iow(lp, XAE_IS_OFFSET, XAE_INT_RXRJECT_MASK);
1836522856ceSRobert Hancock 	axienet_iow(lp, XAE_IE_OFFSET, lp->eth_irq > 0 ?
1837522856ceSRobert Hancock 		    XAE_INT_RECV_ERROR_MASK : 0);
18388a3b7a25Sdanborkmann@iogearbox.net 	axienet_iow(lp, XAE_FCC_OFFSET, XAE_FCC_FCRX_MASK);
18398a3b7a25Sdanborkmann@iogearbox.net 
18408a3b7a25Sdanborkmann@iogearbox.net 	/* Sync default options with HW but leave receiver and
1841850a7503SMichal Simek 	 * transmitter disabled.
1842850a7503SMichal Simek 	 */
18438a3b7a25Sdanborkmann@iogearbox.net 	axienet_setoptions(ndev, lp->options &
18448a3b7a25Sdanborkmann@iogearbox.net 			   ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
18458a3b7a25Sdanborkmann@iogearbox.net 	axienet_set_mac_address(ndev, NULL);
18468a3b7a25Sdanborkmann@iogearbox.net 	axienet_set_multicast_list(ndev);
18479e2bc267SRobert Hancock 	napi_enable(&lp->napi_rx);
18489e2bc267SRobert Hancock 	napi_enable(&lp->napi_tx);
18494ea83a05SAndy Chiu 	axienet_setoptions(ndev, lp->options);
18508a3b7a25Sdanborkmann@iogearbox.net }
18518a3b7a25Sdanborkmann@iogearbox.net 
18528a3b7a25Sdanborkmann@iogearbox.net /**
18532be58620SSrikanth Thokala  * axienet_probe - Axi Ethernet probe function.
185495219aa5SSrikanth Thokala  * @pdev:	Pointer to platform device structure.
18558a3b7a25Sdanborkmann@iogearbox.net  *
1856b0d081c5SMichal Simek  * Return: 0 on success.
18578a3b7a25Sdanborkmann@iogearbox.net  *	    Non-zero error value on failure.
18588a3b7a25Sdanborkmann@iogearbox.net  *
18598a3b7a25Sdanborkmann@iogearbox.net  * This is the probe routine for the Axi Ethernet driver. It is called before
18608a3b7a25Sdanborkmann@iogearbox.net  * any other driver routines are invoked. It allocates and sets up the Ethernet
18618a3b7a25Sdanborkmann@iogearbox.net  * device, parses the device tree to populate the fields of
18628a3b7a25Sdanborkmann@iogearbox.net  * axienet_local, and registers the Ethernet device.
18638a3b7a25Sdanborkmann@iogearbox.net  */
18642be58620SSrikanth Thokala static int axienet_probe(struct platform_device *pdev)
18658a3b7a25Sdanborkmann@iogearbox.net {
18668495659bSSrikanth Thokala 	int ret;
18678a3b7a25Sdanborkmann@iogearbox.net 	struct device_node *np;
18688a3b7a25Sdanborkmann@iogearbox.net 	struct axienet_local *lp;
18698a3b7a25Sdanborkmann@iogearbox.net 	struct net_device *ndev;
187028ef9ebdSRobert Hancock 	struct resource *ethres;
187183216e39SMichael Walle 	u8 mac_addr[ETH_ALEN];
18725fff0151SAndre Przywara 	int addr_width = 32;
18738495659bSSrikanth Thokala 	u32 value;
18748a3b7a25Sdanborkmann@iogearbox.net 
18758a3b7a25Sdanborkmann@iogearbox.net 	ndev = alloc_etherdev(sizeof(*lp));
187641de8d4cSJoe Perches 	if (!ndev)
18778a3b7a25Sdanborkmann@iogearbox.net 		return -ENOMEM;
18788a3b7a25Sdanborkmann@iogearbox.net 
187995219aa5SSrikanth Thokala 	platform_set_drvdata(pdev, ndev);
18808a3b7a25Sdanborkmann@iogearbox.net 
188195219aa5SSrikanth Thokala 	SET_NETDEV_DEV(ndev, &pdev->dev);
18828a3b7a25Sdanborkmann@iogearbox.net 	ndev->flags &= ~IFF_MULTICAST;  /* clear multicast */
188328e24c62SEric Dumazet 	ndev->features = NETIF_F_SG;
18848a3b7a25Sdanborkmann@iogearbox.net 	ndev->netdev_ops = &axienet_netdev_ops;
18858a3b7a25Sdanborkmann@iogearbox.net 	ndev->ethtool_ops = &axienet_ethtool_ops;
18868a3b7a25Sdanborkmann@iogearbox.net 
1887d894be57SJarod Wilson 	/* MTU range: 64 - 9000 */
1888d894be57SJarod Wilson 	ndev->min_mtu = 64;
1889d894be57SJarod Wilson 	ndev->max_mtu = XAE_JUMBO_MTU;
1890d894be57SJarod Wilson 
18918a3b7a25Sdanborkmann@iogearbox.net 	lp = netdev_priv(ndev);
18928a3b7a25Sdanborkmann@iogearbox.net 	lp->ndev = ndev;
189395219aa5SSrikanth Thokala 	lp->dev = &pdev->dev;
18948a3b7a25Sdanborkmann@iogearbox.net 	lp->options = XAE_OPTION_DEFAULTS;
18958b09ca82SRobert Hancock 	lp->rx_bd_num = RX_BD_NUM_DEFAULT;
18968b09ca82SRobert Hancock 	lp->tx_bd_num = TX_BD_NUM_DEFAULT;
189757baf8ccSRobert Hancock 
1898cb45a8bfSRobert Hancock 	u64_stats_init(&lp->rx_stat_sync);
1899cb45a8bfSRobert Hancock 	u64_stats_init(&lp->tx_stat_sync);
1900cb45a8bfSRobert Hancock 
1901b48b89f9SJakub Kicinski 	netif_napi_add(ndev, &lp->napi_rx, axienet_rx_poll);
1902b48b89f9SJakub Kicinski 	netif_napi_add(ndev, &lp->napi_tx, axienet_tx_poll);
1903cc37610cSRobert Hancock 
1904b11bfb9aSRobert Hancock 	lp->axi_clk = devm_clk_get_optional(&pdev->dev, "s_axi_lite_clk");
1905b11bfb9aSRobert Hancock 	if (!lp->axi_clk) {
1906b11bfb9aSRobert Hancock 		/* For backward compatibility, if named AXI clock is not present,
1907b11bfb9aSRobert Hancock 		 * treat the first clock specified as the AXI clock.
1908b11bfb9aSRobert Hancock 		 */
1909b11bfb9aSRobert Hancock 		lp->axi_clk = devm_clk_get_optional(&pdev->dev, NULL);
1910b11bfb9aSRobert Hancock 	}
1911b11bfb9aSRobert Hancock 	if (IS_ERR(lp->axi_clk)) {
1912b11bfb9aSRobert Hancock 		ret = PTR_ERR(lp->axi_clk);
191357baf8ccSRobert Hancock 		goto free_netdev;
191457baf8ccSRobert Hancock 	}
1915b11bfb9aSRobert Hancock 	ret = clk_prepare_enable(lp->axi_clk);
191657baf8ccSRobert Hancock 	if (ret) {
1917b11bfb9aSRobert Hancock 		dev_err(&pdev->dev, "Unable to enable AXI clock: %d\n", ret);
191857baf8ccSRobert Hancock 		goto free_netdev;
191957baf8ccSRobert Hancock 	}
192057baf8ccSRobert Hancock 
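	/* The remaining clocks are optional; enable whichever of them are
	 * described in the device tree.
	 */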
1921b11bfb9aSRobert Hancock 	lp->misc_clks[0].id = "axis_clk";
1922b11bfb9aSRobert Hancock 	lp->misc_clks[1].id = "ref_clk";
1923b11bfb9aSRobert Hancock 	lp->misc_clks[2].id = "mgt_clk";
1924b11bfb9aSRobert Hancock 
1925b11bfb9aSRobert Hancock 	ret = devm_clk_bulk_get_optional(&pdev->dev, XAE_NUM_MISC_CLOCKS, lp->misc_clks);
1926b11bfb9aSRobert Hancock 	if (ret)
1927b11bfb9aSRobert Hancock 		goto cleanup_clk;
1928b11bfb9aSRobert Hancock 
1929b11bfb9aSRobert Hancock 	ret = clk_bulk_prepare_enable(XAE_NUM_MISC_CLOCKS, lp->misc_clks);
1930b11bfb9aSRobert Hancock 	if (ret)
1931b11bfb9aSRobert Hancock 		goto cleanup_clk;
1932b11bfb9aSRobert Hancock 
19338a3b7a25Sdanborkmann@iogearbox.net 	/* Map device registers */
193447651c51SYang Yingliang 	lp->regs = devm_platform_get_and_ioremap_resource(pdev, 0, &ethres);
1935fcc028c1SKrzysztof Kozlowski 	if (IS_ERR(lp->regs)) {
1936fcc028c1SKrzysztof Kozlowski 		ret = PTR_ERR(lp->regs);
193759cd4f19SRobert Hancock 		goto cleanup_clk;
19388a3b7a25Sdanborkmann@iogearbox.net 	}
19397fa0043dSRobert Hancock 	lp->regs_start = ethres->start;
194046aa27dfSSrikanth Thokala 
19418a3b7a25Sdanborkmann@iogearbox.net 	/* Setup checksum offload, but default to off if not specified */
19428a3b7a25Sdanborkmann@iogearbox.net 	lp->features = 0;
19438a3b7a25Sdanborkmann@iogearbox.net 
19448495659bSSrikanth Thokala 	ret = of_property_read_u32(pdev->dev.of_node, "xlnx,txcsum", &value);
19458495659bSSrikanth Thokala 	if (!ret) {
19468495659bSSrikanth Thokala 		switch (value) {
19478a3b7a25Sdanborkmann@iogearbox.net 		case 1:
19488a3b7a25Sdanborkmann@iogearbox.net 			lp->csum_offload_on_tx_path =
19498a3b7a25Sdanborkmann@iogearbox.net 				XAE_FEATURE_PARTIAL_TX_CSUM;
19508a3b7a25Sdanborkmann@iogearbox.net 			lp->features |= XAE_FEATURE_PARTIAL_TX_CSUM;
19518a3b7a25Sdanborkmann@iogearbox.net 			/* Can checksum TCP/UDP over IPv4. */
19528a3b7a25Sdanborkmann@iogearbox.net 			ndev->features |= NETIF_F_IP_CSUM;
19538a3b7a25Sdanborkmann@iogearbox.net 			break;
19548a3b7a25Sdanborkmann@iogearbox.net 		case 2:
19558a3b7a25Sdanborkmann@iogearbox.net 			lp->csum_offload_on_tx_path =
19568a3b7a25Sdanborkmann@iogearbox.net 				XAE_FEATURE_FULL_TX_CSUM;
19578a3b7a25Sdanborkmann@iogearbox.net 			lp->features |= XAE_FEATURE_FULL_TX_CSUM;
19588a3b7a25Sdanborkmann@iogearbox.net 			/* Can checksum TCP/UDP over IPv4. */
19598a3b7a25Sdanborkmann@iogearbox.net 			ndev->features |= NETIF_F_IP_CSUM;
19608a3b7a25Sdanborkmann@iogearbox.net 			break;
19618a3b7a25Sdanborkmann@iogearbox.net 		default:
19628a3b7a25Sdanborkmann@iogearbox.net 			lp->csum_offload_on_tx_path = XAE_NO_CSUM_OFFLOAD;
19638a3b7a25Sdanborkmann@iogearbox.net 		}
19648a3b7a25Sdanborkmann@iogearbox.net 	}
19658495659bSSrikanth Thokala 	ret = of_property_read_u32(pdev->dev.of_node, "xlnx,rxcsum", &value);
19668495659bSSrikanth Thokala 	if (!ret) {
19678495659bSSrikanth Thokala 		switch (value) {
19688a3b7a25Sdanborkmann@iogearbox.net 		case 1:
19698a3b7a25Sdanborkmann@iogearbox.net 			lp->csum_offload_on_rx_path =
19708a3b7a25Sdanborkmann@iogearbox.net 				XAE_FEATURE_PARTIAL_RX_CSUM;
19718a3b7a25Sdanborkmann@iogearbox.net 			lp->features |= XAE_FEATURE_PARTIAL_RX_CSUM;
19728a3b7a25Sdanborkmann@iogearbox.net 			break;
19738a3b7a25Sdanborkmann@iogearbox.net 		case 2:
19748a3b7a25Sdanborkmann@iogearbox.net 			lp->csum_offload_on_rx_path =
19758a3b7a25Sdanborkmann@iogearbox.net 				XAE_FEATURE_FULL_RX_CSUM;
19768a3b7a25Sdanborkmann@iogearbox.net 			lp->features |= XAE_FEATURE_FULL_RX_CSUM;
19778a3b7a25Sdanborkmann@iogearbox.net 			break;
19788a3b7a25Sdanborkmann@iogearbox.net 		default:
19798a3b7a25Sdanborkmann@iogearbox.net 			lp->csum_offload_on_rx_path = XAE_NO_CSUM_OFFLOAD;
19808a3b7a25Sdanborkmann@iogearbox.net 		}
19818a3b7a25Sdanborkmann@iogearbox.net 	}
19828a3b7a25Sdanborkmann@iogearbox.net 	/* To support jumbo frames, the Axi Ethernet hardware must be
1983f080a8c3SSrikanth Thokala 	 * configured with sufficiently large Rx/Tx memory; only then can
1984f080a8c3SSrikanth Thokala 	 * the jumbo option be enabled.
1985f080a8c3SSrikanth Thokala 	 * Here we read the Rx/Tx memory provisioned in the hardware from
1986f080a8c3SSrikanth Thokala 	 * the device tree and set the flags accordingly.
1987f080a8c3SSrikanth Thokala 	 */
19888495659bSSrikanth Thokala 	of_property_read_u32(pdev->dev.of_node, "xlnx,rxmem", &lp->rxmem);
1989ee06b172SAlvaro G. M 
19906c8f06bbSRobert Hancock 	lp->switch_x_sgmii = of_property_read_bool(pdev->dev.of_node,
19916c8f06bbSRobert Hancock 						   "xlnx,switch-x-sgmii");
19926c8f06bbSRobert Hancock 
1993ee06b172SAlvaro G. M 	/* Start with the proprietary (and broken) phy_type property */
1994ee06b172SAlvaro G. M 	ret = of_property_read_u32(pdev->dev.of_node, "xlnx,phy-type", &value);
1995ee06b172SAlvaro G. M 	if (!ret) {
1996ee06b172SAlvaro G. M 		netdev_warn(ndev, "Please upgrade your device tree binary blob to use phy-mode");
1997ee06b172SAlvaro G. M 		switch (value) {
1998ee06b172SAlvaro G. M 		case XAE_PHY_TYPE_MII:
1999ee06b172SAlvaro G. M 			lp->phy_mode = PHY_INTERFACE_MODE_MII;
2000ee06b172SAlvaro G. M 			break;
2001ee06b172SAlvaro G. M 		case XAE_PHY_TYPE_GMII:
2002ee06b172SAlvaro G. M 			lp->phy_mode = PHY_INTERFACE_MODE_GMII;
2003ee06b172SAlvaro G. M 			break;
2004ee06b172SAlvaro G. M 		case XAE_PHY_TYPE_RGMII_2_0:
2005ee06b172SAlvaro G. M 			lp->phy_mode = PHY_INTERFACE_MODE_RGMII_ID;
2006ee06b172SAlvaro G. M 			break;
2007ee06b172SAlvaro G. M 		case XAE_PHY_TYPE_SGMII:
2008ee06b172SAlvaro G. M 			lp->phy_mode = PHY_INTERFACE_MODE_SGMII;
2009ee06b172SAlvaro G. M 			break;
2010ee06b172SAlvaro G. M 		case XAE_PHY_TYPE_1000BASE_X:
2011ee06b172SAlvaro G. M 			lp->phy_mode = PHY_INTERFACE_MODE_1000BASEX;
2012ee06b172SAlvaro G. M 			break;
2013ee06b172SAlvaro G. M 		default:
2014ee06b172SAlvaro G. M 			ret = -EINVAL;
201559cd4f19SRobert Hancock 			goto cleanup_clk;
2016ee06b172SAlvaro G. M 		}
2017ee06b172SAlvaro G. M 	} else {
20180c65b2b9SAndrew Lunn 		ret = of_get_phy_mode(pdev->dev.of_node, &lp->phy_mode);
20190c65b2b9SAndrew Lunn 		if (ret)
202059cd4f19SRobert Hancock 			goto cleanup_clk;
2021ee06b172SAlvaro G. M 	}
20226c8f06bbSRobert Hancock 	if (lp->switch_x_sgmii && lp->phy_mode != PHY_INTERFACE_MODE_SGMII &&
20236c8f06bbSRobert Hancock 	    lp->phy_mode != PHY_INTERFACE_MODE_1000BASEX) {
20246c8f06bbSRobert Hancock 		dev_err(&pdev->dev, "xlnx,switch-x-sgmii only supported with SGMII or 1000BaseX\n");
20256c8f06bbSRobert Hancock 		ret = -EINVAL;
202659cd4f19SRobert Hancock 		goto cleanup_clk;
20276c8f06bbSRobert Hancock 	}
20288a3b7a25Sdanborkmann@iogearbox.net 
20298a3b7a25Sdanborkmann@iogearbox.net 	/* Find the DMA node, map the DMA registers, and decode the DMA IRQs */
203095219aa5SSrikanth Thokala 	np = of_parse_phandle(pdev->dev.of_node, "axistream-connected", 0);
203128ef9ebdSRobert Hancock 	if (np) {
203228ef9ebdSRobert Hancock 		struct resource dmares;
203328ef9ebdSRobert Hancock 
203446aa27dfSSrikanth Thokala 		ret = of_address_to_resource(np, 0, &dmares);
203546aa27dfSSrikanth Thokala 		if (ret) {
203628ef9ebdSRobert Hancock 			dev_err(&pdev->dev,
203728ef9ebdSRobert Hancock 				"unable to get DMA resource\n");
2038fa3a419dSWen Yang 			of_node_put(np);
203959cd4f19SRobert Hancock 			goto cleanup_clk;
204046aa27dfSSrikanth Thokala 		}
204128ef9ebdSRobert Hancock 		lp->dma_regs = devm_ioremap_resource(&pdev->dev,
204228ef9ebdSRobert Hancock 						     &dmares);
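		/* On the DMA node, IRQ index 0 is Tx and index 1 is Rx */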
204328ef9ebdSRobert Hancock 		lp->rx_irq = irq_of_parse_and_map(np, 1);
204428ef9ebdSRobert Hancock 		lp->tx_irq = irq_of_parse_and_map(np, 0);
204528ef9ebdSRobert Hancock 		of_node_put(np);
2046d6349e3eSAndre Przywara 		lp->eth_irq = platform_get_irq_optional(pdev, 0);
204728ef9ebdSRobert Hancock 	} else {
204828ef9ebdSRobert Hancock 		/* Check for these resources directly on the Ethernet node. */
204947651c51SYang Yingliang 		lp->dma_regs = devm_platform_get_and_ioremap_resource(pdev, 1, NULL);
205028ef9ebdSRobert Hancock 		lp->rx_irq = platform_get_irq(pdev, 1);
205128ef9ebdSRobert Hancock 		lp->tx_irq = platform_get_irq(pdev, 0);
2052d6349e3eSAndre Przywara 		lp->eth_irq = platform_get_irq_optional(pdev, 2);
205328ef9ebdSRobert Hancock 	}
2054fcc028c1SKrzysztof Kozlowski 	if (IS_ERR(lp->dma_regs)) {
205546aa27dfSSrikanth Thokala 		dev_err(&pdev->dev, "could not map DMA regs\n");
2056fcc028c1SKrzysztof Kozlowski 		ret = PTR_ERR(lp->dma_regs);
205759cd4f19SRobert Hancock 		goto cleanup_clk;
20588a3b7a25Sdanborkmann@iogearbox.net 	}
2059cb59c87dSMichal Simek 	if ((lp->rx_irq <= 0) || (lp->tx_irq <= 0)) {
206095219aa5SSrikanth Thokala 		dev_err(&pdev->dev, "could not determine irqs\n");
20618a3b7a25Sdanborkmann@iogearbox.net 		ret = -ENOMEM;
206259cd4f19SRobert Hancock 		goto cleanup_clk;
20638a3b7a25Sdanborkmann@iogearbox.net 	}
20648a3b7a25Sdanborkmann@iogearbox.net 
2065f1bc9fc4SMaxim Kochetkov 	/* Reset core now that clocks are enabled, prior to accessing MDIO */
2066f1bc9fc4SMaxim Kochetkov 	ret = __axienet_device_reset(lp);
2067f1bc9fc4SMaxim Kochetkov 	if (ret)
2068f1bc9fc4SMaxim Kochetkov 		goto cleanup_clk;
2069f1bc9fc4SMaxim Kochetkov 
2070f735c40eSAndre Przywara 	/* Autodetect the need for 64-bit DMA pointers.
2071f735c40eSAndre Przywara 	 * When the IP is configured for a bus width bigger than 32 bits,
2072f735c40eSAndre Przywara 	 * writing the MSB registers is mandatory, even if they are all 0.
2073f735c40eSAndre Przywara 	 * We can detect this case by writing all 1's to one such register
2074f735c40eSAndre Przywara 	 * and see if that sticks: when the IP is configured for 32 bits
2075f735c40eSAndre Przywara 	 * only, those registers are RES0.
2076f735c40eSAndre Przywara 	 * Those MSB registers were introduced in IP v7.1, which we check first.
2077f735c40eSAndre Przywara 	 */
2078f735c40eSAndre Przywara 	if ((axienet_ior(lp, XAE_ID_OFFSET) >> 24) >= 0x9) {
2079f735c40eSAndre Przywara 		void __iomem *desc = lp->dma_regs + XAXIDMA_TX_CDESC_OFFSET + 4;
2080f735c40eSAndre Przywara 
2081f735c40eSAndre Przywara 		iowrite32(0x0, desc);
2082f735c40eSAndre Przywara 		if (ioread32(desc) == 0) {	/* sanity check */
2083f735c40eSAndre Przywara 			iowrite32(0xffffffff, desc);
2084f735c40eSAndre Przywara 			if (ioread32(desc) > 0) {
2085f735c40eSAndre Przywara 				lp->features |= XAE_FEATURE_DMA_64BIT;
20865fff0151SAndre Przywara 				addr_width = 64;
2087f735c40eSAndre Przywara 				dev_info(&pdev->dev,
2088f735c40eSAndre Przywara 					 "autodetected 64-bit DMA range\n");
2089f735c40eSAndre Przywara 			}
2090f735c40eSAndre Przywara 			iowrite32(0x0, desc);
2091f735c40eSAndre Przywara 		}
2092f735c40eSAndre Przywara 	}
209300be43a7SAndy Chiu 	if (!IS_ENABLED(CONFIG_64BIT) && lp->features & XAE_FEATURE_DMA_64BIT) {
209400be43a7SAndy Chiu 		dev_err(&pdev->dev, "64-bit addressable DMA is not compatible with 32-bit architecture\n");
20952e7bf4a6SYang Yingliang 		ret = -EINVAL;
209600be43a7SAndy Chiu 		goto cleanup_clk;
209700be43a7SAndy Chiu 	}
2098f735c40eSAndre Przywara 
20995fff0151SAndre Przywara 	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(addr_width));
21005fff0151SAndre Przywara 	if (ret) {
21015fff0151SAndre Przywara 		dev_err(&pdev->dev, "No suitable DMA available\n");
210259cd4f19SRobert Hancock 		goto cleanup_clk;
21035fff0151SAndre Przywara 	}
21045fff0151SAndre Przywara 
2105522856ceSRobert Hancock 	/* Check for Ethernet core IRQ (optional) */
2106522856ceSRobert Hancock 	if (lp->eth_irq <= 0)
2107522856ceSRobert Hancock 		dev_info(&pdev->dev, "Ethernet core IRQ not defined\n");
2108522856ceSRobert Hancock 
21098a3b7a25Sdanborkmann@iogearbox.net 	/* Retrieve the MAC address */
211083216e39SMichael Walle 	ret = of_get_mac_address(pdev->dev.of_node, mac_addr);
211183216e39SMichael Walle 	if (!ret) {
2112da90e380STobias Klauser 		axienet_set_mac_address(ndev, mac_addr);
211383216e39SMichael Walle 	} else {
211483216e39SMichael Walle 		dev_warn(&pdev->dev, "could not find MAC address property: %d\n",
211583216e39SMichael Walle 			 ret);
211683216e39SMichael Walle 		axienet_set_mac_address(ndev, NULL);
211783216e39SMichael Walle 	}
21188a3b7a25Sdanborkmann@iogearbox.net 
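	/* Start with the default interrupt coalescing settings; these can
	 * be tuned at runtime through ethtool.
	 */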
21198a3b7a25Sdanborkmann@iogearbox.net 	lp->coalesce_count_rx = XAXIDMA_DFT_RX_THRESHOLD;
21200b79b8dcSRobert Hancock 	lp->coalesce_usec_rx = XAXIDMA_DFT_RX_USEC;
21218a3b7a25Sdanborkmann@iogearbox.net 	lp->coalesce_count_tx = XAXIDMA_DFT_TX_THRESHOLD;
21220b79b8dcSRobert Hancock 	lp->coalesce_usec_tx = XAXIDMA_DFT_TX_USEC;
21238a3b7a25Sdanborkmann@iogearbox.net 
212409a0354cSRobert Hancock 	ret = axienet_mdio_setup(lp);
21258a3b7a25Sdanborkmann@iogearbox.net 	if (ret)
212609a0354cSRobert Hancock 		dev_warn(&pdev->dev,
212709a0354cSRobert Hancock 			 "error registering MDIO bus: %d\n", ret);
2128d1c4f93eSAndy Chiu 
21291a025560SRobert Hancock 	if (lp->phy_mode == PHY_INTERFACE_MODE_SGMII ||
21301a025560SRobert Hancock 	    lp->phy_mode == PHY_INTERFACE_MODE_1000BASEX) {
213119c7a439SAndy Chiu 		np = of_parse_phandle(pdev->dev.of_node, "pcs-handle", 0);
2132ab3a5d4cSAndy Chiu 		if (!np) {
213319c7a439SAndy Chiu 			/* Deprecated: Always use "pcs-handle" for pcs_phy.
213419c7a439SAndy Chiu 			 * Falling back to "phy-handle" here is only for
213519c7a439SAndy Chiu 			 * backward compatibility with old device trees.
213619c7a439SAndy Chiu 			 */
213719c7a439SAndy Chiu 			np = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0);
213819c7a439SAndy Chiu 		}
213919c7a439SAndy Chiu 		if (!np) {
214019c7a439SAndy Chiu 			dev_err(&pdev->dev, "pcs-handle (preferred) or phy-handle required for 1000BaseX/SGMII\n");
21411a025560SRobert Hancock 			ret = -EINVAL;
214259cd4f19SRobert Hancock 			goto cleanup_mdio;
21431a025560SRobert Hancock 		}
2144ab3a5d4cSAndy Chiu 		lp->pcs_phy = of_mdio_find_device(np);
21451a025560SRobert Hancock 		if (!lp->pcs_phy) {
21461a025560SRobert Hancock 			ret = -EPROBE_DEFER;
2147ab3a5d4cSAndy Chiu 			of_node_put(np);
214859cd4f19SRobert Hancock 			goto cleanup_mdio;
21491a025560SRobert Hancock 		}
2150ab3a5d4cSAndy Chiu 		of_node_put(np);
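		/* Hand the internal PCS to phylink and let it poll the PCS
		 * for link state changes.
		 */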
21517a86be6aSRussell King (Oracle) 		lp->pcs.ops = &axienet_pcs_ops;
2152febf2aafSRussell King (Oracle) 		lp->pcs.neg_mode = true;
21537a86be6aSRussell King (Oracle) 		lp->pcs.poll = true;
21541a025560SRobert Hancock 	}
21558a3b7a25Sdanborkmann@iogearbox.net 
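	/* Describe the MAC to phylink before creating the phylink instance */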
2156f5203a3dSRobert Hancock 	lp->phylink_config.dev = &ndev->dev;
2157f5203a3dSRobert Hancock 	lp->phylink_config.type = PHYLINK_NETDEV;
215872a47e1aSRussell King (Oracle) 	lp->phylink_config.mac_capabilities = MAC_SYM_PAUSE | MAC_ASYM_PAUSE |
215972a47e1aSRussell King (Oracle) 		MAC_10FD | MAC_100FD | MAC_1000FD;
2160f5203a3dSRobert Hancock 
2161136a3fa2SRussell King (Oracle) 	__set_bit(lp->phy_mode, lp->phylink_config.supported_interfaces);
2162136a3fa2SRussell King (Oracle) 	if (lp->switch_x_sgmii) {
2163136a3fa2SRussell King (Oracle) 		__set_bit(PHY_INTERFACE_MODE_1000BASEX,
2164136a3fa2SRussell King (Oracle) 			  lp->phylink_config.supported_interfaces);
2165136a3fa2SRussell King (Oracle) 		__set_bit(PHY_INTERFACE_MODE_SGMII,
2166136a3fa2SRussell King (Oracle) 			  lp->phylink_config.supported_interfaces);
2167136a3fa2SRussell King (Oracle) 	}
2168136a3fa2SRussell King (Oracle) 
2169f5203a3dSRobert Hancock 	lp->phylink = phylink_create(&lp->phylink_config, pdev->dev.fwnode,
2170f5203a3dSRobert Hancock 				     lp->phy_mode,
2171f5203a3dSRobert Hancock 				     &axienet_phylink_ops);
2172f5203a3dSRobert Hancock 	if (IS_ERR(lp->phylink)) {
2173f5203a3dSRobert Hancock 		ret = PTR_ERR(lp->phylink);
2174f5203a3dSRobert Hancock 		dev_err(&pdev->dev, "phylink_create error (%i)\n", ret);
217559cd4f19SRobert Hancock 		goto cleanup_mdio;
2176f5203a3dSRobert Hancock 	}
2177f5203a3dSRobert Hancock 
21788a3b7a25Sdanborkmann@iogearbox.net 	ret = register_netdev(lp->ndev);
21798a3b7a25Sdanborkmann@iogearbox.net 	if (ret) {
21808a3b7a25Sdanborkmann@iogearbox.net 		dev_err(lp->dev, "register_netdev() error (%i)\n", ret);
218159cd4f19SRobert Hancock 		goto cleanup_phylink;
21828a3b7a25Sdanborkmann@iogearbox.net 	}
21838a3b7a25Sdanborkmann@iogearbox.net 
21848a3b7a25Sdanborkmann@iogearbox.net 	return 0;
21858a3b7a25Sdanborkmann@iogearbox.net 
218659cd4f19SRobert Hancock cleanup_phylink:
218759cd4f19SRobert Hancock 	phylink_destroy(lp->phylink);
218859cd4f19SRobert Hancock 
218959cd4f19SRobert Hancock cleanup_mdio:
219059cd4f19SRobert Hancock 	if (lp->pcs_phy)
219159cd4f19SRobert Hancock 		put_device(&lp->pcs_phy->dev);
219259cd4f19SRobert Hancock 	if (lp->mii_bus)
219359cd4f19SRobert Hancock 		axienet_mdio_teardown(lp);
219459cd4f19SRobert Hancock cleanup_clk:
2195b11bfb9aSRobert Hancock 	clk_bulk_disable_unprepare(XAE_NUM_MISC_CLOCKS, lp->misc_clks);
2196b11bfb9aSRobert Hancock 	clk_disable_unprepare(lp->axi_clk);
219759cd4f19SRobert Hancock 
219846aa27dfSSrikanth Thokala free_netdev:
21998a3b7a25Sdanborkmann@iogearbox.net 	free_netdev(ndev);
220046aa27dfSSrikanth Thokala 
22018a3b7a25Sdanborkmann@iogearbox.net 	return ret;
22028a3b7a25Sdanborkmann@iogearbox.net }
22038a3b7a25Sdanborkmann@iogearbox.net 
22042be58620SSrikanth Thokala static int axienet_remove(struct platform_device *pdev)
22058a3b7a25Sdanborkmann@iogearbox.net {
220695219aa5SSrikanth Thokala 	struct net_device *ndev = platform_get_drvdata(pdev);
22078a3b7a25Sdanborkmann@iogearbox.net 	struct axienet_local *lp = netdev_priv(ndev);
22088a3b7a25Sdanborkmann@iogearbox.net 
22098a3b7a25Sdanborkmann@iogearbox.net 	unregister_netdev(ndev);
2210f5203a3dSRobert Hancock 
2211f5203a3dSRobert Hancock 	if (lp->phylink)
2212f5203a3dSRobert Hancock 		phylink_destroy(lp->phylink);
2213f5203a3dSRobert Hancock 
22141a025560SRobert Hancock 	if (lp->pcs_phy)
22151a025560SRobert Hancock 		put_device(&lp->pcs_phy->dev);
22161a025560SRobert Hancock 
2217e7a3d116SRobert Hancock 	axienet_mdio_teardown(lp);
22188a3b7a25Sdanborkmann@iogearbox.net 
2219b11bfb9aSRobert Hancock 	clk_bulk_disable_unprepare(XAE_NUM_MISC_CLOCKS, lp->misc_clks);
2220b11bfb9aSRobert Hancock 	clk_disable_unprepare(lp->axi_clk);
222109a0354cSRobert Hancock 
22228a3b7a25Sdanborkmann@iogearbox.net 	free_netdev(ndev);
22238a3b7a25Sdanborkmann@iogearbox.net 
22248a3b7a25Sdanborkmann@iogearbox.net 	return 0;
22258a3b7a25Sdanborkmann@iogearbox.net }
22268a3b7a25Sdanborkmann@iogearbox.net 
222770c50265SRobert Hancock static void axienet_shutdown(struct platform_device *pdev)
222870c50265SRobert Hancock {
222970c50265SRobert Hancock 	struct net_device *ndev = platform_get_drvdata(pdev);
223070c50265SRobert Hancock 
223170c50265SRobert Hancock 	rtnl_lock();
223270c50265SRobert Hancock 	netif_device_detach(ndev);
223370c50265SRobert Hancock 
223470c50265SRobert Hancock 	if (netif_running(ndev))
223570c50265SRobert Hancock 		dev_close(ndev);
223670c50265SRobert Hancock 
223770c50265SRobert Hancock 	rtnl_unlock();
223870c50265SRobert Hancock }
223970c50265SRobert Hancock 
2240a3de357bSAndy Chiu static int axienet_suspend(struct device *dev)
2241a3de357bSAndy Chiu {
2242a3de357bSAndy Chiu 	struct net_device *ndev = dev_get_drvdata(dev);
2243a3de357bSAndy Chiu 
2244a3de357bSAndy Chiu 	if (!netif_running(ndev))
2245a3de357bSAndy Chiu 		return 0;
2246a3de357bSAndy Chiu 
2247a3de357bSAndy Chiu 	netif_device_detach(ndev);
2248a3de357bSAndy Chiu 
2249a3de357bSAndy Chiu 	rtnl_lock();
2250a3de357bSAndy Chiu 	axienet_stop(ndev);
2251a3de357bSAndy Chiu 	rtnl_unlock();
2252a3de357bSAndy Chiu 
2253a3de357bSAndy Chiu 	return 0;
2254a3de357bSAndy Chiu }
2255a3de357bSAndy Chiu 
2256a3de357bSAndy Chiu static int axienet_resume(struct device *dev)
2257a3de357bSAndy Chiu {
2258a3de357bSAndy Chiu 	struct net_device *ndev = dev_get_drvdata(dev);
2259a3de357bSAndy Chiu 
2260a3de357bSAndy Chiu 	if (!netif_running(ndev))
2261a3de357bSAndy Chiu 		return 0;
2262a3de357bSAndy Chiu 
2263a3de357bSAndy Chiu 	rtnl_lock();
2264a3de357bSAndy Chiu 	axienet_open(ndev);
2265a3de357bSAndy Chiu 	rtnl_unlock();
2266a3de357bSAndy Chiu 
2267a3de357bSAndy Chiu 	netif_device_attach(ndev);
2268a3de357bSAndy Chiu 
2269a3de357bSAndy Chiu 	return 0;
2270a3de357bSAndy Chiu }
2271a3de357bSAndy Chiu 
2272a3de357bSAndy Chiu static DEFINE_SIMPLE_DEV_PM_OPS(axienet_pm_ops,
2273a3de357bSAndy Chiu 				axienet_suspend, axienet_resume);
2274a3de357bSAndy Chiu 
22752be58620SSrikanth Thokala static struct platform_driver axienet_driver = {
22762be58620SSrikanth Thokala 	.probe = axienet_probe,
22772be58620SSrikanth Thokala 	.remove = axienet_remove,
227870c50265SRobert Hancock 	.shutdown = axienet_shutdown,
22798a3b7a25Sdanborkmann@iogearbox.net 	.driver = {
22808a3b7a25Sdanborkmann@iogearbox.net 		 .name = "xilinx_axienet",
2281a3de357bSAndy Chiu 		 .pm = &axienet_pm_ops,
22828a3b7a25Sdanborkmann@iogearbox.net 		 .of_match_table = axienet_of_match,
22838a3b7a25Sdanborkmann@iogearbox.net 	},
22848a3b7a25Sdanborkmann@iogearbox.net };
22858a3b7a25Sdanborkmann@iogearbox.net 
22862be58620SSrikanth Thokala module_platform_driver(axienet_driver);
22878a3b7a25Sdanborkmann@iogearbox.net 
22888a3b7a25Sdanborkmann@iogearbox.net MODULE_DESCRIPTION("Xilinx Axi Ethernet driver");
22898a3b7a25Sdanborkmann@iogearbox.net MODULE_AUTHOR("Xilinx");
22908a3b7a25Sdanborkmann@iogearbox.net MODULE_LICENSE("GPL");
2291