// SPDX-License-Identifier: GPL-2.0-only
/*
 * Driver for Xilinx TEMAC Ethernet device
 *
 * Copyright (c) 2008 Nissin Systems Co., Ltd.,  Yoshio Kashiwagi
 * Copyright (c) 2005-2008 DLA Systems,  David H. Lynch Jr. <dhlii@dlasys.net>
 * Copyright (c) 2008-2009 Secret Lab Technologies Ltd.
 *
 * This is a driver for the Xilinx ll_temac ipcore which is often used
 * in the Virtex and Spartan series of chips.
 *
 * Notes:
 * - The ll_temac hardware uses indirect access for many of the TEMAC
 *   registers, including the MDIO bus.  However, indirect access to MDIO
 *   registers takes considerably more clock cycles than to TEMAC registers.
 *   MDIO accesses are long, so threads doing them should probably sleep
 *   rather than busywait.  However, since only one indirect access can be
 *   in progress at any given time, that means that *all* indirect accesses
 *   could end up sleeping (to wait for an MDIO access to complete).
 *   Fortunately none of the indirect accesses are on the 'hot' path for tx
 *   or rx, so this should be okay.
 *
 * TODO:
 * - Factor out locallink DMA code into separate driver
 * - Fix support for hardware checksumming.
 * - Testing.  Lots and lots of testing.
 *
 */

#include <linux/delay.h>
#include <linux/etherdevice.h>
#include <linux/mii.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/netdevice.h>
#include <linux/if_ether.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/platform_device.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/tcp.h>      /* needed for sizeof(tcphdr) */
#include <linux/udp.h>      /* needed for sizeof(udphdr) */
#include <linux/phy.h>
#include <linux/in.h>
#include <linux/io.h>
#include <linux/ip.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <linux/dma-mapping.h>
#include <linux/processor.h>
#include <linux/platform_data/xilinx-ll-temac.h>

#include "ll_temac.h"

/* Descriptor defines for Tx and Rx DMA */
#define TX_BD_NUM_DEFAULT		64
#define RX_BD_NUM_DEFAULT		1024
#define TX_BD_NUM_MAX			4096
#define RX_BD_NUM_MAX			4096
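/* The _DEFAULT sizes can be overridden at run time, up to the _MAX
 * values, through the driver's ethtool ring parameter support.
 */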

/* ---------------------------------------------------------------------
 * Low level register access functions
 */

static u32 _temac_ior_be(struct temac_local *lp, int offset)
{
	return ioread32be(lp->regs + offset);
}

static void _temac_iow_be(struct temac_local *lp, int offset, u32 value)
{
	return iowrite32be(value, lp->regs + offset);
}

static u32 _temac_ior_le(struct temac_local *lp, int offset)
{
	return ioread32(lp->regs + offset);
}

static void _temac_iow_le(struct temac_local *lp, int offset, u32 value)
{
	return iowrite32(value, lp->regs + offset);
}

static bool hard_acs_rdy(struct temac_local *lp)
{
	return temac_ior(lp, XTE_RDY0_OFFSET) & XTE_RDY0_HARD_ACS_RDY_MASK;
}

static bool hard_acs_rdy_or_timeout(struct temac_local *lp, ktime_t timeout)
{
	ktime_t cur = ktime_get();

	return hard_acs_rdy(lp) || ktime_after(cur, timeout);
}

/* Poll for maximum 20 ms.  This is similar to the 2 jiffies @ 100 Hz
 * that was used before, and should cover MDIO bus speed down to 3200
 * Hz.
 */
#define HARD_ACS_RDY_POLL_NS (20 * NSEC_PER_MSEC)
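/* An MDIO transaction is 64 bit times (32 bits of preamble plus a
 * 32-bit frame), and 64 bit times at 3200 Hz works out to the 20 ms
 * polled for here.
 */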

/*
 * temac_indirect_busywait - Wait for current indirect register access
 * to complete.
 */
int temac_indirect_busywait(struct temac_local *lp)
{
	ktime_t timeout = ktime_add_ns(ktime_get(), HARD_ACS_RDY_POLL_NS);

	spin_until_cond(hard_acs_rdy_or_timeout(lp, timeout));
	if (WARN_ON(!hard_acs_rdy(lp)))
		return -ETIMEDOUT;

	return 0;
}

/*
 * temac_indirect_in32 - Indirect register read access.  This function
 * must be called without lp->indirect_lock being held.
 */
u32 temac_indirect_in32(struct temac_local *lp, int reg)
{
	unsigned long flags;
	int val;

	spin_lock_irqsave(lp->indirect_lock, flags);
	val = temac_indirect_in32_locked(lp, reg);
	spin_unlock_irqrestore(lp->indirect_lock, flags);
	return val;
}

/*
 * temac_indirect_in32_locked - Indirect register read access.  This
 * function must be called with lp->indirect_lock being held.  Use
 * this together with spin_lock_irqsave/spin_lock_irqrestore to avoid
 * repeated lock/unlock and to ensure uninterrupted access to indirect
 * registers.
 */
u32 temac_indirect_in32_locked(struct temac_local *lp, int reg)
{
	/* This initial wait should normally not spin, as we always
	 * try to wait for indirect access to complete before
	 * releasing the indirect_lock.
	 */
	if (WARN_ON(temac_indirect_busywait(lp)))
		return -ETIMEDOUT;
	/* Initiate read from indirect register */
	temac_iow(lp, XTE_CTL0_OFFSET, reg);
	/* Wait for indirect register access to complete.  We really
	 * should not see timeouts, and could even end up causing
	 * problems for following indirect accesses, so let's make a
	 * bit of WARN noise.
	 */
	if (WARN_ON(temac_indirect_busywait(lp)))
		return -ETIMEDOUT;
	/* Value is ready now */
	return temac_ior(lp, XTE_LSW0_OFFSET);
}

/*
 * temac_indirect_out32 - Indirect register write access.  This function
 * must be called without lp->indirect_lock being held.
 */
void temac_indirect_out32(struct temac_local *lp, int reg, u32 value)
{
	unsigned long flags;

	spin_lock_irqsave(lp->indirect_lock, flags);
	temac_indirect_out32_locked(lp, reg, value);
	spin_unlock_irqrestore(lp->indirect_lock, flags);
}

/*
 * temac_indirect_out32_locked - Indirect register write access.  This
 * function must be called with lp->indirect_lock being held.  Use
 * this together with spin_lock_irqsave/spin_lock_irqrestore to avoid
 * repeated lock/unlock and to ensure uninterrupted access to indirect
 * registers.
 */
void temac_indirect_out32_locked(struct temac_local *lp, int reg, u32 value)
{
	/* As in temac_indirect_in32_locked(), we should normally not
	 * spin here.  And if it happens, we actually end up silently
	 * ignoring the write request.  Ouch.
	 */
	if (WARN_ON(temac_indirect_busywait(lp)))
		return;
	/* Initiate write to indirect register */
	temac_iow(lp, XTE_LSW0_OFFSET, value);
	temac_iow(lp, XTE_CTL0_OFFSET, CNTLREG_WRITE_ENABLE_MASK | reg);
	/* As in temac_indirect_in32_locked(), we should not see timeouts
	 * here.  And if it happens, we continue before the write has
	 * completed.  Not good.
	 */
	WARN_ON(temac_indirect_busywait(lp));
}
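
/* A typical sequence with the _locked variants groups several indirect
 * accesses under a single lock hold, as done in e.g.
 * temac_do_set_mac_address() below (uaw0/uaw1 here standing in for the
 * values to be written):
 *
 *	unsigned long flags;
 *
 *	spin_lock_irqsave(lp->indirect_lock, flags);
 *	temac_indirect_out32_locked(lp, XTE_UAW0_OFFSET, uaw0);
 *	temac_indirect_out32_locked(lp, XTE_UAW1_OFFSET, uaw1);
 *	spin_unlock_irqrestore(lp->indirect_lock, flags);
 */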

/*
 * temac_dma_in32_* - Memory mapped DMA read, these functions expect a
 * register input that is based on DCR word addresses which are then
 * converted to memory mapped byte addresses.  To be assigned to
 * lp->dma_in32.
 */
static u32 temac_dma_in32_be(struct temac_local *lp, int reg)
{
	return ioread32be(lp->sdma_regs + (reg << 2));
}

static u32 temac_dma_in32_le(struct temac_local *lp, int reg)
{
	return ioread32(lp->sdma_regs + (reg << 2));
}

/*
 * temac_dma_out32_* - Memory mapped DMA write, these functions expect
 * a register input that is based on DCR word addresses which are then
 * converted to memory mapped byte addresses.  To be assigned to
 * lp->dma_out32.
 */
static void temac_dma_out32_be(struct temac_local *lp, int reg, u32 value)
{
	iowrite32be(value, lp->sdma_regs + (reg << 2));
}

static void temac_dma_out32_le(struct temac_local *lp, int reg, u32 value)
{
	iowrite32(value, lp->sdma_regs + (reg << 2));
}
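
/* In all four helpers the DCR word address is converted to a byte
 * offset by the (reg << 2) shift, e.g. word register 4 lands at byte
 * offset 16 from lp->sdma_regs.
 */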

/* DMA register access functions can be DCR based or memory mapped.
 * The PowerPC 440 is DCR based, the PowerPC 405 and MicroBlaze are both
 * memory mapped.
 */
#ifdef CONFIG_PPC_DCR

/*
 * temac_dma_dcr_in - DCR based DMA read
 */
static u32 temac_dma_dcr_in(struct temac_local *lp, int reg)
{
	return dcr_read(lp->sdma_dcrs, reg);
}

/*
 * temac_dma_dcr_out - DCR based DMA write
 */
static void temac_dma_dcr_out(struct temac_local *lp, int reg, u32 value)
{
	dcr_write(lp->sdma_dcrs, reg, value);
}

/*
 * temac_dcr_setup - If the DMA is DCR based, then set up the address and
 * I/O functions
 */
static int temac_dcr_setup(struct temac_local *lp, struct platform_device *op,
			   struct device_node *np)
{
	unsigned int dcrs;

	/* setup the dcr address mapping if it's in the device tree */

	dcrs = dcr_resource_start(np, 0);
	if (dcrs != 0) {
		lp->sdma_dcrs = dcr_map(np, dcrs, dcr_resource_len(np, 0));
		lp->dma_in = temac_dma_dcr_in;
		lp->dma_out = temac_dma_dcr_out;
		dev_dbg(&op->dev, "DCR base: %x\n", dcrs);
		return 0;
	}
	/* no DCR in the device tree, indicate a failure */
	return -1;
}

#else

/*
 * temac_dcr_setup - This is a stub for when DCR is not supported,
 * such as with MicroBlaze and x86
 */
static int temac_dcr_setup(struct temac_local *lp, struct platform_device *op,
			   struct device_node *np)
{
	return -1;
}

#endif
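
/* dcr_resource_start()/dcr_resource_len() read the "dcr-reg" property
 * of the DMA node in the device tree; a node on a DCR based system
 * could carry e.g. (values purely illustrative):
 *
 *	dcr-reg = <0x80 0x10>;
 */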

/*
 * temac_dma_bd_release - Release buffer descriptor rings
 */
static void temac_dma_bd_release(struct net_device *ndev)
{
	struct temac_local *lp = netdev_priv(ndev);
	int i;

	/* Reset Local Link (DMA) */
	lp->dma_out(lp, DMA_CONTROL_REG, DMA_CONTROL_RST);

	for (i = 0; i < lp->rx_bd_num; i++) {
		if (!lp->rx_skb[i])
			break;
		dma_unmap_single(ndev->dev.parent, lp->rx_bd_v[i].phys,
				 XTE_MAX_JUMBO_FRAME_SIZE, DMA_FROM_DEVICE);
		dev_kfree_skb(lp->rx_skb[i]);
	}
	if (lp->rx_bd_v)
		dma_free_coherent(ndev->dev.parent,
				  sizeof(*lp->rx_bd_v) * lp->rx_bd_num,
				  lp->rx_bd_v, lp->rx_bd_p);
	if (lp->tx_bd_v)
		dma_free_coherent(ndev->dev.parent,
				  sizeof(*lp->tx_bd_v) * lp->tx_bd_num,
				  lp->tx_bd_v, lp->tx_bd_p);
}

/*
 * temac_dma_bd_init - Setup buffer descriptor rings
 */
static int temac_dma_bd_init(struct net_device *ndev)
{
	struct temac_local *lp = netdev_priv(ndev);
	struct sk_buff *skb;
	dma_addr_t skb_dma_addr;
	int i;

	lp->rx_skb = devm_kcalloc(&ndev->dev, lp->rx_bd_num,
				  sizeof(*lp->rx_skb), GFP_KERNEL);
	if (!lp->rx_skb)
		goto out;

	/* allocate the tx and rx ring buffer descriptors. */
	/* returns a virtual address and a physical address. */
	lp->tx_bd_v = dma_alloc_coherent(ndev->dev.parent,
					 sizeof(*lp->tx_bd_v) * lp->tx_bd_num,
					 &lp->tx_bd_p, GFP_KERNEL);
	if (!lp->tx_bd_v)
		goto out;

	lp->rx_bd_v = dma_alloc_coherent(ndev->dev.parent,
					 sizeof(*lp->rx_bd_v) * lp->rx_bd_num,
					 &lp->rx_bd_p, GFP_KERNEL);
	if (!lp->rx_bd_v)
		goto out;

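	/* Link each descriptor's "next" pointer to the following
	 * descriptor, wrapping at the end, so the hardware sees the
	 * descriptors as circular rings.
	 */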
	for (i = 0; i < lp->tx_bd_num; i++) {
		lp->tx_bd_v[i].next = cpu_to_be32(lp->tx_bd_p
			+ sizeof(*lp->tx_bd_v) * ((i + 1) % lp->tx_bd_num));
	}

	for (i = 0; i < lp->rx_bd_num; i++) {
		lp->rx_bd_v[i].next = cpu_to_be32(lp->rx_bd_p
			+ sizeof(*lp->rx_bd_v) * ((i + 1) % lp->rx_bd_num));

		skb = __netdev_alloc_skb_ip_align(ndev,
						  XTE_MAX_JUMBO_FRAME_SIZE,
						  GFP_KERNEL);
		if (!skb)
			goto out;

		lp->rx_skb[i] = skb;
		/* returns physical address of skb->data */
		skb_dma_addr = dma_map_single(ndev->dev.parent, skb->data,
					      XTE_MAX_JUMBO_FRAME_SIZE,
					      DMA_FROM_DEVICE);
		if (dma_mapping_error(ndev->dev.parent, skb_dma_addr))
			goto out;
		lp->rx_bd_v[i].phys = cpu_to_be32(skb_dma_addr);
		lp->rx_bd_v[i].len = cpu_to_be32(XTE_MAX_JUMBO_FRAME_SIZE);
		lp->rx_bd_v[i].app0 = cpu_to_be32(STS_CTRL_APP0_IRQONEND);
	}

	/* Configure DMA channel (irq setup) */
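	/* The CHNL_CTRL registers carry the interrupt delay timeout in
	 * bits [31:24] and the interrupt coalesce count in bits
	 * [23:16], hence the shifts below.
	 */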
	lp->dma_out(lp, TX_CHNL_CTRL,
		    lp->coalesce_delay_tx << 24 | lp->coalesce_count_tx << 16 |
		    0x00000400 | // Use 1 Bit Wide Counters. Currently Not Used!
		    CHNL_CTRL_IRQ_EN | CHNL_CTRL_IRQ_ERR_EN |
		    CHNL_CTRL_IRQ_DLY_EN | CHNL_CTRL_IRQ_COAL_EN);
	lp->dma_out(lp, RX_CHNL_CTRL,
		    lp->coalesce_delay_rx << 24 | lp->coalesce_count_rx << 16 |
		    CHNL_CTRL_IRQ_IOE |
		    CHNL_CTRL_IRQ_EN | CHNL_CTRL_IRQ_ERR_EN |
		    CHNL_CTRL_IRQ_DLY_EN | CHNL_CTRL_IRQ_COAL_EN);

	/* Init descriptor indexes */
	lp->tx_bd_ci = 0;
	lp->tx_bd_tail = 0;
	lp->rx_bd_ci = 0;
	lp->rx_bd_tail = lp->rx_bd_num - 1;

	/* Enable RX DMA transfers */
	wmb();
	lp->dma_out(lp, RX_CURDESC_PTR,  lp->rx_bd_p);
	lp->dma_out(lp, RX_TAILDESC_PTR,
		    lp->rx_bd_p + (sizeof(*lp->rx_bd_v) * lp->rx_bd_tail));

	/* Prepare for TX DMA transfer */
	lp->dma_out(lp, TX_CURDESC_PTR, lp->tx_bd_p);

	return 0;

out:
	temac_dma_bd_release(ndev);
	return -ENOMEM;
}

/* ---------------------------------------------------------------------
 * net_device_ops
 */

static void temac_do_set_mac_address(struct net_device *ndev)
{
	struct temac_local *lp = netdev_priv(ndev);
	unsigned long flags;

	/* Set up the unicast MAC address filter with the new MAC address */
	spin_lock_irqsave(lp->indirect_lock, flags);
	temac_indirect_out32_locked(lp, XTE_UAW0_OFFSET,
				    (ndev->dev_addr[0]) |
				    (ndev->dev_addr[1] << 8) |
				    (ndev->dev_addr[2] << 16) |
				    (ndev->dev_addr[3] << 24));
	/* There are reserved bits in EUAW1, so don't affect them.
	 * Set MAC bits [47:32] in EUAW1.
	 */
	temac_indirect_out32_locked(lp, XTE_UAW1_OFFSET,
				    (ndev->dev_addr[4] & 0x000000ff) |
				    (ndev->dev_addr[5] << 8));
	spin_unlock_irqrestore(lp->indirect_lock, flags);
}

static int temac_init_mac_address(struct net_device *ndev, const void *address)
{
	eth_hw_addr_set(ndev, address);
	if (!is_valid_ether_addr(ndev->dev_addr))
		eth_hw_addr_random(ndev);
	temac_do_set_mac_address(ndev);
	return 0;
}

static int temac_set_mac_address(struct net_device *ndev, void *p)
{
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;
	eth_hw_addr_set(ndev, addr->sa_data);
	temac_do_set_mac_address(ndev);
	return 0;
}

static void temac_set_multicast_list(struct net_device *ndev)
{
	struct temac_local *lp = netdev_priv(ndev);
	u32 multi_addr_msw, multi_addr_lsw;
	int i = 0;
	unsigned long flags;
	bool promisc_mode_disabled = false;

	if (ndev->flags & (IFF_PROMISC | IFF_ALLMULTI) ||
	    (netdev_mc_count(ndev) > MULTICAST_CAM_TABLE_NUM)) {
		temac_indirect_out32(lp, XTE_AFM_OFFSET, XTE_AFM_EPPRM_MASK);
		dev_info(&ndev->dev, "Promiscuous mode enabled.\n");
		return;
	}

	spin_lock_irqsave(lp->indirect_lock, flags);

	if (!netdev_mc_empty(ndev)) {
		struct netdev_hw_addr *ha;

		netdev_for_each_mc_addr(ha, ndev) {
			if (WARN_ON(i >= MULTICAST_CAM_TABLE_NUM))
				break;
			multi_addr_msw = ((ha->addr[3] << 24) |
					  (ha->addr[2] << 16) |
					  (ha->addr[1] << 8) |
					  (ha->addr[0]));
			temac_indirect_out32_locked(lp, XTE_MAW0_OFFSET,
						    multi_addr_msw);
			multi_addr_lsw = ((ha->addr[5] << 8) |
					  (ha->addr[4]) | (i << 16));
			temac_indirect_out32_locked(lp, XTE_MAW1_OFFSET,
						    multi_addr_lsw);
			i++;
		}
	}

	/* Clear all or remaining/unused address table entries */
	while (i < MULTICAST_CAM_TABLE_NUM) {
		temac_indirect_out32_locked(lp, XTE_MAW0_OFFSET, 0);
		temac_indirect_out32_locked(lp, XTE_MAW1_OFFSET, i << 16);
		i++;
	}

	/* Enable address filter block if currently disabled */
	if (temac_indirect_in32_locked(lp, XTE_AFM_OFFSET)
	    & XTE_AFM_EPPRM_MASK) {
		temac_indirect_out32_locked(lp, XTE_AFM_OFFSET, 0);
		promisc_mode_disabled = true;
	}

	spin_unlock_irqrestore(lp->indirect_lock, flags);

	if (promisc_mode_disabled)
		dev_info(&ndev->dev, "Promiscuous mode disabled.\n");
}

static struct temac_option {
	int flg;
	u32 opt;
	u32 reg;
	u32 m_or;
	u32 m_and;
} temac_options[] = {
	/* Turn on jumbo packet support for both Rx and Tx */
	{
		.opt = XTE_OPTION_JUMBO,
		.reg = XTE_TXC_OFFSET,
		.m_or = XTE_TXC_TXJMBO_MASK,
	},
	{
		.opt = XTE_OPTION_JUMBO,
		.reg = XTE_RXC1_OFFSET,
		.m_or = XTE_RXC1_RXJMBO_MASK,
	},
	/* Turn on VLAN packet support for both Rx and Tx */
	{
		.opt = XTE_OPTION_VLAN,
		.reg = XTE_TXC_OFFSET,
		.m_or = XTE_TXC_TXVLAN_MASK,
	},
	{
		.opt = XTE_OPTION_VLAN,
		.reg = XTE_RXC1_OFFSET,
		.m_or = XTE_RXC1_RXVLAN_MASK,
	},
	/* Turn on FCS stripping on receive packets */
	{
		.opt = XTE_OPTION_FCS_STRIP,
		.reg = XTE_RXC1_OFFSET,
		.m_or = XTE_RXC1_RXFCS_MASK,
	},
	/* Turn on FCS insertion on transmit packets */
	{
		.opt = XTE_OPTION_FCS_INSERT,
		.reg = XTE_TXC_OFFSET,
		.m_or = XTE_TXC_TXFCS_MASK,
	},
	/* Turn on length/type field checking on receive packets */
	{
		.opt = XTE_OPTION_LENTYPE_ERR,
		.reg = XTE_RXC1_OFFSET,
		.m_or = XTE_RXC1_RXLT_MASK,
	},
	/* Turn on flow control */
	{
		.opt = XTE_OPTION_FLOW_CONTROL,
		.reg = XTE_FCC_OFFSET,
		.m_or = XTE_FCC_RXFLO_MASK,
	},
	/* Turn on flow control */
	{
		.opt = XTE_OPTION_FLOW_CONTROL,
		.reg = XTE_FCC_OFFSET,
		.m_or = XTE_FCC_TXFLO_MASK,
	},
	/* Turn on promiscuous frame filtering (all frames are received) */
	{
		.opt = XTE_OPTION_PROMISC,
		.reg = XTE_AFM_OFFSET,
		.m_or = XTE_AFM_EPPRM_MASK,
	},
	/* Enable transmitter if not already enabled */
	{
		.opt = XTE_OPTION_TXEN,
		.reg = XTE_TXC_OFFSET,
		.m_or = XTE_TXC_TXEN_MASK,
	},
	/* Enable receiver? */
	{
		.opt = XTE_OPTION_RXEN,
		.reg = XTE_RXC1_OFFSET,
		.m_or = XTE_RXC1_RXEN_MASK,
	},
	{}
};

/*
 * temac_setoptions
 */
static u32 temac_setoptions(struct net_device *ndev, u32 options)
{
	struct temac_local *lp = netdev_priv(ndev);
	struct temac_option *tp = &temac_options[0];
	int reg;
	unsigned long flags;

	spin_lock_irqsave(lp->indirect_lock, flags);
	while (tp->opt) {
		reg = temac_indirect_in32_locked(lp, tp->reg) & ~tp->m_or;
		if (options & tp->opt) {
			reg |= tp->m_or;
			temac_indirect_out32_locked(lp, tp->reg, reg);
		}
		tp++;
	}
	spin_unlock_irqrestore(lp->indirect_lock, flags);
	lp->options |= options;

	return 0;
}

/* Initialize temac */
static void temac_device_reset(struct net_device *ndev)
{
	struct temac_local *lp = netdev_priv(ndev);
	u32 timeout;
	u32 val;
	unsigned long flags;

	/* Perform a software reset */

	/* 0x300 host enable bit ? */
	/* reset PHY through control register ?:1 */

	dev_dbg(&ndev->dev, "%s()\n", __func__);

	/* Reset the receiver and wait for it to finish reset */
	temac_indirect_out32(lp, XTE_RXC1_OFFSET, XTE_RXC1_RXRST_MASK);
	timeout = 1000;
	while (temac_indirect_in32(lp, XTE_RXC1_OFFSET) & XTE_RXC1_RXRST_MASK) {
		udelay(1);
		if (--timeout == 0) {
			dev_err(&ndev->dev,
				"%s RX reset timeout!!\n", __func__);
			break;
		}
	}

	/* Reset the transmitter and wait for it to finish reset */
	temac_indirect_out32(lp, XTE_TXC_OFFSET, XTE_TXC_TXRST_MASK);
	timeout = 1000;
	while (temac_indirect_in32(lp, XTE_TXC_OFFSET) & XTE_TXC_TXRST_MASK) {
		udelay(1);
		if (--timeout == 0) {
			dev_err(&ndev->dev,
				"%s TX reset timeout!!\n", __func__);
			break;
		}
	}

	/* Disable the receiver */
	spin_lock_irqsave(lp->indirect_lock, flags);
	val = temac_indirect_in32_locked(lp, XTE_RXC1_OFFSET);
	temac_indirect_out32_locked(lp, XTE_RXC1_OFFSET,
				    val & ~XTE_RXC1_RXEN_MASK);
	spin_unlock_irqrestore(lp->indirect_lock, flags);

	/* Reset Local Link (DMA) */
	lp->dma_out(lp, DMA_CONTROL_REG, DMA_CONTROL_RST);
	timeout = 1000;
	while (lp->dma_in(lp, DMA_CONTROL_REG) & DMA_CONTROL_RST) {
		udelay(1);
		if (--timeout == 0) {
			dev_err(&ndev->dev,
				"%s DMA reset timeout!!\n", __func__);
			break;
		}
	}
	lp->dma_out(lp, DMA_CONTROL_REG, DMA_TAIL_ENABLE);

	if (temac_dma_bd_init(ndev)) {
		dev_err(&ndev->dev,
			"%s descriptor allocation failed\n", __func__);
	}

	spin_lock_irqsave(lp->indirect_lock, flags);
	temac_indirect_out32_locked(lp, XTE_RXC0_OFFSET, 0);
	temac_indirect_out32_locked(lp, XTE_RXC1_OFFSET, 0);
	temac_indirect_out32_locked(lp, XTE_TXC_OFFSET, 0);
	temac_indirect_out32_locked(lp, XTE_FCC_OFFSET, XTE_FCC_RXFLO_MASK);
	spin_unlock_irqrestore(lp->indirect_lock, flags);

	/* Sync default options with HW
	 * but leave receiver and transmitter disabled.
	 */
	temac_setoptions(ndev,
			 lp->options & ~(XTE_OPTION_TXEN | XTE_OPTION_RXEN));

	temac_do_set_mac_address(ndev);

	/* Set address filter table */
	temac_set_multicast_list(ndev);
	if (temac_setoptions(ndev, lp->options))
		dev_err(&ndev->dev, "Error setting TEMAC options\n");

	/* Init Driver variable */
	netif_trans_update(ndev); /* prevent tx timeout */
}

static void temac_adjust_link(struct net_device *ndev)
{
	struct temac_local *lp = netdev_priv(ndev);
	struct phy_device *phy = ndev->phydev;
	u32 mii_speed;
	int link_state;
	unsigned long flags;

	/* hash together the state values to decide if something has changed */
	link_state = phy->speed | (phy->duplex << 1) | phy->link;

	if (lp->last_link != link_state) {
		spin_lock_irqsave(lp->indirect_lock, flags);
		mii_speed = temac_indirect_in32_locked(lp, XTE_EMCFG_OFFSET);
		mii_speed &= ~XTE_EMCFG_LINKSPD_MASK;

		switch (phy->speed) {
		case SPEED_1000:
			mii_speed |= XTE_EMCFG_LINKSPD_1000;
			break;
		case SPEED_100:
			mii_speed |= XTE_EMCFG_LINKSPD_100;
			break;
		case SPEED_10:
			mii_speed |= XTE_EMCFG_LINKSPD_10;
			break;
		}

		/* Write new speed setting out to TEMAC */
		temac_indirect_out32_locked(lp, XTE_EMCFG_OFFSET, mii_speed);
		spin_unlock_irqrestore(lp->indirect_lock, flags);

		lp->last_link = link_state;
		phy_print_status(phy);
	}
}

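/* The skb pointer for an in-flight TX buffer is stashed in the app3/app4
 * fields of its last descriptor, so that temac_start_xmit_done() can
 * retrieve and free it; a 64-bit pointer needs both fields, while 32-bit
 * needs only app4.
 */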
#ifdef CONFIG_64BIT

static void ptr_to_txbd(void *p, struct cdmac_bd *bd)
{
	bd->app3 = (u32)(((u64)p) >> 32);
	bd->app4 = (u32)((u64)p & 0xFFFFFFFF);
}

static void *ptr_from_txbd(struct cdmac_bd *bd)
{
	return (void *)(((u64)(bd->app3) << 32) | bd->app4);
}

#else

static void ptr_to_txbd(void *p, struct cdmac_bd *bd)
{
	bd->app4 = (u32)p;
}

static void *ptr_from_txbd(struct cdmac_bd *bd)
{
	return (void *)(bd->app4);
}

#endif

static void temac_start_xmit_done(struct net_device *ndev)
{
	struct temac_local *lp = netdev_priv(ndev);
	struct cdmac_bd *cur_p;
	unsigned int stat = 0;
	struct sk_buff *skb;

	cur_p = &lp->tx_bd_v[lp->tx_bd_ci];
	stat = be32_to_cpu(cur_p->app0);

	while (stat & STS_CTRL_APP0_CMPLT) {
		/* Make sure that the other fields are read after bd is
		 * released by dma
		 */
		rmb();
		dma_unmap_single(ndev->dev.parent, be32_to_cpu(cur_p->phys),
				 be32_to_cpu(cur_p->len), DMA_TO_DEVICE);
		skb = (struct sk_buff *)ptr_from_txbd(cur_p);
		if (skb)
			dev_consume_skb_irq(skb);
		cur_p->app1 = 0;
		cur_p->app2 = 0;
		cur_p->app3 = 0;
		cur_p->app4 = 0;

		ndev->stats.tx_packets++;
		ndev->stats.tx_bytes += be32_to_cpu(cur_p->len);

		/* app0 must be visible last, as it is used to flag
		 * availability of the bd
		 */
		smp_mb();
		cur_p->app0 = 0;

		lp->tx_bd_ci++;
		if (lp->tx_bd_ci >= lp->tx_bd_num)
			lp->tx_bd_ci = 0;

		cur_p = &lp->tx_bd_v[lp->tx_bd_ci];
		stat = be32_to_cpu(cur_p->app0);
	}

	/* Matches barrier in temac_start_xmit */
	smp_mb();

	netif_wake_queue(ndev);
}

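/* Check that the num_frag + 1 descriptors starting at tx_bd_tail are
 * free.  Returns 0 when there is room, NETDEV_TX_BUSY otherwise.
 */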
static inline int temac_check_tx_bd_space(struct temac_local *lp, int num_frag)
{
	struct cdmac_bd *cur_p;
	int tail;

	tail = lp->tx_bd_tail;
	cur_p = &lp->tx_bd_v[tail];

	do {
		if (cur_p->app0)
			return NETDEV_TX_BUSY;

		/* Make sure to read next bd app0 after this one */
		rmb();

		tail++;
		if (tail >= lp->tx_bd_num)
			tail = 0;

		cur_p = &lp->tx_bd_v[tail];
		num_frag--;
	} while (num_frag >= 0);

	return 0;
}

static netdev_tx_t
temac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct temac_local *lp = netdev_priv(ndev);
	struct cdmac_bd *cur_p;
	dma_addr_t tail_p, skb_dma_addr;
	int ii;
	unsigned long num_frag;
	skb_frag_t *frag;

	num_frag = skb_shinfo(skb)->nr_frags;
	frag = &skb_shinfo(skb)->frags[0];
	cur_p = &lp->tx_bd_v[lp->tx_bd_tail];

	if (temac_check_tx_bd_space(lp, num_frag + 1)) {
		if (netif_queue_stopped(ndev))
			return NETDEV_TX_BUSY;

		netif_stop_queue(ndev);

		/* Matches barrier in temac_start_xmit_done */
		smp_mb();

		/* Space might have just been freed - check again */
		if (temac_check_tx_bd_space(lp, num_frag + 1))
			return NETDEV_TX_BUSY;

		netif_wake_queue(ndev);
	}

	cur_p->app0 = 0;
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		unsigned int csum_start_off = skb_checksum_start_offset(skb);
		unsigned int csum_index_off = csum_start_off + skb->csum_offset;

		cur_p->app0 |= cpu_to_be32(0x000001); /* TX Checksum Enabled */
		cur_p->app1 = cpu_to_be32((csum_start_off << 16)
					  | csum_index_off);
		cur_p->app2 = 0;  /* initial checksum seed */
	}

	cur_p->app0 |= cpu_to_be32(STS_CTRL_APP0_SOP);
	skb_dma_addr = dma_map_single(ndev->dev.parent, skb->data,
				      skb_headlen(skb), DMA_TO_DEVICE);
	cur_p->len = cpu_to_be32(skb_headlen(skb));
	if (WARN_ON_ONCE(dma_mapping_error(ndev->dev.parent, skb_dma_addr))) {
		dev_kfree_skb_any(skb);
		ndev->stats.tx_dropped++;
		return NETDEV_TX_OK;
	}
	cur_p->phys = cpu_to_be32(skb_dma_addr);

	for (ii = 0; ii < num_frag; ii++) {
		if (++lp->tx_bd_tail >= lp->tx_bd_num)
			lp->tx_bd_tail = 0;

		cur_p = &lp->tx_bd_v[lp->tx_bd_tail];
		skb_dma_addr = dma_map_single(ndev->dev.parent,
					      skb_frag_address(frag),
					      skb_frag_size(frag),
					      DMA_TO_DEVICE);
		if (dma_mapping_error(ndev->dev.parent, skb_dma_addr)) {
			if (--lp->tx_bd_tail < 0)
				lp->tx_bd_tail = lp->tx_bd_num - 1;
			cur_p = &lp->tx_bd_v[lp->tx_bd_tail];
			while (--ii >= 0) {
				--frag;
				dma_unmap_single(ndev->dev.parent,
						 be32_to_cpu(cur_p->phys),
						 skb_frag_size(frag),
						 DMA_TO_DEVICE);
				if (--lp->tx_bd_tail < 0)
					lp->tx_bd_tail = lp->tx_bd_num - 1;
				cur_p = &lp->tx_bd_v[lp->tx_bd_tail];
			}
			dma_unmap_single(ndev->dev.parent,
					 be32_to_cpu(cur_p->phys),
					 skb_headlen(skb), DMA_TO_DEVICE);
			dev_kfree_skb_any(skb);
			ndev->stats.tx_dropped++;
			return NETDEV_TX_OK;
		}
		cur_p->phys = cpu_to_be32(skb_dma_addr);
		cur_p->len = cpu_to_be32(skb_frag_size(frag));
		cur_p->app0 = 0;
		frag++;
	}
	cur_p->app0 |= cpu_to_be32(STS_CTRL_APP0_EOP);

	/* Mark last fragment with skb address, so it can be consumed
	 * in temac_start_xmit_done()
	 */
	ptr_to_txbd((void *)skb, cur_p);

	tail_p = lp->tx_bd_p + sizeof(*lp->tx_bd_v) * lp->tx_bd_tail;
	lp->tx_bd_tail++;
	if (lp->tx_bd_tail >= lp->tx_bd_num)
		lp->tx_bd_tail = 0;

	skb_tx_timestamp(skb);

	/* Kick off the transfer */
	wmb();
	lp->dma_out(lp, TX_TAILDESC_PTR, tail_p); /* DMA start */

	if (temac_check_tx_bd_space(lp, MAX_SKB_FRAGS + 1))
		netif_stop_queue(ndev);

	return NETDEV_TX_OK;
}

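/* Count the buffer descriptors from rx_bd_ci through rx_bd_tail
 * (inclusive) that still hold an skb and are thus available to the
 * DMA engine.
 */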
static int ll_temac_recv_buffers_available(struct temac_local *lp)
{
	int available;

	if (!lp->rx_skb[lp->rx_bd_ci])
		return 0;
	available = 1 + lp->rx_bd_tail - lp->rx_bd_ci;
	if (available <= 0)
		available += lp->rx_bd_num;
	return available;
}

ll_temac_recv(struct net_device * ndev)968b13ad8f4SJeff Kirsher static void ll_temac_recv(struct net_device *ndev)
969b13ad8f4SJeff Kirsher {
970b13ad8f4SJeff Kirsher 	struct temac_local *lp = netdev_priv(ndev);
971b13ad8f4SJeff Kirsher 	unsigned long flags;
972770d9c67SEsben Haabendal 	int rx_bd;
973770d9c67SEsben Haabendal 	bool update_tail = false;
974b13ad8f4SJeff Kirsher 
975b13ad8f4SJeff Kirsher 	spin_lock_irqsave(&lp->rx_lock, flags);
976b13ad8f4SJeff Kirsher 
977770d9c67SEsben Haabendal 	/* Process all received buffers, passing them on network
978770d9c67SEsben Haabendal 	 * stack.  After this, the buffer descriptors will be in an
979770d9c67SEsben Haabendal 	 * un-allocated stage, where no skb is allocated for it, and
980770d9c67SEsben Haabendal 	 * they are therefore not available for TEMAC/DMA.
981770d9c67SEsben Haabendal 	 */
982770d9c67SEsben Haabendal 	do {
983770d9c67SEsben Haabendal 		struct cdmac_bd *bd = &lp->rx_bd_v[lp->rx_bd_ci];
984770d9c67SEsben Haabendal 		struct sk_buff *skb = lp->rx_skb[lp->rx_bd_ci];
985770d9c67SEsben Haabendal 		unsigned int bdstat = be32_to_cpu(bd->app0);
986770d9c67SEsben Haabendal 		int length;
987b13ad8f4SJeff Kirsher 
988770d9c67SEsben Haabendal 		/* While this should not normally happen, we can end
989770d9c67SEsben Haabendal 		 * here when GFP_ATOMIC allocations fail, and we
990770d9c67SEsben Haabendal 		 * therefore have un-allocated buffers.
991770d9c67SEsben Haabendal 		 */
992770d9c67SEsben Haabendal 		if (!skb)
993770d9c67SEsben Haabendal 			break;
994b13ad8f4SJeff Kirsher 
995770d9c67SEsben Haabendal 		/* Loop over all completed buffer descriptors */
996770d9c67SEsben Haabendal 		if (!(bdstat & STS_CTRL_APP0_CMPLT))
997770d9c67SEsben Haabendal 			break;
998b13ad8f4SJeff Kirsher 
999770d9c67SEsben Haabendal 		dma_unmap_single(ndev->dev.parent, be32_to_cpu(bd->phys),
1000a8c9bd3bSEsben Haabendal 				 XTE_MAX_JUMBO_FRAME_SIZE, DMA_FROM_DEVICE);
1001770d9c67SEsben Haabendal 		/* The buffer is not valid for DMA anymore */
1002770d9c67SEsben Haabendal 		bd->phys = 0;
1003770d9c67SEsben Haabendal 		bd->len = 0;
1004b13ad8f4SJeff Kirsher 
1005770d9c67SEsben Haabendal 		length = be32_to_cpu(bd->app4) & 0x3FFF;
1006b13ad8f4SJeff Kirsher 		skb_put(skb, length);
1007b13ad8f4SJeff Kirsher 		skb->protocol = eth_type_trans(skb, ndev);
1008b13ad8f4SJeff Kirsher 		skb_checksum_none_assert(skb);
1009b13ad8f4SJeff Kirsher 
1010b13ad8f4SJeff Kirsher 		/* if we're doing rx csum offload, set it up */
1011b13ad8f4SJeff Kirsher 		if (((lp->temac_features & TEMAC_FEATURE_RX_CSUM) != 0) &&
1012ceffc4acSJoe Perches 		    (skb->protocol == htons(ETH_P_IP)) &&
1013b13ad8f4SJeff Kirsher 		    (skb->len > 64)) {
1014fdd7454eSEsben Haabendal 			/* Convert from device endianness (be32) to cpu
10158aba73efSTom Rix 			 * endianness, and if necessary swap the bytes
1016fdd7454eSEsben Haabendal 			 * (back) for proper IP checksum byte order
1017fdd7454eSEsben Haabendal 			 * (be16).
1018fdd7454eSEsben Haabendal 			 */
1019770d9c67SEsben Haabendal 			skb->csum = htons(be32_to_cpu(bd->app3) & 0xFFFF);
1020b13ad8f4SJeff Kirsher 			skb->ip_summed = CHECKSUM_COMPLETE;
1021b13ad8f4SJeff Kirsher 		}
1022b13ad8f4SJeff Kirsher 
1023b13ad8f4SJeff Kirsher 		if (!skb_defer_rx_timestamp(skb))
1024b13ad8f4SJeff Kirsher 			netif_rx(skb);
1025770d9c67SEsben Haabendal 		/* The skb buffer is now owned by network stack above */
1026770d9c67SEsben Haabendal 		lp->rx_skb[lp->rx_bd_ci] = NULL;
1027b13ad8f4SJeff Kirsher 
1028b13ad8f4SJeff Kirsher 		ndev->stats.rx_packets++;
1029b13ad8f4SJeff Kirsher 		ndev->stats.rx_bytes += length;
1030b13ad8f4SJeff Kirsher 
1031770d9c67SEsben Haabendal 		rx_bd = lp->rx_bd_ci;
1032f7b261bfSEsben Haabendal 		if (++lp->rx_bd_ci >= lp->rx_bd_num)
1033770d9c67SEsben Haabendal 			lp->rx_bd_ci = 0;
1034770d9c67SEsben Haabendal 	} while (rx_bd != lp->rx_bd_tail);
1035770d9c67SEsben Haabendal 
10361d63b8d6SEsben Haabendal 	/* DMA operations will halt when the last buffer descriptor is
10371d63b8d6SEsben Haabendal 	 * processed (i.e. the one pointed to by RX_TAILDESC_PTR).
10381d63b8d6SEsben Haabendal 	 * When that happens, no more interrupt events will be
10391d63b8d6SEsben Haabendal 	 * generated.  No IRQ_COAL or IRQ_DLY, and not even an
10401d63b8d6SEsben Haabendal 	 * IRQ_ERR.  To avoid stalling, we schedule a delayed work
10411d63b8d6SEsben Haabendal 	 * when there is a potential risk of that happening.  The work
10421d63b8d6SEsben Haabendal 	 * will call this function, and thus re-schedule itself until
10431d63b8d6SEsben Haabendal 	 * enough buffers are available again.
10441d63b8d6SEsben Haabendal 	 */
10451d63b8d6SEsben Haabendal 	if (ll_temac_recv_buffers_available(lp) < lp->coalesce_count_rx)
10461d63b8d6SEsben Haabendal 		schedule_delayed_work(&lp->restart_work, HZ / 1000);
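	/* Note: HZ / 1000 is integer division, so on kernels with
	 * HZ < 1000 it evaluates to 0 jiffies and the restart work is
	 * queued to run as soon as the workqueue gets CPU time; with
	 * HZ = 1000 it is a one-jiffy (~1 ms) delay.
	 */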
10471d63b8d6SEsben Haabendal 
1048770d9c67SEsben Haabendal 	/* Allocate new buffers for those buffer descriptors that were
1049770d9c67SEsben Haabendal 	 * passed to the network stack.  Note that GFP_ATOMIC allocations
1050770d9c67SEsben Haabendal 	 * can fail (e.g. when a larger burst of GFP_ATOMIC
1051770d9c67SEsben Haabendal 	 * allocations occurs), so while we try to allocate all
1052770d9c67SEsben Haabendal 	 * buffers in the same interrupt where they were processed, we
1053770d9c67SEsben Haabendal 	 * continue with what we could get in case of allocation
1054770d9c67SEsben Haabendal 	 * failure.  Allocation of remaining buffers will be retried
1055770d9c67SEsben Haabendal 	 * in following calls.
1056770d9c67SEsben Haabendal 	 */
1057770d9c67SEsben Haabendal 	while (1) {
1058770d9c67SEsben Haabendal 		struct sk_buff *skb;
1059770d9c67SEsben Haabendal 		struct cdmac_bd *bd;
1060770d9c67SEsben Haabendal 		dma_addr_t skb_dma_addr;
1061770d9c67SEsben Haabendal 
1062770d9c67SEsben Haabendal 		rx_bd = lp->rx_bd_tail + 1;
1063f7b261bfSEsben Haabendal 		if (rx_bd >= lp->rx_bd_num)
1064770d9c67SEsben Haabendal 			rx_bd = 0;
1065770d9c67SEsben Haabendal 		bd = &lp->rx_bd_v[rx_bd];
1066770d9c67SEsben Haabendal 
1067770d9c67SEsben Haabendal 		if (bd->phys)
1068770d9c67SEsben Haabendal 			break;	/* All skb's allocated */
1069770d9c67SEsben Haabendal 
1070770d9c67SEsben Haabendal 		skb = netdev_alloc_skb_ip_align(ndev, XTE_MAX_JUMBO_FRAME_SIZE);
1071770d9c67SEsben Haabendal 		if (!skb) {
1072770d9c67SEsben Haabendal 			dev_warn(&ndev->dev, "skb alloc failed\n");
1073770d9c67SEsben Haabendal 			break;
1074b13ad8f4SJeff Kirsher 		}
1075b13ad8f4SJeff Kirsher 
1076770d9c67SEsben Haabendal 		skb_dma_addr = dma_map_single(ndev->dev.parent, skb->data,
1077b13ad8f4SJeff Kirsher 					      XTE_MAX_JUMBO_FRAME_SIZE,
1078b13ad8f4SJeff Kirsher 					      DMA_FROM_DEVICE);
1079770d9c67SEsben Haabendal 		if (WARN_ON_ONCE(dma_mapping_error(ndev->dev.parent,
1080770d9c67SEsben Haabendal 						   skb_dma_addr))) {
1081770d9c67SEsben Haabendal 			dev_kfree_skb_any(skb);
1082770d9c67SEsben Haabendal 			break;
1083b13ad8f4SJeff Kirsher 		}
1084770d9c67SEsben Haabendal 
1085770d9c67SEsben Haabendal 		bd->phys = cpu_to_be32(skb_dma_addr);
1086770d9c67SEsben Haabendal 		bd->len = cpu_to_be32(XTE_MAX_JUMBO_FRAME_SIZE);
1087770d9c67SEsben Haabendal 		bd->app0 = cpu_to_be32(STS_CTRL_APP0_IRQONEND);
1088770d9c67SEsben Haabendal 		lp->rx_skb[rx_bd] = skb;
1089770d9c67SEsben Haabendal 
1090770d9c67SEsben Haabendal 		lp->rx_bd_tail = rx_bd;
1091770d9c67SEsben Haabendal 		update_tail = true;
1092770d9c67SEsben Haabendal 	}
1093770d9c67SEsben Haabendal 
1094770d9c67SEsben Haabendal 	/* Move tail pointer when buffers have been allocated */
1095770d9c67SEsben Haabendal 	if (update_tail) {
1096770d9c67SEsben Haabendal 		lp->dma_out(lp, RX_TAILDESC_PTR,
1097770d9c67SEsben Haabendal 			lp->rx_bd_p + sizeof(*lp->rx_bd_v) * lp->rx_bd_tail);
1098770d9c67SEsben Haabendal 	}
1099b13ad8f4SJeff Kirsher 
1100b13ad8f4SJeff Kirsher 	spin_unlock_irqrestore(&lp->rx_lock, flags);
1101b13ad8f4SJeff Kirsher }
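/* Illustrative sketch (not part of the driver): the RX ring is managed
 * with two cursors, rx_bd_ci (next descriptor to reap) and rx_bd_tail
 * (last descriptor armed for DMA).  A helper in the style of
 * ll_temac_recv_buffers_available(), used above and defined earlier in
 * this file, can be modelled as below; the exact inclusive/exclusive
 * conventions are assumptions for explanation only:
 *
 *	static int rx_armed_descriptors(int ci, int tail, int num)
 *	{
 *		int available = 1 + tail - ci;
 *
 *		if (available <= 0)	// tail wrapped around the ring
 *			available += num;
 *		return available;
 *	}
 */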
1102b13ad8f4SJeff Kirsher 
11031d63b8d6SEsben Haabendal /* Work function scheduled to restart DMA after a halt caused by
11041d63b8d6SEsben Haabendal  * running out of buffer descriptors.
11051d63b8d6SEsben Haabendal  */
11061d63b8d6SEsben Haabendal static void ll_temac_restart_work_func(struct work_struct *work)
11071d63b8d6SEsben Haabendal {
11081d63b8d6SEsben Haabendal 	struct temac_local *lp = container_of(work, struct temac_local,
11091d63b8d6SEsben Haabendal 					      restart_work.work);
11101d63b8d6SEsben Haabendal 	struct net_device *ndev = lp->ndev;
11111d63b8d6SEsben Haabendal 
11121d63b8d6SEsben Haabendal 	ll_temac_recv(ndev);
11131d63b8d6SEsben Haabendal }
11141d63b8d6SEsben Haabendal 
1115b13ad8f4SJeff Kirsher static irqreturn_t ll_temac_tx_irq(int irq, void *_ndev)
1116b13ad8f4SJeff Kirsher {
1117b13ad8f4SJeff Kirsher 	struct net_device *ndev = _ndev;
1118b13ad8f4SJeff Kirsher 	struct temac_local *lp = netdev_priv(ndev);
1119b13ad8f4SJeff Kirsher 	unsigned int status;
1120b13ad8f4SJeff Kirsher 
1121b13ad8f4SJeff Kirsher 	status = lp->dma_in(lp, TX_IRQ_REG);
1122b13ad8f4SJeff Kirsher 	lp->dma_out(lp, TX_IRQ_REG, status);
1123b13ad8f4SJeff Kirsher 
1124b13ad8f4SJeff Kirsher 	if (status & (IRQ_COAL | IRQ_DLY))
1125b13ad8f4SJeff Kirsher 		temac_start_xmit_done(lp->ndev);
11265db9c740SEsben Haabendal 	if (status & (IRQ_ERR | IRQ_DMAERR))
11275db9c740SEsben Haabendal 		dev_err_ratelimited(&ndev->dev,
11285db9c740SEsben Haabendal 				    "TX error 0x%x TX_CHNL_STS=0x%08x\n",
11295db9c740SEsben Haabendal 				    status, lp->dma_in(lp, TX_CHNL_STS));
1130b13ad8f4SJeff Kirsher 
1131b13ad8f4SJeff Kirsher 	return IRQ_HANDLED;
1132b13ad8f4SJeff Kirsher }
1133b13ad8f4SJeff Kirsher 
1134b13ad8f4SJeff Kirsher static irqreturn_t ll_temac_rx_irq(int irq, void *_ndev)
1135b13ad8f4SJeff Kirsher {
1136b13ad8f4SJeff Kirsher 	struct net_device *ndev = _ndev;
1137b13ad8f4SJeff Kirsher 	struct temac_local *lp = netdev_priv(ndev);
1138b13ad8f4SJeff Kirsher 	unsigned int status;
1139b13ad8f4SJeff Kirsher 
1140b13ad8f4SJeff Kirsher 	/* Read and clear the status registers */
1141b13ad8f4SJeff Kirsher 	status = lp->dma_in(lp, RX_IRQ_REG);
1142b13ad8f4SJeff Kirsher 	lp->dma_out(lp, RX_IRQ_REG, status);
1143b13ad8f4SJeff Kirsher 
1144b13ad8f4SJeff Kirsher 	if (status & (IRQ_COAL | IRQ_DLY))
1145b13ad8f4SJeff Kirsher 		ll_temac_recv(lp->ndev);
11465db9c740SEsben Haabendal 	if (status & (IRQ_ERR | IRQ_DMAERR))
11475db9c740SEsben Haabendal 		dev_err_ratelimited(&ndev->dev,
11485db9c740SEsben Haabendal 				    "RX error 0x%x RX_CHNL_STS=0x%08x\n",
11495db9c740SEsben Haabendal 				    status, lp->dma_in(lp, RX_CHNL_STS));
1150b13ad8f4SJeff Kirsher 
1151b13ad8f4SJeff Kirsher 	return IRQ_HANDLED;
1152b13ad8f4SJeff Kirsher }
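/* Both IRQ handlers above follow what is assumed to be a
 * write-one-to-clear convention in the SDMA IRQ registers: writing back
 * the value just read acknowledges exactly the events that were
 * observed, so an event arriving between the read and the write keeps
 * its bit set and raises a fresh interrupt instead of being lost.
 */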
1153b13ad8f4SJeff Kirsher 
1154b13ad8f4SJeff Kirsher static int temac_open(struct net_device *ndev)
1155b13ad8f4SJeff Kirsher {
1156b13ad8f4SJeff Kirsher 	struct temac_local *lp = netdev_priv(ndev);
115731abbe34SPhilippe Reynes 	struct phy_device *phydev = NULL;
1158b13ad8f4SJeff Kirsher 	int rc;
1159b13ad8f4SJeff Kirsher 
1160b13ad8f4SJeff Kirsher 	dev_dbg(&ndev->dev, "temac_open()\n");
1161b13ad8f4SJeff Kirsher 
1162b13ad8f4SJeff Kirsher 	if (lp->phy_node) {
116331abbe34SPhilippe Reynes 		phydev = of_phy_connect(lp->ndev, lp->phy_node,
1164b13ad8f4SJeff Kirsher 					temac_adjust_link, 0, 0);
116531abbe34SPhilippe Reynes 		if (!phydev) {
1166b13ad8f4SJeff Kirsher 			dev_err(lp->dev, "of_phy_connect() failed\n");
1167b13ad8f4SJeff Kirsher 			return -ENODEV;
1168b13ad8f4SJeff Kirsher 		}
11698425c41dSEsben Haabendal 		phy_start(phydev);
11708425c41dSEsben Haabendal 	} else if (strlen(lp->phy_name) > 0) {
11718425c41dSEsben Haabendal 		phydev = phy_connect(lp->ndev, lp->phy_name, temac_adjust_link,
11728425c41dSEsben Haabendal 				     lp->phy_interface);
11731ffc4b7cSDan Carpenter 		if (IS_ERR(phydev)) {
11748425c41dSEsben Haabendal 			dev_err(lp->dev, "phy_connect() failed\n");
11751ffc4b7cSDan Carpenter 			return PTR_ERR(phydev);
11768425c41dSEsben Haabendal 		}
117731abbe34SPhilippe Reynes 		phy_start(phydev);
1178b13ad8f4SJeff Kirsher 	}
1179b13ad8f4SJeff Kirsher 
118050ec1538SRicardo Ribalda 	temac_device_reset(ndev);
118150ec1538SRicardo Ribalda 
1182b13ad8f4SJeff Kirsher 	rc = request_irq(lp->tx_irq, ll_temac_tx_irq, 0, ndev->name, ndev);
1183b13ad8f4SJeff Kirsher 	if (rc)
1184b13ad8f4SJeff Kirsher 		goto err_tx_irq;
1185b13ad8f4SJeff Kirsher 	rc = request_irq(lp->rx_irq, ll_temac_rx_irq, 0, ndev->name, ndev);
1186b13ad8f4SJeff Kirsher 	if (rc)
1187b13ad8f4SJeff Kirsher 		goto err_rx_irq;
1188b13ad8f4SJeff Kirsher 
1189b13ad8f4SJeff Kirsher 	return 0;
1190b13ad8f4SJeff Kirsher 
1191b13ad8f4SJeff Kirsher  err_rx_irq:
1192b13ad8f4SJeff Kirsher 	free_irq(lp->tx_irq, ndev);
1193b13ad8f4SJeff Kirsher  err_tx_irq:
119431abbe34SPhilippe Reynes 	if (phydev)
119531abbe34SPhilippe Reynes 		phy_disconnect(phydev);
1196b13ad8f4SJeff Kirsher 	dev_err(lp->dev, "request_irq() failed\n");
1197b13ad8f4SJeff Kirsher 	return rc;
1198b13ad8f4SJeff Kirsher }
1199b13ad8f4SJeff Kirsher 
1200b13ad8f4SJeff Kirsher static int temac_stop(struct net_device *ndev)
1201b13ad8f4SJeff Kirsher {
1202b13ad8f4SJeff Kirsher 	struct temac_local *lp = netdev_priv(ndev);
120331abbe34SPhilippe Reynes 	struct phy_device *phydev = ndev->phydev;
1204b13ad8f4SJeff Kirsher 
1205b13ad8f4SJeff Kirsher 	dev_dbg(&ndev->dev, "temac_stop()\n");
1206b13ad8f4SJeff Kirsher 
12071d63b8d6SEsben Haabendal 	cancel_delayed_work_sync(&lp->restart_work);
12081d63b8d6SEsben Haabendal 
1209b13ad8f4SJeff Kirsher 	free_irq(lp->tx_irq, ndev);
1210b13ad8f4SJeff Kirsher 	free_irq(lp->rx_irq, ndev);
1211b13ad8f4SJeff Kirsher 
121231abbe34SPhilippe Reynes 	if (phydev)
121331abbe34SPhilippe Reynes 		phy_disconnect(phydev);
1214b13ad8f4SJeff Kirsher 
1215b13ad8f4SJeff Kirsher 	temac_dma_bd_release(ndev);
1216b13ad8f4SJeff Kirsher 
1217b13ad8f4SJeff Kirsher 	return 0;
1218b13ad8f4SJeff Kirsher }
1219b13ad8f4SJeff Kirsher 
1220b13ad8f4SJeff Kirsher #ifdef CONFIG_NET_POLL_CONTROLLER
1221b13ad8f4SJeff Kirsher static void
1222b13ad8f4SJeff Kirsher temac_poll_controller(struct net_device *ndev)
1223b13ad8f4SJeff Kirsher {
1224b13ad8f4SJeff Kirsher 	struct temac_local *lp = netdev_priv(ndev);
1225b13ad8f4SJeff Kirsher 
1226b13ad8f4SJeff Kirsher 	disable_irq(lp->tx_irq);
1227b13ad8f4SJeff Kirsher 	disable_irq(lp->rx_irq);
1228b13ad8f4SJeff Kirsher 
1229b13ad8f4SJeff Kirsher 	ll_temac_rx_irq(lp->rx_irq, ndev);
1230b13ad8f4SJeff Kirsher 	ll_temac_tx_irq(lp->tx_irq, ndev);
1231b13ad8f4SJeff Kirsher 
1232b13ad8f4SJeff Kirsher 	enable_irq(lp->tx_irq);
1233b13ad8f4SJeff Kirsher 	enable_irq(lp->rx_irq);
1234b13ad8f4SJeff Kirsher }
1235b13ad8f4SJeff Kirsher #endif
1236b13ad8f4SJeff Kirsher 
1237b13ad8f4SJeff Kirsher static const struct net_device_ops temac_netdev_ops = {
1238b13ad8f4SJeff Kirsher 	.ndo_open = temac_open,
1239b13ad8f4SJeff Kirsher 	.ndo_stop = temac_stop,
1240b13ad8f4SJeff Kirsher 	.ndo_start_xmit = temac_start_xmit,
12410127cd54SEsben Haabendal 	.ndo_set_rx_mode = temac_set_multicast_list,
124204e406dcSJiri Pirko 	.ndo_set_mac_address = temac_set_mac_address,
1243b13ad8f4SJeff Kirsher 	.ndo_validate_addr = eth_validate_addr,
1244a7605370SArnd Bergmann 	.ndo_eth_ioctl = phy_do_ioctl_running,
1245b13ad8f4SJeff Kirsher #ifdef CONFIG_NET_POLL_CONTROLLER
1246b13ad8f4SJeff Kirsher 	.ndo_poll_controller = temac_poll_controller,
1247b13ad8f4SJeff Kirsher #endif
1248b13ad8f4SJeff Kirsher };
1249b13ad8f4SJeff Kirsher 
1250b13ad8f4SJeff Kirsher /* ---------------------------------------------------------------------
1251b13ad8f4SJeff Kirsher  * SYSFS device attributes
1252b13ad8f4SJeff Kirsher  */
1253b13ad8f4SJeff Kirsher static ssize_t temac_show_llink_regs(struct device *dev,
1254b13ad8f4SJeff Kirsher 				     struct device_attribute *attr, char *buf)
1255b13ad8f4SJeff Kirsher {
1256b13ad8f4SJeff Kirsher 	struct net_device *ndev = dev_get_drvdata(dev);
1257b13ad8f4SJeff Kirsher 	struct temac_local *lp = netdev_priv(ndev);
1258b13ad8f4SJeff Kirsher 	int i, len = 0;
1259b13ad8f4SJeff Kirsher 
1260b13ad8f4SJeff Kirsher 	for (i = 0; i < 0x11; i++)
1261b13ad8f4SJeff Kirsher 		len += sprintf(buf + len, "%.8x%s", lp->dma_in(lp, i),
1262b13ad8f4SJeff Kirsher 			       (i % 8) == 7 ? "\n" : " ");
1263b13ad8f4SJeff Kirsher 	len += sprintf(buf + len, "\n");
1264b13ad8f4SJeff Kirsher 
1265b13ad8f4SJeff Kirsher 	return len;
1266b13ad8f4SJeff Kirsher }
1267b13ad8f4SJeff Kirsher 
1268b13ad8f4SJeff Kirsher static DEVICE_ATTR(llink_regs, 0440, temac_show_llink_regs, NULL);
1269b13ad8f4SJeff Kirsher 
1270b13ad8f4SJeff Kirsher static struct attribute *temac_device_attrs[] = {
1271b13ad8f4SJeff Kirsher 	&dev_attr_llink_regs.attr,
1272b13ad8f4SJeff Kirsher 	NULL,
1273b13ad8f4SJeff Kirsher };
1274b13ad8f4SJeff Kirsher 
1275b13ad8f4SJeff Kirsher static const struct attribute_group temac_attr_group = {
1276b13ad8f4SJeff Kirsher 	.attrs = temac_device_attrs,
1277b13ad8f4SJeff Kirsher };
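/* Usage sketch (the device path is an assumption that depends on how
 * the platform device is named): once the driver is bound, the
 * LocalLink DMA registers exported above can be dumped with e.g.
 *
 *	cat /sys/bus/platform/devices/<temac-device>/llink_regs
 *
 * The 0440 mode restricts reads to root and the owning group.
 */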
1278b13ad8f4SJeff Kirsher 
1279f7b261bfSEsben Haabendal /* ---------------------------------------------------------------------
1280f7b261bfSEsben Haabendal  * ethtool support
1281f7b261bfSEsben Haabendal  */
1282f7b261bfSEsben Haabendal 
128374624944SHao Chen static void
128474624944SHao Chen ll_temac_ethtools_get_ringparam(struct net_device *ndev,
128574624944SHao Chen 				struct ethtool_ringparam *ering,
128674624944SHao Chen 				struct kernel_ethtool_ringparam *kernel_ering,
128774624944SHao Chen 				struct netlink_ext_ack *extack)
1288f7b261bfSEsben Haabendal {
1289f7b261bfSEsben Haabendal 	struct temac_local *lp = netdev_priv(ndev);
1290f7b261bfSEsben Haabendal 
1291f7b261bfSEsben Haabendal 	ering->rx_max_pending = RX_BD_NUM_MAX;
1292f7b261bfSEsben Haabendal 	ering->rx_mini_max_pending = 0;
1293f7b261bfSEsben Haabendal 	ering->rx_jumbo_max_pending = 0;
1294f7b261bfSEsben Haabendal 	ering->tx_max_pending = TX_BD_NUM_MAX;
1295f7b261bfSEsben Haabendal 	ering->rx_pending = lp->rx_bd_num;
1296f7b261bfSEsben Haabendal 	ering->rx_mini_pending = 0;
1297f7b261bfSEsben Haabendal 	ering->rx_jumbo_pending = 0;
1298f7b261bfSEsben Haabendal 	ering->tx_pending = lp->tx_bd_num;
1299f7b261bfSEsben Haabendal }
1300f7b261bfSEsben Haabendal 
130174624944SHao Chen static int
130274624944SHao Chen ll_temac_ethtools_set_ringparam(struct net_device *ndev,
130374624944SHao Chen 				struct ethtool_ringparam *ering,
130474624944SHao Chen 				struct kernel_ethtool_ringparam *kernel_ering,
130574624944SHao Chen 				struct netlink_ext_ack *extack)
1306f7b261bfSEsben Haabendal {
1307f7b261bfSEsben Haabendal 	struct temac_local *lp = netdev_priv(ndev);
1308f7b261bfSEsben Haabendal 
1309f7b261bfSEsben Haabendal 	if (ering->rx_pending > RX_BD_NUM_MAX ||
1310f7b261bfSEsben Haabendal 	    ering->rx_mini_pending ||
1311f7b261bfSEsben Haabendal 	    ering->rx_jumbo_pending ||
1312f7b261bfSEsben Haabendal 	    ering->tx_pending > TX_BD_NUM_MAX)
1313f7b261bfSEsben Haabendal 		return -EINVAL;
1314f7b261bfSEsben Haabendal 
1315f7b261bfSEsben Haabendal 	if (netif_running(ndev))
1316f7b261bfSEsben Haabendal 		return -EBUSY;
1317f7b261bfSEsben Haabendal 
1318f7b261bfSEsben Haabendal 	lp->rx_bd_num = ering->rx_pending;
1319f7b261bfSEsben Haabendal 	lp->tx_bd_num = ering->tx_pending;
1320f7b261bfSEsben Haabendal 	return 0;
1321f7b261bfSEsben Haabendal }
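/* Usage sketch: ring sizes are changed through the standard ethtool
 * interface, and only while the interface is down, e.g.
 *
 *	ip link set eth0 down
 *	ethtool -G eth0 rx 128 tx 128
 *	ip link set eth0 up
 *
 * where "eth0" is a placeholder interface name.
 */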
1322f7b261bfSEsben Haabendal 
1323f3ccfda1SYufeng Mo static int
1324f3ccfda1SYufeng Mo ll_temac_ethtools_get_coalesce(struct net_device *ndev,
1325f3ccfda1SYufeng Mo 			       struct ethtool_coalesce *ec,
1326f3ccfda1SYufeng Mo 			       struct kernel_ethtool_coalesce *kernel_coal,
1327f3ccfda1SYufeng Mo 			       struct netlink_ext_ack *extack)
1328227d4617SEsben Haabendal {
1329227d4617SEsben Haabendal 	struct temac_local *lp = netdev_priv(ndev);
1330227d4617SEsben Haabendal 
1331227d4617SEsben Haabendal 	ec->rx_max_coalesced_frames = lp->coalesce_count_rx;
1332227d4617SEsben Haabendal 	ec->tx_max_coalesced_frames = lp->coalesce_count_tx;
1333227d4617SEsben Haabendal 	ec->rx_coalesce_usecs = (lp->coalesce_delay_rx * 512) / 100;
1334227d4617SEsben Haabendal 	ec->tx_coalesce_usecs = (lp->coalesce_delay_tx * 512) / 100;
1335227d4617SEsben Haabendal 	return 0;
1336227d4617SEsben Haabendal }
1337227d4617SEsben Haabendal 
1338f3ccfda1SYufeng Mo static int
1339f3ccfda1SYufeng Mo ll_temac_ethtools_set_coalesce(struct net_device *ndev,
1340f3ccfda1SYufeng Mo 			       struct ethtool_coalesce *ec,
1341f3ccfda1SYufeng Mo 			       struct kernel_ethtool_coalesce *kernel_coal,
1342f3ccfda1SYufeng Mo 			       struct netlink_ext_ack *extack)
1343227d4617SEsben Haabendal {
1344227d4617SEsben Haabendal 	struct temac_local *lp = netdev_priv(ndev);
1345227d4617SEsben Haabendal 
1346227d4617SEsben Haabendal 	if (netif_running(ndev)) {
1347227d4617SEsben Haabendal 		netdev_err(ndev,
1348227d4617SEsben Haabendal 			   "Please stop netif before applying configuration\n");
1349227d4617SEsben Haabendal 		return -EBUSY;
1350227d4617SEsben Haabendal 	}
1351227d4617SEsben Haabendal 
1352227d4617SEsben Haabendal 	if (ec->rx_max_coalesced_frames)
1353227d4617SEsben Haabendal 		lp->coalesce_count_rx = ec->rx_max_coalesced_frames;
1354227d4617SEsben Haabendal 	if (ec->tx_max_coalesced_frames)
1355227d4617SEsben Haabendal 		lp->coalesce_count_tx = ec->tx_max_coalesced_frames;
1356227d4617SEsben Haabendal 	/* With typical LocalLink clock speed of 200 MHz and
1357227d4617SEsben Haabendal 	 * C_PRESCALAR=1023, each delay count corresponds to 5.12 us.
1358227d4617SEsben Haabendal 	 */
1359227d4617SEsben Haabendal 	if (ec->rx_coalesce_usecs)
1360227d4617SEsben Haabendal 		lp->coalesce_delay_rx =
1361227d4617SEsben Haabendal 			min(255U, (ec->rx_coalesce_usecs * 100) / 512);
1362227d4617SEsben Haabendal 	if (ec->tx_coalesce_usecs)
1363227d4617SEsben Haabendal 		lp->coalesce_delay_tx =
1364227d4617SEsben Haabendal 			min(255U, (ec->tx_coalesce_usecs * 100) / 512);
1365227d4617SEsben Haabendal 
1366227d4617SEsben Haabendal 	return 0;
1367227d4617SEsben Haabendal }
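/* Worked example: at 5.12 us per delay count, a request of
 * rx_coalesce_usecs = 500 becomes (500 * 100) / 512 = 97 counts, which
 * reads back as (97 * 512) / 100 = 496 us.  Values thus round down to
 * the nearest 5.12 us step and saturate at 255 counts (~1305.6 us).
 */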
1368227d4617SEsben Haabendal 
13699eac2d4dSRicardo static const struct ethtool_ops temac_ethtool_ops = {
1370e62780e6SJakub Kicinski 	.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
1371e62780e6SJakub Kicinski 				     ETHTOOL_COALESCE_MAX_FRAMES,
137216250527SFlorian Fainelli 	.nway_reset = phy_ethtool_nway_reset,
13739eac2d4dSRicardo 	.get_link = ethtool_op_get_link,
1374f85e5ea2SRichard Cochran 	.get_ts_info = ethtool_op_get_ts_info,
1375e6dab902SPhilippe Reynes 	.get_link_ksettings = phy_ethtool_get_link_ksettings,
1376e6dab902SPhilippe Reynes 	.set_link_ksettings = phy_ethtool_set_link_ksettings,
1377f7b261bfSEsben Haabendal 	.get_ringparam	= ll_temac_ethtools_get_ringparam,
1378f7b261bfSEsben Haabendal 	.set_ringparam	= ll_temac_ethtools_set_ringparam,
1379227d4617SEsben Haabendal 	.get_coalesce	= ll_temac_ethtools_get_coalesce,
1380227d4617SEsben Haabendal 	.set_coalesce	= ll_temac_ethtools_set_coalesce,
13819eac2d4dSRicardo };
13829eac2d4dSRicardo 
13838425c41dSEsben Haabendal static int temac_probe(struct platform_device *pdev)
1384b13ad8f4SJeff Kirsher {
13858425c41dSEsben Haabendal 	struct ll_temac_platform_data *pdata = dev_get_platdata(&pdev->dev);
13868425c41dSEsben Haabendal 	struct device_node *temac_np = dev_of_node(&pdev->dev), *dma_np;
1387b13ad8f4SJeff Kirsher 	struct temac_local *lp;
1388b13ad8f4SJeff Kirsher 	struct net_device *ndev;
138983216e39SMichael Walle 	u8 addr[ETH_ALEN];
1390b13ad8f4SJeff Kirsher 	__be32 *p;
1391a3246dc4SEsben Haabendal 	bool little_endian;
139206205472STobias Klauser 	int rc = 0;
1393b13ad8f4SJeff Kirsher 
1394b13ad8f4SJeff Kirsher 	/* Init network device structure */
1395a63625d2SEsben Haabendal 	ndev = devm_alloc_etherdev(&pdev->dev, sizeof(*lp));
139641de8d4cSJoe Perches 	if (!ndev)
1397b13ad8f4SJeff Kirsher 		return -ENOMEM;
139841de8d4cSJoe Perches 
13998425c41dSEsben Haabendal 	platform_set_drvdata(pdev, ndev);
14008425c41dSEsben Haabendal 	SET_NETDEV_DEV(ndev, &pdev->dev);
140128e24c62SEric Dumazet 	ndev->features = NETIF_F_SG;
1402b13ad8f4SJeff Kirsher 	ndev->netdev_ops = &temac_netdev_ops;
14039eac2d4dSRicardo 	ndev->ethtool_ops = &temac_ethtool_ops;
1404b13ad8f4SJeff Kirsher #if 0
1405b13ad8f4SJeff Kirsher 	ndev->features |= NETIF_F_IP_CSUM; /* Can checksum TCP/UDP over IPv4. */
1406b13ad8f4SJeff Kirsher 	ndev->features |= NETIF_F_HW_CSUM; /* Can checksum all the packets. */
1407b13ad8f4SJeff Kirsher 	ndev->features |= NETIF_F_IPV6_CSUM; /* Can checksum IPV6 TCP/UDP */
1408b13ad8f4SJeff Kirsher 	ndev->features |= NETIF_F_HIGHDMA; /* Can DMA to high memory. */
1409f646968fSPatrick McHardy 	ndev->features |= NETIF_F_HW_VLAN_CTAG_TX; /* Transmit VLAN hw accel */
1410f646968fSPatrick McHardy 	ndev->features |= NETIF_F_HW_VLAN_CTAG_RX; /* Receive VLAN hw acceleration */
1411f646968fSPatrick McHardy 	ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER; /* Receive VLAN filtering */
1412b13ad8f4SJeff Kirsher 	ndev->features |= NETIF_F_VLAN_CHALLENGED; /* cannot handle VLAN pkts */
1413b13ad8f4SJeff Kirsher 	ndev->features |= NETIF_F_GSO; /* Enable software GSO. */
1414b13ad8f4SJeff Kirsher 	ndev->features |= NETIF_F_MULTI_QUEUE; /* Has multiple TX/RX queues */
1415b13ad8f4SJeff Kirsher 	ndev->features |= NETIF_F_LRO; /* large receive offload */
1416b13ad8f4SJeff Kirsher #endif
1417b13ad8f4SJeff Kirsher 
1418b13ad8f4SJeff Kirsher 	/* setup temac private info structure */
1419b13ad8f4SJeff Kirsher 	lp = netdev_priv(ndev);
1420b13ad8f4SJeff Kirsher 	lp->ndev = ndev;
14218425c41dSEsben Haabendal 	lp->dev = &pdev->dev;
1422b13ad8f4SJeff Kirsher 	lp->options = XTE_OPTION_DEFAULTS;
1423f7b261bfSEsben Haabendal 	lp->rx_bd_num = RX_BD_NUM_DEFAULT;
1424f7b261bfSEsben Haabendal 	lp->tx_bd_num = TX_BD_NUM_DEFAULT;
1425b13ad8f4SJeff Kirsher 	spin_lock_init(&lp->rx_lock);
14261d63b8d6SEsben Haabendal 	INIT_DELAYED_WORK(&lp->restart_work, ll_temac_restart_work_func);
1427f14f5c11SEsben Haabendal 
1428f14f5c11SEsben Haabendal 	/* Setup mutex for synchronization of indirect register access */
1429f14f5c11SEsben Haabendal 	if (pdata) {
14301bd33bf0SEsben Haabendal 		if (!pdata->indirect_lock) {
1431f14f5c11SEsben Haabendal 			dev_err(&pdev->dev,
14321bd33bf0SEsben Haabendal 				"indirect_lock missing in platform_data\n");
1433f14f5c11SEsben Haabendal 			return -EINVAL;
1434f14f5c11SEsben Haabendal 		}
14351bd33bf0SEsben Haabendal 		lp->indirect_lock = pdata->indirect_lock;
1436f14f5c11SEsben Haabendal 	} else {
14371bd33bf0SEsben Haabendal 		lp->indirect_lock = devm_kmalloc(&pdev->dev,
14381bd33bf0SEsben Haabendal 						 sizeof(*lp->indirect_lock),
1439f14f5c11SEsben Haabendal 						 GFP_KERNEL);
1440b352c346SXiaoke Wang 		if (!lp->indirect_lock)
1441b352c346SXiaoke Wang 			return -ENOMEM;
14421bd33bf0SEsben Haabendal 		spin_lock_init(lp->indirect_lock);
1443f14f5c11SEsben Haabendal 	}
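	/* Assumed intent of the split above: platform setups can pass a
	 * shared indirect_lock so that several devices whose indirect
	 * TEMAC register accesses must not overlap serialize on one
	 * spinlock, while OF probes get a private per-device lock.
	 */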
1444b13ad8f4SJeff Kirsher 
1445b13ad8f4SJeff Kirsher 	/* map device registers */
1446*476eed5fSClaus Hansen Ries 	lp->regs = devm_platform_ioremap_resource(pdev, 0);
1447bd69058fSWang Hai 	if (IS_ERR(lp->regs)) {
14488425c41dSEsben Haabendal 		dev_err(&pdev->dev, "could not map TEMAC registers\n");
1449c4db9934SWei Yongjun 		return PTR_ERR(lp->regs);
1450b13ad8f4SJeff Kirsher 	}
1451b13ad8f4SJeff Kirsher 
1452a3246dc4SEsben Haabendal 	/* Select register access functions with the specified
1453a3246dc4SEsben Haabendal 	 * endianness mode.  Default for OF devices is big-endian.
1454a3246dc4SEsben Haabendal 	 */
1455a3246dc4SEsben Haabendal 	little_endian = false;
14561a87e641SRob Herring 	if (temac_np)
14571a87e641SRob Herring 		little_endian = of_property_read_bool(temac_np, "little-endian");
14581a87e641SRob Herring 	else if (pdata)
1459a3246dc4SEsben Haabendal 		little_endian = pdata->reg_little_endian;
14601a87e641SRob Herring 
1461a3246dc4SEsben Haabendal 	if (little_endian) {
1462a3246dc4SEsben Haabendal 		lp->temac_ior = _temac_ior_le;
1463a3246dc4SEsben Haabendal 		lp->temac_iow = _temac_iow_le;
1464a3246dc4SEsben Haabendal 	} else {
1465a3246dc4SEsben Haabendal 		lp->temac_ior = _temac_ior_be;
1466a3246dc4SEsben Haabendal 		lp->temac_iow = _temac_iow_be;
1467a3246dc4SEsben Haabendal 	}
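	/* The accessor pairs selected above are defined earlier in this
	 * file; they plausibly reduce to thin wrappers such as the
	 * following (an assumption for illustration):
	 *
	 *	static u32 _temac_ior_be(struct temac_local *lp, int offset)
	 *	{
	 *		return ioread32be(lp->regs + offset);
	 *	}
	 *
	 * with the _le variants using ioread32()/iowrite32(), keeping
	 * all callers endian-agnostic.
	 */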
1468a3246dc4SEsben Haabendal 
1469b13ad8f4SJeff Kirsher 	/* Setup checksum offload, but default to off if not specified */
1470b13ad8f4SJeff Kirsher 	lp->temac_features = 0;
14718425c41dSEsben Haabendal 	if (temac_np) {
14728425c41dSEsben Haabendal 		p = (__be32 *)of_get_property(temac_np, "xlnx,txcsum", NULL);
14738425c41dSEsben Haabendal 		if (p && be32_to_cpu(*p))
1474b13ad8f4SJeff Kirsher 			lp->temac_features |= TEMAC_FEATURE_TX_CSUM;
14758425c41dSEsben Haabendal 		p = (__be32 *)of_get_property(temac_np, "xlnx,rxcsum", NULL);
1476b13ad8f4SJeff Kirsher 		if (p && be32_to_cpu(*p))
1477b13ad8f4SJeff Kirsher 			lp->temac_features |= TEMAC_FEATURE_RX_CSUM;
14788425c41dSEsben Haabendal 	} else if (pdata) {
14798425c41dSEsben Haabendal 		if (pdata->txcsum)
14808425c41dSEsben Haabendal 			lp->temac_features |= TEMAC_FEATURE_TX_CSUM;
14818425c41dSEsben Haabendal 		if (pdata->rxcsum)
14828425c41dSEsben Haabendal 			lp->temac_features |= TEMAC_FEATURE_RX_CSUM;
14838425c41dSEsben Haabendal 	}
14848425c41dSEsben Haabendal 	if (lp->temac_features & TEMAC_FEATURE_TX_CSUM)
14858425c41dSEsben Haabendal 		/* Can checksum TCP/UDP over IPv4. */
14868425c41dSEsben Haabendal 		ndev->features |= NETIF_F_IP_CSUM;
1487b13ad8f4SJeff Kirsher 
1488227d4617SEsben Haabendal 	/* Defaults for IRQ delay/coalescing setup.  These are
1489227d4617SEsben Haabendal 	 * configuration values, so they do not belong in the device tree.
1490227d4617SEsben Haabendal 	 */
1491227d4617SEsben Haabendal 	lp->coalesce_delay_tx = 0x10;
1492227d4617SEsben Haabendal 	lp->coalesce_count_tx = 0x22;
1493227d4617SEsben Haabendal 	lp->coalesce_delay_rx = 0xff;
1494227d4617SEsben Haabendal 	lp->coalesce_count_rx = 0x07;
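	/* By the 5.12 us per count rule noted in
	 * ll_temac_ethtools_set_coalesce() above, these defaults mean TX
	 * interrupts fire after 0x22 = 34 frames or 0x10 * 5.12 =
	 * 81.92 us, and RX interrupts after 7 frames or 0xff * 5.12 =
	 * 1305.6 us.
	 */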
1495227d4617SEsben Haabendal 
14968425c41dSEsben Haabendal 	/* Setup LocalLink DMA */
14978425c41dSEsben Haabendal 	if (temac_np) {
14988425c41dSEsben Haabendal 		/* Find the DMA node, map the DMA registers, and
14998425c41dSEsben Haabendal 		 * decode the DMA IRQs.
15008425c41dSEsben Haabendal 		 */
15018425c41dSEsben Haabendal 		dma_np = of_parse_phandle(temac_np, "llink-connected", 0);
15028425c41dSEsben Haabendal 		if (!dma_np) {
15038425c41dSEsben Haabendal 			dev_err(&pdev->dev, "could not find DMA node\n");
1504a63625d2SEsben Haabendal 			return -ENODEV;
1505b13ad8f4SJeff Kirsher 		}
1506b13ad8f4SJeff Kirsher 
15078425c41dSEsben Haabendal 		/* Setup the DMA register accesses, could be DCR or
15088425c41dSEsben Haabendal 		 * memory mapped.
15098425c41dSEsben Haabendal 		 */
15108425c41dSEsben Haabendal 		if (temac_dcr_setup(lp, pdev, dma_np)) {
1511b13ad8f4SJeff Kirsher 			/* no DCR in the device tree, try non-DCR */
15128425c41dSEsben Haabendal 			lp->sdma_regs = devm_of_iomap(&pdev->dev, dma_np, 0,
15138425c41dSEsben Haabendal 						      NULL);
15148425c41dSEsben Haabendal 			if (IS_ERR(lp->sdma_regs)) {
15158425c41dSEsben Haabendal 				dev_err(&pdev->dev,
15168425c41dSEsben Haabendal 					"unable to map DMA registers\n");
15178425c41dSEsben Haabendal 				of_node_put(dma_np);
15188425c41dSEsben Haabendal 				return PTR_ERR(lp->sdma_regs);
15198425c41dSEsben Haabendal 			}
1520be8d9d05SWang Qing 			if (of_property_read_bool(dma_np, "little-endian")) {
1521a3246dc4SEsben Haabendal 				lp->dma_in = temac_dma_in32_le;
1522a3246dc4SEsben Haabendal 				lp->dma_out = temac_dma_out32_le;
1523a3246dc4SEsben Haabendal 			} else {
1524a3246dc4SEsben Haabendal 				lp->dma_in = temac_dma_in32_be;
1525a3246dc4SEsben Haabendal 				lp->dma_out = temac_dma_out32_be;
1526a3246dc4SEsben Haabendal 			}
15278425c41dSEsben Haabendal 			dev_dbg(&pdev->dev, "MEM base: %p\n", lp->sdma_regs);
1528b13ad8f4SJeff Kirsher 		}
1529b13ad8f4SJeff Kirsher 
15308425c41dSEsben Haabendal 		/* Get DMA RX and TX interrupts */
15318425c41dSEsben Haabendal 		lp->rx_irq = irq_of_parse_and_map(dma_np, 0);
15328425c41dSEsben Haabendal 		lp->tx_irq = irq_of_parse_and_map(dma_np, 1);
1533b13ad8f4SJeff Kirsher 
15348425c41dSEsben Haabendal 		/* Finished with the DMA node; drop the reference */
15358425c41dSEsben Haabendal 		of_node_put(dma_np);
15368425c41dSEsben Haabendal 	} else if (pdata) {
15378425c41dSEsben Haabendal 		/* 2nd memory resource specifies DMA registers */
1538cc6596fcSZhang Changzhong 		lp->sdma_regs = devm_platform_ioremap_resource(pdev, 1);
1539cc6596fcSZhang Changzhong 		if (IS_ERR(lp->sdma_regs)) {
15408425c41dSEsben Haabendal 			dev_err(&pdev->dev,
15418425c41dSEsben Haabendal 				"could not map DMA registers\n");
1542cc6596fcSZhang Changzhong 			return PTR_ERR(lp->sdma_regs);
15438425c41dSEsben Haabendal 		}
1544a3246dc4SEsben Haabendal 		if (pdata->dma_little_endian) {
1545a3246dc4SEsben Haabendal 			lp->dma_in = temac_dma_in32_le;
1546a3246dc4SEsben Haabendal 			lp->dma_out = temac_dma_out32_le;
1547a3246dc4SEsben Haabendal 		} else {
1548a3246dc4SEsben Haabendal 			lp->dma_in = temac_dma_in32_be;
1549a3246dc4SEsben Haabendal 			lp->dma_out = temac_dma_out32_be;
1550a3246dc4SEsben Haabendal 		}
1551b13ad8f4SJeff Kirsher 
15528425c41dSEsben Haabendal 		/* Get DMA RX and TX interrupts */
15538425c41dSEsben Haabendal 		lp->rx_irq = platform_get_irq(pdev, 0);
15548425c41dSEsben Haabendal 		lp->tx_irq = platform_get_irq(pdev, 1);
15557e97a194SEsben Haabendal 
15567e97a194SEsben Haabendal 		/* IRQ delay/coalescing setup */
1557227d4617SEsben Haabendal 		if (pdata->tx_irq_timeout || pdata->tx_irq_count) {
1558227d4617SEsben Haabendal 			lp->coalesce_delay_tx = pdata->tx_irq_timeout;
1559227d4617SEsben Haabendal 			lp->coalesce_count_tx = pdata->tx_irq_count;
1560227d4617SEsben Haabendal 		}
15611d63b8d6SEsben Haabendal 		if (pdata->rx_irq_timeout || pdata->rx_irq_count) {
1562227d4617SEsben Haabendal 			lp->coalesce_delay_rx = pdata->rx_irq_timeout;
15631d63b8d6SEsben Haabendal 			lp->coalesce_count_rx = pdata->rx_irq_count;
15641d63b8d6SEsben Haabendal 		}
1565b13ad8f4SJeff Kirsher 	}
1566b13ad8f4SJeff Kirsher 
15678425c41dSEsben Haabendal 	/* Error handle returned DMA RX and TX interrupts */
1568ef45e840SDan Carpenter 	if (lp->rx_irq <= 0) {
1569ef45e840SDan Carpenter 		rc = lp->rx_irq ?: -EINVAL;
1570ef45e840SDan Carpenter 		return dev_err_probe(&pdev->dev, rc,
157175ae8c28SYang Yingliang 				     "could not get DMA RX irq\n");
1572ef45e840SDan Carpenter 	}
1573ef45e840SDan Carpenter 	if (lp->tx_irq <= 0) {
1574ef45e840SDan Carpenter 		rc = lp->tx_irq ?: -EINVAL;
1575ef45e840SDan Carpenter 		return dev_err_probe(&pdev->dev, rc,
157675ae8c28SYang Yingliang 				     "could not get DMA TX irq\n");
1577ef45e840SDan Carpenter 	}
1578b13ad8f4SJeff Kirsher 
15798425c41dSEsben Haabendal 	if (temac_np) {
1580b13ad8f4SJeff Kirsher 		/* Retrieve the MAC address */
158183216e39SMichael Walle 		rc = of_get_mac_address(temac_np, addr);
158283216e39SMichael Walle 		if (rc) {
15838425c41dSEsben Haabendal 			dev_err(&pdev->dev, "could not find MAC address\n");
1584a63625d2SEsben Haabendal 			return -ENODEV;
1585b13ad8f4SJeff Kirsher 		}
158606205472STobias Klauser 		temac_init_mac_address(ndev, addr);
15878425c41dSEsben Haabendal 	} else if (pdata) {
15888425c41dSEsben Haabendal 		temac_init_mac_address(ndev, pdata->mac_addr);
15898425c41dSEsben Haabendal 	}
1590b13ad8f4SJeff Kirsher 
1591a63625d2SEsben Haabendal 	rc = temac_mdio_setup(lp, pdev);
1592b13ad8f4SJeff Kirsher 	if (rc)
15938425c41dSEsben Haabendal 		dev_warn(&pdev->dev, "error registering MDIO bus\n");
1594b13ad8f4SJeff Kirsher 
15958425c41dSEsben Haabendal 	if (temac_np) {
15968425c41dSEsben Haabendal 		lp->phy_node = of_parse_phandle(temac_np, "phy-handle", 0);
1597b13ad8f4SJeff Kirsher 		if (lp->phy_node)
15988425c41dSEsben Haabendal 			dev_dbg(lp->dev, "using PHY node %pOF\n", temac_np);
15998425c41dSEsben Haabendal 	} else if (pdata) {
16008425c41dSEsben Haabendal 		snprintf(lp->phy_name, sizeof(lp->phy_name),
16018425c41dSEsben Haabendal 			 PHY_ID_FMT, lp->mii_bus->id, pdata->phy_addr);
16028425c41dSEsben Haabendal 		lp->phy_interface = pdata->phy_interface;
16038425c41dSEsben Haabendal 	}
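	/* PHY_ID_FMT expands to "<mii-bus-id>:<addr>", e.g.
	 * "xilinx_temac-0:07" (a hypothetical bus name), which
	 * phy_connect() in temac_open() parses to locate the PHY on the
	 * MDIO bus registered just above.
	 */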
1604b13ad8f4SJeff Kirsher 
1605b13ad8f4SJeff Kirsher 	/* Add the device attributes */
1606b13ad8f4SJeff Kirsher 	rc = sysfs_create_group(&lp->dev->kobj, &temac_attr_group);
1607b13ad8f4SJeff Kirsher 	if (rc) {
1608b13ad8f4SJeff Kirsher 		dev_err(lp->dev, "Error creating sysfs files\n");
1609a63625d2SEsben Haabendal 		goto err_sysfs_create;
1610b13ad8f4SJeff Kirsher 	}
1611b13ad8f4SJeff Kirsher 
1612b13ad8f4SJeff Kirsher 	rc = register_netdev(lp->ndev);
1613b13ad8f4SJeff Kirsher 	if (rc) {
1614b13ad8f4SJeff Kirsher 		dev_err(lp->dev, "register_netdev() error (%i)\n", rc);
1615b13ad8f4SJeff Kirsher 		goto err_register_ndev;
1616b13ad8f4SJeff Kirsher 	}
1617b13ad8f4SJeff Kirsher 
1618b13ad8f4SJeff Kirsher 	return 0;
1619b13ad8f4SJeff Kirsher 
1620b13ad8f4SJeff Kirsher err_register_ndev:
1621b13ad8f4SJeff Kirsher 	sysfs_remove_group(&lp->dev->kobj, &temac_attr_group);
1622a63625d2SEsben Haabendal err_sysfs_create:
16238425c41dSEsben Haabendal 	if (lp->phy_node)
1624a63625d2SEsben Haabendal 		of_node_put(lp->phy_node);
1625a63625d2SEsben Haabendal 	temac_mdio_teardown(lp);
1626b13ad8f4SJeff Kirsher 	return rc;
1627b13ad8f4SJeff Kirsher }
1628b13ad8f4SJeff Kirsher 
16298425c41dSEsben Haabendal static int temac_remove(struct platform_device *pdev)
1630b13ad8f4SJeff Kirsher {
16318425c41dSEsben Haabendal 	struct net_device *ndev = platform_get_drvdata(pdev);
1632b13ad8f4SJeff Kirsher 	struct temac_local *lp = netdev_priv(ndev);
1633b13ad8f4SJeff Kirsher 
1634b13ad8f4SJeff Kirsher 	unregister_netdev(ndev);
1635b13ad8f4SJeff Kirsher 	sysfs_remove_group(&lp->dev->kobj, &temac_attr_group);
16368425c41dSEsben Haabendal 	if (lp->phy_node)
1637b13ad8f4SJeff Kirsher 		of_node_put(lp->phy_node);
1638a63625d2SEsben Haabendal 	temac_mdio_teardown(lp);
1639b13ad8f4SJeff Kirsher 	return 0;
1640b13ad8f4SJeff Kirsher }
1641b13ad8f4SJeff Kirsher 
164274847f23SFabian Frederick static const struct of_device_id temac_of_match[] = {
1643b13ad8f4SJeff Kirsher 	{ .compatible = "xlnx,xps-ll-temac-1.01.b", },
1644b13ad8f4SJeff Kirsher 	{ .compatible = "xlnx,xps-ll-temac-2.00.a", },
1645b13ad8f4SJeff Kirsher 	{ .compatible = "xlnx,xps-ll-temac-2.02.a", },
1646b13ad8f4SJeff Kirsher 	{ .compatible = "xlnx,xps-ll-temac-2.03.a", },
1647b13ad8f4SJeff Kirsher 	{},
1648b13ad8f4SJeff Kirsher };
1649b13ad8f4SJeff Kirsher MODULE_DEVICE_TABLE(of, temac_of_match);
1650b13ad8f4SJeff Kirsher 
16518425c41dSEsben Haabendal static struct platform_driver temac_driver = {
16528425c41dSEsben Haabendal 	.probe = temac_probe,
16538425c41dSEsben Haabendal 	.remove = temac_remove,
1654b13ad8f4SJeff Kirsher 	.driver = {
1655b13ad8f4SJeff Kirsher 		.name = "xilinx_temac",
1656b13ad8f4SJeff Kirsher 		.of_match_table = temac_of_match,
1657b13ad8f4SJeff Kirsher 	},
1658b13ad8f4SJeff Kirsher };
1659b13ad8f4SJeff Kirsher 
16608425c41dSEsben Haabendal module_platform_driver(temac_driver);
1661b13ad8f4SJeff Kirsher 
1662b13ad8f4SJeff Kirsher MODULE_DESCRIPTION("Xilinx LL_TEMAC Ethernet driver");
1663b13ad8f4SJeff Kirsher MODULE_AUTHOR("Yoshio Kashiwagi");
1664b13ad8f4SJeff Kirsher MODULE_LICENSE("GPL");