// SPDX-License-Identifier: GPL-2.0-only
/*
 * Driver for Xilinx TEMAC Ethernet device
 *
 * Copyright (c) 2008 Nissin Systems Co., Ltd.,  Yoshio Kashiwagi
 * Copyright (c) 2005-2008 DLA Systems,  David H. Lynch Jr. <dhlii@dlasys.net>
 * Copyright (c) 2008-2009 Secret Lab Technologies Ltd.
 *
 * This is a driver for the Xilinx ll_temac ipcore which is often used
 * in the Virtex and Spartan series of chips.
 *
 * Notes:
 * - The ll_temac hardware uses indirect access for many of the TEMAC
 *   registers, including the MDIO bus.  However, indirect access to MDIO
 *   registers takes considerably more clock cycles than to TEMAC registers.
 *   MDIO accesses are long, so threads doing them should probably sleep
 *   rather than busywait.  However, since only one indirect access can be
 *   in progress at any given time, that means that *all* indirect accesses
 *   could end up sleeping (to wait for an MDIO access to complete).
 *   Fortunately none of the indirect accesses are on the 'hot' path for tx
 *   or rx, so this should be okay.
 *
 * TODO:
 * - Factor out locallink DMA code into separate driver
 * - Fix support for hardware checksumming.
 * - Testing.  Lots and lots of testing.
 *
 */

#include <linux/delay.h>
#include <linux/etherdevice.h>
#include <linux/mii.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/netdevice.h>
#include <linux/if_ether.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/of_platform.h>
#include <linux/of_address.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/tcp.h>      /* needed for sizeof(tcphdr) */
#include <linux/udp.h>      /* needed for sizeof(udphdr) */
#include <linux/phy.h>
#include <linux/in.h>
#include <linux/io.h>
#include <linux/ip.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <linux/dma-mapping.h>
#include <linux/processor.h>
#include <linux/platform_data/xilinx-ll-temac.h>

#include "ll_temac.h"

/* Descriptor defines for Tx and Rx DMA */
#define TX_BD_NUM_DEFAULT		64
#define RX_BD_NUM_DEFAULT		1024
#define TX_BD_NUM_MAX			4096
#define RX_BD_NUM_MAX			4096

/* ---------------------------------------------------------------------
 * Low level register access functions
 */

static u32 _temac_ior_be(struct temac_local *lp, int offset)
{
	return ioread32be(lp->regs + offset);
}

static void _temac_iow_be(struct temac_local *lp, int offset, u32 value)
{
	return iowrite32be(value, lp->regs + offset);
}

static u32 _temac_ior_le(struct temac_local *lp, int offset)
{
	return ioread32(lp->regs + offset);
}

static void _temac_iow_le(struct temac_local *lp, int offset, u32 value)
{
	return iowrite32(value, lp->regs + offset);
}

static bool hard_acs_rdy(struct temac_local *lp)
{
	return temac_ior(lp, XTE_RDY0_OFFSET) & XTE_RDY0_HARD_ACS_RDY_MASK;
}

static bool hard_acs_rdy_or_timeout(struct temac_local *lp, ktime_t timeout)
{
	ktime_t cur = ktime_get();

	return hard_acs_rdy(lp) || ktime_after(cur, timeout);
}

/* Poll for maximum 20 ms.  This is similar to the 2 jiffies @ 100 Hz
 * that was used before, and should cover MDIO bus speed down to 3200
 * Hz.
 */
#define HARD_ACS_RDY_POLL_NS (20 * NSEC_PER_MSEC)
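
/* A worked check of the 20 ms bound above (a sketch; it assumes the
 * usual 64 MDC cycles per IEEE 802.3 clause-22 MDIO transaction, i.e.
 * 32 bits of preamble plus a 32-bit frame):
 *
 *	64 cycles / 20 ms = 3200 Hz minimum MDC frequency
 *
 * so any MDIO bus clocked at 3200 Hz or faster completes one
 * transaction within the poll window.
 */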

/*
 * temac_indirect_busywait - Wait for current indirect register access
 * to complete.
 */
int temac_indirect_busywait(struct temac_local *lp)
{
	ktime_t timeout = ktime_add_ns(ktime_get(), HARD_ACS_RDY_POLL_NS);

	spin_until_cond(hard_acs_rdy_or_timeout(lp, timeout));
	if (WARN_ON(!hard_acs_rdy(lp)))
		return -ETIMEDOUT;

	return 0;
}

/*
 * temac_indirect_in32 - Indirect register read access.  This function
 * must be called without lp->indirect_lock being held.
 */
u32 temac_indirect_in32(struct temac_local *lp, int reg)
{
	unsigned long flags;
	int val;

	spin_lock_irqsave(lp->indirect_lock, flags);
	val = temac_indirect_in32_locked(lp, reg);
	spin_unlock_irqrestore(lp->indirect_lock, flags);
	return val;
}

/*
 * temac_indirect_in32_locked - Indirect register read access.  This
 * function must be called with lp->indirect_lock being held.  Use
 * this together with spin_lock_irqsave/spin_lock_irqrestore to avoid
 * repeated lock/unlock and to ensure uninterrupted access to indirect
 * registers.
 */
u32 temac_indirect_in32_locked(struct temac_local *lp, int reg)
{
	/* This initial wait should normally not spin, as we always
	 * try to wait for indirect access to complete before
	 * releasing the indirect_lock.
	 */
	if (WARN_ON(temac_indirect_busywait(lp)))
		return -ETIMEDOUT;
	/* Initiate read from indirect register */
	temac_iow(lp, XTE_CTL0_OFFSET, reg);
	/* Wait for indirect register access to complete.  We really
	 * should not see timeouts, and could even end up causing
	 * problems for following indirect accesses, so let's make a
	 * bit of WARN noise.
	 */
	if (WARN_ON(temac_indirect_busywait(lp)))
		return -ETIMEDOUT;
	/* Value is ready now */
	return temac_ior(lp, XTE_LSW0_OFFSET);
}

/*
 * temac_indirect_out32 - Indirect register write access.  This function
 * must be called without lp->indirect_lock being held.
 */
void temac_indirect_out32(struct temac_local *lp, int reg, u32 value)
{
	unsigned long flags;

	spin_lock_irqsave(lp->indirect_lock, flags);
	temac_indirect_out32_locked(lp, reg, value);
	spin_unlock_irqrestore(lp->indirect_lock, flags);
}

/*
 * temac_indirect_out32_locked - Indirect register write access.  This
 * function must be called with lp->indirect_lock being held.  Use
 * this together with spin_lock_irqsave/spin_lock_irqrestore to avoid
 * repeated lock/unlock and to ensure uninterrupted access to indirect
 * registers.
 */
void temac_indirect_out32_locked(struct temac_local *lp, int reg, u32 value)
{
	/* As in temac_indirect_in32_locked(), we should normally not
	 * spin here.  And if it happens, we actually end up silently
	 * ignoring the write request.  Ouch.
	 */
	if (WARN_ON(temac_indirect_busywait(lp)))
		return;
	/* Initiate write to indirect register */
	temac_iow(lp, XTE_LSW0_OFFSET, value);
	temac_iow(lp, XTE_CTL0_OFFSET, CNTLREG_WRITE_ENABLE_MASK | reg);
	/* As in temac_indirect_in32_locked(), we should not see timeouts
	 * here.  And if it happens, we continue before the write has
	 * completed.  Not good.
	 */
	WARN_ON(temac_indirect_busywait(lp));
}
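
/* Illustrative usage of the *_locked variants above (a sketch
 * mirroring what temac_device_reset() does later in this file;
 * XTE_RXC1_OFFSET is just an example register): group several
 * indirect accesses under one lock hold so no other indirect access
 * can interleave with them.
 *
 *	unsigned long flags;
 *	u32 val;
 *
 *	spin_lock_irqsave(lp->indirect_lock, flags);
 *	val = temac_indirect_in32_locked(lp, XTE_RXC1_OFFSET);
 *	temac_indirect_out32_locked(lp, XTE_RXC1_OFFSET,
 *				    val & ~XTE_RXC1_RXEN_MASK);
 *	spin_unlock_irqrestore(lp->indirect_lock, flags);
 */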

/*
 * temac_dma_in32_* - Memory mapped DMA read.  These functions expect a
 * register input that is based on DCR word addresses, which are then
 * converted to memory mapped byte addresses.  To be assigned to
 * lp->dma_in32.
 */
static u32 temac_dma_in32_be(struct temac_local *lp, int reg)
{
	return ioread32be(lp->sdma_regs + (reg << 2));
}

static u32 temac_dma_in32_le(struct temac_local *lp, int reg)
{
	return ioread32(lp->sdma_regs + (reg << 2));
}

/*
 * temac_dma_out32_* - Memory mapped DMA write.  These functions expect
 * a register input that is based on DCR word addresses, which are then
 * converted to memory mapped byte addresses.  To be assigned to
 * lp->dma_out32.
 */
static void temac_dma_out32_be(struct temac_local *lp, int reg, u32 value)
{
	iowrite32be(value, lp->sdma_regs + (reg << 2));
}

static void temac_dma_out32_le(struct temac_local *lp, int reg, u32 value)
{
	iowrite32(value, lp->sdma_regs + (reg << 2));
}
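
/* Example of the word-to-byte conversion done above (a sketch; the
 * register number is arbitrary): a DMA register at DCR word address 3
 * is accessed memory mapped at byte offset 3 << 2, i.e. at
 * lp->sdma_regs + 0x0c.
 */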

/* DMA register access functions can be DCR based or memory mapped.
 * The PowerPC 440 is DCR based, the PowerPC 405 and MicroBlaze are both
 * memory mapped.
 */
#ifdef CONFIG_PPC_DCR

/*
 * temac_dma_dcr_in32 - DCR based DMA read
 */
static u32 temac_dma_dcr_in(struct temac_local *lp, int reg)
{
	return dcr_read(lp->sdma_dcrs, reg);
}

/*
 * temac_dma_dcr_out32 - DCR based DMA write
 */
static void temac_dma_dcr_out(struct temac_local *lp, int reg, u32 value)
{
	dcr_write(lp->sdma_dcrs, reg, value);
}

/*
 * temac_dcr_setup - If the DMA is DCR based, then setup the address and
 * I/O functions
 */
static int temac_dcr_setup(struct temac_local *lp, struct platform_device *op,
			   struct device_node *np)
{
	unsigned int dcrs;

	/* setup the dcr address mapping if it's in the device tree */

	dcrs = dcr_resource_start(np, 0);
	if (dcrs != 0) {
		lp->sdma_dcrs = dcr_map(np, dcrs, dcr_resource_len(np, 0));
		lp->dma_in = temac_dma_dcr_in;
		lp->dma_out = temac_dma_dcr_out;
		dev_dbg(&op->dev, "DCR base: %x\n", dcrs);
		return 0;
	}
	/* no DCR in the device tree, indicate a failure */
	return -1;
}

#else

/*
 * temac_dcr_setup - This is a stub for when DCR is not supported,
 * such as with MicroBlaze and x86
 */
static int temac_dcr_setup(struct temac_local *lp, struct platform_device *op,
			   struct device_node *np)
{
	return -1;
}

#endif

/*
 * temac_dma_bd_release - Release buffer descriptor rings
 */
static void temac_dma_bd_release(struct net_device *ndev)
{
	struct temac_local *lp = netdev_priv(ndev);
	int i;

	/* Reset Local Link (DMA) */
	lp->dma_out(lp, DMA_CONTROL_REG, DMA_CONTROL_RST);

	for (i = 0; i < lp->rx_bd_num; i++) {
		if (!lp->rx_skb[i])
			break;
		dma_unmap_single(ndev->dev.parent, lp->rx_bd_v[i].phys,
				 XTE_MAX_JUMBO_FRAME_SIZE, DMA_FROM_DEVICE);
		dev_kfree_skb(lp->rx_skb[i]);
	}
	if (lp->rx_bd_v)
		dma_free_coherent(ndev->dev.parent,
				  sizeof(*lp->rx_bd_v) * lp->rx_bd_num,
				  lp->rx_bd_v, lp->rx_bd_p);
	if (lp->tx_bd_v)
		dma_free_coherent(ndev->dev.parent,
				  sizeof(*lp->tx_bd_v) * lp->tx_bd_num,
				  lp->tx_bd_v, lp->tx_bd_p);
}

/*
 * temac_dma_bd_init - Setup buffer descriptor rings
 */
static int temac_dma_bd_init(struct net_device *ndev)
{
	struct temac_local *lp = netdev_priv(ndev);
	struct sk_buff *skb;
	dma_addr_t skb_dma_addr;
	int i;

	lp->rx_skb = devm_kcalloc(&ndev->dev, lp->rx_bd_num,
				  sizeof(*lp->rx_skb), GFP_KERNEL);
	if (!lp->rx_skb)
		goto out;

	/* Allocate the tx and rx ring buffer descriptors.
	 * Returns a virtual address and a physical address.
	 */
	lp->tx_bd_v = dma_alloc_coherent(ndev->dev.parent,
					 sizeof(*lp->tx_bd_v) * lp->tx_bd_num,
					 &lp->tx_bd_p, GFP_KERNEL);
	if (!lp->tx_bd_v)
		goto out;

	lp->rx_bd_v = dma_alloc_coherent(ndev->dev.parent,
					 sizeof(*lp->rx_bd_v) * lp->rx_bd_num,
					 &lp->rx_bd_p, GFP_KERNEL);
	if (!lp->rx_bd_v)
		goto out;

	for (i = 0; i < lp->tx_bd_num; i++) {
		lp->tx_bd_v[i].next = cpu_to_be32(lp->tx_bd_p
			+ sizeof(*lp->tx_bd_v) * ((i + 1) % lp->tx_bd_num));
	}

	for (i = 0; i < lp->rx_bd_num; i++) {
		lp->rx_bd_v[i].next = cpu_to_be32(lp->rx_bd_p
			+ sizeof(*lp->rx_bd_v) * ((i + 1) % lp->rx_bd_num));

		skb = __netdev_alloc_skb_ip_align(ndev,
						  XTE_MAX_JUMBO_FRAME_SIZE,
						  GFP_KERNEL);
		if (!skb)
			goto out;

		lp->rx_skb[i] = skb;
		/* returns physical address of skb->data */
		skb_dma_addr = dma_map_single(ndev->dev.parent, skb->data,
					      XTE_MAX_JUMBO_FRAME_SIZE,
					      DMA_FROM_DEVICE);
		if (dma_mapping_error(ndev->dev.parent, skb_dma_addr))
			goto out;
		lp->rx_bd_v[i].phys = cpu_to_be32(skb_dma_addr);
		lp->rx_bd_v[i].len = cpu_to_be32(XTE_MAX_JUMBO_FRAME_SIZE);
		lp->rx_bd_v[i].app0 = cpu_to_be32(STS_CTRL_APP0_IRQONEND);
	}

	/* Configure DMA channel (irq setup) */
	lp->dma_out(lp, TX_CHNL_CTRL,
		    lp->coalesce_delay_tx << 24 | lp->coalesce_count_tx << 16 |
		    0x00000400 | // Use 1 Bit Wide Counters. Currently Not Used!
		    CHNL_CTRL_IRQ_EN | CHNL_CTRL_IRQ_ERR_EN |
		    CHNL_CTRL_IRQ_DLY_EN | CHNL_CTRL_IRQ_COAL_EN);
	lp->dma_out(lp, RX_CHNL_CTRL,
		    lp->coalesce_delay_rx << 24 | lp->coalesce_count_rx << 16 |
		    CHNL_CTRL_IRQ_IOE |
		    CHNL_CTRL_IRQ_EN | CHNL_CTRL_IRQ_ERR_EN |
		    CHNL_CTRL_IRQ_DLY_EN | CHNL_CTRL_IRQ_COAL_EN);

	/* Init descriptor indexes */
	lp->tx_bd_ci = 0;
	lp->tx_bd_tail = 0;
	lp->rx_bd_ci = 0;
	lp->rx_bd_tail = lp->rx_bd_num - 1;

	/* Enable RX DMA transfers */
	wmb();
	lp->dma_out(lp, RX_CURDESC_PTR,  lp->rx_bd_p);
	lp->dma_out(lp, RX_TAILDESC_PTR,
		       lp->rx_bd_p + (sizeof(*lp->rx_bd_v) * lp->rx_bd_tail));

	/* Prepare for TX DMA transfer */
	lp->dma_out(lp, TX_CURDESC_PTR, lp->tx_bd_p);

	return 0;

out:
	temac_dma_bd_release(ndev);
	return -ENOMEM;
}
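
/* The rings built above are circular chains: descriptor i's "next"
 * field holds the DMA address of descriptor (i + 1) % ring size, so
 * the last descriptor points back at the first.  The DMA engine walks
 * the chain from CURDESC and halts after processing the descriptor at
 * TAILDESC (see the comment in ll_temac_recv()); moving TAILDESC
 * forward hands additional descriptors to hardware.
 */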

/* ---------------------------------------------------------------------
 * net_device_ops
 */

static void temac_do_set_mac_address(struct net_device *ndev)
{
	struct temac_local *lp = netdev_priv(ndev);
	unsigned long flags;

	/* Set up the unicast MAC address filter with the device MAC address */
	spin_lock_irqsave(lp->indirect_lock, flags);
	temac_indirect_out32_locked(lp, XTE_UAW0_OFFSET,
				    (ndev->dev_addr[0]) |
				    (ndev->dev_addr[1] << 8) |
				    (ndev->dev_addr[2] << 16) |
				    (ndev->dev_addr[3] << 24));
	/* There are reserved bits in EUAW1, so don't affect them.
	 * Set MAC bits [47:32] in EUAW1.
	 */
	temac_indirect_out32_locked(lp, XTE_UAW1_OFFSET,
				    (ndev->dev_addr[4] & 0x000000ff) |
				    (ndev->dev_addr[5] << 8));
	spin_unlock_irqrestore(lp->indirect_lock, flags);
}
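
/* Worked example of the UAW0/UAW1 packing above, for the (arbitrary,
 * illustrative) MAC address 00:0a:35:01:02:03:
 *
 *	UAW0 = 0x01350a00	(bytes 0..3, byte 0 in bits [7:0])
 *	UAW1 = 0x00000302	(bytes 4..5 in bits [15:0], rest reserved)
 */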

static int temac_init_mac_address(struct net_device *ndev, const void *address)
{
	eth_hw_addr_set(ndev, address);
	if (!is_valid_ether_addr(ndev->dev_addr))
		eth_hw_addr_random(ndev);
	temac_do_set_mac_address(ndev);
	return 0;
}

static int temac_set_mac_address(struct net_device *ndev, void *p)
{
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;
	eth_hw_addr_set(ndev, addr->sa_data);
	temac_do_set_mac_address(ndev);
	return 0;
}

static void temac_set_multicast_list(struct net_device *ndev)
{
	struct temac_local *lp = netdev_priv(ndev);
	u32 multi_addr_msw, multi_addr_lsw;
	int i = 0;
	unsigned long flags;
	bool promisc_mode_disabled = false;

	if (ndev->flags & (IFF_PROMISC | IFF_ALLMULTI) ||
	    (netdev_mc_count(ndev) > MULTICAST_CAM_TABLE_NUM)) {
		temac_indirect_out32(lp, XTE_AFM_OFFSET, XTE_AFM_EPPRM_MASK);
		dev_info(&ndev->dev, "Promiscuous mode enabled.\n");
		return;
	}

	spin_lock_irqsave(lp->indirect_lock, flags);

	if (!netdev_mc_empty(ndev)) {
		struct netdev_hw_addr *ha;

		netdev_for_each_mc_addr(ha, ndev) {
			if (WARN_ON(i >= MULTICAST_CAM_TABLE_NUM))
				break;
			multi_addr_msw = ((ha->addr[3] << 24) |
					  (ha->addr[2] << 16) |
					  (ha->addr[1] << 8) |
					  (ha->addr[0]));
			temac_indirect_out32_locked(lp, XTE_MAW0_OFFSET,
						    multi_addr_msw);
			multi_addr_lsw = ((ha->addr[5] << 8) |
					  (ha->addr[4]) | (i << 16));
			temac_indirect_out32_locked(lp, XTE_MAW1_OFFSET,
						    multi_addr_lsw);
			i++;
		}
	}

	/* Clear all or remaining/unused address table entries */
	while (i < MULTICAST_CAM_TABLE_NUM) {
		temac_indirect_out32_locked(lp, XTE_MAW0_OFFSET, 0);
		temac_indirect_out32_locked(lp, XTE_MAW1_OFFSET, i << 16);
		i++;
	}

	/* Enable address filter block if currently disabled */
	if (temac_indirect_in32_locked(lp, XTE_AFM_OFFSET)
	    & XTE_AFM_EPPRM_MASK) {
		temac_indirect_out32_locked(lp, XTE_AFM_OFFSET, 0);
		promisc_mode_disabled = true;
	}

	spin_unlock_irqrestore(lp->indirect_lock, flags);

	if (promisc_mode_disabled)
		dev_info(&ndev->dev, "Promiscuous mode disabled.\n");
}
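
/* CAM entry layout as written above: MAW0 holds the low four MAC
 * bytes in the same order as UAW0, and MAW1 holds bytes 4 and 5 in
 * bits [15:0] with the CAM table entry index in bits [23:16] (the
 * "i << 16").  Clearing an entry therefore writes MAW0 = 0 and
 * MAW1 = index << 16.
 */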

static struct temac_option {
	int flg;
	u32 opt;
	u32 reg;
	u32 m_or;
	u32 m_and;
} temac_options[] = {
	/* Turn on jumbo packet support for both Rx and Tx */
	{
		.opt = XTE_OPTION_JUMBO,
		.reg = XTE_TXC_OFFSET,
		.m_or = XTE_TXC_TXJMBO_MASK,
	},
	{
		.opt = XTE_OPTION_JUMBO,
		.reg = XTE_RXC1_OFFSET,
		.m_or = XTE_RXC1_RXJMBO_MASK,
	},
	/* Turn on VLAN packet support for both Rx and Tx */
	{
		.opt = XTE_OPTION_VLAN,
		.reg = XTE_TXC_OFFSET,
		.m_or = XTE_TXC_TXVLAN_MASK,
	},
	{
		.opt = XTE_OPTION_VLAN,
		.reg = XTE_RXC1_OFFSET,
		.m_or = XTE_RXC1_RXVLAN_MASK,
	},
	/* Turn on FCS stripping on receive packets */
	{
		.opt = XTE_OPTION_FCS_STRIP,
		.reg = XTE_RXC1_OFFSET,
		.m_or = XTE_RXC1_RXFCS_MASK,
	},
	/* Turn on FCS insertion on transmit packets */
	{
		.opt = XTE_OPTION_FCS_INSERT,
		.reg = XTE_TXC_OFFSET,
		.m_or = XTE_TXC_TXFCS_MASK,
	},
	/* Turn on length/type field checking on receive packets */
	{
		.opt = XTE_OPTION_LENTYPE_ERR,
		.reg = XTE_RXC1_OFFSET,
		.m_or = XTE_RXC1_RXLT_MASK,
	},
	/* Turn on flow control for receive */
	{
		.opt = XTE_OPTION_FLOW_CONTROL,
		.reg = XTE_FCC_OFFSET,
		.m_or = XTE_FCC_RXFLO_MASK,
	},
	/* Turn on flow control for transmit */
	{
		.opt = XTE_OPTION_FLOW_CONTROL,
		.reg = XTE_FCC_OFFSET,
		.m_or = XTE_FCC_TXFLO_MASK,
	},
	/* Turn on promiscuous frame filtering (all frames are received) */
	{
		.opt = XTE_OPTION_PROMISC,
		.reg = XTE_AFM_OFFSET,
		.m_or = XTE_AFM_EPPRM_MASK,
	},
	/* Enable transmitter if not already enabled */
	{
		.opt = XTE_OPTION_TXEN,
		.reg = XTE_TXC_OFFSET,
		.m_or = XTE_TXC_TXEN_MASK,
	},
	/* Enable receiver if not already enabled */
	{
		.opt = XTE_OPTION_RXEN,
		.reg = XTE_RXC1_OFFSET,
		.m_or = XTE_RXC1_RXEN_MASK,
	},
	{}
};

/*
 * temac_setoptions
 */
static u32 temac_setoptions(struct net_device *ndev, u32 options)
{
	struct temac_local *lp = netdev_priv(ndev);
	struct temac_option *tp = &temac_options[0];
	int reg;
	unsigned long flags;

	spin_lock_irqsave(lp->indirect_lock, flags);
	while (tp->opt) {
		reg = temac_indirect_in32_locked(lp, tp->reg) & ~tp->m_or;
		if (options & tp->opt) {
			reg |= tp->m_or;
			temac_indirect_out32_locked(lp, tp->reg, reg);
		}
		tp++;
	}
	spin_unlock_irqrestore(lp->indirect_lock, flags);
	lp->options |= options;

	return 0;
}

/* Initialize temac */
static void temac_device_reset(struct net_device *ndev)
{
	struct temac_local *lp = netdev_priv(ndev);
	u32 timeout;
	u32 val;
	unsigned long flags;

	/* Perform a software reset */

	/* 0x300 host enable bit ? */
	/* reset PHY through control register ?:1 */

	dev_dbg(&ndev->dev, "%s()\n", __func__);

	/* Reset the receiver and wait for it to finish reset */
	temac_indirect_out32(lp, XTE_RXC1_OFFSET, XTE_RXC1_RXRST_MASK);
	timeout = 1000;
	while (temac_indirect_in32(lp, XTE_RXC1_OFFSET) & XTE_RXC1_RXRST_MASK) {
		udelay(1);
		if (--timeout == 0) {
			dev_err(&ndev->dev,
				"%s RX reset timeout!!\n", __func__);
			break;
		}
	}

	/* Reset the transmitter and wait for it to finish reset */
	temac_indirect_out32(lp, XTE_TXC_OFFSET, XTE_TXC_TXRST_MASK);
	timeout = 1000;
	while (temac_indirect_in32(lp, XTE_TXC_OFFSET) & XTE_TXC_TXRST_MASK) {
		udelay(1);
		if (--timeout == 0) {
			dev_err(&ndev->dev,
				"%s TX reset timeout!!\n", __func__);
			break;
		}
	}

	/* Disable the receiver */
	spin_lock_irqsave(lp->indirect_lock, flags);
	val = temac_indirect_in32_locked(lp, XTE_RXC1_OFFSET);
	temac_indirect_out32_locked(lp, XTE_RXC1_OFFSET,
				    val & ~XTE_RXC1_RXEN_MASK);
	spin_unlock_irqrestore(lp->indirect_lock, flags);

	/* Reset Local Link (DMA) */
	lp->dma_out(lp, DMA_CONTROL_REG, DMA_CONTROL_RST);
	timeout = 1000;
	while (lp->dma_in(lp, DMA_CONTROL_REG) & DMA_CONTROL_RST) {
		udelay(1);
		if (--timeout == 0) {
			dev_err(&ndev->dev,
				"%s DMA reset timeout!!\n", __func__);
			break;
		}
	}
	lp->dma_out(lp, DMA_CONTROL_REG, DMA_TAIL_ENABLE);

	if (temac_dma_bd_init(ndev)) {
		dev_err(&ndev->dev,
			"%s descriptor allocation failed\n", __func__);
	}

	spin_lock_irqsave(lp->indirect_lock, flags);
	temac_indirect_out32_locked(lp, XTE_RXC0_OFFSET, 0);
	temac_indirect_out32_locked(lp, XTE_RXC1_OFFSET, 0);
	temac_indirect_out32_locked(lp, XTE_TXC_OFFSET, 0);
	temac_indirect_out32_locked(lp, XTE_FCC_OFFSET, XTE_FCC_RXFLO_MASK);
	spin_unlock_irqrestore(lp->indirect_lock, flags);

	/* Sync default options with HW
	 * but leave receiver and transmitter disabled.
	 */
	temac_setoptions(ndev,
			 lp->options & ~(XTE_OPTION_TXEN | XTE_OPTION_RXEN));

	temac_do_set_mac_address(ndev);

	/* Set address filter table */
	temac_set_multicast_list(ndev);
	if (temac_setoptions(ndev, lp->options))
		dev_err(&ndev->dev, "Error setting TEMAC options\n");

	/* Init Driver variable */
	netif_trans_update(ndev); /* prevent tx timeout */
}

static void temac_adjust_link(struct net_device *ndev)
{
	struct temac_local *lp = netdev_priv(ndev);
	struct phy_device *phy = ndev->phydev;
	u32 mii_speed;
	int link_state;
	unsigned long flags;

	/* hash together the state values to decide if something has changed */
	link_state = phy->speed | (phy->duplex << 1) | phy->link;

	if (lp->last_link != link_state) {
		spin_lock_irqsave(lp->indirect_lock, flags);
		mii_speed = temac_indirect_in32_locked(lp, XTE_EMCFG_OFFSET);
		mii_speed &= ~XTE_EMCFG_LINKSPD_MASK;

		switch (phy->speed) {
		case SPEED_1000:
			mii_speed |= XTE_EMCFG_LINKSPD_1000;
			break;
		case SPEED_100:
			mii_speed |= XTE_EMCFG_LINKSPD_100;
			break;
		case SPEED_10:
			mii_speed |= XTE_EMCFG_LINKSPD_10;
			break;
		}

		/* Write new speed setting out to TEMAC */
		temac_indirect_out32_locked(lp, XTE_EMCFG_OFFSET, mii_speed);
		spin_unlock_irqrestore(lp->indirect_lock, flags);

		lp->last_link = link_state;
		phy_print_status(phy);
	}
}

#ifdef CONFIG_64BIT

static void ptr_to_txbd(void *p, struct cdmac_bd *bd)
{
	bd->app3 = (u32)(((u64)p) >> 32);
	bd->app4 = (u32)((u64)p & 0xFFFFFFFF);
}

static void *ptr_from_txbd(struct cdmac_bd *bd)
{
	return (void *)(((u64)(bd->app3) << 32) | bd->app4);
}

#else

static void ptr_to_txbd(void *p, struct cdmac_bd *bd)
{
	bd->app4 = (u32)p;
}

static void *ptr_from_txbd(struct cdmac_bd *bd)
{
	return (void *)(bd->app4);
}

#endif

static void temac_start_xmit_done(struct net_device *ndev)
{
	struct temac_local *lp = netdev_priv(ndev);
	struct cdmac_bd *cur_p;
	unsigned int stat = 0;
	struct sk_buff *skb;

	cur_p = &lp->tx_bd_v[lp->tx_bd_ci];
	stat = be32_to_cpu(cur_p->app0);

	while (stat & STS_CTRL_APP0_CMPLT) {
		/* Make sure that the other fields are read after bd is
		 * released by dma
		 */
		rmb();
		dma_unmap_single(ndev->dev.parent, be32_to_cpu(cur_p->phys),
				 be32_to_cpu(cur_p->len), DMA_TO_DEVICE);
		skb = (struct sk_buff *)ptr_from_txbd(cur_p);
		if (skb)
			dev_consume_skb_irq(skb);
		cur_p->app1 = 0;
		cur_p->app2 = 0;
		cur_p->app3 = 0;
		cur_p->app4 = 0;

		ndev->stats.tx_packets++;
		ndev->stats.tx_bytes += be32_to_cpu(cur_p->len);

		/* app0 must be visible last, as it is used to flag
		 * availability of the bd
		 */
		smp_mb();
		cur_p->app0 = 0;

		lp->tx_bd_ci++;
		if (lp->tx_bd_ci >= lp->tx_bd_num)
			lp->tx_bd_ci = 0;

		cur_p = &lp->tx_bd_v[lp->tx_bd_ci];
		stat = be32_to_cpu(cur_p->app0);
	}

	/* Matches barrier in temac_start_xmit */
	smp_mb();

	netif_wake_queue(ndev);
}

static inline int temac_check_tx_bd_space(struct temac_local *lp, int num_frag)
{
	struct cdmac_bd *cur_p;
	int tail;

	tail = lp->tx_bd_tail;
	cur_p = &lp->tx_bd_v[tail];

	do {
		if (cur_p->app0)
			return NETDEV_TX_BUSY;

		/* Make sure to read next bd app0 after this one */
		rmb();

		tail++;
		if (tail >= lp->tx_bd_num)
			tail = 0;

		cur_p = &lp->tx_bd_v[tail];
		num_frag--;
	} while (num_frag >= 0);

	return 0;
}
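
/* temac_check_tx_bd_space() is called with num_frag + 1 below: one
 * descriptor for the skb head plus one per page fragment.  For
 * example, an skb with two fragments needs three free descriptors
 * before transmission can be attempted.
 */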

static netdev_tx_t
temac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct temac_local *lp = netdev_priv(ndev);
	struct cdmac_bd *cur_p;
	dma_addr_t tail_p, skb_dma_addr;
	int ii;
	unsigned long num_frag;
	skb_frag_t *frag;

	num_frag = skb_shinfo(skb)->nr_frags;
	frag = &skb_shinfo(skb)->frags[0];
	cur_p = &lp->tx_bd_v[lp->tx_bd_tail];

	if (temac_check_tx_bd_space(lp, num_frag + 1)) {
		if (netif_queue_stopped(ndev))
			return NETDEV_TX_BUSY;

		netif_stop_queue(ndev);

		/* Matches barrier in temac_start_xmit_done */
		smp_mb();

		/* Space might have just been freed - check again */
		if (temac_check_tx_bd_space(lp, num_frag + 1))
			return NETDEV_TX_BUSY;

		netif_wake_queue(ndev);
	}

	cur_p->app0 = 0;
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		unsigned int csum_start_off = skb_checksum_start_offset(skb);
		unsigned int csum_index_off = csum_start_off + skb->csum_offset;

		cur_p->app0 |= cpu_to_be32(0x000001); /* TX Checksum Enabled */
		cur_p->app1 = cpu_to_be32((csum_start_off << 16)
					  | csum_index_off);
		cur_p->app2 = 0;  /* initial checksum seed */
	}

	cur_p->app0 |= cpu_to_be32(STS_CTRL_APP0_SOP);
	skb_dma_addr = dma_map_single(ndev->dev.parent, skb->data,
				      skb_headlen(skb), DMA_TO_DEVICE);
	cur_p->len = cpu_to_be32(skb_headlen(skb));
	if (WARN_ON_ONCE(dma_mapping_error(ndev->dev.parent, skb_dma_addr))) {
		dev_kfree_skb_any(skb);
		ndev->stats.tx_dropped++;
		return NETDEV_TX_OK;
	}
	cur_p->phys = cpu_to_be32(skb_dma_addr);

	for (ii = 0; ii < num_frag; ii++) {
		if (++lp->tx_bd_tail >= lp->tx_bd_num)
			lp->tx_bd_tail = 0;

		cur_p = &lp->tx_bd_v[lp->tx_bd_tail];
		skb_dma_addr = dma_map_single(ndev->dev.parent,
					      skb_frag_address(frag),
					      skb_frag_size(frag),
					      DMA_TO_DEVICE);
		if (dma_mapping_error(ndev->dev.parent, skb_dma_addr)) {
			if (--lp->tx_bd_tail < 0)
				lp->tx_bd_tail = lp->tx_bd_num - 1;
			cur_p = &lp->tx_bd_v[lp->tx_bd_tail];
			while (--ii >= 0) {
				--frag;
				dma_unmap_single(ndev->dev.parent,
						 be32_to_cpu(cur_p->phys),
						 skb_frag_size(frag),
						 DMA_TO_DEVICE);
				if (--lp->tx_bd_tail < 0)
					lp->tx_bd_tail = lp->tx_bd_num - 1;
				cur_p = &lp->tx_bd_v[lp->tx_bd_tail];
			}
			dma_unmap_single(ndev->dev.parent,
					 be32_to_cpu(cur_p->phys),
					 skb_headlen(skb), DMA_TO_DEVICE);
			dev_kfree_skb_any(skb);
			ndev->stats.tx_dropped++;
			return NETDEV_TX_OK;
		}
		cur_p->phys = cpu_to_be32(skb_dma_addr);
		cur_p->len = cpu_to_be32(skb_frag_size(frag));
		cur_p->app0 = 0;
		frag++;
	}
	cur_p->app0 |= cpu_to_be32(STS_CTRL_APP0_EOP);

	/* Mark last fragment with skb address, so it can be consumed
	 * in temac_start_xmit_done()
	 */
	ptr_to_txbd((void *)skb, cur_p);

	tail_p = lp->tx_bd_p + sizeof(*lp->tx_bd_v) * lp->tx_bd_tail;
	lp->tx_bd_tail++;
	if (lp->tx_bd_tail >= lp->tx_bd_num)
		lp->tx_bd_tail = 0;

	skb_tx_timestamp(skb);

	/* Kick off the transfer */
	wmb();
	lp->dma_out(lp, TX_TAILDESC_PTR, tail_p); /* DMA start */

	if (temac_check_tx_bd_space(lp, MAX_SKB_FRAGS + 1))
		netif_stop_queue(ndev);

	return NETDEV_TX_OK;
}

static int ll_temac_recv_buffers_available(struct temac_local *lp)
{
	int available;

	if (!lp->rx_skb[lp->rx_bd_ci])
		return 0;
	available = 1 + lp->rx_bd_tail - lp->rx_bd_ci;
	if (available <= 0)
		available += lp->rx_bd_num;
	return available;
}
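
/* Worked example for the computation above with rx_bd_num = 1024:
 * rx_bd_ci = 10 and rx_bd_tail = 13 gives 1 + 13 - 10 = 4 buffers
 * still owned by hardware; rx_bd_ci = 10 and rx_bd_tail = 9 gives
 * 1 + 9 - 10 = 0, which wraps to 1024 (the whole ring is available).
 */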

static void ll_temac_recv(struct net_device *ndev)
{
	struct temac_local *lp = netdev_priv(ndev);
	unsigned long flags;
	int rx_bd;
	bool update_tail = false;

	spin_lock_irqsave(&lp->rx_lock, flags);

	/* Process all received buffers, passing them on to the
	 * network stack.  After this, the buffer descriptors will be
	 * in an un-allocated stage, where no skb is allocated for
	 * them, and they are therefore not available for TEMAC/DMA.
	 */
	do {
		struct cdmac_bd *bd = &lp->rx_bd_v[lp->rx_bd_ci];
		struct sk_buff *skb = lp->rx_skb[lp->rx_bd_ci];
		unsigned int bdstat = be32_to_cpu(bd->app0);
		int length;

		/* While this should not normally happen, we can end
		 * here when GFP_ATOMIC allocations fail, and we
		 * therefore have un-allocated buffers.
		 */
		if (!skb)
			break;

		/* Loop over all completed buffer descriptors */
		if (!(bdstat & STS_CTRL_APP0_CMPLT))
			break;

		dma_unmap_single(ndev->dev.parent, be32_to_cpu(bd->phys),
				 XTE_MAX_JUMBO_FRAME_SIZE, DMA_FROM_DEVICE);
		/* The buffer is not valid for DMA anymore */
		bd->phys = 0;
		bd->len = 0;

		length = be32_to_cpu(bd->app4) & 0x3FFF;
		skb_put(skb, length);
		skb->protocol = eth_type_trans(skb, ndev);
		skb_checksum_none_assert(skb);

		/* if we're doing rx csum offload, set it up */
		if (((lp->temac_features & TEMAC_FEATURE_RX_CSUM) != 0) &&
		    (skb->protocol == htons(ETH_P_IP)) &&
		    (skb->len > 64)) {
			/* Convert from device endianness (be32) to cpu
			 * endianness, and if necessary swap the bytes
			 * (back) for proper IP checksum byte order
			 * (be16).
			 */
			skb->csum = htons(be32_to_cpu(bd->app3) & 0xFFFF);
			skb->ip_summed = CHECKSUM_COMPLETE;
		}

		if (!skb_defer_rx_timestamp(skb))
			netif_rx(skb);
		/* The skb buffer is now owned by network stack above */
		lp->rx_skb[lp->rx_bd_ci] = NULL;

		ndev->stats.rx_packets++;
		ndev->stats.rx_bytes += length;

		rx_bd = lp->rx_bd_ci;
		if (++lp->rx_bd_ci >= lp->rx_bd_num)
			lp->rx_bd_ci = 0;
	} while (rx_bd != lp->rx_bd_tail);

	/* DMA operations will halt when the last buffer descriptor is
	 * processed (ie. the one pointed to by RX_TAILDESC_PTR).
	 * When that happens, no more interrupt events will be
	 * generated.  No IRQ_COAL or IRQ_DLY, and not even an
	 * IRQ_ERR.  To avoid stalling, we schedule a delayed work
	 * when there is a potential risk of that happening.  The work
	 * will call this function, and thus re-schedule itself until
	 * enough buffers are available again.
	 */
	if (ll_temac_recv_buffers_available(lp) < lp->coalesce_count_rx)
		schedule_delayed_work(&lp->restart_work, HZ / 1000);

	/* Allocate new buffers for those buffer descriptors that were
	 * passed to network stack.  Note that GFP_ATOMIC allocations
	 * can fail (e.g. when a larger burst of GFP_ATOMIC
	 * allocations occurs), so while we try to allocate all
	 * buffers in the same interrupt where they were processed, we
	 * continue with what we could get in case of allocation
	 * failure.  Allocation of remaining buffers will be retried
	 * in following calls.
	 */
	while (1) {
		struct sk_buff *skb;
		struct cdmac_bd *bd;
		dma_addr_t skb_dma_addr;

		rx_bd = lp->rx_bd_tail + 1;
		if (rx_bd >= lp->rx_bd_num)
			rx_bd = 0;
		bd = &lp->rx_bd_v[rx_bd];

		if (bd->phys)
			break;	/* All skb's allocated */

		skb = netdev_alloc_skb_ip_align(ndev, XTE_MAX_JUMBO_FRAME_SIZE);
		if (!skb) {
			dev_warn(&ndev->dev, "skb alloc failed\n");
			break;
		}

		skb_dma_addr = dma_map_single(ndev->dev.parent, skb->data,
					      XTE_MAX_JUMBO_FRAME_SIZE,
					      DMA_FROM_DEVICE);
		if (WARN_ON_ONCE(dma_mapping_error(ndev->dev.parent,
						   skb_dma_addr))) {
			dev_kfree_skb_any(skb);
			break;
		}

		bd->phys = cpu_to_be32(skb_dma_addr);
		bd->len = cpu_to_be32(XTE_MAX_JUMBO_FRAME_SIZE);
		bd->app0 = cpu_to_be32(STS_CTRL_APP0_IRQONEND);
		lp->rx_skb[rx_bd] = skb;

		lp->rx_bd_tail = rx_bd;
		update_tail = true;
	}

	/* Move tail pointer when buffers have been allocated */
	if (update_tail) {
		lp->dma_out(lp, RX_TAILDESC_PTR,
			lp->rx_bd_p + sizeof(*lp->rx_bd_v) * lp->rx_bd_tail);
	}

	spin_unlock_irqrestore(&lp->rx_lock, flags);
}

/* Function scheduled to ensure a restart in case of DMA halt
 * condition caused by running out of buffer descriptors.
 */
static void ll_temac_restart_work_func(struct work_struct *work)
{
	struct temac_local *lp = container_of(work, struct temac_local,
					      restart_work.work);
	struct net_device *ndev = lp->ndev;

	ll_temac_recv(ndev);
}

static irqreturn_t ll_temac_tx_irq(int irq, void *_ndev)
{
	struct net_device *ndev = _ndev;
	struct temac_local *lp = netdev_priv(ndev);
	unsigned int status;

	status = lp->dma_in(lp, TX_IRQ_REG);
	lp->dma_out(lp, TX_IRQ_REG, status);

	if (status & (IRQ_COAL | IRQ_DLY))
		temac_start_xmit_done(lp->ndev);
	if (status & (IRQ_ERR | IRQ_DMAERR))
		dev_err_ratelimited(&ndev->dev,
				    "TX error 0x%x TX_CHNL_STS=0x%08x\n",
				    status, lp->dma_in(lp, TX_CHNL_STS));

	return IRQ_HANDLED;
}

static irqreturn_t ll_temac_rx_irq(int irq, void *_ndev)
{
	struct net_device *ndev = _ndev;
	struct temac_local *lp = netdev_priv(ndev);
	unsigned int status;

	/* Read and clear the status registers */
	status = lp->dma_in(lp, RX_IRQ_REG);
	lp->dma_out(lp, RX_IRQ_REG, status);

	if (status & (IRQ_COAL | IRQ_DLY))
		ll_temac_recv(lp->ndev);
	if (status & (IRQ_ERR | IRQ_DMAERR))
		dev_err_ratelimited(&ndev->dev,
				    "RX error 0x%x RX_CHNL_STS=0x%08x\n",
				    status, lp->dma_in(lp, RX_CHNL_STS));

	return IRQ_HANDLED;
}

static int temac_open(struct net_device *ndev)
{
	struct temac_local *lp = netdev_priv(ndev);
	struct phy_device *phydev = NULL;
	int rc;

	dev_dbg(&ndev->dev, "temac_open()\n");

	if (lp->phy_node) {
		phydev = of_phy_connect(lp->ndev, lp->phy_node,
					temac_adjust_link, 0, 0);
		if (!phydev) {
			dev_err(lp->dev, "of_phy_connect() failed\n");
			return -ENODEV;
		}
		phy_start(phydev);
	} else if (strlen(lp->phy_name) > 0) {
		phydev = phy_connect(lp->ndev, lp->phy_name, temac_adjust_link,
				     lp->phy_interface);
		if (IS_ERR(phydev)) {
			dev_err(lp->dev, "phy_connect() failed\n");
			return PTR_ERR(phydev);
		}
		phy_start(phydev);
	}

	temac_device_reset(ndev);

	rc = request_irq(lp->tx_irq, ll_temac_tx_irq, 0, ndev->name, ndev);
	if (rc)
		goto err_tx_irq;
	rc = request_irq(lp->rx_irq, ll_temac_rx_irq, 0, ndev->name, ndev);
	if (rc)
		goto err_rx_irq;

	return 0;

 err_rx_irq:
	free_irq(lp->tx_irq, ndev);
 err_tx_irq:
	if (phydev)
		phy_disconnect(phydev);
	dev_err(lp->dev, "request_irq() failed\n");
	return rc;
}

static int temac_stop(struct net_device *ndev)
{
	struct temac_local *lp = netdev_priv(ndev);
	struct phy_device *phydev = ndev->phydev;

	dev_dbg(&ndev->dev, "temac_close()\n");

	cancel_delayed_work_sync(&lp->restart_work);

	free_irq(lp->tx_irq, ndev);
	free_irq(lp->rx_irq, ndev);

	if (phydev)
		phy_disconnect(phydev);

	temac_dma_bd_release(ndev);

	return 0;
}
#ifdef CONFIG_NET_POLL_CONTROLLER
static void
temac_poll_controller(struct net_device *ndev)
{
	struct temac_local *lp = netdev_priv(ndev);

	disable_irq(lp->tx_irq);
	disable_irq(lp->rx_irq);

	ll_temac_rx_irq(lp->rx_irq, ndev);
	ll_temac_tx_irq(lp->tx_irq, ndev);

	enable_irq(lp->tx_irq);
	enable_irq(lp->rx_irq);
}
#endif

static const struct net_device_ops temac_netdev_ops = {
	.ndo_open = temac_open,
	.ndo_stop = temac_stop,
	.ndo_start_xmit = temac_start_xmit,
	.ndo_set_rx_mode = temac_set_multicast_list,
	.ndo_set_mac_address = temac_set_mac_address,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_eth_ioctl = phy_do_ioctl_running,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = temac_poll_controller,
#endif
};

/* ---------------------------------------------------------------------
 * SYSFS device attributes
 */
static ssize_t temac_show_llink_regs(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	struct temac_local *lp = netdev_priv(ndev);
	int i, len = 0;

	for (i = 0; i < 0x11; i++)
		len += sprintf(buf + len, "%.8x%s", lp->dma_in(lp, i),
			       (i % 8) == 7 ? "\n" : " ");
	len += sprintf(buf + len, "\n");

	return len;
}

static DEVICE_ATTR(llink_regs, 0440, temac_show_llink_regs, NULL);

static struct attribute *temac_device_attrs[] = {
	&dev_attr_llink_regs.attr,
	NULL,
};

static const struct attribute_group temac_attr_group = {
	.attrs = temac_device_attrs,
};
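
/* The attribute group above exposes the first 0x11 LocalLink DMA
 * registers for debugging.  With a typical sysfs layout (the path is
 * illustrative and depends on the platform/bus topology), the dump
 * can be read with e.g.:
 *
 *	cat /sys/devices/platform/<temac-device>/llink_regs
 */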

/* ---------------------------------------------------------------------
 * ethtool support
 */

static void
ll_temac_ethtools_get_ringparam(struct net_device *ndev,
				struct ethtool_ringparam *ering,
				struct kernel_ethtool_ringparam *kernel_ering,
				struct netlink_ext_ack *extack)
{
	struct temac_local *lp = netdev_priv(ndev);

	ering->rx_max_pending = RX_BD_NUM_MAX;
	ering->rx_mini_max_pending = 0;
	ering->rx_jumbo_max_pending = 0;
	ering->tx_max_pending = TX_BD_NUM_MAX;
	ering->rx_pending = lp->rx_bd_num;
	ering->rx_mini_pending = 0;
	ering->rx_jumbo_pending = 0;
	ering->tx_pending = lp->tx_bd_num;
}
static int
ll_temac_ethtools_set_ringparam(struct net_device *ndev,
				struct ethtool_ringparam *ering,
				struct kernel_ethtool_ringparam *kernel_ering,
				struct netlink_ext_ack *extack)
{
	struct temac_local *lp = netdev_priv(ndev);

	if (ering->rx_pending > RX_BD_NUM_MAX ||
	    ering->rx_mini_pending ||
	    ering->rx_jumbo_pending ||
	    ering->tx_pending > TX_BD_NUM_MAX)
		return -EINVAL;

	if (netif_running(ndev))
		return -EBUSY;

	lp->rx_bd_num = ering->rx_pending;
	lp->tx_bd_num = ering->tx_pending;
	return 0;
}

static int
ll_temac_ethtools_get_coalesce(struct net_device *ndev,
			       struct ethtool_coalesce *ec,
			       struct kernel_ethtool_coalesce *kernel_coal,
			       struct netlink_ext_ack *extack)
{
	struct temac_local *lp = netdev_priv(ndev);

	ec->rx_max_coalesced_frames = lp->coalesce_count_rx;
	ec->tx_max_coalesced_frames = lp->coalesce_count_tx;
	ec->rx_coalesce_usecs = (lp->coalesce_delay_rx * 512) / 100;
	ec->tx_coalesce_usecs = (lp->coalesce_delay_tx * 512) / 100;
	return 0;
}

static int
ll_temac_ethtools_set_coalesce(struct net_device *ndev,
			       struct ethtool_coalesce *ec,
			       struct kernel_ethtool_coalesce *kernel_coal,
			       struct netlink_ext_ack *extack)
{
	struct temac_local *lp = netdev_priv(ndev);

	if (netif_running(ndev)) {
		netdev_err(ndev,
			   "Please stop netif before applying configuration\n");
		return -EFAULT;
	}

	if (ec->rx_max_coalesced_frames)
		lp->coalesce_count_rx = ec->rx_max_coalesced_frames;
	if (ec->tx_max_coalesced_frames)
		lp->coalesce_count_tx = ec->tx_max_coalesced_frames;
	/* With typical LocalLink clock speed of 200 MHz and
	 * C_PRESCALAR=1023, each delay count corresponds to 5.12 us.
	 */
	if (ec->rx_coalesce_usecs)
		lp->coalesce_delay_rx =
			min(255U, (ec->rx_coalesce_usecs * 100) / 512);
	if (ec->tx_coalesce_usecs)
		lp->coalesce_delay_tx =
			min(255U, (ec->tx_coalesce_usecs * 100) / 512);

	return 0;
}
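
/* Worked example of the conversion above (assuming the typical
 * 200 MHz LocalLink clock and C_PRESCALAR = 1023 noted in the
 * comment): one delay count is 5.12 us, so requesting
 * rx_coalesce_usecs = 100 yields (100 * 100) / 512 = 19 counts,
 * i.e. roughly 97 us of hardware delay; the value is clamped to the
 * 8-bit maximum of 255 counts (about 1305 us).
 */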

static const struct ethtool_ops temac_ethtool_ops = {
	.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
				     ETHTOOL_COALESCE_MAX_FRAMES,
	.nway_reset = phy_ethtool_nway_reset,
	.get_link = ethtool_op_get_link,
	.get_ts_info = ethtool_op_get_ts_info,
	.get_link_ksettings = phy_ethtool_get_link_ksettings,
	.set_link_ksettings = phy_ethtool_set_link_ksettings,
	.get_ringparam	= ll_temac_ethtools_get_ringparam,
	.set_ringparam	= ll_temac_ethtools_set_ringparam,
	.get_coalesce	= ll_temac_ethtools_get_coalesce,
	.set_coalesce	= ll_temac_ethtools_set_coalesce,
};

static int temac_probe(struct platform_device *pdev)
{
	struct ll_temac_platform_data *pdata = dev_get_platdata(&pdev->dev);
	struct device_node *temac_np = dev_of_node(&pdev->dev), *dma_np;
	struct temac_local *lp;
	struct net_device *ndev;
	u8 addr[ETH_ALEN];
	__be32 *p;
	bool little_endian;
	int rc = 0;

	/* Init network device structure */
	ndev = devm_alloc_etherdev(&pdev->dev, sizeof(*lp));
	if (!ndev)
		return -ENOMEM;

	platform_set_drvdata(pdev, ndev);
	SET_NETDEV_DEV(ndev, &pdev->dev);
	ndev->features = NETIF_F_SG;
	ndev->netdev_ops = &temac_netdev_ops;
	ndev->ethtool_ops = &temac_ethtool_ops;
#if 0
	ndev->features |= NETIF_F_IP_CSUM; /* Can checksum TCP/UDP over IPv4. */
	ndev->features |= NETIF_F_HW_CSUM; /* Can checksum all the packets. */
	ndev->features |= NETIF_F_IPV6_CSUM; /* Can checksum IPV6 TCP/UDP */
	ndev->features |= NETIF_F_HIGHDMA; /* Can DMA to high memory. */
	ndev->features |= NETIF_F_HW_VLAN_CTAG_TX; /* Transmit VLAN hw accel */
	ndev->features |= NETIF_F_HW_VLAN_CTAG_RX; /* Receive VLAN hw acceleration */
	ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER; /* Receive VLAN filtering */
	ndev->features |= NETIF_F_VLAN_CHALLENGED; /* cannot handle VLAN pkts */
	ndev->features |= NETIF_F_GSO; /* Enable software GSO. */
	ndev->features |= NETIF_F_MULTI_QUEUE; /* Has multiple TX/RX queues */
	ndev->features |= NETIF_F_LRO; /* large receive offload */
#endif

	/* setup temac private info structure */
	lp = netdev_priv(ndev);
	lp->ndev = ndev;
	lp->dev = &pdev->dev;
	lp->options = XTE_OPTION_DEFAULTS;
	lp->rx_bd_num = RX_BD_NUM_DEFAULT;
	lp->tx_bd_num = TX_BD_NUM_DEFAULT;
	spin_lock_init(&lp->rx_lock);
	INIT_DELAYED_WORK(&lp->restart_work, ll_temac_restart_work_func);

	/* Setup spinlock for synchronization of indirect register access */
	if (pdata) {
		if (!pdata->indirect_lock) {
			dev_err(&pdev->dev,
				"indirect_lock missing in platform_data\n");
			return -EINVAL;
		}
		lp->indirect_lock = pdata->indirect_lock;
	} else {
		lp->indirect_lock = devm_kmalloc(&pdev->dev,
						 sizeof(*lp->indirect_lock),
						 GFP_KERNEL);
		if (!lp->indirect_lock)
			return -ENOMEM;
		spin_lock_init(lp->indirect_lock);
	}

	/* map device registers */
	lp->regs = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(lp->regs)) {
		dev_err(&pdev->dev, "could not map TEMAC registers\n");
		return -ENOMEM;
	}

	/* Select register access functions with the specified
	 * endianness mode.  Default for OF devices is big-endian.
	 */
	little_endian = false;
	if (temac_np) {
		if (of_get_property(temac_np, "little-endian", NULL))
			little_endian = true;
	} else if (pdata) {
		little_endian = pdata->reg_little_endian;
	}
	if (little_endian) {
		lp->temac_ior = _temac_ior_le;
		lp->temac_iow = _temac_iow_le;
	} else {
		lp->temac_ior = _temac_ior_be;
		lp->temac_iow = _temac_iow_be;
	}

	/* Setup checksum offload, but default to off if not specified */
	lp->temac_features = 0;
	if (temac_np) {
		p = (__be32 *)of_get_property(temac_np, "xlnx,txcsum", NULL);
		if (p && be32_to_cpu(*p))
			lp->temac_features |= TEMAC_FEATURE_TX_CSUM;
		p = (__be32 *)of_get_property(temac_np, "xlnx,rxcsum", NULL);
		if (p && be32_to_cpu(*p))
			lp->temac_features |= TEMAC_FEATURE_RX_CSUM;
	} else if (pdata) {
		if (pdata->txcsum)
			lp->temac_features |= TEMAC_FEATURE_TX_CSUM;
		if (pdata->rxcsum)
			lp->temac_features |= TEMAC_FEATURE_RX_CSUM;
	}
	if (lp->temac_features & TEMAC_FEATURE_TX_CSUM)
		/* Can checksum TCP/UDP over IPv4. */
		ndev->features |= NETIF_F_IP_CSUM;

	/* Defaults for IRQ delay/coalescing setup.  These are
	 * configuration values, so they do not belong in the device tree.
	 */
	lp->coalesce_delay_tx = 0x10;
	lp->coalesce_count_tx = 0x22;
	lp->coalesce_delay_rx = 0xff;
	lp->coalesce_count_rx = 0x07;

	/* Setup LocalLink DMA */
	if (temac_np) {
		/* Find the DMA node, map the DMA registers, and
		 * decode the DMA IRQs.
		 */
		dma_np = of_parse_phandle(temac_np, "llink-connected", 0);
		if (!dma_np) {
			dev_err(&pdev->dev, "could not find DMA node\n");
			return -ENODEV;
		}

		/* Setup the DMA register accesses, could be DCR or
		 * memory mapped.
		 */
		if (temac_dcr_setup(lp, pdev, dma_np)) {
			/* no DCR in the device tree, try non-DCR */
			lp->sdma_regs = devm_of_iomap(&pdev->dev, dma_np, 0,
						      NULL);
			if (IS_ERR(lp->sdma_regs)) {
				dev_err(&pdev->dev,
					"unable to map DMA registers\n");
				of_node_put(dma_np);
				return PTR_ERR(lp->sdma_regs);
			}
			if (of_property_read_bool(dma_np, "little-endian")) {
				lp->dma_in = temac_dma_in32_le;
				lp->dma_out = temac_dma_out32_le;
			} else {
				lp->dma_in = temac_dma_in32_be;
				lp->dma_out = temac_dma_out32_be;
			}
			dev_dbg(&pdev->dev, "MEM base: %p\n", lp->sdma_regs);
		}

		/* Get DMA RX and TX interrupts */
		lp->rx_irq = irq_of_parse_and_map(dma_np, 0);
		lp->tx_irq = irq_of_parse_and_map(dma_np, 1);

		/* Finished with the DMA node; drop the reference */
		of_node_put(dma_np);
	} else if (pdata) {
		/* 2nd memory resource specifies DMA registers */
		lp->sdma_regs = devm_platform_ioremap_resource(pdev, 1);
		if (IS_ERR(lp->sdma_regs)) {
			dev_err(&pdev->dev,
				"could not map DMA registers\n");
			return PTR_ERR(lp->sdma_regs);
		}
		if (pdata->dma_little_endian) {
			lp->dma_in = temac_dma_in32_le;
			lp->dma_out = temac_dma_out32_le;
		} else {
			lp->dma_in = temac_dma_in32_be;
			lp->dma_out = temac_dma_out32_be;
		}

		/* Get DMA RX and TX interrupts */
		lp->rx_irq = platform_get_irq(pdev, 0);
		lp->tx_irq = platform_get_irq(pdev, 1);

		/* IRQ delay/coalescing setup */
		if (pdata->tx_irq_timeout || pdata->tx_irq_count) {
			lp->coalesce_delay_tx = pdata->tx_irq_timeout;
			lp->coalesce_count_tx = pdata->tx_irq_count;
		}
		if (pdata->rx_irq_timeout || pdata->rx_irq_count) {
			lp->coalesce_delay_rx = pdata->rx_irq_timeout;
			lp->coalesce_count_rx = pdata->rx_irq_count;
		}
	}

	/* Error handle returned DMA RX and TX interrupts */
	if (lp->rx_irq < 0)
		return dev_err_probe(&pdev->dev, lp->rx_irq,
				     "could not get DMA RX irq\n");
	if (lp->tx_irq < 0)
		return dev_err_probe(&pdev->dev, lp->tx_irq,
				     "could not get DMA TX irq\n");

	if (temac_np) {
		/* Retrieve the MAC address */
		rc = of_get_mac_address(temac_np, addr);
		if (rc) {
			dev_err(&pdev->dev, "could not find MAC address\n");
			return -ENODEV;
		}
		temac_init_mac_address(ndev, addr);
	} else if (pdata) {
		temac_init_mac_address(ndev, pdata->mac_addr);
	}

	rc = temac_mdio_setup(lp, pdev);
	if (rc)
		dev_warn(&pdev->dev, "error registering MDIO bus\n");
	if (temac_np) {
		lp->phy_node = of_parse_phandle(temac_np, "phy-handle", 0);
		if (lp->phy_node)
			dev_dbg(lp->dev, "using PHY node %pOF\n", lp->phy_node);
	} else if (pdata) {
		snprintf(lp->phy_name, sizeof(lp->phy_name),
			 PHY_ID_FMT, lp->mii_bus->id, pdata->phy_addr);
		lp->phy_interface = pdata->phy_interface;
	}

	/* Add the device attributes */
	rc = sysfs_create_group(&lp->dev->kobj, &temac_attr_group);
	if (rc) {
		dev_err(lp->dev, "Error creating sysfs files\n");
		goto err_sysfs_create;
	}

	rc = register_netdev(lp->ndev);
	if (rc) {
		dev_err(lp->dev, "register_netdev() error (%i)\n", rc);
		goto err_register_ndev;
	}

	return 0;

err_register_ndev:
	sysfs_remove_group(&lp->dev->kobj, &temac_attr_group);
err_sysfs_create:
	if (lp->phy_node)
		of_node_put(lp->phy_node);
	temac_mdio_teardown(lp);
	return rc;
}

static int temac_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct temac_local *lp = netdev_priv(ndev);

	unregister_netdev(ndev);
	sysfs_remove_group(&lp->dev->kobj, &temac_attr_group);
	if (lp->phy_node)
		of_node_put(lp->phy_node);
	temac_mdio_teardown(lp);
	return 0;
}

static const struct of_device_id temac_of_match[] = {
	{ .compatible = "xlnx,xps-ll-temac-1.01.b", },
	{ .compatible = "xlnx,xps-ll-temac-2.00.a", },
	{ .compatible = "xlnx,xps-ll-temac-2.02.a", },
	{ .compatible = "xlnx,xps-ll-temac-2.03.a", },
	{},
};
MODULE_DEVICE_TABLE(of, temac_of_match);
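
/* A minimal device-tree sketch matching the properties this driver
 * reads in temac_probe() (node names, labels, and register values are
 * illustrative; only "compatible", "llink-connected", and the optional
 * "little-endian", "phy-handle", "xlnx,txcsum" and "xlnx,rxcsum"
 * properties are interpreted here, with the DMA registers and IRQs
 * described by the node that "llink-connected" points at):
 *
 *	temac: ethernet@81000000 {
 *		compatible = "xlnx,xps-ll-temac-1.01.b";
 *		reg = <0x81000000 0x40>;
 *		llink-connected = <&dma>;
 *		phy-handle = <&phy0>;
 *		xlnx,txcsum = <0x1>;
 *		xlnx,rxcsum = <0x1>;
 *	};
 */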

static struct platform_driver temac_driver = {
	.probe = temac_probe,
	.remove = temac_remove,
	.driver = {
		.name = "xilinx_temac",
		.of_match_table = temac_of_match,
	},
};

module_platform_driver(temac_driver);

MODULE_DESCRIPTION("Xilinx LL_TEMAC Ethernet driver");
MODULE_AUTHOR("Yoshio Kashiwagi");
MODULE_LICENSE("GPL");