// SPDX-License-Identifier: GPL-2.0-only
/*
 * Driver for Xilinx TEMAC Ethernet device
 *
 * Copyright (c) 2008 Nissin Systems Co., Ltd.,  Yoshio Kashiwagi
 * Copyright (c) 2005-2008 DLA Systems,  David H. Lynch Jr. <dhlii@dlasys.net>
 * Copyright (c) 2008-2009 Secret Lab Technologies Ltd.
 *
 * This is a driver for the Xilinx ll_temac ipcore which is often used
 * in the Virtex and Spartan series of chips.
 *
 * Notes:
 * - The ll_temac hardware uses indirect access for many of the TEMAC
 *   registers, including the MDIO bus.  However, indirect access to MDIO
 *   registers takes considerably more clock cycles than to TEMAC registers.
 *   MDIO accesses are long, so threads doing them should probably sleep
 *   rather than busywait.  However, since only one indirect access can be
 *   in progress at any given time, that means that *all* indirect accesses
 *   could end up sleeping (to wait for an MDIO access to complete).
 *   Fortunately none of the indirect accesses are on the 'hot' path for tx
 *   or rx, so this should be okay.
 *
 * TODO:
 * - Factor out locallink DMA code into separate driver
 * - Fix support for hardware checksumming.
 * - Testing.  Lots and lots of testing.
 */

#include <linux/delay.h>
#include <linux/etherdevice.h>
#include <linux/mii.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/netdevice.h>
#include <linux/if_ether.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/of_platform.h>
#include <linux/of_address.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/tcp.h>      /* needed for sizeof(tcphdr) */
#include <linux/udp.h>      /* needed for sizeof(udphdr) */
#include <linux/phy.h>
#include <linux/in.h>
#include <linux/io.h>
#include <linux/ip.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/processor.h>
#include <linux/platform_data/xilinx-ll-temac.h>

#include "ll_temac.h"

#define TX_BD_NUM   64
#define RX_BD_NUM   128

/* ---------------------------------------------------------------------
 * Low level register access functions
 */

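/* Register I/O helpers in big-endian and little-endian variants.  The
 * matching pair is selected at probe time and assigned to
 * lp->temac_ior/lp->temac_iow.
 */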
static u32 _temac_ior_be(struct temac_local *lp, int offset)
{
	return ioread32be(lp->regs + offset);
}

static void _temac_iow_be(struct temac_local *lp, int offset, u32 value)
{
	return iowrite32be(value, lp->regs + offset);
}

static u32 _temac_ior_le(struct temac_local *lp, int offset)
{
	return ioread32(lp->regs + offset);
}

static void _temac_iow_le(struct temac_local *lp, int offset, u32 value)
{
	return iowrite32(value, lp->regs + offset);
}

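/* An indirect register access can be started only when the "hard
 * access ready" bit is set in the RDY0 register.
 */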
static bool hard_acs_rdy(struct temac_local *lp)
{
	return temac_ior(lp, XTE_RDY0_OFFSET) & XTE_RDY0_HARD_ACS_RDY_MASK;
}

static bool hard_acs_rdy_or_timeout(struct temac_local *lp, ktime_t timeout)
{
	ktime_t cur = ktime_get();

	return hard_acs_rdy(lp) || ktime_after(cur, timeout);
}

/* Poll for maximum 20 ms.  This is similar to the 2 jiffies @ 100 Hz
 * that was used before, and should cover MDIO bus speed down to 3200
 * Hz.
 */
#define HARD_ACS_RDY_POLL_NS (20 * NSEC_PER_MSEC)

/**
 * temac_indirect_busywait - Wait for current indirect register access
 * to complete.
 */
int temac_indirect_busywait(struct temac_local *lp)
{
	ktime_t timeout = ktime_add_ns(ktime_get(), HARD_ACS_RDY_POLL_NS);

	spin_until_cond(hard_acs_rdy_or_timeout(lp, timeout));
	if (WARN_ON(!hard_acs_rdy(lp)))
		return -ETIMEDOUT;
	return 0;
}

/**
 * temac_indirect_in32 - Indirect register read access.  This function
 * must be called without lp->indirect_lock being held.
 */
u32 temac_indirect_in32(struct temac_local *lp, int reg)
{
	unsigned long flags;
	int val;

	spin_lock_irqsave(lp->indirect_lock, flags);
	val = temac_indirect_in32_locked(lp, reg);
	spin_unlock_irqrestore(lp->indirect_lock, flags);
	return val;
}

/**
 * temac_indirect_in32_locked - Indirect register read access.  This
 * function must be called with lp->indirect_lock being held.  Use
 * this together with spin_lock_irqsave/spin_lock_irqrestore to avoid
 * repeated lock/unlock and to ensure uninterrupted access to indirect
 * registers.
 */
u32 temac_indirect_in32_locked(struct temac_local *lp, int reg)
{
	/* This initial wait should normally not spin, as we always
	 * try to wait for indirect access to complete before
	 * releasing the indirect_lock.
	 */
	if (WARN_ON(temac_indirect_busywait(lp)))
		return -ETIMEDOUT;
	/* Initiate read from indirect register */
	temac_iow(lp, XTE_CTL0_OFFSET, reg);
	/* Wait for indirect register access to complete.  We really
	 * should not see timeouts, and could even end up causing
	 * problems for the following indirect access, so let's make a
	 * bit of WARN noise.
	 */
	if (WARN_ON(temac_indirect_busywait(lp)))
		return -ETIMEDOUT;
	/* Value is ready now */
	return temac_ior(lp, XTE_LSW0_OFFSET);
}

/**
 * temac_indirect_out32 - Indirect register write access.  This function
 * must be called without lp->indirect_lock being held.
 */
void temac_indirect_out32(struct temac_local *lp, int reg, u32 value)
{
	unsigned long flags;

	spin_lock_irqsave(lp->indirect_lock, flags);
	temac_indirect_out32_locked(lp, reg, value);
	spin_unlock_irqrestore(lp->indirect_lock, flags);
}

/**
 * temac_indirect_out32_locked - Indirect register write access.  This
 * function must be called with lp->indirect_lock being held.  Use
 * this together with spin_lock_irqsave/spin_lock_irqrestore to avoid
 * repeated lock/unlock and to ensure uninterrupted access to indirect
 * registers.
 */
void temac_indirect_out32_locked(struct temac_local *lp, int reg, u32 value)
{
	/* As in temac_indirect_in32_locked(), we should normally not
	 * spin here.  And if it happens, we actually end up silently
	 * ignoring the write request.  Ouch.
	 */
	if (WARN_ON(temac_indirect_busywait(lp)))
		return;
	/* Initiate write to indirect register */
	temac_iow(lp, XTE_LSW0_OFFSET, value);
	temac_iow(lp, XTE_CTL0_OFFSET, CNTLREG_WRITE_ENABLE_MASK | reg);
	/* As in temac_indirect_in32_locked(), we should not see timeouts
	 * here.  And if it happens, we continue before the write has
	 * completed.  Not good.
	 */
	WARN_ON(temac_indirect_busywait(lp));
}

/**
 * temac_dma_in32_* - Memory mapped DMA read, these functions expect a
 * register input that is based on DCR word addresses which are then
 * converted to memory mapped byte addresses.  To be assigned to
 * lp->dma_in.
 */
static u32 temac_dma_in32_be(struct temac_local *lp, int reg)
{
	return ioread32be(lp->sdma_regs + (reg << 2));
}

static u32 temac_dma_in32_le(struct temac_local *lp, int reg)
{
	return ioread32(lp->sdma_regs + (reg << 2));
}

/**
 * temac_dma_out32_* - Memory mapped DMA write, these functions expect
 * a register input that is based on DCR word addresses which are then
 * converted to memory mapped byte addresses.  To be assigned to
 * lp->dma_out.
 */
static void temac_dma_out32_be(struct temac_local *lp, int reg, u32 value)
{
	iowrite32be(value, lp->sdma_regs + (reg << 2));
}

static void temac_dma_out32_le(struct temac_local *lp, int reg, u32 value)
{
	iowrite32(value, lp->sdma_regs + (reg << 2));
}

/* DMA register access functions can be DCR based or memory mapped.
 * The PowerPC 440 is DCR based, the PowerPC 405 and MicroBlaze are both
 * memory mapped.
 */
#ifdef CONFIG_PPC_DCR

/**
 * temac_dma_dcr_in - DCR based DMA read
 */
static u32 temac_dma_dcr_in(struct temac_local *lp, int reg)
{
	return dcr_read(lp->sdma_dcrs, reg);
}

/**
 * temac_dma_dcr_out - DCR based DMA write
 */
static void temac_dma_dcr_out(struct temac_local *lp, int reg, u32 value)
{
	dcr_write(lp->sdma_dcrs, reg, value);
}

/**
 * temac_dcr_setup - If the DMA is DCR based, then setup the address and
 * I/O functions
 */
static int temac_dcr_setup(struct temac_local *lp, struct platform_device *op,
				struct device_node *np)
{
	unsigned int dcrs;

	/* setup the dcr address mapping if it's in the device tree */

	dcrs = dcr_resource_start(np, 0);
	if (dcrs != 0) {
		lp->sdma_dcrs = dcr_map(np, dcrs, dcr_resource_len(np, 0));
		lp->dma_in = temac_dma_dcr_in;
		lp->dma_out = temac_dma_dcr_out;
		dev_dbg(&op->dev, "DCR base: %x\n", dcrs);
		return 0;
	}
	/* no DCR in the device tree, indicate a failure */
	return -1;
}

#else

/*
 * temac_dcr_setup - This is a stub for when DCR is not supported,
 * such as with MicroBlaze and x86
 */
static int temac_dcr_setup(struct temac_local *lp, struct platform_device *op,
				struct device_node *np)
{
	return -1;
}

#endif

/**
 * temac_dma_bd_release - Release buffer descriptor rings
 */
static void temac_dma_bd_release(struct net_device *ndev)
{
	struct temac_local *lp = netdev_priv(ndev);
	int i;

	/* Reset Local Link (DMA) */
	lp->dma_out(lp, DMA_CONTROL_REG, DMA_CONTROL_RST);

	for (i = 0; i < RX_BD_NUM; i++) {
		if (!lp->rx_skb[i])
			break;

		/* Descriptor fields are stored big-endian */
		dma_unmap_single(ndev->dev.parent,
				 be32_to_cpu(lp->rx_bd_v[i].phys),
				 XTE_MAX_JUMBO_FRAME_SIZE, DMA_FROM_DEVICE);
		dev_kfree_skb(lp->rx_skb[i]);
	}
	if (lp->rx_bd_v)
		dma_free_coherent(ndev->dev.parent,
				  sizeof(*lp->rx_bd_v) * RX_BD_NUM,
				  lp->rx_bd_v, lp->rx_bd_p);
	if (lp->tx_bd_v)
		dma_free_coherent(ndev->dev.parent,
				  sizeof(*lp->tx_bd_v) * TX_BD_NUM,
				  lp->tx_bd_v, lp->tx_bd_p);
}

/**
 * temac_dma_bd_init - Setup buffer descriptor rings
 */
static int temac_dma_bd_init(struct net_device *ndev)
{
	struct temac_local *lp = netdev_priv(ndev);
	struct sk_buff *skb;
	dma_addr_t skb_dma_addr;
	int i;

	lp->rx_skb = devm_kcalloc(&ndev->dev, RX_BD_NUM, sizeof(*lp->rx_skb),
				  GFP_KERNEL);
	if (!lp->rx_skb)
		goto out;

	/* allocate the tx and rx ring buffer descriptors. */
	/* returns a virtual address and a physical address. */
	lp->tx_bd_v = dma_alloc_coherent(ndev->dev.parent,
					 sizeof(*lp->tx_bd_v) * TX_BD_NUM,
					 &lp->tx_bd_p, GFP_KERNEL);
	if (!lp->tx_bd_v)
		goto out;

	lp->rx_bd_v = dma_alloc_coherent(ndev->dev.parent,
					 sizeof(*lp->rx_bd_v) * RX_BD_NUM,
					 &lp->rx_bd_p, GFP_KERNEL);
	if (!lp->rx_bd_v)
		goto out;

	for (i = 0; i < TX_BD_NUM; i++) {
		lp->tx_bd_v[i].next = cpu_to_be32(lp->tx_bd_p
				+ sizeof(*lp->tx_bd_v) * ((i + 1) % TX_BD_NUM));
	}

	for (i = 0; i < RX_BD_NUM; i++) {
		lp->rx_bd_v[i].next = cpu_to_be32(lp->rx_bd_p
				+ sizeof(*lp->rx_bd_v) * ((i + 1) % RX_BD_NUM));

		skb = netdev_alloc_skb_ip_align(ndev,
						XTE_MAX_JUMBO_FRAME_SIZE);
		if (!skb)
			goto out;

		lp->rx_skb[i] = skb;
		/* returns physical address of skb->data */
		skb_dma_addr = dma_map_single(ndev->dev.parent, skb->data,
					      XTE_MAX_JUMBO_FRAME_SIZE,
					      DMA_FROM_DEVICE);
		lp->rx_bd_v[i].phys = cpu_to_be32(skb_dma_addr);
		lp->rx_bd_v[i].len = cpu_to_be32(XTE_MAX_JUMBO_FRAME_SIZE);
		lp->rx_bd_v[i].app0 = cpu_to_be32(STS_CTRL_APP0_IRQONEND);
	}

	/* Configure DMA channel (irq setup) */
	lp->dma_out(lp, TX_CHNL_CTRL, lp->tx_chnl_ctrl |
		    0x00000400 | /* Use 1 Bit Wide Counters. Currently Not Used! */
		    CHNL_CTRL_IRQ_EN | CHNL_CTRL_IRQ_ERR_EN |
		    CHNL_CTRL_IRQ_DLY_EN | CHNL_CTRL_IRQ_COAL_EN);
	lp->dma_out(lp, RX_CHNL_CTRL, lp->rx_chnl_ctrl |
		    CHNL_CTRL_IRQ_IOE |
		    CHNL_CTRL_IRQ_EN | CHNL_CTRL_IRQ_ERR_EN |
		    CHNL_CTRL_IRQ_DLY_EN | CHNL_CTRL_IRQ_COAL_EN);

	/* Init descriptor indexes */
	lp->tx_bd_ci = 0;
	lp->tx_bd_next = 0;
	lp->tx_bd_tail = 0;
	lp->rx_bd_ci = 0;

	/* Enable RX DMA transfers */
	wmb();
	lp->dma_out(lp, RX_CURDESC_PTR, lp->rx_bd_p);
	lp->dma_out(lp, RX_TAILDESC_PTR,
		    lp->rx_bd_p + (sizeof(*lp->rx_bd_v) * (RX_BD_NUM - 1)));

	/* Prepare for TX DMA transfer */
	lp->dma_out(lp, TX_CURDESC_PTR, lp->tx_bd_p);

	return 0;

out:
	temac_dma_bd_release(ndev);
	return -ENOMEM;
}

/* ---------------------------------------------------------------------
 * net_device_ops
 */

static void temac_do_set_mac_address(struct net_device *ndev)
{
	struct temac_local *lp = netdev_priv(ndev);
	unsigned long flags;

	/* set up unicast MAC address filter with the MAC address */
	spin_lock_irqsave(lp->indirect_lock, flags);
	temac_indirect_out32_locked(lp, XTE_UAW0_OFFSET,
				    (ndev->dev_addr[0]) |
				    (ndev->dev_addr[1] << 8) |
				    (ndev->dev_addr[2] << 16) |
				    (ndev->dev_addr[3] << 24));
	/* There are reserved bits in EUAW1 so don't affect them.
	 * Set MAC bits [47:32] in EUAW1.
	 */
	temac_indirect_out32_locked(lp, XTE_UAW1_OFFSET,
				    (ndev->dev_addr[4] & 0x000000ff) |
				    (ndev->dev_addr[5] << 8));
	spin_unlock_irqrestore(lp->indirect_lock, flags);
}

static int temac_init_mac_address(struct net_device *ndev, const void *address)
{
	ether_addr_copy(ndev->dev_addr, address);
	if (!is_valid_ether_addr(ndev->dev_addr))
		eth_hw_addr_random(ndev);
	temac_do_set_mac_address(ndev);
	return 0;
}

static int temac_set_mac_address(struct net_device *ndev, void *p)
{
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;
	memcpy(ndev->dev_addr, addr->sa_data, ETH_ALEN);
	temac_do_set_mac_address(ndev);
	return 0;
}

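/* Program the receive address filters: enable promiscuous mode when
 * requested or when the multicast list exceeds the CAM table size,
 * otherwise load the multicast addresses into the CAM table entries.
 */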
static void temac_set_multicast_list(struct net_device *ndev)
{
	struct temac_local *lp = netdev_priv(ndev);
	u32 multi_addr_msw, multi_addr_lsw;
	int i = 0;
	unsigned long flags;
	bool promisc_mode_disabled = false;

	if (ndev->flags & (IFF_PROMISC | IFF_ALLMULTI) ||
	    (netdev_mc_count(ndev) > MULTICAST_CAM_TABLE_NUM)) {
		temac_indirect_out32(lp, XTE_AFM_OFFSET, XTE_AFM_EPPRM_MASK);
		dev_info(&ndev->dev, "Promiscuous mode enabled.\n");
		return;
	}

	spin_lock_irqsave(lp->indirect_lock, flags);

	if (!netdev_mc_empty(ndev)) {
		struct netdev_hw_addr *ha;

		netdev_for_each_mc_addr(ha, ndev) {
			if (WARN_ON(i >= MULTICAST_CAM_TABLE_NUM))
				break;
			multi_addr_msw = ((ha->addr[3] << 24) |
					  (ha->addr[2] << 16) |
					  (ha->addr[1] << 8) |
					  (ha->addr[0]));
			temac_indirect_out32_locked(lp, XTE_MAW0_OFFSET,
						    multi_addr_msw);
			multi_addr_lsw = ((ha->addr[5] << 8) |
					  (ha->addr[4]) | (i << 16));
			temac_indirect_out32_locked(lp, XTE_MAW1_OFFSET,
						    multi_addr_lsw);
			i++;
		}
	}

	/* Clear all or remaining/unused address table entries */
	while (i < MULTICAST_CAM_TABLE_NUM) {
		temac_indirect_out32_locked(lp, XTE_MAW0_OFFSET, 0);
		temac_indirect_out32_locked(lp, XTE_MAW1_OFFSET, i << 16);
		i++;
	}

	/* Enable address filter block if currently disabled */
	if (temac_indirect_in32_locked(lp, XTE_AFM_OFFSET)
	    & XTE_AFM_EPPRM_MASK) {
		temac_indirect_out32_locked(lp, XTE_AFM_OFFSET, 0);
		promisc_mode_disabled = true;
	}

	spin_unlock_irqrestore(lp->indirect_lock, flags);

	if (promisc_mode_disabled)
		dev_info(&ndev->dev, "Promiscuous mode disabled.\n");
}

static struct temac_option {
	int flg;
	u32 opt;
	u32 reg;
	u32 m_or;
	u32 m_and;
} temac_options[] = {
	/* Turn on jumbo packet support for both Rx and Tx */
	{
		.opt = XTE_OPTION_JUMBO,
		.reg = XTE_TXC_OFFSET,
		.m_or = XTE_TXC_TXJMBO_MASK,
	},
	{
		.opt = XTE_OPTION_JUMBO,
		.reg = XTE_RXC1_OFFSET,
		.m_or = XTE_RXC1_RXJMBO_MASK,
	},
	/* Turn on VLAN packet support for both Rx and Tx */
	{
		.opt = XTE_OPTION_VLAN,
		.reg = XTE_TXC_OFFSET,
		.m_or = XTE_TXC_TXVLAN_MASK,
	},
	{
		.opt = XTE_OPTION_VLAN,
		.reg = XTE_RXC1_OFFSET,
		.m_or = XTE_RXC1_RXVLAN_MASK,
	},
	/* Turn on FCS stripping on receive packets */
	{
		.opt = XTE_OPTION_FCS_STRIP,
		.reg = XTE_RXC1_OFFSET,
		.m_or = XTE_RXC1_RXFCS_MASK,
	},
	/* Turn on FCS insertion on transmit packets */
	{
		.opt = XTE_OPTION_FCS_INSERT,
		.reg = XTE_TXC_OFFSET,
		.m_or = XTE_TXC_TXFCS_MASK,
	},
	/* Turn on length/type field checking on receive packets */
	{
		.opt = XTE_OPTION_LENTYPE_ERR,
		.reg = XTE_RXC1_OFFSET,
		.m_or = XTE_RXC1_RXLT_MASK,
	},
	/* Turn on flow control */
	{
		.opt = XTE_OPTION_FLOW_CONTROL,
		.reg = XTE_FCC_OFFSET,
		.m_or = XTE_FCC_RXFLO_MASK,
	},
	/* Turn on flow control */
	{
		.opt = XTE_OPTION_FLOW_CONTROL,
		.reg = XTE_FCC_OFFSET,
		.m_or = XTE_FCC_TXFLO_MASK,
	},
	/* Turn on promiscuous frame filtering (all frames are received) */
	{
		.opt = XTE_OPTION_PROMISC,
		.reg = XTE_AFM_OFFSET,
		.m_or = XTE_AFM_EPPRM_MASK,
	},
	/* Enable transmitter if not already enabled */
	{
		.opt = XTE_OPTION_TXEN,
		.reg = XTE_TXC_OFFSET,
		.m_or = XTE_TXC_TXEN_MASK,
	},
	/* Enable receiver */
	{
		.opt = XTE_OPTION_RXEN,
		.reg = XTE_RXC1_OFFSET,
		.m_or = XTE_RXC1_RXEN_MASK,
	},
	{}
};

/**
 * temac_setoptions - Set device options by updating the corresponding
 * register bits for each requested option.
 */
static u32 temac_setoptions(struct net_device *ndev, u32 options)
{
	struct temac_local *lp = netdev_priv(ndev);
	struct temac_option *tp = &temac_options[0];
	int reg;
	unsigned long flags;

	spin_lock_irqsave(lp->indirect_lock, flags);
	while (tp->opt) {
		reg = temac_indirect_in32_locked(lp, tp->reg) & ~tp->m_or;
		if (options & tp->opt) {
			reg |= tp->m_or;
			temac_indirect_out32_locked(lp, tp->reg, reg);
		}
		tp++;
	}
	spin_unlock_irqrestore(lp->indirect_lock, flags);
	lp->options |= options;

	return 0;
}

/* Initialize temac */
static void temac_device_reset(struct net_device *ndev)
{
	struct temac_local *lp = netdev_priv(ndev);
	u32 timeout;
	u32 val;
	unsigned long flags;

	/* Perform a software reset */

	/* 0x300 host enable bit ? */
	/* reset PHY through control register ?:1 */

	dev_dbg(&ndev->dev, "%s()\n", __func__);

	/* Reset the receiver and wait for it to finish reset */
	temac_indirect_out32(lp, XTE_RXC1_OFFSET, XTE_RXC1_RXRST_MASK);
	timeout = 1000;
	while (temac_indirect_in32(lp, XTE_RXC1_OFFSET) & XTE_RXC1_RXRST_MASK) {
		udelay(1);
		if (--timeout == 0) {
			dev_err(&ndev->dev,
				"temac_device_reset RX reset timeout!!\n");
			break;
		}
	}

	/* Reset the transmitter and wait for it to finish reset */
	temac_indirect_out32(lp, XTE_TXC_OFFSET, XTE_TXC_TXRST_MASK);
	timeout = 1000;
	while (temac_indirect_in32(lp, XTE_TXC_OFFSET) & XTE_TXC_TXRST_MASK) {
		udelay(1);
		if (--timeout == 0) {
			dev_err(&ndev->dev,
				"temac_device_reset TX reset timeout!!\n");
			break;
		}
	}

	/* Disable the receiver */
	spin_lock_irqsave(lp->indirect_lock, flags);
	val = temac_indirect_in32_locked(lp, XTE_RXC1_OFFSET);
	temac_indirect_out32_locked(lp, XTE_RXC1_OFFSET,
				    val & ~XTE_RXC1_RXEN_MASK);
	spin_unlock_irqrestore(lp->indirect_lock, flags);

	/* Reset Local Link (DMA) */
	lp->dma_out(lp, DMA_CONTROL_REG, DMA_CONTROL_RST);
	timeout = 1000;
	while (lp->dma_in(lp, DMA_CONTROL_REG) & DMA_CONTROL_RST) {
		udelay(1);
		if (--timeout == 0) {
			dev_err(&ndev->dev,
				"temac_device_reset DMA reset timeout!!\n");
			break;
		}
	}
	lp->dma_out(lp, DMA_CONTROL_REG, DMA_TAIL_ENABLE);

	if (temac_dma_bd_init(ndev)) {
		dev_err(&ndev->dev,
			"temac_device_reset descriptor allocation failed\n");
	}

	spin_lock_irqsave(lp->indirect_lock, flags);
	temac_indirect_out32_locked(lp, XTE_RXC0_OFFSET, 0);
	temac_indirect_out32_locked(lp, XTE_RXC1_OFFSET, 0);
	temac_indirect_out32_locked(lp, XTE_TXC_OFFSET, 0);
	temac_indirect_out32_locked(lp, XTE_FCC_OFFSET, XTE_FCC_RXFLO_MASK);
	spin_unlock_irqrestore(lp->indirect_lock, flags);

	/* Sync default options with HW but leave receiver and
	 * transmitter disabled.
	 */
	temac_setoptions(ndev,
			 lp->options & ~(XTE_OPTION_TXEN | XTE_OPTION_RXEN));

	temac_do_set_mac_address(ndev);

	/* Set address filter table */
	temac_set_multicast_list(ndev);
	if (temac_setoptions(ndev, lp->options))
		dev_err(&ndev->dev, "Error setting TEMAC options\n");

	/* Init Driver variable */
	netif_trans_update(ndev); /* prevent tx timeout */
}

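/* Callback from the PHY library on link state changes: write the
 * newly negotiated speed out to the TEMAC EMCFG register.
 */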
static void temac_adjust_link(struct net_device *ndev)
{
	struct temac_local *lp = netdev_priv(ndev);
	struct phy_device *phy = ndev->phydev;
	u32 mii_speed;
	int link_state;
	unsigned long flags;

	/* hash together the state values to decide if something has changed */
	link_state = phy->speed | (phy->duplex << 1) | phy->link;

	if (lp->last_link != link_state) {
		spin_lock_irqsave(lp->indirect_lock, flags);
		mii_speed = temac_indirect_in32_locked(lp, XTE_EMCFG_OFFSET);
		mii_speed &= ~XTE_EMCFG_LINKSPD_MASK;

		switch (phy->speed) {
		case SPEED_1000: mii_speed |= XTE_EMCFG_LINKSPD_1000; break;
		case SPEED_100: mii_speed |= XTE_EMCFG_LINKSPD_100; break;
		case SPEED_10: mii_speed |= XTE_EMCFG_LINKSPD_10; break;
		}

		/* Write new speed setting out to TEMAC */
		temac_indirect_out32_locked(lp, XTE_EMCFG_OFFSET, mii_speed);
		spin_unlock_irqrestore(lp->indirect_lock, flags);

		lp->last_link = link_state;
		phy_print_status(phy);
	}
}

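/* The skb pointer for an in-flight TX buffer is stashed in the unused
 * app3/app4 words of its descriptor so that it can be recovered when
 * the transmission completes.
 */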
#ifdef CONFIG_64BIT

static void ptr_to_txbd(void *p, struct cdmac_bd *bd)
{
	bd->app3 = (u32)(((u64)p) >> 32);
	bd->app4 = (u32)((u64)p & 0xFFFFFFFF);
}

static void *ptr_from_txbd(struct cdmac_bd *bd)
{
	return (void *)(((u64)(bd->app3) << 32) | bd->app4);
}

#else

static void ptr_to_txbd(void *p, struct cdmac_bd *bd)
{
	bd->app4 = (u32)p;
}

static void *ptr_from_txbd(struct cdmac_bd *bd)
{
	return (void *)(bd->app4);
}

#endif

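/* Reclaim completed TX descriptors: unmap the buffers, free the skbs,
 * update stats, and wake the transmit queue.  Runs from the TX
 * interrupt handler.
 */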
static void temac_start_xmit_done(struct net_device *ndev)
{
	struct temac_local *lp = netdev_priv(ndev);
	struct cdmac_bd *cur_p;
	unsigned int stat = 0;
	struct sk_buff *skb;

	cur_p = &lp->tx_bd_v[lp->tx_bd_ci];
	stat = be32_to_cpu(cur_p->app0);

	while (stat & STS_CTRL_APP0_CMPLT) {
		dma_unmap_single(ndev->dev.parent, be32_to_cpu(cur_p->phys),
				 be32_to_cpu(cur_p->len), DMA_TO_DEVICE);
		skb = (struct sk_buff *)ptr_from_txbd(cur_p);
		if (skb)
			dev_consume_skb_irq(skb);
		cur_p->app0 = 0;
		cur_p->app1 = 0;
		cur_p->app2 = 0;
		cur_p->app3 = 0;
		cur_p->app4 = 0;

		ndev->stats.tx_packets++;
		ndev->stats.tx_bytes += be32_to_cpu(cur_p->len);

		lp->tx_bd_ci++;
		if (lp->tx_bd_ci >= TX_BD_NUM)
			lp->tx_bd_ci = 0;

		cur_p = &lp->tx_bd_v[lp->tx_bd_ci];
		stat = be32_to_cpu(cur_p->app0);
	}

	netif_wake_queue(ndev);
}

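/* Check that the next num_frag + 1 descriptors starting at the tail
 * are free (app0 cleared); return NETDEV_TX_BUSY if any of them is
 * still in use.
 */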
static inline int temac_check_tx_bd_space(struct temac_local *lp, int num_frag)
{
	struct cdmac_bd *cur_p;
	int tail;

	tail = lp->tx_bd_tail;
	cur_p = &lp->tx_bd_v[tail];

	do {
		if (cur_p->app0)
			return NETDEV_TX_BUSY;

		tail++;
		if (tail >= TX_BD_NUM)
			tail = 0;

		cur_p = &lp->tx_bd_v[tail];
		num_frag--;
	} while (num_frag >= 0);

	return 0;
}

static netdev_tx_t
temac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct temac_local *lp = netdev_priv(ndev);
	struct cdmac_bd *cur_p;
	dma_addr_t start_p, tail_p, skb_dma_addr;
	int ii;
	unsigned long num_frag;
	skb_frag_t *frag;

	num_frag = skb_shinfo(skb)->nr_frags;
	frag = &skb_shinfo(skb)->frags[0];
	start_p = lp->tx_bd_p + sizeof(*lp->tx_bd_v) * lp->tx_bd_tail;
	cur_p = &lp->tx_bd_v[lp->tx_bd_tail];

	if (temac_check_tx_bd_space(lp, num_frag + 1)) {
		if (!netif_queue_stopped(ndev))
			netif_stop_queue(ndev);
		return NETDEV_TX_BUSY;
	}

	cur_p->app0 = 0;
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		unsigned int csum_start_off = skb_checksum_start_offset(skb);
		unsigned int csum_index_off = csum_start_off + skb->csum_offset;

		cur_p->app0 |= cpu_to_be32(0x000001); /* TX Checksum Enabled */
		cur_p->app1 = cpu_to_be32((csum_start_off << 16)
					  | csum_index_off);
		cur_p->app2 = 0;  /* initial checksum seed */
	}

	cur_p->app0 |= cpu_to_be32(STS_CTRL_APP0_SOP);
	skb_dma_addr = dma_map_single(ndev->dev.parent, skb->data,
				      skb_headlen(skb), DMA_TO_DEVICE);
	cur_p->len = cpu_to_be32(skb_headlen(skb));
	cur_p->phys = cpu_to_be32(skb_dma_addr);
	ptr_to_txbd((void *)skb, cur_p);

	for (ii = 0; ii < num_frag; ii++) {
		lp->tx_bd_tail++;
		if (lp->tx_bd_tail >= TX_BD_NUM)
			lp->tx_bd_tail = 0;

		cur_p = &lp->tx_bd_v[lp->tx_bd_tail];
		skb_dma_addr = dma_map_single(ndev->dev.parent,
					      skb_frag_address(frag),
					      skb_frag_size(frag),
					      DMA_TO_DEVICE);
		cur_p->phys = cpu_to_be32(skb_dma_addr);
		cur_p->len = cpu_to_be32(skb_frag_size(frag));
		cur_p->app0 = 0;
		frag++;
	}
	cur_p->app0 |= cpu_to_be32(STS_CTRL_APP0_EOP);

	tail_p = lp->tx_bd_p + sizeof(*lp->tx_bd_v) * lp->tx_bd_tail;
	lp->tx_bd_tail++;
	if (lp->tx_bd_tail >= TX_BD_NUM)
		lp->tx_bd_tail = 0;

	skb_tx_timestamp(skb);

	/* Kick off the transfer */
	wmb();
	lp->dma_out(lp, TX_TAILDESC_PTR, tail_p); /* DMA start */

	return NETDEV_TX_OK;
}

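/* Receive path: harvest completed RX descriptors, hand the packets up
 * the stack, and refill each descriptor with a freshly allocated skb.
 * Serialized by lp->rx_lock.
 */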
static void ll_temac_recv(struct net_device *ndev)
{
	struct temac_local *lp = netdev_priv(ndev);
	struct sk_buff *skb, *new_skb;
	unsigned int bdstat;
	struct cdmac_bd *cur_p;
	dma_addr_t tail_p, skb_dma_addr;
	int length;
	unsigned long flags;

	spin_lock_irqsave(&lp->rx_lock, flags);

	tail_p = lp->rx_bd_p + sizeof(*lp->rx_bd_v) * lp->rx_bd_ci;
	cur_p = &lp->rx_bd_v[lp->rx_bd_ci];

	bdstat = be32_to_cpu(cur_p->app0);
	while (bdstat & STS_CTRL_APP0_CMPLT) {
		skb = lp->rx_skb[lp->rx_bd_ci];
		length = be32_to_cpu(cur_p->app4) & 0x3FFF;

		dma_unmap_single(ndev->dev.parent, be32_to_cpu(cur_p->phys),
				 XTE_MAX_JUMBO_FRAME_SIZE, DMA_FROM_DEVICE);

		skb_put(skb, length);
		skb->protocol = eth_type_trans(skb, ndev);
		skb_checksum_none_assert(skb);

		/* if we're doing rx csum offload, set it up */
		if (((lp->temac_features & TEMAC_FEATURE_RX_CSUM) != 0) &&
		    (skb->protocol == htons(ETH_P_IP)) &&
		    (skb->len > 64)) {
			/* Convert from device endianness (be32) to cpu
			 * endianness, and if necessary swap the bytes
			 * (back) for proper IP checksum byte order
			 * (be16).
			 */
			skb->csum = htons(be32_to_cpu(cur_p->app3) & 0xFFFF);
			skb->ip_summed = CHECKSUM_COMPLETE;
		}

		if (!skb_defer_rx_timestamp(skb))
			netif_rx(skb);

		ndev->stats.rx_packets++;
		ndev->stats.rx_bytes += length;

		new_skb = netdev_alloc_skb_ip_align(ndev,
						    XTE_MAX_JUMBO_FRAME_SIZE);
		if (!new_skb) {
			spin_unlock_irqrestore(&lp->rx_lock, flags);
			return;
		}

		cur_p->app0 = cpu_to_be32(STS_CTRL_APP0_IRQONEND);
		skb_dma_addr = dma_map_single(ndev->dev.parent, new_skb->data,
					      XTE_MAX_JUMBO_FRAME_SIZE,
					      DMA_FROM_DEVICE);
		cur_p->phys = cpu_to_be32(skb_dma_addr);
		cur_p->len = cpu_to_be32(XTE_MAX_JUMBO_FRAME_SIZE);
		lp->rx_skb[lp->rx_bd_ci] = new_skb;

		lp->rx_bd_ci++;
		if (lp->rx_bd_ci >= RX_BD_NUM)
			lp->rx_bd_ci = 0;

		cur_p = &lp->rx_bd_v[lp->rx_bd_ci];
		bdstat = be32_to_cpu(cur_p->app0);
	}
	lp->dma_out(lp, RX_TAILDESC_PTR, tail_p);

	spin_unlock_irqrestore(&lp->rx_lock, flags);
}

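/* TX interrupt handler: acknowledge the channel IRQ status, reclaim
 * completed descriptors, and report DMA errors.
 */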
static irqreturn_t ll_temac_tx_irq(int irq, void *_ndev)
{
	struct net_device *ndev = _ndev;
	struct temac_local *lp = netdev_priv(ndev);
	unsigned int status;

	status = lp->dma_in(lp, TX_IRQ_REG);
	lp->dma_out(lp, TX_IRQ_REG, status);

	if (status & (IRQ_COAL | IRQ_DLY))
		temac_start_xmit_done(lp->ndev);
	if (status & (IRQ_ERR | IRQ_DMAERR))
		dev_err_ratelimited(&ndev->dev,
				    "TX error 0x%x TX_CHNL_STS=0x%08x\n",
				    status, lp->dma_in(lp, TX_CHNL_STS));

	return IRQ_HANDLED;
}

static irqreturn_t ll_temac_rx_irq(int irq, void *_ndev)
{
	struct net_device *ndev = _ndev;
	struct temac_local *lp = netdev_priv(ndev);
	unsigned int status;

	/* Read and clear the status registers */
	status = lp->dma_in(lp, RX_IRQ_REG);
	lp->dma_out(lp, RX_IRQ_REG, status);

	if (status & (IRQ_COAL | IRQ_DLY))
		ll_temac_recv(lp->ndev);
	if (status & (IRQ_ERR | IRQ_DMAERR))
		dev_err_ratelimited(&ndev->dev,
				    "RX error 0x%x RX_CHNL_STS=0x%08x\n",
				    status, lp->dma_in(lp, RX_CHNL_STS));

	return IRQ_HANDLED;
}

static int temac_open(struct net_device *ndev)
{
	struct temac_local *lp = netdev_priv(ndev);
	struct phy_device *phydev = NULL;
	int rc;

	dev_dbg(&ndev->dev, "temac_open()\n");

	if (lp->phy_node) {
		phydev = of_phy_connect(lp->ndev, lp->phy_node,
					temac_adjust_link, 0, 0);
		if (!phydev) {
			dev_err(lp->dev, "of_phy_connect() failed\n");
			return -ENODEV;
		}
		phy_start(phydev);
	} else if (strlen(lp->phy_name) > 0) {
		phydev = phy_connect(lp->ndev, lp->phy_name, temac_adjust_link,
				     lp->phy_interface);
		if (IS_ERR(phydev)) {
			dev_err(lp->dev, "phy_connect() failed\n");
			return PTR_ERR(phydev);
		}
		phy_start(phydev);
	}

	temac_device_reset(ndev);

	rc = request_irq(lp->tx_irq, ll_temac_tx_irq, 0, ndev->name, ndev);
	if (rc)
		goto err_tx_irq;
	rc = request_irq(lp->rx_irq, ll_temac_rx_irq, 0, ndev->name, ndev);
	if (rc)
		goto err_rx_irq;

	return 0;

 err_rx_irq:
	free_irq(lp->tx_irq, ndev);
 err_tx_irq:
	if (phydev)
		phy_disconnect(phydev);
	dev_err(lp->dev, "request_irq() failed\n");
	return rc;
}

static int temac_stop(struct net_device *ndev)
{
	struct temac_local *lp = netdev_priv(ndev);
	struct phy_device *phydev = ndev->phydev;

	dev_dbg(&ndev->dev, "temac_close()\n");

	free_irq(lp->tx_irq, ndev);
	free_irq(lp->rx_irq, ndev);

	if (phydev)
		phy_disconnect(phydev);

	temac_dma_bd_release(ndev);

	return 0;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void
temac_poll_controller(struct net_device *ndev)
{
	struct temac_local *lp = netdev_priv(ndev);

	disable_irq(lp->tx_irq);
	disable_irq(lp->rx_irq);

	ll_temac_rx_irq(lp->rx_irq, ndev);
	ll_temac_tx_irq(lp->tx_irq, ndev);

	enable_irq(lp->tx_irq);
	enable_irq(lp->rx_irq);
}
#endif

static const struct net_device_ops temac_netdev_ops = {
	.ndo_open = temac_open,
	.ndo_stop = temac_stop,
	.ndo_start_xmit = temac_start_xmit,
	.ndo_set_rx_mode = temac_set_multicast_list,
	.ndo_set_mac_address = temac_set_mac_address,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_do_ioctl = phy_do_ioctl_running,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = temac_poll_controller,
#endif
};

/* ---------------------------------------------------------------------
 * SYSFS device attributes
 */
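/* Dump the first 0x11 LocalLink DMA registers for debugging, eight
 * values per line.
 */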
static ssize_t temac_show_llink_regs(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	struct temac_local *lp = netdev_priv(ndev);
	int i, len = 0;

	for (i = 0; i < 0x11; i++)
		len += sprintf(buf + len, "%.8x%s", lp->dma_in(lp, i),
			       (i % 8) == 7 ? "\n" : " ");
	len += sprintf(buf + len, "\n");

	return len;
}

static DEVICE_ATTR(llink_regs, 0440, temac_show_llink_regs, NULL);

static struct attribute *temac_device_attrs[] = {
	&dev_attr_llink_regs.attr,
	NULL,
};

static const struct attribute_group temac_attr_group = {
	.attrs = temac_device_attrs,
};

/* ethtool support */
static const struct ethtool_ops temac_ethtool_ops = {
	.nway_reset = phy_ethtool_nway_reset,
	.get_link = ethtool_op_get_link,
	.get_ts_info = ethtool_op_get_ts_info,
	.get_link_ksettings = phy_ethtool_get_link_ksettings,
	.set_link_ksettings = phy_ethtool_set_link_ksettings,
};

static int temac_probe(struct platform_device *pdev)
{
	struct ll_temac_platform_data *pdata = dev_get_platdata(&pdev->dev);
	struct device_node *temac_np = dev_of_node(&pdev->dev), *dma_np;
	struct temac_local *lp;
	struct net_device *ndev;
	struct resource *res;
	const void *addr;
	__be32 *p;
	bool little_endian;
	int rc = 0;

	/* Init network device structure */
	ndev = devm_alloc_etherdev(&pdev->dev, sizeof(*lp));
	if (!ndev)
		return -ENOMEM;

	platform_set_drvdata(pdev, ndev);
	SET_NETDEV_DEV(ndev, &pdev->dev);
	ndev->features = NETIF_F_SG;
	ndev->netdev_ops = &temac_netdev_ops;
	ndev->ethtool_ops = &temac_ethtool_ops;
#if 0
	ndev->features |= NETIF_F_IP_CSUM; /* Can checksum TCP/UDP over IPv4. */
	ndev->features |= NETIF_F_HW_CSUM; /* Can checksum all the packets. */
	ndev->features |= NETIF_F_IPV6_CSUM; /* Can checksum IPV6 TCP/UDP */
	ndev->features |= NETIF_F_HIGHDMA; /* Can DMA to high memory. */
	ndev->features |= NETIF_F_HW_VLAN_CTAG_TX; /* Transmit VLAN hw accel */
	ndev->features |= NETIF_F_HW_VLAN_CTAG_RX; /* Receive VLAN hw acceleration */
	ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER; /* Receive VLAN filtering */
	ndev->features |= NETIF_F_VLAN_CHALLENGED; /* cannot handle VLAN pkts */
	ndev->features |= NETIF_F_GSO; /* Enable software GSO. */
	ndev->features |= NETIF_F_MULTI_QUEUE; /* Has multiple TX/RX queues */
	ndev->features |= NETIF_F_LRO; /* large receive offload */
#endif

	/* setup temac private info structure */
	lp = netdev_priv(ndev);
	lp->ndev = ndev;
	lp->dev = &pdev->dev;
	lp->options = XTE_OPTION_DEFAULTS;
	spin_lock_init(&lp->rx_lock);

	/* Setup spinlock for synchronization of indirect register access */
	if (pdata) {
		if (!pdata->indirect_lock) {
			dev_err(&pdev->dev,
				"indirect_lock missing in platform_data\n");
			return -EINVAL;
		}
		lp->indirect_lock = pdata->indirect_lock;
	} else {
		lp->indirect_lock = devm_kmalloc(&pdev->dev,
						 sizeof(*lp->indirect_lock),
						 GFP_KERNEL);
		if (!lp->indirect_lock)
			return -ENOMEM;
		spin_lock_init(lp->indirect_lock);
	}

	/* map device registers */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	lp->regs = devm_ioremap(&pdev->dev, res->start,
				resource_size(res));
	if (!lp->regs) {
		dev_err(&pdev->dev, "could not map TEMAC registers\n");
		return -ENOMEM;
	}

	/* Select register access functions with the specified
	 * endianness mode.  Default for OF devices is big-endian.
	 */
	little_endian = false;
	if (temac_np) {
		if (of_get_property(temac_np, "little-endian", NULL))
			little_endian = true;
	} else if (pdata) {
		little_endian = pdata->reg_little_endian;
	}
	if (little_endian) {
		lp->temac_ior = _temac_ior_le;
		lp->temac_iow = _temac_iow_le;
	} else {
		lp->temac_ior = _temac_ior_be;
		lp->temac_iow = _temac_iow_be;
	}

	/* Setup checksum offload, but default to off if not specified */
	lp->temac_features = 0;
	if (temac_np) {
		p = (__be32 *)of_get_property(temac_np, "xlnx,txcsum", NULL);
		if (p && be32_to_cpu(*p))
			lp->temac_features |= TEMAC_FEATURE_TX_CSUM;
		p = (__be32 *)of_get_property(temac_np, "xlnx,rxcsum", NULL);
		if (p && be32_to_cpu(*p))
			lp->temac_features |= TEMAC_FEATURE_RX_CSUM;
	} else if (pdata) {
		if (pdata->txcsum)
			lp->temac_features |= TEMAC_FEATURE_TX_CSUM;
		if (pdata->rxcsum)
			lp->temac_features |= TEMAC_FEATURE_RX_CSUM;
	}
	if (lp->temac_features & TEMAC_FEATURE_TX_CSUM)
		/* Can checksum TCP/UDP over IPv4. */
		ndev->features |= NETIF_F_IP_CSUM;

	/* Setup LocalLink DMA */
	if (temac_np) {
		/* Find the DMA node, map the DMA registers, and
		 * decode the DMA IRQs.
		 */
		dma_np = of_parse_phandle(temac_np, "llink-connected", 0);
		if (!dma_np) {
			dev_err(&pdev->dev, "could not find DMA node\n");
			return -ENODEV;
		}

		/* Setup the DMA register accesses, could be DCR or
		 * memory mapped.
		 */
		if (temac_dcr_setup(lp, pdev, dma_np)) {
			/* no DCR in the device tree, try non-DCR */
			lp->sdma_regs = devm_of_iomap(&pdev->dev, dma_np, 0,
						      NULL);
			if (IS_ERR(lp->sdma_regs)) {
				dev_err(&pdev->dev,
					"unable to map DMA registers\n");
				of_node_put(dma_np);
				return PTR_ERR(lp->sdma_regs);
			}
			if (of_get_property(dma_np, "little-endian", NULL)) {
				lp->dma_in = temac_dma_in32_le;
				lp->dma_out = temac_dma_out32_le;
			} else {
				lp->dma_in = temac_dma_in32_be;
				lp->dma_out = temac_dma_out32_be;
			}
			dev_dbg(&pdev->dev, "MEM base: %p\n", lp->sdma_regs);
		}

		/* Get DMA RX and TX interrupts */
		lp->rx_irq = irq_of_parse_and_map(dma_np, 0);
		lp->tx_irq = irq_of_parse_and_map(dma_np, 1);

		/* Use defaults for IRQ delay/coalescing setup.  These
		 * are configuration values, so they do not belong in
		 * the device tree.
		 */
		lp->tx_chnl_ctrl = 0x10220000;
		lp->rx_chnl_ctrl = 0xff070000;

		/* Finished with the DMA node; drop the reference */
		of_node_put(dma_np);
	} else if (pdata) {
		/* 2nd memory resource specifies DMA registers */
		res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
		lp->sdma_regs = devm_ioremap(&pdev->dev, res->start,
					     resource_size(res));
		if (!lp->sdma_regs) {
			dev_err(&pdev->dev,
				"could not map DMA registers\n");
			return -ENOMEM;
		}
		if (pdata->dma_little_endian) {
			lp->dma_in = temac_dma_in32_le;
			lp->dma_out = temac_dma_out32_le;
		} else {
			lp->dma_in = temac_dma_in32_be;
			lp->dma_out = temac_dma_out32_be;
		}

		/* Get DMA RX and TX interrupts */
		lp->rx_irq = platform_get_irq(pdev, 0);
		lp->tx_irq = platform_get_irq(pdev, 1);

		/* IRQ delay/coalescing setup */
		if (pdata->tx_irq_timeout || pdata->tx_irq_count)
			lp->tx_chnl_ctrl = (pdata->tx_irq_timeout << 24) |
				(pdata->tx_irq_count << 16);
		else
			lp->tx_chnl_ctrl = 0x10220000;
		if (pdata->rx_irq_timeout || pdata->rx_irq_count)
			lp->rx_chnl_ctrl = (pdata->rx_irq_timeout << 24) |
				(pdata->rx_irq_count << 16);
		else
			lp->rx_chnl_ctrl = 0xff070000;
	}

	/* Error handle returned DMA RX and TX interrupts */
	if (lp->rx_irq < 0) {
		if (lp->rx_irq != -EPROBE_DEFER)
			dev_err(&pdev->dev, "could not get DMA RX irq\n");
		return lp->rx_irq;
	}
	if (lp->tx_irq < 0) {
		if (lp->tx_irq != -EPROBE_DEFER)
			dev_err(&pdev->dev, "could not get DMA TX irq\n");
		return lp->tx_irq;
	}

	if (temac_np) {
		/* Retrieve the MAC address */
		addr = of_get_mac_address(temac_np);
		if (IS_ERR(addr)) {
			dev_err(&pdev->dev, "could not find MAC address\n");
			return -ENODEV;
		}
		temac_init_mac_address(ndev, addr);
	} else if (pdata) {
		temac_init_mac_address(ndev, pdata->mac_addr);
	}

	rc = temac_mdio_setup(lp, pdev);
	if (rc)
		dev_warn(&pdev->dev, "error registering MDIO bus\n");

	if (temac_np) {
		lp->phy_node = of_parse_phandle(temac_np, "phy-handle", 0);
		if (lp->phy_node)
			dev_dbg(lp->dev, "using PHY node %pOF\n", lp->phy_node);
	} else if (pdata) {
		snprintf(lp->phy_name, sizeof(lp->phy_name),
			 PHY_ID_FMT, lp->mii_bus->id, pdata->phy_addr);
		lp->phy_interface = pdata->phy_interface;
	}

	/* Add the device attributes */
	rc = sysfs_create_group(&lp->dev->kobj, &temac_attr_group);
	if (rc) {
		dev_err(lp->dev, "Error creating sysfs files\n");
		goto err_sysfs_create;
	}

	rc = register_netdev(lp->ndev);
	if (rc) {
		dev_err(lp->dev, "register_netdev() error (%i)\n", rc);
		goto err_register_ndev;
	}

	return 0;

err_register_ndev:
	sysfs_remove_group(&lp->dev->kobj, &temac_attr_group);
err_sysfs_create:
	if (lp->phy_node)
		of_node_put(lp->phy_node);
	temac_mdio_teardown(lp);
	return rc;
}

static int temac_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct temac_local *lp = netdev_priv(ndev);

	unregister_netdev(ndev);
	sysfs_remove_group(&lp->dev->kobj, &temac_attr_group);
	if (lp->phy_node)
		of_node_put(lp->phy_node);
	temac_mdio_teardown(lp);
	return 0;
}

static const struct of_device_id temac_of_match[] = {
	{ .compatible = "xlnx,xps-ll-temac-1.01.b", },
	{ .compatible = "xlnx,xps-ll-temac-2.00.a", },
	{ .compatible = "xlnx,xps-ll-temac-2.02.a", },
	{ .compatible = "xlnx,xps-ll-temac-2.03.a", },
	{},
};
MODULE_DEVICE_TABLE(of, temac_of_match);

static struct platform_driver temac_driver = {
	.probe = temac_probe,
	.remove = temac_remove,
	.driver = {
		.name = "xilinx_temac",
		.of_match_table = temac_of_match,
	},
};

module_platform_driver(temac_driver);

MODULE_DESCRIPTION("Xilinx LL_TEMAC Ethernet driver");
MODULE_AUTHOR("Yoshio Kashiwagi");
MODULE_LICENSE("GPL");