/*
 * Copyright (C) 2004-2013 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Driver for the ARC EMAC 10100 (hardware revision 5)
 *
 * Contributors:
 *		Amit Bhor
 *		Sameer Dhavale
 *		Vineet Gupta
 */

#include <linux/etherdevice.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/of_platform.h>

#include "emac.h"

#define DRV_NAME	"arc_emac"
#define DRV_VERSION	"1.0"

/**
 * arc_emac_adjust_link - Adjust the PHY link/duplex settings.
 * @ndev:	Pointer to the net_device structure.
 *
 * This function is called by the PHY state machine after auto-negotiation
 * completes, to update the link, speed and duplex settings.
 */
static void arc_emac_adjust_link(struct net_device *ndev)
{
	struct arc_emac_priv *priv = netdev_priv(ndev);
	struct phy_device *phy_dev = priv->phy_dev;
	unsigned int reg, state_changed = 0;

	if (priv->link != phy_dev->link) {
		priv->link = phy_dev->link;
		state_changed = 1;
	}

	if (priv->speed != phy_dev->speed) {
		priv->speed = phy_dev->speed;
		state_changed = 1;
	}

	if (priv->duplex != phy_dev->duplex) {
		reg = arc_reg_get(priv, R_CTRL);

		if (phy_dev->duplex == DUPLEX_FULL)
			reg |= ENFL_MASK;
		else
			reg &= ~ENFL_MASK;

		arc_reg_set(priv, R_CTRL, reg);
		priv->duplex = phy_dev->duplex;
		state_changed = 1;
	}

	if (state_changed)
		phy_print_status(phy_dev);
}

/**
 * arc_emac_get_settings - Get PHY settings.
 * @ndev:	Pointer to net_device structure.
 * @cmd:	Pointer to ethtool_cmd structure.
 *
 * This implements the ethtool command for getting PHY settings. If the PHY
 * could not be found, the function returns -ENODEV. This function calls the
 * relevant PHY ethtool API to get the PHY settings.
 * Issue "ethtool ethX" at the Linux shell prompt to execute this function.
 */
static int arc_emac_get_settings(struct net_device *ndev,
				 struct ethtool_cmd *cmd)
{
	struct arc_emac_priv *priv = netdev_priv(ndev);

	return phy_ethtool_gset(priv->phy_dev, cmd);
}

/**
 * arc_emac_set_settings - Set PHY settings as passed in the argument.
 * @ndev:	Pointer to net_device structure.
 * @cmd:	Pointer to ethtool_cmd structure.
 *
 * This implements the ethtool command for setting various PHY settings. If
 * the PHY could not be found, the function returns -ENODEV. This function
 * calls the relevant PHY ethtool API to set the PHY.
 * Issue e.g. "ethtool -s ethX speed 1000" at the Linux shell prompt to
 * execute this function.
 */
static int arc_emac_set_settings(struct net_device *ndev,
				 struct ethtool_cmd *cmd)
{
	struct arc_emac_priv *priv = netdev_priv(ndev);

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	return phy_ethtool_sset(priv->phy_dev, cmd);
}

/**
 * arc_emac_get_drvinfo - Get EMAC driver information.
 * @ndev:	Pointer to net_device structure.
 * @info:	Pointer to ethtool_drvinfo structure.
 *
 * This implements the ethtool command for getting the driver information.
 * Issue "ethtool -i ethX" at the Linux shell prompt to execute this function.
 */
static void arc_emac_get_drvinfo(struct net_device *ndev,
				 struct ethtool_drvinfo *info)
{
	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
}

static const struct ethtool_ops arc_emac_ethtool_ops = {
	.get_settings	= arc_emac_get_settings,
	.set_settings	= arc_emac_set_settings,
	.get_drvinfo	= arc_emac_get_drvinfo,
	.get_link	= ethtool_op_get_link,
};

#define FIRST_OR_LAST_MASK	(FIRST_MASK | LAST_MASK)

/**
 * arc_emac_tx_clean - clears Tx BDs already processed by the EMAC.
 * @ndev:	Pointer to the network device.
 */
static void arc_emac_tx_clean(struct net_device *ndev)
{
	struct arc_emac_priv *priv = netdev_priv(ndev);
	struct net_device_stats *stats = &priv->stats;
	unsigned int i;

	for (i = 0; i < TX_BD_NUM; i++) {
		unsigned int *txbd_dirty = &priv->txbd_dirty;
		struct arc_emac_bd *txbd = &priv->txbd[*txbd_dirty];
		struct buffer_state *tx_buff = &priv->tx_buff[*txbd_dirty];
		struct sk_buff *skb = tx_buff->skb;
		unsigned int info = le32_to_cpu(txbd->info);

		if ((info & FOR_EMAC) || !txbd->data)
			break;

		if (unlikely(info & (DROP | DEFR | LTCL | UFLO))) {
			stats->tx_errors++;
			stats->tx_dropped++;

			if (info & DEFR)
				stats->tx_carrier_errors++;

			if (info & LTCL)
				stats->collisions++;

			if (info & UFLO)
				stats->tx_fifo_errors++;
		} else if (likely(info & FIRST_OR_LAST_MASK)) {
			stats->tx_packets++;
			stats->tx_bytes += skb->len;
		}

		dma_unmap_single(&ndev->dev, dma_unmap_addr(tx_buff, addr),
				 dma_unmap_len(tx_buff, len), DMA_TO_DEVICE);

		/* return the sk_buff to system */
		dev_kfree_skb_irq(skb);

		txbd->data = 0;
		txbd->info = 0;

		*txbd_dirty = (*txbd_dirty + 1) % TX_BD_NUM;

		if (netif_queue_stopped(ndev))
			netif_wake_queue(ndev);
	}
}

/**
 * arc_emac_rx - processing of Rx packets.
 * @ndev:	Pointer to the network device.
 * @budget:	How many BDs to process on 1 call.
 *
 * returns:	Number of processed BDs
 *
 * Iterate through Rx BDs and deliver received packets to the upper layer.
 */
static int arc_emac_rx(struct net_device *ndev, int budget)
{
	struct arc_emac_priv *priv = netdev_priv(ndev);
	unsigned int work_done;

	for (work_done = 0; work_done < budget; work_done++) {
		unsigned int *last_rx_bd = &priv->last_rx_bd;
		struct net_device_stats *stats = &priv->stats;
		struct buffer_state *rx_buff = &priv->rx_buff[*last_rx_bd];
		struct arc_emac_bd *rxbd = &priv->rxbd[*last_rx_bd];
		unsigned int pktlen, info = le32_to_cpu(rxbd->info);
		struct sk_buff *skb;
		dma_addr_t addr;

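		/* Stop as soon as we hit a BD that is still owned by the
		 * EMAC: nothing past this point has been received yet.
		 */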
		if (unlikely((info & OWN_MASK) == FOR_EMAC))
			break;

		/* Make a note that we saw a packet at this BD.
		 * Next time, the driver starts from the following BD.
		 */
		*last_rx_bd = (*last_rx_bd + 1) % RX_BD_NUM;

		if (unlikely((info & FIRST_OR_LAST_MASK) !=
			     FIRST_OR_LAST_MASK)) {
			/* We pre-allocate buffers of MTU size so incoming
			 * packets won't be split/chained.
			 */
			if (net_ratelimit())
				netdev_err(ndev, "incomplete packet received\n");

			/* Return ownership to EMAC */
			rxbd->info = cpu_to_le32(FOR_EMAC | EMAC_BUFFER_SIZE);
			stats->rx_errors++;
			stats->rx_length_errors++;
			continue;
		}

		pktlen = info & LEN_MASK;
		stats->rx_packets++;
		stats->rx_bytes += pktlen;
		skb = rx_buff->skb;
		skb_put(skb, pktlen);
		skb->dev = ndev;
		skb->protocol = eth_type_trans(skb, ndev);

		dma_unmap_single(&ndev->dev, dma_unmap_addr(rx_buff, addr),
				 dma_unmap_len(rx_buff, len), DMA_FROM_DEVICE);

		/* Prepare the BD for next cycle */
		rx_buff->skb = netdev_alloc_skb_ip_align(ndev,
							 EMAC_BUFFER_SIZE);
		if (unlikely(!rx_buff->skb)) {
			stats->rx_errors++;
			/* netif_receive_skb() is only called below on
			 * success, so account this frame as dropped.
			 */
			stats->rx_dropped++;
			continue;
		}

		/* receive_skb only if new skb was allocated to avoid holes */
		netif_receive_skb(skb);

		addr = dma_map_single(&ndev->dev, (void *)rx_buff->skb->data,
				      EMAC_BUFFER_SIZE, DMA_FROM_DEVICE);
		if (dma_mapping_error(&ndev->dev, addr)) {
			if (net_ratelimit())
				netdev_err(ndev, "cannot dma map\n");
			dev_kfree_skb(rx_buff->skb);
			stats->rx_errors++;
			continue;
		}
		dma_unmap_addr_set(rx_buff, addr, addr);
		dma_unmap_len_set(rx_buff, len, EMAC_BUFFER_SIZE);

		rxbd->data = cpu_to_le32(addr);

		/* Make sure pointer to data buffer is set */
		wmb();

		/* Return ownership to EMAC */
		rxbd->info = cpu_to_le32(FOR_EMAC | EMAC_BUFFER_SIZE);
	}

	return work_done;
}

/**
 * arc_emac_poll - NAPI poll handler.
 * @napi:	Pointer to napi_struct structure.
 * @budget:	How many BDs to process on 1 call.
 *
 * returns:	Number of processed BDs
 */
static int arc_emac_poll(struct napi_struct *napi, int budget)
{
	struct net_device *ndev = napi->dev;
	struct arc_emac_priv *priv = netdev_priv(ndev);
	unsigned int work_done;

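	/* Reclaim completed Tx BDs first, then process received frames
	 * up to the NAPI budget.
	 */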
	arc_emac_tx_clean(ndev);

	work_done = arc_emac_rx(ndev, budget);
	if (work_done < budget) {
		napi_complete(napi);
		arc_reg_or(priv, R_ENABLE, RXINT_MASK);
	}

	return work_done;
}

/**
 * arc_emac_intr - Global interrupt handler for EMAC.
 * @irq:		irq number.
 * @dev_instance:	device instance.
 *
 * returns: IRQ_HANDLED for all cases.
 *
 * The ARC EMAC has a single interrupt line; the bits set in the STATUS
 * register tell us why the interrupt fired.
 */
static irqreturn_t arc_emac_intr(int irq, void *dev_instance)
{
	struct net_device *ndev = dev_instance;
	struct arc_emac_priv *priv = netdev_priv(ndev);
	struct net_device_stats *stats = &priv->stats;
	unsigned int status;

	status = arc_reg_get(priv, R_STATUS);
	status &= ~MDIO_MASK;

	/* Reset all flags except "MDIO complete" */
	arc_reg_set(priv, R_STATUS, status);

	if (status & RXINT_MASK) {
		if (likely(napi_schedule_prep(&priv->napi))) {
			arc_reg_clr(priv, R_ENABLE, RXINT_MASK);
			__napi_schedule(&priv->napi);
		}
	}

	if (status & ERR_MASK) {
		/* MSER/RXCR/RXFR/RXFL interrupt fires on corresponding
		 * 8-bit error counter overrun.
		 */

		if (status & MSER_MASK) {
			stats->rx_missed_errors += 0x100;
			stats->rx_errors += 0x100;
		}

		if (status & RXCR_MASK) {
			stats->rx_crc_errors += 0x100;
			stats->rx_errors += 0x100;
		}

		if (status & RXFR_MASK) {
			stats->rx_frame_errors += 0x100;
			stats->rx_errors += 0x100;
		}

		if (status & RXFL_MASK) {
			stats->rx_over_errors += 0x100;
			stats->rx_errors += 0x100;
		}
	}

	return IRQ_HANDLED;
}

/**
 * arc_emac_open - Open the network device.
 * @ndev:	Pointer to the network device.
 *
 * returns: 0, on success or non-zero error value on failure.
 *
 * This function allocates the Rx buffers, programs the BD ring addresses into
 * the hardware, enables interrupts and the EMAC itself, restarts PHY
 * auto-negotiation and starts the Tx queue.
 */
static int arc_emac_open(struct net_device *ndev)
{
	struct arc_emac_priv *priv = netdev_priv(ndev);
	struct phy_device *phy_dev = priv->phy_dev;
	int i;

	phy_dev->autoneg = AUTONEG_ENABLE;
	phy_dev->speed = 0;
	phy_dev->duplex = 0;
	phy_dev->advertising = phy_dev->supported;

	if (priv->max_speed > 100) {
		phy_dev->advertising &= PHY_GBIT_FEATURES;
	} else if (priv->max_speed <= 100) {
		phy_dev->advertising &= PHY_BASIC_FEATURES;
		if (priv->max_speed <= 10) {
			phy_dev->advertising &= ~SUPPORTED_100baseT_Half;
			phy_dev->advertising &= ~SUPPORTED_100baseT_Full;
		}
	}

	priv->last_rx_bd = 0;

	/* Allocate and set buffers for Rx BD's */
	for (i = 0; i < RX_BD_NUM; i++) {
		dma_addr_t addr;
		unsigned int *last_rx_bd = &priv->last_rx_bd;
		struct arc_emac_bd *rxbd = &priv->rxbd[*last_rx_bd];
		struct buffer_state *rx_buff = &priv->rx_buff[*last_rx_bd];

		rx_buff->skb = netdev_alloc_skb_ip_align(ndev,
							 EMAC_BUFFER_SIZE);
		if (unlikely(!rx_buff->skb))
			return -ENOMEM;

		addr = dma_map_single(&ndev->dev, (void *)rx_buff->skb->data,
				      EMAC_BUFFER_SIZE, DMA_FROM_DEVICE);
		if (dma_mapping_error(&ndev->dev, addr)) {
			netdev_err(ndev, "cannot dma map\n");
			dev_kfree_skb(rx_buff->skb);
			return -ENOMEM;
		}
		dma_unmap_addr_set(rx_buff, addr, addr);
		dma_unmap_len_set(rx_buff, len, EMAC_BUFFER_SIZE);

		rxbd->data = cpu_to_le32(addr);

		/* Make sure pointer to data buffer is set */
		wmb();

		/* Return ownership to EMAC */
		rxbd->info = cpu_to_le32(FOR_EMAC | EMAC_BUFFER_SIZE);

		*last_rx_bd = (*last_rx_bd + 1) % RX_BD_NUM;
	}

	/* Clean Tx BD's */
	memset(priv->txbd, 0, TX_RING_SZ);

	/* Initialize logical address filter */
	arc_reg_set(priv, R_LAFL, 0);
	arc_reg_set(priv, R_LAFH, 0);

	/* Set BD ring pointers for device side */
	arc_reg_set(priv, R_RX_RING, (unsigned int)priv->rxbd_dma);
	arc_reg_set(priv, R_TX_RING, (unsigned int)priv->txbd_dma);

	/* Enable interrupts */
	arc_reg_set(priv, R_ENABLE, RXINT_MASK | ERR_MASK);

	/* Set CONTROL */
	arc_reg_set(priv, R_CTRL,
		     (RX_BD_NUM << 24) |	/* RX BD table length */
		     (TX_BD_NUM << 16) |	/* TX BD table length */
		     TXRN_MASK | RXRN_MASK);

	napi_enable(&priv->napi);

	/* Enable EMAC */
	arc_reg_or(priv, R_CTRL, EN_MASK);

	phy_start_aneg(priv->phy_dev);

	netif_start_queue(ndev);

	return 0;
}

/**
 * arc_emac_stop - Close the network device.
 * @ndev:	Pointer to the network device.
 *
 * This function stops the Tx queue and NAPI polling, disables interrupts
 * and disables the EMAC.
 */
static int arc_emac_stop(struct net_device *ndev)
{
	struct arc_emac_priv *priv = netdev_priv(ndev);

	napi_disable(&priv->napi);
	netif_stop_queue(ndev);

	/* Disable interrupts */
	arc_reg_clr(priv, R_ENABLE, RXINT_MASK | ERR_MASK);

	/* Disable EMAC */
	arc_reg_clr(priv, R_CTRL, EN_MASK);

	return 0;
}

/**
 * arc_emac_stats - Get system network statistics.
 * @ndev:	Pointer to net_device structure.
 *
 * Returns the address of the device statistics structure.
 * Statistics are updated in interrupt handler.
 */
static struct net_device_stats *arc_emac_stats(struct net_device *ndev)
{
	struct arc_emac_priv *priv = netdev_priv(ndev);
	struct net_device_stats *stats = &priv->stats;
	unsigned long miss, rxerr;
	u8 rxcrc, rxfram, rxoflow;

	rxerr = arc_reg_get(priv, R_RXERR);
	miss = arc_reg_get(priv, R_MISS);

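	/* R_RXERR packs three 8-bit counters: CRC errors in bits 7:0,
	 * framing errors in bits 15:8 and overflow errors in bits 23:16.
	 */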
	rxcrc = rxerr;
	rxfram = rxerr >> 8;
	rxoflow = rxerr >> 16;

	stats->rx_errors += miss;
	stats->rx_errors += rxcrc + rxfram + rxoflow;

	stats->rx_over_errors += rxoflow;
	stats->rx_frame_errors += rxfram;
	stats->rx_crc_errors += rxcrc;
	stats->rx_missed_errors += miss;

	return stats;
}

/**
 * arc_emac_tx - Starts the data transmission.
 * @skb:	sk_buff pointer that contains data to be transmitted.
 * @ndev:	Pointer to net_device structure.
 *
 * returns:	NETDEV_TX_OK, on success
 *		NETDEV_TX_BUSY, if any of the descriptors are not free.
 *
 * This function is invoked from upper layers to initiate transmission.
 */
static int arc_emac_tx(struct sk_buff *skb, struct net_device *ndev)
{
	struct arc_emac_priv *priv = netdev_priv(ndev);
	unsigned int len, *txbd_curr = &priv->txbd_curr;
	struct net_device_stats *stats = &priv->stats;
	__le32 *info = &priv->txbd[*txbd_curr].info;
	dma_addr_t addr;

	if (skb_padto(skb, ETH_ZLEN))
		return NETDEV_TX_OK;

	len = max_t(unsigned int, ETH_ZLEN, skb->len);

	/* EMAC still holds this buffer in its possession.
	 * CPU must not modify this buffer descriptor
	 */
	if (unlikely((le32_to_cpu(*info) & OWN_MASK) == FOR_EMAC)) {
		netif_stop_queue(ndev);
		return NETDEV_TX_BUSY;
	}

	addr = dma_map_single(&ndev->dev, (void *)skb->data, len,
			      DMA_TO_DEVICE);

	if (unlikely(dma_mapping_error(&ndev->dev, addr))) {
		stats->tx_dropped++;
		stats->tx_errors++;
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}
	dma_unmap_addr_set(&priv->tx_buff[*txbd_curr], addr, addr);
	dma_unmap_len_set(&priv->tx_buff[*txbd_curr], len, len);

	priv->tx_buff[*txbd_curr].skb = skb;
	priv->txbd[*txbd_curr].data = cpu_to_le32(addr);

	/* Make sure pointer to data buffer is set */
	wmb();

	*info = cpu_to_le32(FOR_EMAC | FIRST_OR_LAST_MASK | len);

	/* Increment index to point to the next BD */
	*txbd_curr = (*txbd_curr + 1) % TX_BD_NUM;

	/* Get "info" of the next BD */
	info = &priv->txbd[*txbd_curr].info;

	/* Check if Tx BD ring is full - next BD is still owned by EMAC */
	if (unlikely((le32_to_cpu(*info) & OWN_MASK) == FOR_EMAC))
		netif_stop_queue(ndev);

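	/* Kick the EMAC: setting the Tx-poll bit in STATUS asks the hardware
	 * to (re)scan the Tx BD ring for the buffer just queued.
	 */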
	arc_reg_set(priv, R_STATUS, TXPL_MASK);

	skb_tx_timestamp(skb);

	return NETDEV_TX_OK;
}

/**
 * arc_emac_set_address - Set the MAC address for this device.
 * @ndev:	Pointer to net_device structure.
 * @p:		6 byte Address to be written as MAC address.
 *
 * This function copies the HW address from the sockaddr structure to the
 * net_device structure and updates the address in HW.
 *
 * returns:	-EBUSY if the net device is running, -EADDRNOTAVAIL if the
 *		address is invalid or 0 if the address is set successfully.
 */
static int arc_emac_set_address(struct net_device *ndev, void *p)
{
	struct arc_emac_priv *priv = netdev_priv(ndev);
	struct sockaddr *addr = p;
	unsigned int addr_low, addr_hi;

	if (netif_running(ndev))
		return -EBUSY;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);

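	/* The 6-byte MAC address is programmed as two little-endian words:
	 * the first four bytes go into ADDRL, the remaining two into ADDRH.
	 */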
	addr_low = le32_to_cpu(*(__le32 *) &ndev->dev_addr[0]);
	addr_hi = le16_to_cpu(*(__le16 *) &ndev->dev_addr[4]);

	arc_reg_set(priv, R_ADDRL, addr_low);
	arc_reg_set(priv, R_ADDRH, addr_hi);

	return 0;
}

static const struct net_device_ops arc_emac_netdev_ops = {
	.ndo_open		= arc_emac_open,
	.ndo_stop		= arc_emac_stop,
	.ndo_start_xmit		= arc_emac_tx,
	.ndo_set_mac_address	= arc_emac_set_address,
	.ndo_get_stats		= arc_emac_stats,
};

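/*
 * The probe routine below pulls everything it needs from the device tree.
 * An illustrative node is sketched here; the property names match those
 * parsed by the code, while the addresses and values are placeholders only:
 *
 *	ethernet@c0fc2000 {
 *		compatible = "snps,arc-emac";
 *		reg = <0xc0fc2000 0x3c>;
 *		interrupts = <6>;
 *		clock-frequency = <80000000>;
 *		max-speed = <100>;
 *		phy = <&phy0>;
 *	};
 */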
static int arc_emac_probe(struct platform_device *pdev)
{
	struct resource res_regs;
	struct device_node *phy_node;
	struct arc_emac_priv *priv;
	struct net_device *ndev;
	const char *mac_addr;
	unsigned int id, clock_frequency, irq;
	int err;

	if (!pdev->dev.of_node)
		return -ENODEV;

	/* Get PHY from device tree */
	phy_node = of_parse_phandle(pdev->dev.of_node, "phy", 0);
	if (!phy_node) {
		dev_err(&pdev->dev, "failed to retrieve phy description from device tree\n");
		return -ENODEV;
	}

	/* Get EMAC registers base address from device tree */
	err = of_address_to_resource(pdev->dev.of_node, 0, &res_regs);
	if (err) {
		dev_err(&pdev->dev, "failed to retrieve registers base from device tree\n");
		return -ENODEV;
	}

	/* Get CPU clock frequency from device tree */
	if (of_property_read_u32(pdev->dev.of_node, "clock-frequency",
				 &clock_frequency)) {
		dev_err(&pdev->dev, "failed to retrieve <clock-frequency> from device tree\n");
		return -EINVAL;
	}

	/* Get IRQ from device tree */
	irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
	if (!irq) {
		dev_err(&pdev->dev, "failed to retrieve <irq> value from device tree\n");
		return -ENODEV;
	}

	ndev = alloc_etherdev(sizeof(struct arc_emac_priv));
	if (!ndev)
		return -ENOMEM;

	platform_set_drvdata(pdev, ndev);
	SET_NETDEV_DEV(ndev, &pdev->dev);

	ndev->netdev_ops = &arc_emac_netdev_ops;
	ndev->ethtool_ops = &arc_emac_ethtool_ops;
	ndev->watchdog_timeo = TX_TIMEOUT;
	/* FIXME :: no multicast support yet */
	ndev->flags &= ~IFF_MULTICAST;

	priv = netdev_priv(ndev);
	priv->dev = &pdev->dev;
	priv->ndev = ndev;

	priv->regs = devm_ioremap_resource(&pdev->dev, &res_regs);
	if (IS_ERR(priv->regs)) {
		err = PTR_ERR(priv->regs);
		goto out;
	}
	dev_dbg(&pdev->dev, "Registers base address is 0x%p\n", priv->regs);

	id = arc_reg_get(priv, R_ID);

	/* Check for EMAC revision 5 or 7, magic number */
	if (!(id == 0x0005fd02 || id == 0x0007fd02)) {
		dev_err(&pdev->dev, "ARC EMAC not detected, id=0x%x\n", id);
		err = -ENODEV;
		goto out;
	}
	dev_info(&pdev->dev, "ARC EMAC detected with id: 0x%x\n", id);

	/* Set poll rate so that it polls every 1 ms */
	arc_reg_set(priv, R_POLLRATE, clock_frequency / 1000000);

	/* Get max speed of operation from device tree */
	if (of_property_read_u32(pdev->dev.of_node, "max-speed",
				 &priv->max_speed)) {
		dev_err(&pdev->dev, "failed to retrieve <max-speed> from device tree\n");
		err = -EINVAL;
		goto out;
	}

	ndev->irq = irq;
	dev_info(&pdev->dev, "IRQ is %d\n", ndev->irq);

	/* Register interrupt handler for device */
	err = devm_request_irq(&pdev->dev, ndev->irq, arc_emac_intr, 0,
			       ndev->name, ndev);
	if (err) {
		dev_err(&pdev->dev, "could not allocate IRQ\n");
		goto out;
	}

	/* Get MAC address from device tree */
	mac_addr = of_get_mac_address(pdev->dev.of_node);

	if (mac_addr)
		memcpy(ndev->dev_addr, mac_addr, ETH_ALEN);
	else
		eth_hw_addr_random(ndev);

	dev_info(&pdev->dev, "MAC address is now %pM\n", ndev->dev_addr);

	/* Do 1 allocation instead of 2 separate ones for Rx and Tx BD rings */
	priv->rxbd = dmam_alloc_coherent(&pdev->dev, RX_RING_SZ + TX_RING_SZ,
					 &priv->rxbd_dma, GFP_KERNEL);

	if (!priv->rxbd) {
		dev_err(&pdev->dev, "failed to allocate data buffers\n");
		err = -ENOMEM;
		goto out;
	}

	priv->txbd = priv->rxbd + RX_BD_NUM;

	priv->txbd_dma = priv->rxbd_dma + RX_RING_SZ;
	dev_dbg(&pdev->dev, "EMAC Device addr: Rx Ring [0x%x], Tx Ring [0x%x]\n",
		(unsigned int)priv->rxbd_dma, (unsigned int)priv->txbd_dma);

	err = arc_mdio_probe(pdev, priv);
	if (err) {
		dev_err(&pdev->dev, "failed to probe MII bus\n");
		goto out;
	}

	priv->phy_dev = of_phy_connect(ndev, phy_node, arc_emac_adjust_link, 0,
				       PHY_INTERFACE_MODE_MII);
	if (!priv->phy_dev) {
		dev_err(&pdev->dev, "of_phy_connect() failed\n");
		err = -ENODEV;
		goto out_mdio;
	}

	dev_info(&pdev->dev, "connected to %s phy with id 0x%x\n",
		 priv->phy_dev->drv->name, priv->phy_dev->phy_id);

	netif_napi_add(ndev, &priv->napi, arc_emac_poll, ARC_EMAC_NAPI_WEIGHT);

	err = register_netdev(ndev);
	if (err) {
		netif_napi_del(&priv->napi);
		dev_err(&pdev->dev, "failed to register network device\n");
		goto out_phy;
	}

	return 0;

out_phy:
	phy_disconnect(priv->phy_dev);
out_mdio:
	arc_mdio_remove(priv);
out:
	free_netdev(ndev);
	return err;
}

static int arc_emac_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct arc_emac_priv *priv = netdev_priv(ndev);

	phy_disconnect(priv->phy_dev);
	priv->phy_dev = NULL;
	arc_mdio_remove(priv);
	unregister_netdev(ndev);
	netif_napi_del(&priv->napi);
	free_netdev(ndev);

	return 0;
}

static const struct of_device_id arc_emac_dt_ids[] = {
	{ .compatible = "snps,arc-emac" },
	{ /* Sentinel */ }
};
MODULE_DEVICE_TABLE(of, arc_emac_dt_ids);

static struct platform_driver arc_emac_driver = {
	.probe = arc_emac_probe,
	.remove = arc_emac_remove,
	.driver = {
		.name = DRV_NAME,
		.owner = THIS_MODULE,
		.of_match_table = arc_emac_dt_ids,
	},
};

module_platform_driver(arc_emac_driver);

MODULE_AUTHOR("Alexey Brodkin <abrodkin@synopsys.com>");
MODULE_DESCRIPTION("ARC EMAC driver");
MODULE_LICENSE("GPL");