1 /* 2 * Copyright (C) 2004-2013 Synopsys, Inc. (www.synopsys.com) 3 * 4 * This program is free software; you can redistribute it and/or modify 5 * it under the terms of the GNU General Public License version 2 as 6 * published by the Free Software Foundation. 7 * 8 * Driver for the ARC EMAC 10100 (hardware revision 5) 9 * 10 * Contributors: 11 * Amit Bhor 12 * Sameer Dhavale 13 * Vineet Gupta 14 */ 15 16 #include <linux/crc32.h> 17 #include <linux/etherdevice.h> 18 #include <linux/interrupt.h> 19 #include <linux/io.h> 20 #include <linux/module.h> 21 #include <linux/of_address.h> 22 #include <linux/of_irq.h> 23 #include <linux/of_mdio.h> 24 #include <linux/of_net.h> 25 #include <linux/of_platform.h> 26 27 #include "emac.h" 28 29 #define DRV_NAME "arc_emac" 30 #define DRV_VERSION "1.0" 31 32 /** 33 * arc_emac_adjust_link - Adjust the PHY link duplex. 34 * @ndev: Pointer to the net_device structure. 35 * 36 * This function is called to change the duplex setting after auto negotiation 37 * is done by the PHY. 38 */ 39 static void arc_emac_adjust_link(struct net_device *ndev) 40 { 41 struct arc_emac_priv *priv = netdev_priv(ndev); 42 struct phy_device *phy_dev = priv->phy_dev; 43 unsigned int reg, state_changed = 0; 44 45 if (priv->link != phy_dev->link) { 46 priv->link = phy_dev->link; 47 state_changed = 1; 48 } 49 50 if (priv->speed != phy_dev->speed) { 51 priv->speed = phy_dev->speed; 52 state_changed = 1; 53 } 54 55 if (priv->duplex != phy_dev->duplex) { 56 reg = arc_reg_get(priv, R_CTRL); 57 58 if (DUPLEX_FULL == phy_dev->duplex) 59 reg |= ENFL_MASK; 60 else 61 reg &= ~ENFL_MASK; 62 63 arc_reg_set(priv, R_CTRL, reg); 64 priv->duplex = phy_dev->duplex; 65 state_changed = 1; 66 } 67 68 if (state_changed) 69 phy_print_status(phy_dev); 70 } 71 72 /** 73 * arc_emac_get_settings - Get PHY settings. 74 * @ndev: Pointer to net_device structure. 75 * @cmd: Pointer to ethtool_cmd structure. 76 * 77 * This implements ethtool command for getting PHY settings. If PHY could 78 * not be found, the function returns -ENODEV. This function calls the 79 * relevant PHY ethtool API to get the PHY settings. 80 * Issue "ethtool ethX" under linux prompt to execute this function. 81 */ 82 static int arc_emac_get_settings(struct net_device *ndev, 83 struct ethtool_cmd *cmd) 84 { 85 struct arc_emac_priv *priv = netdev_priv(ndev); 86 87 return phy_ethtool_gset(priv->phy_dev, cmd); 88 } 89 90 /** 91 * arc_emac_set_settings - Set PHY settings as passed in the argument. 92 * @ndev: Pointer to net_device structure. 93 * @cmd: Pointer to ethtool_cmd structure. 94 * 95 * This implements ethtool command for setting various PHY settings. If PHY 96 * could not be found, the function returns -ENODEV. This function calls the 97 * relevant PHY ethtool API to set the PHY. 98 * Issue e.g. "ethtool -s ethX speed 1000" under linux prompt to execute this 99 * function. 100 */ 101 static int arc_emac_set_settings(struct net_device *ndev, 102 struct ethtool_cmd *cmd) 103 { 104 struct arc_emac_priv *priv = netdev_priv(ndev); 105 106 if (!capable(CAP_NET_ADMIN)) 107 return -EPERM; 108 109 return phy_ethtool_sset(priv->phy_dev, cmd); 110 } 111 112 /** 113 * arc_emac_get_drvinfo - Get EMAC driver information. 114 * @ndev: Pointer to net_device structure. 115 * @info: Pointer to ethtool_drvinfo structure. 116 * 117 * This implements ethtool command for getting the driver information. 118 * Issue "ethtool -i ethX" under linux prompt to execute this function. 119 */ 120 static void arc_emac_get_drvinfo(struct net_device *ndev, 121 struct ethtool_drvinfo *info) 122 { 123 strlcpy(info->driver, DRV_NAME, sizeof(info->driver)); 124 strlcpy(info->version, DRV_VERSION, sizeof(info->version)); 125 } 126 127 static const struct ethtool_ops arc_emac_ethtool_ops = { 128 .get_settings = arc_emac_get_settings, 129 .set_settings = arc_emac_set_settings, 130 .get_drvinfo = arc_emac_get_drvinfo, 131 .get_link = ethtool_op_get_link, 132 }; 133 134 #define FIRST_OR_LAST_MASK (FIRST_MASK | LAST_MASK) 135 136 /** 137 * arc_emac_tx_clean - clears processed by EMAC Tx BDs. 138 * @ndev: Pointer to the network device. 139 */ 140 static void arc_emac_tx_clean(struct net_device *ndev) 141 { 142 struct arc_emac_priv *priv = netdev_priv(ndev); 143 struct net_device_stats *stats = &priv->stats; 144 unsigned int i; 145 146 for (i = 0; i < TX_BD_NUM; i++) { 147 unsigned int *txbd_dirty = &priv->txbd_dirty; 148 struct arc_emac_bd *txbd = &priv->txbd[*txbd_dirty]; 149 struct buffer_state *tx_buff = &priv->tx_buff[*txbd_dirty]; 150 struct sk_buff *skb = tx_buff->skb; 151 unsigned int info = le32_to_cpu(txbd->info); 152 153 if ((info & FOR_EMAC) || !txbd->data) 154 break; 155 156 if (unlikely(info & (DROP | DEFR | LTCL | UFLO))) { 157 stats->tx_errors++; 158 stats->tx_dropped++; 159 160 if (info & DEFR) 161 stats->tx_carrier_errors++; 162 163 if (info & LTCL) 164 stats->collisions++; 165 166 if (info & UFLO) 167 stats->tx_fifo_errors++; 168 } else if (likely(info & FIRST_OR_LAST_MASK)) { 169 stats->tx_packets++; 170 stats->tx_bytes += skb->len; 171 } 172 173 dma_unmap_single(&ndev->dev, dma_unmap_addr(tx_buff, addr), 174 dma_unmap_len(tx_buff, len), DMA_TO_DEVICE); 175 176 /* return the sk_buff to system */ 177 dev_kfree_skb_irq(skb); 178 179 txbd->data = 0; 180 txbd->info = 0; 181 182 *txbd_dirty = (*txbd_dirty + 1) % TX_BD_NUM; 183 184 if (netif_queue_stopped(ndev)) 185 netif_wake_queue(ndev); 186 } 187 } 188 189 /** 190 * arc_emac_rx - processing of Rx packets. 191 * @ndev: Pointer to the network device. 192 * @budget: How many BDs to process on 1 call. 193 * 194 * returns: Number of processed BDs 195 * 196 * Iterate through Rx BDs and deliver received packages to upper layer. 197 */ 198 static int arc_emac_rx(struct net_device *ndev, int budget) 199 { 200 struct arc_emac_priv *priv = netdev_priv(ndev); 201 unsigned int work_done; 202 203 for (work_done = 0; work_done < budget; work_done++) { 204 unsigned int *last_rx_bd = &priv->last_rx_bd; 205 struct net_device_stats *stats = &priv->stats; 206 struct buffer_state *rx_buff = &priv->rx_buff[*last_rx_bd]; 207 struct arc_emac_bd *rxbd = &priv->rxbd[*last_rx_bd]; 208 unsigned int pktlen, info = le32_to_cpu(rxbd->info); 209 struct sk_buff *skb; 210 dma_addr_t addr; 211 212 if (unlikely((info & OWN_MASK) == FOR_EMAC)) 213 break; 214 215 /* Make a note that we saw a packet at this BD. 216 * So next time, driver starts from this + 1 217 */ 218 *last_rx_bd = (*last_rx_bd + 1) % RX_BD_NUM; 219 220 if (unlikely((info & FIRST_OR_LAST_MASK) != 221 FIRST_OR_LAST_MASK)) { 222 /* We pre-allocate buffers of MTU size so incoming 223 * packets won't be split/chained. 224 */ 225 if (net_ratelimit()) 226 netdev_err(ndev, "incomplete packet received\n"); 227 228 /* Return ownership to EMAC */ 229 rxbd->info = cpu_to_le32(FOR_EMAC | EMAC_BUFFER_SIZE); 230 stats->rx_errors++; 231 stats->rx_length_errors++; 232 continue; 233 } 234 235 pktlen = info & LEN_MASK; 236 stats->rx_packets++; 237 stats->rx_bytes += pktlen; 238 skb = rx_buff->skb; 239 skb_put(skb, pktlen); 240 skb->dev = ndev; 241 skb->protocol = eth_type_trans(skb, ndev); 242 243 dma_unmap_single(&ndev->dev, dma_unmap_addr(rx_buff, addr), 244 dma_unmap_len(rx_buff, len), DMA_FROM_DEVICE); 245 246 /* Prepare the BD for next cycle */ 247 rx_buff->skb = netdev_alloc_skb_ip_align(ndev, 248 EMAC_BUFFER_SIZE); 249 if (unlikely(!rx_buff->skb)) { 250 stats->rx_errors++; 251 /* Because receive_skb is below, increment rx_dropped */ 252 stats->rx_dropped++; 253 continue; 254 } 255 256 /* receive_skb only if new skb was allocated to avoid holes */ 257 netif_receive_skb(skb); 258 259 addr = dma_map_single(&ndev->dev, (void *)rx_buff->skb->data, 260 EMAC_BUFFER_SIZE, DMA_FROM_DEVICE); 261 if (dma_mapping_error(&ndev->dev, addr)) { 262 if (net_ratelimit()) 263 netdev_err(ndev, "cannot dma map\n"); 264 dev_kfree_skb(rx_buff->skb); 265 stats->rx_errors++; 266 continue; 267 } 268 dma_unmap_addr_set(rx_buff, addr, addr); 269 dma_unmap_len_set(rx_buff, len, EMAC_BUFFER_SIZE); 270 271 rxbd->data = cpu_to_le32(addr); 272 273 /* Make sure pointer to data buffer is set */ 274 wmb(); 275 276 /* Return ownership to EMAC */ 277 rxbd->info = cpu_to_le32(FOR_EMAC | EMAC_BUFFER_SIZE); 278 } 279 280 return work_done; 281 } 282 283 /** 284 * arc_emac_poll - NAPI poll handler. 285 * @napi: Pointer to napi_struct structure. 286 * @budget: How many BDs to process on 1 call. 287 * 288 * returns: Number of processed BDs 289 */ 290 static int arc_emac_poll(struct napi_struct *napi, int budget) 291 { 292 struct net_device *ndev = napi->dev; 293 struct arc_emac_priv *priv = netdev_priv(ndev); 294 unsigned int work_done; 295 296 arc_emac_tx_clean(ndev); 297 298 work_done = arc_emac_rx(ndev, budget); 299 if (work_done < budget) { 300 napi_complete(napi); 301 arc_reg_or(priv, R_ENABLE, RXINT_MASK); 302 } 303 304 return work_done; 305 } 306 307 /** 308 * arc_emac_intr - Global interrupt handler for EMAC. 309 * @irq: irq number. 310 * @dev_instance: device instance. 311 * 312 * returns: IRQ_HANDLED for all cases. 313 * 314 * ARC EMAC has only 1 interrupt line, and depending on bits raised in 315 * STATUS register we may tell what is a reason for interrupt to fire. 316 */ 317 static irqreturn_t arc_emac_intr(int irq, void *dev_instance) 318 { 319 struct net_device *ndev = dev_instance; 320 struct arc_emac_priv *priv = netdev_priv(ndev); 321 struct net_device_stats *stats = &priv->stats; 322 unsigned int status; 323 324 status = arc_reg_get(priv, R_STATUS); 325 status &= ~MDIO_MASK; 326 327 /* Reset all flags except "MDIO complete" */ 328 arc_reg_set(priv, R_STATUS, status); 329 330 if (status & RXINT_MASK) { 331 if (likely(napi_schedule_prep(&priv->napi))) { 332 arc_reg_clr(priv, R_ENABLE, RXINT_MASK); 333 __napi_schedule(&priv->napi); 334 } 335 } 336 337 if (status & ERR_MASK) { 338 /* MSER/RXCR/RXFR/RXFL interrupt fires on corresponding 339 * 8-bit error counter overrun. 340 */ 341 342 if (status & MSER_MASK) { 343 stats->rx_missed_errors += 0x100; 344 stats->rx_errors += 0x100; 345 } 346 347 if (status & RXCR_MASK) { 348 stats->rx_crc_errors += 0x100; 349 stats->rx_errors += 0x100; 350 } 351 352 if (status & RXFR_MASK) { 353 stats->rx_frame_errors += 0x100; 354 stats->rx_errors += 0x100; 355 } 356 357 if (status & RXFL_MASK) { 358 stats->rx_over_errors += 0x100; 359 stats->rx_errors += 0x100; 360 } 361 } 362 363 return IRQ_HANDLED; 364 } 365 366 #ifdef CONFIG_NET_POLL_CONTROLLER 367 static void arc_emac_poll_controller(struct net_device *dev) 368 { 369 disable_irq(dev->irq); 370 arc_emac_intr(dev->irq, dev); 371 enable_irq(dev->irq); 372 } 373 #endif 374 375 /** 376 * arc_emac_open - Open the network device. 377 * @ndev: Pointer to the network device. 378 * 379 * returns: 0, on success or non-zero error value on failure. 380 * 381 * This function sets the MAC address, requests and enables an IRQ 382 * for the EMAC device and starts the Tx queue. 383 * It also connects to the phy device. 384 */ 385 static int arc_emac_open(struct net_device *ndev) 386 { 387 struct arc_emac_priv *priv = netdev_priv(ndev); 388 struct phy_device *phy_dev = priv->phy_dev; 389 int i; 390 391 phy_dev->autoneg = AUTONEG_ENABLE; 392 phy_dev->speed = 0; 393 phy_dev->duplex = 0; 394 phy_dev->advertising &= phy_dev->supported; 395 396 priv->last_rx_bd = 0; 397 398 /* Allocate and set buffers for Rx BD's */ 399 for (i = 0; i < RX_BD_NUM; i++) { 400 dma_addr_t addr; 401 unsigned int *last_rx_bd = &priv->last_rx_bd; 402 struct arc_emac_bd *rxbd = &priv->rxbd[*last_rx_bd]; 403 struct buffer_state *rx_buff = &priv->rx_buff[*last_rx_bd]; 404 405 rx_buff->skb = netdev_alloc_skb_ip_align(ndev, 406 EMAC_BUFFER_SIZE); 407 if (unlikely(!rx_buff->skb)) 408 return -ENOMEM; 409 410 addr = dma_map_single(&ndev->dev, (void *)rx_buff->skb->data, 411 EMAC_BUFFER_SIZE, DMA_FROM_DEVICE); 412 if (dma_mapping_error(&ndev->dev, addr)) { 413 netdev_err(ndev, "cannot dma map\n"); 414 dev_kfree_skb(rx_buff->skb); 415 return -ENOMEM; 416 } 417 dma_unmap_addr_set(rx_buff, addr, addr); 418 dma_unmap_len_set(rx_buff, len, EMAC_BUFFER_SIZE); 419 420 rxbd->data = cpu_to_le32(addr); 421 422 /* Make sure pointer to data buffer is set */ 423 wmb(); 424 425 /* Return ownership to EMAC */ 426 rxbd->info = cpu_to_le32(FOR_EMAC | EMAC_BUFFER_SIZE); 427 428 *last_rx_bd = (*last_rx_bd + 1) % RX_BD_NUM; 429 } 430 431 /* Clean Tx BD's */ 432 memset(priv->txbd, 0, TX_RING_SZ); 433 434 /* Initialize logical address filter */ 435 arc_reg_set(priv, R_LAFL, 0); 436 arc_reg_set(priv, R_LAFH, 0); 437 438 /* Set BD ring pointers for device side */ 439 arc_reg_set(priv, R_RX_RING, (unsigned int)priv->rxbd_dma); 440 arc_reg_set(priv, R_TX_RING, (unsigned int)priv->txbd_dma); 441 442 /* Enable interrupts */ 443 arc_reg_set(priv, R_ENABLE, RXINT_MASK | ERR_MASK); 444 445 /* Set CONTROL */ 446 arc_reg_set(priv, R_CTRL, 447 (RX_BD_NUM << 24) | /* RX BD table length */ 448 (TX_BD_NUM << 16) | /* TX BD table length */ 449 TXRN_MASK | RXRN_MASK); 450 451 napi_enable(&priv->napi); 452 453 /* Enable EMAC */ 454 arc_reg_or(priv, R_CTRL, EN_MASK); 455 456 phy_start_aneg(priv->phy_dev); 457 458 netif_start_queue(ndev); 459 460 return 0; 461 } 462 463 /** 464 * arc_emac_set_rx_mode - Change the receive filtering mode. 465 * @ndev: Pointer to the network device. 466 * 467 * This function enables/disables promiscuous or all-multicast mode 468 * and updates the multicast filtering list of the network device. 469 */ 470 static void arc_emac_set_rx_mode(struct net_device *ndev) 471 { 472 struct arc_emac_priv *priv = netdev_priv(ndev); 473 474 if (ndev->flags & IFF_PROMISC) { 475 arc_reg_or(priv, R_CTRL, PROM_MASK); 476 } else { 477 arc_reg_clr(priv, R_CTRL, PROM_MASK); 478 479 if (ndev->flags & IFF_ALLMULTI) { 480 arc_reg_set(priv, R_LAFL, ~0); 481 arc_reg_set(priv, R_LAFH, ~0); 482 } else { 483 struct netdev_hw_addr *ha; 484 unsigned int filter[2] = { 0, 0 }; 485 int bit; 486 487 netdev_for_each_mc_addr(ha, ndev) { 488 bit = ether_crc_le(ETH_ALEN, ha->addr) >> 26; 489 filter[bit >> 5] |= 1 << (bit & 31); 490 } 491 492 arc_reg_set(priv, R_LAFL, filter[0]); 493 arc_reg_set(priv, R_LAFH, filter[1]); 494 } 495 } 496 } 497 498 /** 499 * arc_emac_stop - Close the network device. 500 * @ndev: Pointer to the network device. 501 * 502 * This function stops the Tx queue, disables interrupts and frees the IRQ for 503 * the EMAC device. 504 * It also disconnects the PHY device associated with the EMAC device. 505 */ 506 static int arc_emac_stop(struct net_device *ndev) 507 { 508 struct arc_emac_priv *priv = netdev_priv(ndev); 509 510 napi_disable(&priv->napi); 511 netif_stop_queue(ndev); 512 513 /* Disable interrupts */ 514 arc_reg_clr(priv, R_ENABLE, RXINT_MASK | ERR_MASK); 515 516 /* Disable EMAC */ 517 arc_reg_clr(priv, R_CTRL, EN_MASK); 518 519 return 0; 520 } 521 522 /** 523 * arc_emac_stats - Get system network statistics. 524 * @ndev: Pointer to net_device structure. 525 * 526 * Returns the address of the device statistics structure. 527 * Statistics are updated in interrupt handler. 528 */ 529 static struct net_device_stats *arc_emac_stats(struct net_device *ndev) 530 { 531 struct arc_emac_priv *priv = netdev_priv(ndev); 532 struct net_device_stats *stats = &priv->stats; 533 unsigned long miss, rxerr; 534 u8 rxcrc, rxfram, rxoflow; 535 536 rxerr = arc_reg_get(priv, R_RXERR); 537 miss = arc_reg_get(priv, R_MISS); 538 539 rxcrc = rxerr; 540 rxfram = rxerr >> 8; 541 rxoflow = rxerr >> 16; 542 543 stats->rx_errors += miss; 544 stats->rx_errors += rxcrc + rxfram + rxoflow; 545 546 stats->rx_over_errors += rxoflow; 547 stats->rx_frame_errors += rxfram; 548 stats->rx_crc_errors += rxcrc; 549 stats->rx_missed_errors += miss; 550 551 return stats; 552 } 553 554 /** 555 * arc_emac_tx - Starts the data transmission. 556 * @skb: sk_buff pointer that contains data to be Transmitted. 557 * @ndev: Pointer to net_device structure. 558 * 559 * returns: NETDEV_TX_OK, on success 560 * NETDEV_TX_BUSY, if any of the descriptors are not free. 561 * 562 * This function is invoked from upper layers to initiate transmission. 563 */ 564 static int arc_emac_tx(struct sk_buff *skb, struct net_device *ndev) 565 { 566 struct arc_emac_priv *priv = netdev_priv(ndev); 567 unsigned int len, *txbd_curr = &priv->txbd_curr; 568 struct net_device_stats *stats = &priv->stats; 569 __le32 *info = &priv->txbd[*txbd_curr].info; 570 dma_addr_t addr; 571 572 if (skb_padto(skb, ETH_ZLEN)) 573 return NETDEV_TX_OK; 574 575 len = max_t(unsigned int, ETH_ZLEN, skb->len); 576 577 /* EMAC still holds this buffer in its possession. 578 * CPU must not modify this buffer descriptor 579 */ 580 if (unlikely((le32_to_cpu(*info) & OWN_MASK) == FOR_EMAC)) { 581 netif_stop_queue(ndev); 582 return NETDEV_TX_BUSY; 583 } 584 585 addr = dma_map_single(&ndev->dev, (void *)skb->data, len, 586 DMA_TO_DEVICE); 587 588 if (unlikely(dma_mapping_error(&ndev->dev, addr))) { 589 stats->tx_dropped++; 590 stats->tx_errors++; 591 dev_kfree_skb(skb); 592 return NETDEV_TX_OK; 593 } 594 dma_unmap_addr_set(&priv->tx_buff[*txbd_curr], addr, addr); 595 dma_unmap_len_set(&priv->tx_buff[*txbd_curr], len, len); 596 597 priv->tx_buff[*txbd_curr].skb = skb; 598 priv->txbd[*txbd_curr].data = cpu_to_le32(addr); 599 600 /* Make sure pointer to data buffer is set */ 601 wmb(); 602 603 skb_tx_timestamp(skb); 604 605 *info = cpu_to_le32(FOR_EMAC | FIRST_OR_LAST_MASK | len); 606 607 /* Increment index to point to the next BD */ 608 *txbd_curr = (*txbd_curr + 1) % TX_BD_NUM; 609 610 /* Get "info" of the next BD */ 611 info = &priv->txbd[*txbd_curr].info; 612 613 /* Check if if Tx BD ring is full - next BD is still owned by EMAC */ 614 if (unlikely((le32_to_cpu(*info) & OWN_MASK) == FOR_EMAC)) 615 netif_stop_queue(ndev); 616 617 arc_reg_set(priv, R_STATUS, TXPL_MASK); 618 619 return NETDEV_TX_OK; 620 } 621 622 static void arc_emac_set_address_internal(struct net_device *ndev) 623 { 624 struct arc_emac_priv *priv = netdev_priv(ndev); 625 unsigned int addr_low, addr_hi; 626 627 addr_low = le32_to_cpu(*(__le32 *) &ndev->dev_addr[0]); 628 addr_hi = le16_to_cpu(*(__le16 *) &ndev->dev_addr[4]); 629 630 arc_reg_set(priv, R_ADDRL, addr_low); 631 arc_reg_set(priv, R_ADDRH, addr_hi); 632 } 633 634 /** 635 * arc_emac_set_address - Set the MAC address for this device. 636 * @ndev: Pointer to net_device structure. 637 * @p: 6 byte Address to be written as MAC address. 638 * 639 * This function copies the HW address from the sockaddr structure to the 640 * net_device structure and updates the address in HW. 641 * 642 * returns: -EBUSY if the net device is busy or 0 if the address is set 643 * successfully. 644 */ 645 static int arc_emac_set_address(struct net_device *ndev, void *p) 646 { 647 struct sockaddr *addr = p; 648 649 if (netif_running(ndev)) 650 return -EBUSY; 651 652 if (!is_valid_ether_addr(addr->sa_data)) 653 return -EADDRNOTAVAIL; 654 655 memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len); 656 657 arc_emac_set_address_internal(ndev); 658 659 return 0; 660 } 661 662 static const struct net_device_ops arc_emac_netdev_ops = { 663 .ndo_open = arc_emac_open, 664 .ndo_stop = arc_emac_stop, 665 .ndo_start_xmit = arc_emac_tx, 666 .ndo_set_mac_address = arc_emac_set_address, 667 .ndo_get_stats = arc_emac_stats, 668 .ndo_set_rx_mode = arc_emac_set_rx_mode, 669 #ifdef CONFIG_NET_POLL_CONTROLLER 670 .ndo_poll_controller = arc_emac_poll_controller, 671 #endif 672 }; 673 674 static int arc_emac_probe(struct platform_device *pdev) 675 { 676 struct resource res_regs; 677 struct device_node *phy_node; 678 struct arc_emac_priv *priv; 679 struct net_device *ndev; 680 const char *mac_addr; 681 unsigned int id, clock_frequency, irq; 682 int err; 683 684 if (!pdev->dev.of_node) 685 return -ENODEV; 686 687 /* Get PHY from device tree */ 688 phy_node = of_parse_phandle(pdev->dev.of_node, "phy", 0); 689 if (!phy_node) { 690 dev_err(&pdev->dev, "failed to retrieve phy description from device tree\n"); 691 return -ENODEV; 692 } 693 694 /* Get EMAC registers base address from device tree */ 695 err = of_address_to_resource(pdev->dev.of_node, 0, &res_regs); 696 if (err) { 697 dev_err(&pdev->dev, "failed to retrieve registers base from device tree\n"); 698 return -ENODEV; 699 } 700 701 /* Get IRQ from device tree */ 702 irq = irq_of_parse_and_map(pdev->dev.of_node, 0); 703 if (!irq) { 704 dev_err(&pdev->dev, "failed to retrieve <irq> value from device tree\n"); 705 return -ENODEV; 706 } 707 708 ndev = alloc_etherdev(sizeof(struct arc_emac_priv)); 709 if (!ndev) 710 return -ENOMEM; 711 712 platform_set_drvdata(pdev, ndev); 713 SET_NETDEV_DEV(ndev, &pdev->dev); 714 715 ndev->netdev_ops = &arc_emac_netdev_ops; 716 ndev->ethtool_ops = &arc_emac_ethtool_ops; 717 ndev->watchdog_timeo = TX_TIMEOUT; 718 /* FIXME :: no multicast support yet */ 719 ndev->flags &= ~IFF_MULTICAST; 720 721 priv = netdev_priv(ndev); 722 priv->dev = &pdev->dev; 723 priv->ndev = ndev; 724 725 priv->regs = devm_ioremap_resource(&pdev->dev, &res_regs); 726 if (IS_ERR(priv->regs)) { 727 err = PTR_ERR(priv->regs); 728 goto out_netdev; 729 } 730 dev_dbg(&pdev->dev, "Registers base address is 0x%p\n", priv->regs); 731 732 priv->clk = of_clk_get(pdev->dev.of_node, 0); 733 if (IS_ERR(priv->clk)) { 734 /* Get CPU clock frequency from device tree */ 735 if (of_property_read_u32(pdev->dev.of_node, "clock-frequency", 736 &clock_frequency)) { 737 dev_err(&pdev->dev, "failed to retrieve <clock-frequency> from device tree\n"); 738 err = -EINVAL; 739 goto out_netdev; 740 } 741 } else { 742 err = clk_prepare_enable(priv->clk); 743 if (err) { 744 dev_err(&pdev->dev, "failed to enable clock\n"); 745 goto out_clkget; 746 } 747 748 clock_frequency = clk_get_rate(priv->clk); 749 } 750 751 id = arc_reg_get(priv, R_ID); 752 753 /* Check for EMAC revision 5 or 7, magic number */ 754 if (!(id == 0x0005fd02 || id == 0x0007fd02)) { 755 dev_err(&pdev->dev, "ARC EMAC not detected, id=0x%x\n", id); 756 err = -ENODEV; 757 goto out_clken; 758 } 759 dev_info(&pdev->dev, "ARC EMAC detected with id: 0x%x\n", id); 760 761 /* Set poll rate so that it polls every 1 ms */ 762 arc_reg_set(priv, R_POLLRATE, clock_frequency / 1000000); 763 764 ndev->irq = irq; 765 dev_info(&pdev->dev, "IRQ is %d\n", ndev->irq); 766 767 /* Register interrupt handler for device */ 768 err = devm_request_irq(&pdev->dev, ndev->irq, arc_emac_intr, 0, 769 ndev->name, ndev); 770 if (err) { 771 dev_err(&pdev->dev, "could not allocate IRQ\n"); 772 goto out_clken; 773 } 774 775 /* Get MAC address from device tree */ 776 mac_addr = of_get_mac_address(pdev->dev.of_node); 777 778 if (mac_addr) 779 memcpy(ndev->dev_addr, mac_addr, ETH_ALEN); 780 else 781 eth_hw_addr_random(ndev); 782 783 arc_emac_set_address_internal(ndev); 784 dev_info(&pdev->dev, "MAC address is now %pM\n", ndev->dev_addr); 785 786 /* Do 1 allocation instead of 2 separate ones for Rx and Tx BD rings */ 787 priv->rxbd = dmam_alloc_coherent(&pdev->dev, RX_RING_SZ + TX_RING_SZ, 788 &priv->rxbd_dma, GFP_KERNEL); 789 790 if (!priv->rxbd) { 791 dev_err(&pdev->dev, "failed to allocate data buffers\n"); 792 err = -ENOMEM; 793 goto out_clken; 794 } 795 796 priv->txbd = priv->rxbd + RX_BD_NUM; 797 798 priv->txbd_dma = priv->rxbd_dma + RX_RING_SZ; 799 dev_dbg(&pdev->dev, "EMAC Device addr: Rx Ring [0x%x], Tx Ring[%x]\n", 800 (unsigned int)priv->rxbd_dma, (unsigned int)priv->txbd_dma); 801 802 err = arc_mdio_probe(pdev, priv); 803 if (err) { 804 dev_err(&pdev->dev, "failed to probe MII bus\n"); 805 goto out_clken; 806 } 807 808 priv->phy_dev = of_phy_connect(ndev, phy_node, arc_emac_adjust_link, 0, 809 PHY_INTERFACE_MODE_MII); 810 if (!priv->phy_dev) { 811 dev_err(&pdev->dev, "of_phy_connect() failed\n"); 812 err = -ENODEV; 813 goto out_mdio; 814 } 815 816 dev_info(&pdev->dev, "connected to %s phy with id 0x%x\n", 817 priv->phy_dev->drv->name, priv->phy_dev->phy_id); 818 819 netif_napi_add(ndev, &priv->napi, arc_emac_poll, ARC_EMAC_NAPI_WEIGHT); 820 821 err = register_netdev(ndev); 822 if (err) { 823 dev_err(&pdev->dev, "failed to register network device\n"); 824 goto out_netif_api; 825 } 826 827 return 0; 828 829 out_netif_api: 830 netif_napi_del(&priv->napi); 831 phy_disconnect(priv->phy_dev); 832 priv->phy_dev = NULL; 833 out_mdio: 834 arc_mdio_remove(priv); 835 out_clken: 836 if (!IS_ERR(priv->clk)) 837 clk_disable_unprepare(priv->clk); 838 out_clkget: 839 if (!IS_ERR(priv->clk)) 840 clk_put(priv->clk); 841 out_netdev: 842 free_netdev(ndev); 843 return err; 844 } 845 846 static int arc_emac_remove(struct platform_device *pdev) 847 { 848 struct net_device *ndev = platform_get_drvdata(pdev); 849 struct arc_emac_priv *priv = netdev_priv(ndev); 850 851 phy_disconnect(priv->phy_dev); 852 priv->phy_dev = NULL; 853 arc_mdio_remove(priv); 854 unregister_netdev(ndev); 855 netif_napi_del(&priv->napi); 856 857 if (!IS_ERR(priv->clk)) { 858 clk_disable_unprepare(priv->clk); 859 clk_put(priv->clk); 860 } 861 862 free_netdev(ndev); 863 864 return 0; 865 } 866 867 static const struct of_device_id arc_emac_dt_ids[] = { 868 { .compatible = "snps,arc-emac" }, 869 { /* Sentinel */ } 870 }; 871 MODULE_DEVICE_TABLE(of, arc_emac_dt_ids); 872 873 static struct platform_driver arc_emac_driver = { 874 .probe = arc_emac_probe, 875 .remove = arc_emac_remove, 876 .driver = { 877 .name = DRV_NAME, 878 .owner = THIS_MODULE, 879 .of_match_table = arc_emac_dt_ids, 880 }, 881 }; 882 883 module_platform_driver(arc_emac_driver); 884 885 MODULE_AUTHOR("Alexey Brodkin <abrodkin@synopsys.com>"); 886 MODULE_DESCRIPTION("ARC EMAC driver"); 887 MODULE_LICENSE("GPL"); 888