/*
 * Copyright (C) 2004-2013 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Driver for the ARC EMAC 10100 (hardware revision 5)
 *
 * Contributors:
 *	Amit Bhor
 *	Sameer Dhavale
 *	Vineet Gupta
 */

#include <linux/crc32.h>
#include <linux/etherdevice.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/of_platform.h>

#include "emac.h"

/**
 * arc_emac_tx_avail - Return the number of available slots in the Tx ring.
 * @priv: Pointer to ARC EMAC private data structure.
 *
 * returns: the number of slots available for transmission in the Tx ring.
 */
static inline int arc_emac_tx_avail(struct arc_emac_priv *priv)
{
	return (priv->txbd_dirty + TX_BD_NUM - priv->txbd_curr - 1) % TX_BD_NUM;
}
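
/* Illustrative arithmetic for the expression above (example values, not
 * taken from emac.h): if TX_BD_NUM were 128, txbd_dirty == 3 and
 * txbd_curr == 5, then (3 + 128 - 5 - 1) % 128 == 125 slots are free.
 * One slot is deliberately kept unused so that "txbd_curr == txbd_dirty"
 * always means "ring empty" and never "ring full".
 */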
/**
 * arc_emac_adjust_link - Adjust the PHY link duplex.
 * @ndev: Pointer to the net_device structure.
 *
 * This function is called to change the duplex setting after auto negotiation
 * is done by the PHY.
 */
static void arc_emac_adjust_link(struct net_device *ndev)
{
	struct arc_emac_priv *priv = netdev_priv(ndev);
	struct phy_device *phy_dev = priv->phy_dev;
	unsigned int reg, state_changed = 0;

	if (priv->link != phy_dev->link) {
		priv->link = phy_dev->link;
		state_changed = 1;
	}

	if (priv->speed != phy_dev->speed) {
		priv->speed = phy_dev->speed;
		state_changed = 1;
		if (priv->set_mac_speed)
			priv->set_mac_speed(priv, priv->speed);
	}

	if (priv->duplex != phy_dev->duplex) {
		reg = arc_reg_get(priv, R_CTRL);

		if (phy_dev->duplex == DUPLEX_FULL)
			reg |= ENFL_MASK;
		else
			reg &= ~ENFL_MASK;

		arc_reg_set(priv, R_CTRL, reg);
		priv->duplex = phy_dev->duplex;
		state_changed = 1;
	}

	if (state_changed)
		phy_print_status(phy_dev);
}

/**
 * arc_emac_get_settings - Get PHY settings.
 * @ndev: Pointer to net_device structure.
 * @cmd: Pointer to ethtool_cmd structure.
 *
 * This implements the ethtool command for getting PHY settings. If the PHY
 * could not be found, the function returns -ENODEV. This function calls the
 * relevant PHY ethtool API to get the PHY settings.
 * Issue "ethtool ethX" under the Linux prompt to execute this function.
 */
static int arc_emac_get_settings(struct net_device *ndev,
				 struct ethtool_cmd *cmd)
{
	struct arc_emac_priv *priv = netdev_priv(ndev);

	return phy_ethtool_gset(priv->phy_dev, cmd);
}

/**
 * arc_emac_set_settings - Set PHY settings as passed in the argument.
 * @ndev: Pointer to net_device structure.
 * @cmd: Pointer to ethtool_cmd structure.
 *
 * This implements the ethtool command for setting various PHY settings. If
 * the PHY could not be found, the function returns -ENODEV. This function
 * calls the relevant PHY ethtool API to set the PHY.
 * Issue e.g. "ethtool -s ethX speed 1000" under the Linux prompt to execute
 * this function.
 */
static int arc_emac_set_settings(struct net_device *ndev,
				 struct ethtool_cmd *cmd)
{
	struct arc_emac_priv *priv = netdev_priv(ndev);

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	return phy_ethtool_sset(priv->phy_dev, cmd);
}

/**
 * arc_emac_get_drvinfo - Get EMAC driver information.
 * @ndev: Pointer to net_device structure.
 * @info: Pointer to ethtool_drvinfo structure.
 *
 * This implements the ethtool command for getting the driver information.
 * Issue "ethtool -i ethX" under the Linux prompt to execute this function.
 */
static void arc_emac_get_drvinfo(struct net_device *ndev,
				 struct ethtool_drvinfo *info)
{
	struct arc_emac_priv *priv = netdev_priv(ndev);

	strlcpy(info->driver, priv->drv_name, sizeof(info->driver));
	strlcpy(info->version, priv->drv_version, sizeof(info->version));
}

static const struct ethtool_ops arc_emac_ethtool_ops = {
	.get_settings	= arc_emac_get_settings,
	.set_settings	= arc_emac_set_settings,
	.get_drvinfo	= arc_emac_get_drvinfo,
	.get_link	= ethtool_op_get_link,
};

#define FIRST_OR_LAST_MASK	(FIRST_MASK | LAST_MASK)
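
/* The EMAC places each frame in a single buffer descriptor, so a completed
 * BD should always carry both FIRST and LAST; seeing only one of them means
 * the frame was split across BDs, which the Rx path below treats as an
 * error (buffers are pre-allocated at full buffer size to prevent this).
 */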
/**
 * arc_emac_tx_clean - clears Tx BDs already processed by the EMAC.
 * @ndev: Pointer to the network device.
 */
static void arc_emac_tx_clean(struct net_device *ndev)
{
	struct arc_emac_priv *priv = netdev_priv(ndev);
	struct net_device_stats *stats = &ndev->stats;
	unsigned int i;

	for (i = 0; i < TX_BD_NUM; i++) {
		unsigned int *txbd_dirty = &priv->txbd_dirty;
		struct arc_emac_bd *txbd = &priv->txbd[*txbd_dirty];
		struct buffer_state *tx_buff = &priv->tx_buff[*txbd_dirty];
		struct sk_buff *skb = tx_buff->skb;
		unsigned int info = le32_to_cpu(txbd->info);

		if ((info & FOR_EMAC) || !txbd->data || !skb)
			break;

		if (unlikely(info & (DROP | DEFR | LTCL | UFLO))) {
			stats->tx_errors++;
			stats->tx_dropped++;

			if (info & DEFR)
				stats->tx_carrier_errors++;

			if (info & LTCL)
				stats->collisions++;

			if (info & UFLO)
				stats->tx_fifo_errors++;
		} else if (likely(info & FIRST_OR_LAST_MASK)) {
			stats->tx_packets++;
			stats->tx_bytes += skb->len;
		}

		dma_unmap_single(&ndev->dev, dma_unmap_addr(tx_buff, addr),
				 dma_unmap_len(tx_buff, len), DMA_TO_DEVICE);

		/* return the sk_buff to system */
		dev_kfree_skb_irq(skb);

		txbd->data = 0;
		txbd->info = 0;
		tx_buff->skb = NULL;

		*txbd_dirty = (*txbd_dirty + 1) % TX_BD_NUM;
	}

	/* Ensure that txbd_dirty is visible to tx() before checking
	 * for queue stopped.
	 */
	smp_mb();

	if (netif_queue_stopped(ndev) && arc_emac_tx_avail(priv))
		netif_wake_queue(ndev);
}

/**
 * arc_emac_rx - processing of Rx packets.
 * @ndev: Pointer to the network device.
 * @budget: How many BDs to process on 1 call.
 *
 * returns: Number of processed BDs
 *
 * Iterate through Rx BDs and deliver received packets to the upper layer.
 */
static int arc_emac_rx(struct net_device *ndev, int budget)
{
	struct arc_emac_priv *priv = netdev_priv(ndev);
	unsigned int work_done;

	for (work_done = 0; work_done < budget; work_done++) {
		unsigned int *last_rx_bd = &priv->last_rx_bd;
		struct net_device_stats *stats = &ndev->stats;
		struct buffer_state *rx_buff = &priv->rx_buff[*last_rx_bd];
		struct arc_emac_bd *rxbd = &priv->rxbd[*last_rx_bd];
		unsigned int pktlen, info = le32_to_cpu(rxbd->info);
		struct sk_buff *skb;
		dma_addr_t addr;

		if (unlikely((info & OWN_MASK) == FOR_EMAC))
			break;

		/* Make a note that we saw a packet at this BD.
		 * So next time, driver starts from this + 1
		 */
		*last_rx_bd = (*last_rx_bd + 1) % RX_BD_NUM;

		if (unlikely((info & FIRST_OR_LAST_MASK) !=
			     FIRST_OR_LAST_MASK)) {
			/* We pre-allocate buffers of MTU size so incoming
			 * packets won't be split/chained.
			 */
			if (net_ratelimit())
				netdev_err(ndev, "incomplete packet received\n");

			/* Return ownership to EMAC */
			rxbd->info = cpu_to_le32(FOR_EMAC | EMAC_BUFFER_SIZE);
			stats->rx_errors++;
			stats->rx_length_errors++;
			continue;
		}

		pktlen = info & LEN_MASK;
		stats->rx_packets++;
		stats->rx_bytes += pktlen;
		skb = rx_buff->skb;
		skb_put(skb, pktlen);
		skb->dev = ndev;
		skb->protocol = eth_type_trans(skb, ndev);

		dma_unmap_single(&ndev->dev, dma_unmap_addr(rx_buff, addr),
				 dma_unmap_len(rx_buff, len), DMA_FROM_DEVICE);

		/* Prepare the BD for next cycle */
		rx_buff->skb = netdev_alloc_skb_ip_align(ndev,
							 EMAC_BUFFER_SIZE);
		if (unlikely(!rx_buff->skb)) {
			stats->rx_errors++;
			/* Because receive_skb is below, increment rx_dropped */
			stats->rx_dropped++;
			continue;
		}

		/* receive_skb only if new skb was allocated to avoid holes */
		netif_receive_skb(skb);

		addr = dma_map_single(&ndev->dev, (void *)rx_buff->skb->data,
				      EMAC_BUFFER_SIZE, DMA_FROM_DEVICE);
		if (dma_mapping_error(&ndev->dev, addr)) {
			if (net_ratelimit())
				netdev_err(ndev, "cannot dma map\n");
			dev_kfree_skb(rx_buff->skb);
			stats->rx_errors++;
			continue;
		}
		dma_unmap_addr_set(rx_buff, addr, addr);
		dma_unmap_len_set(rx_buff, len, EMAC_BUFFER_SIZE);

		rxbd->data = cpu_to_le32(addr);

		/* Make sure pointer to data buffer is set */
		wmb();

		/* Return ownership to EMAC */
		rxbd->info = cpu_to_le32(FOR_EMAC | EMAC_BUFFER_SIZE);
	}

	return work_done;
}
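
/* NAPI interplay (a summary of the surrounding routines): the interrupt
 * handler below masks RX/TX interrupts and schedules NAPI; arc_emac_poll()
 * first reclaims completed Tx BDs, then handles up to @budget Rx BDs, and
 * only when the rings are drained (work_done < budget) completes NAPI and
 * unmasks the interrupts again.
 */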
/**
 * arc_emac_poll - NAPI poll handler.
 * @napi: Pointer to napi_struct structure.
 * @budget: How many BDs to process on 1 call.
 *
 * returns: Number of processed BDs
 */
static int arc_emac_poll(struct napi_struct *napi, int budget)
{
	struct net_device *ndev = napi->dev;
	struct arc_emac_priv *priv = netdev_priv(ndev);
	unsigned int work_done;

	arc_emac_tx_clean(ndev);

	work_done = arc_emac_rx(ndev, budget);
	if (work_done < budget) {
		napi_complete(napi);
		arc_reg_or(priv, R_ENABLE, RXINT_MASK | TXINT_MASK);
	}

	return work_done;
}

/**
 * arc_emac_intr - Global interrupt handler for EMAC.
 * @irq: irq number.
 * @dev_instance: device instance.
 *
 * returns: IRQ_HANDLED for all cases.
 *
 * The ARC EMAC has a single interrupt line; the bits raised in the STATUS
 * register tell us why the interrupt fired.
 */
static irqreturn_t arc_emac_intr(int irq, void *dev_instance)
{
	struct net_device *ndev = dev_instance;
	struct arc_emac_priv *priv = netdev_priv(ndev);
	struct net_device_stats *stats = &ndev->stats;
	unsigned int status;

	status = arc_reg_get(priv, R_STATUS);
	status &= ~MDIO_MASK;

	/* Reset all flags except "MDIO complete" */
	arc_reg_set(priv, R_STATUS, status);

	if (status & (RXINT_MASK | TXINT_MASK)) {
		if (likely(napi_schedule_prep(&priv->napi))) {
			arc_reg_clr(priv, R_ENABLE, RXINT_MASK | TXINT_MASK);
			__napi_schedule(&priv->napi);
		}
	}

	if (status & ERR_MASK) {
		/* MSER/RXCR/RXFR/RXFL interrupt fires on corresponding
		 * 8-bit error counter overrun.
		 */

		if (status & MSER_MASK) {
			stats->rx_missed_errors += 0x100;
			stats->rx_errors += 0x100;
		}

		if (status & RXCR_MASK) {
			stats->rx_crc_errors += 0x100;
			stats->rx_errors += 0x100;
		}

		if (status & RXFR_MASK) {
			stats->rx_frame_errors += 0x100;
			stats->rx_errors += 0x100;
		}

		if (status & RXFL_MASK) {
			stats->rx_over_errors += 0x100;
			stats->rx_errors += 0x100;
		}
	}

	return IRQ_HANDLED;
}
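
/* Writing the handled bits back to R_STATUS above acknowledges those
 * interrupt sources; MDIO_MASK is excluded from the write, presumably so
 * that the MDIO code can still observe its completion flag.
 */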
#ifdef CONFIG_NET_POLL_CONTROLLER
static void arc_emac_poll_controller(struct net_device *dev)
{
	disable_irq(dev->irq);
	arc_emac_intr(dev->irq, dev);
	enable_irq(dev->irq);
}
#endif

/**
 * arc_emac_open - Open the network device.
 * @ndev: Pointer to the network device.
 *
 * returns: 0, on success or non-zero error value on failure.
 *
 * This function allocates and initializes the Rx and Tx descriptor rings,
 * programs the EMAC registers, enables interrupts, (re)starts PHY
 * auto-negotiation and starts the Tx queue.
 */
static int arc_emac_open(struct net_device *ndev)
{
	struct arc_emac_priv *priv = netdev_priv(ndev);
	struct phy_device *phy_dev = priv->phy_dev;
	int i;

	phy_dev->autoneg = AUTONEG_ENABLE;
	phy_dev->speed = 0;
	phy_dev->duplex = 0;
	phy_dev->advertising &= phy_dev->supported;

	priv->last_rx_bd = 0;

	/* Allocate and set buffers for Rx BD's */
	for (i = 0; i < RX_BD_NUM; i++) {
		dma_addr_t addr;
		unsigned int *last_rx_bd = &priv->last_rx_bd;
		struct arc_emac_bd *rxbd = &priv->rxbd[*last_rx_bd];
		struct buffer_state *rx_buff = &priv->rx_buff[*last_rx_bd];

		rx_buff->skb = netdev_alloc_skb_ip_align(ndev,
							 EMAC_BUFFER_SIZE);
		if (unlikely(!rx_buff->skb))
			return -ENOMEM;

		addr = dma_map_single(&ndev->dev, (void *)rx_buff->skb->data,
				      EMAC_BUFFER_SIZE, DMA_FROM_DEVICE);
		if (dma_mapping_error(&ndev->dev, addr)) {
			netdev_err(ndev, "cannot dma map\n");
			dev_kfree_skb(rx_buff->skb);
			return -ENOMEM;
		}
		dma_unmap_addr_set(rx_buff, addr, addr);
		dma_unmap_len_set(rx_buff, len, EMAC_BUFFER_SIZE);

		rxbd->data = cpu_to_le32(addr);

		/* Make sure pointer to data buffer is set */
		wmb();

		/* Return ownership to EMAC */
		rxbd->info = cpu_to_le32(FOR_EMAC | EMAC_BUFFER_SIZE);

		*last_rx_bd = (*last_rx_bd + 1) % RX_BD_NUM;
	}

	priv->txbd_curr = 0;
	priv->txbd_dirty = 0;

	/* Clean Tx BD's */
	memset(priv->txbd, 0, TX_RING_SZ);

	/* Initialize logical address filter */
	arc_reg_set(priv, R_LAFL, 0);
	arc_reg_set(priv, R_LAFH, 0);

	/* Set BD ring pointers for device side */
	arc_reg_set(priv, R_RX_RING, (unsigned int)priv->rxbd_dma);
	arc_reg_set(priv, R_TX_RING, (unsigned int)priv->txbd_dma);

	/* Enable interrupts */
	arc_reg_set(priv, R_ENABLE, RXINT_MASK | TXINT_MASK | ERR_MASK);

	/* Set CONTROL */
	arc_reg_set(priv, R_CTRL,
		    (RX_BD_NUM << 24) |	/* RX BD table length */
		    (TX_BD_NUM << 16) |	/* TX BD table length */
		    TXRN_MASK | RXRN_MASK);

	napi_enable(&priv->napi);

	/* Enable EMAC */
	arc_reg_or(priv, R_CTRL, EN_MASK);

	phy_start_aneg(priv->phy_dev);

	netif_start_queue(ndev);

	return 0;
}

/**
 * arc_emac_set_rx_mode - Change the receive filtering mode.
 * @ndev: Pointer to the network device.
 *
 * This function enables/disables promiscuous or all-multicast mode
 * and updates the multicast filtering list of the network device.
 */
static void arc_emac_set_rx_mode(struct net_device *ndev)
{
	struct arc_emac_priv *priv = netdev_priv(ndev);

	if (ndev->flags & IFF_PROMISC) {
		arc_reg_or(priv, R_CTRL, PROM_MASK);
	} else {
		arc_reg_clr(priv, R_CTRL, PROM_MASK);

		if (ndev->flags & IFF_ALLMULTI) {
			arc_reg_set(priv, R_LAFL, ~0);
			arc_reg_set(priv, R_LAFH, ~0);
		} else {
			struct netdev_hw_addr *ha;
			unsigned int filter[2] = { 0, 0 };
			int bit;

			netdev_for_each_mc_addr(ha, ndev) {
				bit = ether_crc_le(ETH_ALEN, ha->addr) >> 26;
				filter[bit >> 5] |= 1 << (bit & 31);
			}

			arc_reg_set(priv, R_LAFL, filter[0]);
			arc_reg_set(priv, R_LAFH, filter[1]);
		}
	}
}
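
/* How the hash above maps into the 64-bit logical address filter (an
 * illustration, not additional driver logic): ether_crc_le() yields a
 * 32-bit CRC and ">> 26" keeps its top 6 bits, i.e. a value 0..63.
 * Bit 5 (bit >> 5) selects LAFL (0) or LAFH (1) and the low 5 bits select
 * a bit within that 32-bit register; e.g. a hash of 33 sets bit 1 of LAFH.
 */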
/**
 * arc_free_tx_queue - free skbs from the Tx queue
 * @ndev: Pointer to the network device.
 *
 * This function must be called while the EMAC is disabled.
 */
static void arc_free_tx_queue(struct net_device *ndev)
{
	struct arc_emac_priv *priv = netdev_priv(ndev);
	unsigned int i;

	for (i = 0; i < TX_BD_NUM; i++) {
		struct arc_emac_bd *txbd = &priv->txbd[i];
		struct buffer_state *tx_buff = &priv->tx_buff[i];

		if (tx_buff->skb) {
			dma_unmap_single(&ndev->dev,
					 dma_unmap_addr(tx_buff, addr),
					 dma_unmap_len(tx_buff, len),
					 DMA_TO_DEVICE);

			/* return the sk_buff to system */
			dev_kfree_skb_irq(tx_buff->skb);
		}

		txbd->info = 0;
		txbd->data = 0;
		tx_buff->skb = NULL;
	}
}

/**
 * arc_free_rx_queue - free skbs from the Rx queue
 * @ndev: Pointer to the network device.
 *
 * This function must be called while the EMAC is disabled.
 */
static void arc_free_rx_queue(struct net_device *ndev)
{
	struct arc_emac_priv *priv = netdev_priv(ndev);
	unsigned int i;

	for (i = 0; i < RX_BD_NUM; i++) {
		struct arc_emac_bd *rxbd = &priv->rxbd[i];
		struct buffer_state *rx_buff = &priv->rx_buff[i];

		if (rx_buff->skb) {
			dma_unmap_single(&ndev->dev,
					 dma_unmap_addr(rx_buff, addr),
					 dma_unmap_len(rx_buff, len),
					 DMA_FROM_DEVICE);

			/* return the sk_buff to system */
			dev_kfree_skb_irq(rx_buff->skb);
		}

		rxbd->info = 0;
		rxbd->data = 0;
		rx_buff->skb = NULL;
	}
}

/**
 * arc_emac_stop - Close the network device.
 * @ndev: Pointer to the network device.
 *
 * This function stops the Tx queue and NAPI, disables interrupts, disables
 * the EMAC itself and frees all buffers queued for transmission or receive.
 */
static int arc_emac_stop(struct net_device *ndev)
{
	struct arc_emac_priv *priv = netdev_priv(ndev);

	napi_disable(&priv->napi);
	netif_stop_queue(ndev);

	/* Disable interrupts */
	arc_reg_clr(priv, R_ENABLE, RXINT_MASK | TXINT_MASK | ERR_MASK);

	/* Disable EMAC */
	arc_reg_clr(priv, R_CTRL, EN_MASK);

	/* Return the sk_buffs to system */
	arc_free_tx_queue(ndev);
	arc_free_rx_queue(ndev);

	return 0;
}

/**
 * arc_emac_stats - Get system network statistics.
 * @ndev: Pointer to net_device structure.
 *
 * Returns the address of the device statistics structure.
 * Statistics are updated in interrupt handler.
 */
static struct net_device_stats *arc_emac_stats(struct net_device *ndev)
{
	struct arc_emac_priv *priv = netdev_priv(ndev);
	struct net_device_stats *stats = &ndev->stats;
	unsigned long miss, rxerr;
	u8 rxcrc, rxfram, rxoflow;

	rxerr = arc_reg_get(priv, R_RXERR);
	miss = arc_reg_get(priv, R_MISS);

	rxcrc = rxerr;
	rxfram = rxerr >> 8;
	rxoflow = rxerr >> 16;

	stats->rx_errors += miss;
	stats->rx_errors += rxcrc + rxfram + rxoflow;

	stats->rx_over_errors += rxoflow;
	stats->rx_frame_errors += rxfram;
	stats->rx_crc_errors += rxcrc;
	stats->rx_missed_errors += miss;

	return stats;
}
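
/* The extraction above implies the following R_RXERR layout (inferred from
 * the shifts in the code, not from a datasheet): bits 7:0 count CRC errors,
 * bits 15:8 framing errors and bits 23:16 FIFO overflows; truncation to u8
 * isolates each counter field.
 */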
/**
 * arc_emac_tx - Starts the data transmission.
 * @skb: sk_buff pointer that contains data to be Transmitted.
 * @ndev: Pointer to net_device structure.
 *
 * returns: NETDEV_TX_OK, on success
 *          NETDEV_TX_BUSY, if any of the descriptors are not free.
 *
 * This function is invoked from upper layers to initiate transmission.
 */
static int arc_emac_tx(struct sk_buff *skb, struct net_device *ndev)
{
	struct arc_emac_priv *priv = netdev_priv(ndev);
	unsigned int len, *txbd_curr = &priv->txbd_curr;
	struct net_device_stats *stats = &ndev->stats;
	__le32 *info = &priv->txbd[*txbd_curr].info;
	dma_addr_t addr;

	if (skb_padto(skb, ETH_ZLEN))
		return NETDEV_TX_OK;

	len = max_t(unsigned int, ETH_ZLEN, skb->len);

	if (unlikely(!arc_emac_tx_avail(priv))) {
		netif_stop_queue(ndev);
		netdev_err(ndev, "BUG! Tx Ring full when queue awake!\n");
		return NETDEV_TX_BUSY;
	}

	addr = dma_map_single(&ndev->dev, (void *)skb->data, len,
			      DMA_TO_DEVICE);

	if (unlikely(dma_mapping_error(&ndev->dev, addr))) {
		stats->tx_dropped++;
		stats->tx_errors++;
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}
	dma_unmap_addr_set(&priv->tx_buff[*txbd_curr], addr, addr);
	dma_unmap_len_set(&priv->tx_buff[*txbd_curr], len, len);

	priv->txbd[*txbd_curr].data = cpu_to_le32(addr);

	/* Make sure pointer to data buffer is set */
	wmb();

	skb_tx_timestamp(skb);

	*info = cpu_to_le32(FOR_EMAC | FIRST_OR_LAST_MASK | len);

	/* Make sure info word is set */
	wmb();

	priv->tx_buff[*txbd_curr].skb = skb;

	/* Increment index to point to the next BD */
	*txbd_curr = (*txbd_curr + 1) % TX_BD_NUM;

	/* Ensure that tx_clean() sees the new txbd_curr before
	 * checking the queue status. This prevents an unneeded wake
	 * of the queue in tx_clean().
	 */
	smp_mb();

	if (!arc_emac_tx_avail(priv)) {
		netif_stop_queue(ndev);
		/* Refresh txbd_dirty */
		smp_mb();
		if (arc_emac_tx_avail(priv))
			netif_start_queue(ndev);
	}

	arc_reg_set(priv, R_STATUS, TXPL_MASK);

	return NETDEV_TX_OK;
}

static void arc_emac_set_address_internal(struct net_device *ndev)
{
	struct arc_emac_priv *priv = netdev_priv(ndev);
	unsigned int addr_low, addr_hi;

	addr_low = le32_to_cpu(*(__le32 *)&ndev->dev_addr[0]);
	addr_hi = le16_to_cpu(*(__le16 *)&ndev->dev_addr[4]);

	arc_reg_set(priv, R_ADDRL, addr_low);
	arc_reg_set(priv, R_ADDRH, addr_hi);
}
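
/* Worked example for the packing above (an illustration only): for the MAC
 * address 00:11:22:33:44:55, dev_addr[] holds the bytes in that order, so
 * R_ADDRL is written with 0x33221100 and R_ADDRH with 0x00005544, i.e. the
 * hardware takes the address as two little-endian words.
 */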
/**
 * arc_emac_set_address - Set the MAC address for this device.
 * @ndev: Pointer to net_device structure.
 * @p: 6 byte Address to be written as MAC address.
 *
 * This function copies the HW address from the sockaddr structure to the
 * net_device structure and updates the address in HW.
 *
 * returns: -EBUSY if the net device is busy or 0 if the address is set
 * successfully.
 */
static int arc_emac_set_address(struct net_device *ndev, void *p)
{
	struct sockaddr *addr = p;

	if (netif_running(ndev))
		return -EBUSY;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);

	arc_emac_set_address_internal(ndev);

	return 0;
}

static const struct net_device_ops arc_emac_netdev_ops = {
	.ndo_open		= arc_emac_open,
	.ndo_stop		= arc_emac_stop,
	.ndo_start_xmit		= arc_emac_tx,
	.ndo_set_mac_address	= arc_emac_set_address,
	.ndo_get_stats		= arc_emac_stats,
	.ndo_set_rx_mode	= arc_emac_set_rx_mode,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= arc_emac_poll_controller,
#endif
};

int arc_emac_probe(struct net_device *ndev, int interface)
{
	struct device *dev = ndev->dev.parent;
	struct resource res_regs;
	struct device_node *phy_node;
	struct arc_emac_priv *priv;
	const char *mac_addr;
	unsigned int id, clock_frequency, irq;
	int err;

	/* Get PHY from device tree */
	phy_node = of_parse_phandle(dev->of_node, "phy", 0);
	if (!phy_node) {
		dev_err(dev, "failed to retrieve phy description from device tree\n");
		return -ENODEV;
	}

	/* Get EMAC registers base address from device tree */
	err = of_address_to_resource(dev->of_node, 0, &res_regs);
	if (err) {
		dev_err(dev, "failed to retrieve registers base from device tree\n");
		return -ENODEV;
	}

	/* Get IRQ from device tree */
	irq = irq_of_parse_and_map(dev->of_node, 0);
	if (!irq) {
		dev_err(dev, "failed to retrieve <irq> value from device tree\n");
		return -ENODEV;
	}
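
	/* Sketch of a device-tree node this probe routine expects (the
	 * "snps,arc-emac" compatible string and the addresses are
	 * illustrative; only the "phy", reg, interrupt and
	 * "clock-frequency" lookups are taken from the code itself):
	 *
	 *	ethernet@c0fc2000 {
	 *		compatible = "snps,arc-emac";
	 *		reg = <0xc0fc2000 0x3c>;
	 *		interrupts = <6>;
	 *		clock-frequency = <80000000>;
	 *		phy = <&phy0>;
	 *	};
	 */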

	ndev->netdev_ops = &arc_emac_netdev_ops;
	ndev->ethtool_ops = &arc_emac_ethtool_ops;
	ndev->watchdog_timeo = TX_TIMEOUT;
	/* FIXME :: no multicast support yet */
	ndev->flags &= ~IFF_MULTICAST;

	priv = netdev_priv(ndev);
	priv->dev = dev;

	priv->regs = devm_ioremap_resource(dev, &res_regs);
	if (IS_ERR(priv->regs))
		return PTR_ERR(priv->regs);

	dev_dbg(dev, "Registers base address is 0x%p\n", priv->regs);

	if (priv->clk) {
		err = clk_prepare_enable(priv->clk);
		if (err) {
			dev_err(dev, "failed to enable clock\n");
			return err;
		}

		clock_frequency = clk_get_rate(priv->clk);
	} else {
		/* Get CPU clock frequency from device tree */
		if (of_property_read_u32(dev->of_node, "clock-frequency",
					 &clock_frequency)) {
			dev_err(dev, "failed to retrieve <clock-frequency> from device tree\n");
			return -EINVAL;
		}
	}

	id = arc_reg_get(priv, R_ID);

	/* Check for EMAC revision 5 or 7, magic number */
	if (!(id == 0x0005fd02 || id == 0x0007fd02)) {
		dev_err(dev, "ARC EMAC not detected, id=0x%x\n", id);
		err = -ENODEV;
		goto out_clken;
	}
	dev_info(dev, "ARC EMAC detected with id: 0x%x\n", id);

	/* Set poll rate so that it polls every 1 ms */
	arc_reg_set(priv, R_POLLRATE, clock_frequency / 1000000);

	ndev->irq = irq;
	dev_info(dev, "IRQ is %d\n", ndev->irq);

	/* Register interrupt handler for device */
	err = devm_request_irq(dev, ndev->irq, arc_emac_intr, 0,
			       ndev->name, ndev);
	if (err) {
		dev_err(dev, "could not allocate IRQ\n");
		goto out_clken;
	}

	/* Get MAC address from device tree */
	mac_addr = of_get_mac_address(dev->of_node);

	if (mac_addr)
		memcpy(ndev->dev_addr, mac_addr, ETH_ALEN);
	else
		eth_hw_addr_random(ndev);

	arc_emac_set_address_internal(ndev);
	dev_info(dev, "MAC address is now %pM\n", ndev->dev_addr);

	/* Do 1 allocation instead of 2 separate ones for Rx and Tx BD rings */
	priv->rxbd = dmam_alloc_coherent(dev, RX_RING_SZ + TX_RING_SZ,
					 &priv->rxbd_dma, GFP_KERNEL);

	if (!priv->rxbd) {
		dev_err(dev, "failed to allocate data buffers\n");
		err = -ENOMEM;
		goto out_clken;
	}

	priv->txbd = priv->rxbd + RX_BD_NUM;

	priv->txbd_dma = priv->rxbd_dma + RX_RING_SZ;
	dev_dbg(dev, "EMAC Device addr: Rx Ring [0x%x], Tx Ring [0x%x]\n",
		(unsigned int)priv->rxbd_dma, (unsigned int)priv->txbd_dma);

	err = arc_mdio_probe(priv);
	if (err) {
		dev_err(dev, "failed to probe MII bus\n");
		goto out_clken;
	}

	priv->phy_dev = of_phy_connect(ndev, phy_node, arc_emac_adjust_link, 0,
				       interface);
	if (!priv->phy_dev) {
		dev_err(dev, "of_phy_connect() failed\n");
		err = -ENODEV;
		goto out_mdio;
	}

	dev_info(dev, "connected to %s phy with id 0x%x\n",
		 priv->phy_dev->drv->name, priv->phy_dev->phy_id);

	netif_napi_add(ndev, &priv->napi, arc_emac_poll, ARC_EMAC_NAPI_WEIGHT);

	err = register_netdev(ndev);
	if (err) {
		dev_err(dev, "failed to register network device\n");
		goto out_netif_api;
	}

	return 0;

out_netif_api:
	netif_napi_del(&priv->napi);
	phy_disconnect(priv->phy_dev);
	priv->phy_dev = NULL;
out_mdio:
	arc_mdio_remove(priv);
out_clken:
	if (priv->clk)
		clk_disable_unprepare(priv->clk);
	return err;
}
EXPORT_SYMBOL_GPL(arc_emac_probe);

int arc_emac_remove(struct net_device *ndev)
{
	struct arc_emac_priv *priv = netdev_priv(ndev);

	phy_disconnect(priv->phy_dev);
	priv->phy_dev = NULL;
	arc_mdio_remove(priv);
	unregister_netdev(ndev);
	netif_napi_del(&priv->napi);

	if (!IS_ERR(priv->clk))
		clk_disable_unprepare(priv->clk);

	return 0;
}
EXPORT_SYMBOL_GPL(arc_emac_remove);

MODULE_AUTHOR("Alexey Brodkin <abrodkin@synopsys.com>");
MODULE_DESCRIPTION("ARC EMAC driver");
MODULE_LICENSE("GPL");