1 /* 2 * Faraday FTGMAC100 Gigabit Ethernet 3 * 4 * (C) Copyright 2009-2011 Faraday Technology 5 * Po-Yu Chuang <ratbert@faraday-tech.com> 6 * 7 * This program is free software; you can redistribute it and/or modify 8 * it under the terms of the GNU General Public License as published by 9 * the Free Software Foundation; either version 2 of the License, or 10 * (at your option) any later version. 11 * 12 * This program is distributed in the hope that it will be useful, 13 * but WITHOUT ANY WARRANTY; without even the implied warranty of 14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 15 * GNU General Public License for more details. 16 * 17 * You should have received a copy of the GNU General Public License 18 * along with this program; if not, write to the Free Software 19 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 20 */ 21 22 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 23 24 #include <linux/dma-mapping.h> 25 #include <linux/etherdevice.h> 26 #include <linux/ethtool.h> 27 #include <linux/interrupt.h> 28 #include <linux/io.h> 29 #include <linux/module.h> 30 #include <linux/netdevice.h> 31 #include <linux/of.h> 32 #include <linux/phy.h> 33 #include <linux/platform_device.h> 34 #include <linux/property.h> 35 #include <net/ip.h> 36 #include <net/ncsi.h> 37 38 #include "ftgmac100.h" 39 40 #define DRV_NAME "ftgmac100" 41 #define DRV_VERSION "0.7" 42 43 #define RX_QUEUE_ENTRIES 256 /* must be power of 2 */ 44 #define TX_QUEUE_ENTRIES 512 /* must be power of 2 */ 45 46 #define MAX_PKT_SIZE 1536 47 #define RX_BUF_SIZE MAX_PKT_SIZE /* must be smaller than 0x3fff */ 48 49 /* Min number of tx ring entries before stopping queue */ 50 #define TX_THRESHOLD (MAX_SKB_FRAGS + 1) 51 52 struct ftgmac100_descs { 53 struct ftgmac100_rxdes rxdes[RX_QUEUE_ENTRIES]; 54 struct ftgmac100_txdes txdes[TX_QUEUE_ENTRIES]; 55 }; 56 57 struct ftgmac100 { 58 /* Registers */ 59 struct resource *res; 60 void __iomem *base; 61 62 struct ftgmac100_descs *descs; 63 dma_addr_t descs_dma_addr; 64 65 /* Rx ring */ 66 struct sk_buff *rx_skbs[RX_QUEUE_ENTRIES]; 67 unsigned int rx_pointer; 68 u32 rxdes0_edorr_mask; 69 70 /* Tx ring */ 71 struct sk_buff *tx_skbs[TX_QUEUE_ENTRIES]; 72 unsigned int tx_clean_pointer; 73 unsigned int tx_pointer; 74 u32 txdes0_edotr_mask; 75 76 /* Scratch page to use when rx skb alloc fails */ 77 void *rx_scratch; 78 dma_addr_t rx_scratch_dma; 79 80 /* Component structures */ 81 struct net_device *netdev; 82 struct device *dev; 83 struct ncsi_dev *ndev; 84 struct napi_struct napi; 85 struct work_struct reset_task; 86 struct mii_bus *mii_bus; 87 88 /* Link management */ 89 int cur_speed; 90 int cur_duplex; 91 bool use_ncsi; 92 93 /* Misc */ 94 bool need_mac_restart; 95 bool is_aspeed; 96 }; 97 98 static void ftgmac100_set_rx_ring_base(struct ftgmac100 *priv, dma_addr_t addr) 99 { 100 iowrite32(addr, priv->base + FTGMAC100_OFFSET_RXR_BADR); 101 } 102 103 static void ftgmac100_set_rx_buffer_size(struct ftgmac100 *priv, 104 unsigned int size) 105 { 106 size = FTGMAC100_RBSR_SIZE(size); 107 iowrite32(size, priv->base + FTGMAC100_OFFSET_RBSR); 108 } 109 110 static void ftgmac100_set_normal_prio_tx_ring_base(struct ftgmac100 *priv, 111 dma_addr_t addr) 112 { 113 iowrite32(addr, priv->base + FTGMAC100_OFFSET_NPTXR_BADR); 114 } 115 116 static void ftgmac100_txdma_normal_prio_start_polling(struct ftgmac100 *priv) 117 { 118 iowrite32(1, priv->base + FTGMAC100_OFFSET_NPTXPD); 119 } 120 121 static int ftgmac100_reset_mac(struct ftgmac100 *priv, u32 maccr) 122 { 123 struct net_device *netdev = priv->netdev; 124 int i; 125 126 /* NOTE: reset clears all registers */ 127 iowrite32(maccr, priv->base + FTGMAC100_OFFSET_MACCR); 128 iowrite32(maccr | FTGMAC100_MACCR_SW_RST, 129 priv->base + FTGMAC100_OFFSET_MACCR); 130 for (i = 0; i < 50; i++) { 131 unsigned int maccr; 132 133 maccr = ioread32(priv->base + FTGMAC100_OFFSET_MACCR); 134 if (!(maccr & FTGMAC100_MACCR_SW_RST)) 135 return 0; 136 137 udelay(1); 138 } 139 140 netdev_err(netdev, "Hardware reset failed\n"); 141 return -EIO; 142 } 143 144 static int ftgmac100_reset_and_config_mac(struct ftgmac100 *priv) 145 { 146 u32 maccr = 0; 147 148 switch (priv->cur_speed) { 149 case SPEED_10: 150 case 0: /* no link */ 151 break; 152 153 case SPEED_100: 154 maccr |= FTGMAC100_MACCR_FAST_MODE; 155 break; 156 157 case SPEED_1000: 158 maccr |= FTGMAC100_MACCR_GIGA_MODE; 159 break; 160 default: 161 netdev_err(priv->netdev, "Unknown speed %d !\n", 162 priv->cur_speed); 163 break; 164 } 165 166 /* (Re)initialize the queue pointers */ 167 priv->rx_pointer = 0; 168 priv->tx_clean_pointer = 0; 169 priv->tx_pointer = 0; 170 171 /* The doc says reset twice with 10us interval */ 172 if (ftgmac100_reset_mac(priv, maccr)) 173 return -EIO; 174 usleep_range(10, 1000); 175 return ftgmac100_reset_mac(priv, maccr); 176 } 177 178 static void ftgmac100_write_mac_addr(struct ftgmac100 *priv, const u8 *mac) 179 { 180 unsigned int maddr = mac[0] << 8 | mac[1]; 181 unsigned int laddr = mac[2] << 24 | mac[3] << 16 | mac[4] << 8 | mac[5]; 182 183 iowrite32(maddr, priv->base + FTGMAC100_OFFSET_MAC_MADR); 184 iowrite32(laddr, priv->base + FTGMAC100_OFFSET_MAC_LADR); 185 } 186 187 static void ftgmac100_initial_mac(struct ftgmac100 *priv) 188 { 189 u8 mac[ETH_ALEN]; 190 unsigned int m; 191 unsigned int l; 192 void *addr; 193 194 addr = device_get_mac_address(priv->dev, mac, ETH_ALEN); 195 if (addr) { 196 ether_addr_copy(priv->netdev->dev_addr, mac); 197 dev_info(priv->dev, "Read MAC address %pM from device tree\n", 198 mac); 199 return; 200 } 201 202 m = ioread32(priv->base + FTGMAC100_OFFSET_MAC_MADR); 203 l = ioread32(priv->base + FTGMAC100_OFFSET_MAC_LADR); 204 205 mac[0] = (m >> 8) & 0xff; 206 mac[1] = m & 0xff; 207 mac[2] = (l >> 24) & 0xff; 208 mac[3] = (l >> 16) & 0xff; 209 mac[4] = (l >> 8) & 0xff; 210 mac[5] = l & 0xff; 211 212 if (is_valid_ether_addr(mac)) { 213 ether_addr_copy(priv->netdev->dev_addr, mac); 214 dev_info(priv->dev, "Read MAC address %pM from chip\n", mac); 215 } else { 216 eth_hw_addr_random(priv->netdev); 217 dev_info(priv->dev, "Generated random MAC address %pM\n", 218 priv->netdev->dev_addr); 219 } 220 } 221 222 static int ftgmac100_set_mac_addr(struct net_device *dev, void *p) 223 { 224 int ret; 225 226 ret = eth_prepare_mac_addr_change(dev, p); 227 if (ret < 0) 228 return ret; 229 230 eth_commit_mac_addr_change(dev, p); 231 ftgmac100_write_mac_addr(netdev_priv(dev), dev->dev_addr); 232 233 return 0; 234 } 235 236 static void ftgmac100_init_hw(struct ftgmac100 *priv) 237 { 238 /* setup ring buffer base registers */ 239 ftgmac100_set_rx_ring_base(priv, 240 priv->descs_dma_addr + 241 offsetof(struct ftgmac100_descs, rxdes)); 242 ftgmac100_set_normal_prio_tx_ring_base(priv, 243 priv->descs_dma_addr + 244 offsetof(struct ftgmac100_descs, txdes)); 245 246 ftgmac100_set_rx_buffer_size(priv, RX_BUF_SIZE); 247 248 iowrite32(FTGMAC100_APTC_RXPOLL_CNT(1), priv->base + FTGMAC100_OFFSET_APTC); 249 250 ftgmac100_write_mac_addr(priv, priv->netdev->dev_addr); 251 } 252 253 static void ftgmac100_start_hw(struct ftgmac100 *priv) 254 { 255 u32 maccr = ioread32(priv->base + FTGMAC100_OFFSET_MACCR); 256 257 /* Keep the original GMAC and FAST bits */ 258 maccr &= (FTGMAC100_MACCR_FAST_MODE | FTGMAC100_MACCR_GIGA_MODE); 259 260 /* Add all the main enable bits */ 261 maccr |= FTGMAC100_MACCR_TXDMA_EN | 262 FTGMAC100_MACCR_RXDMA_EN | 263 FTGMAC100_MACCR_TXMAC_EN | 264 FTGMAC100_MACCR_RXMAC_EN | 265 FTGMAC100_MACCR_CRC_APD | 266 FTGMAC100_MACCR_PHY_LINK_LEVEL | 267 FTGMAC100_MACCR_RX_RUNT | 268 FTGMAC100_MACCR_RX_BROADPKT; 269 270 /* Add other bits as needed */ 271 if (priv->cur_duplex == DUPLEX_FULL) 272 maccr |= FTGMAC100_MACCR_FULLDUP; 273 274 /* Hit the HW */ 275 iowrite32(maccr, priv->base + FTGMAC100_OFFSET_MACCR); 276 } 277 278 static void ftgmac100_stop_hw(struct ftgmac100 *priv) 279 { 280 iowrite32(0, priv->base + FTGMAC100_OFFSET_MACCR); 281 } 282 283 static int ftgmac100_alloc_rx_buf(struct ftgmac100 *priv, unsigned int entry, 284 struct ftgmac100_rxdes *rxdes, gfp_t gfp) 285 { 286 struct net_device *netdev = priv->netdev; 287 struct sk_buff *skb; 288 dma_addr_t map; 289 int err; 290 291 skb = netdev_alloc_skb_ip_align(netdev, RX_BUF_SIZE); 292 if (unlikely(!skb)) { 293 if (net_ratelimit()) 294 netdev_warn(netdev, "failed to allocate rx skb\n"); 295 err = -ENOMEM; 296 map = priv->rx_scratch_dma; 297 } else { 298 map = dma_map_single(priv->dev, skb->data, RX_BUF_SIZE, 299 DMA_FROM_DEVICE); 300 if (unlikely(dma_mapping_error(priv->dev, map))) { 301 if (net_ratelimit()) 302 netdev_err(netdev, "failed to map rx page\n"); 303 dev_kfree_skb_any(skb); 304 map = priv->rx_scratch_dma; 305 skb = NULL; 306 err = -ENOMEM; 307 } 308 } 309 310 /* Store skb */ 311 priv->rx_skbs[entry] = skb; 312 313 /* Store DMA address into RX desc */ 314 rxdes->rxdes3 = cpu_to_le32(map); 315 316 /* Ensure the above is ordered vs clearing the OWN bit */ 317 dma_wmb(); 318 319 /* Clean status (which resets own bit) */ 320 if (entry == (RX_QUEUE_ENTRIES - 1)) 321 rxdes->rxdes0 = cpu_to_le32(priv->rxdes0_edorr_mask); 322 else 323 rxdes->rxdes0 = 0; 324 325 return 0; 326 } 327 328 static int ftgmac100_next_rx_pointer(int pointer) 329 { 330 return (pointer + 1) & (RX_QUEUE_ENTRIES - 1); 331 } 332 333 static void ftgmac100_rx_packet_error(struct ftgmac100 *priv, u32 status) 334 { 335 struct net_device *netdev = priv->netdev; 336 337 if (status & FTGMAC100_RXDES0_RX_ERR) 338 netdev->stats.rx_errors++; 339 340 if (status & FTGMAC100_RXDES0_CRC_ERR) 341 netdev->stats.rx_crc_errors++; 342 343 if (status & (FTGMAC100_RXDES0_FTL | 344 FTGMAC100_RXDES0_RUNT | 345 FTGMAC100_RXDES0_RX_ODD_NB)) 346 netdev->stats.rx_length_errors++; 347 } 348 349 static bool ftgmac100_rx_packet(struct ftgmac100 *priv, int *processed) 350 { 351 struct net_device *netdev = priv->netdev; 352 struct ftgmac100_rxdes *rxdes; 353 struct sk_buff *skb; 354 unsigned int pointer, size; 355 u32 status, csum_vlan; 356 dma_addr_t map; 357 358 /* Grab next RX descriptor */ 359 pointer = priv->rx_pointer; 360 rxdes = &priv->descs->rxdes[pointer]; 361 362 /* Grab descriptor status */ 363 status = le32_to_cpu(rxdes->rxdes0); 364 365 /* Do we have a packet ? */ 366 if (!(status & FTGMAC100_RXDES0_RXPKT_RDY)) 367 return false; 368 369 /* Order subsequent reads with the test for the ready bit */ 370 dma_rmb(); 371 372 /* We don't cope with fragmented RX packets */ 373 if (unlikely(!(status & FTGMAC100_RXDES0_FRS) || 374 !(status & FTGMAC100_RXDES0_LRS))) 375 goto drop; 376 377 /* Grab received size and csum vlan field in the descriptor */ 378 size = status & FTGMAC100_RXDES0_VDBC; 379 csum_vlan = le32_to_cpu(rxdes->rxdes1); 380 381 /* Any error (other than csum offload) flagged ? */ 382 if (unlikely(status & RXDES0_ANY_ERROR)) { 383 /* Correct for incorrect flagging of runt packets 384 * with vlan tags... Just accept a runt packet that 385 * has been flagged as vlan and whose size is at 386 * least 60 bytes. 387 */ 388 if ((status & FTGMAC100_RXDES0_RUNT) && 389 (csum_vlan & FTGMAC100_RXDES1_VLANTAG_AVAIL) && 390 (size >= 60)) 391 status &= ~FTGMAC100_RXDES0_RUNT; 392 393 /* Any error still in there ? */ 394 if (status & RXDES0_ANY_ERROR) { 395 ftgmac100_rx_packet_error(priv, status); 396 goto drop; 397 } 398 } 399 400 /* If the packet had no skb (failed to allocate earlier) 401 * then try to allocate one and skip 402 */ 403 skb = priv->rx_skbs[pointer]; 404 if (!unlikely(skb)) { 405 ftgmac100_alloc_rx_buf(priv, pointer, rxdes, GFP_ATOMIC); 406 goto drop; 407 } 408 409 if (unlikely(status & FTGMAC100_RXDES0_MULTICAST)) 410 netdev->stats.multicast++; 411 412 /* If the HW found checksum errors, bounce it to software. 413 * 414 * If we didn't, we need to see if the packet was recognized 415 * by HW as one of the supported checksummed protocols before 416 * we accept the HW test results. 417 */ 418 if (netdev->features & NETIF_F_RXCSUM) { 419 u32 err_bits = FTGMAC100_RXDES1_TCP_CHKSUM_ERR | 420 FTGMAC100_RXDES1_UDP_CHKSUM_ERR | 421 FTGMAC100_RXDES1_IP_CHKSUM_ERR; 422 if ((csum_vlan & err_bits) || 423 !(csum_vlan & FTGMAC100_RXDES1_PROT_MASK)) 424 skb->ip_summed = CHECKSUM_NONE; 425 else 426 skb->ip_summed = CHECKSUM_UNNECESSARY; 427 } 428 429 /* Transfer received size to skb */ 430 skb_put(skb, size); 431 432 /* Tear down DMA mapping, do necessary cache management */ 433 map = le32_to_cpu(rxdes->rxdes3); 434 435 #if defined(CONFIG_ARM) && !defined(CONFIG_ARM_DMA_USE_IOMMU) 436 /* When we don't have an iommu, we can save cycles by not 437 * invalidating the cache for the part of the packet that 438 * wasn't received. 439 */ 440 dma_unmap_single(priv->dev, map, size, DMA_FROM_DEVICE); 441 #else 442 dma_unmap_single(priv->dev, map, RX_BUF_SIZE, DMA_FROM_DEVICE); 443 #endif 444 445 446 /* Resplenish rx ring */ 447 ftgmac100_alloc_rx_buf(priv, pointer, rxdes, GFP_ATOMIC); 448 priv->rx_pointer = ftgmac100_next_rx_pointer(pointer); 449 450 skb->protocol = eth_type_trans(skb, netdev); 451 452 netdev->stats.rx_packets++; 453 netdev->stats.rx_bytes += size; 454 455 /* push packet to protocol stack */ 456 if (skb->ip_summed == CHECKSUM_NONE) 457 netif_receive_skb(skb); 458 else 459 napi_gro_receive(&priv->napi, skb); 460 461 (*processed)++; 462 return true; 463 464 drop: 465 /* Clean rxdes0 (which resets own bit) */ 466 rxdes->rxdes0 = cpu_to_le32(status & priv->rxdes0_edorr_mask); 467 priv->rx_pointer = ftgmac100_next_rx_pointer(pointer); 468 netdev->stats.rx_dropped++; 469 return true; 470 } 471 472 static u32 ftgmac100_base_tx_ctlstat(struct ftgmac100 *priv, 473 unsigned int index) 474 { 475 if (index == (TX_QUEUE_ENTRIES - 1)) 476 return priv->txdes0_edotr_mask; 477 else 478 return 0; 479 } 480 481 static int ftgmac100_next_tx_pointer(int pointer) 482 { 483 return (pointer + 1) & (TX_QUEUE_ENTRIES - 1); 484 } 485 486 static u32 ftgmac100_tx_buf_avail(struct ftgmac100 *priv) 487 { 488 /* Returns the number of available slots in the TX queue 489 * 490 * This always leaves one free slot so we don't have to 491 * worry about empty vs. full, and this simplifies the 492 * test for ftgmac100_tx_buf_cleanable() below 493 */ 494 return (priv->tx_clean_pointer - priv->tx_pointer - 1) & 495 (TX_QUEUE_ENTRIES - 1); 496 } 497 498 static bool ftgmac100_tx_buf_cleanable(struct ftgmac100 *priv) 499 { 500 return priv->tx_pointer != priv->tx_clean_pointer; 501 } 502 503 static void ftgmac100_free_tx_packet(struct ftgmac100 *priv, 504 unsigned int pointer, 505 struct sk_buff *skb, 506 struct ftgmac100_txdes *txdes, 507 u32 ctl_stat) 508 { 509 dma_addr_t map = le32_to_cpu(txdes->txdes3); 510 size_t len; 511 512 if (ctl_stat & FTGMAC100_TXDES0_FTS) { 513 len = skb_headlen(skb); 514 dma_unmap_single(priv->dev, map, len, DMA_TO_DEVICE); 515 } else { 516 len = FTGMAC100_TXDES0_TXBUF_SIZE(ctl_stat); 517 dma_unmap_page(priv->dev, map, len, DMA_TO_DEVICE); 518 } 519 520 /* Free SKB on last segment */ 521 if (ctl_stat & FTGMAC100_TXDES0_LTS) 522 dev_kfree_skb(skb); 523 priv->tx_skbs[pointer] = NULL; 524 } 525 526 static bool ftgmac100_tx_complete_packet(struct ftgmac100 *priv) 527 { 528 struct net_device *netdev = priv->netdev; 529 struct ftgmac100_txdes *txdes; 530 struct sk_buff *skb; 531 unsigned int pointer; 532 u32 ctl_stat; 533 534 pointer = priv->tx_clean_pointer; 535 txdes = &priv->descs->txdes[pointer]; 536 537 ctl_stat = le32_to_cpu(txdes->txdes0); 538 if (ctl_stat & FTGMAC100_TXDES0_TXDMA_OWN) 539 return false; 540 541 skb = priv->tx_skbs[pointer]; 542 netdev->stats.tx_packets++; 543 netdev->stats.tx_bytes += skb->len; 544 ftgmac100_free_tx_packet(priv, pointer, skb, txdes, ctl_stat); 545 txdes->txdes0 = cpu_to_le32(ctl_stat & priv->txdes0_edotr_mask); 546 547 priv->tx_clean_pointer = ftgmac100_next_tx_pointer(pointer); 548 549 return true; 550 } 551 552 static void ftgmac100_tx_complete(struct ftgmac100 *priv) 553 { 554 struct net_device *netdev = priv->netdev; 555 556 /* Process all completed packets */ 557 while (ftgmac100_tx_buf_cleanable(priv) && 558 ftgmac100_tx_complete_packet(priv)) 559 ; 560 561 /* Restart queue if needed */ 562 smp_mb(); 563 if (unlikely(netif_queue_stopped(netdev) && 564 ftgmac100_tx_buf_avail(priv) >= TX_THRESHOLD)) { 565 struct netdev_queue *txq; 566 567 txq = netdev_get_tx_queue(netdev, 0); 568 __netif_tx_lock(txq, smp_processor_id()); 569 if (netif_queue_stopped(netdev) && 570 ftgmac100_tx_buf_avail(priv) >= TX_THRESHOLD) 571 netif_wake_queue(netdev); 572 __netif_tx_unlock(txq); 573 } 574 } 575 576 static bool ftgmac100_prep_tx_csum(struct sk_buff *skb, u32 *csum_vlan) 577 { 578 if (skb->protocol == cpu_to_be16(ETH_P_IP)) { 579 u8 ip_proto = ip_hdr(skb)->protocol; 580 581 *csum_vlan |= FTGMAC100_TXDES1_IP_CHKSUM; 582 switch(ip_proto) { 583 case IPPROTO_TCP: 584 *csum_vlan |= FTGMAC100_TXDES1_TCP_CHKSUM; 585 return true; 586 case IPPROTO_UDP: 587 *csum_vlan |= FTGMAC100_TXDES1_UDP_CHKSUM; 588 return true; 589 case IPPROTO_IP: 590 return true; 591 } 592 } 593 return skb_checksum_help(skb) == 0; 594 } 595 596 static int ftgmac100_hard_start_xmit(struct sk_buff *skb, 597 struct net_device *netdev) 598 { 599 struct ftgmac100 *priv = netdev_priv(netdev); 600 struct ftgmac100_txdes *txdes, *first; 601 unsigned int pointer, nfrags, len, i, j; 602 u32 f_ctl_stat, ctl_stat, csum_vlan; 603 dma_addr_t map; 604 605 /* The HW doesn't pad small frames */ 606 if (eth_skb_pad(skb)) { 607 netdev->stats.tx_dropped++; 608 return NETDEV_TX_OK; 609 } 610 611 /* Reject oversize packets */ 612 if (unlikely(skb->len > MAX_PKT_SIZE)) { 613 if (net_ratelimit()) 614 netdev_dbg(netdev, "tx packet too big\n"); 615 goto drop; 616 } 617 618 /* Do we have a limit on #fragments ? I yet have to get a reply 619 * from Aspeed. If there's one I haven't hit it. 620 */ 621 nfrags = skb_shinfo(skb)->nr_frags; 622 623 /* Get header len */ 624 len = skb_headlen(skb); 625 626 /* Map the packet head */ 627 map = dma_map_single(priv->dev, skb->data, len, DMA_TO_DEVICE); 628 if (dma_mapping_error(priv->dev, map)) { 629 if (net_ratelimit()) 630 netdev_err(netdev, "map tx packet head failed\n"); 631 goto drop; 632 } 633 634 /* Grab the next free tx descriptor */ 635 pointer = priv->tx_pointer; 636 txdes = first = &priv->descs->txdes[pointer]; 637 638 /* Setup it up with the packet head. Don't write the head to the 639 * ring just yet 640 */ 641 priv->tx_skbs[pointer] = skb; 642 f_ctl_stat = ftgmac100_base_tx_ctlstat(priv, pointer); 643 f_ctl_stat |= FTGMAC100_TXDES0_TXDMA_OWN; 644 f_ctl_stat |= FTGMAC100_TXDES0_TXBUF_SIZE(len); 645 f_ctl_stat |= FTGMAC100_TXDES0_FTS; 646 if (nfrags == 0) 647 f_ctl_stat |= FTGMAC100_TXDES0_LTS; 648 txdes->txdes3 = cpu_to_le32(map); 649 650 /* Setup HW checksumming */ 651 csum_vlan = 0; 652 if (skb->ip_summed == CHECKSUM_PARTIAL && 653 !ftgmac100_prep_tx_csum(skb, &csum_vlan)) 654 goto drop; 655 txdes->txdes1 = cpu_to_le32(csum_vlan); 656 657 /* Next descriptor */ 658 pointer = ftgmac100_next_tx_pointer(pointer); 659 660 /* Add the fragments */ 661 for (i = 0; i < nfrags; i++) { 662 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 663 664 len = frag->size; 665 666 /* Map it */ 667 map = skb_frag_dma_map(priv->dev, frag, 0, len, 668 DMA_TO_DEVICE); 669 if (dma_mapping_error(priv->dev, map)) 670 goto dma_err; 671 672 /* Setup descriptor */ 673 priv->tx_skbs[pointer] = skb; 674 txdes = &priv->descs->txdes[pointer]; 675 ctl_stat = ftgmac100_base_tx_ctlstat(priv, pointer); 676 ctl_stat |= FTGMAC100_TXDES0_TXDMA_OWN; 677 ctl_stat |= FTGMAC100_TXDES0_TXBUF_SIZE(len); 678 if (i == (nfrags - 1)) 679 ctl_stat |= FTGMAC100_TXDES0_LTS; 680 txdes->txdes0 = cpu_to_le32(ctl_stat); 681 txdes->txdes1 = 0; 682 txdes->txdes3 = cpu_to_le32(map); 683 684 /* Next one */ 685 pointer = ftgmac100_next_tx_pointer(pointer); 686 } 687 688 /* Order the previous packet and descriptor udpates 689 * before setting the OWN bit on the first descriptor. 690 */ 691 dma_wmb(); 692 first->txdes0 = cpu_to_le32(f_ctl_stat); 693 694 /* Update next TX pointer */ 695 priv->tx_pointer = pointer; 696 697 /* If there isn't enough room for all the fragments of a new packet 698 * in the TX ring, stop the queue. The sequence below is race free 699 * vs. a concurrent restart in ftgmac100_poll() 700 */ 701 if (unlikely(ftgmac100_tx_buf_avail(priv) < TX_THRESHOLD)) { 702 netif_stop_queue(netdev); 703 /* Order the queue stop with the test below */ 704 smp_mb(); 705 if (ftgmac100_tx_buf_avail(priv) >= TX_THRESHOLD) 706 netif_wake_queue(netdev); 707 } 708 709 ftgmac100_txdma_normal_prio_start_polling(priv); 710 711 return NETDEV_TX_OK; 712 713 dma_err: 714 if (net_ratelimit()) 715 netdev_err(netdev, "map tx fragment failed\n"); 716 717 /* Free head */ 718 pointer = priv->tx_pointer; 719 ftgmac100_free_tx_packet(priv, pointer, skb, first, f_ctl_stat); 720 first->txdes0 = cpu_to_le32(f_ctl_stat & priv->txdes0_edotr_mask); 721 722 /* Then all fragments */ 723 for (j = 0; j < i; j++) { 724 pointer = ftgmac100_next_tx_pointer(pointer); 725 txdes = &priv->descs->txdes[pointer]; 726 ctl_stat = le32_to_cpu(txdes->txdes0); 727 ftgmac100_free_tx_packet(priv, pointer, skb, txdes, ctl_stat); 728 txdes->txdes0 = cpu_to_le32(ctl_stat & priv->txdes0_edotr_mask); 729 } 730 731 /* This cannot be reached if we successfully mapped the 732 * last fragment, so we know ftgmac100_free_tx_packet() 733 * hasn't freed the skb yet. 734 */ 735 drop: 736 /* Drop the packet */ 737 dev_kfree_skb_any(skb); 738 netdev->stats.tx_dropped++; 739 740 return NETDEV_TX_OK; 741 } 742 743 static void ftgmac100_free_buffers(struct ftgmac100 *priv) 744 { 745 int i; 746 747 /* Free all RX buffers */ 748 for (i = 0; i < RX_QUEUE_ENTRIES; i++) { 749 struct ftgmac100_rxdes *rxdes = &priv->descs->rxdes[i]; 750 struct sk_buff *skb = priv->rx_skbs[i]; 751 dma_addr_t map = le32_to_cpu(rxdes->rxdes3); 752 753 if (!skb) 754 continue; 755 756 priv->rx_skbs[i] = NULL; 757 dma_unmap_single(priv->dev, map, RX_BUF_SIZE, DMA_FROM_DEVICE); 758 dev_kfree_skb_any(skb); 759 } 760 761 /* Free all TX buffers */ 762 for (i = 0; i < TX_QUEUE_ENTRIES; i++) { 763 struct ftgmac100_txdes *txdes = &priv->descs->txdes[i]; 764 struct sk_buff *skb = priv->tx_skbs[i]; 765 766 if (!skb) 767 continue; 768 ftgmac100_free_tx_packet(priv, i, skb, txdes, 769 le32_to_cpu(txdes->txdes0)); 770 } 771 } 772 773 static void ftgmac100_free_rings(struct ftgmac100 *priv) 774 { 775 /* Free descriptors */ 776 if (priv->descs) 777 dma_free_coherent(priv->dev, sizeof(struct ftgmac100_descs), 778 priv->descs, priv->descs_dma_addr); 779 780 /* Free scratch packet buffer */ 781 if (priv->rx_scratch) 782 dma_free_coherent(priv->dev, RX_BUF_SIZE, 783 priv->rx_scratch, priv->rx_scratch_dma); 784 } 785 786 static int ftgmac100_alloc_rings(struct ftgmac100 *priv) 787 { 788 /* Allocate descriptors */ 789 priv->descs = dma_zalloc_coherent(priv->dev, 790 sizeof(struct ftgmac100_descs), 791 &priv->descs_dma_addr, GFP_KERNEL); 792 if (!priv->descs) 793 return -ENOMEM; 794 795 /* Allocate scratch packet buffer */ 796 priv->rx_scratch = dma_alloc_coherent(priv->dev, 797 RX_BUF_SIZE, 798 &priv->rx_scratch_dma, 799 GFP_KERNEL); 800 if (!priv->rx_scratch) 801 return -ENOMEM; 802 803 return 0; 804 } 805 806 static void ftgmac100_init_rings(struct ftgmac100 *priv) 807 { 808 struct ftgmac100_rxdes *rxdes; 809 struct ftgmac100_txdes *txdes; 810 int i; 811 812 /* Initialize RX ring */ 813 for (i = 0; i < RX_QUEUE_ENTRIES; i++) { 814 rxdes = &priv->descs->rxdes[i]; 815 rxdes->rxdes0 = 0; 816 rxdes->rxdes3 = cpu_to_le32(priv->rx_scratch_dma); 817 } 818 /* Mark the end of the ring */ 819 rxdes->rxdes0 |= cpu_to_le32(priv->rxdes0_edorr_mask); 820 821 /* Initialize TX ring */ 822 for (i = 0; i < TX_QUEUE_ENTRIES; i++) { 823 txdes = &priv->descs->txdes[i]; 824 txdes->txdes0 = 0; 825 } 826 txdes->txdes0 |= cpu_to_le32(priv->txdes0_edotr_mask); 827 } 828 829 static int ftgmac100_alloc_rx_buffers(struct ftgmac100 *priv) 830 { 831 int i; 832 833 for (i = 0; i < RX_QUEUE_ENTRIES; i++) { 834 struct ftgmac100_rxdes *rxdes = &priv->descs->rxdes[i]; 835 836 if (ftgmac100_alloc_rx_buf(priv, i, rxdes, GFP_KERNEL)) 837 return -ENOMEM; 838 } 839 return 0; 840 } 841 842 static void ftgmac100_adjust_link(struct net_device *netdev) 843 { 844 struct ftgmac100 *priv = netdev_priv(netdev); 845 struct phy_device *phydev = netdev->phydev; 846 int new_speed; 847 848 /* We store "no link" as speed 0 */ 849 if (!phydev->link) 850 new_speed = 0; 851 else 852 new_speed = phydev->speed; 853 854 if (phydev->speed == priv->cur_speed && 855 phydev->duplex == priv->cur_duplex) 856 return; 857 858 /* Print status if we have a link or we had one and just lost it, 859 * don't print otherwise. 860 */ 861 if (new_speed || priv->cur_speed) 862 phy_print_status(phydev); 863 864 priv->cur_speed = new_speed; 865 priv->cur_duplex = phydev->duplex; 866 867 /* Link is down, do nothing else */ 868 if (!new_speed) 869 return; 870 871 /* Disable all interrupts */ 872 iowrite32(0, priv->base + FTGMAC100_OFFSET_IER); 873 874 /* Reset the adapter asynchronously */ 875 schedule_work(&priv->reset_task); 876 } 877 878 static int ftgmac100_mii_probe(struct ftgmac100 *priv) 879 { 880 struct net_device *netdev = priv->netdev; 881 struct phy_device *phydev; 882 883 phydev = phy_find_first(priv->mii_bus); 884 if (!phydev) { 885 netdev_info(netdev, "%s: no PHY found\n", netdev->name); 886 return -ENODEV; 887 } 888 889 phydev = phy_connect(netdev, phydev_name(phydev), 890 &ftgmac100_adjust_link, PHY_INTERFACE_MODE_GMII); 891 892 if (IS_ERR(phydev)) { 893 netdev_err(netdev, "%s: Could not attach to PHY\n", netdev->name); 894 return PTR_ERR(phydev); 895 } 896 897 return 0; 898 } 899 900 static int ftgmac100_mdiobus_read(struct mii_bus *bus, int phy_addr, int regnum) 901 { 902 struct net_device *netdev = bus->priv; 903 struct ftgmac100 *priv = netdev_priv(netdev); 904 unsigned int phycr; 905 int i; 906 907 phycr = ioread32(priv->base + FTGMAC100_OFFSET_PHYCR); 908 909 /* preserve MDC cycle threshold */ 910 phycr &= FTGMAC100_PHYCR_MDC_CYCTHR_MASK; 911 912 phycr |= FTGMAC100_PHYCR_PHYAD(phy_addr) | 913 FTGMAC100_PHYCR_REGAD(regnum) | 914 FTGMAC100_PHYCR_MIIRD; 915 916 iowrite32(phycr, priv->base + FTGMAC100_OFFSET_PHYCR); 917 918 for (i = 0; i < 10; i++) { 919 phycr = ioread32(priv->base + FTGMAC100_OFFSET_PHYCR); 920 921 if ((phycr & FTGMAC100_PHYCR_MIIRD) == 0) { 922 int data; 923 924 data = ioread32(priv->base + FTGMAC100_OFFSET_PHYDATA); 925 return FTGMAC100_PHYDATA_MIIRDATA(data); 926 } 927 928 udelay(100); 929 } 930 931 netdev_err(netdev, "mdio read timed out\n"); 932 return -EIO; 933 } 934 935 static int ftgmac100_mdiobus_write(struct mii_bus *bus, int phy_addr, 936 int regnum, u16 value) 937 { 938 struct net_device *netdev = bus->priv; 939 struct ftgmac100 *priv = netdev_priv(netdev); 940 unsigned int phycr; 941 int data; 942 int i; 943 944 phycr = ioread32(priv->base + FTGMAC100_OFFSET_PHYCR); 945 946 /* preserve MDC cycle threshold */ 947 phycr &= FTGMAC100_PHYCR_MDC_CYCTHR_MASK; 948 949 phycr |= FTGMAC100_PHYCR_PHYAD(phy_addr) | 950 FTGMAC100_PHYCR_REGAD(regnum) | 951 FTGMAC100_PHYCR_MIIWR; 952 953 data = FTGMAC100_PHYDATA_MIIWDATA(value); 954 955 iowrite32(data, priv->base + FTGMAC100_OFFSET_PHYDATA); 956 iowrite32(phycr, priv->base + FTGMAC100_OFFSET_PHYCR); 957 958 for (i = 0; i < 10; i++) { 959 phycr = ioread32(priv->base + FTGMAC100_OFFSET_PHYCR); 960 961 if ((phycr & FTGMAC100_PHYCR_MIIWR) == 0) 962 return 0; 963 964 udelay(100); 965 } 966 967 netdev_err(netdev, "mdio write timed out\n"); 968 return -EIO; 969 } 970 971 static void ftgmac100_get_drvinfo(struct net_device *netdev, 972 struct ethtool_drvinfo *info) 973 { 974 strlcpy(info->driver, DRV_NAME, sizeof(info->driver)); 975 strlcpy(info->version, DRV_VERSION, sizeof(info->version)); 976 strlcpy(info->bus_info, dev_name(&netdev->dev), sizeof(info->bus_info)); 977 } 978 979 static const struct ethtool_ops ftgmac100_ethtool_ops = { 980 .get_drvinfo = ftgmac100_get_drvinfo, 981 .get_link = ethtool_op_get_link, 982 .get_link_ksettings = phy_ethtool_get_link_ksettings, 983 .set_link_ksettings = phy_ethtool_set_link_ksettings, 984 }; 985 986 static irqreturn_t ftgmac100_interrupt(int irq, void *dev_id) 987 { 988 struct net_device *netdev = dev_id; 989 struct ftgmac100 *priv = netdev_priv(netdev); 990 unsigned int status, new_mask = FTGMAC100_INT_BAD; 991 992 /* Fetch and clear interrupt bits, process abnormal ones */ 993 status = ioread32(priv->base + FTGMAC100_OFFSET_ISR); 994 iowrite32(status, priv->base + FTGMAC100_OFFSET_ISR); 995 if (unlikely(status & FTGMAC100_INT_BAD)) { 996 997 /* RX buffer unavailable */ 998 if (status & FTGMAC100_INT_NO_RXBUF) 999 netdev->stats.rx_over_errors++; 1000 1001 /* received packet lost due to RX FIFO full */ 1002 if (status & FTGMAC100_INT_RPKT_LOST) 1003 netdev->stats.rx_fifo_errors++; 1004 1005 /* sent packet lost due to excessive TX collision */ 1006 if (status & FTGMAC100_INT_XPKT_LOST) 1007 netdev->stats.tx_fifo_errors++; 1008 1009 /* AHB error -> Reset the chip */ 1010 if (status & FTGMAC100_INT_AHB_ERR) { 1011 if (net_ratelimit()) 1012 netdev_warn(netdev, 1013 "AHB bus error ! Resetting chip.\n"); 1014 iowrite32(0, priv->base + FTGMAC100_OFFSET_IER); 1015 schedule_work(&priv->reset_task); 1016 return IRQ_HANDLED; 1017 } 1018 1019 /* We may need to restart the MAC after such errors, delay 1020 * this until after we have freed some Rx buffers though 1021 */ 1022 priv->need_mac_restart = true; 1023 1024 /* Disable those errors until we restart */ 1025 new_mask &= ~status; 1026 } 1027 1028 /* Only enable "bad" interrupts while NAPI is on */ 1029 iowrite32(new_mask, priv->base + FTGMAC100_OFFSET_IER); 1030 1031 /* Schedule NAPI bh */ 1032 napi_schedule_irqoff(&priv->napi); 1033 1034 return IRQ_HANDLED; 1035 } 1036 1037 static bool ftgmac100_check_rx(struct ftgmac100 *priv) 1038 { 1039 struct ftgmac100_rxdes *rxdes = &priv->descs->rxdes[priv->rx_pointer]; 1040 1041 /* Do we have a packet ? */ 1042 return !!(rxdes->rxdes0 & cpu_to_le32(FTGMAC100_RXDES0_RXPKT_RDY)); 1043 } 1044 1045 static int ftgmac100_poll(struct napi_struct *napi, int budget) 1046 { 1047 struct ftgmac100 *priv = container_of(napi, struct ftgmac100, napi); 1048 int work_done = 0; 1049 bool more; 1050 1051 /* Handle TX completions */ 1052 if (ftgmac100_tx_buf_cleanable(priv)) 1053 ftgmac100_tx_complete(priv); 1054 1055 /* Handle RX packets */ 1056 do { 1057 more = ftgmac100_rx_packet(priv, &work_done); 1058 } while (more && work_done < budget); 1059 1060 1061 /* The interrupt is telling us to kick the MAC back to life 1062 * after an RX overflow 1063 */ 1064 if (unlikely(priv->need_mac_restart)) { 1065 ftgmac100_start_hw(priv); 1066 1067 /* Re-enable "bad" interrupts */ 1068 iowrite32(FTGMAC100_INT_BAD, 1069 priv->base + FTGMAC100_OFFSET_IER); 1070 } 1071 1072 /* As long as we are waiting for transmit packets to be 1073 * completed we keep NAPI going 1074 */ 1075 if (ftgmac100_tx_buf_cleanable(priv)) 1076 work_done = budget; 1077 1078 if (work_done < budget) { 1079 /* We are about to re-enable all interrupts. However 1080 * the HW has been latching RX/TX packet interrupts while 1081 * they were masked. So we clear them first, then we need 1082 * to re-check if there's something to process 1083 */ 1084 iowrite32(FTGMAC100_INT_RXTX, 1085 priv->base + FTGMAC100_OFFSET_ISR); 1086 if (ftgmac100_check_rx(priv) || 1087 ftgmac100_tx_buf_cleanable(priv)) 1088 return budget; 1089 1090 /* deschedule NAPI */ 1091 napi_complete(napi); 1092 1093 /* enable all interrupts */ 1094 iowrite32(FTGMAC100_INT_ALL, 1095 priv->base + FTGMAC100_OFFSET_IER); 1096 } 1097 1098 return work_done; 1099 } 1100 1101 static int ftgmac100_init_all(struct ftgmac100 *priv, bool ignore_alloc_err) 1102 { 1103 int err = 0; 1104 1105 /* Re-init descriptors (adjust queue sizes) */ 1106 ftgmac100_init_rings(priv); 1107 1108 /* Realloc rx descriptors */ 1109 err = ftgmac100_alloc_rx_buffers(priv); 1110 if (err && !ignore_alloc_err) 1111 return err; 1112 1113 /* Reinit and restart HW */ 1114 ftgmac100_init_hw(priv); 1115 ftgmac100_start_hw(priv); 1116 1117 /* Re-enable the device */ 1118 napi_enable(&priv->napi); 1119 netif_start_queue(priv->netdev); 1120 1121 /* Enable all interrupts */ 1122 iowrite32(FTGMAC100_INT_ALL, priv->base + FTGMAC100_OFFSET_IER); 1123 1124 return err; 1125 } 1126 1127 static void ftgmac100_reset_task(struct work_struct *work) 1128 { 1129 struct ftgmac100 *priv = container_of(work, struct ftgmac100, 1130 reset_task); 1131 struct net_device *netdev = priv->netdev; 1132 int err; 1133 1134 netdev_dbg(netdev, "Resetting NIC...\n"); 1135 1136 /* Lock the world */ 1137 rtnl_lock(); 1138 if (netdev->phydev) 1139 mutex_lock(&netdev->phydev->lock); 1140 if (priv->mii_bus) 1141 mutex_lock(&priv->mii_bus->mdio_lock); 1142 1143 1144 /* Check if the interface is still up */ 1145 if (!netif_running(netdev)) 1146 goto bail; 1147 1148 /* Stop the network stack */ 1149 netif_trans_update(netdev); 1150 napi_disable(&priv->napi); 1151 netif_tx_disable(netdev); 1152 1153 /* Stop and reset the MAC */ 1154 ftgmac100_stop_hw(priv); 1155 err = ftgmac100_reset_and_config_mac(priv); 1156 if (err) { 1157 /* Not much we can do ... it might come back... */ 1158 netdev_err(netdev, "attempting to continue...\n"); 1159 } 1160 1161 /* Free all rx and tx buffers */ 1162 ftgmac100_free_buffers(priv); 1163 1164 /* Setup everything again and restart chip */ 1165 ftgmac100_init_all(priv, true); 1166 1167 netdev_dbg(netdev, "Reset done !\n"); 1168 bail: 1169 if (priv->mii_bus) 1170 mutex_unlock(&priv->mii_bus->mdio_lock); 1171 if (netdev->phydev) 1172 mutex_unlock(&netdev->phydev->lock); 1173 rtnl_unlock(); 1174 } 1175 1176 static int ftgmac100_open(struct net_device *netdev) 1177 { 1178 struct ftgmac100 *priv = netdev_priv(netdev); 1179 int err; 1180 1181 /* Allocate ring buffers */ 1182 err = ftgmac100_alloc_rings(priv); 1183 if (err) { 1184 netdev_err(netdev, "Failed to allocate descriptors\n"); 1185 return err; 1186 } 1187 1188 /* When using NC-SI we force the speed to 100Mbit/s full duplex, 1189 * 1190 * Otherwise we leave it set to 0 (no link), the link 1191 * message from the PHY layer will handle setting it up to 1192 * something else if needed. 1193 */ 1194 if (priv->use_ncsi) { 1195 priv->cur_duplex = DUPLEX_FULL; 1196 priv->cur_speed = SPEED_100; 1197 } else { 1198 priv->cur_duplex = 0; 1199 priv->cur_speed = 0; 1200 } 1201 1202 /* Reset the hardware */ 1203 err = ftgmac100_reset_and_config_mac(priv); 1204 if (err) 1205 goto err_hw; 1206 1207 /* Initialize NAPI */ 1208 netif_napi_add(netdev, &priv->napi, ftgmac100_poll, 64); 1209 1210 /* Grab our interrupt */ 1211 err = request_irq(netdev->irq, ftgmac100_interrupt, 0, netdev->name, netdev); 1212 if (err) { 1213 netdev_err(netdev, "failed to request irq %d\n", netdev->irq); 1214 goto err_irq; 1215 } 1216 1217 /* Start things up */ 1218 err = ftgmac100_init_all(priv, false); 1219 if (err) { 1220 netdev_err(netdev, "Failed to allocate packet buffers\n"); 1221 goto err_alloc; 1222 } 1223 1224 if (netdev->phydev) { 1225 /* If we have a PHY, start polling */ 1226 phy_start(netdev->phydev); 1227 } else if (priv->use_ncsi) { 1228 /* If using NC-SI, set our carrier on and start the stack */ 1229 netif_carrier_on(netdev); 1230 1231 /* Start the NCSI device */ 1232 err = ncsi_start_dev(priv->ndev); 1233 if (err) 1234 goto err_ncsi; 1235 } 1236 1237 return 0; 1238 1239 err_ncsi: 1240 napi_disable(&priv->napi); 1241 netif_stop_queue(netdev); 1242 err_alloc: 1243 ftgmac100_free_buffers(priv); 1244 free_irq(netdev->irq, netdev); 1245 err_irq: 1246 netif_napi_del(&priv->napi); 1247 err_hw: 1248 iowrite32(0, priv->base + FTGMAC100_OFFSET_IER); 1249 ftgmac100_free_rings(priv); 1250 return err; 1251 } 1252 1253 static int ftgmac100_stop(struct net_device *netdev) 1254 { 1255 struct ftgmac100 *priv = netdev_priv(netdev); 1256 1257 /* Note about the reset task: We are called with the rtnl lock 1258 * held, so we are synchronized against the core of the reset 1259 * task. We must not try to synchronously cancel it otherwise 1260 * we can deadlock. But since it will test for netif_running() 1261 * which has already been cleared by the net core, we don't 1262 * anything special to do. 1263 */ 1264 1265 /* disable all interrupts */ 1266 iowrite32(0, priv->base + FTGMAC100_OFFSET_IER); 1267 1268 netif_stop_queue(netdev); 1269 napi_disable(&priv->napi); 1270 netif_napi_del(&priv->napi); 1271 if (netdev->phydev) 1272 phy_stop(netdev->phydev); 1273 else if (priv->use_ncsi) 1274 ncsi_stop_dev(priv->ndev); 1275 1276 ftgmac100_stop_hw(priv); 1277 free_irq(netdev->irq, netdev); 1278 ftgmac100_free_buffers(priv); 1279 ftgmac100_free_rings(priv); 1280 1281 return 0; 1282 } 1283 1284 /* optional */ 1285 static int ftgmac100_do_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) 1286 { 1287 if (!netdev->phydev) 1288 return -ENXIO; 1289 1290 return phy_mii_ioctl(netdev->phydev, ifr, cmd); 1291 } 1292 1293 static void ftgmac100_tx_timeout(struct net_device *netdev) 1294 { 1295 struct ftgmac100 *priv = netdev_priv(netdev); 1296 1297 /* Disable all interrupts */ 1298 iowrite32(0, priv->base + FTGMAC100_OFFSET_IER); 1299 1300 /* Do the reset outside of interrupt context */ 1301 schedule_work(&priv->reset_task); 1302 } 1303 1304 static const struct net_device_ops ftgmac100_netdev_ops = { 1305 .ndo_open = ftgmac100_open, 1306 .ndo_stop = ftgmac100_stop, 1307 .ndo_start_xmit = ftgmac100_hard_start_xmit, 1308 .ndo_set_mac_address = ftgmac100_set_mac_addr, 1309 .ndo_validate_addr = eth_validate_addr, 1310 .ndo_do_ioctl = ftgmac100_do_ioctl, 1311 .ndo_tx_timeout = ftgmac100_tx_timeout, 1312 }; 1313 1314 static int ftgmac100_setup_mdio(struct net_device *netdev) 1315 { 1316 struct ftgmac100 *priv = netdev_priv(netdev); 1317 struct platform_device *pdev = to_platform_device(priv->dev); 1318 int i, err = 0; 1319 u32 reg; 1320 1321 /* initialize mdio bus */ 1322 priv->mii_bus = mdiobus_alloc(); 1323 if (!priv->mii_bus) 1324 return -EIO; 1325 1326 if (priv->is_aspeed) { 1327 /* This driver supports the old MDIO interface */ 1328 reg = ioread32(priv->base + FTGMAC100_OFFSET_REVR); 1329 reg &= ~FTGMAC100_REVR_NEW_MDIO_INTERFACE; 1330 iowrite32(reg, priv->base + FTGMAC100_OFFSET_REVR); 1331 }; 1332 1333 priv->mii_bus->name = "ftgmac100_mdio"; 1334 snprintf(priv->mii_bus->id, MII_BUS_ID_SIZE, "%s-%d", 1335 pdev->name, pdev->id); 1336 priv->mii_bus->priv = priv->netdev; 1337 priv->mii_bus->read = ftgmac100_mdiobus_read; 1338 priv->mii_bus->write = ftgmac100_mdiobus_write; 1339 1340 for (i = 0; i < PHY_MAX_ADDR; i++) 1341 priv->mii_bus->irq[i] = PHY_POLL; 1342 1343 err = mdiobus_register(priv->mii_bus); 1344 if (err) { 1345 dev_err(priv->dev, "Cannot register MDIO bus!\n"); 1346 goto err_register_mdiobus; 1347 } 1348 1349 err = ftgmac100_mii_probe(priv); 1350 if (err) { 1351 dev_err(priv->dev, "MII Probe failed!\n"); 1352 goto err_mii_probe; 1353 } 1354 1355 return 0; 1356 1357 err_mii_probe: 1358 mdiobus_unregister(priv->mii_bus); 1359 err_register_mdiobus: 1360 mdiobus_free(priv->mii_bus); 1361 return err; 1362 } 1363 1364 static void ftgmac100_destroy_mdio(struct net_device *netdev) 1365 { 1366 struct ftgmac100 *priv = netdev_priv(netdev); 1367 1368 if (!netdev->phydev) 1369 return; 1370 1371 phy_disconnect(netdev->phydev); 1372 mdiobus_unregister(priv->mii_bus); 1373 mdiobus_free(priv->mii_bus); 1374 } 1375 1376 static void ftgmac100_ncsi_handler(struct ncsi_dev *nd) 1377 { 1378 if (unlikely(nd->state != ncsi_dev_state_functional)) 1379 return; 1380 1381 netdev_info(nd->dev, "NCSI interface %s\n", 1382 nd->link_up ? "up" : "down"); 1383 } 1384 1385 static int ftgmac100_probe(struct platform_device *pdev) 1386 { 1387 struct resource *res; 1388 int irq; 1389 struct net_device *netdev; 1390 struct ftgmac100 *priv; 1391 struct device_node *np; 1392 int err = 0; 1393 1394 if (!pdev) 1395 return -ENODEV; 1396 1397 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1398 if (!res) 1399 return -ENXIO; 1400 1401 irq = platform_get_irq(pdev, 0); 1402 if (irq < 0) 1403 return irq; 1404 1405 /* setup net_device */ 1406 netdev = alloc_etherdev(sizeof(*priv)); 1407 if (!netdev) { 1408 err = -ENOMEM; 1409 goto err_alloc_etherdev; 1410 } 1411 1412 SET_NETDEV_DEV(netdev, &pdev->dev); 1413 1414 netdev->ethtool_ops = &ftgmac100_ethtool_ops; 1415 netdev->netdev_ops = &ftgmac100_netdev_ops; 1416 netdev->watchdog_timeo = 5 * HZ; 1417 1418 platform_set_drvdata(pdev, netdev); 1419 1420 /* setup private data */ 1421 priv = netdev_priv(netdev); 1422 priv->netdev = netdev; 1423 priv->dev = &pdev->dev; 1424 INIT_WORK(&priv->reset_task, ftgmac100_reset_task); 1425 1426 /* map io memory */ 1427 priv->res = request_mem_region(res->start, resource_size(res), 1428 dev_name(&pdev->dev)); 1429 if (!priv->res) { 1430 dev_err(&pdev->dev, "Could not reserve memory region\n"); 1431 err = -ENOMEM; 1432 goto err_req_mem; 1433 } 1434 1435 priv->base = ioremap(res->start, resource_size(res)); 1436 if (!priv->base) { 1437 dev_err(&pdev->dev, "Failed to ioremap ethernet registers\n"); 1438 err = -EIO; 1439 goto err_ioremap; 1440 } 1441 1442 netdev->irq = irq; 1443 1444 /* MAC address from chip or random one */ 1445 ftgmac100_initial_mac(priv); 1446 1447 np = pdev->dev.of_node; 1448 if (np && (of_device_is_compatible(np, "aspeed,ast2400-mac") || 1449 of_device_is_compatible(np, "aspeed,ast2500-mac"))) { 1450 priv->rxdes0_edorr_mask = BIT(30); 1451 priv->txdes0_edotr_mask = BIT(30); 1452 priv->is_aspeed = true; 1453 } else { 1454 priv->rxdes0_edorr_mask = BIT(15); 1455 priv->txdes0_edotr_mask = BIT(15); 1456 } 1457 1458 if (np && of_get_property(np, "use-ncsi", NULL)) { 1459 if (!IS_ENABLED(CONFIG_NET_NCSI)) { 1460 dev_err(&pdev->dev, "NCSI stack not enabled\n"); 1461 goto err_ncsi_dev; 1462 } 1463 1464 dev_info(&pdev->dev, "Using NCSI interface\n"); 1465 priv->use_ncsi = true; 1466 priv->ndev = ncsi_register_dev(netdev, ftgmac100_ncsi_handler); 1467 if (!priv->ndev) 1468 goto err_ncsi_dev; 1469 } else { 1470 priv->use_ncsi = false; 1471 err = ftgmac100_setup_mdio(netdev); 1472 if (err) 1473 goto err_setup_mdio; 1474 } 1475 1476 /* Base feature set */ 1477 netdev->hw_features = NETIF_F_RXCSUM | NETIF_F_HW_CSUM | 1478 NETIF_F_GRO | NETIF_F_SG; 1479 1480 /* AST2400 doesn't have working HW checksum generation */ 1481 if (np && (of_device_is_compatible(np, "aspeed,ast2400-mac"))) 1482 netdev->hw_features &= ~NETIF_F_HW_CSUM; 1483 if (np && of_get_property(np, "no-hw-checksum", NULL)) 1484 netdev->hw_features &= ~(NETIF_F_HW_CSUM | NETIF_F_RXCSUM); 1485 netdev->features |= netdev->hw_features; 1486 1487 /* register network device */ 1488 err = register_netdev(netdev); 1489 if (err) { 1490 dev_err(&pdev->dev, "Failed to register netdev\n"); 1491 goto err_register_netdev; 1492 } 1493 1494 netdev_info(netdev, "irq %d, mapped at %p\n", netdev->irq, priv->base); 1495 1496 return 0; 1497 1498 err_ncsi_dev: 1499 err_register_netdev: 1500 ftgmac100_destroy_mdio(netdev); 1501 err_setup_mdio: 1502 iounmap(priv->base); 1503 err_ioremap: 1504 release_resource(priv->res); 1505 err_req_mem: 1506 netif_napi_del(&priv->napi); 1507 free_netdev(netdev); 1508 err_alloc_etherdev: 1509 return err; 1510 } 1511 1512 static int ftgmac100_remove(struct platform_device *pdev) 1513 { 1514 struct net_device *netdev; 1515 struct ftgmac100 *priv; 1516 1517 netdev = platform_get_drvdata(pdev); 1518 priv = netdev_priv(netdev); 1519 1520 unregister_netdev(netdev); 1521 1522 /* There's a small chance the reset task will have been re-queued, 1523 * during stop, make sure it's gone before we free the structure. 1524 */ 1525 cancel_work_sync(&priv->reset_task); 1526 1527 ftgmac100_destroy_mdio(netdev); 1528 1529 iounmap(priv->base); 1530 release_resource(priv->res); 1531 1532 netif_napi_del(&priv->napi); 1533 free_netdev(netdev); 1534 return 0; 1535 } 1536 1537 static const struct of_device_id ftgmac100_of_match[] = { 1538 { .compatible = "faraday,ftgmac100" }, 1539 { } 1540 }; 1541 MODULE_DEVICE_TABLE(of, ftgmac100_of_match); 1542 1543 static struct platform_driver ftgmac100_driver = { 1544 .probe = ftgmac100_probe, 1545 .remove = ftgmac100_remove, 1546 .driver = { 1547 .name = DRV_NAME, 1548 .of_match_table = ftgmac100_of_match, 1549 }, 1550 }; 1551 module_platform_driver(ftgmac100_driver); 1552 1553 MODULE_AUTHOR("Po-Yu Chuang <ratbert@faraday-tech.com>"); 1554 MODULE_DESCRIPTION("FTGMAC100 driver"); 1555 MODULE_LICENSE("GPL"); 1556