1 /* 2 * Faraday FTGMAC100 Gigabit Ethernet 3 * 4 * (C) Copyright 2009-2011 Faraday Technology 5 * Po-Yu Chuang <ratbert@faraday-tech.com> 6 * 7 * This program is free software; you can redistribute it and/or modify 8 * it under the terms of the GNU General Public License as published by 9 * the Free Software Foundation; either version 2 of the License, or 10 * (at your option) any later version. 11 * 12 * This program is distributed in the hope that it will be useful, 13 * but WITHOUT ANY WARRANTY; without even the implied warranty of 14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 15 * GNU General Public License for more details. 16 * 17 * You should have received a copy of the GNU General Public License 18 * along with this program; if not, write to the Free Software 19 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 20 */ 21 22 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 23 24 #include <linux/dma-mapping.h> 25 #include <linux/etherdevice.h> 26 #include <linux/ethtool.h> 27 #include <linux/interrupt.h> 28 #include <linux/io.h> 29 #include <linux/module.h> 30 #include <linux/netdevice.h> 31 #include <linux/of.h> 32 #include <linux/phy.h> 33 #include <linux/platform_device.h> 34 #include <linux/property.h> 35 #include <net/ip.h> 36 #include <net/ncsi.h> 37 38 #include "ftgmac100.h" 39 40 #define DRV_NAME "ftgmac100" 41 #define DRV_VERSION "0.7" 42 43 /* Arbitrary values, I am not sure the HW has limits */ 44 #define MAX_RX_QUEUE_ENTRIES 1024 45 #define MAX_TX_QUEUE_ENTRIES 1024 46 #define MIN_RX_QUEUE_ENTRIES 32 47 #define MIN_TX_QUEUE_ENTRIES 32 48 49 /* Defaults */ 50 #define DEF_RX_QUEUE_ENTRIES 128 51 #define DEF_TX_QUEUE_ENTRIES 128 52 53 #define MAX_PKT_SIZE 1536 54 #define RX_BUF_SIZE MAX_PKT_SIZE /* must be smaller than 0x3fff */ 55 56 /* Min number of tx ring entries before stopping queue */ 57 #define TX_THRESHOLD (MAX_SKB_FRAGS + 1) 58 59 struct ftgmac100 { 60 /* Registers */ 61 struct resource *res; 62 void __iomem *base; 
63 64 /* Rx ring */ 65 unsigned int rx_q_entries; 66 struct ftgmac100_rxdes *rxdes; 67 dma_addr_t rxdes_dma; 68 struct sk_buff **rx_skbs; 69 unsigned int rx_pointer; 70 u32 rxdes0_edorr_mask; 71 72 /* Tx ring */ 73 unsigned int tx_q_entries; 74 struct ftgmac100_txdes *txdes; 75 dma_addr_t txdes_dma; 76 struct sk_buff **tx_skbs; 77 unsigned int tx_clean_pointer; 78 unsigned int tx_pointer; 79 u32 txdes0_edotr_mask; 80 81 /* Used to signal the reset task of ring change request */ 82 unsigned int new_rx_q_entries; 83 unsigned int new_tx_q_entries; 84 85 /* Scratch page to use when rx skb alloc fails */ 86 void *rx_scratch; 87 dma_addr_t rx_scratch_dma; 88 89 /* Component structures */ 90 struct net_device *netdev; 91 struct device *dev; 92 struct ncsi_dev *ndev; 93 struct napi_struct napi; 94 struct work_struct reset_task; 95 struct mii_bus *mii_bus; 96 97 /* Link management */ 98 int cur_speed; 99 int cur_duplex; 100 bool use_ncsi; 101 102 /* Misc */ 103 bool need_mac_restart; 104 bool is_aspeed; 105 }; 106 107 static int ftgmac100_reset_mac(struct ftgmac100 *priv, u32 maccr) 108 { 109 struct net_device *netdev = priv->netdev; 110 int i; 111 112 /* NOTE: reset clears all registers */ 113 iowrite32(maccr, priv->base + FTGMAC100_OFFSET_MACCR); 114 iowrite32(maccr | FTGMAC100_MACCR_SW_RST, 115 priv->base + FTGMAC100_OFFSET_MACCR); 116 for (i = 0; i < 50; i++) { 117 unsigned int maccr; 118 119 maccr = ioread32(priv->base + FTGMAC100_OFFSET_MACCR); 120 if (!(maccr & FTGMAC100_MACCR_SW_RST)) 121 return 0; 122 123 udelay(1); 124 } 125 126 netdev_err(netdev, "Hardware reset failed\n"); 127 return -EIO; 128 } 129 130 static int ftgmac100_reset_and_config_mac(struct ftgmac100 *priv) 131 { 132 u32 maccr = 0; 133 134 switch (priv->cur_speed) { 135 case SPEED_10: 136 case 0: /* no link */ 137 break; 138 139 case SPEED_100: 140 maccr |= FTGMAC100_MACCR_FAST_MODE; 141 break; 142 143 case SPEED_1000: 144 maccr |= FTGMAC100_MACCR_GIGA_MODE; 145 break; 146 default: 147 
netdev_err(priv->netdev, "Unknown speed %d !\n", 148 priv->cur_speed); 149 break; 150 } 151 152 /* (Re)initialize the queue pointers */ 153 priv->rx_pointer = 0; 154 priv->tx_clean_pointer = 0; 155 priv->tx_pointer = 0; 156 157 /* The doc says reset twice with 10us interval */ 158 if (ftgmac100_reset_mac(priv, maccr)) 159 return -EIO; 160 usleep_range(10, 1000); 161 return ftgmac100_reset_mac(priv, maccr); 162 } 163 164 static void ftgmac100_write_mac_addr(struct ftgmac100 *priv, const u8 *mac) 165 { 166 unsigned int maddr = mac[0] << 8 | mac[1]; 167 unsigned int laddr = mac[2] << 24 | mac[3] << 16 | mac[4] << 8 | mac[5]; 168 169 iowrite32(maddr, priv->base + FTGMAC100_OFFSET_MAC_MADR); 170 iowrite32(laddr, priv->base + FTGMAC100_OFFSET_MAC_LADR); 171 } 172 173 static void ftgmac100_initial_mac(struct ftgmac100 *priv) 174 { 175 u8 mac[ETH_ALEN]; 176 unsigned int m; 177 unsigned int l; 178 void *addr; 179 180 addr = device_get_mac_address(priv->dev, mac, ETH_ALEN); 181 if (addr) { 182 ether_addr_copy(priv->netdev->dev_addr, mac); 183 dev_info(priv->dev, "Read MAC address %pM from device tree\n", 184 mac); 185 return; 186 } 187 188 m = ioread32(priv->base + FTGMAC100_OFFSET_MAC_MADR); 189 l = ioread32(priv->base + FTGMAC100_OFFSET_MAC_LADR); 190 191 mac[0] = (m >> 8) & 0xff; 192 mac[1] = m & 0xff; 193 mac[2] = (l >> 24) & 0xff; 194 mac[3] = (l >> 16) & 0xff; 195 mac[4] = (l >> 8) & 0xff; 196 mac[5] = l & 0xff; 197 198 if (is_valid_ether_addr(mac)) { 199 ether_addr_copy(priv->netdev->dev_addr, mac); 200 dev_info(priv->dev, "Read MAC address %pM from chip\n", mac); 201 } else { 202 eth_hw_addr_random(priv->netdev); 203 dev_info(priv->dev, "Generated random MAC address %pM\n", 204 priv->netdev->dev_addr); 205 } 206 } 207 208 static int ftgmac100_set_mac_addr(struct net_device *dev, void *p) 209 { 210 int ret; 211 212 ret = eth_prepare_mac_addr_change(dev, p); 213 if (ret < 0) 214 return ret; 215 216 eth_commit_mac_addr_change(dev, p); 217 
ftgmac100_write_mac_addr(netdev_priv(dev), dev->dev_addr); 218 219 return 0; 220 } 221 222 static void ftgmac100_init_hw(struct ftgmac100 *priv) 223 { 224 u32 reg, rfifo_sz, tfifo_sz; 225 226 /* Clear stale interrupts */ 227 reg = ioread32(priv->base + FTGMAC100_OFFSET_ISR); 228 iowrite32(reg, priv->base + FTGMAC100_OFFSET_ISR); 229 230 /* Setup RX ring buffer base */ 231 iowrite32(priv->rxdes_dma, priv->base + FTGMAC100_OFFSET_RXR_BADR); 232 233 /* Setup TX ring buffer base */ 234 iowrite32(priv->txdes_dma, priv->base + FTGMAC100_OFFSET_NPTXR_BADR); 235 236 /* Configure RX buffer size */ 237 iowrite32(FTGMAC100_RBSR_SIZE(RX_BUF_SIZE), 238 priv->base + FTGMAC100_OFFSET_RBSR); 239 240 /* Set RX descriptor autopoll */ 241 iowrite32(FTGMAC100_APTC_RXPOLL_CNT(1), 242 priv->base + FTGMAC100_OFFSET_APTC); 243 244 /* Write MAC address */ 245 ftgmac100_write_mac_addr(priv, priv->netdev->dev_addr); 246 247 /* Configure descriptor sizes and increase burst sizes according 248 * to values in Aspeed SDK. The FIFO arbitration is enabled and 249 * the thresholds set based on the recommended values in the 250 * AST2400 specification. 251 */ 252 iowrite32(FTGMAC100_DBLAC_RXDES_SIZE(2) | /* 2*8 bytes RX descs */ 253 FTGMAC100_DBLAC_TXDES_SIZE(2) | /* 2*8 bytes TX descs */ 254 FTGMAC100_DBLAC_RXBURST_SIZE(3) | /* 512 bytes max RX bursts */ 255 FTGMAC100_DBLAC_TXBURST_SIZE(3) | /* 512 bytes max TX bursts */ 256 FTGMAC100_DBLAC_RX_THR_EN | /* Enable fifo threshold arb */ 257 FTGMAC100_DBLAC_RXFIFO_HTHR(6) | /* 6/8 of FIFO high threshold */ 258 FTGMAC100_DBLAC_RXFIFO_LTHR(2), /* 2/8 of FIFO low threshold */ 259 priv->base + FTGMAC100_OFFSET_DBLAC); 260 261 /* Interrupt mitigation configured for 1 interrupt/packet. HW interrupt 262 * mitigation doesn't seem to provide any benefit with NAPI so leave 263 * it at that. 
264 */ 265 iowrite32(FTGMAC100_ITC_RXINT_THR(1) | 266 FTGMAC100_ITC_TXINT_THR(1), 267 priv->base + FTGMAC100_OFFSET_ITC); 268 269 /* Configure FIFO sizes in the TPAFCR register */ 270 reg = ioread32(priv->base + FTGMAC100_OFFSET_FEAR); 271 rfifo_sz = reg & 0x00000007; 272 tfifo_sz = (reg >> 3) & 0x00000007; 273 reg = ioread32(priv->base + FTGMAC100_OFFSET_TPAFCR); 274 reg &= ~0x3f000000; 275 reg |= (tfifo_sz << 27); 276 reg |= (rfifo_sz << 24); 277 iowrite32(reg, priv->base + FTGMAC100_OFFSET_TPAFCR); 278 } 279 280 static void ftgmac100_start_hw(struct ftgmac100 *priv) 281 { 282 u32 maccr = ioread32(priv->base + FTGMAC100_OFFSET_MACCR); 283 284 /* Keep the original GMAC and FAST bits */ 285 maccr &= (FTGMAC100_MACCR_FAST_MODE | FTGMAC100_MACCR_GIGA_MODE); 286 287 /* Add all the main enable bits */ 288 maccr |= FTGMAC100_MACCR_TXDMA_EN | 289 FTGMAC100_MACCR_RXDMA_EN | 290 FTGMAC100_MACCR_TXMAC_EN | 291 FTGMAC100_MACCR_RXMAC_EN | 292 FTGMAC100_MACCR_CRC_APD | 293 FTGMAC100_MACCR_PHY_LINK_LEVEL | 294 FTGMAC100_MACCR_RX_RUNT | 295 FTGMAC100_MACCR_RX_BROADPKT; 296 297 /* Add other bits as needed */ 298 if (priv->cur_duplex == DUPLEX_FULL) 299 maccr |= FTGMAC100_MACCR_FULLDUP; 300 301 /* Hit the HW */ 302 iowrite32(maccr, priv->base + FTGMAC100_OFFSET_MACCR); 303 } 304 305 static void ftgmac100_stop_hw(struct ftgmac100 *priv) 306 { 307 iowrite32(0, priv->base + FTGMAC100_OFFSET_MACCR); 308 } 309 310 static int ftgmac100_alloc_rx_buf(struct ftgmac100 *priv, unsigned int entry, 311 struct ftgmac100_rxdes *rxdes, gfp_t gfp) 312 { 313 struct net_device *netdev = priv->netdev; 314 struct sk_buff *skb; 315 dma_addr_t map; 316 int err; 317 318 skb = netdev_alloc_skb_ip_align(netdev, RX_BUF_SIZE); 319 if (unlikely(!skb)) { 320 if (net_ratelimit()) 321 netdev_warn(netdev, "failed to allocate rx skb\n"); 322 err = -ENOMEM; 323 map = priv->rx_scratch_dma; 324 } else { 325 map = dma_map_single(priv->dev, skb->data, RX_BUF_SIZE, 326 DMA_FROM_DEVICE); 327 if 
(unlikely(dma_mapping_error(priv->dev, map))) { 328 if (net_ratelimit()) 329 netdev_err(netdev, "failed to map rx page\n"); 330 dev_kfree_skb_any(skb); 331 map = priv->rx_scratch_dma; 332 skb = NULL; 333 err = -ENOMEM; 334 } 335 } 336 337 /* Store skb */ 338 priv->rx_skbs[entry] = skb; 339 340 /* Store DMA address into RX desc */ 341 rxdes->rxdes3 = cpu_to_le32(map); 342 343 /* Ensure the above is ordered vs clearing the OWN bit */ 344 dma_wmb(); 345 346 /* Clean status (which resets own bit) */ 347 if (entry == (priv->rx_q_entries - 1)) 348 rxdes->rxdes0 = cpu_to_le32(priv->rxdes0_edorr_mask); 349 else 350 rxdes->rxdes0 = 0; 351 352 return 0; 353 } 354 355 static unsigned int ftgmac100_next_rx_pointer(struct ftgmac100 *priv, 356 unsigned int pointer) 357 { 358 return (pointer + 1) & (priv->rx_q_entries - 1); 359 } 360 361 static void ftgmac100_rx_packet_error(struct ftgmac100 *priv, u32 status) 362 { 363 struct net_device *netdev = priv->netdev; 364 365 if (status & FTGMAC100_RXDES0_RX_ERR) 366 netdev->stats.rx_errors++; 367 368 if (status & FTGMAC100_RXDES0_CRC_ERR) 369 netdev->stats.rx_crc_errors++; 370 371 if (status & (FTGMAC100_RXDES0_FTL | 372 FTGMAC100_RXDES0_RUNT | 373 FTGMAC100_RXDES0_RX_ODD_NB)) 374 netdev->stats.rx_length_errors++; 375 } 376 377 static bool ftgmac100_rx_packet(struct ftgmac100 *priv, int *processed) 378 { 379 struct net_device *netdev = priv->netdev; 380 struct ftgmac100_rxdes *rxdes; 381 struct sk_buff *skb; 382 unsigned int pointer, size; 383 u32 status, csum_vlan; 384 dma_addr_t map; 385 386 /* Grab next RX descriptor */ 387 pointer = priv->rx_pointer; 388 rxdes = &priv->rxdes[pointer]; 389 390 /* Grab descriptor status */ 391 status = le32_to_cpu(rxdes->rxdes0); 392 393 /* Do we have a packet ? 
*/ 394 if (!(status & FTGMAC100_RXDES0_RXPKT_RDY)) 395 return false; 396 397 /* Order subsequent reads with the test for the ready bit */ 398 dma_rmb(); 399 400 /* We don't cope with fragmented RX packets */ 401 if (unlikely(!(status & FTGMAC100_RXDES0_FRS) || 402 !(status & FTGMAC100_RXDES0_LRS))) 403 goto drop; 404 405 /* Grab received size and csum vlan field in the descriptor */ 406 size = status & FTGMAC100_RXDES0_VDBC; 407 csum_vlan = le32_to_cpu(rxdes->rxdes1); 408 409 /* Any error (other than csum offload) flagged ? */ 410 if (unlikely(status & RXDES0_ANY_ERROR)) { 411 /* Correct for incorrect flagging of runt packets 412 * with vlan tags... Just accept a runt packet that 413 * has been flagged as vlan and whose size is at 414 * least 60 bytes. 415 */ 416 if ((status & FTGMAC100_RXDES0_RUNT) && 417 (csum_vlan & FTGMAC100_RXDES1_VLANTAG_AVAIL) && 418 (size >= 60)) 419 status &= ~FTGMAC100_RXDES0_RUNT; 420 421 /* Any error still in there ? */ 422 if (status & RXDES0_ANY_ERROR) { 423 ftgmac100_rx_packet_error(priv, status); 424 goto drop; 425 } 426 } 427 428 /* If the packet had no skb (failed to allocate earlier) 429 * then try to allocate one and skip 430 */ 431 skb = priv->rx_skbs[pointer]; 432 if (!unlikely(skb)) { 433 ftgmac100_alloc_rx_buf(priv, pointer, rxdes, GFP_ATOMIC); 434 goto drop; 435 } 436 437 if (unlikely(status & FTGMAC100_RXDES0_MULTICAST)) 438 netdev->stats.multicast++; 439 440 /* If the HW found checksum errors, bounce it to software. 441 * 442 * If we didn't, we need to see if the packet was recognized 443 * by HW as one of the supported checksummed protocols before 444 * we accept the HW test results. 
445 */ 446 if (netdev->features & NETIF_F_RXCSUM) { 447 u32 err_bits = FTGMAC100_RXDES1_TCP_CHKSUM_ERR | 448 FTGMAC100_RXDES1_UDP_CHKSUM_ERR | 449 FTGMAC100_RXDES1_IP_CHKSUM_ERR; 450 if ((csum_vlan & err_bits) || 451 !(csum_vlan & FTGMAC100_RXDES1_PROT_MASK)) 452 skb->ip_summed = CHECKSUM_NONE; 453 else 454 skb->ip_summed = CHECKSUM_UNNECESSARY; 455 } 456 457 /* Transfer received size to skb */ 458 skb_put(skb, size); 459 460 /* Tear down DMA mapping, do necessary cache management */ 461 map = le32_to_cpu(rxdes->rxdes3); 462 463 #if defined(CONFIG_ARM) && !defined(CONFIG_ARM_DMA_USE_IOMMU) 464 /* When we don't have an iommu, we can save cycles by not 465 * invalidating the cache for the part of the packet that 466 * wasn't received. 467 */ 468 dma_unmap_single(priv->dev, map, size, DMA_FROM_DEVICE); 469 #else 470 dma_unmap_single(priv->dev, map, RX_BUF_SIZE, DMA_FROM_DEVICE); 471 #endif 472 473 474 /* Resplenish rx ring */ 475 ftgmac100_alloc_rx_buf(priv, pointer, rxdes, GFP_ATOMIC); 476 priv->rx_pointer = ftgmac100_next_rx_pointer(priv, pointer); 477 478 skb->protocol = eth_type_trans(skb, netdev); 479 480 netdev->stats.rx_packets++; 481 netdev->stats.rx_bytes += size; 482 483 /* push packet to protocol stack */ 484 if (skb->ip_summed == CHECKSUM_NONE) 485 netif_receive_skb(skb); 486 else 487 napi_gro_receive(&priv->napi, skb); 488 489 (*processed)++; 490 return true; 491 492 drop: 493 /* Clean rxdes0 (which resets own bit) */ 494 rxdes->rxdes0 = cpu_to_le32(status & priv->rxdes0_edorr_mask); 495 priv->rx_pointer = ftgmac100_next_rx_pointer(priv, pointer); 496 netdev->stats.rx_dropped++; 497 return true; 498 } 499 500 static u32 ftgmac100_base_tx_ctlstat(struct ftgmac100 *priv, 501 unsigned int index) 502 { 503 if (index == (priv->tx_q_entries - 1)) 504 return priv->txdes0_edotr_mask; 505 else 506 return 0; 507 } 508 509 static unsigned int ftgmac100_next_tx_pointer(struct ftgmac100 *priv, 510 unsigned int pointer) 511 { 512 return (pointer + 1) & 
(priv->tx_q_entries - 1); 513 } 514 515 static u32 ftgmac100_tx_buf_avail(struct ftgmac100 *priv) 516 { 517 /* Returns the number of available slots in the TX queue 518 * 519 * This always leaves one free slot so we don't have to 520 * worry about empty vs. full, and this simplifies the 521 * test for ftgmac100_tx_buf_cleanable() below 522 */ 523 return (priv->tx_clean_pointer - priv->tx_pointer - 1) & 524 (priv->tx_q_entries - 1); 525 } 526 527 static bool ftgmac100_tx_buf_cleanable(struct ftgmac100 *priv) 528 { 529 return priv->tx_pointer != priv->tx_clean_pointer; 530 } 531 532 static void ftgmac100_free_tx_packet(struct ftgmac100 *priv, 533 unsigned int pointer, 534 struct sk_buff *skb, 535 struct ftgmac100_txdes *txdes, 536 u32 ctl_stat) 537 { 538 dma_addr_t map = le32_to_cpu(txdes->txdes3); 539 size_t len; 540 541 if (ctl_stat & FTGMAC100_TXDES0_FTS) { 542 len = skb_headlen(skb); 543 dma_unmap_single(priv->dev, map, len, DMA_TO_DEVICE); 544 } else { 545 len = FTGMAC100_TXDES0_TXBUF_SIZE(ctl_stat); 546 dma_unmap_page(priv->dev, map, len, DMA_TO_DEVICE); 547 } 548 549 /* Free SKB on last segment */ 550 if (ctl_stat & FTGMAC100_TXDES0_LTS) 551 dev_kfree_skb(skb); 552 priv->tx_skbs[pointer] = NULL; 553 } 554 555 static bool ftgmac100_tx_complete_packet(struct ftgmac100 *priv) 556 { 557 struct net_device *netdev = priv->netdev; 558 struct ftgmac100_txdes *txdes; 559 struct sk_buff *skb; 560 unsigned int pointer; 561 u32 ctl_stat; 562 563 pointer = priv->tx_clean_pointer; 564 txdes = &priv->txdes[pointer]; 565 566 ctl_stat = le32_to_cpu(txdes->txdes0); 567 if (ctl_stat & FTGMAC100_TXDES0_TXDMA_OWN) 568 return false; 569 570 skb = priv->tx_skbs[pointer]; 571 netdev->stats.tx_packets++; 572 netdev->stats.tx_bytes += skb->len; 573 ftgmac100_free_tx_packet(priv, pointer, skb, txdes, ctl_stat); 574 txdes->txdes0 = cpu_to_le32(ctl_stat & priv->txdes0_edotr_mask); 575 576 priv->tx_clean_pointer = ftgmac100_next_tx_pointer(priv, pointer); 577 578 return true; 579 } 580 581 
static void ftgmac100_tx_complete(struct ftgmac100 *priv) 582 { 583 struct net_device *netdev = priv->netdev; 584 585 /* Process all completed packets */ 586 while (ftgmac100_tx_buf_cleanable(priv) && 587 ftgmac100_tx_complete_packet(priv)) 588 ; 589 590 /* Restart queue if needed */ 591 smp_mb(); 592 if (unlikely(netif_queue_stopped(netdev) && 593 ftgmac100_tx_buf_avail(priv) >= TX_THRESHOLD)) { 594 struct netdev_queue *txq; 595 596 txq = netdev_get_tx_queue(netdev, 0); 597 __netif_tx_lock(txq, smp_processor_id()); 598 if (netif_queue_stopped(netdev) && 599 ftgmac100_tx_buf_avail(priv) >= TX_THRESHOLD) 600 netif_wake_queue(netdev); 601 __netif_tx_unlock(txq); 602 } 603 } 604 605 static bool ftgmac100_prep_tx_csum(struct sk_buff *skb, u32 *csum_vlan) 606 { 607 if (skb->protocol == cpu_to_be16(ETH_P_IP)) { 608 u8 ip_proto = ip_hdr(skb)->protocol; 609 610 *csum_vlan |= FTGMAC100_TXDES1_IP_CHKSUM; 611 switch(ip_proto) { 612 case IPPROTO_TCP: 613 *csum_vlan |= FTGMAC100_TXDES1_TCP_CHKSUM; 614 return true; 615 case IPPROTO_UDP: 616 *csum_vlan |= FTGMAC100_TXDES1_UDP_CHKSUM; 617 return true; 618 case IPPROTO_IP: 619 return true; 620 } 621 } 622 return skb_checksum_help(skb) == 0; 623 } 624 625 static int ftgmac100_hard_start_xmit(struct sk_buff *skb, 626 struct net_device *netdev) 627 { 628 struct ftgmac100 *priv = netdev_priv(netdev); 629 struct ftgmac100_txdes *txdes, *first; 630 unsigned int pointer, nfrags, len, i, j; 631 u32 f_ctl_stat, ctl_stat, csum_vlan; 632 dma_addr_t map; 633 634 /* The HW doesn't pad small frames */ 635 if (eth_skb_pad(skb)) { 636 netdev->stats.tx_dropped++; 637 return NETDEV_TX_OK; 638 } 639 640 /* Reject oversize packets */ 641 if (unlikely(skb->len > MAX_PKT_SIZE)) { 642 if (net_ratelimit()) 643 netdev_dbg(netdev, "tx packet too big\n"); 644 goto drop; 645 } 646 647 /* Do we have a limit on #fragments ? I yet have to get a reply 648 * from Aspeed. If there's one I haven't hit it. 
649 */ 650 nfrags = skb_shinfo(skb)->nr_frags; 651 652 /* Get header len */ 653 len = skb_headlen(skb); 654 655 /* Map the packet head */ 656 map = dma_map_single(priv->dev, skb->data, len, DMA_TO_DEVICE); 657 if (dma_mapping_error(priv->dev, map)) { 658 if (net_ratelimit()) 659 netdev_err(netdev, "map tx packet head failed\n"); 660 goto drop; 661 } 662 663 /* Grab the next free tx descriptor */ 664 pointer = priv->tx_pointer; 665 txdes = first = &priv->txdes[pointer]; 666 667 /* Setup it up with the packet head. Don't write the head to the 668 * ring just yet 669 */ 670 priv->tx_skbs[pointer] = skb; 671 f_ctl_stat = ftgmac100_base_tx_ctlstat(priv, pointer); 672 f_ctl_stat |= FTGMAC100_TXDES0_TXDMA_OWN; 673 f_ctl_stat |= FTGMAC100_TXDES0_TXBUF_SIZE(len); 674 f_ctl_stat |= FTGMAC100_TXDES0_FTS; 675 if (nfrags == 0) 676 f_ctl_stat |= FTGMAC100_TXDES0_LTS; 677 txdes->txdes3 = cpu_to_le32(map); 678 679 /* Setup HW checksumming */ 680 csum_vlan = 0; 681 if (skb->ip_summed == CHECKSUM_PARTIAL && 682 !ftgmac100_prep_tx_csum(skb, &csum_vlan)) 683 goto drop; 684 txdes->txdes1 = cpu_to_le32(csum_vlan); 685 686 /* Next descriptor */ 687 pointer = ftgmac100_next_tx_pointer(priv, pointer); 688 689 /* Add the fragments */ 690 for (i = 0; i < nfrags; i++) { 691 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 692 693 len = frag->size; 694 695 /* Map it */ 696 map = skb_frag_dma_map(priv->dev, frag, 0, len, 697 DMA_TO_DEVICE); 698 if (dma_mapping_error(priv->dev, map)) 699 goto dma_err; 700 701 /* Setup descriptor */ 702 priv->tx_skbs[pointer] = skb; 703 txdes = &priv->txdes[pointer]; 704 ctl_stat = ftgmac100_base_tx_ctlstat(priv, pointer); 705 ctl_stat |= FTGMAC100_TXDES0_TXDMA_OWN; 706 ctl_stat |= FTGMAC100_TXDES0_TXBUF_SIZE(len); 707 if (i == (nfrags - 1)) 708 ctl_stat |= FTGMAC100_TXDES0_LTS; 709 txdes->txdes0 = cpu_to_le32(ctl_stat); 710 txdes->txdes1 = 0; 711 txdes->txdes3 = cpu_to_le32(map); 712 713 /* Next one */ 714 pointer = ftgmac100_next_tx_pointer(priv, pointer); 715 } 
716 717 /* Order the previous packet and descriptor udpates 718 * before setting the OWN bit on the first descriptor. 719 */ 720 dma_wmb(); 721 first->txdes0 = cpu_to_le32(f_ctl_stat); 722 723 /* Update next TX pointer */ 724 priv->tx_pointer = pointer; 725 726 /* If there isn't enough room for all the fragments of a new packet 727 * in the TX ring, stop the queue. The sequence below is race free 728 * vs. a concurrent restart in ftgmac100_poll() 729 */ 730 if (unlikely(ftgmac100_tx_buf_avail(priv) < TX_THRESHOLD)) { 731 netif_stop_queue(netdev); 732 /* Order the queue stop with the test below */ 733 smp_mb(); 734 if (ftgmac100_tx_buf_avail(priv) >= TX_THRESHOLD) 735 netif_wake_queue(netdev); 736 } 737 738 /* Poke transmitter to read the updated TX descriptors */ 739 iowrite32(1, priv->base + FTGMAC100_OFFSET_NPTXPD); 740 741 return NETDEV_TX_OK; 742 743 dma_err: 744 if (net_ratelimit()) 745 netdev_err(netdev, "map tx fragment failed\n"); 746 747 /* Free head */ 748 pointer = priv->tx_pointer; 749 ftgmac100_free_tx_packet(priv, pointer, skb, first, f_ctl_stat); 750 first->txdes0 = cpu_to_le32(f_ctl_stat & priv->txdes0_edotr_mask); 751 752 /* Then all fragments */ 753 for (j = 0; j < i; j++) { 754 pointer = ftgmac100_next_tx_pointer(priv, pointer); 755 txdes = &priv->txdes[pointer]; 756 ctl_stat = le32_to_cpu(txdes->txdes0); 757 ftgmac100_free_tx_packet(priv, pointer, skb, txdes, ctl_stat); 758 txdes->txdes0 = cpu_to_le32(ctl_stat & priv->txdes0_edotr_mask); 759 } 760 761 /* This cannot be reached if we successfully mapped the 762 * last fragment, so we know ftgmac100_free_tx_packet() 763 * hasn't freed the skb yet. 
764 */ 765 drop: 766 /* Drop the packet */ 767 dev_kfree_skb_any(skb); 768 netdev->stats.tx_dropped++; 769 770 return NETDEV_TX_OK; 771 } 772 773 static void ftgmac100_free_buffers(struct ftgmac100 *priv) 774 { 775 int i; 776 777 /* Free all RX buffers */ 778 for (i = 0; i < priv->rx_q_entries; i++) { 779 struct ftgmac100_rxdes *rxdes = &priv->rxdes[i]; 780 struct sk_buff *skb = priv->rx_skbs[i]; 781 dma_addr_t map = le32_to_cpu(rxdes->rxdes3); 782 783 if (!skb) 784 continue; 785 786 priv->rx_skbs[i] = NULL; 787 dma_unmap_single(priv->dev, map, RX_BUF_SIZE, DMA_FROM_DEVICE); 788 dev_kfree_skb_any(skb); 789 } 790 791 /* Free all TX buffers */ 792 for (i = 0; i < priv->tx_q_entries; i++) { 793 struct ftgmac100_txdes *txdes = &priv->txdes[i]; 794 struct sk_buff *skb = priv->tx_skbs[i]; 795 796 if (!skb) 797 continue; 798 ftgmac100_free_tx_packet(priv, i, skb, txdes, 799 le32_to_cpu(txdes->txdes0)); 800 } 801 } 802 803 static void ftgmac100_free_rings(struct ftgmac100 *priv) 804 { 805 /* Free skb arrays */ 806 kfree(priv->rx_skbs); 807 kfree(priv->tx_skbs); 808 809 /* Free descriptors */ 810 if (priv->rxdes) 811 dma_free_coherent(priv->dev, MAX_RX_QUEUE_ENTRIES * 812 sizeof(struct ftgmac100_rxdes), 813 priv->rxdes, priv->rxdes_dma); 814 priv->rxdes = NULL; 815 816 if (priv->txdes) 817 dma_free_coherent(priv->dev, MAX_TX_QUEUE_ENTRIES * 818 sizeof(struct ftgmac100_txdes), 819 priv->txdes, priv->txdes_dma); 820 priv->txdes = NULL; 821 822 /* Free scratch packet buffer */ 823 if (priv->rx_scratch) 824 dma_free_coherent(priv->dev, RX_BUF_SIZE, 825 priv->rx_scratch, priv->rx_scratch_dma); 826 } 827 828 static int ftgmac100_alloc_rings(struct ftgmac100 *priv) 829 { 830 /* Allocate skb arrays */ 831 priv->rx_skbs = kcalloc(MAX_RX_QUEUE_ENTRIES, sizeof(void *), 832 GFP_KERNEL); 833 if (!priv->rx_skbs) 834 return -ENOMEM; 835 priv->tx_skbs = kcalloc(MAX_TX_QUEUE_ENTRIES, sizeof(void *), 836 GFP_KERNEL); 837 if (!priv->tx_skbs) 838 return -ENOMEM; 839 840 /* Allocate descriptors 
*/ 841 priv->rxdes = dma_zalloc_coherent(priv->dev, 842 MAX_RX_QUEUE_ENTRIES * 843 sizeof(struct ftgmac100_rxdes), 844 &priv->rxdes_dma, GFP_KERNEL); 845 if (!priv->rxdes) 846 return -ENOMEM; 847 priv->txdes = dma_zalloc_coherent(priv->dev, 848 MAX_TX_QUEUE_ENTRIES * 849 sizeof(struct ftgmac100_txdes), 850 &priv->txdes_dma, GFP_KERNEL); 851 if (!priv->txdes) 852 return -ENOMEM; 853 854 /* Allocate scratch packet buffer */ 855 priv->rx_scratch = dma_alloc_coherent(priv->dev, 856 RX_BUF_SIZE, 857 &priv->rx_scratch_dma, 858 GFP_KERNEL); 859 if (!priv->rx_scratch) 860 return -ENOMEM; 861 862 return 0; 863 } 864 865 static void ftgmac100_init_rings(struct ftgmac100 *priv) 866 { 867 struct ftgmac100_rxdes *rxdes = NULL; 868 struct ftgmac100_txdes *txdes = NULL; 869 int i; 870 871 /* Update entries counts */ 872 priv->rx_q_entries = priv->new_rx_q_entries; 873 priv->tx_q_entries = priv->new_tx_q_entries; 874 875 if (WARN_ON(priv->rx_q_entries < MIN_RX_QUEUE_ENTRIES)) 876 return; 877 878 /* Initialize RX ring */ 879 for (i = 0; i < priv->rx_q_entries; i++) { 880 rxdes = &priv->rxdes[i]; 881 rxdes->rxdes0 = 0; 882 rxdes->rxdes3 = cpu_to_le32(priv->rx_scratch_dma); 883 } 884 /* Mark the end of the ring */ 885 rxdes->rxdes0 |= cpu_to_le32(priv->rxdes0_edorr_mask); 886 887 if (WARN_ON(priv->tx_q_entries < MIN_RX_QUEUE_ENTRIES)) 888 return; 889 890 /* Initialize TX ring */ 891 for (i = 0; i < priv->tx_q_entries; i++) { 892 txdes = &priv->txdes[i]; 893 txdes->txdes0 = 0; 894 } 895 txdes->txdes0 |= cpu_to_le32(priv->txdes0_edotr_mask); 896 } 897 898 static int ftgmac100_alloc_rx_buffers(struct ftgmac100 *priv) 899 { 900 int i; 901 902 for (i = 0; i < priv->rx_q_entries; i++) { 903 struct ftgmac100_rxdes *rxdes = &priv->rxdes[i]; 904 905 if (ftgmac100_alloc_rx_buf(priv, i, rxdes, GFP_KERNEL)) 906 return -ENOMEM; 907 } 908 return 0; 909 } 910 911 static void ftgmac100_adjust_link(struct net_device *netdev) 912 { 913 struct ftgmac100 *priv = netdev_priv(netdev); 914 struct 
phy_device *phydev = netdev->phydev; 915 int new_speed; 916 917 /* We store "no link" as speed 0 */ 918 if (!phydev->link) 919 new_speed = 0; 920 else 921 new_speed = phydev->speed; 922 923 if (phydev->speed == priv->cur_speed && 924 phydev->duplex == priv->cur_duplex) 925 return; 926 927 /* Print status if we have a link or we had one and just lost it, 928 * don't print otherwise. 929 */ 930 if (new_speed || priv->cur_speed) 931 phy_print_status(phydev); 932 933 priv->cur_speed = new_speed; 934 priv->cur_duplex = phydev->duplex; 935 936 /* Link is down, do nothing else */ 937 if (!new_speed) 938 return; 939 940 /* Disable all interrupts */ 941 iowrite32(0, priv->base + FTGMAC100_OFFSET_IER); 942 943 /* Reset the adapter asynchronously */ 944 schedule_work(&priv->reset_task); 945 } 946 947 static int ftgmac100_mii_probe(struct ftgmac100 *priv) 948 { 949 struct net_device *netdev = priv->netdev; 950 struct phy_device *phydev; 951 952 phydev = phy_find_first(priv->mii_bus); 953 if (!phydev) { 954 netdev_info(netdev, "%s: no PHY found\n", netdev->name); 955 return -ENODEV; 956 } 957 958 phydev = phy_connect(netdev, phydev_name(phydev), 959 &ftgmac100_adjust_link, PHY_INTERFACE_MODE_GMII); 960 961 if (IS_ERR(phydev)) { 962 netdev_err(netdev, "%s: Could not attach to PHY\n", netdev->name); 963 return PTR_ERR(phydev); 964 } 965 966 return 0; 967 } 968 969 static int ftgmac100_mdiobus_read(struct mii_bus *bus, int phy_addr, int regnum) 970 { 971 struct net_device *netdev = bus->priv; 972 struct ftgmac100 *priv = netdev_priv(netdev); 973 unsigned int phycr; 974 int i; 975 976 phycr = ioread32(priv->base + FTGMAC100_OFFSET_PHYCR); 977 978 /* preserve MDC cycle threshold */ 979 phycr &= FTGMAC100_PHYCR_MDC_CYCTHR_MASK; 980 981 phycr |= FTGMAC100_PHYCR_PHYAD(phy_addr) | 982 FTGMAC100_PHYCR_REGAD(regnum) | 983 FTGMAC100_PHYCR_MIIRD; 984 985 iowrite32(phycr, priv->base + FTGMAC100_OFFSET_PHYCR); 986 987 for (i = 0; i < 10; i++) { 988 phycr = ioread32(priv->base + 
FTGMAC100_OFFSET_PHYCR); 989 990 if ((phycr & FTGMAC100_PHYCR_MIIRD) == 0) { 991 int data; 992 993 data = ioread32(priv->base + FTGMAC100_OFFSET_PHYDATA); 994 return FTGMAC100_PHYDATA_MIIRDATA(data); 995 } 996 997 udelay(100); 998 } 999 1000 netdev_err(netdev, "mdio read timed out\n"); 1001 return -EIO; 1002 } 1003 1004 static int ftgmac100_mdiobus_write(struct mii_bus *bus, int phy_addr, 1005 int regnum, u16 value) 1006 { 1007 struct net_device *netdev = bus->priv; 1008 struct ftgmac100 *priv = netdev_priv(netdev); 1009 unsigned int phycr; 1010 int data; 1011 int i; 1012 1013 phycr = ioread32(priv->base + FTGMAC100_OFFSET_PHYCR); 1014 1015 /* preserve MDC cycle threshold */ 1016 phycr &= FTGMAC100_PHYCR_MDC_CYCTHR_MASK; 1017 1018 phycr |= FTGMAC100_PHYCR_PHYAD(phy_addr) | 1019 FTGMAC100_PHYCR_REGAD(regnum) | 1020 FTGMAC100_PHYCR_MIIWR; 1021 1022 data = FTGMAC100_PHYDATA_MIIWDATA(value); 1023 1024 iowrite32(data, priv->base + FTGMAC100_OFFSET_PHYDATA); 1025 iowrite32(phycr, priv->base + FTGMAC100_OFFSET_PHYCR); 1026 1027 for (i = 0; i < 10; i++) { 1028 phycr = ioread32(priv->base + FTGMAC100_OFFSET_PHYCR); 1029 1030 if ((phycr & FTGMAC100_PHYCR_MIIWR) == 0) 1031 return 0; 1032 1033 udelay(100); 1034 } 1035 1036 netdev_err(netdev, "mdio write timed out\n"); 1037 return -EIO; 1038 } 1039 1040 static void ftgmac100_get_drvinfo(struct net_device *netdev, 1041 struct ethtool_drvinfo *info) 1042 { 1043 strlcpy(info->driver, DRV_NAME, sizeof(info->driver)); 1044 strlcpy(info->version, DRV_VERSION, sizeof(info->version)); 1045 strlcpy(info->bus_info, dev_name(&netdev->dev), sizeof(info->bus_info)); 1046 } 1047 1048 static void ftgmac100_get_ringparam(struct net_device *netdev, 1049 struct ethtool_ringparam *ering) 1050 { 1051 struct ftgmac100 *priv = netdev_priv(netdev); 1052 1053 memset(ering, 0, sizeof(*ering)); 1054 ering->rx_max_pending = MAX_RX_QUEUE_ENTRIES; 1055 ering->tx_max_pending = MAX_TX_QUEUE_ENTRIES; 1056 ering->rx_pending = priv->rx_q_entries; 1057 
ering->tx_pending = priv->tx_q_entries; 1058 } 1059 1060 static int ftgmac100_set_ringparam(struct net_device *netdev, 1061 struct ethtool_ringparam *ering) 1062 { 1063 struct ftgmac100 *priv = netdev_priv(netdev); 1064 1065 if (ering->rx_pending > MAX_RX_QUEUE_ENTRIES || 1066 ering->tx_pending > MAX_TX_QUEUE_ENTRIES || 1067 ering->rx_pending < MIN_RX_QUEUE_ENTRIES || 1068 ering->tx_pending < MIN_TX_QUEUE_ENTRIES || 1069 !is_power_of_2(ering->rx_pending) || 1070 !is_power_of_2(ering->tx_pending)) 1071 return -EINVAL; 1072 1073 priv->new_rx_q_entries = ering->rx_pending; 1074 priv->new_tx_q_entries = ering->tx_pending; 1075 if (netif_running(netdev)) 1076 schedule_work(&priv->reset_task); 1077 1078 return 0; 1079 } 1080 1081 static const struct ethtool_ops ftgmac100_ethtool_ops = { 1082 .get_drvinfo = ftgmac100_get_drvinfo, 1083 .get_link = ethtool_op_get_link, 1084 .get_link_ksettings = phy_ethtool_get_link_ksettings, 1085 .set_link_ksettings = phy_ethtool_set_link_ksettings, 1086 .nway_reset = phy_ethtool_nway_reset, 1087 .get_ringparam = ftgmac100_get_ringparam, 1088 .set_ringparam = ftgmac100_set_ringparam, 1089 }; 1090 1091 static irqreturn_t ftgmac100_interrupt(int irq, void *dev_id) 1092 { 1093 struct net_device *netdev = dev_id; 1094 struct ftgmac100 *priv = netdev_priv(netdev); 1095 unsigned int status, new_mask = FTGMAC100_INT_BAD; 1096 1097 /* Fetch and clear interrupt bits, process abnormal ones */ 1098 status = ioread32(priv->base + FTGMAC100_OFFSET_ISR); 1099 iowrite32(status, priv->base + FTGMAC100_OFFSET_ISR); 1100 if (unlikely(status & FTGMAC100_INT_BAD)) { 1101 1102 /* RX buffer unavailable */ 1103 if (status & FTGMAC100_INT_NO_RXBUF) 1104 netdev->stats.rx_over_errors++; 1105 1106 /* received packet lost due to RX FIFO full */ 1107 if (status & FTGMAC100_INT_RPKT_LOST) 1108 netdev->stats.rx_fifo_errors++; 1109 1110 /* sent packet lost due to excessive TX collision */ 1111 if (status & FTGMAC100_INT_XPKT_LOST) 1112 netdev->stats.tx_fifo_errors++; 
1113 1114 /* AHB error -> Reset the chip */ 1115 if (status & FTGMAC100_INT_AHB_ERR) { 1116 if (net_ratelimit()) 1117 netdev_warn(netdev, 1118 "AHB bus error ! Resetting chip.\n"); 1119 iowrite32(0, priv->base + FTGMAC100_OFFSET_IER); 1120 schedule_work(&priv->reset_task); 1121 return IRQ_HANDLED; 1122 } 1123 1124 /* We may need to restart the MAC after such errors, delay 1125 * this until after we have freed some Rx buffers though 1126 */ 1127 priv->need_mac_restart = true; 1128 1129 /* Disable those errors until we restart */ 1130 new_mask &= ~status; 1131 } 1132 1133 /* Only enable "bad" interrupts while NAPI is on */ 1134 iowrite32(new_mask, priv->base + FTGMAC100_OFFSET_IER); 1135 1136 /* Schedule NAPI bh */ 1137 napi_schedule_irqoff(&priv->napi); 1138 1139 return IRQ_HANDLED; 1140 } 1141 1142 static bool ftgmac100_check_rx(struct ftgmac100 *priv) 1143 { 1144 struct ftgmac100_rxdes *rxdes = &priv->rxdes[priv->rx_pointer]; 1145 1146 /* Do we have a packet ? */ 1147 return !!(rxdes->rxdes0 & cpu_to_le32(FTGMAC100_RXDES0_RXPKT_RDY)); 1148 } 1149 1150 static int ftgmac100_poll(struct napi_struct *napi, int budget) 1151 { 1152 struct ftgmac100 *priv = container_of(napi, struct ftgmac100, napi); 1153 int work_done = 0; 1154 bool more; 1155 1156 /* Handle TX completions */ 1157 if (ftgmac100_tx_buf_cleanable(priv)) 1158 ftgmac100_tx_complete(priv); 1159 1160 /* Handle RX packets */ 1161 do { 1162 more = ftgmac100_rx_packet(priv, &work_done); 1163 } while (more && work_done < budget); 1164 1165 1166 /* The interrupt is telling us to kick the MAC back to life 1167 * after an RX overflow 1168 */ 1169 if (unlikely(priv->need_mac_restart)) { 1170 ftgmac100_start_hw(priv); 1171 1172 /* Re-enable "bad" interrupts */ 1173 iowrite32(FTGMAC100_INT_BAD, 1174 priv->base + FTGMAC100_OFFSET_IER); 1175 } 1176 1177 /* As long as we are waiting for transmit packets to be 1178 * completed we keep NAPI going 1179 */ 1180 if (ftgmac100_tx_buf_cleanable(priv)) 1181 work_done = budget; 
	if (work_done < budget) {
		/* We are about to re-enable all interrupts. However
		 * the HW has been latching RX/TX packet interrupts while
		 * they were masked. So we clear them first, then we need
		 * to re-check if there's something to process
		 */
		iowrite32(FTGMAC100_INT_RXTX,
			  priv->base + FTGMAC100_OFFSET_ISR);
		if (ftgmac100_check_rx(priv) ||
		    ftgmac100_tx_buf_cleanable(priv))
			return budget;

		/* deschedule NAPI */
		napi_complete(napi);

		/* enable all interrupts */
		iowrite32(FTGMAC100_INT_ALL,
			  priv->base + FTGMAC100_OFFSET_IER);
	}

	return work_done;
}

/* Re-initialize rings and RX buffers, then bring the hardware up and
 * re-enable NAPI, the TX queue and all interrupts.  Used at open time
 * and from the reset task.  When @ignore_alloc_err is set (reset
 * path) an RX buffer allocation failure is reported via the return
 * value but does not abort the bring-up.
 */
static int ftgmac100_init_all(struct ftgmac100 *priv, bool ignore_alloc_err)
{
	int err = 0;

	/* Re-init descriptors (adjust queue sizes) */
	ftgmac100_init_rings(priv);

	/* Realloc rx descriptors */
	err = ftgmac100_alloc_rx_buffers(priv);
	if (err && !ignore_alloc_err)
		return err;

	/* Reinit and restart HW */
	ftgmac100_init_hw(priv);
	ftgmac100_start_hw(priv);

	/* Re-enable the device */
	napi_enable(&priv->napi);
	netif_start_queue(priv->netdev);

	/* Enable all interrupts */
	iowrite32(FTGMAC100_INT_ALL, priv->base + FTGMAC100_OFFSET_IER);

	return err;
}

/* Workqueue handler performing a full chip reset.  Scheduled from the
 * interrupt handler (AHB error), the TX timeout hook and
 * set_ringparam.
 */
static void ftgmac100_reset_task(struct work_struct *work)
{
	struct ftgmac100 *priv = container_of(work, struct ftgmac100,
					      reset_task);
	struct net_device *netdev = priv->netdev;
	int err;

	netdev_dbg(netdev, "Resetting NIC...\n");

	/* Lock the world: rtnl against open/stop, plus the PHY and
	 * MDIO bus locks against concurrent management accesses
	 */
	rtnl_lock();
	if (netdev->phydev)
		mutex_lock(&netdev->phydev->lock);
	if (priv->mii_bus)
		mutex_lock(&priv->mii_bus->mdio_lock);

	/* Check if the interface is still up */
	if (!netif_running(netdev))
		goto bail;

	/* Stop the network stack */
	netif_trans_update(netdev);
	napi_disable(&priv->napi);
	netif_tx_disable(netdev);

	/* Stop and reset the MAC */
	ftgmac100_stop_hw(priv);
	err = ftgmac100_reset_and_config_mac(priv);
	if (err) {
		/* Not much we can do ... it might come back... */
		netdev_err(netdev, "attempting to continue...\n");
	}

	/* Free all rx and tx buffers */
	ftgmac100_free_buffers(priv);

	/* Setup everything again and restart chip */
	ftgmac100_init_all(priv, true);

	netdev_dbg(netdev, "Reset done !\n");
bail:
	if (priv->mii_bus)
		mutex_unlock(&priv->mii_bus->mdio_lock);
	if (netdev->phydev)
		mutex_unlock(&netdev->phydev->lock);
	rtnl_unlock();
}

/* ndo_open: allocate rings, reset the MAC, set up NAPI and the IRQ,
 * then bring the link up either via the PHY or the NC-SI stack.
 */
static int ftgmac100_open(struct net_device *netdev)
{
	struct ftgmac100 *priv = netdev_priv(netdev);
	int err;

	/* Allocate ring buffers */
	err = ftgmac100_alloc_rings(priv);
	if (err) {
		netdev_err(netdev, "Failed to allocate descriptors\n");
		return err;
	}

	/* When using NC-SI we force the speed to 100Mbit/s full duplex,
	 *
	 * Otherwise we leave it set to 0 (no link), the link
	 * message from the PHY layer will handle setting it up to
	 * something else if needed.
1298 */ 1299 if (priv->use_ncsi) { 1300 priv->cur_duplex = DUPLEX_FULL; 1301 priv->cur_speed = SPEED_100; 1302 } else { 1303 priv->cur_duplex = 0; 1304 priv->cur_speed = 0; 1305 } 1306 1307 /* Reset the hardware */ 1308 err = ftgmac100_reset_and_config_mac(priv); 1309 if (err) 1310 goto err_hw; 1311 1312 /* Initialize NAPI */ 1313 netif_napi_add(netdev, &priv->napi, ftgmac100_poll, 64); 1314 1315 /* Grab our interrupt */ 1316 err = request_irq(netdev->irq, ftgmac100_interrupt, 0, netdev->name, netdev); 1317 if (err) { 1318 netdev_err(netdev, "failed to request irq %d\n", netdev->irq); 1319 goto err_irq; 1320 } 1321 1322 /* Start things up */ 1323 err = ftgmac100_init_all(priv, false); 1324 if (err) { 1325 netdev_err(netdev, "Failed to allocate packet buffers\n"); 1326 goto err_alloc; 1327 } 1328 1329 if (netdev->phydev) { 1330 /* If we have a PHY, start polling */ 1331 phy_start(netdev->phydev); 1332 } else if (priv->use_ncsi) { 1333 /* If using NC-SI, set our carrier on and start the stack */ 1334 netif_carrier_on(netdev); 1335 1336 /* Start the NCSI device */ 1337 err = ncsi_start_dev(priv->ndev); 1338 if (err) 1339 goto err_ncsi; 1340 } 1341 1342 return 0; 1343 1344 err_ncsi: 1345 napi_disable(&priv->napi); 1346 netif_stop_queue(netdev); 1347 err_alloc: 1348 ftgmac100_free_buffers(priv); 1349 free_irq(netdev->irq, netdev); 1350 err_irq: 1351 netif_napi_del(&priv->napi); 1352 err_hw: 1353 iowrite32(0, priv->base + FTGMAC100_OFFSET_IER); 1354 ftgmac100_free_rings(priv); 1355 return err; 1356 } 1357 1358 static int ftgmac100_stop(struct net_device *netdev) 1359 { 1360 struct ftgmac100 *priv = netdev_priv(netdev); 1361 1362 /* Note about the reset task: We are called with the rtnl lock 1363 * held, so we are synchronized against the core of the reset 1364 * task. We must not try to synchronously cancel it otherwise 1365 * we can deadlock. 
	 * But since it will test for netif_running()
	 * which has already been cleared by the net core, we don't
	 * have anything special to do.
	 */

	/* disable all interrupts */
	iowrite32(0, priv->base + FTGMAC100_OFFSET_IER);

	netif_stop_queue(netdev);
	napi_disable(&priv->napi);
	netif_napi_del(&priv->napi);
	if (netdev->phydev)
		phy_stop(netdev->phydev);
	else if (priv->use_ncsi)
		ncsi_stop_dev(priv->ndev);

	ftgmac100_stop_hw(priv);
	free_irq(netdev->irq, netdev);
	ftgmac100_free_buffers(priv);
	ftgmac100_free_rings(priv);

	return 0;
}

/* optional */
/* ndo_do_ioctl: forward MII ioctls to the attached PHY, if any */
static int ftgmac100_do_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
	if (!netdev->phydev)
		return -ENXIO;

	return phy_mii_ioctl(netdev->phydev, ifr, cmd);
}

/* ndo_tx_timeout: mask interrupts and defer a full reset to the task */
static void ftgmac100_tx_timeout(struct net_device *netdev)
{
	struct ftgmac100 *priv = netdev_priv(netdev);

	/* Disable all interrupts */
	iowrite32(0, priv->base + FTGMAC100_OFFSET_IER);

	/* Do the reset outside of interrupt context */
	schedule_work(&priv->reset_task);
}

static const struct net_device_ops ftgmac100_netdev_ops = {
	.ndo_open = ftgmac100_open,
	.ndo_stop = ftgmac100_stop,
	.ndo_start_xmit = ftgmac100_hard_start_xmit,
	.ndo_set_mac_address = ftgmac100_set_mac_addr,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_do_ioctl = ftgmac100_do_ioctl,
	.ndo_tx_timeout = ftgmac100_tx_timeout,
};

/* Allocate and register an MDIO bus, then probe for the PHY.  Only
 * used when the device is not driven through NC-SI.
 */
static int ftgmac100_setup_mdio(struct net_device *netdev)
{
	struct ftgmac100 *priv = netdev_priv(netdev);
	struct platform_device *pdev = to_platform_device(priv->dev);
	int i, err = 0;
	u32 reg;

	/* initialize mdio bus */
	priv->mii_bus = mdiobus_alloc();
	if (!priv->mii_bus)
		return -EIO;

	if (priv->is_aspeed) {
		/* This driver supports the old MDIO interface */
reg = ioread32(priv->base + FTGMAC100_OFFSET_REVR); 1434 reg &= ~FTGMAC100_REVR_NEW_MDIO_INTERFACE; 1435 iowrite32(reg, priv->base + FTGMAC100_OFFSET_REVR); 1436 }; 1437 1438 priv->mii_bus->name = "ftgmac100_mdio"; 1439 snprintf(priv->mii_bus->id, MII_BUS_ID_SIZE, "%s-%d", 1440 pdev->name, pdev->id); 1441 priv->mii_bus->priv = priv->netdev; 1442 priv->mii_bus->read = ftgmac100_mdiobus_read; 1443 priv->mii_bus->write = ftgmac100_mdiobus_write; 1444 1445 for (i = 0; i < PHY_MAX_ADDR; i++) 1446 priv->mii_bus->irq[i] = PHY_POLL; 1447 1448 err = mdiobus_register(priv->mii_bus); 1449 if (err) { 1450 dev_err(priv->dev, "Cannot register MDIO bus!\n"); 1451 goto err_register_mdiobus; 1452 } 1453 1454 err = ftgmac100_mii_probe(priv); 1455 if (err) { 1456 dev_err(priv->dev, "MII Probe failed!\n"); 1457 goto err_mii_probe; 1458 } 1459 1460 return 0; 1461 1462 err_mii_probe: 1463 mdiobus_unregister(priv->mii_bus); 1464 err_register_mdiobus: 1465 mdiobus_free(priv->mii_bus); 1466 return err; 1467 } 1468 1469 static void ftgmac100_destroy_mdio(struct net_device *netdev) 1470 { 1471 struct ftgmac100 *priv = netdev_priv(netdev); 1472 1473 if (!netdev->phydev) 1474 return; 1475 1476 phy_disconnect(netdev->phydev); 1477 mdiobus_unregister(priv->mii_bus); 1478 mdiobus_free(priv->mii_bus); 1479 } 1480 1481 static void ftgmac100_ncsi_handler(struct ncsi_dev *nd) 1482 { 1483 if (unlikely(nd->state != ncsi_dev_state_functional)) 1484 return; 1485 1486 netdev_info(nd->dev, "NCSI interface %s\n", 1487 nd->link_up ? 
		    "up" : "down");
}

/* Platform probe: map the register window, grab the IRQ, detect the
 * MAC flavour (Aspeed vs. Faraday descriptor layout), then attach
 * either the NC-SI stack or an MDIO bus/PHY and register the netdev.
 */
static int ftgmac100_probe(struct platform_device *pdev)
{
	struct resource *res;
	int irq;
	struct net_device *netdev;
	struct ftgmac100 *priv;
	struct device_node *np;
	int err = 0;

	if (!pdev)
		return -ENODEV;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENXIO;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	/* setup net_device */
	netdev = alloc_etherdev(sizeof(*priv));
	if (!netdev) {
		err = -ENOMEM;
		goto err_alloc_etherdev;
	}

	SET_NETDEV_DEV(netdev, &pdev->dev);

	netdev->ethtool_ops = &ftgmac100_ethtool_ops;
	netdev->netdev_ops = &ftgmac100_netdev_ops;
	netdev->watchdog_timeo = 5 * HZ;

	platform_set_drvdata(pdev, netdev);

	/* setup private data */
	priv = netdev_priv(netdev);
	priv->netdev = netdev;
	priv->dev = &pdev->dev;
	INIT_WORK(&priv->reset_task, ftgmac100_reset_task);

	/* map io memory */
	priv->res = request_mem_region(res->start, resource_size(res),
				       dev_name(&pdev->dev));
	if (!priv->res) {
		dev_err(&pdev->dev, "Could not reserve memory region\n");
		err = -ENOMEM;
		goto err_req_mem;
	}

	priv->base = ioremap(res->start, resource_size(res));
	if (!priv->base) {
		dev_err(&pdev->dev, "Failed to ioremap ethernet registers\n");
		err = -EIO;
		goto err_ioremap;
	}

	netdev->irq = irq;

	/* MAC address from chip or random one */
	ftgmac100_initial_mac(priv);

	/* Aspeed SoCs place the "end of ring" descriptor bit differently
	 * from the original Faraday IP
	 */
	np = pdev->dev.of_node;
	if (np && (of_device_is_compatible(np, "aspeed,ast2400-mac") ||
		   of_device_is_compatible(np, "aspeed,ast2500-mac"))) {
		priv->rxdes0_edorr_mask = BIT(30);
		priv->txdes0_edotr_mask = BIT(30);
		priv->is_aspeed = true;
	} else {
		priv->rxdes0_edorr_mask = BIT(15);
		priv->txdes0_edotr_mask = BIT(15);
	}
if (np && of_get_property(np, "use-ncsi", NULL)) { 1564 if (!IS_ENABLED(CONFIG_NET_NCSI)) { 1565 dev_err(&pdev->dev, "NCSI stack not enabled\n"); 1566 goto err_ncsi_dev; 1567 } 1568 1569 dev_info(&pdev->dev, "Using NCSI interface\n"); 1570 priv->use_ncsi = true; 1571 priv->ndev = ncsi_register_dev(netdev, ftgmac100_ncsi_handler); 1572 if (!priv->ndev) 1573 goto err_ncsi_dev; 1574 } else { 1575 priv->use_ncsi = false; 1576 err = ftgmac100_setup_mdio(netdev); 1577 if (err) 1578 goto err_setup_mdio; 1579 } 1580 1581 /* Default ring sizes */ 1582 priv->rx_q_entries = priv->new_rx_q_entries = DEF_RX_QUEUE_ENTRIES; 1583 priv->tx_q_entries = priv->new_tx_q_entries = DEF_TX_QUEUE_ENTRIES; 1584 1585 /* Base feature set */ 1586 netdev->hw_features = NETIF_F_RXCSUM | NETIF_F_HW_CSUM | 1587 NETIF_F_GRO | NETIF_F_SG; 1588 1589 /* AST2400 doesn't have working HW checksum generation */ 1590 if (np && (of_device_is_compatible(np, "aspeed,ast2400-mac"))) 1591 netdev->hw_features &= ~NETIF_F_HW_CSUM; 1592 if (np && of_get_property(np, "no-hw-checksum", NULL)) 1593 netdev->hw_features &= ~(NETIF_F_HW_CSUM | NETIF_F_RXCSUM); 1594 netdev->features |= netdev->hw_features; 1595 1596 /* register network device */ 1597 err = register_netdev(netdev); 1598 if (err) { 1599 dev_err(&pdev->dev, "Failed to register netdev\n"); 1600 goto err_register_netdev; 1601 } 1602 1603 netdev_info(netdev, "irq %d, mapped at %p\n", netdev->irq, priv->base); 1604 1605 return 0; 1606 1607 err_ncsi_dev: 1608 err_register_netdev: 1609 ftgmac100_destroy_mdio(netdev); 1610 err_setup_mdio: 1611 iounmap(priv->base); 1612 err_ioremap: 1613 release_resource(priv->res); 1614 err_req_mem: 1615 netif_napi_del(&priv->napi); 1616 free_netdev(netdev); 1617 err_alloc_etherdev: 1618 return err; 1619 } 1620 1621 static int ftgmac100_remove(struct platform_device *pdev) 1622 { 1623 struct net_device *netdev; 1624 struct ftgmac100 *priv; 1625 1626 netdev = platform_get_drvdata(pdev); 1627 priv = netdev_priv(netdev); 1628 1629 
	unregister_netdev(netdev);

	/* There's a small chance the reset task will have been re-queued,
	 * during stop, make sure it's gone before we free the structure.
	 */
	cancel_work_sync(&priv->reset_task);

	ftgmac100_destroy_mdio(netdev);

	iounmap(priv->base);
	release_resource(priv->res);

	netif_napi_del(&priv->napi);
	free_netdev(netdev);
	return 0;
}

/* Device-tree match table */
static const struct of_device_id ftgmac100_of_match[] = {
	{ .compatible = "faraday,ftgmac100" },
	{ }
};
MODULE_DEVICE_TABLE(of, ftgmac100_of_match);

static struct platform_driver ftgmac100_driver = {
	.probe = ftgmac100_probe,
	.remove = ftgmac100_remove,
	.driver = {
		.name = DRV_NAME,
		.of_match_table = ftgmac100_of_match,
	},
};
module_platform_driver(ftgmac100_driver);

MODULE_AUTHOR("Po-Yu Chuang <ratbert@faraday-tech.com>");
MODULE_DESCRIPTION("FTGMAC100 driver");
MODULE_LICENSE("GPL");