1 /* 2 * Faraday FTGMAC100 Gigabit Ethernet 3 * 4 * (C) Copyright 2009-2011 Faraday Technology 5 * Po-Yu Chuang <ratbert@faraday-tech.com> 6 * 7 * This program is free software; you can redistribute it and/or modify 8 * it under the terms of the GNU General Public License as published by 9 * the Free Software Foundation; either version 2 of the License, or 10 * (at your option) any later version. 11 * 12 * This program is distributed in the hope that it will be useful, 13 * but WITHOUT ANY WARRANTY; without even the implied warranty of 14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 15 * GNU General Public License for more details. 16 * 17 * You should have received a copy of the GNU General Public License 18 * along with this program; if not, write to the Free Software 19 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 20 */ 21 22 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 23 24 #include <linux/dma-mapping.h> 25 #include <linux/etherdevice.h> 26 #include <linux/ethtool.h> 27 #include <linux/interrupt.h> 28 #include <linux/io.h> 29 #include <linux/module.h> 30 #include <linux/netdevice.h> 31 #include <linux/of.h> 32 #include <linux/phy.h> 33 #include <linux/platform_device.h> 34 #include <linux/property.h> 35 #include <net/ip.h> 36 #include <net/ncsi.h> 37 38 #include "ftgmac100.h" 39 40 #define DRV_NAME "ftgmac100" 41 #define DRV_VERSION "0.7" 42 43 /* Arbitrary values, I am not sure the HW has limits */ 44 #define MAX_RX_QUEUE_ENTRIES 1024 45 #define MAX_TX_QUEUE_ENTRIES 1024 46 #define MIN_RX_QUEUE_ENTRIES 32 47 #define MIN_TX_QUEUE_ENTRIES 32 48 49 /* Defaults */ 50 #define DEF_RX_QUEUE_ENTRIES 256 51 #define DEF_TX_QUEUE_ENTRIES 512 52 53 #define MAX_PKT_SIZE 1536 54 #define RX_BUF_SIZE MAX_PKT_SIZE /* must be smaller than 0x3fff */ 55 56 /* Min number of tx ring entries before stopping queue */ 57 #define TX_THRESHOLD (MAX_SKB_FRAGS + 1) 58 59 struct ftgmac100 { 60 /* Registers */ 61 struct resource *res; 62 void __iomem *base; 63 64 /* Rx ring */ 65 unsigned int rx_q_entries; 66 struct ftgmac100_rxdes *rxdes; 67 dma_addr_t rxdes_dma; 68 struct sk_buff **rx_skbs; 69 unsigned int rx_pointer; 70 u32 rxdes0_edorr_mask; 71 72 /* Tx ring */ 73 unsigned int tx_q_entries; 74 struct ftgmac100_txdes *txdes; 75 dma_addr_t txdes_dma; 76 struct sk_buff **tx_skbs; 77 unsigned int tx_clean_pointer; 78 unsigned int tx_pointer; 79 u32 txdes0_edotr_mask; 80 81 /* Used to signal the reset task of ring change request */ 82 unsigned int new_rx_q_entries; 83 unsigned int new_tx_q_entries; 84 85 /* Scratch page to use when rx skb alloc fails */ 86 void *rx_scratch; 87 dma_addr_t rx_scratch_dma; 88 89 /* Component structures */ 90 struct net_device *netdev; 91 struct device *dev; 92 struct ncsi_dev *ndev; 93 struct napi_struct napi; 94 struct work_struct reset_task; 95 struct mii_bus *mii_bus; 96 97 /* Link management */ 98 int cur_speed; 99 int cur_duplex; 100 bool use_ncsi; 101 102 /* Misc */ 103 bool need_mac_restart; 104 bool is_aspeed; 105 }; 106 107 static int ftgmac100_reset_mac(struct ftgmac100 *priv, u32 maccr) 108 { 109 struct net_device *netdev = priv->netdev; 110 int i; 111 112 /* NOTE: reset clears all registers */ 113 iowrite32(maccr, priv->base + FTGMAC100_OFFSET_MACCR); 114 iowrite32(maccr | FTGMAC100_MACCR_SW_RST, 115 priv->base + FTGMAC100_OFFSET_MACCR); 116 for (i = 0; i < 50; i++) { 117 unsigned int maccr; 118 119 maccr = ioread32(priv->base + FTGMAC100_OFFSET_MACCR); 120 if (!(maccr & FTGMAC100_MACCR_SW_RST)) 121 return 0; 122 123 udelay(1); 124 } 125 126 netdev_err(netdev, "Hardware reset failed\n"); 127 return -EIO; 128 } 129 130 static int ftgmac100_reset_and_config_mac(struct ftgmac100 *priv) 131 { 132 u32 maccr = 0; 133 134 switch (priv->cur_speed) { 135 case SPEED_10: 136 case 0: /* no link */ 137 break; 138 139 case SPEED_100: 140 maccr |= FTGMAC100_MACCR_FAST_MODE; 141 break; 142 143 case SPEED_1000: 144 maccr |= FTGMAC100_MACCR_GIGA_MODE; 145 break; 146 default: 147 netdev_err(priv->netdev, "Unknown speed %d !\n", 148 priv->cur_speed); 149 break; 150 } 151 152 /* (Re)initialize the queue pointers */ 153 priv->rx_pointer = 0; 154 priv->tx_clean_pointer = 0; 155 priv->tx_pointer = 0; 156 157 /* The doc says reset twice with 10us interval */ 158 if (ftgmac100_reset_mac(priv, maccr)) 159 return -EIO; 160 usleep_range(10, 1000); 161 return ftgmac100_reset_mac(priv, maccr); 162 } 163 164 static void ftgmac100_write_mac_addr(struct ftgmac100 *priv, const u8 *mac) 165 { 166 unsigned int maddr = mac[0] << 8 | mac[1]; 167 unsigned int laddr = mac[2] << 24 | mac[3] << 16 | mac[4] << 8 | mac[5]; 168 169 iowrite32(maddr, priv->base + FTGMAC100_OFFSET_MAC_MADR); 170 iowrite32(laddr, priv->base + FTGMAC100_OFFSET_MAC_LADR); 171 } 172 173 static void ftgmac100_initial_mac(struct ftgmac100 *priv) 174 { 175 u8 mac[ETH_ALEN]; 176 unsigned int m; 177 unsigned int l; 178 void *addr; 179 180 addr = device_get_mac_address(priv->dev, mac, ETH_ALEN); 181 if (addr) { 182 ether_addr_copy(priv->netdev->dev_addr, mac); 183 dev_info(priv->dev, "Read MAC address %pM from device tree\n", 184 mac); 185 return; 186 } 187 188 m = ioread32(priv->base + FTGMAC100_OFFSET_MAC_MADR); 189 l = ioread32(priv->base + FTGMAC100_OFFSET_MAC_LADR); 190 191 mac[0] = (m >> 8) & 0xff; 192 mac[1] = m & 0xff; 193 mac[2] = (l >> 24) & 0xff; 194 mac[3] = (l >> 16) & 0xff; 195 mac[4] = (l >> 8) & 0xff; 196 mac[5] = l & 0xff; 197 198 if (is_valid_ether_addr(mac)) { 199 ether_addr_copy(priv->netdev->dev_addr, mac); 200 dev_info(priv->dev, "Read MAC address %pM from chip\n", mac); 201 } else { 202 eth_hw_addr_random(priv->netdev); 203 dev_info(priv->dev, "Generated random MAC address %pM\n", 204 priv->netdev->dev_addr); 205 } 206 } 207 208 static int ftgmac100_set_mac_addr(struct net_device *dev, void *p) 209 { 210 int ret; 211 212 ret = eth_prepare_mac_addr_change(dev, p); 213 if (ret < 0) 214 return ret; 215 216 eth_commit_mac_addr_change(dev, p); 217 ftgmac100_write_mac_addr(netdev_priv(dev), dev->dev_addr); 218 219 return 0; 220 } 221 222 static void ftgmac100_init_hw(struct ftgmac100 *priv) 223 { 224 u32 reg, rfifo_sz, tfifo_sz; 225 226 /* Clear stale interrupts */ 227 reg = ioread32(priv->base + FTGMAC100_OFFSET_ISR); 228 iowrite32(reg, priv->base + FTGMAC100_OFFSET_ISR); 229 230 /* Setup RX ring buffer base */ 231 iowrite32(priv->rxdes_dma, priv->base + FTGMAC100_OFFSET_RXR_BADR); 232 233 /* Setup TX ring buffer base */ 234 iowrite32(priv->txdes_dma, priv->base + FTGMAC100_OFFSET_NPTXR_BADR); 235 236 /* Configure RX buffer size */ 237 iowrite32(FTGMAC100_RBSR_SIZE(RX_BUF_SIZE), 238 priv->base + FTGMAC100_OFFSET_RBSR); 239 240 /* Set RX descriptor autopoll */ 241 iowrite32(FTGMAC100_APTC_RXPOLL_CNT(1), 242 priv->base + FTGMAC100_OFFSET_APTC); 243 244 /* Write MAC address */ 245 ftgmac100_write_mac_addr(priv, priv->netdev->dev_addr); 246 247 /* Configure descriptor sizes and increase burst sizes according 248 * to values in Aspeed SDK. The FIFO arbitration is enabled and 249 * the thresholds set based on the recommended values in the 250 * AST2400 specification. 251 */ 252 iowrite32(FTGMAC100_DBLAC_RXDES_SIZE(2) | /* 2*8 bytes RX descs */ 253 FTGMAC100_DBLAC_TXDES_SIZE(2) | /* 2*8 bytes TX descs */ 254 FTGMAC100_DBLAC_RXBURST_SIZE(3) | /* 512 bytes max RX bursts */ 255 FTGMAC100_DBLAC_TXBURST_SIZE(3) | /* 512 bytes max TX bursts */ 256 FTGMAC100_DBLAC_RX_THR_EN | /* Enable fifo threshold arb */ 257 FTGMAC100_DBLAC_RXFIFO_HTHR(6) | /* 6/8 of FIFO high threshold */ 258 FTGMAC100_DBLAC_RXFIFO_LTHR(2), /* 2/8 of FIFO low threshold */ 259 priv->base + FTGMAC100_OFFSET_DBLAC); 260 261 /* Interrupt mitigation configured for 1 interrupt/packet. HW interrupt 262 * mitigation doesn't seem to provide any benefit with NAPI so leave 263 * it at that. 264 */ 265 iowrite32(FTGMAC100_ITC_RXINT_THR(1) | 266 FTGMAC100_ITC_TXINT_THR(1), 267 priv->base + FTGMAC100_OFFSET_ITC); 268 269 /* Configure FIFO sizes in the TPAFCR register */ 270 reg = ioread32(priv->base + FTGMAC100_OFFSET_FEAR); 271 rfifo_sz = reg & 0x00000007; 272 tfifo_sz = (reg >> 3) & 0x00000007; 273 reg = ioread32(priv->base + FTGMAC100_OFFSET_TPAFCR); 274 reg &= ~0x3f000000; 275 reg |= (tfifo_sz << 27); 276 reg |= (rfifo_sz << 24); 277 iowrite32(reg, priv->base + FTGMAC100_OFFSET_TPAFCR); 278 } 279 280 static void ftgmac100_start_hw(struct ftgmac100 *priv) 281 { 282 u32 maccr = ioread32(priv->base + FTGMAC100_OFFSET_MACCR); 283 284 /* Keep the original GMAC and FAST bits */ 285 maccr &= (FTGMAC100_MACCR_FAST_MODE | FTGMAC100_MACCR_GIGA_MODE); 286 287 /* Add all the main enable bits */ 288 maccr |= FTGMAC100_MACCR_TXDMA_EN | 289 FTGMAC100_MACCR_RXDMA_EN | 290 FTGMAC100_MACCR_TXMAC_EN | 291 FTGMAC100_MACCR_RXMAC_EN | 292 FTGMAC100_MACCR_CRC_APD | 293 FTGMAC100_MACCR_PHY_LINK_LEVEL | 294 FTGMAC100_MACCR_RX_RUNT | 295 FTGMAC100_MACCR_RX_BROADPKT; 296 297 /* Add other bits as needed */ 298 if (priv->cur_duplex == DUPLEX_FULL) 299 maccr |= FTGMAC100_MACCR_FULLDUP; 300 301 /* Hit the HW */ 302 iowrite32(maccr, priv->base + FTGMAC100_OFFSET_MACCR); 303 } 304 305 static void ftgmac100_stop_hw(struct ftgmac100 *priv) 306 { 307 iowrite32(0, priv->base + FTGMAC100_OFFSET_MACCR); 308 } 309 310 static int ftgmac100_alloc_rx_buf(struct ftgmac100 *priv, unsigned int entry, 311 struct ftgmac100_rxdes *rxdes, gfp_t gfp) 312 { 313 struct net_device *netdev = priv->netdev; 314 struct sk_buff *skb; 315 dma_addr_t map; 316 int err; 317 318 skb = netdev_alloc_skb_ip_align(netdev, RX_BUF_SIZE); 319 if (unlikely(!skb)) { 320 if (net_ratelimit()) 321 netdev_warn(netdev, "failed to allocate rx skb\n"); 322 err = -ENOMEM; 323 map = priv->rx_scratch_dma; 324 } else { 325 map = dma_map_single(priv->dev, skb->data, RX_BUF_SIZE, 326 DMA_FROM_DEVICE); 327 if (unlikely(dma_mapping_error(priv->dev, map))) { 328 if (net_ratelimit()) 329 netdev_err(netdev, "failed to map rx page\n"); 330 dev_kfree_skb_any(skb); 331 map = priv->rx_scratch_dma; 332 skb = NULL; 333 err = -ENOMEM; 334 } 335 } 336 337 /* Store skb */ 338 priv->rx_skbs[entry] = skb; 339 340 /* Store DMA address into RX desc */ 341 rxdes->rxdes3 = cpu_to_le32(map); 342 343 /* Ensure the above is ordered vs clearing the OWN bit */ 344 dma_wmb(); 345 346 /* Clean status (which resets own bit) */ 347 if (entry == (priv->rx_q_entries - 1)) 348 rxdes->rxdes0 = cpu_to_le32(priv->rxdes0_edorr_mask); 349 else 350 rxdes->rxdes0 = 0; 351 352 return 0; 353 } 354 355 static unsigned int ftgmac100_next_rx_pointer(struct ftgmac100 *priv, 356 unsigned int pointer) 357 { 358 return (pointer + 1) & (priv->rx_q_entries - 1); 359 } 360 361 static void ftgmac100_rx_packet_error(struct ftgmac100 *priv, u32 status) 362 { 363 struct net_device *netdev = priv->netdev; 364 365 if (status & FTGMAC100_RXDES0_RX_ERR) 366 netdev->stats.rx_errors++; 367 368 if (status & FTGMAC100_RXDES0_CRC_ERR) 369 netdev->stats.rx_crc_errors++; 370 371 if (status & (FTGMAC100_RXDES0_FTL | 372 FTGMAC100_RXDES0_RUNT | 373 FTGMAC100_RXDES0_RX_ODD_NB)) 374 netdev->stats.rx_length_errors++; 375 } 376 377 static bool ftgmac100_rx_packet(struct ftgmac100 *priv, int *processed) 378 { 379 struct net_device *netdev = priv->netdev; 380 struct ftgmac100_rxdes *rxdes; 381 struct sk_buff *skb; 382 unsigned int pointer, size; 383 u32 status, csum_vlan; 384 dma_addr_t map; 385 386 /* Grab next RX descriptor */ 387 pointer = priv->rx_pointer; 388 rxdes = &priv->rxdes[pointer]; 389 390 /* Grab descriptor status */ 391 status = le32_to_cpu(rxdes->rxdes0); 392 393 /* Do we have a packet ? */ 394 if (!(status & FTGMAC100_RXDES0_RXPKT_RDY)) 395 return false; 396 397 /* Order subsequent reads with the test for the ready bit */ 398 dma_rmb(); 399 400 /* We don't cope with fragmented RX packets */ 401 if (unlikely(!(status & FTGMAC100_RXDES0_FRS) || 402 !(status & FTGMAC100_RXDES0_LRS))) 403 goto drop; 404 405 /* Grab received size and csum vlan field in the descriptor */ 406 size = status & FTGMAC100_RXDES0_VDBC; 407 csum_vlan = le32_to_cpu(rxdes->rxdes1); 408 409 /* Any error (other than csum offload) flagged ? */ 410 if (unlikely(status & RXDES0_ANY_ERROR)) { 411 /* Correct for incorrect flagging of runt packets 412 * with vlan tags... Just accept a runt packet that 413 * has been flagged as vlan and whose size is at 414 * least 60 bytes. 415 */ 416 if ((status & FTGMAC100_RXDES0_RUNT) && 417 (csum_vlan & FTGMAC100_RXDES1_VLANTAG_AVAIL) && 418 (size >= 60)) 419 status &= ~FTGMAC100_RXDES0_RUNT; 420 421 /* Any error still in there ? */ 422 if (status & RXDES0_ANY_ERROR) { 423 ftgmac100_rx_packet_error(priv, status); 424 goto drop; 425 } 426 } 427 428 /* If the packet had no skb (failed to allocate earlier) 429 * then try to allocate one and skip 430 */ 431 skb = priv->rx_skbs[pointer]; 432 if (!unlikely(skb)) { 433 ftgmac100_alloc_rx_buf(priv, pointer, rxdes, GFP_ATOMIC); 434 goto drop; 435 } 436 437 if (unlikely(status & FTGMAC100_RXDES0_MULTICAST)) 438 netdev->stats.multicast++; 439 440 /* If the HW found checksum errors, bounce it to software. 441 * 442 * If we didn't, we need to see if the packet was recognized 443 * by HW as one of the supported checksummed protocols before 444 * we accept the HW test results. 445 */ 446 if (netdev->features & NETIF_F_RXCSUM) { 447 u32 err_bits = FTGMAC100_RXDES1_TCP_CHKSUM_ERR | 448 FTGMAC100_RXDES1_UDP_CHKSUM_ERR | 449 FTGMAC100_RXDES1_IP_CHKSUM_ERR; 450 if ((csum_vlan & err_bits) || 451 !(csum_vlan & FTGMAC100_RXDES1_PROT_MASK)) 452 skb->ip_summed = CHECKSUM_NONE; 453 else 454 skb->ip_summed = CHECKSUM_UNNECESSARY; 455 } 456 457 /* Transfer received size to skb */ 458 skb_put(skb, size); 459 460 /* Tear down DMA mapping, do necessary cache management */ 461 map = le32_to_cpu(rxdes->rxdes3); 462 463 #if defined(CONFIG_ARM) && !defined(CONFIG_ARM_DMA_USE_IOMMU) 464 /* When we don't have an iommu, we can save cycles by not 465 * invalidating the cache for the part of the packet that 466 * wasn't received. 467 */ 468 dma_unmap_single(priv->dev, map, size, DMA_FROM_DEVICE); 469 #else 470 dma_unmap_single(priv->dev, map, RX_BUF_SIZE, DMA_FROM_DEVICE); 471 #endif 472 473 474 /* Resplenish rx ring */ 475 ftgmac100_alloc_rx_buf(priv, pointer, rxdes, GFP_ATOMIC); 476 priv->rx_pointer = ftgmac100_next_rx_pointer(priv, pointer); 477 478 skb->protocol = eth_type_trans(skb, netdev); 479 480 netdev->stats.rx_packets++; 481 netdev->stats.rx_bytes += size; 482 483 /* push packet to protocol stack */ 484 if (skb->ip_summed == CHECKSUM_NONE) 485 netif_receive_skb(skb); 486 else 487 napi_gro_receive(&priv->napi, skb); 488 489 (*processed)++; 490 return true; 491 492 drop: 493 /* Clean rxdes0 (which resets own bit) */ 494 rxdes->rxdes0 = cpu_to_le32(status & priv->rxdes0_edorr_mask); 495 priv->rx_pointer = ftgmac100_next_rx_pointer(priv, pointer); 496 netdev->stats.rx_dropped++; 497 return true; 498 } 499 500 static u32 ftgmac100_base_tx_ctlstat(struct ftgmac100 *priv, 501 unsigned int index) 502 { 503 if (index == (priv->tx_q_entries - 1)) 504 return priv->txdes0_edotr_mask; 505 else 506 return 0; 507 } 508 509 static unsigned int ftgmac100_next_tx_pointer(struct ftgmac100 *priv, 510 unsigned int pointer) 511 { 512 return (pointer + 1) & (priv->tx_q_entries - 1); 513 } 514 515 static u32 ftgmac100_tx_buf_avail(struct ftgmac100 *priv) 516 { 517 /* Returns the number of available slots in the TX queue 518 * 519 * This always leaves one free slot so we don't have to 520 * worry about empty vs. full, and this simplifies the 521 * test for ftgmac100_tx_buf_cleanable() below 522 */ 523 return (priv->tx_clean_pointer - priv->tx_pointer - 1) & 524 (priv->tx_q_entries - 1); 525 } 526 527 static bool ftgmac100_tx_buf_cleanable(struct ftgmac100 *priv) 528 { 529 return priv->tx_pointer != priv->tx_clean_pointer; 530 } 531 532 static void ftgmac100_free_tx_packet(struct ftgmac100 *priv, 533 unsigned int pointer, 534 struct sk_buff *skb, 535 struct ftgmac100_txdes *txdes, 536 u32 ctl_stat) 537 { 538 dma_addr_t map = le32_to_cpu(txdes->txdes3); 539 size_t len; 540 541 if (ctl_stat & FTGMAC100_TXDES0_FTS) { 542 len = skb_headlen(skb); 543 dma_unmap_single(priv->dev, map, len, DMA_TO_DEVICE); 544 } else { 545 len = FTGMAC100_TXDES0_TXBUF_SIZE(ctl_stat); 546 dma_unmap_page(priv->dev, map, len, DMA_TO_DEVICE); 547 } 548 549 /* Free SKB on last segment */ 550 if (ctl_stat & FTGMAC100_TXDES0_LTS) 551 dev_kfree_skb(skb); 552 priv->tx_skbs[pointer] = NULL; 553 } 554 555 static bool ftgmac100_tx_complete_packet(struct ftgmac100 *priv) 556 { 557 struct net_device *netdev = priv->netdev; 558 struct ftgmac100_txdes *txdes; 559 struct sk_buff *skb; 560 unsigned int pointer; 561 u32 ctl_stat; 562 563 pointer = priv->tx_clean_pointer; 564 txdes = &priv->txdes[pointer]; 565 566 ctl_stat = le32_to_cpu(txdes->txdes0); 567 if (ctl_stat & FTGMAC100_TXDES0_TXDMA_OWN) 568 return false; 569 570 skb = priv->tx_skbs[pointer]; 571 netdev->stats.tx_packets++; 572 netdev->stats.tx_bytes += skb->len; 573 ftgmac100_free_tx_packet(priv, pointer, skb, txdes, ctl_stat); 574 txdes->txdes0 = cpu_to_le32(ctl_stat & priv->txdes0_edotr_mask); 575 576 priv->tx_clean_pointer = ftgmac100_next_tx_pointer(priv, pointer); 577 578 return true; 579 } 580 581 static void ftgmac100_tx_complete(struct ftgmac100 *priv) 582 { 583 struct net_device *netdev = priv->netdev; 584 585 /* Process all completed packets */ 586 while (ftgmac100_tx_buf_cleanable(priv) && 587 ftgmac100_tx_complete_packet(priv)) 588 ; 589 590 /* Restart queue if needed */ 591 smp_mb(); 592 if (unlikely(netif_queue_stopped(netdev) && 593 ftgmac100_tx_buf_avail(priv) >= TX_THRESHOLD)) { 594 struct netdev_queue *txq; 595 596 txq = netdev_get_tx_queue(netdev, 0); 597 __netif_tx_lock(txq, smp_processor_id()); 598 if (netif_queue_stopped(netdev) && 599 ftgmac100_tx_buf_avail(priv) >= TX_THRESHOLD) 600 netif_wake_queue(netdev); 601 __netif_tx_unlock(txq); 602 } 603 } 604 605 static bool ftgmac100_prep_tx_csum(struct sk_buff *skb, u32 *csum_vlan) 606 { 607 if (skb->protocol == cpu_to_be16(ETH_P_IP)) { 608 u8 ip_proto = ip_hdr(skb)->protocol; 609 610 *csum_vlan |= FTGMAC100_TXDES1_IP_CHKSUM; 611 switch(ip_proto) { 612 case IPPROTO_TCP: 613 *csum_vlan |= FTGMAC100_TXDES1_TCP_CHKSUM; 614 return true; 615 case IPPROTO_UDP: 616 *csum_vlan |= FTGMAC100_TXDES1_UDP_CHKSUM; 617 return true; 618 case IPPROTO_IP: 619 return true; 620 } 621 } 622 return skb_checksum_help(skb) == 0; 623 } 624 625 static int ftgmac100_hard_start_xmit(struct sk_buff *skb, 626 struct net_device *netdev) 627 { 628 struct ftgmac100 *priv = netdev_priv(netdev); 629 struct ftgmac100_txdes *txdes, *first; 630 unsigned int pointer, nfrags, len, i, j; 631 u32 f_ctl_stat, ctl_stat, csum_vlan; 632 dma_addr_t map; 633 634 /* The HW doesn't pad small frames */ 635 if (eth_skb_pad(skb)) { 636 netdev->stats.tx_dropped++; 637 return NETDEV_TX_OK; 638 } 639 640 /* Reject oversize packets */ 641 if (unlikely(skb->len > MAX_PKT_SIZE)) { 642 if (net_ratelimit()) 643 netdev_dbg(netdev, "tx packet too big\n"); 644 goto drop; 645 } 646 647 /* Do we have a limit on #fragments ? I yet have to get a reply 648 * from Aspeed. If there's one I haven't hit it. 649 */ 650 nfrags = skb_shinfo(skb)->nr_frags; 651 652 /* Get header len */ 653 len = skb_headlen(skb); 654 655 /* Map the packet head */ 656 map = dma_map_single(priv->dev, skb->data, len, DMA_TO_DEVICE); 657 if (dma_mapping_error(priv->dev, map)) { 658 if (net_ratelimit()) 659 netdev_err(netdev, "map tx packet head failed\n"); 660 goto drop; 661 } 662 663 /* Grab the next free tx descriptor */ 664 pointer = priv->tx_pointer; 665 txdes = first = &priv->txdes[pointer]; 666 667 /* Setup it up with the packet head. Don't write the head to the 668 * ring just yet 669 */ 670 priv->tx_skbs[pointer] = skb; 671 f_ctl_stat = ftgmac100_base_tx_ctlstat(priv, pointer); 672 f_ctl_stat |= FTGMAC100_TXDES0_TXDMA_OWN; 673 f_ctl_stat |= FTGMAC100_TXDES0_TXBUF_SIZE(len); 674 f_ctl_stat |= FTGMAC100_TXDES0_FTS; 675 if (nfrags == 0) 676 f_ctl_stat |= FTGMAC100_TXDES0_LTS; 677 txdes->txdes3 = cpu_to_le32(map); 678 679 /* Setup HW checksumming */ 680 csum_vlan = 0; 681 if (skb->ip_summed == CHECKSUM_PARTIAL && 682 !ftgmac100_prep_tx_csum(skb, &csum_vlan)) 683 goto drop; 684 txdes->txdes1 = cpu_to_le32(csum_vlan); 685 686 /* Next descriptor */ 687 pointer = ftgmac100_next_tx_pointer(priv, pointer); 688 689 /* Add the fragments */ 690 for (i = 0; i < nfrags; i++) { 691 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 692 693 len = frag->size; 694 695 /* Map it */ 696 map = skb_frag_dma_map(priv->dev, frag, 0, len, 697 DMA_TO_DEVICE); 698 if (dma_mapping_error(priv->dev, map)) 699 goto dma_err; 700 701 /* Setup descriptor */ 702 priv->tx_skbs[pointer] = skb; 703 txdes = &priv->txdes[pointer]; 704 ctl_stat = ftgmac100_base_tx_ctlstat(priv, pointer); 705 ctl_stat |= FTGMAC100_TXDES0_TXDMA_OWN; 706 ctl_stat |= FTGMAC100_TXDES0_TXBUF_SIZE(len); 707 if (i == (nfrags - 1)) 708 ctl_stat |= FTGMAC100_TXDES0_LTS; 709 txdes->txdes0 = cpu_to_le32(ctl_stat); 710 txdes->txdes1 = 0; 711 txdes->txdes3 = cpu_to_le32(map); 712 713 /* Next one */ 714 pointer = ftgmac100_next_tx_pointer(priv, pointer); 715 } 716 717 /* Order the previous packet and descriptor udpates 718 * before setting the OWN bit on the first descriptor. 719 */ 720 dma_wmb(); 721 first->txdes0 = cpu_to_le32(f_ctl_stat); 722 723 /* Update next TX pointer */ 724 priv->tx_pointer = pointer; 725 726 /* If there isn't enough room for all the fragments of a new packet 727 * in the TX ring, stop the queue. The sequence below is race free 728 * vs. a concurrent restart in ftgmac100_poll() 729 */ 730 if (unlikely(ftgmac100_tx_buf_avail(priv) < TX_THRESHOLD)) { 731 netif_stop_queue(netdev); 732 /* Order the queue stop with the test below */ 733 smp_mb(); 734 if (ftgmac100_tx_buf_avail(priv) >= TX_THRESHOLD) 735 netif_wake_queue(netdev); 736 } 737 738 /* Poke transmitter to read the updated TX descriptors */ 739 iowrite32(1, priv->base + FTGMAC100_OFFSET_NPTXPD); 740 741 return NETDEV_TX_OK; 742 743 dma_err: 744 if (net_ratelimit()) 745 netdev_err(netdev, "map tx fragment failed\n"); 746 747 /* Free head */ 748 pointer = priv->tx_pointer; 749 ftgmac100_free_tx_packet(priv, pointer, skb, first, f_ctl_stat); 750 first->txdes0 = cpu_to_le32(f_ctl_stat & priv->txdes0_edotr_mask); 751 752 /* Then all fragments */ 753 for (j = 0; j < i; j++) { 754 pointer = ftgmac100_next_tx_pointer(priv, pointer); 755 txdes = &priv->txdes[pointer]; 756 ctl_stat = le32_to_cpu(txdes->txdes0); 757 ftgmac100_free_tx_packet(priv, pointer, skb, txdes, ctl_stat); 758 txdes->txdes0 = cpu_to_le32(ctl_stat & priv->txdes0_edotr_mask); 759 } 760 761 /* This cannot be reached if we successfully mapped the 762 * last fragment, so we know ftgmac100_free_tx_packet() 763 * hasn't freed the skb yet. 764 */ 765 drop: 766 /* Drop the packet */ 767 dev_kfree_skb_any(skb); 768 netdev->stats.tx_dropped++; 769 770 return NETDEV_TX_OK; 771 } 772 773 static void ftgmac100_free_buffers(struct ftgmac100 *priv) 774 { 775 int i; 776 777 /* Free all RX buffers */ 778 for (i = 0; i < priv->rx_q_entries; i++) { 779 struct ftgmac100_rxdes *rxdes = &priv->rxdes[i]; 780 struct sk_buff *skb = priv->rx_skbs[i]; 781 dma_addr_t map = le32_to_cpu(rxdes->rxdes3); 782 783 if (!skb) 784 continue; 785 786 priv->rx_skbs[i] = NULL; 787 dma_unmap_single(priv->dev, map, RX_BUF_SIZE, DMA_FROM_DEVICE); 788 dev_kfree_skb_any(skb); 789 } 790 791 /* Free all TX buffers */ 792 for (i = 0; i < priv->tx_q_entries; i++) { 793 struct ftgmac100_txdes *txdes = &priv->txdes[i]; 794 struct sk_buff *skb = priv->tx_skbs[i]; 795 796 if (!skb) 797 continue; 798 ftgmac100_free_tx_packet(priv, i, skb, txdes, 799 le32_to_cpu(txdes->txdes0)); 800 } 801 } 802 803 static void ftgmac100_free_rings(struct ftgmac100 *priv) 804 { 805 /* Free skb arrays */ 806 kfree(priv->rx_skbs); 807 kfree(priv->tx_skbs); 808 809 /* Free descriptors */ 810 if (priv->rxdes) 811 dma_free_coherent(priv->dev, MAX_RX_QUEUE_ENTRIES * 812 sizeof(struct ftgmac100_rxdes), 813 priv->rxdes, priv->rxdes_dma); 814 priv->rxdes = NULL; 815 816 if (priv->txdes) 817 dma_free_coherent(priv->dev, MAX_TX_QUEUE_ENTRIES * 818 sizeof(struct ftgmac100_txdes), 819 priv->txdes, priv->txdes_dma); 820 priv->txdes = NULL; 821 822 /* Free scratch packet buffer */ 823 if (priv->rx_scratch) 824 dma_free_coherent(priv->dev, RX_BUF_SIZE, 825 priv->rx_scratch, priv->rx_scratch_dma); 826 } 827 828 static int ftgmac100_alloc_rings(struct ftgmac100 *priv) 829 { 830 /* Allocate skb arrays */ 831 priv->rx_skbs = kcalloc(MAX_RX_QUEUE_ENTRIES, sizeof(void *), 832 GFP_KERNEL); 833 if (!priv->rx_skbs) 834 return -ENOMEM; 835 priv->tx_skbs = kcalloc(MAX_TX_QUEUE_ENTRIES, sizeof(void *), 836 GFP_KERNEL); 837 if (!priv->tx_skbs) 838 return -ENOMEM; 839 840 /* Allocate descriptors */ 841 priv->rxdes = dma_zalloc_coherent(priv->dev, 842 MAX_RX_QUEUE_ENTRIES * 843 sizeof(struct ftgmac100_rxdes), 844 &priv->rxdes_dma, GFP_KERNEL); 845 if (!priv->rxdes) 846 return -ENOMEM; 847 priv->txdes = dma_zalloc_coherent(priv->dev, 848 MAX_TX_QUEUE_ENTRIES * 849 sizeof(struct ftgmac100_txdes), 850 &priv->txdes_dma, GFP_KERNEL); 851 if (!priv->txdes) 852 return -ENOMEM; 853 854 /* Allocate scratch packet buffer */ 855 priv->rx_scratch = dma_alloc_coherent(priv->dev, 856 RX_BUF_SIZE, 857 &priv->rx_scratch_dma, 858 GFP_KERNEL); 859 if (!priv->rx_scratch) 860 return -ENOMEM; 861 862 return 0; 863 } 864 865 static void ftgmac100_init_rings(struct ftgmac100 *priv) 866 { 867 struct ftgmac100_rxdes *rxdes = NULL; 868 struct ftgmac100_txdes *txdes = NULL; 869 int i; 870 871 /* Update entries counts */ 872 priv->rx_q_entries = priv->new_rx_q_entries; 873 priv->tx_q_entries = priv->new_tx_q_entries; 874 875 if (WARN_ON(priv->rx_q_entries < MIN_RX_QUEUE_ENTRIES)) 876 return; 877 878 /* Initialize RX ring */ 879 for (i = 0; i < priv->rx_q_entries; i++) { 880 rxdes = &priv->rxdes[i]; 881 rxdes->rxdes0 = 0; 882 rxdes->rxdes3 = cpu_to_le32(priv->rx_scratch_dma); 883 } 884 /* Mark the end of the ring */ 885 rxdes->rxdes0 |= cpu_to_le32(priv->rxdes0_edorr_mask); 886 887 if (WARN_ON(priv->tx_q_entries < MIN_RX_QUEUE_ENTRIES)) 888 return; 889 890 /* Initialize TX ring */ 891 for (i = 0; i < priv->tx_q_entries; i++) { 892 txdes = &priv->txdes[i]; 893 txdes->txdes0 = 0; 894 } 895 txdes->txdes0 |= cpu_to_le32(priv->txdes0_edotr_mask); 896 } 897 898 static int ftgmac100_alloc_rx_buffers(struct ftgmac100 *priv) 899 { 900 int i; 901 902 for (i = 0; i < priv->rx_q_entries; i++) { 903 struct ftgmac100_rxdes *rxdes = &priv->rxdes[i]; 904 905 if (ftgmac100_alloc_rx_buf(priv, i, rxdes, GFP_KERNEL)) 906 return -ENOMEM; 907 } 908 return 0; 909 } 910 911 static void ftgmac100_adjust_link(struct net_device *netdev) 912 { 913 struct ftgmac100 *priv = netdev_priv(netdev); 914 struct phy_device *phydev = netdev->phydev; 915 int new_speed; 916 917 /* We store "no link" as speed 0 */ 918 if (!phydev->link) 919 new_speed = 0; 920 else 921 new_speed = phydev->speed; 922 923 if (phydev->speed == priv->cur_speed && 924 phydev->duplex == priv->cur_duplex) 925 return; 926 927 /* Print status if we have a link or we had one and just lost it, 928 * don't print otherwise. 929 */ 930 if (new_speed || priv->cur_speed) 931 phy_print_status(phydev); 932 933 priv->cur_speed = new_speed; 934 priv->cur_duplex = phydev->duplex; 935 936 /* Link is down, do nothing else */ 937 if (!new_speed) 938 return; 939 940 /* Disable all interrupts */ 941 iowrite32(0, priv->base + FTGMAC100_OFFSET_IER); 942 943 /* Reset the adapter asynchronously */ 944 schedule_work(&priv->reset_task); 945 } 946 947 static int ftgmac100_mii_probe(struct ftgmac100 *priv) 948 { 949 struct net_device *netdev = priv->netdev; 950 struct phy_device *phydev; 951 952 phydev = phy_find_first(priv->mii_bus); 953 if (!phydev) { 954 netdev_info(netdev, "%s: no PHY found\n", netdev->name); 955 return -ENODEV; 956 } 957 958 phydev = phy_connect(netdev, phydev_name(phydev), 959 &ftgmac100_adjust_link, PHY_INTERFACE_MODE_GMII); 960 961 if (IS_ERR(phydev)) { 962 netdev_err(netdev, "%s: Could not attach to PHY\n", netdev->name); 963 return PTR_ERR(phydev); 964 } 965 966 return 0; 967 } 968 969 static int ftgmac100_mdiobus_read(struct mii_bus *bus, int phy_addr, int regnum) 970 { 971 struct net_device *netdev = bus->priv; 972 struct ftgmac100 *priv = netdev_priv(netdev); 973 unsigned int phycr; 974 int i; 975 976 phycr = ioread32(priv->base + FTGMAC100_OFFSET_PHYCR); 977 978 /* preserve MDC cycle threshold */ 979 phycr &= FTGMAC100_PHYCR_MDC_CYCTHR_MASK; 980 981 phycr |= FTGMAC100_PHYCR_PHYAD(phy_addr) | 982 FTGMAC100_PHYCR_REGAD(regnum) | 983 FTGMAC100_PHYCR_MIIRD; 984 985 iowrite32(phycr, priv->base + FTGMAC100_OFFSET_PHYCR); 986 987 for (i = 0; i < 10; i++) { 988 phycr = ioread32(priv->base + FTGMAC100_OFFSET_PHYCR); 989 990 if ((phycr & FTGMAC100_PHYCR_MIIRD) == 0) { 991 int data; 992 993 data = ioread32(priv->base + FTGMAC100_OFFSET_PHYDATA); 994 return FTGMAC100_PHYDATA_MIIRDATA(data); 995 } 996 997 udelay(100); 998 } 999 1000 netdev_err(netdev, "mdio read timed out\n"); 1001 return -EIO; 1002 } 1003 1004 static int ftgmac100_mdiobus_write(struct mii_bus *bus, int phy_addr, 1005 int regnum, u16 value) 1006 { 1007 struct net_device *netdev = bus->priv; 1008 struct ftgmac100 *priv = netdev_priv(netdev); 1009 unsigned int phycr; 1010 int data; 1011 int i; 1012 1013 phycr = ioread32(priv->base + FTGMAC100_OFFSET_PHYCR); 1014 1015 /* preserve MDC cycle threshold */ 1016 phycr &= FTGMAC100_PHYCR_MDC_CYCTHR_MASK; 1017 1018 phycr |= FTGMAC100_PHYCR_PHYAD(phy_addr) | 1019 FTGMAC100_PHYCR_REGAD(regnum) | 1020 FTGMAC100_PHYCR_MIIWR; 1021 1022 data = FTGMAC100_PHYDATA_MIIWDATA(value); 1023 1024 iowrite32(data, priv->base + FTGMAC100_OFFSET_PHYDATA); 1025 iowrite32(phycr, priv->base + FTGMAC100_OFFSET_PHYCR); 1026 1027 for (i = 0; i < 10; i++) { 1028 phycr = ioread32(priv->base + FTGMAC100_OFFSET_PHYCR); 1029 1030 if ((phycr & FTGMAC100_PHYCR_MIIWR) == 0) 1031 return 0; 1032 1033 udelay(100); 1034 } 1035 1036 netdev_err(netdev, "mdio write timed out\n"); 1037 return -EIO; 1038 } 1039 1040 static void ftgmac100_get_drvinfo(struct net_device *netdev, 1041 struct ethtool_drvinfo *info) 1042 { 1043 strlcpy(info->driver, DRV_NAME, sizeof(info->driver)); 1044 strlcpy(info->version, DRV_VERSION, sizeof(info->version)); 1045 strlcpy(info->bus_info, dev_name(&netdev->dev), sizeof(info->bus_info)); 1046 } 1047 1048 static int ftgmac100_nway_reset(struct net_device *ndev) 1049 { 1050 if (!ndev->phydev) 1051 return -ENXIO; 1052 return phy_start_aneg(ndev->phydev); 1053 } 1054 1055 static void ftgmac100_get_ringparam(struct net_device *netdev, 1056 struct ethtool_ringparam *ering) 1057 { 1058 struct ftgmac100 *priv = netdev_priv(netdev); 1059 1060 memset(ering, 0, sizeof(*ering)); 1061 ering->rx_max_pending = MAX_RX_QUEUE_ENTRIES; 1062 ering->tx_max_pending = MAX_TX_QUEUE_ENTRIES; 1063 ering->rx_pending = priv->rx_q_entries; 1064 ering->tx_pending = priv->tx_q_entries; 1065 } 1066 1067 static int ftgmac100_set_ringparam(struct net_device *netdev, 1068 struct ethtool_ringparam *ering) 1069 { 1070 struct ftgmac100 *priv = netdev_priv(netdev); 1071 1072 if (ering->rx_pending > MAX_RX_QUEUE_ENTRIES || 1073 ering->tx_pending > MAX_TX_QUEUE_ENTRIES || 1074 ering->rx_pending < MIN_RX_QUEUE_ENTRIES || 1075 ering->tx_pending < MIN_TX_QUEUE_ENTRIES || 1076 !is_power_of_2(ering->rx_pending) || 1077 !is_power_of_2(ering->tx_pending)) 1078 return -EINVAL; 1079 1080 priv->new_rx_q_entries = ering->rx_pending; 1081 priv->new_tx_q_entries = ering->tx_pending; 1082 if (netif_running(netdev)) 1083 schedule_work(&priv->reset_task); 1084 1085 return 0; 1086 } 1087 1088 static const struct ethtool_ops ftgmac100_ethtool_ops = { 1089 .get_drvinfo = ftgmac100_get_drvinfo, 1090 .get_link = ethtool_op_get_link, 1091 .get_link_ksettings = phy_ethtool_get_link_ksettings, 1092 .set_link_ksettings = phy_ethtool_set_link_ksettings, 1093 .get_ringparam = ftgmac100_get_ringparam, 1094 .set_ringparam = ftgmac100_set_ringparam, 1095 }; 1096 1097 static irqreturn_t ftgmac100_interrupt(int irq, void *dev_id) 1098 { 1099 struct net_device *netdev = dev_id; 1100 struct ftgmac100 *priv = netdev_priv(netdev); 1101 unsigned int status, new_mask = FTGMAC100_INT_BAD; 1102 1103 /* Fetch and clear interrupt bits, process abnormal ones */ 1104 status = ioread32(priv->base + FTGMAC100_OFFSET_ISR); 1105 iowrite32(status, priv->base + FTGMAC100_OFFSET_ISR); 1106 if (unlikely(status & FTGMAC100_INT_BAD)) { 1107 1108 /* RX buffer unavailable */ 1109 if (status & FTGMAC100_INT_NO_RXBUF) 1110 netdev->stats.rx_over_errors++; 1111 1112 /* received packet lost due to RX FIFO full */ 1113 if (status & FTGMAC100_INT_RPKT_LOST) 1114 netdev->stats.rx_fifo_errors++; 1115 1116 /* sent packet lost due to excessive TX collision */ 1117 if (status & FTGMAC100_INT_XPKT_LOST) 1118 netdev->stats.tx_fifo_errors++; 1119 1120 /* AHB error -> Reset the chip */ 1121 if (status & FTGMAC100_INT_AHB_ERR) { 1122 if (net_ratelimit()) 1123 netdev_warn(netdev, 1124 "AHB bus error ! Resetting chip.\n"); 1125 iowrite32(0, priv->base + FTGMAC100_OFFSET_IER); 1126 schedule_work(&priv->reset_task); 1127 return IRQ_HANDLED; 1128 } 1129 1130 /* We may need to restart the MAC after such errors, delay 1131 * this until after we have freed some Rx buffers though 1132 */ 1133 priv->need_mac_restart = true; 1134 1135 /* Disable those errors until we restart */ 1136 new_mask &= ~status; 1137 } 1138 1139 /* Only enable "bad" interrupts while NAPI is on */ 1140 iowrite32(new_mask, priv->base + FTGMAC100_OFFSET_IER); 1141 1142 /* Schedule NAPI bh */ 1143 napi_schedule_irqoff(&priv->napi); 1144 1145 return IRQ_HANDLED; 1146 } 1147 1148 static bool ftgmac100_check_rx(struct ftgmac100 *priv) 1149 { 1150 struct ftgmac100_rxdes *rxdes = &priv->rxdes[priv->rx_pointer]; 1151 1152 /* Do we have a packet ? */ 1153 return !!(rxdes->rxdes0 & cpu_to_le32(FTGMAC100_RXDES0_RXPKT_RDY)); 1154 } 1155 1156 static int ftgmac100_poll(struct napi_struct *napi, int budget) 1157 { 1158 struct ftgmac100 *priv = container_of(napi, struct ftgmac100, napi); 1159 int work_done = 0; 1160 bool more; 1161 1162 /* Handle TX completions */ 1163 if (ftgmac100_tx_buf_cleanable(priv)) 1164 ftgmac100_tx_complete(priv); 1165 1166 /* Handle RX packets */ 1167 do { 1168 more = ftgmac100_rx_packet(priv, &work_done); 1169 } while (more && work_done < budget); 1170 1171 1172 /* The interrupt is telling us to kick the MAC back to life 1173 * after an RX overflow 1174 */ 1175 if (unlikely(priv->need_mac_restart)) { 1176 ftgmac100_start_hw(priv); 1177 1178 /* Re-enable "bad" interrupts */ 1179 iowrite32(FTGMAC100_INT_BAD, 1180 priv->base + FTGMAC100_OFFSET_IER); 1181 } 1182 1183 /* As long as we are waiting for transmit packets to be 1184 * completed we keep NAPI going 1185 */ 1186 if (ftgmac100_tx_buf_cleanable(priv)) 1187 work_done = budget; 1188 1189 if (work_done < budget) { 1190 /* We are about to re-enable all interrupts. However 1191 * the HW has been latching RX/TX packet interrupts while 1192 * they were masked. So we clear them first, then we need 1193 * to re-check if there's something to process 1194 */ 1195 iowrite32(FTGMAC100_INT_RXTX, 1196 priv->base + FTGMAC100_OFFSET_ISR); 1197 if (ftgmac100_check_rx(priv) || 1198 ftgmac100_tx_buf_cleanable(priv)) 1199 return budget; 1200 1201 /* deschedule NAPI */ 1202 napi_complete(napi); 1203 1204 /* enable all interrupts */ 1205 iowrite32(FTGMAC100_INT_ALL, 1206 priv->base + FTGMAC100_OFFSET_IER); 1207 } 1208 1209 return work_done; 1210 } 1211 1212 static int ftgmac100_init_all(struct ftgmac100 *priv, bool ignore_alloc_err) 1213 { 1214 int err = 0; 1215 1216 /* Re-init descriptors (adjust queue sizes) */ 1217 ftgmac100_init_rings(priv); 1218 1219 /* Realloc rx descriptors */ 1220 err = ftgmac100_alloc_rx_buffers(priv); 1221 if (err && !ignore_alloc_err) 1222 return err; 1223 1224 /* Reinit and restart HW */ 1225 ftgmac100_init_hw(priv); 1226 ftgmac100_start_hw(priv); 1227 1228 /* Re-enable the device */ 1229 napi_enable(&priv->napi); 1230 netif_start_queue(priv->netdev); 1231 1232 /* Enable all interrupts */ 1233 iowrite32(FTGMAC100_INT_ALL, priv->base + FTGMAC100_OFFSET_IER); 1234 1235 return err; 1236 } 1237 1238 static void ftgmac100_reset_task(struct work_struct *work) 1239 { 1240 struct ftgmac100 *priv = container_of(work, struct ftgmac100, 1241 reset_task); 1242 struct net_device *netdev = priv->netdev; 1243 int err; 1244 1245 netdev_dbg(netdev, "Resetting NIC...\n"); 1246 1247 /* Lock the world */ 1248 rtnl_lock(); 1249 if (netdev->phydev) 1250 mutex_lock(&netdev->phydev->lock); 1251 if (priv->mii_bus) 1252 mutex_lock(&priv->mii_bus->mdio_lock); 1253 1254 1255 /* Check if the interface is still up */ 1256 if (!netif_running(netdev)) 1257 goto bail; 1258 1259 /* Stop the network stack */ 1260 netif_trans_update(netdev); 1261 napi_disable(&priv->napi); 1262 netif_tx_disable(netdev); 1263 1264 /* Stop and reset the MAC */ 1265 ftgmac100_stop_hw(priv); 1266 err = ftgmac100_reset_and_config_mac(priv); 1267 if (err) { 1268 /* Not much we can do ... it might come back... */ 1269 netdev_err(netdev, "attempting to continue...\n"); 1270 } 1271 1272 /* Free all rx and tx buffers */ 1273 ftgmac100_free_buffers(priv); 1274 1275 /* Setup everything again and restart chip */ 1276 ftgmac100_init_all(priv, true); 1277 1278 netdev_dbg(netdev, "Reset done !\n"); 1279 bail: 1280 if (priv->mii_bus) 1281 mutex_unlock(&priv->mii_bus->mdio_lock); 1282 if (netdev->phydev) 1283 mutex_unlock(&netdev->phydev->lock); 1284 rtnl_unlock(); 1285 } 1286 1287 static int ftgmac100_open(struct net_device *netdev) 1288 { 1289 struct ftgmac100 *priv = netdev_priv(netdev); 1290 int err; 1291 1292 /* Allocate ring buffers */ 1293 err = ftgmac100_alloc_rings(priv); 1294 if (err) { 1295 netdev_err(netdev, "Failed to allocate descriptors\n"); 1296 return err; 1297 } 1298 1299 /* When using NC-SI we force the speed to 100Mbit/s full duplex, 1300 * 1301 * Otherwise we leave it set to 0 (no link), the link 1302 * message from the PHY layer will handle setting it up to 1303 * something else if needed. 1304 */ 1305 if (priv->use_ncsi) { 1306 priv->cur_duplex = DUPLEX_FULL; 1307 priv->cur_speed = SPEED_100; 1308 } else { 1309 priv->cur_duplex = 0; 1310 priv->cur_speed = 0; 1311 } 1312 1313 /* Reset the hardware */ 1314 err = ftgmac100_reset_and_config_mac(priv); 1315 if (err) 1316 goto err_hw; 1317 1318 /* Initialize NAPI */ 1319 netif_napi_add(netdev, &priv->napi, ftgmac100_poll, 64); 1320 1321 /* Grab our interrupt */ 1322 err = request_irq(netdev->irq, ftgmac100_interrupt, 0, netdev->name, netdev); 1323 if (err) { 1324 netdev_err(netdev, "failed to request irq %d\n", netdev->irq); 1325 goto err_irq; 1326 } 1327 1328 /* Start things up */ 1329 err = ftgmac100_init_all(priv, false); 1330 if (err) { 1331 netdev_err(netdev, "Failed to allocate packet buffers\n"); 1332 goto err_alloc; 1333 } 1334 1335 if (netdev->phydev) { 1336 /* If we have a PHY, start polling */ 1337 phy_start(netdev->phydev); 1338 } else if (priv->use_ncsi) { 1339 /* If using NC-SI, set our carrier on and start the stack */ 1340 netif_carrier_on(netdev); 1341 1342 /* Start the NCSI device */ 1343 err = ncsi_start_dev(priv->ndev); 1344 if (err) 1345 goto err_ncsi; 1346 } 1347 1348 return 0; 1349 1350 err_ncsi: 1351 napi_disable(&priv->napi); 1352 netif_stop_queue(netdev); 1353 err_alloc: 1354 ftgmac100_free_buffers(priv); 1355 free_irq(netdev->irq, netdev); 1356 err_irq: 1357 netif_napi_del(&priv->napi); 1358 err_hw: 1359 iowrite32(0, priv->base + FTGMAC100_OFFSET_IER); 1360 ftgmac100_free_rings(priv); 1361 return err; 1362 } 1363 1364 static int ftgmac100_stop(struct net_device *netdev) 1365 { 1366 struct ftgmac100 *priv = netdev_priv(netdev); 1367 1368 /* Note about the reset task: We are called with the rtnl lock 1369 * held, so we are synchronized against the core of the reset 1370 * task. We must not try to synchronously cancel it otherwise 1371 * we can deadlock. But since it will test for netif_running() 1372 * which has already been cleared by the net core, we don't 1373 * anything special to do. 1374 */ 1375 1376 /* disable all interrupts */ 1377 iowrite32(0, priv->base + FTGMAC100_OFFSET_IER); 1378 1379 netif_stop_queue(netdev); 1380 napi_disable(&priv->napi); 1381 netif_napi_del(&priv->napi); 1382 if (netdev->phydev) 1383 phy_stop(netdev->phydev); 1384 else if (priv->use_ncsi) 1385 ncsi_stop_dev(priv->ndev); 1386 1387 ftgmac100_stop_hw(priv); 1388 free_irq(netdev->irq, netdev); 1389 ftgmac100_free_buffers(priv); 1390 ftgmac100_free_rings(priv); 1391 1392 return 0; 1393 } 1394 1395 /* optional */ 1396 static int ftgmac100_do_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) 1397 { 1398 if (!netdev->phydev) 1399 return -ENXIO; 1400 1401 return phy_mii_ioctl(netdev->phydev, ifr, cmd); 1402 } 1403 1404 static void ftgmac100_tx_timeout(struct net_device *netdev) 1405 { 1406 struct ftgmac100 *priv = netdev_priv(netdev); 1407 1408 /* Disable all interrupts */ 1409 iowrite32(0, priv->base + FTGMAC100_OFFSET_IER); 1410 1411 /* Do the reset outside of interrupt context */ 1412 schedule_work(&priv->reset_task); 1413 } 1414 1415 static const struct net_device_ops ftgmac100_netdev_ops = { 1416 .ndo_open = ftgmac100_open, 1417 .ndo_stop = ftgmac100_stop, 1418 .ndo_start_xmit = ftgmac100_hard_start_xmit, 1419 .ndo_set_mac_address = ftgmac100_set_mac_addr, 1420 .ndo_validate_addr = eth_validate_addr, 1421 .ndo_do_ioctl = ftgmac100_do_ioctl, 1422 .ndo_tx_timeout = ftgmac100_tx_timeout, 1423 }; 1424 1425 static int ftgmac100_setup_mdio(struct net_device *netdev) 1426 { 1427 struct ftgmac100 *priv = netdev_priv(netdev); 1428 struct platform_device *pdev = to_platform_device(priv->dev); 1429 int i, err = 0; 1430 u32 reg; 1431 1432 /* initialize mdio bus */ 1433 priv->mii_bus = mdiobus_alloc(); 1434 if (!priv->mii_bus) 1435 return -EIO; 1436 1437 if (priv->is_aspeed) { 1438 /* This driver supports the old MDIO interface */ 1439 reg = ioread32(priv->base + FTGMAC100_OFFSET_REVR); 1440 reg &= ~FTGMAC100_REVR_NEW_MDIO_INTERFACE; 1441 iowrite32(reg, priv->base + FTGMAC100_OFFSET_REVR); 1442 }; 1443 1444 priv->mii_bus->name = "ftgmac100_mdio"; 1445 snprintf(priv->mii_bus->id, MII_BUS_ID_SIZE, "%s-%d", 1446 pdev->name, pdev->id); 1447 priv->mii_bus->priv = priv->netdev; 1448 priv->mii_bus->read = ftgmac100_mdiobus_read; 1449 priv->mii_bus->write = ftgmac100_mdiobus_write; 1450 1451 for (i = 0; i < PHY_MAX_ADDR; i++) 1452 priv->mii_bus->irq[i] = PHY_POLL; 1453 1454 err = mdiobus_register(priv->mii_bus); 1455 if (err) { 1456 dev_err(priv->dev, "Cannot register MDIO bus!\n"); 1457 goto err_register_mdiobus; 1458 } 1459 1460 err = ftgmac100_mii_probe(priv); 1461 if (err) { 1462 dev_err(priv->dev, "MII Probe failed!\n"); 1463 goto err_mii_probe; 1464 } 1465 1466 return 0; 1467 1468 err_mii_probe: 1469 mdiobus_unregister(priv->mii_bus); 1470 err_register_mdiobus: 1471 mdiobus_free(priv->mii_bus); 1472 return err; 1473 } 1474 1475 static void ftgmac100_destroy_mdio(struct net_device *netdev) 1476 { 1477 struct ftgmac100 *priv = netdev_priv(netdev); 1478 1479 if (!netdev->phydev) 1480 return; 1481 1482 phy_disconnect(netdev->phydev); 1483 mdiobus_unregister(priv->mii_bus); 1484 mdiobus_free(priv->mii_bus); 1485 } 1486 1487 static void ftgmac100_ncsi_handler(struct ncsi_dev *nd) 1488 { 1489 if (unlikely(nd->state != ncsi_dev_state_functional)) 1490 return; 1491 1492 netdev_info(nd->dev, "NCSI interface %s\n", 1493 nd->link_up ? "up" : "down"); 1494 } 1495 1496 static int ftgmac100_probe(struct platform_device *pdev) 1497 { 1498 struct resource *res; 1499 int irq; 1500 struct net_device *netdev; 1501 struct ftgmac100 *priv; 1502 struct device_node *np; 1503 int err = 0; 1504 1505 if (!pdev) 1506 return -ENODEV; 1507 1508 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1509 if (!res) 1510 return -ENXIO; 1511 1512 irq = platform_get_irq(pdev, 0); 1513 if (irq < 0) 1514 return irq; 1515 1516 /* setup net_device */ 1517 netdev = alloc_etherdev(sizeof(*priv)); 1518 if (!netdev) { 1519 err = -ENOMEM; 1520 goto err_alloc_etherdev; 1521 } 1522 1523 SET_NETDEV_DEV(netdev, &pdev->dev); 1524 1525 netdev->ethtool_ops = &ftgmac100_ethtool_ops; 1526 netdev->netdev_ops = &ftgmac100_netdev_ops; 1527 netdev->watchdog_timeo = 5 * HZ; 1528 1529 platform_set_drvdata(pdev, netdev); 1530 1531 /* setup private data */ 1532 priv = netdev_priv(netdev); 1533 priv->netdev = netdev; 1534 priv->dev = &pdev->dev; 1535 INIT_WORK(&priv->reset_task, ftgmac100_reset_task); 1536 1537 /* map io memory */ 1538 priv->res = request_mem_region(res->start, resource_size(res), 1539 dev_name(&pdev->dev)); 1540 if (!priv->res) { 1541 dev_err(&pdev->dev, "Could not reserve memory region\n"); 1542 err = -ENOMEM; 1543 goto err_req_mem; 1544 } 1545 1546 priv->base = ioremap(res->start, resource_size(res)); 1547 if (!priv->base) { 1548 dev_err(&pdev->dev, "Failed to ioremap ethernet registers\n"); 1549 err = -EIO; 1550 goto err_ioremap; 1551 } 1552 1553 netdev->irq = irq; 1554 1555 /* MAC address from chip or random one */ 1556 ftgmac100_initial_mac(priv); 1557 1558 np = pdev->dev.of_node; 1559 if (np && (of_device_is_compatible(np, "aspeed,ast2400-mac") || 1560 of_device_is_compatible(np, "aspeed,ast2500-mac"))) { 1561 priv->rxdes0_edorr_mask = BIT(30); 1562 priv->txdes0_edotr_mask = BIT(30); 1563 priv->is_aspeed = true; 1564 } else { 1565 priv->rxdes0_edorr_mask = BIT(15); 1566 priv->txdes0_edotr_mask = BIT(15); 1567 } 1568 1569 if (np && of_get_property(np, "use-ncsi", NULL)) { 1570 if (!IS_ENABLED(CONFIG_NET_NCSI)) { 1571 dev_err(&pdev->dev, "NCSI stack not enabled\n"); 1572 goto err_ncsi_dev; 1573 } 1574 1575 dev_info(&pdev->dev, "Using NCSI interface\n"); 1576 priv->use_ncsi = true; 1577 priv->ndev = ncsi_register_dev(netdev, ftgmac100_ncsi_handler); 1578 if (!priv->ndev) 1579 goto err_ncsi_dev; 1580 } else { 1581 priv->use_ncsi = false; 1582 err = ftgmac100_setup_mdio(netdev); 1583 if (err) 1584 goto err_setup_mdio; 1585 } 1586 1587 /* Default ring sizes */ 1588 priv->rx_q_entries = priv->new_rx_q_entries = DEF_RX_QUEUE_ENTRIES; 1589 priv->tx_q_entries = priv->new_tx_q_entries = DEF_TX_QUEUE_ENTRIES; 1590 1591 /* Base feature set */ 1592 netdev->hw_features = NETIF_F_RXCSUM | NETIF_F_HW_CSUM | 1593 NETIF_F_GRO | NETIF_F_SG; 1594 1595 /* AST2400 doesn't have working HW checksum generation */ 1596 if (np && (of_device_is_compatible(np, "aspeed,ast2400-mac"))) 1597 netdev->hw_features &= ~NETIF_F_HW_CSUM; 1598 if (np && of_get_property(np, "no-hw-checksum", NULL)) 1599 netdev->hw_features &= ~(NETIF_F_HW_CSUM | NETIF_F_RXCSUM); 1600 netdev->features |= netdev->hw_features; 1601 1602 /* register network device */ 1603 err = register_netdev(netdev); 1604 if (err) { 1605 dev_err(&pdev->dev, "Failed to register netdev\n"); 1606 goto err_register_netdev; 1607 } 1608 1609 netdev_info(netdev, "irq %d, mapped at %p\n", netdev->irq, priv->base); 1610 1611 return 0; 1612 1613 err_ncsi_dev: 1614 err_register_netdev: 1615 ftgmac100_destroy_mdio(netdev); 1616 err_setup_mdio: 1617 iounmap(priv->base); 1618 err_ioremap: 1619 release_resource(priv->res); 1620 err_req_mem: 1621 netif_napi_del(&priv->napi); 1622 free_netdev(netdev); 1623 err_alloc_etherdev: 1624 return err; 1625 } 1626 1627 static int ftgmac100_remove(struct platform_device *pdev) 1628 { 1629 struct net_device *netdev; 1630 struct ftgmac100 *priv; 1631 1632 netdev = platform_get_drvdata(pdev); 1633 priv = netdev_priv(netdev); 1634 1635 unregister_netdev(netdev); 1636 1637 /* There's a small chance the reset task will have been re-queued, 1638 * during stop, make sure it's gone before we free the structure. 1639 */ 1640 cancel_work_sync(&priv->reset_task); 1641 1642 ftgmac100_destroy_mdio(netdev); 1643 1644 iounmap(priv->base); 1645 release_resource(priv->res); 1646 1647 netif_napi_del(&priv->napi); 1648 free_netdev(netdev); 1649 return 0; 1650 } 1651 1652 static const struct of_device_id ftgmac100_of_match[] = { 1653 { .compatible = "faraday,ftgmac100" }, 1654 { } 1655 }; 1656 MODULE_DEVICE_TABLE(of, ftgmac100_of_match); 1657 1658 static struct platform_driver ftgmac100_driver = { 1659 .probe = ftgmac100_probe, 1660 .remove = ftgmac100_remove, 1661 .driver = { 1662 .name = DRV_NAME, 1663 .of_match_table = ftgmac100_of_match, 1664 }, 1665 }; 1666 module_platform_driver(ftgmac100_driver); 1667 1668 MODULE_AUTHOR("Po-Yu Chuang <ratbert@faraday-tech.com>"); 1669 MODULE_DESCRIPTION("FTGMAC100 driver"); 1670 MODULE_LICENSE("GPL"); 1671