/*
 * Faraday FTGMAC100 Gigabit Ethernet
 *
 * (C) Copyright 2009-2011 Faraday Technology
 * Po-Yu Chuang <ratbert@faraday-tech.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt

#include <linux/dma-mapping.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/of.h>
#include <linux/phy.h>
#include <linux/platform_device.h>
#include <linux/property.h>
#include <net/ip.h>
#include <net/ncsi.h>

#include "ftgmac100.h"

#define DRV_NAME	"ftgmac100"
#define DRV_VERSION	"0.7"

#define RX_QUEUE_ENTRIES	256	/* must be power of 2 */
#define TX_QUEUE_ENTRIES	512	/* must be power of 2 */

#define MAX_PKT_SIZE		1536
#define RX_BUF_SIZE		MAX_PKT_SIZE	/* must be smaller than 0x3fff */

/* Min number of tx ring entries before stopping queue */
#define TX_THRESHOLD		(1)

struct ftgmac100_descs {
	struct ftgmac100_rxdes rxdes[RX_QUEUE_ENTRIES];
	struct ftgmac100_txdes txdes[TX_QUEUE_ENTRIES];
};

struct ftgmac100 {
	/* Registers */
	struct resource *res;
	void __iomem *base;

	struct ftgmac100_descs *descs;
	dma_addr_t descs_dma_addr;

	/* Rx ring */
	struct sk_buff *rx_skbs[RX_QUEUE_ENTRIES];
	unsigned int rx_pointer;
	u32 rxdes0_edorr_mask;

	/* Tx ring */
	struct sk_buff *tx_skbs[TX_QUEUE_ENTRIES];
	unsigned int tx_clean_pointer;
	unsigned int tx_pointer;
	u32 txdes0_edotr_mask;

	/* Scratch page to use when rx skb alloc fails */
	void *rx_scratch;
	dma_addr_t rx_scratch_dma;

	/* Component structures */
	struct net_device *netdev;
	struct device *dev;
	struct ncsi_dev *ndev;
	struct napi_struct napi;
	struct work_struct reset_task;
	struct mii_bus *mii_bus;

	/* Link management */
	int cur_speed;
	int cur_duplex;
	bool use_ncsi;

	/* Misc */
	bool need_mac_restart;
};

static void ftgmac100_set_rx_ring_base(struct ftgmac100 *priv, dma_addr_t addr)
{
	iowrite32(addr, priv->base + FTGMAC100_OFFSET_RXR_BADR);
}

static void ftgmac100_set_rx_buffer_size(struct ftgmac100 *priv,
					 unsigned int size)
{
	size = FTGMAC100_RBSR_SIZE(size);
	iowrite32(size, priv->base + FTGMAC100_OFFSET_RBSR);
}

static void ftgmac100_set_normal_prio_tx_ring_base(struct ftgmac100 *priv,
						   dma_addr_t addr)
{
	iowrite32(addr, priv->base + FTGMAC100_OFFSET_NPTXR_BADR);
}

static void ftgmac100_txdma_normal_prio_start_polling(struct ftgmac100 *priv)
{
	iowrite32(1, priv->base + FTGMAC100_OFFSET_NPTXPD);
}

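/* Issue a software reset of the MAC with the given MACCR value and
 * poll until the SW_RST bit self-clears (up to ~50us) before giving up.
 */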
static int ftgmac100_reset_mac(struct ftgmac100 *priv, u32 maccr)
{
	struct net_device *netdev = priv->netdev;
	int i;

	/* NOTE: reset clears all registers */
	iowrite32(maccr, priv->base + FTGMAC100_OFFSET_MACCR);
	iowrite32(maccr | FTGMAC100_MACCR_SW_RST,
		  priv->base + FTGMAC100_OFFSET_MACCR);
	for (i = 0; i < 50; i++) {
		unsigned int maccr;

		maccr = ioread32(priv->base + FTGMAC100_OFFSET_MACCR);
		if (!(maccr & FTGMAC100_MACCR_SW_RST))
			return 0;

		udelay(1);
	}

	netdev_err(netdev, "Hardware reset failed\n");
	return -EIO;
}

static int ftgmac100_reset_and_config_mac(struct ftgmac100 *priv)
{
	u32 maccr = 0;

	switch (priv->cur_speed) {
	case SPEED_10:
	case 0: /* no link */
		break;

	case SPEED_100:
		maccr |= FTGMAC100_MACCR_FAST_MODE;
		break;

	case SPEED_1000:
		maccr |= FTGMAC100_MACCR_GIGA_MODE;
		break;
	default:
		netdev_err(priv->netdev, "Unknown speed %d !\n",
			   priv->cur_speed);
		break;
	}

	/* (Re)initialize the queue pointers */
	priv->rx_pointer = 0;
	priv->tx_clean_pointer = 0;
	priv->tx_pointer = 0;

	/* The doc says reset twice with 10us interval */
	if (ftgmac100_reset_mac(priv, maccr))
		return -EIO;
	usleep_range(10, 1000);
	return ftgmac100_reset_mac(priv, maccr);
}

static void ftgmac100_set_mac(struct ftgmac100 *priv, const unsigned char *mac)
{
	unsigned int maddr = mac[0] << 8 | mac[1];
	unsigned int laddr = mac[2] << 24 | mac[3] << 16 | mac[4] << 8 | mac[5];

	iowrite32(maddr, priv->base + FTGMAC100_OFFSET_MAC_MADR);
	iowrite32(laddr, priv->base + FTGMAC100_OFFSET_MAC_LADR);
}

static void ftgmac100_setup_mac(struct ftgmac100 *priv)
{
	u8 mac[ETH_ALEN];
	unsigned int m;
	unsigned int l;
	void *addr;

	addr = device_get_mac_address(priv->dev, mac, ETH_ALEN);
	if (addr) {
		ether_addr_copy(priv->netdev->dev_addr, mac);
		dev_info(priv->dev, "Read MAC address %pM from device tree\n",
			 mac);
		return;
	}

	m = ioread32(priv->base + FTGMAC100_OFFSET_MAC_MADR);
	l = ioread32(priv->base + FTGMAC100_OFFSET_MAC_LADR);

	mac[0] = (m >> 8) & 0xff;
	mac[1] = m & 0xff;
	mac[2] = (l >> 24) & 0xff;
	mac[3] = (l >> 16) & 0xff;
	mac[4] = (l >> 8) & 0xff;
	mac[5] = l & 0xff;

	if (is_valid_ether_addr(mac)) {
		ether_addr_copy(priv->netdev->dev_addr, mac);
		dev_info(priv->dev, "Read MAC address %pM from chip\n", mac);
	} else {
		eth_hw_addr_random(priv->netdev);
		dev_info(priv->dev, "Generated random MAC address %pM\n",
			 priv->netdev->dev_addr);
	}
}

static int ftgmac100_set_mac_addr(struct net_device *dev, void *p)
{
	int ret;

	ret = eth_prepare_mac_addr_change(dev, p);
	if (ret < 0)
		return ret;

	eth_commit_mac_addr_change(dev, p);
	ftgmac100_set_mac(netdev_priv(dev), dev->dev_addr);

	return 0;
}

static void ftgmac100_init_hw(struct ftgmac100 *priv)
{
	/* setup ring buffer base registers */
	ftgmac100_set_rx_ring_base(priv,
				   priv->descs_dma_addr +
				   offsetof(struct ftgmac100_descs, rxdes));
	ftgmac100_set_normal_prio_tx_ring_base(priv,
					       priv->descs_dma_addr +
					       offsetof(struct ftgmac100_descs, txdes));

	ftgmac100_set_rx_buffer_size(priv, RX_BUF_SIZE);

	iowrite32(FTGMAC100_APTC_RXPOLL_CNT(1), priv->base + FTGMAC100_OFFSET_APTC);

	ftgmac100_set_mac(priv, priv->netdev->dev_addr);
}

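/* Enable the RX/TX DMA engines and MAC, preserving the speed bits
 * (FAST/GIGA) programmed at reset time and applying the duplex
 * setting cached in priv->cur_duplex.
 */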
static void ftgmac100_start_hw(struct ftgmac100 *priv)
{
	u32 maccr = ioread32(priv->base + FTGMAC100_OFFSET_MACCR);

	/* Keep the original GMAC and FAST bits */
	maccr &= (FTGMAC100_MACCR_FAST_MODE | FTGMAC100_MACCR_GIGA_MODE);

	/* Add all the main enable bits */
	maccr |= FTGMAC100_MACCR_TXDMA_EN |
		 FTGMAC100_MACCR_RXDMA_EN |
		 FTGMAC100_MACCR_TXMAC_EN |
		 FTGMAC100_MACCR_RXMAC_EN |
		 FTGMAC100_MACCR_CRC_APD |
		 FTGMAC100_MACCR_PHY_LINK_LEVEL |
		 FTGMAC100_MACCR_RX_RUNT |
		 FTGMAC100_MACCR_RX_BROADPKT;

	/* Add other bits as needed */
	if (priv->cur_duplex == DUPLEX_FULL)
		maccr |= FTGMAC100_MACCR_FULLDUP;

	/* Hit the HW */
	iowrite32(maccr, priv->base + FTGMAC100_OFFSET_MACCR);
}

static void ftgmac100_stop_hw(struct ftgmac100 *priv)
{
	iowrite32(0, priv->base + FTGMAC100_OFFSET_MACCR);
}

static int ftgmac100_alloc_rx_buf(struct ftgmac100 *priv, unsigned int entry,
				  struct ftgmac100_rxdes *rxdes, gfp_t gfp)
{
	struct net_device *netdev = priv->netdev;
	struct sk_buff *skb;
	dma_addr_t map;
	int err = 0;

	skb = netdev_alloc_skb_ip_align(netdev, RX_BUF_SIZE);
	if (unlikely(!skb)) {
		if (net_ratelimit())
			netdev_warn(netdev, "failed to allocate rx skb\n");
		err = -ENOMEM;
		map = priv->rx_scratch_dma;
	} else {
		map = dma_map_single(priv->dev, skb->data, RX_BUF_SIZE,
				     DMA_FROM_DEVICE);
		if (unlikely(dma_mapping_error(priv->dev, map))) {
			if (net_ratelimit())
				netdev_err(netdev, "failed to map rx page\n");
			dev_kfree_skb_any(skb);
			map = priv->rx_scratch_dma;
			skb = NULL;
			err = -ENOMEM;
		}
	}

	/* Store skb */
	priv->rx_skbs[entry] = skb;

	/* Store DMA address into RX desc */
	rxdes->rxdes3 = cpu_to_le32(map);

	/* Ensure the above is ordered vs clearing the OWN bit */
	dma_wmb();

	/* Clean status (which resets own bit) */
	if (entry == (RX_QUEUE_ENTRIES - 1))
		rxdes->rxdes0 = cpu_to_le32(priv->rxdes0_edorr_mask);
	else
		rxdes->rxdes0 = 0;

	return err;
}

static int ftgmac100_next_rx_pointer(int pointer)
{
	return (pointer + 1) & (RX_QUEUE_ENTRIES - 1);
}

static void ftgmac100_rx_packet_error(struct ftgmac100 *priv, u32 status)
{
	struct net_device *netdev = priv->netdev;

	if (status & FTGMAC100_RXDES0_RX_ERR)
		netdev->stats.rx_errors++;

	if (status & FTGMAC100_RXDES0_CRC_ERR)
		netdev->stats.rx_crc_errors++;

	if (status & (FTGMAC100_RXDES0_FTL |
		      FTGMAC100_RXDES0_RUNT |
		      FTGMAC100_RXDES0_RX_ODD_NB))
		netdev->stats.rx_length_errors++;
}

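/* Process a single RX descriptor. Returns false if the ring is empty,
 * true if a descriptor was consumed (packet delivered or dropped),
 * and bumps *processed for every packet handed to the stack.
 */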
static bool ftgmac100_rx_packet(struct ftgmac100 *priv, int *processed)
{
	struct net_device *netdev = priv->netdev;
	struct ftgmac100_rxdes *rxdes;
	struct sk_buff *skb;
	unsigned int pointer, size;
	u32 status, csum_vlan;
	dma_addr_t map;

	/* Grab next RX descriptor */
	pointer = priv->rx_pointer;
	rxdes = &priv->descs->rxdes[pointer];

	/* Grab descriptor status */
	status = le32_to_cpu(rxdes->rxdes0);

	/* Do we have a packet ? */
	if (!(status & FTGMAC100_RXDES0_RXPKT_RDY))
		return false;

	/* Order subsequent reads with the test for the ready bit */
	dma_rmb();

	/* We don't cope with fragmented RX packets */
	if (unlikely(!(status & FTGMAC100_RXDES0_FRS) ||
		     !(status & FTGMAC100_RXDES0_LRS)))
		goto drop;

	/* Grab received size and csum vlan field in the descriptor */
	size = status & FTGMAC100_RXDES0_VDBC;
	csum_vlan = le32_to_cpu(rxdes->rxdes1);

	/* Any error (other than csum offload) flagged ? */
	if (unlikely(status & RXDES0_ANY_ERROR)) {
		/* Correct for incorrect flagging of runt packets
		 * with vlan tags... Just accept a runt packet that
		 * has been flagged as vlan and whose size is at
		 * least 60 bytes.
		 */
		if ((status & FTGMAC100_RXDES0_RUNT) &&
		    (csum_vlan & FTGMAC100_RXDES1_VLANTAG_AVAIL) &&
		    (size >= 60))
			status &= ~FTGMAC100_RXDES0_RUNT;

		/* Any error still in there ? */
		if (status & RXDES0_ANY_ERROR) {
			ftgmac100_rx_packet_error(priv, status);
			goto drop;
		}
	}

	/* If the packet had no skb (failed to allocate earlier)
	 * then try to allocate one and skip
	 */
	skb = priv->rx_skbs[pointer];
	if (unlikely(!skb)) {
		ftgmac100_alloc_rx_buf(priv, pointer, rxdes, GFP_ATOMIC);
		goto drop;
	}

	if (unlikely(status & FTGMAC100_RXDES0_MULTICAST))
		netdev->stats.multicast++;

	/* If the HW found checksum errors, bounce it to software.
	 *
	 * If we didn't, we need to see if the packet was recognized
	 * by HW as one of the supported checksummed protocols before
	 * we accept the HW test results.
	 */
	if (netdev->features & NETIF_F_RXCSUM) {
		u32 err_bits = FTGMAC100_RXDES1_TCP_CHKSUM_ERR |
			FTGMAC100_RXDES1_UDP_CHKSUM_ERR |
			FTGMAC100_RXDES1_IP_CHKSUM_ERR;
		if ((csum_vlan & err_bits) ||
		    !(csum_vlan & FTGMAC100_RXDES1_PROT_MASK))
			skb->ip_summed = CHECKSUM_NONE;
		else
			skb->ip_summed = CHECKSUM_UNNECESSARY;
	}

	/* Transfer received size to skb */
	skb_put(skb, size);

	/* Tear down DMA mapping, do necessary cache management */
	map = le32_to_cpu(rxdes->rxdes3);

#if defined(CONFIG_ARM) && !defined(CONFIG_ARM_DMA_USE_IOMMU)
	/* When we don't have an iommu, we can save cycles by not
	 * invalidating the cache for the part of the packet that
	 * wasn't received.
	 */
	dma_unmap_single(priv->dev, map, size, DMA_FROM_DEVICE);
#else
	dma_unmap_single(priv->dev, map, RX_BUF_SIZE, DMA_FROM_DEVICE);
#endif

	/* Replenish rx ring */
	ftgmac100_alloc_rx_buf(priv, pointer, rxdes, GFP_ATOMIC);
	priv->rx_pointer = ftgmac100_next_rx_pointer(pointer);

	skb->protocol = eth_type_trans(skb, netdev);

	netdev->stats.rx_packets++;
	netdev->stats.rx_bytes += size;

	/* push packet to protocol stack */
	if (skb->ip_summed == CHECKSUM_NONE)
		netif_receive_skb(skb);
	else
		napi_gro_receive(&priv->napi, skb);

	(*processed)++;
	return true;

drop:
	/* Clean rxdes0 (which resets own bit) */
	rxdes->rxdes0 = cpu_to_le32(status & priv->rxdes0_edorr_mask);
	priv->rx_pointer = ftgmac100_next_rx_pointer(pointer);
	netdev->stats.rx_dropped++;
	return true;
}

static void ftgmac100_txdes_reset(const struct ftgmac100 *priv,
				  struct ftgmac100_txdes *txdes)
{
	/* clear all except end of ring bit */
	txdes->txdes0 &= cpu_to_le32(priv->txdes0_edotr_mask);
	txdes->txdes1 = 0;
	txdes->txdes2 = 0;
	txdes->txdes3 = 0;
}

static bool ftgmac100_txdes_owned_by_dma(struct ftgmac100_txdes *txdes)
{
	return txdes->txdes0 & cpu_to_le32(FTGMAC100_TXDES0_TXDMA_OWN);
}

static void ftgmac100_txdes_set_dma_own(struct ftgmac100_txdes *txdes)
{
	txdes->txdes0 |= cpu_to_le32(FTGMAC100_TXDES0_TXDMA_OWN);
}

static void ftgmac100_txdes_set_end_of_ring(const struct ftgmac100 *priv,
					    struct ftgmac100_txdes *txdes)
{
	txdes->txdes0 |= cpu_to_le32(priv->txdes0_edotr_mask);
}

static void ftgmac100_txdes_set_first_segment(struct ftgmac100_txdes *txdes)
{
	txdes->txdes0 |= cpu_to_le32(FTGMAC100_TXDES0_FTS);
}

static void ftgmac100_txdes_set_last_segment(struct ftgmac100_txdes *txdes)
{
	txdes->txdes0 |= cpu_to_le32(FTGMAC100_TXDES0_LTS);
}

static void ftgmac100_txdes_set_buffer_size(struct ftgmac100_txdes *txdes,
					    unsigned int len)
{
	txdes->txdes0 |= cpu_to_le32(FTGMAC100_TXDES0_TXBUF_SIZE(len));
}

static void ftgmac100_txdes_set_txint(struct ftgmac100_txdes *txdes)
{
	txdes->txdes1 |= cpu_to_le32(FTGMAC100_TXDES1_TXIC);
}

static void ftgmac100_txdes_set_tcpcs(struct ftgmac100_txdes *txdes)
{
	txdes->txdes1 |= cpu_to_le32(FTGMAC100_TXDES1_TCP_CHKSUM);
}

static void ftgmac100_txdes_set_udpcs(struct ftgmac100_txdes *txdes)
{
	txdes->txdes1 |= cpu_to_le32(FTGMAC100_TXDES1_UDP_CHKSUM);
}

static void ftgmac100_txdes_set_ipcs(struct ftgmac100_txdes *txdes)
{
	txdes->txdes1 |= cpu_to_le32(FTGMAC100_TXDES1_IP_CHKSUM);
}

static void ftgmac100_txdes_set_dma_addr(struct ftgmac100_txdes *txdes,
					 dma_addr_t addr)
{
	txdes->txdes3 = cpu_to_le32(addr);
}

static dma_addr_t ftgmac100_txdes_get_dma_addr(struct ftgmac100_txdes *txdes)
{
	return le32_to_cpu(txdes->txdes3);
}

static int ftgmac100_next_tx_pointer(int pointer)
{
	return (pointer + 1) & (TX_QUEUE_ENTRIES - 1);
}

static u32 ftgmac100_tx_buf_avail(struct ftgmac100 *priv)
{
	/* Returns the number of available slots in the TX queue
	 *
	 * This always leaves one free slot so we don't have to
	 * worry about empty vs. full, and this simplifies the
	 * test for ftgmac100_tx_buf_cleanable() below
	 */
	return (priv->tx_clean_pointer - priv->tx_pointer - 1) &
		(TX_QUEUE_ENTRIES - 1);
}

static bool ftgmac100_tx_buf_cleanable(struct ftgmac100 *priv)
{
	return priv->tx_pointer != priv->tx_clean_pointer;
}

static void ftgmac100_free_tx_packet(struct ftgmac100 *priv,
				     unsigned int pointer,
				     struct sk_buff *skb,
				     struct ftgmac100_txdes *txdes)
{
	dma_addr_t map;

	map = ftgmac100_txdes_get_dma_addr(txdes);

	dma_unmap_single(priv->dev, map, skb_headlen(skb), DMA_TO_DEVICE);

	dev_kfree_skb(skb);
	priv->tx_skbs[pointer] = NULL;

	ftgmac100_txdes_reset(priv, txdes);
}

static bool ftgmac100_tx_complete_packet(struct ftgmac100 *priv)
{
	struct net_device *netdev = priv->netdev;
	struct ftgmac100_txdes *txdes;
	struct sk_buff *skb;
	unsigned int pointer;

	pointer = priv->tx_clean_pointer;
	txdes = &priv->descs->txdes[pointer];

	if (ftgmac100_txdes_owned_by_dma(txdes))
		return false;

	skb = priv->tx_skbs[pointer];
	netdev->stats.tx_packets++;
	netdev->stats.tx_bytes += skb->len;
	ftgmac100_free_tx_packet(priv, pointer, skb, txdes);

	priv->tx_clean_pointer = ftgmac100_next_tx_pointer(pointer);

	return true;
}

static void ftgmac100_tx_complete(struct ftgmac100 *priv)
{
	struct net_device *netdev = priv->netdev;

	/* Process all completed packets */
	while (ftgmac100_tx_buf_cleanable(priv) &&
	       ftgmac100_tx_complete_packet(priv))
		;

	/* Restart queue if needed */
	smp_mb();
	if (unlikely(netif_queue_stopped(netdev) &&
		     ftgmac100_tx_buf_avail(priv) >= TX_THRESHOLD)) {
		struct netdev_queue *txq;

		txq = netdev_get_tx_queue(netdev, 0);
		__netif_tx_lock(txq, smp_processor_id());
		if (netif_queue_stopped(netdev) &&
		    ftgmac100_tx_buf_avail(priv) >= TX_THRESHOLD)
			netif_wake_queue(netdev);
		__netif_tx_unlock(txq);
	}
}

static int ftgmac100_hard_start_xmit(struct sk_buff *skb,
				     struct net_device *netdev)
{
	struct ftgmac100 *priv = netdev_priv(netdev);
	struct ftgmac100_txdes *txdes;
	unsigned int pointer;
	dma_addr_t map;

	/* The HW doesn't pad small frames */
	if (eth_skb_pad(skb)) {
		netdev->stats.tx_dropped++;
		return NETDEV_TX_OK;
	}

	/* Reject oversize packets */
	if (unlikely(skb->len > MAX_PKT_SIZE)) {
		if (net_ratelimit())
			netdev_dbg(netdev, "tx packet too big\n");
		goto drop;
	}

	map = dma_map_single(priv->dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(priv->dev, map))) {
		/* drop packet */
		if (net_ratelimit())
			netdev_err(netdev, "map socket buffer failed\n");
		goto drop;
	}

	/* Grab the next free tx descriptor */
	pointer = priv->tx_pointer;
	txdes = &priv->descs->txdes[pointer];

	/* setup TX descriptor */
	priv->tx_skbs[pointer] = skb;
	ftgmac100_txdes_set_dma_addr(txdes, map);
	ftgmac100_txdes_set_buffer_size(txdes, skb->len);

	ftgmac100_txdes_set_first_segment(txdes);
	ftgmac100_txdes_set_last_segment(txdes);
	ftgmac100_txdes_set_txint(txdes);
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		__be16 protocol = skb->protocol;

		if (protocol == cpu_to_be16(ETH_P_IP)) {
			u8 ip_proto = ip_hdr(skb)->protocol;

			ftgmac100_txdes_set_ipcs(txdes);
			if (ip_proto == IPPROTO_TCP)
				ftgmac100_txdes_set_tcpcs(txdes);
			else if (ip_proto == IPPROTO_UDP)
				ftgmac100_txdes_set_udpcs(txdes);
		}
	}

	/* Order the previous packet and descriptor updates
	 * before setting the OWN bit.
	 */
	dma_wmb();
	ftgmac100_txdes_set_dma_own(txdes);

	/* Update next TX pointer */
	priv->tx_pointer = ftgmac100_next_tx_pointer(pointer);

	/* If there isn't enough room for all the fragments of a new packet
	 * in the TX ring, stop the queue. The sequence below is race free
	 * vs. a concurrent restart in ftgmac100_poll()
	 */
	if (unlikely(ftgmac100_tx_buf_avail(priv) < TX_THRESHOLD)) {
		netif_stop_queue(netdev);
		/* Order the queue stop with the test below */
		smp_mb();
		if (ftgmac100_tx_buf_avail(priv) >= TX_THRESHOLD)
			netif_wake_queue(netdev);
	}

	ftgmac100_txdma_normal_prio_start_polling(priv);

	return NETDEV_TX_OK;

drop:
	/* Drop the packet */
	dev_kfree_skb_any(skb);
	netdev->stats.tx_dropped++;

	return NETDEV_TX_OK;
}

static void ftgmac100_free_buffers(struct ftgmac100 *priv)
{
	int i;

	/* Free all RX buffers */
	for (i = 0; i < RX_QUEUE_ENTRIES; i++) {
		struct ftgmac100_rxdes *rxdes = &priv->descs->rxdes[i];
		struct sk_buff *skb = priv->rx_skbs[i];
		dma_addr_t map = le32_to_cpu(rxdes->rxdes3);

		if (!skb)
			continue;

		priv->rx_skbs[i] = NULL;
		dma_unmap_single(priv->dev, map, RX_BUF_SIZE, DMA_FROM_DEVICE);
		dev_kfree_skb_any(skb);
	}

	/* Free all TX buffers */
	for (i = 0; i < TX_QUEUE_ENTRIES; i++) {
		struct ftgmac100_txdes *txdes = &priv->descs->txdes[i];
		struct sk_buff *skb = priv->tx_skbs[i];

		if (skb)
			ftgmac100_free_tx_packet(priv, i, skb, txdes);
	}
}

static void ftgmac100_free_rings(struct ftgmac100 *priv)
{
	/* Free descriptors */
	if (priv->descs)
		dma_free_coherent(priv->dev, sizeof(struct ftgmac100_descs),
				  priv->descs, priv->descs_dma_addr);

	/* Free scratch packet buffer */
	if (priv->rx_scratch)
		dma_free_coherent(priv->dev, RX_BUF_SIZE,
				  priv->rx_scratch, priv->rx_scratch_dma);
}

static int ftgmac100_alloc_rings(struct ftgmac100 *priv)
{
	/* Allocate descriptors */
	priv->descs = dma_zalloc_coherent(priv->dev,
					  sizeof(struct ftgmac100_descs),
					  &priv->descs_dma_addr, GFP_KERNEL);
	if (!priv->descs)
		return -ENOMEM;

	/* Allocate scratch packet buffer */
	priv->rx_scratch = dma_alloc_coherent(priv->dev,
					      RX_BUF_SIZE,
					      &priv->rx_scratch_dma,
					      GFP_KERNEL);
	if (!priv->rx_scratch)
		return -ENOMEM;

	return 0;
}

static void ftgmac100_init_rings(struct ftgmac100 *priv)
{
	struct ftgmac100_rxdes *rxdes;
	int i;

	/* Initialize RX ring */
	for (i = 0; i < RX_QUEUE_ENTRIES; i++) {
		rxdes = &priv->descs->rxdes[i];
		rxdes->rxdes0 = 0;
		rxdes->rxdes3 = cpu_to_le32(priv->rx_scratch_dma);
	}
	/* Mark the end of the ring */
	rxdes->rxdes0 |= cpu_to_le32(priv->rxdes0_edorr_mask);

	/* Initialize TX ring */
	for (i = 0; i < TX_QUEUE_ENTRIES; i++)
		priv->descs->txdes[i].txdes0 = 0;
	ftgmac100_txdes_set_end_of_ring(priv, &priv->descs->txdes[i - 1]);
}

static int ftgmac100_alloc_rx_buffers(struct ftgmac100 *priv)
{
	int i;

	for (i = 0; i < RX_QUEUE_ENTRIES; i++) {
		struct ftgmac100_rxdes *rxdes = &priv->descs->rxdes[i];

		if (ftgmac100_alloc_rx_buf(priv, i, rxdes, GFP_KERNEL))
			return -ENOMEM;
	}
	return 0;
}

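/* PHY link change callback: cache the new speed/duplex and, when the
 * link is up after a change, schedule the reset task so the MAC gets
 * reprogrammed for the new settings.
 */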
static void ftgmac100_adjust_link(struct net_device *netdev)
{
	struct ftgmac100 *priv = netdev_priv(netdev);
	struct phy_device *phydev = netdev->phydev;
	int new_speed;

	/* We store "no link" as speed 0 */
	if (!phydev->link)
		new_speed = 0;
	else
		new_speed = phydev->speed;

	if (phydev->speed == priv->cur_speed &&
	    phydev->duplex == priv->cur_duplex)
		return;

	/* Print status if we have a link or we had one and just lost it,
	 * don't print otherwise.
	 */
	if (new_speed || priv->cur_speed)
		phy_print_status(phydev);

	priv->cur_speed = new_speed;
	priv->cur_duplex = phydev->duplex;

	/* Link is down, do nothing else */
	if (!new_speed)
		return;

	/* Disable all interrupts */
	iowrite32(0, priv->base + FTGMAC100_OFFSET_IER);

	/* Reset the adapter asynchronously */
	schedule_work(&priv->reset_task);
}

static int ftgmac100_mii_probe(struct ftgmac100 *priv)
{
	struct net_device *netdev = priv->netdev;
	struct phy_device *phydev;

	phydev = phy_find_first(priv->mii_bus);
	if (!phydev) {
		netdev_info(netdev, "%s: no PHY found\n", netdev->name);
		return -ENODEV;
	}

	phydev = phy_connect(netdev, phydev_name(phydev),
			     &ftgmac100_adjust_link, PHY_INTERFACE_MODE_GMII);

	if (IS_ERR(phydev)) {
		netdev_err(netdev, "%s: Could not attach to PHY\n", netdev->name);
		return PTR_ERR(phydev);
	}

	return 0;
}

static int ftgmac100_mdiobus_read(struct mii_bus *bus, int phy_addr, int regnum)
{
	struct net_device *netdev = bus->priv;
	struct ftgmac100 *priv = netdev_priv(netdev);
	unsigned int phycr;
	int i;

	phycr = ioread32(priv->base + FTGMAC100_OFFSET_PHYCR);

	/* preserve MDC cycle threshold */
	phycr &= FTGMAC100_PHYCR_MDC_CYCTHR_MASK;

	phycr |= FTGMAC100_PHYCR_PHYAD(phy_addr) |
		 FTGMAC100_PHYCR_REGAD(regnum) |
		 FTGMAC100_PHYCR_MIIRD;

	iowrite32(phycr, priv->base + FTGMAC100_OFFSET_PHYCR);

	for (i = 0; i < 10; i++) {
		phycr = ioread32(priv->base + FTGMAC100_OFFSET_PHYCR);

		if ((phycr & FTGMAC100_PHYCR_MIIRD) == 0) {
			int data;

			data = ioread32(priv->base + FTGMAC100_OFFSET_PHYDATA);
			return FTGMAC100_PHYDATA_MIIRDATA(data);
		}

		udelay(100);
	}

	netdev_err(netdev, "mdio read timed out\n");
	return -EIO;
}

static int ftgmac100_mdiobus_write(struct mii_bus *bus, int phy_addr,
				   int regnum, u16 value)
{
	struct net_device *netdev = bus->priv;
	struct ftgmac100 *priv = netdev_priv(netdev);
	unsigned int phycr;
	int data;
	int i;

	phycr = ioread32(priv->base + FTGMAC100_OFFSET_PHYCR);

	/* preserve MDC cycle threshold */
	phycr &= FTGMAC100_PHYCR_MDC_CYCTHR_MASK;

	phycr |= FTGMAC100_PHYCR_PHYAD(phy_addr) |
		 FTGMAC100_PHYCR_REGAD(regnum) |
		 FTGMAC100_PHYCR_MIIWR;

	data = FTGMAC100_PHYDATA_MIIWDATA(value);

	iowrite32(data, priv->base + FTGMAC100_OFFSET_PHYDATA);
	iowrite32(phycr, priv->base + FTGMAC100_OFFSET_PHYCR);

	for (i = 0; i < 10; i++) {
		phycr = ioread32(priv->base + FTGMAC100_OFFSET_PHYCR);

		if ((phycr & FTGMAC100_PHYCR_MIIWR) == 0)
			return 0;

		udelay(100);
	}

	netdev_err(netdev, "mdio write timed out\n");
	return -EIO;
}

static void ftgmac100_get_drvinfo(struct net_device *netdev,
				  struct ethtool_drvinfo *info)
{
	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
	strlcpy(info->bus_info, dev_name(&netdev->dev), sizeof(info->bus_info));
}

static const struct ethtool_ops ftgmac100_ethtool_ops = {
	.get_drvinfo		= ftgmac100_get_drvinfo,
	.get_link		= ethtool_op_get_link,
	.get_link_ksettings	= phy_ethtool_get_link_ksettings,
	.set_link_ksettings	= phy_ethtool_set_link_ksettings,
};

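/* Interrupt handler: acknowledge and account abnormal events, leave
 * only the "bad" error interrupts enabled while NAPI is scheduled,
 * and let the NAPI poller do the actual RX/TX work.
 */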
static irqreturn_t ftgmac100_interrupt(int irq, void *dev_id)
{
	struct net_device *netdev = dev_id;
	struct ftgmac100 *priv = netdev_priv(netdev);
	unsigned int status, new_mask = FTGMAC100_INT_BAD;

	/* Fetch and clear interrupt bits, process abnormal ones */
	status = ioread32(priv->base + FTGMAC100_OFFSET_ISR);
	iowrite32(status, priv->base + FTGMAC100_OFFSET_ISR);
	if (unlikely(status & FTGMAC100_INT_BAD)) {

		/* RX buffer unavailable */
		if (status & FTGMAC100_INT_NO_RXBUF)
			netdev->stats.rx_over_errors++;

		/* received packet lost due to RX FIFO full */
		if (status & FTGMAC100_INT_RPKT_LOST)
			netdev->stats.rx_fifo_errors++;

		/* sent packet lost due to excessive TX collision */
		if (status & FTGMAC100_INT_XPKT_LOST)
			netdev->stats.tx_fifo_errors++;

		/* AHB error -> Reset the chip */
		if (status & FTGMAC100_INT_AHB_ERR) {
			if (net_ratelimit())
				netdev_warn(netdev,
					    "AHB bus error ! Resetting chip.\n");
			iowrite32(0, priv->base + FTGMAC100_OFFSET_IER);
			schedule_work(&priv->reset_task);
			return IRQ_HANDLED;
		}

		/* We may need to restart the MAC after such errors, delay
		 * this until after we have freed some Rx buffers though
		 */
		priv->need_mac_restart = true;

		/* Disable those errors until we restart */
		new_mask &= ~status;
	}

	/* Only enable "bad" interrupts while NAPI is on */
	iowrite32(new_mask, priv->base + FTGMAC100_OFFSET_IER);

	/* Schedule NAPI bh */
	napi_schedule_irqoff(&priv->napi);

	return IRQ_HANDLED;
}

static bool ftgmac100_check_rx(struct ftgmac100 *priv)
{
	struct ftgmac100_rxdes *rxdes = &priv->descs->rxdes[priv->rx_pointer];

	/* Do we have a packet ? */
	return !!(rxdes->rxdes0 & cpu_to_le32(FTGMAC100_RXDES0_RXPKT_RDY));
}

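/* NAPI poll: reclaim completed TX packets, receive up to @budget RX
 * packets, restart the MAC if an error interrupt requested it, and
 * only re-enable interrupts once both rings are idle.
 */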
static int ftgmac100_poll(struct napi_struct *napi, int budget)
{
	struct ftgmac100 *priv = container_of(napi, struct ftgmac100, napi);
	int work_done = 0;
	bool more;

	/* Handle TX completions */
	if (ftgmac100_tx_buf_cleanable(priv))
		ftgmac100_tx_complete(priv);

	/* Handle RX packets */
	do {
		more = ftgmac100_rx_packet(priv, &work_done);
	} while (more && work_done < budget);

	/* The interrupt is telling us to kick the MAC back to life
	 * after an RX overflow
	 */
	if (unlikely(priv->need_mac_restart)) {
		ftgmac100_start_hw(priv);

		/* Re-enable "bad" interrupts */
		iowrite32(FTGMAC100_INT_BAD,
			  priv->base + FTGMAC100_OFFSET_IER);
	}

	/* As long as we are waiting for transmit packets to be
	 * completed we keep NAPI going
	 */
	if (ftgmac100_tx_buf_cleanable(priv))
		work_done = budget;

	if (work_done < budget) {
		/* We are about to re-enable all interrupts. However
		 * the HW has been latching RX/TX packet interrupts while
		 * they were masked. So we clear them first, then we need
		 * to re-check if there's something to process
		 */
		iowrite32(FTGMAC100_INT_RXTX,
			  priv->base + FTGMAC100_OFFSET_ISR);
		if (ftgmac100_check_rx(priv) ||
		    ftgmac100_tx_buf_cleanable(priv))
			return budget;

		/* deschedule NAPI */
		napi_complete(napi);

		/* enable all interrupts */
		iowrite32(FTGMAC100_INT_ALL,
			  priv->base + FTGMAC100_OFFSET_IER);
	}

	return work_done;
}

static int ftgmac100_init_all(struct ftgmac100 *priv, bool ignore_alloc_err)
{
	int err = 0;

	/* Re-init descriptors (adjust queue sizes) */
	ftgmac100_init_rings(priv);

	/* Realloc rx descriptors */
	err = ftgmac100_alloc_rx_buffers(priv);
	if (err && !ignore_alloc_err)
		return err;

	/* Reinit and restart HW */
	ftgmac100_init_hw(priv);
	ftgmac100_start_hw(priv);

	/* Re-enable the device */
	napi_enable(&priv->napi);
	netif_start_queue(priv->netdev);

	/* Enable all interrupts */
	iowrite32(FTGMAC100_INT_ALL, priv->base + FTGMAC100_OFFSET_IER);

	return err;
}

static void ftgmac100_reset_task(struct work_struct *work)
{
	struct ftgmac100 *priv = container_of(work, struct ftgmac100,
					      reset_task);
	struct net_device *netdev = priv->netdev;
	int err;

	netdev_dbg(netdev, "Resetting NIC...\n");

	/* Lock the world */
	rtnl_lock();
	if (netdev->phydev)
		mutex_lock(&netdev->phydev->lock);
	if (priv->mii_bus)
		mutex_lock(&priv->mii_bus->mdio_lock);

	/* Check if the interface is still up */
	if (!netif_running(netdev))
		goto bail;

	/* Stop the network stack */
	netif_trans_update(netdev);
	napi_disable(&priv->napi);
	netif_tx_disable(netdev);

	/* Stop and reset the MAC */
	ftgmac100_stop_hw(priv);
	err = ftgmac100_reset_and_config_mac(priv);
	if (err) {
		/* Not much we can do ... it might come back... */
		netdev_err(netdev, "attempting to continue...\n");
	}

	/* Free all rx and tx buffers */
	ftgmac100_free_buffers(priv);

	/* Setup everything again and restart chip */
	ftgmac100_init_all(priv, true);

	netdev_dbg(netdev, "Reset done !\n");
bail:
	if (priv->mii_bus)
		mutex_unlock(&priv->mii_bus->mdio_lock);
	if (netdev->phydev)
		mutex_unlock(&netdev->phydev->lock);
	rtnl_unlock();
}

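/* ndo_open: allocate the rings, reset and configure the MAC, request
 * the interrupt, populate the RX ring and then start either the PHY
 * or the NC-SI device depending on the configuration.
 */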
static int ftgmac100_open(struct net_device *netdev)
{
	struct ftgmac100 *priv = netdev_priv(netdev);
	int err;

	/* Allocate ring buffers */
	err = ftgmac100_alloc_rings(priv);
	if (err) {
		netdev_err(netdev, "Failed to allocate descriptors\n");
		return err;
	}

	/* When using NC-SI we force the speed to 100Mbit/s full duplex,
	 *
	 * Otherwise we leave it set to 0 (no link), the link
	 * message from the PHY layer will handle setting it up to
	 * something else if needed.
	 */
	if (priv->use_ncsi) {
		priv->cur_duplex = DUPLEX_FULL;
		priv->cur_speed = SPEED_100;
	} else {
		priv->cur_duplex = 0;
		priv->cur_speed = 0;
	}

	/* Reset the hardware */
	err = ftgmac100_reset_and_config_mac(priv);
	if (err)
		goto err_hw;

	/* Initialize NAPI */
	netif_napi_add(netdev, &priv->napi, ftgmac100_poll, 64);

	/* Grab our interrupt */
	err = request_irq(netdev->irq, ftgmac100_interrupt, 0, netdev->name, netdev);
	if (err) {
		netdev_err(netdev, "failed to request irq %d\n", netdev->irq);
		goto err_irq;
	}

	/* Start things up */
	err = ftgmac100_init_all(priv, false);
	if (err) {
		netdev_err(netdev, "Failed to allocate packet buffers\n");
		goto err_alloc;
	}

	if (netdev->phydev) {
		/* If we have a PHY, start polling */
		phy_start(netdev->phydev);
	} else if (priv->use_ncsi) {
		/* If using NC-SI, set our carrier on and start the stack */
		netif_carrier_on(netdev);

		/* Start the NCSI device */
		err = ncsi_start_dev(priv->ndev);
		if (err)
			goto err_ncsi;
	}

	return 0;

err_ncsi:
	napi_disable(&priv->napi);
	netif_stop_queue(netdev);
err_alloc:
	ftgmac100_free_buffers(priv);
	free_irq(netdev->irq, netdev);
err_irq:
	netif_napi_del(&priv->napi);
err_hw:
	iowrite32(0, priv->base + FTGMAC100_OFFSET_IER);
	ftgmac100_free_rings(priv);
	return err;
}

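/* ndo_stop: quiesce NAPI and the TX queue, stop the PHY or NC-SI
 * device, then release the interrupt, buffers and rings.
 */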
static int ftgmac100_stop(struct net_device *netdev)
{
	struct ftgmac100 *priv = netdev_priv(netdev);

	/* Note about the reset task: We are called with the rtnl lock
	 * held, so we are synchronized against the core of the reset
	 * task. We must not try to synchronously cancel it otherwise
	 * we can deadlock. But since it will test for netif_running()
	 * which has already been cleared by the net core, we don't have
	 * anything special to do.
	 */

	/* disable all interrupts */
	iowrite32(0, priv->base + FTGMAC100_OFFSET_IER);

	netif_stop_queue(netdev);
	napi_disable(&priv->napi);
	netif_napi_del(&priv->napi);
	if (netdev->phydev)
		phy_stop(netdev->phydev);
	else if (priv->use_ncsi)
		ncsi_stop_dev(priv->ndev);

	ftgmac100_stop_hw(priv);
	free_irq(netdev->irq, netdev);
	ftgmac100_free_buffers(priv);
	ftgmac100_free_rings(priv);

	return 0;
}

/* optional */
static int ftgmac100_do_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
	if (!netdev->phydev)
		return -ENXIO;

	return phy_mii_ioctl(netdev->phydev, ifr, cmd);
}

static void ftgmac100_tx_timeout(struct net_device *netdev)
{
	struct ftgmac100 *priv = netdev_priv(netdev);

	/* Disable all interrupts */
	iowrite32(0, priv->base + FTGMAC100_OFFSET_IER);

	/* Do the reset outside of interrupt context */
	schedule_work(&priv->reset_task);
}

static const struct net_device_ops ftgmac100_netdev_ops = {
	.ndo_open		= ftgmac100_open,
	.ndo_stop		= ftgmac100_stop,
	.ndo_start_xmit		= ftgmac100_hard_start_xmit,
	.ndo_set_mac_address	= ftgmac100_set_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_do_ioctl		= ftgmac100_do_ioctl,
	.ndo_tx_timeout		= ftgmac100_tx_timeout,
};

static int ftgmac100_setup_mdio(struct net_device *netdev)
{
	struct ftgmac100 *priv = netdev_priv(netdev);
	struct platform_device *pdev = to_platform_device(priv->dev);
	int i, err = 0;
	u32 reg;

	/* initialize mdio bus */
	priv->mii_bus = mdiobus_alloc();
	if (!priv->mii_bus)
		return -EIO;

	if (of_machine_is_compatible("aspeed,ast2400") ||
	    of_machine_is_compatible("aspeed,ast2500")) {
		/* This driver supports the old MDIO interface */
		reg = ioread32(priv->base + FTGMAC100_OFFSET_REVR);
		reg &= ~FTGMAC100_REVR_NEW_MDIO_INTERFACE;
		iowrite32(reg, priv->base + FTGMAC100_OFFSET_REVR);
	}

	priv->mii_bus->name = "ftgmac100_mdio";
	snprintf(priv->mii_bus->id, MII_BUS_ID_SIZE, "%s-%d",
		 pdev->name, pdev->id);
	priv->mii_bus->priv = priv->netdev;
	priv->mii_bus->read = ftgmac100_mdiobus_read;
	priv->mii_bus->write = ftgmac100_mdiobus_write;

	for (i = 0; i < PHY_MAX_ADDR; i++)
		priv->mii_bus->irq[i] = PHY_POLL;

	err = mdiobus_register(priv->mii_bus);
	if (err) {
		dev_err(priv->dev, "Cannot register MDIO bus!\n");
		goto err_register_mdiobus;
	}

	err = ftgmac100_mii_probe(priv);
	if (err) {
		dev_err(priv->dev, "MII Probe failed!\n");
		goto err_mii_probe;
	}

	return 0;

err_mii_probe:
	mdiobus_unregister(priv->mii_bus);
err_register_mdiobus:
	mdiobus_free(priv->mii_bus);
	return err;
}

static void ftgmac100_destroy_mdio(struct net_device *netdev)
{
	struct ftgmac100 *priv = netdev_priv(netdev);

	if (!netdev->phydev)
		return;

	phy_disconnect(netdev->phydev);
	mdiobus_unregister(priv->mii_bus);
	mdiobus_free(priv->mii_bus);
}

static void ftgmac100_ncsi_handler(struct ncsi_dev *nd)
{
	if (unlikely(nd->state != ncsi_dev_state_functional))
		return;

	netdev_info(nd->dev, "NCSI interface %s\n",
		    nd->link_up ? "up" : "down");
}

static int ftgmac100_probe(struct platform_device *pdev)
{
	struct resource *res;
	int irq;
	struct net_device *netdev;
	struct ftgmac100 *priv;
	int err = 0;

	if (!pdev)
		return -ENODEV;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENXIO;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	/* setup net_device */
	netdev = alloc_etherdev(sizeof(*priv));
	if (!netdev) {
		err = -ENOMEM;
		goto err_alloc_etherdev;
	}

	SET_NETDEV_DEV(netdev, &pdev->dev);

	netdev->ethtool_ops = &ftgmac100_ethtool_ops;
	netdev->netdev_ops = &ftgmac100_netdev_ops;
	netdev->watchdog_timeo = 5 * HZ;

	platform_set_drvdata(pdev, netdev);

	/* setup private data */
	priv = netdev_priv(netdev);
	priv->netdev = netdev;
	priv->dev = &pdev->dev;
	INIT_WORK(&priv->reset_task, ftgmac100_reset_task);

	/* map io memory */
	priv->res = request_mem_region(res->start, resource_size(res),
				       dev_name(&pdev->dev));
	if (!priv->res) {
		dev_err(&pdev->dev, "Could not reserve memory region\n");
		err = -ENOMEM;
		goto err_req_mem;
	}

	priv->base = ioremap(res->start, resource_size(res));
	if (!priv->base) {
		dev_err(&pdev->dev, "Failed to ioremap ethernet registers\n");
		err = -EIO;
		goto err_ioremap;
	}

	netdev->irq = irq;

	/* MAC address from chip or random one */
	ftgmac100_setup_mac(priv);

	if (of_machine_is_compatible("aspeed,ast2400") ||
	    of_machine_is_compatible("aspeed,ast2500")) {
		priv->rxdes0_edorr_mask = BIT(30);
		priv->txdes0_edotr_mask = BIT(30);
	} else {
		priv->rxdes0_edorr_mask = BIT(15);
		priv->txdes0_edotr_mask = BIT(15);
	}

	if (pdev->dev.of_node &&
	    of_get_property(pdev->dev.of_node, "use-ncsi", NULL)) {
		if (!IS_ENABLED(CONFIG_NET_NCSI)) {
			dev_err(&pdev->dev, "NCSI stack not enabled\n");
			err = -EINVAL;
			goto err_ncsi_dev;
		}

		dev_info(&pdev->dev, "Using NCSI interface\n");
		priv->use_ncsi = true;
		priv->ndev = ncsi_register_dev(netdev, ftgmac100_ncsi_handler);
		if (!priv->ndev) {
			err = -EINVAL;
			goto err_ncsi_dev;
		}
	} else {
		priv->use_ncsi = false;
		err = ftgmac100_setup_mdio(netdev);
		if (err)
			goto err_setup_mdio;
	}

	/* We have to disable on-chip IP checksum functionality
	 * when NCSI is enabled on the interface. It doesn't work
	 * in that case.
	 */
1448 */ 1449 netdev->features = NETIF_F_RXCSUM | NETIF_F_IP_CSUM | NETIF_F_GRO; 1450 if (priv->use_ncsi && 1451 of_get_property(pdev->dev.of_node, "no-hw-checksum", NULL)) 1452 netdev->features &= ~NETIF_F_IP_CSUM; 1453 1454 1455 /* register network device */ 1456 err = register_netdev(netdev); 1457 if (err) { 1458 dev_err(&pdev->dev, "Failed to register netdev\n"); 1459 goto err_register_netdev; 1460 } 1461 1462 netdev_info(netdev, "irq %d, mapped at %p\n", netdev->irq, priv->base); 1463 1464 return 0; 1465 1466 err_ncsi_dev: 1467 err_register_netdev: 1468 ftgmac100_destroy_mdio(netdev); 1469 err_setup_mdio: 1470 iounmap(priv->base); 1471 err_ioremap: 1472 release_resource(priv->res); 1473 err_req_mem: 1474 netif_napi_del(&priv->napi); 1475 free_netdev(netdev); 1476 err_alloc_etherdev: 1477 return err; 1478 } 1479 1480 static int ftgmac100_remove(struct platform_device *pdev) 1481 { 1482 struct net_device *netdev; 1483 struct ftgmac100 *priv; 1484 1485 netdev = platform_get_drvdata(pdev); 1486 priv = netdev_priv(netdev); 1487 1488 unregister_netdev(netdev); 1489 1490 /* There's a small chance the reset task will have been re-queued, 1491 * during stop, make sure it's gone before we free the structure. 1492 */ 1493 cancel_work_sync(&priv->reset_task); 1494 1495 ftgmac100_destroy_mdio(netdev); 1496 1497 iounmap(priv->base); 1498 release_resource(priv->res); 1499 1500 netif_napi_del(&priv->napi); 1501 free_netdev(netdev); 1502 return 0; 1503 } 1504 1505 static const struct of_device_id ftgmac100_of_match[] = { 1506 { .compatible = "faraday,ftgmac100" }, 1507 { } 1508 }; 1509 MODULE_DEVICE_TABLE(of, ftgmac100_of_match); 1510 1511 static struct platform_driver ftgmac100_driver = { 1512 .probe = ftgmac100_probe, 1513 .remove = ftgmac100_remove, 1514 .driver = { 1515 .name = DRV_NAME, 1516 .of_match_table = ftgmac100_of_match, 1517 }, 1518 }; 1519 module_platform_driver(ftgmac100_driver); 1520 1521 MODULE_AUTHOR("Po-Yu Chuang <ratbert@faraday-tech.com>"); 1522 MODULE_DESCRIPTION("FTGMAC100 driver"); 1523 MODULE_LICENSE("GPL"); 1524