// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Faraday FTGMAC100 Gigabit Ethernet
 *
 * (C) Copyright 2009-2011 Faraday Technology
 * Po-Yu Chuang <ratbert@faraday-tech.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/of.h>
#include <linux/of_mdio.h>
#include <linux/phy.h>
#include <linux/platform_device.h>
#include <linux/property.h>
#include <linux/crc32.h>
#include <linux/if_vlan.h>
#include <linux/of_net.h>
#include <net/ip.h>
#include <net/ncsi.h>

#include "ftgmac100.h"

#define DRV_NAME	"ftgmac100"
#define DRV_VERSION	"0.7"

/* Arbitrary values, I am not sure the HW has limits */
#define MAX_RX_QUEUE_ENTRIES	1024
#define MAX_TX_QUEUE_ENTRIES	1024
#define MIN_RX_QUEUE_ENTRIES	32
#define MIN_TX_QUEUE_ENTRIES	32

/* Defaults */
#define DEF_RX_QUEUE_ENTRIES	128
#define DEF_TX_QUEUE_ENTRIES	128

#define MAX_PKT_SIZE		1536
#define RX_BUF_SIZE		MAX_PKT_SIZE	/* must be smaller than 0x3fff */

/* Min number of tx ring entries before stopping queue */
#define TX_THRESHOLD		(MAX_SKB_FRAGS + 1)
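
/* Note: a single skb can consume up to MAX_SKB_FRAGS + 1 descriptors
 * (one for the head plus one per page fragment), which is why the queue
 * is stopped whenever fewer than TX_THRESHOLD slots remain. Ring sizes
 * must also be powers of two (enforced in ftgmac100_set_ringparam())
 * since the ring pointers wrap by masking with (entries - 1).
 */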

#define FTGMAC_100MHZ		100000000
#define FTGMAC_25MHZ		25000000

struct ftgmac100 {
	/* Registers */
	struct resource *res;
	void __iomem *base;

	/* Rx ring */
	unsigned int rx_q_entries;
	struct ftgmac100_rxdes *rxdes;
	dma_addr_t rxdes_dma;
	struct sk_buff **rx_skbs;
	unsigned int rx_pointer;
	u32 rxdes0_edorr_mask;

	/* Tx ring */
	unsigned int tx_q_entries;
	struct ftgmac100_txdes *txdes;
	dma_addr_t txdes_dma;
	struct sk_buff **tx_skbs;
	unsigned int tx_clean_pointer;
	unsigned int tx_pointer;
	u32 txdes0_edotr_mask;

	/* Used to signal the reset task of ring change request */
	unsigned int new_rx_q_entries;
	unsigned int new_tx_q_entries;

	/* Scratch page to use when rx skb alloc fails */
	void *rx_scratch;
	dma_addr_t rx_scratch_dma;

	/* Component structures */
	struct net_device *netdev;
	struct device *dev;
	struct ncsi_dev *ndev;
	struct napi_struct napi;
	struct work_struct reset_task;
	struct mii_bus *mii_bus;
	struct clk *clk;

	/* Link management */
	int cur_speed;
	int cur_duplex;
	bool use_ncsi;

	/* Multicast filter settings */
	u32 maht0;
	u32 maht1;

	/* Flow control settings */
	bool tx_pause;
	bool rx_pause;
	bool aneg_pause;

	/* Misc */
	bool need_mac_restart;
	bool is_aspeed;
};

static int ftgmac100_reset_mac(struct ftgmac100 *priv, u32 maccr)
{
	struct net_device *netdev = priv->netdev;
	int i;

	/* NOTE: reset clears all registers */
	iowrite32(maccr, priv->base + FTGMAC100_OFFSET_MACCR);
	iowrite32(maccr | FTGMAC100_MACCR_SW_RST,
		  priv->base + FTGMAC100_OFFSET_MACCR);
	for (i = 0; i < 200; i++) {
		unsigned int maccr;

		maccr = ioread32(priv->base + FTGMAC100_OFFSET_MACCR);
		if (!(maccr & FTGMAC100_MACCR_SW_RST))
			return 0;

		udelay(1);
	}

	netdev_err(netdev, "Hardware reset failed\n");
	return -EIO;
}

static int ftgmac100_reset_and_config_mac(struct ftgmac100 *priv)
{
	u32 maccr = 0;

	switch (priv->cur_speed) {
	case SPEED_10:
	case 0: /* no link */
		break;

	case SPEED_100:
		maccr |= FTGMAC100_MACCR_FAST_MODE;
		break;

	case SPEED_1000:
		maccr |= FTGMAC100_MACCR_GIGA_MODE;
		break;
	default:
		netdev_err(priv->netdev, "Unknown speed %d !\n",
			   priv->cur_speed);
		break;
	}

	/* (Re)initialize the queue pointers */
	priv->rx_pointer = 0;
	priv->tx_clean_pointer = 0;
	priv->tx_pointer = 0;

	/* The doc says reset twice with 10us interval */
	if (ftgmac100_reset_mac(priv, maccr))
		return -EIO;
	usleep_range(10, 1000);
	return ftgmac100_reset_mac(priv, maccr);
}

static void ftgmac100_write_mac_addr(struct ftgmac100 *priv, const u8 *mac)
{
	unsigned int maddr = mac[0] << 8 | mac[1];
	unsigned int laddr = mac[2] << 24 | mac[3] << 16 | mac[4] << 8 | mac[5];

	iowrite32(maddr, priv->base + FTGMAC100_OFFSET_MAC_MADR);
	iowrite32(laddr, priv->base + FTGMAC100_OFFSET_MAC_LADR);
}

static void ftgmac100_initial_mac(struct ftgmac100 *priv)
{
	u8 mac[ETH_ALEN];
	unsigned int m;
	unsigned int l;
	void *addr;

	addr = device_get_mac_address(priv->dev, mac, ETH_ALEN);
	if (addr) {
		ether_addr_copy(priv->netdev->dev_addr, mac);
		dev_info(priv->dev, "Read MAC address %pM from device tree\n",
			 mac);
		return;
	}

	m = ioread32(priv->base + FTGMAC100_OFFSET_MAC_MADR);
	l = ioread32(priv->base + FTGMAC100_OFFSET_MAC_LADR);

	mac[0] = (m >> 8) & 0xff;
	mac[1] = m & 0xff;
	mac[2] = (l >> 24) & 0xff;
	mac[3] = (l >> 16) & 0xff;
	mac[4] = (l >> 8) & 0xff;
	mac[5] = l & 0xff;

	if (is_valid_ether_addr(mac)) {
		ether_addr_copy(priv->netdev->dev_addr, mac);
		dev_info(priv->dev, "Read MAC address %pM from chip\n", mac);
	} else {
		eth_hw_addr_random(priv->netdev);
		dev_info(priv->dev, "Generated random MAC address %pM\n",
			 priv->netdev->dev_addr);
	}
}

static int ftgmac100_set_mac_addr(struct net_device *dev, void *p)
{
	int ret;

	ret = eth_prepare_mac_addr_change(dev, p);
	if (ret < 0)
		return ret;

	eth_commit_mac_addr_change(dev, p);
	ftgmac100_write_mac_addr(netdev_priv(dev), dev->dev_addr);

	return 0;
}

static void ftgmac100_config_pause(struct ftgmac100 *priv)
{
	u32 fcr = FTGMAC100_FCR_PAUSE_TIME(16);

	/* Throttle tx queue when receiving pause frames */
	if (priv->rx_pause)
		fcr |= FTGMAC100_FCR_FC_EN;

	/* Enables sending pause frames when the RX queue is past a
	 * certain threshold.
	 */
	if (priv->tx_pause)
		fcr |= FTGMAC100_FCR_FCTHR_EN;

	iowrite32(fcr, priv->base + FTGMAC100_OFFSET_FCR);
}

static void ftgmac100_init_hw(struct ftgmac100 *priv)
{
	u32 reg, rfifo_sz, tfifo_sz;

	/* Clear stale interrupts */
	reg = ioread32(priv->base + FTGMAC100_OFFSET_ISR);
	iowrite32(reg, priv->base + FTGMAC100_OFFSET_ISR);

	/* Setup RX ring buffer base */
	iowrite32(priv->rxdes_dma, priv->base + FTGMAC100_OFFSET_RXR_BADR);

	/* Setup TX ring buffer base */
	iowrite32(priv->txdes_dma, priv->base + FTGMAC100_OFFSET_NPTXR_BADR);

	/* Configure RX buffer size */
	iowrite32(FTGMAC100_RBSR_SIZE(RX_BUF_SIZE),
		  priv->base + FTGMAC100_OFFSET_RBSR);

	/* Set RX descriptor autopoll */
	iowrite32(FTGMAC100_APTC_RXPOLL_CNT(1),
		  priv->base + FTGMAC100_OFFSET_APTC);

	/* Write MAC address */
	ftgmac100_write_mac_addr(priv, priv->netdev->dev_addr);

	/* Write multicast filter */
	iowrite32(priv->maht0, priv->base + FTGMAC100_OFFSET_MAHT0);
	iowrite32(priv->maht1, priv->base + FTGMAC100_OFFSET_MAHT1);

	/* Configure descriptor sizes and increase burst sizes according
	 * to values in Aspeed SDK. The FIFO arbitration is enabled and
	 * the thresholds set based on the recommended values in the
	 * AST2400 specification.
	 */
	iowrite32(FTGMAC100_DBLAC_RXDES_SIZE(2) |   /* 2*8 bytes RX descs */
		  FTGMAC100_DBLAC_TXDES_SIZE(2) |   /* 2*8 bytes TX descs */
		  FTGMAC100_DBLAC_RXBURST_SIZE(3) | /* 512 bytes max RX bursts */
		  FTGMAC100_DBLAC_TXBURST_SIZE(3) | /* 512 bytes max TX bursts */
		  FTGMAC100_DBLAC_RX_THR_EN |       /* Enable fifo threshold arb */
		  FTGMAC100_DBLAC_RXFIFO_HTHR(6) |  /* 6/8 of FIFO high threshold */
		  FTGMAC100_DBLAC_RXFIFO_LTHR(2),   /* 2/8 of FIFO low threshold */
		  priv->base + FTGMAC100_OFFSET_DBLAC);

	/* Interrupt mitigation configured for 1 interrupt/packet. HW interrupt
	 * mitigation doesn't seem to provide any benefit with NAPI so leave
	 * it at that.
	 */
	iowrite32(FTGMAC100_ITC_RXINT_THR(1) |
		  FTGMAC100_ITC_TXINT_THR(1),
		  priv->base + FTGMAC100_OFFSET_ITC);
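
	/* Note (an assumption inferred from the register usage below, not
	 * from a documented spec reference): FEAR advertises the RX FIFO
	 * size in bits 2:0 and the TX FIFO size in bits 5:3; those fields
	 * are copied into TPAFCR bits 26:24 and 29:27 respectively so the
	 * packet-accept thresholds match the actual FIFO sizes.
	 */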
	/* Configure FIFO sizes in the TPAFCR register */
	reg = ioread32(priv->base + FTGMAC100_OFFSET_FEAR);
	rfifo_sz = reg & 0x00000007;
	tfifo_sz = (reg >> 3) & 0x00000007;
	reg = ioread32(priv->base + FTGMAC100_OFFSET_TPAFCR);
	reg &= ~0x3f000000;
	reg |= (tfifo_sz << 27);
	reg |= (rfifo_sz << 24);
	iowrite32(reg, priv->base + FTGMAC100_OFFSET_TPAFCR);
}

static void ftgmac100_start_hw(struct ftgmac100 *priv)
{
	u32 maccr = ioread32(priv->base + FTGMAC100_OFFSET_MACCR);

	/* Keep the original GMAC and FAST bits */
	maccr &= (FTGMAC100_MACCR_FAST_MODE | FTGMAC100_MACCR_GIGA_MODE);

	/* Add all the main enable bits */
	maccr |= FTGMAC100_MACCR_TXDMA_EN |
		 FTGMAC100_MACCR_RXDMA_EN |
		 FTGMAC100_MACCR_TXMAC_EN |
		 FTGMAC100_MACCR_RXMAC_EN |
		 FTGMAC100_MACCR_CRC_APD |
		 FTGMAC100_MACCR_PHY_LINK_LEVEL |
		 FTGMAC100_MACCR_RX_RUNT |
		 FTGMAC100_MACCR_RX_BROADPKT;

	/* Add other bits as needed */
	if (priv->cur_duplex == DUPLEX_FULL)
		maccr |= FTGMAC100_MACCR_FULLDUP;
	if (priv->netdev->flags & IFF_PROMISC)
		maccr |= FTGMAC100_MACCR_RX_ALL;
	if (priv->netdev->flags & IFF_ALLMULTI)
		maccr |= FTGMAC100_MACCR_RX_MULTIPKT;
	else if (netdev_mc_count(priv->netdev))
		maccr |= FTGMAC100_MACCR_HT_MULTI_EN;

	/* Vlan filtering enabled */
	if (priv->netdev->features & NETIF_F_HW_VLAN_CTAG_RX)
		maccr |= FTGMAC100_MACCR_RM_VLAN;

	/* Hit the HW */
	iowrite32(maccr, priv->base + FTGMAC100_OFFSET_MACCR);
}

static void ftgmac100_stop_hw(struct ftgmac100 *priv)
{
	iowrite32(0, priv->base + FTGMAC100_OFFSET_MACCR);
}
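
/* Note: the multicast filter is a 64-bit hash table split across the
 * MAHT1:MAHT0 registers. The index below is derived from the CRC-32
 * (little-endian bit order) of the station address: bits 7:2 of the CRC
 * are inverted to form a 6-bit table index, presumably mirroring the
 * function the MAC applies when FTGMAC100_MACCR_HT_MULTI_EN is set.
 */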
static void ftgmac100_calc_mc_hash(struct ftgmac100 *priv)
{
	struct netdev_hw_addr *ha;

	priv->maht1 = 0;
	priv->maht0 = 0;
	netdev_for_each_mc_addr(ha, priv->netdev) {
		u32 crc_val = ether_crc_le(ETH_ALEN, ha->addr);

		crc_val = (~(crc_val >> 2)) & 0x3f;
		if (crc_val >= 32)
			priv->maht1 |= 1ul << (crc_val - 32);
		else
			priv->maht0 |= 1ul << (crc_val);
	}
}

static void ftgmac100_set_rx_mode(struct net_device *netdev)
{
	struct ftgmac100 *priv = netdev_priv(netdev);

	/* Setup the hash filter */
	ftgmac100_calc_mc_hash(priv);

	/* Interface down ? that's all there is to do */
	if (!netif_running(netdev))
		return;

	/* Update the HW */
	iowrite32(priv->maht0, priv->base + FTGMAC100_OFFSET_MAHT0);
	iowrite32(priv->maht1, priv->base + FTGMAC100_OFFSET_MAHT1);

	/* Reconfigure MACCR */
	ftgmac100_start_hw(priv);
}
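
/* Note: if an skb cannot be allocated (or mapped) for a slot, the
 * descriptor is pointed at a shared DMA "scratch" page instead of being
 * left with a stale address. The MAC can then still safely land data
 * somewhere; the slot simply has no skb, and whatever arrives in it is
 * dropped on completion (see ftgmac100_rx_packet()).
 */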
static int ftgmac100_alloc_rx_buf(struct ftgmac100 *priv, unsigned int entry,
				  struct ftgmac100_rxdes *rxdes, gfp_t gfp)
{
	struct net_device *netdev = priv->netdev;
	struct sk_buff *skb;
	dma_addr_t map;
	int err = 0;

	skb = netdev_alloc_skb_ip_align(netdev, RX_BUF_SIZE);
	if (unlikely(!skb)) {
		if (net_ratelimit())
			netdev_warn(netdev, "failed to allocate rx skb\n");
		err = -ENOMEM;
		map = priv->rx_scratch_dma;
	} else {
		map = dma_map_single(priv->dev, skb->data, RX_BUF_SIZE,
				     DMA_FROM_DEVICE);
		if (unlikely(dma_mapping_error(priv->dev, map))) {
			if (net_ratelimit())
				netdev_err(netdev, "failed to map rx page\n");
			dev_kfree_skb_any(skb);
			map = priv->rx_scratch_dma;
			skb = NULL;
			err = -ENOMEM;
		}
	}

	/* Store skb */
	priv->rx_skbs[entry] = skb;

	/* Store DMA address into RX desc */
	rxdes->rxdes3 = cpu_to_le32(map);

	/* Ensure the above is ordered vs clearing the OWN bit */
	dma_wmb();

	/* Clean status (which resets own bit) */
	if (entry == (priv->rx_q_entries - 1))
		rxdes->rxdes0 = cpu_to_le32(priv->rxdes0_edorr_mask);
	else
		rxdes->rxdes0 = 0;

	return err;
}

static unsigned int ftgmac100_next_rx_pointer(struct ftgmac100 *priv,
					      unsigned int pointer)
{
	return (pointer + 1) & (priv->rx_q_entries - 1);
}

static void ftgmac100_rx_packet_error(struct ftgmac100 *priv, u32 status)
{
	struct net_device *netdev = priv->netdev;

	if (status & FTGMAC100_RXDES0_RX_ERR)
		netdev->stats.rx_errors++;

	if (status & FTGMAC100_RXDES0_CRC_ERR)
		netdev->stats.rx_crc_errors++;

	if (status & (FTGMAC100_RXDES0_FTL |
		      FTGMAC100_RXDES0_RUNT |
		      FTGMAC100_RXDES0_RX_ODD_NB))
		netdev->stats.rx_length_errors++;
}

static bool ftgmac100_rx_packet(struct ftgmac100 *priv, int *processed)
{
	struct net_device *netdev = priv->netdev;
	struct ftgmac100_rxdes *rxdes;
	struct sk_buff *skb;
	unsigned int pointer, size;
	u32 status, csum_vlan;
	dma_addr_t map;

	/* Grab next RX descriptor */
	pointer = priv->rx_pointer;
	rxdes = &priv->rxdes[pointer];

	/* Grab descriptor status */
	status = le32_to_cpu(rxdes->rxdes0);

	/* Do we have a packet ? */
	if (!(status & FTGMAC100_RXDES0_RXPKT_RDY))
		return false;

	/* Order subsequent reads with the test for the ready bit */
	dma_rmb();
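
	/* Since RX_BUF_SIZE covers the largest frame we accept
	 * (MAX_PKT_SIZE), a packet spanning multiple descriptors can only
	 * mean an oversized frame, so anything without both the first and
	 * last segment bits set is dropped.
	 */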
	/* We don't cope with fragmented RX packets */
	if (unlikely(!(status & FTGMAC100_RXDES0_FRS) ||
		     !(status & FTGMAC100_RXDES0_LRS)))
		goto drop;

	/* Grab received size and csum vlan field in the descriptor */
	size = status & FTGMAC100_RXDES0_VDBC;
	csum_vlan = le32_to_cpu(rxdes->rxdes1);

	/* Any error (other than csum offload) flagged ? */
	if (unlikely(status & RXDES0_ANY_ERROR)) {
		/* Correct for incorrect flagging of runt packets
		 * with vlan tags... Just accept a runt packet that
		 * has been flagged as vlan and whose size is at
		 * least 60 bytes.
		 */
		if ((status & FTGMAC100_RXDES0_RUNT) &&
		    (csum_vlan & FTGMAC100_RXDES1_VLANTAG_AVAIL) &&
		    (size >= 60))
			status &= ~FTGMAC100_RXDES0_RUNT;

		/* Any error still in there ? */
		if (status & RXDES0_ANY_ERROR) {
			ftgmac100_rx_packet_error(priv, status);
			goto drop;
		}
	}

	/* If the packet had no skb (failed to allocate earlier)
	 * then try to allocate one and skip
	 */
	skb = priv->rx_skbs[pointer];
	if (unlikely(!skb)) {
		ftgmac100_alloc_rx_buf(priv, pointer, rxdes, GFP_ATOMIC);
		goto drop;
	}

	if (unlikely(status & FTGMAC100_RXDES0_MULTICAST))
		netdev->stats.multicast++;

	/* If the HW found checksum errors, bounce it to software.
	 *
	 * If we didn't, we need to see if the packet was recognized
	 * by HW as one of the supported checksummed protocols before
	 * we accept the HW test results.
	 */
	if (netdev->features & NETIF_F_RXCSUM) {
		u32 err_bits = FTGMAC100_RXDES1_TCP_CHKSUM_ERR |
			FTGMAC100_RXDES1_UDP_CHKSUM_ERR |
			FTGMAC100_RXDES1_IP_CHKSUM_ERR;
		if ((csum_vlan & err_bits) ||
		    !(csum_vlan & FTGMAC100_RXDES1_PROT_MASK))
			skb->ip_summed = CHECKSUM_NONE;
		else
			skb->ip_summed = CHECKSUM_UNNECESSARY;
	}

	/* Transfer received size to skb */
	skb_put(skb, size);

	/* Extract vlan tag */
	if ((netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
	    (csum_vlan & FTGMAC100_RXDES1_VLANTAG_AVAIL))
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
				       csum_vlan & 0xffff);

	/* Tear down DMA mapping, do necessary cache management */
	map = le32_to_cpu(rxdes->rxdes3);

#if defined(CONFIG_ARM) && !defined(CONFIG_ARM_DMA_USE_IOMMU)
	/* When we don't have an iommu, we can save cycles by not
	 * invalidating the cache for the part of the packet that
	 * wasn't received.
	 */
	dma_unmap_single(priv->dev, map, size, DMA_FROM_DEVICE);
#else
	dma_unmap_single(priv->dev, map, RX_BUF_SIZE, DMA_FROM_DEVICE);
#endif

	/* Replenish rx ring */
	ftgmac100_alloc_rx_buf(priv, pointer, rxdes, GFP_ATOMIC);
	priv->rx_pointer = ftgmac100_next_rx_pointer(priv, pointer);

	skb->protocol = eth_type_trans(skb, netdev);

	netdev->stats.rx_packets++;
	netdev->stats.rx_bytes += size;

	/* push packet to protocol stack */
	if (skb->ip_summed == CHECKSUM_NONE)
		netif_receive_skb(skb);
	else
		napi_gro_receive(&priv->napi, skb);

	(*processed)++;
	return true;

drop:
	/* Clean rxdes0 (which resets own bit) */
	rxdes->rxdes0 = cpu_to_le32(status & priv->rxdes0_edorr_mask);
	priv->rx_pointer = ftgmac100_next_rx_pointer(priv, pointer);
	netdev->stats.rx_dropped++;
	return true;
}

static u32 ftgmac100_base_tx_ctlstat(struct ftgmac100 *priv,
				     unsigned int index)
{
	if (index == (priv->tx_q_entries - 1))
		return priv->txdes0_edotr_mask;
	else
		return 0;
}

static unsigned int ftgmac100_next_tx_pointer(struct ftgmac100 *priv,
					      unsigned int pointer)
{
	return (pointer + 1) & (priv->tx_q_entries - 1);
}

static u32 ftgmac100_tx_buf_avail(struct ftgmac100 *priv)
{
	/* Returns the number of available slots in the TX queue
	 *
	 * This always leaves one free slot so we don't have to
	 * worry about empty vs. full, and this simplifies the
	 * test for ftgmac100_tx_buf_cleanable() below
	 */
	return (priv->tx_clean_pointer - priv->tx_pointer - 1) &
		(priv->tx_q_entries - 1);
}
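
/* Example of the math above: with 128 entries and an empty ring
 * (tx_clean_pointer == tx_pointer), avail = (0 - 1) & 127 = 127, i.e.
 * one slot is always kept in reserve so a full ring never looks empty.
 */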

static bool ftgmac100_tx_buf_cleanable(struct ftgmac100 *priv)
{
	return priv->tx_pointer != priv->tx_clean_pointer;
}

static void ftgmac100_free_tx_packet(struct ftgmac100 *priv,
				     unsigned int pointer,
				     struct sk_buff *skb,
				     struct ftgmac100_txdes *txdes,
				     u32 ctl_stat)
{
	dma_addr_t map = le32_to_cpu(txdes->txdes3);
	size_t len;

	if (ctl_stat & FTGMAC100_TXDES0_FTS) {
		len = skb_headlen(skb);
		dma_unmap_single(priv->dev, map, len, DMA_TO_DEVICE);
	} else {
		len = FTGMAC100_TXDES0_TXBUF_SIZE(ctl_stat);
		dma_unmap_page(priv->dev, map, len, DMA_TO_DEVICE);
	}

	/* Free SKB on last segment */
	if (ctl_stat & FTGMAC100_TXDES0_LTS)
		dev_kfree_skb(skb);
	priv->tx_skbs[pointer] = NULL;
}

static bool ftgmac100_tx_complete_packet(struct ftgmac100 *priv)
{
	struct net_device *netdev = priv->netdev;
	struct ftgmac100_txdes *txdes;
	struct sk_buff *skb;
	unsigned int pointer;
	u32 ctl_stat;

	pointer = priv->tx_clean_pointer;
	txdes = &priv->txdes[pointer];

	ctl_stat = le32_to_cpu(txdes->txdes0);
	if (ctl_stat & FTGMAC100_TXDES0_TXDMA_OWN)
		return false;

	skb = priv->tx_skbs[pointer];
	netdev->stats.tx_packets++;
	netdev->stats.tx_bytes += skb->len;
	ftgmac100_free_tx_packet(priv, pointer, skb, txdes, ctl_stat);
	txdes->txdes0 = cpu_to_le32(ctl_stat & priv->txdes0_edotr_mask);

	priv->tx_clean_pointer = ftgmac100_next_tx_pointer(priv, pointer);

	return true;
}

static void ftgmac100_tx_complete(struct ftgmac100 *priv)
{
	struct net_device *netdev = priv->netdev;

	/* Process all completed packets */
	while (ftgmac100_tx_buf_cleanable(priv) &&
	       ftgmac100_tx_complete_packet(priv))
		;
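
	/* The barrier below pairs with the smp_mb() that
	 * ftgmac100_hard_start_xmit() issues after stopping the queue:
	 * either the xmit path observes the slots we just freed, or we
	 * observe the stopped queue and wake it here.
	 */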
	/* Restart queue if needed */
	smp_mb();
	if (unlikely(netif_queue_stopped(netdev) &&
		     ftgmac100_tx_buf_avail(priv) >= TX_THRESHOLD)) {
		struct netdev_queue *txq;

		txq = netdev_get_tx_queue(netdev, 0);
		__netif_tx_lock(txq, smp_processor_id());
		if (netif_queue_stopped(netdev) &&
		    ftgmac100_tx_buf_avail(priv) >= TX_THRESHOLD)
			netif_wake_queue(netdev);
		__netif_tx_unlock(txq);
	}
}
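
/* The MAC can only generate IPv4/TCP/UDP checksums. Since we advertise
 * NETIF_F_HW_CSUM (checksum any protocol), everything else is fed to
 * skb_checksum_help() so it gets checksummed in software before being
 * queued.
 */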
static bool ftgmac100_prep_tx_csum(struct sk_buff *skb, u32 *csum_vlan)
{
	if (skb->protocol == cpu_to_be16(ETH_P_IP)) {
		u8 ip_proto = ip_hdr(skb)->protocol;

		*csum_vlan |= FTGMAC100_TXDES1_IP_CHKSUM;
		switch (ip_proto) {
		case IPPROTO_TCP:
			*csum_vlan |= FTGMAC100_TXDES1_TCP_CHKSUM;
			return true;
		case IPPROTO_UDP:
			*csum_vlan |= FTGMAC100_TXDES1_UDP_CHKSUM;
			return true;
		case IPPROTO_IP:
			return true;
		}
	}
	return skb_checksum_help(skb) == 0;
}

static netdev_tx_t ftgmac100_hard_start_xmit(struct sk_buff *skb,
					     struct net_device *netdev)
{
	struct ftgmac100 *priv = netdev_priv(netdev);
	struct ftgmac100_txdes *txdes, *first;
	unsigned int pointer, nfrags, len, i, j;
	u32 f_ctl_stat, ctl_stat, csum_vlan;
	dma_addr_t map;

	/* The HW doesn't pad small frames */
	if (eth_skb_pad(skb)) {
		netdev->stats.tx_dropped++;
		return NETDEV_TX_OK;
	}

	/* Reject oversize packets */
	if (unlikely(skb->len > MAX_PKT_SIZE)) {
		if (net_ratelimit())
			netdev_dbg(netdev, "tx packet too big\n");
		goto drop;
	}

	/* Do we have a limit on #fragments ? I have yet to get a reply
	 * from Aspeed. If there's one I haven't hit it.
	 */
	nfrags = skb_shinfo(skb)->nr_frags;

	/* Setup HW checksumming */
	csum_vlan = 0;
	if (skb->ip_summed == CHECKSUM_PARTIAL &&
	    !ftgmac100_prep_tx_csum(skb, &csum_vlan))
		goto drop;

	/* Add VLAN tag */
	if (skb_vlan_tag_present(skb)) {
		csum_vlan |= FTGMAC100_TXDES1_INS_VLANTAG;
		csum_vlan |= skb_vlan_tag_get(skb) & 0xffff;
	}

	/* Get header len */
	len = skb_headlen(skb);

	/* Map the packet head */
	map = dma_map_single(priv->dev, skb->data, len, DMA_TO_DEVICE);
	if (dma_mapping_error(priv->dev, map)) {
		if (net_ratelimit())
			netdev_err(netdev, "map tx packet head failed\n");
		goto drop;
	}

	/* Grab the next free tx descriptor */
	pointer = priv->tx_pointer;
	txdes = first = &priv->txdes[pointer];

	/* Set it up with the packet head. Don't write the head to the
	 * ring just yet
	 */
	priv->tx_skbs[pointer] = skb;
	f_ctl_stat = ftgmac100_base_tx_ctlstat(priv, pointer);
	f_ctl_stat |= FTGMAC100_TXDES0_TXDMA_OWN;
	f_ctl_stat |= FTGMAC100_TXDES0_TXBUF_SIZE(len);
	f_ctl_stat |= FTGMAC100_TXDES0_FTS;
	if (nfrags == 0)
		f_ctl_stat |= FTGMAC100_TXDES0_LTS;
	txdes->txdes3 = cpu_to_le32(map);
	txdes->txdes1 = cpu_to_le32(csum_vlan);

	/* Next descriptor */
	pointer = ftgmac100_next_tx_pointer(priv, pointer);

	/* Add the fragments */
	for (i = 0; i < nfrags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		len = skb_frag_size(frag);

		/* Map it */
		map = skb_frag_dma_map(priv->dev, frag, 0, len,
				       DMA_TO_DEVICE);
		if (dma_mapping_error(priv->dev, map))
			goto dma_err;

		/* Setup descriptor */
		priv->tx_skbs[pointer] = skb;
		txdes = &priv->txdes[pointer];
		ctl_stat = ftgmac100_base_tx_ctlstat(priv, pointer);
		ctl_stat |= FTGMAC100_TXDES0_TXDMA_OWN;
		ctl_stat |= FTGMAC100_TXDES0_TXBUF_SIZE(len);
		if (i == (nfrags - 1))
			ctl_stat |= FTGMAC100_TXDES0_LTS;
		txdes->txdes0 = cpu_to_le32(ctl_stat);
		txdes->txdes1 = 0;
		txdes->txdes3 = cpu_to_le32(map);

		/* Next one */
		pointer = ftgmac100_next_tx_pointer(priv, pointer);
	}

	/* Order the previous packet and descriptor updates
	 * before setting the OWN bit on the first descriptor.
	 */
	dma_wmb();
	first->txdes0 = cpu_to_le32(f_ctl_stat);

	/* Update next TX pointer */
	priv->tx_pointer = pointer;

	/* If there isn't enough room for all the fragments of a new packet
	 * in the TX ring, stop the queue. The sequence below is race free
	 * vs. a concurrent restart in ftgmac100_poll()
	 */
	if (unlikely(ftgmac100_tx_buf_avail(priv) < TX_THRESHOLD)) {
		netif_stop_queue(netdev);
		/* Order the queue stop with the test below */
		smp_mb();
		if (ftgmac100_tx_buf_avail(priv) >= TX_THRESHOLD)
			netif_wake_queue(netdev);
	}

	/* Poke transmitter to read the updated TX descriptors */
	iowrite32(1, priv->base + FTGMAC100_OFFSET_NPTXPD);

	return NETDEV_TX_OK;

dma_err:
	if (net_ratelimit())
		netdev_err(netdev, "map tx fragment failed\n");

	/* Free head */
	pointer = priv->tx_pointer;
	ftgmac100_free_tx_packet(priv, pointer, skb, first, f_ctl_stat);
	first->txdes0 = cpu_to_le32(f_ctl_stat & priv->txdes0_edotr_mask);

	/* Then all fragments */
	for (j = 0; j < i; j++) {
		pointer = ftgmac100_next_tx_pointer(priv, pointer);
		txdes = &priv->txdes[pointer];
		ctl_stat = le32_to_cpu(txdes->txdes0);
		ftgmac100_free_tx_packet(priv, pointer, skb, txdes, ctl_stat);
		txdes->txdes0 = cpu_to_le32(ctl_stat & priv->txdes0_edotr_mask);
	}

	/* This cannot be reached if we successfully mapped the
	 * last fragment, so we know ftgmac100_free_tx_packet()
	 * hasn't freed the skb yet.
	 */
drop:
	/* Drop the packet */
	dev_kfree_skb_any(skb);
	netdev->stats.tx_dropped++;

	return NETDEV_TX_OK;
}

static void ftgmac100_free_buffers(struct ftgmac100 *priv)
{
	int i;

	/* Free all RX buffers */
	for (i = 0; i < priv->rx_q_entries; i++) {
		struct ftgmac100_rxdes *rxdes = &priv->rxdes[i];
		struct sk_buff *skb = priv->rx_skbs[i];
		dma_addr_t map = le32_to_cpu(rxdes->rxdes3);

		if (!skb)
			continue;

		priv->rx_skbs[i] = NULL;
		dma_unmap_single(priv->dev, map, RX_BUF_SIZE, DMA_FROM_DEVICE);
		dev_kfree_skb_any(skb);
	}

	/* Free all TX buffers */
	for (i = 0; i < priv->tx_q_entries; i++) {
		struct ftgmac100_txdes *txdes = &priv->txdes[i];
		struct sk_buff *skb = priv->tx_skbs[i];

		if (!skb)
			continue;
		ftgmac100_free_tx_packet(priv, i, skb, txdes,
					 le32_to_cpu(txdes->txdes0));
	}
}

static void ftgmac100_free_rings(struct ftgmac100 *priv)
{
	/* Free skb arrays */
	kfree(priv->rx_skbs);
	kfree(priv->tx_skbs);

	/* Free descriptors */
	if (priv->rxdes)
		dma_free_coherent(priv->dev, MAX_RX_QUEUE_ENTRIES *
				  sizeof(struct ftgmac100_rxdes),
				  priv->rxdes, priv->rxdes_dma);
	priv->rxdes = NULL;

	if (priv->txdes)
		dma_free_coherent(priv->dev, MAX_TX_QUEUE_ENTRIES *
				  sizeof(struct ftgmac100_txdes),
				  priv->txdes, priv->txdes_dma);
	priv->txdes = NULL;

	/* Free scratch packet buffer */
	if (priv->rx_scratch)
		dma_free_coherent(priv->dev, RX_BUF_SIZE,
				  priv->rx_scratch, priv->rx_scratch_dma);
}
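
/* Note: the skb arrays and descriptor rings are always allocated at
 * their maximum size, so an ethtool ring resize only needs to adjust
 * the counts and reset the chip (see ftgmac100_set_ringparam()), never
 * to reallocate memory.
 */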
static int ftgmac100_alloc_rings(struct ftgmac100 *priv)
{
	/* Allocate skb arrays */
	priv->rx_skbs = kcalloc(MAX_RX_QUEUE_ENTRIES, sizeof(void *),
				GFP_KERNEL);
	if (!priv->rx_skbs)
		return -ENOMEM;
	priv->tx_skbs = kcalloc(MAX_TX_QUEUE_ENTRIES, sizeof(void *),
				GFP_KERNEL);
	if (!priv->tx_skbs)
		return -ENOMEM;

	/* Allocate descriptors */
	priv->rxdes = dma_alloc_coherent(priv->dev,
					 MAX_RX_QUEUE_ENTRIES * sizeof(struct ftgmac100_rxdes),
					 &priv->rxdes_dma, GFP_KERNEL);
	if (!priv->rxdes)
		return -ENOMEM;
	priv->txdes = dma_alloc_coherent(priv->dev,
					 MAX_TX_QUEUE_ENTRIES * sizeof(struct ftgmac100_txdes),
					 &priv->txdes_dma, GFP_KERNEL);
	if (!priv->txdes)
		return -ENOMEM;

	/* Allocate scratch packet buffer */
	priv->rx_scratch = dma_alloc_coherent(priv->dev,
					      RX_BUF_SIZE,
					      &priv->rx_scratch_dma,
					      GFP_KERNEL);
	if (!priv->rx_scratch)
		return -ENOMEM;

	return 0;
}

static void ftgmac100_init_rings(struct ftgmac100 *priv)
{
	struct ftgmac100_rxdes *rxdes = NULL;
	struct ftgmac100_txdes *txdes = NULL;
	int i;

	/* Update entries counts */
	priv->rx_q_entries = priv->new_rx_q_entries;
	priv->tx_q_entries = priv->new_tx_q_entries;

	if (WARN_ON(priv->rx_q_entries < MIN_RX_QUEUE_ENTRIES))
		return;

	/* Initialize RX ring */
	for (i = 0; i < priv->rx_q_entries; i++) {
		rxdes = &priv->rxdes[i];
		rxdes->rxdes0 = 0;
		rxdes->rxdes3 = cpu_to_le32(priv->rx_scratch_dma);
	}
	/* Mark the end of the ring */
	rxdes->rxdes0 |= cpu_to_le32(priv->rxdes0_edorr_mask);

	if (WARN_ON(priv->tx_q_entries < MIN_TX_QUEUE_ENTRIES))
		return;

	/* Initialize TX ring */
	for (i = 0; i < priv->tx_q_entries; i++) {
		txdes = &priv->txdes[i];
		txdes->txdes0 = 0;
	}
	txdes->txdes0 |= cpu_to_le32(priv->txdes0_edotr_mask);
}

static int ftgmac100_alloc_rx_buffers(struct ftgmac100 *priv)
{
	int i;

	for (i = 0; i < priv->rx_q_entries; i++) {
		struct ftgmac100_rxdes *rxdes = &priv->rxdes[i];

		if (ftgmac100_alloc_rx_buf(priv, i, rxdes, GFP_KERNEL))
			return -ENOMEM;
	}
	return 0;
}

static void ftgmac100_adjust_link(struct net_device *netdev)
{
	struct ftgmac100 *priv = netdev_priv(netdev);
	struct phy_device *phydev = netdev->phydev;
	bool tx_pause, rx_pause;
	int new_speed;

	/* We store "no link" as speed 0 */
	if (!phydev->link)
		new_speed = 0;
	else
		new_speed = phydev->speed;

	/* Grab pause settings from PHY if configured to do so */
	if (priv->aneg_pause) {
		rx_pause = tx_pause = phydev->pause;
		if (phydev->asym_pause)
			tx_pause = !rx_pause;
	} else {
		rx_pause = priv->rx_pause;
		tx_pause = priv->tx_pause;
	}

	/* Link hasn't changed, do nothing */
	if (phydev->speed == priv->cur_speed &&
	    phydev->duplex == priv->cur_duplex &&
	    rx_pause == priv->rx_pause &&
	    tx_pause == priv->tx_pause)
		return;

	/* Print status if we have a link or we had one and just lost it,
	 * don't print otherwise.
	 */
	if (new_speed || priv->cur_speed)
		phy_print_status(phydev);

	priv->cur_speed = new_speed;
	priv->cur_duplex = phydev->duplex;
	priv->rx_pause = rx_pause;
	priv->tx_pause = tx_pause;

	/* Link is down, do nothing else */
	if (!new_speed)
		return;

	/* Disable all interrupts */
	iowrite32(0, priv->base + FTGMAC100_OFFSET_IER);

	/* Reset the adapter asynchronously */
	schedule_work(&priv->reset_task);
}

static int ftgmac100_mii_probe(struct ftgmac100 *priv, phy_interface_t intf)
{
	struct net_device *netdev = priv->netdev;
	struct phy_device *phydev;

	phydev = phy_find_first(priv->mii_bus);
	if (!phydev) {
		netdev_info(netdev, "%s: no PHY found\n", netdev->name);
		return -ENODEV;
	}

	phydev = phy_connect(netdev, phydev_name(phydev),
			     &ftgmac100_adjust_link, intf);

	if (IS_ERR(phydev)) {
		netdev_err(netdev, "%s: Could not attach to PHY\n", netdev->name);
		return PTR_ERR(phydev);
	}

	/* Indicate that we support PAUSE frames (see comment in
	 * Documentation/networking/phy.rst)
	 */
	phy_support_asym_pause(phydev);

	/* Display what we found */
	phy_attached_info(phydev);

	return 0;
}

static int ftgmac100_mdiobus_read(struct mii_bus *bus, int phy_addr, int regnum)
{
	struct net_device *netdev = bus->priv;
	struct ftgmac100 *priv = netdev_priv(netdev);
	unsigned int phycr;
	int i;

	phycr = ioread32(priv->base + FTGMAC100_OFFSET_PHYCR);

	/* preserve MDC cycle threshold */
	phycr &= FTGMAC100_PHYCR_MDC_CYCTHR_MASK;

	phycr |= FTGMAC100_PHYCR_PHYAD(phy_addr) |
		 FTGMAC100_PHYCR_REGAD(regnum) |
		 FTGMAC100_PHYCR_MIIRD;

	iowrite32(phycr, priv->base + FTGMAC100_OFFSET_PHYCR);

	for (i = 0; i < 10; i++) {
		phycr = ioread32(priv->base + FTGMAC100_OFFSET_PHYCR);

		if ((phycr & FTGMAC100_PHYCR_MIIRD) == 0) {
			int data;

			data = ioread32(priv->base + FTGMAC100_OFFSET_PHYDATA);
			return FTGMAC100_PHYDATA_MIIRDATA(data);
		}

		udelay(100);
	}

	netdev_err(netdev, "mdio read timed out\n");
	return -EIO;
}

static int ftgmac100_mdiobus_write(struct mii_bus *bus, int phy_addr,
				   int regnum, u16 value)
{
	struct net_device *netdev = bus->priv;
	struct ftgmac100 *priv = netdev_priv(netdev);
	unsigned int phycr;
	int data;
	int i;

	phycr = ioread32(priv->base + FTGMAC100_OFFSET_PHYCR);

	/* preserve MDC cycle threshold */
	phycr &= FTGMAC100_PHYCR_MDC_CYCTHR_MASK;

	phycr |= FTGMAC100_PHYCR_PHYAD(phy_addr) |
		 FTGMAC100_PHYCR_REGAD(regnum) |
		 FTGMAC100_PHYCR_MIIWR;

	data = FTGMAC100_PHYDATA_MIIWDATA(value);

	iowrite32(data, priv->base + FTGMAC100_OFFSET_PHYDATA);
	iowrite32(phycr, priv->base + FTGMAC100_OFFSET_PHYCR);

	for (i = 0; i < 10; i++) {
		phycr = ioread32(priv->base + FTGMAC100_OFFSET_PHYCR);

		if ((phycr & FTGMAC100_PHYCR_MIIWR) == 0)
			return 0;

		udelay(100);
	}

	netdev_err(netdev, "mdio write timed out\n");
	return -EIO;
}

static void ftgmac100_get_drvinfo(struct net_device *netdev,
				  struct ethtool_drvinfo *info)
{
	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
	strlcpy(info->bus_info, dev_name(&netdev->dev),
		sizeof(info->bus_info));
}

static void ftgmac100_get_ringparam(struct net_device *netdev,
				    struct ethtool_ringparam *ering)
{
	struct ftgmac100 *priv = netdev_priv(netdev);

	memset(ering, 0, sizeof(*ering));
	ering->rx_max_pending = MAX_RX_QUEUE_ENTRIES;
	ering->tx_max_pending = MAX_TX_QUEUE_ENTRIES;
	ering->rx_pending = priv->rx_q_entries;
	ering->tx_pending = priv->tx_q_entries;
}

static int ftgmac100_set_ringparam(struct net_device *netdev,
				   struct ethtool_ringparam *ering)
{
	struct ftgmac100 *priv = netdev_priv(netdev);

	if (ering->rx_pending > MAX_RX_QUEUE_ENTRIES ||
	    ering->tx_pending > MAX_TX_QUEUE_ENTRIES ||
	    ering->rx_pending < MIN_RX_QUEUE_ENTRIES ||
	    ering->tx_pending < MIN_TX_QUEUE_ENTRIES ||
	    !is_power_of_2(ering->rx_pending) ||
	    !is_power_of_2(ering->tx_pending))
		return -EINVAL;

	priv->new_rx_q_entries = ering->rx_pending;
	priv->new_tx_q_entries = ering->tx_pending;
	if (netif_running(netdev))
		schedule_work(&priv->reset_task);

	return 0;
}

static void ftgmac100_get_pauseparam(struct net_device *netdev,
				     struct ethtool_pauseparam *pause)
{
	struct ftgmac100 *priv = netdev_priv(netdev);

	pause->autoneg = priv->aneg_pause;
	pause->tx_pause = priv->tx_pause;
	pause->rx_pause = priv->rx_pause;
}

static int ftgmac100_set_pauseparam(struct net_device *netdev,
				    struct ethtool_pauseparam *pause)
{
	struct ftgmac100 *priv = netdev_priv(netdev);
	struct phy_device *phydev = netdev->phydev;

	priv->aneg_pause = pause->autoneg;
	priv->tx_pause = pause->tx_pause;
	priv->rx_pause = pause->rx_pause;

	if (phydev)
		phy_set_asym_pause(phydev, pause->rx_pause, pause->tx_pause);

	if (netif_running(netdev)) {
		if (!(phydev && priv->aneg_pause))
			ftgmac100_config_pause(priv);
	}

	return 0;
}

static const struct ethtool_ops ftgmac100_ethtool_ops = {
	.get_drvinfo		= ftgmac100_get_drvinfo,
	.get_link		= ethtool_op_get_link,
	.get_link_ksettings	= phy_ethtool_get_link_ksettings,
	.set_link_ksettings	= phy_ethtool_set_link_ksettings,
	.nway_reset		= phy_ethtool_nway_reset,
	.get_ringparam		= ftgmac100_get_ringparam,
	.set_ringparam		= ftgmac100_set_ringparam,
	.get_pauseparam		= ftgmac100_get_pauseparam,
	.set_pauseparam		= ftgmac100_set_pauseparam,
};

static irqreturn_t ftgmac100_interrupt(int irq, void *dev_id)
{
	struct net_device *netdev = dev_id;
	struct ftgmac100 *priv = netdev_priv(netdev);
	unsigned int status, new_mask = FTGMAC100_INT_BAD;

	/* Fetch and clear interrupt bits, process abnormal ones */
	status = ioread32(priv->base + FTGMAC100_OFFSET_ISR);
	iowrite32(status, priv->base + FTGMAC100_OFFSET_ISR);
	if (unlikely(status & FTGMAC100_INT_BAD)) {

		/* RX buffer unavailable */
		if (status & FTGMAC100_INT_NO_RXBUF)
			netdev->stats.rx_over_errors++;

		/* received packet lost due to RX FIFO full */
		if (status & FTGMAC100_INT_RPKT_LOST)
			netdev->stats.rx_fifo_errors++;

		/* sent packet lost due to excessive TX collision */
		if (status & FTGMAC100_INT_XPKT_LOST)
			netdev->stats.tx_fifo_errors++;

		/* AHB error -> Reset the chip */
		if (status & FTGMAC100_INT_AHB_ERR) {
			if (net_ratelimit())
				netdev_warn(netdev,
					    "AHB bus error ! Resetting chip.\n");
			iowrite32(0, priv->base + FTGMAC100_OFFSET_IER);
			schedule_work(&priv->reset_task);
			return IRQ_HANDLED;
		}

		/* We may need to restart the MAC after such errors, delay
		 * this until after we have freed some Rx buffers though
		 */
		priv->need_mac_restart = true;

		/* Disable those errors until we restart */
		new_mask &= ~status;
	}

	/* Only enable "bad" interrupts while NAPI is on */
	iowrite32(new_mask, priv->base + FTGMAC100_OFFSET_IER);

	/* Schedule NAPI bh */
	napi_schedule_irqoff(&priv->napi);

	return IRQ_HANDLED;
}

static bool ftgmac100_check_rx(struct ftgmac100 *priv)
{
	struct ftgmac100_rxdes *rxdes = &priv->rxdes[priv->rx_pointer];

	/* Do we have a packet ? */
	return !!(rxdes->rxdes0 & cpu_to_le32(FTGMAC100_RXDES0_RXPKT_RDY));
}

static int ftgmac100_poll(struct napi_struct *napi, int budget)
{
	struct ftgmac100 *priv = container_of(napi, struct ftgmac100, napi);
	int work_done = 0;
	bool more;

	/* Handle TX completions */
	if (ftgmac100_tx_buf_cleanable(priv))
		ftgmac100_tx_complete(priv);

	/* Handle RX packets */
	do {
		more = ftgmac100_rx_packet(priv, &work_done);
	} while (more && work_done < budget);

	/* The interrupt is telling us to kick the MAC back to life
	 * after an RX overflow
	 */
	if (unlikely(priv->need_mac_restart)) {
		ftgmac100_start_hw(priv);

		/* Re-enable "bad" interrupts */
		iowrite32(FTGMAC100_INT_BAD,
			  priv->base + FTGMAC100_OFFSET_IER);
	}

	/* As long as we are waiting for transmit packets to be
	 * completed we keep NAPI going
	 */
	if (ftgmac100_tx_buf_cleanable(priv))
		work_done = budget;

	if (work_done < budget) {
		/* We are about to re-enable all interrupts. However
		 * the HW has been latching RX/TX packet interrupts while
		 * they were masked. So we clear them first, then we need
		 * to re-check if there's something to process
		 */
		iowrite32(FTGMAC100_INT_RXTX,
			  priv->base + FTGMAC100_OFFSET_ISR);

		/* Push the above (and provides a barrier vs. subsequent
		 * reads of the descriptor).
		 */
		ioread32(priv->base + FTGMAC100_OFFSET_ISR);

		/* Check RX and TX descriptors for more work to do */
		if (ftgmac100_check_rx(priv) ||
		    ftgmac100_tx_buf_cleanable(priv))
			return budget;

		/* deschedule NAPI */
		napi_complete(napi);

		/* enable all interrupts */
		iowrite32(FTGMAC100_INT_ALL,
			  priv->base + FTGMAC100_OFFSET_IER);
	}

	return work_done;
}

static int ftgmac100_init_all(struct ftgmac100 *priv, bool ignore_alloc_err)
{
	int err = 0;

	/* Re-init descriptors (adjust queue sizes) */
	ftgmac100_init_rings(priv);

	/* Realloc rx descriptors */
	err = ftgmac100_alloc_rx_buffers(priv);
	if (err && !ignore_alloc_err)
		return err;

	/* Reinit and restart HW */
	ftgmac100_init_hw(priv);
	ftgmac100_config_pause(priv);
	ftgmac100_start_hw(priv);

	/* Re-enable the device */
	napi_enable(&priv->napi);
	netif_start_queue(priv->netdev);

	/* Enable all interrupts */
	iowrite32(FTGMAC100_INT_ALL, priv->base + FTGMAC100_OFFSET_IER);

	return err;
}

static void ftgmac100_reset_task(struct work_struct *work)
{
	struct ftgmac100 *priv = container_of(work, struct ftgmac100,
					      reset_task);
	struct net_device *netdev = priv->netdev;
	int err;

	netdev_dbg(netdev, "Resetting NIC...\n");

	/* Lock the world */
	rtnl_lock();
	if (netdev->phydev)
		mutex_lock(&netdev->phydev->lock);
	if (priv->mii_bus)
		mutex_lock(&priv->mii_bus->mdio_lock);

	/* Check if the interface is still up */
	if (!netif_running(netdev))
		goto bail;

	/* Stop the network stack */
	netif_trans_update(netdev);
	napi_disable(&priv->napi);
	netif_tx_disable(netdev);

	/* Stop and reset the MAC */
	ftgmac100_stop_hw(priv);
	err = ftgmac100_reset_and_config_mac(priv);
	if (err) {
		/* Not much we can do ... it might come back... */
		netdev_err(netdev, "attempting to continue...\n");
	}

	/* Free all rx and tx buffers */
	ftgmac100_free_buffers(priv);

	/* Setup everything again and restart chip */
	ftgmac100_init_all(priv, true);

	netdev_dbg(netdev, "Reset done !\n");
bail:
	if (priv->mii_bus)
		mutex_unlock(&priv->mii_bus->mdio_lock);
	if (netdev->phydev)
		mutex_unlock(&netdev->phydev->lock);
	rtnl_unlock();
}

static int ftgmac100_open(struct net_device *netdev)
{
	struct ftgmac100 *priv = netdev_priv(netdev);
	int err;

	/* Allocate ring buffers */
	err = ftgmac100_alloc_rings(priv);
	if (err) {
		netdev_err(netdev, "Failed to allocate descriptors\n");
		return err;
	}

	/* When using NC-SI we force the speed to 100Mbit/s full duplex,
	 *
	 * Otherwise we leave it set to 0 (no link), the link
	 * message from the PHY layer will handle setting it up to
	 * something else if needed.
	 */
	if (priv->use_ncsi) {
		priv->cur_duplex = DUPLEX_FULL;
		priv->cur_speed = SPEED_100;
	} else {
		priv->cur_duplex = 0;
		priv->cur_speed = 0;
	}

	/* Reset the hardware */
	err = ftgmac100_reset_and_config_mac(priv);
	if (err)
		goto err_hw;

	/* Initialize NAPI */
	netif_napi_add(netdev, &priv->napi, ftgmac100_poll, 64);

	/* Grab our interrupt */
	err = request_irq(netdev->irq, ftgmac100_interrupt, 0, netdev->name, netdev);
	if (err) {
		netdev_err(netdev, "failed to request irq %d\n", netdev->irq);
		goto err_irq;
	}

	/* Start things up */
	err = ftgmac100_init_all(priv, false);
	if (err) {
		netdev_err(netdev, "Failed to allocate packet buffers\n");
		goto err_alloc;
	}

	if (netdev->phydev) {
		/* If we have a PHY, start polling */
		phy_start(netdev->phydev);
	} else if (priv->use_ncsi) {
		/* If using NC-SI, set our carrier on and start the stack */
		netif_carrier_on(netdev);

		/* Start the NCSI device */
		err = ncsi_start_dev(priv->ndev);
		if (err)
			goto err_ncsi;
	}

	return 0;

err_ncsi:
	napi_disable(&priv->napi);
	netif_stop_queue(netdev);
err_alloc:
	ftgmac100_free_buffers(priv);
	free_irq(netdev->irq, netdev);
err_irq:
	netif_napi_del(&priv->napi);
err_hw:
	iowrite32(0, priv->base + FTGMAC100_OFFSET_IER);
	ftgmac100_free_rings(priv);
	return err;
}

static int ftgmac100_stop(struct net_device *netdev)
{
	struct ftgmac100 *priv = netdev_priv(netdev);

	/* Note about the reset task: We are called with the rtnl lock
	 * held, so we are synchronized against the core of the reset
	 * task. We must not try to synchronously cancel it otherwise
	 * we can deadlock. But since it will test for netif_running()
	 * which has already been cleared by the net core, we don't have
	 * anything special to do.
	 */

	/* disable all interrupts */
	iowrite32(0, priv->base + FTGMAC100_OFFSET_IER);

	netif_stop_queue(netdev);
	napi_disable(&priv->napi);
	netif_napi_del(&priv->napi);
	if (netdev->phydev)
		phy_stop(netdev->phydev);
	else if (priv->use_ncsi)
		ncsi_stop_dev(priv->ndev);

	ftgmac100_stop_hw(priv);
	free_irq(netdev->irq, netdev);
	ftgmac100_free_buffers(priv);
	ftgmac100_free_rings(priv);

	return 0;
}

/* optional */
static int ftgmac100_do_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
	if (!netdev->phydev)
		return -ENXIO;

	return phy_mii_ioctl(netdev->phydev, ifr, cmd);
}

static void ftgmac100_tx_timeout(struct net_device *netdev)
{
	struct ftgmac100 *priv = netdev_priv(netdev);

	/* Disable all interrupts */
	iowrite32(0, priv->base + FTGMAC100_OFFSET_IER);

	/* Do the reset outside of interrupt context */
	schedule_work(&priv->reset_task);
}

static int ftgmac100_set_features(struct net_device *netdev,
				  netdev_features_t features)
{
	struct ftgmac100 *priv = netdev_priv(netdev);
	netdev_features_t changed = netdev->features ^ features;

	if (!netif_running(netdev))
		return 0;

	/* Update the vlan filtering bit */
	if (changed & NETIF_F_HW_VLAN_CTAG_RX) {
		u32 maccr;

		maccr = ioread32(priv->base + FTGMAC100_OFFSET_MACCR);
		if (priv->netdev->features & NETIF_F_HW_VLAN_CTAG_RX)
			maccr |= FTGMAC100_MACCR_RM_VLAN;
		else
			maccr &= ~FTGMAC100_MACCR_RM_VLAN;
		iowrite32(maccr, priv->base + FTGMAC100_OFFSET_MACCR);
	}

	return 0;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void ftgmac100_poll_controller(struct net_device *netdev)
{
	unsigned long flags;

	local_irq_save(flags);
	ftgmac100_interrupt(netdev->irq, netdev);
	local_irq_restore(flags);
}
#endif

static const struct net_device_ops ftgmac100_netdev_ops = {
	.ndo_open		= ftgmac100_open,
	.ndo_stop		= ftgmac100_stop,
	.ndo_start_xmit		= ftgmac100_hard_start_xmit,
	.ndo_set_mac_address	= ftgmac100_set_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_do_ioctl		= ftgmac100_do_ioctl,
	.ndo_tx_timeout		= ftgmac100_tx_timeout,
	.ndo_set_rx_mode	= ftgmac100_set_rx_mode,
	.ndo_set_features	= ftgmac100_set_features,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= ftgmac100_poll_controller,
#endif
	.ndo_vlan_rx_add_vid	= ncsi_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= ncsi_vlan_rx_kill_vid,
};

static int ftgmac100_setup_mdio(struct net_device *netdev)
{
	struct ftgmac100 *priv = netdev_priv(netdev);
	struct platform_device *pdev = to_platform_device(priv->dev);
	int phy_intf = PHY_INTERFACE_MODE_RGMII;
	struct device_node *np = pdev->dev.of_node;
	int i, err = 0;
	u32 reg;

	/* initialize mdio bus */
	priv->mii_bus = mdiobus_alloc();
	if (!priv->mii_bus)
		return -EIO;

	if (of_device_is_compatible(np, "aspeed,ast2400-mac") ||
	    of_device_is_compatible(np, "aspeed,ast2500-mac")) {
		/* The AST2600 has a separate MDIO controller */

		/* For the AST2400 and AST2500 this driver only supports the
		 * old MDIO interface
		 */
		reg = ioread32(priv->base + FTGMAC100_OFFSET_REVR);
		reg &= ~FTGMAC100_REVR_NEW_MDIO_INTERFACE;
		iowrite32(reg, priv->base + FTGMAC100_OFFSET_REVR);
	}

	/* Get PHY mode from device-tree */
	if (np) {
		/* Default to RGMII. It's a gigabit part after all */
		phy_intf = of_get_phy_mode(np);
		if (phy_intf < 0)
			phy_intf = PHY_INTERFACE_MODE_RGMII;

		/* Aspeed only supports these. I don't know about other IP
		 * block vendors so I'm going to just let them through for
		 * now. Note that this is only a warning if for some obscure
		 * reason the DT really means to lie about it or it's a newer
		 * part we don't know about.
		 *
		 * On the Aspeed SoC there are additionally straps and SCU
		 * control bits that could tell us what the interface is
		 * (or allow us to configure it while the IP block is held
		 * in reset). For now I chose to keep this driver away from
		 * those SoC specific bits and assume the device-tree is
		 * right and the SCU has been configured properly by pinmux
		 * or the firmware.
		 */
		if (priv->is_aspeed &&
		    phy_intf != PHY_INTERFACE_MODE_RMII &&
		    phy_intf != PHY_INTERFACE_MODE_RGMII &&
		    phy_intf != PHY_INTERFACE_MODE_RGMII_ID &&
		    phy_intf != PHY_INTERFACE_MODE_RGMII_RXID &&
		    phy_intf != PHY_INTERFACE_MODE_RGMII_TXID) {
			netdev_warn(netdev,
				    "Unsupported PHY mode %s !\n",
				    phy_modes(phy_intf));
		}
	}

	priv->mii_bus->name = "ftgmac100_mdio";
	snprintf(priv->mii_bus->id, MII_BUS_ID_SIZE, "%s-%d",
		 pdev->name, pdev->id);
	priv->mii_bus->parent = priv->dev;
	priv->mii_bus->priv = priv->netdev;
	priv->mii_bus->read = ftgmac100_mdiobus_read;
	priv->mii_bus->write = ftgmac100_mdiobus_write;

	for (i = 0; i < PHY_MAX_ADDR; i++)
		priv->mii_bus->irq[i] = PHY_POLL;

	err = mdiobus_register(priv->mii_bus);
	if (err) {
		dev_err(priv->dev, "Cannot register MDIO bus!\n");
		goto err_register_mdiobus;
	}

	err = ftgmac100_mii_probe(priv, phy_intf);
	if (err) {
		dev_err(priv->dev, "MII Probe failed!\n");
		goto err_mii_probe;
	}

	return 0;

err_mii_probe:
	mdiobus_unregister(priv->mii_bus);
err_register_mdiobus:
	mdiobus_free(priv->mii_bus);
	return err;
}

static void ftgmac100_destroy_mdio(struct net_device *netdev)
{
	struct ftgmac100 *priv = netdev_priv(netdev);

	if (!netdev->phydev)
		return;

	phy_disconnect(netdev->phydev);
	mdiobus_unregister(priv->mii_bus);
	mdiobus_free(priv->mii_bus);
}

static void ftgmac100_ncsi_handler(struct ncsi_dev *nd)
{
	if (unlikely(nd->state != ncsi_dev_state_functional))
		return;

	netdev_dbg(nd->dev, "NCSI interface %s\n",
		   nd->link_up ? "up" : "down");
}

static void ftgmac100_setup_clk(struct ftgmac100 *priv)
{
	priv->clk = devm_clk_get(priv->dev, NULL);
	if (IS_ERR(priv->clk))
		return;

	clk_prepare_enable(priv->clk);

	/* Aspeed specifies a 100MHz clock is required for up to
	 * 1000Mbit link speeds. As NCSI is limited to 100Mbit, 25MHz
	 * is sufficient
	 */
	clk_set_rate(priv->clk, priv->use_ncsi ? FTGMAC_25MHZ :
		     FTGMAC_100MHZ);
}

static int ftgmac100_probe(struct platform_device *pdev)
{
	struct resource *res;
	int irq;
	struct net_device *netdev;
	struct ftgmac100 *priv;
	struct device_node *np;
	int err = 0;

	if (!pdev)
		return -ENODEV;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENXIO;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	/* setup net_device */
	netdev = alloc_etherdev(sizeof(*priv));
	if (!netdev) {
		err = -ENOMEM;
		goto err_alloc_etherdev;
	}

	SET_NETDEV_DEV(netdev, &pdev->dev);

	netdev->ethtool_ops = &ftgmac100_ethtool_ops;
	netdev->netdev_ops = &ftgmac100_netdev_ops;
	netdev->watchdog_timeo = 5 * HZ;

	platform_set_drvdata(pdev, netdev);

	/* setup private data */
	priv = netdev_priv(netdev);
	priv->netdev = netdev;
	priv->dev = &pdev->dev;
	INIT_WORK(&priv->reset_task, ftgmac100_reset_task);

	/* map io memory */
	priv->res = request_mem_region(res->start, resource_size(res),
				       dev_name(&pdev->dev));
	if (!priv->res) {
		dev_err(&pdev->dev, "Could not reserve memory region\n");
		err = -ENOMEM;
		goto err_req_mem;
	}

	priv->base = ioremap(res->start, resource_size(res));
	if (!priv->base) {
		dev_err(&pdev->dev, "Failed to ioremap ethernet registers\n");
		err = -EIO;
		goto err_ioremap;
	}

	netdev->irq = irq;

	/* Enable pause */
	priv->tx_pause = true;
	priv->rx_pause = true;
	priv->aneg_pause = true;

	/* MAC address from chip or random one */
	ftgmac100_initial_mac(priv);

	np = pdev->dev.of_node;
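	/* The Aspeed variants moved the end-of-ring descriptor flags
	 * (EDORR/EDOTR) from bit 15 to bit 30 of rxdes0/txdes0, hence
	 * the per-device masks kept in the private structure rather
	 * than hardcoded values.
	 */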
	if (np && (of_device_is_compatible(np, "aspeed,ast2400-mac") ||
		   of_device_is_compatible(np, "aspeed,ast2500-mac") ||
		   of_device_is_compatible(np, "aspeed,ast2600-mac"))) {
		priv->rxdes0_edorr_mask = BIT(30);
		priv->txdes0_edotr_mask = BIT(30);
		priv->is_aspeed = true;
	} else {
		priv->rxdes0_edorr_mask = BIT(15);
		priv->txdes0_edotr_mask = BIT(15);
	}

	if (np && of_get_property(np, "use-ncsi", NULL)) {
		if (!IS_ENABLED(CONFIG_NET_NCSI)) {
			dev_err(&pdev->dev, "NCSI stack not enabled\n");
			err = -EINVAL;
			goto err_ncsi_dev;
		}

		dev_info(&pdev->dev, "Using NCSI interface\n");
		priv->use_ncsi = true;
		priv->ndev = ncsi_register_dev(netdev, ftgmac100_ncsi_handler);
		if (!priv->ndev) {
			err = -EINVAL;
			goto err_ncsi_dev;
		}
	} else if (np && of_get_property(np, "phy-handle", NULL)) {
		struct phy_device *phy;

		phy = of_phy_get_and_connect(priv->netdev, np,
					     &ftgmac100_adjust_link);
		if (!phy) {
			dev_err(&pdev->dev, "Failed to connect to phy\n");
			err = -EINVAL;
			goto err_setup_mdio;
		}

		/* Indicate that we support PAUSE frames (see comment in
		 * Documentation/networking/phy.rst)
		 */
		phy_support_asym_pause(phy);

		/* Display what we found */
		phy_attached_info(phy);
	} else if (np && !of_get_child_by_name(np, "mdio")) {
		/* Support legacy ASPEED devicetree descriptions that
		 * describe a MAC with an embedded MDIO controller but have
		 * no "mdio" child node. Automatically scan the MDIO bus
		 * for available PHYs.
		 */
		priv->use_ncsi = false;
		err = ftgmac100_setup_mdio(netdev);
		if (err)
			goto err_setup_mdio;
	}

	if (priv->is_aspeed)
		ftgmac100_setup_clk(priv);

	/* Default ring sizes */
	priv->rx_q_entries = priv->new_rx_q_entries = DEF_RX_QUEUE_ENTRIES;
	priv->tx_q_entries = priv->new_tx_q_entries = DEF_TX_QUEUE_ENTRIES;

	/* Base feature set */
	netdev->hw_features = NETIF_F_RXCSUM | NETIF_F_HW_CSUM |
		NETIF_F_GRO | NETIF_F_SG | NETIF_F_HW_VLAN_CTAG_RX |
		NETIF_F_HW_VLAN_CTAG_TX;

	if (priv->use_ncsi)
		netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;

	/* AST2400 doesn't have working HW checksum generation */
	if (np && (of_device_is_compatible(np, "aspeed,ast2400-mac")))
		netdev->hw_features &= ~NETIF_F_HW_CSUM;
	if (np && of_get_property(np, "no-hw-checksum", NULL))
		netdev->hw_features &= ~(NETIF_F_HW_CSUM | NETIF_F_RXCSUM);
	netdev->features |= netdev->hw_features;

	/* register network device */
	err = register_netdev(netdev);
	if (err) {
		dev_err(&pdev->dev, "Failed to register netdev\n");
		goto err_register_netdev;
	}

	netdev_info(netdev, "irq %d, mapped at %p\n", netdev->irq, priv->base);

	return 0;

err_ncsi_dev:
err_register_netdev:
	ftgmac100_destroy_mdio(netdev);
err_setup_mdio:
	iounmap(priv->base);
err_ioremap:
	release_resource(priv->res);
err_req_mem:
	free_netdev(netdev);
err_alloc_etherdev:
	return err;
}

static int ftgmac100_remove(struct platform_device *pdev)
{
	struct net_device *netdev;
	struct ftgmac100 *priv;

	netdev = platform_get_drvdata(pdev);
	priv = netdev_priv(netdev);

	unregister_netdev(netdev);

	clk_disable_unprepare(priv->clk);

	/* There's a small chance the reset task will have been re-queued
	 * during stop; make sure it's gone before we free the structure.
	 */
	cancel_work_sync(&priv->reset_task);

	ftgmac100_destroy_mdio(netdev);

	iounmap(priv->base);
	release_resource(priv->res);

	netif_napi_del(&priv->napi);
	free_netdev(netdev);
	return 0;
}

static const struct of_device_id ftgmac100_of_match[] = {
	{ .compatible = "faraday,ftgmac100" },
	{ }
};
MODULE_DEVICE_TABLE(of, ftgmac100_of_match);

static struct platform_driver ftgmac100_driver = {
	.probe	= ftgmac100_probe,
	.remove	= ftgmac100_remove,
	.driver	= {
		.name		= DRV_NAME,
		.of_match_table	= ftgmac100_of_match,
	},
};
module_platform_driver(ftgmac100_driver);

MODULE_AUTHOR("Po-Yu Chuang <ratbert@faraday-tech.com>");
MODULE_DESCRIPTION("FTGMAC100 driver");
MODULE_LICENSE("GPL");