1 // SPDX-License-Identifier: GPL-2.0-or-later 2 /* 3 * Faraday FTGMAC100 Gigabit Ethernet 4 * 5 * (C) Copyright 2009-2011 Faraday Technology 6 * Po-Yu Chuang <ratbert@faraday-tech.com> 7 */ 8 9 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 10 11 #include <linux/clk.h> 12 #include <linux/dma-mapping.h> 13 #include <linux/etherdevice.h> 14 #include <linux/ethtool.h> 15 #include <linux/interrupt.h> 16 #include <linux/io.h> 17 #include <linux/module.h> 18 #include <linux/netdevice.h> 19 #include <linux/of.h> 20 #include <linux/of_mdio.h> 21 #include <linux/phy.h> 22 #include <linux/platform_device.h> 23 #include <linux/property.h> 24 #include <linux/crc32.h> 25 #include <linux/if_vlan.h> 26 #include <linux/of_net.h> 27 #include <net/ip.h> 28 #include <net/ncsi.h> 29 30 #include "ftgmac100.h" 31 32 #define DRV_NAME "ftgmac100" 33 #define DRV_VERSION "0.7" 34 35 /* Arbitrary values, I am not sure the HW has limits */ 36 #define MAX_RX_QUEUE_ENTRIES 1024 37 #define MAX_TX_QUEUE_ENTRIES 1024 38 #define MIN_RX_QUEUE_ENTRIES 32 39 #define MIN_TX_QUEUE_ENTRIES 32 40 41 /* Defaults */ 42 #define DEF_RX_QUEUE_ENTRIES 128 43 #define DEF_TX_QUEUE_ENTRIES 128 44 45 #define MAX_PKT_SIZE 1536 46 #define RX_BUF_SIZE MAX_PKT_SIZE /* must be smaller than 0x3fff */ 47 48 /* Min number of tx ring entries before stopping queue */ 49 #define TX_THRESHOLD (MAX_SKB_FRAGS + 1) 50 51 #define FTGMAC_100MHZ 100000000 52 #define FTGMAC_25MHZ 25000000 53 54 struct ftgmac100 { 55 /* Registers */ 56 struct resource *res; 57 void __iomem *base; 58 59 /* Rx ring */ 60 unsigned int rx_q_entries; 61 struct ftgmac100_rxdes *rxdes; 62 dma_addr_t rxdes_dma; 63 struct sk_buff **rx_skbs; 64 unsigned int rx_pointer; 65 u32 rxdes0_edorr_mask; 66 67 /* Tx ring */ 68 unsigned int tx_q_entries; 69 struct ftgmac100_txdes *txdes; 70 dma_addr_t txdes_dma; 71 struct sk_buff **tx_skbs; 72 unsigned int tx_clean_pointer; 73 unsigned int tx_pointer; 74 u32 txdes0_edotr_mask; 75 76 /* Used to signal the reset task of ring change request */ 77 unsigned int new_rx_q_entries; 78 unsigned int new_tx_q_entries; 79 80 /* Scratch page to use when rx skb alloc fails */ 81 void *rx_scratch; 82 dma_addr_t rx_scratch_dma; 83 84 /* Component structures */ 85 struct net_device *netdev; 86 struct device *dev; 87 struct ncsi_dev *ndev; 88 struct napi_struct napi; 89 struct work_struct reset_task; 90 struct mii_bus *mii_bus; 91 struct clk *clk; 92 93 /* Link management */ 94 int cur_speed; 95 int cur_duplex; 96 bool use_ncsi; 97 98 /* Multicast filter settings */ 99 u32 maht0; 100 u32 maht1; 101 102 /* Flow control settings */ 103 bool tx_pause; 104 bool rx_pause; 105 bool aneg_pause; 106 107 /* Misc */ 108 bool need_mac_restart; 109 bool is_aspeed; 110 }; 111 112 static int ftgmac100_reset_mac(struct ftgmac100 *priv, u32 maccr) 113 { 114 struct net_device *netdev = priv->netdev; 115 int i; 116 117 /* NOTE: reset clears all registers */ 118 iowrite32(maccr, priv->base + FTGMAC100_OFFSET_MACCR); 119 iowrite32(maccr | FTGMAC100_MACCR_SW_RST, 120 priv->base + FTGMAC100_OFFSET_MACCR); 121 for (i = 0; i < 200; i++) { 122 unsigned int maccr; 123 124 maccr = ioread32(priv->base + FTGMAC100_OFFSET_MACCR); 125 if (!(maccr & FTGMAC100_MACCR_SW_RST)) 126 return 0; 127 128 udelay(1); 129 } 130 131 netdev_err(netdev, "Hardware reset failed\n"); 132 return -EIO; 133 } 134 135 static int ftgmac100_reset_and_config_mac(struct ftgmac100 *priv) 136 { 137 u32 maccr = 0; 138 139 switch (priv->cur_speed) { 140 case SPEED_10: 141 case 0: /* no link */ 142 break; 143 144 case SPEED_100: 145 maccr |= FTGMAC100_MACCR_FAST_MODE; 146 break; 147 148 case SPEED_1000: 149 maccr |= FTGMAC100_MACCR_GIGA_MODE; 150 break; 151 default: 152 netdev_err(priv->netdev, "Unknown speed %d !\n", 153 priv->cur_speed); 154 break; 155 } 156 157 /* (Re)initialize the queue pointers */ 158 priv->rx_pointer = 0; 159 priv->tx_clean_pointer = 0; 160 priv->tx_pointer = 0; 161 162 /* The doc says reset twice with 10us interval */ 163 if (ftgmac100_reset_mac(priv, maccr)) 164 return -EIO; 165 usleep_range(10, 1000); 166 return ftgmac100_reset_mac(priv, maccr); 167 } 168 169 static void ftgmac100_write_mac_addr(struct ftgmac100 *priv, const u8 *mac) 170 { 171 unsigned int maddr = mac[0] << 8 | mac[1]; 172 unsigned int laddr = mac[2] << 24 | mac[3] << 16 | mac[4] << 8 | mac[5]; 173 174 iowrite32(maddr, priv->base + FTGMAC100_OFFSET_MAC_MADR); 175 iowrite32(laddr, priv->base + FTGMAC100_OFFSET_MAC_LADR); 176 } 177 178 static void ftgmac100_initial_mac(struct ftgmac100 *priv) 179 { 180 u8 mac[ETH_ALEN]; 181 unsigned int m; 182 unsigned int l; 183 void *addr; 184 185 addr = device_get_mac_address(priv->dev, mac, ETH_ALEN); 186 if (addr) { 187 ether_addr_copy(priv->netdev->dev_addr, mac); 188 dev_info(priv->dev, "Read MAC address %pM from device tree\n", 189 mac); 190 return; 191 } 192 193 m = ioread32(priv->base + FTGMAC100_OFFSET_MAC_MADR); 194 l = ioread32(priv->base + FTGMAC100_OFFSET_MAC_LADR); 195 196 mac[0] = (m >> 8) & 0xff; 197 mac[1] = m & 0xff; 198 mac[2] = (l >> 24) & 0xff; 199 mac[3] = (l >> 16) & 0xff; 200 mac[4] = (l >> 8) & 0xff; 201 mac[5] = l & 0xff; 202 203 if (is_valid_ether_addr(mac)) { 204 ether_addr_copy(priv->netdev->dev_addr, mac); 205 dev_info(priv->dev, "Read MAC address %pM from chip\n", mac); 206 } else { 207 eth_hw_addr_random(priv->netdev); 208 dev_info(priv->dev, "Generated random MAC address %pM\n", 209 priv->netdev->dev_addr); 210 } 211 } 212 213 static int ftgmac100_set_mac_addr(struct net_device *dev, void *p) 214 { 215 int ret; 216 217 ret = eth_prepare_mac_addr_change(dev, p); 218 if (ret < 0) 219 return ret; 220 221 eth_commit_mac_addr_change(dev, p); 222 ftgmac100_write_mac_addr(netdev_priv(dev), dev->dev_addr); 223 224 return 0; 225 } 226 227 static void ftgmac100_config_pause(struct ftgmac100 *priv) 228 { 229 u32 fcr = FTGMAC100_FCR_PAUSE_TIME(16); 230 231 /* Throttle tx queue when receiving pause frames */ 232 if (priv->rx_pause) 233 fcr |= FTGMAC100_FCR_FC_EN; 234 235 /* Enables sending pause frames when the RX queue is past a 236 * certain threshold. 237 */ 238 if (priv->tx_pause) 239 fcr |= FTGMAC100_FCR_FCTHR_EN; 240 241 iowrite32(fcr, priv->base + FTGMAC100_OFFSET_FCR); 242 } 243 244 static void ftgmac100_init_hw(struct ftgmac100 *priv) 245 { 246 u32 reg, rfifo_sz, tfifo_sz; 247 248 /* Clear stale interrupts */ 249 reg = ioread32(priv->base + FTGMAC100_OFFSET_ISR); 250 iowrite32(reg, priv->base + FTGMAC100_OFFSET_ISR); 251 252 /* Setup RX ring buffer base */ 253 iowrite32(priv->rxdes_dma, priv->base + FTGMAC100_OFFSET_RXR_BADR); 254 255 /* Setup TX ring buffer base */ 256 iowrite32(priv->txdes_dma, priv->base + FTGMAC100_OFFSET_NPTXR_BADR); 257 258 /* Configure RX buffer size */ 259 iowrite32(FTGMAC100_RBSR_SIZE(RX_BUF_SIZE), 260 priv->base + FTGMAC100_OFFSET_RBSR); 261 262 /* Set RX descriptor autopoll */ 263 iowrite32(FTGMAC100_APTC_RXPOLL_CNT(1), 264 priv->base + FTGMAC100_OFFSET_APTC); 265 266 /* Write MAC address */ 267 ftgmac100_write_mac_addr(priv, priv->netdev->dev_addr); 268 269 /* Write multicast filter */ 270 iowrite32(priv->maht0, priv->base + FTGMAC100_OFFSET_MAHT0); 271 iowrite32(priv->maht1, priv->base + FTGMAC100_OFFSET_MAHT1); 272 273 /* Configure descriptor sizes and increase burst sizes according 274 * to values in Aspeed SDK. The FIFO arbitration is enabled and 275 * the thresholds set based on the recommended values in the 276 * AST2400 specification. 277 */ 278 iowrite32(FTGMAC100_DBLAC_RXDES_SIZE(2) | /* 2*8 bytes RX descs */ 279 FTGMAC100_DBLAC_TXDES_SIZE(2) | /* 2*8 bytes TX descs */ 280 FTGMAC100_DBLAC_RXBURST_SIZE(3) | /* 512 bytes max RX bursts */ 281 FTGMAC100_DBLAC_TXBURST_SIZE(3) | /* 512 bytes max TX bursts */ 282 FTGMAC100_DBLAC_RX_THR_EN | /* Enable fifo threshold arb */ 283 FTGMAC100_DBLAC_RXFIFO_HTHR(6) | /* 6/8 of FIFO high threshold */ 284 FTGMAC100_DBLAC_RXFIFO_LTHR(2), /* 2/8 of FIFO low threshold */ 285 priv->base + FTGMAC100_OFFSET_DBLAC); 286 287 /* Interrupt mitigation configured for 1 interrupt/packet. HW interrupt 288 * mitigation doesn't seem to provide any benefit with NAPI so leave 289 * it at that. 290 */ 291 iowrite32(FTGMAC100_ITC_RXINT_THR(1) | 292 FTGMAC100_ITC_TXINT_THR(1), 293 priv->base + FTGMAC100_OFFSET_ITC); 294 295 /* Configure FIFO sizes in the TPAFCR register */ 296 reg = ioread32(priv->base + FTGMAC100_OFFSET_FEAR); 297 rfifo_sz = reg & 0x00000007; 298 tfifo_sz = (reg >> 3) & 0x00000007; 299 reg = ioread32(priv->base + FTGMAC100_OFFSET_TPAFCR); 300 reg &= ~0x3f000000; 301 reg |= (tfifo_sz << 27); 302 reg |= (rfifo_sz << 24); 303 iowrite32(reg, priv->base + FTGMAC100_OFFSET_TPAFCR); 304 } 305 306 static void ftgmac100_start_hw(struct ftgmac100 *priv) 307 { 308 u32 maccr = ioread32(priv->base + FTGMAC100_OFFSET_MACCR); 309 310 /* Keep the original GMAC and FAST bits */ 311 maccr &= (FTGMAC100_MACCR_FAST_MODE | FTGMAC100_MACCR_GIGA_MODE); 312 313 /* Add all the main enable bits */ 314 maccr |= FTGMAC100_MACCR_TXDMA_EN | 315 FTGMAC100_MACCR_RXDMA_EN | 316 FTGMAC100_MACCR_TXMAC_EN | 317 FTGMAC100_MACCR_RXMAC_EN | 318 FTGMAC100_MACCR_CRC_APD | 319 FTGMAC100_MACCR_PHY_LINK_LEVEL | 320 FTGMAC100_MACCR_RX_RUNT | 321 FTGMAC100_MACCR_RX_BROADPKT; 322 323 /* Add other bits as needed */ 324 if (priv->cur_duplex == DUPLEX_FULL) 325 maccr |= FTGMAC100_MACCR_FULLDUP; 326 if (priv->netdev->flags & IFF_PROMISC) 327 maccr |= FTGMAC100_MACCR_RX_ALL; 328 if (priv->netdev->flags & IFF_ALLMULTI) 329 maccr |= FTGMAC100_MACCR_RX_MULTIPKT; 330 else if (netdev_mc_count(priv->netdev)) 331 maccr |= FTGMAC100_MACCR_HT_MULTI_EN; 332 333 /* Vlan filtering enabled */ 334 if (priv->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) 335 maccr |= FTGMAC100_MACCR_RM_VLAN; 336 337 /* Hit the HW */ 338 iowrite32(maccr, priv->base + FTGMAC100_OFFSET_MACCR); 339 } 340 341 static void ftgmac100_stop_hw(struct ftgmac100 *priv) 342 { 343 iowrite32(0, priv->base + FTGMAC100_OFFSET_MACCR); 344 } 345 346 static void ftgmac100_calc_mc_hash(struct ftgmac100 *priv) 347 { 348 struct netdev_hw_addr *ha; 349 350 priv->maht1 = 0; 351 priv->maht0 = 0; 352 netdev_for_each_mc_addr(ha, priv->netdev) { 353 u32 crc_val = ether_crc_le(ETH_ALEN, ha->addr); 354 355 crc_val = (~(crc_val >> 2)) & 0x3f; 356 if (crc_val >= 32) 357 priv->maht1 |= 1ul << (crc_val - 32); 358 else 359 priv->maht0 |= 1ul << (crc_val); 360 } 361 } 362 363 static void ftgmac100_set_rx_mode(struct net_device *netdev) 364 { 365 struct ftgmac100 *priv = netdev_priv(netdev); 366 367 /* Setup the hash filter */ 368 ftgmac100_calc_mc_hash(priv); 369 370 /* Interface down ? that's all there is to do */ 371 if (!netif_running(netdev)) 372 return; 373 374 /* Update the HW */ 375 iowrite32(priv->maht0, priv->base + FTGMAC100_OFFSET_MAHT0); 376 iowrite32(priv->maht1, priv->base + FTGMAC100_OFFSET_MAHT1); 377 378 /* Reconfigure MACCR */ 379 ftgmac100_start_hw(priv); 380 } 381 382 static int ftgmac100_alloc_rx_buf(struct ftgmac100 *priv, unsigned int entry, 383 struct ftgmac100_rxdes *rxdes, gfp_t gfp) 384 { 385 struct net_device *netdev = priv->netdev; 386 struct sk_buff *skb; 387 dma_addr_t map; 388 int err = 0; 389 390 skb = netdev_alloc_skb_ip_align(netdev, RX_BUF_SIZE); 391 if (unlikely(!skb)) { 392 if (net_ratelimit()) 393 netdev_warn(netdev, "failed to allocate rx skb\n"); 394 err = -ENOMEM; 395 map = priv->rx_scratch_dma; 396 } else { 397 map = dma_map_single(priv->dev, skb->data, RX_BUF_SIZE, 398 DMA_FROM_DEVICE); 399 if (unlikely(dma_mapping_error(priv->dev, map))) { 400 if (net_ratelimit()) 401 netdev_err(netdev, "failed to map rx page\n"); 402 dev_kfree_skb_any(skb); 403 map = priv->rx_scratch_dma; 404 skb = NULL; 405 err = -ENOMEM; 406 } 407 } 408 409 /* Store skb */ 410 priv->rx_skbs[entry] = skb; 411 412 /* Store DMA address into RX desc */ 413 rxdes->rxdes3 = cpu_to_le32(map); 414 415 /* Ensure the above is ordered vs clearing the OWN bit */ 416 dma_wmb(); 417 418 /* Clean status (which resets own bit) */ 419 if (entry == (priv->rx_q_entries - 1)) 420 rxdes->rxdes0 = cpu_to_le32(priv->rxdes0_edorr_mask); 421 else 422 rxdes->rxdes0 = 0; 423 424 return err; 425 } 426 427 static unsigned int ftgmac100_next_rx_pointer(struct ftgmac100 *priv, 428 unsigned int pointer) 429 { 430 return (pointer + 1) & (priv->rx_q_entries - 1); 431 } 432 433 static void ftgmac100_rx_packet_error(struct ftgmac100 *priv, u32 status) 434 { 435 struct net_device *netdev = priv->netdev; 436 437 if (status & FTGMAC100_RXDES0_RX_ERR) 438 netdev->stats.rx_errors++; 439 440 if (status & FTGMAC100_RXDES0_CRC_ERR) 441 netdev->stats.rx_crc_errors++; 442 443 if (status & (FTGMAC100_RXDES0_FTL | 444 FTGMAC100_RXDES0_RUNT | 445 FTGMAC100_RXDES0_RX_ODD_NB)) 446 netdev->stats.rx_length_errors++; 447 } 448 449 static bool ftgmac100_rx_packet(struct ftgmac100 *priv, int *processed) 450 { 451 struct net_device *netdev = priv->netdev; 452 struct ftgmac100_rxdes *rxdes; 453 struct sk_buff *skb; 454 unsigned int pointer, size; 455 u32 status, csum_vlan; 456 dma_addr_t map; 457 458 /* Grab next RX descriptor */ 459 pointer = priv->rx_pointer; 460 rxdes = &priv->rxdes[pointer]; 461 462 /* Grab descriptor status */ 463 status = le32_to_cpu(rxdes->rxdes0); 464 465 /* Do we have a packet ? */ 466 if (!(status & FTGMAC100_RXDES0_RXPKT_RDY)) 467 return false; 468 469 /* Order subsequent reads with the test for the ready bit */ 470 dma_rmb(); 471 472 /* We don't cope with fragmented RX packets */ 473 if (unlikely(!(status & FTGMAC100_RXDES0_FRS) || 474 !(status & FTGMAC100_RXDES0_LRS))) 475 goto drop; 476 477 /* Grab received size and csum vlan field in the descriptor */ 478 size = status & FTGMAC100_RXDES0_VDBC; 479 csum_vlan = le32_to_cpu(rxdes->rxdes1); 480 481 /* Any error (other than csum offload) flagged ? */ 482 if (unlikely(status & RXDES0_ANY_ERROR)) { 483 /* Correct for incorrect flagging of runt packets 484 * with vlan tags... Just accept a runt packet that 485 * has been flagged as vlan and whose size is at 486 * least 60 bytes. 487 */ 488 if ((status & FTGMAC100_RXDES0_RUNT) && 489 (csum_vlan & FTGMAC100_RXDES1_VLANTAG_AVAIL) && 490 (size >= 60)) 491 status &= ~FTGMAC100_RXDES0_RUNT; 492 493 /* Any error still in there ? */ 494 if (status & RXDES0_ANY_ERROR) { 495 ftgmac100_rx_packet_error(priv, status); 496 goto drop; 497 } 498 } 499 500 /* If the packet had no skb (failed to allocate earlier) 501 * then try to allocate one and skip 502 */ 503 skb = priv->rx_skbs[pointer]; 504 if (!unlikely(skb)) { 505 ftgmac100_alloc_rx_buf(priv, pointer, rxdes, GFP_ATOMIC); 506 goto drop; 507 } 508 509 if (unlikely(status & FTGMAC100_RXDES0_MULTICAST)) 510 netdev->stats.multicast++; 511 512 /* If the HW found checksum errors, bounce it to software. 513 * 514 * If we didn't, we need to see if the packet was recognized 515 * by HW as one of the supported checksummed protocols before 516 * we accept the HW test results. 517 */ 518 if (netdev->features & NETIF_F_RXCSUM) { 519 u32 err_bits = FTGMAC100_RXDES1_TCP_CHKSUM_ERR | 520 FTGMAC100_RXDES1_UDP_CHKSUM_ERR | 521 FTGMAC100_RXDES1_IP_CHKSUM_ERR; 522 if ((csum_vlan & err_bits) || 523 !(csum_vlan & FTGMAC100_RXDES1_PROT_MASK)) 524 skb->ip_summed = CHECKSUM_NONE; 525 else 526 skb->ip_summed = CHECKSUM_UNNECESSARY; 527 } 528 529 /* Transfer received size to skb */ 530 skb_put(skb, size); 531 532 /* Extract vlan tag */ 533 if ((netdev->features & NETIF_F_HW_VLAN_CTAG_RX) && 534 (csum_vlan & FTGMAC100_RXDES1_VLANTAG_AVAIL)) 535 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), 536 csum_vlan & 0xffff); 537 538 /* Tear down DMA mapping, do necessary cache management */ 539 map = le32_to_cpu(rxdes->rxdes3); 540 541 #if defined(CONFIG_ARM) && !defined(CONFIG_ARM_DMA_USE_IOMMU) 542 /* When we don't have an iommu, we can save cycles by not 543 * invalidating the cache for the part of the packet that 544 * wasn't received. 545 */ 546 dma_unmap_single(priv->dev, map, size, DMA_FROM_DEVICE); 547 #else 548 dma_unmap_single(priv->dev, map, RX_BUF_SIZE, DMA_FROM_DEVICE); 549 #endif 550 551 552 /* Resplenish rx ring */ 553 ftgmac100_alloc_rx_buf(priv, pointer, rxdes, GFP_ATOMIC); 554 priv->rx_pointer = ftgmac100_next_rx_pointer(priv, pointer); 555 556 skb->protocol = eth_type_trans(skb, netdev); 557 558 netdev->stats.rx_packets++; 559 netdev->stats.rx_bytes += size; 560 561 /* push packet to protocol stack */ 562 if (skb->ip_summed == CHECKSUM_NONE) 563 netif_receive_skb(skb); 564 else 565 napi_gro_receive(&priv->napi, skb); 566 567 (*processed)++; 568 return true; 569 570 drop: 571 /* Clean rxdes0 (which resets own bit) */ 572 rxdes->rxdes0 = cpu_to_le32(status & priv->rxdes0_edorr_mask); 573 priv->rx_pointer = ftgmac100_next_rx_pointer(priv, pointer); 574 netdev->stats.rx_dropped++; 575 return true; 576 } 577 578 static u32 ftgmac100_base_tx_ctlstat(struct ftgmac100 *priv, 579 unsigned int index) 580 { 581 if (index == (priv->tx_q_entries - 1)) 582 return priv->txdes0_edotr_mask; 583 else 584 return 0; 585 } 586 587 static unsigned int ftgmac100_next_tx_pointer(struct ftgmac100 *priv, 588 unsigned int pointer) 589 { 590 return (pointer + 1) & (priv->tx_q_entries - 1); 591 } 592 593 static u32 ftgmac100_tx_buf_avail(struct ftgmac100 *priv) 594 { 595 /* Returns the number of available slots in the TX queue 596 * 597 * This always leaves one free slot so we don't have to 598 * worry about empty vs. full, and this simplifies the 599 * test for ftgmac100_tx_buf_cleanable() below 600 */ 601 return (priv->tx_clean_pointer - priv->tx_pointer - 1) & 602 (priv->tx_q_entries - 1); 603 } 604 605 static bool ftgmac100_tx_buf_cleanable(struct ftgmac100 *priv) 606 { 607 return priv->tx_pointer != priv->tx_clean_pointer; 608 } 609 610 static void ftgmac100_free_tx_packet(struct ftgmac100 *priv, 611 unsigned int pointer, 612 struct sk_buff *skb, 613 struct ftgmac100_txdes *txdes, 614 u32 ctl_stat) 615 { 616 dma_addr_t map = le32_to_cpu(txdes->txdes3); 617 size_t len; 618 619 if (ctl_stat & FTGMAC100_TXDES0_FTS) { 620 len = skb_headlen(skb); 621 dma_unmap_single(priv->dev, map, len, DMA_TO_DEVICE); 622 } else { 623 len = FTGMAC100_TXDES0_TXBUF_SIZE(ctl_stat); 624 dma_unmap_page(priv->dev, map, len, DMA_TO_DEVICE); 625 } 626 627 /* Free SKB on last segment */ 628 if (ctl_stat & FTGMAC100_TXDES0_LTS) 629 dev_kfree_skb(skb); 630 priv->tx_skbs[pointer] = NULL; 631 } 632 633 static bool ftgmac100_tx_complete_packet(struct ftgmac100 *priv) 634 { 635 struct net_device *netdev = priv->netdev; 636 struct ftgmac100_txdes *txdes; 637 struct sk_buff *skb; 638 unsigned int pointer; 639 u32 ctl_stat; 640 641 pointer = priv->tx_clean_pointer; 642 txdes = &priv->txdes[pointer]; 643 644 ctl_stat = le32_to_cpu(txdes->txdes0); 645 if (ctl_stat & FTGMAC100_TXDES0_TXDMA_OWN) 646 return false; 647 648 skb = priv->tx_skbs[pointer]; 649 netdev->stats.tx_packets++; 650 netdev->stats.tx_bytes += skb->len; 651 ftgmac100_free_tx_packet(priv, pointer, skb, txdes, ctl_stat); 652 txdes->txdes0 = cpu_to_le32(ctl_stat & priv->txdes0_edotr_mask); 653 654 priv->tx_clean_pointer = ftgmac100_next_tx_pointer(priv, pointer); 655 656 return true; 657 } 658 659 static void ftgmac100_tx_complete(struct ftgmac100 *priv) 660 { 661 struct net_device *netdev = priv->netdev; 662 663 /* Process all completed packets */ 664 while (ftgmac100_tx_buf_cleanable(priv) && 665 ftgmac100_tx_complete_packet(priv)) 666 ; 667 668 /* Restart queue if needed */ 669 smp_mb(); 670 if (unlikely(netif_queue_stopped(netdev) && 671 ftgmac100_tx_buf_avail(priv) >= TX_THRESHOLD)) { 672 struct netdev_queue *txq; 673 674 txq = netdev_get_tx_queue(netdev, 0); 675 __netif_tx_lock(txq, smp_processor_id()); 676 if (netif_queue_stopped(netdev) && 677 ftgmac100_tx_buf_avail(priv) >= TX_THRESHOLD) 678 netif_wake_queue(netdev); 679 __netif_tx_unlock(txq); 680 } 681 } 682 683 static bool ftgmac100_prep_tx_csum(struct sk_buff *skb, u32 *csum_vlan) 684 { 685 if (skb->protocol == cpu_to_be16(ETH_P_IP)) { 686 u8 ip_proto = ip_hdr(skb)->protocol; 687 688 *csum_vlan |= FTGMAC100_TXDES1_IP_CHKSUM; 689 switch(ip_proto) { 690 case IPPROTO_TCP: 691 *csum_vlan |= FTGMAC100_TXDES1_TCP_CHKSUM; 692 return true; 693 case IPPROTO_UDP: 694 *csum_vlan |= FTGMAC100_TXDES1_UDP_CHKSUM; 695 return true; 696 case IPPROTO_IP: 697 return true; 698 } 699 } 700 return skb_checksum_help(skb) == 0; 701 } 702 703 static netdev_tx_t ftgmac100_hard_start_xmit(struct sk_buff *skb, 704 struct net_device *netdev) 705 { 706 struct ftgmac100 *priv = netdev_priv(netdev); 707 struct ftgmac100_txdes *txdes, *first; 708 unsigned int pointer, nfrags, len, i, j; 709 u32 f_ctl_stat, ctl_stat, csum_vlan; 710 dma_addr_t map; 711 712 /* The HW doesn't pad small frames */ 713 if (eth_skb_pad(skb)) { 714 netdev->stats.tx_dropped++; 715 return NETDEV_TX_OK; 716 } 717 718 /* Reject oversize packets */ 719 if (unlikely(skb->len > MAX_PKT_SIZE)) { 720 if (net_ratelimit()) 721 netdev_dbg(netdev, "tx packet too big\n"); 722 goto drop; 723 } 724 725 /* Do we have a limit on #fragments ? I yet have to get a reply 726 * from Aspeed. If there's one I haven't hit it. 727 */ 728 nfrags = skb_shinfo(skb)->nr_frags; 729 730 /* Get header len */ 731 len = skb_headlen(skb); 732 733 /* Map the packet head */ 734 map = dma_map_single(priv->dev, skb->data, len, DMA_TO_DEVICE); 735 if (dma_mapping_error(priv->dev, map)) { 736 if (net_ratelimit()) 737 netdev_err(netdev, "map tx packet head failed\n"); 738 goto drop; 739 } 740 741 /* Grab the next free tx descriptor */ 742 pointer = priv->tx_pointer; 743 txdes = first = &priv->txdes[pointer]; 744 745 /* Setup it up with the packet head. Don't write the head to the 746 * ring just yet 747 */ 748 priv->tx_skbs[pointer] = skb; 749 f_ctl_stat = ftgmac100_base_tx_ctlstat(priv, pointer); 750 f_ctl_stat |= FTGMAC100_TXDES0_TXDMA_OWN; 751 f_ctl_stat |= FTGMAC100_TXDES0_TXBUF_SIZE(len); 752 f_ctl_stat |= FTGMAC100_TXDES0_FTS; 753 if (nfrags == 0) 754 f_ctl_stat |= FTGMAC100_TXDES0_LTS; 755 txdes->txdes3 = cpu_to_le32(map); 756 757 /* Setup HW checksumming */ 758 csum_vlan = 0; 759 if (skb->ip_summed == CHECKSUM_PARTIAL && 760 !ftgmac100_prep_tx_csum(skb, &csum_vlan)) 761 goto drop; 762 763 /* Add VLAN tag */ 764 if (skb_vlan_tag_present(skb)) { 765 csum_vlan |= FTGMAC100_TXDES1_INS_VLANTAG; 766 csum_vlan |= skb_vlan_tag_get(skb) & 0xffff; 767 } 768 769 txdes->txdes1 = cpu_to_le32(csum_vlan); 770 771 /* Next descriptor */ 772 pointer = ftgmac100_next_tx_pointer(priv, pointer); 773 774 /* Add the fragments */ 775 for (i = 0; i < nfrags; i++) { 776 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 777 778 len = skb_frag_size(frag); 779 780 /* Map it */ 781 map = skb_frag_dma_map(priv->dev, frag, 0, len, 782 DMA_TO_DEVICE); 783 if (dma_mapping_error(priv->dev, map)) 784 goto dma_err; 785 786 /* Setup descriptor */ 787 priv->tx_skbs[pointer] = skb; 788 txdes = &priv->txdes[pointer]; 789 ctl_stat = ftgmac100_base_tx_ctlstat(priv, pointer); 790 ctl_stat |= FTGMAC100_TXDES0_TXDMA_OWN; 791 ctl_stat |= FTGMAC100_TXDES0_TXBUF_SIZE(len); 792 if (i == (nfrags - 1)) 793 ctl_stat |= FTGMAC100_TXDES0_LTS; 794 txdes->txdes0 = cpu_to_le32(ctl_stat); 795 txdes->txdes1 = 0; 796 txdes->txdes3 = cpu_to_le32(map); 797 798 /* Next one */ 799 pointer = ftgmac100_next_tx_pointer(priv, pointer); 800 } 801 802 /* Order the previous packet and descriptor udpates 803 * before setting the OWN bit on the first descriptor. 804 */ 805 dma_wmb(); 806 first->txdes0 = cpu_to_le32(f_ctl_stat); 807 808 /* Update next TX pointer */ 809 priv->tx_pointer = pointer; 810 811 /* If there isn't enough room for all the fragments of a new packet 812 * in the TX ring, stop the queue. The sequence below is race free 813 * vs. a concurrent restart in ftgmac100_poll() 814 */ 815 if (unlikely(ftgmac100_tx_buf_avail(priv) < TX_THRESHOLD)) { 816 netif_stop_queue(netdev); 817 /* Order the queue stop with the test below */ 818 smp_mb(); 819 if (ftgmac100_tx_buf_avail(priv) >= TX_THRESHOLD) 820 netif_wake_queue(netdev); 821 } 822 823 /* Poke transmitter to read the updated TX descriptors */ 824 iowrite32(1, priv->base + FTGMAC100_OFFSET_NPTXPD); 825 826 return NETDEV_TX_OK; 827 828 dma_err: 829 if (net_ratelimit()) 830 netdev_err(netdev, "map tx fragment failed\n"); 831 832 /* Free head */ 833 pointer = priv->tx_pointer; 834 ftgmac100_free_tx_packet(priv, pointer, skb, first, f_ctl_stat); 835 first->txdes0 = cpu_to_le32(f_ctl_stat & priv->txdes0_edotr_mask); 836 837 /* Then all fragments */ 838 for (j = 0; j < i; j++) { 839 pointer = ftgmac100_next_tx_pointer(priv, pointer); 840 txdes = &priv->txdes[pointer]; 841 ctl_stat = le32_to_cpu(txdes->txdes0); 842 ftgmac100_free_tx_packet(priv, pointer, skb, txdes, ctl_stat); 843 txdes->txdes0 = cpu_to_le32(ctl_stat & priv->txdes0_edotr_mask); 844 } 845 846 /* This cannot be reached if we successfully mapped the 847 * last fragment, so we know ftgmac100_free_tx_packet() 848 * hasn't freed the skb yet. 849 */ 850 drop: 851 /* Drop the packet */ 852 dev_kfree_skb_any(skb); 853 netdev->stats.tx_dropped++; 854 855 return NETDEV_TX_OK; 856 } 857 858 static void ftgmac100_free_buffers(struct ftgmac100 *priv) 859 { 860 int i; 861 862 /* Free all RX buffers */ 863 for (i = 0; i < priv->rx_q_entries; i++) { 864 struct ftgmac100_rxdes *rxdes = &priv->rxdes[i]; 865 struct sk_buff *skb = priv->rx_skbs[i]; 866 dma_addr_t map = le32_to_cpu(rxdes->rxdes3); 867 868 if (!skb) 869 continue; 870 871 priv->rx_skbs[i] = NULL; 872 dma_unmap_single(priv->dev, map, RX_BUF_SIZE, DMA_FROM_DEVICE); 873 dev_kfree_skb_any(skb); 874 } 875 876 /* Free all TX buffers */ 877 for (i = 0; i < priv->tx_q_entries; i++) { 878 struct ftgmac100_txdes *txdes = &priv->txdes[i]; 879 struct sk_buff *skb = priv->tx_skbs[i]; 880 881 if (!skb) 882 continue; 883 ftgmac100_free_tx_packet(priv, i, skb, txdes, 884 le32_to_cpu(txdes->txdes0)); 885 } 886 } 887 888 static void ftgmac100_free_rings(struct ftgmac100 *priv) 889 { 890 /* Free skb arrays */ 891 kfree(priv->rx_skbs); 892 kfree(priv->tx_skbs); 893 894 /* Free descriptors */ 895 if (priv->rxdes) 896 dma_free_coherent(priv->dev, MAX_RX_QUEUE_ENTRIES * 897 sizeof(struct ftgmac100_rxdes), 898 priv->rxdes, priv->rxdes_dma); 899 priv->rxdes = NULL; 900 901 if (priv->txdes) 902 dma_free_coherent(priv->dev, MAX_TX_QUEUE_ENTRIES * 903 sizeof(struct ftgmac100_txdes), 904 priv->txdes, priv->txdes_dma); 905 priv->txdes = NULL; 906 907 /* Free scratch packet buffer */ 908 if (priv->rx_scratch) 909 dma_free_coherent(priv->dev, RX_BUF_SIZE, 910 priv->rx_scratch, priv->rx_scratch_dma); 911 } 912 913 static int ftgmac100_alloc_rings(struct ftgmac100 *priv) 914 { 915 /* Allocate skb arrays */ 916 priv->rx_skbs = kcalloc(MAX_RX_QUEUE_ENTRIES, sizeof(void *), 917 GFP_KERNEL); 918 if (!priv->rx_skbs) 919 return -ENOMEM; 920 priv->tx_skbs = kcalloc(MAX_TX_QUEUE_ENTRIES, sizeof(void *), 921 GFP_KERNEL); 922 if (!priv->tx_skbs) 923 return -ENOMEM; 924 925 /* Allocate descriptors */ 926 priv->rxdes = dma_alloc_coherent(priv->dev, 927 MAX_RX_QUEUE_ENTRIES * sizeof(struct ftgmac100_rxdes), 928 &priv->rxdes_dma, GFP_KERNEL); 929 if (!priv->rxdes) 930 return -ENOMEM; 931 priv->txdes = dma_alloc_coherent(priv->dev, 932 MAX_TX_QUEUE_ENTRIES * sizeof(struct ftgmac100_txdes), 933 &priv->txdes_dma, GFP_KERNEL); 934 if (!priv->txdes) 935 return -ENOMEM; 936 937 /* Allocate scratch packet buffer */ 938 priv->rx_scratch = dma_alloc_coherent(priv->dev, 939 RX_BUF_SIZE, 940 &priv->rx_scratch_dma, 941 GFP_KERNEL); 942 if (!priv->rx_scratch) 943 return -ENOMEM; 944 945 return 0; 946 } 947 948 static void ftgmac100_init_rings(struct ftgmac100 *priv) 949 { 950 struct ftgmac100_rxdes *rxdes = NULL; 951 struct ftgmac100_txdes *txdes = NULL; 952 int i; 953 954 /* Update entries counts */ 955 priv->rx_q_entries = priv->new_rx_q_entries; 956 priv->tx_q_entries = priv->new_tx_q_entries; 957 958 if (WARN_ON(priv->rx_q_entries < MIN_RX_QUEUE_ENTRIES)) 959 return; 960 961 /* Initialize RX ring */ 962 for (i = 0; i < priv->rx_q_entries; i++) { 963 rxdes = &priv->rxdes[i]; 964 rxdes->rxdes0 = 0; 965 rxdes->rxdes3 = cpu_to_le32(priv->rx_scratch_dma); 966 } 967 /* Mark the end of the ring */ 968 rxdes->rxdes0 |= cpu_to_le32(priv->rxdes0_edorr_mask); 969 970 if (WARN_ON(priv->tx_q_entries < MIN_RX_QUEUE_ENTRIES)) 971 return; 972 973 /* Initialize TX ring */ 974 for (i = 0; i < priv->tx_q_entries; i++) { 975 txdes = &priv->txdes[i]; 976 txdes->txdes0 = 0; 977 } 978 txdes->txdes0 |= cpu_to_le32(priv->txdes0_edotr_mask); 979 } 980 981 static int ftgmac100_alloc_rx_buffers(struct ftgmac100 *priv) 982 { 983 int i; 984 985 for (i = 0; i < priv->rx_q_entries; i++) { 986 struct ftgmac100_rxdes *rxdes = &priv->rxdes[i]; 987 988 if (ftgmac100_alloc_rx_buf(priv, i, rxdes, GFP_KERNEL)) 989 return -ENOMEM; 990 } 991 return 0; 992 } 993 994 static void ftgmac100_adjust_link(struct net_device *netdev) 995 { 996 struct ftgmac100 *priv = netdev_priv(netdev); 997 struct phy_device *phydev = netdev->phydev; 998 bool tx_pause, rx_pause; 999 int new_speed; 1000 1001 /* We store "no link" as speed 0 */ 1002 if (!phydev->link) 1003 new_speed = 0; 1004 else 1005 new_speed = phydev->speed; 1006 1007 /* Grab pause settings from PHY if configured to do so */ 1008 if (priv->aneg_pause) { 1009 rx_pause = tx_pause = phydev->pause; 1010 if (phydev->asym_pause) 1011 tx_pause = !rx_pause; 1012 } else { 1013 rx_pause = priv->rx_pause; 1014 tx_pause = priv->tx_pause; 1015 } 1016 1017 /* Link hasn't changed, do nothing */ 1018 if (phydev->speed == priv->cur_speed && 1019 phydev->duplex == priv->cur_duplex && 1020 rx_pause == priv->rx_pause && 1021 tx_pause == priv->tx_pause) 1022 return; 1023 1024 /* Print status if we have a link or we had one and just lost it, 1025 * don't print otherwise. 1026 */ 1027 if (new_speed || priv->cur_speed) 1028 phy_print_status(phydev); 1029 1030 priv->cur_speed = new_speed; 1031 priv->cur_duplex = phydev->duplex; 1032 priv->rx_pause = rx_pause; 1033 priv->tx_pause = tx_pause; 1034 1035 /* Link is down, do nothing else */ 1036 if (!new_speed) 1037 return; 1038 1039 /* Disable all interrupts */ 1040 iowrite32(0, priv->base + FTGMAC100_OFFSET_IER); 1041 1042 /* Reset the adapter asynchronously */ 1043 schedule_work(&priv->reset_task); 1044 } 1045 1046 static int ftgmac100_mii_probe(struct ftgmac100 *priv, phy_interface_t intf) 1047 { 1048 struct net_device *netdev = priv->netdev; 1049 struct phy_device *phydev; 1050 1051 phydev = phy_find_first(priv->mii_bus); 1052 if (!phydev) { 1053 netdev_info(netdev, "%s: no PHY found\n", netdev->name); 1054 return -ENODEV; 1055 } 1056 1057 phydev = phy_connect(netdev, phydev_name(phydev), 1058 &ftgmac100_adjust_link, intf); 1059 1060 if (IS_ERR(phydev)) { 1061 netdev_err(netdev, "%s: Could not attach to PHY\n", netdev->name); 1062 return PTR_ERR(phydev); 1063 } 1064 1065 /* Indicate that we support PAUSE frames (see comment in 1066 * Documentation/networking/phy.rst) 1067 */ 1068 phy_support_asym_pause(phydev); 1069 1070 /* Display what we found */ 1071 phy_attached_info(phydev); 1072 1073 return 0; 1074 } 1075 1076 static int ftgmac100_mdiobus_read(struct mii_bus *bus, int phy_addr, int regnum) 1077 { 1078 struct net_device *netdev = bus->priv; 1079 struct ftgmac100 *priv = netdev_priv(netdev); 1080 unsigned int phycr; 1081 int i; 1082 1083 phycr = ioread32(priv->base + FTGMAC100_OFFSET_PHYCR); 1084 1085 /* preserve MDC cycle threshold */ 1086 phycr &= FTGMAC100_PHYCR_MDC_CYCTHR_MASK; 1087 1088 phycr |= FTGMAC100_PHYCR_PHYAD(phy_addr) | 1089 FTGMAC100_PHYCR_REGAD(regnum) | 1090 FTGMAC100_PHYCR_MIIRD; 1091 1092 iowrite32(phycr, priv->base + FTGMAC100_OFFSET_PHYCR); 1093 1094 for (i = 0; i < 10; i++) { 1095 phycr = ioread32(priv->base + FTGMAC100_OFFSET_PHYCR); 1096 1097 if ((phycr & FTGMAC100_PHYCR_MIIRD) == 0) { 1098 int data; 1099 1100 data = ioread32(priv->base + FTGMAC100_OFFSET_PHYDATA); 1101 return FTGMAC100_PHYDATA_MIIRDATA(data); 1102 } 1103 1104 udelay(100); 1105 } 1106 1107 netdev_err(netdev, "mdio read timed out\n"); 1108 return -EIO; 1109 } 1110 1111 static int ftgmac100_mdiobus_write(struct mii_bus *bus, int phy_addr, 1112 int regnum, u16 value) 1113 { 1114 struct net_device *netdev = bus->priv; 1115 struct ftgmac100 *priv = netdev_priv(netdev); 1116 unsigned int phycr; 1117 int data; 1118 int i; 1119 1120 phycr = ioread32(priv->base + FTGMAC100_OFFSET_PHYCR); 1121 1122 /* preserve MDC cycle threshold */ 1123 phycr &= FTGMAC100_PHYCR_MDC_CYCTHR_MASK; 1124 1125 phycr |= FTGMAC100_PHYCR_PHYAD(phy_addr) | 1126 FTGMAC100_PHYCR_REGAD(regnum) | 1127 FTGMAC100_PHYCR_MIIWR; 1128 1129 data = FTGMAC100_PHYDATA_MIIWDATA(value); 1130 1131 iowrite32(data, priv->base + FTGMAC100_OFFSET_PHYDATA); 1132 iowrite32(phycr, priv->base + FTGMAC100_OFFSET_PHYCR); 1133 1134 for (i = 0; i < 10; i++) { 1135 phycr = ioread32(priv->base + FTGMAC100_OFFSET_PHYCR); 1136 1137 if ((phycr & FTGMAC100_PHYCR_MIIWR) == 0) 1138 return 0; 1139 1140 udelay(100); 1141 } 1142 1143 netdev_err(netdev, "mdio write timed out\n"); 1144 return -EIO; 1145 } 1146 1147 static void ftgmac100_get_drvinfo(struct net_device *netdev, 1148 struct ethtool_drvinfo *info) 1149 { 1150 strlcpy(info->driver, DRV_NAME, sizeof(info->driver)); 1151 strlcpy(info->version, DRV_VERSION, sizeof(info->version)); 1152 strlcpy(info->bus_info, dev_name(&netdev->dev), sizeof(info->bus_info)); 1153 } 1154 1155 static void ftgmac100_get_ringparam(struct net_device *netdev, 1156 struct ethtool_ringparam *ering) 1157 { 1158 struct ftgmac100 *priv = netdev_priv(netdev); 1159 1160 memset(ering, 0, sizeof(*ering)); 1161 ering->rx_max_pending = MAX_RX_QUEUE_ENTRIES; 1162 ering->tx_max_pending = MAX_TX_QUEUE_ENTRIES; 1163 ering->rx_pending = priv->rx_q_entries; 1164 ering->tx_pending = priv->tx_q_entries; 1165 } 1166 1167 static int ftgmac100_set_ringparam(struct net_device *netdev, 1168 struct ethtool_ringparam *ering) 1169 { 1170 struct ftgmac100 *priv = netdev_priv(netdev); 1171 1172 if (ering->rx_pending > MAX_RX_QUEUE_ENTRIES || 1173 ering->tx_pending > MAX_TX_QUEUE_ENTRIES || 1174 ering->rx_pending < MIN_RX_QUEUE_ENTRIES || 1175 ering->tx_pending < MIN_TX_QUEUE_ENTRIES || 1176 !is_power_of_2(ering->rx_pending) || 1177 !is_power_of_2(ering->tx_pending)) 1178 return -EINVAL; 1179 1180 priv->new_rx_q_entries = ering->rx_pending; 1181 priv->new_tx_q_entries = ering->tx_pending; 1182 if (netif_running(netdev)) 1183 schedule_work(&priv->reset_task); 1184 1185 return 0; 1186 } 1187 1188 static void ftgmac100_get_pauseparam(struct net_device *netdev, 1189 struct ethtool_pauseparam *pause) 1190 { 1191 struct ftgmac100 *priv = netdev_priv(netdev); 1192 1193 pause->autoneg = priv->aneg_pause; 1194 pause->tx_pause = priv->tx_pause; 1195 pause->rx_pause = priv->rx_pause; 1196 } 1197 1198 static int ftgmac100_set_pauseparam(struct net_device *netdev, 1199 struct ethtool_pauseparam *pause) 1200 { 1201 struct ftgmac100 *priv = netdev_priv(netdev); 1202 struct phy_device *phydev = netdev->phydev; 1203 1204 priv->aneg_pause = pause->autoneg; 1205 priv->tx_pause = pause->tx_pause; 1206 priv->rx_pause = pause->rx_pause; 1207 1208 if (phydev) 1209 phy_set_asym_pause(phydev, pause->rx_pause, pause->tx_pause); 1210 1211 if (netif_running(netdev)) { 1212 if (!(phydev && priv->aneg_pause)) 1213 ftgmac100_config_pause(priv); 1214 } 1215 1216 return 0; 1217 } 1218 1219 static const struct ethtool_ops ftgmac100_ethtool_ops = { 1220 .get_drvinfo = ftgmac100_get_drvinfo, 1221 .get_link = ethtool_op_get_link, 1222 .get_link_ksettings = phy_ethtool_get_link_ksettings, 1223 .set_link_ksettings = phy_ethtool_set_link_ksettings, 1224 .nway_reset = phy_ethtool_nway_reset, 1225 .get_ringparam = ftgmac100_get_ringparam, 1226 .set_ringparam = ftgmac100_set_ringparam, 1227 .get_pauseparam = ftgmac100_get_pauseparam, 1228 .set_pauseparam = ftgmac100_set_pauseparam, 1229 }; 1230 1231 static irqreturn_t ftgmac100_interrupt(int irq, void *dev_id) 1232 { 1233 struct net_device *netdev = dev_id; 1234 struct ftgmac100 *priv = netdev_priv(netdev); 1235 unsigned int status, new_mask = FTGMAC100_INT_BAD; 1236 1237 /* Fetch and clear interrupt bits, process abnormal ones */ 1238 status = ioread32(priv->base + FTGMAC100_OFFSET_ISR); 1239 iowrite32(status, priv->base + FTGMAC100_OFFSET_ISR); 1240 if (unlikely(status & FTGMAC100_INT_BAD)) { 1241 1242 /* RX buffer unavailable */ 1243 if (status & FTGMAC100_INT_NO_RXBUF) 1244 netdev->stats.rx_over_errors++; 1245 1246 /* received packet lost due to RX FIFO full */ 1247 if (status & FTGMAC100_INT_RPKT_LOST) 1248 netdev->stats.rx_fifo_errors++; 1249 1250 /* sent packet lost due to excessive TX collision */ 1251 if (status & FTGMAC100_INT_XPKT_LOST) 1252 netdev->stats.tx_fifo_errors++; 1253 1254 /* AHB error -> Reset the chip */ 1255 if (status & FTGMAC100_INT_AHB_ERR) { 1256 if (net_ratelimit()) 1257 netdev_warn(netdev, 1258 "AHB bus error ! Resetting chip.\n"); 1259 iowrite32(0, priv->base + FTGMAC100_OFFSET_IER); 1260 schedule_work(&priv->reset_task); 1261 return IRQ_HANDLED; 1262 } 1263 1264 /* We may need to restart the MAC after such errors, delay 1265 * this until after we have freed some Rx buffers though 1266 */ 1267 priv->need_mac_restart = true; 1268 1269 /* Disable those errors until we restart */ 1270 new_mask &= ~status; 1271 } 1272 1273 /* Only enable "bad" interrupts while NAPI is on */ 1274 iowrite32(new_mask, priv->base + FTGMAC100_OFFSET_IER); 1275 1276 /* Schedule NAPI bh */ 1277 napi_schedule_irqoff(&priv->napi); 1278 1279 return IRQ_HANDLED; 1280 } 1281 1282 static bool ftgmac100_check_rx(struct ftgmac100 *priv) 1283 { 1284 struct ftgmac100_rxdes *rxdes = &priv->rxdes[priv->rx_pointer]; 1285 1286 /* Do we have a packet ? */ 1287 return !!(rxdes->rxdes0 & cpu_to_le32(FTGMAC100_RXDES0_RXPKT_RDY)); 1288 } 1289 1290 static int ftgmac100_poll(struct napi_struct *napi, int budget) 1291 { 1292 struct ftgmac100 *priv = container_of(napi, struct ftgmac100, napi); 1293 int work_done = 0; 1294 bool more; 1295 1296 /* Handle TX completions */ 1297 if (ftgmac100_tx_buf_cleanable(priv)) 1298 ftgmac100_tx_complete(priv); 1299 1300 /* Handle RX packets */ 1301 do { 1302 more = ftgmac100_rx_packet(priv, &work_done); 1303 } while (more && work_done < budget); 1304 1305 1306 /* The interrupt is telling us to kick the MAC back to life 1307 * after an RX overflow 1308 */ 1309 if (unlikely(priv->need_mac_restart)) { 1310 ftgmac100_start_hw(priv); 1311 1312 /* Re-enable "bad" interrupts */ 1313 iowrite32(FTGMAC100_INT_BAD, 1314 priv->base + FTGMAC100_OFFSET_IER); 1315 } 1316 1317 /* As long as we are waiting for transmit packets to be 1318 * completed we keep NAPI going 1319 */ 1320 if (ftgmac100_tx_buf_cleanable(priv)) 1321 work_done = budget; 1322 1323 if (work_done < budget) { 1324 /* We are about to re-enable all interrupts. However 1325 * the HW has been latching RX/TX packet interrupts while 1326 * they were masked. So we clear them first, then we need 1327 * to re-check if there's something to process 1328 */ 1329 iowrite32(FTGMAC100_INT_RXTX, 1330 priv->base + FTGMAC100_OFFSET_ISR); 1331 1332 /* Push the above (and provides a barrier vs. subsequent 1333 * reads of the descriptor). 1334 */ 1335 ioread32(priv->base + FTGMAC100_OFFSET_ISR); 1336 1337 /* Check RX and TX descriptors for more work to do */ 1338 if (ftgmac100_check_rx(priv) || 1339 ftgmac100_tx_buf_cleanable(priv)) 1340 return budget; 1341 1342 /* deschedule NAPI */ 1343 napi_complete(napi); 1344 1345 /* enable all interrupts */ 1346 iowrite32(FTGMAC100_INT_ALL, 1347 priv->base + FTGMAC100_OFFSET_IER); 1348 } 1349 1350 return work_done; 1351 } 1352 1353 static int ftgmac100_init_all(struct ftgmac100 *priv, bool ignore_alloc_err) 1354 { 1355 int err = 0; 1356 1357 /* Re-init descriptors (adjust queue sizes) */ 1358 ftgmac100_init_rings(priv); 1359 1360 /* Realloc rx descriptors */ 1361 err = ftgmac100_alloc_rx_buffers(priv); 1362 if (err && !ignore_alloc_err) 1363 return err; 1364 1365 /* Reinit and restart HW */ 1366 ftgmac100_init_hw(priv); 1367 ftgmac100_config_pause(priv); 1368 ftgmac100_start_hw(priv); 1369 1370 /* Re-enable the device */ 1371 napi_enable(&priv->napi); 1372 netif_start_queue(priv->netdev); 1373 1374 /* Enable all interrupts */ 1375 iowrite32(FTGMAC100_INT_ALL, priv->base + FTGMAC100_OFFSET_IER); 1376 1377 return err; 1378 } 1379 1380 static void ftgmac100_reset_task(struct work_struct *work) 1381 { 1382 struct ftgmac100 *priv = container_of(work, struct ftgmac100, 1383 reset_task); 1384 struct net_device *netdev = priv->netdev; 1385 int err; 1386 1387 netdev_dbg(netdev, "Resetting NIC...\n"); 1388 1389 /* Lock the world */ 1390 rtnl_lock(); 1391 if (netdev->phydev) 1392 mutex_lock(&netdev->phydev->lock); 1393 if (priv->mii_bus) 1394 mutex_lock(&priv->mii_bus->mdio_lock); 1395 1396 1397 /* Check if the interface is still up */ 1398 if (!netif_running(netdev)) 1399 goto bail; 1400 1401 /* Stop the network stack */ 1402 netif_trans_update(netdev); 1403 napi_disable(&priv->napi); 1404 netif_tx_disable(netdev); 1405 1406 /* Stop and reset the MAC */ 1407 ftgmac100_stop_hw(priv); 1408 err = ftgmac100_reset_and_config_mac(priv); 1409 if (err) { 1410 /* Not much we can do ... it might come back... */ 1411 netdev_err(netdev, "attempting to continue...\n"); 1412 } 1413 1414 /* Free all rx and tx buffers */ 1415 ftgmac100_free_buffers(priv); 1416 1417 /* Setup everything again and restart chip */ 1418 ftgmac100_init_all(priv, true); 1419 1420 netdev_dbg(netdev, "Reset done !\n"); 1421 bail: 1422 if (priv->mii_bus) 1423 mutex_unlock(&priv->mii_bus->mdio_lock); 1424 if (netdev->phydev) 1425 mutex_unlock(&netdev->phydev->lock); 1426 rtnl_unlock(); 1427 } 1428 1429 static int ftgmac100_open(struct net_device *netdev) 1430 { 1431 struct ftgmac100 *priv = netdev_priv(netdev); 1432 int err; 1433 1434 /* Allocate ring buffers */ 1435 err = ftgmac100_alloc_rings(priv); 1436 if (err) { 1437 netdev_err(netdev, "Failed to allocate descriptors\n"); 1438 return err; 1439 } 1440 1441 /* When using NC-SI we force the speed to 100Mbit/s full duplex, 1442 * 1443 * Otherwise we leave it set to 0 (no link), the link 1444 * message from the PHY layer will handle setting it up to 1445 * something else if needed. 1446 */ 1447 if (priv->use_ncsi) { 1448 priv->cur_duplex = DUPLEX_FULL; 1449 priv->cur_speed = SPEED_100; 1450 } else { 1451 priv->cur_duplex = 0; 1452 priv->cur_speed = 0; 1453 } 1454 1455 /* Reset the hardware */ 1456 err = ftgmac100_reset_and_config_mac(priv); 1457 if (err) 1458 goto err_hw; 1459 1460 /* Initialize NAPI */ 1461 netif_napi_add(netdev, &priv->napi, ftgmac100_poll, 64); 1462 1463 /* Grab our interrupt */ 1464 err = request_irq(netdev->irq, ftgmac100_interrupt, 0, netdev->name, netdev); 1465 if (err) { 1466 netdev_err(netdev, "failed to request irq %d\n", netdev->irq); 1467 goto err_irq; 1468 } 1469 1470 /* Start things up */ 1471 err = ftgmac100_init_all(priv, false); 1472 if (err) { 1473 netdev_err(netdev, "Failed to allocate packet buffers\n"); 1474 goto err_alloc; 1475 } 1476 1477 if (netdev->phydev) { 1478 /* If we have a PHY, start polling */ 1479 phy_start(netdev->phydev); 1480 } else if (priv->use_ncsi) { 1481 /* If using NC-SI, set our carrier on and start the stack */ 1482 netif_carrier_on(netdev); 1483 1484 /* Start the NCSI device */ 1485 err = ncsi_start_dev(priv->ndev); 1486 if (err) 1487 goto err_ncsi; 1488 } 1489 1490 return 0; 1491 1492 err_ncsi: 1493 napi_disable(&priv->napi); 1494 netif_stop_queue(netdev); 1495 err_alloc: 1496 ftgmac100_free_buffers(priv); 1497 free_irq(netdev->irq, netdev); 1498 err_irq: 1499 netif_napi_del(&priv->napi); 1500 err_hw: 1501 iowrite32(0, priv->base + FTGMAC100_OFFSET_IER); 1502 ftgmac100_free_rings(priv); 1503 return err; 1504 } 1505 1506 static int ftgmac100_stop(struct net_device *netdev) 1507 { 1508 struct ftgmac100 *priv = netdev_priv(netdev); 1509 1510 /* Note about the reset task: We are called with the rtnl lock 1511 * held, so we are synchronized against the core of the reset 1512 * task. We must not try to synchronously cancel it otherwise 1513 * we can deadlock. But since it will test for netif_running() 1514 * which has already been cleared by the net core, we don't 1515 * anything special to do. 1516 */ 1517 1518 /* disable all interrupts */ 1519 iowrite32(0, priv->base + FTGMAC100_OFFSET_IER); 1520 1521 netif_stop_queue(netdev); 1522 napi_disable(&priv->napi); 1523 netif_napi_del(&priv->napi); 1524 if (netdev->phydev) 1525 phy_stop(netdev->phydev); 1526 else if (priv->use_ncsi) 1527 ncsi_stop_dev(priv->ndev); 1528 1529 ftgmac100_stop_hw(priv); 1530 free_irq(netdev->irq, netdev); 1531 ftgmac100_free_buffers(priv); 1532 ftgmac100_free_rings(priv); 1533 1534 return 0; 1535 } 1536 1537 /* optional */ 1538 static int ftgmac100_do_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) 1539 { 1540 if (!netdev->phydev) 1541 return -ENXIO; 1542 1543 return phy_mii_ioctl(netdev->phydev, ifr, cmd); 1544 } 1545 1546 static void ftgmac100_tx_timeout(struct net_device *netdev) 1547 { 1548 struct ftgmac100 *priv = netdev_priv(netdev); 1549 1550 /* Disable all interrupts */ 1551 iowrite32(0, priv->base + FTGMAC100_OFFSET_IER); 1552 1553 /* Do the reset outside of interrupt context */ 1554 schedule_work(&priv->reset_task); 1555 } 1556 1557 static int ftgmac100_set_features(struct net_device *netdev, 1558 netdev_features_t features) 1559 { 1560 struct ftgmac100 *priv = netdev_priv(netdev); 1561 netdev_features_t changed = netdev->features ^ features; 1562 1563 if (!netif_running(netdev)) 1564 return 0; 1565 1566 /* Update the vlan filtering bit */ 1567 if (changed & NETIF_F_HW_VLAN_CTAG_RX) { 1568 u32 maccr; 1569 1570 maccr = ioread32(priv->base + FTGMAC100_OFFSET_MACCR); 1571 if (priv->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) 1572 maccr |= FTGMAC100_MACCR_RM_VLAN; 1573 else 1574 maccr &= ~FTGMAC100_MACCR_RM_VLAN; 1575 iowrite32(maccr, priv->base + FTGMAC100_OFFSET_MACCR); 1576 } 1577 1578 return 0; 1579 } 1580 1581 #ifdef CONFIG_NET_POLL_CONTROLLER 1582 static void ftgmac100_poll_controller(struct net_device *netdev) 1583 { 1584 unsigned long flags; 1585 1586 local_irq_save(flags); 1587 ftgmac100_interrupt(netdev->irq, netdev); 1588 local_irq_restore(flags); 1589 } 1590 #endif 1591 1592 static const struct net_device_ops ftgmac100_netdev_ops = { 1593 .ndo_open = ftgmac100_open, 1594 .ndo_stop = ftgmac100_stop, 1595 .ndo_start_xmit = ftgmac100_hard_start_xmit, 1596 .ndo_set_mac_address = ftgmac100_set_mac_addr, 1597 .ndo_validate_addr = eth_validate_addr, 1598 .ndo_do_ioctl = ftgmac100_do_ioctl, 1599 .ndo_tx_timeout = ftgmac100_tx_timeout, 1600 .ndo_set_rx_mode = ftgmac100_set_rx_mode, 1601 .ndo_set_features = ftgmac100_set_features, 1602 #ifdef CONFIG_NET_POLL_CONTROLLER 1603 .ndo_poll_controller = ftgmac100_poll_controller, 1604 #endif 1605 .ndo_vlan_rx_add_vid = ncsi_vlan_rx_add_vid, 1606 .ndo_vlan_rx_kill_vid = ncsi_vlan_rx_kill_vid, 1607 }; 1608 1609 static int ftgmac100_setup_mdio(struct net_device *netdev) 1610 { 1611 struct ftgmac100 *priv = netdev_priv(netdev); 1612 struct platform_device *pdev = to_platform_device(priv->dev); 1613 int phy_intf = PHY_INTERFACE_MODE_RGMII; 1614 struct device_node *np = pdev->dev.of_node; 1615 int i, err = 0; 1616 u32 reg; 1617 1618 /* initialize mdio bus */ 1619 priv->mii_bus = mdiobus_alloc(); 1620 if (!priv->mii_bus) 1621 return -EIO; 1622 1623 if (of_device_is_compatible(np, "aspeed,ast2400-mac") || 1624 of_device_is_compatible(np, "aspeed,ast2500-mac")) { 1625 /* The AST2600 has a separate MDIO controller */ 1626 1627 /* For the AST2400 and AST2500 this driver only supports the 1628 * old MDIO interface 1629 */ 1630 reg = ioread32(priv->base + FTGMAC100_OFFSET_REVR); 1631 reg &= ~FTGMAC100_REVR_NEW_MDIO_INTERFACE; 1632 iowrite32(reg, priv->base + FTGMAC100_OFFSET_REVR); 1633 } 1634 1635 /* Get PHY mode from device-tree */ 1636 if (np) { 1637 /* Default to RGMII. It's a gigabit part after all */ 1638 phy_intf = of_get_phy_mode(np); 1639 if (phy_intf < 0) 1640 phy_intf = PHY_INTERFACE_MODE_RGMII; 1641 1642 /* Aspeed only supports these. I don't know about other IP 1643 * block vendors so I'm going to just let them through for 1644 * now. Note that this is only a warning if for some obscure 1645 * reason the DT really means to lie about it or it's a newer 1646 * part we don't know about. 1647 * 1648 * On the Aspeed SoC there are additionally straps and SCU 1649 * control bits that could tell us what the interface is 1650 * (or allow us to configure it while the IP block is held 1651 * in reset). For now I chose to keep this driver away from 1652 * those SoC specific bits and assume the device-tree is 1653 * right and the SCU has been configured properly by pinmux 1654 * or the firmware. 1655 */ 1656 if (priv->is_aspeed && 1657 phy_intf != PHY_INTERFACE_MODE_RMII && 1658 phy_intf != PHY_INTERFACE_MODE_RGMII && 1659 phy_intf != PHY_INTERFACE_MODE_RGMII_ID && 1660 phy_intf != PHY_INTERFACE_MODE_RGMII_RXID && 1661 phy_intf != PHY_INTERFACE_MODE_RGMII_TXID) { 1662 netdev_warn(netdev, 1663 "Unsupported PHY mode %s !\n", 1664 phy_modes(phy_intf)); 1665 } 1666 } 1667 1668 priv->mii_bus->name = "ftgmac100_mdio"; 1669 snprintf(priv->mii_bus->id, MII_BUS_ID_SIZE, "%s-%d", 1670 pdev->name, pdev->id); 1671 priv->mii_bus->parent = priv->dev; 1672 priv->mii_bus->priv = priv->netdev; 1673 priv->mii_bus->read = ftgmac100_mdiobus_read; 1674 priv->mii_bus->write = ftgmac100_mdiobus_write; 1675 1676 for (i = 0; i < PHY_MAX_ADDR; i++) 1677 priv->mii_bus->irq[i] = PHY_POLL; 1678 1679 err = mdiobus_register(priv->mii_bus); 1680 if (err) { 1681 dev_err(priv->dev, "Cannot register MDIO bus!\n"); 1682 goto err_register_mdiobus; 1683 } 1684 1685 err = ftgmac100_mii_probe(priv, phy_intf); 1686 if (err) { 1687 dev_err(priv->dev, "MII Probe failed!\n"); 1688 goto err_mii_probe; 1689 } 1690 1691 return 0; 1692 1693 err_mii_probe: 1694 mdiobus_unregister(priv->mii_bus); 1695 err_register_mdiobus: 1696 mdiobus_free(priv->mii_bus); 1697 return err; 1698 } 1699 1700 static void ftgmac100_destroy_mdio(struct net_device *netdev) 1701 { 1702 struct ftgmac100 *priv = netdev_priv(netdev); 1703 1704 if (!netdev->phydev) 1705 return; 1706 1707 phy_disconnect(netdev->phydev); 1708 mdiobus_unregister(priv->mii_bus); 1709 mdiobus_free(priv->mii_bus); 1710 } 1711 1712 static void ftgmac100_ncsi_handler(struct ncsi_dev *nd) 1713 { 1714 if (unlikely(nd->state != ncsi_dev_state_functional)) 1715 return; 1716 1717 netdev_dbg(nd->dev, "NCSI interface %s\n", 1718 nd->link_up ? "up" : "down"); 1719 } 1720 1721 static void ftgmac100_setup_clk(struct ftgmac100 *priv) 1722 { 1723 priv->clk = devm_clk_get(priv->dev, NULL); 1724 if (IS_ERR(priv->clk)) 1725 return; 1726 1727 clk_prepare_enable(priv->clk); 1728 1729 /* Aspeed specifies a 100MHz clock is required for up to 1730 * 1000Mbit link speeds. As NCSI is limited to 100Mbit, 25MHz 1731 * is sufficient 1732 */ 1733 clk_set_rate(priv->clk, priv->use_ncsi ? FTGMAC_25MHZ : 1734 FTGMAC_100MHZ); 1735 } 1736 1737 static int ftgmac100_probe(struct platform_device *pdev) 1738 { 1739 struct resource *res; 1740 int irq; 1741 struct net_device *netdev; 1742 struct ftgmac100 *priv; 1743 struct device_node *np; 1744 int err = 0; 1745 1746 if (!pdev) 1747 return -ENODEV; 1748 1749 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1750 if (!res) 1751 return -ENXIO; 1752 1753 irq = platform_get_irq(pdev, 0); 1754 if (irq < 0) 1755 return irq; 1756 1757 /* setup net_device */ 1758 netdev = alloc_etherdev(sizeof(*priv)); 1759 if (!netdev) { 1760 err = -ENOMEM; 1761 goto err_alloc_etherdev; 1762 } 1763 1764 SET_NETDEV_DEV(netdev, &pdev->dev); 1765 1766 netdev->ethtool_ops = &ftgmac100_ethtool_ops; 1767 netdev->netdev_ops = &ftgmac100_netdev_ops; 1768 netdev->watchdog_timeo = 5 * HZ; 1769 1770 platform_set_drvdata(pdev, netdev); 1771 1772 /* setup private data */ 1773 priv = netdev_priv(netdev); 1774 priv->netdev = netdev; 1775 priv->dev = &pdev->dev; 1776 INIT_WORK(&priv->reset_task, ftgmac100_reset_task); 1777 1778 /* map io memory */ 1779 priv->res = request_mem_region(res->start, resource_size(res), 1780 dev_name(&pdev->dev)); 1781 if (!priv->res) { 1782 dev_err(&pdev->dev, "Could not reserve memory region\n"); 1783 err = -ENOMEM; 1784 goto err_req_mem; 1785 } 1786 1787 priv->base = ioremap(res->start, resource_size(res)); 1788 if (!priv->base) { 1789 dev_err(&pdev->dev, "Failed to ioremap ethernet registers\n"); 1790 err = -EIO; 1791 goto err_ioremap; 1792 } 1793 1794 netdev->irq = irq; 1795 1796 /* Enable pause */ 1797 priv->tx_pause = true; 1798 priv->rx_pause = true; 1799 priv->aneg_pause = true; 1800 1801 /* MAC address from chip or random one */ 1802 ftgmac100_initial_mac(priv); 1803 1804 np = pdev->dev.of_node; 1805 if (np && (of_device_is_compatible(np, "aspeed,ast2400-mac") || 1806 of_device_is_compatible(np, "aspeed,ast2500-mac") || 1807 of_device_is_compatible(np, "aspeed,ast2600-mac"))) { 1808 priv->rxdes0_edorr_mask = BIT(30); 1809 priv->txdes0_edotr_mask = BIT(30); 1810 priv->is_aspeed = true; 1811 } else { 1812 priv->rxdes0_edorr_mask = BIT(15); 1813 priv->txdes0_edotr_mask = BIT(15); 1814 } 1815 1816 if (np && of_get_property(np, "use-ncsi", NULL)) { 1817 if (!IS_ENABLED(CONFIG_NET_NCSI)) { 1818 dev_err(&pdev->dev, "NCSI stack not enabled\n"); 1819 goto err_ncsi_dev; 1820 } 1821 1822 dev_info(&pdev->dev, "Using NCSI interface\n"); 1823 priv->use_ncsi = true; 1824 priv->ndev = ncsi_register_dev(netdev, ftgmac100_ncsi_handler); 1825 if (!priv->ndev) 1826 goto err_ncsi_dev; 1827 } else if (np && of_get_property(np, "phy-handle", NULL)) { 1828 struct phy_device *phy; 1829 1830 phy = of_phy_get_and_connect(priv->netdev, np, 1831 &ftgmac100_adjust_link); 1832 if (!phy) { 1833 dev_err(&pdev->dev, "Failed to connect to phy\n"); 1834 goto err_setup_mdio; 1835 } 1836 1837 /* Indicate that we support PAUSE frames (see comment in 1838 * Documentation/networking/phy.rst) 1839 */ 1840 phy_support_asym_pause(phy); 1841 1842 /* Display what we found */ 1843 phy_attached_info(phy); 1844 } else if (np && !of_get_child_by_name(np, "mdio")) { 1845 /* Support legacy ASPEED devicetree descriptions that decribe a 1846 * MAC with an embedded MDIO controller but have no "mdio" 1847 * child node. Automatically scan the MDIO bus for available 1848 * PHYs. 1849 */ 1850 priv->use_ncsi = false; 1851 err = ftgmac100_setup_mdio(netdev); 1852 if (err) 1853 goto err_setup_mdio; 1854 } 1855 1856 if (priv->is_aspeed) 1857 ftgmac100_setup_clk(priv); 1858 1859 /* Default ring sizes */ 1860 priv->rx_q_entries = priv->new_rx_q_entries = DEF_RX_QUEUE_ENTRIES; 1861 priv->tx_q_entries = priv->new_tx_q_entries = DEF_TX_QUEUE_ENTRIES; 1862 1863 /* Base feature set */ 1864 netdev->hw_features = NETIF_F_RXCSUM | NETIF_F_HW_CSUM | 1865 NETIF_F_GRO | NETIF_F_SG | NETIF_F_HW_VLAN_CTAG_RX | 1866 NETIF_F_HW_VLAN_CTAG_TX; 1867 1868 if (priv->use_ncsi) 1869 netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER; 1870 1871 /* AST2400 doesn't have working HW checksum generation */ 1872 if (np && (of_device_is_compatible(np, "aspeed,ast2400-mac"))) 1873 netdev->hw_features &= ~NETIF_F_HW_CSUM; 1874 if (np && of_get_property(np, "no-hw-checksum", NULL)) 1875 netdev->hw_features &= ~(NETIF_F_HW_CSUM | NETIF_F_RXCSUM); 1876 netdev->features |= netdev->hw_features; 1877 1878 /* register network device */ 1879 err = register_netdev(netdev); 1880 if (err) { 1881 dev_err(&pdev->dev, "Failed to register netdev\n"); 1882 goto err_register_netdev; 1883 } 1884 1885 netdev_info(netdev, "irq %d, mapped at %p\n", netdev->irq, priv->base); 1886 1887 return 0; 1888 1889 err_ncsi_dev: 1890 err_register_netdev: 1891 ftgmac100_destroy_mdio(netdev); 1892 err_setup_mdio: 1893 iounmap(priv->base); 1894 err_ioremap: 1895 release_resource(priv->res); 1896 err_req_mem: 1897 free_netdev(netdev); 1898 err_alloc_etherdev: 1899 return err; 1900 } 1901 1902 static int ftgmac100_remove(struct platform_device *pdev) 1903 { 1904 struct net_device *netdev; 1905 struct ftgmac100 *priv; 1906 1907 netdev = platform_get_drvdata(pdev); 1908 priv = netdev_priv(netdev); 1909 1910 unregister_netdev(netdev); 1911 1912 clk_disable_unprepare(priv->clk); 1913 1914 /* There's a small chance the reset task will have been re-queued, 1915 * during stop, make sure it's gone before we free the structure. 1916 */ 1917 cancel_work_sync(&priv->reset_task); 1918 1919 ftgmac100_destroy_mdio(netdev); 1920 1921 iounmap(priv->base); 1922 release_resource(priv->res); 1923 1924 netif_napi_del(&priv->napi); 1925 free_netdev(netdev); 1926 return 0; 1927 } 1928 1929 static const struct of_device_id ftgmac100_of_match[] = { 1930 { .compatible = "faraday,ftgmac100" }, 1931 { } 1932 }; 1933 MODULE_DEVICE_TABLE(of, ftgmac100_of_match); 1934 1935 static struct platform_driver ftgmac100_driver = { 1936 .probe = ftgmac100_probe, 1937 .remove = ftgmac100_remove, 1938 .driver = { 1939 .name = DRV_NAME, 1940 .of_match_table = ftgmac100_of_match, 1941 }, 1942 }; 1943 module_platform_driver(ftgmac100_driver); 1944 1945 MODULE_AUTHOR("Po-Yu Chuang <ratbert@faraday-tech.com>"); 1946 MODULE_DESCRIPTION("FTGMAC100 driver"); 1947 MODULE_LICENSE("GPL"); 1948