1 // SPDX-License-Identifier: GPL-2.0-or-later 2 /* 3 * Faraday FTGMAC100 Gigabit Ethernet 4 * 5 * (C) Copyright 2009-2011 Faraday Technology 6 * Po-Yu Chuang <ratbert@faraday-tech.com> 7 */ 8 9 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 10 11 #include <linux/clk.h> 12 #include <linux/dma-mapping.h> 13 #include <linux/etherdevice.h> 14 #include <linux/ethtool.h> 15 #include <linux/interrupt.h> 16 #include <linux/io.h> 17 #include <linux/module.h> 18 #include <linux/netdevice.h> 19 #include <linux/of.h> 20 #include <linux/of_mdio.h> 21 #include <linux/phy.h> 22 #include <linux/platform_device.h> 23 #include <linux/property.h> 24 #include <linux/crc32.h> 25 #include <linux/if_vlan.h> 26 #include <linux/of_net.h> 27 #include <net/ip.h> 28 #include <net/ncsi.h> 29 30 #include "ftgmac100.h" 31 32 #define DRV_NAME "ftgmac100" 33 #define DRV_VERSION "0.7" 34 35 /* Arbitrary values, I am not sure the HW has limits */ 36 #define MAX_RX_QUEUE_ENTRIES 1024 37 #define MAX_TX_QUEUE_ENTRIES 1024 38 #define MIN_RX_QUEUE_ENTRIES 32 39 #define MIN_TX_QUEUE_ENTRIES 32 40 41 /* Defaults */ 42 #define DEF_RX_QUEUE_ENTRIES 128 43 #define DEF_TX_QUEUE_ENTRIES 128 44 45 #define MAX_PKT_SIZE 1536 46 #define RX_BUF_SIZE MAX_PKT_SIZE /* must be smaller than 0x3fff */ 47 48 /* Min number of tx ring entries before stopping queue */ 49 #define TX_THRESHOLD (MAX_SKB_FRAGS + 1) 50 51 #define FTGMAC_100MHZ 100000000 52 #define FTGMAC_25MHZ 25000000 53 54 struct ftgmac100 { 55 /* Registers */ 56 struct resource *res; 57 void __iomem *base; 58 59 /* Rx ring */ 60 unsigned int rx_q_entries; 61 struct ftgmac100_rxdes *rxdes; 62 dma_addr_t rxdes_dma; 63 struct sk_buff **rx_skbs; 64 unsigned int rx_pointer; 65 u32 rxdes0_edorr_mask; 66 67 /* Tx ring */ 68 unsigned int tx_q_entries; 69 struct ftgmac100_txdes *txdes; 70 dma_addr_t txdes_dma; 71 struct sk_buff **tx_skbs; 72 unsigned int tx_clean_pointer; 73 unsigned int tx_pointer; 74 u32 txdes0_edotr_mask; 75 76 /* Used to signal the reset task of ring 
change request */ 77 unsigned int new_rx_q_entries; 78 unsigned int new_tx_q_entries; 79 80 /* Scratch page to use when rx skb alloc fails */ 81 void *rx_scratch; 82 dma_addr_t rx_scratch_dma; 83 84 /* Component structures */ 85 struct net_device *netdev; 86 struct device *dev; 87 struct ncsi_dev *ndev; 88 struct napi_struct napi; 89 struct work_struct reset_task; 90 struct mii_bus *mii_bus; 91 struct clk *clk; 92 93 /* AST2500/AST2600 RMII ref clock gate */ 94 struct clk *rclk; 95 96 /* Link management */ 97 int cur_speed; 98 int cur_duplex; 99 bool use_ncsi; 100 101 /* Multicast filter settings */ 102 u32 maht0; 103 u32 maht1; 104 105 /* Flow control settings */ 106 bool tx_pause; 107 bool rx_pause; 108 bool aneg_pause; 109 110 /* Misc */ 111 bool need_mac_restart; 112 bool is_aspeed; 113 }; 114 115 static int ftgmac100_reset_mac(struct ftgmac100 *priv, u32 maccr) 116 { 117 struct net_device *netdev = priv->netdev; 118 int i; 119 120 /* NOTE: reset clears all registers */ 121 iowrite32(maccr, priv->base + FTGMAC100_OFFSET_MACCR); 122 iowrite32(maccr | FTGMAC100_MACCR_SW_RST, 123 priv->base + FTGMAC100_OFFSET_MACCR); 124 for (i = 0; i < 200; i++) { 125 unsigned int maccr; 126 127 maccr = ioread32(priv->base + FTGMAC100_OFFSET_MACCR); 128 if (!(maccr & FTGMAC100_MACCR_SW_RST)) 129 return 0; 130 131 udelay(1); 132 } 133 134 netdev_err(netdev, "Hardware reset failed\n"); 135 return -EIO; 136 } 137 138 static int ftgmac100_reset_and_config_mac(struct ftgmac100 *priv) 139 { 140 u32 maccr = 0; 141 142 switch (priv->cur_speed) { 143 case SPEED_10: 144 case 0: /* no link */ 145 break; 146 147 case SPEED_100: 148 maccr |= FTGMAC100_MACCR_FAST_MODE; 149 break; 150 151 case SPEED_1000: 152 maccr |= FTGMAC100_MACCR_GIGA_MODE; 153 break; 154 default: 155 netdev_err(priv->netdev, "Unknown speed %d !\n", 156 priv->cur_speed); 157 break; 158 } 159 160 /* (Re)initialize the queue pointers */ 161 priv->rx_pointer = 0; 162 priv->tx_clean_pointer = 0; 163 priv->tx_pointer = 0; 164 
	/* The doc says reset twice with 10us interval */
	if (ftgmac100_reset_mac(priv, maccr))
		return -EIO;
	usleep_range(10, 1000);
	return ftgmac100_reset_mac(priv, maccr);
}

/* Program a MAC address into the MADR (top 2 bytes) / LADR (low 4
 * bytes) registers.
 */
static void ftgmac100_write_mac_addr(struct ftgmac100 *priv, const u8 *mac)
{
	unsigned int maddr = mac[0] << 8 | mac[1];
	unsigned int laddr = mac[2] << 24 | mac[3] << 16 | mac[4] << 8 | mac[5];

	iowrite32(maddr, priv->base + FTGMAC100_OFFSET_MAC_MADR);
	iowrite32(laddr, priv->base + FTGMAC100_OFFSET_MAC_LADR);
}

/* Pick the initial MAC address: firmware/device-tree property first,
 * then whatever address the chip already holds (e.g. set by the
 * bootloader), finally a random address as a last resort.
 */
static void ftgmac100_initial_mac(struct ftgmac100 *priv)
{
	u8 mac[ETH_ALEN];
	unsigned int m;
	unsigned int l;
	void *addr;

	addr = device_get_mac_address(priv->dev, mac, ETH_ALEN);
	if (addr) {
		ether_addr_copy(priv->netdev->dev_addr, mac);
		dev_info(priv->dev, "Read MAC address %pM from device tree\n",
			 mac);
		return;
	}

	m = ioread32(priv->base + FTGMAC100_OFFSET_MAC_MADR);
	l = ioread32(priv->base + FTGMAC100_OFFSET_MAC_LADR);

	mac[0] = (m >> 8) & 0xff;
	mac[1] = m & 0xff;
	mac[2] = (l >> 24) & 0xff;
	mac[3] = (l >> 16) & 0xff;
	mac[4] = (l >> 8) & 0xff;
	mac[5] = l & 0xff;

	if (is_valid_ether_addr(mac)) {
		ether_addr_copy(priv->netdev->dev_addr, mac);
		dev_info(priv->dev, "Read MAC address %pM from chip\n", mac);
	} else {
		eth_hw_addr_random(priv->netdev);
		dev_info(priv->dev, "Generated random MAC address %pM\n",
			 priv->netdev->dev_addr);
	}
}

/* ndo_set_mac_address handler: validate, commit, then push to HW */
static int ftgmac100_set_mac_addr(struct net_device *dev, void *p)
{
	int ret;

	ret = eth_prepare_mac_addr_change(dev, p);
	if (ret < 0)
		return ret;

	eth_commit_mac_addr_change(dev, p);
	ftgmac100_write_mac_addr(netdev_priv(dev), dev->dev_addr);

	return 0;
}

/* Apply the current rx/tx pause settings to the flow control register */
static void ftgmac100_config_pause(struct ftgmac100 *priv)
{
	u32 fcr = FTGMAC100_FCR_PAUSE_TIME(16);

	/* Throttle tx queue when receiving pause
	 * frames */
	if (priv->rx_pause)
		fcr |= FTGMAC100_FCR_FC_EN;

	/* Enables sending pause frames when the RX queue is past a
	 * certain threshold.
	 */
	if (priv->tx_pause)
		fcr |= FTGMAC100_FCR_FCTHR_EN;

	iowrite32(fcr, priv->base + FTGMAC100_OFFSET_FCR);
}

/* Base HW setup after a reset: ring base addresses, RX buffer size,
 * descriptor autopoll, MAC address, multicast filter, DMA burst and
 * FIFO thresholds, interrupt mitigation and TPAFCR FIFO sizes.
 */
static void ftgmac100_init_hw(struct ftgmac100 *priv)
{
	u32 reg, rfifo_sz, tfifo_sz;

	/* Clear stale interrupts (write-1-to-clear) */
	reg = ioread32(priv->base + FTGMAC100_OFFSET_ISR);
	iowrite32(reg, priv->base + FTGMAC100_OFFSET_ISR);

	/* Setup RX ring buffer base */
	iowrite32(priv->rxdes_dma, priv->base + FTGMAC100_OFFSET_RXR_BADR);

	/* Setup TX ring buffer base */
	iowrite32(priv->txdes_dma, priv->base + FTGMAC100_OFFSET_NPTXR_BADR);

	/* Configure RX buffer size */
	iowrite32(FTGMAC100_RBSR_SIZE(RX_BUF_SIZE),
		  priv->base + FTGMAC100_OFFSET_RBSR);

	/* Set RX descriptor autopoll */
	iowrite32(FTGMAC100_APTC_RXPOLL_CNT(1),
		  priv->base + FTGMAC100_OFFSET_APTC);

	/* Write MAC address */
	ftgmac100_write_mac_addr(priv, priv->netdev->dev_addr);

	/* Write multicast filter */
	iowrite32(priv->maht0, priv->base + FTGMAC100_OFFSET_MAHT0);
	iowrite32(priv->maht1, priv->base + FTGMAC100_OFFSET_MAHT1);

	/* Configure descriptor sizes and increase burst sizes according
	 * to values in Aspeed SDK. The FIFO arbitration is enabled and
	 * the thresholds set based on the recommended values in the
	 * AST2400 specification.
	 */
	iowrite32(FTGMAC100_DBLAC_RXDES_SIZE(2) |   /* 2*8 bytes RX descs */
		  FTGMAC100_DBLAC_TXDES_SIZE(2) |   /* 2*8 bytes TX descs */
		  FTGMAC100_DBLAC_RXBURST_SIZE(3) | /* 512 bytes max RX bursts */
		  FTGMAC100_DBLAC_TXBURST_SIZE(3) | /* 512 bytes max TX bursts */
		  FTGMAC100_DBLAC_RX_THR_EN |       /* Enable fifo threshold arb */
		  FTGMAC100_DBLAC_RXFIFO_HTHR(6) |  /* 6/8 of FIFO high threshold */
		  FTGMAC100_DBLAC_RXFIFO_LTHR(2),   /* 2/8 of FIFO low threshold */
		  priv->base + FTGMAC100_OFFSET_DBLAC);

	/* Interrupt mitigation configured for 1 interrupt/packet. HW interrupt
	 * mitigation doesn't seem to provide any benefit with NAPI so leave
	 * it at that.
	 */
	iowrite32(FTGMAC100_ITC_RXINT_THR(1) |
		  FTGMAC100_ITC_TXINT_THR(1),
		  priv->base + FTGMAC100_OFFSET_ITC);

	/* Configure FIFO sizes in the TPAFCR register: copy the FIFO
	 * size fields from the feature register into TPAFCR bits 24+.
	 */
	reg = ioread32(priv->base + FTGMAC100_OFFSET_FEAR);
	rfifo_sz = reg & 0x00000007;
	tfifo_sz = (reg >> 3) & 0x00000007;
	reg = ioread32(priv->base + FTGMAC100_OFFSET_TPAFCR);
	reg &= ~0x3f000000;
	reg |= (tfifo_sz << 27);
	reg |= (rfifo_sz << 24);
	iowrite32(reg, priv->base + FTGMAC100_OFFSET_TPAFCR);
}

/* Enable the MAC: rebuild MACCR from the current duplex, promisc,
 * multicast and vlan settings while preserving the speed bits.
 */
static void ftgmac100_start_hw(struct ftgmac100 *priv)
{
	u32 maccr = ioread32(priv->base + FTGMAC100_OFFSET_MACCR);

	/* Keep the original GMAC and FAST bits */
	maccr &= (FTGMAC100_MACCR_FAST_MODE | FTGMAC100_MACCR_GIGA_MODE);

	/* Add all the main enable bits */
	maccr |= FTGMAC100_MACCR_TXDMA_EN |
		 FTGMAC100_MACCR_RXDMA_EN |
		 FTGMAC100_MACCR_TXMAC_EN |
		 FTGMAC100_MACCR_RXMAC_EN |
		 FTGMAC100_MACCR_CRC_APD |
		 FTGMAC100_MACCR_PHY_LINK_LEVEL |
		 FTGMAC100_MACCR_RX_RUNT |
		 FTGMAC100_MACCR_RX_BROADPKT;

	/* Add other bits as needed */
	if (priv->cur_duplex == DUPLEX_FULL)
		maccr |= FTGMAC100_MACCR_FULLDUP;
	if (priv->netdev->flags & IFF_PROMISC)
		maccr |= FTGMAC100_MACCR_RX_ALL;
	if (priv->netdev->flags &
	    IFF_ALLMULTI)
		maccr |= FTGMAC100_MACCR_RX_MULTIPKT;
	else if (netdev_mc_count(priv->netdev))
		maccr |= FTGMAC100_MACCR_HT_MULTI_EN;

	/* Vlan filtering enabled */
	if (priv->netdev->features & NETIF_F_HW_VLAN_CTAG_RX)
		maccr |= FTGMAC100_MACCR_RM_VLAN;

	/* Hit the HW */
	iowrite32(maccr, priv->base + FTGMAC100_OFFSET_MACCR);
}

/* Disable the MAC entirely (all MACCR enable bits cleared) */
static void ftgmac100_stop_hw(struct ftgmac100 *priv)
{
	iowrite32(0, priv->base + FTGMAC100_OFFSET_MACCR);
}

/* Recompute the 64-bit multicast hash filter (maht0/maht1) from the
 * netdev multicast list; each address selects one bit via 6 bits
 * derived from the little-endian CRC32 of the address.
 */
static void ftgmac100_calc_mc_hash(struct ftgmac100 *priv)
{
	struct netdev_hw_addr *ha;

	priv->maht1 = 0;
	priv->maht0 = 0;
	netdev_for_each_mc_addr(ha, priv->netdev) {
		u32 crc_val = ether_crc_le(ETH_ALEN, ha->addr);

		crc_val = (~(crc_val >> 2)) & 0x3f;
		if (crc_val >= 32)
			priv->maht1 |= 1ul << (crc_val - 32);
		else
			priv->maht0 |= 1ul << (crc_val);
	}
}

/* ndo_set_rx_mode handler: refresh the multicast hash and, when the
 * interface is running, push the filter and MACCR changes to the HW.
 */
static void ftgmac100_set_rx_mode(struct net_device *netdev)
{
	struct ftgmac100 *priv = netdev_priv(netdev);

	/* Setup the hash filter */
	ftgmac100_calc_mc_hash(priv);

	/* Interface down ?
	 * that's all there is to do */
	if (!netif_running(netdev))
		return;

	/* Update the HW */
	iowrite32(priv->maht0, priv->base + FTGMAC100_OFFSET_MAHT0);
	iowrite32(priv->maht1, priv->base + FTGMAC100_OFFSET_MAHT1);

	/* Reconfigure MACCR */
	ftgmac100_start_hw(priv);
}

/* Allocate an RX skb for ring slot @entry and point the descriptor at
 * it. On skb allocation or DMA mapping failure the descriptor is
 * pointed at the shared scratch buffer instead (so the HW always has a
 * valid DMA target) and -ENOMEM is returned; the slot is retried when
 * the packet is later processed.
 */
static int ftgmac100_alloc_rx_buf(struct ftgmac100 *priv, unsigned int entry,
				  struct ftgmac100_rxdes *rxdes, gfp_t gfp)
{
	struct net_device *netdev = priv->netdev;
	struct sk_buff *skb;
	dma_addr_t map;
	int err = 0;

	skb = netdev_alloc_skb_ip_align(netdev, RX_BUF_SIZE);
	if (unlikely(!skb)) {
		if (net_ratelimit())
			netdev_warn(netdev, "failed to allocate rx skb\n");
		err = -ENOMEM;
		map = priv->rx_scratch_dma;
	} else {
		map = dma_map_single(priv->dev, skb->data, RX_BUF_SIZE,
				     DMA_FROM_DEVICE);
		if (unlikely(dma_mapping_error(priv->dev, map))) {
			if (net_ratelimit())
				netdev_err(netdev, "failed to map rx page\n");
			dev_kfree_skb_any(skb);
			map = priv->rx_scratch_dma;
			skb = NULL;
			err = -ENOMEM;
		}
	}

	/* Store skb */
	priv->rx_skbs[entry] = skb;

	/* Store DMA address into RX desc */
	rxdes->rxdes3 = cpu_to_le32(map);

	/* Ensure the above is ordered vs clearing the OWN bit */
	dma_wmb();

	/* Clean status (which resets own bit) */
	if (entry == (priv->rx_q_entries - 1))
		rxdes->rxdes0 = cpu_to_le32(priv->rxdes0_edorr_mask);
	else
		rxdes->rxdes0 = 0;

	return err;
}

/* Advance an RX ring index with wrap-around (ring size is a power of 2) */
static unsigned int ftgmac100_next_rx_pointer(struct ftgmac100 *priv,
					      unsigned int pointer)
{
	return (pointer + 1) & (priv->rx_q_entries - 1);
}

/* Account an errored RX descriptor in the netdev stats */
static void ftgmac100_rx_packet_error(struct ftgmac100 *priv, u32 status)
{
	struct net_device *netdev = priv->netdev;

	if (status & FTGMAC100_RXDES0_RX_ERR)
		netdev->stats.rx_errors++;

	if (status & FTGMAC100_RXDES0_CRC_ERR)
		netdev->stats.rx_crc_errors++;

	if
(status & (FTGMAC100_RXDES0_FTL | 447 FTGMAC100_RXDES0_RUNT | 448 FTGMAC100_RXDES0_RX_ODD_NB)) 449 netdev->stats.rx_length_errors++; 450 } 451 452 static bool ftgmac100_rx_packet(struct ftgmac100 *priv, int *processed) 453 { 454 struct net_device *netdev = priv->netdev; 455 struct ftgmac100_rxdes *rxdes; 456 struct sk_buff *skb; 457 unsigned int pointer, size; 458 u32 status, csum_vlan; 459 dma_addr_t map; 460 461 /* Grab next RX descriptor */ 462 pointer = priv->rx_pointer; 463 rxdes = &priv->rxdes[pointer]; 464 465 /* Grab descriptor status */ 466 status = le32_to_cpu(rxdes->rxdes0); 467 468 /* Do we have a packet ? */ 469 if (!(status & FTGMAC100_RXDES0_RXPKT_RDY)) 470 return false; 471 472 /* Order subsequent reads with the test for the ready bit */ 473 dma_rmb(); 474 475 /* We don't cope with fragmented RX packets */ 476 if (unlikely(!(status & FTGMAC100_RXDES0_FRS) || 477 !(status & FTGMAC100_RXDES0_LRS))) 478 goto drop; 479 480 /* Grab received size and csum vlan field in the descriptor */ 481 size = status & FTGMAC100_RXDES0_VDBC; 482 csum_vlan = le32_to_cpu(rxdes->rxdes1); 483 484 /* Any error (other than csum offload) flagged ? */ 485 if (unlikely(status & RXDES0_ANY_ERROR)) { 486 /* Correct for incorrect flagging of runt packets 487 * with vlan tags... Just accept a runt packet that 488 * has been flagged as vlan and whose size is at 489 * least 60 bytes. 490 */ 491 if ((status & FTGMAC100_RXDES0_RUNT) && 492 (csum_vlan & FTGMAC100_RXDES1_VLANTAG_AVAIL) && 493 (size >= 60)) 494 status &= ~FTGMAC100_RXDES0_RUNT; 495 496 /* Any error still in there ? 
*/ 497 if (status & RXDES0_ANY_ERROR) { 498 ftgmac100_rx_packet_error(priv, status); 499 goto drop; 500 } 501 } 502 503 /* If the packet had no skb (failed to allocate earlier) 504 * then try to allocate one and skip 505 */ 506 skb = priv->rx_skbs[pointer]; 507 if (!unlikely(skb)) { 508 ftgmac100_alloc_rx_buf(priv, pointer, rxdes, GFP_ATOMIC); 509 goto drop; 510 } 511 512 if (unlikely(status & FTGMAC100_RXDES0_MULTICAST)) 513 netdev->stats.multicast++; 514 515 /* If the HW found checksum errors, bounce it to software. 516 * 517 * If we didn't, we need to see if the packet was recognized 518 * by HW as one of the supported checksummed protocols before 519 * we accept the HW test results. 520 */ 521 if (netdev->features & NETIF_F_RXCSUM) { 522 u32 err_bits = FTGMAC100_RXDES1_TCP_CHKSUM_ERR | 523 FTGMAC100_RXDES1_UDP_CHKSUM_ERR | 524 FTGMAC100_RXDES1_IP_CHKSUM_ERR; 525 if ((csum_vlan & err_bits) || 526 !(csum_vlan & FTGMAC100_RXDES1_PROT_MASK)) 527 skb->ip_summed = CHECKSUM_NONE; 528 else 529 skb->ip_summed = CHECKSUM_UNNECESSARY; 530 } 531 532 /* Transfer received size to skb */ 533 skb_put(skb, size); 534 535 /* Extract vlan tag */ 536 if ((netdev->features & NETIF_F_HW_VLAN_CTAG_RX) && 537 (csum_vlan & FTGMAC100_RXDES1_VLANTAG_AVAIL)) 538 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), 539 csum_vlan & 0xffff); 540 541 /* Tear down DMA mapping, do necessary cache management */ 542 map = le32_to_cpu(rxdes->rxdes3); 543 544 #if defined(CONFIG_ARM) && !defined(CONFIG_ARM_DMA_USE_IOMMU) 545 /* When we don't have an iommu, we can save cycles by not 546 * invalidating the cache for the part of the packet that 547 * wasn't received. 
	 */
	dma_unmap_single(priv->dev, map, size, DMA_FROM_DEVICE);
#else
	dma_unmap_single(priv->dev, map, RX_BUF_SIZE, DMA_FROM_DEVICE);
#endif

	/* Replenish rx ring */
	ftgmac100_alloc_rx_buf(priv, pointer, rxdes, GFP_ATOMIC);
	priv->rx_pointer = ftgmac100_next_rx_pointer(priv, pointer);

	skb->protocol = eth_type_trans(skb, netdev);

	netdev->stats.rx_packets++;
	netdev->stats.rx_bytes += size;

	/* push packet to protocol stack */
	if (skb->ip_summed == CHECKSUM_NONE)
		netif_receive_skb(skb);
	else
		napi_gro_receive(&priv->napi, skb);

	(*processed)++;
	return true;

drop:
	/* Clean rxdes0 (which resets own bit) */
	rxdes->rxdes0 = cpu_to_le32(status & priv->rxdes0_edorr_mask);
	priv->rx_pointer = ftgmac100_next_rx_pointer(priv, pointer);
	netdev->stats.rx_dropped++;
	return true;
}

/* Base control/status word for TX slot @index: just the end-of-ring
 * bit for the last slot, 0 otherwise.
 */
static u32 ftgmac100_base_tx_ctlstat(struct ftgmac100 *priv,
				     unsigned int index)
{
	if (index == (priv->tx_q_entries - 1))
		return priv->txdes0_edotr_mask;
	else
		return 0;
}

/* Advance a TX ring index with wrap-around (ring size is a power of 2) */
static unsigned int ftgmac100_next_tx_pointer(struct ftgmac100 *priv,
					      unsigned int pointer)
{
	return (pointer + 1) & (priv->tx_q_entries - 1);
}

static u32 ftgmac100_tx_buf_avail(struct ftgmac100 *priv)
{
	/* Returns the number of available slots in the TX queue
	 *
	 * This always leaves one free slot so we don't have to
	 * worry about empty vs. full, and this simplifies the
	 * test for ftgmac100_tx_buf_cleanable() below
	 */
	return (priv->tx_clean_pointer - priv->tx_pointer - 1) &
		(priv->tx_q_entries - 1);
}

/* True when there are TX descriptors that may be reclaimable */
static bool ftgmac100_tx_buf_cleanable(struct ftgmac100 *priv)
{
	return priv->tx_pointer != priv->tx_clean_pointer;
}

/* Unmap one TX descriptor's buffer (head segment via dma_unmap_single,
 * page fragments via dma_unmap_page) and free the skb on the last
 * segment of a packet; clears the ring's skb slot.
 */
static void ftgmac100_free_tx_packet(struct ftgmac100 *priv,
				     unsigned int pointer,
				     struct sk_buff *skb,
				     struct ftgmac100_txdes *txdes,
				     u32 ctl_stat)
{
	dma_addr_t map = le32_to_cpu(txdes->txdes3);
	size_t len;

	if (ctl_stat & FTGMAC100_TXDES0_FTS) {
		len = skb_headlen(skb);
		dma_unmap_single(priv->dev, map, len, DMA_TO_DEVICE);
	} else {
		len = FTGMAC100_TXDES0_TXBUF_SIZE(ctl_stat);
		dma_unmap_page(priv->dev, map, len, DMA_TO_DEVICE);
	}

	/* Free SKB on last segment */
	if (ctl_stat & FTGMAC100_TXDES0_LTS)
		dev_kfree_skb(skb);
	priv->tx_skbs[pointer] = NULL;
}

/* Reclaim one completed TX descriptor; returns false when the next
 * descriptor is still owned by the DMA engine.
 */
static bool ftgmac100_tx_complete_packet(struct ftgmac100 *priv)
{
	struct net_device *netdev = priv->netdev;
	struct ftgmac100_txdes *txdes;
	struct sk_buff *skb;
	unsigned int pointer;
	u32 ctl_stat;

	pointer = priv->tx_clean_pointer;
	txdes = &priv->txdes[pointer];

	ctl_stat = le32_to_cpu(txdes->txdes0);
	if (ctl_stat & FTGMAC100_TXDES0_TXDMA_OWN)
		return false;

	skb = priv->tx_skbs[pointer];
	netdev->stats.tx_packets++;
	netdev->stats.tx_bytes += skb->len;
	ftgmac100_free_tx_packet(priv, pointer, skb, txdes, ctl_stat);
	txdes->txdes0 = cpu_to_le32(ctl_stat & priv->txdes0_edotr_mask);

	priv->tx_clean_pointer = ftgmac100_next_tx_pointer(priv, pointer);

	return true;
}

/* Reclaim all completed TX descriptors and wake the TX queue when it
 * was stopped and enough room has been freed up.
 */
static void ftgmac100_tx_complete(struct ftgmac100 *priv)
{
	struct net_device *netdev = priv->netdev;

	/* Process all completed packets */
	while (ftgmac100_tx_buf_cleanable(priv) &&
	       ftgmac100_tx_complete_packet(priv))
		;

	/*
	 * Restart queue if needed */
	smp_mb();
	if (unlikely(netif_queue_stopped(netdev) &&
		     ftgmac100_tx_buf_avail(priv) >= TX_THRESHOLD)) {
		struct netdev_queue *txq;

		txq = netdev_get_tx_queue(netdev, 0);
		__netif_tx_lock(txq, smp_processor_id());
		/* Re-check under the tx lock to close the race with a
		 * concurrent stop in the xmit path
		 */
		if (netif_queue_stopped(netdev) &&
		    ftgmac100_tx_buf_avail(priv) >= TX_THRESHOLD)
			netif_wake_queue(netdev);
		__netif_tx_unlock(txq);
	}
}

/* Set the HW checksum offload bits in *csum_vlan for an IPv4 skb.
 * Non-IPv4 packets (and unhandled L4 protocols) fall back to software
 * checksumming via skb_checksum_help(); returns false if that fails,
 * in which case the caller drops the packet.
 */
static bool ftgmac100_prep_tx_csum(struct sk_buff *skb, u32 *csum_vlan)
{
	if (skb->protocol == cpu_to_be16(ETH_P_IP)) {
		u8 ip_proto = ip_hdr(skb)->protocol;

		*csum_vlan |= FTGMAC100_TXDES1_IP_CHKSUM;
		switch(ip_proto) {
		case IPPROTO_TCP:
			*csum_vlan |= FTGMAC100_TXDES1_TCP_CHKSUM;
			return true;
		case IPPROTO_UDP:
			*csum_vlan |= FTGMAC100_TXDES1_UDP_CHKSUM;
			return true;
		case IPPROTO_IP:
			return true;
		}
	}
	return skb_checksum_help(skb) == 0;
}

/* ndo_start_xmit handler: map the skb head and all page fragments into
 * TX descriptors, then hand ownership to the DMA engine. The OWN bit of
 * the first descriptor is written last (after a dma_wmb) so the HW
 * never sees a partially built chain.
 */
static netdev_tx_t ftgmac100_hard_start_xmit(struct sk_buff *skb,
					     struct net_device *netdev)
{
	struct ftgmac100 *priv = netdev_priv(netdev);
	struct ftgmac100_txdes *txdes, *first;
	unsigned int pointer, nfrags, len, i, j;
	u32 f_ctl_stat, ctl_stat, csum_vlan;
	dma_addr_t map;

	/* The HW doesn't pad small frames */
	if (eth_skb_pad(skb)) {
		netdev->stats.tx_dropped++;
		return NETDEV_TX_OK;
	}

	/* Reject oversize packets */
	if (unlikely(skb->len > MAX_PKT_SIZE)) {
		if (net_ratelimit())
			netdev_dbg(netdev, "tx packet too big\n");
		goto drop;
	}

	/* Do we have a limit on #fragments ? I yet have to get a reply
	 * from Aspeed. If there's one I haven't hit it.
	 */
	nfrags = skb_shinfo(skb)->nr_frags;

	/* Setup HW checksumming */
	csum_vlan = 0;
	if (skb->ip_summed == CHECKSUM_PARTIAL &&
	    !ftgmac100_prep_tx_csum(skb, &csum_vlan))
		goto drop;

	/* Add VLAN tag */
	if (skb_vlan_tag_present(skb)) {
		csum_vlan |= FTGMAC100_TXDES1_INS_VLANTAG;
		csum_vlan |= skb_vlan_tag_get(skb) & 0xffff;
	}

	/* Get header len */
	len = skb_headlen(skb);

	/* Map the packet head */
	map = dma_map_single(priv->dev, skb->data, len, DMA_TO_DEVICE);
	if (dma_mapping_error(priv->dev, map)) {
		if (net_ratelimit())
			netdev_err(netdev, "map tx packet head failed\n");
		goto drop;
	}

	/* Grab the next free tx descriptor */
	pointer = priv->tx_pointer;
	txdes = first = &priv->txdes[pointer];

	/* Setup it up with the packet head. Don't write the head to the
	 * ring just yet
	 */
	priv->tx_skbs[pointer] = skb;
	f_ctl_stat = ftgmac100_base_tx_ctlstat(priv, pointer);
	f_ctl_stat |= FTGMAC100_TXDES0_TXDMA_OWN;
	f_ctl_stat |= FTGMAC100_TXDES0_TXBUF_SIZE(len);
	f_ctl_stat |= FTGMAC100_TXDES0_FTS;
	if (nfrags == 0)
		f_ctl_stat |= FTGMAC100_TXDES0_LTS;
	txdes->txdes3 = cpu_to_le32(map);
	txdes->txdes1 = cpu_to_le32(csum_vlan);

	/* Next descriptor */
	pointer = ftgmac100_next_tx_pointer(priv, pointer);

	/* Add the fragments */
	for (i = 0; i < nfrags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		len = skb_frag_size(frag);

		/* Map it */
		map = skb_frag_dma_map(priv->dev, frag, 0, len,
				       DMA_TO_DEVICE);
		if (dma_mapping_error(priv->dev, map))
			goto dma_err;

		/* Setup descriptor */
		priv->tx_skbs[pointer] = skb;
		txdes = &priv->txdes[pointer];
		ctl_stat = ftgmac100_base_tx_ctlstat(priv, pointer);
		ctl_stat |= FTGMAC100_TXDES0_TXDMA_OWN;
		ctl_stat |= FTGMAC100_TXDES0_TXBUF_SIZE(len);
		if (i == (nfrags - 1))
			ctl_stat |= FTGMAC100_TXDES0_LTS;
		txdes->txdes0 = cpu_to_le32(ctl_stat);
		txdes->txdes1 = 0;
		txdes->txdes3 = cpu_to_le32(map);

		/* Next one */
		pointer = ftgmac100_next_tx_pointer(priv, pointer);
	}

	/* Order the previous packet and descriptor updates
	 * before setting the OWN bit on the first descriptor.
	 */
	dma_wmb();
	first->txdes0 = cpu_to_le32(f_ctl_stat);

	/* Update next TX pointer */
	priv->tx_pointer = pointer;

	/* If there isn't enough room for all the fragments of a new packet
	 * in the TX ring, stop the queue. The sequence below is race free
	 * vs. a concurrent restart in ftgmac100_poll()
	 */
	if (unlikely(ftgmac100_tx_buf_avail(priv) < TX_THRESHOLD)) {
		netif_stop_queue(netdev);
		/* Order the queue stop with the test below */
		smp_mb();
		if (ftgmac100_tx_buf_avail(priv) >= TX_THRESHOLD)
			netif_wake_queue(netdev);
	}

	/* Poke transmitter to read the updated TX descriptors */
	iowrite32(1, priv->base + FTGMAC100_OFFSET_NPTXPD);

	return NETDEV_TX_OK;

dma_err:
	if (net_ratelimit())
		netdev_err(netdev, "map tx fragment failed\n");

	/* Free head */
	pointer = priv->tx_pointer;
	ftgmac100_free_tx_packet(priv, pointer, skb, first, f_ctl_stat);
	first->txdes0 = cpu_to_le32(f_ctl_stat & priv->txdes0_edotr_mask);

	/* Then all fragments */
	for (j = 0; j < i; j++) {
		pointer = ftgmac100_next_tx_pointer(priv, pointer);
		txdes = &priv->txdes[pointer];
		ctl_stat = le32_to_cpu(txdes->txdes0);
		ftgmac100_free_tx_packet(priv, pointer, skb, txdes, ctl_stat);
		txdes->txdes0 = cpu_to_le32(ctl_stat & priv->txdes0_edotr_mask);
	}

	/* This cannot be reached if we successfully mapped the
	 * last fragment, so we know ftgmac100_free_tx_packet()
	 * hasn't freed the skb yet.
	 */
drop:
	/* Drop the packet */
	dev_kfree_skb_any(skb);
	netdev->stats.tx_dropped++;

	return NETDEV_TX_OK;
}

/* Unmap and free every RX skb and every in-flight TX skb. The
 * descriptor/skb-array memory itself is released separately by
 * ftgmac100_free_rings().
 */
static void ftgmac100_free_buffers(struct ftgmac100 *priv)
{
	int i;

	/* Free all RX buffers */
	for (i = 0; i < priv->rx_q_entries; i++) {
		struct ftgmac100_rxdes *rxdes = &priv->rxdes[i];
		struct sk_buff *skb = priv->rx_skbs[i];
		dma_addr_t map = le32_to_cpu(rxdes->rxdes3);

		if (!skb)
			continue;

		priv->rx_skbs[i] = NULL;
		dma_unmap_single(priv->dev, map, RX_BUF_SIZE, DMA_FROM_DEVICE);
		dev_kfree_skb_any(skb);
	}

	/* Free all TX buffers */
	for (i = 0; i < priv->tx_q_entries; i++) {
		struct ftgmac100_txdes *txdes = &priv->txdes[i];
		struct sk_buff *skb = priv->tx_skbs[i];

		if (!skb)
			continue;
		ftgmac100_free_tx_packet(priv, i, skb, txdes,
					 le32_to_cpu(txdes->txdes0));
	}
}

/* Free the skb pointer arrays, the descriptor rings (always allocated
 * at the MAX_*_QUEUE_ENTRIES size) and the RX scratch buffer.
 */
static void ftgmac100_free_rings(struct ftgmac100 *priv)
{
	/* Free skb arrays */
	kfree(priv->rx_skbs);
	kfree(priv->tx_skbs);

	/* Free descriptors */
	if (priv->rxdes)
		dma_free_coherent(priv->dev, MAX_RX_QUEUE_ENTRIES *
				  sizeof(struct ftgmac100_rxdes),
				  priv->rxdes, priv->rxdes_dma);
	priv->rxdes = NULL;

	if (priv->txdes)
		dma_free_coherent(priv->dev, MAX_TX_QUEUE_ENTRIES *
				  sizeof(struct ftgmac100_txdes),
				  priv->txdes, priv->txdes_dma);
	priv->txdes = NULL;

	/* Free scratch packet buffer */
	if (priv->rx_scratch)
		dma_free_coherent(priv->dev, RX_BUF_SIZE,
				  priv->rx_scratch, priv->rx_scratch_dma);
}

/* Allocate skb arrays, descriptor rings (at maximum size so a ring
 * resize never needs a reallocation) and the RX scratch buffer.
 * Returns -ENOMEM on failure; the caller cleans up with
 * ftgmac100_free_rings().
 */
static int ftgmac100_alloc_rings(struct ftgmac100 *priv)
{
	/* Allocate skb arrays */
	priv->rx_skbs = kcalloc(MAX_RX_QUEUE_ENTRIES, sizeof(void *),
				GFP_KERNEL);
	if (!priv->rx_skbs)
		return -ENOMEM;
	priv->tx_skbs = kcalloc(MAX_TX_QUEUE_ENTRIES, sizeof(void *),
				GFP_KERNEL);
	if (!priv->tx_skbs)
		return -ENOMEM;

	/* Allocate descriptors
*/ 928 priv->rxdes = dma_alloc_coherent(priv->dev, 929 MAX_RX_QUEUE_ENTRIES * sizeof(struct ftgmac100_rxdes), 930 &priv->rxdes_dma, GFP_KERNEL); 931 if (!priv->rxdes) 932 return -ENOMEM; 933 priv->txdes = dma_alloc_coherent(priv->dev, 934 MAX_TX_QUEUE_ENTRIES * sizeof(struct ftgmac100_txdes), 935 &priv->txdes_dma, GFP_KERNEL); 936 if (!priv->txdes) 937 return -ENOMEM; 938 939 /* Allocate scratch packet buffer */ 940 priv->rx_scratch = dma_alloc_coherent(priv->dev, 941 RX_BUF_SIZE, 942 &priv->rx_scratch_dma, 943 GFP_KERNEL); 944 if (!priv->rx_scratch) 945 return -ENOMEM; 946 947 return 0; 948 } 949 950 static void ftgmac100_init_rings(struct ftgmac100 *priv) 951 { 952 struct ftgmac100_rxdes *rxdes = NULL; 953 struct ftgmac100_txdes *txdes = NULL; 954 int i; 955 956 /* Update entries counts */ 957 priv->rx_q_entries = priv->new_rx_q_entries; 958 priv->tx_q_entries = priv->new_tx_q_entries; 959 960 if (WARN_ON(priv->rx_q_entries < MIN_RX_QUEUE_ENTRIES)) 961 return; 962 963 /* Initialize RX ring */ 964 for (i = 0; i < priv->rx_q_entries; i++) { 965 rxdes = &priv->rxdes[i]; 966 rxdes->rxdes0 = 0; 967 rxdes->rxdes3 = cpu_to_le32(priv->rx_scratch_dma); 968 } 969 /* Mark the end of the ring */ 970 rxdes->rxdes0 |= cpu_to_le32(priv->rxdes0_edorr_mask); 971 972 if (WARN_ON(priv->tx_q_entries < MIN_RX_QUEUE_ENTRIES)) 973 return; 974 975 /* Initialize TX ring */ 976 for (i = 0; i < priv->tx_q_entries; i++) { 977 txdes = &priv->txdes[i]; 978 txdes->txdes0 = 0; 979 } 980 txdes->txdes0 |= cpu_to_le32(priv->txdes0_edotr_mask); 981 } 982 983 static int ftgmac100_alloc_rx_buffers(struct ftgmac100 *priv) 984 { 985 int i; 986 987 for (i = 0; i < priv->rx_q_entries; i++) { 988 struct ftgmac100_rxdes *rxdes = &priv->rxdes[i]; 989 990 if (ftgmac100_alloc_rx_buf(priv, i, rxdes, GFP_KERNEL)) 991 return -ENOMEM; 992 } 993 return 0; 994 } 995 996 static void ftgmac100_adjust_link(struct net_device *netdev) 997 { 998 struct ftgmac100 *priv = netdev_priv(netdev); 999 struct phy_device *phydev 
			= netdev->phydev;
	bool tx_pause, rx_pause;
	int new_speed;

	/* We store "no link" as speed 0 */
	if (!phydev->link)
		new_speed = 0;
	else
		new_speed = phydev->speed;

	/* Grab pause settings from PHY if configured to do so */
	if (priv->aneg_pause) {
		rx_pause = tx_pause = phydev->pause;
		if (phydev->asym_pause)
			tx_pause = !rx_pause;
	} else {
		rx_pause = priv->rx_pause;
		tx_pause = priv->tx_pause;
	}

	/* Link hasn't changed, do nothing */
	if (phydev->speed == priv->cur_speed &&
	    phydev->duplex == priv->cur_duplex &&
	    rx_pause == priv->rx_pause &&
	    tx_pause == priv->tx_pause)
		return;

	/* Print status if we have a link or we had one and just lost it,
	 * don't print otherwise.
	 */
	if (new_speed || priv->cur_speed)
		phy_print_status(phydev);

	priv->cur_speed = new_speed;
	priv->cur_duplex = phydev->duplex;
	priv->rx_pause = rx_pause;
	priv->tx_pause = tx_pause;

	/* Link is down, do nothing else */
	if (!new_speed)
		return;

	/* Disable all interrupts */
	iowrite32(0, priv->base + FTGMAC100_OFFSET_IER);

	/* Reset the adapter asynchronously */
	schedule_work(&priv->reset_task);
}

/* Find the first PHY on our MDIO bus and attach to it with
 * ftgmac100_adjust_link as the link-change callback.
 */
static int ftgmac100_mii_probe(struct ftgmac100 *priv, phy_interface_t intf)
{
	struct net_device *netdev = priv->netdev;
	struct phy_device *phydev;

	phydev = phy_find_first(priv->mii_bus);
	if (!phydev) {
		netdev_info(netdev, "%s: no PHY found\n", netdev->name);
		return -ENODEV;
	}

	phydev = phy_connect(netdev, phydev_name(phydev),
			     &ftgmac100_adjust_link, intf);

	if (IS_ERR(phydev)) {
		netdev_err(netdev, "%s: Could not attach to PHY\n", netdev->name);
		return PTR_ERR(phydev);
	}

	/* Indicate that we support PAUSE frames (see comment in
	 * Documentation/networking/phy.rst)
	 */
	phy_support_asym_pause(phydev);

	/* Display what we found */
	phy_attached_info(phydev);

	return 0;
}

/* MDIO bus read: program PHYCR with the address/register and the MIIRD
 * bit, then poll for MIIRD to self-clear. Returns the 16-bit register
 * value or -EIO after ~1ms without completion.
 */
static int ftgmac100_mdiobus_read(struct mii_bus *bus, int phy_addr, int regnum)
{
	struct net_device *netdev = bus->priv;
	struct ftgmac100 *priv = netdev_priv(netdev);
	unsigned int phycr;
	int i;

	phycr = ioread32(priv->base + FTGMAC100_OFFSET_PHYCR);

	/* preserve MDC cycle threshold */
	phycr &= FTGMAC100_PHYCR_MDC_CYCTHR_MASK;

	phycr |= FTGMAC100_PHYCR_PHYAD(phy_addr) |
		 FTGMAC100_PHYCR_REGAD(regnum) |
		 FTGMAC100_PHYCR_MIIRD;

	iowrite32(phycr, priv->base + FTGMAC100_OFFSET_PHYCR);

	for (i = 0; i < 10; i++) {
		phycr = ioread32(priv->base + FTGMAC100_OFFSET_PHYCR);

		if ((phycr & FTGMAC100_PHYCR_MIIRD) == 0) {
			int data;

			data = ioread32(priv->base + FTGMAC100_OFFSET_PHYDATA);
			return FTGMAC100_PHYDATA_MIIRDATA(data);
		}

		udelay(100);
	}

	netdev_err(netdev, "mdio read timed out\n");
	return -EIO;
}

/* MDIO bus write: load PHYDATA, program PHYCR with the MIIWR bit, then
 * poll for MIIWR to self-clear. Returns 0 or -EIO after ~1ms without
 * completion.
 */
static int ftgmac100_mdiobus_write(struct mii_bus *bus, int phy_addr,
				   int regnum, u16 value)
{
	struct net_device *netdev = bus->priv;
	struct ftgmac100 *priv = netdev_priv(netdev);
	unsigned int phycr;
	int data;
	int i;

	phycr = ioread32(priv->base + FTGMAC100_OFFSET_PHYCR);

	/* preserve MDC cycle threshold */
	phycr &= FTGMAC100_PHYCR_MDC_CYCTHR_MASK;

	phycr |= FTGMAC100_PHYCR_PHYAD(phy_addr) |
		 FTGMAC100_PHYCR_REGAD(regnum) |
		 FTGMAC100_PHYCR_MIIWR;

	data = FTGMAC100_PHYDATA_MIIWDATA(value);

	iowrite32(data, priv->base + FTGMAC100_OFFSET_PHYDATA);
	iowrite32(phycr, priv->base + FTGMAC100_OFFSET_PHYCR);

	for (i = 0; i < 10; i++) {
		phycr = ioread32(priv->base + FTGMAC100_OFFSET_PHYCR);

		if ((phycr & FTGMAC100_PHYCR_MIIWR) == 0)
			return 0;

		udelay(100);
	}

	netdev_err(netdev, "mdio write timed out\n");
	return -EIO;
}

/* ethtool: report driver name, version and bus info */
static void ftgmac100_get_drvinfo(struct net_device *netdev,
				  struct ethtool_drvinfo *info)
{
	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
	strlcpy(info->bus_info, dev_name(&netdev->dev), sizeof(info->bus_info));
}

/* ethtool: report current and maximum RX/TX ring sizes */
static void ftgmac100_get_ringparam(struct net_device *netdev,
				    struct ethtool_ringparam *ering)
{
	struct ftgmac100 *priv = netdev_priv(netdev);

	memset(ering, 0, sizeof(*ering));
	ering->rx_max_pending = MAX_RX_QUEUE_ENTRIES;
	ering->tx_max_pending = MAX_TX_QUEUE_ENTRIES;
	ering->rx_pending = priv->rx_q_entries;
	ering->tx_pending = priv->tx_q_entries;
}

/* ethtool: validate and stage new ring sizes. Sizes must be powers of
 * two within [MIN,MAX]; the new values only take effect via the reset
 * task (scheduled here if the interface is running, otherwise applied
 * on the next open).
 */
static int ftgmac100_set_ringparam(struct net_device *netdev,
				   struct ethtool_ringparam *ering)
{
	struct ftgmac100 *priv = netdev_priv(netdev);

	if (ering->rx_pending > MAX_RX_QUEUE_ENTRIES ||
	    ering->tx_pending > MAX_TX_QUEUE_ENTRIES ||
	    ering->rx_pending < MIN_RX_QUEUE_ENTRIES ||
	    ering->tx_pending < MIN_TX_QUEUE_ENTRIES ||
	    !is_power_of_2(ering->rx_pending) ||
	    !is_power_of_2(ering->tx_pending))
		return -EINVAL;

	priv->new_rx_q_entries = ering->rx_pending;
	priv->new_tx_q_entries = ering->tx_pending;
	if (netif_running(netdev))
		schedule_work(&priv->reset_task);

	return 0;
}

/* ethtool: report pause autoneg and current TX/RX pause settings */
static void ftgmac100_get_pauseparam(struct net_device *netdev,
				     struct ethtool_pauseparam *pause)
{
	struct ftgmac100 *priv = netdev_priv(netdev);

	pause->autoneg = priv->aneg_pause;
	pause->tx_pause = priv->tx_pause;
	pause->rx_pause = priv->rx_pause;
}

/* ethtool: update pause settings, propagate them to the PHY if present,
 * and apply them to the MAC immediately unless autoneg via the PHY
 * will do it on the next link change.
 */
static int ftgmac100_set_pauseparam(struct net_device *netdev,
				    struct ethtool_pauseparam *pause)
{
	struct ftgmac100 *priv = netdev_priv(netdev);
	struct phy_device
*phydev = netdev->phydev;

	priv->aneg_pause = pause->autoneg;
	priv->tx_pause = pause->tx_pause;
	priv->rx_pause = pause->rx_pause;

	if (phydev)
		phy_set_asym_pause(phydev, pause->rx_pause, pause->tx_pause);

	if (netif_running(netdev)) {
		/* Without a PHY (or with autoneg off) nobody else will
		 * push the settings to the MAC, so do it here.
		 */
		if (!(phydev && priv->aneg_pause))
			ftgmac100_config_pause(priv);
	}

	return 0;
}

static const struct ethtool_ops ftgmac100_ethtool_ops = {
	.get_drvinfo		= ftgmac100_get_drvinfo,
	.get_link		= ethtool_op_get_link,
	.get_link_ksettings	= phy_ethtool_get_link_ksettings,
	.set_link_ksettings	= phy_ethtool_set_link_ksettings,
	.nway_reset		= phy_ethtool_nway_reset,
	.get_ringparam		= ftgmac100_get_ringparam,
	.set_ringparam		= ftgmac100_set_ringparam,
	.get_pauseparam		= ftgmac100_get_pauseparam,
	.set_pauseparam		= ftgmac100_set_pauseparam,
};

/* Hard interrupt handler: acknowledge all pending interrupt bits,
 * account "bad" (error) interrupts, and hand normal RX/TX processing
 * to NAPI. An AHB bus error triggers a full asynchronous chip reset.
 */
static irqreturn_t ftgmac100_interrupt(int irq, void *dev_id)
{
	struct net_device *netdev = dev_id;
	struct ftgmac100 *priv = netdev_priv(netdev);
	unsigned int status, new_mask = FTGMAC100_INT_BAD;

	/* Fetch and clear interrupt bits, process abnormal ones */
	status = ioread32(priv->base + FTGMAC100_OFFSET_ISR);
	iowrite32(status, priv->base + FTGMAC100_OFFSET_ISR);
	if (unlikely(status & FTGMAC100_INT_BAD)) {

		/* RX buffer unavailable */
		if (status & FTGMAC100_INT_NO_RXBUF)
			netdev->stats.rx_over_errors++;

		/* received packet lost due to RX FIFO full */
		if (status & FTGMAC100_INT_RPKT_LOST)
			netdev->stats.rx_fifo_errors++;

		/* sent packet lost due to excessive TX collision */
		if (status & FTGMAC100_INT_XPKT_LOST)
			netdev->stats.tx_fifo_errors++;

		/* AHB error -> Reset the chip */
		if (status & FTGMAC100_INT_AHB_ERR) {
			if (net_ratelimit())
				netdev_warn(netdev,
					    "AHB bus error ! Resetting chip.\n");
			iowrite32(0, priv->base + FTGMAC100_OFFSET_IER);
			schedule_work(&priv->reset_task);
			return IRQ_HANDLED;
		}

		/* We may need to restart the MAC after such errors, delay
		 * this until after we have freed some Rx buffers though
		 */
		priv->need_mac_restart = true;

		/* Disable those errors until we restart */
		new_mask &= ~status;
	}

	/* Only enable "bad" interrupts while NAPI is on */
	iowrite32(new_mask, priv->base + FTGMAC100_OFFSET_IER);

	/* Schedule NAPI bh */
	napi_schedule_irqoff(&priv->napi);

	return IRQ_HANDLED;
}

/* Check whether the next RX descriptor holds a ready packet */
static bool ftgmac100_check_rx(struct ftgmac100 *priv)
{
	struct ftgmac100_rxdes *rxdes = &priv->rxdes[priv->rx_pointer];

	/* Do we have a packet ? */
	return !!(rxdes->rxdes0 & cpu_to_le32(FTGMAC100_RXDES0_RXPKT_RDY));
}

/* NAPI poll: complete finished TX, consume RX packets up to budget,
 * restart the MAC if an earlier error interrupt asked for it, and
 * re-enable interrupts once all pending work is drained.
 */
static int ftgmac100_poll(struct napi_struct *napi, int budget)
{
	struct ftgmac100 *priv = container_of(napi, struct ftgmac100, napi);
	int work_done = 0;
	bool more;

	/* Handle TX completions */
	if (ftgmac100_tx_buf_cleanable(priv))
		ftgmac100_tx_complete(priv);

	/* Handle RX packets */
	do {
		more = ftgmac100_rx_packet(priv, &work_done);
	} while (more && work_done < budget);

	/* The interrupt is telling us to kick the MAC back to life
	 * after an RX overflow
	 */
	if (unlikely(priv->need_mac_restart)) {
		ftgmac100_start_hw(priv);

		/* Re-enable "bad" interrupts */
		iowrite32(FTGMAC100_INT_BAD,
			  priv->base + FTGMAC100_OFFSET_IER);
	}

	/* As long as we are waiting for transmit packets to be
	 * completed we keep NAPI going
	 */
	if (ftgmac100_tx_buf_cleanable(priv))
		work_done = budget;

	if (work_done < budget) {
		/* We are about to re-enable all interrupts.
However
		 * the HW has been latching RX/TX packet interrupts while
		 * they were masked. So we clear them first, then we need
		 * to re-check if there's something to process
		 */
		iowrite32(FTGMAC100_INT_RXTX,
			  priv->base + FTGMAC100_OFFSET_ISR);

		/* Push the above (and provides a barrier vs. subsequent
		 * reads of the descriptor).
		 */
		ioread32(priv->base + FTGMAC100_OFFSET_ISR);

		/* Check RX and TX descriptors for more work to do */
		if (ftgmac100_check_rx(priv) ||
		    ftgmac100_tx_buf_cleanable(priv))
			return budget;

		/* deschedule NAPI */
		napi_complete(napi);

		/* enable all interrupts */
		iowrite32(FTGMAC100_INT_ALL,
			  priv->base + FTGMAC100_OFFSET_IER);
	}

	return work_done;
}

/* (Re)initialize rings and hardware and bring the interface back up.
 * With ignore_alloc_err set (reset path), an RX buffer allocation
 * failure is reported but does not abort the bring-up.
 */
static int ftgmac100_init_all(struct ftgmac100 *priv, bool ignore_alloc_err)
{
	int err = 0;

	/* Re-init descriptors (adjust queue sizes) */
	ftgmac100_init_rings(priv);

	/* Realloc rx descriptors */
	err = ftgmac100_alloc_rx_buffers(priv);
	if (err && !ignore_alloc_err)
		return err;

	/* Reinit and restart HW */
	ftgmac100_init_hw(priv);
	ftgmac100_config_pause(priv);
	ftgmac100_start_hw(priv);

	/* Re-enable the device */
	napi_enable(&priv->napi);
	netif_start_queue(priv->netdev);

	/* Enable all interrupts */
	iowrite32(FTGMAC100_INT_ALL, priv->base + FTGMAC100_OFFSET_IER);

	return err;
}

/* Workqueue handler performing a full asynchronous chip reset:
 * quiesce the stack, reset and reconfigure the MAC, free all buffers,
 * then rebuild everything via ftgmac100_init_all().
 */
static void ftgmac100_reset_task(struct work_struct *work)
{
	struct ftgmac100 *priv = container_of(work, struct ftgmac100,
					      reset_task);
	struct net_device *netdev = priv->netdev;
	int err;

	netdev_dbg(netdev, "Resetting NIC...\n");

	/* Lock the world: rtnl, then the PHY, then the MDIO bus */
	rtnl_lock();
	if (netdev->phydev)
		mutex_lock(&netdev->phydev->lock);
	if (priv->mii_bus)
		mutex_lock(&priv->mii_bus->mdio_lock);

	/* Check if the
interface is still up */
	if (!netif_running(netdev))
		goto bail;

	/* Stop the network stack */
	netif_trans_update(netdev);
	napi_disable(&priv->napi);
	netif_tx_disable(netdev);

	/* Stop and reset the MAC */
	ftgmac100_stop_hw(priv);
	err = ftgmac100_reset_and_config_mac(priv);
	if (err) {
		/* Not much we can do ... it might come back... */
		netdev_err(netdev, "attempting to continue...\n");
	}

	/* Free all rx and tx buffers */
	ftgmac100_free_buffers(priv);

	/* Setup everything again and restart chip */
	ftgmac100_init_all(priv, true);

	netdev_dbg(netdev, "Reset done !\n");
bail:
	if (priv->mii_bus)
		mutex_unlock(&priv->mii_bus->mdio_lock);
	if (netdev->phydev)
		mutex_unlock(&netdev->phydev->lock);
	rtnl_unlock();
}

/* ndo_open: allocate rings, reset the MAC, register the IRQ and NAPI
 * context, bring up the buffers/hardware, then start either the PHY
 * state machine or the NC-SI stack.
 */
static int ftgmac100_open(struct net_device *netdev)
{
	struct ftgmac100 *priv = netdev_priv(netdev);
	int err;

	/* Allocate ring buffers */
	err = ftgmac100_alloc_rings(priv);
	if (err) {
		netdev_err(netdev, "Failed to allocate descriptors\n");
		return err;
	}

	/* When using NC-SI we force the speed to 100Mbit/s full duplex,
	 *
	 * Otherwise we leave it set to 0 (no link), the link
	 * message from the PHY layer will handle setting it up to
	 * something else if needed.
	 */
	if (priv->use_ncsi) {
		priv->cur_duplex = DUPLEX_FULL;
		priv->cur_speed = SPEED_100;
	} else {
		priv->cur_duplex = 0;
		priv->cur_speed = 0;
	}

	/* Reset the hardware */
	err = ftgmac100_reset_and_config_mac(priv);
	if (err)
		goto err_hw;

	/* Initialize NAPI */
	netif_napi_add(netdev, &priv->napi, ftgmac100_poll, 64);

	/* Grab our interrupt */
	err = request_irq(netdev->irq, ftgmac100_interrupt, 0, netdev->name, netdev);
	if (err) {
		netdev_err(netdev, "failed to request irq %d\n", netdev->irq);
		goto err_irq;
	}

	/* Start things up */
	err = ftgmac100_init_all(priv, false);
	if (err) {
		netdev_err(netdev, "Failed to allocate packet buffers\n");
		goto err_alloc;
	}

	if (netdev->phydev) {
		/* If we have a PHY, start polling */
		phy_start(netdev->phydev);
	} else if (priv->use_ncsi) {
		/* If using NC-SI, set our carrier on and start the stack */
		netif_carrier_on(netdev);

		/* Start the NCSI device */
		err = ncsi_start_dev(priv->ndev);
		if (err)
			goto err_ncsi;
	}

	return 0;

	/* Error unwind: each label undoes the steps completed above it */
err_ncsi:
	napi_disable(&priv->napi);
	netif_stop_queue(netdev);
err_alloc:
	ftgmac100_free_buffers(priv);
	free_irq(netdev->irq, netdev);
err_irq:
	netif_napi_del(&priv->napi);
err_hw:
	iowrite32(0, priv->base + FTGMAC100_OFFSET_IER);
	ftgmac100_free_rings(priv);
	return err;
}

/* ndo_stop: mask interrupts, quiesce NAPI and the TX queue, stop the
 * PHY or NC-SI stack, halt the MAC and release IRQ, buffers and rings.
 */
static int ftgmac100_stop(struct net_device *netdev)
{
	struct ftgmac100 *priv = netdev_priv(netdev);

	/* Note about the reset task: We are called with the rtnl lock
	 * held, so we are synchronized against the core of the reset
	 * task. We must not try to synchronously cancel it otherwise
	 * we can deadlock.
But since it will test for netif_running()
	 * which has already been cleared by the net core, we don't
	 * have anything special to do.
	 */

	/* disable all interrupts */
	iowrite32(0, priv->base + FTGMAC100_OFFSET_IER);

	netif_stop_queue(netdev);
	napi_disable(&priv->napi);
	netif_napi_del(&priv->napi);
	if (netdev->phydev)
		phy_stop(netdev->phydev);
	else if (priv->use_ncsi)
		ncsi_stop_dev(priv->ndev);

	ftgmac100_stop_hw(priv);
	free_irq(netdev->irq, netdev);
	ftgmac100_free_buffers(priv);
	ftgmac100_free_rings(priv);

	return 0;
}

/* ndo_tx_timeout: mask interrupts and defer a full chip reset to the
 * workqueue (we cannot reset synchronously from this context).
 */
static void ftgmac100_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
	struct ftgmac100 *priv = netdev_priv(netdev);

	/* Disable all interrupts */
	iowrite32(0, priv->base + FTGMAC100_OFFSET_IER);

	/* Do the reset outside of interrupt context */
	schedule_work(&priv->reset_task);
}

/* ndo_set_features: only the VLAN RX-stripping bit requires a MACCR
 * update; everything else is handled generically by the core.
 */
static int ftgmac100_set_features(struct net_device *netdev,
				  netdev_features_t features)
{
	struct ftgmac100 *priv = netdev_priv(netdev);
	netdev_features_t changed = netdev->features ^ features;

	if (!netif_running(netdev))
		return 0;

	/* Update the vlan filtering bit */
	if (changed & NETIF_F_HW_VLAN_CTAG_RX) {
		u32 maccr;

		maccr = ioread32(priv->base + FTGMAC100_OFFSET_MACCR);
		if (priv->netdev->features & NETIF_F_HW_VLAN_CTAG_RX)
			maccr |= FTGMAC100_MACCR_RM_VLAN;
		else
			maccr &= ~FTGMAC100_MACCR_RM_VLAN;
		iowrite32(maccr, priv->base + FTGMAC100_OFFSET_MACCR);
	}

	return 0;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/* netpoll entry point: invoke the IRQ handler with interrupts masked */
static void ftgmac100_poll_controller(struct net_device *netdev)
{
	unsigned long flags;

	local_irq_save(flags);
	ftgmac100_interrupt(netdev->irq, netdev);
	local_irq_restore(flags);
}
#endif

static const struct net_device_ops
ftgmac100_netdev_ops = {
	.ndo_open		= ftgmac100_open,
	.ndo_stop		= ftgmac100_stop,
	.ndo_start_xmit		= ftgmac100_hard_start_xmit,
	.ndo_set_mac_address	= ftgmac100_set_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_do_ioctl		= phy_do_ioctl,
	.ndo_tx_timeout		= ftgmac100_tx_timeout,
	.ndo_set_rx_mode	= ftgmac100_set_rx_mode,
	.ndo_set_features	= ftgmac100_set_features,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= ftgmac100_poll_controller,
#endif
	.ndo_vlan_rx_add_vid	= ncsi_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= ncsi_vlan_rx_kill_vid,
};

/* Allocate and register the embedded MDIO bus, determine the PHY
 * interface mode from the device-tree, and probe/connect the PHY.
 */
static int ftgmac100_setup_mdio(struct net_device *netdev)
{
	struct ftgmac100 *priv = netdev_priv(netdev);
	struct platform_device *pdev = to_platform_device(priv->dev);
	phy_interface_t phy_intf = PHY_INTERFACE_MODE_RGMII;
	struct device_node *np = pdev->dev.of_node;
	int i, err = 0;
	u32 reg;

	/* initialize mdio bus */
	priv->mii_bus = mdiobus_alloc();
	if (!priv->mii_bus)
		return -EIO;

	if (of_device_is_compatible(np, "aspeed,ast2400-mac") ||
	    of_device_is_compatible(np, "aspeed,ast2500-mac")) {
		/* The AST2600 has a separate MDIO controller */

		/* For the AST2400 and AST2500 this driver only supports the
		 * old MDIO interface
		 */
		reg = ioread32(priv->base + FTGMAC100_OFFSET_REVR);
		reg &= ~FTGMAC100_REVR_NEW_MDIO_INTERFACE;
		iowrite32(reg, priv->base + FTGMAC100_OFFSET_REVR);
	}

	/* Get PHY mode from device-tree */
	if (np) {
		/* Default to RGMII. It's a gigabit part after all */
		err = of_get_phy_mode(np, &phy_intf);
		if (err)
			phy_intf = PHY_INTERFACE_MODE_RGMII;

		/* Aspeed only supports these. I don't know about other IP
		 * block vendors so I'm going to just let them through for
		 * now.
Note that this is only a warning if for some obscure
		 * reason the DT really means to lie about it or it's a newer
		 * part we don't know about.
		 *
		 * On the Aspeed SoC there are additionally straps and SCU
		 * control bits that could tell us what the interface is
		 * (or allow us to configure it while the IP block is held
		 * in reset). For now I chose to keep this driver away from
		 * those SoC specific bits and assume the device-tree is
		 * right and the SCU has been configured properly by pinmux
		 * or the firmware.
		 */
		if (priv->is_aspeed &&
		    phy_intf != PHY_INTERFACE_MODE_RMII &&
		    phy_intf != PHY_INTERFACE_MODE_RGMII &&
		    phy_intf != PHY_INTERFACE_MODE_RGMII_ID &&
		    phy_intf != PHY_INTERFACE_MODE_RGMII_RXID &&
		    phy_intf != PHY_INTERFACE_MODE_RGMII_TXID) {
			netdev_warn(netdev,
				    "Unsupported PHY mode %s !\n",
				    phy_modes(phy_intf));
		}
	}

	/* Fill in the mii_bus fields and register it */
	priv->mii_bus->name = "ftgmac100_mdio";
	snprintf(priv->mii_bus->id, MII_BUS_ID_SIZE, "%s-%d",
		 pdev->name, pdev->id);
	priv->mii_bus->parent = priv->dev;
	priv->mii_bus->priv = priv->netdev;
	priv->mii_bus->read = ftgmac100_mdiobus_read;
	priv->mii_bus->write = ftgmac100_mdiobus_write;

	for (i = 0; i < PHY_MAX_ADDR; i++)
		priv->mii_bus->irq[i] = PHY_POLL;

	err = mdiobus_register(priv->mii_bus);
	if (err) {
		dev_err(priv->dev, "Cannot register MDIO bus!\n");
		goto err_register_mdiobus;
	}

	err = ftgmac100_mii_probe(priv, phy_intf);
	if (err) {
		dev_err(priv->dev, "MII Probe failed!\n");
		goto err_mii_probe;
	}

	return 0;

err_mii_probe:
	mdiobus_unregister(priv->mii_bus);
err_register_mdiobus:
	mdiobus_free(priv->mii_bus);
	return err;
}

/* Tear down the PHY connection and MDIO bus created by
 * ftgmac100_setup_mdio(). A missing phydev means there is nothing
 * to undo (e.g. NC-SI or DT phy-handle configurations).
 */
static void ftgmac100_destroy_mdio(struct net_device *netdev)
{
	struct ftgmac100 *priv = netdev_priv(netdev);

	if (!netdev->phydev)
		return;
	phy_disconnect(netdev->phydev);
	mdiobus_unregister(priv->mii_bus);
	mdiobus_free(priv->mii_bus);
}

/* NC-SI notification handler: just log link transitions once the
 * device reaches the functional state.
 */
static void ftgmac100_ncsi_handler(struct ncsi_dev *nd)
{
	if (unlikely(nd->state != ncsi_dev_state_functional))
		return;

	netdev_dbg(nd->dev, "NCSI interface %s\n",
		   nd->link_up ? "up" : "down");
}

/* Acquire and configure the MAC clock (and optional RMII RCLK).
 * Returns 0 on success or a negative errno; on failure the MAC clock
 * is disabled again.
 */
static int ftgmac100_setup_clk(struct ftgmac100 *priv)
{
	struct clk *clk;
	int rc;

	clk = devm_clk_get(priv->dev, NULL /* MACCLK */);
	if (IS_ERR(clk))
		return PTR_ERR(clk);
	priv->clk = clk;
	rc = clk_prepare_enable(priv->clk);
	if (rc)
		return rc;

	/* Aspeed specifies a 100MHz clock is required for up to
	 * 1000Mbit link speeds. As NCSI is limited to 100Mbit, 25MHz
	 * is sufficient
	 */
	rc = clk_set_rate(priv->clk, priv->use_ncsi ? FTGMAC_25MHZ :
			  FTGMAC_100MHZ);
	if (rc)
		goto cleanup_clk;

	/* RCLK is for RMII, typically used for NCSI. Optional because it's not
	 * necessary if it's the AST2400 MAC, or the MAC is configured for
	 * RGMII, or the controller is not an ASPEED-based controller.
1739 */ 1740 priv->rclk = devm_clk_get_optional(priv->dev, "RCLK"); 1741 rc = clk_prepare_enable(priv->rclk); 1742 if (!rc) 1743 return 0; 1744 1745 cleanup_clk: 1746 clk_disable_unprepare(priv->clk); 1747 1748 return rc; 1749 } 1750 1751 static int ftgmac100_probe(struct platform_device *pdev) 1752 { 1753 struct resource *res; 1754 int irq; 1755 struct net_device *netdev; 1756 struct ftgmac100 *priv; 1757 struct device_node *np; 1758 int err = 0; 1759 1760 if (!pdev) 1761 return -ENODEV; 1762 1763 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1764 if (!res) 1765 return -ENXIO; 1766 1767 irq = platform_get_irq(pdev, 0); 1768 if (irq < 0) 1769 return irq; 1770 1771 /* setup net_device */ 1772 netdev = alloc_etherdev(sizeof(*priv)); 1773 if (!netdev) { 1774 err = -ENOMEM; 1775 goto err_alloc_etherdev; 1776 } 1777 1778 SET_NETDEV_DEV(netdev, &pdev->dev); 1779 1780 netdev->ethtool_ops = &ftgmac100_ethtool_ops; 1781 netdev->netdev_ops = &ftgmac100_netdev_ops; 1782 netdev->watchdog_timeo = 5 * HZ; 1783 1784 platform_set_drvdata(pdev, netdev); 1785 1786 /* setup private data */ 1787 priv = netdev_priv(netdev); 1788 priv->netdev = netdev; 1789 priv->dev = &pdev->dev; 1790 INIT_WORK(&priv->reset_task, ftgmac100_reset_task); 1791 1792 /* map io memory */ 1793 priv->res = request_mem_region(res->start, resource_size(res), 1794 dev_name(&pdev->dev)); 1795 if (!priv->res) { 1796 dev_err(&pdev->dev, "Could not reserve memory region\n"); 1797 err = -ENOMEM; 1798 goto err_req_mem; 1799 } 1800 1801 priv->base = ioremap(res->start, resource_size(res)); 1802 if (!priv->base) { 1803 dev_err(&pdev->dev, "Failed to ioremap ethernet registers\n"); 1804 err = -EIO; 1805 goto err_ioremap; 1806 } 1807 1808 netdev->irq = irq; 1809 1810 /* Enable pause */ 1811 priv->tx_pause = true; 1812 priv->rx_pause = true; 1813 priv->aneg_pause = true; 1814 1815 /* MAC address from chip or random one */ 1816 ftgmac100_initial_mac(priv); 1817 1818 np = pdev->dev.of_node; 1819 if (np && 
(of_device_is_compatible(np, "aspeed,ast2400-mac") || 1820 of_device_is_compatible(np, "aspeed,ast2500-mac") || 1821 of_device_is_compatible(np, "aspeed,ast2600-mac"))) { 1822 priv->rxdes0_edorr_mask = BIT(30); 1823 priv->txdes0_edotr_mask = BIT(30); 1824 priv->is_aspeed = true; 1825 } else { 1826 priv->rxdes0_edorr_mask = BIT(15); 1827 priv->txdes0_edotr_mask = BIT(15); 1828 } 1829 1830 if (np && of_get_property(np, "use-ncsi", NULL)) { 1831 if (!IS_ENABLED(CONFIG_NET_NCSI)) { 1832 dev_err(&pdev->dev, "NCSI stack not enabled\n"); 1833 goto err_ncsi_dev; 1834 } 1835 1836 dev_info(&pdev->dev, "Using NCSI interface\n"); 1837 priv->use_ncsi = true; 1838 priv->ndev = ncsi_register_dev(netdev, ftgmac100_ncsi_handler); 1839 if (!priv->ndev) 1840 goto err_ncsi_dev; 1841 } else if (np && of_get_property(np, "phy-handle", NULL)) { 1842 struct phy_device *phy; 1843 1844 phy = of_phy_get_and_connect(priv->netdev, np, 1845 &ftgmac100_adjust_link); 1846 if (!phy) { 1847 dev_err(&pdev->dev, "Failed to connect to phy\n"); 1848 goto err_setup_mdio; 1849 } 1850 1851 /* Indicate that we support PAUSE frames (see comment in 1852 * Documentation/networking/phy.rst) 1853 */ 1854 phy_support_asym_pause(phy); 1855 1856 /* Display what we found */ 1857 phy_attached_info(phy); 1858 } else if (np && !of_get_child_by_name(np, "mdio")) { 1859 /* Support legacy ASPEED devicetree descriptions that decribe a 1860 * MAC with an embedded MDIO controller but have no "mdio" 1861 * child node. Automatically scan the MDIO bus for available 1862 * PHYs. 
1863 */ 1864 priv->use_ncsi = false; 1865 err = ftgmac100_setup_mdio(netdev); 1866 if (err) 1867 goto err_setup_mdio; 1868 } 1869 1870 if (priv->is_aspeed) { 1871 err = ftgmac100_setup_clk(priv); 1872 if (err) 1873 goto err_ncsi_dev; 1874 } 1875 1876 /* Default ring sizes */ 1877 priv->rx_q_entries = priv->new_rx_q_entries = DEF_RX_QUEUE_ENTRIES; 1878 priv->tx_q_entries = priv->new_tx_q_entries = DEF_TX_QUEUE_ENTRIES; 1879 1880 /* Base feature set */ 1881 netdev->hw_features = NETIF_F_RXCSUM | NETIF_F_HW_CSUM | 1882 NETIF_F_GRO | NETIF_F_SG | NETIF_F_HW_VLAN_CTAG_RX | 1883 NETIF_F_HW_VLAN_CTAG_TX; 1884 1885 if (priv->use_ncsi) 1886 netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER; 1887 1888 /* AST2400 doesn't have working HW checksum generation */ 1889 if (np && (of_device_is_compatible(np, "aspeed,ast2400-mac"))) 1890 netdev->hw_features &= ~NETIF_F_HW_CSUM; 1891 if (np && of_get_property(np, "no-hw-checksum", NULL)) 1892 netdev->hw_features &= ~(NETIF_F_HW_CSUM | NETIF_F_RXCSUM); 1893 netdev->features |= netdev->hw_features; 1894 1895 /* register network device */ 1896 err = register_netdev(netdev); 1897 if (err) { 1898 dev_err(&pdev->dev, "Failed to register netdev\n"); 1899 goto err_register_netdev; 1900 } 1901 1902 netdev_info(netdev, "irq %d, mapped at %p\n", netdev->irq, priv->base); 1903 1904 return 0; 1905 1906 err_register_netdev: 1907 clk_disable_unprepare(priv->rclk); 1908 clk_disable_unprepare(priv->clk); 1909 err_ncsi_dev: 1910 ftgmac100_destroy_mdio(netdev); 1911 err_setup_mdio: 1912 iounmap(priv->base); 1913 err_ioremap: 1914 release_resource(priv->res); 1915 err_req_mem: 1916 free_netdev(netdev); 1917 err_alloc_etherdev: 1918 return err; 1919 } 1920 1921 static int ftgmac100_remove(struct platform_device *pdev) 1922 { 1923 struct net_device *netdev; 1924 struct ftgmac100 *priv; 1925 1926 netdev = platform_get_drvdata(pdev); 1927 priv = netdev_priv(netdev); 1928 1929 unregister_netdev(netdev); 1930 1931 clk_disable_unprepare(priv->rclk); 1932 
clk_disable_unprepare(priv->clk); 1933 1934 /* There's a small chance the reset task will have been re-queued, 1935 * during stop, make sure it's gone before we free the structure. 1936 */ 1937 cancel_work_sync(&priv->reset_task); 1938 1939 ftgmac100_destroy_mdio(netdev); 1940 1941 iounmap(priv->base); 1942 release_resource(priv->res); 1943 1944 netif_napi_del(&priv->napi); 1945 free_netdev(netdev); 1946 return 0; 1947 } 1948 1949 static const struct of_device_id ftgmac100_of_match[] = { 1950 { .compatible = "faraday,ftgmac100" }, 1951 { } 1952 }; 1953 MODULE_DEVICE_TABLE(of, ftgmac100_of_match); 1954 1955 static struct platform_driver ftgmac100_driver = { 1956 .probe = ftgmac100_probe, 1957 .remove = ftgmac100_remove, 1958 .driver = { 1959 .name = DRV_NAME, 1960 .of_match_table = ftgmac100_of_match, 1961 }, 1962 }; 1963 module_platform_driver(ftgmac100_driver); 1964 1965 MODULE_AUTHOR("Po-Yu Chuang <ratbert@faraday-tech.com>"); 1966 MODULE_DESCRIPTION("FTGMAC100 driver"); 1967 MODULE_LICENSE("GPL"); 1968