/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

/* Qualcomm Technologies, Inc. EMAC Gigabit Ethernet Driver */

#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_net.h>
#include <linux/of_device.h>
#include <linux/phy.h>
#include <linux/platform_device.h>
#include <linux/acpi.h>
#include "emac.h"
#include "emac-mac.h"
#include "emac-phy.h"
#include "emac-sgmii.h"

#define EMAC_MSG_DEFAULT (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
			  NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP)

#define EMAC_RRD_SIZE		4
/* The RRD size if timestamping is enabled: */
#define EMAC_TS_RRD_SIZE	6
#define EMAC_TPD_SIZE		4
#define EMAC_RFD_SIZE		2

#define REG_MAC_RX_STATUS_BIN	EMAC_RXMAC_STATC_REG0
#define REG_MAC_RX_STATUS_END	EMAC_RXMAC_STATC_REG22
#define REG_MAC_TX_STATUS_BIN	EMAC_TXMAC_STATC_REG0
#define REG_MAC_TX_STATUS_END	EMAC_TXMAC_STATC_REG24

#define RXQ0_NUM_RFD_PREF_DEF	8
#define TXQ0_NUM_TPD_PREF_DEF	5

#define EMAC_PREAMBLE_DEF	7

#define DMAR_DLY_CNT_DEF	15
#define DMAW_DLY_CNT_DEF	4

#define IMR_NORMAL_MASK		(\
		ISR_ERROR	|\
		ISR_GPHY_LINK	|\
		ISR_TX_PKT	|\
		GPHY_WAKEUP_INT)

#define IMR_EXTENDED_MASK	(\
		SW_MAN_INT	|\
		ISR_OVER	|\
		ISR_ERROR	|\
		ISR_GPHY_LINK	|\
		ISR_TX_PKT	|\
		GPHY_WAKEUP_INT)

#define ISR_TX_PKT	(\
	TX_PKT_INT	|\
	TX_PKT_INT1	|\
	TX_PKT_INT2	|\
	TX_PKT_INT3)

#define ISR_GPHY_LINK	(\
	GPHY_LINK_UP_INT	|\
	GPHY_LINK_DOWN_INT)

#define ISR_OVER	(\
	RFD0_UR_INT	|\
	RFD1_UR_INT	|\
	RFD2_UR_INT	|\
	RFD3_UR_INT	|\
	RFD4_UR_INT	|\
	RXF_OF_INT	|\
	TXF_UR_INT)

#define ISR_ERROR	(\
	DMAR_TO_INT	|\
	DMAW_TO_INT	|\
	TXQ_TO_INT)

/* in sync with enum emac_clk_id */
static const char * const emac_clk_name[] = {
	"axi_clk", "cfg_ahb_clk", "high_speed_clk", "mdio_clk", "tx_clk",
	"rx_clk", "sys_clk"
};

void emac_reg_update32(void __iomem *addr, u32 mask, u32 val)
{
	u32 data = readl(addr);

	writel(((data & ~mask) | val), addr);
}

/* reinitialize */
int emac_reinit_locked(struct emac_adapter *adpt)
{
	int ret;

	mutex_lock(&adpt->reset_lock);

	emac_mac_down(adpt);
	emac_sgmii_reset(adpt);
	ret = emac_mac_up(adpt);

	mutex_unlock(&adpt->reset_lock);

	return ret;
}

/* NAPI */
static int emac_napi_rtx(struct napi_struct *napi, int budget)
{
	struct emac_rx_queue *rx_q =
		container_of(napi, struct emac_rx_queue, napi);
	struct emac_adapter *adpt = netdev_priv(rx_q->netdev);
	struct emac_irq *irq = rx_q->irq;
	int work_done = 0;

	emac_mac_rx_process(adpt, rx_q, &work_done, budget);

	if (work_done < budget) {
		napi_complete(napi);

		irq->mask |= rx_q->intr;
		writel(irq->mask, adpt->base + EMAC_INT_MASK);
	}

	return work_done;
}

/* Transmit the packet */
static int emac_start_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct emac_adapter *adpt = netdev_priv(netdev);

	return emac_mac_tx_buf_send(adpt, &adpt->tx_q, skb);
}

/* Primary interrupt handler: dispatches error, RX, TX completion and
 * PHY link events.
 */
irqreturn_t emac_isr(int _irq, void *data)
{
	struct emac_irq *irq = data;
	struct emac_adapter *adpt =
		container_of(irq, struct emac_adapter, irq);
	struct emac_rx_queue *rx_q = &adpt->rx_q;
	u32 isr, status;

	/* disable the interrupt */
	writel(0, adpt->base + EMAC_INT_MASK);

	isr = readl_relaxed(adpt->base + EMAC_INT_STATUS);

	status = isr & irq->mask;
	if (status == 0)
		goto exit;

	if (status & ISR_ERROR) {
		netif_warn(adpt, intr, adpt->netdev,
			   "warning: error irq status 0x%x\n",
			   status & ISR_ERROR);
		/* reset MAC */
		schedule_work(&adpt->work_thread);
	}

	/* Schedule NAPI for the receive queue whose interrupt status
	 * bit is set.
	 */
	if (status & rx_q->intr) {
		if (napi_schedule_prep(&rx_q->napi)) {
			irq->mask &= ~rx_q->intr;
			__napi_schedule(&rx_q->napi);
		}
	}

	if (status & TX_PKT_INT)
		emac_mac_tx_process(adpt, &adpt->tx_q);

	if (status & ISR_OVER)
		net_warn_ratelimited("warning: TX/RX overflow\n");

	/* link event */
	if (status & ISR_GPHY_LINK)
		phy_mac_interrupt(adpt->phydev, !!(status & GPHY_LINK_UP_INT));

exit:
	/* enable the interrupt */
	writel(irq->mask, adpt->base + EMAC_INT_MASK);

	return IRQ_HANDLED;
}

/* Configure VLAN tag strip/insert feature */
static int emac_set_features(struct net_device *netdev,
			     netdev_features_t features)
{
	netdev_features_t changed = features ^ netdev->features;
	struct emac_adapter *adpt = netdev_priv(netdev);

	/* We only need to reprogram the hardware if the VLAN tag features
	 * have changed, and if it's already running.
	 */
	if (!(changed & (NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX)))
		return 0;

	if (!netif_running(netdev))
		return 0;

	/* emac_mac_mode_config() uses netdev->features to configure the EMAC,
	 * so make sure it's set first.
	 */
	netdev->features = features;

	return emac_reinit_locked(adpt);
}

/* Configure Multicast and Promiscuous modes */
static void emac_rx_mode_set(struct net_device *netdev)
{
	struct emac_adapter *adpt = netdev_priv(netdev);
	struct netdev_hw_addr *ha;

	emac_mac_mode_config(adpt);

	/* update multicast address filtering */
	emac_mac_multicast_addr_clear(adpt);
	netdev_for_each_mc_addr(ha, netdev)
		emac_mac_multicast_addr_set(adpt, ha->addr);
}

/* Change the Maximum Transmission Unit (MTU) */
static int emac_change_mtu(struct net_device *netdev, int new_mtu)
{
	unsigned int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
	struct emac_adapter *adpt = netdev_priv(netdev);

	if ((max_frame < EMAC_MIN_ETH_FRAME_SIZE) ||
	    (max_frame > EMAC_MAX_ETH_FRAME_SIZE)) {
		netdev_err(adpt->netdev, "error: invalid MTU setting\n");
		return -EINVAL;
	}

	netif_info(adpt, hw, adpt->netdev,
		   "changing MTU from %d to %d\n", netdev->mtu,
		   new_mtu);
	netdev->mtu = new_mtu;

	if (netif_running(netdev))
		return emac_reinit_locked(adpt);

	return 0;
}

/* Called when the network interface is made active */
static int emac_open(struct net_device *netdev)
{
	struct emac_adapter *adpt = netdev_priv(netdev);
	int ret;

	/* allocate rx/tx dma buffer & descriptors */
	ret = emac_mac_rx_tx_rings_alloc_all(adpt);
	if (ret) {
		netdev_err(adpt->netdev, "error allocating rx/tx rings\n");
		return ret;
	}

	ret = emac_mac_up(adpt);
	if (ret) {
		emac_mac_rx_tx_rings_free_all(adpt);
		return ret;
	}

	emac_mac_start(adpt);

	return 0;
}

/* Called when the network interface is disabled */
static int emac_close(struct net_device *netdev)
{
	struct emac_adapter *adpt = netdev_priv(netdev);

	mutex_lock(&adpt->reset_lock);

	emac_mac_down(adpt);
	emac_mac_rx_tx_rings_free_all(adpt);

	mutex_unlock(&adpt->reset_lock);

	return 0;
}

/* Respond to a TX hang */
static void emac_tx_timeout(struct net_device *netdev)
{
	struct emac_adapter *adpt = netdev_priv(netdev);

	schedule_work(&adpt->work_thread);
}

/* IOCTL support for the interface */
static int emac_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
	if (!netif_running(netdev))
		return -EINVAL;

	if (!netdev->phydev)
		return -ENODEV;

	return phy_mii_ioctl(netdev->phydev, ifr, cmd);
}

/* Provide network statistics info for the interface */
static struct rtnl_link_stats64 *emac_get_stats64(struct net_device *netdev,
						  struct rtnl_link_stats64 *net_stats)
{
	struct emac_adapter *adpt = netdev_priv(netdev);
	unsigned int addr = REG_MAC_RX_STATUS_BIN;
	struct emac_stats *stats = &adpt->stats;
	u64 *stats_itr = &adpt->stats.rx_ok;
	u32 val;

	spin_lock(&stats->lock);

	while (addr <= REG_MAC_RX_STATUS_END) {
		val = readl_relaxed(adpt->base + addr);
		*stats_itr += val;
		stats_itr++;
		addr += sizeof(u32);
	}

	/* additional rx status */
	val = readl_relaxed(adpt->base + EMAC_RXMAC_STATC_REG23);
	adpt->stats.rx_crc_align += val;
	val = readl_relaxed(adpt->base + EMAC_RXMAC_STATC_REG24);
	adpt->stats.rx_jabbers += val;

	/* update tx status */
	addr = REG_MAC_TX_STATUS_BIN;
	stats_itr = &adpt->stats.tx_ok;

	while (addr <= REG_MAC_TX_STATUS_END) {
		val = readl_relaxed(adpt->base + addr);
		*stats_itr += val;
		++stats_itr;
		addr += sizeof(u32);
	}

	/* additional tx status */
	val = readl_relaxed(adpt->base + EMAC_TXMAC_STATC_REG25);
	adpt->stats.tx_col += val;

	/* return parsed statistics */
	net_stats->rx_packets = stats->rx_ok;
	net_stats->tx_packets = stats->tx_ok;
	net_stats->rx_bytes = stats->rx_byte_cnt;
	net_stats->tx_bytes = stats->tx_byte_cnt;
	net_stats->multicast = stats->rx_mcast;
	net_stats->collisions = stats->tx_1_col + stats->tx_2_col * 2 +
				stats->tx_late_col + stats->tx_abort_col;

	net_stats->rx_errors = stats->rx_frag + stats->rx_fcs_err +
			       stats->rx_len_err + stats->rx_sz_ov +
			       stats->rx_align_err;
	net_stats->rx_fifo_errors = stats->rx_rxf_ov;
	net_stats->rx_length_errors = stats->rx_len_err;
	net_stats->rx_crc_errors = stats->rx_fcs_err;
	net_stats->rx_frame_errors = stats->rx_align_err;
	net_stats->rx_over_errors = stats->rx_rxf_ov;
	net_stats->rx_missed_errors = stats->rx_rxf_ov;

	net_stats->tx_errors = stats->tx_late_col + stats->tx_abort_col +
			       stats->tx_underrun + stats->tx_trunc;
	net_stats->tx_fifo_errors = stats->tx_underrun;
	net_stats->tx_aborted_errors = stats->tx_abort_col;
	net_stats->tx_window_errors = stats->tx_late_col;

	spin_unlock(&stats->lock);

	return net_stats;
}

static const struct net_device_ops emac_netdev_ops = {
	.ndo_open		= emac_open,
	.ndo_stop		= emac_close,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_start_xmit		= emac_start_xmit,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_change_mtu		= emac_change_mtu,
	.ndo_do_ioctl		= emac_ioctl,
	.ndo_tx_timeout		= emac_tx_timeout,
	.ndo_get_stats64	= emac_get_stats64,
	.ndo_set_features	= emac_set_features,
	.ndo_set_rx_mode	= emac_rx_mode_set,
};

/* Watchdog task routine, called to reinitialize the EMAC */
static void emac_work_thread(struct work_struct *work)
{
	struct emac_adapter *adpt =
		container_of(work, struct emac_adapter, work_thread);

	emac_reinit_locked(adpt);
}

/* Initialize various data structures */
static void emac_init_adapter(struct emac_adapter *adpt)
{
	u32 reg;

	/* descriptors */
	adpt->tx_desc_cnt = EMAC_DEF_TX_DESCS;
	adpt->rx_desc_cnt = EMAC_DEF_RX_DESCS;

	/* dma */
	adpt->dma_order = emac_dma_ord_out;
	adpt->dmar_block = emac_dma_req_4096;
	adpt->dmaw_block = emac_dma_req_128;
	adpt->dmar_dly_cnt = DMAR_DLY_CNT_DEF;
	adpt->dmaw_dly_cnt = DMAW_DLY_CNT_DEF;
	adpt->tpd_burst = TXQ0_NUM_TPD_PREF_DEF;
	adpt->rfd_burst = RXQ0_NUM_RFD_PREF_DEF;

	/* irq moderator */
	reg = ((EMAC_DEF_RX_IRQ_MOD >> 1) << IRQ_MODERATOR2_INIT_SHFT) |
	      ((EMAC_DEF_TX_IRQ_MOD >> 1) << IRQ_MODERATOR_INIT_SHFT);
	adpt->irq_mod = reg;

	/* others */
	adpt->preamble = EMAC_PREAMBLE_DEF;
}

/* Get the clock */
static int emac_clks_get(struct platform_device *pdev,
			 struct emac_adapter *adpt)
{
	unsigned int i;

	for (i = 0; i < EMAC_CLK_CNT; i++) {
		struct clk *clk = devm_clk_get(&pdev->dev, emac_clk_name[i]);

		if (IS_ERR(clk)) {
			dev_err(&pdev->dev,
				"could not claim clock %s (error=%li)\n",
				emac_clk_name[i], PTR_ERR(clk));

			return PTR_ERR(clk);
		}

		adpt->clk[i] = clk;
	}

	return 0;
}

/* Initialize clocks */
static int emac_clks_phase1_init(struct platform_device *pdev,
				 struct emac_adapter *adpt)
{
	int ret;

	ret = emac_clks_get(pdev, adpt);
	if (ret)
		return ret;

	ret = clk_prepare_enable(adpt->clk[EMAC_CLK_AXI]);
	if (ret)
		return ret;

	ret = clk_prepare_enable(adpt->clk[EMAC_CLK_CFG_AHB]);
	if (ret)
		return ret;

	ret = clk_set_rate(adpt->clk[EMAC_CLK_HIGH_SPEED], 19200000);
	if (ret)
		return ret;

	return clk_prepare_enable(adpt->clk[EMAC_CLK_HIGH_SPEED]);
}

/* Enable clocks; emac_clks_phase1_init() must be called first */
static int emac_clks_phase2_init(struct platform_device *pdev,
				 struct emac_adapter *adpt)
{
	int ret;

	ret = clk_set_rate(adpt->clk[EMAC_CLK_TX], 125000000);
	if (ret)
		return ret;

	ret = clk_prepare_enable(adpt->clk[EMAC_CLK_TX]);
	if (ret)
		return ret;

	ret = clk_set_rate(adpt->clk[EMAC_CLK_HIGH_SPEED], 125000000);
	if (ret)
		return ret;

	ret = clk_set_rate(adpt->clk[EMAC_CLK_MDIO], 25000000);
	if (ret)
		return ret;

	ret = clk_prepare_enable(adpt->clk[EMAC_CLK_MDIO]);
	if (ret)
		return ret;

	ret = clk_prepare_enable(adpt->clk[EMAC_CLK_RX]);
	if (ret)
		return ret;

	return clk_prepare_enable(adpt->clk[EMAC_CLK_SYS]);
}

/* Disable and unprepare all EMAC clocks */
static void emac_clks_teardown(struct emac_adapter *adpt)
{
	unsigned int i;

	for (i = 0; i < EMAC_CLK_CNT; i++)
		clk_disable_unprepare(adpt->clk[i]);
}

/* Get the resources */
static int emac_probe_resources(struct platform_device *pdev,
				struct emac_adapter *adpt)
{
	struct net_device *netdev = adpt->netdev;
	struct resource *res;
	char maddr[ETH_ALEN];
	int ret = 0;

	/* get MAC address */
	if (device_get_mac_address(&pdev->dev, maddr, ETH_ALEN))
		ether_addr_copy(netdev->dev_addr, maddr);
	else
		eth_hw_addr_random(netdev);

	/* Core 0 interrupt */
	ret = platform_get_irq(pdev, 0);
	if (ret < 0) {
		dev_err(&pdev->dev,
			"error: missing core0 irq resource (error=%i)\n", ret);
		return ret;
	}
	adpt->irq.irq = ret;

	/* base register address */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	adpt->base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(adpt->base))
		return PTR_ERR(adpt->base);

	/* CSR register address */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	adpt->csr = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(adpt->csr))
		return PTR_ERR(adpt->csr);

	netdev->base_addr = (unsigned long)adpt->base;

	return 0;
}

static const struct of_device_id emac_dt_match[] = {
	{
		.compatible = "qcom,fsm9900-emac",
	},
	{}
};
MODULE_DEVICE_TABLE(of, emac_dt_match);

#if IS_ENABLED(CONFIG_ACPI)
static const struct acpi_device_id emac_acpi_match[] = {
	{
		.id = "QCOM8070",
	},
	{}
};
MODULE_DEVICE_TABLE(acpi, emac_acpi_match);
#endif

static int emac_probe(struct platform_device *pdev)
{
	struct net_device *netdev;
	struct emac_adapter *adpt;
	struct emac_phy *phy;
	u16 devid, revid;
	u32 reg;
	int ret;

	/* The EMAC itself is capable of 64-bit DMA, so try that first. */
	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (ret) {
		/* Some platforms may restrict the EMAC's address bus to less
		 * than the size of DDR. In this case, we need to try a
		 * smaller mask. We could try every possible smaller mask,
		 * but that's overkill.
		 * Instead, just fall back to 32-bit, which
		 * should always work.
		 */
		ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (ret) {
			dev_err(&pdev->dev, "could not set DMA mask\n");
			return ret;
		}
	}

	netdev = alloc_etherdev(sizeof(struct emac_adapter));
	if (!netdev)
		return -ENOMEM;

	dev_set_drvdata(&pdev->dev, netdev);
	SET_NETDEV_DEV(netdev, &pdev->dev);

	adpt = netdev_priv(netdev);
	adpt->netdev = netdev;
	adpt->msg_enable = EMAC_MSG_DEFAULT;

	phy = &adpt->phy;

	mutex_init(&adpt->reset_lock);
	spin_lock_init(&adpt->stats.lock);

	adpt->irq.mask = RX_PKT_INT0 | IMR_NORMAL_MASK;

	ret = emac_probe_resources(pdev, adpt);
	if (ret)
		goto err_undo_netdev;

	/* initialize clocks */
	ret = emac_clks_phase1_init(pdev, adpt);
	if (ret) {
		dev_err(&pdev->dev, "could not initialize clocks\n");
		goto err_undo_netdev;
	}

	netdev->watchdog_timeo = EMAC_WATCHDOG_TIME;
	netdev->irq = adpt->irq.irq;

	adpt->rrd_size = EMAC_RRD_SIZE;
	adpt->tpd_size = EMAC_TPD_SIZE;
	adpt->rfd_size = EMAC_RFD_SIZE;

	netdev->netdev_ops = &emac_netdev_ops;

	emac_init_adapter(adpt);

	/* init external phy */
	ret = emac_phy_config(pdev, adpt);
	if (ret)
		goto err_undo_clocks;

	/* init internal sgmii phy */
	ret = emac_sgmii_config(pdev, adpt);
	if (ret)
		goto err_undo_mdiobus;

	/* enable clocks */
	ret = emac_clks_phase2_init(pdev, adpt);
	if (ret) {
		dev_err(&pdev->dev, "could not initialize clocks\n");
		goto err_undo_mdiobus;
	}

	emac_mac_reset(adpt);

	/* set hw features */
	netdev->features = NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_RXCSUM |
			NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_HW_VLAN_CTAG_RX |
			NETIF_F_HW_VLAN_CTAG_TX;
	netdev->hw_features = netdev->features;

	netdev->vlan_features |= NETIF_F_SG | NETIF_F_HW_CSUM |
				 NETIF_F_TSO | NETIF_F_TSO6;

	INIT_WORK(&adpt->work_thread, emac_work_thread);

	/* Initialize queues */
	emac_mac_rx_tx_ring_init_all(pdev, adpt);

	netif_napi_add(netdev, &adpt->rx_q.napi, emac_napi_rtx,
		       NAPI_POLL_WEIGHT);

	ret = register_netdev(netdev);
	if (ret) {
		dev_err(&pdev->dev, "could not register net device\n");
		goto err_undo_napi;
	}

	reg = readl_relaxed(adpt->base + EMAC_DMA_MAS_CTRL);
	devid = (reg & DEV_ID_NUM_BMSK)  >> DEV_ID_NUM_SHFT;
	revid = (reg & DEV_REV_NUM_BMSK) >> DEV_REV_NUM_SHFT;
	reg = readl_relaxed(adpt->base + EMAC_CORE_HW_VERSION);

	netif_info(adpt, probe, netdev,
		   "hardware id %d.%d, hardware version %d.%d.%d\n",
		   devid, revid,
		   (reg & MAJOR_BMSK) >> MAJOR_SHFT,
		   (reg & MINOR_BMSK) >> MINOR_SHFT,
		   (reg & STEP_BMSK)  >> STEP_SHFT);

	return 0;

err_undo_napi:
	netif_napi_del(&adpt->rx_q.napi);
err_undo_mdiobus:
	mdiobus_unregister(adpt->mii_bus);
err_undo_clocks:
	emac_clks_teardown(adpt);
err_undo_netdev:
	free_netdev(netdev);

	return ret;
}

static int emac_remove(struct platform_device *pdev)
{
	struct net_device *netdev = dev_get_drvdata(&pdev->dev);
	struct emac_adapter *adpt = netdev_priv(netdev);

	unregister_netdev(netdev);
	netif_napi_del(&adpt->rx_q.napi);

	emac_clks_teardown(adpt);

	mdiobus_unregister(adpt->mii_bus);
	free_netdev(netdev);

	if (adpt->phy.digital)
		iounmap(adpt->phy.digital);
	iounmap(adpt->phy.base);

	return 0;
}

static struct platform_driver emac_platform_driver = {
	.probe	= emac_probe,
	.remove	= emac_remove,
	.driver = {
		.name		= "qcom-emac",
		.of_match_table	= emac_dt_match,
		.acpi_match_table = ACPI_PTR(emac_acpi_match),
	},
};

module_platform_driver(emac_platform_driver);

MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:qcom-emac");