/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

/* Qualcomm Technologies, Inc. EMAC Gigabit Ethernet Driver */

#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_net.h>
#include <linux/of_device.h>
#include <linux/phy.h>
#include <linux/platform_device.h>
#include <linux/acpi.h>
#include "emac.h"
#include "emac-mac.h"
#include "emac-phy.h"
#include "emac-sgmii.h"

#define EMAC_MSG_DEFAULT (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
			  NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP)

#define EMAC_RRD_SIZE		4
/* The RRD size if timestamping is enabled: */
#define EMAC_TS_RRD_SIZE	6
#define EMAC_TPD_SIZE		4
#define EMAC_RFD_SIZE		2

#define REG_MAC_RX_STATUS_BIN	EMAC_RXMAC_STATC_REG0
#define REG_MAC_RX_STATUS_END	EMAC_RXMAC_STATC_REG22
#define REG_MAC_TX_STATUS_BIN	EMAC_TXMAC_STATC_REG0
#define REG_MAC_TX_STATUS_END	EMAC_TXMAC_STATC_REG24

#define RXQ0_NUM_RFD_PREF_DEF	8
#define TXQ0_NUM_TPD_PREF_DEF	5

#define EMAC_PREAMBLE_DEF	7

#define DMAR_DLY_CNT_DEF	15
#define DMAW_DLY_CNT_DEF	4

#define IMR_NORMAL_MASK		(\
		ISR_ERROR	|\
		ISR_GPHY_LINK	|\
		ISR_TX_PKT	|\
		GPHY_WAKEUP_INT)

#define IMR_EXTENDED_MASK	(\
		SW_MAN_INT	|\
		ISR_OVER	|\
		ISR_ERROR	|\
		ISR_GPHY_LINK	|\
		ISR_TX_PKT	|\
		GPHY_WAKEUP_INT)

#define ISR_TX_PKT	(\
	TX_PKT_INT	|\
	TX_PKT_INT1	|\
	TX_PKT_INT2	|\
	TX_PKT_INT3)

#define ISR_GPHY_LINK	(\
	GPHY_LINK_UP_INT	|\
	GPHY_LINK_DOWN_INT)

#define ISR_OVER	(\
	RFD0_UR_INT	|\
	RFD1_UR_INT	|\
	RFD2_UR_INT	|\
	RFD3_UR_INT	|\
	RFD4_UR_INT	|\
	RXF_OF_INT	|\
	TXF_UR_INT)

#define ISR_ERROR	(\
	DMAR_TO_INT	|\
	DMAW_TO_INT	|\
	TXQ_TO_INT)

/* in sync with enum emac_clk_id */
static const char * const emac_clk_name[] = {
	"axi_clk", "cfg_ahb_clk", "high_speed_clk", "mdio_clk", "tx_clk",
	"rx_clk", "sys_clk"
};

void emac_reg_update32(void __iomem *addr, u32 mask, u32 val)
{
	u32 data = readl(addr);

	writel(((data & ~mask) | val), addr);
}

/* reinitialize */
int emac_reinit_locked(struct emac_adapter *adpt)
{
	int ret;

	mutex_lock(&adpt->reset_lock);

	emac_mac_down(adpt);
	emac_sgmii_reset(adpt);
	ret = emac_mac_up(adpt);

	mutex_unlock(&adpt->reset_lock);

	return ret;
}

/* NAPI */
static int emac_napi_rtx(struct napi_struct *napi, int budget)
{
	struct emac_rx_queue *rx_q =
		container_of(napi, struct emac_rx_queue, napi);
	struct emac_adapter *adpt = netdev_priv(rx_q->netdev);
	struct emac_irq *irq = rx_q->irq;
	int work_done = 0;

	emac_mac_rx_process(adpt, rx_q, &work_done, budget);

	if (work_done < budget) {
		napi_complete_done(napi, work_done);

		irq->mask |= rx_q->intr;
		writel(irq->mask, adpt->base + EMAC_INT_MASK);
	}

	return work_done;
}

/* Transmit the packet */
static int emac_start_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct emac_adapter *adpt = netdev_priv(netdev);

	return emac_mac_tx_buf_send(adpt, &adpt->tx_q, skb);
}

irqreturn_t emac_isr(int _irq, void *data)
{
	struct emac_irq *irq = data;
	struct emac_adapter *adpt =
		container_of(irq, struct emac_adapter, irq);
	struct emac_rx_queue *rx_q = &adpt->rx_q;
	u32 isr, status;

	/* disable the interrupt */
	writel(0, adpt->base + EMAC_INT_MASK);

	isr = readl_relaxed(adpt->base + EMAC_INT_STATUS);

	status = isr & irq->mask;
	if (status == 0)
		goto exit;

	if (status & ISR_ERROR) {
		netif_warn(adpt, intr, adpt->netdev,
			   "warning: error irq status 0x%x\n",
			   status & ISR_ERROR);
		/* reset MAC */
		schedule_work(&adpt->work_thread);
	}

	/* Schedule the napi for receive queue with interrupt
	 * status bit set
	 */
	if (status & rx_q->intr) {
		if (napi_schedule_prep(&rx_q->napi)) {
			irq->mask &= ~rx_q->intr;
			__napi_schedule(&rx_q->napi);
		}
	}

	if (status & TX_PKT_INT)
		emac_mac_tx_process(adpt, &adpt->tx_q);

	if (status & ISR_OVER)
		net_warn_ratelimited("warning: TX/RX overflow\n");

	/* link event */
	if (status & ISR_GPHY_LINK)
		phy_mac_interrupt(adpt->phydev, !!(status & GPHY_LINK_UP_INT));

exit:
	/* enable the interrupt */
	writel(irq->mask, adpt->base + EMAC_INT_MASK);

	return IRQ_HANDLED;
}

/* Configure VLAN tag strip/insert feature */
static int emac_set_features(struct net_device *netdev,
			     netdev_features_t features)
{
	netdev_features_t changed = features ^ netdev->features;
	struct emac_adapter *adpt = netdev_priv(netdev);

	/* We only need to reprogram the hardware if the VLAN tag features
	 * have changed, and if it's already running.
	 */
	if (!(changed & (NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX)))
		return 0;

	if (!netif_running(netdev))
		return 0;

	/* emac_mac_mode_config() uses netdev->features to configure the EMAC,
	 * so make sure it's set first.
	 */
	netdev->features = features;

	return emac_reinit_locked(adpt);
}

/* Configure Multicast and Promiscuous modes */
static void emac_rx_mode_set(struct net_device *netdev)
{
	struct emac_adapter *adpt = netdev_priv(netdev);
	struct netdev_hw_addr *ha;

	emac_mac_mode_config(adpt);

	/* update multicast address filtering */
	emac_mac_multicast_addr_clear(adpt);
	netdev_for_each_mc_addr(ha, netdev)
		emac_mac_multicast_addr_set(adpt, ha->addr);
}

/* Change the Maximum Transfer Unit (MTU) */
static int emac_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct emac_adapter *adpt = netdev_priv(netdev);

	netif_info(adpt, hw, adpt->netdev,
		   "changing MTU from %d to %d\n", netdev->mtu,
		   new_mtu);
	netdev->mtu = new_mtu;

	if (netif_running(netdev))
		return emac_reinit_locked(adpt);

	return 0;
}

/* Called when the network interface is made active */
static int emac_open(struct net_device *netdev)
{
	struct emac_adapter *adpt = netdev_priv(netdev);
	struct emac_irq *irq = &adpt->irq;
	int ret;

	ret = request_irq(irq->irq, emac_isr, 0, "emac-core0", irq);
	if (ret) {
		netdev_err(adpt->netdev, "could not request emac-core0 irq\n");
		return ret;
	}

	/* allocate rx/tx dma buffer & descriptors */
	ret = emac_mac_rx_tx_rings_alloc_all(adpt);
	if (ret) {
		netdev_err(adpt->netdev, "error allocating rx/tx rings\n");
		free_irq(irq->irq, irq);
		return ret;
	}

	ret = emac_mac_up(adpt);
	if (ret) {
		emac_mac_rx_tx_rings_free_all(adpt);
		free_irq(irq->irq, irq);
		return ret;
	}

	ret = adpt->phy.open(adpt);
	if (ret) {
		emac_mac_down(adpt);
		emac_mac_rx_tx_rings_free_all(adpt);
		free_irq(irq->irq, irq);
		return ret;
	}

	return 0;
}

/* Called when the network interface is disabled */
static int emac_close(struct net_device *netdev)
{
	struct emac_adapter *adpt = netdev_priv(netdev);

	mutex_lock(&adpt->reset_lock);

	adpt->phy.close(adpt);
	emac_mac_down(adpt);
	emac_mac_rx_tx_rings_free_all(adpt);

	free_irq(adpt->irq.irq, &adpt->irq);

	mutex_unlock(&adpt->reset_lock);

	return 0;
}

/* Respond to a TX hang */
static void emac_tx_timeout(struct net_device *netdev)
{
	struct emac_adapter *adpt = netdev_priv(netdev);

	schedule_work(&adpt->work_thread);
}

/* IOCTL support for the interface */
static int emac_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
	if (!netif_running(netdev))
		return -EINVAL;

	if (!netdev->phydev)
		return -ENODEV;

	return phy_mii_ioctl(netdev->phydev, ifr, cmd);
}

/**
 * emac_update_hw_stats - read the EMAC stat registers
 *
 * Reads the stats registers and writes the values to adpt->stats.
 *
 * adpt->stats.lock must be held while calling this function,
 * and while reading from adpt->stats.
 */
void emac_update_hw_stats(struct emac_adapter *adpt)
{
	struct emac_stats *stats = &adpt->stats;
	u64 *stats_itr = &adpt->stats.rx_ok;
	void __iomem *base = adpt->base;
	unsigned int addr;

	addr = REG_MAC_RX_STATUS_BIN;
	while (addr <= REG_MAC_RX_STATUS_END) {
		*stats_itr += readl_relaxed(base + addr);
		stats_itr++;
		addr += sizeof(u32);
	}

	/* additional rx status */
	stats->rx_crc_align += readl_relaxed(base + EMAC_RXMAC_STATC_REG23);
	stats->rx_jabbers += readl_relaxed(base + EMAC_RXMAC_STATC_REG24);

	/* update tx status */
	addr = REG_MAC_TX_STATUS_BIN;
	stats_itr = &stats->tx_ok;

	while (addr <= REG_MAC_TX_STATUS_END) {
		*stats_itr += readl_relaxed(base + addr);
		stats_itr++;
		addr += sizeof(u32);
	}

	/* additional tx status */
	stats->tx_col += readl_relaxed(base + EMAC_TXMAC_STATC_REG25);
}

/* Provide network statistics info for the interface */
static void emac_get_stats64(struct net_device *netdev,
			     struct rtnl_link_stats64 *net_stats)
{
	struct emac_adapter *adpt = netdev_priv(netdev);
	struct emac_stats *stats = &adpt->stats;

	spin_lock(&stats->lock);

	emac_update_hw_stats(adpt);

	/* return parsed statistics */
	net_stats->rx_packets = stats->rx_ok;
	net_stats->tx_packets = stats->tx_ok;
	net_stats->rx_bytes = stats->rx_byte_cnt;
	net_stats->tx_bytes = stats->tx_byte_cnt;
	net_stats->multicast = stats->rx_mcast;
	net_stats->collisions = stats->tx_1_col + stats->tx_2_col * 2 +
				stats->tx_late_col + stats->tx_abort_col;

	net_stats->rx_errors = stats->rx_frag + stats->rx_fcs_err +
			       stats->rx_len_err + stats->rx_sz_ov +
			       stats->rx_align_err;
	net_stats->rx_fifo_errors = stats->rx_rxf_ov;
	net_stats->rx_length_errors = stats->rx_len_err;
	net_stats->rx_crc_errors = stats->rx_fcs_err;
	net_stats->rx_frame_errors = stats->rx_align_err;
	net_stats->rx_over_errors = stats->rx_rxf_ov;
	net_stats->rx_missed_errors = stats->rx_rxf_ov;

	net_stats->tx_errors = stats->tx_late_col + stats->tx_abort_col +
			       stats->tx_underrun + stats->tx_trunc;
	net_stats->tx_fifo_errors = stats->tx_underrun;
	net_stats->tx_aborted_errors = stats->tx_abort_col;
	net_stats->tx_window_errors = stats->tx_late_col;

	spin_unlock(&stats->lock);
}

static const struct net_device_ops emac_netdev_ops = {
	.ndo_open		= emac_open,
	.ndo_stop		= emac_close,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_start_xmit		= emac_start_xmit,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_change_mtu		= emac_change_mtu,
	.ndo_do_ioctl		= emac_ioctl,
	.ndo_tx_timeout		= emac_tx_timeout,
	.ndo_get_stats64	= emac_get_stats64,
	.ndo_set_features	= emac_set_features,
	.ndo_set_rx_mode	= emac_rx_mode_set,
};

/* Watchdog task routine, called to reinitialize the EMAC */
static void emac_work_thread(struct work_struct *work)
{
	struct emac_adapter *adpt =
		container_of(work, struct emac_adapter, work_thread);

	emac_reinit_locked(adpt);
}

/* Initialize various data structures */
static void emac_init_adapter(struct emac_adapter *adpt)
{
	u32 reg;

	adpt->rrd_size = EMAC_RRD_SIZE;
	adpt->tpd_size = EMAC_TPD_SIZE;
	adpt->rfd_size = EMAC_RFD_SIZE;

	/* descriptors */
	adpt->tx_desc_cnt = EMAC_DEF_TX_DESCS;
	adpt->rx_desc_cnt = EMAC_DEF_RX_DESCS;

	/* dma */
	adpt->dma_order = emac_dma_ord_out;
	adpt->dmar_block = emac_dma_req_4096;
	adpt->dmaw_block = emac_dma_req_128;
	adpt->dmar_dly_cnt = DMAR_DLY_CNT_DEF;
	adpt->dmaw_dly_cnt = DMAW_DLY_CNT_DEF;
	adpt->tpd_burst = TXQ0_NUM_TPD_PREF_DEF;
	adpt->rfd_burst = RXQ0_NUM_RFD_PREF_DEF;

	/* irq moderator */
	reg = ((EMAC_DEF_RX_IRQ_MOD >> 1) << IRQ_MODERATOR2_INIT_SHFT) |
	      ((EMAC_DEF_TX_IRQ_MOD >> 1) << IRQ_MODERATOR_INIT_SHFT);
	adpt->irq_mod = reg;

	/* others */
	adpt->preamble = EMAC_PREAMBLE_DEF;

	/* default to automatic flow control */
	adpt->automatic = true;
}

/* Get the clocks */
static int emac_clks_get(struct platform_device *pdev,
			 struct emac_adapter *adpt)
{
	unsigned int i;

	for (i = 0; i < EMAC_CLK_CNT; i++) {
		struct clk *clk = devm_clk_get(&pdev->dev, emac_clk_name[i]);

		if (IS_ERR(clk)) {
			dev_err(&pdev->dev,
				"could not claim clock %s (error=%li)\n",
				emac_clk_name[i], PTR_ERR(clk));

			return PTR_ERR(clk);
		}

		adpt->clk[i] = clk;
	}

	return 0;
}

/* Initialize clocks */
static int emac_clks_phase1_init(struct platform_device *pdev,
				 struct emac_adapter *adpt)
{
	int ret;

	/* On ACPI platforms, clocks are controlled by firmware and/or
	 * ACPI, not by drivers.
	 */
	if (has_acpi_companion(&pdev->dev))
		return 0;

	ret = emac_clks_get(pdev, adpt);
	if (ret)
		return ret;

	ret = clk_prepare_enable(adpt->clk[EMAC_CLK_AXI]);
	if (ret)
		return ret;

	ret = clk_prepare_enable(adpt->clk[EMAC_CLK_CFG_AHB]);
	if (ret)
		return ret;

	ret = clk_set_rate(adpt->clk[EMAC_CLK_HIGH_SPEED], 19200000);
	if (ret)
		return ret;

	return clk_prepare_enable(adpt->clk[EMAC_CLK_HIGH_SPEED]);
}

/* Enable clocks; emac_clks_phase1_init() must be called first */
static int emac_clks_phase2_init(struct platform_device *pdev,
				 struct emac_adapter *adpt)
{
	int ret;

	if (has_acpi_companion(&pdev->dev))
		return 0;

	ret = clk_set_rate(adpt->clk[EMAC_CLK_TX], 125000000);
	if (ret)
		return ret;

	ret = clk_prepare_enable(adpt->clk[EMAC_CLK_TX]);
	if (ret)
		return ret;

	ret = clk_set_rate(adpt->clk[EMAC_CLK_HIGH_SPEED], 125000000);
	if (ret)
		return ret;

	ret = clk_set_rate(adpt->clk[EMAC_CLK_MDIO], 25000000);
	if (ret)
		return ret;

	ret = clk_prepare_enable(adpt->clk[EMAC_CLK_MDIO]);
	if (ret)
		return ret;

	ret = clk_prepare_enable(adpt->clk[EMAC_CLK_RX]);
	if (ret)
		return ret;

	return clk_prepare_enable(adpt->clk[EMAC_CLK_SYS]);
}

static void emac_clks_teardown(struct emac_adapter *adpt)
{
	unsigned int i;

	for (i = 0; i < EMAC_CLK_CNT; i++)
		clk_disable_unprepare(adpt->clk[i]);
}

/* Get the resources */
static int emac_probe_resources(struct platform_device *pdev,
				struct emac_adapter *adpt)
{
	struct net_device *netdev = adpt->netdev;
	struct resource *res;
	char maddr[ETH_ALEN];
	int ret = 0;

	/* get mac address */
	if (device_get_mac_address(&pdev->dev, maddr, ETH_ALEN))
		ether_addr_copy(netdev->dev_addr, maddr);
	else
		eth_hw_addr_random(netdev);

	/* Core 0 interrupt */
	ret = platform_get_irq(pdev, 0);
	if (ret < 0) {
		dev_err(&pdev->dev,
			"error: missing core0 irq resource (error=%i)\n", ret);
		return ret;
	}
	adpt->irq.irq = ret;

	/* base register address */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	adpt->base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(adpt->base))
		return PTR_ERR(adpt->base);

	/* CSR register address */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	adpt->csr = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(adpt->csr))
		return PTR_ERR(adpt->csr);

	netdev->base_addr = (unsigned long)adpt->base;

	return 0;
}

static const struct of_device_id emac_dt_match[] = {
	{
		.compatible = "qcom,fsm9900-emac",
	},
	{}
};
MODULE_DEVICE_TABLE(of, emac_dt_match);

#if IS_ENABLED(CONFIG_ACPI)
static const struct acpi_device_id emac_acpi_match[] = {
	{
		.id = "QCOM8070",
	},
	{}
};
MODULE_DEVICE_TABLE(acpi, emac_acpi_match);
#endif

static int emac_probe(struct platform_device *pdev)
{
	struct net_device *netdev;
	struct emac_adapter *adpt;
	struct emac_sgmii *phy;
	u16 devid, revid;
	u32 reg;
	int ret;

	/* The EMAC itself is capable of 64-bit DMA, so try that first. */
	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (ret) {
		/* Some platforms may restrict the EMAC's address bus to less
		 * than the size of DDR. In this case, we need to try a
		 * smaller mask. We could try every possible smaller mask,
		 * but that's overkill. Instead, just fall back to 32-bit,
		 * which should always work.
		 */
		ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (ret) {
			dev_err(&pdev->dev, "could not set DMA mask\n");
			return ret;
		}
	}

	netdev = alloc_etherdev(sizeof(struct emac_adapter));
	if (!netdev)
		return -ENOMEM;

	dev_set_drvdata(&pdev->dev, netdev);
	SET_NETDEV_DEV(netdev, &pdev->dev);
	emac_set_ethtool_ops(netdev);

	adpt = netdev_priv(netdev);
	adpt->netdev = netdev;
	adpt->msg_enable = EMAC_MSG_DEFAULT;

	phy = &adpt->phy;
	atomic_set(&phy->decode_error_count, 0);

	mutex_init(&adpt->reset_lock);
	spin_lock_init(&adpt->stats.lock);

	adpt->irq.mask = RX_PKT_INT0 | IMR_NORMAL_MASK;

	ret = emac_probe_resources(pdev, adpt);
	if (ret)
		goto err_undo_netdev;

	/* initialize clocks */
	ret = emac_clks_phase1_init(pdev, adpt);
	if (ret) {
		dev_err(&pdev->dev, "could not initialize clocks\n");
		goto err_undo_netdev;
	}

	netdev->watchdog_timeo = EMAC_WATCHDOG_TIME;
	netdev->irq = adpt->irq.irq;

	netdev->netdev_ops = &emac_netdev_ops;

	emac_init_adapter(adpt);

	/* init external phy */
	ret = emac_phy_config(pdev, adpt);
	if (ret)
		goto err_undo_clocks;

	/* init internal sgmii phy */
	ret = emac_sgmii_config(pdev, adpt);
	if (ret)
		goto err_undo_mdiobus;

	/* enable clocks */
	ret = emac_clks_phase2_init(pdev, adpt);
	if (ret) {
		dev_err(&pdev->dev, "could not initialize clocks\n");
		goto err_undo_mdiobus;
	}

	emac_mac_reset(adpt);

	/* set hw features */
	netdev->features = NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_RXCSUM |
			NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_HW_VLAN_CTAG_RX |
			NETIF_F_HW_VLAN_CTAG_TX;
	netdev->hw_features = netdev->features;

	netdev->vlan_features |= NETIF_F_SG | NETIF_F_HW_CSUM |
				 NETIF_F_TSO | NETIF_F_TSO6;

	/* MTU range: 46 - 9194 */
	netdev->min_mtu = EMAC_MIN_ETH_FRAME_SIZE -
			  (ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
	netdev->max_mtu = EMAC_MAX_ETH_FRAME_SIZE -
			  (ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);

	INIT_WORK(&adpt->work_thread, emac_work_thread);

	/* Initialize queues */
	emac_mac_rx_tx_ring_init_all(pdev, adpt);

	netif_napi_add(netdev, &adpt->rx_q.napi, emac_napi_rtx,
		       NAPI_POLL_WEIGHT);

	ret = register_netdev(netdev);
	if (ret) {
		dev_err(&pdev->dev, "could not register net device\n");
		goto err_undo_napi;
	}

	reg = readl_relaxed(adpt->base + EMAC_DMA_MAS_CTRL);
	devid = (reg & DEV_ID_NUM_BMSK) >> DEV_ID_NUM_SHFT;
	revid = (reg & DEV_REV_NUM_BMSK) >> DEV_REV_NUM_SHFT;
	reg = readl_relaxed(adpt->base + EMAC_CORE_HW_VERSION);

	netif_info(adpt, probe, netdev,
		   "hardware id %d.%d, hardware version %d.%d.%d\n",
		   devid, revid,
		   (reg & MAJOR_BMSK) >> MAJOR_SHFT,
		   (reg & MINOR_BMSK) >> MINOR_SHFT,
		   (reg & STEP_BMSK) >> STEP_SHFT);

	return 0;

err_undo_napi:
	netif_napi_del(&adpt->rx_q.napi);
err_undo_mdiobus:
	put_device(&adpt->phydev->mdio.dev);
	mdiobus_unregister(adpt->mii_bus);
err_undo_clocks:
	emac_clks_teardown(adpt);
err_undo_netdev:
	free_netdev(netdev);

	return ret;
}

static int emac_remove(struct platform_device *pdev)
{
	struct net_device *netdev = dev_get_drvdata(&pdev->dev);
	struct emac_adapter *adpt = netdev_priv(netdev);

	unregister_netdev(netdev);
	netif_napi_del(&adpt->rx_q.napi);

	emac_clks_teardown(adpt);

	put_device(&adpt->phydev->mdio.dev);
	mdiobus_unregister(adpt->mii_bus);
	free_netdev(netdev);

	if (adpt->phy.digital)
		iounmap(adpt->phy.digital);
	iounmap(adpt->phy.base);

	return 0;
}

static struct platform_driver emac_platform_driver = {
	.probe	= emac_probe,
	.remove	= emac_remove,
	.driver = {
		.name		  = "qcom-emac",
		.of_match_table	  = emac_dt_match,
		.acpi_match_table = ACPI_PTR(emac_acpi_match),
	},
};

module_platform_driver(emac_platform_driver);

MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:qcom-emac");