1 // SPDX-License-Identifier: GPL-2.0 2 /* Ethernet device driver for Cortina Systems Gemini SoC 3 * Also known as the StorLink SL3512 and SL3516 (SL351x) or Lepus 4 * Net Engine and Gigabit Ethernet MAC (GMAC) 5 * This hardware contains a TCP Offload Engine (TOE) but currently the 6 * driver does not make use of it. 7 * 8 * Authors: 9 * Linus Walleij <linus.walleij@linaro.org> 10 * Tobias Waldvogel <tobias.waldvogel@gmail.com> (OpenWRT) 11 * Michał Mirosław <mirq-linux@rere.qmqm.pl> 12 * Paulius Zaleckas <paulius.zaleckas@gmail.com> 13 * Giuseppe De Robertis <Giuseppe.DeRobertis@ba.infn.it> 14 * Gary Chen & Ch Hsu Storlink Semiconductor 15 */ 16 #include <linux/kernel.h> 17 #include <linux/init.h> 18 #include <linux/module.h> 19 #include <linux/platform_device.h> 20 #include <linux/spinlock.h> 21 #include <linux/slab.h> 22 #include <linux/dma-mapping.h> 23 #include <linux/cache.h> 24 #include <linux/interrupt.h> 25 #include <linux/reset.h> 26 #include <linux/clk.h> 27 #include <linux/of.h> 28 #include <linux/of_mdio.h> 29 #include <linux/of_net.h> 30 #include <linux/of_platform.h> 31 #include <linux/etherdevice.h> 32 #include <linux/if_vlan.h> 33 #include <linux/skbuff.h> 34 #include <linux/phy.h> 35 #include <linux/crc32.h> 36 #include <linux/ethtool.h> 37 #include <linux/tcp.h> 38 #include <linux/u64_stats_sync.h> 39 40 #include <linux/in.h> 41 #include <linux/ip.h> 42 #include <linux/ipv6.h> 43 44 #include "gemini.h" 45 46 #define DRV_NAME "gmac-gemini" 47 48 #define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK) 49 static int debug = -1; 50 module_param(debug, int, 0); 51 MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)"); 52 53 #define HSIZE_8 0x00 54 #define HSIZE_16 0x01 55 #define HSIZE_32 0x02 56 57 #define HBURST_SINGLE 0x00 58 #define HBURST_INCR 0x01 59 #define HBURST_INCR4 0x02 60 #define HBURST_INCR8 0x03 61 62 #define HPROT_DATA_CACHE BIT(0) 63 #define HPROT_PRIVILIGED BIT(1) 64 #define HPROT_BUFFERABLE BIT(2) 65 #define HPROT_CACHABLE BIT(3) 66 67 #define DEFAULT_RX_COALESCE_NSECS 0 68 #define DEFAULT_GMAC_RXQ_ORDER 9 69 #define DEFAULT_GMAC_TXQ_ORDER 8 70 #define DEFAULT_RX_BUF_ORDER 11 71 #define TX_MAX_FRAGS 16 72 #define TX_QUEUE_NUM 1 /* max: 6 */ 73 #define RX_MAX_ALLOC_ORDER 2 74 75 #define GMAC0_IRQ0_2 (GMAC0_TXDERR_INT_BIT | GMAC0_TXPERR_INT_BIT | \ 76 GMAC0_RXDERR_INT_BIT | GMAC0_RXPERR_INT_BIT) 77 #define GMAC0_IRQ0_TXQ0_INTS (GMAC0_SWTQ00_EOF_INT_BIT | \ 78 GMAC0_SWTQ00_FIN_INT_BIT) 79 #define GMAC0_IRQ4_8 (GMAC0_MIB_INT_BIT | GMAC0_RX_OVERRUN_INT_BIT) 80 81 #define GMAC_OFFLOAD_FEATURES (NETIF_F_SG | NETIF_F_IP_CSUM | \ 82 NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM) 83 84 /** 85 * struct gmac_queue_page - page buffer per-page info 86 * @page: the page struct 87 * @mapping: the dma address handle 88 */ 89 struct gmac_queue_page { 90 struct page *page; 91 dma_addr_t mapping; 92 }; 93 94 struct gmac_txq { 95 struct gmac_txdesc *ring; 96 struct sk_buff **skb; 97 unsigned int cptr; 98 unsigned int noirq_packets; 99 }; 100 101 struct gemini_ethernet; 102 103 struct gemini_ethernet_port { 104 u8 id; /* 0 or 1 */ 105 106 struct gemini_ethernet *geth; 107 struct net_device *netdev; 108 struct device *dev; 109 void __iomem *dma_base; 110 void __iomem *gmac_base; 111 struct clk *pclk; 112 struct reset_control *reset; 113 int irq; 114 __le32 mac_addr[3]; 115 116 void __iomem *rxq_rwptr; 117 struct gmac_rxdesc *rxq_ring; 118 unsigned int rxq_order; 119 120 struct napi_struct napi; 121 struct hrtimer rx_coalesce_timer; 122 unsigned int 
rx_coalesce_nsecs; 123 unsigned int freeq_refill; 124 struct gmac_txq txq[TX_QUEUE_NUM]; 125 unsigned int txq_order; 126 unsigned int irq_every_tx_packets; 127 128 dma_addr_t rxq_dma_base; 129 dma_addr_t txq_dma_base; 130 131 unsigned int msg_enable; 132 spinlock_t config_lock; /* Locks config register */ 133 134 struct u64_stats_sync tx_stats_syncp; 135 struct u64_stats_sync rx_stats_syncp; 136 struct u64_stats_sync ir_stats_syncp; 137 138 struct rtnl_link_stats64 stats; 139 u64 hw_stats[RX_STATS_NUM]; 140 u64 rx_stats[RX_STATUS_NUM]; 141 u64 rx_csum_stats[RX_CHKSUM_NUM]; 142 u64 rx_napi_exits; 143 u64 tx_frag_stats[TX_MAX_FRAGS]; 144 u64 tx_frags_linearized; 145 u64 tx_hw_csummed; 146 }; 147 148 struct gemini_ethernet { 149 struct device *dev; 150 void __iomem *base; 151 struct gemini_ethernet_port *port0; 152 struct gemini_ethernet_port *port1; 153 bool initialized; 154 155 spinlock_t irq_lock; /* Locks IRQ-related registers */ 156 unsigned int freeq_order; 157 unsigned int freeq_frag_order; 158 struct gmac_rxdesc *freeq_ring; 159 dma_addr_t freeq_dma_base; 160 struct gmac_queue_page *freeq_pages; 161 unsigned int num_freeq_pages; 162 spinlock_t freeq_lock; /* Locks queue from reentrance */ 163 }; 164 165 #define GMAC_STATS_NUM ( \ 166 RX_STATS_NUM + RX_STATUS_NUM + RX_CHKSUM_NUM + 1 + \ 167 TX_MAX_FRAGS + 2) 168 169 static const char gmac_stats_strings[GMAC_STATS_NUM][ETH_GSTRING_LEN] = { 170 "GMAC_IN_DISCARDS", 171 "GMAC_IN_ERRORS", 172 "GMAC_IN_MCAST", 173 "GMAC_IN_BCAST", 174 "GMAC_IN_MAC1", 175 "GMAC_IN_MAC2", 176 "RX_STATUS_GOOD_FRAME", 177 "RX_STATUS_TOO_LONG_GOOD_CRC", 178 "RX_STATUS_RUNT_FRAME", 179 "RX_STATUS_SFD_NOT_FOUND", 180 "RX_STATUS_CRC_ERROR", 181 "RX_STATUS_TOO_LONG_BAD_CRC", 182 "RX_STATUS_ALIGNMENT_ERROR", 183 "RX_STATUS_TOO_LONG_BAD_ALIGN", 184 "RX_STATUS_RX_ERR", 185 "RX_STATUS_DA_FILTERED", 186 "RX_STATUS_BUFFER_FULL", 187 "RX_STATUS_11", 188 "RX_STATUS_12", 189 "RX_STATUS_13", 190 "RX_STATUS_14", 191 "RX_STATUS_15", 192 "RX_CHKSUM_IP_UDP_TCP_OK", 193 "RX_CHKSUM_IP_OK_ONLY", 194 "RX_CHKSUM_NONE", 195 "RX_CHKSUM_3", 196 "RX_CHKSUM_IP_ERR_UNKNOWN", 197 "RX_CHKSUM_IP_ERR", 198 "RX_CHKSUM_TCP_UDP_ERR", 199 "RX_CHKSUM_7", 200 "RX_NAPI_EXITS", 201 "TX_FRAGS[1]", 202 "TX_FRAGS[2]", 203 "TX_FRAGS[3]", 204 "TX_FRAGS[4]", 205 "TX_FRAGS[5]", 206 "TX_FRAGS[6]", 207 "TX_FRAGS[7]", 208 "TX_FRAGS[8]", 209 "TX_FRAGS[9]", 210 "TX_FRAGS[10]", 211 "TX_FRAGS[11]", 212 "TX_FRAGS[12]", 213 "TX_FRAGS[13]", 214 "TX_FRAGS[14]", 215 "TX_FRAGS[15]", 216 "TX_FRAGS[16+]", 217 "TX_FRAGS_LINEARIZED", 218 "TX_HW_CSUMMED", 219 }; 220 221 static void gmac_dump_dma_state(struct net_device *netdev); 222 223 static void gmac_update_config0_reg(struct net_device *netdev, 224 u32 val, u32 vmask) 225 { 226 struct gemini_ethernet_port *port = netdev_priv(netdev); 227 unsigned long flags; 228 u32 reg; 229 230 spin_lock_irqsave(&port->config_lock, flags); 231 232 reg = readl(port->gmac_base + GMAC_CONFIG0); 233 reg = (reg & ~vmask) | val; 234 writel(reg, port->gmac_base + GMAC_CONFIG0); 235 236 spin_unlock_irqrestore(&port->config_lock, flags); 237 } 238 239 static void gmac_enable_tx_rx(struct net_device *netdev) 240 { 241 struct gemini_ethernet_port *port = netdev_priv(netdev); 242 unsigned long flags; 243 u32 reg; 244 245 spin_lock_irqsave(&port->config_lock, flags); 246 247 reg = readl(port->gmac_base + GMAC_CONFIG0); 248 reg &= ~CONFIG0_TX_RX_DISABLE; 249 writel(reg, port->gmac_base + GMAC_CONFIG0); 250 251 spin_unlock_irqrestore(&port->config_lock, flags); 252 } 253 254 static void 
gmac_disable_tx_rx(struct net_device *netdev) 255 { 256 struct gemini_ethernet_port *port = netdev_priv(netdev); 257 unsigned long flags; 258 u32 val; 259 260 spin_lock_irqsave(&port->config_lock, flags); 261 262 val = readl(port->gmac_base + GMAC_CONFIG0); 263 val |= CONFIG0_TX_RX_DISABLE; 264 writel(val, port->gmac_base + GMAC_CONFIG0); 265 266 spin_unlock_irqrestore(&port->config_lock, flags); 267 268 mdelay(10); /* let GMAC consume packet */ 269 } 270 271 static void gmac_set_flow_control(struct net_device *netdev, bool tx, bool rx) 272 { 273 struct gemini_ethernet_port *port = netdev_priv(netdev); 274 unsigned long flags; 275 u32 val; 276 277 spin_lock_irqsave(&port->config_lock, flags); 278 279 val = readl(port->gmac_base + GMAC_CONFIG0); 280 val &= ~CONFIG0_FLOW_CTL; 281 if (tx) 282 val |= CONFIG0_FLOW_TX; 283 if (rx) 284 val |= CONFIG0_FLOW_RX; 285 writel(val, port->gmac_base + GMAC_CONFIG0); 286 287 spin_unlock_irqrestore(&port->config_lock, flags); 288 } 289 290 static void gmac_speed_set(struct net_device *netdev) 291 { 292 struct gemini_ethernet_port *port = netdev_priv(netdev); 293 struct phy_device *phydev = netdev->phydev; 294 union gmac_status status, old_status; 295 int pause_tx = 0; 296 int pause_rx = 0; 297 298 status.bits32 = readl(port->gmac_base + GMAC_STATUS); 299 old_status.bits32 = status.bits32; 300 status.bits.link = phydev->link; 301 status.bits.duplex = phydev->duplex; 302 303 switch (phydev->speed) { 304 case 1000: 305 status.bits.speed = GMAC_SPEED_1000; 306 if (phy_interface_mode_is_rgmii(phydev->interface)) 307 status.bits.mii_rmii = GMAC_PHY_RGMII_1000; 308 netdev_dbg(netdev, "connect %s to RGMII @ 1Gbit\n", 309 phydev_name(phydev)); 310 break; 311 case 100: 312 status.bits.speed = GMAC_SPEED_100; 313 if (phy_interface_mode_is_rgmii(phydev->interface)) 314 status.bits.mii_rmii = GMAC_PHY_RGMII_100_10; 315 netdev_dbg(netdev, "connect %s to RGMII @ 100 Mbit\n", 316 phydev_name(phydev)); 317 break; 318 case 10: 319 status.bits.speed = GMAC_SPEED_10; 320 if (phy_interface_mode_is_rgmii(phydev->interface)) 321 status.bits.mii_rmii = GMAC_PHY_RGMII_100_10; 322 netdev_dbg(netdev, "connect %s to RGMII @ 10 Mbit\n", 323 phydev_name(phydev)); 324 break; 325 default: 326 netdev_warn(netdev, "Unsupported PHY speed (%d) on %s\n", 327 phydev->speed, phydev_name(phydev)); 328 } 329 330 if (phydev->duplex == DUPLEX_FULL) { 331 u16 lcladv = phy_read(phydev, MII_ADVERTISE); 332 u16 rmtadv = phy_read(phydev, MII_LPA); 333 u8 cap = mii_resolve_flowctrl_fdx(lcladv, rmtadv); 334 335 if (cap & FLOW_CTRL_RX) 336 pause_rx = 1; 337 if (cap & FLOW_CTRL_TX) 338 pause_tx = 1; 339 } 340 341 gmac_set_flow_control(netdev, pause_tx, pause_rx); 342 343 if (old_status.bits32 == status.bits32) 344 return; 345 346 if (netif_msg_link(port)) { 347 phy_print_status(phydev); 348 netdev_info(netdev, "link flow control: %s\n", 349 phydev->pause 350 ? (phydev->asym_pause ? "tx" : "both") 351 : (phydev->asym_pause ? 
"rx" : "none") 352 ); 353 } 354 355 gmac_disable_tx_rx(netdev); 356 writel(status.bits32, port->gmac_base + GMAC_STATUS); 357 gmac_enable_tx_rx(netdev); 358 } 359 360 static int gmac_setup_phy(struct net_device *netdev) 361 { 362 struct gemini_ethernet_port *port = netdev_priv(netdev); 363 union gmac_status status = { .bits32 = 0 }; 364 struct device *dev = port->dev; 365 struct phy_device *phy; 366 367 phy = of_phy_get_and_connect(netdev, 368 dev->of_node, 369 gmac_speed_set); 370 if (!phy) 371 return -ENODEV; 372 netdev->phydev = phy; 373 374 phy_set_max_speed(phy, SPEED_1000); 375 phy_support_asym_pause(phy); 376 377 /* set PHY interface type */ 378 switch (phy->interface) { 379 case PHY_INTERFACE_MODE_MII: 380 netdev_dbg(netdev, 381 "MII: set GMAC0 to GMII mode, GMAC1 disabled\n"); 382 status.bits.mii_rmii = GMAC_PHY_MII; 383 break; 384 case PHY_INTERFACE_MODE_GMII: 385 netdev_dbg(netdev, 386 "GMII: set GMAC0 to GMII mode, GMAC1 disabled\n"); 387 status.bits.mii_rmii = GMAC_PHY_GMII; 388 break; 389 case PHY_INTERFACE_MODE_RGMII: 390 case PHY_INTERFACE_MODE_RGMII_ID: 391 case PHY_INTERFACE_MODE_RGMII_TXID: 392 case PHY_INTERFACE_MODE_RGMII_RXID: 393 netdev_dbg(netdev, 394 "RGMII: set GMAC0 and GMAC1 to MII/RGMII mode\n"); 395 status.bits.mii_rmii = GMAC_PHY_RGMII_100_10; 396 break; 397 default: 398 netdev_err(netdev, "Unsupported MII interface\n"); 399 phy_disconnect(phy); 400 netdev->phydev = NULL; 401 return -EINVAL; 402 } 403 writel(status.bits32, port->gmac_base + GMAC_STATUS); 404 405 if (netif_msg_link(port)) 406 phy_attached_info(phy); 407 408 return 0; 409 } 410 411 /* The maximum frame length is not logically enumerated in the 412 * hardware, so we do a table lookup to find the applicable max 413 * frame length. 414 */ 415 struct gmac_max_framelen { 416 unsigned int max_l3_len; 417 u8 val; 418 }; 419 420 static const struct gmac_max_framelen gmac_maxlens[] = { 421 { 422 .max_l3_len = 1518, 423 .val = CONFIG0_MAXLEN_1518, 424 }, 425 { 426 .max_l3_len = 1522, 427 .val = CONFIG0_MAXLEN_1522, 428 }, 429 { 430 .max_l3_len = 1536, 431 .val = CONFIG0_MAXLEN_1536, 432 }, 433 { 434 .max_l3_len = 1548, 435 .val = CONFIG0_MAXLEN_1548, 436 }, 437 { 438 .max_l3_len = 9212, 439 .val = CONFIG0_MAXLEN_9k, 440 }, 441 { 442 .max_l3_len = 10236, 443 .val = CONFIG0_MAXLEN_10k, 444 }, 445 }; 446 447 static int gmac_pick_rx_max_len(unsigned int max_l3_len) 448 { 449 const struct gmac_max_framelen *maxlen; 450 int maxtot; 451 int i; 452 453 maxtot = max_l3_len + ETH_HLEN + VLAN_HLEN; 454 455 for (i = 0; i < ARRAY_SIZE(gmac_maxlens); i++) { 456 maxlen = &gmac_maxlens[i]; 457 if (maxtot <= maxlen->max_l3_len) 458 return maxlen->val; 459 } 460 461 return -1; 462 } 463 464 static int gmac_init(struct net_device *netdev) 465 { 466 struct gemini_ethernet_port *port = netdev_priv(netdev); 467 union gmac_config0 config0 = { .bits = { 468 .dis_tx = 1, 469 .dis_rx = 1, 470 .ipv4_rx_chksum = 1, 471 .ipv6_rx_chksum = 1, 472 .rx_err_detect = 1, 473 .rgmm_edge = 1, 474 .port0_chk_hwq = 1, 475 .port1_chk_hwq = 1, 476 .port0_chk_toeq = 1, 477 .port1_chk_toeq = 1, 478 .port0_chk_classq = 1, 479 .port1_chk_classq = 1, 480 } }; 481 union gmac_ahb_weight ahb_weight = { .bits = { 482 .rx_weight = 1, 483 .tx_weight = 1, 484 .hash_weight = 1, 485 .pre_req = 0x1f, 486 .tq_dv_threshold = 0, 487 } }; 488 union gmac_tx_wcr0 hw_weigh = { .bits = { 489 .hw_tq3 = 1, 490 .hw_tq2 = 1, 491 .hw_tq1 = 1, 492 .hw_tq0 = 1, 493 } }; 494 union gmac_tx_wcr1 sw_weigh = { .bits = { 495 .sw_tq5 = 1, 496 .sw_tq4 = 1, 497 .sw_tq3 = 1, 498 
.sw_tq2 = 1, 499 .sw_tq1 = 1, 500 .sw_tq0 = 1, 501 } }; 502 union gmac_config1 config1 = { .bits = { 503 .set_threshold = 16, 504 .rel_threshold = 24, 505 } }; 506 union gmac_config2 config2 = { .bits = { 507 .set_threshold = 16, 508 .rel_threshold = 32, 509 } }; 510 union gmac_config3 config3 = { .bits = { 511 .set_threshold = 0, 512 .rel_threshold = 0, 513 } }; 514 union gmac_config0 tmp; 515 516 config0.bits.max_len = gmac_pick_rx_max_len(netdev->mtu); 517 tmp.bits32 = readl(port->gmac_base + GMAC_CONFIG0); 518 config0.bits.reserved = tmp.bits.reserved; 519 writel(config0.bits32, port->gmac_base + GMAC_CONFIG0); 520 writel(config1.bits32, port->gmac_base + GMAC_CONFIG1); 521 writel(config2.bits32, port->gmac_base + GMAC_CONFIG2); 522 writel(config3.bits32, port->gmac_base + GMAC_CONFIG3); 523 524 readl(port->dma_base + GMAC_AHB_WEIGHT_REG); 525 writel(ahb_weight.bits32, port->dma_base + GMAC_AHB_WEIGHT_REG); 526 527 writel(hw_weigh.bits32, 528 port->dma_base + GMAC_TX_WEIGHTING_CTRL_0_REG); 529 writel(sw_weigh.bits32, 530 port->dma_base + GMAC_TX_WEIGHTING_CTRL_1_REG); 531 532 port->rxq_order = DEFAULT_GMAC_RXQ_ORDER; 533 port->txq_order = DEFAULT_GMAC_TXQ_ORDER; 534 port->rx_coalesce_nsecs = DEFAULT_RX_COALESCE_NSECS; 535 536 /* Mark every quarter of the queue a packet for interrupt 537 * in order to be able to wake up the queue if it was stopped 538 */ 539 port->irq_every_tx_packets = 1 << (port->txq_order - 2); 540 541 return 0; 542 } 543 544 static int gmac_setup_txqs(struct net_device *netdev) 545 { 546 struct gemini_ethernet_port *port = netdev_priv(netdev); 547 unsigned int n_txq = netdev->num_tx_queues; 548 struct gemini_ethernet *geth = port->geth; 549 size_t entries = 1 << port->txq_order; 550 struct gmac_txq *txq = port->txq; 551 struct gmac_txdesc *desc_ring; 552 size_t len = n_txq * entries; 553 struct sk_buff **skb_tab; 554 void __iomem *rwptr_reg; 555 unsigned int r; 556 int i; 557 558 rwptr_reg = port->dma_base + GMAC_SW_TX_QUEUE0_PTR_REG; 559 560 skb_tab = kcalloc(len, sizeof(*skb_tab), GFP_KERNEL); 561 if (!skb_tab) 562 return -ENOMEM; 563 564 desc_ring = dma_alloc_coherent(geth->dev, len * sizeof(*desc_ring), 565 &port->txq_dma_base, GFP_KERNEL); 566 567 if (!desc_ring) { 568 kfree(skb_tab); 569 return -ENOMEM; 570 } 571 572 if (port->txq_dma_base & ~DMA_Q_BASE_MASK) { 573 dev_warn(geth->dev, "TX queue base is not aligned\n"); 574 dma_free_coherent(geth->dev, len * sizeof(*desc_ring), 575 desc_ring, port->txq_dma_base); 576 kfree(skb_tab); 577 return -ENOMEM; 578 } 579 580 writel(port->txq_dma_base | port->txq_order, 581 port->dma_base + GMAC_SW_TX_QUEUE_BASE_REG); 582 583 for (i = 0; i < n_txq; i++) { 584 txq->ring = desc_ring; 585 txq->skb = skb_tab; 586 txq->noirq_packets = 0; 587 588 r = readw(rwptr_reg); 589 rwptr_reg += 2; 590 writew(r, rwptr_reg); 591 rwptr_reg += 2; 592 txq->cptr = r; 593 594 txq++; 595 desc_ring += entries; 596 skb_tab += entries; 597 } 598 599 return 0; 600 } 601 602 static void gmac_clean_txq(struct net_device *netdev, struct gmac_txq *txq, 603 unsigned int r) 604 { 605 struct gemini_ethernet_port *port = netdev_priv(netdev); 606 unsigned int m = (1 << port->txq_order) - 1; 607 struct gemini_ethernet *geth = port->geth; 608 unsigned int c = txq->cptr; 609 union gmac_txdesc_0 word0; 610 union gmac_txdesc_1 word1; 611 unsigned int hwchksum = 0; 612 unsigned long bytes = 0; 613 struct gmac_txdesc *txd; 614 unsigned short nfrags; 615 unsigned int errs = 0; 616 unsigned int pkts = 0; 617 unsigned int word3; 618 dma_addr_t mapping; 619 620 if (c 
== r) 621 return; 622 623 while (c != r) { 624 txd = txq->ring + c; 625 word0 = txd->word0; 626 word1 = txd->word1; 627 mapping = txd->word2.buf_adr; 628 word3 = txd->word3.bits32; 629 630 dma_unmap_single(geth->dev, mapping, 631 word0.bits.buffer_size, DMA_TO_DEVICE); 632 633 if (word3 & EOF_BIT) 634 dev_kfree_skb(txq->skb[c]); 635 636 c++; 637 c &= m; 638 639 if (!(word3 & SOF_BIT)) 640 continue; 641 642 if (!word0.bits.status_tx_ok) { 643 errs++; 644 continue; 645 } 646 647 pkts++; 648 bytes += txd->word1.bits.byte_count; 649 650 if (word1.bits32 & TSS_CHECKUM_ENABLE) 651 hwchksum++; 652 653 nfrags = word0.bits.desc_count - 1; 654 if (nfrags) { 655 if (nfrags >= TX_MAX_FRAGS) 656 nfrags = TX_MAX_FRAGS - 1; 657 658 u64_stats_update_begin(&port->tx_stats_syncp); 659 port->tx_frag_stats[nfrags]++; 660 u64_stats_update_end(&port->tx_stats_syncp); 661 } 662 } 663 664 u64_stats_update_begin(&port->ir_stats_syncp); 665 port->stats.tx_errors += errs; 666 port->stats.tx_packets += pkts; 667 port->stats.tx_bytes += bytes; 668 port->tx_hw_csummed += hwchksum; 669 u64_stats_update_end(&port->ir_stats_syncp); 670 671 txq->cptr = c; 672 } 673 674 static void gmac_cleanup_txqs(struct net_device *netdev) 675 { 676 struct gemini_ethernet_port *port = netdev_priv(netdev); 677 unsigned int n_txq = netdev->num_tx_queues; 678 struct gemini_ethernet *geth = port->geth; 679 void __iomem *rwptr_reg; 680 unsigned int r, i; 681 682 rwptr_reg = port->dma_base + GMAC_SW_TX_QUEUE0_PTR_REG; 683 684 for (i = 0; i < n_txq; i++) { 685 r = readw(rwptr_reg); 686 rwptr_reg += 2; 687 writew(r, rwptr_reg); 688 rwptr_reg += 2; 689 690 gmac_clean_txq(netdev, port->txq + i, r); 691 } 692 writel(0, port->dma_base + GMAC_SW_TX_QUEUE_BASE_REG); 693 694 kfree(port->txq->skb); 695 dma_free_coherent(geth->dev, 696 n_txq * sizeof(*port->txq->ring) << port->txq_order, 697 port->txq->ring, port->txq_dma_base); 698 } 699 700 static int gmac_setup_rxq(struct net_device *netdev) 701 { 702 struct gemini_ethernet_port *port = netdev_priv(netdev); 703 struct gemini_ethernet *geth = port->geth; 704 struct nontoe_qhdr __iomem *qhdr; 705 706 qhdr = geth->base + TOE_DEFAULT_Q_HDR_BASE(netdev->dev_id); 707 port->rxq_rwptr = &qhdr->word1; 708 709 /* Remap a slew of memory to use for the RX queue */ 710 port->rxq_ring = dma_alloc_coherent(geth->dev, 711 sizeof(*port->rxq_ring) << port->rxq_order, 712 &port->rxq_dma_base, GFP_KERNEL); 713 if (!port->rxq_ring) 714 return -ENOMEM; 715 if (port->rxq_dma_base & ~NONTOE_QHDR0_BASE_MASK) { 716 dev_warn(geth->dev, "RX queue base is not aligned\n"); 717 return -ENOMEM; 718 } 719 720 writel(port->rxq_dma_base | port->rxq_order, &qhdr->word0); 721 writel(0, port->rxq_rwptr); 722 return 0; 723 } 724 725 static struct gmac_queue_page * 726 gmac_get_queue_page(struct gemini_ethernet *geth, 727 struct gemini_ethernet_port *port, 728 dma_addr_t addr) 729 { 730 struct gmac_queue_page *gpage; 731 dma_addr_t mapping; 732 int i; 733 734 /* Only look for even pages */ 735 mapping = addr & PAGE_MASK; 736 737 if (!geth->freeq_pages) { 738 dev_err(geth->dev, "try to get page with no page list\n"); 739 return NULL; 740 } 741 742 /* Look up a ring buffer page from virtual mapping */ 743 for (i = 0; i < geth->num_freeq_pages; i++) { 744 gpage = &geth->freeq_pages[i]; 745 if (gpage->mapping == mapping) 746 return gpage; 747 } 748 749 return NULL; 750 } 751 752 static void gmac_cleanup_rxq(struct net_device *netdev) 753 { 754 struct gemini_ethernet_port *port = netdev_priv(netdev); 755 struct gemini_ethernet *geth = 
port->geth; 756 struct gmac_rxdesc *rxd = port->rxq_ring; 757 static struct gmac_queue_page *gpage; 758 struct nontoe_qhdr __iomem *qhdr; 759 void __iomem *dma_reg; 760 void __iomem *ptr_reg; 761 dma_addr_t mapping; 762 union dma_rwptr rw; 763 unsigned int r, w; 764 765 qhdr = geth->base + 766 TOE_DEFAULT_Q_HDR_BASE(netdev->dev_id); 767 dma_reg = &qhdr->word0; 768 ptr_reg = &qhdr->word1; 769 770 rw.bits32 = readl(ptr_reg); 771 r = rw.bits.rptr; 772 w = rw.bits.wptr; 773 writew(r, ptr_reg + 2); 774 775 writel(0, dma_reg); 776 777 /* Loop from read pointer to write pointer of the RX queue 778 * and free up all pages by the queue. 779 */ 780 while (r != w) { 781 mapping = rxd[r].word2.buf_adr; 782 r++; 783 r &= ((1 << port->rxq_order) - 1); 784 785 if (!mapping) 786 continue; 787 788 /* Freeq pointers are one page off */ 789 gpage = gmac_get_queue_page(geth, port, mapping + PAGE_SIZE); 790 if (!gpage) { 791 dev_err(geth->dev, "could not find page\n"); 792 continue; 793 } 794 /* Release the RX queue reference to the page */ 795 put_page(gpage->page); 796 } 797 798 dma_free_coherent(geth->dev, sizeof(*port->rxq_ring) << port->rxq_order, 799 port->rxq_ring, port->rxq_dma_base); 800 } 801 802 static struct page *geth_freeq_alloc_map_page(struct gemini_ethernet *geth, 803 int pn) 804 { 805 struct gmac_rxdesc *freeq_entry; 806 struct gmac_queue_page *gpage; 807 unsigned int fpp_order; 808 unsigned int frag_len; 809 dma_addr_t mapping; 810 struct page *page; 811 int i; 812 813 /* First allocate and DMA map a single page */ 814 page = alloc_page(GFP_ATOMIC); 815 if (!page) 816 return NULL; 817 818 mapping = dma_map_single(geth->dev, page_address(page), 819 PAGE_SIZE, DMA_FROM_DEVICE); 820 if (dma_mapping_error(geth->dev, mapping)) { 821 put_page(page); 822 return NULL; 823 } 824 825 /* Then assign the page mapping (physical address) to the buffer address 826 * in the hardware queue. PAGE_SHIFT on ARM is 12 (1 page is 4096 bytes, 827 * 4k), and the default RX frag order is 11 (fragments are up to 2048 828 * bytes, 2k) so fpp_order (fragments per page order) is default 1. Thus 829 * each page normally needs two entries in the queue. 830 */ 831 frag_len = 1 << geth->freeq_frag_order; /* Usually 2048 */ 832 fpp_order = PAGE_SHIFT - geth->freeq_frag_order; 833 freeq_entry = geth->freeq_ring + (pn << fpp_order); 834 dev_dbg(geth->dev, "allocate page %d fragment length %d fragments per page %d, freeq entry %p\n", 835 pn, frag_len, (1 << fpp_order), freeq_entry); 836 for (i = (1 << fpp_order); i > 0; i--) { 837 freeq_entry->word2.buf_adr = mapping; 838 freeq_entry++; 839 mapping += frag_len; 840 } 841 842 /* If the freeq entry already has a page mapped, then unmap it. */ 843 gpage = &geth->freeq_pages[pn]; 844 if (gpage->page) { 845 mapping = geth->freeq_ring[pn << fpp_order].word2.buf_adr; 846 dma_unmap_single(geth->dev, mapping, frag_len, DMA_FROM_DEVICE); 847 /* This should be the last reference to the page so it gets 848 * released 849 */ 850 put_page(gpage->page); 851 } 852 853 /* Then put our new mapping into the page table */ 854 dev_dbg(geth->dev, "page %d, DMA addr: %08x, page %p\n", 855 pn, (unsigned int)mapping, page); 856 gpage->mapping = mapping; 857 gpage->page = page; 858 859 return page; 860 } 861 862 /** 863 * geth_fill_freeq() - Fill the freeq with empty fragments to use 864 * @geth: the ethernet adapter 865 * @refill: whether to reset the queue by filling in all freeq entries or 866 * just refill it; usually the interrupt to refill the queue happens when 867 * the queue is half empty.
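 * Return: the number of freeq entries (page fragments) that were filled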
868 */ 869 static unsigned int geth_fill_freeq(struct gemini_ethernet *geth, bool refill) 870 { 871 unsigned int fpp_order = PAGE_SHIFT - geth->freeq_frag_order; 872 unsigned int count = 0; 873 unsigned int pn, epn; 874 unsigned long flags; 875 union dma_rwptr rw; 876 unsigned int m_pn; 877 878 /* Mask for page */ 879 m_pn = (1 << (geth->freeq_order - fpp_order)) - 1; 880 881 spin_lock_irqsave(&geth->freeq_lock, flags); 882 883 rw.bits32 = readl(geth->base + GLOBAL_SWFQ_RWPTR_REG); 884 pn = (refill ? rw.bits.wptr : rw.bits.rptr) >> fpp_order; 885 epn = (rw.bits.rptr >> fpp_order) - 1; 886 epn &= m_pn; 887 888 /* Loop over the freeq ring buffer entries */ 889 while (pn != epn) { 890 struct gmac_queue_page *gpage; 891 struct page *page; 892 893 gpage = &geth->freeq_pages[pn]; 894 page = gpage->page; 895 896 dev_dbg(geth->dev, "fill entry %d page ref count %d add %d refs\n", 897 pn, page_ref_count(page), 1 << fpp_order); 898 899 if (page_ref_count(page) > 1) { 900 unsigned int fl = (pn - epn) & m_pn; 901 902 if (fl > 64 >> fpp_order) 903 break; 904 905 page = geth_freeq_alloc_map_page(geth, pn); 906 if (!page) 907 break; 908 } 909 910 /* Add one reference per fragment in the page */ 911 page_ref_add(page, 1 << fpp_order); 912 count += 1 << fpp_order; 913 pn++; 914 pn &= m_pn; 915 } 916 917 writew(pn << fpp_order, geth->base + GLOBAL_SWFQ_RWPTR_REG + 2); 918 919 spin_unlock_irqrestore(&geth->freeq_lock, flags); 920 921 return count; 922 } 923 924 static int geth_setup_freeq(struct gemini_ethernet *geth) 925 { 926 unsigned int fpp_order = PAGE_SHIFT - geth->freeq_frag_order; 927 unsigned int frag_len = 1 << geth->freeq_frag_order; 928 unsigned int len = 1 << geth->freeq_order; 929 unsigned int pages = len >> fpp_order; 930 union queue_threshold qt; 931 union dma_skb_size skbsz; 932 unsigned int filled; 933 unsigned int pn; 934 935 geth->freeq_ring = dma_alloc_coherent(geth->dev, 936 sizeof(*geth->freeq_ring) << geth->freeq_order, 937 &geth->freeq_dma_base, GFP_KERNEL); 938 if (!geth->freeq_ring) 939 return -ENOMEM; 940 if (geth->freeq_dma_base & ~DMA_Q_BASE_MASK) { 941 dev_warn(geth->dev, "queue ring base is not aligned\n"); 942 goto err_freeq; 943 } 944 945 /* Allocate a mapping to page look-up index */ 946 geth->freeq_pages = kcalloc(pages, sizeof(*geth->freeq_pages), 947 GFP_KERNEL); 948 if (!geth->freeq_pages) 949 goto err_freeq; 950 geth->num_freeq_pages = pages; 951 952 dev_info(geth->dev, "allocate %d pages for queue\n", pages); 953 for (pn = 0; pn < pages; pn++) 954 if (!geth_freeq_alloc_map_page(geth, pn)) 955 goto err_freeq_alloc; 956 957 filled = geth_fill_freeq(geth, false); 958 if (!filled) 959 goto err_freeq_alloc; 960 961 qt.bits32 = readl(geth->base + GLOBAL_QUEUE_THRESHOLD_REG); 962 qt.bits.swfq_empty = 32; 963 writel(qt.bits32, geth->base + GLOBAL_QUEUE_THRESHOLD_REG); 964 965 skbsz.bits.sw_skb_size = 1 << geth->freeq_frag_order; 966 writel(skbsz.bits32, geth->base + GLOBAL_DMA_SKB_SIZE_REG); 967 writel(geth->freeq_dma_base | geth->freeq_order, 968 geth->base + GLOBAL_SW_FREEQ_BASE_SIZE_REG); 969 970 return 0; 971 972 err_freeq_alloc: 973 while (pn > 0) { 974 struct gmac_queue_page *gpage; 975 dma_addr_t mapping; 976 977 --pn; 978 mapping = geth->freeq_ring[pn << fpp_order].word2.buf_adr; 979 dma_unmap_single(geth->dev, mapping, frag_len, DMA_FROM_DEVICE); 980 gpage = &geth->freeq_pages[pn]; 981 put_page(gpage->page); 982 } 983 984 kfree(geth->freeq_pages); 985 err_freeq: 986 dma_free_coherent(geth->dev, 987 sizeof(*geth->freeq_ring) << geth->freeq_order, 988 geth->freeq_ring, 
geth->freeq_dma_base); 989 geth->freeq_ring = NULL; 990 return -ENOMEM; 991 } 992 993 /** 994 * geth_cleanup_freeq() - cleanup the DMA mappings and free the queue 995 * @geth: the Gemini global ethernet state 996 */ 997 static void geth_cleanup_freeq(struct gemini_ethernet *geth) 998 { 999 unsigned int fpp_order = PAGE_SHIFT - geth->freeq_frag_order; 1000 unsigned int frag_len = 1 << geth->freeq_frag_order; 1001 unsigned int len = 1 << geth->freeq_order; 1002 unsigned int pages = len >> fpp_order; 1003 unsigned int pn; 1004 1005 writew(readw(geth->base + GLOBAL_SWFQ_RWPTR_REG), 1006 geth->base + GLOBAL_SWFQ_RWPTR_REG + 2); 1007 writel(0, geth->base + GLOBAL_SW_FREEQ_BASE_SIZE_REG); 1008 1009 for (pn = 0; pn < pages; pn++) { 1010 struct gmac_queue_page *gpage; 1011 dma_addr_t mapping; 1012 1013 mapping = geth->freeq_ring[pn << fpp_order].word2.buf_adr; 1014 dma_unmap_single(geth->dev, mapping, frag_len, DMA_FROM_DEVICE); 1015 1016 gpage = &geth->freeq_pages[pn]; 1017 while (page_ref_count(gpage->page) > 0) 1018 put_page(gpage->page); 1019 } 1020 1021 kfree(geth->freeq_pages); 1022 1023 dma_free_coherent(geth->dev, 1024 sizeof(*geth->freeq_ring) << geth->freeq_order, 1025 geth->freeq_ring, geth->freeq_dma_base); 1026 } 1027 1028 /** 1029 * geth_resize_freeq() - resize the software queue depth 1030 * @port: the port requesting the change 1031 * 1032 * This gets called at least once during probe() so the device queue gets 1033 * "resized" from the hardware defaults. Since both ports/net devices share 1034 * the same hardware queue, some synchronization between the ports is 1035 * needed. 1036 */ 1037 static int geth_resize_freeq(struct gemini_ethernet_port *port) 1038 { 1039 struct gemini_ethernet *geth = port->geth; 1040 struct net_device *netdev = port->netdev; 1041 struct gemini_ethernet_port *other_port; 1042 struct net_device *other_netdev; 1043 unsigned int new_size = 0; 1044 unsigned int new_order; 1045 unsigned long flags; 1046 u32 en; 1047 int ret; 1048 1049 if (netdev->dev_id == 0) 1050 other_netdev = geth->port1->netdev; 1051 else 1052 other_netdev = geth->port0->netdev; 1053 1054 if (other_netdev && netif_running(other_netdev)) 1055 return -EBUSY; 1056 1057 new_size = 1 << (port->rxq_order + 1); 1058 netdev_dbg(netdev, "port %d size: %d order %d\n", 1059 netdev->dev_id, 1060 new_size, 1061 port->rxq_order); 1062 if (other_netdev) { 1063 other_port = netdev_priv(other_netdev); 1064 new_size += 1 << (other_port->rxq_order + 1); 1065 netdev_dbg(other_netdev, "port %d size: %d order %d\n", 1066 other_netdev->dev_id, 1067 (1 << (other_port->rxq_order + 1)), 1068 other_port->rxq_order); 1069 } 1070 1071 new_order = min(15, ilog2(new_size - 1) + 1); 1072 dev_dbg(geth->dev, "set shared queue to size %d order %d\n", 1073 new_size, new_order); 1074 if (geth->freeq_order == new_order) 1075 return 0; 1076 1077 spin_lock_irqsave(&geth->irq_lock, flags); 1078 1079 /* Disable the software queue IRQs */ 1080 en = readl(geth->base + GLOBAL_INTERRUPT_ENABLE_4_REG); 1081 en &= ~SWFQ_EMPTY_INT_BIT; 1082 writel(en, geth->base + GLOBAL_INTERRUPT_ENABLE_4_REG); 1083 spin_unlock_irqrestore(&geth->irq_lock, flags); 1084 1085 /* Drop the old queue */ 1086 if (geth->freeq_ring) 1087 geth_cleanup_freeq(geth); 1088 1089 /* Allocate a new queue with the desired order */ 1090 geth->freeq_order = new_order; 1091 ret = geth_setup_freeq(geth); 1092 1093 /* Restart the interrupts - NOTE if this is the first resize 1094 * after probe(), this is where the interrupts get turned on 1095 * in the first place. 
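 * The SWFQ empty IRQ is re-enabled here even if geth_setup_freeq() failed;
 * any error from the setup is then returned to the caller.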
1096 */ 1097 spin_lock_irqsave(&geth->irq_lock, flags); 1098 en |= SWFQ_EMPTY_INT_BIT; 1099 writel(en, geth->base + GLOBAL_INTERRUPT_ENABLE_4_REG); 1100 spin_unlock_irqrestore(&geth->irq_lock, flags); 1101 1102 return ret; 1103 } 1104 1105 static void gmac_tx_irq_enable(struct net_device *netdev, 1106 unsigned int txq, int en) 1107 { 1108 struct gemini_ethernet_port *port = netdev_priv(netdev); 1109 struct gemini_ethernet *geth = port->geth; 1110 unsigned long flags; 1111 u32 val, mask; 1112 1113 netdev_dbg(netdev, "%s device %d\n", __func__, netdev->dev_id); 1114 1115 spin_lock_irqsave(&geth->irq_lock, flags); 1116 1117 mask = GMAC0_IRQ0_TXQ0_INTS << (6 * netdev->dev_id + txq); 1118 1119 if (en) 1120 writel(mask, geth->base + GLOBAL_INTERRUPT_STATUS_0_REG); 1121 1122 val = readl(geth->base + GLOBAL_INTERRUPT_ENABLE_0_REG); 1123 val = en ? val | mask : val & ~mask; 1124 writel(val, geth->base + GLOBAL_INTERRUPT_ENABLE_0_REG); 1125 1126 spin_unlock_irqrestore(&geth->irq_lock, flags); 1127 } 1128 1129 static void gmac_tx_irq(struct net_device *netdev, unsigned int txq_num) 1130 { 1131 struct netdev_queue *ntxq = netdev_get_tx_queue(netdev, txq_num); 1132 1133 gmac_tx_irq_enable(netdev, txq_num, 0); 1134 netif_tx_wake_queue(ntxq); 1135 } 1136 1137 static int gmac_map_tx_bufs(struct net_device *netdev, struct sk_buff *skb, 1138 struct gmac_txq *txq, unsigned short *desc) 1139 { 1140 struct gemini_ethernet_port *port = netdev_priv(netdev); 1141 struct skb_shared_info *skb_si = skb_shinfo(skb); 1142 unsigned short m = (1 << port->txq_order) - 1; 1143 short frag, last_frag = skb_si->nr_frags - 1; 1144 struct gemini_ethernet *geth = port->geth; 1145 unsigned int word1, word3, buflen; 1146 unsigned short w = *desc; 1147 struct gmac_txdesc *txd; 1148 skb_frag_t *skb_frag; 1149 dma_addr_t mapping; 1150 void *buffer; 1151 int ret; 1152 1153 /* TODO: implement proper TSO using MTU in word3 */ 1154 word1 = skb->len; 1155 word3 = SOF_BIT; 1156 1157 if (skb->len >= ETH_FRAME_LEN) { 1158 /* Hardware offloaded checksumming isn't working on frames 1159 * bigger than 1514 bytes. A hypothesis about this is that the 1160 * checksum buffer is only 1518 bytes, so when the frames get 1161 * bigger they get truncated, or the last few bytes get 1162 * overwritten by the FCS. 1163 * 1164 * Just use software checksumming and bypass on bigger frames. 1165 */ 1166 if (skb->ip_summed == CHECKSUM_PARTIAL) { 1167 ret = skb_checksum_help(skb); 1168 if (ret) 1169 return ret; 1170 } 1171 word1 |= TSS_BYPASS_BIT; 1172 } else if (skb->ip_summed == CHECKSUM_PARTIAL) { 1173 int tcp = 0; 1174 1175 /* We do not switch off the checksumming on non TCP/UDP 1176 * frames: as is shown from tests, the checksumming engine 1177 * is smart enough to see that a frame is not actually TCP 1178 * or UDP and then just pass it through without any changes 1179 * to the frame. 1180 */ 1181 if (skb->protocol == htons(ETH_P_IP)) { 1182 word1 |= TSS_IP_CHKSUM_BIT; 1183 tcp = ip_hdr(skb)->protocol == IPPROTO_TCP; 1184 } else { /* IPv6 */ 1185 word1 |= TSS_IPV6_ENABLE_BIT; 1186 tcp = ipv6_hdr(skb)->nexthdr == IPPROTO_TCP; 1187 } 1188 1189 word1 |= tcp ? 
TSS_TCP_CHKSUM_BIT : TSS_UDP_CHKSUM_BIT; 1190 } 1191 1192 frag = -1; 1193 while (frag <= last_frag) { 1194 if (frag == -1) { 1195 buffer = skb->data; 1196 buflen = skb_headlen(skb); 1197 } else { 1198 skb_frag = skb_si->frags + frag; 1199 buffer = skb_frag_address(skb_frag); 1200 buflen = skb_frag_size(skb_frag); 1201 } 1202 1203 if (frag == last_frag) { 1204 word3 |= EOF_BIT; 1205 txq->skb[w] = skb; 1206 } 1207 1208 mapping = dma_map_single(geth->dev, buffer, buflen, 1209 DMA_TO_DEVICE); 1210 if (dma_mapping_error(geth->dev, mapping)) 1211 goto map_error; 1212 1213 txd = txq->ring + w; 1214 txd->word0.bits32 = buflen; 1215 txd->word1.bits32 = word1; 1216 txd->word2.buf_adr = mapping; 1217 txd->word3.bits32 = word3; 1218 1219 word3 &= MTU_SIZE_BIT_MASK; 1220 w++; 1221 w &= m; 1222 frag++; 1223 } 1224 1225 *desc = w; 1226 return 0; 1227 1228 map_error: 1229 while (w != *desc) { 1230 w--; 1231 w &= m; 1232 1233 dma_unmap_page(geth->dev, txq->ring[w].word2.buf_adr, 1234 txq->ring[w].word0.bits.buffer_size, 1235 DMA_TO_DEVICE); 1236 } 1237 return -ENOMEM; 1238 } 1239 1240 static netdev_tx_t gmac_start_xmit(struct sk_buff *skb, 1241 struct net_device *netdev) 1242 { 1243 struct gemini_ethernet_port *port = netdev_priv(netdev); 1244 unsigned short m = (1 << port->txq_order) - 1; 1245 struct netdev_queue *ntxq; 1246 unsigned short r, w, d; 1247 void __iomem *ptr_reg; 1248 struct gmac_txq *txq; 1249 int txq_num, nfrags; 1250 union dma_rwptr rw; 1251 1252 if (skb->len >= 0x10000) 1253 goto out_drop_free; 1254 1255 txq_num = skb_get_queue_mapping(skb); 1256 ptr_reg = port->dma_base + GMAC_SW_TX_QUEUE_PTR_REG(txq_num); 1257 txq = &port->txq[txq_num]; 1258 ntxq = netdev_get_tx_queue(netdev, txq_num); 1259 nfrags = skb_shinfo(skb)->nr_frags; 1260 1261 rw.bits32 = readl(ptr_reg); 1262 r = rw.bits.rptr; 1263 w = rw.bits.wptr; 1264 1265 d = txq->cptr - w - 1; 1266 d &= m; 1267 1268 if (d < nfrags + 2) { 1269 gmac_clean_txq(netdev, txq, r); 1270 d = txq->cptr - w - 1; 1271 d &= m; 1272 1273 if (d < nfrags + 2) { 1274 netif_tx_stop_queue(ntxq); 1275 1276 d = txq->cptr + nfrags + 16; 1277 d &= m; 1278 txq->ring[d].word3.bits.eofie = 1; 1279 gmac_tx_irq_enable(netdev, txq_num, 1); 1280 1281 u64_stats_update_begin(&port->tx_stats_syncp); 1282 netdev->stats.tx_fifo_errors++; 1283 u64_stats_update_end(&port->tx_stats_syncp); 1284 return NETDEV_TX_BUSY; 1285 } 1286 } 1287 1288 if (gmac_map_tx_bufs(netdev, skb, txq, &w)) { 1289 if (skb_linearize(skb)) 1290 goto out_drop; 1291 1292 u64_stats_update_begin(&port->tx_stats_syncp); 1293 port->tx_frags_linearized++; 1294 u64_stats_update_end(&port->tx_stats_syncp); 1295 1296 if (gmac_map_tx_bufs(netdev, skb, txq, &w)) 1297 goto out_drop_free; 1298 } 1299 1300 writew(w, ptr_reg + 2); 1301 1302 gmac_clean_txq(netdev, txq, r); 1303 return NETDEV_TX_OK; 1304 1305 out_drop_free: 1306 dev_kfree_skb(skb); 1307 out_drop: 1308 u64_stats_update_begin(&port->tx_stats_syncp); 1309 port->stats.tx_dropped++; 1310 u64_stats_update_end(&port->tx_stats_syncp); 1311 return NETDEV_TX_OK; 1312 } 1313 1314 static void gmac_tx_timeout(struct net_device *netdev, unsigned int txqueue) 1315 { 1316 netdev_err(netdev, "Tx timeout\n"); 1317 gmac_dump_dma_state(netdev); 1318 } 1319 1320 static void gmac_enable_irq(struct net_device *netdev, int enable) 1321 { 1322 struct gemini_ethernet_port *port = netdev_priv(netdev); 1323 struct gemini_ethernet *geth = port->geth; 1324 unsigned long flags; 1325 u32 val, mask; 1326 1327 netdev_dbg(netdev, "%s device %d %s\n", __func__, 1328 netdev->dev_id, enable 
? "enable" : "disable"); 1329 spin_lock_irqsave(&geth->irq_lock, flags); 1330 1331 mask = GMAC0_IRQ0_2 << (netdev->dev_id * 2); 1332 val = readl(geth->base + GLOBAL_INTERRUPT_ENABLE_0_REG); 1333 val = enable ? (val | mask) : (val & ~mask); 1334 writel(val, geth->base + GLOBAL_INTERRUPT_ENABLE_0_REG); 1335 1336 mask = DEFAULT_Q0_INT_BIT << netdev->dev_id; 1337 val = readl(geth->base + GLOBAL_INTERRUPT_ENABLE_1_REG); 1338 val = enable ? (val | mask) : (val & ~mask); 1339 writel(val, geth->base + GLOBAL_INTERRUPT_ENABLE_1_REG); 1340 1341 mask = GMAC0_IRQ4_8 << (netdev->dev_id * 8); 1342 val = readl(geth->base + GLOBAL_INTERRUPT_ENABLE_4_REG); 1343 val = enable ? (val | mask) : (val & ~mask); 1344 writel(val, geth->base + GLOBAL_INTERRUPT_ENABLE_4_REG); 1345 1346 spin_unlock_irqrestore(&geth->irq_lock, flags); 1347 } 1348 1349 static void gmac_enable_rx_irq(struct net_device *netdev, int enable) 1350 { 1351 struct gemini_ethernet_port *port = netdev_priv(netdev); 1352 struct gemini_ethernet *geth = port->geth; 1353 unsigned long flags; 1354 u32 val, mask; 1355 1356 netdev_dbg(netdev, "%s device %d %s\n", __func__, netdev->dev_id, 1357 enable ? "enable" : "disable"); 1358 spin_lock_irqsave(&geth->irq_lock, flags); 1359 mask = DEFAULT_Q0_INT_BIT << netdev->dev_id; 1360 1361 val = readl(geth->base + GLOBAL_INTERRUPT_ENABLE_1_REG); 1362 val = enable ? (val | mask) : (val & ~mask); 1363 writel(val, geth->base + GLOBAL_INTERRUPT_ENABLE_1_REG); 1364 1365 spin_unlock_irqrestore(&geth->irq_lock, flags); 1366 } 1367 1368 static struct sk_buff *gmac_skb_if_good_frame(struct gemini_ethernet_port *port, 1369 union gmac_rxdesc_0 word0, 1370 unsigned int frame_len) 1371 { 1372 unsigned int rx_csum = word0.bits.chksum_status; 1373 unsigned int rx_status = word0.bits.status; 1374 struct sk_buff *skb = NULL; 1375 1376 port->rx_stats[rx_status]++; 1377 port->rx_csum_stats[rx_csum]++; 1378 1379 if (word0.bits.derr || word0.bits.perr || 1380 rx_status || frame_len < ETH_ZLEN || 1381 rx_csum >= RX_CHKSUM_IP_ERR_UNKNOWN) { 1382 port->stats.rx_errors++; 1383 1384 if (frame_len < ETH_ZLEN || RX_ERROR_LENGTH(rx_status)) 1385 port->stats.rx_length_errors++; 1386 if (RX_ERROR_OVER(rx_status)) 1387 port->stats.rx_over_errors++; 1388 if (RX_ERROR_CRC(rx_status)) 1389 port->stats.rx_crc_errors++; 1390 if (RX_ERROR_FRAME(rx_status)) 1391 port->stats.rx_frame_errors++; 1392 return NULL; 1393 } 1394 1395 skb = napi_get_frags(&port->napi); 1396 if (!skb) 1397 goto update_exit; 1398 1399 if (rx_csum == RX_CHKSUM_IP_UDP_TCP_OK) 1400 skb->ip_summed = CHECKSUM_UNNECESSARY; 1401 1402 update_exit: 1403 port->stats.rx_bytes += frame_len; 1404 port->stats.rx_packets++; 1405 return skb; 1406 } 1407 1408 static unsigned int gmac_rx(struct net_device *netdev, unsigned int budget) 1409 { 1410 struct gemini_ethernet_port *port = netdev_priv(netdev); 1411 unsigned short m = (1 << port->rxq_order) - 1; 1412 struct gemini_ethernet *geth = port->geth; 1413 void __iomem *ptr_reg = port->rxq_rwptr; 1414 unsigned int frame_len, frag_len; 1415 struct gmac_rxdesc *rx = NULL; 1416 struct gmac_queue_page *gpage; 1417 static struct sk_buff *skb; 1418 union gmac_rxdesc_0 word0; 1419 union gmac_rxdesc_1 word1; 1420 union gmac_rxdesc_3 word3; 1421 struct page *page = NULL; 1422 unsigned int page_offs; 1423 unsigned long flags; 1424 unsigned short r, w; 1425 union dma_rwptr rw; 1426 dma_addr_t mapping; 1427 int frag_nr = 0; 1428 1429 spin_lock_irqsave(&geth->irq_lock, flags); 1430 rw.bits32 = readl(ptr_reg); 1431 /* Reset interrupt as all packets until
here are taken into account */ 1432 writel(DEFAULT_Q0_INT_BIT << netdev->dev_id, 1433 geth->base + GLOBAL_INTERRUPT_STATUS_1_REG); 1434 spin_unlock_irqrestore(&geth->irq_lock, flags); 1435 1436 r = rw.bits.rptr; 1437 w = rw.bits.wptr; 1438 1439 while (budget && w != r) { 1440 rx = port->rxq_ring + r; 1441 word0 = rx->word0; 1442 word1 = rx->word1; 1443 mapping = rx->word2.buf_adr; 1444 word3 = rx->word3; 1445 1446 r++; 1447 r &= m; 1448 1449 frag_len = word0.bits.buffer_size; 1450 frame_len = word1.bits.byte_count; 1451 page_offs = mapping & ~PAGE_MASK; 1452 1453 if (!mapping) { 1454 netdev_err(netdev, 1455 "rxq[%u]: HW BUG: zero DMA desc\n", r); 1456 goto err_drop; 1457 } 1458 1459 /* Freeq pointers are one page off */ 1460 gpage = gmac_get_queue_page(geth, port, mapping + PAGE_SIZE); 1461 if (!gpage) { 1462 dev_err(geth->dev, "could not find mapping\n"); 1463 continue; 1464 } 1465 page = gpage->page; 1466 1467 if (word3.bits32 & SOF_BIT) { 1468 if (skb) { 1469 napi_free_frags(&port->napi); 1470 port->stats.rx_dropped++; 1471 } 1472 1473 skb = gmac_skb_if_good_frame(port, word0, frame_len); 1474 if (!skb) 1475 goto err_drop; 1476 1477 page_offs += NET_IP_ALIGN; 1478 frag_len -= NET_IP_ALIGN; 1479 frag_nr = 0; 1480 1481 } else if (!skb) { 1482 put_page(page); 1483 continue; 1484 } 1485 1486 if (word3.bits32 & EOF_BIT) 1487 frag_len = frame_len - skb->len; 1488 1489 /* append page frag to skb */ 1490 if (frag_nr == MAX_SKB_FRAGS) 1491 goto err_drop; 1492 1493 if (frag_len == 0) 1494 netdev_err(netdev, "Received fragment with len = 0\n"); 1495 1496 skb_fill_page_desc(skb, frag_nr, page, page_offs, frag_len); 1497 skb->len += frag_len; 1498 skb->data_len += frag_len; 1499 skb->truesize += frag_len; 1500 frag_nr++; 1501 1502 if (word3.bits32 & EOF_BIT) { 1503 napi_gro_frags(&port->napi); 1504 skb = NULL; 1505 --budget; 1506 } 1507 continue; 1508 1509 err_drop: 1510 if (skb) { 1511 napi_free_frags(&port->napi); 1512 skb = NULL; 1513 } 1514 1515 if (mapping) 1516 put_page(page); 1517 1518 port->stats.rx_dropped++; 1519 } 1520 1521 writew(r, ptr_reg); 1522 return budget; 1523 } 1524 1525 static int gmac_napi_poll(struct napi_struct *napi, int budget) 1526 { 1527 struct gemini_ethernet_port *port = netdev_priv(napi->dev); 1528 struct gemini_ethernet *geth = port->geth; 1529 unsigned int freeq_threshold; 1530 unsigned int received; 1531 1532 freeq_threshold = 1 << (geth->freeq_order - 1); 1533 u64_stats_update_begin(&port->rx_stats_syncp); 1534 1535 received = gmac_rx(napi->dev, budget); 1536 if (received < budget) { 1537 napi_gro_flush(napi, false); 1538 napi_complete_done(napi, received); 1539 gmac_enable_rx_irq(napi->dev, 1); 1540 ++port->rx_napi_exits; 1541 } 1542 1543 port->freeq_refill += (budget - received); 1544 if (port->freeq_refill > freeq_threshold) { 1545 port->freeq_refill -= freeq_threshold; 1546 geth_fill_freeq(geth, true); 1547 } 1548 1549 u64_stats_update_end(&port->rx_stats_syncp); 1550 return received; 1551 } 1552 1553 static void gmac_dump_dma_state(struct net_device *netdev) 1554 { 1555 struct gemini_ethernet_port *port = netdev_priv(netdev); 1556 struct gemini_ethernet *geth = port->geth; 1557 void __iomem *ptr_reg; 1558 u32 reg[5]; 1559 1560 /* Interrupt status */ 1561 reg[0] = readl(geth->base + GLOBAL_INTERRUPT_STATUS_0_REG); 1562 reg[1] = readl(geth->base + GLOBAL_INTERRUPT_STATUS_1_REG); 1563 reg[2] = readl(geth->base + GLOBAL_INTERRUPT_STATUS_2_REG); 1564 reg[3] = readl(geth->base + GLOBAL_INTERRUPT_STATUS_3_REG); 1565 reg[4] = readl(geth->base + 
GLOBAL_INTERRUPT_STATUS_4_REG); 1566 netdev_err(netdev, "IRQ status: 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n", 1567 reg[0], reg[1], reg[2], reg[3], reg[4]); 1568 1569 /* Interrupt enable */ 1570 reg[0] = readl(geth->base + GLOBAL_INTERRUPT_ENABLE_0_REG); 1571 reg[1] = readl(geth->base + GLOBAL_INTERRUPT_ENABLE_1_REG); 1572 reg[2] = readl(geth->base + GLOBAL_INTERRUPT_ENABLE_2_REG); 1573 reg[3] = readl(geth->base + GLOBAL_INTERRUPT_ENABLE_3_REG); 1574 reg[4] = readl(geth->base + GLOBAL_INTERRUPT_ENABLE_4_REG); 1575 netdev_err(netdev, "IRQ enable: 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n", 1576 reg[0], reg[1], reg[2], reg[3], reg[4]); 1577 1578 /* RX DMA status */ 1579 reg[0] = readl(port->dma_base + GMAC_DMA_RX_FIRST_DESC_REG); 1580 reg[1] = readl(port->dma_base + GMAC_DMA_RX_CURR_DESC_REG); 1581 reg[2] = GET_RPTR(port->rxq_rwptr); 1582 reg[3] = GET_WPTR(port->rxq_rwptr); 1583 netdev_err(netdev, "RX DMA regs: 0x%08x 0x%08x, ptr: %u %u\n", 1584 reg[0], reg[1], reg[2], reg[3]); 1585 1586 reg[0] = readl(port->dma_base + GMAC_DMA_RX_DESC_WORD0_REG); 1587 reg[1] = readl(port->dma_base + GMAC_DMA_RX_DESC_WORD1_REG); 1588 reg[2] = readl(port->dma_base + GMAC_DMA_RX_DESC_WORD2_REG); 1589 reg[3] = readl(port->dma_base + GMAC_DMA_RX_DESC_WORD3_REG); 1590 netdev_err(netdev, "RX DMA descriptor: 0x%08x 0x%08x 0x%08x 0x%08x\n", 1591 reg[0], reg[1], reg[2], reg[3]); 1592 1593 /* TX DMA status */ 1594 ptr_reg = port->dma_base + GMAC_SW_TX_QUEUE0_PTR_REG; 1595 1596 reg[0] = readl(port->dma_base + GMAC_DMA_TX_FIRST_DESC_REG); 1597 reg[1] = readl(port->dma_base + GMAC_DMA_TX_CURR_DESC_REG); 1598 reg[2] = GET_RPTR(ptr_reg); 1599 reg[3] = GET_WPTR(ptr_reg); 1600 netdev_err(netdev, "TX DMA regs: 0x%08x 0x%08x, ptr: %u %u\n", 1601 reg[0], reg[1], reg[2], reg[3]); 1602 1603 reg[0] = readl(port->dma_base + GMAC_DMA_TX_DESC_WORD0_REG); 1604 reg[1] = readl(port->dma_base + GMAC_DMA_TX_DESC_WORD1_REG); 1605 reg[2] = readl(port->dma_base + GMAC_DMA_TX_DESC_WORD2_REG); 1606 reg[3] = readl(port->dma_base + GMAC_DMA_TX_DESC_WORD3_REG); 1607 netdev_err(netdev, "TX DMA descriptor: 0x%08x 0x%08x 0x%08x 0x%08x\n", 1608 reg[0], reg[1], reg[2], reg[3]); 1609 1610 /* FREE queues status */ 1611 ptr_reg = geth->base + GLOBAL_SWFQ_RWPTR_REG; 1612 1613 reg[0] = GET_RPTR(ptr_reg); 1614 reg[1] = GET_WPTR(ptr_reg); 1615 1616 ptr_reg = geth->base + GLOBAL_HWFQ_RWPTR_REG; 1617 1618 reg[2] = GET_RPTR(ptr_reg); 1619 reg[3] = GET_WPTR(ptr_reg); 1620 netdev_err(netdev, "FQ SW ptr: %u %u, HW ptr: %u %u\n", 1621 reg[0], reg[1], reg[2], reg[3]); 1622 } 1623 1624 static void gmac_update_hw_stats(struct net_device *netdev) 1625 { 1626 struct gemini_ethernet_port *port = netdev_priv(netdev); 1627 unsigned int rx_discards, rx_mcast, rx_bcast; 1628 struct gemini_ethernet *geth = port->geth; 1629 unsigned long flags; 1630 1631 spin_lock_irqsave(&geth->irq_lock, flags); 1632 u64_stats_update_begin(&port->ir_stats_syncp); 1633 1634 rx_discards = readl(port->gmac_base + GMAC_IN_DISCARDS); 1635 port->hw_stats[0] += rx_discards; 1636 port->hw_stats[1] += readl(port->gmac_base + GMAC_IN_ERRORS); 1637 rx_mcast = readl(port->gmac_base + GMAC_IN_MCAST); 1638 port->hw_stats[2] += rx_mcast; 1639 rx_bcast = readl(port->gmac_base + GMAC_IN_BCAST); 1640 port->hw_stats[3] += rx_bcast; 1641 port->hw_stats[4] += readl(port->gmac_base + GMAC_IN_MAC1); 1642 port->hw_stats[5] += readl(port->gmac_base + GMAC_IN_MAC2); 1643 1644 port->stats.rx_missed_errors += rx_discards; 1645 port->stats.multicast += rx_mcast; 1646 port->stats.multicast += rx_bcast; 1647 1648 
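/* Acknowledge this port's MIB counters interrupt now that the counters have been read */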
writel(GMAC0_MIB_INT_BIT << (netdev->dev_id * 8), 1649 geth->base + GLOBAL_INTERRUPT_STATUS_4_REG); 1650 1651 u64_stats_update_end(&port->ir_stats_syncp); 1652 spin_unlock_irqrestore(&geth->irq_lock, flags); 1653 } 1654 1655 /** 1656 * gmac_get_intr_flags() - get interrupt status flags for a port from 1657 * @netdev: the net device for the port to get flags from 1658 * @i: the interrupt status register 0..4 1659 */ 1660 static u32 gmac_get_intr_flags(struct net_device *netdev, int i) 1661 { 1662 struct gemini_ethernet_port *port = netdev_priv(netdev); 1663 struct gemini_ethernet *geth = port->geth; 1664 void __iomem *irqif_reg, *irqen_reg; 1665 unsigned int offs, val; 1666 1667 /* Calculate the offset using the stride of the status registers */ 1668 offs = i * (GLOBAL_INTERRUPT_STATUS_1_REG - 1669 GLOBAL_INTERRUPT_STATUS_0_REG); 1670 1671 irqif_reg = geth->base + GLOBAL_INTERRUPT_STATUS_0_REG + offs; 1672 irqen_reg = geth->base + GLOBAL_INTERRUPT_ENABLE_0_REG + offs; 1673 1674 val = readl(irqif_reg) & readl(irqen_reg); 1675 return val; 1676 } 1677 1678 static enum hrtimer_restart gmac_coalesce_delay_expired(struct hrtimer *timer) 1679 { 1680 struct gemini_ethernet_port *port = 1681 container_of(timer, struct gemini_ethernet_port, 1682 rx_coalesce_timer); 1683 1684 napi_schedule(&port->napi); 1685 return HRTIMER_NORESTART; 1686 } 1687 1688 static irqreturn_t gmac_irq(int irq, void *data) 1689 { 1690 struct gemini_ethernet_port *port; 1691 struct net_device *netdev = data; 1692 struct gemini_ethernet *geth; 1693 u32 val, orr = 0; 1694 1695 port = netdev_priv(netdev); 1696 geth = port->geth; 1697 1698 val = gmac_get_intr_flags(netdev, 0); 1699 orr |= val; 1700 1701 if (val & (GMAC0_IRQ0_2 << (netdev->dev_id * 2))) { 1702 /* Oh, crap */ 1703 netdev_err(netdev, "hw failure/sw bug\n"); 1704 gmac_dump_dma_state(netdev); 1705 1706 /* don't know how to recover, just reduce losses */ 1707 gmac_enable_irq(netdev, 0); 1708 return IRQ_HANDLED; 1709 } 1710 1711 if (val & (GMAC0_IRQ0_TXQ0_INTS << (netdev->dev_id * 6))) 1712 gmac_tx_irq(netdev, 0); 1713 1714 val = gmac_get_intr_flags(netdev, 1); 1715 orr |= val; 1716 1717 if (val & (DEFAULT_Q0_INT_BIT << netdev->dev_id)) { 1718 gmac_enable_rx_irq(netdev, 0); 1719 1720 if (!port->rx_coalesce_nsecs) { 1721 napi_schedule(&port->napi); 1722 } else { 1723 ktime_t ktime; 1724 1725 ktime = ktime_set(0, port->rx_coalesce_nsecs); 1726 hrtimer_start(&port->rx_coalesce_timer, ktime, 1727 HRTIMER_MODE_REL); 1728 } 1729 } 1730 1731 val = gmac_get_intr_flags(netdev, 4); 1732 orr |= val; 1733 1734 if (val & (GMAC0_MIB_INT_BIT << (netdev->dev_id * 8))) 1735 gmac_update_hw_stats(netdev); 1736 1737 if (val & (GMAC0_RX_OVERRUN_INT_BIT << (netdev->dev_id * 8))) { 1738 spin_lock(&geth->irq_lock); 1739 writel(GMAC0_RXDERR_INT_BIT << (netdev->dev_id * 8), 1740 geth->base + GLOBAL_INTERRUPT_STATUS_4_REG); 1741 u64_stats_update_begin(&port->ir_stats_syncp); 1742 ++port->stats.rx_fifo_errors; 1743 u64_stats_update_end(&port->ir_stats_syncp); 1744 spin_unlock(&geth->irq_lock); 1745 } 1746 1747 return orr ? 
IRQ_HANDLED : IRQ_NONE; 1748 } 1749 1750 static void gmac_start_dma(struct gemini_ethernet_port *port) 1751 { 1752 void __iomem *dma_ctrl_reg = port->dma_base + GMAC_DMA_CTRL_REG; 1753 union gmac_dma_ctrl dma_ctrl; 1754 1755 dma_ctrl.bits32 = readl(dma_ctrl_reg); 1756 dma_ctrl.bits.rd_enable = 1; 1757 dma_ctrl.bits.td_enable = 1; 1758 dma_ctrl.bits.loopback = 0; 1759 dma_ctrl.bits.drop_small_ack = 0; 1760 dma_ctrl.bits.rd_insert_bytes = NET_IP_ALIGN; 1761 dma_ctrl.bits.rd_prot = HPROT_DATA_CACHE | HPROT_PRIVILIGED; 1762 dma_ctrl.bits.rd_burst_size = HBURST_INCR8; 1763 dma_ctrl.bits.rd_bus = HSIZE_8; 1764 dma_ctrl.bits.td_prot = HPROT_DATA_CACHE; 1765 dma_ctrl.bits.td_burst_size = HBURST_INCR8; 1766 dma_ctrl.bits.td_bus = HSIZE_8; 1767 1768 writel(dma_ctrl.bits32, dma_ctrl_reg); 1769 } 1770 1771 static void gmac_stop_dma(struct gemini_ethernet_port *port) 1772 { 1773 void __iomem *dma_ctrl_reg = port->dma_base + GMAC_DMA_CTRL_REG; 1774 union gmac_dma_ctrl dma_ctrl; 1775 1776 dma_ctrl.bits32 = readl(dma_ctrl_reg); 1777 dma_ctrl.bits.rd_enable = 0; 1778 dma_ctrl.bits.td_enable = 0; 1779 writel(dma_ctrl.bits32, dma_ctrl_reg); 1780 } 1781 1782 static int gmac_open(struct net_device *netdev) 1783 { 1784 struct gemini_ethernet_port *port = netdev_priv(netdev); 1785 int err; 1786 1787 err = request_irq(netdev->irq, gmac_irq, 1788 IRQF_SHARED, netdev->name, netdev); 1789 if (err) { 1790 netdev_err(netdev, "no IRQ\n"); 1791 return err; 1792 } 1793 1794 netif_carrier_off(netdev); 1795 phy_start(netdev->phydev); 1796 1797 err = geth_resize_freeq(port); 1798 /* It's fine if it's just busy, the other port has set up 1799 * the freeq in that case. 1800 */ 1801 if (err && (err != -EBUSY)) { 1802 netdev_err(netdev, "could not resize freeq\n"); 1803 goto err_stop_phy; 1804 } 1805 1806 err = gmac_setup_rxq(netdev); 1807 if (err) { 1808 netdev_err(netdev, "could not setup RXQ\n"); 1809 goto err_stop_phy; 1810 } 1811 1812 err = gmac_setup_txqs(netdev); 1813 if (err) { 1814 netdev_err(netdev, "could not setup TXQs\n"); 1815 gmac_cleanup_rxq(netdev); 1816 goto err_stop_phy; 1817 } 1818 1819 napi_enable(&port->napi); 1820 1821 gmac_start_dma(port); 1822 gmac_enable_irq(netdev, 1); 1823 gmac_enable_tx_rx(netdev); 1824 netif_tx_start_all_queues(netdev); 1825 1826 hrtimer_init(&port->rx_coalesce_timer, CLOCK_MONOTONIC, 1827 HRTIMER_MODE_REL); 1828 port->rx_coalesce_timer.function = &gmac_coalesce_delay_expired; 1829 1830 netdev_dbg(netdev, "opened\n"); 1831 1832 return 0; 1833 1834 err_stop_phy: 1835 phy_stop(netdev->phydev); 1836 free_irq(netdev->irq, netdev); 1837 return err; 1838 } 1839 1840 static int gmac_stop(struct net_device *netdev) 1841 { 1842 struct gemini_ethernet_port *port = netdev_priv(netdev); 1843 1844 hrtimer_cancel(&port->rx_coalesce_timer); 1845 netif_tx_stop_all_queues(netdev); 1846 gmac_disable_tx_rx(netdev); 1847 gmac_stop_dma(port); 1848 napi_disable(&port->napi); 1849 1850 gmac_enable_irq(netdev, 0); 1851 gmac_cleanup_rxq(netdev); 1852 gmac_cleanup_txqs(netdev); 1853 1854 phy_stop(netdev->phydev); 1855 free_irq(netdev->irq, netdev); 1856 1857 gmac_update_hw_stats(netdev); 1858 return 0; 1859 } 1860 1861 static void gmac_set_rx_mode(struct net_device *netdev) 1862 { 1863 struct gemini_ethernet_port *port = netdev_priv(netdev); 1864 union gmac_rx_fltr filter = { .bits = { 1865 .broadcast = 1, 1866 .multicast = 1, 1867 .unicast = 1, 1868 } }; 1869 struct netdev_hw_addr *ha; 1870 unsigned int bit_nr; 1871 u32 mc_filter[2]; 1872 1873 mc_filter[1] = 0; 1874 mc_filter[0] = 0; 1875 1876 if 
(netdev->flags & IFF_PROMISC) { 1877 filter.bits.error = 1; 1878 filter.bits.promiscuous = 1; 1879 mc_filter[1] = ~0; 1880 mc_filter[0] = ~0; 1881 } else if (netdev->flags & IFF_ALLMULTI) { 1882 mc_filter[1] = ~0; 1883 mc_filter[0] = ~0; 1884 } else { 1885 netdev_for_each_mc_addr(ha, netdev) { 1886 bit_nr = ~crc32_le(~0, ha->addr, ETH_ALEN) & 0x3f; 1887 mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 0x1f); 1888 } 1889 } 1890 1891 writel(mc_filter[0], port->gmac_base + GMAC_MCAST_FIL0); 1892 writel(mc_filter[1], port->gmac_base + GMAC_MCAST_FIL1); 1893 writel(filter.bits32, port->gmac_base + GMAC_RX_FLTR); 1894 } 1895 1896 static void gmac_write_mac_address(struct net_device *netdev) 1897 { 1898 struct gemini_ethernet_port *port = netdev_priv(netdev); 1899 __le32 addr[3]; 1900 1901 memset(addr, 0, sizeof(addr)); 1902 memcpy(addr, netdev->dev_addr, ETH_ALEN); 1903 1904 writel(le32_to_cpu(addr[0]), port->gmac_base + GMAC_STA_ADD0); 1905 writel(le32_to_cpu(addr[1]), port->gmac_base + GMAC_STA_ADD1); 1906 writel(le32_to_cpu(addr[2]), port->gmac_base + GMAC_STA_ADD2); 1907 } 1908 1909 static int gmac_set_mac_address(struct net_device *netdev, void *addr) 1910 { 1911 struct sockaddr *sa = addr; 1912 1913 eth_hw_addr_set(netdev, sa->sa_data); 1914 gmac_write_mac_address(netdev); 1915 1916 return 0; 1917 } 1918 1919 static void gmac_clear_hw_stats(struct net_device *netdev) 1920 { 1921 struct gemini_ethernet_port *port = netdev_priv(netdev); 1922 1923 readl(port->gmac_base + GMAC_IN_DISCARDS); 1924 readl(port->gmac_base + GMAC_IN_ERRORS); 1925 readl(port->gmac_base + GMAC_IN_MCAST); 1926 readl(port->gmac_base + GMAC_IN_BCAST); 1927 readl(port->gmac_base + GMAC_IN_MAC1); 1928 readl(port->gmac_base + GMAC_IN_MAC2); 1929 } 1930 1931 static void gmac_get_stats64(struct net_device *netdev, 1932 struct rtnl_link_stats64 *stats) 1933 { 1934 struct gemini_ethernet_port *port = netdev_priv(netdev); 1935 unsigned int start; 1936 1937 gmac_update_hw_stats(netdev); 1938 1939 /* Racing with RX NAPI */ 1940 do { 1941 start = u64_stats_fetch_begin(&port->rx_stats_syncp); 1942 1943 stats->rx_packets = port->stats.rx_packets; 1944 stats->rx_bytes = port->stats.rx_bytes; 1945 stats->rx_errors = port->stats.rx_errors; 1946 stats->rx_dropped = port->stats.rx_dropped; 1947 1948 stats->rx_length_errors = port->stats.rx_length_errors; 1949 stats->rx_over_errors = port->stats.rx_over_errors; 1950 stats->rx_crc_errors = port->stats.rx_crc_errors; 1951 stats->rx_frame_errors = port->stats.rx_frame_errors; 1952 1953 } while (u64_stats_fetch_retry(&port->rx_stats_syncp, start)); 1954 1955 /* Racing with MIB and TX completion interrupts */ 1956 do { 1957 start = u64_stats_fetch_begin(&port->ir_stats_syncp); 1958 1959 stats->tx_errors = port->stats.tx_errors; 1960 stats->tx_packets = port->stats.tx_packets; 1961 stats->tx_bytes = port->stats.tx_bytes; 1962 1963 stats->multicast = port->stats.multicast; 1964 stats->rx_missed_errors = port->stats.rx_missed_errors; 1965 stats->rx_fifo_errors = port->stats.rx_fifo_errors; 1966 1967 } while (u64_stats_fetch_retry(&port->ir_stats_syncp, start)); 1968 1969 /* Racing with hard_start_xmit */ 1970 do { 1971 start = u64_stats_fetch_begin(&port->tx_stats_syncp); 1972 1973 stats->tx_dropped = port->stats.tx_dropped; 1974 1975 } while (u64_stats_fetch_retry(&port->tx_stats_syncp, start)); 1976 1977 stats->rx_dropped += stats->rx_missed_errors; 1978 } 1979 1980 static int gmac_change_mtu(struct net_device *netdev, int new_mtu) 1981 { 1982 int max_len = gmac_pick_rx_max_len(new_mtu); 1983 1984 if 
static int gmac_change_mtu(struct net_device *netdev, int new_mtu)
{
	int max_len = gmac_pick_rx_max_len(new_mtu);

	if (max_len < 0)
		return -EINVAL;

	gmac_disable_tx_rx(netdev);

	netdev->mtu = new_mtu;
	gmac_update_config0_reg(netdev, max_len << CONFIG0_MAXLEN_SHIFT,
				CONFIG0_MAXLEN_MASK);

	netdev_update_features(netdev);

	gmac_enable_tx_rx(netdev);

	return 0;
}

static int gmac_set_features(struct net_device *netdev,
			     netdev_features_t features)
{
	struct gemini_ethernet_port *port = netdev_priv(netdev);
	int enable = features & NETIF_F_RXCSUM;
	unsigned long flags;
	u32 reg;

	spin_lock_irqsave(&port->config_lock, flags);

	reg = readl(port->gmac_base + GMAC_CONFIG0);
	reg = enable ? reg | CONFIG0_RX_CHKSUM : reg & ~CONFIG0_RX_CHKSUM;
	writel(reg, port->gmac_base + GMAC_CONFIG0);

	spin_unlock_irqrestore(&port->config_lock, flags);
	return 0;
}

static int gmac_get_sset_count(struct net_device *netdev, int sset)
{
	return sset == ETH_SS_STATS ? GMAC_STATS_NUM : 0;
}

static void gmac_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
{
	if (stringset != ETH_SS_STATS)
		return;

	memcpy(data, gmac_stats_strings, sizeof(gmac_stats_strings));
}

static void gmac_get_ethtool_stats(struct net_device *netdev,
				   struct ethtool_stats *estats, u64 *values)
{
	struct gemini_ethernet_port *port = netdev_priv(netdev);
	unsigned int start;
	u64 *p;
	int i;

	gmac_update_hw_stats(netdev);

	/* Racing with MIB interrupt */
	do {
		p = values;
		start = u64_stats_fetch_begin(&port->ir_stats_syncp);

		for (i = 0; i < RX_STATS_NUM; i++)
			*p++ = port->hw_stats[i];

	} while (u64_stats_fetch_retry(&port->ir_stats_syncp, start));
	values = p;

	/* Racing with RX NAPI */
	do {
		p = values;
		start = u64_stats_fetch_begin(&port->rx_stats_syncp);

		for (i = 0; i < RX_STATUS_NUM; i++)
			*p++ = port->rx_stats[i];
		for (i = 0; i < RX_CHKSUM_NUM; i++)
			*p++ = port->rx_csum_stats[i];
		*p++ = port->rx_napi_exits;

	} while (u64_stats_fetch_retry(&port->rx_stats_syncp, start));
	values = p;

	/* Racing with TX start_xmit; write through p so that a retry
	 * restarts from the same slot instead of advancing past the buffer.
	 */
	do {
		p = values;
		start = u64_stats_fetch_begin(&port->tx_stats_syncp);

		for (i = 0; i < TX_MAX_FRAGS; i++) {
			*p++ = port->tx_frag_stats[i];
			port->tx_frag_stats[i] = 0;
		}
		*p++ = port->tx_frags_linearized;
		*p++ = port->tx_hw_csummed;

	} while (u64_stats_fetch_retry(&port->tx_stats_syncp, start));
}

static int gmac_get_ksettings(struct net_device *netdev,
			      struct ethtool_link_ksettings *cmd)
{
	if (!netdev->phydev)
		return -ENXIO;
	phy_ethtool_ksettings_get(netdev->phydev, cmd);

	return 0;
}

static int gmac_set_ksettings(struct net_device *netdev,
			      const struct ethtool_link_ksettings *cmd)
{
	if (!netdev->phydev)
		return -ENXIO;
	return phy_ethtool_ksettings_set(netdev->phydev, cmd);
}

static int gmac_nway_reset(struct net_device *netdev)
{
	if (!netdev->phydev)
		return -ENXIO;
	return phy_start_aneg(netdev->phydev);
}
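/* Pause parameters are reported straight from the flow control enable
 * bits in GMAC_CONFIG0. There is no set_pauseparam operation and
 * autoneg is always reported as on: the bits are presumed to follow
 * what the PHY negotiated rather than a manual setting.
 */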
static void gmac_get_pauseparam(struct net_device *netdev,
				struct ethtool_pauseparam *pparam)
{
	struct gemini_ethernet_port *port = netdev_priv(netdev);
	union gmac_config0 config0;

	config0.bits32 = readl(port->gmac_base + GMAC_CONFIG0);

	pparam->rx_pause = config0.bits.rx_fc_en;
	pparam->tx_pause = config0.bits.tx_fc_en;
	pparam->autoneg = true;
}

static void gmac_get_ringparam(struct net_device *netdev,
			       struct ethtool_ringparam *rp,
			       struct kernel_ethtool_ringparam *kernel_rp,
			       struct netlink_ext_ack *extack)
{
	struct gemini_ethernet_port *port = netdev_priv(netdev);

	readl(port->gmac_base + GMAC_CONFIG0);

	rp->rx_max_pending = 1 << 15;
	rp->rx_mini_max_pending = 0;
	rp->rx_jumbo_max_pending = 0;
	rp->tx_max_pending = 1 << 15;

	rp->rx_pending = 1 << port->rxq_order;
	rp->rx_mini_pending = 0;
	rp->rx_jumbo_pending = 0;
	rp->tx_pending = 1 << port->txq_order;
}

static int gmac_set_ringparam(struct net_device *netdev,
			      struct ethtool_ringparam *rp,
			      struct kernel_ethtool_ringparam *kernel_rp,
			      struct netlink_ext_ack *extack)
{
	struct gemini_ethernet_port *port = netdev_priv(netdev);
	int err = 0;

	if (netif_running(netdev))
		return -EBUSY;

	if (rp->rx_pending) {
		port->rxq_order = min(15, ilog2(rp->rx_pending - 1) + 1);
		err = geth_resize_freeq(port);
	}
	if (rp->tx_pending) {
		port->txq_order = min(15, ilog2(rp->tx_pending - 1) + 1);
		port->irq_every_tx_packets = 1 << (port->txq_order - 2);
	}

	return err;
}
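/* Interrupt coalescing: TX completion interrupts are only requested
 * every irq_every_tx_packets descriptors, and RX interrupts can be
 * deferred by the rx_coalesce_timer hrtimer. ethtool expresses the RX
 * delay in microseconds while the driver keeps it in nanoseconds, hence
 * the conversions below. Example usage:
 *
 *   ethtool -C eth0 rx-usecs 100 tx-frames 16
 */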
"1" : "0"); 2213 } 2214 2215 static const struct net_device_ops gmac_351x_ops = { 2216 .ndo_init = gmac_init, 2217 .ndo_open = gmac_open, 2218 .ndo_stop = gmac_stop, 2219 .ndo_start_xmit = gmac_start_xmit, 2220 .ndo_tx_timeout = gmac_tx_timeout, 2221 .ndo_set_rx_mode = gmac_set_rx_mode, 2222 .ndo_set_mac_address = gmac_set_mac_address, 2223 .ndo_get_stats64 = gmac_get_stats64, 2224 .ndo_change_mtu = gmac_change_mtu, 2225 .ndo_set_features = gmac_set_features, 2226 }; 2227 2228 static const struct ethtool_ops gmac_351x_ethtool_ops = { 2229 .supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS | 2230 ETHTOOL_COALESCE_MAX_FRAMES, 2231 .get_sset_count = gmac_get_sset_count, 2232 .get_strings = gmac_get_strings, 2233 .get_ethtool_stats = gmac_get_ethtool_stats, 2234 .get_link = ethtool_op_get_link, 2235 .get_link_ksettings = gmac_get_ksettings, 2236 .set_link_ksettings = gmac_set_ksettings, 2237 .nway_reset = gmac_nway_reset, 2238 .get_pauseparam = gmac_get_pauseparam, 2239 .get_ringparam = gmac_get_ringparam, 2240 .set_ringparam = gmac_set_ringparam, 2241 .get_coalesce = gmac_get_coalesce, 2242 .set_coalesce = gmac_set_coalesce, 2243 .get_msglevel = gmac_get_msglevel, 2244 .set_msglevel = gmac_set_msglevel, 2245 .get_drvinfo = gmac_get_drvinfo, 2246 }; 2247 2248 static irqreturn_t gemini_port_irq_thread(int irq, void *data) 2249 { 2250 unsigned long irqmask = SWFQ_EMPTY_INT_BIT; 2251 struct gemini_ethernet_port *port = data; 2252 struct gemini_ethernet *geth; 2253 unsigned long flags; 2254 2255 geth = port->geth; 2256 /* The queue is half empty so refill it */ 2257 geth_fill_freeq(geth, true); 2258 2259 spin_lock_irqsave(&geth->irq_lock, flags); 2260 /* ACK queue interrupt */ 2261 writel(irqmask, geth->base + GLOBAL_INTERRUPT_STATUS_4_REG); 2262 /* Enable queue interrupt again */ 2263 irqmask |= readl(geth->base + GLOBAL_INTERRUPT_ENABLE_4_REG); 2264 writel(irqmask, geth->base + GLOBAL_INTERRUPT_ENABLE_4_REG); 2265 spin_unlock_irqrestore(&geth->irq_lock, flags); 2266 2267 return IRQ_HANDLED; 2268 } 2269 2270 static irqreturn_t gemini_port_irq(int irq, void *data) 2271 { 2272 struct gemini_ethernet_port *port = data; 2273 struct gemini_ethernet *geth; 2274 irqreturn_t ret = IRQ_NONE; 2275 u32 val, en; 2276 2277 geth = port->geth; 2278 spin_lock(&geth->irq_lock); 2279 2280 val = readl(geth->base + GLOBAL_INTERRUPT_STATUS_4_REG); 2281 en = readl(geth->base + GLOBAL_INTERRUPT_ENABLE_4_REG); 2282 2283 if (val & en & SWFQ_EMPTY_INT_BIT) { 2284 /* Disable the queue empty interrupt while we work on 2285 * processing the queue. Also disable overrun interrupts 2286 * as there is not much we can do about it here. 
static irqreturn_t gemini_port_irq_thread(int irq, void *data)
{
	unsigned long irqmask = SWFQ_EMPTY_INT_BIT;
	struct gemini_ethernet_port *port = data;
	struct gemini_ethernet *geth;
	unsigned long flags;

	geth = port->geth;
	/* The queue is half empty so refill it */
	geth_fill_freeq(geth, true);

	spin_lock_irqsave(&geth->irq_lock, flags);
	/* ACK queue interrupt */
	writel(irqmask, geth->base + GLOBAL_INTERRUPT_STATUS_4_REG);
	/* Enable queue interrupt again */
	irqmask |= readl(geth->base + GLOBAL_INTERRUPT_ENABLE_4_REG);
	writel(irqmask, geth->base + GLOBAL_INTERRUPT_ENABLE_4_REG);
	spin_unlock_irqrestore(&geth->irq_lock, flags);

	return IRQ_HANDLED;
}

static irqreturn_t gemini_port_irq(int irq, void *data)
{
	struct gemini_ethernet_port *port = data;
	struct gemini_ethernet *geth;
	irqreturn_t ret = IRQ_NONE;
	u32 val, en;

	geth = port->geth;
	spin_lock(&geth->irq_lock);

	val = readl(geth->base + GLOBAL_INTERRUPT_STATUS_4_REG);
	en = readl(geth->base + GLOBAL_INTERRUPT_ENABLE_4_REG);

	if (val & en & SWFQ_EMPTY_INT_BIT) {
		/* Disable the queue empty interrupt while we work on
		 * processing the queue. Also disable overrun interrupts
		 * as there is not much we can do about them here.
		 */
		en &= ~(SWFQ_EMPTY_INT_BIT | GMAC0_RX_OVERRUN_INT_BIT
			| GMAC1_RX_OVERRUN_INT_BIT);
		writel(en, geth->base + GLOBAL_INTERRUPT_ENABLE_4_REG);
		ret = IRQ_WAKE_THREAD;
	}

	spin_unlock(&geth->irq_lock);

	return ret;
}

static void gemini_port_remove(struct gemini_ethernet_port *port)
{
	if (port->netdev) {
		phy_disconnect(port->netdev->phydev);
		unregister_netdev(port->netdev);
	}
	clk_disable_unprepare(port->pclk);
	geth_cleanup_freeq(port->geth);
}

static void gemini_ethernet_init(struct gemini_ethernet *geth)
{
	/* Only do this once both ports are online */
	if (geth->initialized)
		return;
	if (geth->port0 && geth->port1)
		geth->initialized = true;
	else
		return;

	writel(0, geth->base + GLOBAL_INTERRUPT_ENABLE_0_REG);
	writel(0, geth->base + GLOBAL_INTERRUPT_ENABLE_1_REG);
	writel(0, geth->base + GLOBAL_INTERRUPT_ENABLE_2_REG);
	writel(0, geth->base + GLOBAL_INTERRUPT_ENABLE_3_REG);
	writel(0, geth->base + GLOBAL_INTERRUPT_ENABLE_4_REG);

	/* Interrupt config:
	 *
	 *	GMAC0 intr bits ------> int0 ----> eth0
	 *	GMAC1 intr bits ------> int1 ----> eth1
	 *	TOE intr -------------> int1 ----> eth1
	 *	Classification Intr --> int0 ----> eth0
	 *	Default Q0 -----------> int0 ----> eth0
	 *	Default Q1 -----------> int1 ----> eth1
	 *	FreeQ intr -----------> int1 ----> eth1
	 */
	writel(0xCCFC0FC0, geth->base + GLOBAL_INTERRUPT_SELECT_0_REG);
	writel(0x00F00002, geth->base + GLOBAL_INTERRUPT_SELECT_1_REG);
	writel(0xFFFFFFFF, geth->base + GLOBAL_INTERRUPT_SELECT_2_REG);
	writel(0xFFFFFFFF, geth->base + GLOBAL_INTERRUPT_SELECT_3_REG);
	writel(0xFF000003, geth->base + GLOBAL_INTERRUPT_SELECT_4_REG);

	/* The edge-triggered interrupts are packed into a level-triggered
	 * one, so ack anything pending.
	 */
	writel(~0, geth->base + GLOBAL_INTERRUPT_STATUS_0_REG);
	writel(~0, geth->base + GLOBAL_INTERRUPT_STATUS_1_REG);
	writel(~0, geth->base + GLOBAL_INTERRUPT_STATUS_2_REG);
	writel(~0, geth->base + GLOBAL_INTERRUPT_STATUS_3_REG);
	writel(~0, geth->base + GLOBAL_INTERRUPT_STATUS_4_REG);

	/* Set up queue */
	writel(0, geth->base + GLOBAL_SW_FREEQ_BASE_SIZE_REG);
	writel(0, geth->base + GLOBAL_HW_FREEQ_BASE_SIZE_REG);
	writel(0, geth->base + GLOBAL_SWFQ_RWPTR_REG);
	writel(0, geth->base + GLOBAL_HWFQ_RWPTR_REG);

	geth->freeq_frag_order = DEFAULT_RX_BUF_ORDER;
	/* This makes the queue resize on probe() so that we
	 * set up and enable the queue IRQ. FIXME: fragile.
	 */
	geth->freeq_order = 1;
}
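/* Capture whatever station address the hardware currently holds,
 * possibly programmed by the boot loader. probe() calls this before
 * resetting the port, and the saved value is used later unless a valid
 * address from the device tree overrides it.
 */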
static void gemini_port_save_mac_addr(struct gemini_ethernet_port *port)
{
	port->mac_addr[0] =
		cpu_to_le32(readl(port->gmac_base + GMAC_STA_ADD0));
	port->mac_addr[1] =
		cpu_to_le32(readl(port->gmac_base + GMAC_STA_ADD1));
	port->mac_addr[2] =
		cpu_to_le32(readl(port->gmac_base + GMAC_STA_ADD2));
}
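/* Each GMAC port is a child platform device of the global ethernet
 * device handled by gemini_ethernet_probe() further down. The port
 * index is derived from the device name, i.e. from the MMIO base of the
 * port registers: 0x60008000 is port 0 (eth0) and 0x6000c000 is
 * port 1 (eth1).
 */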
static int gemini_ethernet_port_probe(struct platform_device *pdev)
{
	char *port_names[2] = { "ethernet0", "ethernet1" };
	struct device_node *np = pdev->dev.of_node;
	struct gemini_ethernet_port *port;
	struct device *dev = &pdev->dev;
	struct gemini_ethernet *geth;
	struct net_device *netdev;
	struct device *parent;
	u8 mac[ETH_ALEN];
	unsigned int id;
	int irq;
	int ret;

	parent = dev->parent;
	geth = dev_get_drvdata(parent);

	if (!strcmp(dev_name(dev), "60008000.ethernet-port"))
		id = 0;
	else if (!strcmp(dev_name(dev), "6000c000.ethernet-port"))
		id = 1;
	else
		return -ENODEV;

	dev_info(dev, "probe %s ID %d\n", dev_name(dev), id);

	netdev = devm_alloc_etherdev_mqs(dev, sizeof(*port), TX_QUEUE_NUM, TX_QUEUE_NUM);
	if (!netdev) {
		dev_err(dev, "Can't allocate ethernet device #%d\n", id);
		return -ENOMEM;
	}

	port = netdev_priv(netdev);
	SET_NETDEV_DEV(netdev, dev);
	port->netdev = netdev;
	port->id = id;
	port->geth = geth;
	port->dev = dev;
	port->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);

	/* DMA memory */
	port->dma_base = devm_platform_get_and_ioremap_resource(pdev, 0, NULL);
	if (IS_ERR(port->dma_base)) {
		dev_err(dev, "get DMA address failed\n");
		return PTR_ERR(port->dma_base);
	}

	/* GMAC config memory */
	port->gmac_base = devm_platform_get_and_ioremap_resource(pdev, 1, NULL);
	if (IS_ERR(port->gmac_base)) {
		dev_err(dev, "get GMAC address failed\n");
		return PTR_ERR(port->gmac_base);
	}

	/* Interrupt */
	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;
	port->irq = irq;

	/* Clock the port */
	port->pclk = devm_clk_get(dev, "PCLK");
	if (IS_ERR(port->pclk)) {
		dev_err(dev, "no PCLK\n");
		return PTR_ERR(port->pclk);
	}
	ret = clk_prepare_enable(port->pclk);
	if (ret)
		return ret;

	/* Maybe there is a nice ethernet address we should use */
	gemini_port_save_mac_addr(port);

	/* Reset the port */
	port->reset = devm_reset_control_get_exclusive(dev, NULL);
	if (IS_ERR(port->reset)) {
		dev_err(dev, "no reset\n");
		ret = PTR_ERR(port->reset);
		goto unprepare;
	}
	reset_control_reset(port->reset);
	usleep_range(100, 500);

	/* Assign pointer in the main state container */
	if (!id)
		geth->port0 = port;
	else
		geth->port1 = port;

	/* This will just be done once both ports are up and reset */
	gemini_ethernet_init(geth);

	platform_set_drvdata(pdev, port);

	/* Set up and register the netdev */
	netdev->dev_id = port->id;
	netdev->irq = irq;
	netdev->netdev_ops = &gmac_351x_ops;
	netdev->ethtool_ops = &gmac_351x_ethtool_ops;

	spin_lock_init(&port->config_lock);
	gmac_clear_hw_stats(netdev);

	netdev->hw_features = GMAC_OFFLOAD_FEATURES;
	netdev->features |= GMAC_OFFLOAD_FEATURES | NETIF_F_GRO;
	/* We can receive jumbo frames up to 10236 bytes but can only
	 * transmit 2047 bytes, so let's accept payloads of 2047 bytes
	 * minus VLAN and ethernet header
	 */
	netdev->min_mtu = ETH_MIN_MTU;
	netdev->max_mtu = MTU_SIZE_BIT_MASK - VLAN_ETH_HLEN;

	port->freeq_refill = 0;
	netif_napi_add(netdev, &port->napi, gmac_napi_poll);

	ret = of_get_mac_address(np, mac);
	if (!ret) {
		dev_info(dev, "Setting macaddr from DT %pM\n", mac);
		memcpy(port->mac_addr, mac, ETH_ALEN);
	}

	if (is_valid_ether_addr((void *)port->mac_addr)) {
		eth_hw_addr_set(netdev, (u8 *)port->mac_addr);
	} else {
		dev_dbg(dev, "ethernet address 0x%08x%08x%08x invalid\n",
			port->mac_addr[0], port->mac_addr[1],
			port->mac_addr[2]);
		dev_info(dev, "using a random ethernet address\n");
		eth_hw_addr_random(netdev);
	}
	gmac_write_mac_address(netdev);

	ret = devm_request_threaded_irq(port->dev,
					port->irq,
					gemini_port_irq,
					gemini_port_irq_thread,
					IRQF_SHARED,
					port_names[port->id],
					port);
	if (ret)
		goto unprepare;

	ret = gmac_setup_phy(netdev);
	if (ret) {
		netdev_err(netdev, "PHY init failed\n");
		goto unprepare;
	}

	ret = register_netdev(netdev);
	if (ret)
		goto unprepare;

	return 0;

unprepare:
	clk_disable_unprepare(port->pclk);
	return ret;
}

static int gemini_ethernet_port_remove(struct platform_device *pdev)
{
	struct gemini_ethernet_port *port = platform_get_drvdata(pdev);

	gemini_port_remove(port);

	return 0;
}

static const struct of_device_id gemini_ethernet_port_of_match[] = {
	{
		.compatible = "cortina,gemini-ethernet-port",
	},
	{},
};
MODULE_DEVICE_TABLE(of, gemini_ethernet_port_of_match);

static struct platform_driver gemini_ethernet_port_driver = {
	.driver = {
		.name = "gemini-ethernet-port",
		.of_match_table = gemini_ethernet_port_of_match,
	},
	.probe = gemini_ethernet_port_probe,
	.remove = gemini_ethernet_port_remove,
};
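/* Probe for the parent device owning the global TOE/GMAC register
 * block: wait for the block to come alive by polling the TOE version
 * register, then spawn the two port child devices described in the
 * device tree. The ports find the state allocated here through their
 * parent's drvdata.
 */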
static int gemini_ethernet_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct gemini_ethernet *geth;
	unsigned int retry = 5;
	u32 val;

	/* Global registers */
	geth = devm_kzalloc(dev, sizeof(*geth), GFP_KERNEL);
	if (!geth)
		return -ENOMEM;
	geth->base = devm_platform_get_and_ioremap_resource(pdev, 0, NULL);
	if (IS_ERR(geth->base))
		return PTR_ERR(geth->base);
	geth->dev = dev;

	/* Wait for ports to stabilize */
	do {
		udelay(2);
		val = readl(geth->base + GLOBAL_TOE_VERSION_REG);
		barrier();
	} while (!val && --retry);
	if (!retry) {
		dev_err(dev, "failed to reset ethernet\n");
		return -EIO;
	}
	dev_info(dev, "Ethernet device ID: 0x%03x, revision 0x%01x\n",
		 (val >> 4) & 0xFFFU, val & 0xFU);

	spin_lock_init(&geth->irq_lock);
	spin_lock_init(&geth->freeq_lock);

	/* The children will use this */
	platform_set_drvdata(pdev, geth);

	/* Spawn child devices for the two ports */
	return devm_of_platform_populate(dev);
}

static int gemini_ethernet_remove(struct platform_device *pdev)
{
	struct gemini_ethernet *geth = platform_get_drvdata(pdev);

	geth_cleanup_freeq(geth);
	geth->initialized = false;

	return 0;
}

static const struct of_device_id gemini_ethernet_of_match[] = {
	{
		.compatible = "cortina,gemini-ethernet",
	},
	{},
};
MODULE_DEVICE_TABLE(of, gemini_ethernet_of_match);

static struct platform_driver gemini_ethernet_driver = {
	.driver = {
		.name = DRV_NAME,
		.of_match_table = gemini_ethernet_of_match,
	},
	.probe = gemini_ethernet_probe,
	.remove = gemini_ethernet_remove,
};

static int __init gemini_ethernet_module_init(void)
{
	int ret;

	ret = platform_driver_register(&gemini_ethernet_port_driver);
	if (ret)
		return ret;

	ret = platform_driver_register(&gemini_ethernet_driver);
	if (ret) {
		platform_driver_unregister(&gemini_ethernet_port_driver);
		return ret;
	}

	return 0;
}
module_init(gemini_ethernet_module_init);

static void __exit gemini_ethernet_module_exit(void)
{
	platform_driver_unregister(&gemini_ethernet_driver);
	platform_driver_unregister(&gemini_ethernet_port_driver);
}
module_exit(gemini_ethernet_module_exit);

MODULE_AUTHOR("Linus Walleij <linus.walleij@linaro.org>");
MODULE_DESCRIPTION("StorLink SL351x (Gemini) ethernet driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:" DRV_NAME);