/*******************************************************************************
  This is the driver for the ST MAC 10/100/1000 on-chip Ethernet controllers.
  ST Ethernet IPs are built around a Synopsys IP Core.

	Copyright(C) 2007-2011 STMicroelectronics Ltd

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>

  Documentation available at:
	http://www.stlinux.com
  Support available at:
	https://bugzilla.stlinux.com/
*******************************************************************************/

#include <linux/clk.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/if_ether.h>
#include <linux/crc32.h>
#include <linux/mii.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/prefetch.h>
#ifdef CONFIG_STMMAC_DEBUG_FS
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#endif
#include "stmmac.h"

#undef STMMAC_DEBUG
/*#define STMMAC_DEBUG*/
#ifdef STMMAC_DEBUG
#define DBG(nlevel, klevel, fmt, args...) \
		((void)(netif_msg_##nlevel(priv) && \
		printk(KERN_##klevel fmt, ## args)))
#else
#define DBG(nlevel, klevel, fmt, args...) do { } while (0)
#endif

#undef STMMAC_RX_DEBUG
/*#define STMMAC_RX_DEBUG*/
#ifdef STMMAC_RX_DEBUG
#define RX_DBG(fmt, args...) printk(fmt, ## args)
#else
#define RX_DBG(fmt, args...) do { } while (0)
#endif

#undef STMMAC_XMIT_DEBUG
/*#define STMMAC_XMIT_DEBUG*/
#ifdef STMMAC_XMIT_DEBUG
#define TX_DBG(fmt, args...) printk(fmt, ## args)
#else
#define TX_DBG(fmt, args...) do { } while (0)
#endif
#define STMMAC_ALIGN(x)	L1_CACHE_ALIGN(x)
#define JUMBO_LEN	9000

/* Module parameters */
#define TX_TIMEO 5000 /* default 5 seconds */
static int watchdog = TX_TIMEO;
module_param(watchdog, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(watchdog, "Transmit timeout in milliseconds");

static int debug = -1;		/* -1: default, 0: no output, 16: all */
module_param(debug, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "Message Level (0: no output, 16: all)");

int phyaddr = -1;
module_param(phyaddr, int, S_IRUGO);
MODULE_PARM_DESC(phyaddr, "Physical device address");

#define DMA_TX_SIZE 256
static int dma_txsize = DMA_TX_SIZE;
module_param(dma_txsize, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(dma_txsize, "Number of descriptors in the TX list");

#define DMA_RX_SIZE 256
static int dma_rxsize = DMA_RX_SIZE;
module_param(dma_rxsize, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(dma_rxsize, "Number of descriptors in the RX list");

static int flow_ctrl = FLOW_OFF;
module_param(flow_ctrl, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(flow_ctrl, "Flow control ability [on/off]");

static int pause = PAUSE_TIME;
module_param(pause, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(pause, "Flow Control Pause Time");

#define TC_DEFAULT 64
static int tc = TC_DEFAULT;
module_param(tc, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(tc, "DMA threshold control value");

/* Take care when tuning this parameter: consider both the hardware
 * capability and the network stability/performance impact.
 * Many tests showed that ~4ms latency seems to be good enough. */
#ifdef CONFIG_STMMAC_TIMER
#define DEFAULT_PERIODIC_RATE	256
static int tmrate = DEFAULT_PERIODIC_RATE;
module_param(tmrate, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(tmrate, "External timer freq. (default: 256Hz)");
#endif

#define DMA_BUFFER_SIZE	BUF_SIZE_2KiB
static int buf_sz = DMA_BUFFER_SIZE;
module_param(buf_sz, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(buf_sz, "DMA buffer size");

static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
				      NETIF_MSG_LINK | NETIF_MSG_IFUP |
				      NETIF_MSG_IFDOWN | NETIF_MSG_TIMER);

#define STMMAC_DEFAULT_LPI_TIMER	1000
static int eee_timer = STMMAC_DEFAULT_LPI_TIMER;
module_param(eee_timer, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec");
#define STMMAC_LPI_TIMER(x) (jiffies + msecs_to_jiffies(x))
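/* Usage example (illustrative, not from the original sources): the
 * parameters above can be set at load time and, for the S_IWUSR ones,
 * changed at run time through sysfs, e.g.:
 *
 *   modprobe stmmac dma_txsize=512 flow_ctrl=1 debug=16
 *   echo 128 > /sys/module/stmmac/parameters/tc
 *
 * Invalid values are replaced with the defaults by stmmac_verify_args().
 */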
static irqreturn_t stmmac_interrupt(int irq, void *dev_id);

#ifdef CONFIG_STMMAC_DEBUG_FS
static int stmmac_init_fs(struct net_device *dev);
static void stmmac_exit_fs(void);
#endif

/**
 * stmmac_verify_args - verify the driver parameters.
 * Description: it verifies if some wrong parameter is passed to the driver.
 * Note that wrong parameters are replaced with the default values.
 */
static void stmmac_verify_args(void)
{
	if (unlikely(watchdog < 0))
		watchdog = TX_TIMEO;
	if (unlikely(dma_rxsize < 0))
		dma_rxsize = DMA_RX_SIZE;
	if (unlikely(dma_txsize < 0))
		dma_txsize = DMA_TX_SIZE;
	if (unlikely((buf_sz < DMA_BUFFER_SIZE) || (buf_sz > BUF_SIZE_16KiB)))
		buf_sz = DMA_BUFFER_SIZE;
	if (unlikely(flow_ctrl > 1))
		flow_ctrl = FLOW_AUTO;
	else if (likely(flow_ctrl < 0))
		flow_ctrl = FLOW_OFF;
	if (unlikely((pause < 0) || (pause > 0xffff)))
		pause = PAUSE_TIME;
	if (eee_timer < 0)
		eee_timer = STMMAC_DEFAULT_LPI_TIMER;
}

static void stmmac_clk_csr_set(struct stmmac_priv *priv)
{
	u32 clk_rate;

	clk_rate = clk_get_rate(priv->stmmac_clk);

	/* Platform provided default clk_csr would be assumed valid
	 * for all other cases except for the below mentioned ones. */
	if (!(priv->clk_csr & MAC_CSR_H_FRQ_MASK)) {
		if (clk_rate < CSR_F_35M)
			priv->clk_csr = STMMAC_CSR_20_35M;
		else if ((clk_rate >= CSR_F_35M) && (clk_rate < CSR_F_60M))
			priv->clk_csr = STMMAC_CSR_35_60M;
		else if ((clk_rate >= CSR_F_60M) && (clk_rate < CSR_F_100M))
			priv->clk_csr = STMMAC_CSR_60_100M;
		else if ((clk_rate >= CSR_F_100M) && (clk_rate < CSR_F_150M))
			priv->clk_csr = STMMAC_CSR_100_150M;
		else if ((clk_rate >= CSR_F_150M) && (clk_rate < CSR_F_250M))
			priv->clk_csr = STMMAC_CSR_150_250M;
		else if ((clk_rate >= CSR_F_250M) && (clk_rate < CSR_F_300M))
			priv->clk_csr = STMMAC_CSR_250_300M;
	} /* For values higher than the IEEE 802.3 specified frequency
	   * we cannot estimate the proper divider as the frequency of
	   * clk_csr_i is not known. So we do not change the default
	   * divider. */
}

#if defined(STMMAC_XMIT_DEBUG) || defined(STMMAC_RX_DEBUG)
static void print_pkt(unsigned char *buf, int len)
{
	int j;
	pr_info("len = %d byte, buf addr: 0x%p", len, buf);
	for (j = 0; j < len; j++) {
		if ((j % 16) == 0)
			pr_info("\n %03x:", j);
		pr_info(" %02x", buf[j]);
	}
	pr_info("\n");
}
#endif

/* minimum number of free TX descriptors required to wake up TX process */
#define STMMAC_TX_THRESH(x)	(x->dma_tx_size/4)

static inline u32 stmmac_tx_avail(struct stmmac_priv *priv)
{
	return priv->dirty_tx + priv->dma_tx_size - priv->cur_tx - 1;
}
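/* Worked example (a reading aid, not from the original sources): cur_tx and
 * dirty_tx increase monotonically and are reduced modulo the ring size only
 * when used as indexes, so cur_tx - dirty_tx is the number of in-flight
 * descriptors.  With dma_tx_size = 256, cur_tx = 300 and dirty_tx = 290,
 * the helper above reports 290 + 256 - 300 - 1 = 245 free descriptors; one
 * slot is kept in reserve so a full ring is never confused with an empty one.
 */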
/* On some ST platforms, some HW system configuration registers have to be
 * set according to the link speed negotiated.
 */
static inline void stmmac_hw_fix_mac_speed(struct stmmac_priv *priv)
{
	struct phy_device *phydev = priv->phydev;

	if (likely(priv->plat->fix_mac_speed))
		priv->plat->fix_mac_speed(priv->plat->bsp_priv,
					  phydev->speed);
}

static void stmmac_enable_eee_mode(struct stmmac_priv *priv)
{
	/* Check and enter in LPI mode */
	if ((priv->dirty_tx == priv->cur_tx) &&
	    (priv->tx_path_in_lpi_mode == false))
		priv->hw->mac->set_eee_mode(priv->ioaddr);
}

void stmmac_disable_eee_mode(struct stmmac_priv *priv)
{
	/* Exit and disable EEE in case we are in LPI state. */
	priv->hw->mac->reset_eee_mode(priv->ioaddr);
	del_timer_sync(&priv->eee_ctrl_timer);
	priv->tx_path_in_lpi_mode = false;
}

/**
 * stmmac_eee_ctrl_timer
 * @arg : data hook
 * Description:
 *  If there is no data transfer and if we are not in LPI state,
 *  then the MAC Transmitter can be moved to LPI state.
 */
static void stmmac_eee_ctrl_timer(unsigned long arg)
{
	struct stmmac_priv *priv = (struct stmmac_priv *)arg;

	stmmac_enable_eee_mode(priv);
	mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_TIMER(eee_timer));
}

/**
 * stmmac_eee_init
 * @priv: private device pointer
 * Description:
 *  If the EEE support has been enabled while configuring the driver,
 *  if the GMAC actually supports the EEE (from the HW cap reg) and the
 *  phy can also manage EEE, then enable the LPI state and start the timer
 *  to verify if the tx path can enter in LPI state.
 */
bool stmmac_eee_init(struct stmmac_priv *priv)
{
	bool ret = false;

	/* MAC core supports the EEE feature. */
	if (priv->dma_cap.eee) {
		/* Check if the PHY supports EEE */
		if (phy_init_eee(priv->phydev, 1))
			goto out;

		priv->eee_active = 1;
		init_timer(&priv->eee_ctrl_timer);
		priv->eee_ctrl_timer.function = stmmac_eee_ctrl_timer;
		priv->eee_ctrl_timer.data = (unsigned long)priv;
		priv->eee_ctrl_timer.expires = STMMAC_LPI_TIMER(eee_timer);
		add_timer(&priv->eee_ctrl_timer);

		priv->hw->mac->set_eee_timer(priv->ioaddr,
					     STMMAC_DEFAULT_LIT_LS_TIMER,
					     priv->tx_lpi_timer);

		pr_info("stmmac: Energy-Efficient Ethernet initialized\n");

		ret = true;
	}
out:
	return ret;
}

static void stmmac_eee_adjust(struct stmmac_priv *priv)
{
	/* When the EEE has been already initialised we have to
	 * modify the PLS bit in the LPI ctrl & status reg according
	 * to the PHY link status.
	 */
	if (priv->eee_enabled)
		priv->hw->mac->set_eee_pls(priv->ioaddr, priv->phydev->link);
}
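/* Summary of the LPI (Low Power Idle) life-cycle implemented above (a
 * reading aid, not part of the original sources):
 *
 *   stmmac_eee_init()         - arms eee_ctrl_timer when both the MAC and
 *                               the PHY support EEE
 *   stmmac_eee_ctrl_timer()   - periodically tries to enter LPI; entry is
 *                               only taken when the TX ring is idle
 *                               (dirty_tx == cur_tx)
 *   stmmac_disable_eee_mode() - forces LPI exit; called from the xmit path
 *                               before queuing new frames
 *   stmmac_eee_adjust()       - mirrors the PHY link state into the PLS bit
 */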
/**
 * stmmac_adjust_link
 * @dev: net device structure
 * Description: it adjusts the link parameters.
 */
static void stmmac_adjust_link(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	struct phy_device *phydev = priv->phydev;
	unsigned long flags;
	int new_state = 0;
	unsigned int fc = priv->flow_ctrl, pause_time = priv->pause;

	if (phydev == NULL)
		return;

	DBG(probe, DEBUG, "stmmac_adjust_link: called. address %d link %d\n",
	    phydev->addr, phydev->link);

	spin_lock_irqsave(&priv->lock, flags);

	if (phydev->link) {
		u32 ctrl = readl(priv->ioaddr + MAC_CTRL_REG);

		/* Now we make sure that we can be in full duplex mode.
		 * If not, we operate in half-duplex mode. */
		if (phydev->duplex != priv->oldduplex) {
			new_state = 1;
			if (!(phydev->duplex))
				ctrl &= ~priv->hw->link.duplex;
			else
				ctrl |= priv->hw->link.duplex;
			priv->oldduplex = phydev->duplex;
		}
		/* Flow Control operation */
		if (phydev->pause)
			priv->hw->mac->flow_ctrl(priv->ioaddr, phydev->duplex,
						 fc, pause_time);

		if (phydev->speed != priv->speed) {
			new_state = 1;
			switch (phydev->speed) {
			case 1000:
				if (likely(priv->plat->has_gmac))
					ctrl &= ~priv->hw->link.port;
				stmmac_hw_fix_mac_speed(priv);
				break;
			case 100:
			case 10:
				if (priv->plat->has_gmac) {
					ctrl |= priv->hw->link.port;
					if (phydev->speed == SPEED_100) {
						ctrl |= priv->hw->link.speed;
					} else {
						ctrl &= ~(priv->hw->link.speed);
					}
				} else {
					ctrl &= ~priv->hw->link.port;
				}
				stmmac_hw_fix_mac_speed(priv);
				break;
			default:
				if (netif_msg_link(priv))
					pr_warning("%s: Speed (%d) is not 10"
						   " or 100!\n",
						   dev->name, phydev->speed);
				break;
			}

			priv->speed = phydev->speed;
		}

		writel(ctrl, priv->ioaddr + MAC_CTRL_REG);

		if (!priv->oldlink) {
			new_state = 1;
			priv->oldlink = 1;
		}
	} else if (priv->oldlink) {
		new_state = 1;
		priv->oldlink = 0;
		priv->speed = 0;
		priv->oldduplex = -1;
	}

	if (new_state && netif_msg_link(priv))
		phy_print_status(phydev);

	stmmac_eee_adjust(priv);

	spin_unlock_irqrestore(&priv->lock, flags);

	DBG(probe, DEBUG, "stmmac_adjust_link: exiting\n");
}

/**
 * stmmac_init_phy - PHY initialization
 * @dev: net device structure
 * Description: it initializes the driver's PHY state, and attaches the PHY
 * to the mac driver.
 *  Return value:
 *  0 on success
 */
static int stmmac_init_phy(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	struct phy_device *phydev;
	char phy_id_fmt[MII_BUS_ID_SIZE + 3];
	char bus_id[MII_BUS_ID_SIZE];
	int interface = priv->plat->interface;
	priv->oldlink = 0;
	priv->speed = 0;
	priv->oldduplex = -1;

	if (priv->plat->phy_bus_name)
		snprintf(bus_id, MII_BUS_ID_SIZE, "%s-%x",
			 priv->plat->phy_bus_name, priv->plat->bus_id);
	else
		snprintf(bus_id, MII_BUS_ID_SIZE, "stmmac-%x",
			 priv->plat->bus_id);

	snprintf(phy_id_fmt, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, bus_id,
		 priv->plat->phy_addr);
	pr_debug("stmmac_init_phy: trying to attach to %s\n", phy_id_fmt);

	phydev = phy_connect(dev, phy_id_fmt, &stmmac_adjust_link, 0,
			     interface);

	if (IS_ERR(phydev)) {
		pr_err("%s: Could not attach to PHY\n", dev->name);
		return PTR_ERR(phydev);
	}

	/* Stop Advertising 1000BASE Capability if interface is not GMII */
	if ((interface == PHY_INTERFACE_MODE_MII) ||
	    (interface == PHY_INTERFACE_MODE_RMII))
		phydev->advertising &= ~(SUPPORTED_1000baseT_Half |
					 SUPPORTED_1000baseT_Full);

	/*
	 * Broken HW is sometimes missing the pull-up resistor on the
	 * MDIO line, which results in reads to non-existent devices returning
	 * 0 rather than 0xffff. Catch this here and treat 0 as a non-existent
	 * device as well.
	 * Note: phydev->phy_id is the result of reading the UID PHY registers.
	 */
	if (phydev->phy_id == 0) {
		phy_disconnect(phydev);
		return -ENODEV;
	}
	pr_debug("stmmac_init_phy: %s: attached to PHY (UID 0x%x)"
		 " Link = %d\n", dev->name, phydev->phy_id, phydev->link);

	priv->phydev = phydev;

	return 0;
}
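/* Illustrative note: PHY_ID_FMT (from <linux/phy.h>) is "%s:%02x", so with
 * the default bus name and, say, bus_id = 1 and phy_addr = 5, the function
 * above tries to attach to the device named "stmmac-1:05" on the MDIO bus.
 */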
/**
 * display_ring
 * @p: pointer to the ring.
 * @size: size of the ring.
 * Description: display all the descriptors within the ring.
 */
static void display_ring(struct dma_desc *p, int size)
{
	struct tmp_s {
		u64 a;
		unsigned int b;
		unsigned int c;
	};
	int i;
	for (i = 0; i < size; i++) {
		struct tmp_s *x = (struct tmp_s *)(p + i);
		pr_info("\t%d [0x%x]: DES0=0x%x DES1=0x%x BUF1=0x%x BUF2=0x%x",
			i, (unsigned int)virt_to_phys(&p[i]),
			(unsigned int)(x->a), (unsigned int)((x->a) >> 32),
			x->b, x->c);
		pr_info("\n");
	}
}

static int stmmac_set_bfsize(int mtu, int bufsize)
{
	int ret = bufsize;

	if (mtu >= BUF_SIZE_4KiB)
		ret = BUF_SIZE_8KiB;
	else if (mtu >= BUF_SIZE_2KiB)
		ret = BUF_SIZE_4KiB;
	else if (mtu >= DMA_BUFFER_SIZE)
		ret = BUF_SIZE_2KiB;
	else
		ret = DMA_BUFFER_SIZE;

	return ret;
}
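/* Worked example (illustrative): the buffer size is picked so that a whole
 * frame fits in one DMA buffer.  With the BUF_SIZE_{2,4,8}KiB constants,
 * an MTU of 1500 selects a 2KiB buffer, an MTU of 3000 a 4KiB buffer and
 * an MTU of 8000 an 8KiB buffer; DMA_BUFFER_SIZE (2KiB) is the floor.
 */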
/**
 * init_dma_desc_rings - init the RX/TX descriptor rings
 * @dev: net device structure
 * Description: this function initializes the DMA RX/TX descriptors
 * and allocates the socket buffers. It supports the chained and ring
 * modes.
 */
static void init_dma_desc_rings(struct net_device *dev)
{
	int i;
	struct stmmac_priv *priv = netdev_priv(dev);
	struct sk_buff *skb;
	unsigned int txsize = priv->dma_tx_size;
	unsigned int rxsize = priv->dma_rx_size;
	unsigned int bfsize;
	int dis_ic = 0;
	int des3_as_data_buf = 0;

	/* Set the max buffer size according to the DESC mode
	 * and the MTU. Note that RING mode allows 16KiB bsize. */
	bfsize = priv->hw->ring->set_16kib_bfsize(dev->mtu);

	if (bfsize == BUF_SIZE_16KiB)
		des3_as_data_buf = 1;
	else
		bfsize = stmmac_set_bfsize(dev->mtu, priv->dma_buf_sz);

#ifdef CONFIG_STMMAC_TIMER
	/* Disable interrupts on completion for the reception if timer is on */
	if (likely(priv->tm->enable))
		dis_ic = 1;
#endif

	DBG(probe, INFO, "stmmac: txsize %d, rxsize %d, bfsize %d\n",
	    txsize, rxsize, bfsize);

	priv->rx_skbuff_dma = kmalloc(rxsize * sizeof(dma_addr_t), GFP_KERNEL);
	priv->rx_skbuff =
	    kmalloc(sizeof(struct sk_buff *) * rxsize, GFP_KERNEL);
	priv->dma_rx =
	    (struct dma_desc *)dma_alloc_coherent(priv->device,
						  rxsize *
						  sizeof(struct dma_desc),
						  &priv->dma_rx_phy,
						  GFP_KERNEL);
	priv->tx_skbuff = kmalloc(sizeof(struct sk_buff *) * txsize,
				  GFP_KERNEL);
	priv->dma_tx =
	    (struct dma_desc *)dma_alloc_coherent(priv->device,
						  txsize *
						  sizeof(struct dma_desc),
						  &priv->dma_tx_phy,
						  GFP_KERNEL);

	if ((priv->dma_rx == NULL) || (priv->dma_tx == NULL)) {
		pr_err("%s:ERROR allocating the DMA Tx/Rx desc\n", __func__);
		return;
	}

	DBG(probe, INFO, "stmmac (%s) DMA desc: virt addr (Rx %p, "
	    "Tx %p)\n\tDMA phy addr (Rx 0x%08x, Tx 0x%08x)\n",
	    dev->name, priv->dma_rx, priv->dma_tx,
	    (unsigned int)priv->dma_rx_phy, (unsigned int)priv->dma_tx_phy);

	/* RX INITIALIZATION */
	DBG(probe, INFO, "stmmac: SKB addresses:\n"
	    "skb\t\tskb data\tdma data\n");

	for (i = 0; i < rxsize; i++) {
		struct dma_desc *p = priv->dma_rx + i;

		skb = __netdev_alloc_skb(dev, bfsize + NET_IP_ALIGN,
					 GFP_KERNEL);
		if (unlikely(skb == NULL)) {
			pr_err("%s: Rx init fails; skb is NULL\n", __func__);
			break;
		}
		skb_reserve(skb, NET_IP_ALIGN);
		priv->rx_skbuff[i] = skb;
		priv->rx_skbuff_dma[i] = dma_map_single(priv->device,
							skb->data, bfsize,
							DMA_FROM_DEVICE);

		p->des2 = priv->rx_skbuff_dma[i];

		priv->hw->ring->init_desc3(des3_as_data_buf, p);

		DBG(probe, INFO, "[%p]\t[%p]\t[%x]\n", priv->rx_skbuff[i],
		    priv->rx_skbuff[i]->data, priv->rx_skbuff_dma[i]);
	}
	priv->cur_rx = 0;
	priv->dirty_rx = (unsigned int)(i - rxsize);
	priv->dma_buf_sz = bfsize;
	buf_sz = bfsize;

	/* TX INITIALIZATION */
	for (i = 0; i < txsize; i++) {
		priv->tx_skbuff[i] = NULL;
		priv->dma_tx[i].des2 = 0;
	}

	/* In case of Chained mode this sets the des3 to the next
	 * element in the chain */
	priv->hw->ring->init_dma_chain(priv->dma_rx, priv->dma_rx_phy, rxsize);
	priv->hw->ring->init_dma_chain(priv->dma_tx, priv->dma_tx_phy, txsize);

	priv->dirty_tx = 0;
	priv->cur_tx = 0;

	/* Clear the Rx/Tx descriptors */
	priv->hw->desc->init_rx_desc(priv->dma_rx, rxsize, dis_ic);
	priv->hw->desc->init_tx_desc(priv->dma_tx, txsize);

	if (netif_msg_hw(priv)) {
		pr_info("RX descriptor ring:\n");
		display_ring(priv->dma_rx, rxsize);
		pr_info("TX descriptor ring:\n");
		display_ring(priv->dma_tx, txsize);
	}
}
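/* Sizing note (illustrative): a descriptor is four 32-bit words (DES0-DES3,
 * 16 bytes), so each of the default 256-entry rings allocated above takes
 * exactly one 4KiB page of coherent memory.
 */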
static void dma_free_rx_skbufs(struct stmmac_priv *priv)
{
	int i;

	for (i = 0; i < priv->dma_rx_size; i++) {
		if (priv->rx_skbuff[i]) {
			dma_unmap_single(priv->device, priv->rx_skbuff_dma[i],
					 priv->dma_buf_sz, DMA_FROM_DEVICE);
			dev_kfree_skb_any(priv->rx_skbuff[i]);
		}
		priv->rx_skbuff[i] = NULL;
	}
}

static void dma_free_tx_skbufs(struct stmmac_priv *priv)
{
	int i;

	for (i = 0; i < priv->dma_tx_size; i++) {
		if (priv->tx_skbuff[i] != NULL) {
			struct dma_desc *p = priv->dma_tx + i;
			if (p->des2)
				dma_unmap_single(priv->device, p->des2,
						 priv->hw->desc->get_tx_len(p),
						 DMA_TO_DEVICE);
			dev_kfree_skb_any(priv->tx_skbuff[i]);
			priv->tx_skbuff[i] = NULL;
		}
	}
}

static void free_dma_desc_resources(struct stmmac_priv *priv)
{
	/* Release the DMA TX/RX socket buffers */
	dma_free_rx_skbufs(priv);
	dma_free_tx_skbufs(priv);

	/* Free the region of consistent memory previously allocated for
	 * the DMA */
	dma_free_coherent(priv->device,
			  priv->dma_tx_size * sizeof(struct dma_desc),
			  priv->dma_tx, priv->dma_tx_phy);
	dma_free_coherent(priv->device,
			  priv->dma_rx_size * sizeof(struct dma_desc),
			  priv->dma_rx, priv->dma_rx_phy);
	kfree(priv->rx_skbuff_dma);
	kfree(priv->rx_skbuff);
	kfree(priv->tx_skbuff);
}

/**
 *  stmmac_dma_operation_mode - HW DMA operation mode
 *  @priv : pointer to the private device structure.
 *  Description: it sets the DMA operation mode: tx/rx DMA thresholds
 *  or Store-And-Forward capability.
 */
static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
{
	if (likely(priv->plat->force_sf_dma_mode ||
		   ((priv->plat->tx_coe) && (!priv->no_csum_insertion)))) {
		/*
		 * In case of GMAC, SF mode can be enabled
		 * to perform the TX COE in HW. This depends on:
		 * 1) TX COE is actually supported
		 * 2) There is no bugged Jumbo frame support
		 *    that needs to not insert the csum in the TDES.
		 */
		priv->hw->dma->dma_mode(priv->ioaddr,
					SF_DMA_MODE, SF_DMA_MODE);
		tc = SF_DMA_MODE;
	} else
		priv->hw->dma->dma_mode(priv->ioaddr, tc, SF_DMA_MODE);
}
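/* Background (hedged, following the usual Synopsys databook conventions):
 * in threshold mode the TX DMA starts draining a frame once "tc" bytes are
 * in the FIFO (64 by default, bumped in steps of 64 on underflow, see
 * stmmac_dma_interrupt()), while SF_DMA_MODE waits for a complete frame.
 * Store-and-forward is needed for TX checksum offload because the whole
 * frame must be buffered before the checksum can be inserted.
 */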
/**
 * stmmac_tx:
 * @priv: private driver structure
 * Description: it reclaims resources after transmission completes.
 */
static void stmmac_tx(struct stmmac_priv *priv)
{
	unsigned int txsize = priv->dma_tx_size;

	spin_lock(&priv->tx_lock);

	while (priv->dirty_tx != priv->cur_tx) {
		int last;
		unsigned int entry = priv->dirty_tx % txsize;
		struct sk_buff *skb = priv->tx_skbuff[entry];
		struct dma_desc *p = priv->dma_tx + entry;

		/* Check if the descriptor is owned by the DMA. */
		if (priv->hw->desc->get_tx_owner(p))
			break;

		/* Verify tx error by looking at the last segment */
		last = priv->hw->desc->get_tx_ls(p);
		if (likely(last)) {
			int tx_error =
			    priv->hw->desc->tx_status(&priv->dev->stats,
						      &priv->xstats, p,
						      priv->ioaddr);
			if (likely(tx_error == 0)) {
				priv->dev->stats.tx_packets++;
				priv->xstats.tx_pkt_n++;
			} else
				priv->dev->stats.tx_errors++;
		}
		TX_DBG("%s: curr %d, dirty %d\n", __func__,
		       priv->cur_tx, priv->dirty_tx);

		if (likely(p->des2))
			dma_unmap_single(priv->device, p->des2,
					 priv->hw->desc->get_tx_len(p),
					 DMA_TO_DEVICE);
		priv->hw->ring->clean_desc3(p);

		if (likely(skb != NULL)) {
			/*
			 * If there's room in the queue (limit it to size)
			 * we add this skb back into the pool,
			 * if it's the right size.
			 */
			if ((skb_queue_len(&priv->rx_recycle) <
			     priv->dma_rx_size) &&
			    skb_recycle_check(skb, priv->dma_buf_sz))
				__skb_queue_head(&priv->rx_recycle, skb);
			else
				dev_kfree_skb(skb);

			priv->tx_skbuff[entry] = NULL;
		}

		priv->hw->desc->release_tx_desc(p);

		priv->dirty_tx++;
	}
	if (unlikely(netif_queue_stopped(priv->dev) &&
		     stmmac_tx_avail(priv) > STMMAC_TX_THRESH(priv))) {
		netif_tx_lock(priv->dev);
		if (netif_queue_stopped(priv->dev) &&
		    stmmac_tx_avail(priv) > STMMAC_TX_THRESH(priv)) {
			TX_DBG("%s: restart transmit\n", __func__);
			netif_wake_queue(priv->dev);
		}
		netif_tx_unlock(priv->dev);
	}

	if ((priv->eee_enabled) && (!priv->tx_path_in_lpi_mode)) {
		stmmac_enable_eee_mode(priv);
		mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_TIMER(eee_timer));
	}
	spin_unlock(&priv->tx_lock);
}

static inline void stmmac_enable_irq(struct stmmac_priv *priv)
{
#ifdef CONFIG_STMMAC_TIMER
	if (likely(priv->tm->enable))
		priv->tm->timer_start(tmrate);
	else
#endif
		priv->hw->dma->enable_dma_irq(priv->ioaddr);
}

static inline void stmmac_disable_irq(struct stmmac_priv *priv)
{
#ifdef CONFIG_STMMAC_TIMER
	if (likely(priv->tm->enable))
		priv->tm->timer_stop();
	else
#endif
		priv->hw->dma->disable_dma_irq(priv->ioaddr);
}

static int stmmac_has_work(struct stmmac_priv *priv)
{
	unsigned int has_work = 0;
	int rxret, tx_work = 0;

	rxret = priv->hw->desc->get_rx_owner(priv->dma_rx +
		(priv->cur_rx % priv->dma_rx_size));

	if (priv->dirty_tx != priv->cur_tx)
		tx_work = 1;

	if (likely(!rxret || tx_work))
		has_work = 1;

	return has_work;
}

static inline void _stmmac_schedule(struct stmmac_priv *priv)
{
	if (likely(stmmac_has_work(priv))) {
		stmmac_disable_irq(priv);
		napi_schedule(&priv->napi);
	}
}
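/* Reading aid (not from the original sources): the helpers above implement
 * the usual NAPI hand-off.  stmmac_interrupt() -> _stmmac_schedule() masks
 * the DMA interrupts (or stops the external timer) and schedules the
 * softirq; stmmac_poll() then processes completions and, once under budget,
 * calls napi_complete() and stmmac_enable_irq() to re-arm the interrupts.
 */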
#ifdef CONFIG_STMMAC_TIMER
void stmmac_schedule(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);

	priv->xstats.sched_timer_n++;

	_stmmac_schedule(priv);
}

static void stmmac_no_timer_started(unsigned int x)
{
}

static void stmmac_no_timer_stopped(void)
{
}
#endif

/**
 * stmmac_tx_err:
 * @priv: pointer to the private device structure
 * Description: it cleans the descriptors and restarts the transmission
 * in case of errors.
 */
static void stmmac_tx_err(struct stmmac_priv *priv)
{
	netif_stop_queue(priv->dev);

	priv->hw->dma->stop_tx(priv->ioaddr);
	dma_free_tx_skbufs(priv);
	priv->hw->desc->init_tx_desc(priv->dma_tx, priv->dma_tx_size);
	priv->dirty_tx = 0;
	priv->cur_tx = 0;
	priv->hw->dma->start_tx(priv->ioaddr);

	priv->dev->stats.tx_errors++;
	netif_wake_queue(priv->dev);
}

static void stmmac_dma_interrupt(struct stmmac_priv *priv)
{
	int status;

	status = priv->hw->dma->dma_interrupt(priv->ioaddr, &priv->xstats);
	if (likely(status == handle_tx_rx))
		_stmmac_schedule(priv);

	else if (unlikely(status == tx_hard_error_bump_tc)) {
		/* Try to bump up the dma threshold on this failure */
		if (unlikely(tc != SF_DMA_MODE) && (tc <= 256)) {
			tc += 64;
			priv->hw->dma->dma_mode(priv->ioaddr, tc, SF_DMA_MODE);
			priv->xstats.threshold = tc;
		}
	} else if (unlikely(status == tx_hard_error))
		stmmac_tx_err(priv);
}

static void stmmac_mmc_setup(struct stmmac_priv *priv)
{
	unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET |
			    MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET;

	/* Mask the MMC irq; counters are managed in SW and registers
	 * are cleared on each READ eventually. */
	dwmac_mmc_intr_all_mask(priv->ioaddr);

	if (priv->dma_cap.rmon) {
		dwmac_mmc_ctrl(priv->ioaddr, mode);
		memset(&priv->mmc, 0, sizeof(struct stmmac_counters));
	} else
		pr_info(" No MAC Management Counters available\n");
}

static u32 stmmac_get_synopsys_id(struct stmmac_priv *priv)
{
	u32 hwid = priv->hw->synopsys_uid;

	/* Only check valid Synopsys Id because old MAC chips
	 * have no HW registers from which to get the ID */
	if (likely(hwid)) {
		u32 uid = ((hwid & 0x0000ff00) >> 8);
		u32 synid = (hwid & 0x000000ff);

		pr_info("stmmac - user ID: 0x%x, Synopsys ID: 0x%x\n",
			uid, synid);

		return synid;
	}
	return 0;
}
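/* Worked example (illustrative): a version register value of 0x1037 decodes
 * as user ID 0x10 and Synopsys ID 0x37, i.e. a GMAC core of the 3.7x
 * generation; a value of 0 means an old MAC10/100 without the register.
 */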
/**
 * stmmac_selec_desc_mode
 * @priv : private structure
 * Description: select the Enhanced/Alternate or Normal descriptors
 */
static void stmmac_selec_desc_mode(struct stmmac_priv *priv)
{
	if (priv->plat->enh_desc) {
		pr_info(" Enhanced/Alternate descriptors\n");
		priv->hw->desc = &enh_desc_ops;
	} else {
		pr_info(" Normal descriptors\n");
		priv->hw->desc = &ndesc_ops;
	}
}

/**
 * stmmac_get_hw_features
 * @priv : private device pointer
 * Description:
 *  new GMAC chip generations have a new register to indicate the
 *  presence of the optional feature/functions.
 *  This can also be used to override the value passed through the
 *  platform and necessary for old MAC10/100 and GMAC chips.
 */
static int stmmac_get_hw_features(struct stmmac_priv *priv)
{
	u32 hw_cap = 0;

	if (priv->hw->dma->get_hw_feature) {
		hw_cap = priv->hw->dma->get_hw_feature(priv->ioaddr);

		priv->dma_cap.mbps_10_100 = (hw_cap & DMA_HW_FEAT_MIISEL);
		priv->dma_cap.mbps_1000 = (hw_cap & DMA_HW_FEAT_GMIISEL) >> 1;
		priv->dma_cap.half_duplex = (hw_cap & DMA_HW_FEAT_HDSEL) >> 2;
		priv->dma_cap.hash_filter =
		    (hw_cap & DMA_HW_FEAT_HASHSEL) >> 4;
		priv->dma_cap.multi_addr =
		    (hw_cap & DMA_HW_FEAT_ADDMACADRSEL) >> 5;
		priv->dma_cap.pcs = (hw_cap & DMA_HW_FEAT_PCSSEL) >> 6;
		priv->dma_cap.sma_mdio = (hw_cap & DMA_HW_FEAT_SMASEL) >> 8;
		priv->dma_cap.pmt_remote_wake_up =
		    (hw_cap & DMA_HW_FEAT_RWKSEL) >> 9;
		priv->dma_cap.pmt_magic_frame =
		    (hw_cap & DMA_HW_FEAT_MGKSEL) >> 10;
		/* MMC */
		priv->dma_cap.rmon = (hw_cap & DMA_HW_FEAT_MMCSEL) >> 11;
		/* IEEE 1588-2002 */
		priv->dma_cap.time_stamp =
		    (hw_cap & DMA_HW_FEAT_TSVER1SEL) >> 12;
		/* IEEE 1588-2008 */
		priv->dma_cap.atime_stamp =
		    (hw_cap & DMA_HW_FEAT_TSVER2SEL) >> 13;
		/* 802.3az - Energy-Efficient Ethernet (EEE) */
		priv->dma_cap.eee = (hw_cap & DMA_HW_FEAT_EEESEL) >> 14;
		priv->dma_cap.av = (hw_cap & DMA_HW_FEAT_AVSEL) >> 15;
		/* TX and RX csum */
		priv->dma_cap.tx_coe = (hw_cap & DMA_HW_FEAT_TXCOESEL) >> 16;
		priv->dma_cap.rx_coe_type1 =
		    (hw_cap & DMA_HW_FEAT_RXTYP1COE) >> 17;
		priv->dma_cap.rx_coe_type2 =
		    (hw_cap & DMA_HW_FEAT_RXTYP2COE) >> 18;
		priv->dma_cap.rxfifo_over_2048 =
		    (hw_cap & DMA_HW_FEAT_RXFIFOSIZE) >> 19;
		/* TX and RX number of channels */
		priv->dma_cap.number_rx_channel =
		    (hw_cap & DMA_HW_FEAT_RXCHCNT) >> 20;
		priv->dma_cap.number_tx_channel =
		    (hw_cap & DMA_HW_FEAT_TXCHCNT) >> 22;
		/* Alternate (enhanced) DESC mode */
		priv->dma_cap.enh_desc =
		    (hw_cap & DMA_HW_FEAT_ENHDESSEL) >> 24;
	}

	return hw_cap;
}

static void stmmac_check_ether_addr(struct stmmac_priv *priv)
{
	/* verify if the MAC address is valid; if not, read it from
	 * the HW and, as a last resort, generate a random one */
	if (!is_valid_ether_addr(priv->dev->dev_addr)) {
		priv->hw->mac->get_umac_addr((void __iomem *)
					     priv->dev->base_addr,
					     priv->dev->dev_addr, 0);
		if (!is_valid_ether_addr(priv->dev->dev_addr))
			eth_hw_addr_random(priv->dev);
	}
	pr_warning("%s: device MAC address %pM\n", priv->dev->name,
		   priv->dev->dev_addr);
}

static int stmmac_init_dma_engine(struct stmmac_priv *priv)
{
	int pbl = DEFAULT_DMA_PBL, fixed_burst = 0, burst_len = 0;
	int mixed_burst = 0;

	/* Some DMA parameters can be passed from the platform;
	 * in case they are not passed we keep a default
	 * (good for all the chips) and init the DMA! */
	if (priv->plat->dma_cfg) {
		pbl = priv->plat->dma_cfg->pbl;
		fixed_burst = priv->plat->dma_cfg->fixed_burst;
		mixed_burst = priv->plat->dma_cfg->mixed_burst;
		burst_len = priv->plat->dma_cfg->burst_len;
	}

	return priv->hw->dma->init(priv->ioaddr, pbl, fixed_burst, mixed_burst,
				   burst_len, priv->dma_tx_phy,
				   priv->dma_rx_phy);
}
/**
 *  stmmac_open - open entry point of the driver
 *  @dev : pointer to the device structure.
 *  Description:
 *  This function is the open entry point of the driver.
 *  Return value:
 *  0 on success and an appropriate (-)ve integer as defined in errno.h
 *  file on failure.
 */
static int stmmac_open(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	int ret;

#ifdef CONFIG_STMMAC_TIMER
	priv->tm = kzalloc(sizeof(struct stmmac_timer), GFP_KERNEL);
	if (unlikely(priv->tm == NULL))
		return -ENOMEM;

	priv->tm->freq = tmrate;

	/* Test if the external timer can actually be used.
	 * In case of failure continue without timer. */
	if (unlikely((stmmac_open_ext_timer(dev, priv->tm)) < 0)) {
		pr_warning("stmmaceth: cannot attach the external timer.\n");
		priv->tm->freq = 0;
		priv->tm->timer_start = stmmac_no_timer_started;
		priv->tm->timer_stop = stmmac_no_timer_stopped;
	} else
		priv->tm->enable = 1;
#endif
	clk_enable(priv->stmmac_clk);

	stmmac_check_ether_addr(priv);

	ret = stmmac_init_phy(dev);
	if (unlikely(ret)) {
		pr_err("%s: Cannot attach to PHY (error: %d)\n",
		       __func__, ret);
		goto open_error;
	}

	/* Create and initialize the TX/RX descriptors chains. */
	priv->dma_tx_size = STMMAC_ALIGN(dma_txsize);
	priv->dma_rx_size = STMMAC_ALIGN(dma_rxsize);
	priv->dma_buf_sz = STMMAC_ALIGN(buf_sz);
	init_dma_desc_rings(dev);

	/* DMA initialization and SW reset */
	ret = stmmac_init_dma_engine(priv);
	if (ret < 0) {
		pr_err("%s: DMA initialization failed\n", __func__);
		goto open_error;
	}

	/* Copy the MAC addr into the HW */
	priv->hw->mac->set_umac_addr(priv->ioaddr, dev->dev_addr, 0);

	/* If required, perform hw setup of the bus. */
	if (priv->plat->bus_setup)
		priv->plat->bus_setup(priv->ioaddr);

	/* Initialize the MAC Core */
	priv->hw->mac->core_init(priv->ioaddr);

	/* Request the IRQ lines */
	ret = request_irq(dev->irq, stmmac_interrupt,
			  IRQF_SHARED, dev->name, dev);
	if (unlikely(ret < 0)) {
		pr_err("%s: ERROR: allocating the IRQ %d (error: %d)\n",
		       __func__, dev->irq, ret);
		goto open_error;
	}

	/* Request the Wake IRQ in case another line is used for WoL */
	if (priv->wol_irq != dev->irq) {
		ret = request_irq(priv->wol_irq, stmmac_interrupt,
				  IRQF_SHARED, dev->name, dev);
		if (unlikely(ret < 0)) {
			pr_err("%s: ERROR: allocating the ext WoL IRQ %d "
			       "(error: %d)\n", __func__, priv->wol_irq, ret);
			goto open_error_wolirq;
		}
	}

	/* Request the LPI IRQ in case a separate line is used for it */
	if (priv->lpi_irq != -ENXIO) {
		ret = request_irq(priv->lpi_irq, stmmac_interrupt, IRQF_SHARED,
				  dev->name, dev);
		if (unlikely(ret < 0)) {
			pr_err("%s: ERROR: allocating the LPI IRQ %d (%d)\n",
			       __func__, priv->lpi_irq, ret);
			goto open_error_lpiirq;
		}
	}

	/* Enable the MAC Rx/Tx */
	stmmac_set_mac(priv->ioaddr, true);

	/* Set the HW DMA mode and the COE */
	stmmac_dma_operation_mode(priv);

	/* Extra statistics */
	memset(&priv->xstats, 0, sizeof(struct stmmac_extra_stats));
	priv->xstats.threshold = tc;

	stmmac_mmc_setup(priv);

#ifdef CONFIG_STMMAC_DEBUG_FS
	ret = stmmac_init_fs(dev);
	if (ret < 0)
		pr_warning("%s: failed debugFS registration\n", __func__);
#endif
	/* Start the ball rolling... */
	DBG(probe, DEBUG, "%s: DMA RX/TX processes started...\n", dev->name);
	priv->hw->dma->start_tx(priv->ioaddr);
	priv->hw->dma->start_rx(priv->ioaddr);

#ifdef CONFIG_STMMAC_TIMER
	priv->tm->timer_start(tmrate);
#endif

	/* Dump DMA/MAC registers */
	if (netif_msg_hw(priv)) {
		priv->hw->mac->dump_regs(priv->ioaddr);
		priv->hw->dma->dump_regs(priv->ioaddr);
	}

	if (priv->phydev)
		phy_start(priv->phydev);

	priv->tx_lpi_timer = STMMAC_DEFAULT_TWT_LS_TIMER;
	priv->eee_enabled = stmmac_eee_init(priv);

	napi_enable(&priv->napi);
	skb_queue_head_init(&priv->rx_recycle);
	netif_start_queue(dev);

	return 0;

open_error_lpiirq:
	if (priv->wol_irq != dev->irq)
		free_irq(priv->wol_irq, dev);

open_error_wolirq:
	free_irq(dev->irq, dev);

open_error:
#ifdef CONFIG_STMMAC_TIMER
	kfree(priv->tm);
#endif
	if (priv->phydev)
		phy_disconnect(priv->phydev);

	clk_disable(priv->stmmac_clk);

	return ret;
}

/**
 *  stmmac_release - close entry point of the driver
 *  @dev : device pointer.
 *  Description:
 *  This is the stop entry point of the driver.
 */
static int stmmac_release(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);

	if (priv->eee_enabled)
		del_timer_sync(&priv->eee_ctrl_timer);

	/* Stop and disconnect the PHY */
	if (priv->phydev) {
		phy_stop(priv->phydev);
		phy_disconnect(priv->phydev);
		priv->phydev = NULL;
	}

	netif_stop_queue(dev);

#ifdef CONFIG_STMMAC_TIMER
	/* Stop and release the timer */
	stmmac_close_ext_timer();
	kfree(priv->tm);
#endif
	napi_disable(&priv->napi);
	skb_queue_purge(&priv->rx_recycle);

	/* Free the IRQ lines */
	free_irq(dev->irq, dev);
	if (priv->wol_irq != dev->irq)
		free_irq(priv->wol_irq, dev);
	if (priv->lpi_irq != -ENXIO)
		free_irq(priv->lpi_irq, dev);

	/* Stop TX/RX DMA and clear the descriptors */
	priv->hw->dma->stop_tx(priv->ioaddr);
	priv->hw->dma->stop_rx(priv->ioaddr);

	/* Release and free the Rx/Tx resources */
	free_dma_desc_resources(priv);

	/* Disable the MAC Rx/Tx */
	stmmac_set_mac(priv->ioaddr, false);

	netif_carrier_off(dev);

#ifdef CONFIG_STMMAC_DEBUG_FS
	stmmac_exit_fs();
#endif
	clk_disable(priv->stmmac_clk);

	return 0;
}
/**
 *  stmmac_xmit:
 *  @skb : the socket buffer
 *  @dev : device pointer
 *  Description : Tx entry point of the driver.
 */
static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	unsigned int txsize = priv->dma_tx_size;
	unsigned int entry;
	int i, csum_insertion = 0;
	int nfrags = skb_shinfo(skb)->nr_frags;
	struct dma_desc *desc, *first;
	unsigned int nopaged_len = skb_headlen(skb);

	if (unlikely(stmmac_tx_avail(priv) < nfrags + 1)) {
		if (!netif_queue_stopped(dev)) {
			netif_stop_queue(dev);
			/* This is a hard error, log it. */
			pr_err("%s: BUG! Tx Ring full when queue awake\n",
			       __func__);
		}
		return NETDEV_TX_BUSY;
	}

	spin_lock(&priv->tx_lock);

	if (priv->tx_path_in_lpi_mode)
		stmmac_disable_eee_mode(priv);

	entry = priv->cur_tx % txsize;

#ifdef STMMAC_XMIT_DEBUG
	if ((skb->len > ETH_FRAME_LEN) || nfrags)
		pr_info("stmmac xmit:\n"
			"\tskb addr %p - len: %d - nopaged_len: %d\n"
			"\tn_frags: %d - ip_summed: %d - %s gso\n",
			skb, skb->len, nopaged_len, nfrags, skb->ip_summed,
			!skb_is_gso(skb) ? "isn't" : "is");
#endif

	csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL);

	desc = priv->dma_tx + entry;
	first = desc;

#ifdef STMMAC_XMIT_DEBUG
	if ((nfrags > 0) || (skb->len > ETH_FRAME_LEN))
		pr_debug("stmmac xmit: skb len: %d, nopaged_len: %d,\n"
			 "\t\tn_frags: %d, ip_summed: %d\n",
			 skb->len, nopaged_len, nfrags, skb->ip_summed);
#endif
	priv->tx_skbuff[entry] = skb;

	if (priv->hw->ring->is_jumbo_frm(skb->len, priv->plat->enh_desc)) {
		entry = priv->hw->ring->jumbo_frm(priv, skb, csum_insertion);
		desc = priv->dma_tx + entry;
	} else {
		desc->des2 = dma_map_single(priv->device, skb->data,
					    nopaged_len, DMA_TO_DEVICE);
		priv->hw->desc->prepare_tx_desc(desc, 1, nopaged_len,
						csum_insertion);
	}

	for (i = 0; i < nfrags; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		int len = skb_frag_size(frag);

		entry = (++priv->cur_tx) % txsize;
		desc = priv->dma_tx + entry;

		TX_DBG("\t[entry %d] segment len: %d\n", entry, len);
		desc->des2 = skb_frag_dma_map(priv->device, frag, 0, len,
					      DMA_TO_DEVICE);
		priv->tx_skbuff[entry] = NULL;
		priv->hw->desc->prepare_tx_desc(desc, 0, len, csum_insertion);
		wmb();
		priv->hw->desc->set_tx_owner(desc);
		wmb();
	}

	/* Interrupt on completion only for the latest segment */
	priv->hw->desc->close_tx_desc(desc);

#ifdef CONFIG_STMMAC_TIMER
	/* Clean IC while using timer */
	if (likely(priv->tm->enable))
		priv->hw->desc->clear_tx_ic(desc);
#endif

	wmb();

	/* To avoid a race condition, set the owner bit on the first
	 * descriptor only once the whole frame has been prepared. */
	priv->hw->desc->set_tx_owner(first);
	wmb();

	priv->cur_tx++;

#ifdef STMMAC_XMIT_DEBUG
	if (netif_msg_pktdata(priv)) {
		pr_info("stmmac xmit: current=%d, dirty=%d, entry=%d, "
			"first=%p, nfrags=%d\n",
			(priv->cur_tx % txsize), (priv->dirty_tx % txsize),
			entry, first, nfrags);
		display_ring(priv->dma_tx, txsize);
		pr_info(">>> frame to be transmitted: ");
		print_pkt(skb->data, skb->len);
	}
#endif
	if (unlikely(stmmac_tx_avail(priv) <= (MAX_SKB_FRAGS + 1))) {
		TX_DBG("%s: stop transmitted packets\n", __func__);
		netif_stop_queue(dev);
	}

	dev->stats.tx_bytes += skb->len;

	skb_tx_timestamp(skb);

	priv->hw->dma->enable_dma_transmission(priv->ioaddr);

	spin_unlock(&priv->tx_lock);

	return NETDEV_TX_OK;
}
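/* Worked example (a reading aid, not from the original sources): an skb
 * with a linear header and two page fragments consumes three descriptors.
 * The loop above hands descriptors 2 and 3 to the DMA as soon as they are
 * prepared, but descriptor 1 (the "first" pointer) gets its owner bit only
 * at the very end: the DMA always starts fetching from the first
 * descriptor, so the frame cannot be picked up while it is still
 * half-built.  The wmb() calls order the descriptor writes against the
 * final ownership hand-off.
 */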
static inline void stmmac_rx_refill(struct stmmac_priv *priv)
{
	unsigned int rxsize = priv->dma_rx_size;
	int bfsize = priv->dma_buf_sz;
	struct dma_desc *p = priv->dma_rx;

	for (; priv->cur_rx - priv->dirty_rx > 0; priv->dirty_rx++) {
		unsigned int entry = priv->dirty_rx % rxsize;
		if (likely(priv->rx_skbuff[entry] == NULL)) {
			struct sk_buff *skb;

			skb = __skb_dequeue(&priv->rx_recycle);
			if (skb == NULL)
				skb = netdev_alloc_skb_ip_align(priv->dev,
								bfsize);

			if (unlikely(skb == NULL))
				break;

			priv->rx_skbuff[entry] = skb;
			priv->rx_skbuff_dma[entry] =
			    dma_map_single(priv->device, skb->data, bfsize,
					   DMA_FROM_DEVICE);

			(p + entry)->des2 = priv->rx_skbuff_dma[entry];

			if (unlikely(priv->plat->has_gmac))
				priv->hw->ring->refill_desc3(bfsize, p + entry);

			RX_DBG(KERN_INFO "\trefill entry #%d\n", entry);
		}
		wmb();
		priv->hw->desc->set_rx_owner(p + entry);
		wmb();
	}
}

static int stmmac_rx(struct stmmac_priv *priv, int limit)
{
	unsigned int rxsize = priv->dma_rx_size;
	unsigned int entry = priv->cur_rx % rxsize;
	unsigned int next_entry;
	unsigned int count = 0;
	struct dma_desc *p = priv->dma_rx + entry;
	struct dma_desc *p_next;

#ifdef STMMAC_RX_DEBUG
	if (netif_msg_hw(priv)) {
		pr_debug(">>> stmmac_rx: descriptor ring:\n");
		display_ring(priv->dma_rx, rxsize);
	}
#endif
	while (!priv->hw->desc->get_rx_owner(p)) {
		int status;

		if (count >= limit)
			break;

		count++;

		next_entry = (++priv->cur_rx) % rxsize;
		p_next = priv->dma_rx + next_entry;
		prefetch(p_next);

		/* read the status of the incoming frame */
		status = (priv->hw->desc->rx_status(&priv->dev->stats,
						    &priv->xstats, p));
		if (unlikely(status == discard_frame))
			priv->dev->stats.rx_errors++;
		else {
			struct sk_buff *skb;
			int frame_len;

			frame_len = priv->hw->desc->get_rx_frame_len(p,
					priv->plat->rx_coe);
			/* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3
			 * Type frames (LLC/LLC-SNAP) */
			if (unlikely(status != llc_snap))
				frame_len -= ETH_FCS_LEN;
#ifdef STMMAC_RX_DEBUG
			if (frame_len > ETH_FRAME_LEN)
				pr_debug("\tRX frame size %d, COE status: %d\n",
					 frame_len, status);

			if (netif_msg_hw(priv))
				pr_debug("\tdesc: %p [entry %d] buff=0x%x\n",
					 p, entry, p->des2);
#endif
			skb = priv->rx_skbuff[entry];
			if (unlikely(!skb)) {
				pr_err("%s: Inconsistent Rx descriptor chain\n",
				       priv->dev->name);
				priv->dev->stats.rx_dropped++;
				break;
			}
			prefetch(skb->data - NET_IP_ALIGN);
			priv->rx_skbuff[entry] = NULL;

			skb_put(skb, frame_len);
			dma_unmap_single(priv->device,
					 priv->rx_skbuff_dma[entry],
					 priv->dma_buf_sz, DMA_FROM_DEVICE);
#ifdef STMMAC_RX_DEBUG
			if (netif_msg_pktdata(priv)) {
				pr_info(" frame received (%dbytes)",
					frame_len);
				print_pkt(skb->data, frame_len);
			}
#endif
			skb->protocol = eth_type_trans(skb, priv->dev);

			if (unlikely(!priv->plat->rx_coe)) {
				/* No RX COE for old mac10/100 devices */
				skb_checksum_none_assert(skb);
				netif_receive_skb(skb);
			} else {
				skb->ip_summed = CHECKSUM_UNNECESSARY;
				napi_gro_receive(&priv->napi, skb);
			}

			priv->dev->stats.rx_packets++;
			priv->dev->stats.rx_bytes += frame_len;
		}
		entry = next_entry;
		p = p_next;	/* use prefetched values */
	}

	stmmac_rx_refill(priv);

	priv->xstats.rx_pkt_n += count;

	return count;
}
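/* Note (illustrative): ETH_FCS_LEN is 4, so for a minimum-size 64-byte
 * Ethernet frame the length reported to the stack above becomes 60 bytes
 * once the hardware-validated CRC is dropped (unless the core has already
 * stripped it, the llc_snap case for LLC/LLC-SNAP frames).
 */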
/**
 *  stmmac_poll - stmmac poll method (NAPI)
 *  @napi : pointer to the napi structure.
 *  @budget : maximum number of packets that the current CPU can receive from
 *	      all interfaces.
 *  Description :
 *  This function implements the reception process.
 *  It also runs the TX completion task.
 */
static int stmmac_poll(struct napi_struct *napi, int budget)
{
	struct stmmac_priv *priv = container_of(napi, struct stmmac_priv,
						napi);
	int work_done = 0;

	priv->xstats.poll_n++;
	stmmac_tx(priv);
	work_done = stmmac_rx(priv, budget);

	if (work_done < budget) {
		napi_complete(napi);
		stmmac_enable_irq(priv);
	}
	return work_done;
}

/**
 *  stmmac_tx_timeout
 *  @dev : Pointer to net device structure
 *  Description: this function is called when a packet transmission fails to
 *  complete within a reasonable time. The driver will mark the error in the
 *  netdev structure and arrange for the device to be reset to a sane state
 *  in order to transmit a new packet.
 */
static void stmmac_tx_timeout(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);

	/* Clear Tx resources and restart transmitting again */
	stmmac_tx_err(priv);
}

/* Configuration changes (passed on by ifconfig) */
static int stmmac_config(struct net_device *dev, struct ifmap *map)
{
	if (dev->flags & IFF_UP)	/* can't act on a running interface */
		return -EBUSY;

	/* Don't allow changing the I/O address */
	if (map->base_addr != dev->base_addr) {
		pr_warning("%s: can't change I/O address\n", dev->name);
		return -EOPNOTSUPP;
	}

	/* Don't allow changing the IRQ */
	if (map->irq != dev->irq) {
		pr_warning("%s: can't change IRQ number %d\n",
			   dev->name, dev->irq);
		return -EOPNOTSUPP;
	}

	/* ignore other fields */
	return 0;
}

/**
 *  stmmac_set_rx_mode - entry point for multicast addressing
 *  @dev : pointer to the device structure
 *  Description:
 *  This function is a driver entry point which gets called by the kernel
 *  whenever multicast addresses must be enabled/disabled.
 *  Return value:
 *  void.
 */
static void stmmac_set_rx_mode(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);

	spin_lock(&priv->lock);
	priv->hw->mac->set_filter(dev, priv->synopsys_id);
	spin_unlock(&priv->lock);
}

/**
 *  stmmac_change_mtu - entry point to change MTU size for the device.
 *  @dev : device pointer.
 *  @new_mtu : the new MTU size for the device.
 *  Description: the Maximum Transfer Unit (MTU) is used by the network layer
 *  to drive packet transmission. Ethernet has an MTU of 1500 octets
 *  (ETH_DATA_LEN). This value can be changed with ifconfig.
 *  Return value:
 *  0 on success and an appropriate (-)ve integer as defined in errno.h
 *  file on failure.
 */
static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	int max_mtu;

	if (netif_running(dev)) {
		pr_err("%s: must be stopped to change its MTU\n", dev->name);
		return -EBUSY;
	}

	if (priv->plat->enh_desc)
		max_mtu = JUMBO_LEN;
	else
		max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN);

	if ((new_mtu < 46) || (new_mtu > max_mtu)) {
		pr_err("%s: invalid MTU, max MTU is: %d\n",
		       dev->name, max_mtu);
		return -EINVAL;
	}

	dev->mtu = new_mtu;
	netdev_update_features(dev);

	return 0;
}
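/* Note (illustrative): the lower MTU bound of 46 is the minimum Ethernet
 * payload (64-byte minimum frame minus 14 bytes of header and 4 of FCS),
 * while jumbo frames up to JUMBO_LEN (9000) are only allowed when the
 * enhanced descriptors, which can describe larger buffers, are in use.
 */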
static netdev_features_t stmmac_fix_features(struct net_device *dev,
					     netdev_features_t features)
{
	struct stmmac_priv *priv = netdev_priv(dev);

	if (priv->plat->rx_coe == STMMAC_RX_COE_NONE)
		features &= ~NETIF_F_RXCSUM;
	else if (priv->plat->rx_coe == STMMAC_RX_COE_TYPE1)
		features &= ~NETIF_F_IPV6_CSUM;
	if (!priv->plat->tx_coe)
		features &= ~NETIF_F_ALL_CSUM;

	/* Some GMAC devices have a bugged Jumbo frame support that
	 * needs to have the Tx COE disabled for oversized frames
	 * (due to limited buffer sizes). In this case we disable
	 * the TX csum insertion in the TDES and not use SF. */
	if (priv->plat->bugged_jumbo && (dev->mtu > ETH_DATA_LEN))
		features &= ~NETIF_F_ALL_CSUM;

	return features;
}

static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = (struct net_device *)dev_id;
	struct stmmac_priv *priv = netdev_priv(dev);

	if (unlikely(!dev)) {
		pr_err("%s: invalid dev pointer\n", __func__);
		return IRQ_NONE;
	}

	/* To handle GMAC own interrupts */
	if (priv->plat->has_gmac) {
		int status = priv->hw->mac->host_irq_status((void __iomem *)
							    dev->base_addr);
		if (unlikely(status)) {
			if (status & core_mmc_tx_irq)
				priv->xstats.mmc_tx_irq_n++;
			if (status & core_mmc_rx_irq)
				priv->xstats.mmc_rx_irq_n++;
			if (status & core_mmc_rx_csum_offload_irq)
				priv->xstats.mmc_rx_csum_offload_irq_n++;
			if (status & core_irq_receive_pmt_irq)
				priv->xstats.irq_receive_pmt_irq_n++;

			/* For LPI we need to save the tx status */
			if (status & core_irq_tx_path_in_lpi_mode) {
				priv->xstats.irq_tx_path_in_lpi_mode_n++;
				priv->tx_path_in_lpi_mode = true;
			}
			if (status & core_irq_tx_path_exit_lpi_mode) {
				priv->xstats.irq_tx_path_exit_lpi_mode_n++;
				priv->tx_path_in_lpi_mode = false;
			}
			if (status & core_irq_rx_path_in_lpi_mode)
				priv->xstats.irq_rx_path_in_lpi_mode_n++;
			if (status & core_irq_rx_path_exit_lpi_mode)
				priv->xstats.irq_rx_path_exit_lpi_mode_n++;
		}
	}

	/* To handle DMA interrupts */
	stmmac_dma_interrupt(priv);

	return IRQ_HANDLED;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/* Polling receive - used by NETCONSOLE and other diagnostic tools
 * to allow network I/O with interrupts disabled. */
static void stmmac_poll_controller(struct net_device *dev)
{
	disable_irq(dev->irq);
	stmmac_interrupt(dev->irq, dev);
	enable_irq(dev->irq);
}
#endif
/**
 *  stmmac_ioctl - Entry point for the Ioctl
 *  @dev: Device pointer.
 *  @rq: An IOCTL specific structure, that can contain a pointer to
 *  a proprietary structure used to pass information to the driver.
 *  @cmd: IOCTL command
 *  Description:
 *  Currently no special functionality is supported in IOCTL, just
 *  phy_mii_ioctl(...) can be invoked.
 */
static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	int ret;

	if (!netif_running(dev))
		return -EINVAL;

	if (!priv->phydev)
		return -EINVAL;

	ret = phy_mii_ioctl(priv->phydev, rq, cmd);

	return ret;
}

#ifdef CONFIG_STMMAC_DEBUG_FS
static struct dentry *stmmac_fs_dir;
static struct dentry *stmmac_rings_status;
static struct dentry *stmmac_dma_cap;

static int stmmac_sysfs_ring_read(struct seq_file *seq, void *v)
{
	struct tmp_s {
		u64 a;
		unsigned int b;
		unsigned int c;
	};
	int i;
	struct net_device *dev = seq->private;
	struct stmmac_priv *priv = netdev_priv(dev);

	seq_printf(seq, "=======================\n");
	seq_printf(seq, " RX descriptor ring\n");
	seq_printf(seq, "=======================\n");

	for (i = 0; i < priv->dma_rx_size; i++) {
		struct tmp_s *x = (struct tmp_s *)(priv->dma_rx + i);
		seq_printf(seq, "[%d] DES0=0x%x DES1=0x%x BUF1=0x%x BUF2=0x%x",
			   i, (unsigned int)(x->a),
			   (unsigned int)((x->a) >> 32), x->b, x->c);
		seq_printf(seq, "\n");
	}

	seq_printf(seq, "\n");
	seq_printf(seq, "=======================\n");
	seq_printf(seq, " TX descriptor ring\n");
	seq_printf(seq, "=======================\n");

	for (i = 0; i < priv->dma_tx_size; i++) {
		struct tmp_s *x = (struct tmp_s *)(priv->dma_tx + i);
		seq_printf(seq, "[%d] DES0=0x%x DES1=0x%x BUF1=0x%x BUF2=0x%x",
			   i, (unsigned int)(x->a),
			   (unsigned int)((x->a) >> 32), x->b, x->c);
		seq_printf(seq, "\n");
	}

	return 0;
}

static int stmmac_sysfs_ring_open(struct inode *inode, struct file *file)
{
	return single_open(file, stmmac_sysfs_ring_read, inode->i_private);
}

static const struct file_operations stmmac_rings_status_fops = {
	.owner = THIS_MODULE,
	.open = stmmac_sysfs_ring_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int stmmac_sysfs_dma_cap_read(struct seq_file *seq, void *v)
{
	struct net_device *dev = seq->private;
	struct stmmac_priv *priv = netdev_priv(dev);

	if (!priv->hw_cap_support) {
		seq_printf(seq, "DMA HW features not supported\n");
		return 0;
	}

	seq_printf(seq, "==============================\n");
	seq_printf(seq, "\tDMA HW features\n");
	seq_printf(seq, "==============================\n");

	seq_printf(seq, "\t10/100 Mbps %s\n",
		   (priv->dma_cap.mbps_10_100) ? "Y" : "N");
	seq_printf(seq, "\t1000 Mbps %s\n",
		   (priv->dma_cap.mbps_1000) ? "Y" : "N");
	seq_printf(seq, "\tHalf duplex %s\n",
		   (priv->dma_cap.half_duplex) ? "Y" : "N");
	seq_printf(seq, "\tHash Filter: %s\n",
		   (priv->dma_cap.hash_filter) ? "Y" : "N");
	seq_printf(seq, "\tMultiple MAC address registers: %s\n",
		   (priv->dma_cap.multi_addr) ? "Y" : "N");
	seq_printf(seq, "\tPCS (TBI/SGMII/RTBI PHY interfaces): %s\n",
		   (priv->dma_cap.pcs) ? "Y" : "N");
	seq_printf(seq, "\tSMA (MDIO) Interface: %s\n",
		   (priv->dma_cap.sma_mdio) ? "Y" : "N");
	seq_printf(seq, "\tPMT Remote wake up: %s\n",
		   (priv->dma_cap.pmt_remote_wake_up) ? "Y" : "N");
"Y" : "N"); 1824 seq_printf(seq, "\tPMT Magic Frame: %s\n", 1825 (priv->dma_cap.pmt_magic_frame) ? "Y" : "N"); 1826 seq_printf(seq, "\tRMON module: %s\n", 1827 (priv->dma_cap.rmon) ? "Y" : "N"); 1828 seq_printf(seq, "\tIEEE 1588-2002 Time Stamp: %s\n", 1829 (priv->dma_cap.time_stamp) ? "Y" : "N"); 1830 seq_printf(seq, "\tIEEE 1588-2008 Advanced Time Stamp:%s\n", 1831 (priv->dma_cap.atime_stamp) ? "Y" : "N"); 1832 seq_printf(seq, "\t802.3az - Energy-Efficient Ethernet (EEE) %s\n", 1833 (priv->dma_cap.eee) ? "Y" : "N"); 1834 seq_printf(seq, "\tAV features: %s\n", (priv->dma_cap.av) ? "Y" : "N"); 1835 seq_printf(seq, "\tChecksum Offload in TX: %s\n", 1836 (priv->dma_cap.tx_coe) ? "Y" : "N"); 1837 seq_printf(seq, "\tIP Checksum Offload (type1) in RX: %s\n", 1838 (priv->dma_cap.rx_coe_type1) ? "Y" : "N"); 1839 seq_printf(seq, "\tIP Checksum Offload (type2) in RX: %s\n", 1840 (priv->dma_cap.rx_coe_type2) ? "Y" : "N"); 1841 seq_printf(seq, "\tRXFIFO > 2048bytes: %s\n", 1842 (priv->dma_cap.rxfifo_over_2048) ? "Y" : "N"); 1843 seq_printf(seq, "\tNumber of Additional RX channel: %d\n", 1844 priv->dma_cap.number_rx_channel); 1845 seq_printf(seq, "\tNumber of Additional TX channel: %d\n", 1846 priv->dma_cap.number_tx_channel); 1847 seq_printf(seq, "\tEnhanced descriptors: %s\n", 1848 (priv->dma_cap.enh_desc) ? "Y" : "N"); 1849 1850 return 0; 1851 } 1852 1853 static int stmmac_sysfs_dma_cap_open(struct inode *inode, struct file *file) 1854 { 1855 return single_open(file, stmmac_sysfs_dma_cap_read, inode->i_private); 1856 } 1857 1858 static const struct file_operations stmmac_dma_cap_fops = { 1859 .owner = THIS_MODULE, 1860 .open = stmmac_sysfs_dma_cap_open, 1861 .read = seq_read, 1862 .llseek = seq_lseek, 1863 .release = single_release, 1864 }; 1865 1866 static int stmmac_init_fs(struct net_device *dev) 1867 { 1868 /* Create debugfs entries */ 1869 stmmac_fs_dir = debugfs_create_dir(STMMAC_RESOURCE_NAME, NULL); 1870 1871 if (!stmmac_fs_dir || IS_ERR(stmmac_fs_dir)) { 1872 pr_err("ERROR %s, debugfs create directory failed\n", 1873 STMMAC_RESOURCE_NAME); 1874 1875 return -ENOMEM; 1876 } 1877 1878 /* Entry to report DMA RX/TX rings */ 1879 stmmac_rings_status = debugfs_create_file("descriptors_status", 1880 S_IRUGO, stmmac_fs_dir, dev, 1881 &stmmac_rings_status_fops); 1882 1883 if (!stmmac_rings_status || IS_ERR(stmmac_rings_status)) { 1884 pr_info("ERROR creating stmmac ring debugfs file\n"); 1885 debugfs_remove(stmmac_fs_dir); 1886 1887 return -ENOMEM; 1888 } 1889 1890 /* Entry to report the DMA HW features */ 1891 stmmac_dma_cap = debugfs_create_file("dma_cap", S_IRUGO, stmmac_fs_dir, 1892 dev, &stmmac_dma_cap_fops); 1893 1894 if (!stmmac_dma_cap || IS_ERR(stmmac_dma_cap)) { 1895 pr_info("ERROR creating stmmac MMC debugfs file\n"); 1896 debugfs_remove(stmmac_rings_status); 1897 debugfs_remove(stmmac_fs_dir); 1898 1899 return -ENOMEM; 1900 } 1901 1902 return 0; 1903 } 1904 1905 static void stmmac_exit_fs(void) 1906 { 1907 debugfs_remove(stmmac_rings_status); 1908 debugfs_remove(stmmac_dma_cap); 1909 debugfs_remove(stmmac_fs_dir); 1910 } 1911 #endif /* CONFIG_STMMAC_DEBUG_FS */ 1912 1913 static const struct net_device_ops stmmac_netdev_ops = { 1914 .ndo_open = stmmac_open, 1915 .ndo_start_xmit = stmmac_xmit, 1916 .ndo_stop = stmmac_release, 1917 .ndo_change_mtu = stmmac_change_mtu, 1918 .ndo_fix_features = stmmac_fix_features, 1919 .ndo_set_rx_mode = stmmac_set_rx_mode, 1920 .ndo_tx_timeout = stmmac_tx_timeout, 1921 .ndo_do_ioctl = stmmac_ioctl, 1922 .ndo_set_config = stmmac_config, 1923 #ifdef 
static const struct net_device_ops stmmac_netdev_ops = {
	.ndo_open = stmmac_open,
	.ndo_start_xmit = stmmac_xmit,
	.ndo_stop = stmmac_release,
	.ndo_change_mtu = stmmac_change_mtu,
	.ndo_fix_features = stmmac_fix_features,
	.ndo_set_rx_mode = stmmac_set_rx_mode,
	.ndo_tx_timeout = stmmac_tx_timeout,
	.ndo_do_ioctl = stmmac_ioctl,
	.ndo_set_config = stmmac_config,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = stmmac_poll_controller,
#endif
	.ndo_set_mac_address = eth_mac_addr,
};

/**
 * stmmac_hw_init - Init the MAC device
 * @priv: pointer to the private device structure.
 * Description: this function detects which MAC device
 * (GMAC/MAC10-100) has to be attached, checks the HW capability
 * (if supported) and sets the driver's features (for example
 * whether to use the ring or chain mode and the normal/enhanced
 * descriptor structure).
 */
static int stmmac_hw_init(struct stmmac_priv *priv)
{
	int ret;
	struct mac_device_info *mac;

	/* Identify the MAC HW device */
	if (priv->plat->has_gmac) {
		priv->dev->priv_flags |= IFF_UNICAST_FLT;
		mac = dwmac1000_setup(priv->ioaddr);
	} else {
		mac = dwmac100_setup(priv->ioaddr);
	}
	if (!mac)
		return -ENOMEM;

	priv->hw = mac;

	/* To use the chained or ring mode */
	priv->hw->ring = &ring_mode_ops;

	/* Get and dump the chip ID */
	priv->synopsys_id = stmmac_get_synopsys_id(priv);

	/* Get the HW capability (GMAC cores newer than 3.50a expose the
	 * DMA HW feature register) */
	priv->hw_cap_support = stmmac_get_hw_features(priv);
	if (priv->hw_cap_support) {
		pr_info(" DMA HW capability register supported");

		/* We can override some gmac/dma configuration fields
		 * (e.g. enh_desc, tx_coe) that are passed through the
		 * platform with the values from the HW capability
		 * register (if supported).
		 */
		priv->plat->enh_desc = priv->dma_cap.enh_desc;
		priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up;

		priv->plat->tx_coe = priv->dma_cap.tx_coe;

		if (priv->dma_cap.rx_coe_type2)
			priv->plat->rx_coe = STMMAC_RX_COE_TYPE2;
		else if (priv->dma_cap.rx_coe_type1)
			priv->plat->rx_coe = STMMAC_RX_COE_TYPE1;

	} else
		pr_info(" No HW DMA feature register supported");

	/* Select the enhanced/normal descriptor structures */
	stmmac_selec_desc_mode(priv);

	/* Enable the IPC (Checksum Offload) and check if the feature has been
	 * enabled during the core configuration. */
	ret = priv->hw->mac->rx_ipc(priv->ioaddr);
	if (!ret) {
		pr_warning(" RX IPC Checksum Offload not configured.\n");
		priv->plat->rx_coe = STMMAC_RX_COE_NONE;
	}

	if (priv->plat->rx_coe)
		pr_info(" RX Checksum Offload Engine supported (type %d)\n",
			priv->plat->rx_coe);
	if (priv->plat->tx_coe)
		pr_info(" TX Checksum insertion supported\n");

	if (priv->plat->pmt) {
		pr_info(" Wake-Up On LAN supported\n");
		device_set_wakeup_capable(priv->device, 1);
	}

	return 0;
}
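/* For illustration only: a minimal platform data block a board file might
 * hand to the probe path below. Field names follow the
 * plat_stmmacenet_data members referenced in this file; the values are
 * hypothetical. Note that when the HW capability register is present,
 * stmmac_hw_init() overrides enh_desc, pmt, tx_coe and rx_coe with the
 * values read back from the hardware. */
#if 0	/* example only, not built */
static struct plat_stmmacenet_data example_plat_data = {
	.bus_id = 0,
	.phy_addr = -1,		/* probe for the PHY */
	.has_gmac = 1,		/* select dwmac1000_setup() */
	.enh_desc = 1,
	.tx_coe = 1,
	.clk_csr = 0,		/* 0: derive the MDC clock at run-time */
};
#endif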
/**
 * stmmac_dvr_probe
 * @device: device pointer
 * @plat_dat: platform data pointer
 * @addr: iobase memory address
 * Description: this is the main probe function, used to
 * call alloc_etherdev and allocate the private structure.
 */
struct stmmac_priv *stmmac_dvr_probe(struct device *device,
				     struct plat_stmmacenet_data *plat_dat,
				     void __iomem *addr)
{
	int ret = 0;
	struct net_device *ndev = NULL;
	struct stmmac_priv *priv;

	ndev = alloc_etherdev(sizeof(struct stmmac_priv));
	if (!ndev)
		return NULL;

	SET_NETDEV_DEV(ndev, device);

	priv = netdev_priv(ndev);
	priv->device = device;
	priv->dev = ndev;

	ether_setup(ndev);

	stmmac_set_ethtool_ops(ndev);
	priv->pause = pause;
	priv->plat = plat_dat;
	priv->ioaddr = addr;
	priv->dev->base_addr = (unsigned long)addr;

	/* Verify driver arguments */
	stmmac_verify_args();

	/* Override with kernel parameters if supplied
	 * (XXX: this needs to support multiple instances) */
	if ((phyaddr >= 0) && (phyaddr <= 31))
		priv->plat->phy_addr = phyaddr;

	/* Init MAC and get the capabilities */
	ret = stmmac_hw_init(priv);
	if (ret) {
		free_netdev(ndev);
		return NULL;
	}

	ndev->netdev_ops = &stmmac_netdev_ops;

	ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
			    NETIF_F_RXCSUM;
	ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;
	ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
#ifdef STMMAC_VLAN_TAG_USED
	/* Both mac100 and gmac support receive VLAN tag detection */
	ndev->features |= NETIF_F_HW_VLAN_RX;
#endif
	priv->msg_enable = netif_msg_init(debug, default_msg_level);

	if (flow_ctrl)
		priv->flow_ctrl = FLOW_AUTO;	/* RX/TX pause on */

	netif_napi_add(ndev, &priv->napi, stmmac_poll, 64);

	spin_lock_init(&priv->lock);
	spin_lock_init(&priv->tx_lock);

	ret = register_netdev(ndev);
	if (ret) {
		pr_err("%s: ERROR %i registering the device\n", __func__, ret);
		goto error_netdev_register;
	}

	priv->stmmac_clk = clk_get(priv->device, STMMAC_RESOURCE_NAME);
	if (IS_ERR(priv->stmmac_clk)) {
		pr_warning("%s: warning: cannot get CSR clock\n", __func__);
		goto error_clk_get;
	}

	/* If a specific clk_csr value is passed from the platform, the CSR
	 * Clock Range selection cannot be changed at run-time and is fixed.
	 * Otherwise the driver will try to set the MDC clock dynamically
	 * according to the actual CSR input clock.
	 */
	if (!priv->plat->clk_csr)
		stmmac_clk_csr_set(priv);
	else
		priv->clk_csr = priv->plat->clk_csr;

	/* MDIO bus Registration */
	ret = stmmac_mdio_register(ndev);
	if (ret < 0) {
		pr_debug("%s: MDIO bus (id: %d) registration failed",
			 __func__, priv->plat->bus_id);
		goto error_mdio_register;
	}

	return priv;

error_mdio_register:
	clk_put(priv->stmmac_clk);
error_clk_get:
	unregister_netdev(ndev);
error_netdev_register:
	netif_napi_del(&priv->napi);
	free_netdev(ndev);

	return NULL;
}
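/* A minimal sketch of how a bus back-end is expected to drive the entry
 * point above, assuming a platform device whose first MEM resource maps
 * the MAC registers and whose platform_data is a plat_stmmacenet_data.
 * The function name is hypothetical and <linux/platform_device.h> would
 * be needed. */
#if 0	/* example only, not built */
static int example_glue_probe(struct platform_device *pdev)
{
	struct plat_stmmacenet_data *plat = pdev->dev.platform_data;
	struct resource *res;
	void __iomem *addr;
	struct stmmac_priv *priv;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res || !plat)
		return -ENODEV;

	addr = ioremap(res->start, resource_size(res));
	if (!addr)
		return -ENOMEM;

	priv = stmmac_dvr_probe(&pdev->dev, plat, addr);
	if (!priv) {
		iounmap(addr);
		return -ENODEV;
	}

	platform_set_drvdata(pdev, priv->dev);
	return 0;
}
#endif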
/**
 * stmmac_dvr_remove
 * @ndev: net device pointer
 * Description: this function resets the TX/RX processes, disables the MAC
 * RX/TX, changes the link status and releases the DMA descriptor rings.
 */
int stmmac_dvr_remove(struct net_device *ndev)
{
	struct stmmac_priv *priv = netdev_priv(ndev);

	pr_info("%s:\n\tremoving driver", __func__);

	priv->hw->dma->stop_rx(priv->ioaddr);
	priv->hw->dma->stop_tx(priv->ioaddr);

	stmmac_set_mac(priv->ioaddr, false);
	stmmac_mdio_unregister(ndev);
	netif_carrier_off(ndev);
	unregister_netdev(ndev);
	free_netdev(ndev);

	return 0;
}

#ifdef CONFIG_PM
int stmmac_suspend(struct net_device *ndev)
{
	struct stmmac_priv *priv;
	int dis_ic = 0;
	unsigned long flags;

	if (!ndev || !netif_running(ndev))
		return 0;

	priv = netdev_priv(ndev);

	if (priv->phydev)
		phy_stop(priv->phydev);

	spin_lock_irqsave(&priv->lock, flags);

	netif_device_detach(ndev);
	netif_stop_queue(ndev);

#ifdef CONFIG_STMMAC_TIMER
	priv->tm->timer_stop();
	if (likely(priv->tm->enable))
		dis_ic = 1;
#endif
	napi_disable(&priv->napi);

	/* Stop TX/RX DMA */
	priv->hw->dma->stop_tx(priv->ioaddr);
	priv->hw->dma->stop_rx(priv->ioaddr);
	/* Clear the Rx/Tx descriptors */
	priv->hw->desc->init_rx_desc(priv->dma_rx, priv->dma_rx_size,
				     dis_ic);
	priv->hw->desc->init_tx_desc(priv->dma_tx, priv->dma_tx_size);

	/* Enable Power down mode by programming the PMT regs */
	if (device_may_wakeup(priv->device))
		priv->hw->mac->pmt(priv->ioaddr, priv->wolopts);
	else {
		stmmac_set_mac(priv->ioaddr, false);
		/* Disable the clock when PMT wake-up is not used */
		clk_disable(priv->stmmac_clk);
	}
	spin_unlock_irqrestore(&priv->lock, flags);
	return 0;
}

int stmmac_resume(struct net_device *ndev)
{
	struct stmmac_priv *priv = netdev_priv(ndev);
	unsigned long flags;

	if (!netif_running(ndev))
		return 0;

	spin_lock_irqsave(&priv->lock, flags);

	/* The Power Down bit in the PM register is cleared automatically
	 * as soon as a magic packet or a Wake-up frame is received.
	 * Anyway, it is better to clear this bit manually because it can
	 * cause problems when resuming from another device (e.g. a serial
	 * console). */
	if (device_may_wakeup(priv->device))
		priv->hw->mac->pmt(priv->ioaddr, 0);
	else
		/* enable the clock previously disabled */
		clk_enable(priv->stmmac_clk);

	netif_device_attach(ndev);

	/* Enable the MAC and DMA */
	stmmac_set_mac(priv->ioaddr, true);
	priv->hw->dma->start_tx(priv->ioaddr);
	priv->hw->dma->start_rx(priv->ioaddr);

#ifdef CONFIG_STMMAC_TIMER
	if (likely(priv->tm->enable))
		priv->tm->timer_start(tmrate);
#endif
	napi_enable(&priv->napi);

	netif_start_queue(ndev);

	spin_unlock_irqrestore(&priv->lock, flags);

	if (priv->phydev)
		phy_start(priv->phydev);

	return 0;
}

int stmmac_freeze(struct net_device *ndev)
{
	if (!ndev || !netif_running(ndev))
		return 0;

	return stmmac_release(ndev);
}

int stmmac_restore(struct net_device *ndev)
{
	if (!ndev || !netif_running(ndev))
		return 0;

	return stmmac_open(ndev);
}
#endif /* CONFIG_PM */
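/* A minimal sketch of how the PM helpers above might be wired into a
 * dev_pm_ops table by a bus back-end; the wrapper names are hypothetical
 * and the drvdata layout (a net_device stored via dev_set_drvdata) is an
 * assumption. */
#if 0	/* example only, not built */
static int example_glue_suspend(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);

	return stmmac_suspend(ndev);
}

static int example_glue_resume(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);

	return stmmac_resume(ndev);
}

static SIMPLE_DEV_PM_OPS(example_glue_pm_ops,
			 example_glue_suspend, example_glue_resume);
#endif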
/* The driver can be built with both the PCI and the platform bus glue,
 * depending on the configuration selected.
 */
static int __init stmmac_init(void)
{
	int err_plt = 0;
	int err_pci = 0;

	err_plt = stmmac_register_platform();
	err_pci = stmmac_register_pci();

	if ((err_pci) && (err_plt)) {
		pr_err("stmmac: driver registration failed\n");
		return -EINVAL;
	}

	return 0;
}

static void __exit stmmac_exit(void)
{
	stmmac_unregister_platform();
	stmmac_unregister_pci();
}

module_init(stmmac_init);
module_exit(stmmac_exit);

#ifndef MODULE
static int __init stmmac_cmdline_opt(char *str)
{
	char *opt;

	if (!str || !*str)
		return -EINVAL;
	while ((opt = strsep(&str, ",")) != NULL) {
		if (!strncmp(opt, "debug:", 6)) {
			if (kstrtoint(opt + 6, 0, &debug))
				goto err;
		} else if (!strncmp(opt, "phyaddr:", 8)) {
			if (kstrtoint(opt + 8, 0, &phyaddr))
				goto err;
		} else if (!strncmp(opt, "dma_txsize:", 11)) {
			if (kstrtoint(opt + 11, 0, &dma_txsize))
				goto err;
		} else if (!strncmp(opt, "dma_rxsize:", 11)) {
			if (kstrtoint(opt + 11, 0, &dma_rxsize))
				goto err;
		} else if (!strncmp(opt, "buf_sz:", 7)) {
			if (kstrtoint(opt + 7, 0, &buf_sz))
				goto err;
		} else if (!strncmp(opt, "tc:", 3)) {
			if (kstrtoint(opt + 3, 0, &tc))
				goto err;
		} else if (!strncmp(opt, "watchdog:", 9)) {
			if (kstrtoint(opt + 9, 0, &watchdog))
				goto err;
		} else if (!strncmp(opt, "flow_ctrl:", 10)) {
			if (kstrtoint(opt + 10, 0, &flow_ctrl))
				goto err;
		} else if (!strncmp(opt, "pause:", 6)) {
			if (kstrtoint(opt + 6, 0, &pause))
				goto err;
		} else if (!strncmp(opt, "eee_timer:", 10)) {
			/* match the full "eee_timer:" prefix (10 chars) */
			if (kstrtoint(opt + 10, 0, &eee_timer))
				goto err;
#ifdef CONFIG_STMMAC_TIMER
		} else if (!strncmp(opt, "tmrate:", 7)) {
			if (kstrtoint(opt + 7, 0, &tmrate))
				goto err;
#endif
		}
	}
	return 0;

err:
	pr_err("%s: ERROR broken module parameter conversion\n", __func__);
	return -EINVAL;
}

__setup("stmmaceth=", stmmac_cmdline_opt);
#endif

MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet device driver");
MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
MODULE_LICENSE("GPL");
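/* Example (illustrative) of the "stmmaceth=" boot option parsed by
 * stmmac_cmdline_opt() above, available when the driver is built in:
 *
 *	stmmaceth=watchdog:4000,debug:16,phyaddr:1,dma_txsize:128
 *
 * When built as a module, the corresponding module parameters are used
 * instead (e.g. watchdog=4000 debug=16 on the modprobe command line).
 */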