1 /******************************************************************************* 2 This is the driver for the ST MAC 10/100/1000 on-chip Ethernet controllers. 3 ST Ethernet IPs are built around a Synopsys IP Core. 4 5 Copyright(C) 2007-2011 STMicroelectronics Ltd 6 7 This program is free software; you can redistribute it and/or modify it 8 under the terms and conditions of the GNU General Public License, 9 version 2, as published by the Free Software Foundation. 10 11 This program is distributed in the hope it will be useful, but WITHOUT 12 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 14 more details. 15 16 The full GNU General Public License is included in this distribution in 17 the file called "COPYING". 18 19 Author: Giuseppe Cavallaro <peppe.cavallaro@st.com> 20 21 Documentation available at: 22 http://www.stlinux.com 23 Support available at: 24 https://bugzilla.stlinux.com/ 25 *******************************************************************************/ 26 27 #include <linux/clk.h> 28 #include <linux/kernel.h> 29 #include <linux/interrupt.h> 30 #include <linux/ip.h> 31 #include <linux/tcp.h> 32 #include <linux/skbuff.h> 33 #include <linux/ethtool.h> 34 #include <linux/if_ether.h> 35 #include <linux/crc32.h> 36 #include <linux/mii.h> 37 #include <linux/if.h> 38 #include <linux/if_vlan.h> 39 #include <linux/dma-mapping.h> 40 #include <linux/slab.h> 41 #include <linux/prefetch.h> 42 #include <linux/pinctrl/consumer.h> 43 #ifdef CONFIG_DEBUG_FS 44 #include <linux/debugfs.h> 45 #include <linux/seq_file.h> 46 #endif /* CONFIG_DEBUG_FS */ 47 #include <linux/net_tstamp.h> 48 #include "stmmac_ptp.h" 49 #include "stmmac.h" 50 #include <linux/reset.h> 51 #include <linux/of_mdio.h> 52 #include "dwmac1000.h" 53 #include "hwif.h" 54 55 #define STMMAC_ALIGN(x) L1_CACHE_ALIGN(x) 56 #define TSO_MAX_BUFF_SIZE (SZ_16K - 1) 57 58 /* Module parameters */ 59 #define TX_TIMEO 5000 60 static int watchdog = TX_TIMEO; 61 module_param(watchdog, int, 0644); 62 MODULE_PARM_DESC(watchdog, "Transmit timeout in milliseconds (default 5s)"); 63 64 static int debug = -1; 65 module_param(debug, int, 0644); 66 MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)"); 67 68 static int phyaddr = -1; 69 module_param(phyaddr, int, 0444); 70 MODULE_PARM_DESC(phyaddr, "Physical device address"); 71 72 #define STMMAC_TX_THRESH (DMA_TX_SIZE / 4) 73 #define STMMAC_RX_THRESH (DMA_RX_SIZE / 4) 74 75 static int flow_ctrl = FLOW_OFF; 76 module_param(flow_ctrl, int, 0644); 77 MODULE_PARM_DESC(flow_ctrl, "Flow control ability [on/off]"); 78 79 static int pause = PAUSE_TIME; 80 module_param(pause, int, 0644); 81 MODULE_PARM_DESC(pause, "Flow Control Pause Time"); 82 83 #define TC_DEFAULT 64 84 static int tc = TC_DEFAULT; 85 module_param(tc, int, 0644); 86 MODULE_PARM_DESC(tc, "DMA threshold control value"); 87 88 #define DEFAULT_BUFSIZE 1536 89 static int buf_sz = DEFAULT_BUFSIZE; 90 module_param(buf_sz, int, 0644); 91 MODULE_PARM_DESC(buf_sz, "DMA buffer size"); 92 93 #define STMMAC_RX_COPYBREAK 256 94 95 static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE | 96 NETIF_MSG_LINK | NETIF_MSG_IFUP | 97 NETIF_MSG_IFDOWN | NETIF_MSG_TIMER); 98 99 #define STMMAC_DEFAULT_LPI_TIMER 1000 100 static int eee_timer = STMMAC_DEFAULT_LPI_TIMER; 101 module_param(eee_timer, int, 0644); 102 MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec"); 103 #define STMMAC_LPI_T(x) (jiffies + msecs_to_jiffies(x)) 
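/* Usage sketch (an illustration, not part of the driver; it assumes the
 * driver is built as the "stmmac" module): the parameters above can be
 * overridden at load time, e.g.
 *
 *	modprobe stmmac eee_timer=2000 buf_sz=2048 chain_mode=1
 *
 * and the ones registered with mode 0644 can also be changed at run time
 * through /sys/module/stmmac/parameters/.
 */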
104 105 /* By default the driver will use the ring mode to manage tx and rx descriptors, 106 * but allow user to force to use the chain instead of the ring 107 */ 108 static unsigned int chain_mode; 109 module_param(chain_mode, int, 0444); 110 MODULE_PARM_DESC(chain_mode, "To use chain instead of ring mode"); 111 112 static irqreturn_t stmmac_interrupt(int irq, void *dev_id); 113 114 #ifdef CONFIG_DEBUG_FS 115 static int stmmac_init_fs(struct net_device *dev); 116 static void stmmac_exit_fs(struct net_device *dev); 117 #endif 118 119 #define STMMAC_COAL_TIMER(x) (jiffies + usecs_to_jiffies(x)) 120 121 /** 122 * stmmac_verify_args - verify the driver parameters. 123 * Description: it checks the driver parameters and set a default in case of 124 * errors. 125 */ 126 static void stmmac_verify_args(void) 127 { 128 if (unlikely(watchdog < 0)) 129 watchdog = TX_TIMEO; 130 if (unlikely((buf_sz < DEFAULT_BUFSIZE) || (buf_sz > BUF_SIZE_16KiB))) 131 buf_sz = DEFAULT_BUFSIZE; 132 if (unlikely(flow_ctrl > 1)) 133 flow_ctrl = FLOW_AUTO; 134 else if (likely(flow_ctrl < 0)) 135 flow_ctrl = FLOW_OFF; 136 if (unlikely((pause < 0) || (pause > 0xffff))) 137 pause = PAUSE_TIME; 138 if (eee_timer < 0) 139 eee_timer = STMMAC_DEFAULT_LPI_TIMER; 140 } 141 142 /** 143 * stmmac_disable_all_queues - Disable all queues 144 * @priv: driver private structure 145 */ 146 static void stmmac_disable_all_queues(struct stmmac_priv *priv) 147 { 148 u32 rx_queues_cnt = priv->plat->rx_queues_to_use; 149 u32 queue; 150 151 for (queue = 0; queue < rx_queues_cnt; queue++) { 152 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; 153 154 napi_disable(&rx_q->napi); 155 } 156 } 157 158 /** 159 * stmmac_enable_all_queues - Enable all queues 160 * @priv: driver private structure 161 */ 162 static void stmmac_enable_all_queues(struct stmmac_priv *priv) 163 { 164 u32 rx_queues_cnt = priv->plat->rx_queues_to_use; 165 u32 queue; 166 167 for (queue = 0; queue < rx_queues_cnt; queue++) { 168 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; 169 170 napi_enable(&rx_q->napi); 171 } 172 } 173 174 /** 175 * stmmac_stop_all_queues - Stop all queues 176 * @priv: driver private structure 177 */ 178 static void stmmac_stop_all_queues(struct stmmac_priv *priv) 179 { 180 u32 tx_queues_cnt = priv->plat->tx_queues_to_use; 181 u32 queue; 182 183 for (queue = 0; queue < tx_queues_cnt; queue++) 184 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue)); 185 } 186 187 /** 188 * stmmac_start_all_queues - Start all queues 189 * @priv: driver private structure 190 */ 191 static void stmmac_start_all_queues(struct stmmac_priv *priv) 192 { 193 u32 tx_queues_cnt = priv->plat->tx_queues_to_use; 194 u32 queue; 195 196 for (queue = 0; queue < tx_queues_cnt; queue++) 197 netif_tx_start_queue(netdev_get_tx_queue(priv->dev, queue)); 198 } 199 200 static void stmmac_service_event_schedule(struct stmmac_priv *priv) 201 { 202 if (!test_bit(STMMAC_DOWN, &priv->state) && 203 !test_and_set_bit(STMMAC_SERVICE_SCHED, &priv->state)) 204 queue_work(priv->wq, &priv->service_task); 205 } 206 207 static void stmmac_global_err(struct stmmac_priv *priv) 208 { 209 netif_carrier_off(priv->dev); 210 set_bit(STMMAC_RESET_REQUESTED, &priv->state); 211 stmmac_service_event_schedule(priv); 212 } 213 214 /** 215 * stmmac_clk_csr_set - dynamically set the MDC clock 216 * @priv: driver private structure 217 * Description: this is to dynamically set the MDC clock according to the csr 218 * clock input. 
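 * For example (divider values as given in the Synopsys databook, quoted
 * here only as an illustration): a 125 MHz csr clock falls in the
 * 100-150 MHz range, so STMMAC_CSR_100_150M is selected and MDC runs at
 * roughly csr_clk/62, i.e. about 2 MHz.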
 * Note:
 *	If a specific clk_csr value is passed from the platform
 *	this means that the CSR Clock Range selection cannot be
 *	changed at run-time and it is fixed (as reported in the driver
 *	documentation). Vice versa, the driver will try to set the MDC
 *	clock dynamically according to the actual clock input.
 */
static void stmmac_clk_csr_set(struct stmmac_priv *priv)
{
	u32 clk_rate;

	clk_rate = clk_get_rate(priv->plat->stmmac_clk);

	/* Platform provided default clk_csr would be assumed valid
	 * for all other cases except for the below mentioned ones.
	 * For values higher than the IEEE 802.3 specified frequency
	 * we cannot estimate the proper divider, as the frequency of
	 * clk_csr_i is not known. So we do not change the default
	 * divider.
	 */
	if (!(priv->clk_csr & MAC_CSR_H_FRQ_MASK)) {
		if (clk_rate < CSR_F_35M)
			priv->clk_csr = STMMAC_CSR_20_35M;
		else if ((clk_rate >= CSR_F_35M) && (clk_rate < CSR_F_60M))
			priv->clk_csr = STMMAC_CSR_35_60M;
		else if ((clk_rate >= CSR_F_60M) && (clk_rate < CSR_F_100M))
			priv->clk_csr = STMMAC_CSR_60_100M;
		else if ((clk_rate >= CSR_F_100M) && (clk_rate < CSR_F_150M))
			priv->clk_csr = STMMAC_CSR_100_150M;
		else if ((clk_rate >= CSR_F_150M) && (clk_rate < CSR_F_250M))
			priv->clk_csr = STMMAC_CSR_150_250M;
		else if ((clk_rate >= CSR_F_250M) && (clk_rate < CSR_F_300M))
			priv->clk_csr = STMMAC_CSR_250_300M;
	}

	if (priv->plat->has_sun8i) {
		if (clk_rate > 160000000)
			priv->clk_csr = 0x03;
		else if (clk_rate > 80000000)
			priv->clk_csr = 0x02;
		else if (clk_rate > 40000000)
			priv->clk_csr = 0x01;
		else
			priv->clk_csr = 0;
	}
}

static void print_pkt(unsigned char *buf, int len)
{
	pr_debug("len = %d byte, buf addr: 0x%p\n", len, buf);
	print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, buf, len);
}

static inline u32 stmmac_tx_avail(struct stmmac_priv *priv, u32 queue)
{
	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
	u32 avail;

	if (tx_q->dirty_tx > tx_q->cur_tx)
		avail = tx_q->dirty_tx - tx_q->cur_tx - 1;
	else
		avail = DMA_TX_SIZE - tx_q->cur_tx + tx_q->dirty_tx - 1;

	return avail;
}

/**
 * stmmac_rx_dirty - Get RX queue dirty
 * @priv: driver private structure
 * @queue: RX queue index
 */
static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv, u32 queue)
{
	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
	u32 dirty;

	if (rx_q->dirty_rx <= rx_q->cur_rx)
		dirty = rx_q->cur_rx - rx_q->dirty_rx;
	else
		dirty = DMA_RX_SIZE - rx_q->dirty_rx + rx_q->cur_rx;

	return dirty;
}

/**
 * stmmac_hw_fix_mac_speed - callback for speed selection
 * @priv: driver private structure
 * Description: on some platforms (e.g. ST), some HW system configuration
 * registers have to be set according to the link speed negotiated.
 */
static inline void stmmac_hw_fix_mac_speed(struct stmmac_priv *priv)
{
	struct net_device *ndev = priv->dev;
	struct phy_device *phydev = ndev->phydev;

	if (likely(priv->plat->fix_mac_speed))
		priv->plat->fix_mac_speed(priv->plat->bsp_priv, phydev->speed);
}

/**
 * stmmac_enable_eee_mode - check and enter in LPI mode
 * @priv: driver private structure
 * Description: this function checks that all TX queues have finished their
 * work and, if so, enters LPI mode when EEE is enabled.
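 * Entry is re-attempted periodically by stmmac_eee_ctrl_timer(), which
 * calls this function and re-arms itself every eee_timer milliseconds.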
 */
static void stmmac_enable_eee_mode(struct stmmac_priv *priv)
{
	u32 tx_cnt = priv->plat->tx_queues_to_use;
	u32 queue;

	/* check if all TX queues have the work finished */
	for (queue = 0; queue < tx_cnt; queue++) {
		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];

		if (tx_q->dirty_tx != tx_q->cur_tx)
			return; /* still unfinished work */
	}

	/* Check and enter in LPI mode */
	if (!priv->tx_path_in_lpi_mode)
		stmmac_set_eee_mode(priv, priv->hw,
				priv->plat->en_tx_lpi_clockgating);
}

/**
 * stmmac_disable_eee_mode - disable and exit from LPI mode
 * @priv: driver private structure
 * Description: this function exits and disables EEE when the LPI state is
 * active. It is called from the xmit path.
 */
void stmmac_disable_eee_mode(struct stmmac_priv *priv)
{
	stmmac_reset_eee_mode(priv, priv->hw);
	del_timer_sync(&priv->eee_ctrl_timer);
	priv->tx_path_in_lpi_mode = false;
}

/**
 * stmmac_eee_ctrl_timer - EEE TX SW timer.
 * @t: timer_list entry used to recover the private structure
 * Description:
 * if there is no data transfer and we are not already in LPI state,
 * then the MAC transmitter can be moved to LPI state.
 */
static void stmmac_eee_ctrl_timer(struct timer_list *t)
{
	struct stmmac_priv *priv = from_timer(priv, t, eee_ctrl_timer);

	stmmac_enable_eee_mode(priv);
	mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer));
}

/**
 * stmmac_eee_init - init EEE
 * @priv: driver private structure
 * Description:
 * if the GMAC supports the EEE (from the HW cap reg) and the phy device
 * can also manage EEE, this function enables the LPI state and starts the
 * related timer.
 */
bool stmmac_eee_init(struct stmmac_priv *priv)
{
	struct net_device *ndev = priv->dev;
	int interface = priv->plat->interface;
	unsigned long flags;
	bool ret = false;

	if ((interface != PHY_INTERFACE_MODE_MII) &&
	    (interface != PHY_INTERFACE_MODE_GMII) &&
	    !phy_interface_mode_is_rgmii(interface))
		goto out;

	/* Using PCS we cannot deal with the PHY registers at this stage,
	 * so we do not support extra features like EEE.
	 */
	if ((priv->hw->pcs == STMMAC_PCS_RGMII) ||
	    (priv->hw->pcs == STMMAC_PCS_TBI) ||
	    (priv->hw->pcs == STMMAC_PCS_RTBI))
		goto out;

	/* MAC core supports the EEE feature. */
	if (priv->dma_cap.eee) {
		int tx_lpi_timer = priv->tx_lpi_timer;

		/* Check if the PHY supports EEE */
		if (phy_init_eee(ndev->phydev, 1)) {
			/* To manage at run-time if the EEE cannot be supported
			 * anymore (for example because the lp caps have been
			 * changed).
			 * In that case the driver disables its own timers.
409 */ 410 spin_lock_irqsave(&priv->lock, flags); 411 if (priv->eee_active) { 412 netdev_dbg(priv->dev, "disable EEE\n"); 413 del_timer_sync(&priv->eee_ctrl_timer); 414 stmmac_set_eee_timer(priv, priv->hw, 0, 415 tx_lpi_timer); 416 } 417 priv->eee_active = 0; 418 spin_unlock_irqrestore(&priv->lock, flags); 419 goto out; 420 } 421 /* Activate the EEE and start timers */ 422 spin_lock_irqsave(&priv->lock, flags); 423 if (!priv->eee_active) { 424 priv->eee_active = 1; 425 timer_setup(&priv->eee_ctrl_timer, 426 stmmac_eee_ctrl_timer, 0); 427 mod_timer(&priv->eee_ctrl_timer, 428 STMMAC_LPI_T(eee_timer)); 429 430 stmmac_set_eee_timer(priv, priv->hw, 431 STMMAC_DEFAULT_LIT_LS, tx_lpi_timer); 432 } 433 /* Set HW EEE according to the speed */ 434 stmmac_set_eee_pls(priv, priv->hw, ndev->phydev->link); 435 436 ret = true; 437 spin_unlock_irqrestore(&priv->lock, flags); 438 439 netdev_dbg(priv->dev, "Energy-Efficient Ethernet initialized\n"); 440 } 441 out: 442 return ret; 443 } 444 445 /* stmmac_get_tx_hwtstamp - get HW TX timestamps 446 * @priv: driver private structure 447 * @p : descriptor pointer 448 * @skb : the socket buffer 449 * Description : 450 * This function will read timestamp from the descriptor & pass it to stack. 451 * and also perform some sanity checks. 452 */ 453 static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv, 454 struct dma_desc *p, struct sk_buff *skb) 455 { 456 struct skb_shared_hwtstamps shhwtstamp; 457 u64 ns; 458 459 if (!priv->hwts_tx_en) 460 return; 461 462 /* exit if skb doesn't support hw tstamp */ 463 if (likely(!skb || !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS))) 464 return; 465 466 /* check tx tstamp status */ 467 if (stmmac_get_tx_timestamp_status(priv, p)) { 468 /* get the valid tstamp */ 469 stmmac_get_timestamp(priv, p, priv->adv_ts, &ns); 470 471 memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps)); 472 shhwtstamp.hwtstamp = ns_to_ktime(ns); 473 474 netdev_dbg(priv->dev, "get valid TX hw timestamp %llu\n", ns); 475 /* pass tstamp to stack */ 476 skb_tstamp_tx(skb, &shhwtstamp); 477 } 478 479 return; 480 } 481 482 /* stmmac_get_rx_hwtstamp - get HW RX timestamps 483 * @priv: driver private structure 484 * @p : descriptor pointer 485 * @np : next descriptor pointer 486 * @skb : the socket buffer 487 * Description : 488 * This function will read received packet's timestamp from the descriptor 489 * and pass it to stack. It also perform some sanity checks. 490 */ 491 static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p, 492 struct dma_desc *np, struct sk_buff *skb) 493 { 494 struct skb_shared_hwtstamps *shhwtstamp = NULL; 495 struct dma_desc *desc = p; 496 u64 ns; 497 498 if (!priv->hwts_rx_en) 499 return; 500 /* For GMAC4, the valid timestamp is from CTX next desc. */ 501 if (priv->plat->has_gmac4) 502 desc = np; 503 504 /* Check if timestamp is available */ 505 if (stmmac_get_rx_timestamp_status(priv, p, np, priv->adv_ts)) { 506 stmmac_get_timestamp(priv, desc, priv->adv_ts, &ns); 507 netdev_dbg(priv->dev, "get valid RX hw timestamp %llu\n", ns); 508 shhwtstamp = skb_hwtstamps(skb); 509 memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps)); 510 shhwtstamp->hwtstamp = ns_to_ktime(ns); 511 } else { 512 netdev_dbg(priv->dev, "cannot get RX hw timestamp\n"); 513 } 514 } 515 516 /** 517 * stmmac_hwtstamp_ioctl - control hardware timestamping. 518 * @dev: device pointer. 519 * @ifr: An IOCTL specific structure, that can contain a pointer to 520 * a proprietary structure used to pass information to the driver. 
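 * A minimal user-space sketch (an illustration only, assuming a netdev
 * named "eth0" driven by this MAC and any open socket fd):
 *
 *	struct hwtstamp_config cfg = { 0 };
 *	struct ifreq ifr = { 0 };
 *
 *	cfg.tx_type = HWTSTAMP_TX_ON;
 *	cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *	ifr.ifr_data = (void *)&cfg;
 *	ioctl(fd, SIOCSHWTSTAMP, &ifr);
 *
 * On return, cfg reports the rx_filter actually programmed below.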
521 * Description: 522 * This function configures the MAC to enable/disable both outgoing(TX) 523 * and incoming(RX) packets time stamping based on user input. 524 * Return Value: 525 * 0 on success and an appropriate -ve integer on failure. 526 */ 527 static int stmmac_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr) 528 { 529 struct stmmac_priv *priv = netdev_priv(dev); 530 struct hwtstamp_config config; 531 struct timespec64 now; 532 u64 temp = 0; 533 u32 ptp_v2 = 0; 534 u32 tstamp_all = 0; 535 u32 ptp_over_ipv4_udp = 0; 536 u32 ptp_over_ipv6_udp = 0; 537 u32 ptp_over_ethernet = 0; 538 u32 snap_type_sel = 0; 539 u32 ts_master_en = 0; 540 u32 ts_event_en = 0; 541 u32 value = 0; 542 u32 sec_inc; 543 544 if (!(priv->dma_cap.time_stamp || priv->adv_ts)) { 545 netdev_alert(priv->dev, "No support for HW time stamping\n"); 546 priv->hwts_tx_en = 0; 547 priv->hwts_rx_en = 0; 548 549 return -EOPNOTSUPP; 550 } 551 552 if (copy_from_user(&config, ifr->ifr_data, 553 sizeof(struct hwtstamp_config))) 554 return -EFAULT; 555 556 netdev_dbg(priv->dev, "%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n", 557 __func__, config.flags, config.tx_type, config.rx_filter); 558 559 /* reserved for future extensions */ 560 if (config.flags) 561 return -EINVAL; 562 563 if (config.tx_type != HWTSTAMP_TX_OFF && 564 config.tx_type != HWTSTAMP_TX_ON) 565 return -ERANGE; 566 567 if (priv->adv_ts) { 568 switch (config.rx_filter) { 569 case HWTSTAMP_FILTER_NONE: 570 /* time stamp no incoming packet at all */ 571 config.rx_filter = HWTSTAMP_FILTER_NONE; 572 break; 573 574 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: 575 /* PTP v1, UDP, any kind of event packet */ 576 config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT; 577 /* take time stamp for all event messages */ 578 if (priv->plat->has_gmac4) 579 snap_type_sel = PTP_GMAC4_TCR_SNAPTYPSEL_1; 580 else 581 snap_type_sel = PTP_TCR_SNAPTYPSEL_1; 582 583 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA; 584 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA; 585 break; 586 587 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: 588 /* PTP v1, UDP, Sync packet */ 589 config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC; 590 /* take time stamp for SYNC messages only */ 591 ts_event_en = PTP_TCR_TSEVNTENA; 592 593 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA; 594 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA; 595 break; 596 597 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: 598 /* PTP v1, UDP, Delay_req packet */ 599 config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ; 600 /* take time stamp for Delay_Req messages only */ 601 ts_master_en = PTP_TCR_TSMSTRENA; 602 ts_event_en = PTP_TCR_TSEVNTENA; 603 604 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA; 605 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA; 606 break; 607 608 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: 609 /* PTP v2, UDP, any kind of event packet */ 610 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT; 611 ptp_v2 = PTP_TCR_TSVER2ENA; 612 /* take time stamp for all event messages */ 613 if (priv->plat->has_gmac4) 614 snap_type_sel = PTP_GMAC4_TCR_SNAPTYPSEL_1; 615 else 616 snap_type_sel = PTP_TCR_SNAPTYPSEL_1; 617 618 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA; 619 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA; 620 break; 621 622 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: 623 /* PTP v2, UDP, Sync packet */ 624 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC; 625 ptp_v2 = PTP_TCR_TSVER2ENA; 626 /* take time stamp for SYNC messages only */ 627 ts_event_en = PTP_TCR_TSEVNTENA; 628 629 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA; 630 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA; 631 break; 632 633 case 
HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: 634 /* PTP v2, UDP, Delay_req packet */ 635 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ; 636 ptp_v2 = PTP_TCR_TSVER2ENA; 637 /* take time stamp for Delay_Req messages only */ 638 ts_master_en = PTP_TCR_TSMSTRENA; 639 ts_event_en = PTP_TCR_TSEVNTENA; 640 641 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA; 642 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA; 643 break; 644 645 case HWTSTAMP_FILTER_PTP_V2_EVENT: 646 /* PTP v2/802.AS1 any layer, any kind of event packet */ 647 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT; 648 ptp_v2 = PTP_TCR_TSVER2ENA; 649 /* take time stamp for all event messages */ 650 if (priv->plat->has_gmac4) 651 snap_type_sel = PTP_GMAC4_TCR_SNAPTYPSEL_1; 652 else 653 snap_type_sel = PTP_TCR_SNAPTYPSEL_1; 654 655 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA; 656 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA; 657 ptp_over_ethernet = PTP_TCR_TSIPENA; 658 break; 659 660 case HWTSTAMP_FILTER_PTP_V2_SYNC: 661 /* PTP v2/802.AS1, any layer, Sync packet */ 662 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC; 663 ptp_v2 = PTP_TCR_TSVER2ENA; 664 /* take time stamp for SYNC messages only */ 665 ts_event_en = PTP_TCR_TSEVNTENA; 666 667 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA; 668 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA; 669 ptp_over_ethernet = PTP_TCR_TSIPENA; 670 break; 671 672 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: 673 /* PTP v2/802.AS1, any layer, Delay_req packet */ 674 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ; 675 ptp_v2 = PTP_TCR_TSVER2ENA; 676 /* take time stamp for Delay_Req messages only */ 677 ts_master_en = PTP_TCR_TSMSTRENA; 678 ts_event_en = PTP_TCR_TSEVNTENA; 679 680 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA; 681 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA; 682 ptp_over_ethernet = PTP_TCR_TSIPENA; 683 break; 684 685 case HWTSTAMP_FILTER_NTP_ALL: 686 case HWTSTAMP_FILTER_ALL: 687 /* time stamp any incoming packet */ 688 config.rx_filter = HWTSTAMP_FILTER_ALL; 689 tstamp_all = PTP_TCR_TSENALL; 690 break; 691 692 default: 693 return -ERANGE; 694 } 695 } else { 696 switch (config.rx_filter) { 697 case HWTSTAMP_FILTER_NONE: 698 config.rx_filter = HWTSTAMP_FILTER_NONE; 699 break; 700 default: 701 /* PTP v1, UDP, any kind of event packet */ 702 config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT; 703 break; 704 } 705 } 706 priv->hwts_rx_en = ((config.rx_filter == HWTSTAMP_FILTER_NONE) ? 
0 : 1); 707 priv->hwts_tx_en = config.tx_type == HWTSTAMP_TX_ON; 708 709 if (!priv->hwts_tx_en && !priv->hwts_rx_en) 710 stmmac_config_hw_tstamping(priv, priv->ptpaddr, 0); 711 else { 712 value = (PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | PTP_TCR_TSCTRLSSR | 713 tstamp_all | ptp_v2 | ptp_over_ethernet | 714 ptp_over_ipv6_udp | ptp_over_ipv4_udp | ts_event_en | 715 ts_master_en | snap_type_sel); 716 stmmac_config_hw_tstamping(priv, priv->ptpaddr, value); 717 718 /* program Sub Second Increment reg */ 719 stmmac_config_sub_second_increment(priv, 720 priv->ptpaddr, priv->plat->clk_ptp_rate, 721 priv->plat->has_gmac4, &sec_inc); 722 temp = div_u64(1000000000ULL, sec_inc); 723 724 /* calculate default added value: 725 * formula is : 726 * addend = (2^32)/freq_div_ratio; 727 * where, freq_div_ratio = 1e9ns/sec_inc 728 */ 729 temp = (u64)(temp << 32); 730 priv->default_addend = div_u64(temp, priv->plat->clk_ptp_rate); 731 stmmac_config_addend(priv, priv->ptpaddr, priv->default_addend); 732 733 /* initialize system time */ 734 ktime_get_real_ts64(&now); 735 736 /* lower 32 bits of tv_sec are safe until y2106 */ 737 stmmac_init_systime(priv, priv->ptpaddr, 738 (u32)now.tv_sec, now.tv_nsec); 739 } 740 741 return copy_to_user(ifr->ifr_data, &config, 742 sizeof(struct hwtstamp_config)) ? -EFAULT : 0; 743 } 744 745 /** 746 * stmmac_init_ptp - init PTP 747 * @priv: driver private structure 748 * Description: this is to verify if the HW supports the PTPv1 or PTPv2. 749 * This is done by looking at the HW cap. register. 750 * This function also registers the ptp driver. 751 */ 752 static int stmmac_init_ptp(struct stmmac_priv *priv) 753 { 754 if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp)) 755 return -EOPNOTSUPP; 756 757 priv->adv_ts = 0; 758 /* Check if adv_ts can be enabled for dwmac 4.x core */ 759 if (priv->plat->has_gmac4 && priv->dma_cap.atime_stamp) 760 priv->adv_ts = 1; 761 /* Dwmac 3.x core with extend_desc can support adv_ts */ 762 else if (priv->extend_desc && priv->dma_cap.atime_stamp) 763 priv->adv_ts = 1; 764 765 if (priv->dma_cap.time_stamp) 766 netdev_info(priv->dev, "IEEE 1588-2002 Timestamp supported\n"); 767 768 if (priv->adv_ts) 769 netdev_info(priv->dev, 770 "IEEE 1588-2008 Advanced Timestamp supported\n"); 771 772 priv->hw->ptp = &stmmac_ptp; 773 priv->hwts_tx_en = 0; 774 priv->hwts_rx_en = 0; 775 776 stmmac_ptp_register(priv); 777 778 return 0; 779 } 780 781 static void stmmac_release_ptp(struct stmmac_priv *priv) 782 { 783 if (priv->plat->clk_ptp_ref) 784 clk_disable_unprepare(priv->plat->clk_ptp_ref); 785 stmmac_ptp_unregister(priv); 786 } 787 788 /** 789 * stmmac_mac_flow_ctrl - Configure flow control in all queues 790 * @priv: driver private structure 791 * Description: It is used for configuring the flow control in all queues 792 */ 793 static void stmmac_mac_flow_ctrl(struct stmmac_priv *priv, u32 duplex) 794 { 795 u32 tx_cnt = priv->plat->tx_queues_to_use; 796 797 stmmac_flow_ctrl(priv, priv->hw, duplex, priv->flow_ctrl, 798 priv->pause, tx_cnt); 799 } 800 801 /** 802 * stmmac_adjust_link - adjusts the link parameters 803 * @dev: net device structure 804 * Description: this is the helper called by the physical abstraction layer 805 * drivers to communicate the phy link status. According the speed and duplex 806 * this driver can invoke registered glue-logic as well. 807 * It also invoke the eee initialization because it could happen when switch 808 * on different networks (that are eee capable). 
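 * For example, when the PHY reports a 1000/Full link, the duplex bit and
 * the hw->link.speed1000 bits are set in MAC_CTRL_REG and, if the platform
 * provides fix_mac_speed() glue, it is invoked with the new speed.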
809 */ 810 static void stmmac_adjust_link(struct net_device *dev) 811 { 812 struct stmmac_priv *priv = netdev_priv(dev); 813 struct phy_device *phydev = dev->phydev; 814 unsigned long flags; 815 bool new_state = false; 816 817 if (!phydev) 818 return; 819 820 spin_lock_irqsave(&priv->lock, flags); 821 822 if (phydev->link) { 823 u32 ctrl = readl(priv->ioaddr + MAC_CTRL_REG); 824 825 /* Now we make sure that we can be in full duplex mode. 826 * If not, we operate in half-duplex mode. */ 827 if (phydev->duplex != priv->oldduplex) { 828 new_state = true; 829 if (!phydev->duplex) 830 ctrl &= ~priv->hw->link.duplex; 831 else 832 ctrl |= priv->hw->link.duplex; 833 priv->oldduplex = phydev->duplex; 834 } 835 /* Flow Control operation */ 836 if (phydev->pause) 837 stmmac_mac_flow_ctrl(priv, phydev->duplex); 838 839 if (phydev->speed != priv->speed) { 840 new_state = true; 841 ctrl &= ~priv->hw->link.speed_mask; 842 switch (phydev->speed) { 843 case SPEED_1000: 844 ctrl |= priv->hw->link.speed1000; 845 break; 846 case SPEED_100: 847 ctrl |= priv->hw->link.speed100; 848 break; 849 case SPEED_10: 850 ctrl |= priv->hw->link.speed10; 851 break; 852 default: 853 netif_warn(priv, link, priv->dev, 854 "broken speed: %d\n", phydev->speed); 855 phydev->speed = SPEED_UNKNOWN; 856 break; 857 } 858 if (phydev->speed != SPEED_UNKNOWN) 859 stmmac_hw_fix_mac_speed(priv); 860 priv->speed = phydev->speed; 861 } 862 863 writel(ctrl, priv->ioaddr + MAC_CTRL_REG); 864 865 if (!priv->oldlink) { 866 new_state = true; 867 priv->oldlink = true; 868 } 869 } else if (priv->oldlink) { 870 new_state = true; 871 priv->oldlink = false; 872 priv->speed = SPEED_UNKNOWN; 873 priv->oldduplex = DUPLEX_UNKNOWN; 874 } 875 876 if (new_state && netif_msg_link(priv)) 877 phy_print_status(phydev); 878 879 spin_unlock_irqrestore(&priv->lock, flags); 880 881 if (phydev->is_pseudo_fixed_link) 882 /* Stop PHY layer to call the hook to adjust the link in case 883 * of a switch is attached to the stmmac driver. 884 */ 885 phydev->irq = PHY_IGNORE_INTERRUPT; 886 else 887 /* At this stage, init the EEE if supported. 888 * Never called in case of fixed_link. 889 */ 890 priv->eee_enabled = stmmac_eee_init(priv); 891 } 892 893 /** 894 * stmmac_check_pcs_mode - verify if RGMII/SGMII is supported 895 * @priv: driver private structure 896 * Description: this is to verify if the HW supports the PCS. 897 * Physical Coding Sublayer (PCS) interface that can be used when the MAC is 898 * configured for the TBI, RTBI, or SGMII PHY interface. 899 */ 900 static void stmmac_check_pcs_mode(struct stmmac_priv *priv) 901 { 902 int interface = priv->plat->interface; 903 904 if (priv->dma_cap.pcs) { 905 if ((interface == PHY_INTERFACE_MODE_RGMII) || 906 (interface == PHY_INTERFACE_MODE_RGMII_ID) || 907 (interface == PHY_INTERFACE_MODE_RGMII_RXID) || 908 (interface == PHY_INTERFACE_MODE_RGMII_TXID)) { 909 netdev_dbg(priv->dev, "PCS RGMII support enabled\n"); 910 priv->hw->pcs = STMMAC_PCS_RGMII; 911 } else if (interface == PHY_INTERFACE_MODE_SGMII) { 912 netdev_dbg(priv->dev, "PCS SGMII support enabled\n"); 913 priv->hw->pcs = STMMAC_PCS_SGMII; 914 } 915 } 916 } 917 918 /** 919 * stmmac_init_phy - PHY initialization 920 * @dev: net device structure 921 * Description: it initializes the driver's PHY state, and attaches the PHY 922 * to the mac driver. 
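 * When the platform does not provide a phy_node, the PHY is looked up by
 * bus id and address through PHY_ID_FMT; e.g. bus_id 0 with phy_addr 3
 * gives the id "stmmac-0:03".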
923 * Return value: 924 * 0 on success 925 */ 926 static int stmmac_init_phy(struct net_device *dev) 927 { 928 struct stmmac_priv *priv = netdev_priv(dev); 929 struct phy_device *phydev; 930 char phy_id_fmt[MII_BUS_ID_SIZE + 3]; 931 char bus_id[MII_BUS_ID_SIZE]; 932 int interface = priv->plat->interface; 933 int max_speed = priv->plat->max_speed; 934 priv->oldlink = false; 935 priv->speed = SPEED_UNKNOWN; 936 priv->oldduplex = DUPLEX_UNKNOWN; 937 938 if (priv->plat->phy_node) { 939 phydev = of_phy_connect(dev, priv->plat->phy_node, 940 &stmmac_adjust_link, 0, interface); 941 } else { 942 snprintf(bus_id, MII_BUS_ID_SIZE, "stmmac-%x", 943 priv->plat->bus_id); 944 945 snprintf(phy_id_fmt, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, bus_id, 946 priv->plat->phy_addr); 947 netdev_dbg(priv->dev, "%s: trying to attach to %s\n", __func__, 948 phy_id_fmt); 949 950 phydev = phy_connect(dev, phy_id_fmt, &stmmac_adjust_link, 951 interface); 952 } 953 954 if (IS_ERR_OR_NULL(phydev)) { 955 netdev_err(priv->dev, "Could not attach to PHY\n"); 956 if (!phydev) 957 return -ENODEV; 958 959 return PTR_ERR(phydev); 960 } 961 962 /* Stop Advertising 1000BASE Capability if interface is not GMII */ 963 if ((interface == PHY_INTERFACE_MODE_MII) || 964 (interface == PHY_INTERFACE_MODE_RMII) || 965 (max_speed < 1000 && max_speed > 0)) 966 phydev->advertising &= ~(SUPPORTED_1000baseT_Half | 967 SUPPORTED_1000baseT_Full); 968 969 /* 970 * Broken HW is sometimes missing the pull-up resistor on the 971 * MDIO line, which results in reads to non-existent devices returning 972 * 0 rather than 0xffff. Catch this here and treat 0 as a non-existent 973 * device as well. 974 * Note: phydev->phy_id is the result of reading the UID PHY registers. 975 */ 976 if (!priv->plat->phy_node && phydev->phy_id == 0) { 977 phy_disconnect(phydev); 978 return -ENODEV; 979 } 980 981 /* stmmac_adjust_link will change this to PHY_IGNORE_INTERRUPT to avoid 982 * subsequent PHY polling, make sure we force a link transition if 983 * we have a UP/DOWN/UP transition 984 */ 985 if (phydev->is_pseudo_fixed_link) 986 phydev->irq = PHY_POLL; 987 988 phy_attached_info(phydev); 989 return 0; 990 } 991 992 static void stmmac_display_rx_rings(struct stmmac_priv *priv) 993 { 994 u32 rx_cnt = priv->plat->rx_queues_to_use; 995 void *head_rx; 996 u32 queue; 997 998 /* Display RX rings */ 999 for (queue = 0; queue < rx_cnt; queue++) { 1000 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; 1001 1002 pr_info("\tRX Queue %u rings\n", queue); 1003 1004 if (priv->extend_desc) 1005 head_rx = (void *)rx_q->dma_erx; 1006 else 1007 head_rx = (void *)rx_q->dma_rx; 1008 1009 /* Display RX ring */ 1010 stmmac_display_ring(priv, head_rx, DMA_RX_SIZE, true); 1011 } 1012 } 1013 1014 static void stmmac_display_tx_rings(struct stmmac_priv *priv) 1015 { 1016 u32 tx_cnt = priv->plat->tx_queues_to_use; 1017 void *head_tx; 1018 u32 queue; 1019 1020 /* Display TX rings */ 1021 for (queue = 0; queue < tx_cnt; queue++) { 1022 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue]; 1023 1024 pr_info("\tTX Queue %d rings\n", queue); 1025 1026 if (priv->extend_desc) 1027 head_tx = (void *)tx_q->dma_etx; 1028 else 1029 head_tx = (void *)tx_q->dma_tx; 1030 1031 stmmac_display_ring(priv, head_tx, DMA_TX_SIZE, false); 1032 } 1033 } 1034 1035 static void stmmac_display_rings(struct stmmac_priv *priv) 1036 { 1037 /* Display RX ring */ 1038 stmmac_display_rx_rings(priv); 1039 1040 /* Display TX ring */ 1041 stmmac_display_tx_rings(priv); 1042 } 1043 1044 static int stmmac_set_bfsize(int mtu, int 
bufsize) 1045 { 1046 int ret = bufsize; 1047 1048 if (mtu >= BUF_SIZE_4KiB) 1049 ret = BUF_SIZE_8KiB; 1050 else if (mtu >= BUF_SIZE_2KiB) 1051 ret = BUF_SIZE_4KiB; 1052 else if (mtu > DEFAULT_BUFSIZE) 1053 ret = BUF_SIZE_2KiB; 1054 else 1055 ret = DEFAULT_BUFSIZE; 1056 1057 return ret; 1058 } 1059 1060 /** 1061 * stmmac_clear_rx_descriptors - clear RX descriptors 1062 * @priv: driver private structure 1063 * @queue: RX queue index 1064 * Description: this function is called to clear the RX descriptors 1065 * in case of both basic and extended descriptors are used. 1066 */ 1067 static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv, u32 queue) 1068 { 1069 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; 1070 int i; 1071 1072 /* Clear the RX descriptors */ 1073 for (i = 0; i < DMA_RX_SIZE; i++) 1074 if (priv->extend_desc) 1075 stmmac_init_rx_desc(priv, &rx_q->dma_erx[i].basic, 1076 priv->use_riwt, priv->mode, 1077 (i == DMA_RX_SIZE - 1)); 1078 else 1079 stmmac_init_rx_desc(priv, &rx_q->dma_rx[i], 1080 priv->use_riwt, priv->mode, 1081 (i == DMA_RX_SIZE - 1)); 1082 } 1083 1084 /** 1085 * stmmac_clear_tx_descriptors - clear tx descriptors 1086 * @priv: driver private structure 1087 * @queue: TX queue index. 1088 * Description: this function is called to clear the TX descriptors 1089 * in case of both basic and extended descriptors are used. 1090 */ 1091 static void stmmac_clear_tx_descriptors(struct stmmac_priv *priv, u32 queue) 1092 { 1093 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue]; 1094 int i; 1095 1096 /* Clear the TX descriptors */ 1097 for (i = 0; i < DMA_TX_SIZE; i++) 1098 if (priv->extend_desc) 1099 stmmac_init_tx_desc(priv, &tx_q->dma_etx[i].basic, 1100 priv->mode, (i == DMA_TX_SIZE - 1)); 1101 else 1102 stmmac_init_tx_desc(priv, &tx_q->dma_tx[i], 1103 priv->mode, (i == DMA_TX_SIZE - 1)); 1104 } 1105 1106 /** 1107 * stmmac_clear_descriptors - clear descriptors 1108 * @priv: driver private structure 1109 * Description: this function is called to clear the TX and RX descriptors 1110 * in case of both basic and extended descriptors are used. 1111 */ 1112 static void stmmac_clear_descriptors(struct stmmac_priv *priv) 1113 { 1114 u32 rx_queue_cnt = priv->plat->rx_queues_to_use; 1115 u32 tx_queue_cnt = priv->plat->tx_queues_to_use; 1116 u32 queue; 1117 1118 /* Clear the RX descriptors */ 1119 for (queue = 0; queue < rx_queue_cnt; queue++) 1120 stmmac_clear_rx_descriptors(priv, queue); 1121 1122 /* Clear the TX descriptors */ 1123 for (queue = 0; queue < tx_queue_cnt; queue++) 1124 stmmac_clear_tx_descriptors(priv, queue); 1125 } 1126 1127 /** 1128 * stmmac_init_rx_buffers - init the RX descriptor buffer. 1129 * @priv: driver private structure 1130 * @p: descriptor pointer 1131 * @i: descriptor index 1132 * @flags: gfp flag 1133 * @queue: RX queue index 1134 * Description: this function is called to allocate a receive buffer, perform 1135 * the DMA mapping and init the descriptor. 
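 * The buffer length is priv->dma_buf_sz, normally chosen by
 * stmmac_set_bfsize(): for example a 1500-byte MTU keeps the 1536-byte
 * default, while a 4000-byte MTU selects BUF_SIZE_4KiB.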
 */
static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
				  int i, gfp_t flags, u32 queue)
{
	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
	struct sk_buff *skb;

	skb = __netdev_alloc_skb_ip_align(priv->dev, priv->dma_buf_sz, flags);
	if (!skb) {
		netdev_err(priv->dev,
			   "%s: Rx init fails; skb is NULL\n", __func__);
		return -ENOMEM;
	}
	rx_q->rx_skbuff[i] = skb;
	rx_q->rx_skbuff_dma[i] = dma_map_single(priv->device, skb->data,
						priv->dma_buf_sz,
						DMA_FROM_DEVICE);
	if (dma_mapping_error(priv->device, rx_q->rx_skbuff_dma[i])) {
		netdev_err(priv->dev, "%s: DMA mapping error\n", __func__);
		dev_kfree_skb_any(skb);
		return -EINVAL;
	}

	if (priv->synopsys_id >= DWMAC_CORE_4_00)
		p->des0 = cpu_to_le32(rx_q->rx_skbuff_dma[i]);
	else
		p->des2 = cpu_to_le32(rx_q->rx_skbuff_dma[i]);

	if (priv->dma_buf_sz == BUF_SIZE_16KiB)
		stmmac_init_desc3(priv, p);

	return 0;
}

/**
 * stmmac_free_rx_buffer - free RX dma buffers
 * @priv: private structure
 * @queue: RX queue index
 * @i: buffer index.
 */
static void stmmac_free_rx_buffer(struct stmmac_priv *priv, u32 queue, int i)
{
	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];

	if (rx_q->rx_skbuff[i]) {
		dma_unmap_single(priv->device, rx_q->rx_skbuff_dma[i],
				 priv->dma_buf_sz, DMA_FROM_DEVICE);
		dev_kfree_skb_any(rx_q->rx_skbuff[i]);
	}
	rx_q->rx_skbuff[i] = NULL;
}

/**
 * stmmac_free_tx_buffer - free TX dma buffers
 * @priv: private structure
 * @queue: TX queue index
 * @i: buffer index.
 */
static void stmmac_free_tx_buffer(struct stmmac_priv *priv, u32 queue, int i)
{
	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];

	if (tx_q->tx_skbuff_dma[i].buf) {
		if (tx_q->tx_skbuff_dma[i].map_as_page)
			dma_unmap_page(priv->device,
				       tx_q->tx_skbuff_dma[i].buf,
				       tx_q->tx_skbuff_dma[i].len,
				       DMA_TO_DEVICE);
		else
			dma_unmap_single(priv->device,
					 tx_q->tx_skbuff_dma[i].buf,
					 tx_q->tx_skbuff_dma[i].len,
					 DMA_TO_DEVICE);
	}

	if (tx_q->tx_skbuff[i]) {
		dev_kfree_skb_any(tx_q->tx_skbuff[i]);
		tx_q->tx_skbuff[i] = NULL;
		tx_q->tx_skbuff_dma[i].buf = 0;
		tx_q->tx_skbuff_dma[i].map_as_page = false;
	}
}

/**
 * init_dma_rx_desc_rings - init the RX descriptor rings
 * @dev: net device structure
 * @flags: gfp flag.
 * Description: this function initializes the DMA RX descriptors
 * and allocates the socket buffers. It supports the chained and ring
 * modes.
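 * In ring mode the last descriptor is flagged as end-of-ring so the DMA
 * wraps back to the base address, while with chain_mode=1 the next
 * pointers of the descriptors are programmed by stmmac_mode_init().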
1226 */ 1227 static int init_dma_rx_desc_rings(struct net_device *dev, gfp_t flags) 1228 { 1229 struct stmmac_priv *priv = netdev_priv(dev); 1230 u32 rx_count = priv->plat->rx_queues_to_use; 1231 int ret = -ENOMEM; 1232 int bfsize = 0; 1233 int queue; 1234 int i; 1235 1236 bfsize = stmmac_set_16kib_bfsize(priv, dev->mtu); 1237 if (bfsize < 0) 1238 bfsize = 0; 1239 1240 if (bfsize < BUF_SIZE_16KiB) 1241 bfsize = stmmac_set_bfsize(dev->mtu, priv->dma_buf_sz); 1242 1243 priv->dma_buf_sz = bfsize; 1244 1245 /* RX INITIALIZATION */ 1246 netif_dbg(priv, probe, priv->dev, 1247 "SKB addresses:\nskb\t\tskb data\tdma data\n"); 1248 1249 for (queue = 0; queue < rx_count; queue++) { 1250 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; 1251 1252 netif_dbg(priv, probe, priv->dev, 1253 "(%s) dma_rx_phy=0x%08x\n", __func__, 1254 (u32)rx_q->dma_rx_phy); 1255 1256 for (i = 0; i < DMA_RX_SIZE; i++) { 1257 struct dma_desc *p; 1258 1259 if (priv->extend_desc) 1260 p = &((rx_q->dma_erx + i)->basic); 1261 else 1262 p = rx_q->dma_rx + i; 1263 1264 ret = stmmac_init_rx_buffers(priv, p, i, flags, 1265 queue); 1266 if (ret) 1267 goto err_init_rx_buffers; 1268 1269 netif_dbg(priv, probe, priv->dev, "[%p]\t[%p]\t[%x]\n", 1270 rx_q->rx_skbuff[i], rx_q->rx_skbuff[i]->data, 1271 (unsigned int)rx_q->rx_skbuff_dma[i]); 1272 } 1273 1274 rx_q->cur_rx = 0; 1275 rx_q->dirty_rx = (unsigned int)(i - DMA_RX_SIZE); 1276 1277 stmmac_clear_rx_descriptors(priv, queue); 1278 1279 /* Setup the chained descriptor addresses */ 1280 if (priv->mode == STMMAC_CHAIN_MODE) { 1281 if (priv->extend_desc) 1282 stmmac_mode_init(priv, rx_q->dma_erx, 1283 rx_q->dma_rx_phy, DMA_RX_SIZE, 1); 1284 else 1285 stmmac_mode_init(priv, rx_q->dma_rx, 1286 rx_q->dma_rx_phy, DMA_RX_SIZE, 0); 1287 } 1288 } 1289 1290 buf_sz = bfsize; 1291 1292 return 0; 1293 1294 err_init_rx_buffers: 1295 while (queue >= 0) { 1296 while (--i >= 0) 1297 stmmac_free_rx_buffer(priv, queue, i); 1298 1299 if (queue == 0) 1300 break; 1301 1302 i = DMA_RX_SIZE; 1303 queue--; 1304 } 1305 1306 return ret; 1307 } 1308 1309 /** 1310 * init_dma_tx_desc_rings - init the TX descriptor rings 1311 * @dev: net device structure. 1312 * Description: this function initializes the DMA TX descriptors 1313 * and allocates the socket buffers. It supports the chained and ring 1314 * modes. 
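 * Every TX descriptor starts out with cleared buffer pointers (des0-des3
 * on dwmac4 and newer cores, des2 otherwise) and no skb attached; cur_tx,
 * dirty_tx and the cached mss are reset to zero.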
1315 */ 1316 static int init_dma_tx_desc_rings(struct net_device *dev) 1317 { 1318 struct stmmac_priv *priv = netdev_priv(dev); 1319 u32 tx_queue_cnt = priv->plat->tx_queues_to_use; 1320 u32 queue; 1321 int i; 1322 1323 for (queue = 0; queue < tx_queue_cnt; queue++) { 1324 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue]; 1325 1326 netif_dbg(priv, probe, priv->dev, 1327 "(%s) dma_tx_phy=0x%08x\n", __func__, 1328 (u32)tx_q->dma_tx_phy); 1329 1330 /* Setup the chained descriptor addresses */ 1331 if (priv->mode == STMMAC_CHAIN_MODE) { 1332 if (priv->extend_desc) 1333 stmmac_mode_init(priv, tx_q->dma_etx, 1334 tx_q->dma_tx_phy, DMA_TX_SIZE, 1); 1335 else 1336 stmmac_mode_init(priv, tx_q->dma_tx, 1337 tx_q->dma_tx_phy, DMA_TX_SIZE, 0); 1338 } 1339 1340 for (i = 0; i < DMA_TX_SIZE; i++) { 1341 struct dma_desc *p; 1342 if (priv->extend_desc) 1343 p = &((tx_q->dma_etx + i)->basic); 1344 else 1345 p = tx_q->dma_tx + i; 1346 1347 if (priv->synopsys_id >= DWMAC_CORE_4_00) { 1348 p->des0 = 0; 1349 p->des1 = 0; 1350 p->des2 = 0; 1351 p->des3 = 0; 1352 } else { 1353 p->des2 = 0; 1354 } 1355 1356 tx_q->tx_skbuff_dma[i].buf = 0; 1357 tx_q->tx_skbuff_dma[i].map_as_page = false; 1358 tx_q->tx_skbuff_dma[i].len = 0; 1359 tx_q->tx_skbuff_dma[i].last_segment = false; 1360 tx_q->tx_skbuff[i] = NULL; 1361 } 1362 1363 tx_q->dirty_tx = 0; 1364 tx_q->cur_tx = 0; 1365 tx_q->mss = 0; 1366 1367 netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue)); 1368 } 1369 1370 return 0; 1371 } 1372 1373 /** 1374 * init_dma_desc_rings - init the RX/TX descriptor rings 1375 * @dev: net device structure 1376 * @flags: gfp flag. 1377 * Description: this function initializes the DMA RX/TX descriptors 1378 * and allocates the socket buffers. It supports the chained and ring 1379 * modes. 
1380 */ 1381 static int init_dma_desc_rings(struct net_device *dev, gfp_t flags) 1382 { 1383 struct stmmac_priv *priv = netdev_priv(dev); 1384 int ret; 1385 1386 ret = init_dma_rx_desc_rings(dev, flags); 1387 if (ret) 1388 return ret; 1389 1390 ret = init_dma_tx_desc_rings(dev); 1391 1392 stmmac_clear_descriptors(priv); 1393 1394 if (netif_msg_hw(priv)) 1395 stmmac_display_rings(priv); 1396 1397 return ret; 1398 } 1399 1400 /** 1401 * dma_free_rx_skbufs - free RX dma buffers 1402 * @priv: private structure 1403 * @queue: RX queue index 1404 */ 1405 static void dma_free_rx_skbufs(struct stmmac_priv *priv, u32 queue) 1406 { 1407 int i; 1408 1409 for (i = 0; i < DMA_RX_SIZE; i++) 1410 stmmac_free_rx_buffer(priv, queue, i); 1411 } 1412 1413 /** 1414 * dma_free_tx_skbufs - free TX dma buffers 1415 * @priv: private structure 1416 * @queue: TX queue index 1417 */ 1418 static void dma_free_tx_skbufs(struct stmmac_priv *priv, u32 queue) 1419 { 1420 int i; 1421 1422 for (i = 0; i < DMA_TX_SIZE; i++) 1423 stmmac_free_tx_buffer(priv, queue, i); 1424 } 1425 1426 /** 1427 * free_dma_rx_desc_resources - free RX dma desc resources 1428 * @priv: private structure 1429 */ 1430 static void free_dma_rx_desc_resources(struct stmmac_priv *priv) 1431 { 1432 u32 rx_count = priv->plat->rx_queues_to_use; 1433 u32 queue; 1434 1435 /* Free RX queue resources */ 1436 for (queue = 0; queue < rx_count; queue++) { 1437 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; 1438 1439 /* Release the DMA RX socket buffers */ 1440 dma_free_rx_skbufs(priv, queue); 1441 1442 /* Free DMA regions of consistent memory previously allocated */ 1443 if (!priv->extend_desc) 1444 dma_free_coherent(priv->device, 1445 DMA_RX_SIZE * sizeof(struct dma_desc), 1446 rx_q->dma_rx, rx_q->dma_rx_phy); 1447 else 1448 dma_free_coherent(priv->device, DMA_RX_SIZE * 1449 sizeof(struct dma_extended_desc), 1450 rx_q->dma_erx, rx_q->dma_rx_phy); 1451 1452 kfree(rx_q->rx_skbuff_dma); 1453 kfree(rx_q->rx_skbuff); 1454 } 1455 } 1456 1457 /** 1458 * free_dma_tx_desc_resources - free TX dma desc resources 1459 * @priv: private structure 1460 */ 1461 static void free_dma_tx_desc_resources(struct stmmac_priv *priv) 1462 { 1463 u32 tx_count = priv->plat->tx_queues_to_use; 1464 u32 queue; 1465 1466 /* Free TX queue resources */ 1467 for (queue = 0; queue < tx_count; queue++) { 1468 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue]; 1469 1470 /* Release the DMA TX socket buffers */ 1471 dma_free_tx_skbufs(priv, queue); 1472 1473 /* Free DMA regions of consistent memory previously allocated */ 1474 if (!priv->extend_desc) 1475 dma_free_coherent(priv->device, 1476 DMA_TX_SIZE * sizeof(struct dma_desc), 1477 tx_q->dma_tx, tx_q->dma_tx_phy); 1478 else 1479 dma_free_coherent(priv->device, DMA_TX_SIZE * 1480 sizeof(struct dma_extended_desc), 1481 tx_q->dma_etx, tx_q->dma_tx_phy); 1482 1483 kfree(tx_q->tx_skbuff_dma); 1484 kfree(tx_q->tx_skbuff); 1485 } 1486 } 1487 1488 /** 1489 * alloc_dma_rx_desc_resources - alloc RX resources. 1490 * @priv: private structure 1491 * Description: according to which descriptor can be used (extend or basic) 1492 * this function allocates the resources for TX and RX paths. In case of 1493 * reception, for example, it pre-allocated the RX socket buffer in order to 1494 * allow zero-copy mechanism. 
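 * Per RX queue this allocates one coherent area of DMA_RX_SIZE descriptors
 * (512 with the default configuration) plus the matching rx_skbuff and
 * rx_skbuff_dma lookup arrays.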
1495 */ 1496 static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv) 1497 { 1498 u32 rx_count = priv->plat->rx_queues_to_use; 1499 int ret = -ENOMEM; 1500 u32 queue; 1501 1502 /* RX queues buffers and DMA */ 1503 for (queue = 0; queue < rx_count; queue++) { 1504 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; 1505 1506 rx_q->queue_index = queue; 1507 rx_q->priv_data = priv; 1508 1509 rx_q->rx_skbuff_dma = kmalloc_array(DMA_RX_SIZE, 1510 sizeof(dma_addr_t), 1511 GFP_KERNEL); 1512 if (!rx_q->rx_skbuff_dma) 1513 goto err_dma; 1514 1515 rx_q->rx_skbuff = kmalloc_array(DMA_RX_SIZE, 1516 sizeof(struct sk_buff *), 1517 GFP_KERNEL); 1518 if (!rx_q->rx_skbuff) 1519 goto err_dma; 1520 1521 if (priv->extend_desc) { 1522 rx_q->dma_erx = dma_zalloc_coherent(priv->device, 1523 DMA_RX_SIZE * 1524 sizeof(struct 1525 dma_extended_desc), 1526 &rx_q->dma_rx_phy, 1527 GFP_KERNEL); 1528 if (!rx_q->dma_erx) 1529 goto err_dma; 1530 1531 } else { 1532 rx_q->dma_rx = dma_zalloc_coherent(priv->device, 1533 DMA_RX_SIZE * 1534 sizeof(struct 1535 dma_desc), 1536 &rx_q->dma_rx_phy, 1537 GFP_KERNEL); 1538 if (!rx_q->dma_rx) 1539 goto err_dma; 1540 } 1541 } 1542 1543 return 0; 1544 1545 err_dma: 1546 free_dma_rx_desc_resources(priv); 1547 1548 return ret; 1549 } 1550 1551 /** 1552 * alloc_dma_tx_desc_resources - alloc TX resources. 1553 * @priv: private structure 1554 * Description: according to which descriptor can be used (extend or basic) 1555 * this function allocates the resources for TX and RX paths. In case of 1556 * reception, for example, it pre-allocated the RX socket buffer in order to 1557 * allow zero-copy mechanism. 1558 */ 1559 static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv) 1560 { 1561 u32 tx_count = priv->plat->tx_queues_to_use; 1562 int ret = -ENOMEM; 1563 u32 queue; 1564 1565 /* TX queues buffers and DMA */ 1566 for (queue = 0; queue < tx_count; queue++) { 1567 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue]; 1568 1569 tx_q->queue_index = queue; 1570 tx_q->priv_data = priv; 1571 1572 tx_q->tx_skbuff_dma = kmalloc_array(DMA_TX_SIZE, 1573 sizeof(*tx_q->tx_skbuff_dma), 1574 GFP_KERNEL); 1575 if (!tx_q->tx_skbuff_dma) 1576 goto err_dma; 1577 1578 tx_q->tx_skbuff = kmalloc_array(DMA_TX_SIZE, 1579 sizeof(struct sk_buff *), 1580 GFP_KERNEL); 1581 if (!tx_q->tx_skbuff) 1582 goto err_dma; 1583 1584 if (priv->extend_desc) { 1585 tx_q->dma_etx = dma_zalloc_coherent(priv->device, 1586 DMA_TX_SIZE * 1587 sizeof(struct 1588 dma_extended_desc), 1589 &tx_q->dma_tx_phy, 1590 GFP_KERNEL); 1591 if (!tx_q->dma_etx) 1592 goto err_dma; 1593 } else { 1594 tx_q->dma_tx = dma_zalloc_coherent(priv->device, 1595 DMA_TX_SIZE * 1596 sizeof(struct 1597 dma_desc), 1598 &tx_q->dma_tx_phy, 1599 GFP_KERNEL); 1600 if (!tx_q->dma_tx) 1601 goto err_dma; 1602 } 1603 } 1604 1605 return 0; 1606 1607 err_dma: 1608 free_dma_tx_desc_resources(priv); 1609 1610 return ret; 1611 } 1612 1613 /** 1614 * alloc_dma_desc_resources - alloc TX/RX resources. 1615 * @priv: private structure 1616 * Description: according to which descriptor can be used (extend or basic) 1617 * this function allocates the resources for TX and RX paths. In case of 1618 * reception, for example, it pre-allocated the RX socket buffer in order to 1619 * allow zero-copy mechanism. 
1620 */ 1621 static int alloc_dma_desc_resources(struct stmmac_priv *priv) 1622 { 1623 /* RX Allocation */ 1624 int ret = alloc_dma_rx_desc_resources(priv); 1625 1626 if (ret) 1627 return ret; 1628 1629 ret = alloc_dma_tx_desc_resources(priv); 1630 1631 return ret; 1632 } 1633 1634 /** 1635 * free_dma_desc_resources - free dma desc resources 1636 * @priv: private structure 1637 */ 1638 static void free_dma_desc_resources(struct stmmac_priv *priv) 1639 { 1640 /* Release the DMA RX socket buffers */ 1641 free_dma_rx_desc_resources(priv); 1642 1643 /* Release the DMA TX socket buffers */ 1644 free_dma_tx_desc_resources(priv); 1645 } 1646 1647 /** 1648 * stmmac_mac_enable_rx_queues - Enable MAC rx queues 1649 * @priv: driver private structure 1650 * Description: It is used for enabling the rx queues in the MAC 1651 */ 1652 static void stmmac_mac_enable_rx_queues(struct stmmac_priv *priv) 1653 { 1654 u32 rx_queues_count = priv->plat->rx_queues_to_use; 1655 int queue; 1656 u8 mode; 1657 1658 for (queue = 0; queue < rx_queues_count; queue++) { 1659 mode = priv->plat->rx_queues_cfg[queue].mode_to_use; 1660 stmmac_rx_queue_enable(priv, priv->hw, mode, queue); 1661 } 1662 } 1663 1664 /** 1665 * stmmac_start_rx_dma - start RX DMA channel 1666 * @priv: driver private structure 1667 * @chan: RX channel index 1668 * Description: 1669 * This starts a RX DMA channel 1670 */ 1671 static void stmmac_start_rx_dma(struct stmmac_priv *priv, u32 chan) 1672 { 1673 netdev_dbg(priv->dev, "DMA RX processes started in channel %d\n", chan); 1674 stmmac_start_rx(priv, priv->ioaddr, chan); 1675 } 1676 1677 /** 1678 * stmmac_start_tx_dma - start TX DMA channel 1679 * @priv: driver private structure 1680 * @chan: TX channel index 1681 * Description: 1682 * This starts a TX DMA channel 1683 */ 1684 static void stmmac_start_tx_dma(struct stmmac_priv *priv, u32 chan) 1685 { 1686 netdev_dbg(priv->dev, "DMA TX processes started in channel %d\n", chan); 1687 stmmac_start_tx(priv, priv->ioaddr, chan); 1688 } 1689 1690 /** 1691 * stmmac_stop_rx_dma - stop RX DMA channel 1692 * @priv: driver private structure 1693 * @chan: RX channel index 1694 * Description: 1695 * This stops a RX DMA channel 1696 */ 1697 static void stmmac_stop_rx_dma(struct stmmac_priv *priv, u32 chan) 1698 { 1699 netdev_dbg(priv->dev, "DMA RX processes stopped in channel %d\n", chan); 1700 stmmac_stop_rx(priv, priv->ioaddr, chan); 1701 } 1702 1703 /** 1704 * stmmac_stop_tx_dma - stop TX DMA channel 1705 * @priv: driver private structure 1706 * @chan: TX channel index 1707 * Description: 1708 * This stops a TX DMA channel 1709 */ 1710 static void stmmac_stop_tx_dma(struct stmmac_priv *priv, u32 chan) 1711 { 1712 netdev_dbg(priv->dev, "DMA TX processes stopped in channel %d\n", chan); 1713 stmmac_stop_tx(priv, priv->ioaddr, chan); 1714 } 1715 1716 /** 1717 * stmmac_start_all_dma - start all RX and TX DMA channels 1718 * @priv: driver private structure 1719 * Description: 1720 * This starts all the RX and TX DMA channels 1721 */ 1722 static void stmmac_start_all_dma(struct stmmac_priv *priv) 1723 { 1724 u32 rx_channels_count = priv->plat->rx_queues_to_use; 1725 u32 tx_channels_count = priv->plat->tx_queues_to_use; 1726 u32 chan = 0; 1727 1728 for (chan = 0; chan < rx_channels_count; chan++) 1729 stmmac_start_rx_dma(priv, chan); 1730 1731 for (chan = 0; chan < tx_channels_count; chan++) 1732 stmmac_start_tx_dma(priv, chan); 1733 } 1734 1735 /** 1736 * stmmac_stop_all_dma - stop all RX and TX DMA channels 1737 * @priv: driver private structure 1738 * Description: 
1739 * This stops the RX and TX DMA channels 1740 */ 1741 static void stmmac_stop_all_dma(struct stmmac_priv *priv) 1742 { 1743 u32 rx_channels_count = priv->plat->rx_queues_to_use; 1744 u32 tx_channels_count = priv->plat->tx_queues_to_use; 1745 u32 chan = 0; 1746 1747 for (chan = 0; chan < rx_channels_count; chan++) 1748 stmmac_stop_rx_dma(priv, chan); 1749 1750 for (chan = 0; chan < tx_channels_count; chan++) 1751 stmmac_stop_tx_dma(priv, chan); 1752 } 1753 1754 /** 1755 * stmmac_dma_operation_mode - HW DMA operation mode 1756 * @priv: driver private structure 1757 * Description: it is used for configuring the DMA operation mode register in 1758 * order to program the tx/rx DMA thresholds or Store-And-Forward mode. 1759 */ 1760 static void stmmac_dma_operation_mode(struct stmmac_priv *priv) 1761 { 1762 u32 rx_channels_count = priv->plat->rx_queues_to_use; 1763 u32 tx_channels_count = priv->plat->tx_queues_to_use; 1764 int rxfifosz = priv->plat->rx_fifo_size; 1765 int txfifosz = priv->plat->tx_fifo_size; 1766 u32 txmode = 0; 1767 u32 rxmode = 0; 1768 u32 chan = 0; 1769 u8 qmode = 0; 1770 1771 if (rxfifosz == 0) 1772 rxfifosz = priv->dma_cap.rx_fifo_size; 1773 if (txfifosz == 0) 1774 txfifosz = priv->dma_cap.tx_fifo_size; 1775 1776 /* Adjust for real per queue fifo size */ 1777 rxfifosz /= rx_channels_count; 1778 txfifosz /= tx_channels_count; 1779 1780 if (priv->plat->force_thresh_dma_mode) { 1781 txmode = tc; 1782 rxmode = tc; 1783 } else if (priv->plat->force_sf_dma_mode || priv->plat->tx_coe) { 1784 /* 1785 * In case of GMAC, SF mode can be enabled 1786 * to perform the TX COE in HW. This depends on: 1787 * 1) TX COE if actually supported 1788 * 2) There is no bugged Jumbo frame support 1789 * that needs to not insert csum in the TDES. 1790 */ 1791 txmode = SF_DMA_MODE; 1792 rxmode = SF_DMA_MODE; 1793 priv->xstats.threshold = SF_DMA_MODE; 1794 } else { 1795 txmode = tc; 1796 rxmode = SF_DMA_MODE; 1797 } 1798 1799 /* configure all channels */ 1800 if (priv->synopsys_id >= DWMAC_CORE_4_00) { 1801 for (chan = 0; chan < rx_channels_count; chan++) { 1802 qmode = priv->plat->rx_queues_cfg[chan].mode_to_use; 1803 1804 stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan, 1805 rxfifosz, qmode); 1806 } 1807 1808 for (chan = 0; chan < tx_channels_count; chan++) { 1809 qmode = priv->plat->tx_queues_cfg[chan].mode_to_use; 1810 1811 stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan, 1812 txfifosz, qmode); 1813 } 1814 } else { 1815 stmmac_dma_mode(priv, priv->ioaddr, txmode, rxmode, rxfifosz); 1816 } 1817 } 1818 1819 /** 1820 * stmmac_tx_clean - to manage the transmission completion 1821 * @priv: driver private structure 1822 * @queue: TX queue index 1823 * Description: it reclaims the transmit resources after transmission completes. 
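 * A stopped TX queue is woken up again once stmmac_tx_avail() rises above
 * STMMAC_TX_THRESH (a quarter of DMA_TX_SIZE); for example, with an empty
 * ring (dirty_tx == cur_tx) the available count is DMA_TX_SIZE - 1.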
1824 */ 1825 static void stmmac_tx_clean(struct stmmac_priv *priv, u32 queue) 1826 { 1827 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue]; 1828 unsigned int bytes_compl = 0, pkts_compl = 0; 1829 unsigned int entry; 1830 1831 netif_tx_lock(priv->dev); 1832 1833 priv->xstats.tx_clean++; 1834 1835 entry = tx_q->dirty_tx; 1836 while (entry != tx_q->cur_tx) { 1837 struct sk_buff *skb = tx_q->tx_skbuff[entry]; 1838 struct dma_desc *p; 1839 int status; 1840 1841 if (priv->extend_desc) 1842 p = (struct dma_desc *)(tx_q->dma_etx + entry); 1843 else 1844 p = tx_q->dma_tx + entry; 1845 1846 status = stmmac_tx_status(priv, &priv->dev->stats, 1847 &priv->xstats, p, priv->ioaddr); 1848 /* Check if the descriptor is owned by the DMA */ 1849 if (unlikely(status & tx_dma_own)) 1850 break; 1851 1852 /* Make sure descriptor fields are read after reading 1853 * the own bit. 1854 */ 1855 dma_rmb(); 1856 1857 /* Just consider the last segment and ...*/ 1858 if (likely(!(status & tx_not_ls))) { 1859 /* ... verify the status error condition */ 1860 if (unlikely(status & tx_err)) { 1861 priv->dev->stats.tx_errors++; 1862 } else { 1863 priv->dev->stats.tx_packets++; 1864 priv->xstats.tx_pkt_n++; 1865 } 1866 stmmac_get_tx_hwtstamp(priv, p, skb); 1867 } 1868 1869 if (likely(tx_q->tx_skbuff_dma[entry].buf)) { 1870 if (tx_q->tx_skbuff_dma[entry].map_as_page) 1871 dma_unmap_page(priv->device, 1872 tx_q->tx_skbuff_dma[entry].buf, 1873 tx_q->tx_skbuff_dma[entry].len, 1874 DMA_TO_DEVICE); 1875 else 1876 dma_unmap_single(priv->device, 1877 tx_q->tx_skbuff_dma[entry].buf, 1878 tx_q->tx_skbuff_dma[entry].len, 1879 DMA_TO_DEVICE); 1880 tx_q->tx_skbuff_dma[entry].buf = 0; 1881 tx_q->tx_skbuff_dma[entry].len = 0; 1882 tx_q->tx_skbuff_dma[entry].map_as_page = false; 1883 } 1884 1885 stmmac_clean_desc3(priv, tx_q, p); 1886 1887 tx_q->tx_skbuff_dma[entry].last_segment = false; 1888 tx_q->tx_skbuff_dma[entry].is_jumbo = false; 1889 1890 if (likely(skb != NULL)) { 1891 pkts_compl++; 1892 bytes_compl += skb->len; 1893 dev_consume_skb_any(skb); 1894 tx_q->tx_skbuff[entry] = NULL; 1895 } 1896 1897 stmmac_release_tx_desc(priv, p, priv->mode); 1898 1899 entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE); 1900 } 1901 tx_q->dirty_tx = entry; 1902 1903 netdev_tx_completed_queue(netdev_get_tx_queue(priv->dev, queue), 1904 pkts_compl, bytes_compl); 1905 1906 if (unlikely(netif_tx_queue_stopped(netdev_get_tx_queue(priv->dev, 1907 queue))) && 1908 stmmac_tx_avail(priv, queue) > STMMAC_TX_THRESH) { 1909 1910 netif_dbg(priv, tx_done, priv->dev, 1911 "%s: restart transmit\n", __func__); 1912 netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, queue)); 1913 } 1914 1915 if ((priv->eee_enabled) && (!priv->tx_path_in_lpi_mode)) { 1916 stmmac_enable_eee_mode(priv); 1917 mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer)); 1918 } 1919 netif_tx_unlock(priv->dev); 1920 } 1921 1922 /** 1923 * stmmac_tx_err - to manage the tx error 1924 * @priv: driver private structure 1925 * @chan: channel index 1926 * Description: it cleans the descriptors and restarts the transmission 1927 * in case of transmission errors. 
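 * The recovery sequence is: stop the netdev queue and the TX DMA channel,
 * free the pending skbs, re-initialize the descriptors, reset cur_tx,
 * dirty_tx and the cached mss, then restart the DMA and wake the queue up.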
1928 */ 1929 static void stmmac_tx_err(struct stmmac_priv *priv, u32 chan) 1930 { 1931 struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan]; 1932 int i; 1933 1934 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, chan)); 1935 1936 stmmac_stop_tx_dma(priv, chan); 1937 dma_free_tx_skbufs(priv, chan); 1938 for (i = 0; i < DMA_TX_SIZE; i++) 1939 if (priv->extend_desc) 1940 stmmac_init_tx_desc(priv, &tx_q->dma_etx[i].basic, 1941 priv->mode, (i == DMA_TX_SIZE - 1)); 1942 else 1943 stmmac_init_tx_desc(priv, &tx_q->dma_tx[i], 1944 priv->mode, (i == DMA_TX_SIZE - 1)); 1945 tx_q->dirty_tx = 0; 1946 tx_q->cur_tx = 0; 1947 tx_q->mss = 0; 1948 netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, chan)); 1949 stmmac_start_tx_dma(priv, chan); 1950 1951 priv->dev->stats.tx_errors++; 1952 netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, chan)); 1953 } 1954 1955 /** 1956 * stmmac_set_dma_operation_mode - Set DMA operation mode by channel 1957 * @priv: driver private structure 1958 * @txmode: TX operating mode 1959 * @rxmode: RX operating mode 1960 * @chan: channel index 1961 * Description: it is used for configuring of the DMA operation mode in 1962 * runtime in order to program the tx/rx DMA thresholds or Store-And-Forward 1963 * mode. 1964 */ 1965 static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode, 1966 u32 rxmode, u32 chan) 1967 { 1968 u8 rxqmode = priv->plat->rx_queues_cfg[chan].mode_to_use; 1969 u8 txqmode = priv->plat->tx_queues_cfg[chan].mode_to_use; 1970 u32 rx_channels_count = priv->plat->rx_queues_to_use; 1971 u32 tx_channels_count = priv->plat->tx_queues_to_use; 1972 int rxfifosz = priv->plat->rx_fifo_size; 1973 int txfifosz = priv->plat->tx_fifo_size; 1974 1975 if (rxfifosz == 0) 1976 rxfifosz = priv->dma_cap.rx_fifo_size; 1977 if (txfifosz == 0) 1978 txfifosz = priv->dma_cap.tx_fifo_size; 1979 1980 /* Adjust for real per queue fifo size */ 1981 rxfifosz /= rx_channels_count; 1982 txfifosz /= tx_channels_count; 1983 1984 if (priv->synopsys_id >= DWMAC_CORE_4_00) { 1985 stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan, rxfifosz, 1986 rxqmode); 1987 stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan, txfifosz, 1988 txqmode); 1989 } else { 1990 stmmac_dma_mode(priv, priv->ioaddr, txmode, rxmode, rxfifosz); 1991 } 1992 } 1993 1994 static bool stmmac_safety_feat_interrupt(struct stmmac_priv *priv) 1995 { 1996 int ret = false; 1997 1998 /* Safety features are only available in cores >= 5.10 */ 1999 if (priv->synopsys_id < DWMAC_CORE_5_10) 2000 return ret; 2001 ret = stmmac_safety_feat_irq_status(priv, priv->dev, 2002 priv->ioaddr, priv->dma_cap.asp, &priv->sstats); 2003 if (ret && (ret != -EINVAL)) { 2004 stmmac_global_err(priv); 2005 return true; 2006 } 2007 2008 return false; 2009 } 2010 2011 /** 2012 * stmmac_dma_interrupt - DMA ISR 2013 * @priv: driver private structure 2014 * Description: this is the DMA ISR. It is called by the main ISR. 2015 * It calls the dwmac dma routine and schedule poll method in case of some 2016 * work can be done. 2017 */ 2018 static void stmmac_dma_interrupt(struct stmmac_priv *priv) 2019 { 2020 u32 tx_channel_count = priv->plat->tx_queues_to_use; 2021 u32 rx_channel_count = priv->plat->rx_queues_to_use; 2022 u32 channels_to_check = tx_channel_count > rx_channel_count ? 
2023 tx_channel_count : rx_channel_count; 2024 u32 chan; 2025 bool poll_scheduled = false; 2026 int status[channels_to_check]; 2027 2028 /* Each DMA channel can be used for rx and tx simultaneously, yet 2029 * napi_struct is embedded in struct stmmac_rx_queue rather than in a 2030 * stmmac_channel struct. 2031 * Because of this, stmmac_poll currently checks (and possibly wakes) 2032 * all tx queues rather than just a single tx queue. 2033 */ 2034 for (chan = 0; chan < channels_to_check; chan++) 2035 status[chan] = stmmac_dma_interrupt_status(priv, priv->ioaddr, 2036 &priv->xstats, chan); 2037 2038 for (chan = 0; chan < rx_channel_count; chan++) { 2039 if (likely(status[chan] & handle_rx)) { 2040 struct stmmac_rx_queue *rx_q = &priv->rx_queue[chan]; 2041 2042 if (likely(napi_schedule_prep(&rx_q->napi))) { 2043 stmmac_disable_dma_irq(priv, priv->ioaddr, chan); 2044 __napi_schedule(&rx_q->napi); 2045 poll_scheduled = true; 2046 } 2047 } 2048 } 2049 2050 /* If we scheduled poll, we already know that tx queues will be checked. 2051 * If we didn't schedule poll, see if any DMA channel (used by tx) has a 2052 * completed transmission, if so, call stmmac_poll (once). 2053 */ 2054 if (!poll_scheduled) { 2055 for (chan = 0; chan < tx_channel_count; chan++) { 2056 if (status[chan] & handle_tx) { 2057 /* It doesn't matter what rx queue we choose 2058 * here. We use 0 since it always exists. 2059 */ 2060 struct stmmac_rx_queue *rx_q = 2061 &priv->rx_queue[0]; 2062 2063 if (likely(napi_schedule_prep(&rx_q->napi))) { 2064 stmmac_disable_dma_irq(priv, 2065 priv->ioaddr, chan); 2066 __napi_schedule(&rx_q->napi); 2067 } 2068 break; 2069 } 2070 } 2071 } 2072 2073 for (chan = 0; chan < tx_channel_count; chan++) { 2074 if (unlikely(status[chan] & tx_hard_error_bump_tc)) { 2075 /* Try to bump up the dma threshold on this failure */ 2076 if (unlikely(priv->xstats.threshold != SF_DMA_MODE) && 2077 (tc <= 256)) { 2078 tc += 64; 2079 if (priv->plat->force_thresh_dma_mode) 2080 stmmac_set_dma_operation_mode(priv, 2081 tc, 2082 tc, 2083 chan); 2084 else 2085 stmmac_set_dma_operation_mode(priv, 2086 tc, 2087 SF_DMA_MODE, 2088 chan); 2089 priv->xstats.threshold = tc; 2090 } 2091 } else if (unlikely(status[chan] == tx_hard_error)) { 2092 stmmac_tx_err(priv, chan); 2093 } 2094 } 2095 } 2096 2097 /** 2098 * stmmac_mmc_setup: setup the Mac Management Counters (MMC) 2099 * @priv: driver private structure 2100 * Description: this masks the MMC irq, in fact, the counters are managed in SW. 2101 */ 2102 static void stmmac_mmc_setup(struct stmmac_priv *priv) 2103 { 2104 unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET | 2105 MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET; 2106 2107 if (priv->synopsys_id >= DWMAC_CORE_4_00) { 2108 priv->ptpaddr = priv->ioaddr + PTP_GMAC4_OFFSET; 2109 priv->mmcaddr = priv->ioaddr + MMC_GMAC4_OFFSET; 2110 } else { 2111 priv->ptpaddr = priv->ioaddr + PTP_GMAC3_X_OFFSET; 2112 priv->mmcaddr = priv->ioaddr + MMC_GMAC3_X_OFFSET; 2113 } 2114 2115 dwmac_mmc_intr_all_mask(priv->mmcaddr); 2116 2117 if (priv->dma_cap.rmon) { 2118 dwmac_mmc_ctrl(priv->mmcaddr, mode); 2119 memset(&priv->mmc, 0, sizeof(struct stmmac_counters)); 2120 } else 2121 netdev_info(priv->dev, "No MAC Management Counters available\n"); 2122 } 2123 2124 /** 2125 * stmmac_selec_desc_mode - to select among: normal/alternate/extend descriptors 2126 * @priv: driver private structure 2127 * Description: select the Enhanced/Alternate or Normal descriptors. 
2128 * In case of Enhanced/Alternate, it checks if the extended descriptors are 2129 * supported by the HW capability register. 2130 */ 2131 static void stmmac_selec_desc_mode(struct stmmac_priv *priv) 2132 { 2133 if (priv->plat->enh_desc) { 2134 dev_info(priv->device, "Enhanced/Alternate descriptors\n"); 2135 2136 /* GMAC older than 3.50 has no extended descriptors */ 2137 if (priv->synopsys_id >= DWMAC_CORE_3_50) { 2138 dev_info(priv->device, "Enabled extended descriptors\n"); 2139 priv->extend_desc = 1; 2140 } else 2141 dev_warn(priv->device, "Extended descriptors not supported\n"); 2142 2143 priv->hw->desc = &enh_desc_ops; 2144 } else { 2145 dev_info(priv->device, "Normal descriptors\n"); 2146 priv->hw->desc = &ndesc_ops; 2147 } 2148 } 2149 2150 /** 2151 * stmmac_get_hw_features - get MAC capabilities from the HW cap. register. 2152 * @priv: driver private structure 2153 * Description: 2154 * new GMAC chip generations have a new register to indicate the 2155 * presence of the optional feature/functions. 2156 * This can be also used to override the value passed through the 2157 * platform and necessary for old MAC10/100 and GMAC chips. 2158 */ 2159 static int stmmac_get_hw_features(struct stmmac_priv *priv) 2160 { 2161 return stmmac_get_hw_feature(priv, priv->ioaddr, &priv->dma_cap) == 0; 2162 } 2163 2164 /** 2165 * stmmac_check_ether_addr - check if the MAC addr is valid 2166 * @priv: driver private structure 2167 * Description: 2168 * it is to verify if the MAC address is valid, in case of failures it 2169 * generates a random MAC address 2170 */ 2171 static void stmmac_check_ether_addr(struct stmmac_priv *priv) 2172 { 2173 if (!is_valid_ether_addr(priv->dev->dev_addr)) { 2174 stmmac_get_umac_addr(priv, priv->hw, priv->dev->dev_addr, 0); 2175 if (!is_valid_ether_addr(priv->dev->dev_addr)) 2176 eth_hw_addr_random(priv->dev); 2177 netdev_info(priv->dev, "device MAC address %pM\n", 2178 priv->dev->dev_addr); 2179 } 2180 } 2181 2182 /** 2183 * stmmac_init_dma_engine - DMA init. 2184 * @priv: driver private structure 2185 * Description: 2186 * It inits the DMA invoking the specific MAC/GMAC callback. 2187 * Some DMA parameters can be passed from the platform; 2188 * in case of these are not passed a default is kept for the MAC or GMAC. 
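 * On cores >= 4.00 every RX/TX channel is configured individually and the
 * ring tail pointers are programmed; older cores use a single DMA setup call.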
2189 */ 2190 static int stmmac_init_dma_engine(struct stmmac_priv *priv) 2191 { 2192 u32 rx_channels_count = priv->plat->rx_queues_to_use; 2193 u32 tx_channels_count = priv->plat->tx_queues_to_use; 2194 struct stmmac_rx_queue *rx_q; 2195 struct stmmac_tx_queue *tx_q; 2196 u32 dummy_dma_rx_phy = 0; 2197 u32 dummy_dma_tx_phy = 0; 2198 u32 chan = 0; 2199 int atds = 0; 2200 int ret = 0; 2201 2202 if (!priv->plat->dma_cfg || !priv->plat->dma_cfg->pbl) { 2203 dev_err(priv->device, "Invalid DMA configuration\n"); 2204 return -EINVAL; 2205 } 2206 2207 if (priv->extend_desc && (priv->mode == STMMAC_RING_MODE)) 2208 atds = 1; 2209 2210 ret = stmmac_reset(priv, priv->ioaddr); 2211 if (ret) { 2212 dev_err(priv->device, "Failed to reset the dma\n"); 2213 return ret; 2214 } 2215 2216 if (priv->synopsys_id >= DWMAC_CORE_4_00) { 2217 /* DMA Configuration */ 2218 stmmac_dma_init(priv, priv->ioaddr, priv->plat->dma_cfg, 2219 dummy_dma_tx_phy, dummy_dma_rx_phy, atds); 2220 2221 /* DMA RX Channel Configuration */ 2222 for (chan = 0; chan < rx_channels_count; chan++) { 2223 rx_q = &priv->rx_queue[chan]; 2224 2225 stmmac_init_rx_chan(priv, priv->ioaddr, 2226 priv->plat->dma_cfg, rx_q->dma_rx_phy, 2227 chan); 2228 2229 rx_q->rx_tail_addr = rx_q->dma_rx_phy + 2230 (DMA_RX_SIZE * sizeof(struct dma_desc)); 2231 stmmac_set_rx_tail_ptr(priv, priv->ioaddr, 2232 rx_q->rx_tail_addr, chan); 2233 } 2234 2235 /* DMA TX Channel Configuration */ 2236 for (chan = 0; chan < tx_channels_count; chan++) { 2237 tx_q = &priv->tx_queue[chan]; 2238 2239 stmmac_init_chan(priv, priv->ioaddr, 2240 priv->plat->dma_cfg, chan); 2241 2242 stmmac_init_tx_chan(priv, priv->ioaddr, 2243 priv->plat->dma_cfg, tx_q->dma_tx_phy, 2244 chan); 2245 2246 tx_q->tx_tail_addr = tx_q->dma_tx_phy + 2247 (DMA_TX_SIZE * sizeof(struct dma_desc)); 2248 stmmac_set_tx_tail_ptr(priv, priv->ioaddr, 2249 tx_q->tx_tail_addr, chan); 2250 } 2251 } else { 2252 rx_q = &priv->rx_queue[chan]; 2253 tx_q = &priv->tx_queue[chan]; 2254 stmmac_dma_init(priv, priv->ioaddr, priv->plat->dma_cfg, 2255 tx_q->dma_tx_phy, rx_q->dma_rx_phy, atds); 2256 } 2257 2258 if (priv->plat->axi) 2259 stmmac_axi(priv, priv->ioaddr, priv->plat->axi); 2260 2261 return ret; 2262 } 2263 2264 /** 2265 * stmmac_tx_timer - mitigation sw timer for tx. 2266 * @data: data pointer 2267 * Description: 2268 * This is the timer handler to directly invoke the stmmac_tx_clean. 2269 */ 2270 static void stmmac_tx_timer(struct timer_list *t) 2271 { 2272 struct stmmac_priv *priv = from_timer(priv, t, txtimer); 2273 u32 tx_queues_count = priv->plat->tx_queues_to_use; 2274 u32 queue; 2275 2276 /* let's scan all the tx queues */ 2277 for (queue = 0; queue < tx_queues_count; queue++) 2278 stmmac_tx_clean(priv, queue); 2279 } 2280 2281 /** 2282 * stmmac_init_tx_coalesce - init tx mitigation options. 2283 * @priv: driver private structure 2284 * Description: 2285 * This inits the transmit coalesce parameters: i.e. timer rate, 2286 * timer handler and default threshold used for enabling the 2287 * interrupt on completion bit. 
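 * The timer is armed with STMMAC_COAL_TIMER(tx_coal_timer) and its handler,
 * stmmac_tx_timer(), cleans the TX rings of all the queues.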
2288 */ 2289 static void stmmac_init_tx_coalesce(struct stmmac_priv *priv) 2290 { 2291 priv->tx_coal_frames = STMMAC_TX_FRAMES; 2292 priv->tx_coal_timer = STMMAC_COAL_TX_TIMER; 2293 timer_setup(&priv->txtimer, stmmac_tx_timer, 0); 2294 priv->txtimer.expires = STMMAC_COAL_TIMER(priv->tx_coal_timer); 2295 add_timer(&priv->txtimer); 2296 } 2297 2298 static void stmmac_set_rings_length(struct stmmac_priv *priv) 2299 { 2300 u32 rx_channels_count = priv->plat->rx_queues_to_use; 2301 u32 tx_channels_count = priv->plat->tx_queues_to_use; 2302 u32 chan; 2303 2304 /* set TX ring length */ 2305 for (chan = 0; chan < tx_channels_count; chan++) 2306 stmmac_set_tx_ring_len(priv, priv->ioaddr, 2307 (DMA_TX_SIZE - 1), chan); 2308 2309 /* set RX ring length */ 2310 for (chan = 0; chan < rx_channels_count; chan++) 2311 stmmac_set_rx_ring_len(priv, priv->ioaddr, 2312 (DMA_RX_SIZE - 1), chan); 2313 } 2314 2315 /** 2316 * stmmac_set_tx_queue_weight - Set TX queue weight 2317 * @priv: driver private structure 2318 * Description: It is used for setting TX queues weight 2319 */ 2320 static void stmmac_set_tx_queue_weight(struct stmmac_priv *priv) 2321 { 2322 u32 tx_queues_count = priv->plat->tx_queues_to_use; 2323 u32 weight; 2324 u32 queue; 2325 2326 for (queue = 0; queue < tx_queues_count; queue++) { 2327 weight = priv->plat->tx_queues_cfg[queue].weight; 2328 stmmac_set_mtl_tx_queue_weight(priv, priv->hw, weight, queue); 2329 } 2330 } 2331 2332 /** 2333 * stmmac_configure_cbs - Configure CBS in TX queue 2334 * @priv: driver private structure 2335 * Description: It is used for configuring CBS in AVB TX queues 2336 */ 2337 static void stmmac_configure_cbs(struct stmmac_priv *priv) 2338 { 2339 u32 tx_queues_count = priv->plat->tx_queues_to_use; 2340 u32 mode_to_use; 2341 u32 queue; 2342 2343 /* queue 0 is reserved for legacy traffic */ 2344 for (queue = 1; queue < tx_queues_count; queue++) { 2345 mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use; 2346 if (mode_to_use == MTL_QUEUE_DCB) 2347 continue; 2348 2349 stmmac_config_cbs(priv, priv->hw, 2350 priv->plat->tx_queues_cfg[queue].send_slope, 2351 priv->plat->tx_queues_cfg[queue].idle_slope, 2352 priv->plat->tx_queues_cfg[queue].high_credit, 2353 priv->plat->tx_queues_cfg[queue].low_credit, 2354 queue); 2355 } 2356 } 2357 2358 /** 2359 * stmmac_rx_queue_dma_chan_map - Map RX queue to RX dma channel 2360 * @priv: driver private structure 2361 * Description: It is used for mapping RX queues to RX dma channels 2362 */ 2363 static void stmmac_rx_queue_dma_chan_map(struct stmmac_priv *priv) 2364 { 2365 u32 rx_queues_count = priv->plat->rx_queues_to_use; 2366 u32 queue; 2367 u32 chan; 2368 2369 for (queue = 0; queue < rx_queues_count; queue++) { 2370 chan = priv->plat->rx_queues_cfg[queue].chan; 2371 stmmac_map_mtl_to_dma(priv, priv->hw, queue, chan); 2372 } 2373 } 2374 2375 /** 2376 * stmmac_mac_config_rx_queues_prio - Configure RX Queue priority 2377 * @priv: driver private structure 2378 * Description: It is used for configuring the RX Queue Priority 2379 */ 2380 static void stmmac_mac_config_rx_queues_prio(struct stmmac_priv *priv) 2381 { 2382 u32 rx_queues_count = priv->plat->rx_queues_to_use; 2383 u32 queue; 2384 u32 prio; 2385 2386 for (queue = 0; queue < rx_queues_count; queue++) { 2387 if (!priv->plat->rx_queues_cfg[queue].use_prio) 2388 continue; 2389 2390 prio = priv->plat->rx_queues_cfg[queue].prio; 2391 stmmac_rx_queue_prio(priv, priv->hw, prio, queue); 2392 } 2393 } 2394 2395 /** 2396 * stmmac_mac_config_tx_queues_prio - Configure TX Queue priority 
2397 * @priv: driver private structure 2398 * Description: It is used for configuring the TX Queue Priority 2399 */ 2400 static void stmmac_mac_config_tx_queues_prio(struct stmmac_priv *priv) 2401 { 2402 u32 tx_queues_count = priv->plat->tx_queues_to_use; 2403 u32 queue; 2404 u32 prio; 2405 2406 for (queue = 0; queue < tx_queues_count; queue++) { 2407 if (!priv->plat->tx_queues_cfg[queue].use_prio) 2408 continue; 2409 2410 prio = priv->plat->tx_queues_cfg[queue].prio; 2411 stmmac_tx_queue_prio(priv, priv->hw, prio, queue); 2412 } 2413 } 2414 2415 /** 2416 * stmmac_mac_config_rx_queues_routing - Configure RX Queue Routing 2417 * @priv: driver private structure 2418 * Description: It is used for configuring the RX queue routing 2419 */ 2420 static void stmmac_mac_config_rx_queues_routing(struct stmmac_priv *priv) 2421 { 2422 u32 rx_queues_count = priv->plat->rx_queues_to_use; 2423 u32 queue; 2424 u8 packet; 2425 2426 for (queue = 0; queue < rx_queues_count; queue++) { 2427 /* no specific packet type routing specified for the queue */ 2428 if (priv->plat->rx_queues_cfg[queue].pkt_route == 0x0) 2429 continue; 2430 2431 packet = priv->plat->rx_queues_cfg[queue].pkt_route; 2432 stmmac_rx_queue_routing(priv, priv->hw, packet, queue); 2433 } 2434 } 2435 2436 /** 2437 * stmmac_mtl_configuration - Configure MTL 2438 * @priv: driver private structure 2439 * Description: It is used for configurring MTL 2440 */ 2441 static void stmmac_mtl_configuration(struct stmmac_priv *priv) 2442 { 2443 u32 rx_queues_count = priv->plat->rx_queues_to_use; 2444 u32 tx_queues_count = priv->plat->tx_queues_to_use; 2445 2446 if (tx_queues_count > 1) 2447 stmmac_set_tx_queue_weight(priv); 2448 2449 /* Configure MTL RX algorithms */ 2450 if (rx_queues_count > 1) 2451 stmmac_prog_mtl_rx_algorithms(priv, priv->hw, 2452 priv->plat->rx_sched_algorithm); 2453 2454 /* Configure MTL TX algorithms */ 2455 if (tx_queues_count > 1) 2456 stmmac_prog_mtl_tx_algorithms(priv, priv->hw, 2457 priv->plat->tx_sched_algorithm); 2458 2459 /* Configure CBS in AVB TX queues */ 2460 if (tx_queues_count > 1) 2461 stmmac_configure_cbs(priv); 2462 2463 /* Map RX MTL to DMA channels */ 2464 stmmac_rx_queue_dma_chan_map(priv); 2465 2466 /* Enable MAC RX Queues */ 2467 stmmac_mac_enable_rx_queues(priv); 2468 2469 /* Set RX priorities */ 2470 if (rx_queues_count > 1) 2471 stmmac_mac_config_rx_queues_prio(priv); 2472 2473 /* Set TX priorities */ 2474 if (tx_queues_count > 1) 2475 stmmac_mac_config_tx_queues_prio(priv); 2476 2477 /* Set RX routing */ 2478 if (rx_queues_count > 1) 2479 stmmac_mac_config_rx_queues_routing(priv); 2480 } 2481 2482 static void stmmac_safety_feat_configuration(struct stmmac_priv *priv) 2483 { 2484 if (priv->dma_cap.asp) { 2485 netdev_info(priv->dev, "Enabling Safety Features\n"); 2486 stmmac_safety_feat_config(priv, priv->ioaddr, priv->dma_cap.asp); 2487 } else { 2488 netdev_info(priv->dev, "No Safety Features support found\n"); 2489 } 2490 } 2491 2492 /** 2493 * stmmac_hw_setup - setup mac in a usable state. 2494 * @dev : pointer to the device structure. 2495 * Description: 2496 * this is the main function to setup the HW in a usable state because the 2497 * dma engine is reset, the core registers are configured (e.g. AXI, 2498 * Checksum features, timers). The DMA is ready to start receiving and 2499 * transmitting. 2500 * Return value: 2501 * 0 on success and an appropriate (-)ve integer as defined in errno.h 2502 * file on failure. 
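 * Note: when init_ptp is true the PTP reference clock and the PTP support
 * are also initialized; failures there are only reported with a warning.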
2503 */ 2504 static int stmmac_hw_setup(struct net_device *dev, bool init_ptp) 2505 { 2506 struct stmmac_priv *priv = netdev_priv(dev); 2507 u32 rx_cnt = priv->plat->rx_queues_to_use; 2508 u32 tx_cnt = priv->plat->tx_queues_to_use; 2509 u32 chan; 2510 int ret; 2511 2512 /* DMA initialization and SW reset */ 2513 ret = stmmac_init_dma_engine(priv); 2514 if (ret < 0) { 2515 netdev_err(priv->dev, "%s: DMA engine initialization failed\n", 2516 __func__); 2517 return ret; 2518 } 2519 2520 /* Copy the MAC addr into the HW */ 2521 stmmac_set_umac_addr(priv, priv->hw, dev->dev_addr, 0); 2522 2523 /* PS and related bits will be programmed according to the speed */ 2524 if (priv->hw->pcs) { 2525 int speed = priv->plat->mac_port_sel_speed; 2526 2527 if ((speed == SPEED_10) || (speed == SPEED_100) || 2528 (speed == SPEED_1000)) { 2529 priv->hw->ps = speed; 2530 } else { 2531 dev_warn(priv->device, "invalid port speed\n"); 2532 priv->hw->ps = 0; 2533 } 2534 } 2535 2536 /* Initialize the MAC Core */ 2537 stmmac_core_init(priv, priv->hw, dev); 2538 2539 /* Initialize MTL*/ 2540 if (priv->synopsys_id >= DWMAC_CORE_4_00) 2541 stmmac_mtl_configuration(priv); 2542 2543 /* Initialize Safety Features */ 2544 if (priv->synopsys_id >= DWMAC_CORE_5_10) 2545 stmmac_safety_feat_configuration(priv); 2546 2547 ret = stmmac_rx_ipc(priv, priv->hw); 2548 if (!ret) { 2549 netdev_warn(priv->dev, "RX IPC Checksum Offload disabled\n"); 2550 priv->plat->rx_coe = STMMAC_RX_COE_NONE; 2551 priv->hw->rx_csum = 0; 2552 } 2553 2554 /* Enable the MAC Rx/Tx */ 2555 stmmac_mac_set(priv, priv->ioaddr, true); 2556 2557 /* Set the HW DMA mode and the COE */ 2558 stmmac_dma_operation_mode(priv); 2559 2560 stmmac_mmc_setup(priv); 2561 2562 if (init_ptp) { 2563 ret = clk_prepare_enable(priv->plat->clk_ptp_ref); 2564 if (ret < 0) 2565 netdev_warn(priv->dev, "failed to enable PTP reference clock: %d\n", ret); 2566 2567 ret = stmmac_init_ptp(priv); 2568 if (ret == -EOPNOTSUPP) 2569 netdev_warn(priv->dev, "PTP not supported by HW\n"); 2570 else if (ret) 2571 netdev_warn(priv->dev, "PTP init failed\n"); 2572 } 2573 2574 #ifdef CONFIG_DEBUG_FS 2575 ret = stmmac_init_fs(dev); 2576 if (ret < 0) 2577 netdev_warn(priv->dev, "%s: failed debugFS registration\n", 2578 __func__); 2579 #endif 2580 /* Start the ball rolling... */ 2581 stmmac_start_all_dma(priv); 2582 2583 priv->tx_lpi_timer = STMMAC_DEFAULT_TWT_LS; 2584 2585 if (priv->use_riwt) { 2586 ret = stmmac_rx_watchdog(priv, priv->ioaddr, MAX_DMA_RIWT, rx_cnt); 2587 if (!ret) 2588 priv->rx_riwt = MAX_DMA_RIWT; 2589 } 2590 2591 if (priv->hw->pcs) 2592 stmmac_pcs_ctrl_ane(priv, priv->hw, 1, priv->hw->ps, 0); 2593 2594 /* set TX and RX rings length */ 2595 stmmac_set_rings_length(priv); 2596 2597 /* Enable TSO */ 2598 if (priv->tso) { 2599 for (chan = 0; chan < tx_cnt; chan++) 2600 stmmac_enable_tso(priv, priv->ioaddr, 1, chan); 2601 } 2602 2603 return 0; 2604 } 2605 2606 static void stmmac_hw_teardown(struct net_device *dev) 2607 { 2608 struct stmmac_priv *priv = netdev_priv(dev); 2609 2610 clk_disable_unprepare(priv->plat->clk_ptp_ref); 2611 } 2612 2613 /** 2614 * stmmac_open - open entry point of the driver 2615 * @dev : pointer to the device structure. 2616 * Description: 2617 * This function is the open entry point of the driver. 2618 * Return value: 2619 * 0 on success and an appropriate (-)ve integer as defined in errno.h 2620 * file on failure. 
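 * It allocates and initializes the DMA descriptor rings, sets up the HW,
 * requests the IRQ lines and finally starts and enables all the queues.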
2621 */ 2622 static int stmmac_open(struct net_device *dev) 2623 { 2624 struct stmmac_priv *priv = netdev_priv(dev); 2625 int ret; 2626 2627 stmmac_check_ether_addr(priv); 2628 2629 if (priv->hw->pcs != STMMAC_PCS_RGMII && 2630 priv->hw->pcs != STMMAC_PCS_TBI && 2631 priv->hw->pcs != STMMAC_PCS_RTBI) { 2632 ret = stmmac_init_phy(dev); 2633 if (ret) { 2634 netdev_err(priv->dev, 2635 "%s: Cannot attach to PHY (error: %d)\n", 2636 __func__, ret); 2637 return ret; 2638 } 2639 } 2640 2641 /* Extra statistics */ 2642 memset(&priv->xstats, 0, sizeof(struct stmmac_extra_stats)); 2643 priv->xstats.threshold = tc; 2644 2645 priv->dma_buf_sz = STMMAC_ALIGN(buf_sz); 2646 priv->rx_copybreak = STMMAC_RX_COPYBREAK; 2647 2648 ret = alloc_dma_desc_resources(priv); 2649 if (ret < 0) { 2650 netdev_err(priv->dev, "%s: DMA descriptors allocation failed\n", 2651 __func__); 2652 goto dma_desc_error; 2653 } 2654 2655 ret = init_dma_desc_rings(dev, GFP_KERNEL); 2656 if (ret < 0) { 2657 netdev_err(priv->dev, "%s: DMA descriptors initialization failed\n", 2658 __func__); 2659 goto init_error; 2660 } 2661 2662 ret = stmmac_hw_setup(dev, true); 2663 if (ret < 0) { 2664 netdev_err(priv->dev, "%s: Hw setup failed\n", __func__); 2665 goto init_error; 2666 } 2667 2668 stmmac_init_tx_coalesce(priv); 2669 2670 if (dev->phydev) 2671 phy_start(dev->phydev); 2672 2673 /* Request the IRQ lines */ 2674 ret = request_irq(dev->irq, stmmac_interrupt, 2675 IRQF_SHARED, dev->name, dev); 2676 if (unlikely(ret < 0)) { 2677 netdev_err(priv->dev, 2678 "%s: ERROR: allocating the IRQ %d (error: %d)\n", 2679 __func__, dev->irq, ret); 2680 goto irq_error; 2681 } 2682 2683 /* Request the Wake IRQ in case of another line is used for WoL */ 2684 if (priv->wol_irq != dev->irq) { 2685 ret = request_irq(priv->wol_irq, stmmac_interrupt, 2686 IRQF_SHARED, dev->name, dev); 2687 if (unlikely(ret < 0)) { 2688 netdev_err(priv->dev, 2689 "%s: ERROR: allocating the WoL IRQ %d (%d)\n", 2690 __func__, priv->wol_irq, ret); 2691 goto wolirq_error; 2692 } 2693 } 2694 2695 /* Request the IRQ lines */ 2696 if (priv->lpi_irq > 0) { 2697 ret = request_irq(priv->lpi_irq, stmmac_interrupt, IRQF_SHARED, 2698 dev->name, dev); 2699 if (unlikely(ret < 0)) { 2700 netdev_err(priv->dev, 2701 "%s: ERROR: allocating the LPI IRQ %d (%d)\n", 2702 __func__, priv->lpi_irq, ret); 2703 goto lpiirq_error; 2704 } 2705 } 2706 2707 stmmac_enable_all_queues(priv); 2708 stmmac_start_all_queues(priv); 2709 2710 return 0; 2711 2712 lpiirq_error: 2713 if (priv->wol_irq != dev->irq) 2714 free_irq(priv->wol_irq, dev); 2715 wolirq_error: 2716 free_irq(dev->irq, dev); 2717 irq_error: 2718 if (dev->phydev) 2719 phy_stop(dev->phydev); 2720 2721 del_timer_sync(&priv->txtimer); 2722 stmmac_hw_teardown(dev); 2723 init_error: 2724 free_dma_desc_resources(priv); 2725 dma_desc_error: 2726 if (dev->phydev) 2727 phy_disconnect(dev->phydev); 2728 2729 return ret; 2730 } 2731 2732 /** 2733 * stmmac_release - close entry point of the driver 2734 * @dev : device pointer. 2735 * Description: 2736 * This is the stop entry point of the driver. 
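 * It stops the PHY and the queues, frees the IRQ lines, stops the DMA and
 * releases all the Tx/Rx resources before disabling the MAC.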
2737 */
2738 static int stmmac_release(struct net_device *dev)
2739 {
2740         struct stmmac_priv *priv = netdev_priv(dev);
2741 
2742         if (priv->eee_enabled)
2743                 del_timer_sync(&priv->eee_ctrl_timer);
2744 
2745         /* Stop and disconnect the PHY */
2746         if (dev->phydev) {
2747                 phy_stop(dev->phydev);
2748                 phy_disconnect(dev->phydev);
2749         }
2750 
2751         stmmac_stop_all_queues(priv);
2752 
2753         stmmac_disable_all_queues(priv);
2754 
2755         del_timer_sync(&priv->txtimer);
2756 
2757         /* Free the IRQ lines */
2758         free_irq(dev->irq, dev);
2759         if (priv->wol_irq != dev->irq)
2760                 free_irq(priv->wol_irq, dev);
2761         if (priv->lpi_irq > 0)
2762                 free_irq(priv->lpi_irq, dev);
2763 
2764         /* Stop TX/RX DMA and clear the descriptors */
2765         stmmac_stop_all_dma(priv);
2766 
2767         /* Release and free the Rx/Tx resources */
2768         free_dma_desc_resources(priv);
2769 
2770         /* Disable the MAC Rx/Tx */
2771         stmmac_mac_set(priv, priv->ioaddr, false);
2772 
2773         netif_carrier_off(dev);
2774 
2775 #ifdef CONFIG_DEBUG_FS
2776         stmmac_exit_fs(dev);
2777 #endif
2778 
2779         stmmac_release_ptp(priv);
2780 
2781         return 0;
2782 }
2783 
2784 /**
2785  * stmmac_tso_allocator - fill the TX descriptors for the TSO payload
2786  * @priv: driver private structure
2787  * @des: buffer start address
2788  * @total_len: total length to fill in descriptors
2789  * @last_segment: condition for the last descriptor
2790  * @queue: TX queue index
2791  * Description:
2792  * This function fills the descriptors and requests new descriptors according
2793  * to the buffer length to fill.
2794  */
2795 static void stmmac_tso_allocator(struct stmmac_priv *priv, unsigned int des,
2796                                  int total_len, bool last_segment, u32 queue)
2797 {
2798         struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
2799         struct dma_desc *desc;
2800         u32 buff_size;
2801         int tmp_len;
2802 
2803         tmp_len = total_len;
2804 
2805         while (tmp_len > 0) {
2806                 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
2807                 WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
2808                 desc = tx_q->dma_tx + tx_q->cur_tx;
2809 
2810                 desc->des0 = cpu_to_le32(des + (total_len - tmp_len));
2811                 buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ?
2812                             TSO_MAX_BUFF_SIZE : tmp_len;
2813 
2814                 stmmac_prepare_tso_tx_desc(priv, desc, 0, buff_size,
2815                                 0, 1,
2816                                 (last_segment) && (tmp_len <= TSO_MAX_BUFF_SIZE),
2817                                 0, 0);
2818 
2819                 tmp_len -= TSO_MAX_BUFF_SIZE;
2820         }
2821 }
2822 
2823 /**
2824  * stmmac_tso_xmit - Tx entry point of the driver for oversized frames (TSO)
2825  * @skb : the socket buffer
2826  * @dev : device pointer
2827  * Description: this is the transmit function that is called on TSO frames
2828  * (support available on GMAC4 and newer chips).
2829  * The diagram below shows the ring programming in case of TSO frames:
2830  *
2831  * First Descriptor
2832  *  --------
2833  * | DES0 |---> buffer1 = L2/L3/L4 header
2834  * | DES1 |---> TCP Payload (can continue on next descr...)
2835  * | DES2 |---> buffer 1 and 2 len
2836  * | DES3 |---> must set TSE, TCP hdr len-> [22:19]. TCP payload len [17:0]
2837  *  --------
2838  *      |
2839  *     ...
2840  *      |
2841  *  --------
2842  * | DES0 | --| Split TCP Payload on Buffers 1 and 2
2843  * | DES1 | --|
2844  * | DES2 | --> buffer 1 and 2 len
2845  * | DES3 |
2846  *  --------
2847  *
2848  * mss is fixed when TSO is enabled, so the TDES3 ctx field does not need to be reprogrammed per frame.
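 * Note that a context descriptor carrying the MSS is queued only when the
 * MSS differs from the last value programmed for this TX queue.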
2849 */ 2850 static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev) 2851 { 2852 struct dma_desc *desc, *first, *mss_desc = NULL; 2853 struct stmmac_priv *priv = netdev_priv(dev); 2854 int nfrags = skb_shinfo(skb)->nr_frags; 2855 u32 queue = skb_get_queue_mapping(skb); 2856 unsigned int first_entry, des; 2857 struct stmmac_tx_queue *tx_q; 2858 int tmp_pay_len = 0; 2859 u32 pay_len, mss; 2860 u8 proto_hdr_len; 2861 int i; 2862 2863 tx_q = &priv->tx_queue[queue]; 2864 2865 /* Compute header lengths */ 2866 proto_hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); 2867 2868 /* Desc availability based on threshold should be enough safe */ 2869 if (unlikely(stmmac_tx_avail(priv, queue) < 2870 (((skb->len - proto_hdr_len) / TSO_MAX_BUFF_SIZE + 1)))) { 2871 if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) { 2872 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, 2873 queue)); 2874 /* This is a hard error, log it. */ 2875 netdev_err(priv->dev, 2876 "%s: Tx Ring full when queue awake\n", 2877 __func__); 2878 } 2879 return NETDEV_TX_BUSY; 2880 } 2881 2882 pay_len = skb_headlen(skb) - proto_hdr_len; /* no frags */ 2883 2884 mss = skb_shinfo(skb)->gso_size; 2885 2886 /* set new MSS value if needed */ 2887 if (mss != tx_q->mss) { 2888 mss_desc = tx_q->dma_tx + tx_q->cur_tx; 2889 stmmac_set_mss(priv, mss_desc, mss); 2890 tx_q->mss = mss; 2891 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE); 2892 WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]); 2893 } 2894 2895 if (netif_msg_tx_queued(priv)) { 2896 pr_info("%s: tcphdrlen %d, hdr_len %d, pay_len %d, mss %d\n", 2897 __func__, tcp_hdrlen(skb), proto_hdr_len, pay_len, mss); 2898 pr_info("\tskb->len %d, skb->data_len %d\n", skb->len, 2899 skb->data_len); 2900 } 2901 2902 first_entry = tx_q->cur_tx; 2903 WARN_ON(tx_q->tx_skbuff[first_entry]); 2904 2905 desc = tx_q->dma_tx + first_entry; 2906 first = desc; 2907 2908 /* first descriptor: fill Headers on Buf1 */ 2909 des = dma_map_single(priv->device, skb->data, skb_headlen(skb), 2910 DMA_TO_DEVICE); 2911 if (dma_mapping_error(priv->device, des)) 2912 goto dma_map_err; 2913 2914 tx_q->tx_skbuff_dma[first_entry].buf = des; 2915 tx_q->tx_skbuff_dma[first_entry].len = skb_headlen(skb); 2916 2917 first->des0 = cpu_to_le32(des); 2918 2919 /* Fill start of payload in buff2 of first descriptor */ 2920 if (pay_len) 2921 first->des1 = cpu_to_le32(des + proto_hdr_len); 2922 2923 /* If needed take extra descriptors to fill the remaining payload */ 2924 tmp_pay_len = pay_len - TSO_MAX_BUFF_SIZE; 2925 2926 stmmac_tso_allocator(priv, des, tmp_pay_len, (nfrags == 0), queue); 2927 2928 /* Prepare fragments */ 2929 for (i = 0; i < nfrags; i++) { 2930 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 2931 2932 des = skb_frag_dma_map(priv->device, frag, 0, 2933 skb_frag_size(frag), 2934 DMA_TO_DEVICE); 2935 if (dma_mapping_error(priv->device, des)) 2936 goto dma_map_err; 2937 2938 stmmac_tso_allocator(priv, des, skb_frag_size(frag), 2939 (i == nfrags - 1), queue); 2940 2941 tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des; 2942 tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_frag_size(frag); 2943 tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = true; 2944 } 2945 2946 tx_q->tx_skbuff_dma[tx_q->cur_tx].last_segment = true; 2947 2948 /* Only the last descriptor gets to point to the skb. */ 2949 tx_q->tx_skbuff[tx_q->cur_tx] = skb; 2950 2951 /* We've used all descriptors we need for this skb, however, 2952 * advance cur_tx so that it references a fresh descriptor. 
2953 * ndo_start_xmit will fill this descriptor the next time it's 2954 * called and stmmac_tx_clean may clean up to this descriptor. 2955 */ 2956 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE); 2957 2958 if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) { 2959 netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n", 2960 __func__); 2961 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue)); 2962 } 2963 2964 dev->stats.tx_bytes += skb->len; 2965 priv->xstats.tx_tso_frames++; 2966 priv->xstats.tx_tso_nfrags += nfrags; 2967 2968 /* Manage tx mitigation */ 2969 priv->tx_count_frames += nfrags + 1; 2970 if (likely(priv->tx_coal_frames > priv->tx_count_frames)) { 2971 mod_timer(&priv->txtimer, 2972 STMMAC_COAL_TIMER(priv->tx_coal_timer)); 2973 } else { 2974 priv->tx_count_frames = 0; 2975 stmmac_set_tx_ic(priv, desc); 2976 priv->xstats.tx_set_ic_bit++; 2977 } 2978 2979 skb_tx_timestamp(skb); 2980 2981 if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && 2982 priv->hwts_tx_en)) { 2983 /* declare that device is doing timestamping */ 2984 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; 2985 stmmac_enable_tx_timestamp(priv, first); 2986 } 2987 2988 /* Complete the first descriptor before granting the DMA */ 2989 stmmac_prepare_tso_tx_desc(priv, first, 1, 2990 proto_hdr_len, 2991 pay_len, 2992 1, tx_q->tx_skbuff_dma[first_entry].last_segment, 2993 tcp_hdrlen(skb) / 4, (skb->len - proto_hdr_len)); 2994 2995 /* If context desc is used to change MSS */ 2996 if (mss_desc) { 2997 /* Make sure that first descriptor has been completely 2998 * written, including its own bit. This is because MSS is 2999 * actually before first descriptor, so we need to make 3000 * sure that MSS's own bit is the last thing written. 3001 */ 3002 dma_wmb(); 3003 stmmac_set_tx_owner(priv, mss_desc); 3004 } 3005 3006 /* The own bit must be the latest setting done when prepare the 3007 * descriptor and then barrier is needed to make sure that 3008 * all is coherent before granting the DMA engine. 3009 */ 3010 wmb(); 3011 3012 if (netif_msg_pktdata(priv)) { 3013 pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n", 3014 __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry, 3015 tx_q->cur_tx, first, nfrags); 3016 3017 stmmac_display_ring(priv, (void *)tx_q->dma_tx, DMA_TX_SIZE, 0); 3018 3019 pr_info(">>> frame to be transmitted: "); 3020 print_pkt(skb->data, skb_headlen(skb)); 3021 } 3022 3023 netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len); 3024 3025 stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue); 3026 3027 return NETDEV_TX_OK; 3028 3029 dma_map_err: 3030 dev_err(priv->device, "Tx dma map failed\n"); 3031 dev_kfree_skb(skb); 3032 priv->dev->stats.tx_dropped++; 3033 return NETDEV_TX_OK; 3034 } 3035 3036 /** 3037 * stmmac_xmit - Tx entry point of the driver 3038 * @skb : the socket buffer 3039 * @dev : device pointer 3040 * Description : this is the tx entry point of the driver. 3041 * It programs the chain or the ring and supports oversized frames 3042 * and SG feature. 
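 * TSO frames are redirected to stmmac_tso_xmit() when the feature is
 * enabled; otherwise each fragment is DMA mapped and its descriptor is
 * prepared before ownership is granted to the DMA engine.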
3043 */ 3044 static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) 3045 { 3046 struct stmmac_priv *priv = netdev_priv(dev); 3047 unsigned int nopaged_len = skb_headlen(skb); 3048 int i, csum_insertion = 0, is_jumbo = 0; 3049 u32 queue = skb_get_queue_mapping(skb); 3050 int nfrags = skb_shinfo(skb)->nr_frags; 3051 int entry; 3052 unsigned int first_entry; 3053 struct dma_desc *desc, *first; 3054 struct stmmac_tx_queue *tx_q; 3055 unsigned int enh_desc; 3056 unsigned int des; 3057 3058 tx_q = &priv->tx_queue[queue]; 3059 3060 /* Manage oversized TCP frames for GMAC4 device */ 3061 if (skb_is_gso(skb) && priv->tso) { 3062 if (skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) 3063 return stmmac_tso_xmit(skb, dev); 3064 } 3065 3066 if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) { 3067 if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) { 3068 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, 3069 queue)); 3070 /* This is a hard error, log it. */ 3071 netdev_err(priv->dev, 3072 "%s: Tx Ring full when queue awake\n", 3073 __func__); 3074 } 3075 return NETDEV_TX_BUSY; 3076 } 3077 3078 if (priv->tx_path_in_lpi_mode) 3079 stmmac_disable_eee_mode(priv); 3080 3081 entry = tx_q->cur_tx; 3082 first_entry = entry; 3083 WARN_ON(tx_q->tx_skbuff[first_entry]); 3084 3085 csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL); 3086 3087 if (likely(priv->extend_desc)) 3088 desc = (struct dma_desc *)(tx_q->dma_etx + entry); 3089 else 3090 desc = tx_q->dma_tx + entry; 3091 3092 first = desc; 3093 3094 enh_desc = priv->plat->enh_desc; 3095 /* To program the descriptors according to the size of the frame */ 3096 if (enh_desc) 3097 is_jumbo = stmmac_is_jumbo_frm(priv, skb->len, enh_desc); 3098 3099 if (unlikely(is_jumbo) && likely(priv->synopsys_id < 3100 DWMAC_CORE_4_00)) { 3101 entry = stmmac_jumbo_frm(priv, tx_q, skb, csum_insertion); 3102 if (unlikely(entry < 0)) 3103 goto dma_map_err; 3104 } 3105 3106 for (i = 0; i < nfrags; i++) { 3107 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 3108 int len = skb_frag_size(frag); 3109 bool last_segment = (i == (nfrags - 1)); 3110 3111 entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE); 3112 WARN_ON(tx_q->tx_skbuff[entry]); 3113 3114 if (likely(priv->extend_desc)) 3115 desc = (struct dma_desc *)(tx_q->dma_etx + entry); 3116 else 3117 desc = tx_q->dma_tx + entry; 3118 3119 des = skb_frag_dma_map(priv->device, frag, 0, len, 3120 DMA_TO_DEVICE); 3121 if (dma_mapping_error(priv->device, des)) 3122 goto dma_map_err; /* should reuse desc w/o issues */ 3123 3124 tx_q->tx_skbuff_dma[entry].buf = des; 3125 if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00)) 3126 desc->des0 = cpu_to_le32(des); 3127 else 3128 desc->des2 = cpu_to_le32(des); 3129 3130 tx_q->tx_skbuff_dma[entry].map_as_page = true; 3131 tx_q->tx_skbuff_dma[entry].len = len; 3132 tx_q->tx_skbuff_dma[entry].last_segment = last_segment; 3133 3134 /* Prepare the descriptor and set the own bit too */ 3135 stmmac_prepare_tx_desc(priv, desc, 0, len, csum_insertion, 3136 priv->mode, 1, last_segment, skb->len); 3137 } 3138 3139 /* Only the last descriptor gets to point to the skb. */ 3140 tx_q->tx_skbuff[entry] = skb; 3141 3142 /* We've used all descriptors we need for this skb, however, 3143 * advance cur_tx so that it references a fresh descriptor. 3144 * ndo_start_xmit will fill this descriptor the next time it's 3145 * called and stmmac_tx_clean may clean up to this descriptor. 
3146 */ 3147 entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE); 3148 tx_q->cur_tx = entry; 3149 3150 if (netif_msg_pktdata(priv)) { 3151 void *tx_head; 3152 3153 netdev_dbg(priv->dev, 3154 "%s: curr=%d dirty=%d f=%d, e=%d, first=%p, nfrags=%d", 3155 __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry, 3156 entry, first, nfrags); 3157 3158 if (priv->extend_desc) 3159 tx_head = (void *)tx_q->dma_etx; 3160 else 3161 tx_head = (void *)tx_q->dma_tx; 3162 3163 stmmac_display_ring(priv, tx_head, DMA_TX_SIZE, false); 3164 3165 netdev_dbg(priv->dev, ">>> frame to be transmitted: "); 3166 print_pkt(skb->data, skb->len); 3167 } 3168 3169 if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) { 3170 netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n", 3171 __func__); 3172 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue)); 3173 } 3174 3175 dev->stats.tx_bytes += skb->len; 3176 3177 /* According to the coalesce parameter the IC bit for the latest 3178 * segment is reset and the timer re-started to clean the tx status. 3179 * This approach takes care about the fragments: desc is the first 3180 * element in case of no SG. 3181 */ 3182 priv->tx_count_frames += nfrags + 1; 3183 if (likely(priv->tx_coal_frames > priv->tx_count_frames)) { 3184 mod_timer(&priv->txtimer, 3185 STMMAC_COAL_TIMER(priv->tx_coal_timer)); 3186 } else { 3187 priv->tx_count_frames = 0; 3188 stmmac_set_tx_ic(priv, desc); 3189 priv->xstats.tx_set_ic_bit++; 3190 } 3191 3192 skb_tx_timestamp(skb); 3193 3194 /* Ready to fill the first descriptor and set the OWN bit w/o any 3195 * problems because all the descriptors are actually ready to be 3196 * passed to the DMA engine. 3197 */ 3198 if (likely(!is_jumbo)) { 3199 bool last_segment = (nfrags == 0); 3200 3201 des = dma_map_single(priv->device, skb->data, 3202 nopaged_len, DMA_TO_DEVICE); 3203 if (dma_mapping_error(priv->device, des)) 3204 goto dma_map_err; 3205 3206 tx_q->tx_skbuff_dma[first_entry].buf = des; 3207 if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00)) 3208 first->des0 = cpu_to_le32(des); 3209 else 3210 first->des2 = cpu_to_le32(des); 3211 3212 tx_q->tx_skbuff_dma[first_entry].len = nopaged_len; 3213 tx_q->tx_skbuff_dma[first_entry].last_segment = last_segment; 3214 3215 if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && 3216 priv->hwts_tx_en)) { 3217 /* declare that device is doing timestamping */ 3218 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; 3219 stmmac_enable_tx_timestamp(priv, first); 3220 } 3221 3222 /* Prepare the first descriptor setting the OWN bit too */ 3223 stmmac_prepare_tx_desc(priv, first, 1, nopaged_len, 3224 csum_insertion, priv->mode, 1, last_segment, 3225 skb->len); 3226 3227 /* The own bit must be the latest setting done when prepare the 3228 * descriptor and then barrier is needed to make sure that 3229 * all is coherent before granting the DMA engine. 
3230 */ 3231 wmb(); 3232 } 3233 3234 netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len); 3235 3236 if (priv->synopsys_id < DWMAC_CORE_4_00) 3237 stmmac_enable_dma_transmission(priv, priv->ioaddr); 3238 else 3239 stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, 3240 queue); 3241 3242 return NETDEV_TX_OK; 3243 3244 dma_map_err: 3245 netdev_err(priv->dev, "Tx DMA map failed\n"); 3246 dev_kfree_skb(skb); 3247 priv->dev->stats.tx_dropped++; 3248 return NETDEV_TX_OK; 3249 } 3250 3251 static void stmmac_rx_vlan(struct net_device *dev, struct sk_buff *skb) 3252 { 3253 struct ethhdr *ehdr; 3254 u16 vlanid; 3255 3256 if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) == 3257 NETIF_F_HW_VLAN_CTAG_RX && 3258 !__vlan_get_tag(skb, &vlanid)) { 3259 /* pop the vlan tag */ 3260 ehdr = (struct ethhdr *)skb->data; 3261 memmove(skb->data + VLAN_HLEN, ehdr, ETH_ALEN * 2); 3262 skb_pull(skb, VLAN_HLEN); 3263 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlanid); 3264 } 3265 } 3266 3267 3268 static inline int stmmac_rx_threshold_count(struct stmmac_rx_queue *rx_q) 3269 { 3270 if (rx_q->rx_zeroc_thresh < STMMAC_RX_THRESH) 3271 return 0; 3272 3273 return 1; 3274 } 3275 3276 /** 3277 * stmmac_rx_refill - refill used skb preallocated buffers 3278 * @priv: driver private structure 3279 * @queue: RX queue index 3280 * Description : this is to reallocate the skb for the reception process 3281 * that is based on zero-copy. 3282 */ 3283 static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue) 3284 { 3285 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; 3286 int dirty = stmmac_rx_dirty(priv, queue); 3287 unsigned int entry = rx_q->dirty_rx; 3288 3289 int bfsize = priv->dma_buf_sz; 3290 3291 while (dirty-- > 0) { 3292 struct dma_desc *p; 3293 3294 if (priv->extend_desc) 3295 p = (struct dma_desc *)(rx_q->dma_erx + entry); 3296 else 3297 p = rx_q->dma_rx + entry; 3298 3299 if (likely(!rx_q->rx_skbuff[entry])) { 3300 struct sk_buff *skb; 3301 3302 skb = netdev_alloc_skb_ip_align(priv->dev, bfsize); 3303 if (unlikely(!skb)) { 3304 /* so for a while no zero-copy! */ 3305 rx_q->rx_zeroc_thresh = STMMAC_RX_THRESH; 3306 if (unlikely(net_ratelimit())) 3307 dev_err(priv->device, 3308 "fail to alloc skb entry %d\n", 3309 entry); 3310 break; 3311 } 3312 3313 rx_q->rx_skbuff[entry] = skb; 3314 rx_q->rx_skbuff_dma[entry] = 3315 dma_map_single(priv->device, skb->data, bfsize, 3316 DMA_FROM_DEVICE); 3317 if (dma_mapping_error(priv->device, 3318 rx_q->rx_skbuff_dma[entry])) { 3319 netdev_err(priv->dev, "Rx DMA map failed\n"); 3320 dev_kfree_skb(skb); 3321 break; 3322 } 3323 3324 if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00)) { 3325 p->des0 = cpu_to_le32(rx_q->rx_skbuff_dma[entry]); 3326 p->des1 = 0; 3327 } else { 3328 p->des2 = cpu_to_le32(rx_q->rx_skbuff_dma[entry]); 3329 } 3330 3331 stmmac_refill_desc3(priv, rx_q, p); 3332 3333 if (rx_q->rx_zeroc_thresh > 0) 3334 rx_q->rx_zeroc_thresh--; 3335 3336 netif_dbg(priv, rx_status, priv->dev, 3337 "refill entry #%d\n", entry); 3338 } 3339 dma_wmb(); 3340 3341 if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00)) 3342 stmmac_init_rx_desc(priv, p, priv->use_riwt, 0, 0); 3343 else 3344 stmmac_set_rx_owner(priv, p); 3345 3346 dma_wmb(); 3347 3348 entry = STMMAC_GET_ENTRY(entry, DMA_RX_SIZE); 3349 } 3350 rx_q->dirty_rx = entry; 3351 } 3352 3353 /** 3354 * stmmac_rx - manage the receive process 3355 * @priv: driver private structure 3356 * @limit: napi bugget 3357 * @queue: RX queue index. 
3358 * Description : this the function called by the napi poll method. 3359 * It gets all the frames inside the ring. 3360 */ 3361 static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue) 3362 { 3363 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; 3364 unsigned int entry = rx_q->cur_rx; 3365 int coe = priv->hw->rx_csum; 3366 unsigned int next_entry; 3367 unsigned int count = 0; 3368 3369 if (netif_msg_rx_status(priv)) { 3370 void *rx_head; 3371 3372 netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__); 3373 if (priv->extend_desc) 3374 rx_head = (void *)rx_q->dma_erx; 3375 else 3376 rx_head = (void *)rx_q->dma_rx; 3377 3378 stmmac_display_ring(priv, rx_head, DMA_RX_SIZE, true); 3379 } 3380 while (count < limit) { 3381 int status; 3382 struct dma_desc *p; 3383 struct dma_desc *np; 3384 3385 if (priv->extend_desc) 3386 p = (struct dma_desc *)(rx_q->dma_erx + entry); 3387 else 3388 p = rx_q->dma_rx + entry; 3389 3390 /* read the status of the incoming frame */ 3391 status = stmmac_rx_status(priv, &priv->dev->stats, 3392 &priv->xstats, p); 3393 /* check if managed by the DMA otherwise go ahead */ 3394 if (unlikely(status & dma_own)) 3395 break; 3396 3397 count++; 3398 3399 rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx, DMA_RX_SIZE); 3400 next_entry = rx_q->cur_rx; 3401 3402 if (priv->extend_desc) 3403 np = (struct dma_desc *)(rx_q->dma_erx + next_entry); 3404 else 3405 np = rx_q->dma_rx + next_entry; 3406 3407 prefetch(np); 3408 3409 if (priv->extend_desc) 3410 stmmac_rx_extended_status(priv, &priv->dev->stats, 3411 &priv->xstats, rx_q->dma_erx + entry); 3412 if (unlikely(status == discard_frame)) { 3413 priv->dev->stats.rx_errors++; 3414 if (priv->hwts_rx_en && !priv->extend_desc) { 3415 /* DESC2 & DESC3 will be overwritten by device 3416 * with timestamp value, hence reinitialize 3417 * them in stmmac_rx_refill() function so that 3418 * device can reuse it. 3419 */ 3420 dev_kfree_skb_any(rx_q->rx_skbuff[entry]); 3421 rx_q->rx_skbuff[entry] = NULL; 3422 dma_unmap_single(priv->device, 3423 rx_q->rx_skbuff_dma[entry], 3424 priv->dma_buf_sz, 3425 DMA_FROM_DEVICE); 3426 } 3427 } else { 3428 struct sk_buff *skb; 3429 int frame_len; 3430 unsigned int des; 3431 3432 if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00)) 3433 des = le32_to_cpu(p->des0); 3434 else 3435 des = le32_to_cpu(p->des2); 3436 3437 frame_len = stmmac_get_rx_frame_len(priv, p, coe); 3438 3439 /* If frame length is greater than skb buffer size 3440 * (preallocated during init) then the packet is 3441 * ignored 3442 */ 3443 if (frame_len > priv->dma_buf_sz) { 3444 netdev_err(priv->dev, 3445 "len %d larger than size (%d)\n", 3446 frame_len, priv->dma_buf_sz); 3447 priv->dev->stats.rx_length_errors++; 3448 break; 3449 } 3450 3451 /* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3 3452 * Type frames (LLC/LLC-SNAP) 3453 */ 3454 if (unlikely(status != llc_snap)) 3455 frame_len -= ETH_FCS_LEN; 3456 3457 if (netif_msg_rx_status(priv)) { 3458 netdev_dbg(priv->dev, "\tdesc: %p [entry %d] buff=0x%x\n", 3459 p, entry, des); 3460 netdev_dbg(priv->dev, "frame size %d, COE: %d\n", 3461 frame_len, status); 3462 } 3463 3464 /* The zero-copy is always used for all the sizes 3465 * in case of GMAC4 because it needs 3466 * to refill the used descriptors, always. 
3467 */ 3468 if (unlikely(!priv->plat->has_gmac4 && 3469 ((frame_len < priv->rx_copybreak) || 3470 stmmac_rx_threshold_count(rx_q)))) { 3471 skb = netdev_alloc_skb_ip_align(priv->dev, 3472 frame_len); 3473 if (unlikely(!skb)) { 3474 if (net_ratelimit()) 3475 dev_warn(priv->device, 3476 "packet dropped\n"); 3477 priv->dev->stats.rx_dropped++; 3478 break; 3479 } 3480 3481 dma_sync_single_for_cpu(priv->device, 3482 rx_q->rx_skbuff_dma 3483 [entry], frame_len, 3484 DMA_FROM_DEVICE); 3485 skb_copy_to_linear_data(skb, 3486 rx_q-> 3487 rx_skbuff[entry]->data, 3488 frame_len); 3489 3490 skb_put(skb, frame_len); 3491 dma_sync_single_for_device(priv->device, 3492 rx_q->rx_skbuff_dma 3493 [entry], frame_len, 3494 DMA_FROM_DEVICE); 3495 } else { 3496 skb = rx_q->rx_skbuff[entry]; 3497 if (unlikely(!skb)) { 3498 netdev_err(priv->dev, 3499 "%s: Inconsistent Rx chain\n", 3500 priv->dev->name); 3501 priv->dev->stats.rx_dropped++; 3502 break; 3503 } 3504 prefetch(skb->data - NET_IP_ALIGN); 3505 rx_q->rx_skbuff[entry] = NULL; 3506 rx_q->rx_zeroc_thresh++; 3507 3508 skb_put(skb, frame_len); 3509 dma_unmap_single(priv->device, 3510 rx_q->rx_skbuff_dma[entry], 3511 priv->dma_buf_sz, 3512 DMA_FROM_DEVICE); 3513 } 3514 3515 if (netif_msg_pktdata(priv)) { 3516 netdev_dbg(priv->dev, "frame received (%dbytes)", 3517 frame_len); 3518 print_pkt(skb->data, frame_len); 3519 } 3520 3521 stmmac_get_rx_hwtstamp(priv, p, np, skb); 3522 3523 stmmac_rx_vlan(priv->dev, skb); 3524 3525 skb->protocol = eth_type_trans(skb, priv->dev); 3526 3527 if (unlikely(!coe)) 3528 skb_checksum_none_assert(skb); 3529 else 3530 skb->ip_summed = CHECKSUM_UNNECESSARY; 3531 3532 napi_gro_receive(&rx_q->napi, skb); 3533 3534 priv->dev->stats.rx_packets++; 3535 priv->dev->stats.rx_bytes += frame_len; 3536 } 3537 entry = next_entry; 3538 } 3539 3540 stmmac_rx_refill(priv, queue); 3541 3542 priv->xstats.rx_pkt_n += count; 3543 3544 return count; 3545 } 3546 3547 /** 3548 * stmmac_poll - stmmac poll method (NAPI) 3549 * @napi : pointer to the napi structure. 3550 * @budget : maximum number of packets that the current CPU can receive from 3551 * all interfaces. 3552 * Description : 3553 * To look at the incoming frames and clear the tx resources. 3554 */ 3555 static int stmmac_poll(struct napi_struct *napi, int budget) 3556 { 3557 struct stmmac_rx_queue *rx_q = 3558 container_of(napi, struct stmmac_rx_queue, napi); 3559 struct stmmac_priv *priv = rx_q->priv_data; 3560 u32 tx_count = priv->plat->tx_queues_to_use; 3561 u32 chan = rx_q->queue_index; 3562 int work_done = 0; 3563 u32 queue; 3564 3565 priv->xstats.napi_poll++; 3566 3567 /* check all the queues */ 3568 for (queue = 0; queue < tx_count; queue++) 3569 stmmac_tx_clean(priv, queue); 3570 3571 work_done = stmmac_rx(priv, budget, rx_q->queue_index); 3572 if (work_done < budget) { 3573 napi_complete_done(napi, work_done); 3574 stmmac_enable_dma_irq(priv, priv->ioaddr, chan); 3575 } 3576 return work_done; 3577 } 3578 3579 /** 3580 * stmmac_tx_timeout 3581 * @dev : Pointer to net device structure 3582 * Description: this function is called when a packet transmission fails to 3583 * complete within a reasonable time. The driver will mark the error in the 3584 * netdev structure and arrange for the device to be reset to a sane state 3585 * in order to transmit a new packet. 
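 * The recovery itself is triggered by calling stmmac_global_err().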
3586 */ 3587 static void stmmac_tx_timeout(struct net_device *dev) 3588 { 3589 struct stmmac_priv *priv = netdev_priv(dev); 3590 3591 stmmac_global_err(priv); 3592 } 3593 3594 /** 3595 * stmmac_set_rx_mode - entry point for multicast addressing 3596 * @dev : pointer to the device structure 3597 * Description: 3598 * This function is a driver entry point which gets called by the kernel 3599 * whenever multicast addresses must be enabled/disabled. 3600 * Return value: 3601 * void. 3602 */ 3603 static void stmmac_set_rx_mode(struct net_device *dev) 3604 { 3605 struct stmmac_priv *priv = netdev_priv(dev); 3606 3607 stmmac_set_filter(priv, priv->hw, dev); 3608 } 3609 3610 /** 3611 * stmmac_change_mtu - entry point to change MTU size for the device. 3612 * @dev : device pointer. 3613 * @new_mtu : the new MTU size for the device. 3614 * Description: the Maximum Transfer Unit (MTU) is used by the network layer 3615 * to drive packet transmission. Ethernet has an MTU of 1500 octets 3616 * (ETH_DATA_LEN). This value can be changed with ifconfig. 3617 * Return value: 3618 * 0 on success and an appropriate (-)ve integer as defined in errno.h 3619 * file on failure. 3620 */ 3621 static int stmmac_change_mtu(struct net_device *dev, int new_mtu) 3622 { 3623 struct stmmac_priv *priv = netdev_priv(dev); 3624 3625 if (netif_running(dev)) { 3626 netdev_err(priv->dev, "must be stopped to change its MTU\n"); 3627 return -EBUSY; 3628 } 3629 3630 dev->mtu = new_mtu; 3631 3632 netdev_update_features(dev); 3633 3634 return 0; 3635 } 3636 3637 static netdev_features_t stmmac_fix_features(struct net_device *dev, 3638 netdev_features_t features) 3639 { 3640 struct stmmac_priv *priv = netdev_priv(dev); 3641 3642 if (priv->plat->rx_coe == STMMAC_RX_COE_NONE) 3643 features &= ~NETIF_F_RXCSUM; 3644 3645 if (!priv->plat->tx_coe) 3646 features &= ~NETIF_F_CSUM_MASK; 3647 3648 /* Some GMAC devices have a bugged Jumbo frame support that 3649 * needs to have the Tx COE disabled for oversized frames 3650 * (due to limited buffer sizes). In this case we disable 3651 * the TX csum insertion in the TDES and not use SF. 3652 */ 3653 if (priv->plat->bugged_jumbo && (dev->mtu > ETH_DATA_LEN)) 3654 features &= ~NETIF_F_CSUM_MASK; 3655 3656 /* Disable tso if asked by ethtool */ 3657 if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) { 3658 if (features & NETIF_F_TSO) 3659 priv->tso = true; 3660 else 3661 priv->tso = false; 3662 } 3663 3664 return features; 3665 } 3666 3667 static int stmmac_set_features(struct net_device *netdev, 3668 netdev_features_t features) 3669 { 3670 struct stmmac_priv *priv = netdev_priv(netdev); 3671 3672 /* Keep the COE Type in case of csum is supporting */ 3673 if (features & NETIF_F_RXCSUM) 3674 priv->hw->rx_csum = priv->plat->rx_coe; 3675 else 3676 priv->hw->rx_csum = 0; 3677 /* No check needed because rx_coe has been set before and it will be 3678 * fixed in case of issue. 3679 */ 3680 stmmac_rx_ipc(priv, priv->hw); 3681 3682 return 0; 3683 } 3684 3685 /** 3686 * stmmac_interrupt - main ISR 3687 * @irq: interrupt number. 3688 * @dev_id: to pass the net device pointer. 3689 * Description: this is the main driver interrupt service routine. 3690 * It can call: 3691 * o DMA service routine (to manage incoming frame reception and transmission 3692 * status) 3693 * o Core interrupts to manage: remote wake-up, management counter, LPI 3694 * interrupts. 
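 * o MTL interrupts (e.g. RX FIFO overflow) on GMAC4 and newer cores.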
3695 */ 3696 static irqreturn_t stmmac_interrupt(int irq, void *dev_id) 3697 { 3698 struct net_device *dev = (struct net_device *)dev_id; 3699 struct stmmac_priv *priv = netdev_priv(dev); 3700 u32 rx_cnt = priv->plat->rx_queues_to_use; 3701 u32 tx_cnt = priv->plat->tx_queues_to_use; 3702 u32 queues_count; 3703 u32 queue; 3704 3705 queues_count = (rx_cnt > tx_cnt) ? rx_cnt : tx_cnt; 3706 3707 if (priv->irq_wake) 3708 pm_wakeup_event(priv->device, 0); 3709 3710 if (unlikely(!dev)) { 3711 netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__); 3712 return IRQ_NONE; 3713 } 3714 3715 /* Check if adapter is up */ 3716 if (test_bit(STMMAC_DOWN, &priv->state)) 3717 return IRQ_HANDLED; 3718 /* Check if a fatal error happened */ 3719 if (stmmac_safety_feat_interrupt(priv)) 3720 return IRQ_HANDLED; 3721 3722 /* To handle GMAC own interrupts */ 3723 if ((priv->plat->has_gmac) || (priv->plat->has_gmac4)) { 3724 int status = stmmac_host_irq_status(priv, priv->hw, &priv->xstats); 3725 3726 if (unlikely(status)) { 3727 /* For LPI we need to save the tx status */ 3728 if (status & CORE_IRQ_TX_PATH_IN_LPI_MODE) 3729 priv->tx_path_in_lpi_mode = true; 3730 if (status & CORE_IRQ_TX_PATH_EXIT_LPI_MODE) 3731 priv->tx_path_in_lpi_mode = false; 3732 } 3733 3734 if (priv->synopsys_id >= DWMAC_CORE_4_00) { 3735 for (queue = 0; queue < queues_count; queue++) { 3736 struct stmmac_rx_queue *rx_q = 3737 &priv->rx_queue[queue]; 3738 3739 status |= stmmac_host_mtl_irq_status(priv, 3740 priv->hw, queue); 3741 3742 if (status & CORE_IRQ_MTL_RX_OVERFLOW) 3743 stmmac_set_rx_tail_ptr(priv, 3744 priv->ioaddr, 3745 rx_q->rx_tail_addr, 3746 queue); 3747 } 3748 } 3749 3750 /* PCS link status */ 3751 if (priv->hw->pcs) { 3752 if (priv->xstats.pcs_link) 3753 netif_carrier_on(dev); 3754 else 3755 netif_carrier_off(dev); 3756 } 3757 } 3758 3759 /* To handle DMA interrupts */ 3760 stmmac_dma_interrupt(priv); 3761 3762 return IRQ_HANDLED; 3763 } 3764 3765 #ifdef CONFIG_NET_POLL_CONTROLLER 3766 /* Polling receive - used by NETCONSOLE and other diagnostic tools 3767 * to allow network I/O with interrupts disabled. 3768 */ 3769 static void stmmac_poll_controller(struct net_device *dev) 3770 { 3771 disable_irq(dev->irq); 3772 stmmac_interrupt(dev->irq, dev); 3773 enable_irq(dev->irq); 3774 } 3775 #endif 3776 3777 /** 3778 * stmmac_ioctl - Entry point for the Ioctl 3779 * @dev: Device pointer. 3780 * @rq: An IOCTL specefic structure, that can contain a pointer to 3781 * a proprietary structure used to pass information to the driver. 3782 * @cmd: IOCTL command 3783 * Description: 3784 * Currently it supports the phy_mii_ioctl(...) and HW time stamping. 
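 * SIOCGMIIPHY/SIOCGMIIREG/SIOCSMIIREG are forwarded to phy_mii_ioctl() while
 * SIOCSHWTSTAMP is handled by stmmac_hwtstamp_ioctl().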
/**
 *  stmmac_ioctl - Entry point for the Ioctl
 *  @dev: Device pointer.
 *  @rq: An IOCTL specific structure, that can contain a pointer to
 *  a proprietary structure used to pass information to the driver.
 *  @cmd: IOCTL command
 *  Description:
 *  Currently it supports the phy_mii_ioctl(...) and HW time stamping.
 */
static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	int ret = -EOPNOTSUPP;

	if (!netif_running(dev))
		return -EINVAL;

	switch (cmd) {
	case SIOCGMIIPHY:
	case SIOCGMIIREG:
	case SIOCSMIIREG:
		if (!dev->phydev)
			return -EINVAL;
		ret = phy_mii_ioctl(dev->phydev, rq, cmd);
		break;
	case SIOCSHWTSTAMP:
		ret = stmmac_hwtstamp_ioctl(dev, rq);
		break;
	default:
		break;
	}

	return ret;
}
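
/* Illustrative userspace sketch (not part of the driver; names of the
 * interface and filter choices are assumptions): the SIOCSHWTSTAMP case
 * above is reached through the generic hwtstamp_config ABI from
 * <linux/net_tstamp.h>. Something along these lines would request TX and RX
 * hardware timestamping on a hypothetical "eth0":
 *
 *	struct hwtstamp_config cfg = { 0 };
 *	struct ifreq ifr = { 0 };
 *	int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *
 *	cfg.tx_type = HWTSTAMP_TX_ON;
 *	cfg.rx_filter = HWTSTAMP_FILTER_ALL;
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *	ifr.ifr_data = (char *)&cfg;
 *	ioctl(fd, SIOCSHWTSTAMP, &ifr);
 *
 * Which filters the hardware actually accepts is decided inside
 * stmmac_hwtstamp_ioctl(), not here.
 */
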
static int stmmac_set_mac_address(struct net_device *ndev, void *addr)
{
	struct stmmac_priv *priv = netdev_priv(ndev);
	int ret = 0;

	ret = eth_mac_addr(ndev, addr);
	if (ret)
		return ret;

	stmmac_set_umac_addr(priv, priv->hw, ndev->dev_addr, 0);

	return ret;
}

#ifdef CONFIG_DEBUG_FS
static struct dentry *stmmac_fs_dir;

static void sysfs_display_ring(void *head, int size, int extend_desc,
			       struct seq_file *seq)
{
	int i;
	struct dma_extended_desc *ep = (struct dma_extended_desc *)head;
	struct dma_desc *p = (struct dma_desc *)head;

	for (i = 0; i < size; i++) {
		if (extend_desc) {
			seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
				   i, (unsigned int)virt_to_phys(ep),
				   le32_to_cpu(ep->basic.des0),
				   le32_to_cpu(ep->basic.des1),
				   le32_to_cpu(ep->basic.des2),
				   le32_to_cpu(ep->basic.des3));
			ep++;
		} else {
			seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
				   i, (unsigned int)virt_to_phys(p),
				   le32_to_cpu(p->des0), le32_to_cpu(p->des1),
				   le32_to_cpu(p->des2), le32_to_cpu(p->des3));
			p++;
		}
		seq_printf(seq, "\n");
	}
}

static int stmmac_sysfs_ring_read(struct seq_file *seq, void *v)
{
	struct net_device *dev = seq->private;
	struct stmmac_priv *priv = netdev_priv(dev);
	u32 rx_count = priv->plat->rx_queues_to_use;
	u32 tx_count = priv->plat->tx_queues_to_use;
	u32 queue;

	for (queue = 0; queue < rx_count; queue++) {
		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];

		seq_printf(seq, "RX Queue %d:\n", queue);

		if (priv->extend_desc) {
			seq_printf(seq, "Extended descriptor ring:\n");
			sysfs_display_ring((void *)rx_q->dma_erx,
					   DMA_RX_SIZE, 1, seq);
		} else {
			seq_printf(seq, "Descriptor ring:\n");
			sysfs_display_ring((void *)rx_q->dma_rx,
					   DMA_RX_SIZE, 0, seq);
		}
	}

	for (queue = 0; queue < tx_count; queue++) {
		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];

		seq_printf(seq, "TX Queue %d:\n", queue);

		if (priv->extend_desc) {
			seq_printf(seq, "Extended descriptor ring:\n");
			sysfs_display_ring((void *)tx_q->dma_etx,
					   DMA_TX_SIZE, 1, seq);
		} else {
			seq_printf(seq, "Descriptor ring:\n");
			sysfs_display_ring((void *)tx_q->dma_tx,
					   DMA_TX_SIZE, 0, seq);
		}
	}

	return 0;
}

static int stmmac_sysfs_ring_open(struct inode *inode, struct file *file)
{
	return single_open(file, stmmac_sysfs_ring_read, inode->i_private);
}

/* Debugfs files, should appear in /sys/kernel/debug/stmmaceth/eth0 */

static const struct file_operations stmmac_rings_status_fops = {
	.owner = THIS_MODULE,
	.open = stmmac_sysfs_ring_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int stmmac_sysfs_dma_cap_read(struct seq_file *seq, void *v)
{
	struct net_device *dev = seq->private;
	struct stmmac_priv *priv = netdev_priv(dev);

	if (!priv->hw_cap_support) {
		seq_printf(seq, "DMA HW features not supported\n");
		return 0;
	}

	seq_printf(seq, "==============================\n");
	seq_printf(seq, "\tDMA HW features\n");
	seq_printf(seq, "==============================\n");

	seq_printf(seq, "\t10/100 Mbps: %s\n",
		   (priv->dma_cap.mbps_10_100) ? "Y" : "N");
	seq_printf(seq, "\t1000 Mbps: %s\n",
		   (priv->dma_cap.mbps_1000) ? "Y" : "N");
	seq_printf(seq, "\tHalf duplex: %s\n",
		   (priv->dma_cap.half_duplex) ? "Y" : "N");
	seq_printf(seq, "\tHash Filter: %s\n",
		   (priv->dma_cap.hash_filter) ? "Y" : "N");
	seq_printf(seq, "\tMultiple MAC address registers: %s\n",
		   (priv->dma_cap.multi_addr) ? "Y" : "N");
	seq_printf(seq, "\tPCS (TBI/SGMII/RTBI PHY interfaces): %s\n",
		   (priv->dma_cap.pcs) ? "Y" : "N");
	seq_printf(seq, "\tSMA (MDIO) Interface: %s\n",
		   (priv->dma_cap.sma_mdio) ? "Y" : "N");
	seq_printf(seq, "\tPMT Remote wake up: %s\n",
		   (priv->dma_cap.pmt_remote_wake_up) ? "Y" : "N");
	seq_printf(seq, "\tPMT Magic Frame: %s\n",
		   (priv->dma_cap.pmt_magic_frame) ? "Y" : "N");
	seq_printf(seq, "\tRMON module: %s\n",
		   (priv->dma_cap.rmon) ? "Y" : "N");
	seq_printf(seq, "\tIEEE 1588-2002 Time Stamp: %s\n",
		   (priv->dma_cap.time_stamp) ? "Y" : "N");
	seq_printf(seq, "\tIEEE 1588-2008 Advanced Time Stamp: %s\n",
		   (priv->dma_cap.atime_stamp) ? "Y" : "N");
	seq_printf(seq, "\t802.3az - Energy-Efficient Ethernet (EEE): %s\n",
		   (priv->dma_cap.eee) ? "Y" : "N");
	seq_printf(seq, "\tAV features: %s\n", (priv->dma_cap.av) ? "Y" : "N");
	seq_printf(seq, "\tChecksum Offload in TX: %s\n",
		   (priv->dma_cap.tx_coe) ? "Y" : "N");
	if (priv->synopsys_id >= DWMAC_CORE_4_00) {
		seq_printf(seq, "\tIP Checksum Offload in RX: %s\n",
			   (priv->dma_cap.rx_coe) ? "Y" : "N");
	} else {
		seq_printf(seq, "\tIP Checksum Offload (type1) in RX: %s\n",
			   (priv->dma_cap.rx_coe_type1) ? "Y" : "N");
		seq_printf(seq, "\tIP Checksum Offload (type2) in RX: %s\n",
			   (priv->dma_cap.rx_coe_type2) ? "Y" : "N");
	}
	seq_printf(seq, "\tRXFIFO > 2048bytes: %s\n",
		   (priv->dma_cap.rxfifo_over_2048) ? "Y" : "N");
	seq_printf(seq, "\tNumber of Additional RX channel: %d\n",
		   priv->dma_cap.number_rx_channel);
	seq_printf(seq, "\tNumber of Additional TX channel: %d\n",
		   priv->dma_cap.number_tx_channel);
	seq_printf(seq, "\tEnhanced descriptors: %s\n",
		   (priv->dma_cap.enh_desc) ? "Y" : "N");

	return 0;
}

static int stmmac_sysfs_dma_cap_open(struct inode *inode, struct file *file)
{
	return single_open(file, stmmac_sysfs_dma_cap_read, inode->i_private);
}

static const struct file_operations stmmac_dma_cap_fops = {
	.owner = THIS_MODULE,
	.open = stmmac_sysfs_dma_cap_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int stmmac_init_fs(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);

	/* Create per netdev entries */
	priv->dbgfs_dir = debugfs_create_dir(dev->name, stmmac_fs_dir);

	if (!priv->dbgfs_dir || IS_ERR(priv->dbgfs_dir)) {
		netdev_err(priv->dev, "ERROR failed to create debugfs directory\n");

		return -ENOMEM;
	}

	/* Entry to report DMA RX/TX rings */
	priv->dbgfs_rings_status =
		debugfs_create_file("descriptors_status", 0444,
				    priv->dbgfs_dir, dev,
				    &stmmac_rings_status_fops);

	if (!priv->dbgfs_rings_status || IS_ERR(priv->dbgfs_rings_status)) {
		netdev_err(priv->dev, "ERROR creating stmmac ring debugfs file\n");
		debugfs_remove_recursive(priv->dbgfs_dir);

		return -ENOMEM;
	}

	/* Entry to report the DMA HW features */
	priv->dbgfs_dma_cap = debugfs_create_file("dma_cap", 0444,
						  priv->dbgfs_dir,
						  dev, &stmmac_dma_cap_fops);

	if (!priv->dbgfs_dma_cap || IS_ERR(priv->dbgfs_dma_cap)) {
		netdev_err(priv->dev, "ERROR creating stmmac dma_cap debugfs file\n");
		debugfs_remove_recursive(priv->dbgfs_dir);

		return -ENOMEM;
	}

	return 0;
}

static void stmmac_exit_fs(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);

	debugfs_remove_recursive(priv->dbgfs_dir);
}
#endif /* CONFIG_DEBUG_FS */
static const struct net_device_ops stmmac_netdev_ops = {
	.ndo_open = stmmac_open,
	.ndo_start_xmit = stmmac_xmit,
	.ndo_stop = stmmac_release,
	.ndo_change_mtu = stmmac_change_mtu,
	.ndo_fix_features = stmmac_fix_features,
	.ndo_set_features = stmmac_set_features,
	.ndo_set_rx_mode = stmmac_set_rx_mode,
	.ndo_tx_timeout = stmmac_tx_timeout,
	.ndo_do_ioctl = stmmac_ioctl,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = stmmac_poll_controller,
#endif
	.ndo_set_mac_address = stmmac_set_mac_address,
};

static void stmmac_reset_subtask(struct stmmac_priv *priv)
{
	if (!test_and_clear_bit(STMMAC_RESET_REQUESTED, &priv->state))
		return;
	if (test_bit(STMMAC_DOWN, &priv->state))
		return;

	netdev_err(priv->dev, "Reset adapter.\n");

	rtnl_lock();
	netif_trans_update(priv->dev);
	while (test_and_set_bit(STMMAC_RESETING, &priv->state))
		usleep_range(1000, 2000);

	set_bit(STMMAC_DOWN, &priv->state);
	dev_close(priv->dev);
	dev_open(priv->dev);
	clear_bit(STMMAC_DOWN, &priv->state);
	clear_bit(STMMAC_RESETING, &priv->state);
	rtnl_unlock();
}

static void stmmac_service_task(struct work_struct *work)
{
	struct stmmac_priv *priv = container_of(work, struct stmmac_priv,
			service_task);

	stmmac_reset_subtask(priv);
	clear_bit(STMMAC_SERVICE_SCHED, &priv->state);
}

/**
 *  stmmac_hw_init - Init the MAC device
 *  @priv: driver private structure
 *  Description: this function configures the MAC device according to
 *  platform parameters and the HW capability register. It prepares the
 *  driver to use either ring or chain mode and to set up either enhanced or
 *  normal descriptors.
 */
static int stmmac_hw_init(struct stmmac_priv *priv)
{
	struct mac_device_info *mac;

	/* Identify the MAC HW device */
	if (priv->plat->setup) {
		mac = priv->plat->setup(priv);
	} else if (priv->plat->has_gmac) {
		priv->dev->priv_flags |= IFF_UNICAST_FLT;
		mac = dwmac1000_setup(priv->ioaddr,
				      priv->plat->multicast_filter_bins,
				      priv->plat->unicast_filter_entries,
				      &priv->synopsys_id);
	} else if (priv->plat->has_gmac4) {
		priv->dev->priv_flags |= IFF_UNICAST_FLT;
		mac = dwmac4_setup(priv->ioaddr,
				   priv->plat->multicast_filter_bins,
				   priv->plat->unicast_filter_entries,
				   &priv->synopsys_id);
	} else {
		mac = dwmac100_setup(priv->ioaddr, &priv->synopsys_id);
	}
	if (!mac)
		return -ENOMEM;

	priv->hw = mac;

	/* dwmac-sun8i only works in chain mode */
	if (priv->plat->has_sun8i)
		chain_mode = 1;

	/* To use the chained or ring mode */
	if (priv->synopsys_id >= DWMAC_CORE_4_00) {
		priv->hw->mode = &dwmac4_ring_mode_ops;
	} else {
		if (chain_mode) {
			priv->hw->mode = &chain_mode_ops;
			dev_info(priv->device, "Chain mode enabled\n");
			priv->mode = STMMAC_CHAIN_MODE;
		} else {
			priv->hw->mode = &ring_mode_ops;
			dev_info(priv->device, "Ring mode enabled\n");
			priv->mode = STMMAC_RING_MODE;
		}
	}

	/* Get the HW capability (available on GMAC cores newer than 3.50a) */
	priv->hw_cap_support = stmmac_get_hw_features(priv);
	if (priv->hw_cap_support) {
		dev_info(priv->device, "DMA HW capability register supported\n");

		/* We can override some gmac/dma configuration fields
		 * (e.g. enh_desc, tx_coe) that are passed through the
		 * platform with the values from the HW capability
		 * register (if supported).
		 */
		priv->plat->enh_desc = priv->dma_cap.enh_desc;
		priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up;
		priv->hw->pmt = priv->plat->pmt;

		/* TXCOE doesn't work in thresh DMA mode */
		if (priv->plat->force_thresh_dma_mode)
			priv->plat->tx_coe = 0;
		else
			priv->plat->tx_coe = priv->dma_cap.tx_coe;

		/* In case of GMAC4 rx_coe is from HW cap register. */
		priv->plat->rx_coe = priv->dma_cap.rx_coe;

		if (priv->dma_cap.rx_coe_type2)
			priv->plat->rx_coe = STMMAC_RX_COE_TYPE2;
		else if (priv->dma_cap.rx_coe_type1)
			priv->plat->rx_coe = STMMAC_RX_COE_TYPE1;

	} else {
		dev_info(priv->device, "No HW DMA feature register supported\n");
	}

	/* To use alternate (extended), normal or GMAC4 descriptor structures */
	if (priv->synopsys_id >= DWMAC_CORE_4_00)
		priv->hw->desc = &dwmac4_desc_ops;
	else
		stmmac_selec_desc_mode(priv);

	if (priv->plat->rx_coe) {
		priv->hw->rx_csum = priv->plat->rx_coe;
		dev_info(priv->device, "RX Checksum Offload Engine supported\n");
		if (priv->synopsys_id < DWMAC_CORE_4_00)
			dev_info(priv->device, "COE Type %d\n", priv->hw->rx_csum);
	}
	if (priv->plat->tx_coe)
		dev_info(priv->device, "TX Checksum insertion supported\n");

	if (priv->plat->pmt) {
		dev_info(priv->device, "Wake-Up On LAN supported\n");
		device_set_wakeup_capable(priv->device, 1);
	}

	if (priv->dma_cap.tsoen)
		dev_info(priv->device, "TSO supported\n");

	return 0;
}
/**
 * stmmac_dvr_probe
 * @device: device pointer
 * @plat_dat: platform data pointer
 * @res: stmmac resource pointer
 * Description: this is the main probe function used to
 * call alloc_etherdev and allocate the priv structure.
 * Return:
 * returns 0 on success, otherwise errno.
 */
int stmmac_dvr_probe(struct device *device,
		     struct plat_stmmacenet_data *plat_dat,
		     struct stmmac_resources *res)
{
	struct net_device *ndev = NULL;
	struct stmmac_priv *priv;
	int ret = 0;
	u32 queue;

	ndev = alloc_etherdev_mqs(sizeof(struct stmmac_priv),
				  MTL_MAX_TX_QUEUES,
				  MTL_MAX_RX_QUEUES);
	if (!ndev)
		return -ENOMEM;

	SET_NETDEV_DEV(ndev, device);

	priv = netdev_priv(ndev);
	priv->device = device;
	priv->dev = ndev;

	stmmac_set_ethtool_ops(ndev);
	priv->pause = pause;
	priv->plat = plat_dat;
	priv->ioaddr = res->addr;
	priv->dev->base_addr = (unsigned long)res->addr;

	priv->dev->irq = res->irq;
	priv->wol_irq = res->wol_irq;
	priv->lpi_irq = res->lpi_irq;

	if (res->mac)
		memcpy(priv->dev->dev_addr, res->mac, ETH_ALEN);

	dev_set_drvdata(device, priv->dev);

	/* Verify driver arguments */
	stmmac_verify_args();

	/* Allocate workqueue */
	priv->wq = create_singlethread_workqueue("stmmac_wq");
	if (!priv->wq) {
		dev_err(priv->device, "failed to create workqueue\n");
		goto error_wq;
	}

	INIT_WORK(&priv->service_task, stmmac_service_task);

	/* Override with kernel parameters if supplied XXX CRS XXX
	 * this needs to have multiple instances
	 */
	if ((phyaddr >= 0) && (phyaddr <= 31))
		priv->plat->phy_addr = phyaddr;

	if (priv->plat->stmmac_rst) {
		ret = reset_control_assert(priv->plat->stmmac_rst);
		reset_control_deassert(priv->plat->stmmac_rst);
		/* Some reset controllers have only a reset callback instead
		 * of the assert + deassert callback pair.
		 */
		if (ret == -ENOTSUPP)
			reset_control_reset(priv->plat->stmmac_rst);
	}

	/* Init MAC and get the capabilities */
	ret = stmmac_hw_init(priv);
	if (ret)
		goto error_hw_init;

	/* Configure real RX and TX queues */
	netif_set_real_num_rx_queues(ndev, priv->plat->rx_queues_to_use);
	netif_set_real_num_tx_queues(ndev, priv->plat->tx_queues_to_use);

	ndev->netdev_ops = &stmmac_netdev_ops;

	ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
			    NETIF_F_RXCSUM;

	if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
		ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
		priv->tso = true;
		dev_info(priv->device, "TSO feature enabled\n");
	}
	ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;
	ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
#ifdef STMMAC_VLAN_TAG_USED
	/* Both mac100 and gmac support receive VLAN tag detection */
	ndev->features |= NETIF_F_HW_VLAN_CTAG_RX;
#endif
	priv->msg_enable = netif_msg_init(debug, default_msg_level);

	/* MTU range: 46 - hw-specific max */
	ndev->min_mtu = ETH_ZLEN - ETH_HLEN;
	if ((priv->plat->enh_desc) || (priv->synopsys_id >= DWMAC_CORE_4_00))
		ndev->max_mtu = JUMBO_LEN;
	else
		ndev->max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN);
	/* Do not overwrite ndev->max_mtu if plat->maxmtu is greater than
	 * ndev->max_mtu, or if plat->maxmtu is less than ndev->min_mtu,
	 * which is an invalid range.
	 */
	if ((priv->plat->maxmtu < ndev->max_mtu) &&
	    (priv->plat->maxmtu >= ndev->min_mtu))
		ndev->max_mtu = priv->plat->maxmtu;
	else if (priv->plat->maxmtu < ndev->min_mtu)
		dev_warn(priv->device,
			 "%s: warning: maxmtu has an invalid value (%d)\n",
			 __func__, priv->plat->maxmtu);

	if (flow_ctrl)
		priv->flow_ctrl = FLOW_AUTO;	/* RX/TX pause on */

	/* Rx Watchdog is available in cores newer than 3.40.
	 * In some cases, for example on buggy HW, this feature
	 * has to be disabled; this can be done by passing the
	 * riwt_off field from the platform.
	 */
	if ((priv->synopsys_id >= DWMAC_CORE_3_50) && (!priv->plat->riwt_off)) {
		priv->use_riwt = 1;
		dev_info(priv->device,
			 "Enable RX Mitigation via HW Watchdog Timer\n");
	}
	for (queue = 0; queue < priv->plat->rx_queues_to_use; queue++) {
		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];

		netif_napi_add(ndev, &rx_q->napi, stmmac_poll,
			       (8 * priv->plat->rx_queues_to_use));
	}

	spin_lock_init(&priv->lock);

	/* If a specific clk_csr value is passed from the platform
	 * this means that the CSR Clock Range selection cannot be
	 * changed at run-time and it is fixed. Otherwise the driver
	 * will try to set the MDC clock dynamically according to the
	 * actual csr clock input.
	 */
	if (!priv->plat->clk_csr)
		stmmac_clk_csr_set(priv);
	else
		priv->clk_csr = priv->plat->clk_csr;

	stmmac_check_pcs_mode(priv);

	if (priv->hw->pcs != STMMAC_PCS_RGMII &&
	    priv->hw->pcs != STMMAC_PCS_TBI &&
	    priv->hw->pcs != STMMAC_PCS_RTBI) {
		/* MDIO bus Registration */
		ret = stmmac_mdio_register(ndev);
		if (ret < 0) {
			dev_err(priv->device,
				"%s: MDIO bus (id: %d) registration failed",
				__func__, priv->plat->bus_id);
			goto error_mdio_register;
		}
	}

	ret = register_netdev(ndev);
	if (ret) {
		dev_err(priv->device, "%s: ERROR %i registering the device\n",
			__func__, ret);
		goto error_netdev_register;
	}

	return ret;

error_netdev_register:
	if (priv->hw->pcs != STMMAC_PCS_RGMII &&
	    priv->hw->pcs != STMMAC_PCS_TBI &&
	    priv->hw->pcs != STMMAC_PCS_RTBI)
		stmmac_mdio_unregister(ndev);
error_mdio_register:
	for (queue = 0; queue < priv->plat->rx_queues_to_use; queue++) {
		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];

		netif_napi_del(&rx_q->napi);
	}
error_hw_init:
	destroy_workqueue(priv->wq);
error_wq:
	free_netdev(ndev);

	return ret;
}
EXPORT_SYMBOL_GPL(stmmac_dvr_probe);
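
/* Illustrative sketch (not part of this file): stmmac_dvr_probe() is meant
 * to be called by a bus glue driver (platform or PCI) after it has filled
 * in the resources consumed above (res->addr, res->irq, res->wol_irq,
 * res->lpi_irq, res->mac) and a plat_stmmacenet_data. A hypothetical glue
 * probe could look roughly like this; my_get_platform_data() and
 * my_mac_address() are placeholders, not real APIs, and error handling is
 * omitted for brevity:
 *
 *	static int my_glue_probe(struct platform_device *pdev)
 *	{
 *		struct resource *r;
 *		struct stmmac_resources res = { 0 };
 *		struct plat_stmmacenet_data *plat;
 *
 *		r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 *		res.addr = devm_ioremap_resource(&pdev->dev, r);
 *		res.irq = platform_get_irq(pdev, 0);
 *		res.mac = my_mac_address(pdev);
 *		plat = my_get_platform_data(pdev);
 *
 *		return stmmac_dvr_probe(&pdev->dev, plat, &res);
 *	}
 */
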
/**
 * stmmac_dvr_remove
 * @dev: device pointer
 * Description: this function resets the TX/RX processes, disables the MAC
 * RX/TX, changes the link status and releases the DMA descriptor rings.
 */
int stmmac_dvr_remove(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	struct stmmac_priv *priv = netdev_priv(ndev);

	netdev_info(priv->dev, "%s: removing driver", __func__);

	stmmac_stop_all_dma(priv);

	stmmac_mac_set(priv, priv->ioaddr, false);
	netif_carrier_off(ndev);
	unregister_netdev(ndev);
	if (priv->plat->stmmac_rst)
		reset_control_assert(priv->plat->stmmac_rst);
	clk_disable_unprepare(priv->plat->pclk);
	clk_disable_unprepare(priv->plat->stmmac_clk);
	if (priv->hw->pcs != STMMAC_PCS_RGMII &&
	    priv->hw->pcs != STMMAC_PCS_TBI &&
	    priv->hw->pcs != STMMAC_PCS_RTBI)
		stmmac_mdio_unregister(ndev);
	destroy_workqueue(priv->wq);
	free_netdev(ndev);

	return 0;
}
EXPORT_SYMBOL_GPL(stmmac_dvr_remove);

/**
 * stmmac_suspend - suspend callback
 * @dev: device pointer
 * Description: this function suspends the device; it is called by the
 * platform driver to stop the network queues, release the resources,
 * program the PMT register (for WoL) and clean up and release driver
 * resources.
 */
int stmmac_suspend(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	struct stmmac_priv *priv = netdev_priv(ndev);
	unsigned long flags;

	if (!ndev || !netif_running(ndev))
		return 0;

	if (ndev->phydev)
		phy_stop(ndev->phydev);

	spin_lock_irqsave(&priv->lock, flags);

	netif_device_detach(ndev);
	stmmac_stop_all_queues(priv);

	stmmac_disable_all_queues(priv);

	/* Stop TX/RX DMA */
	stmmac_stop_all_dma(priv);

	/* Enable Power down mode by programming the PMT regs */
	if (device_may_wakeup(priv->device)) {
		stmmac_pmt(priv, priv->hw, priv->wolopts);
		priv->irq_wake = 1;
	} else {
		stmmac_mac_set(priv, priv->ioaddr, false);
		pinctrl_pm_select_sleep_state(priv->device);
		/* Disable the clocks since wakeup (WoL) is not used */
		clk_disable(priv->plat->pclk);
		clk_disable(priv->plat->stmmac_clk);
	}
	spin_unlock_irqrestore(&priv->lock, flags);

	priv->oldlink = false;
	priv->speed = SPEED_UNKNOWN;
	priv->oldduplex = DUPLEX_UNKNOWN;
	return 0;
}
EXPORT_SYMBOL_GPL(stmmac_suspend);
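
/* Illustrative usage note (not from the original sources): the WoL branch
 * above (stmmac_pmt() with priv->wolopts) only takes effect when the device
 * was marked wakeup-capable in stmmac_hw_init() and userspace has enabled a
 * wake option, e.g. magic-packet wake on a hypothetical eth0:
 *
 *	ethtool -s eth0 wol g
 *
 * Which options are actually honoured is decided by the ethtool ops and the
 * PMT capabilities reported in dma_cap, not by this function.
 */
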
/**
 * stmmac_reset_queues_param - reset queue parameters
 * @priv: driver private structure
 */
static void stmmac_reset_queues_param(struct stmmac_priv *priv)
{
	u32 rx_cnt = priv->plat->rx_queues_to_use;
	u32 tx_cnt = priv->plat->tx_queues_to_use;
	u32 queue;

	for (queue = 0; queue < rx_cnt; queue++) {
		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];

		rx_q->cur_rx = 0;
		rx_q->dirty_rx = 0;
	}

	for (queue = 0; queue < tx_cnt; queue++) {
		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];

		tx_q->cur_tx = 0;
		tx_q->dirty_tx = 0;
		tx_q->mss = 0;
	}
}

/**
 * stmmac_resume - resume callback
 * @dev: device pointer
 * Description: on resume this function is invoked to set up the DMA and CORE
 * in a usable state.
 */
int stmmac_resume(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	struct stmmac_priv *priv = netdev_priv(ndev);
	unsigned long flags;

	if (!netif_running(ndev))
		return 0;

	/* The Power-Down bit in the PMT register is cleared automatically
	 * as soon as a magic packet or a Wake-up frame is received.
	 * Nevertheless, it is better to clear this bit manually because it
	 * can cause problems when resuming from other devices (e.g. a
	 * serial console).
	 */
	if (device_may_wakeup(priv->device)) {
		spin_lock_irqsave(&priv->lock, flags);
		stmmac_pmt(priv, priv->hw, 0);
		spin_unlock_irqrestore(&priv->lock, flags);
		priv->irq_wake = 0;
	} else {
		pinctrl_pm_select_default_state(priv->device);
		/* enable the clocks previously disabled */
		clk_enable(priv->plat->stmmac_clk);
		clk_enable(priv->plat->pclk);
		/* reset the phy so that it's ready */
		if (priv->mii)
			stmmac_mdio_reset(priv->mii);
	}

	netif_device_attach(ndev);

	spin_lock_irqsave(&priv->lock, flags);

	stmmac_reset_queues_param(priv);

	stmmac_clear_descriptors(priv);

	stmmac_hw_setup(ndev, false);
	stmmac_init_tx_coalesce(priv);
	stmmac_set_rx_mode(ndev);

	stmmac_enable_all_queues(priv);

	stmmac_start_all_queues(priv);

	spin_unlock_irqrestore(&priv->lock, flags);

	if (ndev->phydev)
		phy_start(ndev->phydev);

	return 0;
}
EXPORT_SYMBOL_GPL(stmmac_resume);
#ifndef MODULE
static int __init stmmac_cmdline_opt(char *str)
{
	char *opt;

	if (!str || !*str)
		return -EINVAL;
	while ((opt = strsep(&str, ",")) != NULL) {
		if (!strncmp(opt, "debug:", 6)) {
			if (kstrtoint(opt + 6, 0, &debug))
				goto err;
		} else if (!strncmp(opt, "phyaddr:", 8)) {
			if (kstrtoint(opt + 8, 0, &phyaddr))
				goto err;
		} else if (!strncmp(opt, "buf_sz:", 7)) {
			if (kstrtoint(opt + 7, 0, &buf_sz))
				goto err;
		} else if (!strncmp(opt, "tc:", 3)) {
			if (kstrtoint(opt + 3, 0, &tc))
				goto err;
		} else if (!strncmp(opt, "watchdog:", 9)) {
			if (kstrtoint(opt + 9, 0, &watchdog))
				goto err;
		} else if (!strncmp(opt, "flow_ctrl:", 10)) {
			if (kstrtoint(opt + 10, 0, &flow_ctrl))
				goto err;
		} else if (!strncmp(opt, "pause:", 6)) {
			if (kstrtoint(opt + 6, 0, &pause))
				goto err;
		} else if (!strncmp(opt, "eee_timer:", 10)) {
			if (kstrtoint(opt + 10, 0, &eee_timer))
				goto err;
		} else if (!strncmp(opt, "chain_mode:", 11)) {
			if (kstrtoint(opt + 11, 0, &chain_mode))
				goto err;
		}
	}
	return 0;

err:
	pr_err("%s: ERROR broken module parameter conversion", __func__);
	return -EINVAL;
}

__setup("stmmaceth=", stmmac_cmdline_opt);
#endif /* MODULE */

static int __init stmmac_init(void)
{
#ifdef CONFIG_DEBUG_FS
	/* Create debugfs main directory if it doesn't exist yet */
	if (!stmmac_fs_dir) {
		stmmac_fs_dir = debugfs_create_dir(STMMAC_RESOURCE_NAME, NULL);

		if (!stmmac_fs_dir || IS_ERR(stmmac_fs_dir)) {
			pr_err("ERROR %s, debugfs create directory failed\n",
			       STMMAC_RESOURCE_NAME);

			return -ENOMEM;
		}
	}
#endif

	return 0;
}

static void __exit stmmac_exit(void)
{
#ifdef CONFIG_DEBUG_FS
	debugfs_remove_recursive(stmmac_fs_dir);
#endif
}

module_init(stmmac_init)
module_exit(stmmac_exit)

MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet device driver");
MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
MODULE_LICENSE("GPL");
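
/* Illustrative usage note (not from the original sources): when the driver
 * is built in, stmmac_cmdline_opt() parses a comma-separated option string
 * from the kernel command line; the option names mirror the module
 * parameters, e.g.:
 *
 *	stmmaceth=debug:16,phyaddr:1,eee_timer:2000,chain_mode:1
 *
 * The values above are only examples, not recommended settings.
 */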