// SPDX-License-Identifier: GPL-2.0-only
/*******************************************************************************
  This is the driver for the ST MAC 10/100/1000 on-chip Ethernet controllers.
  ST Ethernet IPs are built around a Synopsys IP Core.

	Copyright(C) 2007-2011 STMicroelectronics Ltd


  Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>

  Documentation available at:
	http://www.stlinux.com
  Support available at:
	https://bugzilla.stlinux.com/
*******************************************************************************/

#include <linux/clk.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/if_ether.h>
#include <linux/crc32.h>
#include <linux/mii.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/pm_runtime.h>
#include <linux/prefetch.h>
#include <linux/pinctrl/consumer.h>
#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#endif /* CONFIG_DEBUG_FS */
#include <linux/net_tstamp.h>
#include <linux/phylink.h>
#include <linux/udp.h>
#include <net/pkt_cls.h>
#include "stmmac_ptp.h"
#include "stmmac.h"
#include <linux/reset.h>
#include <linux/of_mdio.h>
#include "dwmac1000.h"
#include "dwxgmac2.h"
#include "hwif.h"

#define STMMAC_ALIGN(x)		ALIGN(ALIGN(x, SMP_CACHE_BYTES), 16)
#define TSO_MAX_BUFF_SIZE	(SZ_16K - 1)

/* Module parameters */
#define TX_TIMEO	5000
static int watchdog = TX_TIMEO;
module_param(watchdog, int, 0644);
MODULE_PARM_DESC(watchdog, "Transmit timeout in milliseconds (default 5s)");

static int debug = -1;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)");

static int phyaddr = -1;
module_param(phyaddr, int, 0444);
MODULE_PARM_DESC(phyaddr, "Physical device address");

#define STMMAC_TX_THRESH(x)	((x)->dma_tx_size / 4)
#define STMMAC_RX_THRESH(x)	((x)->dma_rx_size / 4)

static int flow_ctrl = FLOW_AUTO;
module_param(flow_ctrl, int, 0644);
MODULE_PARM_DESC(flow_ctrl, "Flow control ability [on/off]");

static int pause = PAUSE_TIME;
module_param(pause, int, 0644);
MODULE_PARM_DESC(pause, "Flow Control Pause Time");

#define TC_DEFAULT 64
static int tc = TC_DEFAULT;
module_param(tc, int, 0644);
MODULE_PARM_DESC(tc, "DMA threshold control value");

#define DEFAULT_BUFSIZE	1536
static int buf_sz = DEFAULT_BUFSIZE;
module_param(buf_sz, int, 0644);
MODULE_PARM_DESC(buf_sz, "DMA buffer size");

#define STMMAC_RX_COPYBREAK	256

static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
				      NETIF_MSG_LINK | NETIF_MSG_IFUP |
				      NETIF_MSG_IFDOWN | NETIF_MSG_TIMER);

#define STMMAC_DEFAULT_LPI_TIMER	1000
static int eee_timer = STMMAC_DEFAULT_LPI_TIMER;
module_param(eee_timer, int, 0644);
MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec");
#define STMMAC_LPI_T(x) (jiffies + usecs_to_jiffies(x))

/* By default the driver will use the ring mode to manage tx and rx descriptors,
 * but allow user to force to use the chain instead of the ring
 */
static unsigned int chain_mode;
module_param(chain_mode, int, 0444);
MODULE_PARM_DESC(chain_mode, "To use chain instead of ring mode");

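/* Usage note (illustrative, not upstream documentation): the module
 * parameters above can be set at load time or, for those registered with
 * 0644 permissions, changed at runtime through sysfs. Assuming the driver
 * core is built as a module named "stmmac", for example:
 *
 *   modprobe stmmac eee_timer=2000 buf_sz=4096
 *   echo 8 > /sys/module/stmmac/parameters/debug
 *
 * The exact module name and whether the driver is built in depend on the
 * platform configuration, so treat the paths above as an example only.
 */
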
static irqreturn_t stmmac_interrupt(int irq, void *dev_id);
/* For MSI interrupts handling */
static irqreturn_t stmmac_mac_interrupt(int irq, void *dev_id);
static irqreturn_t stmmac_safety_interrupt(int irq, void *dev_id);
static irqreturn_t stmmac_msi_intr_tx(int irq, void *data);
static irqreturn_t stmmac_msi_intr_rx(int irq, void *data);

#ifdef CONFIG_DEBUG_FS
static const struct net_device_ops stmmac_netdev_ops;
static void stmmac_init_fs(struct net_device *dev);
static void stmmac_exit_fs(struct net_device *dev);
#endif

#define STMMAC_COAL_TIMER(x) (ns_to_ktime((x) * NSEC_PER_USEC))

int stmmac_bus_clks_config(struct stmmac_priv *priv, bool enabled)
{
	int ret = 0;

	if (enabled) {
		ret = clk_prepare_enable(priv->plat->stmmac_clk);
		if (ret)
			return ret;
		ret = clk_prepare_enable(priv->plat->pclk);
		if (ret) {
			clk_disable_unprepare(priv->plat->stmmac_clk);
			return ret;
		}
		if (priv->plat->clks_config) {
			ret = priv->plat->clks_config(priv->plat->bsp_priv, enabled);
			if (ret) {
				clk_disable_unprepare(priv->plat->stmmac_clk);
				clk_disable_unprepare(priv->plat->pclk);
				return ret;
			}
		}
	} else {
		clk_disable_unprepare(priv->plat->stmmac_clk);
		clk_disable_unprepare(priv->plat->pclk);
		if (priv->plat->clks_config)
			priv->plat->clks_config(priv->plat->bsp_priv, enabled);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(stmmac_bus_clks_config);

/**
 * stmmac_verify_args - verify the driver parameters.
 * Description: it checks the driver parameters and sets a default in case of
 * errors.
 */
static void stmmac_verify_args(void)
{
	if (unlikely(watchdog < 0))
		watchdog = TX_TIMEO;
	if (unlikely((buf_sz < DEFAULT_BUFSIZE) || (buf_sz > BUF_SIZE_16KiB)))
		buf_sz = DEFAULT_BUFSIZE;
	if (unlikely(flow_ctrl > 1))
		flow_ctrl = FLOW_AUTO;
	else if (likely(flow_ctrl < 0))
		flow_ctrl = FLOW_OFF;
	if (unlikely((pause < 0) || (pause > 0xffff)))
		pause = PAUSE_TIME;
	if (eee_timer < 0)
		eee_timer = STMMAC_DEFAULT_LPI_TIMER;
}

/**
 * stmmac_disable_all_queues - Disable all queues
 * @priv: driver private structure
 */
static void stmmac_disable_all_queues(struct stmmac_priv *priv)
{
	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
	u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
	u32 queue;

	for (queue = 0; queue < maxq; queue++) {
		struct stmmac_channel *ch = &priv->channel[queue];

		if (queue < rx_queues_cnt)
			napi_disable(&ch->rx_napi);
		if (queue < tx_queues_cnt)
			napi_disable(&ch->tx_napi);
	}
}

/**
 * stmmac_enable_all_queues - Enable all queues
 * @priv: driver private structure
 */
static void stmmac_enable_all_queues(struct stmmac_priv *priv)
{
	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
	u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
	u32 queue;

	for (queue = 0; queue < maxq; queue++) {
		struct stmmac_channel *ch = &priv->channel[queue];

		if (queue < rx_queues_cnt)
			napi_enable(&ch->rx_napi);
		if (queue < tx_queues_cnt)
			napi_enable(&ch->tx_napi);
	}
}

static void stmmac_service_event_schedule(struct stmmac_priv *priv)
{
	if (!test_bit(STMMAC_DOWN, &priv->state) &&
	    !test_and_set_bit(STMMAC_SERVICE_SCHED, &priv->state))
		queue_work(priv->wq, &priv->service_task);
}

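/* Note (added for clarity): STMMAC_SERVICE_SCHED acts as a "work already
 * queued" latch so that concurrent error paths do not queue the service
 * task twice. The work handler, which lives outside this section, is
 * expected to clear the bit once it runs; only the test_and_set_bit()
 * above is relied upon here.
 */
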
static void stmmac_global_err(struct stmmac_priv *priv)
{
	netif_carrier_off(priv->dev);
	set_bit(STMMAC_RESET_REQUESTED, &priv->state);
	stmmac_service_event_schedule(priv);
}

/**
 * stmmac_clk_csr_set - dynamically set the MDC clock
 * @priv: driver private structure
 * Description: this is to dynamically set the MDC clock according to the csr
 * clock input.
 * Note:
 *	If a specific clk_csr value is passed from the platform
 *	this means that the CSR Clock Range selection cannot be
 *	changed at run-time and it is fixed (as reported in the driver
 *	documentation). Otherwise the driver will try to set the MDC
 *	clock dynamically according to the actual clock input.
 */
static void stmmac_clk_csr_set(struct stmmac_priv *priv)
{
	u32 clk_rate;

	clk_rate = clk_get_rate(priv->plat->stmmac_clk);

	/* Platform provided default clk_csr would be assumed valid
	 * for all other cases except for the below mentioned ones.
	 * For values higher than the IEEE 802.3 specified frequency
	 * we cannot estimate the proper divider as the frequency of
	 * clk_csr_i is not known. So we do not change the default
	 * divider.
	 */
	if (!(priv->clk_csr & MAC_CSR_H_FRQ_MASK)) {
		if (clk_rate < CSR_F_35M)
			priv->clk_csr = STMMAC_CSR_20_35M;
		else if ((clk_rate >= CSR_F_35M) && (clk_rate < CSR_F_60M))
			priv->clk_csr = STMMAC_CSR_35_60M;
		else if ((clk_rate >= CSR_F_60M) && (clk_rate < CSR_F_100M))
			priv->clk_csr = STMMAC_CSR_60_100M;
		else if ((clk_rate >= CSR_F_100M) && (clk_rate < CSR_F_150M))
			priv->clk_csr = STMMAC_CSR_100_150M;
		else if ((clk_rate >= CSR_F_150M) && (clk_rate < CSR_F_250M))
			priv->clk_csr = STMMAC_CSR_150_250M;
		else if ((clk_rate >= CSR_F_250M) && (clk_rate < CSR_F_300M))
			priv->clk_csr = STMMAC_CSR_250_300M;
	}

	if (priv->plat->has_sun8i) {
		if (clk_rate > 160000000)
			priv->clk_csr = 0x03;
		else if (clk_rate > 80000000)
			priv->clk_csr = 0x02;
		else if (clk_rate > 40000000)
			priv->clk_csr = 0x01;
		else
			priv->clk_csr = 0;
	}

	if (priv->plat->has_xgmac) {
		if (clk_rate > 400000000)
			priv->clk_csr = 0x5;
		else if (clk_rate > 350000000)
			priv->clk_csr = 0x4;
		else if (clk_rate > 300000000)
			priv->clk_csr = 0x3;
		else if (clk_rate > 250000000)
			priv->clk_csr = 0x2;
		else if (clk_rate > 150000000)
			priv->clk_csr = 0x1;
		else
			priv->clk_csr = 0x0;
	}
}

static void print_pkt(unsigned char *buf, int len)
{
	pr_debug("len = %d byte, buf addr: 0x%p\n", len, buf);
	print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, buf, len);
}

static inline u32 stmmac_tx_avail(struct stmmac_priv *priv, u32 queue)
{
	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
	u32 avail;

	if (tx_q->dirty_tx > tx_q->cur_tx)
		avail = tx_q->dirty_tx - tx_q->cur_tx - 1;
	else
		avail = priv->dma_tx_size - tx_q->cur_tx + tx_q->dirty_tx - 1;

	return avail;
}

/**
 * stmmac_rx_dirty - Get RX queue dirty
 * @priv: driver private structure
 * @queue: RX queue index
 */
static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv, u32 queue)
{
	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
	u32 dirty;

	if (rx_q->dirty_rx <= rx_q->cur_rx)
		dirty = rx_q->cur_rx - rx_q->dirty_rx;
	else
		dirty = priv->dma_rx_size - rx_q->dirty_rx + rx_q->cur_rx;

	return dirty;
}

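/* Worked example (added for clarity) of the circular-index arithmetic used
 * by stmmac_tx_avail() and stmmac_rx_dirty() above, assuming an illustrative
 * ring of dma_tx_size = 512 entries:
 *
 *   cur_tx = 10,  dirty_tx = 500  ->  avail = 500 - 10 - 1        = 489
 *   cur_tx = 500, dirty_tx = 10   ->  avail = 512 - 500 + 10 - 1  = 21
 *
 * The "- 1" keeps one slot unused so that a completely full ring can be
 * distinguished from an empty one.
 */
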
static void stmmac_lpi_entry_timer_config(struct stmmac_priv *priv, bool en)
{
	int tx_lpi_timer;

	/* Clear/set the SW EEE timer flag based on LPI ET enablement */
	priv->eee_sw_timer_en = en ? 0 : 1;
	tx_lpi_timer = en ? priv->tx_lpi_timer : 0;
	stmmac_set_eee_lpi_timer(priv, priv->hw, tx_lpi_timer);
}

/**
 * stmmac_enable_eee_mode - check and enter in LPI mode
 * @priv: driver private structure
 * Description: this function is to verify and enter in LPI mode in case of
 * EEE.
 */
static void stmmac_enable_eee_mode(struct stmmac_priv *priv)
{
	u32 tx_cnt = priv->plat->tx_queues_to_use;
	u32 queue;

	/* check if all TX queues have the work finished */
	for (queue = 0; queue < tx_cnt; queue++) {
		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];

		if (tx_q->dirty_tx != tx_q->cur_tx)
			return; /* still unfinished work */
	}

	/* Check and enter in LPI mode */
	if (!priv->tx_path_in_lpi_mode)
		stmmac_set_eee_mode(priv, priv->hw,
				    priv->plat->en_tx_lpi_clockgating);
}

/**
 * stmmac_disable_eee_mode - disable and exit from LPI mode
 * @priv: driver private structure
 * Description: this function is to exit and disable EEE if the LPI state is
 * true. It is called from the xmit path.
 */
void stmmac_disable_eee_mode(struct stmmac_priv *priv)
{
	if (!priv->eee_sw_timer_en) {
		stmmac_lpi_entry_timer_config(priv, 0);
		return;
	}

	stmmac_reset_eee_mode(priv, priv->hw);
	del_timer_sync(&priv->eee_ctrl_timer);
	priv->tx_path_in_lpi_mode = false;
}

/**
 * stmmac_eee_ctrl_timer - EEE TX SW timer.
 * @t: timer_list struct containing private info
 * Description:
 * if there is no data transfer and if we are not in LPI state,
 * then the MAC Transmitter can be moved to LPI state.
 */
static void stmmac_eee_ctrl_timer(struct timer_list *t)
{
	struct stmmac_priv *priv = from_timer(priv, t, eee_ctrl_timer);

	stmmac_enable_eee_mode(priv);
	mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer));
}

/**
 * stmmac_eee_init - init EEE
 * @priv: driver private structure
 * Description:
 * if the GMAC supports the EEE (from the HW cap reg) and the phy device
 * can also manage EEE, this function enables the LPI state and starts the
 * related timer.
 */
bool stmmac_eee_init(struct stmmac_priv *priv)
{
	int eee_tw_timer = priv->eee_tw_timer;

	/* Using PCS we cannot deal with the PHY registers at this stage
	 * so we do not support extra features like EEE.
	 */
	if (priv->hw->pcs == STMMAC_PCS_TBI ||
	    priv->hw->pcs == STMMAC_PCS_RTBI)
		return false;

	/* Check if MAC core supports the EEE feature. */
	if (!priv->dma_cap.eee)
		return false;

	mutex_lock(&priv->lock);

	/* Check if it needs to be deactivated */
	if (!priv->eee_active) {
		if (priv->eee_enabled) {
			netdev_dbg(priv->dev, "disable EEE\n");
			stmmac_lpi_entry_timer_config(priv, 0);
			del_timer_sync(&priv->eee_ctrl_timer);
			stmmac_set_eee_timer(priv, priv->hw, 0, eee_tw_timer);
		}
		mutex_unlock(&priv->lock);
		return false;
	}

	if (priv->eee_active && !priv->eee_enabled) {
		timer_setup(&priv->eee_ctrl_timer, stmmac_eee_ctrl_timer, 0);
		stmmac_set_eee_timer(priv, priv->hw, STMMAC_DEFAULT_LIT_LS,
				     eee_tw_timer);
	}

	if (priv->plat->has_gmac4 && priv->tx_lpi_timer <= STMMAC_ET_MAX) {
		del_timer_sync(&priv->eee_ctrl_timer);
		priv->tx_path_in_lpi_mode = false;
		stmmac_lpi_entry_timer_config(priv, 1);
	} else {
		stmmac_lpi_entry_timer_config(priv, 0);
		mod_timer(&priv->eee_ctrl_timer,
			  STMMAC_LPI_T(priv->tx_lpi_timer));
	}

	mutex_unlock(&priv->lock);
	netdev_dbg(priv->dev, "Energy-Efficient Ethernet initialized\n");
	return true;
}

/* stmmac_get_tx_hwtstamp - get HW TX timestamps
 * @priv: driver private structure
 * @p : descriptor pointer
 * @skb : the socket buffer
 * Description :
 * This function will read the timestamp from the descriptor and pass it to
 * the stack. It also performs some sanity checks.
 */
static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv,
				   struct dma_desc *p, struct sk_buff *skb)
{
	struct skb_shared_hwtstamps shhwtstamp;
	bool found = false;
	s64 adjust = 0;
	u64 ns = 0;

	if (!priv->hwts_tx_en)
		return;

	/* exit if skb doesn't support hw tstamp */
	if (likely(!skb || !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)))
		return;

	/* check tx tstamp status */
	if (stmmac_get_tx_timestamp_status(priv, p)) {
		stmmac_get_timestamp(priv, p, priv->adv_ts, &ns);
		found = true;
	} else if (!stmmac_get_mac_tx_timestamp(priv, priv->hw, &ns)) {
		found = true;
	}

	if (found) {
		/* Correct the clk domain crossing(CDC) error */
		if (priv->plat->has_gmac4 && priv->plat->clk_ptp_rate) {
			adjust += -(2 * (NSEC_PER_SEC /
					 priv->plat->clk_ptp_rate));
			ns += adjust;
		}

		memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
		shhwtstamp.hwtstamp = ns_to_ktime(ns);

		netdev_dbg(priv->dev, "get valid TX hw timestamp %llu\n", ns);
		/* pass tstamp to stack */
		skb_tstamp_tx(skb, &shhwtstamp);
	}
}

/* stmmac_get_rx_hwtstamp - get HW RX timestamps
 * @priv: driver private structure
 * @p : descriptor pointer
 * @np : next descriptor pointer
 * @skb : the socket buffer
 * Description :
 * This function will read the received packet's timestamp from the descriptor
 * and pass it to the stack. It also performs some sanity checks.
 */
static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p,
				   struct dma_desc *np, struct sk_buff *skb)
{
	struct skb_shared_hwtstamps *shhwtstamp = NULL;
	struct dma_desc *desc = p;
	u64 adjust = 0;
	u64 ns = 0;

	if (!priv->hwts_rx_en)
		return;

	/* For GMAC4, the valid timestamp is from CTX next desc. */
	if (priv->plat->has_gmac4 || priv->plat->has_xgmac)
		desc = np;

	/* Check if timestamp is available */
	if (stmmac_get_rx_timestamp_status(priv, p, np, priv->adv_ts)) {
		stmmac_get_timestamp(priv, desc, priv->adv_ts, &ns);

		/* Correct the clk domain crossing(CDC) error */
		if (priv->plat->has_gmac4 && priv->plat->clk_ptp_rate) {
			adjust += 2 * (NSEC_PER_SEC / priv->plat->clk_ptp_rate);
			ns -= adjust;
		}

		netdev_dbg(priv->dev, "get valid RX hw timestamp %llu\n", ns);
		shhwtstamp = skb_hwtstamps(skb);
		memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
		shhwtstamp->hwtstamp = ns_to_ktime(ns);
	} else {
		netdev_dbg(priv->dev, "cannot get RX hw timestamp\n");
	}
}

/**
 * stmmac_hwtstamp_set - control hardware timestamping.
 * @dev: device pointer.
 * @ifr: An IOCTL specific structure, that can contain a pointer to
 * a proprietary structure used to pass information to the driver.
 * Description:
 * This function configures the MAC to enable/disable both outgoing(TX)
 * and incoming(RX) packets time stamping based on user input.
 * Return Value:
 * 0 on success and an appropriate -ve integer on failure.
 */
static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	struct hwtstamp_config config;
	struct timespec64 now;
	u64 temp = 0;
	u32 ptp_v2 = 0;
	u32 tstamp_all = 0;
	u32 ptp_over_ipv4_udp = 0;
	u32 ptp_over_ipv6_udp = 0;
	u32 ptp_over_ethernet = 0;
	u32 snap_type_sel = 0;
	u32 ts_master_en = 0;
	u32 ts_event_en = 0;
	u32 sec_inc = 0;
	u32 value = 0;
	bool xmac;

	xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;

	if (!(priv->dma_cap.time_stamp || priv->adv_ts)) {
		netdev_alert(priv->dev, "No support for HW time stamping\n");
		priv->hwts_tx_en = 0;
		priv->hwts_rx_en = 0;

		return -EOPNOTSUPP;
	}

	if (copy_from_user(&config, ifr->ifr_data,
			   sizeof(config)))
		return -EFAULT;

	netdev_dbg(priv->dev, "%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n",
		   __func__, config.flags, config.tx_type, config.rx_filter);

	/* reserved for future extensions */
	if (config.flags)
		return -EINVAL;

	if (config.tx_type != HWTSTAMP_TX_OFF &&
	    config.tx_type != HWTSTAMP_TX_ON)
		return -ERANGE;

	if (priv->adv_ts) {
		switch (config.rx_filter) {
		case HWTSTAMP_FILTER_NONE:
			/* time stamp no incoming packet at all */
			config.rx_filter = HWTSTAMP_FILTER_NONE;
			break;

		case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
			/* PTP v1, UDP, any kind of event packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
			/* 'xmac' hardware can support Sync, Pdelay_Req and
			 * Pdelay_resp by setting bit14 and bits17/16 to 01
			 * This leaves Delay_Req timestamps out.
			 * Enable all events *and* general purpose message
			 * timestamping
			 */
			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
			/* PTP v1, UDP, Sync packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
			/* take time stamp for SYNC messages only */
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
			/* PTP v1, UDP, Delay_req packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
			/* take time stamp for Delay_Req messages only */
			ts_master_en = PTP_TCR_TSMSTRENA;
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
			/* PTP v2, UDP, any kind of event packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for all event messages */
			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
			/* PTP v2, UDP, Sync packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for SYNC messages only */
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
			/* PTP v2, UDP, Delay_req packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for Delay_Req messages only */
			ts_master_en = PTP_TCR_TSMSTRENA;
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_EVENT:
			/* PTP v2/802.AS1 any layer, any kind of event packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
			if (priv->synopsys_id != DWMAC_CORE_5_10)
				ts_event_en = PTP_TCR_TSEVNTENA;
			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			ptp_over_ethernet = PTP_TCR_TSIPENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_SYNC:
			/* PTP v2/802.AS1, any layer, Sync packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for SYNC messages only */
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			ptp_over_ethernet = PTP_TCR_TSIPENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
			/* PTP v2/802.AS1, any layer, Delay_req packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for Delay_Req messages only */
			ts_master_en = PTP_TCR_TSMSTRENA;
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			ptp_over_ethernet = PTP_TCR_TSIPENA;
			break;

		case HWTSTAMP_FILTER_NTP_ALL:
		case HWTSTAMP_FILTER_ALL:
			/* time stamp any incoming packet */
			config.rx_filter = HWTSTAMP_FILTER_ALL;
			tstamp_all = PTP_TCR_TSENALL;
			break;

		default:
			return -ERANGE;
		}
	} else {
		switch (config.rx_filter) {
		case HWTSTAMP_FILTER_NONE:
			config.rx_filter = HWTSTAMP_FILTER_NONE;
			break;
		default:
			/* PTP v1, UDP, any kind of event packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
			break;
		}
	}
	priv->hwts_rx_en = ((config.rx_filter == HWTSTAMP_FILTER_NONE) ? 0 : 1);
	priv->hwts_tx_en = config.tx_type == HWTSTAMP_TX_ON;

	if (!priv->hwts_tx_en && !priv->hwts_rx_en)
		stmmac_config_hw_tstamping(priv, priv->ptpaddr, 0);
	else {
		value = (PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | PTP_TCR_TSCTRLSSR |
			 tstamp_all | ptp_v2 | ptp_over_ethernet |
			 ptp_over_ipv6_udp | ptp_over_ipv4_udp | ts_event_en |
			 ts_master_en | snap_type_sel);
		stmmac_config_hw_tstamping(priv, priv->ptpaddr, value);

		/* program Sub Second Increment reg */
		stmmac_config_sub_second_increment(priv,
				priv->ptpaddr, priv->plat->clk_ptp_rate,
				xmac, &sec_inc);
		temp = div_u64(1000000000ULL, sec_inc);

		/* Store sub second increment and flags for later use */
		priv->sub_second_inc = sec_inc;
		priv->systime_flags = value;

		/* calculate default added value:
		 * formula is :
		 * addend = (2^32)/freq_div_ratio;
		 * where, freq_div_ratio = 1e9ns/sec_inc
		 */
		temp = (u64)(temp << 32);
		priv->default_addend = div_u64(temp, priv->plat->clk_ptp_rate);
		stmmac_config_addend(priv, priv->ptpaddr, priv->default_addend);

		/* initialize system time */
		ktime_get_real_ts64(&now);

		/* lower 32 bits of tv_sec are safe until y2106 */
		stmmac_init_systime(priv, priv->ptpaddr,
				    (u32)now.tv_sec, now.tv_nsec);
	}

	memcpy(&priv->tstamp_config, &config, sizeof(config));

	return copy_to_user(ifr->ifr_data, &config,
			    sizeof(config)) ? -EFAULT : 0;
}

/**
 * stmmac_hwtstamp_get - read hardware timestamping.
 * @dev: device pointer.
 * @ifr: An IOCTL specific structure, that can contain a pointer to
 * a proprietary structure used to pass information to the driver.
 * Description:
 * This function obtains the current hardware timestamping settings
 * as requested.
 */
static int stmmac_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	struct hwtstamp_config *config = &priv->tstamp_config;

	if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
		return -EOPNOTSUPP;

	return copy_to_user(ifr->ifr_data, config,
			    sizeof(*config)) ? -EFAULT : 0;
}

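/* Worked example (added for clarity) for the addend computation in
 * stmmac_hwtstamp_set() above. The numbers are purely illustrative: assume
 * clk_ptp_rate = 100 MHz and that the sub-second increment chosen by the
 * hardware helper works out to sec_inc = 20 ns. Then:
 *
 *   temp   = 1e9 / sec_inc       = 50,000,000
 *   addend = (temp << 32) / clk_ptp_rate
 *          = 50e6 * 2^32 / 100e6 = 2^31 = 0x80000000
 *
 * i.e. the 32-bit accumulator overflows (and the system time advances by
 * one sec_inc) every two PTP clock cycles, leaving headroom for frequency
 * adjustment in both directions.
 */
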
/**
 * stmmac_init_ptp - init PTP
 * @priv: driver private structure
 * Description: this is to verify if the HW supports the PTPv1 or PTPv2.
 * This is done by looking at the HW cap. register.
 * This function also registers the ptp driver.
 */
static int stmmac_init_ptp(struct stmmac_priv *priv)
{
	bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;

	if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
		return -EOPNOTSUPP;

	priv->adv_ts = 0;
	/* Check if adv_ts can be enabled for dwmac 4.x / xgmac core */
	if (xmac && priv->dma_cap.atime_stamp)
		priv->adv_ts = 1;
	/* Dwmac 3.x core with extend_desc can support adv_ts */
	else if (priv->extend_desc && priv->dma_cap.atime_stamp)
		priv->adv_ts = 1;

	if (priv->dma_cap.time_stamp)
		netdev_info(priv->dev, "IEEE 1588-2002 Timestamp supported\n");

	if (priv->adv_ts)
		netdev_info(priv->dev,
			    "IEEE 1588-2008 Advanced Timestamp supported\n");

	priv->hwts_tx_en = 0;
	priv->hwts_rx_en = 0;

	stmmac_ptp_register(priv);

	return 0;
}

static void stmmac_release_ptp(struct stmmac_priv *priv)
{
	clk_disable_unprepare(priv->plat->clk_ptp_ref);
	stmmac_ptp_unregister(priv);
}

/**
 * stmmac_mac_flow_ctrl - Configure flow control in all queues
 * @priv: driver private structure
 * @duplex: duplex passed to the next function
 * Description: It is used for configuring the flow control in all queues
 */
static void stmmac_mac_flow_ctrl(struct stmmac_priv *priv, u32 duplex)
{
	u32 tx_cnt = priv->plat->tx_queues_to_use;

	stmmac_flow_ctrl(priv, priv->hw, duplex, priv->flow_ctrl,
			 priv->pause, tx_cnt);
}

static void stmmac_validate(struct phylink_config *config,
			    unsigned long *supported,
			    struct phylink_link_state *state)
{
	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
	__ETHTOOL_DECLARE_LINK_MODE_MASK(mac_supported) = { 0, };
	__ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
	int tx_cnt = priv->plat->tx_queues_to_use;
	int max_speed = priv->plat->max_speed;

	phylink_set(mac_supported, 10baseT_Half);
	phylink_set(mac_supported, 10baseT_Full);
	phylink_set(mac_supported, 100baseT_Half);
	phylink_set(mac_supported, 100baseT_Full);
	phylink_set(mac_supported, 1000baseT_Half);
	phylink_set(mac_supported, 1000baseT_Full);
	phylink_set(mac_supported, 1000baseKX_Full);

	phylink_set(mac_supported, Autoneg);
	phylink_set(mac_supported, Pause);
	phylink_set(mac_supported, Asym_Pause);
	phylink_set_port_modes(mac_supported);

	/* Cut down 1G if asked to */
	if ((max_speed > 0) && (max_speed < 1000)) {
		phylink_set(mask, 1000baseT_Full);
		phylink_set(mask, 1000baseX_Full);
	} else if (priv->plat->has_xgmac) {
		if (!max_speed || (max_speed >= 2500)) {
			phylink_set(mac_supported, 2500baseT_Full);
			phylink_set(mac_supported, 2500baseX_Full);
		}
		if (!max_speed || (max_speed >= 5000)) {
			phylink_set(mac_supported, 5000baseT_Full);
		}
		if (!max_speed || (max_speed >= 10000)) {
			phylink_set(mac_supported, 10000baseSR_Full);
			phylink_set(mac_supported, 10000baseLR_Full);
			phylink_set(mac_supported, 10000baseER_Full);
			phylink_set(mac_supported, 10000baseLRM_Full);
			phylink_set(mac_supported, 10000baseT_Full);
			phylink_set(mac_supported, 10000baseKX4_Full);
			phylink_set(mac_supported, 10000baseKR_Full);
		}
		if (!max_speed || (max_speed >= 25000)) {
			phylink_set(mac_supported, 25000baseCR_Full);
			phylink_set(mac_supported, 25000baseKR_Full);
			phylink_set(mac_supported, 25000baseSR_Full);
		}
		if (!max_speed || (max_speed >= 40000)) {
			phylink_set(mac_supported, 40000baseKR4_Full);
			phylink_set(mac_supported, 40000baseCR4_Full);
			phylink_set(mac_supported, 40000baseSR4_Full);
			phylink_set(mac_supported, 40000baseLR4_Full);
		}
		if (!max_speed || (max_speed >= 50000)) {
			phylink_set(mac_supported, 50000baseCR2_Full);
			phylink_set(mac_supported, 50000baseKR2_Full);
			phylink_set(mac_supported, 50000baseSR2_Full);
			phylink_set(mac_supported, 50000baseKR_Full);
			phylink_set(mac_supported, 50000baseSR_Full);
			phylink_set(mac_supported, 50000baseCR_Full);
			phylink_set(mac_supported, 50000baseLR_ER_FR_Full);
			phylink_set(mac_supported, 50000baseDR_Full);
		}
		if (!max_speed || (max_speed >= 100000)) {
			phylink_set(mac_supported, 100000baseKR4_Full);
			phylink_set(mac_supported, 100000baseSR4_Full);
			phylink_set(mac_supported, 100000baseCR4_Full);
			phylink_set(mac_supported, 100000baseLR4_ER4_Full);
			phylink_set(mac_supported, 100000baseKR2_Full);
			phylink_set(mac_supported, 100000baseSR2_Full);
			phylink_set(mac_supported, 100000baseCR2_Full);
			phylink_set(mac_supported, 100000baseLR2_ER2_FR2_Full);
			phylink_set(mac_supported, 100000baseDR2_Full);
		}
	}

	/* Half-Duplex can only work with single queue */
	if (tx_cnt > 1) {
		phylink_set(mask, 10baseT_Half);
		phylink_set(mask, 100baseT_Half);
		phylink_set(mask, 1000baseT_Half);
	}

	linkmode_and(supported, supported, mac_supported);
	linkmode_andnot(supported, supported, mask);

	linkmode_and(state->advertising, state->advertising, mac_supported);
	linkmode_andnot(state->advertising, state->advertising, mask);

	/* If PCS is supported, check which modes it supports. */
	stmmac_xpcs_validate(priv, &priv->hw->xpcs_args, supported, state);
}

static void stmmac_mac_pcs_get_state(struct phylink_config *config,
				     struct phylink_link_state *state)
{
	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));

	state->link = 0;
	stmmac_xpcs_get_state(priv, &priv->hw->xpcs_args, state);
}

static void stmmac_mac_config(struct phylink_config *config, unsigned int mode,
			      const struct phylink_link_state *state)
{
	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));

	stmmac_xpcs_config(priv, &priv->hw->xpcs_args, state);
}

static void stmmac_mac_an_restart(struct phylink_config *config)
{
	/* Not Supported */
}

static void stmmac_fpe_link_state_handle(struct stmmac_priv *priv, bool is_up)
{
	struct stmmac_fpe_cfg *fpe_cfg = priv->plat->fpe_cfg;
	enum stmmac_fpe_state *lo_state = &fpe_cfg->lo_fpe_state;
	enum stmmac_fpe_state *lp_state = &fpe_cfg->lp_fpe_state;
	bool *hs_enable = &fpe_cfg->hs_enable;

	if (is_up && *hs_enable) {
		stmmac_fpe_send_mpacket(priv, priv->ioaddr, MPACKET_VERIFY);
	} else {
		*lo_state = FPE_EVENT_UNKNOWN;
		*lp_state = FPE_EVENT_UNKNOWN;
	}
}

static void stmmac_mac_link_down(struct phylink_config *config,
				 unsigned int mode, phy_interface_t interface)
{
	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));

	stmmac_mac_set(priv, priv->ioaddr, false);
	priv->eee_active = false;
	priv->tx_lpi_enabled = false;
	stmmac_eee_init(priv);
	stmmac_set_eee_pls(priv, priv->hw, false);

	if (priv->dma_cap.fpesel)
		stmmac_fpe_link_state_handle(priv, false);
}

static void stmmac_mac_link_up(struct phylink_config *config,
			       struct phy_device *phy,
			       unsigned int mode, phy_interface_t interface,
			       int speed, int duplex,
			       bool tx_pause, bool rx_pause)
{
	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
	u32 ctrl;

	stmmac_xpcs_link_up(priv, &priv->hw->xpcs_args, speed, interface);

	ctrl = readl(priv->ioaddr + MAC_CTRL_REG);
	ctrl &= ~priv->hw->link.speed_mask;

	if (interface == PHY_INTERFACE_MODE_USXGMII) {
		switch (speed) {
		case SPEED_10000:
			ctrl |= priv->hw->link.xgmii.speed10000;
			break;
		case SPEED_5000:
			ctrl |= priv->hw->link.xgmii.speed5000;
			break;
		case SPEED_2500:
			ctrl |= priv->hw->link.xgmii.speed2500;
			break;
		default:
			return;
		}
	} else if (interface == PHY_INTERFACE_MODE_XLGMII) {
		switch (speed) {
		case SPEED_100000:
			ctrl |= priv->hw->link.xlgmii.speed100000;
			break;
		case SPEED_50000:
			ctrl |= priv->hw->link.xlgmii.speed50000;
			break;
		case SPEED_40000:
			ctrl |= priv->hw->link.xlgmii.speed40000;
			break;
		case SPEED_25000:
			ctrl |= priv->hw->link.xlgmii.speed25000;
			break;
		case SPEED_10000:
			ctrl |= priv->hw->link.xgmii.speed10000;
			break;
		case SPEED_2500:
			ctrl |= priv->hw->link.speed2500;
			break;
		case SPEED_1000:
			ctrl |= priv->hw->link.speed1000;
			break;
		default:
			return;
		}
	} else {
		switch (speed) {
		case SPEED_2500:
			ctrl |= priv->hw->link.speed2500;
			break;
		case SPEED_1000:
			ctrl |= priv->hw->link.speed1000;
			break;
		case SPEED_100:
			ctrl |= priv->hw->link.speed100;
			break;
		case SPEED_10:
			ctrl |= priv->hw->link.speed10;
			break;
		default:
			return;
		}
	}

	priv->speed = speed;

	if (priv->plat->fix_mac_speed)
		priv->plat->fix_mac_speed(priv->plat->bsp_priv, speed);

	if (!duplex)
		ctrl &= ~priv->hw->link.duplex;
	else
		ctrl |= priv->hw->link.duplex;

	/* Flow Control operation */
	if (tx_pause && rx_pause)
		stmmac_mac_flow_ctrl(priv, duplex);

	writel(ctrl, priv->ioaddr + MAC_CTRL_REG);

	stmmac_mac_set(priv, priv->ioaddr, true);
	if (phy && priv->dma_cap.eee) {
		priv->eee_active = phy_init_eee(phy, 1) >= 0;
		priv->eee_enabled = stmmac_eee_init(priv);
		priv->tx_lpi_enabled = priv->eee_enabled;
		stmmac_set_eee_pls(priv, priv->hw, true);
	}

	if (priv->dma_cap.fpesel)
		stmmac_fpe_link_state_handle(priv, true);
}

static const struct phylink_mac_ops stmmac_phylink_mac_ops = {
	.validate = stmmac_validate,
	.mac_pcs_get_state = stmmac_mac_pcs_get_state,
	.mac_config = stmmac_mac_config,
	.mac_an_restart = stmmac_mac_an_restart,
	.mac_link_down = stmmac_mac_link_down,
	.mac_link_up = stmmac_mac_link_up,
};

/**
 * stmmac_check_pcs_mode - verify if RGMII/SGMII is supported
 * @priv: driver private structure
 * Description: this is to verify if the HW supports the PCS.
 * Physical Coding Sublayer (PCS) interface that can be used when the MAC is
 * configured for the TBI, RTBI, or SGMII PHY interface.
 */
static void stmmac_check_pcs_mode(struct stmmac_priv *priv)
{
	int interface = priv->plat->interface;

	if (priv->dma_cap.pcs) {
		if ((interface == PHY_INTERFACE_MODE_RGMII) ||
		    (interface == PHY_INTERFACE_MODE_RGMII_ID) ||
		    (interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
		    (interface == PHY_INTERFACE_MODE_RGMII_TXID)) {
			netdev_dbg(priv->dev, "PCS RGMII support enabled\n");
			priv->hw->pcs = STMMAC_PCS_RGMII;
		} else if (interface == PHY_INTERFACE_MODE_SGMII) {
			netdev_dbg(priv->dev, "PCS SGMII support enabled\n");
			priv->hw->pcs = STMMAC_PCS_SGMII;
		}
	}
}

/**
 * stmmac_init_phy - PHY initialization
 * @dev: net device structure
 * Description: it initializes the driver's PHY state, and attaches the PHY
 * to the mac driver.
 * Return value:
 * 0 on success
 */
static int stmmac_init_phy(struct net_device *dev)
{
	struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
	struct stmmac_priv *priv = netdev_priv(dev);
	struct device_node *node;
	int ret;

	node = priv->plat->phylink_node;

	if (node)
		ret = phylink_of_phy_connect(priv->phylink, node, 0);

	/* Some DT bindings do not set-up the PHY handle. Let's try to
	 * manually parse it
	 */
	if (!node || ret) {
		int addr = priv->plat->phy_addr;
		struct phy_device *phydev;

		phydev = mdiobus_get_phy(priv->mii, addr);
		if (!phydev) {
			netdev_err(priv->dev, "no phy at addr %d\n", addr);
			return -ENODEV;
		}

		ret = phylink_connect_phy(priv->phylink, phydev);
	}

	phylink_ethtool_get_wol(priv->phylink, &wol);
	device_set_wakeup_capable(priv->device, !!wol.supported);

	return ret;
}

static int stmmac_phy_setup(struct stmmac_priv *priv)
{
	struct fwnode_handle *fwnode = of_fwnode_handle(priv->plat->phylink_node);
	int mode = priv->plat->phy_interface;
	struct phylink *phylink;

	priv->phylink_config.dev = &priv->dev->dev;
	priv->phylink_config.type = PHYLINK_NETDEV;
	priv->phylink_config.pcs_poll = true;
	priv->phylink_config.ovr_an_inband =
		priv->plat->mdio_bus_data->xpcs_an_inband;

	if (!fwnode)
		fwnode = dev_fwnode(priv->device);

	phylink = phylink_create(&priv->phylink_config, fwnode,
				 mode, &stmmac_phylink_mac_ops);
	if (IS_ERR(phylink))
		return PTR_ERR(phylink);

	priv->phylink = phylink;
	return 0;
}

static void stmmac_display_rx_rings(struct stmmac_priv *priv)
{
	u32 rx_cnt = priv->plat->rx_queues_to_use;
	unsigned int desc_size;
	void *head_rx;
	u32 queue;

	/* Display RX rings */
	for (queue = 0; queue < rx_cnt; queue++) {
		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];

		pr_info("\tRX Queue %u rings\n", queue);

		if (priv->extend_desc) {
			head_rx = (void *)rx_q->dma_erx;
			desc_size = sizeof(struct dma_extended_desc);
		} else {
			head_rx = (void *)rx_q->dma_rx;
			desc_size = sizeof(struct dma_desc);
		}

		/* Display RX ring */
		stmmac_display_ring(priv, head_rx, priv->dma_rx_size, true,
				    rx_q->dma_rx_phy, desc_size);
	}
}

static void stmmac_display_tx_rings(struct stmmac_priv *priv)
{
	u32 tx_cnt = priv->plat->tx_queues_to_use;
	unsigned int desc_size;
	void *head_tx;
	u32 queue;

	/* Display TX rings */
	for (queue = 0; queue < tx_cnt; queue++) {
		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];

		pr_info("\tTX Queue %d rings\n", queue);

		if (priv->extend_desc) {
			head_tx = (void *)tx_q->dma_etx;
			desc_size = sizeof(struct dma_extended_desc);
		} else if (tx_q->tbs & STMMAC_TBS_AVAIL) {
			head_tx = (void *)tx_q->dma_entx;
			desc_size = sizeof(struct dma_edesc);
		} else {
			head_tx = (void *)tx_q->dma_tx;
			desc_size = sizeof(struct dma_desc);
		}

		stmmac_display_ring(priv, head_tx, priv->dma_tx_size, false,
				    tx_q->dma_tx_phy, desc_size);
	}
}

static void stmmac_display_rings(struct stmmac_priv *priv)
{
	/* Display RX ring */
	stmmac_display_rx_rings(priv);

	/* Display TX ring */
	stmmac_display_tx_rings(priv);
}

static int stmmac_set_bfsize(int mtu, int bufsize)
{
	int ret = bufsize;

	if (mtu >= BUF_SIZE_8KiB)
		ret = BUF_SIZE_16KiB;
	else if (mtu >= BUF_SIZE_4KiB)
		ret = BUF_SIZE_8KiB;
	else if (mtu >= BUF_SIZE_2KiB)
		ret = BUF_SIZE_4KiB;
	else if (mtu > DEFAULT_BUFSIZE)
		ret = BUF_SIZE_2KiB;
	else
		ret = DEFAULT_BUFSIZE;

	return ret;
}

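/* Worked example (added for clarity) of the stmmac_set_bfsize() mapping
 * above, using common MTU values:
 *
 *   mtu = 1500  -> DEFAULT_BUFSIZE (1536)
 *   mtu = 3000  -> BUF_SIZE_4KiB
 *   mtu = 9000  -> BUF_SIZE_16KiB
 *
 * i.e. the DMA buffer size is rounded up to the next supported bucket so
 * that a full frame of the requested MTU always fits in the buffer
 * programmed into the descriptor.
 */
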
/**
 * stmmac_clear_rx_descriptors - clear RX descriptors
 * @priv: driver private structure
 * @queue: RX queue index
 * Description: this function is called to clear the RX descriptors
 * whether basic or extended descriptors are used.
 */
static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv, u32 queue)
{
	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
	int i;

	/* Clear the RX descriptors */
	for (i = 0; i < priv->dma_rx_size; i++)
		if (priv->extend_desc)
			stmmac_init_rx_desc(priv, &rx_q->dma_erx[i].basic,
					    priv->use_riwt, priv->mode,
					    (i == priv->dma_rx_size - 1),
					    priv->dma_buf_sz);
		else
			stmmac_init_rx_desc(priv, &rx_q->dma_rx[i],
					    priv->use_riwt, priv->mode,
					    (i == priv->dma_rx_size - 1),
					    priv->dma_buf_sz);
}

/**
 * stmmac_clear_tx_descriptors - clear tx descriptors
 * @priv: driver private structure
 * @queue: TX queue index.
 * Description: this function is called to clear the TX descriptors
 * whether basic or extended descriptors are used.
 */
static void stmmac_clear_tx_descriptors(struct stmmac_priv *priv, u32 queue)
{
	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
	int i;

	/* Clear the TX descriptors */
	for (i = 0; i < priv->dma_tx_size; i++) {
		int last = (i == (priv->dma_tx_size - 1));
		struct dma_desc *p;

		if (priv->extend_desc)
			p = &tx_q->dma_etx[i].basic;
		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
			p = &tx_q->dma_entx[i].basic;
		else
			p = &tx_q->dma_tx[i];

		stmmac_init_tx_desc(priv, p, priv->mode, last);
	}
}

/**
 * stmmac_clear_descriptors - clear descriptors
 * @priv: driver private structure
 * Description: this function is called to clear the TX and RX descriptors
 * whether basic or extended descriptors are used.
 */
static void stmmac_clear_descriptors(struct stmmac_priv *priv)
{
	u32 rx_queue_cnt = priv->plat->rx_queues_to_use;
	u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
	u32 queue;

	/* Clear the RX descriptors */
	for (queue = 0; queue < rx_queue_cnt; queue++)
		stmmac_clear_rx_descriptors(priv, queue);

	/* Clear the TX descriptors */
	for (queue = 0; queue < tx_queue_cnt; queue++)
		stmmac_clear_tx_descriptors(priv, queue);
}

/**
 * stmmac_init_rx_buffers - init the RX descriptor buffer.
 * @priv: driver private structure
 * @p: descriptor pointer
 * @i: descriptor index
 * @flags: gfp flag
 * @queue: RX queue index
 * Description: this function is called to allocate a receive buffer, perform
 * the DMA mapping and init the descriptor.
 */
static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
				  int i, gfp_t flags, u32 queue)
{
	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
	struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];

	buf->page = page_pool_dev_alloc_pages(rx_q->page_pool);
	if (!buf->page)
		return -ENOMEM;

	if (priv->sph) {
		buf->sec_page = page_pool_dev_alloc_pages(rx_q->page_pool);
		if (!buf->sec_page)
			return -ENOMEM;

		buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
		stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true);
	} else {
		buf->sec_page = NULL;
		stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false);
	}

	buf->addr = page_pool_get_dma_addr(buf->page);
	stmmac_set_desc_addr(priv, p, buf->addr);
	if (priv->dma_buf_sz == BUF_SIZE_16KiB)
		stmmac_init_desc3(priv, p);

	return 0;
}

/**
 * stmmac_free_rx_buffer - free RX dma buffers
 * @priv: private structure
 * @queue: RX queue index
 * @i: buffer index.
 */
static void stmmac_free_rx_buffer(struct stmmac_priv *priv, u32 queue, int i)
{
	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
	struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];

	if (buf->page)
		page_pool_put_full_page(rx_q->page_pool, buf->page, false);
	buf->page = NULL;

	if (buf->sec_page)
		page_pool_put_full_page(rx_q->page_pool, buf->sec_page, false);
	buf->sec_page = NULL;
}

/**
 * stmmac_free_tx_buffer - free TX dma buffers
 * @priv: private structure
 * @queue: TX queue index
 * @i: buffer index.
 */
static void stmmac_free_tx_buffer(struct stmmac_priv *priv, u32 queue, int i)
{
	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];

	if (tx_q->tx_skbuff_dma[i].buf) {
		if (tx_q->tx_skbuff_dma[i].map_as_page)
			dma_unmap_page(priv->device,
				       tx_q->tx_skbuff_dma[i].buf,
				       tx_q->tx_skbuff_dma[i].len,
				       DMA_TO_DEVICE);
		else
			dma_unmap_single(priv->device,
					 tx_q->tx_skbuff_dma[i].buf,
					 tx_q->tx_skbuff_dma[i].len,
					 DMA_TO_DEVICE);
	}

	if (tx_q->tx_skbuff[i]) {
		dev_kfree_skb_any(tx_q->tx_skbuff[i]);
		tx_q->tx_skbuff[i] = NULL;
		tx_q->tx_skbuff_dma[i].buf = 0;
		tx_q->tx_skbuff_dma[i].map_as_page = false;
	}
}

/**
 * stmmac_reinit_rx_buffers - reinit the RX descriptor buffer.
 * @priv: driver private structure
 * Description: this function is called to re-allocate a receive buffer, perform
 * the DMA mapping and init the descriptor.
 */
static void stmmac_reinit_rx_buffers(struct stmmac_priv *priv)
{
	u32 rx_count = priv->plat->rx_queues_to_use;
	u32 queue;
	int i;

	for (queue = 0; queue < rx_count; queue++) {
		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];

		for (i = 0; i < priv->dma_rx_size; i++) {
			struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];

			if (buf->page) {
				page_pool_recycle_direct(rx_q->page_pool, buf->page);
				buf->page = NULL;
			}

			if (priv->sph && buf->sec_page) {
				page_pool_recycle_direct(rx_q->page_pool, buf->sec_page);
				buf->sec_page = NULL;
			}
		}
	}

	for (queue = 0; queue < rx_count; queue++) {
		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];

		for (i = 0; i < priv->dma_rx_size; i++) {
			struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
			struct dma_desc *p;

			if (priv->extend_desc)
				p = &((rx_q->dma_erx + i)->basic);
			else
				p = rx_q->dma_rx + i;

			if (!buf->page) {
				buf->page = page_pool_dev_alloc_pages(rx_q->page_pool);
				if (!buf->page)
					goto err_reinit_rx_buffers;

				buf->addr = page_pool_get_dma_addr(buf->page);
			}

			if (priv->sph && !buf->sec_page) {
				buf->sec_page = page_pool_dev_alloc_pages(rx_q->page_pool);
				if (!buf->sec_page)
					goto err_reinit_rx_buffers;

				buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
			}

			stmmac_set_desc_addr(priv, p, buf->addr);
			if (priv->sph)
				stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true);
			else
				stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false);
			if (priv->dma_buf_sz == BUF_SIZE_16KiB)
				stmmac_init_desc3(priv, p);
		}
	}

	return;

err_reinit_rx_buffers:
	do {
		while (--i >= 0)
			stmmac_free_rx_buffer(priv, queue, i);

		if (queue == 0)
			break;

		i = priv->dma_rx_size;
	} while (queue-- > 0);
}

/**
 * init_dma_rx_desc_rings - init the RX descriptor rings
 * @dev: net device structure
 * @flags: gfp flag.
 * Description: this function initializes the DMA RX descriptors
 * and allocates the socket buffers. It supports the chained and ring
 * modes.
 */
static int init_dma_rx_desc_rings(struct net_device *dev, gfp_t flags)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	u32 rx_count = priv->plat->rx_queues_to_use;
	int ret = -ENOMEM;
	int queue;
	int i;

	/* RX INITIALIZATION */
	netif_dbg(priv, probe, priv->dev,
		  "SKB addresses:\nskb\t\tskb data\tdma data\n");

	for (queue = 0; queue < rx_count; queue++) {
		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];

		netif_dbg(priv, probe, priv->dev,
			  "(%s) dma_rx_phy=0x%08x\n", __func__,
			  (u32)rx_q->dma_rx_phy);

		stmmac_clear_rx_descriptors(priv, queue);

		for (i = 0; i < priv->dma_rx_size; i++) {
			struct dma_desc *p;

			if (priv->extend_desc)
				p = &((rx_q->dma_erx + i)->basic);
			else
				p = rx_q->dma_rx + i;

			ret = stmmac_init_rx_buffers(priv, p, i, flags,
						     queue);
			if (ret)
				goto err_init_rx_buffers;
		}

		rx_q->cur_rx = 0;
		rx_q->dirty_rx = (unsigned int)(i - priv->dma_rx_size);

		/* Setup the chained descriptor addresses */
		if (priv->mode == STMMAC_CHAIN_MODE) {
			if (priv->extend_desc)
				stmmac_mode_init(priv, rx_q->dma_erx,
						 rx_q->dma_rx_phy,
						 priv->dma_rx_size, 1);
			else
				stmmac_mode_init(priv, rx_q->dma_rx,
						 rx_q->dma_rx_phy,
						 priv->dma_rx_size, 0);
		}
	}

	return 0;

err_init_rx_buffers:
	while (queue >= 0) {
		while (--i >= 0)
			stmmac_free_rx_buffer(priv, queue, i);

		if (queue == 0)
			break;

		i = priv->dma_rx_size;
		queue--;
	}

	return ret;
}

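/* Note (added for clarity): at the end of the per-queue buffer loop above,
 * i == priv->dma_rx_size, so "dirty_rx = (unsigned int)(i - priv->dma_rx_size)"
 * simply starts dirty_rx at 0. The err_init_rx_buffers path walks the
 * queues backwards, freeing only the buffers that were actually allocated
 * (0..i-1 for the queue that failed, all buffers for the earlier ones).
 */
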
/**
 * init_dma_tx_desc_rings - init the TX descriptor rings
 * @dev: net device structure.
 * Description: this function initializes the DMA TX descriptors
 * and allocates the socket buffers. It supports the chained and ring
 * modes.
 */
static int init_dma_tx_desc_rings(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
	u32 queue;
	int i;

	for (queue = 0; queue < tx_queue_cnt; queue++) {
		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];

		netif_dbg(priv, probe, priv->dev,
			  "(%s) dma_tx_phy=0x%08x\n", __func__,
			  (u32)tx_q->dma_tx_phy);

		/* Setup the chained descriptor addresses */
		if (priv->mode == STMMAC_CHAIN_MODE) {
			if (priv->extend_desc)
				stmmac_mode_init(priv, tx_q->dma_etx,
						 tx_q->dma_tx_phy,
						 priv->dma_tx_size, 1);
			else if (!(tx_q->tbs & STMMAC_TBS_AVAIL))
				stmmac_mode_init(priv, tx_q->dma_tx,
						 tx_q->dma_tx_phy,
						 priv->dma_tx_size, 0);
		}

		for (i = 0; i < priv->dma_tx_size; i++) {
			struct dma_desc *p;

			if (priv->extend_desc)
				p = &((tx_q->dma_etx + i)->basic);
			else if (tx_q->tbs & STMMAC_TBS_AVAIL)
				p = &((tx_q->dma_entx + i)->basic);
			else
				p = tx_q->dma_tx + i;

			stmmac_clear_desc(priv, p);

			tx_q->tx_skbuff_dma[i].buf = 0;
			tx_q->tx_skbuff_dma[i].map_as_page = false;
			tx_q->tx_skbuff_dma[i].len = 0;
			tx_q->tx_skbuff_dma[i].last_segment = false;
			tx_q->tx_skbuff[i] = NULL;
		}

		tx_q->dirty_tx = 0;
		tx_q->cur_tx = 0;
		tx_q->mss = 0;

		netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue));
	}

	return 0;
}

/**
 * init_dma_desc_rings - init the RX/TX descriptor rings
 * @dev: net device structure
 * @flags: gfp flag.
 * Description: this function initializes the DMA RX/TX descriptors
 * and allocates the socket buffers. It supports the chained and ring
 * modes.
 */
static int init_dma_desc_rings(struct net_device *dev, gfp_t flags)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	int ret;

	ret = init_dma_rx_desc_rings(dev, flags);
	if (ret)
		return ret;

	ret = init_dma_tx_desc_rings(dev);

	stmmac_clear_descriptors(priv);

	if (netif_msg_hw(priv))
		stmmac_display_rings(priv);

	return ret;
}

/**
 * dma_free_rx_skbufs - free RX dma buffers
 * @priv: private structure
 * @queue: RX queue index
 */
static void dma_free_rx_skbufs(struct stmmac_priv *priv, u32 queue)
{
	int i;

	for (i = 0; i < priv->dma_rx_size; i++)
		stmmac_free_rx_buffer(priv, queue, i);
}

/**
 * dma_free_tx_skbufs - free TX dma buffers
 * @priv: private structure
 * @queue: TX queue index
 */
static void dma_free_tx_skbufs(struct stmmac_priv *priv, u32 queue)
{
	int i;

	for (i = 0; i < priv->dma_tx_size; i++)
		stmmac_free_tx_buffer(priv, queue, i);
}

/**
 * stmmac_free_tx_skbufs - free TX skb buffers
 * @priv: private structure
 */
static void stmmac_free_tx_skbufs(struct stmmac_priv *priv)
{
	u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
	u32 queue;

	for (queue = 0; queue < tx_queue_cnt; queue++)
		dma_free_tx_skbufs(priv, queue);
}

/**
 * free_dma_rx_desc_resources - free RX dma desc resources
 * @priv: private structure
 */
static void free_dma_rx_desc_resources(struct stmmac_priv *priv)
{
	u32 rx_count = priv->plat->rx_queues_to_use;
	u32 queue;

	/* Free RX queue resources */
	for (queue = 0; queue < rx_count; queue++) {
		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];

		/* Release the DMA RX socket buffers */
		dma_free_rx_skbufs(priv, queue);

		/* Free DMA regions of consistent memory previously allocated */
		if (!priv->extend_desc)
			dma_free_coherent(priv->device, priv->dma_rx_size *
					  sizeof(struct dma_desc),
					  rx_q->dma_rx, rx_q->dma_rx_phy);
		else
			dma_free_coherent(priv->device, priv->dma_rx_size *
					  sizeof(struct dma_extended_desc),
					  rx_q->dma_erx, rx_q->dma_rx_phy);

		kfree(rx_q->buf_pool);
		if (rx_q->page_pool)
			page_pool_destroy(rx_q->page_pool);
	}
}

/**
 * free_dma_tx_desc_resources - free TX dma desc resources
 * @priv: private structure
 */
static void free_dma_tx_desc_resources(struct stmmac_priv *priv)
{
	u32 tx_count = priv->plat->tx_queues_to_use;
	u32 queue;

	/* Free TX queue resources */
	for (queue = 0; queue < tx_count; queue++) {
		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
		size_t size;
		void *addr;

		/* Release the DMA TX socket buffers */
		dma_free_tx_skbufs(priv, queue);

		if (priv->extend_desc) {
			size = sizeof(struct dma_extended_desc);
			addr = tx_q->dma_etx;
		} else if (tx_q->tbs & STMMAC_TBS_AVAIL) {
			size = sizeof(struct dma_edesc);
			addr = tx_q->dma_entx;
		} else {
			size = sizeof(struct dma_desc);
			addr = tx_q->dma_tx;
		}

		size *= priv->dma_tx_size;

		dma_free_coherent(priv->device, size, addr, tx_q->dma_tx_phy);

		kfree(tx_q->tx_skbuff_dma);
		kfree(tx_q->tx_skbuff);
	}
}

/**
 * alloc_dma_rx_desc_resources - alloc RX resources.
 * @priv: private structure
 * Description: according to which descriptor can be used (extend or basic)
 * this function allocates the resources for TX and RX paths. In case of
 * reception, for example, it pre-allocates the RX socket buffer in order to
 * allow zero-copy mechanism.
 */
static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv)
{
	u32 rx_count = priv->plat->rx_queues_to_use;
	int ret = -ENOMEM;
	u32 queue;

	/* RX queues buffers and DMA */
	for (queue = 0; queue < rx_count; queue++) {
		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
		struct page_pool_params pp_params = { 0 };
		unsigned int num_pages;

		rx_q->queue_index = queue;
		rx_q->priv_data = priv;

		pp_params.flags = PP_FLAG_DMA_MAP;
		pp_params.pool_size = priv->dma_rx_size;
		num_pages = DIV_ROUND_UP(priv->dma_buf_sz, PAGE_SIZE);
		pp_params.order = ilog2(num_pages);
		pp_params.nid = dev_to_node(priv->device);
		pp_params.dev = priv->device;
		pp_params.dma_dir = DMA_FROM_DEVICE;

		rx_q->page_pool = page_pool_create(&pp_params);
		if (IS_ERR(rx_q->page_pool)) {
			ret = PTR_ERR(rx_q->page_pool);
			rx_q->page_pool = NULL;
			goto err_dma;
		}

		rx_q->buf_pool = kcalloc(priv->dma_rx_size,
					 sizeof(*rx_q->buf_pool),
					 GFP_KERNEL);
		if (!rx_q->buf_pool)
			goto err_dma;

		if (priv->extend_desc) {
			rx_q->dma_erx = dma_alloc_coherent(priv->device,
							   priv->dma_rx_size *
							   sizeof(struct dma_extended_desc),
							   &rx_q->dma_rx_phy,
							   GFP_KERNEL);
			if (!rx_q->dma_erx)
				goto err_dma;

		} else {
			rx_q->dma_rx = dma_alloc_coherent(priv->device,
							  priv->dma_rx_size *
							  sizeof(struct dma_desc),
							  &rx_q->dma_rx_phy,
							  GFP_KERNEL);
			if (!rx_q->dma_rx)
				goto err_dma;
		}
	}

	return 0;

err_dma:
	free_dma_rx_desc_resources(priv);

	return ret;
}

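/* Worked example (added for clarity) for the page_pool sizing above,
 * assuming PAGE_SIZE = 4096:
 *
 *   dma_buf_sz = 1536  -> num_pages = 1 -> pp_params.order = 0 (4 KiB page)
 *   dma_buf_sz = 16384 -> num_pages = 4 -> pp_params.order = 2 (16 KiB
 *                                          compound page)
 *
 * Each RX buffer therefore comes from a single page-pool allocation large
 * enough to hold one frame of the configured DMA buffer size.
 */
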
1893 */ 1894 static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv) 1895 { 1896 u32 tx_count = priv->plat->tx_queues_to_use; 1897 int ret = -ENOMEM; 1898 u32 queue; 1899 1900 /* TX queues buffers and DMA */ 1901 for (queue = 0; queue < tx_count; queue++) { 1902 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue]; 1903 size_t size; 1904 void *addr; 1905 1906 tx_q->queue_index = queue; 1907 tx_q->priv_data = priv; 1908 1909 tx_q->tx_skbuff_dma = kcalloc(priv->dma_tx_size, 1910 sizeof(*tx_q->tx_skbuff_dma), 1911 GFP_KERNEL); 1912 if (!tx_q->tx_skbuff_dma) 1913 goto err_dma; 1914 1915 tx_q->tx_skbuff = kcalloc(priv->dma_tx_size, 1916 sizeof(struct sk_buff *), 1917 GFP_KERNEL); 1918 if (!tx_q->tx_skbuff) 1919 goto err_dma; 1920 1921 if (priv->extend_desc) 1922 size = sizeof(struct dma_extended_desc); 1923 else if (tx_q->tbs & STMMAC_TBS_AVAIL) 1924 size = sizeof(struct dma_edesc); 1925 else 1926 size = sizeof(struct dma_desc); 1927 1928 size *= priv->dma_tx_size; 1929 1930 addr = dma_alloc_coherent(priv->device, size, 1931 &tx_q->dma_tx_phy, GFP_KERNEL); 1932 if (!addr) 1933 goto err_dma; 1934 1935 if (priv->extend_desc) 1936 tx_q->dma_etx = addr; 1937 else if (tx_q->tbs & STMMAC_TBS_AVAIL) 1938 tx_q->dma_entx = addr; 1939 else 1940 tx_q->dma_tx = addr; 1941 } 1942 1943 return 0; 1944 1945 err_dma: 1946 free_dma_tx_desc_resources(priv); 1947 return ret; 1948 } 1949 1950 /** 1951 * alloc_dma_desc_resources - alloc TX/RX resources. 1952 * @priv: private structure 1953 * Description: according to which descriptor can be used (extend or basic) 1954 * this function allocates the resources for TX and RX paths. In case of 1955 * reception, for example, it pre-allocated the RX socket buffer in order to 1956 * allow zero-copy mechanism. 1957 */ 1958 static int alloc_dma_desc_resources(struct stmmac_priv *priv) 1959 { 1960 /* RX Allocation */ 1961 int ret = alloc_dma_rx_desc_resources(priv); 1962 1963 if (ret) 1964 return ret; 1965 1966 ret = alloc_dma_tx_desc_resources(priv); 1967 1968 return ret; 1969 } 1970 1971 /** 1972 * free_dma_desc_resources - free dma desc resources 1973 * @priv: private structure 1974 */ 1975 static void free_dma_desc_resources(struct stmmac_priv *priv) 1976 { 1977 /* Release the DMA RX socket buffers */ 1978 free_dma_rx_desc_resources(priv); 1979 1980 /* Release the DMA TX socket buffers */ 1981 free_dma_tx_desc_resources(priv); 1982 } 1983 1984 /** 1985 * stmmac_mac_enable_rx_queues - Enable MAC rx queues 1986 * @priv: driver private structure 1987 * Description: It is used for enabling the rx queues in the MAC 1988 */ 1989 static void stmmac_mac_enable_rx_queues(struct stmmac_priv *priv) 1990 { 1991 u32 rx_queues_count = priv->plat->rx_queues_to_use; 1992 int queue; 1993 u8 mode; 1994 1995 for (queue = 0; queue < rx_queues_count; queue++) { 1996 mode = priv->plat->rx_queues_cfg[queue].mode_to_use; 1997 stmmac_rx_queue_enable(priv, priv->hw, mode, queue); 1998 } 1999 } 2000 2001 /** 2002 * stmmac_start_rx_dma - start RX DMA channel 2003 * @priv: driver private structure 2004 * @chan: RX channel index 2005 * Description: 2006 * This starts a RX DMA channel 2007 */ 2008 static void stmmac_start_rx_dma(struct stmmac_priv *priv, u32 chan) 2009 { 2010 netdev_dbg(priv->dev, "DMA RX processes started in channel %d\n", chan); 2011 stmmac_start_rx(priv, priv->ioaddr, chan); 2012 } 2013 2014 /** 2015 * stmmac_start_tx_dma - start TX DMA channel 2016 * @priv: driver private structure 2017 * @chan: TX channel index 2018 * Description: 2019 * This starts a TX DMA channel 2020 
*/ 2021 static void stmmac_start_tx_dma(struct stmmac_priv *priv, u32 chan) 2022 { 2023 netdev_dbg(priv->dev, "DMA TX processes started in channel %d\n", chan); 2024 stmmac_start_tx(priv, priv->ioaddr, chan); 2025 } 2026 2027 /** 2028 * stmmac_stop_rx_dma - stop RX DMA channel 2029 * @priv: driver private structure 2030 * @chan: RX channel index 2031 * Description: 2032 * This stops a RX DMA channel 2033 */ 2034 static void stmmac_stop_rx_dma(struct stmmac_priv *priv, u32 chan) 2035 { 2036 netdev_dbg(priv->dev, "DMA RX processes stopped in channel %d\n", chan); 2037 stmmac_stop_rx(priv, priv->ioaddr, chan); 2038 } 2039 2040 /** 2041 * stmmac_stop_tx_dma - stop TX DMA channel 2042 * @priv: driver private structure 2043 * @chan: TX channel index 2044 * Description: 2045 * This stops a TX DMA channel 2046 */ 2047 static void stmmac_stop_tx_dma(struct stmmac_priv *priv, u32 chan) 2048 { 2049 netdev_dbg(priv->dev, "DMA TX processes stopped in channel %d\n", chan); 2050 stmmac_stop_tx(priv, priv->ioaddr, chan); 2051 } 2052 2053 /** 2054 * stmmac_start_all_dma - start all RX and TX DMA channels 2055 * @priv: driver private structure 2056 * Description: 2057 * This starts all the RX and TX DMA channels 2058 */ 2059 static void stmmac_start_all_dma(struct stmmac_priv *priv) 2060 { 2061 u32 rx_channels_count = priv->plat->rx_queues_to_use; 2062 u32 tx_channels_count = priv->plat->tx_queues_to_use; 2063 u32 chan = 0; 2064 2065 for (chan = 0; chan < rx_channels_count; chan++) 2066 stmmac_start_rx_dma(priv, chan); 2067 2068 for (chan = 0; chan < tx_channels_count; chan++) 2069 stmmac_start_tx_dma(priv, chan); 2070 } 2071 2072 /** 2073 * stmmac_stop_all_dma - stop all RX and TX DMA channels 2074 * @priv: driver private structure 2075 * Description: 2076 * This stops the RX and TX DMA channels 2077 */ 2078 static void stmmac_stop_all_dma(struct stmmac_priv *priv) 2079 { 2080 u32 rx_channels_count = priv->plat->rx_queues_to_use; 2081 u32 tx_channels_count = priv->plat->tx_queues_to_use; 2082 u32 chan = 0; 2083 2084 for (chan = 0; chan < rx_channels_count; chan++) 2085 stmmac_stop_rx_dma(priv, chan); 2086 2087 for (chan = 0; chan < tx_channels_count; chan++) 2088 stmmac_stop_tx_dma(priv, chan); 2089 } 2090 2091 /** 2092 * stmmac_dma_operation_mode - HW DMA operation mode 2093 * @priv: driver private structure 2094 * Description: it is used for configuring the DMA operation mode register in 2095 * order to program the tx/rx DMA thresholds or Store-And-Forward mode. 2096 */ 2097 static void stmmac_dma_operation_mode(struct stmmac_priv *priv) 2098 { 2099 u32 rx_channels_count = priv->plat->rx_queues_to_use; 2100 u32 tx_channels_count = priv->plat->tx_queues_to_use; 2101 int rxfifosz = priv->plat->rx_fifo_size; 2102 int txfifosz = priv->plat->tx_fifo_size; 2103 u32 txmode = 0; 2104 u32 rxmode = 0; 2105 u32 chan = 0; 2106 u8 qmode = 0; 2107 2108 if (rxfifosz == 0) 2109 rxfifosz = priv->dma_cap.rx_fifo_size; 2110 if (txfifosz == 0) 2111 txfifosz = priv->dma_cap.tx_fifo_size; 2112 2113 /* Adjust for real per queue fifo size */ 2114 rxfifosz /= rx_channels_count; 2115 txfifosz /= tx_channels_count; 2116 2117 if (priv->plat->force_thresh_dma_mode) { 2118 txmode = tc; 2119 rxmode = tc; 2120 } else if (priv->plat->force_sf_dma_mode || priv->plat->tx_coe) { 2121 /* 2122 * In case of GMAC, SF mode can be enabled 2123 * to perform the TX COE in HW. This depends on: 2124 * 1) TX COE if actually supported 2125 * 2) There is no bugged Jumbo frame support 2126 * that needs to not insert csum in the TDES. 
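 * When neither condition holds, the TX path falls back to the
 * threshold value (tc) while the RX path keeps Store-And-Forward.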
2127 */ 2128 txmode = SF_DMA_MODE; 2129 rxmode = SF_DMA_MODE; 2130 priv->xstats.threshold = SF_DMA_MODE; 2131 } else { 2132 txmode = tc; 2133 rxmode = SF_DMA_MODE; 2134 } 2135 2136 /* configure all channels */ 2137 for (chan = 0; chan < rx_channels_count; chan++) { 2138 qmode = priv->plat->rx_queues_cfg[chan].mode_to_use; 2139 2140 stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan, 2141 rxfifosz, qmode); 2142 stmmac_set_dma_bfsize(priv, priv->ioaddr, priv->dma_buf_sz, 2143 chan); 2144 } 2145 2146 for (chan = 0; chan < tx_channels_count; chan++) { 2147 qmode = priv->plat->tx_queues_cfg[chan].mode_to_use; 2148 2149 stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan, 2150 txfifosz, qmode); 2151 } 2152 } 2153 2154 /** 2155 * stmmac_tx_clean - to manage the transmission completion 2156 * @priv: driver private structure 2157 * @budget: napi budget limiting this functions packet handling 2158 * @queue: TX queue index 2159 * Description: it reclaims the transmit resources after transmission completes. 2160 */ 2161 static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue) 2162 { 2163 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue]; 2164 unsigned int bytes_compl = 0, pkts_compl = 0; 2165 unsigned int entry, count = 0; 2166 2167 __netif_tx_lock_bh(netdev_get_tx_queue(priv->dev, queue)); 2168 2169 priv->xstats.tx_clean++; 2170 2171 entry = tx_q->dirty_tx; 2172 while ((entry != tx_q->cur_tx) && (count < budget)) { 2173 struct sk_buff *skb = tx_q->tx_skbuff[entry]; 2174 struct dma_desc *p; 2175 int status; 2176 2177 if (priv->extend_desc) 2178 p = (struct dma_desc *)(tx_q->dma_etx + entry); 2179 else if (tx_q->tbs & STMMAC_TBS_AVAIL) 2180 p = &tx_q->dma_entx[entry].basic; 2181 else 2182 p = tx_q->dma_tx + entry; 2183 2184 status = stmmac_tx_status(priv, &priv->dev->stats, 2185 &priv->xstats, p, priv->ioaddr); 2186 /* Check if the descriptor is owned by the DMA */ 2187 if (unlikely(status & tx_dma_own)) 2188 break; 2189 2190 count++; 2191 2192 /* Make sure descriptor fields are read after reading 2193 * the own bit. 2194 */ 2195 dma_rmb(); 2196 2197 /* Just consider the last segment and ...*/ 2198 if (likely(!(status & tx_not_ls))) { 2199 /* ... 
verify the status error condition */ 2200 if (unlikely(status & tx_err)) { 2201 priv->dev->stats.tx_errors++; 2202 } else { 2203 priv->dev->stats.tx_packets++; 2204 priv->xstats.tx_pkt_n++; 2205 } 2206 stmmac_get_tx_hwtstamp(priv, p, skb); 2207 } 2208 2209 if (likely(tx_q->tx_skbuff_dma[entry].buf)) { 2210 if (tx_q->tx_skbuff_dma[entry].map_as_page) 2211 dma_unmap_page(priv->device, 2212 tx_q->tx_skbuff_dma[entry].buf, 2213 tx_q->tx_skbuff_dma[entry].len, 2214 DMA_TO_DEVICE); 2215 else 2216 dma_unmap_single(priv->device, 2217 tx_q->tx_skbuff_dma[entry].buf, 2218 tx_q->tx_skbuff_dma[entry].len, 2219 DMA_TO_DEVICE); 2220 tx_q->tx_skbuff_dma[entry].buf = 0; 2221 tx_q->tx_skbuff_dma[entry].len = 0; 2222 tx_q->tx_skbuff_dma[entry].map_as_page = false; 2223 } 2224 2225 stmmac_clean_desc3(priv, tx_q, p); 2226 2227 tx_q->tx_skbuff_dma[entry].last_segment = false; 2228 tx_q->tx_skbuff_dma[entry].is_jumbo = false; 2229 2230 if (likely(skb != NULL)) { 2231 pkts_compl++; 2232 bytes_compl += skb->len; 2233 dev_consume_skb_any(skb); 2234 tx_q->tx_skbuff[entry] = NULL; 2235 } 2236 2237 stmmac_release_tx_desc(priv, p, priv->mode); 2238 2239 entry = STMMAC_GET_ENTRY(entry, priv->dma_tx_size); 2240 } 2241 tx_q->dirty_tx = entry; 2242 2243 netdev_tx_completed_queue(netdev_get_tx_queue(priv->dev, queue), 2244 pkts_compl, bytes_compl); 2245 2246 if (unlikely(netif_tx_queue_stopped(netdev_get_tx_queue(priv->dev, 2247 queue))) && 2248 stmmac_tx_avail(priv, queue) > STMMAC_TX_THRESH(priv)) { 2249 2250 netif_dbg(priv, tx_done, priv->dev, 2251 "%s: restart transmit\n", __func__); 2252 netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, queue)); 2253 } 2254 2255 if (priv->eee_enabled && !priv->tx_path_in_lpi_mode && 2256 priv->eee_sw_timer_en) { 2257 stmmac_enable_eee_mode(priv); 2258 mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer)); 2259 } 2260 2261 /* We still have pending packets, let's call for a new scheduling */ 2262 if (tx_q->dirty_tx != tx_q->cur_tx) 2263 hrtimer_start(&tx_q->txtimer, 2264 STMMAC_COAL_TIMER(priv->tx_coal_timer[queue]), 2265 HRTIMER_MODE_REL); 2266 2267 __netif_tx_unlock_bh(netdev_get_tx_queue(priv->dev, queue)); 2268 2269 return count; 2270 } 2271 2272 /** 2273 * stmmac_tx_err - to manage the tx error 2274 * @priv: driver private structure 2275 * @chan: channel index 2276 * Description: it cleans the descriptors and restarts the transmission 2277 * in case of transmission errors. 2278 */ 2279 static void stmmac_tx_err(struct stmmac_priv *priv, u32 chan) 2280 { 2281 struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan]; 2282 2283 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, chan)); 2284 2285 stmmac_stop_tx_dma(priv, chan); 2286 dma_free_tx_skbufs(priv, chan); 2287 stmmac_clear_tx_descriptors(priv, chan); 2288 tx_q->dirty_tx = 0; 2289 tx_q->cur_tx = 0; 2290 tx_q->mss = 0; 2291 netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, chan)); 2292 stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg, 2293 tx_q->dma_tx_phy, chan); 2294 stmmac_start_tx_dma(priv, chan); 2295 2296 priv->dev->stats.tx_errors++; 2297 netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, chan)); 2298 } 2299 2300 /** 2301 * stmmac_set_dma_operation_mode - Set DMA operation mode by channel 2302 * @priv: driver private structure 2303 * @txmode: TX operating mode 2304 * @rxmode: RX operating mode 2305 * @chan: channel index 2306 * Description: it is used for configuring of the DMA operation mode in 2307 * runtime in order to program the tx/rx DMA thresholds or Store-And-Forward 2308 * mode. 
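 * The per-channel FIFO sizes default to the values reported in dma_cap
 * when the platform does not provide them, and are then divided across
 * the active RX/TX channels.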
2309 */ 2310 static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode, 2311 u32 rxmode, u32 chan) 2312 { 2313 u8 rxqmode = priv->plat->rx_queues_cfg[chan].mode_to_use; 2314 u8 txqmode = priv->plat->tx_queues_cfg[chan].mode_to_use; 2315 u32 rx_channels_count = priv->plat->rx_queues_to_use; 2316 u32 tx_channels_count = priv->plat->tx_queues_to_use; 2317 int rxfifosz = priv->plat->rx_fifo_size; 2318 int txfifosz = priv->plat->tx_fifo_size; 2319 2320 if (rxfifosz == 0) 2321 rxfifosz = priv->dma_cap.rx_fifo_size; 2322 if (txfifosz == 0) 2323 txfifosz = priv->dma_cap.tx_fifo_size; 2324 2325 /* Adjust for real per queue fifo size */ 2326 rxfifosz /= rx_channels_count; 2327 txfifosz /= tx_channels_count; 2328 2329 stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan, rxfifosz, rxqmode); 2330 stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan, txfifosz, txqmode); 2331 } 2332 2333 static bool stmmac_safety_feat_interrupt(struct stmmac_priv *priv) 2334 { 2335 int ret; 2336 2337 ret = stmmac_safety_feat_irq_status(priv, priv->dev, 2338 priv->ioaddr, priv->dma_cap.asp, &priv->sstats); 2339 if (ret && (ret != -EINVAL)) { 2340 stmmac_global_err(priv); 2341 return true; 2342 } 2343 2344 return false; 2345 } 2346 2347 static int stmmac_napi_check(struct stmmac_priv *priv, u32 chan, u32 dir) 2348 { 2349 int status = stmmac_dma_interrupt_status(priv, priv->ioaddr, 2350 &priv->xstats, chan, dir); 2351 struct stmmac_channel *ch = &priv->channel[chan]; 2352 unsigned long flags; 2353 2354 if ((status & handle_rx) && (chan < priv->plat->rx_queues_to_use)) { 2355 if (napi_schedule_prep(&ch->rx_napi)) { 2356 spin_lock_irqsave(&ch->lock, flags); 2357 stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 0); 2358 spin_unlock_irqrestore(&ch->lock, flags); 2359 __napi_schedule(&ch->rx_napi); 2360 } 2361 } 2362 2363 if ((status & handle_tx) && (chan < priv->plat->tx_queues_to_use)) { 2364 if (napi_schedule_prep(&ch->tx_napi)) { 2365 spin_lock_irqsave(&ch->lock, flags); 2366 stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 0, 1); 2367 spin_unlock_irqrestore(&ch->lock, flags); 2368 __napi_schedule(&ch->tx_napi); 2369 } 2370 } 2371 2372 return status; 2373 } 2374 2375 /** 2376 * stmmac_dma_interrupt - DMA ISR 2377 * @priv: driver private structure 2378 * Description: this is the DMA ISR. It is called by the main ISR. 2379 * It calls the dwmac dma routine and schedule poll method in case of some 2380 * work can be done. 2381 */ 2382 static void stmmac_dma_interrupt(struct stmmac_priv *priv) 2383 { 2384 u32 tx_channel_count = priv->plat->tx_queues_to_use; 2385 u32 rx_channel_count = priv->plat->rx_queues_to_use; 2386 u32 channels_to_check = tx_channel_count > rx_channel_count ? 2387 tx_channel_count : rx_channel_count; 2388 u32 chan; 2389 int status[max_t(u32, MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES)]; 2390 2391 /* Make sure we never check beyond our status buffer. 
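 * The status array is sized for the larger of MTL_MAX_TX_QUEUES and
 * MTL_MAX_RX_QUEUES, so channels_to_check is clamped to
 * ARRAY_SIZE(status) before it is used as an index.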
*/ 2392 if (WARN_ON_ONCE(channels_to_check > ARRAY_SIZE(status))) 2393 channels_to_check = ARRAY_SIZE(status); 2394 2395 for (chan = 0; chan < channels_to_check; chan++) 2396 status[chan] = stmmac_napi_check(priv, chan, 2397 DMA_DIR_RXTX); 2398 2399 for (chan = 0; chan < tx_channel_count; chan++) { 2400 if (unlikely(status[chan] & tx_hard_error_bump_tc)) { 2401 /* Try to bump up the dma threshold on this failure */ 2402 if (unlikely(priv->xstats.threshold != SF_DMA_MODE) && 2403 (tc <= 256)) { 2404 tc += 64; 2405 if (priv->plat->force_thresh_dma_mode) 2406 stmmac_set_dma_operation_mode(priv, 2407 tc, 2408 tc, 2409 chan); 2410 else 2411 stmmac_set_dma_operation_mode(priv, 2412 tc, 2413 SF_DMA_MODE, 2414 chan); 2415 priv->xstats.threshold = tc; 2416 } 2417 } else if (unlikely(status[chan] == tx_hard_error)) { 2418 stmmac_tx_err(priv, chan); 2419 } 2420 } 2421 } 2422 2423 /** 2424 * stmmac_mmc_setup: setup the Mac Management Counters (MMC) 2425 * @priv: driver private structure 2426 * Description: this masks the MMC irq, in fact, the counters are managed in SW. 2427 */ 2428 static void stmmac_mmc_setup(struct stmmac_priv *priv) 2429 { 2430 unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET | 2431 MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET; 2432 2433 stmmac_mmc_intr_all_mask(priv, priv->mmcaddr); 2434 2435 if (priv->dma_cap.rmon) { 2436 stmmac_mmc_ctrl(priv, priv->mmcaddr, mode); 2437 memset(&priv->mmc, 0, sizeof(struct stmmac_counters)); 2438 } else 2439 netdev_info(priv->dev, "No MAC Management Counters available\n"); 2440 } 2441 2442 /** 2443 * stmmac_get_hw_features - get MAC capabilities from the HW cap. register. 2444 * @priv: driver private structure 2445 * Description: 2446 * new GMAC chip generations have a new register to indicate the 2447 * presence of the optional feature/functions. 2448 * This can be also used to override the value passed through the 2449 * platform and necessary for old MAC10/100 and GMAC chips. 2450 */ 2451 static int stmmac_get_hw_features(struct stmmac_priv *priv) 2452 { 2453 return stmmac_get_hw_feature(priv, priv->ioaddr, &priv->dma_cap) == 0; 2454 } 2455 2456 /** 2457 * stmmac_check_ether_addr - check if the MAC addr is valid 2458 * @priv: driver private structure 2459 * Description: 2460 * it is to verify if the MAC address is valid, in case of failures it 2461 * generates a random MAC address 2462 */ 2463 static void stmmac_check_ether_addr(struct stmmac_priv *priv) 2464 { 2465 if (!is_valid_ether_addr(priv->dev->dev_addr)) { 2466 stmmac_get_umac_addr(priv, priv->hw, priv->dev->dev_addr, 0); 2467 if (!is_valid_ether_addr(priv->dev->dev_addr)) 2468 eth_hw_addr_random(priv->dev); 2469 dev_info(priv->device, "device MAC address %pM\n", 2470 priv->dev->dev_addr); 2471 } 2472 } 2473 2474 /** 2475 * stmmac_init_dma_engine - DMA init. 2476 * @priv: driver private structure 2477 * Description: 2478 * It inits the DMA invoking the specific MAC/GMAC callback. 2479 * Some DMA parameters can be passed from the platform; 2480 * in case of these are not passed a default is kept for the MAC or GMAC. 
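 * The sequence is: software reset, common DMA/AXI configuration, then
 * per-channel CSR, RX and TX channel setup including the descriptor
 * base addresses and tail pointers (the RX tail is programmed to
 * dma_rx_phy + dma_rx_size * sizeof(struct dma_desc)).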
2481 */ 2482 static int stmmac_init_dma_engine(struct stmmac_priv *priv) 2483 { 2484 u32 rx_channels_count = priv->plat->rx_queues_to_use; 2485 u32 tx_channels_count = priv->plat->tx_queues_to_use; 2486 u32 dma_csr_ch = max(rx_channels_count, tx_channels_count); 2487 struct stmmac_rx_queue *rx_q; 2488 struct stmmac_tx_queue *tx_q; 2489 u32 chan = 0; 2490 int atds = 0; 2491 int ret = 0; 2492 2493 if (!priv->plat->dma_cfg || !priv->plat->dma_cfg->pbl) { 2494 dev_err(priv->device, "Invalid DMA configuration\n"); 2495 return -EINVAL; 2496 } 2497 2498 if (priv->extend_desc && (priv->mode == STMMAC_RING_MODE)) 2499 atds = 1; 2500 2501 ret = stmmac_reset(priv, priv->ioaddr); 2502 if (ret) { 2503 dev_err(priv->device, "Failed to reset the dma\n"); 2504 return ret; 2505 } 2506 2507 /* DMA Configuration */ 2508 stmmac_dma_init(priv, priv->ioaddr, priv->plat->dma_cfg, atds); 2509 2510 if (priv->plat->axi) 2511 stmmac_axi(priv, priv->ioaddr, priv->plat->axi); 2512 2513 /* DMA CSR Channel configuration */ 2514 for (chan = 0; chan < dma_csr_ch; chan++) 2515 stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan); 2516 2517 /* DMA RX Channel Configuration */ 2518 for (chan = 0; chan < rx_channels_count; chan++) { 2519 rx_q = &priv->rx_queue[chan]; 2520 2521 stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg, 2522 rx_q->dma_rx_phy, chan); 2523 2524 rx_q->rx_tail_addr = rx_q->dma_rx_phy + 2525 (priv->dma_rx_size * 2526 sizeof(struct dma_desc)); 2527 stmmac_set_rx_tail_ptr(priv, priv->ioaddr, 2528 rx_q->rx_tail_addr, chan); 2529 } 2530 2531 /* DMA TX Channel Configuration */ 2532 for (chan = 0; chan < tx_channels_count; chan++) { 2533 tx_q = &priv->tx_queue[chan]; 2534 2535 stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg, 2536 tx_q->dma_tx_phy, chan); 2537 2538 tx_q->tx_tail_addr = tx_q->dma_tx_phy; 2539 stmmac_set_tx_tail_ptr(priv, priv->ioaddr, 2540 tx_q->tx_tail_addr, chan); 2541 } 2542 2543 return ret; 2544 } 2545 2546 static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue) 2547 { 2548 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue]; 2549 2550 hrtimer_start(&tx_q->txtimer, 2551 STMMAC_COAL_TIMER(priv->tx_coal_timer[queue]), 2552 HRTIMER_MODE_REL); 2553 } 2554 2555 /** 2556 * stmmac_tx_timer - mitigation sw timer for tx. 2557 * @t: data pointer 2558 * Description: 2559 * This is the timer handler to directly invoke the stmmac_tx_clean. 2560 */ 2561 static enum hrtimer_restart stmmac_tx_timer(struct hrtimer *t) 2562 { 2563 struct stmmac_tx_queue *tx_q = container_of(t, struct stmmac_tx_queue, txtimer); 2564 struct stmmac_priv *priv = tx_q->priv_data; 2565 struct stmmac_channel *ch; 2566 2567 ch = &priv->channel[tx_q->queue_index]; 2568 2569 if (likely(napi_schedule_prep(&ch->tx_napi))) { 2570 unsigned long flags; 2571 2572 spin_lock_irqsave(&ch->lock, flags); 2573 stmmac_disable_dma_irq(priv, priv->ioaddr, ch->index, 0, 1); 2574 spin_unlock_irqrestore(&ch->lock, flags); 2575 __napi_schedule(&ch->tx_napi); 2576 } 2577 2578 return HRTIMER_NORESTART; 2579 } 2580 2581 /** 2582 * stmmac_init_coalesce - init mitigation options. 2583 * @priv: driver private structure 2584 * Description: 2585 * This inits the coalesce parameters: i.e. timer rate, 2586 * timer handler and default threshold used for enabling the 2587 * interrupt on completion bit. 
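 * Each TX channel gets the STMMAC_TX_FRAMES / STMMAC_COAL_TX_TIMER
 * defaults and an hrtimer bound to stmmac_tx_timer; RX channels only
 * get the STMMAC_RX_FRAMES default frame count.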
2588 */ 2589 static void stmmac_init_coalesce(struct stmmac_priv *priv) 2590 { 2591 u32 tx_channel_count = priv->plat->tx_queues_to_use; 2592 u32 rx_channel_count = priv->plat->rx_queues_to_use; 2593 u32 chan; 2594 2595 for (chan = 0; chan < tx_channel_count; chan++) { 2596 struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan]; 2597 2598 priv->tx_coal_frames[chan] = STMMAC_TX_FRAMES; 2599 priv->tx_coal_timer[chan] = STMMAC_COAL_TX_TIMER; 2600 2601 hrtimer_init(&tx_q->txtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); 2602 tx_q->txtimer.function = stmmac_tx_timer; 2603 } 2604 2605 for (chan = 0; chan < rx_channel_count; chan++) 2606 priv->rx_coal_frames[chan] = STMMAC_RX_FRAMES; 2607 } 2608 2609 static void stmmac_set_rings_length(struct stmmac_priv *priv) 2610 { 2611 u32 rx_channels_count = priv->plat->rx_queues_to_use; 2612 u32 tx_channels_count = priv->plat->tx_queues_to_use; 2613 u32 chan; 2614 2615 /* set TX ring length */ 2616 for (chan = 0; chan < tx_channels_count; chan++) 2617 stmmac_set_tx_ring_len(priv, priv->ioaddr, 2618 (priv->dma_tx_size - 1), chan); 2619 2620 /* set RX ring length */ 2621 for (chan = 0; chan < rx_channels_count; chan++) 2622 stmmac_set_rx_ring_len(priv, priv->ioaddr, 2623 (priv->dma_rx_size - 1), chan); 2624 } 2625 2626 /** 2627 * stmmac_set_tx_queue_weight - Set TX queue weight 2628 * @priv: driver private structure 2629 * Description: It is used for setting TX queues weight 2630 */ 2631 static void stmmac_set_tx_queue_weight(struct stmmac_priv *priv) 2632 { 2633 u32 tx_queues_count = priv->plat->tx_queues_to_use; 2634 u32 weight; 2635 u32 queue; 2636 2637 for (queue = 0; queue < tx_queues_count; queue++) { 2638 weight = priv->plat->tx_queues_cfg[queue].weight; 2639 stmmac_set_mtl_tx_queue_weight(priv, priv->hw, weight, queue); 2640 } 2641 } 2642 2643 /** 2644 * stmmac_configure_cbs - Configure CBS in TX queue 2645 * @priv: driver private structure 2646 * Description: It is used for configuring CBS in AVB TX queues 2647 */ 2648 static void stmmac_configure_cbs(struct stmmac_priv *priv) 2649 { 2650 u32 tx_queues_count = priv->plat->tx_queues_to_use; 2651 u32 mode_to_use; 2652 u32 queue; 2653 2654 /* queue 0 is reserved for legacy traffic */ 2655 for (queue = 1; queue < tx_queues_count; queue++) { 2656 mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use; 2657 if (mode_to_use == MTL_QUEUE_DCB) 2658 continue; 2659 2660 stmmac_config_cbs(priv, priv->hw, 2661 priv->plat->tx_queues_cfg[queue].send_slope, 2662 priv->plat->tx_queues_cfg[queue].idle_slope, 2663 priv->plat->tx_queues_cfg[queue].high_credit, 2664 priv->plat->tx_queues_cfg[queue].low_credit, 2665 queue); 2666 } 2667 } 2668 2669 /** 2670 * stmmac_rx_queue_dma_chan_map - Map RX queue to RX dma channel 2671 * @priv: driver private structure 2672 * Description: It is used for mapping RX queues to RX dma channels 2673 */ 2674 static void stmmac_rx_queue_dma_chan_map(struct stmmac_priv *priv) 2675 { 2676 u32 rx_queues_count = priv->plat->rx_queues_to_use; 2677 u32 queue; 2678 u32 chan; 2679 2680 for (queue = 0; queue < rx_queues_count; queue++) { 2681 chan = priv->plat->rx_queues_cfg[queue].chan; 2682 stmmac_map_mtl_to_dma(priv, priv->hw, queue, chan); 2683 } 2684 } 2685 2686 /** 2687 * stmmac_mac_config_rx_queues_prio - Configure RX Queue priority 2688 * @priv: driver private structure 2689 * Description: It is used for configuring the RX Queue Priority 2690 */ 2691 static void stmmac_mac_config_rx_queues_prio(struct stmmac_priv *priv) 2692 { 2693 u32 rx_queues_count = priv->plat->rx_queues_to_use; 2694 u32 
queue; 2695 u32 prio; 2696 2697 for (queue = 0; queue < rx_queues_count; queue++) { 2698 if (!priv->plat->rx_queues_cfg[queue].use_prio) 2699 continue; 2700 2701 prio = priv->plat->rx_queues_cfg[queue].prio; 2702 stmmac_rx_queue_prio(priv, priv->hw, prio, queue); 2703 } 2704 } 2705 2706 /** 2707 * stmmac_mac_config_tx_queues_prio - Configure TX Queue priority 2708 * @priv: driver private structure 2709 * Description: It is used for configuring the TX Queue Priority 2710 */ 2711 static void stmmac_mac_config_tx_queues_prio(struct stmmac_priv *priv) 2712 { 2713 u32 tx_queues_count = priv->plat->tx_queues_to_use; 2714 u32 queue; 2715 u32 prio; 2716 2717 for (queue = 0; queue < tx_queues_count; queue++) { 2718 if (!priv->plat->tx_queues_cfg[queue].use_prio) 2719 continue; 2720 2721 prio = priv->plat->tx_queues_cfg[queue].prio; 2722 stmmac_tx_queue_prio(priv, priv->hw, prio, queue); 2723 } 2724 } 2725 2726 /** 2727 * stmmac_mac_config_rx_queues_routing - Configure RX Queue Routing 2728 * @priv: driver private structure 2729 * Description: It is used for configuring the RX queue routing 2730 */ 2731 static void stmmac_mac_config_rx_queues_routing(struct stmmac_priv *priv) 2732 { 2733 u32 rx_queues_count = priv->plat->rx_queues_to_use; 2734 u32 queue; 2735 u8 packet; 2736 2737 for (queue = 0; queue < rx_queues_count; queue++) { 2738 /* no specific packet type routing specified for the queue */ 2739 if (priv->plat->rx_queues_cfg[queue].pkt_route == 0x0) 2740 continue; 2741 2742 packet = priv->plat->rx_queues_cfg[queue].pkt_route; 2743 stmmac_rx_queue_routing(priv, priv->hw, packet, queue); 2744 } 2745 } 2746 2747 static void stmmac_mac_config_rss(struct stmmac_priv *priv) 2748 { 2749 if (!priv->dma_cap.rssen || !priv->plat->rss_en) { 2750 priv->rss.enable = false; 2751 return; 2752 } 2753 2754 if (priv->dev->features & NETIF_F_RXHASH) 2755 priv->rss.enable = true; 2756 else 2757 priv->rss.enable = false; 2758 2759 stmmac_rss_configure(priv, priv->hw, &priv->rss, 2760 priv->plat->rx_queues_to_use); 2761 } 2762 2763 /** 2764 * stmmac_mtl_configuration - Configure MTL 2765 * @priv: driver private structure 2766 * Description: It is used for configurring MTL 2767 */ 2768 static void stmmac_mtl_configuration(struct stmmac_priv *priv) 2769 { 2770 u32 rx_queues_count = priv->plat->rx_queues_to_use; 2771 u32 tx_queues_count = priv->plat->tx_queues_to_use; 2772 2773 if (tx_queues_count > 1) 2774 stmmac_set_tx_queue_weight(priv); 2775 2776 /* Configure MTL RX algorithms */ 2777 if (rx_queues_count > 1) 2778 stmmac_prog_mtl_rx_algorithms(priv, priv->hw, 2779 priv->plat->rx_sched_algorithm); 2780 2781 /* Configure MTL TX algorithms */ 2782 if (tx_queues_count > 1) 2783 stmmac_prog_mtl_tx_algorithms(priv, priv->hw, 2784 priv->plat->tx_sched_algorithm); 2785 2786 /* Configure CBS in AVB TX queues */ 2787 if (tx_queues_count > 1) 2788 stmmac_configure_cbs(priv); 2789 2790 /* Map RX MTL to DMA channels */ 2791 stmmac_rx_queue_dma_chan_map(priv); 2792 2793 /* Enable MAC RX Queues */ 2794 stmmac_mac_enable_rx_queues(priv); 2795 2796 /* Set RX priorities */ 2797 if (rx_queues_count > 1) 2798 stmmac_mac_config_rx_queues_prio(priv); 2799 2800 /* Set TX priorities */ 2801 if (tx_queues_count > 1) 2802 stmmac_mac_config_tx_queues_prio(priv); 2803 2804 /* Set RX routing */ 2805 if (rx_queues_count > 1) 2806 stmmac_mac_config_rx_queues_routing(priv); 2807 2808 /* Receive Side Scaling */ 2809 if (rx_queues_count > 1) 2810 stmmac_mac_config_rss(priv); 2811 } 2812 2813 static void stmmac_safety_feat_configuration(struct 
stmmac_priv *priv) 2814 { 2815 if (priv->dma_cap.asp) { 2816 netdev_info(priv->dev, "Enabling Safety Features\n"); 2817 stmmac_safety_feat_config(priv, priv->ioaddr, priv->dma_cap.asp); 2818 } else { 2819 netdev_info(priv->dev, "No Safety Features support found\n"); 2820 } 2821 } 2822 2823 static int stmmac_fpe_start_wq(struct stmmac_priv *priv) 2824 { 2825 char *name; 2826 2827 clear_bit(__FPE_TASK_SCHED, &priv->fpe_task_state); 2828 2829 name = priv->wq_name; 2830 sprintf(name, "%s-fpe", priv->dev->name); 2831 2832 priv->fpe_wq = create_singlethread_workqueue(name); 2833 if (!priv->fpe_wq) { 2834 netdev_err(priv->dev, "%s: Failed to create workqueue\n", name); 2835 2836 return -ENOMEM; 2837 } 2838 netdev_info(priv->dev, "FPE workqueue start"); 2839 2840 return 0; 2841 } 2842 2843 /** 2844 * stmmac_hw_setup - setup mac in a usable state. 2845 * @dev : pointer to the device structure. 2846 * @init_ptp: initialize PTP if set 2847 * Description: 2848 * this is the main function to setup the HW in a usable state because the 2849 * dma engine is reset, the core registers are configured (e.g. AXI, 2850 * Checksum features, timers). The DMA is ready to start receiving and 2851 * transmitting. 2852 * Return value: 2853 * 0 on success and an appropriate (-)ve integer as defined in errno.h 2854 * file on failure. 2855 */ 2856 static int stmmac_hw_setup(struct net_device *dev, bool init_ptp) 2857 { 2858 struct stmmac_priv *priv = netdev_priv(dev); 2859 u32 rx_cnt = priv->plat->rx_queues_to_use; 2860 u32 tx_cnt = priv->plat->tx_queues_to_use; 2861 u32 chan; 2862 int ret; 2863 2864 /* DMA initialization and SW reset */ 2865 ret = stmmac_init_dma_engine(priv); 2866 if (ret < 0) { 2867 netdev_err(priv->dev, "%s: DMA engine initialization failed\n", 2868 __func__); 2869 return ret; 2870 } 2871 2872 /* Copy the MAC addr into the HW */ 2873 stmmac_set_umac_addr(priv, priv->hw, dev->dev_addr, 0); 2874 2875 /* PS and related bits will be programmed according to the speed */ 2876 if (priv->hw->pcs) { 2877 int speed = priv->plat->mac_port_sel_speed; 2878 2879 if ((speed == SPEED_10) || (speed == SPEED_100) || 2880 (speed == SPEED_1000)) { 2881 priv->hw->ps = speed; 2882 } else { 2883 dev_warn(priv->device, "invalid port speed\n"); 2884 priv->hw->ps = 0; 2885 } 2886 } 2887 2888 /* Initialize the MAC Core */ 2889 stmmac_core_init(priv, priv->hw, dev); 2890 2891 /* Initialize MTL*/ 2892 stmmac_mtl_configuration(priv); 2893 2894 /* Initialize Safety Features */ 2895 stmmac_safety_feat_configuration(priv); 2896 2897 ret = stmmac_rx_ipc(priv, priv->hw); 2898 if (!ret) { 2899 netdev_warn(priv->dev, "RX IPC Checksum Offload disabled\n"); 2900 priv->plat->rx_coe = STMMAC_RX_COE_NONE; 2901 priv->hw->rx_csum = 0; 2902 } 2903 2904 /* Enable the MAC Rx/Tx */ 2905 stmmac_mac_set(priv, priv->ioaddr, true); 2906 2907 /* Set the HW DMA mode and the COE */ 2908 stmmac_dma_operation_mode(priv); 2909 2910 stmmac_mmc_setup(priv); 2911 2912 if (init_ptp) { 2913 ret = clk_prepare_enable(priv->plat->clk_ptp_ref); 2914 if (ret < 0) 2915 netdev_warn(priv->dev, "failed to enable PTP reference clock: %d\n", ret); 2916 2917 ret = stmmac_init_ptp(priv); 2918 if (ret == -EOPNOTSUPP) 2919 netdev_warn(priv->dev, "PTP not supported by HW\n"); 2920 else if (ret) 2921 netdev_warn(priv->dev, "PTP init failed\n"); 2922 } 2923 2924 priv->eee_tw_timer = STMMAC_DEFAULT_TWT_LS; 2925 2926 /* Convert the timer from msec to usec */ 2927 if (!priv->tx_lpi_timer) 2928 priv->tx_lpi_timer = eee_timer * 1000; 2929 2930 if (priv->use_riwt) { 2931 u32 queue; 
2932 2933 for (queue = 0; queue < rx_cnt; queue++) { 2934 if (!priv->rx_riwt[queue]) 2935 priv->rx_riwt[queue] = DEF_DMA_RIWT; 2936 2937 stmmac_rx_watchdog(priv, priv->ioaddr, 2938 priv->rx_riwt[queue], queue); 2939 } 2940 } 2941 2942 if (priv->hw->pcs) 2943 stmmac_pcs_ctrl_ane(priv, priv->ioaddr, 1, priv->hw->ps, 0); 2944 2945 /* set TX and RX rings length */ 2946 stmmac_set_rings_length(priv); 2947 2948 /* Enable TSO */ 2949 if (priv->tso) { 2950 for (chan = 0; chan < tx_cnt; chan++) 2951 stmmac_enable_tso(priv, priv->ioaddr, 1, chan); 2952 } 2953 2954 /* Enable Split Header */ 2955 if (priv->sph && priv->hw->rx_csum) { 2956 for (chan = 0; chan < rx_cnt; chan++) 2957 stmmac_enable_sph(priv, priv->ioaddr, 1, chan); 2958 } 2959 2960 /* VLAN Tag Insertion */ 2961 if (priv->dma_cap.vlins) 2962 stmmac_enable_vlan(priv, priv->hw, STMMAC_VLAN_INSERT); 2963 2964 /* TBS */ 2965 for (chan = 0; chan < tx_cnt; chan++) { 2966 struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan]; 2967 int enable = tx_q->tbs & STMMAC_TBS_AVAIL; 2968 2969 stmmac_enable_tbs(priv, priv->ioaddr, enable, chan); 2970 } 2971 2972 /* Configure real RX and TX queues */ 2973 netif_set_real_num_rx_queues(dev, priv->plat->rx_queues_to_use); 2974 netif_set_real_num_tx_queues(dev, priv->plat->tx_queues_to_use); 2975 2976 /* Start the ball rolling... */ 2977 stmmac_start_all_dma(priv); 2978 2979 if (priv->dma_cap.fpesel) { 2980 stmmac_fpe_start_wq(priv); 2981 2982 if (priv->plat->fpe_cfg->enable) 2983 stmmac_fpe_handshake(priv, true); 2984 } 2985 2986 return 0; 2987 } 2988 2989 static void stmmac_hw_teardown(struct net_device *dev) 2990 { 2991 struct stmmac_priv *priv = netdev_priv(dev); 2992 2993 clk_disable_unprepare(priv->plat->clk_ptp_ref); 2994 } 2995 2996 static void stmmac_free_irq(struct net_device *dev, 2997 enum request_irq_err irq_err, int irq_idx) 2998 { 2999 struct stmmac_priv *priv = netdev_priv(dev); 3000 int j; 3001 3002 switch (irq_err) { 3003 case REQ_IRQ_ERR_ALL: 3004 irq_idx = priv->plat->tx_queues_to_use; 3005 fallthrough; 3006 case REQ_IRQ_ERR_TX: 3007 for (j = irq_idx - 1; j >= 0; j--) { 3008 if (priv->tx_irq[j] > 0) 3009 free_irq(priv->tx_irq[j], &priv->tx_queue[j]); 3010 } 3011 irq_idx = priv->plat->rx_queues_to_use; 3012 fallthrough; 3013 case REQ_IRQ_ERR_RX: 3014 for (j = irq_idx - 1; j >= 0; j--) { 3015 if (priv->rx_irq[j] > 0) 3016 free_irq(priv->rx_irq[j], &priv->rx_queue[j]); 3017 } 3018 3019 if (priv->sfty_ue_irq > 0 && priv->sfty_ue_irq != dev->irq) 3020 free_irq(priv->sfty_ue_irq, dev); 3021 fallthrough; 3022 case REQ_IRQ_ERR_SFTY_UE: 3023 if (priv->sfty_ce_irq > 0 && priv->sfty_ce_irq != dev->irq) 3024 free_irq(priv->sfty_ce_irq, dev); 3025 fallthrough; 3026 case REQ_IRQ_ERR_SFTY_CE: 3027 if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq) 3028 free_irq(priv->lpi_irq, dev); 3029 fallthrough; 3030 case REQ_IRQ_ERR_LPI: 3031 if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) 3032 free_irq(priv->wol_irq, dev); 3033 fallthrough; 3034 case REQ_IRQ_ERR_WOL: 3035 free_irq(dev->irq, dev); 3036 fallthrough; 3037 case REQ_IRQ_ERR_MAC: 3038 case REQ_IRQ_ERR_NO: 3039 /* If MAC IRQ request error, no more IRQ to free */ 3040 break; 3041 } 3042 } 3043 3044 static int stmmac_request_irq_multi_msi(struct net_device *dev) 3045 { 3046 enum request_irq_err irq_err = REQ_IRQ_ERR_NO; 3047 struct stmmac_priv *priv = netdev_priv(dev); 3048 int irq_idx = 0; 3049 char *int_name; 3050 int ret; 3051 int i; 3052 3053 /* For common interrupt */ 3054 int_name = priv->int_name_mac; 3055 sprintf(int_name, "%s:%s", dev->name, 
"mac"); 3056 ret = request_irq(dev->irq, stmmac_mac_interrupt, 3057 0, int_name, dev); 3058 if (unlikely(ret < 0)) { 3059 netdev_err(priv->dev, 3060 "%s: alloc mac MSI %d (error: %d)\n", 3061 __func__, dev->irq, ret); 3062 irq_err = REQ_IRQ_ERR_MAC; 3063 goto irq_error; 3064 } 3065 3066 /* Request the Wake IRQ in case of another line 3067 * is used for WoL 3068 */ 3069 if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) { 3070 int_name = priv->int_name_wol; 3071 sprintf(int_name, "%s:%s", dev->name, "wol"); 3072 ret = request_irq(priv->wol_irq, 3073 stmmac_mac_interrupt, 3074 0, int_name, dev); 3075 if (unlikely(ret < 0)) { 3076 netdev_err(priv->dev, 3077 "%s: alloc wol MSI %d (error: %d)\n", 3078 __func__, priv->wol_irq, ret); 3079 irq_err = REQ_IRQ_ERR_WOL; 3080 goto irq_error; 3081 } 3082 } 3083 3084 /* Request the LPI IRQ in case of another line 3085 * is used for LPI 3086 */ 3087 if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq) { 3088 int_name = priv->int_name_lpi; 3089 sprintf(int_name, "%s:%s", dev->name, "lpi"); 3090 ret = request_irq(priv->lpi_irq, 3091 stmmac_mac_interrupt, 3092 0, int_name, dev); 3093 if (unlikely(ret < 0)) { 3094 netdev_err(priv->dev, 3095 "%s: alloc lpi MSI %d (error: %d)\n", 3096 __func__, priv->lpi_irq, ret); 3097 irq_err = REQ_IRQ_ERR_LPI; 3098 goto irq_error; 3099 } 3100 } 3101 3102 /* Request the Safety Feature Correctible Error line in 3103 * case of another line is used 3104 */ 3105 if (priv->sfty_ce_irq > 0 && priv->sfty_ce_irq != dev->irq) { 3106 int_name = priv->int_name_sfty_ce; 3107 sprintf(int_name, "%s:%s", dev->name, "safety-ce"); 3108 ret = request_irq(priv->sfty_ce_irq, 3109 stmmac_safety_interrupt, 3110 0, int_name, dev); 3111 if (unlikely(ret < 0)) { 3112 netdev_err(priv->dev, 3113 "%s: alloc sfty ce MSI %d (error: %d)\n", 3114 __func__, priv->sfty_ce_irq, ret); 3115 irq_err = REQ_IRQ_ERR_SFTY_CE; 3116 goto irq_error; 3117 } 3118 } 3119 3120 /* Request the Safety Feature Uncorrectible Error line in 3121 * case of another line is used 3122 */ 3123 if (priv->sfty_ue_irq > 0 && priv->sfty_ue_irq != dev->irq) { 3124 int_name = priv->int_name_sfty_ue; 3125 sprintf(int_name, "%s:%s", dev->name, "safety-ue"); 3126 ret = request_irq(priv->sfty_ue_irq, 3127 stmmac_safety_interrupt, 3128 0, int_name, dev); 3129 if (unlikely(ret < 0)) { 3130 netdev_err(priv->dev, 3131 "%s: alloc sfty ue MSI %d (error: %d)\n", 3132 __func__, priv->sfty_ue_irq, ret); 3133 irq_err = REQ_IRQ_ERR_SFTY_UE; 3134 goto irq_error; 3135 } 3136 } 3137 3138 /* Request Rx MSI irq */ 3139 for (i = 0; i < priv->plat->rx_queues_to_use; i++) { 3140 if (priv->rx_irq[i] == 0) 3141 continue; 3142 3143 int_name = priv->int_name_rx_irq[i]; 3144 sprintf(int_name, "%s:%s-%d", dev->name, "rx", i); 3145 ret = request_irq(priv->rx_irq[i], 3146 stmmac_msi_intr_rx, 3147 0, int_name, &priv->rx_queue[i]); 3148 if (unlikely(ret < 0)) { 3149 netdev_err(priv->dev, 3150 "%s: alloc rx-%d MSI %d (error: %d)\n", 3151 __func__, i, priv->rx_irq[i], ret); 3152 irq_err = REQ_IRQ_ERR_RX; 3153 irq_idx = i; 3154 goto irq_error; 3155 } 3156 } 3157 3158 /* Request Tx MSI irq */ 3159 for (i = 0; i < priv->plat->tx_queues_to_use; i++) { 3160 if (priv->tx_irq[i] == 0) 3161 continue; 3162 3163 int_name = priv->int_name_tx_irq[i]; 3164 sprintf(int_name, "%s:%s-%d", dev->name, "tx", i); 3165 ret = request_irq(priv->tx_irq[i], 3166 stmmac_msi_intr_tx, 3167 0, int_name, &priv->tx_queue[i]); 3168 if (unlikely(ret < 0)) { 3169 netdev_err(priv->dev, 3170 "%s: alloc tx-%d MSI %d (error: %d)\n", 3171 __func__, i, 
priv->tx_irq[i], ret); 3172 irq_err = REQ_IRQ_ERR_TX; 3173 irq_idx = i; 3174 goto irq_error; 3175 } 3176 } 3177 3178 return 0; 3179 3180 irq_error: 3181 stmmac_free_irq(dev, irq_err, irq_idx); 3182 return ret; 3183 } 3184 3185 static int stmmac_request_irq_single(struct net_device *dev) 3186 { 3187 enum request_irq_err irq_err = REQ_IRQ_ERR_NO; 3188 struct stmmac_priv *priv = netdev_priv(dev); 3189 int ret; 3190 3191 ret = request_irq(dev->irq, stmmac_interrupt, 3192 IRQF_SHARED, dev->name, dev); 3193 if (unlikely(ret < 0)) { 3194 netdev_err(priv->dev, 3195 "%s: ERROR: allocating the IRQ %d (error: %d)\n", 3196 __func__, dev->irq, ret); 3197 irq_err = REQ_IRQ_ERR_MAC; 3198 return ret; 3199 } 3200 3201 /* Request the Wake IRQ in case of another line 3202 * is used for WoL 3203 */ 3204 if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) { 3205 ret = request_irq(priv->wol_irq, stmmac_interrupt, 3206 IRQF_SHARED, dev->name, dev); 3207 if (unlikely(ret < 0)) { 3208 netdev_err(priv->dev, 3209 "%s: ERROR: allocating the WoL IRQ %d (%d)\n", 3210 __func__, priv->wol_irq, ret); 3211 irq_err = REQ_IRQ_ERR_WOL; 3212 return ret; 3213 } 3214 } 3215 3216 /* Request the IRQ lines */ 3217 if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq) { 3218 ret = request_irq(priv->lpi_irq, stmmac_interrupt, 3219 IRQF_SHARED, dev->name, dev); 3220 if (unlikely(ret < 0)) { 3221 netdev_err(priv->dev, 3222 "%s: ERROR: allocating the LPI IRQ %d (%d)\n", 3223 __func__, priv->lpi_irq, ret); 3224 irq_err = REQ_IRQ_ERR_LPI; 3225 goto irq_error; 3226 } 3227 } 3228 3229 return 0; 3230 3231 irq_error: 3232 stmmac_free_irq(dev, irq_err, 0); 3233 return ret; 3234 } 3235 3236 static int stmmac_request_irq(struct net_device *dev) 3237 { 3238 struct stmmac_priv *priv = netdev_priv(dev); 3239 int ret; 3240 3241 /* Request the IRQ lines */ 3242 if (priv->plat->multi_msi_en) 3243 ret = stmmac_request_irq_multi_msi(dev); 3244 else 3245 ret = stmmac_request_irq_single(dev); 3246 3247 return ret; 3248 } 3249 3250 /** 3251 * stmmac_open - open entry point of the driver 3252 * @dev : pointer to the device structure. 3253 * Description: 3254 * This function is the open entry point of the driver. 3255 * Return value: 3256 * 0 on success and an appropriate (-)ve integer as defined in errno.h 3257 * file on failure. 
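 * The open sequence is: runtime PM get, optional PHY attach, RX buffer
 * size selection, TBS capability check, DMA descriptor allocation and
 * ring initialization, stmmac_hw_setup(), coalesce init, phylink
 * start, IRQ request and finally enabling all queues.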
3258 */ 3259 static int stmmac_open(struct net_device *dev) 3260 { 3261 struct stmmac_priv *priv = netdev_priv(dev); 3262 int bfsize = 0; 3263 u32 chan; 3264 int ret; 3265 3266 ret = pm_runtime_get_sync(priv->device); 3267 if (ret < 0) { 3268 pm_runtime_put_noidle(priv->device); 3269 return ret; 3270 } 3271 3272 if (priv->hw->pcs != STMMAC_PCS_TBI && 3273 priv->hw->pcs != STMMAC_PCS_RTBI && 3274 priv->hw->xpcs_args.an_mode != DW_AN_C73) { 3275 ret = stmmac_init_phy(dev); 3276 if (ret) { 3277 netdev_err(priv->dev, 3278 "%s: Cannot attach to PHY (error: %d)\n", 3279 __func__, ret); 3280 goto init_phy_error; 3281 } 3282 } 3283 3284 /* Extra statistics */ 3285 memset(&priv->xstats, 0, sizeof(struct stmmac_extra_stats)); 3286 priv->xstats.threshold = tc; 3287 3288 bfsize = stmmac_set_16kib_bfsize(priv, dev->mtu); 3289 if (bfsize < 0) 3290 bfsize = 0; 3291 3292 if (bfsize < BUF_SIZE_16KiB) 3293 bfsize = stmmac_set_bfsize(dev->mtu, priv->dma_buf_sz); 3294 3295 priv->dma_buf_sz = bfsize; 3296 buf_sz = bfsize; 3297 3298 priv->rx_copybreak = STMMAC_RX_COPYBREAK; 3299 3300 if (!priv->dma_tx_size) 3301 priv->dma_tx_size = DMA_DEFAULT_TX_SIZE; 3302 if (!priv->dma_rx_size) 3303 priv->dma_rx_size = DMA_DEFAULT_RX_SIZE; 3304 3305 /* Earlier check for TBS */ 3306 for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) { 3307 struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan]; 3308 int tbs_en = priv->plat->tx_queues_cfg[chan].tbs_en; 3309 3310 tx_q->tbs |= tbs_en ? STMMAC_TBS_AVAIL : 0; 3311 if (stmmac_enable_tbs(priv, priv->ioaddr, tbs_en, chan)) 3312 tx_q->tbs &= ~STMMAC_TBS_AVAIL; 3313 } 3314 3315 ret = alloc_dma_desc_resources(priv); 3316 if (ret < 0) { 3317 netdev_err(priv->dev, "%s: DMA descriptors allocation failed\n", 3318 __func__); 3319 goto dma_desc_error; 3320 } 3321 3322 ret = init_dma_desc_rings(dev, GFP_KERNEL); 3323 if (ret < 0) { 3324 netdev_err(priv->dev, "%s: DMA descriptors initialization failed\n", 3325 __func__); 3326 goto init_error; 3327 } 3328 3329 ret = stmmac_hw_setup(dev, true); 3330 if (ret < 0) { 3331 netdev_err(priv->dev, "%s: Hw setup failed\n", __func__); 3332 goto init_error; 3333 } 3334 3335 stmmac_init_coalesce(priv); 3336 3337 phylink_start(priv->phylink); 3338 /* We may have called phylink_speed_down before */ 3339 phylink_speed_up(priv->phylink); 3340 3341 ret = stmmac_request_irq(dev); 3342 if (ret) 3343 goto irq_error; 3344 3345 stmmac_enable_all_queues(priv); 3346 netif_tx_start_all_queues(priv->dev); 3347 3348 return 0; 3349 3350 irq_error: 3351 phylink_stop(priv->phylink); 3352 3353 for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) 3354 hrtimer_cancel(&priv->tx_queue[chan].txtimer); 3355 3356 stmmac_hw_teardown(dev); 3357 init_error: 3358 free_dma_desc_resources(priv); 3359 dma_desc_error: 3360 phylink_disconnect_phy(priv->phylink); 3361 init_phy_error: 3362 pm_runtime_put(priv->device); 3363 return ret; 3364 } 3365 3366 static void stmmac_fpe_stop_wq(struct stmmac_priv *priv) 3367 { 3368 set_bit(__FPE_REMOVING, &priv->fpe_task_state); 3369 3370 if (priv->fpe_wq) 3371 destroy_workqueue(priv->fpe_wq); 3372 3373 netdev_info(priv->dev, "FPE workqueue stop"); 3374 } 3375 3376 /** 3377 * stmmac_release - close entry point of the driver 3378 * @dev : device pointer. 3379 * Description: 3380 * This is the stop entry point of the driver. 
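 * It undoes stmmac_open(): stop and disconnect the PHY, disable the
 * queues, cancel the TX coalesce timers, free the IRQ lines, stop the
 * DMA, release the descriptor resources, disable the MAC, release PTP
 * and drop the runtime PM reference.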
3381 */ 3382 static int stmmac_release(struct net_device *dev) 3383 { 3384 struct stmmac_priv *priv = netdev_priv(dev); 3385 u32 chan; 3386 3387 if (device_may_wakeup(priv->device)) 3388 phylink_speed_down(priv->phylink, false); 3389 /* Stop and disconnect the PHY */ 3390 phylink_stop(priv->phylink); 3391 phylink_disconnect_phy(priv->phylink); 3392 3393 stmmac_disable_all_queues(priv); 3394 3395 for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) 3396 hrtimer_cancel(&priv->tx_queue[chan].txtimer); 3397 3398 /* Free the IRQ lines */ 3399 stmmac_free_irq(dev, REQ_IRQ_ERR_ALL, 0); 3400 3401 if (priv->eee_enabled) { 3402 priv->tx_path_in_lpi_mode = false; 3403 del_timer_sync(&priv->eee_ctrl_timer); 3404 } 3405 3406 /* Stop TX/RX DMA and clear the descriptors */ 3407 stmmac_stop_all_dma(priv); 3408 3409 /* Release and free the Rx/Tx resources */ 3410 free_dma_desc_resources(priv); 3411 3412 /* Disable the MAC Rx/Tx */ 3413 stmmac_mac_set(priv, priv->ioaddr, false); 3414 3415 netif_carrier_off(dev); 3416 3417 stmmac_release_ptp(priv); 3418 3419 pm_runtime_put(priv->device); 3420 3421 if (priv->dma_cap.fpesel) 3422 stmmac_fpe_stop_wq(priv); 3423 3424 return 0; 3425 } 3426 3427 static bool stmmac_vlan_insert(struct stmmac_priv *priv, struct sk_buff *skb, 3428 struct stmmac_tx_queue *tx_q) 3429 { 3430 u16 tag = 0x0, inner_tag = 0x0; 3431 u32 inner_type = 0x0; 3432 struct dma_desc *p; 3433 3434 if (!priv->dma_cap.vlins) 3435 return false; 3436 if (!skb_vlan_tag_present(skb)) 3437 return false; 3438 if (skb->vlan_proto == htons(ETH_P_8021AD)) { 3439 inner_tag = skb_vlan_tag_get(skb); 3440 inner_type = STMMAC_VLAN_INSERT; 3441 } 3442 3443 tag = skb_vlan_tag_get(skb); 3444 3445 if (tx_q->tbs & STMMAC_TBS_AVAIL) 3446 p = &tx_q->dma_entx[tx_q->cur_tx].basic; 3447 else 3448 p = &tx_q->dma_tx[tx_q->cur_tx]; 3449 3450 if (stmmac_set_desc_vlan_tag(priv, p, tag, inner_tag, inner_type)) 3451 return false; 3452 3453 stmmac_set_tx_owner(priv, p); 3454 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_tx_size); 3455 return true; 3456 } 3457 3458 /** 3459 * stmmac_tso_allocator - close entry point of the driver 3460 * @priv: driver private structure 3461 * @des: buffer start address 3462 * @total_len: total length to fill in descriptors 3463 * @last_segment: condition for the last descriptor 3464 * @queue: TX queue index 3465 * Description: 3466 * This function fills descriptor and request new descriptors according to 3467 * buffer length to fill 3468 */ 3469 static void stmmac_tso_allocator(struct stmmac_priv *priv, dma_addr_t des, 3470 int total_len, bool last_segment, u32 queue) 3471 { 3472 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue]; 3473 struct dma_desc *desc; 3474 u32 buff_size; 3475 int tmp_len; 3476 3477 tmp_len = total_len; 3478 3479 while (tmp_len > 0) { 3480 dma_addr_t curr_addr; 3481 3482 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, 3483 priv->dma_tx_size); 3484 WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]); 3485 3486 if (tx_q->tbs & STMMAC_TBS_AVAIL) 3487 desc = &tx_q->dma_entx[tx_q->cur_tx].basic; 3488 else 3489 desc = &tx_q->dma_tx[tx_q->cur_tx]; 3490 3491 curr_addr = des + (total_len - tmp_len); 3492 if (priv->dma_cap.addr64 <= 32) 3493 desc->des0 = cpu_to_le32(curr_addr); 3494 else 3495 stmmac_set_desc_addr(priv, desc, curr_addr); 3496 3497 buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ? 
3498 TSO_MAX_BUFF_SIZE : tmp_len; 3499 3500 stmmac_prepare_tso_tx_desc(priv, desc, 0, buff_size, 3501 0, 1, 3502 (last_segment) && (tmp_len <= TSO_MAX_BUFF_SIZE), 3503 0, 0); 3504 3505 tmp_len -= TSO_MAX_BUFF_SIZE; 3506 } 3507 } 3508 3509 /** 3510 * stmmac_tso_xmit - Tx entry point of the driver for oversized frames (TSO) 3511 * @skb : the socket buffer 3512 * @dev : device pointer 3513 * Description: this is the transmit function that is called on TSO frames 3514 * (support available on GMAC4 and newer chips). 3515 * Diagram below show the ring programming in case of TSO frames: 3516 * 3517 * First Descriptor 3518 * -------- 3519 * | DES0 |---> buffer1 = L2/L3/L4 header 3520 * | DES1 |---> TCP Payload (can continue on next descr...) 3521 * | DES2 |---> buffer 1 and 2 len 3522 * | DES3 |---> must set TSE, TCP hdr len-> [22:19]. TCP payload len [17:0] 3523 * -------- 3524 * | 3525 * ... 3526 * | 3527 * -------- 3528 * | DES0 | --| Split TCP Payload on Buffers 1 and 2 3529 * | DES1 | --| 3530 * | DES2 | --> buffer 1 and 2 len 3531 * | DES3 | 3532 * -------- 3533 * 3534 * mss is fixed when enable tso, so w/o programming the TDES3 ctx field. 3535 */ 3536 static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev) 3537 { 3538 struct dma_desc *desc, *first, *mss_desc = NULL; 3539 struct stmmac_priv *priv = netdev_priv(dev); 3540 int desc_size, tmp_pay_len = 0, first_tx; 3541 int nfrags = skb_shinfo(skb)->nr_frags; 3542 u32 queue = skb_get_queue_mapping(skb); 3543 unsigned int first_entry, tx_packets; 3544 struct stmmac_tx_queue *tx_q; 3545 bool has_vlan, set_ic; 3546 u8 proto_hdr_len, hdr; 3547 u32 pay_len, mss; 3548 dma_addr_t des; 3549 int i; 3550 3551 tx_q = &priv->tx_queue[queue]; 3552 first_tx = tx_q->cur_tx; 3553 3554 /* Compute header lengths */ 3555 if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) { 3556 proto_hdr_len = skb_transport_offset(skb) + sizeof(struct udphdr); 3557 hdr = sizeof(struct udphdr); 3558 } else { 3559 proto_hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); 3560 hdr = tcp_hdrlen(skb); 3561 } 3562 3563 /* Desc availability based on threshold should be enough safe */ 3564 if (unlikely(stmmac_tx_avail(priv, queue) < 3565 (((skb->len - proto_hdr_len) / TSO_MAX_BUFF_SIZE + 1)))) { 3566 if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) { 3567 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, 3568 queue)); 3569 /* This is a hard error, log it. 
*/ 3570 netdev_err(priv->dev, 3571 "%s: Tx Ring full when queue awake\n", 3572 __func__); 3573 } 3574 return NETDEV_TX_BUSY; 3575 } 3576 3577 pay_len = skb_headlen(skb) - proto_hdr_len; /* no frags */ 3578 3579 mss = skb_shinfo(skb)->gso_size; 3580 3581 /* set new MSS value if needed */ 3582 if (mss != tx_q->mss) { 3583 if (tx_q->tbs & STMMAC_TBS_AVAIL) 3584 mss_desc = &tx_q->dma_entx[tx_q->cur_tx].basic; 3585 else 3586 mss_desc = &tx_q->dma_tx[tx_q->cur_tx]; 3587 3588 stmmac_set_mss(priv, mss_desc, mss); 3589 tx_q->mss = mss; 3590 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, 3591 priv->dma_tx_size); 3592 WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]); 3593 } 3594 3595 if (netif_msg_tx_queued(priv)) { 3596 pr_info("%s: hdrlen %d, hdr_len %d, pay_len %d, mss %d\n", 3597 __func__, hdr, proto_hdr_len, pay_len, mss); 3598 pr_info("\tskb->len %d, skb->data_len %d\n", skb->len, 3599 skb->data_len); 3600 } 3601 3602 /* Check if VLAN can be inserted by HW */ 3603 has_vlan = stmmac_vlan_insert(priv, skb, tx_q); 3604 3605 first_entry = tx_q->cur_tx; 3606 WARN_ON(tx_q->tx_skbuff[first_entry]); 3607 3608 if (tx_q->tbs & STMMAC_TBS_AVAIL) 3609 desc = &tx_q->dma_entx[first_entry].basic; 3610 else 3611 desc = &tx_q->dma_tx[first_entry]; 3612 first = desc; 3613 3614 if (has_vlan) 3615 stmmac_set_desc_vlan(priv, first, STMMAC_VLAN_INSERT); 3616 3617 /* first descriptor: fill Headers on Buf1 */ 3618 des = dma_map_single(priv->device, skb->data, skb_headlen(skb), 3619 DMA_TO_DEVICE); 3620 if (dma_mapping_error(priv->device, des)) 3621 goto dma_map_err; 3622 3623 tx_q->tx_skbuff_dma[first_entry].buf = des; 3624 tx_q->tx_skbuff_dma[first_entry].len = skb_headlen(skb); 3625 3626 if (priv->dma_cap.addr64 <= 32) { 3627 first->des0 = cpu_to_le32(des); 3628 3629 /* Fill start of payload in buff2 of first descriptor */ 3630 if (pay_len) 3631 first->des1 = cpu_to_le32(des + proto_hdr_len); 3632 3633 /* If needed take extra descriptors to fill the remaining payload */ 3634 tmp_pay_len = pay_len - TSO_MAX_BUFF_SIZE; 3635 } else { 3636 stmmac_set_desc_addr(priv, first, des); 3637 tmp_pay_len = pay_len; 3638 des += proto_hdr_len; 3639 pay_len = 0; 3640 } 3641 3642 stmmac_tso_allocator(priv, des, tmp_pay_len, (nfrags == 0), queue); 3643 3644 /* Prepare fragments */ 3645 for (i = 0; i < nfrags; i++) { 3646 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 3647 3648 des = skb_frag_dma_map(priv->device, frag, 0, 3649 skb_frag_size(frag), 3650 DMA_TO_DEVICE); 3651 if (dma_mapping_error(priv->device, des)) 3652 goto dma_map_err; 3653 3654 stmmac_tso_allocator(priv, des, skb_frag_size(frag), 3655 (i == nfrags - 1), queue); 3656 3657 tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des; 3658 tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_frag_size(frag); 3659 tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = true; 3660 } 3661 3662 tx_q->tx_skbuff_dma[tx_q->cur_tx].last_segment = true; 3663 3664 /* Only the last descriptor gets to point to the skb. 
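 * That way stmmac_tx_clean() consumes the skb exactly once, when the
 * final descriptor of the frame is reclaimed.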
*/ 3665 tx_q->tx_skbuff[tx_q->cur_tx] = skb; 3666 3667 /* Manage tx mitigation */ 3668 tx_packets = (tx_q->cur_tx + 1) - first_tx; 3669 tx_q->tx_count_frames += tx_packets; 3670 3671 if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en) 3672 set_ic = true; 3673 else if (!priv->tx_coal_frames[queue]) 3674 set_ic = false; 3675 else if (tx_packets > priv->tx_coal_frames[queue]) 3676 set_ic = true; 3677 else if ((tx_q->tx_count_frames % 3678 priv->tx_coal_frames[queue]) < tx_packets) 3679 set_ic = true; 3680 else 3681 set_ic = false; 3682 3683 if (set_ic) { 3684 if (tx_q->tbs & STMMAC_TBS_AVAIL) 3685 desc = &tx_q->dma_entx[tx_q->cur_tx].basic; 3686 else 3687 desc = &tx_q->dma_tx[tx_q->cur_tx]; 3688 3689 tx_q->tx_count_frames = 0; 3690 stmmac_set_tx_ic(priv, desc); 3691 priv->xstats.tx_set_ic_bit++; 3692 } 3693 3694 /* We've used all descriptors we need for this skb, however, 3695 * advance cur_tx so that it references a fresh descriptor. 3696 * ndo_start_xmit will fill this descriptor the next time it's 3697 * called and stmmac_tx_clean may clean up to this descriptor. 3698 */ 3699 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_tx_size); 3700 3701 if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) { 3702 netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n", 3703 __func__); 3704 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue)); 3705 } 3706 3707 dev->stats.tx_bytes += skb->len; 3708 priv->xstats.tx_tso_frames++; 3709 priv->xstats.tx_tso_nfrags += nfrags; 3710 3711 if (priv->sarc_type) 3712 stmmac_set_desc_sarc(priv, first, priv->sarc_type); 3713 3714 skb_tx_timestamp(skb); 3715 3716 if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && 3717 priv->hwts_tx_en)) { 3718 /* declare that device is doing timestamping */ 3719 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; 3720 stmmac_enable_tx_timestamp(priv, first); 3721 } 3722 3723 /* Complete the first descriptor before granting the DMA */ 3724 stmmac_prepare_tso_tx_desc(priv, first, 1, 3725 proto_hdr_len, 3726 pay_len, 3727 1, tx_q->tx_skbuff_dma[first_entry].last_segment, 3728 hdr / 4, (skb->len - proto_hdr_len)); 3729 3730 /* If context desc is used to change MSS */ 3731 if (mss_desc) { 3732 /* Make sure that first descriptor has been completely 3733 * written, including its own bit. This is because MSS is 3734 * actually before first descriptor, so we need to make 3735 * sure that MSS's own bit is the last thing written. 3736 */ 3737 dma_wmb(); 3738 stmmac_set_tx_owner(priv, mss_desc); 3739 } 3740 3741 /* The own bit must be the latest setting done when prepare the 3742 * descriptor and then barrier is needed to make sure that 3743 * all is coherent before granting the DMA engine. 
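 * Two barriers are involved: the dma_wmb() above orders the first
 * descriptor writes against setting the MSS context descriptor's own
 * bit, and the wmb() below orders all descriptor writes against the
 * tail pointer update that hands the ring over to the hardware.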
3744 */ 3745 wmb(); 3746 3747 if (netif_msg_pktdata(priv)) { 3748 pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n", 3749 __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry, 3750 tx_q->cur_tx, first, nfrags); 3751 pr_info(">>> frame to be transmitted: "); 3752 print_pkt(skb->data, skb_headlen(skb)); 3753 } 3754 3755 netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len); 3756 3757 if (tx_q->tbs & STMMAC_TBS_AVAIL) 3758 desc_size = sizeof(struct dma_edesc); 3759 else 3760 desc_size = sizeof(struct dma_desc); 3761 3762 tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * desc_size); 3763 stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue); 3764 stmmac_tx_timer_arm(priv, queue); 3765 3766 return NETDEV_TX_OK; 3767 3768 dma_map_err: 3769 dev_err(priv->device, "Tx dma map failed\n"); 3770 dev_kfree_skb(skb); 3771 priv->dev->stats.tx_dropped++; 3772 return NETDEV_TX_OK; 3773 } 3774 3775 /** 3776 * stmmac_xmit - Tx entry point of the driver 3777 * @skb : the socket buffer 3778 * @dev : device pointer 3779 * Description : this is the tx entry point of the driver. 3780 * It programs the chain or the ring and supports oversized frames 3781 * and SG feature. 3782 */ 3783 static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) 3784 { 3785 unsigned int first_entry, tx_packets, enh_desc; 3786 struct stmmac_priv *priv = netdev_priv(dev); 3787 unsigned int nopaged_len = skb_headlen(skb); 3788 int i, csum_insertion = 0, is_jumbo = 0; 3789 u32 queue = skb_get_queue_mapping(skb); 3790 int nfrags = skb_shinfo(skb)->nr_frags; 3791 int gso = skb_shinfo(skb)->gso_type; 3792 struct dma_edesc *tbs_desc = NULL; 3793 int entry, desc_size, first_tx; 3794 struct dma_desc *desc, *first; 3795 struct stmmac_tx_queue *tx_q; 3796 bool has_vlan, set_ic; 3797 dma_addr_t des; 3798 3799 tx_q = &priv->tx_queue[queue]; 3800 first_tx = tx_q->cur_tx; 3801 3802 if (priv->tx_path_in_lpi_mode && priv->eee_sw_timer_en) 3803 stmmac_disable_eee_mode(priv); 3804 3805 /* Manage oversized TCP frames for GMAC4 device */ 3806 if (skb_is_gso(skb) && priv->tso) { 3807 if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) 3808 return stmmac_tso_xmit(skb, dev); 3809 if (priv->plat->has_gmac4 && (gso & SKB_GSO_UDP_L4)) 3810 return stmmac_tso_xmit(skb, dev); 3811 } 3812 3813 if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) { 3814 if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) { 3815 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, 3816 queue)); 3817 /* This is a hard error, log it. 
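 * It should not normally trigger, since the queue is stopped further
 * down as soon as fewer than MAX_SKB_FRAGS + 1 descriptors are free.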
*/ 3818 netdev_err(priv->dev, 3819 "%s: Tx Ring full when queue awake\n", 3820 __func__); 3821 } 3822 return NETDEV_TX_BUSY; 3823 } 3824 3825 /* Check if VLAN can be inserted by HW */ 3826 has_vlan = stmmac_vlan_insert(priv, skb, tx_q); 3827 3828 entry = tx_q->cur_tx; 3829 first_entry = entry; 3830 WARN_ON(tx_q->tx_skbuff[first_entry]); 3831 3832 csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL); 3833 3834 if (likely(priv->extend_desc)) 3835 desc = (struct dma_desc *)(tx_q->dma_etx + entry); 3836 else if (tx_q->tbs & STMMAC_TBS_AVAIL) 3837 desc = &tx_q->dma_entx[entry].basic; 3838 else 3839 desc = tx_q->dma_tx + entry; 3840 3841 first = desc; 3842 3843 if (has_vlan) 3844 stmmac_set_desc_vlan(priv, first, STMMAC_VLAN_INSERT); 3845 3846 enh_desc = priv->plat->enh_desc; 3847 /* To program the descriptors according to the size of the frame */ 3848 if (enh_desc) 3849 is_jumbo = stmmac_is_jumbo_frm(priv, skb->len, enh_desc); 3850 3851 if (unlikely(is_jumbo)) { 3852 entry = stmmac_jumbo_frm(priv, tx_q, skb, csum_insertion); 3853 if (unlikely(entry < 0) && (entry != -EINVAL)) 3854 goto dma_map_err; 3855 } 3856 3857 for (i = 0; i < nfrags; i++) { 3858 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 3859 int len = skb_frag_size(frag); 3860 bool last_segment = (i == (nfrags - 1)); 3861 3862 entry = STMMAC_GET_ENTRY(entry, priv->dma_tx_size); 3863 WARN_ON(tx_q->tx_skbuff[entry]); 3864 3865 if (likely(priv->extend_desc)) 3866 desc = (struct dma_desc *)(tx_q->dma_etx + entry); 3867 else if (tx_q->tbs & STMMAC_TBS_AVAIL) 3868 desc = &tx_q->dma_entx[entry].basic; 3869 else 3870 desc = tx_q->dma_tx + entry; 3871 3872 des = skb_frag_dma_map(priv->device, frag, 0, len, 3873 DMA_TO_DEVICE); 3874 if (dma_mapping_error(priv->device, des)) 3875 goto dma_map_err; /* should reuse desc w/o issues */ 3876 3877 tx_q->tx_skbuff_dma[entry].buf = des; 3878 3879 stmmac_set_desc_addr(priv, desc, des); 3880 3881 tx_q->tx_skbuff_dma[entry].map_as_page = true; 3882 tx_q->tx_skbuff_dma[entry].len = len; 3883 tx_q->tx_skbuff_dma[entry].last_segment = last_segment; 3884 3885 /* Prepare the descriptor and set the own bit too */ 3886 stmmac_prepare_tx_desc(priv, desc, 0, len, csum_insertion, 3887 priv->mode, 1, last_segment, skb->len); 3888 } 3889 3890 /* Only the last descriptor gets to point to the skb. */ 3891 tx_q->tx_skbuff[entry] = skb; 3892 3893 /* According to the coalesce parameter the IC bit for the latest 3894 * segment is reset and the timer re-started to clean the tx status. 3895 * This approach takes care about the fragments: desc is the first 3896 * element in case of no SG. 
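 * For example, with tx_coal_frames set to 25, the IC bit is requested
 * roughly once every 25 frames (or immediately when a HW timestamp is
 * pending), so completion interrupts are batched.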
3897 */ 3898 tx_packets = (entry + 1) - first_tx; 3899 tx_q->tx_count_frames += tx_packets; 3900 3901 if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en) 3902 set_ic = true; 3903 else if (!priv->tx_coal_frames[queue]) 3904 set_ic = false; 3905 else if (tx_packets > priv->tx_coal_frames[queue]) 3906 set_ic = true; 3907 else if ((tx_q->tx_count_frames % 3908 priv->tx_coal_frames[queue]) < tx_packets) 3909 set_ic = true; 3910 else 3911 set_ic = false; 3912 3913 if (set_ic) { 3914 if (likely(priv->extend_desc)) 3915 desc = &tx_q->dma_etx[entry].basic; 3916 else if (tx_q->tbs & STMMAC_TBS_AVAIL) 3917 desc = &tx_q->dma_entx[entry].basic; 3918 else 3919 desc = &tx_q->dma_tx[entry]; 3920 3921 tx_q->tx_count_frames = 0; 3922 stmmac_set_tx_ic(priv, desc); 3923 priv->xstats.tx_set_ic_bit++; 3924 } 3925 3926 /* We've used all descriptors we need for this skb, however, 3927 * advance cur_tx so that it references a fresh descriptor. 3928 * ndo_start_xmit will fill this descriptor the next time it's 3929 * called and stmmac_tx_clean may clean up to this descriptor. 3930 */ 3931 entry = STMMAC_GET_ENTRY(entry, priv->dma_tx_size); 3932 tx_q->cur_tx = entry; 3933 3934 if (netif_msg_pktdata(priv)) { 3935 netdev_dbg(priv->dev, 3936 "%s: curr=%d dirty=%d f=%d, e=%d, first=%p, nfrags=%d", 3937 __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry, 3938 entry, first, nfrags); 3939 3940 netdev_dbg(priv->dev, ">>> frame to be transmitted: "); 3941 print_pkt(skb->data, skb->len); 3942 } 3943 3944 if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) { 3945 netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n", 3946 __func__); 3947 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue)); 3948 } 3949 3950 dev->stats.tx_bytes += skb->len; 3951 3952 if (priv->sarc_type) 3953 stmmac_set_desc_sarc(priv, first, priv->sarc_type); 3954 3955 skb_tx_timestamp(skb); 3956 3957 /* Ready to fill the first descriptor and set the OWN bit w/o any 3958 * problems because all the descriptors are actually ready to be 3959 * passed to the DMA engine. 3960 */ 3961 if (likely(!is_jumbo)) { 3962 bool last_segment = (nfrags == 0); 3963 3964 des = dma_map_single(priv->device, skb->data, 3965 nopaged_len, DMA_TO_DEVICE); 3966 if (dma_mapping_error(priv->device, des)) 3967 goto dma_map_err; 3968 3969 tx_q->tx_skbuff_dma[first_entry].buf = des; 3970 3971 stmmac_set_desc_addr(priv, first, des); 3972 3973 tx_q->tx_skbuff_dma[first_entry].len = nopaged_len; 3974 tx_q->tx_skbuff_dma[first_entry].last_segment = last_segment; 3975 3976 if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && 3977 priv->hwts_tx_en)) { 3978 /* declare that device is doing timestamping */ 3979 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; 3980 stmmac_enable_tx_timestamp(priv, first); 3981 } 3982 3983 /* Prepare the first descriptor setting the OWN bit too */ 3984 stmmac_prepare_tx_desc(priv, first, 1, nopaged_len, 3985 csum_insertion, priv->mode, 0, last_segment, 3986 skb->len); 3987 } 3988 3989 if (tx_q->tbs & STMMAC_TBS_EN) { 3990 struct timespec64 ts = ns_to_timespec64(skb->tstamp); 3991 3992 tbs_desc = &tx_q->dma_entx[first_entry]; 3993 stmmac_set_desc_tbs(priv, tbs_desc, ts.tv_sec, ts.tv_nsec); 3994 } 3995 3996 stmmac_set_tx_owner(priv, first); 3997 3998 /* The own bit must be the latest setting done when prepare the 3999 * descriptor and then barrier is needed to make sure that 4000 * all is coherent before granting the DMA engine. 
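 * Note that stmmac_set_tx_owner() was called on the first descriptor
 * only after all fragment descriptors above were fully prepared, so
 * the DMA never sees a half-built chain.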
4001 */ 4002 wmb(); 4003 4004 netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len); 4005 4006 stmmac_enable_dma_transmission(priv, priv->ioaddr); 4007 4008 if (likely(priv->extend_desc)) 4009 desc_size = sizeof(struct dma_extended_desc); 4010 else if (tx_q->tbs & STMMAC_TBS_AVAIL) 4011 desc_size = sizeof(struct dma_edesc); 4012 else 4013 desc_size = sizeof(struct dma_desc); 4014 4015 tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * desc_size); 4016 stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue); 4017 stmmac_tx_timer_arm(priv, queue); 4018 4019 return NETDEV_TX_OK; 4020 4021 dma_map_err: 4022 netdev_err(priv->dev, "Tx DMA map failed\n"); 4023 dev_kfree_skb(skb); 4024 priv->dev->stats.tx_dropped++; 4025 return NETDEV_TX_OK; 4026 } 4027 4028 static void stmmac_rx_vlan(struct net_device *dev, struct sk_buff *skb) 4029 { 4030 struct vlan_ethhdr *veth; 4031 __be16 vlan_proto; 4032 u16 vlanid; 4033 4034 veth = (struct vlan_ethhdr *)skb->data; 4035 vlan_proto = veth->h_vlan_proto; 4036 4037 if ((vlan_proto == htons(ETH_P_8021Q) && 4038 dev->features & NETIF_F_HW_VLAN_CTAG_RX) || 4039 (vlan_proto == htons(ETH_P_8021AD) && 4040 dev->features & NETIF_F_HW_VLAN_STAG_RX)) { 4041 /* pop the vlan tag */ 4042 vlanid = ntohs(veth->h_vlan_TCI); 4043 memmove(skb->data + VLAN_HLEN, veth, ETH_ALEN * 2); 4044 skb_pull(skb, VLAN_HLEN); 4045 __vlan_hwaccel_put_tag(skb, vlan_proto, vlanid); 4046 } 4047 } 4048 4049 /** 4050 * stmmac_rx_refill - refill used skb preallocated buffers 4051 * @priv: driver private structure 4052 * @queue: RX queue index 4053 * Description : this is to reallocate the skb for the reception process 4054 * that is based on zero-copy. 4055 */ 4056 static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue) 4057 { 4058 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; 4059 int len, dirty = stmmac_rx_dirty(priv, queue); 4060 unsigned int entry = rx_q->dirty_rx; 4061 4062 len = DIV_ROUND_UP(priv->dma_buf_sz, PAGE_SIZE) * PAGE_SIZE; 4063 4064 while (dirty-- > 0) { 4065 struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry]; 4066 struct dma_desc *p; 4067 bool use_rx_wd; 4068 4069 if (priv->extend_desc) 4070 p = (struct dma_desc *)(rx_q->dma_erx + entry); 4071 else 4072 p = rx_q->dma_rx + entry; 4073 4074 if (!buf->page) { 4075 buf->page = page_pool_dev_alloc_pages(rx_q->page_pool); 4076 if (!buf->page) 4077 break; 4078 } 4079 4080 if (priv->sph && !buf->sec_page) { 4081 buf->sec_page = page_pool_dev_alloc_pages(rx_q->page_pool); 4082 if (!buf->sec_page) 4083 break; 4084 4085 buf->sec_addr = page_pool_get_dma_addr(buf->sec_page); 4086 4087 dma_sync_single_for_device(priv->device, buf->sec_addr, 4088 len, DMA_FROM_DEVICE); 4089 } 4090 4091 buf->addr = page_pool_get_dma_addr(buf->page); 4092 4093 /* Sync whole allocation to device. This will invalidate old 4094 * data. 
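 * The sync length is dma_buf_sz rounded up to whole pages (see the
 * DIV_ROUND_UP above), i.e. the complete page_pool buffer.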
4095 */
4096 dma_sync_single_for_device(priv->device, buf->addr, len,
4097 DMA_FROM_DEVICE);
4098
4099 stmmac_set_desc_addr(priv, p, buf->addr);
4100 if (priv->sph)
4101 stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true);
4102 else
4103 stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false);
4104 stmmac_refill_desc3(priv, rx_q, p);
4105
4106 rx_q->rx_count_frames++;
4107 rx_q->rx_count_frames += priv->rx_coal_frames[queue];
4108 if (rx_q->rx_count_frames > priv->rx_coal_frames[queue])
4109 rx_q->rx_count_frames = 0;
4110
4111 use_rx_wd = !priv->rx_coal_frames[queue];
4112 use_rx_wd |= rx_q->rx_count_frames > 0;
4113 if (!priv->use_riwt)
4114 use_rx_wd = false;
4115
4116 dma_wmb();
4117 stmmac_set_rx_owner(priv, p, use_rx_wd);
4118
4119 entry = STMMAC_GET_ENTRY(entry, priv->dma_rx_size);
4120 }
4121 rx_q->dirty_rx = entry;
4122 rx_q->rx_tail_addr = rx_q->dma_rx_phy +
4123 (rx_q->dirty_rx * sizeof(struct dma_desc));
4124 stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue);
4125 }
4126
4127 static unsigned int stmmac_rx_buf1_len(struct stmmac_priv *priv,
4128 struct dma_desc *p,
4129 int status, unsigned int len)
4130 {
4131 unsigned int plen = 0, hlen = 0;
4132 int coe = priv->hw->rx_csum;
4133
4134 /* Not first descriptor, buffer is always zero */
4135 if (priv->sph && len)
4136 return 0;
4137
4138 /* First descriptor, get split header length */
4139 stmmac_get_rx_header_len(priv, p, &hlen);
4140 if (priv->sph && hlen) {
4141 priv->xstats.rx_split_hdr_pkt_n++;
4142 return hlen;
4143 }
4144
4145 /* First descriptor, not last descriptor and not split header */
4146 if (status & rx_not_ls)
4147 return priv->dma_buf_sz;
4148
4149 plen = stmmac_get_rx_frame_len(priv, p, coe);
4150
4151 /* First descriptor and last descriptor and not split header */
4152 return min_t(unsigned int, priv->dma_buf_sz, plen);
4153 }
4154
4155 static unsigned int stmmac_rx_buf2_len(struct stmmac_priv *priv,
4156 struct dma_desc *p,
4157 int status, unsigned int len)
4158 {
4159 int coe = priv->hw->rx_csum;
4160 unsigned int plen = 0;
4161
4162 /* Not split header, buffer is not available */
4163 if (!priv->sph)
4164 return 0;
4165
4166 /* Not last descriptor */
4167 if (status & rx_not_ls)
4168 return priv->dma_buf_sz;
4169
4170 plen = stmmac_get_rx_frame_len(priv, p, coe);
4171
4172 /* Last descriptor */
4173 return plen - len;
4174 }
4175
4176 /**
4177 * stmmac_rx - manage the receive process
4178 * @priv: driver private structure
4179 * @limit: NAPI budget
4180 * @queue: RX queue index.
4181 * Description : this is the function called by the NAPI poll method.
4182 * It gets all the frames inside the ring.
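 * Return: the number of frames processed, never more than @limit, so
 * the NAPI poll routine can tell whether the budget was exhausted.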
4183 */ 4184 static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue) 4185 { 4186 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; 4187 struct stmmac_channel *ch = &priv->channel[queue]; 4188 unsigned int count = 0, error = 0, len = 0; 4189 int status = 0, coe = priv->hw->rx_csum; 4190 unsigned int next_entry = rx_q->cur_rx; 4191 unsigned int desc_size; 4192 struct sk_buff *skb = NULL; 4193 4194 if (netif_msg_rx_status(priv)) { 4195 void *rx_head; 4196 4197 netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__); 4198 if (priv->extend_desc) { 4199 rx_head = (void *)rx_q->dma_erx; 4200 desc_size = sizeof(struct dma_extended_desc); 4201 } else { 4202 rx_head = (void *)rx_q->dma_rx; 4203 desc_size = sizeof(struct dma_desc); 4204 } 4205 4206 stmmac_display_ring(priv, rx_head, priv->dma_rx_size, true, 4207 rx_q->dma_rx_phy, desc_size); 4208 } 4209 while (count < limit) { 4210 unsigned int buf1_len = 0, buf2_len = 0; 4211 enum pkt_hash_types hash_type; 4212 struct stmmac_rx_buffer *buf; 4213 struct dma_desc *np, *p; 4214 int entry; 4215 u32 hash; 4216 4217 if (!count && rx_q->state_saved) { 4218 skb = rx_q->state.skb; 4219 error = rx_q->state.error; 4220 len = rx_q->state.len; 4221 } else { 4222 rx_q->state_saved = false; 4223 skb = NULL; 4224 error = 0; 4225 len = 0; 4226 } 4227 4228 if (count >= limit) 4229 break; 4230 4231 read_again: 4232 buf1_len = 0; 4233 buf2_len = 0; 4234 entry = next_entry; 4235 buf = &rx_q->buf_pool[entry]; 4236 4237 if (priv->extend_desc) 4238 p = (struct dma_desc *)(rx_q->dma_erx + entry); 4239 else 4240 p = rx_q->dma_rx + entry; 4241 4242 /* read the status of the incoming frame */ 4243 status = stmmac_rx_status(priv, &priv->dev->stats, 4244 &priv->xstats, p); 4245 /* check if managed by the DMA otherwise go ahead */ 4246 if (unlikely(status & dma_own)) 4247 break; 4248 4249 rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx, 4250 priv->dma_rx_size); 4251 next_entry = rx_q->cur_rx; 4252 4253 if (priv->extend_desc) 4254 np = (struct dma_desc *)(rx_q->dma_erx + next_entry); 4255 else 4256 np = rx_q->dma_rx + next_entry; 4257 4258 prefetch(np); 4259 4260 if (priv->extend_desc) 4261 stmmac_rx_extended_status(priv, &priv->dev->stats, 4262 &priv->xstats, rx_q->dma_erx + entry); 4263 if (unlikely(status == discard_frame)) { 4264 page_pool_recycle_direct(rx_q->page_pool, buf->page); 4265 buf->page = NULL; 4266 error = 1; 4267 if (!priv->hwts_rx_en) 4268 priv->dev->stats.rx_errors++; 4269 } 4270 4271 if (unlikely(error && (status & rx_not_ls))) 4272 goto read_again; 4273 if (unlikely(error)) { 4274 dev_kfree_skb(skb); 4275 skb = NULL; 4276 count++; 4277 continue; 4278 } 4279 4280 /* Buffer is good. Go on. */ 4281 4282 prefetch(page_address(buf->page)); 4283 if (buf->sec_page) 4284 prefetch(page_address(buf->sec_page)); 4285 4286 buf1_len = stmmac_rx_buf1_len(priv, p, status, len); 4287 len += buf1_len; 4288 buf2_len = stmmac_rx_buf2_len(priv, p, status, len); 4289 len += buf2_len; 4290 4291 /* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3 4292 * Type frames (LLC/LLC-SNAP) 4293 * 4294 * llc_snap is never checked in GMAC >= 4, so this ACS 4295 * feature is always disabled and packets need to be 4296 * stripped manually. 
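 * The 4-byte FCS (ETH_FCS_LEN) is trimmed from whichever buffer holds
 * the tail of the frame: buf2 when it carries payload, buf1 otherwise.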
4297 */ 4298 if (likely(!(status & rx_not_ls)) && 4299 (likely(priv->synopsys_id >= DWMAC_CORE_4_00) || 4300 unlikely(status != llc_snap))) { 4301 if (buf2_len) 4302 buf2_len -= ETH_FCS_LEN; 4303 else 4304 buf1_len -= ETH_FCS_LEN; 4305 4306 len -= ETH_FCS_LEN; 4307 } 4308 4309 if (!skb) { 4310 skb = napi_alloc_skb(&ch->rx_napi, buf1_len); 4311 if (!skb) { 4312 priv->dev->stats.rx_dropped++; 4313 count++; 4314 goto drain_data; 4315 } 4316 4317 dma_sync_single_for_cpu(priv->device, buf->addr, 4318 buf1_len, DMA_FROM_DEVICE); 4319 skb_copy_to_linear_data(skb, page_address(buf->page), 4320 buf1_len); 4321 skb_put(skb, buf1_len); 4322 4323 /* Data payload copied into SKB, page ready for recycle */ 4324 page_pool_recycle_direct(rx_q->page_pool, buf->page); 4325 buf->page = NULL; 4326 } else if (buf1_len) { 4327 dma_sync_single_for_cpu(priv->device, buf->addr, 4328 buf1_len, DMA_FROM_DEVICE); 4329 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, 4330 buf->page, 0, buf1_len, 4331 priv->dma_buf_sz); 4332 4333 /* Data payload appended into SKB */ 4334 page_pool_release_page(rx_q->page_pool, buf->page); 4335 buf->page = NULL; 4336 } 4337 4338 if (buf2_len) { 4339 dma_sync_single_for_cpu(priv->device, buf->sec_addr, 4340 buf2_len, DMA_FROM_DEVICE); 4341 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, 4342 buf->sec_page, 0, buf2_len, 4343 priv->dma_buf_sz); 4344 4345 /* Data payload appended into SKB */ 4346 page_pool_release_page(rx_q->page_pool, buf->sec_page); 4347 buf->sec_page = NULL; 4348 } 4349 4350 drain_data: 4351 if (likely(status & rx_not_ls)) 4352 goto read_again; 4353 if (!skb) 4354 continue; 4355 4356 /* Got entire packet into SKB. Finish it. */ 4357 4358 stmmac_get_rx_hwtstamp(priv, p, np, skb); 4359 stmmac_rx_vlan(priv->dev, skb); 4360 skb->protocol = eth_type_trans(skb, priv->dev); 4361 4362 if (unlikely(!coe)) 4363 skb_checksum_none_assert(skb); 4364 else 4365 skb->ip_summed = CHECKSUM_UNNECESSARY; 4366 4367 if (!stmmac_get_rx_hash(priv, p, &hash, &hash_type)) 4368 skb_set_hash(skb, hash, hash_type); 4369 4370 skb_record_rx_queue(skb, queue); 4371 napi_gro_receive(&ch->rx_napi, skb); 4372 skb = NULL; 4373 4374 priv->dev->stats.rx_packets++; 4375 priv->dev->stats.rx_bytes += len; 4376 count++; 4377 } 4378 4379 if (status & rx_not_ls || skb) { 4380 rx_q->state_saved = true; 4381 rx_q->state.skb = skb; 4382 rx_q->state.error = error; 4383 rx_q->state.len = len; 4384 } 4385 4386 stmmac_rx_refill(priv, queue); 4387 4388 priv->xstats.rx_pkt_n += count; 4389 4390 return count; 4391 } 4392 4393 static int stmmac_napi_poll_rx(struct napi_struct *napi, int budget) 4394 { 4395 struct stmmac_channel *ch = 4396 container_of(napi, struct stmmac_channel, rx_napi); 4397 struct stmmac_priv *priv = ch->priv_data; 4398 u32 chan = ch->index; 4399 int work_done; 4400 4401 priv->xstats.napi_poll++; 4402 4403 work_done = stmmac_rx(priv, budget, chan); 4404 if (work_done < budget && napi_complete_done(napi, work_done)) { 4405 unsigned long flags; 4406 4407 spin_lock_irqsave(&ch->lock, flags); 4408 stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 0); 4409 spin_unlock_irqrestore(&ch->lock, flags); 4410 } 4411 4412 return work_done; 4413 } 4414 4415 static int stmmac_napi_poll_tx(struct napi_struct *napi, int budget) 4416 { 4417 struct stmmac_channel *ch = 4418 container_of(napi, struct stmmac_channel, tx_napi); 4419 struct stmmac_priv *priv = ch->priv_data; 4420 u32 chan = ch->index; 4421 int work_done; 4422 4423 priv->xstats.napi_poll++; 4424 4425 work_done = stmmac_tx_clean(priv, priv->dma_tx_size, chan); 
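/* stmmac_tx_clean() may reclaim up to a full ring's worth of
 * descriptors, so the result is clamped to the NAPI budget below
 * before deciding whether to complete the poll.
 */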
4426 work_done = min(work_done, budget); 4427 4428 if (work_done < budget && napi_complete_done(napi, work_done)) { 4429 unsigned long flags; 4430 4431 spin_lock_irqsave(&ch->lock, flags); 4432 stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 0, 1); 4433 spin_unlock_irqrestore(&ch->lock, flags); 4434 } 4435 4436 return work_done; 4437 } 4438 4439 /** 4440 * stmmac_tx_timeout 4441 * @dev : Pointer to net device structure 4442 * @txqueue: the index of the hanging transmit queue 4443 * Description: this function is called when a packet transmission fails to 4444 * complete within a reasonable time. The driver will mark the error in the 4445 * netdev structure and arrange for the device to be reset to a sane state 4446 * in order to transmit a new packet. 4447 */ 4448 static void stmmac_tx_timeout(struct net_device *dev, unsigned int txqueue) 4449 { 4450 struct stmmac_priv *priv = netdev_priv(dev); 4451 4452 stmmac_global_err(priv); 4453 } 4454 4455 /** 4456 * stmmac_set_rx_mode - entry point for multicast addressing 4457 * @dev : pointer to the device structure 4458 * Description: 4459 * This function is a driver entry point which gets called by the kernel 4460 * whenever multicast addresses must be enabled/disabled. 4461 * Return value: 4462 * void. 4463 */ 4464 static void stmmac_set_rx_mode(struct net_device *dev) 4465 { 4466 struct stmmac_priv *priv = netdev_priv(dev); 4467 4468 stmmac_set_filter(priv, priv->hw, dev); 4469 } 4470 4471 /** 4472 * stmmac_change_mtu - entry point to change MTU size for the device. 4473 * @dev : device pointer. 4474 * @new_mtu : the new MTU size for the device. 4475 * Description: the Maximum Transfer Unit (MTU) is used by the network layer 4476 * to drive packet transmission. Ethernet has an MTU of 1500 octets 4477 * (ETH_DATA_LEN). This value can be changed with ifconfig. 4478 * Return value: 4479 * 0 on success and an appropriate (-)ve integer as defined in errno.h 4480 * file on failure. 4481 */ 4482 static int stmmac_change_mtu(struct net_device *dev, int new_mtu) 4483 { 4484 struct stmmac_priv *priv = netdev_priv(dev); 4485 int txfifosz = priv->plat->tx_fifo_size; 4486 const int mtu = new_mtu; 4487 4488 if (txfifosz == 0) 4489 txfifosz = priv->dma_cap.tx_fifo_size; 4490 4491 txfifosz /= priv->plat->tx_queues_to_use; 4492 4493 if (netif_running(dev)) { 4494 netdev_err(priv->dev, "must be stopped to change its MTU\n"); 4495 return -EBUSY; 4496 } 4497 4498 new_mtu = STMMAC_ALIGN(new_mtu); 4499 4500 /* If condition true, FIFO is too small or MTU too large */ 4501 if ((txfifosz < new_mtu) || (new_mtu > BUF_SIZE_16KiB)) 4502 return -EINVAL; 4503 4504 dev->mtu = mtu; 4505 4506 netdev_update_features(dev); 4507 4508 return 0; 4509 } 4510 4511 static netdev_features_t stmmac_fix_features(struct net_device *dev, 4512 netdev_features_t features) 4513 { 4514 struct stmmac_priv *priv = netdev_priv(dev); 4515 4516 if (priv->plat->rx_coe == STMMAC_RX_COE_NONE) 4517 features &= ~NETIF_F_RXCSUM; 4518 4519 if (!priv->plat->tx_coe) 4520 features &= ~NETIF_F_CSUM_MASK; 4521 4522 /* Some GMAC devices have a bugged Jumbo frame support that 4523 * needs to have the Tx COE disabled for oversized frames 4524 * (due to limited buffer sizes). In this case we disable 4525 * the TX csum insertion in the TDES and not use SF. 
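 * Clearing NETIF_F_CSUM_MASK below makes the stack compute the
 * checksum in software for those oversized frames.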
4526 */ 4527 if (priv->plat->bugged_jumbo && (dev->mtu > ETH_DATA_LEN)) 4528 features &= ~NETIF_F_CSUM_MASK; 4529 4530 /* Disable tso if asked by ethtool */ 4531 if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) { 4532 if (features & NETIF_F_TSO) 4533 priv->tso = true; 4534 else 4535 priv->tso = false; 4536 } 4537 4538 return features; 4539 } 4540 4541 static int stmmac_set_features(struct net_device *netdev, 4542 netdev_features_t features) 4543 { 4544 struct stmmac_priv *priv = netdev_priv(netdev); 4545 bool sph_en; 4546 u32 chan; 4547 4548 /* Keep the COE Type in case of csum is supporting */ 4549 if (features & NETIF_F_RXCSUM) 4550 priv->hw->rx_csum = priv->plat->rx_coe; 4551 else 4552 priv->hw->rx_csum = 0; 4553 /* No check needed because rx_coe has been set before and it will be 4554 * fixed in case of issue. 4555 */ 4556 stmmac_rx_ipc(priv, priv->hw); 4557 4558 sph_en = (priv->hw->rx_csum > 0) && priv->sph; 4559 for (chan = 0; chan < priv->plat->rx_queues_to_use; chan++) 4560 stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan); 4561 4562 return 0; 4563 } 4564 4565 static void stmmac_fpe_event_status(struct stmmac_priv *priv, int status) 4566 { 4567 struct stmmac_fpe_cfg *fpe_cfg = priv->plat->fpe_cfg; 4568 enum stmmac_fpe_state *lo_state = &fpe_cfg->lo_fpe_state; 4569 enum stmmac_fpe_state *lp_state = &fpe_cfg->lp_fpe_state; 4570 bool *hs_enable = &fpe_cfg->hs_enable; 4571 4572 if (status == FPE_EVENT_UNKNOWN || !*hs_enable) 4573 return; 4574 4575 /* If LP has sent verify mPacket, LP is FPE capable */ 4576 if ((status & FPE_EVENT_RVER) == FPE_EVENT_RVER) { 4577 if (*lp_state < FPE_STATE_CAPABLE) 4578 *lp_state = FPE_STATE_CAPABLE; 4579 4580 /* If user has requested FPE enable, quickly response */ 4581 if (*hs_enable) 4582 stmmac_fpe_send_mpacket(priv, priv->ioaddr, 4583 MPACKET_RESPONSE); 4584 } 4585 4586 /* If Local has sent verify mPacket, Local is FPE capable */ 4587 if ((status & FPE_EVENT_TVER) == FPE_EVENT_TVER) { 4588 if (*lo_state < FPE_STATE_CAPABLE) 4589 *lo_state = FPE_STATE_CAPABLE; 4590 } 4591 4592 /* If LP has sent response mPacket, LP is entering FPE ON */ 4593 if ((status & FPE_EVENT_RRSP) == FPE_EVENT_RRSP) 4594 *lp_state = FPE_STATE_ENTERING_ON; 4595 4596 /* If Local has sent response mPacket, Local is entering FPE ON */ 4597 if ((status & FPE_EVENT_TRSP) == FPE_EVENT_TRSP) 4598 *lo_state = FPE_STATE_ENTERING_ON; 4599 4600 if (!test_bit(__FPE_REMOVING, &priv->fpe_task_state) && 4601 !test_and_set_bit(__FPE_TASK_SCHED, &priv->fpe_task_state) && 4602 priv->fpe_wq) { 4603 queue_work(priv->fpe_wq, &priv->fpe_task); 4604 } 4605 } 4606 4607 static void stmmac_common_interrupt(struct stmmac_priv *priv) 4608 { 4609 u32 rx_cnt = priv->plat->rx_queues_to_use; 4610 u32 tx_cnt = priv->plat->tx_queues_to_use; 4611 u32 queues_count; 4612 u32 queue; 4613 bool xmac; 4614 4615 xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac; 4616 queues_count = (rx_cnt > tx_cnt) ? 
rx_cnt : tx_cnt; 4617 4618 if (priv->irq_wake) 4619 pm_wakeup_event(priv->device, 0); 4620 4621 if (priv->dma_cap.estsel) 4622 stmmac_est_irq_status(priv, priv->ioaddr, priv->dev, 4623 &priv->xstats, tx_cnt); 4624 4625 if (priv->dma_cap.fpesel) { 4626 int status = stmmac_fpe_irq_status(priv, priv->ioaddr, 4627 priv->dev); 4628 4629 stmmac_fpe_event_status(priv, status); 4630 } 4631 4632 /* To handle GMAC own interrupts */ 4633 if ((priv->plat->has_gmac) || xmac) { 4634 int status = stmmac_host_irq_status(priv, priv->hw, &priv->xstats); 4635 int mtl_status; 4636 4637 if (unlikely(status)) { 4638 /* For LPI we need to save the tx status */ 4639 if (status & CORE_IRQ_TX_PATH_IN_LPI_MODE) 4640 priv->tx_path_in_lpi_mode = true; 4641 if (status & CORE_IRQ_TX_PATH_EXIT_LPI_MODE) 4642 priv->tx_path_in_lpi_mode = false; 4643 } 4644 4645 for (queue = 0; queue < queues_count; queue++) { 4646 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; 4647 4648 mtl_status = stmmac_host_mtl_irq_status(priv, priv->hw, 4649 queue); 4650 if (mtl_status != -EINVAL) 4651 status |= mtl_status; 4652 4653 if (status & CORE_IRQ_MTL_RX_OVERFLOW) 4654 stmmac_set_rx_tail_ptr(priv, priv->ioaddr, 4655 rx_q->rx_tail_addr, 4656 queue); 4657 } 4658 4659 /* PCS link status */ 4660 if (priv->hw->pcs) { 4661 if (priv->xstats.pcs_link) 4662 netif_carrier_on(priv->dev); 4663 else 4664 netif_carrier_off(priv->dev); 4665 } 4666 } 4667 } 4668 4669 /** 4670 * stmmac_interrupt - main ISR 4671 * @irq: interrupt number. 4672 * @dev_id: to pass the net device pointer. 4673 * Description: this is the main driver interrupt service routine. 4674 * It can call: 4675 * o DMA service routine (to manage incoming frame reception and transmission 4676 * status) 4677 * o Core interrupts to manage: remote wake-up, management counter, LPI 4678 * interrupts. 
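 * In the single shared-IRQ configuration this handler covers both the
 * common MAC events and all DMA channels; when the platform provides
 * per-channel vectors (multi_msi_en), the stmmac_msi_intr_tx() and
 * stmmac_msi_intr_rx() handlers below serve the DMA channels instead.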
4679 */ 4680 static irqreturn_t stmmac_interrupt(int irq, void *dev_id) 4681 { 4682 struct net_device *dev = (struct net_device *)dev_id; 4683 struct stmmac_priv *priv = netdev_priv(dev); 4684 4685 /* Check if adapter is up */ 4686 if (test_bit(STMMAC_DOWN, &priv->state)) 4687 return IRQ_HANDLED; 4688 4689 /* Check if a fatal error happened */ 4690 if (stmmac_safety_feat_interrupt(priv)) 4691 return IRQ_HANDLED; 4692 4693 /* To handle Common interrupts */ 4694 stmmac_common_interrupt(priv); 4695 4696 /* To handle DMA interrupts */ 4697 stmmac_dma_interrupt(priv); 4698 4699 return IRQ_HANDLED; 4700 } 4701 4702 static irqreturn_t stmmac_mac_interrupt(int irq, void *dev_id) 4703 { 4704 struct net_device *dev = (struct net_device *)dev_id; 4705 struct stmmac_priv *priv = netdev_priv(dev); 4706 4707 if (unlikely(!dev)) { 4708 netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__); 4709 return IRQ_NONE; 4710 } 4711 4712 /* Check if adapter is up */ 4713 if (test_bit(STMMAC_DOWN, &priv->state)) 4714 return IRQ_HANDLED; 4715 4716 /* To handle Common interrupts */ 4717 stmmac_common_interrupt(priv); 4718 4719 return IRQ_HANDLED; 4720 } 4721 4722 static irqreturn_t stmmac_safety_interrupt(int irq, void *dev_id) 4723 { 4724 struct net_device *dev = (struct net_device *)dev_id; 4725 struct stmmac_priv *priv = netdev_priv(dev); 4726 4727 if (unlikely(!dev)) { 4728 netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__); 4729 return IRQ_NONE; 4730 } 4731 4732 /* Check if adapter is up */ 4733 if (test_bit(STMMAC_DOWN, &priv->state)) 4734 return IRQ_HANDLED; 4735 4736 /* Check if a fatal error happened */ 4737 stmmac_safety_feat_interrupt(priv); 4738 4739 return IRQ_HANDLED; 4740 } 4741 4742 static irqreturn_t stmmac_msi_intr_tx(int irq, void *data) 4743 { 4744 struct stmmac_tx_queue *tx_q = (struct stmmac_tx_queue *)data; 4745 int chan = tx_q->queue_index; 4746 struct stmmac_priv *priv; 4747 int status; 4748 4749 priv = container_of(tx_q, struct stmmac_priv, tx_queue[chan]); 4750 4751 if (unlikely(!data)) { 4752 netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__); 4753 return IRQ_NONE; 4754 } 4755 4756 /* Check if adapter is up */ 4757 if (test_bit(STMMAC_DOWN, &priv->state)) 4758 return IRQ_HANDLED; 4759 4760 status = stmmac_napi_check(priv, chan, DMA_DIR_TX); 4761 4762 if (unlikely(status & tx_hard_error_bump_tc)) { 4763 /* Try to bump up the dma threshold on this failure */ 4764 if (unlikely(priv->xstats.threshold != SF_DMA_MODE) && 4765 tc <= 256) { 4766 tc += 64; 4767 if (priv->plat->force_thresh_dma_mode) 4768 stmmac_set_dma_operation_mode(priv, 4769 tc, 4770 tc, 4771 chan); 4772 else 4773 stmmac_set_dma_operation_mode(priv, 4774 tc, 4775 SF_DMA_MODE, 4776 chan); 4777 priv->xstats.threshold = tc; 4778 } 4779 } else if (unlikely(status == tx_hard_error)) { 4780 stmmac_tx_err(priv, chan); 4781 } 4782 4783 return IRQ_HANDLED; 4784 } 4785 4786 static irqreturn_t stmmac_msi_intr_rx(int irq, void *data) 4787 { 4788 struct stmmac_rx_queue *rx_q = (struct stmmac_rx_queue *)data; 4789 int chan = rx_q->queue_index; 4790 struct stmmac_priv *priv; 4791 4792 priv = container_of(rx_q, struct stmmac_priv, rx_queue[chan]); 4793 4794 if (unlikely(!data)) { 4795 netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__); 4796 return IRQ_NONE; 4797 } 4798 4799 /* Check if adapter is up */ 4800 if (test_bit(STMMAC_DOWN, &priv->state)) 4801 return IRQ_HANDLED; 4802 4803 stmmac_napi_check(priv, chan, DMA_DIR_RX); 4804 4805 return IRQ_HANDLED; 4806 } 4807 4808 #ifdef CONFIG_NET_POLL_CONTROLLER 4809 
/* Polling receive - used by NETCONSOLE and other diagnostic tools 4810 * to allow network I/O with interrupts disabled. 4811 */ 4812 static void stmmac_poll_controller(struct net_device *dev) 4813 { 4814 struct stmmac_priv *priv = netdev_priv(dev); 4815 int i; 4816 4817 /* If adapter is down, do nothing */ 4818 if (test_bit(STMMAC_DOWN, &priv->state)) 4819 return; 4820 4821 if (priv->plat->multi_msi_en) { 4822 for (i = 0; i < priv->plat->rx_queues_to_use; i++) 4823 stmmac_msi_intr_rx(0, &priv->rx_queue[i]); 4824 4825 for (i = 0; i < priv->plat->tx_queues_to_use; i++) 4826 stmmac_msi_intr_tx(0, &priv->tx_queue[i]); 4827 } else { 4828 disable_irq(dev->irq); 4829 stmmac_interrupt(dev->irq, dev); 4830 enable_irq(dev->irq); 4831 } 4832 } 4833 #endif 4834 4835 /** 4836 * stmmac_ioctl - Entry point for the Ioctl 4837 * @dev: Device pointer. 4838 * @rq: An IOCTL specefic structure, that can contain a pointer to 4839 * a proprietary structure used to pass information to the driver. 4840 * @cmd: IOCTL command 4841 * Description: 4842 * Currently it supports the phy_mii_ioctl(...) and HW time stamping. 4843 */ 4844 static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) 4845 { 4846 struct stmmac_priv *priv = netdev_priv (dev); 4847 int ret = -EOPNOTSUPP; 4848 4849 if (!netif_running(dev)) 4850 return -EINVAL; 4851 4852 switch (cmd) { 4853 case SIOCGMIIPHY: 4854 case SIOCGMIIREG: 4855 case SIOCSMIIREG: 4856 ret = phylink_mii_ioctl(priv->phylink, rq, cmd); 4857 break; 4858 case SIOCSHWTSTAMP: 4859 ret = stmmac_hwtstamp_set(dev, rq); 4860 break; 4861 case SIOCGHWTSTAMP: 4862 ret = stmmac_hwtstamp_get(dev, rq); 4863 break; 4864 default: 4865 break; 4866 } 4867 4868 return ret; 4869 } 4870 4871 static int stmmac_setup_tc_block_cb(enum tc_setup_type type, void *type_data, 4872 void *cb_priv) 4873 { 4874 struct stmmac_priv *priv = cb_priv; 4875 int ret = -EOPNOTSUPP; 4876 4877 if (!tc_cls_can_offload_and_chain0(priv->dev, type_data)) 4878 return ret; 4879 4880 stmmac_disable_all_queues(priv); 4881 4882 switch (type) { 4883 case TC_SETUP_CLSU32: 4884 ret = stmmac_tc_setup_cls_u32(priv, priv, type_data); 4885 break; 4886 case TC_SETUP_CLSFLOWER: 4887 ret = stmmac_tc_setup_cls(priv, priv, type_data); 4888 break; 4889 default: 4890 break; 4891 } 4892 4893 stmmac_enable_all_queues(priv); 4894 return ret; 4895 } 4896 4897 static LIST_HEAD(stmmac_block_cb_list); 4898 4899 static int stmmac_setup_tc(struct net_device *ndev, enum tc_setup_type type, 4900 void *type_data) 4901 { 4902 struct stmmac_priv *priv = netdev_priv(ndev); 4903 4904 switch (type) { 4905 case TC_SETUP_BLOCK: 4906 return flow_block_cb_setup_simple(type_data, 4907 &stmmac_block_cb_list, 4908 stmmac_setup_tc_block_cb, 4909 priv, priv, true); 4910 case TC_SETUP_QDISC_CBS: 4911 return stmmac_tc_setup_cbs(priv, priv, type_data); 4912 case TC_SETUP_QDISC_TAPRIO: 4913 return stmmac_tc_setup_taprio(priv, priv, type_data); 4914 case TC_SETUP_QDISC_ETF: 4915 return stmmac_tc_setup_etf(priv, priv, type_data); 4916 default: 4917 return -EOPNOTSUPP; 4918 } 4919 } 4920 4921 static u16 stmmac_select_queue(struct net_device *dev, struct sk_buff *skb, 4922 struct net_device *sb_dev) 4923 { 4924 int gso = skb_shinfo(skb)->gso_type; 4925 4926 if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6 | SKB_GSO_UDP_L4)) { 4927 /* 4928 * There is no way to determine the number of TSO/USO 4929 * capable Queues. Let's use always the Queue 0 4930 * because if TSO/USO is supported then at least this 4931 * one will be capable. 
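 * (USO here refers to UDP segmentation offload, i.e. the
 * SKB_GSO_UDP_L4 case tested above.)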
4932 */ 4933 return 0; 4934 } 4935 4936 return netdev_pick_tx(dev, skb, NULL) % dev->real_num_tx_queues; 4937 } 4938 4939 static int stmmac_set_mac_address(struct net_device *ndev, void *addr) 4940 { 4941 struct stmmac_priv *priv = netdev_priv(ndev); 4942 int ret = 0; 4943 4944 ret = eth_mac_addr(ndev, addr); 4945 if (ret) 4946 return ret; 4947 4948 stmmac_set_umac_addr(priv, priv->hw, ndev->dev_addr, 0); 4949 4950 return ret; 4951 } 4952 4953 #ifdef CONFIG_DEBUG_FS 4954 static struct dentry *stmmac_fs_dir; 4955 4956 static void sysfs_display_ring(void *head, int size, int extend_desc, 4957 struct seq_file *seq, dma_addr_t dma_phy_addr) 4958 { 4959 int i; 4960 struct dma_extended_desc *ep = (struct dma_extended_desc *)head; 4961 struct dma_desc *p = (struct dma_desc *)head; 4962 dma_addr_t dma_addr; 4963 4964 for (i = 0; i < size; i++) { 4965 if (extend_desc) { 4966 dma_addr = dma_phy_addr + i * sizeof(*ep); 4967 seq_printf(seq, "%d [%pad]: 0x%x 0x%x 0x%x 0x%x\n", 4968 i, &dma_addr, 4969 le32_to_cpu(ep->basic.des0), 4970 le32_to_cpu(ep->basic.des1), 4971 le32_to_cpu(ep->basic.des2), 4972 le32_to_cpu(ep->basic.des3)); 4973 ep++; 4974 } else { 4975 dma_addr = dma_phy_addr + i * sizeof(*p); 4976 seq_printf(seq, "%d [%pad]: 0x%x 0x%x 0x%x 0x%x\n", 4977 i, &dma_addr, 4978 le32_to_cpu(p->des0), le32_to_cpu(p->des1), 4979 le32_to_cpu(p->des2), le32_to_cpu(p->des3)); 4980 p++; 4981 } 4982 seq_printf(seq, "\n"); 4983 } 4984 } 4985 4986 static int stmmac_rings_status_show(struct seq_file *seq, void *v) 4987 { 4988 struct net_device *dev = seq->private; 4989 struct stmmac_priv *priv = netdev_priv(dev); 4990 u32 rx_count = priv->plat->rx_queues_to_use; 4991 u32 tx_count = priv->plat->tx_queues_to_use; 4992 u32 queue; 4993 4994 if ((dev->flags & IFF_UP) == 0) 4995 return 0; 4996 4997 for (queue = 0; queue < rx_count; queue++) { 4998 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; 4999 5000 seq_printf(seq, "RX Queue %d:\n", queue); 5001 5002 if (priv->extend_desc) { 5003 seq_printf(seq, "Extended descriptor ring:\n"); 5004 sysfs_display_ring((void *)rx_q->dma_erx, 5005 priv->dma_rx_size, 1, seq, rx_q->dma_rx_phy); 5006 } else { 5007 seq_printf(seq, "Descriptor ring:\n"); 5008 sysfs_display_ring((void *)rx_q->dma_rx, 5009 priv->dma_rx_size, 0, seq, rx_q->dma_rx_phy); 5010 } 5011 } 5012 5013 for (queue = 0; queue < tx_count; queue++) { 5014 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue]; 5015 5016 seq_printf(seq, "TX Queue %d:\n", queue); 5017 5018 if (priv->extend_desc) { 5019 seq_printf(seq, "Extended descriptor ring:\n"); 5020 sysfs_display_ring((void *)tx_q->dma_etx, 5021 priv->dma_tx_size, 1, seq, tx_q->dma_tx_phy); 5022 } else if (!(tx_q->tbs & STMMAC_TBS_AVAIL)) { 5023 seq_printf(seq, "Descriptor ring:\n"); 5024 sysfs_display_ring((void *)tx_q->dma_tx, 5025 priv->dma_tx_size, 0, seq, tx_q->dma_tx_phy); 5026 } 5027 } 5028 5029 return 0; 5030 } 5031 DEFINE_SHOW_ATTRIBUTE(stmmac_rings_status); 5032 5033 static int stmmac_dma_cap_show(struct seq_file *seq, void *v) 5034 { 5035 struct net_device *dev = seq->private; 5036 struct stmmac_priv *priv = netdev_priv(dev); 5037 5038 if (!priv->hw_cap_support) { 5039 seq_printf(seq, "DMA HW features not supported\n"); 5040 return 0; 5041 } 5042 5043 seq_printf(seq, "==============================\n"); 5044 seq_printf(seq, "\tDMA HW features\n"); 5045 seq_printf(seq, "==============================\n"); 5046 5047 seq_printf(seq, "\t10/100 Mbps: %s\n", 5048 (priv->dma_cap.mbps_10_100) ? 
"Y" : "N"); 5049 seq_printf(seq, "\t1000 Mbps: %s\n", 5050 (priv->dma_cap.mbps_1000) ? "Y" : "N"); 5051 seq_printf(seq, "\tHalf duplex: %s\n", 5052 (priv->dma_cap.half_duplex) ? "Y" : "N"); 5053 seq_printf(seq, "\tHash Filter: %s\n", 5054 (priv->dma_cap.hash_filter) ? "Y" : "N"); 5055 seq_printf(seq, "\tMultiple MAC address registers: %s\n", 5056 (priv->dma_cap.multi_addr) ? "Y" : "N"); 5057 seq_printf(seq, "\tPCS (TBI/SGMII/RTBI PHY interfaces): %s\n", 5058 (priv->dma_cap.pcs) ? "Y" : "N"); 5059 seq_printf(seq, "\tSMA (MDIO) Interface: %s\n", 5060 (priv->dma_cap.sma_mdio) ? "Y" : "N"); 5061 seq_printf(seq, "\tPMT Remote wake up: %s\n", 5062 (priv->dma_cap.pmt_remote_wake_up) ? "Y" : "N"); 5063 seq_printf(seq, "\tPMT Magic Frame: %s\n", 5064 (priv->dma_cap.pmt_magic_frame) ? "Y" : "N"); 5065 seq_printf(seq, "\tRMON module: %s\n", 5066 (priv->dma_cap.rmon) ? "Y" : "N"); 5067 seq_printf(seq, "\tIEEE 1588-2002 Time Stamp: %s\n", 5068 (priv->dma_cap.time_stamp) ? "Y" : "N"); 5069 seq_printf(seq, "\tIEEE 1588-2008 Advanced Time Stamp: %s\n", 5070 (priv->dma_cap.atime_stamp) ? "Y" : "N"); 5071 seq_printf(seq, "\t802.3az - Energy-Efficient Ethernet (EEE): %s\n", 5072 (priv->dma_cap.eee) ? "Y" : "N"); 5073 seq_printf(seq, "\tAV features: %s\n", (priv->dma_cap.av) ? "Y" : "N"); 5074 seq_printf(seq, "\tChecksum Offload in TX: %s\n", 5075 (priv->dma_cap.tx_coe) ? "Y" : "N"); 5076 if (priv->synopsys_id >= DWMAC_CORE_4_00) { 5077 seq_printf(seq, "\tIP Checksum Offload in RX: %s\n", 5078 (priv->dma_cap.rx_coe) ? "Y" : "N"); 5079 } else { 5080 seq_printf(seq, "\tIP Checksum Offload (type1) in RX: %s\n", 5081 (priv->dma_cap.rx_coe_type1) ? "Y" : "N"); 5082 seq_printf(seq, "\tIP Checksum Offload (type2) in RX: %s\n", 5083 (priv->dma_cap.rx_coe_type2) ? "Y" : "N"); 5084 } 5085 seq_printf(seq, "\tRXFIFO > 2048bytes: %s\n", 5086 (priv->dma_cap.rxfifo_over_2048) ? "Y" : "N"); 5087 seq_printf(seq, "\tNumber of Additional RX channel: %d\n", 5088 priv->dma_cap.number_rx_channel); 5089 seq_printf(seq, "\tNumber of Additional TX channel: %d\n", 5090 priv->dma_cap.number_tx_channel); 5091 seq_printf(seq, "\tNumber of Additional RX queues: %d\n", 5092 priv->dma_cap.number_rx_queues); 5093 seq_printf(seq, "\tNumber of Additional TX queues: %d\n", 5094 priv->dma_cap.number_tx_queues); 5095 seq_printf(seq, "\tEnhanced descriptors: %s\n", 5096 (priv->dma_cap.enh_desc) ? "Y" : "N"); 5097 seq_printf(seq, "\tTX Fifo Size: %d\n", priv->dma_cap.tx_fifo_size); 5098 seq_printf(seq, "\tRX Fifo Size: %d\n", priv->dma_cap.rx_fifo_size); 5099 seq_printf(seq, "\tHash Table Size: %d\n", priv->dma_cap.hash_tb_sz); 5100 seq_printf(seq, "\tTSO: %s\n", priv->dma_cap.tsoen ? "Y" : "N"); 5101 seq_printf(seq, "\tNumber of PPS Outputs: %d\n", 5102 priv->dma_cap.pps_out_num); 5103 seq_printf(seq, "\tSafety Features: %s\n", 5104 priv->dma_cap.asp ? "Y" : "N"); 5105 seq_printf(seq, "\tFlexible RX Parser: %s\n", 5106 priv->dma_cap.frpsel ? "Y" : "N"); 5107 seq_printf(seq, "\tEnhanced Addressing: %d\n", 5108 priv->dma_cap.addr64); 5109 seq_printf(seq, "\tReceive Side Scaling: %s\n", 5110 priv->dma_cap.rssen ? "Y" : "N"); 5111 seq_printf(seq, "\tVLAN Hash Filtering: %s\n", 5112 priv->dma_cap.vlhash ? "Y" : "N"); 5113 seq_printf(seq, "\tSplit Header: %s\n", 5114 priv->dma_cap.sphen ? "Y" : "N"); 5115 seq_printf(seq, "\tVLAN TX Insertion: %s\n", 5116 priv->dma_cap.vlins ? "Y" : "N"); 5117 seq_printf(seq, "\tDouble VLAN: %s\n", 5118 priv->dma_cap.dvlan ? 
"Y" : "N"); 5119 seq_printf(seq, "\tNumber of L3/L4 Filters: %d\n", 5120 priv->dma_cap.l3l4fnum); 5121 seq_printf(seq, "\tARP Offloading: %s\n", 5122 priv->dma_cap.arpoffsel ? "Y" : "N"); 5123 seq_printf(seq, "\tEnhancements to Scheduled Traffic (EST): %s\n", 5124 priv->dma_cap.estsel ? "Y" : "N"); 5125 seq_printf(seq, "\tFrame Preemption (FPE): %s\n", 5126 priv->dma_cap.fpesel ? "Y" : "N"); 5127 seq_printf(seq, "\tTime-Based Scheduling (TBS): %s\n", 5128 priv->dma_cap.tbssel ? "Y" : "N"); 5129 return 0; 5130 } 5131 DEFINE_SHOW_ATTRIBUTE(stmmac_dma_cap); 5132 5133 /* Use network device events to rename debugfs file entries. 5134 */ 5135 static int stmmac_device_event(struct notifier_block *unused, 5136 unsigned long event, void *ptr) 5137 { 5138 struct net_device *dev = netdev_notifier_info_to_dev(ptr); 5139 struct stmmac_priv *priv = netdev_priv(dev); 5140 5141 if (dev->netdev_ops != &stmmac_netdev_ops) 5142 goto done; 5143 5144 switch (event) { 5145 case NETDEV_CHANGENAME: 5146 if (priv->dbgfs_dir) 5147 priv->dbgfs_dir = debugfs_rename(stmmac_fs_dir, 5148 priv->dbgfs_dir, 5149 stmmac_fs_dir, 5150 dev->name); 5151 break; 5152 } 5153 done: 5154 return NOTIFY_DONE; 5155 } 5156 5157 static struct notifier_block stmmac_notifier = { 5158 .notifier_call = stmmac_device_event, 5159 }; 5160 5161 static void stmmac_init_fs(struct net_device *dev) 5162 { 5163 struct stmmac_priv *priv = netdev_priv(dev); 5164 5165 rtnl_lock(); 5166 5167 /* Create per netdev entries */ 5168 priv->dbgfs_dir = debugfs_create_dir(dev->name, stmmac_fs_dir); 5169 5170 /* Entry to report DMA RX/TX rings */ 5171 debugfs_create_file("descriptors_status", 0444, priv->dbgfs_dir, dev, 5172 &stmmac_rings_status_fops); 5173 5174 /* Entry to report the DMA HW features */ 5175 debugfs_create_file("dma_cap", 0444, priv->dbgfs_dir, dev, 5176 &stmmac_dma_cap_fops); 5177 5178 rtnl_unlock(); 5179 } 5180 5181 static void stmmac_exit_fs(struct net_device *dev) 5182 { 5183 struct stmmac_priv *priv = netdev_priv(dev); 5184 5185 debugfs_remove_recursive(priv->dbgfs_dir); 5186 } 5187 #endif /* CONFIG_DEBUG_FS */ 5188 5189 static u32 stmmac_vid_crc32_le(__le16 vid_le) 5190 { 5191 unsigned char *data = (unsigned char *)&vid_le; 5192 unsigned char data_byte = 0; 5193 u32 crc = ~0x0; 5194 u32 temp = 0; 5195 int i, bits; 5196 5197 bits = get_bitmask_order(VLAN_VID_MASK); 5198 for (i = 0; i < bits; i++) { 5199 if ((i % 8) == 0) 5200 data_byte = data[i / 8]; 5201 5202 temp = ((crc & 1) ^ data_byte) & 1; 5203 crc >>= 1; 5204 data_byte >>= 1; 5205 5206 if (temp) 5207 crc ^= 0xedb88320; 5208 } 5209 5210 return crc; 5211 } 5212 5213 static int stmmac_vlan_update(struct stmmac_priv *priv, bool is_double) 5214 { 5215 u32 crc, hash = 0; 5216 __le16 pmatch = 0; 5217 int count = 0; 5218 u16 vid = 0; 5219 5220 for_each_set_bit(vid, priv->active_vlans, VLAN_N_VID) { 5221 __le16 vid_le = cpu_to_le16(vid); 5222 crc = bitrev32(~stmmac_vid_crc32_le(vid_le)) >> 28; 5223 hash |= (1 << crc); 5224 count++; 5225 } 5226 5227 if (!priv->dma_cap.vlhash) { 5228 if (count > 2) /* VID = 0 always passes filter */ 5229 return -EOPNOTSUPP; 5230 5231 pmatch = cpu_to_le16(vid); 5232 hash = 0; 5233 } 5234 5235 return stmmac_update_vlan_hash(priv, priv->hw, hash, pmatch, is_double); 5236 } 5237 5238 static int stmmac_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid) 5239 { 5240 struct stmmac_priv *priv = netdev_priv(ndev); 5241 bool is_double = false; 5242 int ret; 5243 5244 ret = pm_runtime_get_sync(priv->device); 5245 if (ret < 0) { 5246 
pm_runtime_put_noidle(priv->device); 5247 return ret; 5248 } 5249 5250 if (be16_to_cpu(proto) == ETH_P_8021AD) 5251 is_double = true; 5252 5253 set_bit(vid, priv->active_vlans); 5254 ret = stmmac_vlan_update(priv, is_double); 5255 if (ret) { 5256 clear_bit(vid, priv->active_vlans); 5257 return ret; 5258 } 5259 5260 if (priv->hw->num_vlan) { 5261 ret = stmmac_add_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid); 5262 if (ret) 5263 return ret; 5264 } 5265 5266 return 0; 5267 } 5268 5269 static int stmmac_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid) 5270 { 5271 struct stmmac_priv *priv = netdev_priv(ndev); 5272 bool is_double = false; 5273 int ret; 5274 5275 if (be16_to_cpu(proto) == ETH_P_8021AD) 5276 is_double = true; 5277 5278 clear_bit(vid, priv->active_vlans); 5279 5280 if (priv->hw->num_vlan) { 5281 ret = stmmac_del_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid); 5282 if (ret) 5283 goto del_vlan_error; 5284 } 5285 5286 ret = stmmac_vlan_update(priv, is_double); 5287 5288 del_vlan_error: 5289 pm_runtime_put(priv->device); 5290 5291 return ret; 5292 } 5293 5294 static const struct net_device_ops stmmac_netdev_ops = { 5295 .ndo_open = stmmac_open, 5296 .ndo_start_xmit = stmmac_xmit, 5297 .ndo_stop = stmmac_release, 5298 .ndo_change_mtu = stmmac_change_mtu, 5299 .ndo_fix_features = stmmac_fix_features, 5300 .ndo_set_features = stmmac_set_features, 5301 .ndo_set_rx_mode = stmmac_set_rx_mode, 5302 .ndo_tx_timeout = stmmac_tx_timeout, 5303 .ndo_do_ioctl = stmmac_ioctl, 5304 .ndo_setup_tc = stmmac_setup_tc, 5305 .ndo_select_queue = stmmac_select_queue, 5306 #ifdef CONFIG_NET_POLL_CONTROLLER 5307 .ndo_poll_controller = stmmac_poll_controller, 5308 #endif 5309 .ndo_set_mac_address = stmmac_set_mac_address, 5310 .ndo_vlan_rx_add_vid = stmmac_vlan_rx_add_vid, 5311 .ndo_vlan_rx_kill_vid = stmmac_vlan_rx_kill_vid, 5312 }; 5313 5314 static void stmmac_reset_subtask(struct stmmac_priv *priv) 5315 { 5316 if (!test_and_clear_bit(STMMAC_RESET_REQUESTED, &priv->state)) 5317 return; 5318 if (test_bit(STMMAC_DOWN, &priv->state)) 5319 return; 5320 5321 netdev_err(priv->dev, "Reset adapter.\n"); 5322 5323 rtnl_lock(); 5324 netif_trans_update(priv->dev); 5325 while (test_and_set_bit(STMMAC_RESETING, &priv->state)) 5326 usleep_range(1000, 2000); 5327 5328 set_bit(STMMAC_DOWN, &priv->state); 5329 dev_close(priv->dev); 5330 dev_open(priv->dev, NULL); 5331 clear_bit(STMMAC_DOWN, &priv->state); 5332 clear_bit(STMMAC_RESETING, &priv->state); 5333 rtnl_unlock(); 5334 } 5335 5336 static void stmmac_service_task(struct work_struct *work) 5337 { 5338 struct stmmac_priv *priv = container_of(work, struct stmmac_priv, 5339 service_task); 5340 5341 stmmac_reset_subtask(priv); 5342 clear_bit(STMMAC_SERVICE_SCHED, &priv->state); 5343 } 5344 5345 /** 5346 * stmmac_hw_init - Init the MAC device 5347 * @priv: driver private structure 5348 * Description: this function is to configure the MAC device according to 5349 * some platform parameters or the HW capability register. It prepares the 5350 * driver to use either ring or chain modes and to setup either enhanced or 5351 * normal descriptors. 
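 * Called from stmmac_dvr_probe() before the network device is
 * registered.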
5352 */
5353 static int stmmac_hw_init(struct stmmac_priv *priv)
5354 {
5355 int ret;
5356
5357 /* dwmac-sun8i only works in chain mode */
5358 if (priv->plat->has_sun8i)
5359 chain_mode = 1;
5360 priv->chain_mode = chain_mode;
5361
5362 /* Initialize HW Interface */
5363 ret = stmmac_hwif_init(priv);
5364 if (ret)
5365 return ret;
5366
5367 /* Get the HW capability (new GMAC newer than 3.50a) */
5368 priv->hw_cap_support = stmmac_get_hw_features(priv);
5369 if (priv->hw_cap_support) {
5370 dev_info(priv->device, "DMA HW capability register supported\n");
5371
5372 /* We can override some gmac/dma configuration fields: e.g.
5373 * enh_desc, tx_coe (e.g. that are passed through the
5374 * platform) with the values from the HW capability
5375 * register (if supported).
5376 */
5377 priv->plat->enh_desc = priv->dma_cap.enh_desc;
5378 priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up;
5379 priv->hw->pmt = priv->plat->pmt;
5380 if (priv->dma_cap.hash_tb_sz) {
5381 priv->hw->multicast_filter_bins =
5382 (BIT(priv->dma_cap.hash_tb_sz) << 5);
5383 priv->hw->mcast_bits_log2 =
5384 ilog2(priv->hw->multicast_filter_bins);
5385 }
5386
5387 /* TXCOE doesn't work in thresh DMA mode */
5388 if (priv->plat->force_thresh_dma_mode)
5389 priv->plat->tx_coe = 0;
5390 else
5391 priv->plat->tx_coe = priv->dma_cap.tx_coe;
5392
5393 /* In case of GMAC4 rx_coe is from HW cap register. */
5394 priv->plat->rx_coe = priv->dma_cap.rx_coe;
5395
5396 if (priv->dma_cap.rx_coe_type2)
5397 priv->plat->rx_coe = STMMAC_RX_COE_TYPE2;
5398 else if (priv->dma_cap.rx_coe_type1)
5399 priv->plat->rx_coe = STMMAC_RX_COE_TYPE1;
5400
5401 } else {
5402 dev_info(priv->device, "No HW DMA feature register supported\n");
5403 }
5404
5405 if (priv->plat->rx_coe) {
5406 priv->hw->rx_csum = priv->plat->rx_coe;
5407 dev_info(priv->device, "RX Checksum Offload Engine supported\n");
5408 if (priv->synopsys_id < DWMAC_CORE_4_00)
5409 dev_info(priv->device, "COE Type %d\n", priv->hw->rx_csum);
5410 }
5411 if (priv->plat->tx_coe)
5412 dev_info(priv->device, "TX Checksum insertion supported\n");
5413
5414 if (priv->plat->pmt) {
5415 dev_info(priv->device, "Wake-Up On Lan supported\n");
5416 device_set_wakeup_capable(priv->device, 1);
5417 }
5418
5419 if (priv->dma_cap.tsoen)
5420 dev_info(priv->device, "TSO supported\n");
5421
5422 priv->hw->vlan_fail_q_en = priv->plat->vlan_fail_q_en;
5423 priv->hw->vlan_fail_q = priv->plat->vlan_fail_q;
5424
5425 /* Run HW quirks, if any */
5426 if (priv->hwif_quirks) {
5427 ret = priv->hwif_quirks(priv);
5428 if (ret)
5429 return ret;
5430 }
5431
5432 /* Rx Watchdog is available in the COREs newer than 3.40.
5433 * In some cases, for example on buggy HW, this feature
5434 * has to be disabled; this can be done by passing the
5435 * riwt_off field from the platform.
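 * When use_riwt is set, RX interrupts can be mitigated by the HW
 * watchdog timer (see the use_rx_wd handling in stmmac_rx_refill())
 * instead of raising an interrupt for every received frame.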
5436 */ 5437 if (((priv->synopsys_id >= DWMAC_CORE_3_50) || 5438 (priv->plat->has_xgmac)) && (!priv->plat->riwt_off)) { 5439 priv->use_riwt = 1; 5440 dev_info(priv->device, 5441 "Enable RX Mitigation via HW Watchdog Timer\n"); 5442 } 5443 5444 return 0; 5445 } 5446 5447 static void stmmac_napi_add(struct net_device *dev) 5448 { 5449 struct stmmac_priv *priv = netdev_priv(dev); 5450 u32 queue, maxq; 5451 5452 maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use); 5453 5454 for (queue = 0; queue < maxq; queue++) { 5455 struct stmmac_channel *ch = &priv->channel[queue]; 5456 5457 ch->priv_data = priv; 5458 ch->index = queue; 5459 spin_lock_init(&ch->lock); 5460 5461 if (queue < priv->plat->rx_queues_to_use) { 5462 netif_napi_add(dev, &ch->rx_napi, stmmac_napi_poll_rx, 5463 NAPI_POLL_WEIGHT); 5464 } 5465 if (queue < priv->plat->tx_queues_to_use) { 5466 netif_tx_napi_add(dev, &ch->tx_napi, 5467 stmmac_napi_poll_tx, 5468 NAPI_POLL_WEIGHT); 5469 } 5470 } 5471 } 5472 5473 static void stmmac_napi_del(struct net_device *dev) 5474 { 5475 struct stmmac_priv *priv = netdev_priv(dev); 5476 u32 queue, maxq; 5477 5478 maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use); 5479 5480 for (queue = 0; queue < maxq; queue++) { 5481 struct stmmac_channel *ch = &priv->channel[queue]; 5482 5483 if (queue < priv->plat->rx_queues_to_use) 5484 netif_napi_del(&ch->rx_napi); 5485 if (queue < priv->plat->tx_queues_to_use) 5486 netif_napi_del(&ch->tx_napi); 5487 } 5488 } 5489 5490 int stmmac_reinit_queues(struct net_device *dev, u32 rx_cnt, u32 tx_cnt) 5491 { 5492 struct stmmac_priv *priv = netdev_priv(dev); 5493 int ret = 0; 5494 5495 if (netif_running(dev)) 5496 stmmac_release(dev); 5497 5498 stmmac_napi_del(dev); 5499 5500 priv->plat->rx_queues_to_use = rx_cnt; 5501 priv->plat->tx_queues_to_use = tx_cnt; 5502 5503 stmmac_napi_add(dev); 5504 5505 if (netif_running(dev)) 5506 ret = stmmac_open(dev); 5507 5508 return ret; 5509 } 5510 5511 int stmmac_reinit_ringparam(struct net_device *dev, u32 rx_size, u32 tx_size) 5512 { 5513 struct stmmac_priv *priv = netdev_priv(dev); 5514 int ret = 0; 5515 5516 if (netif_running(dev)) 5517 stmmac_release(dev); 5518 5519 priv->dma_rx_size = rx_size; 5520 priv->dma_tx_size = tx_size; 5521 5522 if (netif_running(dev)) 5523 ret = stmmac_open(dev); 5524 5525 return ret; 5526 } 5527 5528 #define SEND_VERIFY_MPAKCET_FMT "Send Verify mPacket lo_state=%d lp_state=%d\n" 5529 static void stmmac_fpe_lp_task(struct work_struct *work) 5530 { 5531 struct stmmac_priv *priv = container_of(work, struct stmmac_priv, 5532 fpe_task); 5533 struct stmmac_fpe_cfg *fpe_cfg = priv->plat->fpe_cfg; 5534 enum stmmac_fpe_state *lo_state = &fpe_cfg->lo_fpe_state; 5535 enum stmmac_fpe_state *lp_state = &fpe_cfg->lp_fpe_state; 5536 bool *hs_enable = &fpe_cfg->hs_enable; 5537 bool *enable = &fpe_cfg->enable; 5538 int retries = 20; 5539 5540 while (retries-- > 0) { 5541 /* Bail out immediately if FPE handshake is OFF */ 5542 if (*lo_state == FPE_STATE_OFF || !*hs_enable) 5543 break; 5544 5545 if (*lo_state == FPE_STATE_ENTERING_ON && 5546 *lp_state == FPE_STATE_ENTERING_ON) { 5547 stmmac_fpe_configure(priv, priv->ioaddr, 5548 priv->plat->tx_queues_to_use, 5549 priv->plat->rx_queues_to_use, 5550 *enable); 5551 5552 netdev_info(priv->dev, "configured FPE\n"); 5553 5554 *lo_state = FPE_STATE_ON; 5555 *lp_state = FPE_STATE_ON; 5556 netdev_info(priv->dev, "!!! 
BOTH FPE stations ON\n"); 5557 break; 5558 } 5559 5560 if ((*lo_state == FPE_STATE_CAPABLE || 5561 *lo_state == FPE_STATE_ENTERING_ON) && 5562 *lp_state != FPE_STATE_ON) { 5563 netdev_info(priv->dev, SEND_VERIFY_MPAKCET_FMT, 5564 *lo_state, *lp_state); 5565 stmmac_fpe_send_mpacket(priv, priv->ioaddr, 5566 MPACKET_VERIFY); 5567 } 5568 /* Sleep then retry */ 5569 msleep(500); 5570 } 5571 5572 clear_bit(__FPE_TASK_SCHED, &priv->fpe_task_state); 5573 } 5574 5575 void stmmac_fpe_handshake(struct stmmac_priv *priv, bool enable) 5576 { 5577 if (priv->plat->fpe_cfg->hs_enable != enable) { 5578 if (enable) { 5579 stmmac_fpe_send_mpacket(priv, priv->ioaddr, 5580 MPACKET_VERIFY); 5581 } else { 5582 priv->plat->fpe_cfg->lo_fpe_state = FPE_STATE_OFF; 5583 priv->plat->fpe_cfg->lp_fpe_state = FPE_STATE_OFF; 5584 } 5585 5586 priv->plat->fpe_cfg->hs_enable = enable; 5587 } 5588 } 5589 5590 /** 5591 * stmmac_dvr_probe 5592 * @device: device pointer 5593 * @plat_dat: platform data pointer 5594 * @res: stmmac resource pointer 5595 * Description: this is the main probe function used to 5596 * call the alloc_etherdev, allocate the priv structure. 5597 * Return: 5598 * returns 0 on success, otherwise errno. 5599 */ 5600 int stmmac_dvr_probe(struct device *device, 5601 struct plat_stmmacenet_data *plat_dat, 5602 struct stmmac_resources *res) 5603 { 5604 struct net_device *ndev = NULL; 5605 struct stmmac_priv *priv; 5606 u32 rxq; 5607 int i, ret = 0; 5608 5609 ndev = devm_alloc_etherdev_mqs(device, sizeof(struct stmmac_priv), 5610 MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES); 5611 if (!ndev) 5612 return -ENOMEM; 5613 5614 SET_NETDEV_DEV(ndev, device); 5615 5616 priv = netdev_priv(ndev); 5617 priv->device = device; 5618 priv->dev = ndev; 5619 5620 stmmac_set_ethtool_ops(ndev); 5621 priv->pause = pause; 5622 priv->plat = plat_dat; 5623 priv->ioaddr = res->addr; 5624 priv->dev->base_addr = (unsigned long)res->addr; 5625 priv->plat->dma_cfg->multi_msi_en = priv->plat->multi_msi_en; 5626 5627 priv->dev->irq = res->irq; 5628 priv->wol_irq = res->wol_irq; 5629 priv->lpi_irq = res->lpi_irq; 5630 priv->sfty_ce_irq = res->sfty_ce_irq; 5631 priv->sfty_ue_irq = res->sfty_ue_irq; 5632 for (i = 0; i < MTL_MAX_RX_QUEUES; i++) 5633 priv->rx_irq[i] = res->rx_irq[i]; 5634 for (i = 0; i < MTL_MAX_TX_QUEUES; i++) 5635 priv->tx_irq[i] = res->tx_irq[i]; 5636 5637 if (!IS_ERR_OR_NULL(res->mac)) 5638 memcpy(priv->dev->dev_addr, res->mac, ETH_ALEN); 5639 5640 dev_set_drvdata(device, priv->dev); 5641 5642 /* Verify driver arguments */ 5643 stmmac_verify_args(); 5644 5645 /* Allocate workqueue */ 5646 priv->wq = create_singlethread_workqueue("stmmac_wq"); 5647 if (!priv->wq) { 5648 dev_err(priv->device, "failed to create workqueue\n"); 5649 return -ENOMEM; 5650 } 5651 5652 INIT_WORK(&priv->service_task, stmmac_service_task); 5653 5654 /* Initialize Link Partner FPE workqueue */ 5655 INIT_WORK(&priv->fpe_task, stmmac_fpe_lp_task); 5656 5657 /* Override with kernel parameters if supplied XXX CRS XXX 5658 * this needs to have multiple instances 5659 */ 5660 if ((phyaddr >= 0) && (phyaddr <= 31)) 5661 priv->plat->phy_addr = phyaddr; 5662 5663 if (priv->plat->stmmac_rst) { 5664 ret = reset_control_assert(priv->plat->stmmac_rst); 5665 reset_control_deassert(priv->plat->stmmac_rst); 5666 /* Some reset controllers have only reset callback instead of 5667 * assert + deassert callbacks pair. 
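 * For those, reset_control_assert() returns -ENOTSUPP and the code
 * below falls back to reset_control_reset().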
void stmmac_fpe_handshake(struct stmmac_priv *priv, bool enable)
{
	if (priv->plat->fpe_cfg->hs_enable != enable) {
		if (enable) {
			stmmac_fpe_send_mpacket(priv, priv->ioaddr,
						MPACKET_VERIFY);
		} else {
			priv->plat->fpe_cfg->lo_fpe_state = FPE_STATE_OFF;
			priv->plat->fpe_cfg->lp_fpe_state = FPE_STATE_OFF;
		}

		priv->plat->fpe_cfg->hs_enable = enable;
	}
}

/**
 * stmmac_dvr_probe
 * @device: device pointer
 * @plat_dat: platform data pointer
 * @res: stmmac resource pointer
 * Description: this is the main probe function: it allocates the network
 * device via alloc_etherdev, sets up the private structure and initializes
 * the hardware.
 * Return: 0 on success, otherwise a negative errno.
 */
int stmmac_dvr_probe(struct device *device,
		     struct plat_stmmacenet_data *plat_dat,
		     struct stmmac_resources *res)
{
	struct net_device *ndev = NULL;
	struct stmmac_priv *priv;
	u32 rxq;
	int i, ret = 0;

	ndev = devm_alloc_etherdev_mqs(device, sizeof(struct stmmac_priv),
				       MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES);
	if (!ndev)
		return -ENOMEM;

	SET_NETDEV_DEV(ndev, device);

	priv = netdev_priv(ndev);
	priv->device = device;
	priv->dev = ndev;

	stmmac_set_ethtool_ops(ndev);
	priv->pause = pause;
	priv->plat = plat_dat;
	priv->ioaddr = res->addr;
	priv->dev->base_addr = (unsigned long)res->addr;
	priv->plat->dma_cfg->multi_msi_en = priv->plat->multi_msi_en;

	priv->dev->irq = res->irq;
	priv->wol_irq = res->wol_irq;
	priv->lpi_irq = res->lpi_irq;
	priv->sfty_ce_irq = res->sfty_ce_irq;
	priv->sfty_ue_irq = res->sfty_ue_irq;
	for (i = 0; i < MTL_MAX_RX_QUEUES; i++)
		priv->rx_irq[i] = res->rx_irq[i];
	for (i = 0; i < MTL_MAX_TX_QUEUES; i++)
		priv->tx_irq[i] = res->tx_irq[i];

	if (!IS_ERR_OR_NULL(res->mac))
		memcpy(priv->dev->dev_addr, res->mac, ETH_ALEN);

	dev_set_drvdata(device, priv->dev);

	/* Verify driver arguments */
	stmmac_verify_args();

	/* Allocate workqueue */
	priv->wq = create_singlethread_workqueue("stmmac_wq");
	if (!priv->wq) {
		dev_err(priv->device, "failed to create workqueue\n");
		return -ENOMEM;
	}

	INIT_WORK(&priv->service_task, stmmac_service_task);

	/* Initialize Link Partner FPE workqueue */
	INIT_WORK(&priv->fpe_task, stmmac_fpe_lp_task);

	/* Override with kernel parameters if supplied XXX CRS XXX
	 * this needs to have multiple instances
	 */
	if ((phyaddr >= 0) && (phyaddr <= 31))
		priv->plat->phy_addr = phyaddr;

	if (priv->plat->stmmac_rst) {
		ret = reset_control_assert(priv->plat->stmmac_rst);
		reset_control_deassert(priv->plat->stmmac_rst);
		/* Some reset controllers have only a reset callback instead
		 * of the assert + deassert callback pair.
		 */
		if (ret == -ENOTSUPP)
			reset_control_reset(priv->plat->stmmac_rst);
	}

	/* Init MAC and get the capabilities */
	ret = stmmac_hw_init(priv);
	if (ret)
		goto error_hw_init;

	stmmac_check_ether_addr(priv);

	ndev->netdev_ops = &stmmac_netdev_ops;

	ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
			    NETIF_F_RXCSUM;

	ret = stmmac_tc_init(priv, priv);
	if (!ret)
		ndev->hw_features |= NETIF_F_HW_TC;

	if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
		ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
		if (priv->plat->has_gmac4)
			ndev->hw_features |= NETIF_F_GSO_UDP_L4;
		priv->tso = true;
		dev_info(priv->device, "TSO feature enabled\n");
	}

	if (priv->dma_cap.sphen) {
		ndev->hw_features |= NETIF_F_GRO;
		priv->sph = true;
		dev_info(priv->device, "SPH feature enabled\n");
	}

	/* The IP register MAC_HW_Feature1[ADDR64] only defines 32/40/64-bit
	 * address widths, but some SoCs support other widths: e.g. the
	 * i.MX8MP supports 34 bits, which is reported as 40 bits in
	 * MAC_HW_Feature1[ADDR64]. So override dma_cap.addr64 according to
	 * the real HW design.
	 */
	if (priv->plat->addr64)
		priv->dma_cap.addr64 = priv->plat->addr64;

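	/* Pick the DMA mask from the reported address width; if the wider
	 * mask cannot be set, fall back to a 32-bit mask and a 32-bit
	 * descriptor address width.
	 */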
	if (priv->dma_cap.addr64) {
		ret = dma_set_mask_and_coherent(device,
				DMA_BIT_MASK(priv->dma_cap.addr64));
		if (!ret) {
			dev_info(priv->device, "Using %d bits DMA width\n",
				 priv->dma_cap.addr64);

			/*
			 * If more than 32 bits can be addressed, make sure to
			 * enable enhanced addressing mode.
			 */
			if (IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT))
				priv->plat->dma_cfg->eame = true;
		} else {
			ret = dma_set_mask_and_coherent(device, DMA_BIT_MASK(32));
			if (ret) {
				dev_err(priv->device, "Failed to set DMA Mask\n");
				goto error_hw_init;
			}

			priv->dma_cap.addr64 = 32;
		}
	}

	ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;
	ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
#ifdef STMMAC_VLAN_TAG_USED
	/* Both mac100 and gmac support receive VLAN tag detection */
	ndev->features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX;
	if (priv->dma_cap.vlhash) {
		ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
		ndev->features |= NETIF_F_HW_VLAN_STAG_FILTER;
	}
	if (priv->dma_cap.vlins) {
		ndev->features |= NETIF_F_HW_VLAN_CTAG_TX;
		if (priv->dma_cap.dvlan)
			ndev->features |= NETIF_F_HW_VLAN_STAG_TX;
	}
#endif
	priv->msg_enable = netif_msg_init(debug, default_msg_level);

	/* Initialize RSS */
	rxq = priv->plat->rx_queues_to_use;
	netdev_rss_key_fill(priv->rss.key, sizeof(priv->rss.key));
	for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++)
		priv->rss.table[i] = ethtool_rxfh_indir_default(i, rxq);

	if (priv->dma_cap.rssen && priv->plat->rss_en)
		ndev->features |= NETIF_F_RXHASH;

	/* MTU range: 46 - hw-specific max */
	ndev->min_mtu = ETH_ZLEN - ETH_HLEN;
	if (priv->plat->has_xgmac)
		ndev->max_mtu = XGMAC_JUMBO_LEN;
	else if ((priv->plat->enh_desc) || (priv->synopsys_id >= DWMAC_CORE_4_00))
		ndev->max_mtu = JUMBO_LEN;
	else
		ndev->max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN);
	/* Do not overwrite ndev->max_mtu if plat->maxmtu > ndev->max_mtu,
	 * nor if plat->maxmtu < ndev->min_mtu, which is an invalid range.
	 */
	if ((priv->plat->maxmtu < ndev->max_mtu) &&
	    (priv->plat->maxmtu >= ndev->min_mtu))
		ndev->max_mtu = priv->plat->maxmtu;
	else if (priv->plat->maxmtu < ndev->min_mtu)
		dev_warn(priv->device,
			 "%s: warning: maxmtu having invalid value (%d)\n",
			 __func__, priv->plat->maxmtu);

	if (flow_ctrl)
		priv->flow_ctrl = FLOW_AUTO;	/* RX/TX pause on */

	/* Setup channels NAPI */
	stmmac_napi_add(ndev);

	mutex_init(&priv->lock);

	/* If a specific clk_csr value is passed from the platform, the CSR
	 * Clock Range selection cannot be changed at run-time and is fixed.
	 * Otherwise, the driver will try to set the MDC clock dynamically
	 * according to the actual CSR clock input.
	 */
	if (priv->plat->clk_csr >= 0)
		priv->clk_csr = priv->plat->clk_csr;
	else
		stmmac_clk_csr_set(priv);

	stmmac_check_pcs_mode(priv);

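	/* Keep the device runtime-active while the MDIO bus, PHY and netdev
	 * are being set up; the matching pm_runtime_put() at the end of probe
	 * lets the runtime PM core gate the clocks again.
	 */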
	pm_runtime_get_noresume(device);
	pm_runtime_set_active(device);
	pm_runtime_enable(device);

	if (priv->hw->pcs != STMMAC_PCS_TBI &&
	    priv->hw->pcs != STMMAC_PCS_RTBI) {
		/* MDIO bus Registration */
		ret = stmmac_mdio_register(ndev);
		if (ret < 0) {
			dev_err(priv->device,
				"%s: MDIO bus (id: %d) registration failed",
				__func__, priv->plat->bus_id);
			goto error_mdio_register;
		}
	}

	ret = stmmac_phy_setup(priv);
	if (ret) {
		netdev_err(ndev, "failed to setup phy (%d)\n", ret);
		goto error_phy_setup;
	}

	ret = register_netdev(ndev);
	if (ret) {
		dev_err(priv->device, "%s: ERROR %i registering the device\n",
			__func__, ret);
		goto error_netdev_register;
	}

	if (priv->plat->serdes_powerup) {
		ret = priv->plat->serdes_powerup(ndev,
						 priv->plat->bsp_priv);

		if (ret < 0)
			goto error_serdes_powerup;
	}

#ifdef CONFIG_DEBUG_FS
	stmmac_init_fs(ndev);
#endif

	/* Let pm_runtime_put() disable the clocks.
	 * If CONFIG_PM is not enabled, the clocks will stay powered.
	 */
	pm_runtime_put(device);

	return ret;

error_serdes_powerup:
	unregister_netdev(ndev);
error_netdev_register:
	phylink_destroy(priv->phylink);
error_phy_setup:
	if (priv->hw->pcs != STMMAC_PCS_TBI &&
	    priv->hw->pcs != STMMAC_PCS_RTBI)
		stmmac_mdio_unregister(ndev);
error_mdio_register:
	stmmac_napi_del(ndev);
error_hw_init:
	destroy_workqueue(priv->wq);
	stmmac_bus_clks_config(priv, false);

	return ret;
}
EXPORT_SYMBOL_GPL(stmmac_dvr_probe);

/**
 * stmmac_dvr_remove
 * @dev: device pointer
 * Description: this function resets the TX/RX processes, disables the MAC
 * RX/TX, changes the link status and releases the DMA descriptor rings.
 */
int stmmac_dvr_remove(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	struct stmmac_priv *priv = netdev_priv(ndev);

	netdev_info(priv->dev, "%s: removing driver", __func__);

	stmmac_stop_all_dma(priv);
	stmmac_mac_set(priv, priv->ioaddr, false);
	netif_carrier_off(ndev);
	unregister_netdev(ndev);

	/* Serdes power down needs to happen after the VLAN filter is
	 * deleted, which is triggered by unregister_netdev().
	 */
	if (priv->plat->serdes_powerdown)
		priv->plat->serdes_powerdown(ndev, priv->plat->bsp_priv);

#ifdef CONFIG_DEBUG_FS
	stmmac_exit_fs(ndev);
#endif
	phylink_destroy(priv->phylink);
	if (priv->plat->stmmac_rst)
		reset_control_assert(priv->plat->stmmac_rst);
	pm_runtime_put(dev);
	pm_runtime_disable(dev);
	if (priv->hw->pcs != STMMAC_PCS_TBI &&
	    priv->hw->pcs != STMMAC_PCS_RTBI)
		stmmac_mdio_unregister(ndev);
	destroy_workqueue(priv->wq);
	mutex_destroy(&priv->lock);

	return 0;
}
EXPORT_SYMBOL_GPL(stmmac_dvr_remove);

/**
 * stmmac_suspend - suspend callback
 * @dev: device pointer
 * Description: this is the function to suspend the device; it is called by
 * the platform driver to stop the network queue, release the resources,
 * program the PMT register (for WoL) and clean up driver state.
 */
int stmmac_suspend(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	struct stmmac_priv *priv = netdev_priv(ndev);
	u32 chan;
	int ret;

	if (!ndev || !netif_running(ndev))
		return 0;

	phylink_mac_change(priv->phylink, false);

	mutex_lock(&priv->lock);

	netif_device_detach(ndev);

	stmmac_disable_all_queues(priv);

	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
		hrtimer_cancel(&priv->tx_queue[chan].txtimer);

	if (priv->eee_enabled) {
		priv->tx_path_in_lpi_mode = false;
		del_timer_sync(&priv->eee_ctrl_timer);
	}

	/* Stop TX/RX DMA */
	stmmac_stop_all_dma(priv);

	if (priv->plat->serdes_powerdown)
		priv->plat->serdes_powerdown(ndev, priv->plat->bsp_priv);

	/* Enable Power down mode by programming the PMT regs */
	if (device_may_wakeup(priv->device) && priv->plat->pmt) {
		stmmac_pmt(priv, priv->hw, priv->wolopts);
		priv->irq_wake = 1;
	} else {
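		/* phylink_speed_down() and phylink_stop() run under the RTNL,
		 * so temporarily drop priv->lock around them and re-acquire
		 * it afterwards.
		 */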
		mutex_unlock(&priv->lock);
		rtnl_lock();
		if (device_may_wakeup(priv->device))
			phylink_speed_down(priv->phylink, false);
		phylink_stop(priv->phylink);
		rtnl_unlock();
		mutex_lock(&priv->lock);

		stmmac_mac_set(priv, priv->ioaddr, false);
		pinctrl_pm_select_sleep_state(priv->device);
		/* Disable the PTP reference clock */
		clk_disable_unprepare(priv->plat->clk_ptp_ref);
		ret = pm_runtime_force_suspend(dev);
		if (ret) {
			mutex_unlock(&priv->lock);
			return ret;
		}
	}

	mutex_unlock(&priv->lock);

	if (priv->dma_cap.fpesel) {
		/* Disable FPE */
		stmmac_fpe_configure(priv, priv->ioaddr,
				     priv->plat->tx_queues_to_use,
				     priv->plat->rx_queues_to_use, false);

		stmmac_fpe_handshake(priv, false);
	}

	priv->speed = SPEED_UNKNOWN;
	return 0;
}
EXPORT_SYMBOL_GPL(stmmac_suspend);

/**
 * stmmac_reset_queues_param - reset queue parameters
 * @priv: device pointer
 */
static void stmmac_reset_queues_param(struct stmmac_priv *priv)
{
	u32 rx_cnt = priv->plat->rx_queues_to_use;
	u32 tx_cnt = priv->plat->tx_queues_to_use;
	u32 queue;

	for (queue = 0; queue < rx_cnt; queue++) {
		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];

		rx_q->cur_rx = 0;
		rx_q->dirty_rx = 0;
	}

	for (queue = 0; queue < tx_cnt; queue++) {
		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];

		tx_q->cur_tx = 0;
		tx_q->dirty_tx = 0;
		tx_q->mss = 0;

		netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue));
	}
}

/**
 * stmmac_resume - resume callback
 * @dev: device pointer
 * Description: when resuming, this function is invoked to set up the DMA
 * and CORE in a usable state.
 */
int stmmac_resume(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	struct stmmac_priv *priv = netdev_priv(ndev);
	int ret;

	if (!netif_running(ndev))
		return 0;

	/* The Power Down bit in the PM register is cleared automatically
	 * as soon as a magic packet or a Wake-up frame is received.
	 * Anyway, it's better to manually clear this bit because it can
	 * generate problems while resuming from other devices (e.g. the
	 * serial console).
	 */
	if (device_may_wakeup(priv->device) && priv->plat->pmt) {
		mutex_lock(&priv->lock);
		stmmac_pmt(priv, priv->hw, 0);
		mutex_unlock(&priv->lock);
		priv->irq_wake = 0;
	} else {
		pinctrl_pm_select_default_state(priv->device);
		/* enable the clk previously disabled */
		ret = pm_runtime_force_resume(dev);
		if (ret)
			return ret;
		if (priv->plat->clk_ptp_ref)
			clk_prepare_enable(priv->plat->clk_ptp_ref);
		/* reset the phy so that it's ready */
		if (priv->mii)
			stmmac_mdio_reset(priv->mii);
	}

	if (priv->plat->serdes_powerup) {
		ret = priv->plat->serdes_powerup(ndev,
						 priv->plat->bsp_priv);

		if (ret < 0)
			return ret;
	}

	if (!device_may_wakeup(priv->device) || !priv->plat->pmt) {
		rtnl_lock();
		phylink_start(priv->phylink);
		/* We may have called phylink_speed_down before */
		phylink_speed_up(priv->phylink);
		rtnl_unlock();
	}

	rtnl_lock();
	mutex_lock(&priv->lock);

	stmmac_reset_queues_param(priv);
	stmmac_reinit_rx_buffers(priv);
	stmmac_free_tx_skbufs(priv);
	stmmac_clear_descriptors(priv);

	stmmac_hw_setup(ndev, false);
	stmmac_init_coalesce(priv);
	stmmac_set_rx_mode(ndev);

	stmmac_restore_hw_vlan_rx_fltr(priv, ndev, priv->hw);

	stmmac_enable_all_queues(priv);

	mutex_unlock(&priv->lock);
	rtnl_unlock();

	phylink_mac_change(priv->phylink, true);

	netif_device_attach(ndev);

	return 0;
}
EXPORT_SYMBOL_GPL(stmmac_resume);

#ifndef MODULE
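/* Parse the "stmmaceth=" kernel command line option, e.g.:
 *	stmmaceth=debug:16,phyaddr:1,watchdog:4000
 * Unknown keys are ignored; a value that fails to parse aborts with -EINVAL.
 */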
static int __init stmmac_cmdline_opt(char *str)
{
	char *opt;

	if (!str || !*str)
		return -EINVAL;
	while ((opt = strsep(&str, ",")) != NULL) {
		if (!strncmp(opt, "debug:", 6)) {
			if (kstrtoint(opt + 6, 0, &debug))
				goto err;
		} else if (!strncmp(opt, "phyaddr:", 8)) {
			if (kstrtoint(opt + 8, 0, &phyaddr))
				goto err;
		} else if (!strncmp(opt, "buf_sz:", 7)) {
			if (kstrtoint(opt + 7, 0, &buf_sz))
				goto err;
		} else if (!strncmp(opt, "tc:", 3)) {
			if (kstrtoint(opt + 3, 0, &tc))
				goto err;
		} else if (!strncmp(opt, "watchdog:", 9)) {
			if (kstrtoint(opt + 9, 0, &watchdog))
				goto err;
		} else if (!strncmp(opt, "flow_ctrl:", 10)) {
			if (kstrtoint(opt + 10, 0, &flow_ctrl))
				goto err;
		} else if (!strncmp(opt, "pause:", 6)) {
			if (kstrtoint(opt + 6, 0, &pause))
				goto err;
		} else if (!strncmp(opt, "eee_timer:", 10)) {
			if (kstrtoint(opt + 10, 0, &eee_timer))
				goto err;
		} else if (!strncmp(opt, "chain_mode:", 11)) {
			if (kstrtoint(opt + 11, 0, &chain_mode))
				goto err;
		}
	}
	return 0;

err:
	pr_err("%s: ERROR broken module parameter conversion", __func__);
	return -EINVAL;
}

__setup("stmmaceth=", stmmac_cmdline_opt);
#endif /* MODULE */

static int __init stmmac_init(void)
{
#ifdef CONFIG_DEBUG_FS
	/* Create debugfs main directory if it doesn't exist yet */
	if (!stmmac_fs_dir)
		stmmac_fs_dir = debugfs_create_dir(STMMAC_RESOURCE_NAME, NULL);
	register_netdevice_notifier(&stmmac_notifier);
#endif

	return 0;
}

static void __exit stmmac_exit(void)
{
#ifdef CONFIG_DEBUG_FS
	unregister_netdevice_notifier(&stmmac_notifier);
	debugfs_remove_recursive(stmmac_fs_dir);
#endif
}

module_init(stmmac_init)
module_exit(stmmac_exit)

MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet device driver");
MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
MODULE_LICENSE("GPL");