// SPDX-License-Identifier: GPL-2.0-only
/*******************************************************************************
  This is the driver for the ST MAC 10/100/1000 on-chip Ethernet controllers.
  ST Ethernet IPs are built around a Synopsys IP Core.

	Copyright(C) 2007-2011 STMicroelectronics Ltd


  Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>

  Documentation available at:
	http://www.stlinux.com
  Support available at:
	https://bugzilla.stlinux.com/
*******************************************************************************/

#include <linux/clk.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/if_ether.h>
#include <linux/crc32.h>
#include <linux/mii.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/pm_runtime.h>
#include <linux/prefetch.h>
#include <linux/pinctrl/consumer.h>
#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#endif /* CONFIG_DEBUG_FS */
#include <linux/net_tstamp.h>
#include <linux/phylink.h>
#include <linux/udp.h>
#include <linux/bpf_trace.h>
#include <net/pkt_cls.h>
#include <net/xdp_sock_drv.h>
#include "stmmac_ptp.h"
#include "stmmac.h"
#include "stmmac_xdp.h"
#include <linux/reset.h>
#include <linux/of_mdio.h>
#include "dwmac1000.h"
#include "dwxgmac2.h"
#include "hwif.h"

#define STMMAC_ALIGN(x)		ALIGN(ALIGN(x, SMP_CACHE_BYTES), 16)
#define TSO_MAX_BUFF_SIZE	(SZ_16K - 1)

/* Module parameters */
#define TX_TIMEO	5000
static int watchdog = TX_TIMEO;
module_param(watchdog, int, 0644);
MODULE_PARM_DESC(watchdog, "Transmit timeout in milliseconds (default 5s)");

static int debug = -1;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)");

static int phyaddr = -1;
module_param(phyaddr, int, 0444);
MODULE_PARM_DESC(phyaddr, "Physical device address");

#define STMMAC_TX_THRESH(x)	((x)->dma_tx_size / 4)
#define STMMAC_RX_THRESH(x)	((x)->dma_rx_size / 4)

/* Limit to make sure XDP TX and slow path can coexist */
#define STMMAC_XSK_TX_BUDGET_MAX	256
#define STMMAC_TX_XSK_AVAIL		16
#define STMMAC_RX_FILL_BATCH		16

#define STMMAC_XDP_PASS		0
#define STMMAC_XDP_CONSUMED	BIT(0)
#define STMMAC_XDP_TX		BIT(1)
#define STMMAC_XDP_REDIRECT	BIT(2)

static int flow_ctrl = FLOW_AUTO;
module_param(flow_ctrl, int, 0644);
MODULE_PARM_DESC(flow_ctrl, "Flow control ability [on/off]");

static int pause = PAUSE_TIME;
module_param(pause, int, 0644);
MODULE_PARM_DESC(pause, "Flow Control Pause Time");

#define TC_DEFAULT 64
static int tc = TC_DEFAULT;
module_param(tc, int, 0644);
MODULE_PARM_DESC(tc, "DMA threshold control value");

#define	DEFAULT_BUFSIZE	1536
static int buf_sz = DEFAULT_BUFSIZE;
module_param(buf_sz, int, 0644);
MODULE_PARM_DESC(buf_sz, "DMA buffer size");

#define	STMMAC_RX_COPYBREAK	256

static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
				      NETIF_MSG_LINK | NETIF_MSG_IFUP |
				      NETIF_MSG_IFDOWN | NETIF_MSG_TIMER);

#define STMMAC_DEFAULT_LPI_TIMER	1000
static int eee_timer = STMMAC_DEFAULT_LPI_TIMER;
module_param(eee_timer, int, 0644);
MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec");
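/* Usage sketch (hedged, illustrative only): the parameters above can be
 * overridden at module load time, e.g.
 *
 *	modprobe stmmac eee_timer=2000 buf_sz=4096 debug=16
 *
 * Out-of-range values are pulled back to their defaults by
 * stmmac_verify_args() below.
 */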
#define STMMAC_LPI_T(x) (jiffies + usecs_to_jiffies(x))

/* By default the driver will use the ring mode to manage tx and rx descriptors,
 * but allow user to force to use the chain instead of the ring
 */
static unsigned int chain_mode;
module_param(chain_mode, int, 0444);
MODULE_PARM_DESC(chain_mode, "To use chain instead of ring mode");

static irqreturn_t stmmac_interrupt(int irq, void *dev_id);
/* For MSI interrupts handling */
static irqreturn_t stmmac_mac_interrupt(int irq, void *dev_id);
static irqreturn_t stmmac_safety_interrupt(int irq, void *dev_id);
static irqreturn_t stmmac_msi_intr_tx(int irq, void *data);
static irqreturn_t stmmac_msi_intr_rx(int irq, void *data);
static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue);
static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue);

#ifdef CONFIG_DEBUG_FS
static const struct net_device_ops stmmac_netdev_ops;
static void stmmac_init_fs(struct net_device *dev);
static void stmmac_exit_fs(struct net_device *dev);
#endif

#define STMMAC_COAL_TIMER(x) (ns_to_ktime((x) * NSEC_PER_USEC))

int stmmac_bus_clks_config(struct stmmac_priv *priv, bool enabled)
{
	int ret = 0;

	if (enabled) {
		ret = clk_prepare_enable(priv->plat->stmmac_clk);
		if (ret)
			return ret;
		ret = clk_prepare_enable(priv->plat->pclk);
		if (ret) {
			clk_disable_unprepare(priv->plat->stmmac_clk);
			return ret;
		}
		if (priv->plat->clks_config) {
			ret = priv->plat->clks_config(priv->plat->bsp_priv, enabled);
			if (ret) {
				clk_disable_unprepare(priv->plat->stmmac_clk);
				clk_disable_unprepare(priv->plat->pclk);
				return ret;
			}
		}
	} else {
		clk_disable_unprepare(priv->plat->stmmac_clk);
		clk_disable_unprepare(priv->plat->pclk);
		if (priv->plat->clks_config)
			priv->plat->clks_config(priv->plat->bsp_priv, enabled);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(stmmac_bus_clks_config);
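/* Usage sketch (hedged, not part of the driver): suspend/resume style
 * callers are expected to pair enable and disable, e.g.
 *
 *	ret = stmmac_bus_clks_config(priv, true);
 *	if (ret)
 *		return ret;
 *	...
 *	stmmac_bus_clks_config(priv, false);
 *
 * The enable path above unwinds partially-enabled clocks on error, so a
 * caller only ever has to disable what was successfully enabled.
 */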
/**
 * stmmac_verify_args - verify the driver parameters.
 * Description: it checks the driver parameters and sets a default in case of
 * errors.
 */
static void stmmac_verify_args(void)
{
	if (unlikely(watchdog < 0))
		watchdog = TX_TIMEO;
	if (unlikely((buf_sz < DEFAULT_BUFSIZE) || (buf_sz > BUF_SIZE_16KiB)))
		buf_sz = DEFAULT_BUFSIZE;
	if (unlikely(flow_ctrl > 1))
		flow_ctrl = FLOW_AUTO;
	else if (likely(flow_ctrl < 0))
		flow_ctrl = FLOW_OFF;
	if (unlikely((pause < 0) || (pause > 0xffff)))
		pause = PAUSE_TIME;
	if (eee_timer < 0)
		eee_timer = STMMAC_DEFAULT_LPI_TIMER;
}

static void __stmmac_disable_all_queues(struct stmmac_priv *priv)
{
	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
	u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
	u32 queue;

	for (queue = 0; queue < maxq; queue++) {
		struct stmmac_channel *ch = &priv->channel[queue];

		if (stmmac_xdp_is_enabled(priv) &&
		    test_bit(queue, priv->af_xdp_zc_qps)) {
			napi_disable(&ch->rxtx_napi);
			continue;
		}

		if (queue < rx_queues_cnt)
			napi_disable(&ch->rx_napi);
		if (queue < tx_queues_cnt)
			napi_disable(&ch->tx_napi);
	}
}

/**
 * stmmac_disable_all_queues - Disable all queues
 * @priv: driver private structure
 */
static void stmmac_disable_all_queues(struct stmmac_priv *priv)
{
	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
	struct stmmac_rx_queue *rx_q;
	u32 queue;

	/* synchronize_rcu() needed for pending XDP buffers to drain */
	for (queue = 0; queue < rx_queues_cnt; queue++) {
		rx_q = &priv->rx_queue[queue];
		if (rx_q->xsk_pool) {
			synchronize_rcu();
			break;
		}
	}

	__stmmac_disable_all_queues(priv);
}

/**
 * stmmac_enable_all_queues - Enable all queues
 * @priv: driver private structure
 */
static void stmmac_enable_all_queues(struct stmmac_priv *priv)
{
	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
	u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
	u32 queue;

	for (queue = 0; queue < maxq; queue++) {
		struct stmmac_channel *ch = &priv->channel[queue];

		if (stmmac_xdp_is_enabled(priv) &&
		    test_bit(queue, priv->af_xdp_zc_qps)) {
			napi_enable(&ch->rxtx_napi);
			continue;
		}

		if (queue < rx_queues_cnt)
			napi_enable(&ch->rx_napi);
		if (queue < tx_queues_cnt)
			napi_enable(&ch->tx_napi);
	}
}

static void stmmac_service_event_schedule(struct stmmac_priv *priv)
{
	if (!test_bit(STMMAC_DOWN, &priv->state) &&
	    !test_and_set_bit(STMMAC_SERVICE_SCHED, &priv->state))
		queue_work(priv->wq, &priv->service_task);
}

static void stmmac_global_err(struct stmmac_priv *priv)
{
	netif_carrier_off(priv->dev);
	set_bit(STMMAC_RESET_REQUESTED, &priv->state);
	stmmac_service_event_schedule(priv);
}

/**
 * stmmac_clk_csr_set - dynamically set the MDC clock
 * @priv: driver private structure
 * Description: this is to dynamically set the MDC clock according to the csr
 * clock input.
 * Note:
 *	If a specific clk_csr value is passed from the platform
 *	this means that the CSR Clock Range selection cannot be
 *	changed at run-time and it is fixed (as reported in the driver
 *	documentation). Vice versa, the driver will try to set the MDC
 *	clock dynamically according to the actual clock input.
 */
static void stmmac_clk_csr_set(struct stmmac_priv *priv)
{
	u32 clk_rate;

	clk_rate = clk_get_rate(priv->plat->stmmac_clk);

	/* Platform provided default clk_csr would be assumed valid
	 * for all other cases except for the below mentioned ones.
	 * For values higher than the IEEE 802.3 specified frequency
	 * we cannot estimate the proper divider as the frequency of
	 * clk_csr_i is not known. So we do not change the default
	 * divider.
	 */
	if (!(priv->clk_csr & MAC_CSR_H_FRQ_MASK)) {
		if (clk_rate < CSR_F_35M)
			priv->clk_csr = STMMAC_CSR_20_35M;
		else if ((clk_rate >= CSR_F_35M) && (clk_rate < CSR_F_60M))
			priv->clk_csr = STMMAC_CSR_35_60M;
		else if ((clk_rate >= CSR_F_60M) && (clk_rate < CSR_F_100M))
			priv->clk_csr = STMMAC_CSR_60_100M;
		else if ((clk_rate >= CSR_F_100M) && (clk_rate < CSR_F_150M))
			priv->clk_csr = STMMAC_CSR_100_150M;
		else if ((clk_rate >= CSR_F_150M) && (clk_rate < CSR_F_250M))
			priv->clk_csr = STMMAC_CSR_150_250M;
		else if ((clk_rate >= CSR_F_250M) && (clk_rate <= CSR_F_300M))
			priv->clk_csr = STMMAC_CSR_250_300M;
	}

	if (priv->plat->has_sun8i) {
		if (clk_rate > 160000000)
			priv->clk_csr = 0x03;
		else if (clk_rate > 80000000)
			priv->clk_csr = 0x02;
		else if (clk_rate > 40000000)
			priv->clk_csr = 0x01;
		else
			priv->clk_csr = 0;
	}

	if (priv->plat->has_xgmac) {
		if (clk_rate > 400000000)
			priv->clk_csr = 0x5;
		else if (clk_rate > 350000000)
			priv->clk_csr = 0x4;
		else if (clk_rate > 300000000)
			priv->clk_csr = 0x3;
		else if (clk_rate > 250000000)
			priv->clk_csr = 0x2;
		else if (clk_rate > 150000000)
			priv->clk_csr = 0x1;
		else
			priv->clk_csr = 0x0;
	}
}
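/* Worked example (hedged): with a CSR input clock of 125 MHz and no
 * MAC_CSR_H_FRQ_MASK override, the 100-150 MHz branch above selects
 * STMMAC_CSR_100_150M, keeping the resulting MDC frequency within the
 * IEEE 802.3 limit of 2.5 MHz.
 */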
static void print_pkt(unsigned char *buf, int len)
{
	pr_debug("len = %d byte, buf addr: 0x%p\n", len, buf);
	print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, buf, len);
}

static inline u32 stmmac_tx_avail(struct stmmac_priv *priv, u32 queue)
{
	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
	u32 avail;

	if (tx_q->dirty_tx > tx_q->cur_tx)
		avail = tx_q->dirty_tx - tx_q->cur_tx - 1;
	else
		avail = priv->dma_tx_size - tx_q->cur_tx + tx_q->dirty_tx - 1;

	return avail;
}

/**
 * stmmac_rx_dirty - Get RX queue dirty
 * @priv: driver private structure
 * @queue: RX queue index
 */
static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv, u32 queue)
{
	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
	u32 dirty;

	if (rx_q->dirty_rx <= rx_q->cur_rx)
		dirty = rx_q->cur_rx - rx_q->dirty_rx;
	else
		dirty = priv->dma_rx_size - rx_q->dirty_rx + rx_q->cur_rx;

	return dirty;
}
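/* Worked example (hedged): both helpers treat the descriptor ring as a
 * circular buffer. With dma_tx_size = 512, cur_tx = 10 and dirty_tx = 5,
 * stmmac_tx_avail() takes the else branch:
 *
 *	avail = 512 - 10 + 5 - 1 = 506
 *
 * One slot is always kept unused so a full ring can be told apart from an
 * empty one.
 */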
static void stmmac_lpi_entry_timer_config(struct stmmac_priv *priv, bool en)
{
	int tx_lpi_timer;

	/* Clear/set the SW EEE timer flag based on LPI ET enablement */
	priv->eee_sw_timer_en = en ? 0 : 1;
	tx_lpi_timer = en ? priv->tx_lpi_timer : 0;
	stmmac_set_eee_lpi_timer(priv, priv->hw, tx_lpi_timer);
}

/**
 * stmmac_enable_eee_mode - check and enter in LPI mode
 * @priv: driver private structure
 * Description: this function is to verify and enter in LPI mode in case of
 * EEE.
 */
static void stmmac_enable_eee_mode(struct stmmac_priv *priv)
{
	u32 tx_cnt = priv->plat->tx_queues_to_use;
	u32 queue;

	/* check if all TX queues have the work finished */
	for (queue = 0; queue < tx_cnt; queue++) {
		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];

		if (tx_q->dirty_tx != tx_q->cur_tx)
			return; /* still unfinished work */
	}

	/* Check and enter in LPI mode */
	if (!priv->tx_path_in_lpi_mode)
		stmmac_set_eee_mode(priv, priv->hw,
				    priv->plat->en_tx_lpi_clockgating);
}

/**
 * stmmac_disable_eee_mode - disable and exit from LPI mode
 * @priv: driver private structure
 * Description: this function is to exit and disable EEE if the LPI state is
 * active. It is called from the xmit path.
 */
void stmmac_disable_eee_mode(struct stmmac_priv *priv)
{
	if (!priv->eee_sw_timer_en) {
		stmmac_lpi_entry_timer_config(priv, 0);
		return;
	}

	stmmac_reset_eee_mode(priv, priv->hw);
	del_timer_sync(&priv->eee_ctrl_timer);
	priv->tx_path_in_lpi_mode = false;
}

/**
 * stmmac_eee_ctrl_timer - EEE TX SW timer.
 * @t: timer_list struct containing private info
 * Description:
 * if there is no data transfer and if we are not in LPI state,
 * then the MAC Transmitter can be moved to LPI state.
 */
static void stmmac_eee_ctrl_timer(struct timer_list *t)
{
	struct stmmac_priv *priv = from_timer(priv, t, eee_ctrl_timer);

	stmmac_enable_eee_mode(priv);
	mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer));
}

/**
 * stmmac_eee_init - init EEE
 * @priv: driver private structure
 * Description:
 * if the GMAC supports the EEE (from the HW cap reg) and the phy device
 * can also manage EEE, this function enables the LPI state and starts the
 * related timer.
 */
bool stmmac_eee_init(struct stmmac_priv *priv)
{
	int eee_tw_timer = priv->eee_tw_timer;

	/* Using PCS we cannot deal with the phy registers at this stage
	 * so we do not support extra features like EEE.
	 */
	if (priv->hw->pcs == STMMAC_PCS_TBI ||
	    priv->hw->pcs == STMMAC_PCS_RTBI)
		return false;

	/* Check if MAC core supports the EEE feature. */
	if (!priv->dma_cap.eee)
		return false;

	mutex_lock(&priv->lock);

	/* Check if it needs to be deactivated */
	if (!priv->eee_active) {
		if (priv->eee_enabled) {
			netdev_dbg(priv->dev, "disable EEE\n");
			stmmac_lpi_entry_timer_config(priv, 0);
			del_timer_sync(&priv->eee_ctrl_timer);
			stmmac_set_eee_timer(priv, priv->hw, 0, eee_tw_timer);
			if (priv->hw->xpcs)
				xpcs_config_eee(priv->hw->xpcs,
						priv->plat->mult_fact_100ns,
						false);
		}
		mutex_unlock(&priv->lock);
		return false;
	}

	if (priv->eee_active && !priv->eee_enabled) {
		timer_setup(&priv->eee_ctrl_timer, stmmac_eee_ctrl_timer, 0);
		stmmac_set_eee_timer(priv, priv->hw, STMMAC_DEFAULT_LIT_LS,
				     eee_tw_timer);
		if (priv->hw->xpcs)
			xpcs_config_eee(priv->hw->xpcs,
					priv->plat->mult_fact_100ns,
					true);
	}

	if (priv->plat->has_gmac4 && priv->tx_lpi_timer <= STMMAC_ET_MAX) {
		del_timer_sync(&priv->eee_ctrl_timer);
		priv->tx_path_in_lpi_mode = false;
		stmmac_lpi_entry_timer_config(priv, 1);
	} else {
		stmmac_lpi_entry_timer_config(priv, 0);
		mod_timer(&priv->eee_ctrl_timer,
			  STMMAC_LPI_T(priv->tx_lpi_timer));
	}

	mutex_unlock(&priv->lock);
	netdev_dbg(priv->dev, "Energy-Efficient Ethernet initialized\n");
	return true;
}
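/* Note (hedged summary of the logic above): on dwmac4 cores with an LPI
 * entry timer (tx_lpi_timer <= STMMAC_ET_MAX) the hardware timer is
 * programmed and the software eee_ctrl_timer stays idle; otherwise the
 * driver falls back to the software timer, re-armed via STMMAC_LPI_T()
 * by stmmac_eee_ctrl_timer().
 */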
/* stmmac_get_tx_hwtstamp - get HW TX timestamps
 * @priv: driver private structure
 * @p : descriptor pointer
 * @skb : the socket buffer
 * Description :
 * This function reads the timestamp from the descriptor, performs some
 * sanity checks and then passes it to the stack.
 */
static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv,
				   struct dma_desc *p, struct sk_buff *skb)
{
	struct skb_shared_hwtstamps shhwtstamp;
	bool found = false;
	s64 adjust = 0;
	u64 ns = 0;

	if (!priv->hwts_tx_en)
		return;

	/* exit if skb doesn't support hw tstamp */
	if (likely(!skb || !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)))
		return;

	/* check tx tstamp status */
	if (stmmac_get_tx_timestamp_status(priv, p)) {
		stmmac_get_timestamp(priv, p, priv->adv_ts, &ns);
		found = true;
	} else if (!stmmac_get_mac_tx_timestamp(priv, priv->hw, &ns)) {
		found = true;
	}

	if (found) {
		/* Correct the clk domain crossing(CDC) error */
		if (priv->plat->has_gmac4 && priv->plat->clk_ptp_rate) {
			adjust += -(2 * (NSEC_PER_SEC /
					 priv->plat->clk_ptp_rate));
			ns += adjust;
		}

		memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
		shhwtstamp.hwtstamp = ns_to_ktime(ns);

		netdev_dbg(priv->dev, "get valid TX hw timestamp %llu\n", ns);
		/* pass tstamp to stack */
		skb_tstamp_tx(skb, &shhwtstamp);
	}
}

/* stmmac_get_rx_hwtstamp - get HW RX timestamps
 * @priv: driver private structure
 * @p : descriptor pointer
 * @np : next descriptor pointer
 * @skb : the socket buffer
 * Description :
 * This function reads the received packet's timestamp from the descriptor
 * and passes it to the stack. It also performs some sanity checks.
 */
static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p,
				   struct dma_desc *np, struct sk_buff *skb)
{
	struct skb_shared_hwtstamps *shhwtstamp = NULL;
	struct dma_desc *desc = p;
	u64 adjust = 0;
	u64 ns = 0;

	if (!priv->hwts_rx_en)
		return;
	/* For GMAC4, the valid timestamp is from CTX next desc. */
	if (priv->plat->has_gmac4 || priv->plat->has_xgmac)
		desc = np;

	/* Check if timestamp is available */
	if (stmmac_get_rx_timestamp_status(priv, p, np, priv->adv_ts)) {
		stmmac_get_timestamp(priv, desc, priv->adv_ts, &ns);

		/* Correct the clk domain crossing(CDC) error */
		if (priv->plat->has_gmac4 && priv->plat->clk_ptp_rate) {
			adjust += 2 * (NSEC_PER_SEC / priv->plat->clk_ptp_rate);
			ns -= adjust;
		}

		netdev_dbg(priv->dev, "get valid RX hw timestamp %llu\n", ns);
		shhwtstamp = skb_hwtstamps(skb);
		memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
		shhwtstamp->hwtstamp = ns_to_ktime(ns);
	} else {
		netdev_dbg(priv->dev, "cannot get RX hw timestamp\n");
	}
}
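/* Example (hedged): the clock-domain-crossing correction is two periods
 * of the PTP reference clock. With clk_ptp_rate = 50 MHz that is
 * 2 * (1e9 / 50000000) = 40 ns, subtracted from both TX and RX
 * timestamps above.
 */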
/**
 * stmmac_hwtstamp_set - control hardware timestamping.
 * @dev: device pointer.
 * @ifr: An IOCTL specific structure, that can contain a pointer to
 * a proprietary structure used to pass information to the driver.
 * Description:
 * This function configures the MAC to enable/disable both outgoing(TX)
 * and incoming(RX) packets time stamping based on user input.
 * Return Value:
 * 0 on success and an appropriate negative error code on failure.
 */
static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	struct hwtstamp_config config;
	struct timespec64 now;
	u64 temp = 0;
	u32 ptp_v2 = 0;
	u32 tstamp_all = 0;
	u32 ptp_over_ipv4_udp = 0;
	u32 ptp_over_ipv6_udp = 0;
	u32 ptp_over_ethernet = 0;
	u32 snap_type_sel = 0;
	u32 ts_master_en = 0;
	u32 ts_event_en = 0;
	u32 sec_inc = 0;
	u32 value = 0;
	bool xmac;

	xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;

	if (!(priv->dma_cap.time_stamp || priv->adv_ts)) {
		netdev_alert(priv->dev, "No support for HW time stamping\n");
		priv->hwts_tx_en = 0;
		priv->hwts_rx_en = 0;

		return -EOPNOTSUPP;
	}

	if (copy_from_user(&config, ifr->ifr_data,
			   sizeof(config)))
		return -EFAULT;

	netdev_dbg(priv->dev, "%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n",
		   __func__, config.flags, config.tx_type, config.rx_filter);

	/* reserved for future extensions */
	if (config.flags)
		return -EINVAL;

	if (config.tx_type != HWTSTAMP_TX_OFF &&
	    config.tx_type != HWTSTAMP_TX_ON)
		return -ERANGE;

	if (priv->adv_ts) {
		switch (config.rx_filter) {
		case HWTSTAMP_FILTER_NONE:
			/* time stamp no incoming packet at all */
			config.rx_filter = HWTSTAMP_FILTER_NONE;
			break;

		case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
			/* PTP v1, UDP, any kind of event packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
			/* 'xmac' hardware can support Sync, Pdelay_Req and
			 * Pdelay_resp by setting bit14 and bits17/16 to 01
			 * This leaves Delay_Req timestamps out.
			 * Enable all events *and* general purpose message
			 * timestamping
			 */
			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
			/* PTP v1, UDP, Sync packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
			/* take time stamp for SYNC messages only */
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
			/* PTP v1, UDP, Delay_req packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
			/* take time stamp for Delay_Req messages only */
			ts_master_en = PTP_TCR_TSMSTRENA;
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
			/* PTP v2, UDP, any kind of event packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for all event messages */
			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
			/* PTP v2, UDP, Sync packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for SYNC messages only */
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
			/* PTP v2, UDP, Delay_req packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for Delay_Req messages only */
			ts_master_en = PTP_TCR_TSMSTRENA;
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_EVENT:
			/* PTP v2/802.AS1 any layer, any kind of event packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
			if (priv->synopsys_id < DWMAC_CORE_4_10)
				ts_event_en = PTP_TCR_TSEVNTENA;
			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			ptp_over_ethernet = PTP_TCR_TSIPENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_SYNC:
			/* PTP v2/802.AS1, any layer, Sync packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for SYNC messages only */
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			ptp_over_ethernet = PTP_TCR_TSIPENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
			/* PTP v2/802.AS1, any layer, Delay_req packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for Delay_Req messages only */
			ts_master_en = PTP_TCR_TSMSTRENA;
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			ptp_over_ethernet = PTP_TCR_TSIPENA;
			break;

		case HWTSTAMP_FILTER_NTP_ALL:
		case HWTSTAMP_FILTER_ALL:
			/* time stamp any incoming packet */
			config.rx_filter = HWTSTAMP_FILTER_ALL;
			tstamp_all = PTP_TCR_TSENALL;
			break;

		default:
			return -ERANGE;
		}
	} else {
		switch (config.rx_filter) {
		case HWTSTAMP_FILTER_NONE:
			config.rx_filter = HWTSTAMP_FILTER_NONE;
			break;
		default:
			/* PTP v1, UDP, any kind of event packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
			break;
		}
	}
	priv->hwts_rx_en = ((config.rx_filter == HWTSTAMP_FILTER_NONE) ? 0 : 1);
	priv->hwts_tx_en = config.tx_type == HWTSTAMP_TX_ON;

	if (!priv->hwts_tx_en && !priv->hwts_rx_en)
		stmmac_config_hw_tstamping(priv, priv->ptpaddr, 0);
	else {
		value = (PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | PTP_TCR_TSCTRLSSR |
			 tstamp_all | ptp_v2 | ptp_over_ethernet |
			 ptp_over_ipv6_udp | ptp_over_ipv4_udp | ts_event_en |
			 ts_master_en | snap_type_sel);
		stmmac_config_hw_tstamping(priv, priv->ptpaddr, value);

		/* program Sub Second Increment reg */
		stmmac_config_sub_second_increment(priv,
				priv->ptpaddr, priv->plat->clk_ptp_rate,
				xmac, &sec_inc);
		temp = div_u64(1000000000ULL, sec_inc);

		/* Store sub second increment and flags for later use */
		priv->sub_second_inc = sec_inc;
		priv->systime_flags = value;

		/* calculate default added value:
		 * formula is :
		 * addend = (2^32)/freq_div_ratio;
		 * where, freq_div_ratio = 1e9ns/sec_inc
		 */
		temp = (u64)(temp << 32);
		priv->default_addend = div_u64(temp, priv->plat->clk_ptp_rate);
		stmmac_config_addend(priv, priv->ptpaddr, priv->default_addend);

		/* initialize system time */
		ktime_get_real_ts64(&now);

		/* lower 32 bits of tv_sec are safe until y2106 */
		stmmac_init_systime(priv, priv->ptpaddr,
				    (u32)now.tv_sec, now.tv_nsec);
	}

	memcpy(&priv->tstamp_config, &config, sizeof(config));

	return copy_to_user(ifr->ifr_data, &config,
			    sizeof(config)) ? -EFAULT : 0;
}
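/* Userspace usage sketch (hedged, illustrative only): this handler is
 * reached via the SIOCSHWTSTAMP ioctl, e.g.
 *
 *	struct hwtstamp_config cfg = {
 *		.tx_type   = HWTSTAMP_TX_ON,
 *		.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT,
 *	};
 *	struct ifreq ifr;
 *
 *	strncpy(ifr.ifr_name, "eth0", sizeof(ifr.ifr_name));
 *	ifr.ifr_data = (void *)&cfg;
 *	ioctl(sockfd, SIOCSHWTSTAMP, &ifr);
 *
 * The config is copied back on return, so the caller can see which filter
 * the driver actually selected.
 */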
/**
 * stmmac_hwtstamp_get - read hardware timestamping.
 * @dev: device pointer.
 * @ifr: An IOCTL specific structure, that can contain a pointer to
 * a proprietary structure used to pass information to the driver.
 * Description:
 * This function obtains the current hardware timestamping settings
 * as requested.
 */
static int stmmac_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	struct hwtstamp_config *config = &priv->tstamp_config;

	if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
		return -EOPNOTSUPP;

	return copy_to_user(ifr->ifr_data, config,
			    sizeof(*config)) ? -EFAULT : 0;
}
/**
 * stmmac_init_ptp - init PTP
 * @priv: driver private structure
 * Description: this is to verify if the HW supports the PTPv1 or PTPv2.
 * This is done by looking at the HW cap. register.
 * This function also registers the ptp driver.
 */
static int stmmac_init_ptp(struct stmmac_priv *priv)
{
	bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;

	if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
		return -EOPNOTSUPP;

	priv->adv_ts = 0;
	/* Check if adv_ts can be enabled for dwmac 4.x / xgmac core */
	if (xmac && priv->dma_cap.atime_stamp)
		priv->adv_ts = 1;
	/* Dwmac 3.x core with extend_desc can support adv_ts */
	else if (priv->extend_desc && priv->dma_cap.atime_stamp)
		priv->adv_ts = 1;

	if (priv->dma_cap.time_stamp)
		netdev_info(priv->dev, "IEEE 1588-2002 Timestamp supported\n");

	if (priv->adv_ts)
		netdev_info(priv->dev,
			    "IEEE 1588-2008 Advanced Timestamp supported\n");

	priv->hwts_tx_en = 0;
	priv->hwts_rx_en = 0;

	stmmac_ptp_register(priv);

	return 0;
}

static void stmmac_release_ptp(struct stmmac_priv *priv)
{
	clk_disable_unprepare(priv->plat->clk_ptp_ref);
	stmmac_ptp_unregister(priv);
}
/**
 * stmmac_mac_flow_ctrl - Configure flow control in all queues
 * @priv: driver private structure
 * @duplex: duplex passed to the next function
 * Description: It is used for configuring the flow control in all queues
 */
static void stmmac_mac_flow_ctrl(struct stmmac_priv *priv, u32 duplex)
{
	u32 tx_cnt = priv->plat->tx_queues_to_use;

	stmmac_flow_ctrl(priv, priv->hw, duplex, priv->flow_ctrl,
			 priv->pause, tx_cnt);
}

static void stmmac_validate(struct phylink_config *config,
			    unsigned long *supported,
			    struct phylink_link_state *state)
{
	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
	__ETHTOOL_DECLARE_LINK_MODE_MASK(mac_supported) = { 0, };
	__ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
	int tx_cnt = priv->plat->tx_queues_to_use;
	int max_speed = priv->plat->max_speed;

	phylink_set(mac_supported, 10baseT_Half);
	phylink_set(mac_supported, 10baseT_Full);
	phylink_set(mac_supported, 100baseT_Half);
	phylink_set(mac_supported, 100baseT_Full);
	phylink_set(mac_supported, 1000baseT_Half);
	phylink_set(mac_supported, 1000baseT_Full);
	phylink_set(mac_supported, 1000baseKX_Full);

	phylink_set(mac_supported, Autoneg);
	phylink_set(mac_supported, Pause);
	phylink_set(mac_supported, Asym_Pause);
	phylink_set_port_modes(mac_supported);

	/* Cut down 1G if asked to */
	if ((max_speed > 0) && (max_speed < 1000)) {
		phylink_set(mask, 1000baseT_Full);
		phylink_set(mask, 1000baseX_Full);
	} else if (priv->plat->has_gmac4) {
		if (!max_speed || max_speed >= 2500) {
			phylink_set(mac_supported, 2500baseT_Full);
			phylink_set(mac_supported, 2500baseX_Full);
		}
	} else if (priv->plat->has_xgmac) {
		if (!max_speed || (max_speed >= 2500)) {
			phylink_set(mac_supported, 2500baseT_Full);
			phylink_set(mac_supported, 2500baseX_Full);
		}
		if (!max_speed || (max_speed >= 5000)) {
			phylink_set(mac_supported, 5000baseT_Full);
		}
		if (!max_speed || (max_speed >= 10000)) {
			phylink_set(mac_supported, 10000baseSR_Full);
			phylink_set(mac_supported, 10000baseLR_Full);
			phylink_set(mac_supported, 10000baseER_Full);
			phylink_set(mac_supported, 10000baseLRM_Full);
			phylink_set(mac_supported, 10000baseT_Full);
			phylink_set(mac_supported, 10000baseKX4_Full);
			phylink_set(mac_supported, 10000baseKR_Full);
		}
		if (!max_speed || (max_speed >= 25000)) {
			phylink_set(mac_supported, 25000baseCR_Full);
			phylink_set(mac_supported, 25000baseKR_Full);
			phylink_set(mac_supported, 25000baseSR_Full);
		}
		if (!max_speed || (max_speed >= 40000)) {
			phylink_set(mac_supported, 40000baseKR4_Full);
			phylink_set(mac_supported, 40000baseCR4_Full);
			phylink_set(mac_supported, 40000baseSR4_Full);
			phylink_set(mac_supported, 40000baseLR4_Full);
		}
		if (!max_speed || (max_speed >= 50000)) {
			phylink_set(mac_supported, 50000baseCR2_Full);
			phylink_set(mac_supported, 50000baseKR2_Full);
			phylink_set(mac_supported, 50000baseSR2_Full);
			phylink_set(mac_supported, 50000baseKR_Full);
			phylink_set(mac_supported, 50000baseSR_Full);
			phylink_set(mac_supported, 50000baseCR_Full);
			phylink_set(mac_supported, 50000baseLR_ER_FR_Full);
			phylink_set(mac_supported, 50000baseDR_Full);
		}
		if (!max_speed || (max_speed >= 100000)) {
			phylink_set(mac_supported, 100000baseKR4_Full);
			phylink_set(mac_supported, 100000baseSR4_Full);
			phylink_set(mac_supported, 100000baseCR4_Full);
			phylink_set(mac_supported, 100000baseLR4_ER4_Full);
			phylink_set(mac_supported, 100000baseKR2_Full);
			phylink_set(mac_supported, 100000baseSR2_Full);
			phylink_set(mac_supported, 100000baseCR2_Full);
			phylink_set(mac_supported, 100000baseLR2_ER2_FR2_Full);
			phylink_set(mac_supported, 100000baseDR2_Full);
		}
	}

	/* Half-Duplex can only work with single queue */
	if (tx_cnt > 1) {
		phylink_set(mask, 10baseT_Half);
		phylink_set(mask, 100baseT_Half);
		phylink_set(mask, 1000baseT_Half);
	}

	linkmode_and(supported, supported, mac_supported);
	linkmode_andnot(supported, supported, mask);

	linkmode_and(state->advertising, state->advertising, mac_supported);
	linkmode_andnot(state->advertising, state->advertising, mask);

	/* If PCS is supported, check which modes it supports. */
	if (priv->hw->xpcs)
		xpcs_validate(priv->hw->xpcs, supported, state);
}
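/* Example (hedged): with plat->max_speed = 100 the first branch above adds
 * 1000baseT_Full and 1000baseX_Full to "mask", and the final
 * linkmode_andnot() strips them, so only the 10/100 modes remain in both
 * the supported and advertising sets.
 */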
static void stmmac_mac_config(struct phylink_config *config, unsigned int mode,
			      const struct phylink_link_state *state)
{
	/* Nothing to do, xpcs_config() handles everything */
}

static void stmmac_fpe_link_state_handle(struct stmmac_priv *priv, bool is_up)
{
	struct stmmac_fpe_cfg *fpe_cfg = priv->plat->fpe_cfg;
	enum stmmac_fpe_state *lo_state = &fpe_cfg->lo_fpe_state;
	enum stmmac_fpe_state *lp_state = &fpe_cfg->lp_fpe_state;
	bool *hs_enable = &fpe_cfg->hs_enable;

	if (is_up && *hs_enable) {
		stmmac_fpe_send_mpacket(priv, priv->ioaddr, MPACKET_VERIFY);
	} else {
		*lo_state = FPE_STATE_OFF;
		*lp_state = FPE_STATE_OFF;
	}
}
static void stmmac_mac_link_down(struct phylink_config *config,
				 unsigned int mode, phy_interface_t interface)
{
	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));

	stmmac_mac_set(priv, priv->ioaddr, false);
	priv->eee_active = false;
	priv->tx_lpi_enabled = false;
	priv->eee_enabled = stmmac_eee_init(priv);
	stmmac_set_eee_pls(priv, priv->hw, false);

	if (priv->dma_cap.fpesel)
		stmmac_fpe_link_state_handle(priv, false);
}

static void stmmac_mac_link_up(struct phylink_config *config,
			       struct phy_device *phy,
			       unsigned int mode, phy_interface_t interface,
			       int speed, int duplex,
			       bool tx_pause, bool rx_pause)
{
	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
	u32 ctrl;

	ctrl = readl(priv->ioaddr + MAC_CTRL_REG);
	ctrl &= ~priv->hw->link.speed_mask;

	if (interface == PHY_INTERFACE_MODE_USXGMII) {
		switch (speed) {
		case SPEED_10000:
			ctrl |= priv->hw->link.xgmii.speed10000;
			break;
		case SPEED_5000:
			ctrl |= priv->hw->link.xgmii.speed5000;
			break;
		case SPEED_2500:
			ctrl |= priv->hw->link.xgmii.speed2500;
			break;
		default:
			return;
		}
	} else if (interface == PHY_INTERFACE_MODE_XLGMII) {
		switch (speed) {
		case SPEED_100000:
			ctrl |= priv->hw->link.xlgmii.speed100000;
			break;
		case SPEED_50000:
			ctrl |= priv->hw->link.xlgmii.speed50000;
			break;
		case SPEED_40000:
			ctrl |= priv->hw->link.xlgmii.speed40000;
			break;
		case SPEED_25000:
			ctrl |= priv->hw->link.xlgmii.speed25000;
			break;
		case SPEED_10000:
			ctrl |= priv->hw->link.xgmii.speed10000;
			break;
		case SPEED_2500:
			ctrl |= priv->hw->link.speed2500;
			break;
		case SPEED_1000:
			ctrl |= priv->hw->link.speed1000;
			break;
		default:
			return;
		}
	} else {
		switch (speed) {
		case SPEED_2500:
			ctrl |= priv->hw->link.speed2500;
			break;
		case SPEED_1000:
			ctrl |= priv->hw->link.speed1000;
			break;
		case SPEED_100:
			ctrl |= priv->hw->link.speed100;
			break;
		case SPEED_10:
			ctrl |= priv->hw->link.speed10;
			break;
		default:
			return;
		}
	}

	priv->speed = speed;

	if (priv->plat->fix_mac_speed)
		priv->plat->fix_mac_speed(priv->plat->bsp_priv, speed);

	if (!duplex)
		ctrl &= ~priv->hw->link.duplex;
	else
		ctrl |= priv->hw->link.duplex;

	/* Flow Control operation */
	if (tx_pause && rx_pause)
		stmmac_mac_flow_ctrl(priv, duplex);

	writel(ctrl, priv->ioaddr + MAC_CTRL_REG);

	stmmac_mac_set(priv, priv->ioaddr, true);
	if (phy && priv->dma_cap.eee) {
		priv->eee_active = phy_init_eee(phy, 1) >= 0;
		priv->eee_enabled = stmmac_eee_init(priv);
		priv->tx_lpi_enabled = priv->eee_enabled;
		stmmac_set_eee_pls(priv, priv->hw, true);
	}

	if (priv->dma_cap.fpesel)
		stmmac_fpe_link_state_handle(priv, true);
}

static const struct phylink_mac_ops stmmac_phylink_mac_ops = {
	.validate = stmmac_validate,
	.mac_config = stmmac_mac_config,
	.mac_link_down = stmmac_mac_link_down,
	.mac_link_up = stmmac_mac_link_up,
};

/**
 * stmmac_check_pcs_mode - verify if RGMII/SGMII is supported
 * @priv: driver private structure
 * Description: this is to verify if the HW supports the PCS.
 * Physical Coding Sublayer (PCS) interface that can be used when the MAC is
 * configured for the TBI, RTBI, or SGMII PHY interface.
 */
static void stmmac_check_pcs_mode(struct stmmac_priv *priv)
{
	int interface = priv->plat->interface;

	if (priv->dma_cap.pcs) {
		if ((interface == PHY_INTERFACE_MODE_RGMII) ||
		    (interface == PHY_INTERFACE_MODE_RGMII_ID) ||
		    (interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
		    (interface == PHY_INTERFACE_MODE_RGMII_TXID)) {
			netdev_dbg(priv->dev, "PCS RGMII support enabled\n");
			priv->hw->pcs = STMMAC_PCS_RGMII;
		} else if (interface == PHY_INTERFACE_MODE_SGMII) {
			netdev_dbg(priv->dev, "PCS SGMII support enabled\n");
			priv->hw->pcs = STMMAC_PCS_SGMII;
		}
	}
}

/**
 * stmmac_init_phy - PHY initialization
 * @dev: net device structure
 * Description: it initializes the driver's PHY state, and attaches the PHY
 * to the mac driver.
 * Return value:
 * 0 on success
 */
static int stmmac_init_phy(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	struct device_node *node;
	int ret;

	node = priv->plat->phylink_node;

	if (node)
		ret = phylink_of_phy_connect(priv->phylink, node, 0);

	/* Some DT bindings do not set up the PHY handle. Let's try to
	 * manually parse it
	 */
	if (!node || ret) {
		int addr = priv->plat->phy_addr;
		struct phy_device *phydev;

		phydev = mdiobus_get_phy(priv->mii, addr);
		if (!phydev) {
			netdev_err(priv->dev, "no phy at addr %d\n", addr);
			return -ENODEV;
		}

		ret = phylink_connect_phy(priv->phylink, phydev);
	}

	if (!priv->plat->pmt) {
		struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };

		phylink_ethtool_get_wol(priv->phylink, &wol);
		device_set_wakeup_capable(priv->device, !!wol.supported);
	}

	return ret;
}
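/* Device-tree sketch (hedged, illustrative only): the normal path expects
 * the binding to provide a PHY handle on the phylink node, e.g.
 *
 *	&gmac {
 *		phy-handle = <&phy0>;
 *		phy-mode = "rgmii-id";
 *	};
 *
 * When no handle is present, the fallback above asks the MDIO bus for the
 * PHY at plat->phy_addr directly.
 */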
static int stmmac_phy_setup(struct stmmac_priv *priv)
{
	struct stmmac_mdio_bus_data *mdio_bus_data = priv->plat->mdio_bus_data;
	struct fwnode_handle *fwnode = of_fwnode_handle(priv->plat->phylink_node);
	int mode = priv->plat->phy_interface;
	struct phylink *phylink;

	priv->phylink_config.dev = &priv->dev->dev;
	priv->phylink_config.type = PHYLINK_NETDEV;
	priv->phylink_config.pcs_poll = true;
	if (priv->plat->mdio_bus_data)
		priv->phylink_config.ovr_an_inband =
			mdio_bus_data->xpcs_an_inband;

	if (!fwnode)
		fwnode = dev_fwnode(priv->device);

	phylink = phylink_create(&priv->phylink_config, fwnode,
				 mode, &stmmac_phylink_mac_ops);
	if (IS_ERR(phylink))
		return PTR_ERR(phylink);

	if (priv->hw->xpcs)
		phylink_set_pcs(phylink, &priv->hw->xpcs->pcs);

	priv->phylink = phylink;
	return 0;
}

static void stmmac_display_rx_rings(struct stmmac_priv *priv)
{
	u32 rx_cnt = priv->plat->rx_queues_to_use;
	unsigned int desc_size;
	void *head_rx;
	u32 queue;

	/* Display RX rings */
	for (queue = 0; queue < rx_cnt; queue++) {
		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];

		pr_info("\tRX Queue %u rings\n", queue);

		if (priv->extend_desc) {
			head_rx = (void *)rx_q->dma_erx;
			desc_size = sizeof(struct dma_extended_desc);
		} else {
			head_rx = (void *)rx_q->dma_rx;
			desc_size = sizeof(struct dma_desc);
		}

		/* Display RX ring */
		stmmac_display_ring(priv, head_rx, priv->dma_rx_size, true,
				    rx_q->dma_rx_phy, desc_size);
	}
}

static void stmmac_display_tx_rings(struct stmmac_priv *priv)
{
	u32 tx_cnt = priv->plat->tx_queues_to_use;
	unsigned int desc_size;
	void *head_tx;
	u32 queue;

	/* Display TX rings */
	for (queue = 0; queue < tx_cnt; queue++) {
		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];

		pr_info("\tTX Queue %d rings\n", queue);

		if (priv->extend_desc) {
			head_tx = (void *)tx_q->dma_etx;
			desc_size = sizeof(struct dma_extended_desc);
		} else if (tx_q->tbs & STMMAC_TBS_AVAIL) {
			head_tx = (void *)tx_q->dma_entx;
			desc_size = sizeof(struct dma_edesc);
		} else {
			head_tx = (void *)tx_q->dma_tx;
			desc_size = sizeof(struct dma_desc);
		}

		stmmac_display_ring(priv, head_tx, priv->dma_tx_size, false,
				    tx_q->dma_tx_phy, desc_size);
	}
}

static void stmmac_display_rings(struct stmmac_priv *priv)
{
	/* Display RX ring */
	stmmac_display_rx_rings(priv);

	/* Display TX ring */
	stmmac_display_tx_rings(priv);
}

static int stmmac_set_bfsize(int mtu, int bufsize)
{
	int ret = bufsize;

	if (mtu >= BUF_SIZE_8KiB)
		ret = BUF_SIZE_16KiB;
	else if (mtu >= BUF_SIZE_4KiB)
		ret = BUF_SIZE_8KiB;
	else if (mtu >= BUF_SIZE_2KiB)
		ret = BUF_SIZE_4KiB;
	else if (mtu > DEFAULT_BUFSIZE)
		ret = BUF_SIZE_2KiB;
	else
		ret = DEFAULT_BUFSIZE;

	return ret;
}
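/* Example (hedged): stmmac_set_bfsize() rounds the buffer size up to the
 * next ring-supported size, e.g. an MTU of 3000 falls in the
 * BUF_SIZE_2KiB <= mtu < BUF_SIZE_4KiB branch and returns BUF_SIZE_4KiB.
 */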
/**
 * stmmac_clear_rx_descriptors - clear RX descriptors
 * @priv: driver private structure
 * @queue: RX queue index
 * Description: this function is called to clear the RX descriptors
 * whether basic or extended descriptors are in use.
 */
static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv, u32 queue)
{
	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
	int i;

	/* Clear the RX descriptors */
	for (i = 0; i < priv->dma_rx_size; i++)
		if (priv->extend_desc)
			stmmac_init_rx_desc(priv, &rx_q->dma_erx[i].basic,
					    priv->use_riwt, priv->mode,
					    (i == priv->dma_rx_size - 1),
					    priv->dma_buf_sz);
		else
			stmmac_init_rx_desc(priv, &rx_q->dma_rx[i],
					    priv->use_riwt, priv->mode,
					    (i == priv->dma_rx_size - 1),
					    priv->dma_buf_sz);
}

/**
 * stmmac_clear_tx_descriptors - clear tx descriptors
 * @priv: driver private structure
 * @queue: TX queue index.
 * Description: this function is called to clear the TX descriptors
 * whether basic or extended descriptors are in use.
 */
static void stmmac_clear_tx_descriptors(struct stmmac_priv *priv, u32 queue)
{
	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
	int i;

	/* Clear the TX descriptors */
	for (i = 0; i < priv->dma_tx_size; i++) {
		int last = (i == (priv->dma_tx_size - 1));
		struct dma_desc *p;

		if (priv->extend_desc)
			p = &tx_q->dma_etx[i].basic;
		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
			p = &tx_q->dma_entx[i].basic;
		else
			p = &tx_q->dma_tx[i];

		stmmac_init_tx_desc(priv, p, priv->mode, last);
	}
}

/**
 * stmmac_clear_descriptors - clear descriptors
 * @priv: driver private structure
 * Description: this function is called to clear the TX and RX descriptors
 * whether basic or extended descriptors are in use.
 */
static void stmmac_clear_descriptors(struct stmmac_priv *priv)
{
	u32 rx_queue_cnt = priv->plat->rx_queues_to_use;
	u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
	u32 queue;

	/* Clear the RX descriptors */
	for (queue = 0; queue < rx_queue_cnt; queue++)
		stmmac_clear_rx_descriptors(priv, queue);

	/* Clear the TX descriptors */
	for (queue = 0; queue < tx_queue_cnt; queue++)
		stmmac_clear_tx_descriptors(priv, queue);
}

/**
 * stmmac_init_rx_buffers - init the RX descriptor buffer.
 * @priv: driver private structure
 * @p: descriptor pointer
 * @i: descriptor index
 * @flags: gfp flag
 * @queue: RX queue index
 * Description: this function is called to allocate a receive buffer, perform
 * the DMA mapping and init the descriptor.
 */
static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
				  int i, gfp_t flags, u32 queue)
{
	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
	struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];

	if (!buf->page) {
		buf->page = page_pool_dev_alloc_pages(rx_q->page_pool);
		if (!buf->page)
			return -ENOMEM;
		buf->page_offset = stmmac_rx_offset(priv);
	}

	if (priv->sph && !buf->sec_page) {
		buf->sec_page = page_pool_dev_alloc_pages(rx_q->page_pool);
		if (!buf->sec_page)
			return -ENOMEM;

		buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
		stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true);
	} else {
		buf->sec_page = NULL;
		stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false);
	}

	buf->addr = page_pool_get_dma_addr(buf->page) + buf->page_offset;

	stmmac_set_desc_addr(priv, p, buf->addr);
	if (priv->dma_buf_sz == BUF_SIZE_16KiB)
		stmmac_init_desc3(priv, p);

	return 0;
}
/**
 * stmmac_free_rx_buffer - free RX dma buffers
 * @priv: private structure
 * @queue: RX queue index
 * @i: buffer index.
 */
static void stmmac_free_rx_buffer(struct stmmac_priv *priv, u32 queue, int i)
{
	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
	struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];

	if (buf->page)
		page_pool_put_full_page(rx_q->page_pool, buf->page, false);
	buf->page = NULL;

	if (buf->sec_page)
		page_pool_put_full_page(rx_q->page_pool, buf->sec_page, false);
	buf->sec_page = NULL;
}

/**
 * stmmac_free_tx_buffer - free TX dma buffers
 * @priv: private structure
 * @queue: TX queue index
 * @i: buffer index.
 */
static void stmmac_free_tx_buffer(struct stmmac_priv *priv, u32 queue, int i)
{
	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];

	if (tx_q->tx_skbuff_dma[i].buf &&
	    tx_q->tx_skbuff_dma[i].buf_type != STMMAC_TXBUF_T_XDP_TX) {
		if (tx_q->tx_skbuff_dma[i].map_as_page)
			dma_unmap_page(priv->device,
				       tx_q->tx_skbuff_dma[i].buf,
				       tx_q->tx_skbuff_dma[i].len,
				       DMA_TO_DEVICE);
		else
			dma_unmap_single(priv->device,
					 tx_q->tx_skbuff_dma[i].buf,
					 tx_q->tx_skbuff_dma[i].len,
					 DMA_TO_DEVICE);
	}

	if (tx_q->xdpf[i] &&
	    (tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XDP_TX ||
	     tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XDP_NDO)) {
		xdp_return_frame(tx_q->xdpf[i]);
		tx_q->xdpf[i] = NULL;
	}

	if (tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XSK_TX)
		tx_q->xsk_frames_done++;

	if (tx_q->tx_skbuff[i] &&
	    tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_SKB) {
		dev_kfree_skb_any(tx_q->tx_skbuff[i]);
		tx_q->tx_skbuff[i] = NULL;
	}

	tx_q->tx_skbuff_dma[i].buf = 0;
	tx_q->tx_skbuff_dma[i].map_as_page = false;
}
/**
 * dma_free_rx_skbufs - free RX dma buffers
 * @priv: private structure
 * @queue: RX queue index
 */
static void dma_free_rx_skbufs(struct stmmac_priv *priv, u32 queue)
{
	int i;

	for (i = 0; i < priv->dma_rx_size; i++)
		stmmac_free_rx_buffer(priv, queue, i);
}

static int stmmac_alloc_rx_buffers(struct stmmac_priv *priv, u32 queue,
				   gfp_t flags)
{
	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
	int i;

	for (i = 0; i < priv->dma_rx_size; i++) {
		struct dma_desc *p;
		int ret;

		if (priv->extend_desc)
			p = &((rx_q->dma_erx + i)->basic);
		else
			p = rx_q->dma_rx + i;

		ret = stmmac_init_rx_buffers(priv, p, i, flags,
					     queue);
		if (ret)
			return ret;

		rx_q->buf_alloc_num++;
	}

	return 0;
}

/**
 * dma_free_rx_xskbufs - free RX dma buffers from XSK pool
 * @priv: private structure
 * @queue: RX queue index
 */
static void dma_free_rx_xskbufs(struct stmmac_priv *priv, u32 queue)
{
	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
	int i;

	for (i = 0; i < priv->dma_rx_size; i++) {
		struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];

		if (!buf->xdp)
			continue;

		xsk_buff_free(buf->xdp);
		buf->xdp = NULL;
	}
}

static int stmmac_alloc_rx_buffers_zc(struct stmmac_priv *priv, u32 queue)
{
	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
	int i;

	for (i = 0; i < priv->dma_rx_size; i++) {
		struct stmmac_rx_buffer *buf;
		dma_addr_t dma_addr;
		struct dma_desc *p;

		if (priv->extend_desc)
			p = (struct dma_desc *)(rx_q->dma_erx + i);
		else
			p = rx_q->dma_rx + i;

		buf = &rx_q->buf_pool[i];

		buf->xdp = xsk_buff_alloc(rx_q->xsk_pool);
		if (!buf->xdp)
			return -ENOMEM;

		dma_addr = xsk_buff_xdp_get_dma(buf->xdp);
		stmmac_set_desc_addr(priv, p, dma_addr);
		rx_q->buf_alloc_num++;
	}

	return 0;
}

static struct xsk_buff_pool *stmmac_get_xsk_pool(struct stmmac_priv *priv, u32 queue)
{
	if (!stmmac_xdp_is_enabled(priv) || !test_bit(queue, priv->af_xdp_zc_qps))
		return NULL;

	return xsk_get_pool_from_qid(priv->dev, queue);
}
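/* Note (hedged): stmmac_get_xsk_pool() only returns a pool when XDP is
 * enabled *and* the queue's bit is set in af_xdp_zc_qps, i.e. after
 * userspace has bound an AF_XDP socket in zero-copy mode to that queue id.
 */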
/**
 * __init_dma_rx_desc_rings - init the RX descriptor ring (per queue)
 * @priv: driver private structure
 * @queue: RX queue index
 * @flags: gfp flag.
 * Description: this function initializes the DMA RX descriptors
 * and allocates the socket buffers. It supports the chained and ring
 * modes.
 */
static int __init_dma_rx_desc_rings(struct stmmac_priv *priv, u32 queue, gfp_t flags)
{
	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
	int ret;

	netif_dbg(priv, probe, priv->dev,
		  "(%s) dma_rx_phy=0x%08x\n", __func__,
		  (u32)rx_q->dma_rx_phy);

	stmmac_clear_rx_descriptors(priv, queue);

	xdp_rxq_info_unreg_mem_model(&rx_q->xdp_rxq);

	rx_q->xsk_pool = stmmac_get_xsk_pool(priv, queue);

	if (rx_q->xsk_pool) {
		WARN_ON(xdp_rxq_info_reg_mem_model(&rx_q->xdp_rxq,
						   MEM_TYPE_XSK_BUFF_POOL,
						   NULL));
		netdev_info(priv->dev,
			    "Register MEM_TYPE_XSK_BUFF_POOL RxQ-%d\n",
			    rx_q->queue_index);
		xsk_pool_set_rxq_info(rx_q->xsk_pool, &rx_q->xdp_rxq);
	} else {
		WARN_ON(xdp_rxq_info_reg_mem_model(&rx_q->xdp_rxq,
						   MEM_TYPE_PAGE_POOL,
						   rx_q->page_pool));
		netdev_info(priv->dev,
			    "Register MEM_TYPE_PAGE_POOL RxQ-%d\n",
			    rx_q->queue_index);
	}

	if (rx_q->xsk_pool) {
		/* RX XDP ZC buffer pool may not be populated, e.g.
		 * xdpsock TX-only.
		 */
		stmmac_alloc_rx_buffers_zc(priv, queue);
	} else {
		ret = stmmac_alloc_rx_buffers(priv, queue, flags);
		if (ret < 0)
			return -ENOMEM;
	}

	rx_q->cur_rx = 0;
	rx_q->dirty_rx = 0;

	/* Setup the chained descriptor addresses */
	if (priv->mode == STMMAC_CHAIN_MODE) {
		if (priv->extend_desc)
			stmmac_mode_init(priv, rx_q->dma_erx,
					 rx_q->dma_rx_phy,
					 priv->dma_rx_size, 1);
		else
			stmmac_mode_init(priv, rx_q->dma_rx,
					 rx_q->dma_rx_phy,
					 priv->dma_rx_size, 0);
	}

	return 0;
}

static int init_dma_rx_desc_rings(struct net_device *dev, gfp_t flags)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	u32 rx_count = priv->plat->rx_queues_to_use;
	u32 queue;
	int ret;

	/* RX INITIALIZATION */
	netif_dbg(priv, probe, priv->dev,
		  "SKB addresses:\nskb\t\tskb data\tdma data\n");

	for (queue = 0; queue < rx_count; queue++) {
		ret = __init_dma_rx_desc_rings(priv, queue, flags);
		if (ret)
			goto err_init_rx_buffers;
	}

	return 0;

err_init_rx_buffers:
	while (queue >= 0) {
		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];

		if (rx_q->xsk_pool)
			dma_free_rx_xskbufs(priv, queue);
		else
			dma_free_rx_skbufs(priv, queue);

		rx_q->buf_alloc_num = 0;
		rx_q->xsk_pool = NULL;

		if (queue == 0)
			break;

		queue--;
	}

	return ret;
}
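/* Note (hedged): the memory model registered with the XDP core must match
 * the buffer source chosen above: MEM_TYPE_XSK_BUFF_POOL when a zero-copy
 * pool backs the ring, MEM_TYPE_PAGE_POOL otherwise, so that redirected or
 * returned frames are recycled to the right allocator.
 */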
/**
 * __init_dma_tx_desc_rings - init the TX descriptor ring (per queue)
 * @priv: driver private structure
 * @queue: TX queue index
 * Description: this function initializes the DMA TX descriptors
 * and allocates the socket buffers. It supports the chained and ring
 * modes.
 */
static int __init_dma_tx_desc_rings(struct stmmac_priv *priv, u32 queue)
{
	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
	int i;

	netif_dbg(priv, probe, priv->dev,
		  "(%s) dma_tx_phy=0x%08x\n", __func__,
		  (u32)tx_q->dma_tx_phy);

	/* Setup the chained descriptor addresses */
	if (priv->mode == STMMAC_CHAIN_MODE) {
		if (priv->extend_desc)
			stmmac_mode_init(priv, tx_q->dma_etx,
					 tx_q->dma_tx_phy,
					 priv->dma_tx_size, 1);
		else if (!(tx_q->tbs & STMMAC_TBS_AVAIL))
			stmmac_mode_init(priv, tx_q->dma_tx,
					 tx_q->dma_tx_phy,
					 priv->dma_tx_size, 0);
	}

	tx_q->xsk_pool = stmmac_get_xsk_pool(priv, queue);

	for (i = 0; i < priv->dma_tx_size; i++) {
		struct dma_desc *p;

		if (priv->extend_desc)
			p = &((tx_q->dma_etx + i)->basic);
		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
			p = &((tx_q->dma_entx + i)->basic);
		else
			p = tx_q->dma_tx + i;

		stmmac_clear_desc(priv, p);

		tx_q->tx_skbuff_dma[i].buf = 0;
		tx_q->tx_skbuff_dma[i].map_as_page = false;
		tx_q->tx_skbuff_dma[i].len = 0;
		tx_q->tx_skbuff_dma[i].last_segment = false;
		tx_q->tx_skbuff[i] = NULL;
	}

	tx_q->dirty_tx = 0;
	tx_q->cur_tx = 0;
	tx_q->mss = 0;

	netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue));

	return 0;
}

static int init_dma_tx_desc_rings(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	u32 tx_queue_cnt;
	u32 queue;

	tx_queue_cnt = priv->plat->tx_queues_to_use;

	for (queue = 0; queue < tx_queue_cnt; queue++)
		__init_dma_tx_desc_rings(priv, queue);

	return 0;
}
/**
 * init_dma_desc_rings - init the RX/TX descriptor rings
 * @dev: net device structure
 * @flags: gfp flag.
 * Description: this function initializes the DMA RX/TX descriptors
 * and allocates the socket buffers. It supports the chained and ring
 * modes.
 */
static int init_dma_desc_rings(struct net_device *dev, gfp_t flags)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	int ret;

	ret = init_dma_rx_desc_rings(dev, flags);
	if (ret)
		return ret;

	ret = init_dma_tx_desc_rings(dev);

	stmmac_clear_descriptors(priv);

	if (netif_msg_hw(priv))
		stmmac_display_rings(priv);

	return ret;
}

/**
 * dma_free_tx_skbufs - free TX dma buffers
 * @priv: private structure
 * @queue: TX queue index
 */
static void dma_free_tx_skbufs(struct stmmac_priv *priv, u32 queue)
{
	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
	int i;

	tx_q->xsk_frames_done = 0;

	for (i = 0; i < priv->dma_tx_size; i++)
		stmmac_free_tx_buffer(priv, queue, i);

	if (tx_q->xsk_pool && tx_q->xsk_frames_done) {
		xsk_tx_completed(tx_q->xsk_pool, tx_q->xsk_frames_done);
		tx_q->xsk_frames_done = 0;
		tx_q->xsk_pool = NULL;
	}
}

/**
 * stmmac_free_tx_skbufs - free TX skb buffers
 * @priv: private structure
 */
static void stmmac_free_tx_skbufs(struct stmmac_priv *priv)
{
	u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
	u32 queue;

	for (queue = 0; queue < tx_queue_cnt; queue++)
		dma_free_tx_skbufs(priv, queue);
}

/**
 * __free_dma_rx_desc_resources - free RX dma desc resources (per queue)
 * @priv: private structure
 * @queue: RX queue index
 */
static void __free_dma_rx_desc_resources(struct stmmac_priv *priv, u32 queue)
{
	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];

	/* Release the DMA RX socket buffers */
	if (rx_q->xsk_pool)
		dma_free_rx_xskbufs(priv, queue);
	else
		dma_free_rx_skbufs(priv, queue);

	rx_q->buf_alloc_num = 0;
	rx_q->xsk_pool = NULL;

	/* Free DMA regions of consistent memory previously allocated */
	if (!priv->extend_desc)
		dma_free_coherent(priv->device, priv->dma_rx_size *
				  sizeof(struct dma_desc),
				  rx_q->dma_rx, rx_q->dma_rx_phy);
	else
		dma_free_coherent(priv->device, priv->dma_rx_size *
				  sizeof(struct dma_extended_desc),
				  rx_q->dma_erx, rx_q->dma_rx_phy);

	if (xdp_rxq_info_is_reg(&rx_q->xdp_rxq))
		xdp_rxq_info_unreg(&rx_q->xdp_rxq);

	kfree(rx_q->buf_pool);
	if (rx_q->page_pool)
		page_pool_destroy(rx_q->page_pool);
}

static void free_dma_rx_desc_resources(struct stmmac_priv *priv)
{
	u32 rx_count = priv->plat->rx_queues_to_use;
	u32 queue;

	/* Free RX queue resources */
	for (queue = 0; queue < rx_count; queue++)
		__free_dma_rx_desc_resources(priv, queue);
}
1937 1938 dma_free_coherent(priv->device, size, addr, tx_q->dma_tx_phy); 1939 1940 kfree(tx_q->tx_skbuff_dma); 1941 kfree(tx_q->tx_skbuff); 1942 } 1943 1944 static void free_dma_tx_desc_resources(struct stmmac_priv *priv) 1945 { 1946 u32 tx_count = priv->plat->tx_queues_to_use; 1947 u32 queue; 1948 1949 /* Free TX queue resources */ 1950 for (queue = 0; queue < tx_count; queue++) 1951 __free_dma_tx_desc_resources(priv, queue); 1952 } 1953 1954 /** 1955 * __alloc_dma_rx_desc_resources - alloc RX resources (per queue). 1956 * @priv: private structure 1957 * @queue: RX queue index 1958 * Description: according to which descriptor can be used (extended or basic) 1959 * this function allocates the RX resources for the given queue: the page pool 1960 * from which RX buffers are pre-allocated (allowing the zero-copy mechanism), 1961 * the buf_pool bookkeeping array, the coherent descriptor ring and the XDP RXQ info. 1962 */ 1963 static int __alloc_dma_rx_desc_resources(struct stmmac_priv *priv, u32 queue) 1964 { 1965 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; 1966 struct stmmac_channel *ch = &priv->channel[queue]; 1967 bool xdp_prog = stmmac_xdp_is_enabled(priv); 1968 struct page_pool_params pp_params = { 0 }; 1969 unsigned int num_pages; 1970 unsigned int napi_id; 1971 int ret; 1972 1973 rx_q->queue_index = queue; 1974 rx_q->priv_data = priv; 1975 1976 pp_params.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV; 1977 pp_params.pool_size = priv->dma_rx_size; 1978 num_pages = DIV_ROUND_UP(priv->dma_buf_sz, PAGE_SIZE); 1979 pp_params.order = ilog2(num_pages); 1980 pp_params.nid = dev_to_node(priv->device); 1981 pp_params.dev = priv->device; 1982 pp_params.dma_dir = xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE; 1983 pp_params.offset = stmmac_rx_offset(priv); 1984 pp_params.max_len = STMMAC_MAX_RX_BUF_SIZE(num_pages); 1985 1986 rx_q->page_pool = page_pool_create(&pp_params); 1987 if (IS_ERR(rx_q->page_pool)) { 1988 ret = PTR_ERR(rx_q->page_pool); 1989 rx_q->page_pool = NULL; 1990 return ret; 1991 } 1992 1993 rx_q->buf_pool = kcalloc(priv->dma_rx_size, 1994 sizeof(*rx_q->buf_pool), 1995 GFP_KERNEL); 1996 if (!rx_q->buf_pool) 1997 return -ENOMEM; 1998 1999 if (priv->extend_desc) { 2000 rx_q->dma_erx = dma_alloc_coherent(priv->device, 2001 priv->dma_rx_size * 2002 sizeof(struct dma_extended_desc), 2003 &rx_q->dma_rx_phy, 2004 GFP_KERNEL); 2005 if (!rx_q->dma_erx) 2006 return -ENOMEM; 2007 2008 } else { 2009 rx_q->dma_rx = dma_alloc_coherent(priv->device, 2010 priv->dma_rx_size * 2011 sizeof(struct dma_desc), 2012 &rx_q->dma_rx_phy, 2013 GFP_KERNEL); 2014 if (!rx_q->dma_rx) 2015 return -ENOMEM; 2016 } 2017 2018 if (stmmac_xdp_is_enabled(priv) && 2019 test_bit(queue, priv->af_xdp_zc_qps)) 2020 napi_id = ch->rxtx_napi.napi_id; 2021 else 2022 napi_id = ch->rx_napi.napi_id; 2023 2024 ret = xdp_rxq_info_reg(&rx_q->xdp_rxq, priv->dev, 2025 rx_q->queue_index, 2026 napi_id); 2027 if (ret) { 2028 netdev_err(priv->dev, "Failed to register xdp rxq info\n"); 2029 return -EINVAL; 2030 } 2031 2032 return 0; 2033 } 2034 2035 static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv) 2036 { 2037 u32 rx_count = priv->plat->rx_queues_to_use; 2038 u32 queue; 2039 int ret; 2040 2041 /* RX queues buffers and DMA */ 2042 for (queue = 0; queue < rx_count; queue++) { 2043 ret = __alloc_dma_rx_desc_resources(priv, queue); 2044 if (ret) 2045 goto err_dma; 2046 } 2047 2048 return 0; 2049 2050 err_dma: 2051 free_dma_rx_desc_resources(priv); 2052 2053 return ret; 2054 } 2055 2056 /** 2057 * __alloc_dma_tx_desc_resources - alloc TX resources (per queue).
2058 * @priv: private structure 2059 * @queue: TX queue index 2060 * Description: according to which descriptor can be used (extended or basic) 2061 * this function allocates the TX resources for the given queue: the 2062 * tx_skbuff and tx_skbuff_dma bookkeeping arrays and the coherent 2063 * descriptor ring. 2064 */ 2065 static int __alloc_dma_tx_desc_resources(struct stmmac_priv *priv, u32 queue) 2066 { 2067 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue]; 2068 size_t size; 2069 void *addr; 2070 2071 tx_q->queue_index = queue; 2072 tx_q->priv_data = priv; 2073 2074 tx_q->tx_skbuff_dma = kcalloc(priv->dma_tx_size, 2075 sizeof(*tx_q->tx_skbuff_dma), 2076 GFP_KERNEL); 2077 if (!tx_q->tx_skbuff_dma) 2078 return -ENOMEM; 2079 2080 tx_q->tx_skbuff = kcalloc(priv->dma_tx_size, 2081 sizeof(struct sk_buff *), 2082 GFP_KERNEL); 2083 if (!tx_q->tx_skbuff) 2084 return -ENOMEM; 2085 2086 if (priv->extend_desc) 2087 size = sizeof(struct dma_extended_desc); 2088 else if (tx_q->tbs & STMMAC_TBS_AVAIL) 2089 size = sizeof(struct dma_edesc); 2090 else 2091 size = sizeof(struct dma_desc); 2092 2093 size *= priv->dma_tx_size; 2094 2095 addr = dma_alloc_coherent(priv->device, size, 2096 &tx_q->dma_tx_phy, GFP_KERNEL); 2097 if (!addr) 2098 return -ENOMEM; 2099 2100 if (priv->extend_desc) 2101 tx_q->dma_etx = addr; 2102 else if (tx_q->tbs & STMMAC_TBS_AVAIL) 2103 tx_q->dma_entx = addr; 2104 else 2105 tx_q->dma_tx = addr; 2106 2107 return 0; 2108 } 2109 2110 static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv) 2111 { 2112 u32 tx_count = priv->plat->tx_queues_to_use; 2113 u32 queue; 2114 int ret; 2115 2116 /* TX queues buffers and DMA */ 2117 for (queue = 0; queue < tx_count; queue++) { 2118 ret = __alloc_dma_tx_desc_resources(priv, queue); 2119 if (ret) 2120 goto err_dma; 2121 } 2122 2123 return 0; 2124 2125 err_dma: 2126 free_dma_tx_desc_resources(priv); 2127 return ret; 2128 } 2129 2130 /** 2131 * alloc_dma_desc_resources - alloc TX/RX resources. 2132 * @priv: private structure 2133 * Description: according to which descriptor can be used (extended or basic) 2134 * this function allocates the resources for the TX and RX paths. In case of 2135 * reception, for example, it pre-allocates the RX socket buffers in order to 2136 * allow the zero-copy mechanism. 2137 */ 2138 static int alloc_dma_desc_resources(struct stmmac_priv *priv) 2139 { 2140 /* RX Allocation */ 2141 int ret = alloc_dma_rx_desc_resources(priv); 2142 2143 if (ret) 2144 return ret; 2145 2146 ret = alloc_dma_tx_desc_resources(priv); 2147 2148 return ret; 2149 } 2150 2151 /** 2152 * free_dma_desc_resources - free dma desc resources 2153 * @priv: private structure 2154 */ 2155 static void free_dma_desc_resources(struct stmmac_priv *priv) 2156 { 2157 /* Release the DMA TX socket buffers */ 2158 free_dma_tx_desc_resources(priv); 2159 2160 /* Release the DMA RX socket buffers later 2161 * to ensure all pending XDP_TX buffers are returned.
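 * (XDP_TX frames are built from RX page-pool pages queued on a TX ring;
 * reclaiming TX first avoids destroying a page pool that outstanding
 * XDP_TX completions still reference.)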
2162 */ 2163 free_dma_rx_desc_resources(priv); 2164 } 2165 2166 /** 2167 * stmmac_mac_enable_rx_queues - Enable MAC rx queues 2168 * @priv: driver private structure 2169 * Description: It is used for enabling the rx queues in the MAC 2170 */ 2171 static void stmmac_mac_enable_rx_queues(struct stmmac_priv *priv) 2172 { 2173 u32 rx_queues_count = priv->plat->rx_queues_to_use; 2174 int queue; 2175 u8 mode; 2176 2177 for (queue = 0; queue < rx_queues_count; queue++) { 2178 mode = priv->plat->rx_queues_cfg[queue].mode_to_use; 2179 stmmac_rx_queue_enable(priv, priv->hw, mode, queue); 2180 } 2181 } 2182 2183 /** 2184 * stmmac_start_rx_dma - start RX DMA channel 2185 * @priv: driver private structure 2186 * @chan: RX channel index 2187 * Description: 2188 * This starts a RX DMA channel 2189 */ 2190 static void stmmac_start_rx_dma(struct stmmac_priv *priv, u32 chan) 2191 { 2192 netdev_dbg(priv->dev, "DMA RX processes started in channel %d\n", chan); 2193 stmmac_start_rx(priv, priv->ioaddr, chan); 2194 } 2195 2196 /** 2197 * stmmac_start_tx_dma - start TX DMA channel 2198 * @priv: driver private structure 2199 * @chan: TX channel index 2200 * Description: 2201 * This starts a TX DMA channel 2202 */ 2203 static void stmmac_start_tx_dma(struct stmmac_priv *priv, u32 chan) 2204 { 2205 netdev_dbg(priv->dev, "DMA TX processes started in channel %d\n", chan); 2206 stmmac_start_tx(priv, priv->ioaddr, chan); 2207 } 2208 2209 /** 2210 * stmmac_stop_rx_dma - stop RX DMA channel 2211 * @priv: driver private structure 2212 * @chan: RX channel index 2213 * Description: 2214 * This stops a RX DMA channel 2215 */ 2216 static void stmmac_stop_rx_dma(struct stmmac_priv *priv, u32 chan) 2217 { 2218 netdev_dbg(priv->dev, "DMA RX processes stopped in channel %d\n", chan); 2219 stmmac_stop_rx(priv, priv->ioaddr, chan); 2220 } 2221 2222 /** 2223 * stmmac_stop_tx_dma - stop TX DMA channel 2224 * @priv: driver private structure 2225 * @chan: TX channel index 2226 * Description: 2227 * This stops a TX DMA channel 2228 */ 2229 static void stmmac_stop_tx_dma(struct stmmac_priv *priv, u32 chan) 2230 { 2231 netdev_dbg(priv->dev, "DMA TX processes stopped in channel %d\n", chan); 2232 stmmac_stop_tx(priv, priv->ioaddr, chan); 2233 } 2234 2235 /** 2236 * stmmac_start_all_dma - start all RX and TX DMA channels 2237 * @priv: driver private structure 2238 * Description: 2239 * This starts all the RX and TX DMA channels 2240 */ 2241 static void stmmac_start_all_dma(struct stmmac_priv *priv) 2242 { 2243 u32 rx_channels_count = priv->plat->rx_queues_to_use; 2244 u32 tx_channels_count = priv->plat->tx_queues_to_use; 2245 u32 chan = 0; 2246 2247 for (chan = 0; chan < rx_channels_count; chan++) 2248 stmmac_start_rx_dma(priv, chan); 2249 2250 for (chan = 0; chan < tx_channels_count; chan++) 2251 stmmac_start_tx_dma(priv, chan); 2252 } 2253 2254 /** 2255 * stmmac_stop_all_dma - stop all RX and TX DMA channels 2256 * @priv: driver private structure 2257 * Description: 2258 * This stops the RX and TX DMA channels 2259 */ 2260 static void stmmac_stop_all_dma(struct stmmac_priv *priv) 2261 { 2262 u32 rx_channels_count = priv->plat->rx_queues_to_use; 2263 u32 tx_channels_count = priv->plat->tx_queues_to_use; 2264 u32 chan = 0; 2265 2266 for (chan = 0; chan < rx_channels_count; chan++) 2267 stmmac_stop_rx_dma(priv, chan); 2268 2269 for (chan = 0; chan < tx_channels_count; chan++) 2270 stmmac_stop_tx_dma(priv, chan); 2271 } 2272 2273 /** 2274 * stmmac_dma_operation_mode - HW DMA operation mode 2275 * @priv: driver private structure 2276 * 
Description: it is used for configuring the DMA operation mode register in 2277 * order to program the tx/rx DMA thresholds or Store-And-Forward mode. 2278 */ 2279 static void stmmac_dma_operation_mode(struct stmmac_priv *priv) 2280 { 2281 u32 rx_channels_count = priv->plat->rx_queues_to_use; 2282 u32 tx_channels_count = priv->plat->tx_queues_to_use; 2283 int rxfifosz = priv->plat->rx_fifo_size; 2284 int txfifosz = priv->plat->tx_fifo_size; 2285 u32 txmode = 0; 2286 u32 rxmode = 0; 2287 u32 chan = 0; 2288 u8 qmode = 0; 2289 2290 if (rxfifosz == 0) 2291 rxfifosz = priv->dma_cap.rx_fifo_size; 2292 if (txfifosz == 0) 2293 txfifosz = priv->dma_cap.tx_fifo_size; 2294 2295 /* Adjust for real per queue fifo size */ 2296 rxfifosz /= rx_channels_count; 2297 txfifosz /= tx_channels_count; 2298 2299 if (priv->plat->force_thresh_dma_mode) { 2300 txmode = tc; 2301 rxmode = tc; 2302 } else if (priv->plat->force_sf_dma_mode || priv->plat->tx_coe) { 2303 /* 2304 * In the case of GMAC, SF mode can be enabled to perform the 2305 * TX COE in HW. This depends on: 2306 * 1) TX COE actually being supported; 2307 * 2) the Jumbo frame support not being buggy in a way that 2308 * requires csum insertion in the TDES to be skipped. 2309 */ 2310 txmode = SF_DMA_MODE; 2311 rxmode = SF_DMA_MODE; 2312 priv->xstats.threshold = SF_DMA_MODE; 2313 } else { 2314 txmode = tc; 2315 rxmode = SF_DMA_MODE; 2316 } 2317 2318 /* configure all channels */ 2319 for (chan = 0; chan < rx_channels_count; chan++) { 2320 struct stmmac_rx_queue *rx_q = &priv->rx_queue[chan]; 2321 u32 buf_size; 2322 2323 qmode = priv->plat->rx_queues_cfg[chan].mode_to_use; 2324 2325 stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan, 2326 rxfifosz, qmode); 2327 2328 if (rx_q->xsk_pool) { 2329 buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool); 2330 stmmac_set_dma_bfsize(priv, priv->ioaddr, 2331 buf_size, 2332 chan); 2333 } else { 2334 stmmac_set_dma_bfsize(priv, priv->ioaddr, 2335 priv->dma_buf_sz, 2336 chan); 2337 } 2338 } 2339 2340 for (chan = 0; chan < tx_channels_count; chan++) { 2341 qmode = priv->plat->tx_queues_cfg[chan].mode_to_use; 2342 2343 stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan, 2344 txfifosz, qmode); 2345 } 2346 } 2347 2348 static bool stmmac_xdp_xmit_zc(struct stmmac_priv *priv, u32 queue, u32 budget) 2349 { 2350 struct netdev_queue *nq = netdev_get_tx_queue(priv->dev, queue); 2351 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue]; 2352 struct xsk_buff_pool *pool = tx_q->xsk_pool; 2353 unsigned int entry = tx_q->cur_tx; 2354 struct dma_desc *tx_desc = NULL; 2355 struct xdp_desc xdp_desc; 2356 bool work_done = true; 2357 2358 /* Avoid TX time-outs as we share the queue with the slow path */ 2359 nq->trans_start = jiffies; 2360 2361 budget = min(budget, stmmac_tx_avail(priv, queue)); 2362 2363 while (budget-- > 0) { 2364 dma_addr_t dma_addr; 2365 bool set_ic; 2366 2367 /* We are sharing with the slow path and stop XSK TX desc submission when 2368 * the available TX ring space is below the threshold.
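 * Illustrative example (ring size is hypothetical): with a 512-entry TX
 * ring, zero-copy submission pauses once stmmac_tx_avail() drops below
 * STMMAC_TX_XSK_AVAIL, keeping a small reserve of descriptors so the
 * slow (ndo_start_xmit) path is never starved by XSK traffic.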
2369 */ 2370 if (unlikely(stmmac_tx_avail(priv, queue) < STMMAC_TX_XSK_AVAIL) || 2371 !netif_carrier_ok(priv->dev)) { 2372 work_done = false; 2373 break; 2374 } 2375 2376 if (!xsk_tx_peek_desc(pool, &xdp_desc)) 2377 break; 2378 2379 if (likely(priv->extend_desc)) 2380 tx_desc = (struct dma_desc *)(tx_q->dma_etx + entry); 2381 else if (tx_q->tbs & STMMAC_TBS_AVAIL) 2382 tx_desc = &tx_q->dma_entx[entry].basic; 2383 else 2384 tx_desc = tx_q->dma_tx + entry; 2385 2386 dma_addr = xsk_buff_raw_get_dma(pool, xdp_desc.addr); 2387 xsk_buff_raw_dma_sync_for_device(pool, dma_addr, xdp_desc.len); 2388 2389 tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XSK_TX; 2390 2391 /* To return an XDP buffer to the XSK pool, we simply call 2392 * xsk_tx_completed(), so we don't need to fill up 2393 * 'buf' and 'xdpf'. 2394 */ 2395 tx_q->tx_skbuff_dma[entry].buf = 0; 2396 tx_q->xdpf[entry] = NULL; 2397 2398 tx_q->tx_skbuff_dma[entry].map_as_page = false; 2399 tx_q->tx_skbuff_dma[entry].len = xdp_desc.len; 2400 tx_q->tx_skbuff_dma[entry].last_segment = true; 2401 tx_q->tx_skbuff_dma[entry].is_jumbo = false; 2402 2403 stmmac_set_desc_addr(priv, tx_desc, dma_addr); 2404 2405 tx_q->tx_count_frames++; 2406 2407 if (!priv->tx_coal_frames[queue]) 2408 set_ic = false; 2409 else if (tx_q->tx_count_frames % priv->tx_coal_frames[queue] == 0) 2410 set_ic = true; 2411 else 2412 set_ic = false; 2413 2414 if (set_ic) { 2415 tx_q->tx_count_frames = 0; 2416 stmmac_set_tx_ic(priv, tx_desc); 2417 priv->xstats.tx_set_ic_bit++; 2418 } 2419 2420 stmmac_prepare_tx_desc(priv, tx_desc, 1, xdp_desc.len, 2421 true, priv->mode, true, true, 2422 xdp_desc.len); 2423 2424 stmmac_enable_dma_transmission(priv, priv->ioaddr); 2425 2426 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_tx_size); 2427 entry = tx_q->cur_tx; 2428 } 2429 2430 if (tx_desc) { 2431 stmmac_flush_tx_descriptors(priv, queue); 2432 xsk_tx_release(pool); 2433 } 2434 2435 /* Return true if both of the following conditions are met: 2436 * a) TX budget is still available 2437 * b) work_done == true, i.e. the XSK TX desc peek found the ring empty 2438 * (no more pending XSK TX frames for transmission) 2439 */ 2440 return !!budget && work_done; 2441 } 2442 2443 /** 2444 * stmmac_tx_clean - to manage the transmission completion 2445 * @priv: driver private structure 2446 * @budget: napi budget limiting this function's packet handling 2447 * @queue: TX queue index 2448 * Description: it reclaims the transmit resources after transmission completes.
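 *
 * Ring bookkeeping sketch: entries in [dirty_tx, cur_tx) modulo
 * dma_tx_size are awaiting completion, so the reclaim loop is roughly
 *
 *	entry = tx_q->dirty_tx;
 *	while (entry != tx_q->cur_tx && count < priv->dma_tx_size)
 *		entry = STMMAC_GET_ENTRY(entry, priv->dma_tx_size);
 *
 * and it stops early whenever a descriptor is still owned by the DMA.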
2449 */ 2450 static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue) 2451 { 2452 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue]; 2453 unsigned int bytes_compl = 0, pkts_compl = 0; 2454 unsigned int entry, xmits = 0, count = 0; 2455 2456 __netif_tx_lock_bh(netdev_get_tx_queue(priv->dev, queue)); 2457 2458 priv->xstats.tx_clean++; 2459 2460 tx_q->xsk_frames_done = 0; 2461 2462 entry = tx_q->dirty_tx; 2463 2464 /* Try to clean all TX complete frame in 1 shot */ 2465 while ((entry != tx_q->cur_tx) && count < priv->dma_tx_size) { 2466 struct xdp_frame *xdpf; 2467 struct sk_buff *skb; 2468 struct dma_desc *p; 2469 int status; 2470 2471 if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_TX || 2472 tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_NDO) { 2473 xdpf = tx_q->xdpf[entry]; 2474 skb = NULL; 2475 } else if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_SKB) { 2476 xdpf = NULL; 2477 skb = tx_q->tx_skbuff[entry]; 2478 } else { 2479 xdpf = NULL; 2480 skb = NULL; 2481 } 2482 2483 if (priv->extend_desc) 2484 p = (struct dma_desc *)(tx_q->dma_etx + entry); 2485 else if (tx_q->tbs & STMMAC_TBS_AVAIL) 2486 p = &tx_q->dma_entx[entry].basic; 2487 else 2488 p = tx_q->dma_tx + entry; 2489 2490 status = stmmac_tx_status(priv, &priv->dev->stats, 2491 &priv->xstats, p, priv->ioaddr); 2492 /* Check if the descriptor is owned by the DMA */ 2493 if (unlikely(status & tx_dma_own)) 2494 break; 2495 2496 count++; 2497 2498 /* Make sure descriptor fields are read after reading 2499 * the own bit. 2500 */ 2501 dma_rmb(); 2502 2503 /* Just consider the last segment and ...*/ 2504 if (likely(!(status & tx_not_ls))) { 2505 /* ... verify the status error condition */ 2506 if (unlikely(status & tx_err)) { 2507 priv->dev->stats.tx_errors++; 2508 } else { 2509 priv->dev->stats.tx_packets++; 2510 priv->xstats.tx_pkt_n++; 2511 priv->xstats.txq_stats[queue].tx_pkt_n++; 2512 } 2513 if (skb) 2514 stmmac_get_tx_hwtstamp(priv, p, skb); 2515 } 2516 2517 if (likely(tx_q->tx_skbuff_dma[entry].buf && 2518 tx_q->tx_skbuff_dma[entry].buf_type != STMMAC_TXBUF_T_XDP_TX)) { 2519 if (tx_q->tx_skbuff_dma[entry].map_as_page) 2520 dma_unmap_page(priv->device, 2521 tx_q->tx_skbuff_dma[entry].buf, 2522 tx_q->tx_skbuff_dma[entry].len, 2523 DMA_TO_DEVICE); 2524 else 2525 dma_unmap_single(priv->device, 2526 tx_q->tx_skbuff_dma[entry].buf, 2527 tx_q->tx_skbuff_dma[entry].len, 2528 DMA_TO_DEVICE); 2529 tx_q->tx_skbuff_dma[entry].buf = 0; 2530 tx_q->tx_skbuff_dma[entry].len = 0; 2531 tx_q->tx_skbuff_dma[entry].map_as_page = false; 2532 } 2533 2534 stmmac_clean_desc3(priv, tx_q, p); 2535 2536 tx_q->tx_skbuff_dma[entry].last_segment = false; 2537 tx_q->tx_skbuff_dma[entry].is_jumbo = false; 2538 2539 if (xdpf && 2540 tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_TX) { 2541 xdp_return_frame_rx_napi(xdpf); 2542 tx_q->xdpf[entry] = NULL; 2543 } 2544 2545 if (xdpf && 2546 tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_NDO) { 2547 xdp_return_frame(xdpf); 2548 tx_q->xdpf[entry] = NULL; 2549 } 2550 2551 if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XSK_TX) 2552 tx_q->xsk_frames_done++; 2553 2554 if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_SKB) { 2555 if (likely(skb)) { 2556 pkts_compl++; 2557 bytes_compl += skb->len; 2558 dev_consume_skb_any(skb); 2559 tx_q->tx_skbuff[entry] = NULL; 2560 } 2561 } 2562 2563 stmmac_release_tx_desc(priv, p, priv->mode); 2564 2565 entry = STMMAC_GET_ENTRY(entry, priv->dma_tx_size); 2566 } 2567 tx_q->dirty_tx = entry; 2568 
2569 netdev_tx_completed_queue(netdev_get_tx_queue(priv->dev, queue), 2570 pkts_compl, bytes_compl); 2571 2572 if (unlikely(netif_tx_queue_stopped(netdev_get_tx_queue(priv->dev, 2573 queue))) && 2574 stmmac_tx_avail(priv, queue) > STMMAC_TX_THRESH(priv)) { 2575 2576 netif_dbg(priv, tx_done, priv->dev, 2577 "%s: restart transmit\n", __func__); 2578 netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, queue)); 2579 } 2580 2581 if (tx_q->xsk_pool) { 2582 bool work_done; 2583 2584 if (tx_q->xsk_frames_done) 2585 xsk_tx_completed(tx_q->xsk_pool, tx_q->xsk_frames_done); 2586 2587 if (xsk_uses_need_wakeup(tx_q->xsk_pool)) 2588 xsk_set_tx_need_wakeup(tx_q->xsk_pool); 2589 2590 /* For XSK TX, we try to send as many frames as possible. 2591 * If the XSK work is done (XSK TX desc empty and budget still 2592 * available), return "budget - 1" to re-enable the TX IRQ. 2593 * Else, return "budget" to make NAPI continue polling. 2594 */ 2595 work_done = stmmac_xdp_xmit_zc(priv, queue, 2596 STMMAC_XSK_TX_BUDGET_MAX); 2597 if (work_done) 2598 xmits = budget - 1; 2599 else 2600 xmits = budget; 2601 } 2602 2603 if (priv->eee_enabled && !priv->tx_path_in_lpi_mode && 2604 priv->eee_sw_timer_en) { 2605 stmmac_enable_eee_mode(priv); 2606 mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer)); 2607 } 2608 2609 /* We still have pending packets, so re-arm the coalesce timer for another pass */ 2610 if (tx_q->dirty_tx != tx_q->cur_tx) 2611 hrtimer_start(&tx_q->txtimer, 2612 STMMAC_COAL_TIMER(priv->tx_coal_timer[queue]), 2613 HRTIMER_MODE_REL); 2614 2615 __netif_tx_unlock_bh(netdev_get_tx_queue(priv->dev, queue)); 2616 2617 /* Combine decisions from TX clean and XSK TX */ 2618 return max(count, xmits); 2619 } 2620 2621 /** 2622 * stmmac_tx_err - to manage the tx error 2623 * @priv: driver private structure 2624 * @chan: channel index 2625 * Description: it cleans the descriptors and restarts the transmission 2626 * in case of transmission errors. 2627 */ 2628 static void stmmac_tx_err(struct stmmac_priv *priv, u32 chan) 2629 { 2630 struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan]; 2631 2632 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, chan)); 2633 2634 stmmac_stop_tx_dma(priv, chan); 2635 dma_free_tx_skbufs(priv, chan); 2636 stmmac_clear_tx_descriptors(priv, chan); 2637 tx_q->dirty_tx = 0; 2638 tx_q->cur_tx = 0; 2639 tx_q->mss = 0; 2640 netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, chan)); 2641 stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg, 2642 tx_q->dma_tx_phy, chan); 2643 stmmac_start_tx_dma(priv, chan); 2644 2645 priv->dev->stats.tx_errors++; 2646 netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, chan)); 2647 } 2648 2649 /** 2650 * stmmac_set_dma_operation_mode - Set DMA operation mode by channel 2651 * @priv: driver private structure 2652 * @txmode: TX operating mode 2653 * @rxmode: RX operating mode 2654 * @chan: channel index 2655 * Description: it is used for configuring the DMA operation mode at 2656 * runtime in order to program the tx/rx DMA thresholds or Store-And-Forward 2657 * mode.
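 *
 * Worked example (sizes are hypothetical): with an 8192-byte RX FIFO
 * shared by four RX channels, each channel is programmed with
 * rxfifosz = 8192 / 4 = 2048 bytes, matching the per-queue split used here.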
2658 */ 2659 static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode, 2660 u32 rxmode, u32 chan) 2661 { 2662 u8 rxqmode = priv->plat->rx_queues_cfg[chan].mode_to_use; 2663 u8 txqmode = priv->plat->tx_queues_cfg[chan].mode_to_use; 2664 u32 rx_channels_count = priv->plat->rx_queues_to_use; 2665 u32 tx_channels_count = priv->plat->tx_queues_to_use; 2666 int rxfifosz = priv->plat->rx_fifo_size; 2667 int txfifosz = priv->plat->tx_fifo_size; 2668 2669 if (rxfifosz == 0) 2670 rxfifosz = priv->dma_cap.rx_fifo_size; 2671 if (txfifosz == 0) 2672 txfifosz = priv->dma_cap.tx_fifo_size; 2673 2674 /* Adjust for real per queue fifo size */ 2675 rxfifosz /= rx_channels_count; 2676 txfifosz /= tx_channels_count; 2677 2678 stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan, rxfifosz, rxqmode); 2679 stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan, txfifosz, txqmode); 2680 } 2681 2682 static bool stmmac_safety_feat_interrupt(struct stmmac_priv *priv) 2683 { 2684 int ret; 2685 2686 ret = stmmac_safety_feat_irq_status(priv, priv->dev, 2687 priv->ioaddr, priv->dma_cap.asp, &priv->sstats); 2688 if (ret && (ret != -EINVAL)) { 2689 stmmac_global_err(priv); 2690 return true; 2691 } 2692 2693 return false; 2694 } 2695 2696 static int stmmac_napi_check(struct stmmac_priv *priv, u32 chan, u32 dir) 2697 { 2698 int status = stmmac_dma_interrupt_status(priv, priv->ioaddr, 2699 &priv->xstats, chan, dir); 2700 struct stmmac_rx_queue *rx_q = &priv->rx_queue[chan]; 2701 struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan]; 2702 struct stmmac_channel *ch = &priv->channel[chan]; 2703 struct napi_struct *rx_napi; 2704 struct napi_struct *tx_napi; 2705 unsigned long flags; 2706 2707 rx_napi = rx_q->xsk_pool ? &ch->rxtx_napi : &ch->rx_napi; 2708 tx_napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi; 2709 2710 if ((status & handle_rx) && (chan < priv->plat->rx_queues_to_use)) { 2711 if (napi_schedule_prep(rx_napi)) { 2712 spin_lock_irqsave(&ch->lock, flags); 2713 stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 0); 2714 spin_unlock_irqrestore(&ch->lock, flags); 2715 __napi_schedule(rx_napi); 2716 } 2717 } 2718 2719 if ((status & handle_tx) && (chan < priv->plat->tx_queues_to_use)) { 2720 if (napi_schedule_prep(tx_napi)) { 2721 spin_lock_irqsave(&ch->lock, flags); 2722 stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 0, 1); 2723 spin_unlock_irqrestore(&ch->lock, flags); 2724 __napi_schedule(tx_napi); 2725 } 2726 } 2727 2728 return status; 2729 } 2730 2731 /** 2732 * stmmac_dma_interrupt - DMA ISR 2733 * @priv: driver private structure 2734 * Description: this is the DMA ISR. It is called by the main ISR. 2735 * It calls the dwmac DMA routine and schedules the poll method in case 2736 * some work can be done. 2737 */ 2738 static void stmmac_dma_interrupt(struct stmmac_priv *priv) 2739 { 2740 u32 tx_channel_count = priv->plat->tx_queues_to_use; 2741 u32 rx_channel_count = priv->plat->rx_queues_to_use; 2742 u32 channels_to_check = tx_channel_count > rx_channel_count ? 2743 tx_channel_count : rx_channel_count; 2744 u32 chan; 2745 int status[max_t(u32, MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES)]; 2746 2747 /* Make sure we never check beyond our status buffer.
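 * For example, if a platform erroneously asked for more channels than
 * max_t(u32, MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES), channels_to_check
 * would be clamped to ARRAY_SIZE(status) instead of overrunning the
 * on-stack status[] buffer.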
*/ 2748 if (WARN_ON_ONCE(channels_to_check > ARRAY_SIZE(status))) 2749 channels_to_check = ARRAY_SIZE(status); 2750 2751 for (chan = 0; chan < channels_to_check; chan++) 2752 status[chan] = stmmac_napi_check(priv, chan, 2753 DMA_DIR_RXTX); 2754 2755 for (chan = 0; chan < tx_channel_count; chan++) { 2756 if (unlikely(status[chan] & tx_hard_error_bump_tc)) { 2757 /* Try to bump up the dma threshold on this failure */ 2758 if (unlikely(priv->xstats.threshold != SF_DMA_MODE) && 2759 (tc <= 256)) { 2760 tc += 64; 2761 if (priv->plat->force_thresh_dma_mode) 2762 stmmac_set_dma_operation_mode(priv, 2763 tc, 2764 tc, 2765 chan); 2766 else 2767 stmmac_set_dma_operation_mode(priv, 2768 tc, 2769 SF_DMA_MODE, 2770 chan); 2771 priv->xstats.threshold = tc; 2772 } 2773 } else if (unlikely(status[chan] == tx_hard_error)) { 2774 stmmac_tx_err(priv, chan); 2775 } 2776 } 2777 } 2778 2779 /** 2780 * stmmac_mmc_setup - setup the MAC Management Counters (MMC) 2781 * @priv: driver private structure 2782 * Description: this masks the MMC irq; the counters are managed in SW. 2783 */ 2784 static void stmmac_mmc_setup(struct stmmac_priv *priv) 2785 { 2786 unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET | 2787 MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET; 2788 2789 stmmac_mmc_intr_all_mask(priv, priv->mmcaddr); 2790 2791 if (priv->dma_cap.rmon) { 2792 stmmac_mmc_ctrl(priv, priv->mmcaddr, mode); 2793 memset(&priv->mmc, 0, sizeof(struct stmmac_counters)); 2794 } else 2795 netdev_info(priv->dev, "No MAC Management Counters available\n"); 2796 } 2797 2798 /** 2799 * stmmac_get_hw_features - get MAC capabilities from the HW cap. register. 2800 * @priv: driver private structure 2801 * Description: 2802 * newer GMAC chip generations have a register to indicate the 2803 * presence of the optional features/functions. 2804 * It can also be used to override the values passed through the 2805 * platform, which remain necessary for the old MAC10/100 and GMAC chips. 2806 */ 2807 static int stmmac_get_hw_features(struct stmmac_priv *priv) 2808 { 2809 return stmmac_get_hw_feature(priv, priv->ioaddr, &priv->dma_cap) == 0; 2810 } 2811 2812 /** 2813 * stmmac_check_ether_addr - check if the MAC addr is valid 2814 * @priv: driver private structure 2815 * Description: 2816 * it verifies that the MAC address is valid; in case of failure it reads 2817 * the address from the HW or, as a last resort, generates a random one. 2818 */ 2819 static void stmmac_check_ether_addr(struct stmmac_priv *priv) 2820 { 2821 u8 addr[ETH_ALEN]; 2822 2823 if (!is_valid_ether_addr(priv->dev->dev_addr)) { 2824 stmmac_get_umac_addr(priv, priv->hw, addr, 0); 2825 if (is_valid_ether_addr(addr)) 2826 eth_hw_addr_set(priv->dev, addr); 2827 else 2828 eth_hw_addr_random(priv->dev); 2829 dev_info(priv->device, "device MAC address %pM\n", 2830 priv->dev->dev_addr); 2831 } 2832 } 2833 2834 /** 2835 * stmmac_init_dma_engine - DMA init. 2836 * @priv: driver private structure 2837 * Description: 2838 * It inits the DMA by invoking the specific MAC/GMAC callback. 2839 * Some DMA parameters can be passed from the platform; 2840 * if these are not passed, a default is kept for the MAC or GMAC.
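 *
 * Rough order of the operations performed below (sketch):
 *
 *	stmmac_reset()		- SW reset of the DMA
 *	stmmac_dma_init()	- bus mode, ATDS flag
 *	stmmac_axi()		- optional AXI tuning from platform data
 *	stmmac_init_chan()	- per-channel CSR setup
 *	stmmac_init_rx_chan()/stmmac_init_tx_chan() - ring base addresses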
2841 */ 2842 static int stmmac_init_dma_engine(struct stmmac_priv *priv) 2843 { 2844 u32 rx_channels_count = priv->plat->rx_queues_to_use; 2845 u32 tx_channels_count = priv->plat->tx_queues_to_use; 2846 u32 dma_csr_ch = max(rx_channels_count, tx_channels_count); 2847 struct stmmac_rx_queue *rx_q; 2848 struct stmmac_tx_queue *tx_q; 2849 u32 chan = 0; 2850 int atds = 0; 2851 int ret = 0; 2852 2853 if (!priv->plat->dma_cfg || !priv->plat->dma_cfg->pbl) { 2854 dev_err(priv->device, "Invalid DMA configuration\n"); 2855 return -EINVAL; 2856 } 2857 2858 if (priv->extend_desc && (priv->mode == STMMAC_RING_MODE)) 2859 atds = 1; 2860 2861 ret = stmmac_reset(priv, priv->ioaddr); 2862 if (ret) { 2863 dev_err(priv->device, "Failed to reset the dma\n"); 2864 return ret; 2865 } 2866 2867 /* DMA Configuration */ 2868 stmmac_dma_init(priv, priv->ioaddr, priv->plat->dma_cfg, atds); 2869 2870 if (priv->plat->axi) 2871 stmmac_axi(priv, priv->ioaddr, priv->plat->axi); 2872 2873 /* DMA CSR Channel configuration */ 2874 for (chan = 0; chan < dma_csr_ch; chan++) 2875 stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan); 2876 2877 /* DMA RX Channel Configuration */ 2878 for (chan = 0; chan < rx_channels_count; chan++) { 2879 rx_q = &priv->rx_queue[chan]; 2880 2881 stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg, 2882 rx_q->dma_rx_phy, chan); 2883 2884 rx_q->rx_tail_addr = rx_q->dma_rx_phy + 2885 (rx_q->buf_alloc_num * 2886 sizeof(struct dma_desc)); 2887 stmmac_set_rx_tail_ptr(priv, priv->ioaddr, 2888 rx_q->rx_tail_addr, chan); 2889 } 2890 2891 /* DMA TX Channel Configuration */ 2892 for (chan = 0; chan < tx_channels_count; chan++) { 2893 tx_q = &priv->tx_queue[chan]; 2894 2895 stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg, 2896 tx_q->dma_tx_phy, chan); 2897 2898 tx_q->tx_tail_addr = tx_q->dma_tx_phy; 2899 stmmac_set_tx_tail_ptr(priv, priv->ioaddr, 2900 tx_q->tx_tail_addr, chan); 2901 } 2902 2903 return ret; 2904 } 2905 2906 static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue) 2907 { 2908 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue]; 2909 2910 hrtimer_start(&tx_q->txtimer, 2911 STMMAC_COAL_TIMER(priv->tx_coal_timer[queue]), 2912 HRTIMER_MODE_REL); 2913 } 2914 2915 /** 2916 * stmmac_tx_timer - mitigation sw timer for tx. 2917 * @t: data pointer 2918 * Description: 2919 * This is the timer handler to directly invoke the stmmac_tx_clean. 2920 */ 2921 static enum hrtimer_restart stmmac_tx_timer(struct hrtimer *t) 2922 { 2923 struct stmmac_tx_queue *tx_q = container_of(t, struct stmmac_tx_queue, txtimer); 2924 struct stmmac_priv *priv = tx_q->priv_data; 2925 struct stmmac_channel *ch; 2926 struct napi_struct *napi; 2927 2928 ch = &priv->channel[tx_q->queue_index]; 2929 napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi; 2930 2931 if (likely(napi_schedule_prep(napi))) { 2932 unsigned long flags; 2933 2934 spin_lock_irqsave(&ch->lock, flags); 2935 stmmac_disable_dma_irq(priv, priv->ioaddr, ch->index, 0, 1); 2936 spin_unlock_irqrestore(&ch->lock, flags); 2937 __napi_schedule(napi); 2938 } 2939 2940 return HRTIMER_NORESTART; 2941 } 2942 2943 /** 2944 * stmmac_init_coalesce - init mitigation options. 2945 * @priv: driver private structure 2946 * Description: 2947 * This inits the coalesce parameters: i.e. timer rate, 2948 * timer handler and default threshold used for enabling the 2949 * interrupt on completion bit. 
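 *
 * The timer is re-armed by stmmac_tx_timer_arm() on the transmit path
 * and, on expiry, stmmac_tx_timer() schedules the TX (or combined
 * rx-tx) NAPI instance, e.g.:
 *
 *	hrtimer_start(&tx_q->txtimer,
 *		      STMMAC_COAL_TIMER(priv->tx_coal_timer[queue]),
 *		      HRTIMER_MODE_REL);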
2950 */ 2951 static void stmmac_init_coalesce(struct stmmac_priv *priv) 2952 { 2953 u32 tx_channel_count = priv->plat->tx_queues_to_use; 2954 u32 rx_channel_count = priv->plat->rx_queues_to_use; 2955 u32 chan; 2956 2957 for (chan = 0; chan < tx_channel_count; chan++) { 2958 struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan]; 2959 2960 priv->tx_coal_frames[chan] = STMMAC_TX_FRAMES; 2961 priv->tx_coal_timer[chan] = STMMAC_COAL_TX_TIMER; 2962 2963 hrtimer_init(&tx_q->txtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); 2964 tx_q->txtimer.function = stmmac_tx_timer; 2965 } 2966 2967 for (chan = 0; chan < rx_channel_count; chan++) 2968 priv->rx_coal_frames[chan] = STMMAC_RX_FRAMES; 2969 } 2970 2971 static void stmmac_set_rings_length(struct stmmac_priv *priv) 2972 { 2973 u32 rx_channels_count = priv->plat->rx_queues_to_use; 2974 u32 tx_channels_count = priv->plat->tx_queues_to_use; 2975 u32 chan; 2976 2977 /* set TX ring length */ 2978 for (chan = 0; chan < tx_channels_count; chan++) 2979 stmmac_set_tx_ring_len(priv, priv->ioaddr, 2980 (priv->dma_tx_size - 1), chan); 2981 2982 /* set RX ring length */ 2983 for (chan = 0; chan < rx_channels_count; chan++) 2984 stmmac_set_rx_ring_len(priv, priv->ioaddr, 2985 (priv->dma_rx_size - 1), chan); 2986 } 2987 2988 /** 2989 * stmmac_set_tx_queue_weight - Set TX queue weight 2990 * @priv: driver private structure 2991 * Description: It is used for setting TX queues weight 2992 */ 2993 static void stmmac_set_tx_queue_weight(struct stmmac_priv *priv) 2994 { 2995 u32 tx_queues_count = priv->plat->tx_queues_to_use; 2996 u32 weight; 2997 u32 queue; 2998 2999 for (queue = 0; queue < tx_queues_count; queue++) { 3000 weight = priv->plat->tx_queues_cfg[queue].weight; 3001 stmmac_set_mtl_tx_queue_weight(priv, priv->hw, weight, queue); 3002 } 3003 } 3004 3005 /** 3006 * stmmac_configure_cbs - Configure CBS in TX queue 3007 * @priv: driver private structure 3008 * Description: It is used for configuring CBS in AVB TX queues 3009 */ 3010 static void stmmac_configure_cbs(struct stmmac_priv *priv) 3011 { 3012 u32 tx_queues_count = priv->plat->tx_queues_to_use; 3013 u32 mode_to_use; 3014 u32 queue; 3015 3016 /* queue 0 is reserved for legacy traffic */ 3017 for (queue = 1; queue < tx_queues_count; queue++) { 3018 mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use; 3019 if (mode_to_use == MTL_QUEUE_DCB) 3020 continue; 3021 3022 stmmac_config_cbs(priv, priv->hw, 3023 priv->plat->tx_queues_cfg[queue].send_slope, 3024 priv->plat->tx_queues_cfg[queue].idle_slope, 3025 priv->plat->tx_queues_cfg[queue].high_credit, 3026 priv->plat->tx_queues_cfg[queue].low_credit, 3027 queue); 3028 } 3029 } 3030 3031 /** 3032 * stmmac_rx_queue_dma_chan_map - Map RX queue to RX dma channel 3033 * @priv: driver private structure 3034 * Description: It is used for mapping RX queues to RX dma channels 3035 */ 3036 static void stmmac_rx_queue_dma_chan_map(struct stmmac_priv *priv) 3037 { 3038 u32 rx_queues_count = priv->plat->rx_queues_to_use; 3039 u32 queue; 3040 u32 chan; 3041 3042 for (queue = 0; queue < rx_queues_count; queue++) { 3043 chan = priv->plat->rx_queues_cfg[queue].chan; 3044 stmmac_map_mtl_to_dma(priv, priv->hw, queue, chan); 3045 } 3046 } 3047 3048 /** 3049 * stmmac_mac_config_rx_queues_prio - Configure RX Queue priority 3050 * @priv: driver private structure 3051 * Description: It is used for configuring the RX Queue Priority 3052 */ 3053 static void stmmac_mac_config_rx_queues_prio(struct stmmac_priv *priv) 3054 { 3055 u32 rx_queues_count = priv->plat->rx_queues_to_use; 3056 u32 
queue; 3057 u32 prio; 3058 3059 for (queue = 0; queue < rx_queues_count; queue++) { 3060 if (!priv->plat->rx_queues_cfg[queue].use_prio) 3061 continue; 3062 3063 prio = priv->plat->rx_queues_cfg[queue].prio; 3064 stmmac_rx_queue_prio(priv, priv->hw, prio, queue); 3065 } 3066 } 3067 3068 /** 3069 * stmmac_mac_config_tx_queues_prio - Configure TX Queue priority 3070 * @priv: driver private structure 3071 * Description: It is used for configuring the TX Queue Priority 3072 */ 3073 static void stmmac_mac_config_tx_queues_prio(struct stmmac_priv *priv) 3074 { 3075 u32 tx_queues_count = priv->plat->tx_queues_to_use; 3076 u32 queue; 3077 u32 prio; 3078 3079 for (queue = 0; queue < tx_queues_count; queue++) { 3080 if (!priv->plat->tx_queues_cfg[queue].use_prio) 3081 continue; 3082 3083 prio = priv->plat->tx_queues_cfg[queue].prio; 3084 stmmac_tx_queue_prio(priv, priv->hw, prio, queue); 3085 } 3086 } 3087 3088 /** 3089 * stmmac_mac_config_rx_queues_routing - Configure RX Queue Routing 3090 * @priv: driver private structure 3091 * Description: It is used for configuring the RX queue routing 3092 */ 3093 static void stmmac_mac_config_rx_queues_routing(struct stmmac_priv *priv) 3094 { 3095 u32 rx_queues_count = priv->plat->rx_queues_to_use; 3096 u32 queue; 3097 u8 packet; 3098 3099 for (queue = 0; queue < rx_queues_count; queue++) { 3100 /* no specific packet type routing specified for the queue */ 3101 if (priv->plat->rx_queues_cfg[queue].pkt_route == 0x0) 3102 continue; 3103 3104 packet = priv->plat->rx_queues_cfg[queue].pkt_route; 3105 stmmac_rx_queue_routing(priv, priv->hw, packet, queue); 3106 } 3107 } 3108 3109 static void stmmac_mac_config_rss(struct stmmac_priv *priv) 3110 { 3111 if (!priv->dma_cap.rssen || !priv->plat->rss_en) { 3112 priv->rss.enable = false; 3113 return; 3114 } 3115 3116 if (priv->dev->features & NETIF_F_RXHASH) 3117 priv->rss.enable = true; 3118 else 3119 priv->rss.enable = false; 3120 3121 stmmac_rss_configure(priv, priv->hw, &priv->rss, 3122 priv->plat->rx_queues_to_use); 3123 } 3124 3125 /** 3126 * stmmac_mtl_configuration - Configure MTL 3127 * @priv: driver private structure 3128 * Description: It is used for configuring the MTL 3129 */ 3130 static void stmmac_mtl_configuration(struct stmmac_priv *priv) 3131 { 3132 u32 rx_queues_count = priv->plat->rx_queues_to_use; 3133 u32 tx_queues_count = priv->plat->tx_queues_to_use; 3134 3135 if (tx_queues_count > 1) 3136 stmmac_set_tx_queue_weight(priv); 3137 3138 /* Configure MTL RX algorithms */ 3139 if (rx_queues_count > 1) 3140 stmmac_prog_mtl_rx_algorithms(priv, priv->hw, 3141 priv->plat->rx_sched_algorithm); 3142 3143 /* Configure MTL TX algorithms */ 3144 if (tx_queues_count > 1) 3145 stmmac_prog_mtl_tx_algorithms(priv, priv->hw, 3146 priv->plat->tx_sched_algorithm); 3147 3148 /* Configure CBS in AVB TX queues */ 3149 if (tx_queues_count > 1) 3150 stmmac_configure_cbs(priv); 3151 3152 /* Map RX MTL to DMA channels */ 3153 stmmac_rx_queue_dma_chan_map(priv); 3154 3155 /* Enable MAC RX Queues */ 3156 stmmac_mac_enable_rx_queues(priv); 3157 3158 /* Set RX priorities */ 3159 if (rx_queues_count > 1) 3160 stmmac_mac_config_rx_queues_prio(priv); 3161 3162 /* Set TX priorities */ 3163 if (tx_queues_count > 1) 3164 stmmac_mac_config_tx_queues_prio(priv); 3165 3166 /* Set RX routing */ 3167 if (rx_queues_count > 1) 3168 stmmac_mac_config_rx_queues_routing(priv); 3169 3170 /* Receive Side Scaling */ 3171 if (rx_queues_count > 1) 3172 stmmac_mac_config_rss(priv); 3173 } 3174 3175 static void stmmac_safety_feat_configuration(struct
stmmac_priv *priv) 3176 { 3177 if (priv->dma_cap.asp) { 3178 netdev_info(priv->dev, "Enabling Safety Features\n"); 3179 stmmac_safety_feat_config(priv, priv->ioaddr, priv->dma_cap.asp, 3180 priv->plat->safety_feat_cfg); 3181 } else { 3182 netdev_info(priv->dev, "No Safety Features support found\n"); 3183 } 3184 } 3185 3186 static int stmmac_fpe_start_wq(struct stmmac_priv *priv) 3187 { 3188 char *name; 3189 3190 clear_bit(__FPE_TASK_SCHED, &priv->fpe_task_state); 3191 clear_bit(__FPE_REMOVING, &priv->fpe_task_state); 3192 3193 name = priv->wq_name; 3194 sprintf(name, "%s-fpe", priv->dev->name); 3195 3196 priv->fpe_wq = create_singlethread_workqueue(name); 3197 if (!priv->fpe_wq) { 3198 netdev_err(priv->dev, "%s: Failed to create workqueue\n", name); 3199 3200 return -ENOMEM; 3201 } 3202 netdev_info(priv->dev, "FPE workqueue start"); 3203 3204 return 0; 3205 } 3206 3207 /** 3208 * stmmac_hw_setup - setup mac in a usable state. 3209 * @dev : pointer to the device structure. 3210 * @init_ptp: initialize PTP if set 3211 * Description: 3212 * this is the main function to setup the HW in a usable state because the 3213 * dma engine is reset, the core registers are configured (e.g. AXI, 3214 * Checksum features, timers). The DMA is ready to start receiving and 3215 * transmitting. 3216 * Return value: 3217 * 0 on success and an appropriate (-)ve integer as defined in errno.h 3218 * file on failure. 3219 */ 3220 static int stmmac_hw_setup(struct net_device *dev, bool init_ptp) 3221 { 3222 struct stmmac_priv *priv = netdev_priv(dev); 3223 u32 rx_cnt = priv->plat->rx_queues_to_use; 3224 u32 tx_cnt = priv->plat->tx_queues_to_use; 3225 bool sph_en; 3226 u32 chan; 3227 int ret; 3228 3229 /* DMA initialization and SW reset */ 3230 ret = stmmac_init_dma_engine(priv); 3231 if (ret < 0) { 3232 netdev_err(priv->dev, "%s: DMA engine initialization failed\n", 3233 __func__); 3234 return ret; 3235 } 3236 3237 /* Copy the MAC addr into the HW */ 3238 stmmac_set_umac_addr(priv, priv->hw, dev->dev_addr, 0); 3239 3240 /* PS and related bits will be programmed according to the speed */ 3241 if (priv->hw->pcs) { 3242 int speed = priv->plat->mac_port_sel_speed; 3243 3244 if ((speed == SPEED_10) || (speed == SPEED_100) || 3245 (speed == SPEED_1000)) { 3246 priv->hw->ps = speed; 3247 } else { 3248 dev_warn(priv->device, "invalid port speed\n"); 3249 priv->hw->ps = 0; 3250 } 3251 } 3252 3253 /* Initialize the MAC Core */ 3254 stmmac_core_init(priv, priv->hw, dev); 3255 3256 /* Initialize MTL*/ 3257 stmmac_mtl_configuration(priv); 3258 3259 /* Initialize Safety Features */ 3260 stmmac_safety_feat_configuration(priv); 3261 3262 ret = stmmac_rx_ipc(priv, priv->hw); 3263 if (!ret) { 3264 netdev_warn(priv->dev, "RX IPC Checksum Offload disabled\n"); 3265 priv->plat->rx_coe = STMMAC_RX_COE_NONE; 3266 priv->hw->rx_csum = 0; 3267 } 3268 3269 /* Enable the MAC Rx/Tx */ 3270 stmmac_mac_set(priv, priv->ioaddr, true); 3271 3272 /* Set the HW DMA mode and the COE */ 3273 stmmac_dma_operation_mode(priv); 3274 3275 stmmac_mmc_setup(priv); 3276 3277 if (init_ptp) { 3278 ret = clk_prepare_enable(priv->plat->clk_ptp_ref); 3279 if (ret < 0) 3280 netdev_warn(priv->dev, "failed to enable PTP reference clock: %d\n", ret); 3281 3282 ret = stmmac_init_ptp(priv); 3283 if (ret == -EOPNOTSUPP) 3284 netdev_warn(priv->dev, "PTP not supported by HW\n"); 3285 else if (ret) 3286 netdev_warn(priv->dev, "PTP init failed\n"); 3287 } 3288 3289 priv->eee_tw_timer = STMMAC_DEFAULT_TWT_LS; 3290 3291 /* Convert the timer from msec to usec */ 3292 if 
(!priv->tx_lpi_timer) 3293 priv->tx_lpi_timer = eee_timer * 1000; 3294 3295 if (priv->use_riwt) { 3296 u32 queue; 3297 3298 for (queue = 0; queue < rx_cnt; queue++) { 3299 if (!priv->rx_riwt[queue]) 3300 priv->rx_riwt[queue] = DEF_DMA_RIWT; 3301 3302 stmmac_rx_watchdog(priv, priv->ioaddr, 3303 priv->rx_riwt[queue], queue); 3304 } 3305 } 3306 3307 if (priv->hw->pcs) 3308 stmmac_pcs_ctrl_ane(priv, priv->ioaddr, 1, priv->hw->ps, 0); 3309 3310 /* set TX and RX rings length */ 3311 stmmac_set_rings_length(priv); 3312 3313 /* Enable TSO */ 3314 if (priv->tso) { 3315 for (chan = 0; chan < tx_cnt; chan++) { 3316 struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan]; 3317 3318 /* TSO and TBS cannot co-exist */ 3319 if (tx_q->tbs & STMMAC_TBS_AVAIL) 3320 continue; 3321 3322 stmmac_enable_tso(priv, priv->ioaddr, 1, chan); 3323 } 3324 } 3325 3326 /* Enable Split Header */ 3327 sph_en = (priv->hw->rx_csum > 0) && priv->sph; 3328 for (chan = 0; chan < rx_cnt; chan++) 3329 stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan); 3330 3331 3332 /* VLAN Tag Insertion */ 3333 if (priv->dma_cap.vlins) 3334 stmmac_enable_vlan(priv, priv->hw, STMMAC_VLAN_INSERT); 3335 3336 /* TBS */ 3337 for (chan = 0; chan < tx_cnt; chan++) { 3338 struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan]; 3339 int enable = tx_q->tbs & STMMAC_TBS_AVAIL; 3340 3341 stmmac_enable_tbs(priv, priv->ioaddr, enable, chan); 3342 } 3343 3344 /* Configure real RX and TX queues */ 3345 netif_set_real_num_rx_queues(dev, priv->plat->rx_queues_to_use); 3346 netif_set_real_num_tx_queues(dev, priv->plat->tx_queues_to_use); 3347 3348 /* Start the ball rolling... */ 3349 stmmac_start_all_dma(priv); 3350 3351 if (priv->dma_cap.fpesel) { 3352 stmmac_fpe_start_wq(priv); 3353 3354 if (priv->plat->fpe_cfg->enable) 3355 stmmac_fpe_handshake(priv, true); 3356 } 3357 3358 return 0; 3359 } 3360 3361 static void stmmac_hw_teardown(struct net_device *dev) 3362 { 3363 struct stmmac_priv *priv = netdev_priv(dev); 3364 3365 clk_disable_unprepare(priv->plat->clk_ptp_ref); 3366 } 3367 3368 static void stmmac_free_irq(struct net_device *dev, 3369 enum request_irq_err irq_err, int irq_idx) 3370 { 3371 struct stmmac_priv *priv = netdev_priv(dev); 3372 int j; 3373 3374 switch (irq_err) { 3375 case REQ_IRQ_ERR_ALL: 3376 irq_idx = priv->plat->tx_queues_to_use; 3377 fallthrough; 3378 case REQ_IRQ_ERR_TX: 3379 for (j = irq_idx - 1; j >= 0; j--) { 3380 if (priv->tx_irq[j] > 0) { 3381 irq_set_affinity_hint(priv->tx_irq[j], NULL); 3382 free_irq(priv->tx_irq[j], &priv->tx_queue[j]); 3383 } 3384 } 3385 irq_idx = priv->plat->rx_queues_to_use; 3386 fallthrough; 3387 case REQ_IRQ_ERR_RX: 3388 for (j = irq_idx - 1; j >= 0; j--) { 3389 if (priv->rx_irq[j] > 0) { 3390 irq_set_affinity_hint(priv->rx_irq[j], NULL); 3391 free_irq(priv->rx_irq[j], &priv->rx_queue[j]); 3392 } 3393 } 3394 3395 if (priv->sfty_ue_irq > 0 && priv->sfty_ue_irq != dev->irq) 3396 free_irq(priv->sfty_ue_irq, dev); 3397 fallthrough; 3398 case REQ_IRQ_ERR_SFTY_UE: 3399 if (priv->sfty_ce_irq > 0 && priv->sfty_ce_irq != dev->irq) 3400 free_irq(priv->sfty_ce_irq, dev); 3401 fallthrough; 3402 case REQ_IRQ_ERR_SFTY_CE: 3403 if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq) 3404 free_irq(priv->lpi_irq, dev); 3405 fallthrough; 3406 case REQ_IRQ_ERR_LPI: 3407 if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) 3408 free_irq(priv->wol_irq, dev); 3409 fallthrough; 3410 case REQ_IRQ_ERR_WOL: 3411 free_irq(dev->irq, dev); 3412 fallthrough; 3413 case REQ_IRQ_ERR_MAC: 3414 case REQ_IRQ_ERR_NO: 3415 /* If MAC IRQ request error, no 
more IRQ to free */ 3416 break; 3417 } 3418 } 3419 3420 static int stmmac_request_irq_multi_msi(struct net_device *dev) 3421 { 3422 struct stmmac_priv *priv = netdev_priv(dev); 3423 enum request_irq_err irq_err; 3424 cpumask_t cpu_mask; 3425 int irq_idx = 0; 3426 char *int_name; 3427 int ret; 3428 int i; 3429 3430 /* For common interrupt */ 3431 int_name = priv->int_name_mac; 3432 sprintf(int_name, "%s:%s", dev->name, "mac"); 3433 ret = request_irq(dev->irq, stmmac_mac_interrupt, 3434 0, int_name, dev); 3435 if (unlikely(ret < 0)) { 3436 netdev_err(priv->dev, 3437 "%s: alloc mac MSI %d (error: %d)\n", 3438 __func__, dev->irq, ret); 3439 irq_err = REQ_IRQ_ERR_MAC; 3440 goto irq_error; 3441 } 3442 3443 /* Request the Wake IRQ in case another line 3444 * is used for WoL 3445 */ 3446 if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) { 3447 int_name = priv->int_name_wol; 3448 sprintf(int_name, "%s:%s", dev->name, "wol"); 3449 ret = request_irq(priv->wol_irq, 3450 stmmac_mac_interrupt, 3451 0, int_name, dev); 3452 if (unlikely(ret < 0)) { 3453 netdev_err(priv->dev, 3454 "%s: alloc wol MSI %d (error: %d)\n", 3455 __func__, priv->wol_irq, ret); 3456 irq_err = REQ_IRQ_ERR_WOL; 3457 goto irq_error; 3458 } 3459 } 3460 3461 /* Request the LPI IRQ in case another line 3462 * is used for LPI 3463 */ 3464 if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq) { 3465 int_name = priv->int_name_lpi; 3466 sprintf(int_name, "%s:%s", dev->name, "lpi"); 3467 ret = request_irq(priv->lpi_irq, 3468 stmmac_mac_interrupt, 3469 0, int_name, dev); 3470 if (unlikely(ret < 0)) { 3471 netdev_err(priv->dev, 3472 "%s: alloc lpi MSI %d (error: %d)\n", 3473 __func__, priv->lpi_irq, ret); 3474 irq_err = REQ_IRQ_ERR_LPI; 3475 goto irq_error; 3476 } 3477 } 3478 3479 /* Request the Safety Feature Correctable Error line in 3480 * case another line is used 3481 */ 3482 if (priv->sfty_ce_irq > 0 && priv->sfty_ce_irq != dev->irq) { 3483 int_name = priv->int_name_sfty_ce; 3484 sprintf(int_name, "%s:%s", dev->name, "safety-ce"); 3485 ret = request_irq(priv->sfty_ce_irq, 3486 stmmac_safety_interrupt, 3487 0, int_name, dev); 3488 if (unlikely(ret < 0)) { 3489 netdev_err(priv->dev, 3490 "%s: alloc sfty ce MSI %d (error: %d)\n", 3491 __func__, priv->sfty_ce_irq, ret); 3492 irq_err = REQ_IRQ_ERR_SFTY_CE; 3493 goto irq_error; 3494 } 3495 } 3496 3497 /* Request the Safety Feature Uncorrectable Error line in 3498 * case another line is used 3499 */ 3500 if (priv->sfty_ue_irq > 0 && priv->sfty_ue_irq != dev->irq) { 3501 int_name = priv->int_name_sfty_ue; 3502 sprintf(int_name, "%s:%s", dev->name, "safety-ue"); 3503 ret = request_irq(priv->sfty_ue_irq, 3504 stmmac_safety_interrupt, 3505 0, int_name, dev); 3506 if (unlikely(ret < 0)) { 3507 netdev_err(priv->dev, 3508 "%s: alloc sfty ue MSI %d (error: %d)\n", 3509 __func__, priv->sfty_ue_irq, ret); 3510 irq_err = REQ_IRQ_ERR_SFTY_UE; 3511 goto irq_error; 3512 } 3513 } 3514 3515 /* Request Rx MSI irq */ 3516 for (i = 0; i < priv->plat->rx_queues_to_use; i++) { 3517 if (i >= MTL_MAX_RX_QUEUES) 3518 break; 3519 if (priv->rx_irq[i] == 0) 3520 continue; 3521 3522 int_name = priv->int_name_rx_irq[i]; 3523 sprintf(int_name, "%s:%s-%d", dev->name, "rx", i); 3524 ret = request_irq(priv->rx_irq[i], 3525 stmmac_msi_intr_rx, 3526 0, int_name, &priv->rx_queue[i]); 3527 if (unlikely(ret < 0)) { 3528 netdev_err(priv->dev, 3529 "%s: alloc rx-%d MSI %d (error: %d)\n", 3530 __func__, i, priv->rx_irq[i], ret); 3531 irq_err = REQ_IRQ_ERR_RX; 3532 irq_idx = i; 3533 goto irq_error; 3534 } 3535
cpumask_clear(&cpu_mask); 3536 cpumask_set_cpu(i % num_online_cpus(), &cpu_mask); 3537 irq_set_affinity_hint(priv->rx_irq[i], &cpu_mask); 3538 } 3539 3540 /* Request Tx MSI irq */ 3541 for (i = 0; i < priv->plat->tx_queues_to_use; i++) { 3542 if (i >= MTL_MAX_TX_QUEUES) 3543 break; 3544 if (priv->tx_irq[i] == 0) 3545 continue; 3546 3547 int_name = priv->int_name_tx_irq[i]; 3548 sprintf(int_name, "%s:%s-%d", dev->name, "tx", i); 3549 ret = request_irq(priv->tx_irq[i], 3550 stmmac_msi_intr_tx, 3551 0, int_name, &priv->tx_queue[i]); 3552 if (unlikely(ret < 0)) { 3553 netdev_err(priv->dev, 3554 "%s: alloc tx-%d MSI %d (error: %d)\n", 3555 __func__, i, priv->tx_irq[i], ret); 3556 irq_err = REQ_IRQ_ERR_TX; 3557 irq_idx = i; 3558 goto irq_error; 3559 } 3560 cpumask_clear(&cpu_mask); 3561 cpumask_set_cpu(i % num_online_cpus(), &cpu_mask); 3562 irq_set_affinity_hint(priv->tx_irq[i], &cpu_mask); 3563 } 3564 3565 return 0; 3566 3567 irq_error: 3568 stmmac_free_irq(dev, irq_err, irq_idx); 3569 return ret; 3570 } 3571 3572 static int stmmac_request_irq_single(struct net_device *dev) 3573 { 3574 struct stmmac_priv *priv = netdev_priv(dev); 3575 enum request_irq_err irq_err; 3576 int ret; 3577 3578 ret = request_irq(dev->irq, stmmac_interrupt, 3579 IRQF_SHARED, dev->name, dev); 3580 if (unlikely(ret < 0)) { 3581 netdev_err(priv->dev, 3582 "%s: ERROR: allocating the IRQ %d (error: %d)\n", 3583 __func__, dev->irq, ret); 3584 irq_err = REQ_IRQ_ERR_MAC; 3585 goto irq_error; 3586 } 3587 3588 /* Request the Wake IRQ in case of another line 3589 * is used for WoL 3590 */ 3591 if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) { 3592 ret = request_irq(priv->wol_irq, stmmac_interrupt, 3593 IRQF_SHARED, dev->name, dev); 3594 if (unlikely(ret < 0)) { 3595 netdev_err(priv->dev, 3596 "%s: ERROR: allocating the WoL IRQ %d (%d)\n", 3597 __func__, priv->wol_irq, ret); 3598 irq_err = REQ_IRQ_ERR_WOL; 3599 goto irq_error; 3600 } 3601 } 3602 3603 /* Request the IRQ lines */ 3604 if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq) { 3605 ret = request_irq(priv->lpi_irq, stmmac_interrupt, 3606 IRQF_SHARED, dev->name, dev); 3607 if (unlikely(ret < 0)) { 3608 netdev_err(priv->dev, 3609 "%s: ERROR: allocating the LPI IRQ %d (%d)\n", 3610 __func__, priv->lpi_irq, ret); 3611 irq_err = REQ_IRQ_ERR_LPI; 3612 goto irq_error; 3613 } 3614 } 3615 3616 return 0; 3617 3618 irq_error: 3619 stmmac_free_irq(dev, irq_err, 0); 3620 return ret; 3621 } 3622 3623 static int stmmac_request_irq(struct net_device *dev) 3624 { 3625 struct stmmac_priv *priv = netdev_priv(dev); 3626 int ret; 3627 3628 /* Request the IRQ lines */ 3629 if (priv->plat->multi_msi_en) 3630 ret = stmmac_request_irq_multi_msi(dev); 3631 else 3632 ret = stmmac_request_irq_single(dev); 3633 3634 return ret; 3635 } 3636 3637 /** 3638 * stmmac_open - open entry point of the driver 3639 * @dev : pointer to the device structure. 3640 * Description: 3641 * This function is the open entry point of the driver. 3642 * Return value: 3643 * 0 on success and an appropriate (-)ve integer as defined in errno.h 3644 * file on failure. 
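 *
 * Failures unwind with the usual goto idiom, in reverse order of setup
 * (sketch of the labels below):
 *
 *	irq_error:	stop phylink, cancel TX timers, HW teardown
 *	init_error:	free_dma_desc_resources()
 *	dma_desc_error:	phylink_disconnect_phy()
 *	init_phy_error:	pm_runtime_put()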
3645 */ 3646 int stmmac_open(struct net_device *dev) 3647 { 3648 struct stmmac_priv *priv = netdev_priv(dev); 3649 int mode = priv->plat->phy_interface; 3650 int bfsize = 0; 3651 u32 chan; 3652 int ret; 3653 3654 ret = pm_runtime_get_sync(priv->device); 3655 if (ret < 0) { 3656 pm_runtime_put_noidle(priv->device); 3657 return ret; 3658 } 3659 3660 if (priv->hw->pcs != STMMAC_PCS_TBI && 3661 priv->hw->pcs != STMMAC_PCS_RTBI && 3662 (!priv->hw->xpcs || 3663 xpcs_get_an_mode(priv->hw->xpcs, mode) != DW_AN_C73)) { 3664 ret = stmmac_init_phy(dev); 3665 if (ret) { 3666 netdev_err(priv->dev, 3667 "%s: Cannot attach to PHY (error: %d)\n", 3668 __func__, ret); 3669 goto init_phy_error; 3670 } 3671 } 3672 3673 /* Extra statistics */ 3674 memset(&priv->xstats, 0, sizeof(struct stmmac_extra_stats)); 3675 priv->xstats.threshold = tc; 3676 3677 bfsize = stmmac_set_16kib_bfsize(priv, dev->mtu); 3678 if (bfsize < 0) 3679 bfsize = 0; 3680 3681 if (bfsize < BUF_SIZE_16KiB) 3682 bfsize = stmmac_set_bfsize(dev->mtu, priv->dma_buf_sz); 3683 3684 priv->dma_buf_sz = bfsize; 3685 buf_sz = bfsize; 3686 3687 priv->rx_copybreak = STMMAC_RX_COPYBREAK; 3688 3689 if (!priv->dma_tx_size) 3690 priv->dma_tx_size = DMA_DEFAULT_TX_SIZE; 3691 if (!priv->dma_rx_size) 3692 priv->dma_rx_size = DMA_DEFAULT_RX_SIZE; 3693 3694 /* Earlier check for TBS */ 3695 for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) { 3696 struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan]; 3697 int tbs_en = priv->plat->tx_queues_cfg[chan].tbs_en; 3698 3699 /* Setup per-TXQ tbs flag before TX descriptor alloc */ 3700 tx_q->tbs |= tbs_en ? STMMAC_TBS_AVAIL : 0; 3701 } 3702 3703 ret = alloc_dma_desc_resources(priv); 3704 if (ret < 0) { 3705 netdev_err(priv->dev, "%s: DMA descriptors allocation failed\n", 3706 __func__); 3707 goto dma_desc_error; 3708 } 3709 3710 ret = init_dma_desc_rings(dev, GFP_KERNEL); 3711 if (ret < 0) { 3712 netdev_err(priv->dev, "%s: DMA descriptors initialization failed\n", 3713 __func__); 3714 goto init_error; 3715 } 3716 3717 ret = stmmac_hw_setup(dev, true); 3718 if (ret < 0) { 3719 netdev_err(priv->dev, "%s: Hw setup failed\n", __func__); 3720 goto init_error; 3721 } 3722 3723 stmmac_init_coalesce(priv); 3724 3725 phylink_start(priv->phylink); 3726 /* We may have called phylink_speed_down before */ 3727 phylink_speed_up(priv->phylink); 3728 3729 ret = stmmac_request_irq(dev); 3730 if (ret) 3731 goto irq_error; 3732 3733 stmmac_enable_all_queues(priv); 3734 netif_tx_start_all_queues(priv->dev); 3735 3736 return 0; 3737 3738 irq_error: 3739 phylink_stop(priv->phylink); 3740 3741 for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) 3742 hrtimer_cancel(&priv->tx_queue[chan].txtimer); 3743 3744 stmmac_hw_teardown(dev); 3745 init_error: 3746 free_dma_desc_resources(priv); 3747 dma_desc_error: 3748 phylink_disconnect_phy(priv->phylink); 3749 init_phy_error: 3750 pm_runtime_put(priv->device); 3751 return ret; 3752 } 3753 3754 static void stmmac_fpe_stop_wq(struct stmmac_priv *priv) 3755 { 3756 set_bit(__FPE_REMOVING, &priv->fpe_task_state); 3757 3758 if (priv->fpe_wq) 3759 destroy_workqueue(priv->fpe_wq); 3760 3761 netdev_info(priv->dev, "FPE workqueue stop"); 3762 } 3763 3764 /** 3765 * stmmac_release - close entry point of the driver 3766 * @dev : device pointer. 3767 * Description: 3768 * This is the stop entry point of the driver. 
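 *
 * Teardown mirrors stmmac_open() in reverse (sketch): stop phylink and
 * NAPI queues, cancel the TX coalesce timers, free the IRQ lines, stop
 * the DMA, free descriptor resources, then disable the MAC and drop
 * the runtime PM reference.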
3769 */ 3770 int stmmac_release(struct net_device *dev) 3771 { 3772 struct stmmac_priv *priv = netdev_priv(dev); 3773 u32 chan; 3774 3775 if (device_may_wakeup(priv->device)) 3776 phylink_speed_down(priv->phylink, false); 3777 /* Stop and disconnect the PHY */ 3778 phylink_stop(priv->phylink); 3779 phylink_disconnect_phy(priv->phylink); 3780 3781 stmmac_disable_all_queues(priv); 3782 3783 for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) 3784 hrtimer_cancel(&priv->tx_queue[chan].txtimer); 3785 3786 /* Free the IRQ lines */ 3787 stmmac_free_irq(dev, REQ_IRQ_ERR_ALL, 0); 3788 3789 if (priv->eee_enabled) { 3790 priv->tx_path_in_lpi_mode = false; 3791 del_timer_sync(&priv->eee_ctrl_timer); 3792 } 3793 3794 /* Stop TX/RX DMA and clear the descriptors */ 3795 stmmac_stop_all_dma(priv); 3796 3797 /* Release and free the Rx/Tx resources */ 3798 free_dma_desc_resources(priv); 3799 3800 /* Disable the MAC Rx/Tx */ 3801 stmmac_mac_set(priv, priv->ioaddr, false); 3802 3803 netif_carrier_off(dev); 3804 3805 stmmac_release_ptp(priv); 3806 3807 pm_runtime_put(priv->device); 3808 3809 if (priv->dma_cap.fpesel) 3810 stmmac_fpe_stop_wq(priv); 3811 3812 return 0; 3813 } 3814 3815 static bool stmmac_vlan_insert(struct stmmac_priv *priv, struct sk_buff *skb, 3816 struct stmmac_tx_queue *tx_q) 3817 { 3818 u16 tag = 0x0, inner_tag = 0x0; 3819 u32 inner_type = 0x0; 3820 struct dma_desc *p; 3821 3822 if (!priv->dma_cap.vlins) 3823 return false; 3824 if (!skb_vlan_tag_present(skb)) 3825 return false; 3826 if (skb->vlan_proto == htons(ETH_P_8021AD)) { 3827 inner_tag = skb_vlan_tag_get(skb); 3828 inner_type = STMMAC_VLAN_INSERT; 3829 } 3830 3831 tag = skb_vlan_tag_get(skb); 3832 3833 if (tx_q->tbs & STMMAC_TBS_AVAIL) 3834 p = &tx_q->dma_entx[tx_q->cur_tx].basic; 3835 else 3836 p = &tx_q->dma_tx[tx_q->cur_tx]; 3837 3838 if (stmmac_set_desc_vlan_tag(priv, p, tag, inner_tag, inner_type)) 3839 return false; 3840 3841 stmmac_set_tx_owner(priv, p); 3842 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_tx_size); 3843 return true; 3844 } 3845 3846 /** 3847 * stmmac_tso_allocator - fill TX descriptors with the payload of a TSO frame 3848 * @priv: driver private structure 3849 * @des: buffer start address 3850 * @total_len: total length to fill in descriptors 3851 * @last_segment: condition for the last descriptor 3852 * @queue: TX queue index 3853 * Description: 3854 * This function fills the descriptors and requests new descriptors 3855 * according to the buffer length to fill. 3856 */ 3857 static void stmmac_tso_allocator(struct stmmac_priv *priv, dma_addr_t des, 3858 int total_len, bool last_segment, u32 queue) 3859 { 3860 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue]; 3861 struct dma_desc *desc; 3862 u32 buff_size; 3863 int tmp_len; 3864 3865 tmp_len = total_len; 3866 3867 while (tmp_len > 0) { 3868 dma_addr_t curr_addr; 3869 3870 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, 3871 priv->dma_tx_size); 3872 WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]); 3873 3874 if (tx_q->tbs & STMMAC_TBS_AVAIL) 3875 desc = &tx_q->dma_entx[tx_q->cur_tx].basic; 3876 else 3877 desc = &tx_q->dma_tx[tx_q->cur_tx]; 3878 3879 curr_addr = des + (total_len - tmp_len); 3880 if (priv->dma_cap.addr64 <= 32) 3881 desc->des0 = cpu_to_le32(curr_addr); 3882 else 3883 stmmac_set_desc_addr(priv, desc, curr_addr); 3884 3885 buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ?
3886 TSO_MAX_BUFF_SIZE : tmp_len;
3887
3888 stmmac_prepare_tso_tx_desc(priv, desc, 0, buff_size,
3889 0, 1,
3890 (last_segment) && (tmp_len <= TSO_MAX_BUFF_SIZE),
3891 0, 0);
3892
3893 tmp_len -= TSO_MAX_BUFF_SIZE;
3894 }
3895 }
3896
3897 static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue)
3898 {
3899 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
3900 int desc_size;
3901
3902 if (likely(priv->extend_desc))
3903 desc_size = sizeof(struct dma_extended_desc);
3904 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
3905 desc_size = sizeof(struct dma_edesc);
3906 else
3907 desc_size = sizeof(struct dma_desc);
3908
3909 /* The own bit must be the last thing written when preparing the
3910 * descriptor, and a barrier is then needed to make sure that
3911 * all is coherent before granting control to the DMA engine.
3912 */
3913 wmb();
3914
3915 tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * desc_size);
3916 stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);
3917 }
3918
3919 /**
3920 * stmmac_tso_xmit - Tx entry point of the driver for oversized frames (TSO)
3921 * @skb : the socket buffer
3922 * @dev : device pointer
3923 * Description: this is the transmit function that is called on TSO frames
3924 * (support available on GMAC4 and newer chips).
3925 * The diagram below shows the ring programming in case of TSO frames:
3926 *
3927 * First Descriptor
3928 * --------
3929 * | DES0 |---> buffer1 = L2/L3/L4 header
3930 * | DES1 |---> TCP Payload (can continue on next descr...)
3931 * | DES2 |---> buffer 1 and 2 len
3932 * | DES3 |---> must set TSE, TCP hdr len-> [22:19]. TCP payload len [17:0]
3933 * --------
3934 * |
3935 * ...
3936 * |
3937 * --------
3938 * | DES0 | --| Split TCP Payload on Buffers 1 and 2
3939 * | DES1 | --|
3940 * | DES2 | --> buffer 1 and 2 len
3941 * | DES3 |
3942 * --------
3943 *
3944 * The MSS is fixed while TSO is enabled, so the TDES3 ctx field only needs programming when it changes.
3945 */
3946 static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
3947 {
3948 struct dma_desc *desc, *first, *mss_desc = NULL;
3949 struct stmmac_priv *priv = netdev_priv(dev);
3950 int nfrags = skb_shinfo(skb)->nr_frags;
3951 u32 queue = skb_get_queue_mapping(skb);
3952 unsigned int first_entry, tx_packets;
3953 int tmp_pay_len = 0, first_tx;
3954 struct stmmac_tx_queue *tx_q;
3955 bool has_vlan, set_ic;
3956 u8 proto_hdr_len, hdr;
3957 u32 pay_len, mss;
3958 dma_addr_t des;
3959 int i;
3960
3961 tx_q = &priv->tx_queue[queue];
3962 first_tx = tx_q->cur_tx;
3963
3964 /* Compute header lengths */
3965 if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
3966 proto_hdr_len = skb_transport_offset(skb) + sizeof(struct udphdr);
3967 hdr = sizeof(struct udphdr);
3968 } else {
3969 proto_hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
3970 hdr = tcp_hdrlen(skb);
3971 }
3972
3973 /* Desc availability based on threshold should be safe enough */
3974 if (unlikely(stmmac_tx_avail(priv, queue) <
3975 (((skb->len - proto_hdr_len) / TSO_MAX_BUFF_SIZE + 1)))) {
3976 if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
3977 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
3978 queue));
3979 /* This is a hard error, log it.
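* The stack still considered this queue awake while the ring had
* too few free descriptors for the segments of this frame, so the
* driver's and the stack's view of availability disagree.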
*/ 3980 netdev_err(priv->dev, 3981 "%s: Tx Ring full when queue awake\n", 3982 __func__); 3983 } 3984 return NETDEV_TX_BUSY; 3985 } 3986 3987 pay_len = skb_headlen(skb) - proto_hdr_len; /* no frags */ 3988 3989 mss = skb_shinfo(skb)->gso_size; 3990 3991 /* set new MSS value if needed */ 3992 if (mss != tx_q->mss) { 3993 if (tx_q->tbs & STMMAC_TBS_AVAIL) 3994 mss_desc = &tx_q->dma_entx[tx_q->cur_tx].basic; 3995 else 3996 mss_desc = &tx_q->dma_tx[tx_q->cur_tx]; 3997 3998 stmmac_set_mss(priv, mss_desc, mss); 3999 tx_q->mss = mss; 4000 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, 4001 priv->dma_tx_size); 4002 WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]); 4003 } 4004 4005 if (netif_msg_tx_queued(priv)) { 4006 pr_info("%s: hdrlen %d, hdr_len %d, pay_len %d, mss %d\n", 4007 __func__, hdr, proto_hdr_len, pay_len, mss); 4008 pr_info("\tskb->len %d, skb->data_len %d\n", skb->len, 4009 skb->data_len); 4010 } 4011 4012 /* Check if VLAN can be inserted by HW */ 4013 has_vlan = stmmac_vlan_insert(priv, skb, tx_q); 4014 4015 first_entry = tx_q->cur_tx; 4016 WARN_ON(tx_q->tx_skbuff[first_entry]); 4017 4018 if (tx_q->tbs & STMMAC_TBS_AVAIL) 4019 desc = &tx_q->dma_entx[first_entry].basic; 4020 else 4021 desc = &tx_q->dma_tx[first_entry]; 4022 first = desc; 4023 4024 if (has_vlan) 4025 stmmac_set_desc_vlan(priv, first, STMMAC_VLAN_INSERT); 4026 4027 /* first descriptor: fill Headers on Buf1 */ 4028 des = dma_map_single(priv->device, skb->data, skb_headlen(skb), 4029 DMA_TO_DEVICE); 4030 if (dma_mapping_error(priv->device, des)) 4031 goto dma_map_err; 4032 4033 tx_q->tx_skbuff_dma[first_entry].buf = des; 4034 tx_q->tx_skbuff_dma[first_entry].len = skb_headlen(skb); 4035 tx_q->tx_skbuff_dma[first_entry].map_as_page = false; 4036 tx_q->tx_skbuff_dma[first_entry].buf_type = STMMAC_TXBUF_T_SKB; 4037 4038 if (priv->dma_cap.addr64 <= 32) { 4039 first->des0 = cpu_to_le32(des); 4040 4041 /* Fill start of payload in buff2 of first descriptor */ 4042 if (pay_len) 4043 first->des1 = cpu_to_le32(des + proto_hdr_len); 4044 4045 /* If needed take extra descriptors to fill the remaining payload */ 4046 tmp_pay_len = pay_len - TSO_MAX_BUFF_SIZE; 4047 } else { 4048 stmmac_set_desc_addr(priv, first, des); 4049 tmp_pay_len = pay_len; 4050 des += proto_hdr_len; 4051 pay_len = 0; 4052 } 4053 4054 stmmac_tso_allocator(priv, des, tmp_pay_len, (nfrags == 0), queue); 4055 4056 /* Prepare fragments */ 4057 for (i = 0; i < nfrags; i++) { 4058 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 4059 4060 des = skb_frag_dma_map(priv->device, frag, 0, 4061 skb_frag_size(frag), 4062 DMA_TO_DEVICE); 4063 if (dma_mapping_error(priv->device, des)) 4064 goto dma_map_err; 4065 4066 stmmac_tso_allocator(priv, des, skb_frag_size(frag), 4067 (i == nfrags - 1), queue); 4068 4069 tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des; 4070 tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_frag_size(frag); 4071 tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = true; 4072 tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB; 4073 } 4074 4075 tx_q->tx_skbuff_dma[tx_q->cur_tx].last_segment = true; 4076 4077 /* Only the last descriptor gets to point to the skb. 
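* stmmac_tx_clean() frees the skb only when it reaches the
* descriptor marked as the last segment, so earlier descriptors
* must not carry a pointer to it.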
*/ 4078 tx_q->tx_skbuff[tx_q->cur_tx] = skb; 4079 tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB; 4080 4081 /* Manage tx mitigation */ 4082 tx_packets = (tx_q->cur_tx + 1) - first_tx; 4083 tx_q->tx_count_frames += tx_packets; 4084 4085 if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en) 4086 set_ic = true; 4087 else if (!priv->tx_coal_frames[queue]) 4088 set_ic = false; 4089 else if (tx_packets > priv->tx_coal_frames[queue]) 4090 set_ic = true; 4091 else if ((tx_q->tx_count_frames % 4092 priv->tx_coal_frames[queue]) < tx_packets) 4093 set_ic = true; 4094 else 4095 set_ic = false; 4096 4097 if (set_ic) { 4098 if (tx_q->tbs & STMMAC_TBS_AVAIL) 4099 desc = &tx_q->dma_entx[tx_q->cur_tx].basic; 4100 else 4101 desc = &tx_q->dma_tx[tx_q->cur_tx]; 4102 4103 tx_q->tx_count_frames = 0; 4104 stmmac_set_tx_ic(priv, desc); 4105 priv->xstats.tx_set_ic_bit++; 4106 } 4107 4108 /* We've used all descriptors we need for this skb, however, 4109 * advance cur_tx so that it references a fresh descriptor. 4110 * ndo_start_xmit will fill this descriptor the next time it's 4111 * called and stmmac_tx_clean may clean up to this descriptor. 4112 */ 4113 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_tx_size); 4114 4115 if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) { 4116 netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n", 4117 __func__); 4118 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue)); 4119 } 4120 4121 dev->stats.tx_bytes += skb->len; 4122 priv->xstats.tx_tso_frames++; 4123 priv->xstats.tx_tso_nfrags += nfrags; 4124 4125 if (priv->sarc_type) 4126 stmmac_set_desc_sarc(priv, first, priv->sarc_type); 4127 4128 skb_tx_timestamp(skb); 4129 4130 if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && 4131 priv->hwts_tx_en)) { 4132 /* declare that device is doing timestamping */ 4133 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; 4134 stmmac_enable_tx_timestamp(priv, first); 4135 } 4136 4137 /* Complete the first descriptor before granting the DMA */ 4138 stmmac_prepare_tso_tx_desc(priv, first, 1, 4139 proto_hdr_len, 4140 pay_len, 4141 1, tx_q->tx_skbuff_dma[first_entry].last_segment, 4142 hdr / 4, (skb->len - proto_hdr_len)); 4143 4144 /* If context desc is used to change MSS */ 4145 if (mss_desc) { 4146 /* Make sure that first descriptor has been completely 4147 * written, including its own bit. This is because MSS is 4148 * actually before first descriptor, so we need to make 4149 * sure that MSS's own bit is the last thing written. 4150 */ 4151 dma_wmb(); 4152 stmmac_set_tx_owner(priv, mss_desc); 4153 } 4154 4155 if (netif_msg_pktdata(priv)) { 4156 pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n", 4157 __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry, 4158 tx_q->cur_tx, first, nfrags); 4159 pr_info(">>> frame to be transmitted: "); 4160 print_pkt(skb->data, skb_headlen(skb)); 4161 } 4162 4163 netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len); 4164 4165 stmmac_flush_tx_descriptors(priv, queue); 4166 stmmac_tx_timer_arm(priv, queue); 4167 4168 return NETDEV_TX_OK; 4169 4170 dma_map_err: 4171 dev_err(priv->device, "Tx dma map failed\n"); 4172 dev_kfree_skb(skb); 4173 priv->dev->stats.tx_dropped++; 4174 return NETDEV_TX_OK; 4175 } 4176 4177 /** 4178 * stmmac_xmit - Tx entry point of the driver 4179 * @skb : the socket buffer 4180 * @dev : device pointer 4181 * Description : this is the tx entry point of the driver. 
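* It is reached through the ndo_start_xmit hook; TSO/USO frames are
* diverted to stmmac_tso_xmit().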
4182 * It programs the chain or the ring and supports oversized frames 4183 * and SG feature. 4184 */ 4185 static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) 4186 { 4187 unsigned int first_entry, tx_packets, enh_desc; 4188 struct stmmac_priv *priv = netdev_priv(dev); 4189 unsigned int nopaged_len = skb_headlen(skb); 4190 int i, csum_insertion = 0, is_jumbo = 0; 4191 u32 queue = skb_get_queue_mapping(skb); 4192 int nfrags = skb_shinfo(skb)->nr_frags; 4193 int gso = skb_shinfo(skb)->gso_type; 4194 struct dma_edesc *tbs_desc = NULL; 4195 struct dma_desc *desc, *first; 4196 struct stmmac_tx_queue *tx_q; 4197 bool has_vlan, set_ic; 4198 int entry, first_tx; 4199 dma_addr_t des; 4200 4201 tx_q = &priv->tx_queue[queue]; 4202 first_tx = tx_q->cur_tx; 4203 4204 if (priv->tx_path_in_lpi_mode && priv->eee_sw_timer_en) 4205 stmmac_disable_eee_mode(priv); 4206 4207 /* Manage oversized TCP frames for GMAC4 device */ 4208 if (skb_is_gso(skb) && priv->tso) { 4209 if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) 4210 return stmmac_tso_xmit(skb, dev); 4211 if (priv->plat->has_gmac4 && (gso & SKB_GSO_UDP_L4)) 4212 return stmmac_tso_xmit(skb, dev); 4213 } 4214 4215 if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) { 4216 if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) { 4217 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, 4218 queue)); 4219 /* This is a hard error, log it. */ 4220 netdev_err(priv->dev, 4221 "%s: Tx Ring full when queue awake\n", 4222 __func__); 4223 } 4224 return NETDEV_TX_BUSY; 4225 } 4226 4227 /* Check if VLAN can be inserted by HW */ 4228 has_vlan = stmmac_vlan_insert(priv, skb, tx_q); 4229 4230 entry = tx_q->cur_tx; 4231 first_entry = entry; 4232 WARN_ON(tx_q->tx_skbuff[first_entry]); 4233 4234 csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL); 4235 4236 if (likely(priv->extend_desc)) 4237 desc = (struct dma_desc *)(tx_q->dma_etx + entry); 4238 else if (tx_q->tbs & STMMAC_TBS_AVAIL) 4239 desc = &tx_q->dma_entx[entry].basic; 4240 else 4241 desc = tx_q->dma_tx + entry; 4242 4243 first = desc; 4244 4245 if (has_vlan) 4246 stmmac_set_desc_vlan(priv, first, STMMAC_VLAN_INSERT); 4247 4248 enh_desc = priv->plat->enh_desc; 4249 /* To program the descriptors according to the size of the frame */ 4250 if (enh_desc) 4251 is_jumbo = stmmac_is_jumbo_frm(priv, skb->len, enh_desc); 4252 4253 if (unlikely(is_jumbo)) { 4254 entry = stmmac_jumbo_frm(priv, tx_q, skb, csum_insertion); 4255 if (unlikely(entry < 0) && (entry != -EINVAL)) 4256 goto dma_map_err; 4257 } 4258 4259 for (i = 0; i < nfrags; i++) { 4260 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 4261 int len = skb_frag_size(frag); 4262 bool last_segment = (i == (nfrags - 1)); 4263 4264 entry = STMMAC_GET_ENTRY(entry, priv->dma_tx_size); 4265 WARN_ON(tx_q->tx_skbuff[entry]); 4266 4267 if (likely(priv->extend_desc)) 4268 desc = (struct dma_desc *)(tx_q->dma_etx + entry); 4269 else if (tx_q->tbs & STMMAC_TBS_AVAIL) 4270 desc = &tx_q->dma_entx[entry].basic; 4271 else 4272 desc = tx_q->dma_tx + entry; 4273 4274 des = skb_frag_dma_map(priv->device, frag, 0, len, 4275 DMA_TO_DEVICE); 4276 if (dma_mapping_error(priv->device, des)) 4277 goto dma_map_err; /* should reuse desc w/o issues */ 4278 4279 tx_q->tx_skbuff_dma[entry].buf = des; 4280 4281 stmmac_set_desc_addr(priv, desc, des); 4282 4283 tx_q->tx_skbuff_dma[entry].map_as_page = true; 4284 tx_q->tx_skbuff_dma[entry].len = len; 4285 tx_q->tx_skbuff_dma[entry].last_segment = last_segment; 4286 tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_SKB; 
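/* Save the unmap parameters so stmmac_tx_clean() can
* dma_unmap_page() this fragment once the DMA has finished
* with it.
*/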
4287
4288 /* Prepare the descriptor and set the own bit too */
4289 stmmac_prepare_tx_desc(priv, desc, 0, len, csum_insertion,
4290 priv->mode, 1, last_segment, skb->len);
4291 }
4292
4293 /* Only the last descriptor gets to point to the skb. */
4294 tx_q->tx_skbuff[entry] = skb;
4295 tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_SKB;
4296
4297 /* According to the coalesce parameter the IC bit for the last
4298 * segment is reset and the timer re-started to clean the tx status.
4299 * This approach takes care of the fragments: desc is the first
4300 * element in case of no SG.
4301 */
4302 tx_packets = (entry + 1) - first_tx;
4303 tx_q->tx_count_frames += tx_packets;
4304
4305 if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en)
4306 set_ic = true;
4307 else if (!priv->tx_coal_frames[queue])
4308 set_ic = false;
4309 else if (tx_packets > priv->tx_coal_frames[queue])
4310 set_ic = true;
4311 else if ((tx_q->tx_count_frames %
4312 priv->tx_coal_frames[queue]) < tx_packets)
4313 set_ic = true;
4314 else
4315 set_ic = false;
4316
4317 if (set_ic) {
4318 if (likely(priv->extend_desc))
4319 desc = &tx_q->dma_etx[entry].basic;
4320 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4321 desc = &tx_q->dma_entx[entry].basic;
4322 else
4323 desc = &tx_q->dma_tx[entry];
4324
4325 tx_q->tx_count_frames = 0;
4326 stmmac_set_tx_ic(priv, desc);
4327 priv->xstats.tx_set_ic_bit++;
4328 }
4329
4330 /* We've used all descriptors we need for this skb, however,
4331 * advance cur_tx so that it references a fresh descriptor.
4332 * ndo_start_xmit will fill this descriptor the next time it's
4333 * called and stmmac_tx_clean may clean up to this descriptor.
4334 */
4335 entry = STMMAC_GET_ENTRY(entry, priv->dma_tx_size);
4336 tx_q->cur_tx = entry;
4337
4338 if (netif_msg_pktdata(priv)) {
4339 netdev_dbg(priv->dev,
4340 "%s: curr=%d dirty=%d f=%d, e=%d, first=%p, nfrags=%d",
4341 __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
4342 entry, first, nfrags);
4343
4344 netdev_dbg(priv->dev, ">>> frame to be transmitted: ");
4345 print_pkt(skb->data, skb->len);
4346 }
4347
4348 if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
4349 netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
4350 __func__);
4351 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
4352 }
4353
4354 dev->stats.tx_bytes += skb->len;
4355
4356 if (priv->sarc_type)
4357 stmmac_set_desc_sarc(priv, first, priv->sarc_type);
4358
4359 skb_tx_timestamp(skb);
4360
4361 /* Ready to fill the first descriptor and set the OWN bit w/o any
4362 * problems because all the descriptors are actually ready to be
4363 * passed to the DMA engine.
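* Writing the OWN bit of the first descriptor last guarantees the
* DMA never sees a partially initialized chain.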
4364 */ 4365 if (likely(!is_jumbo)) { 4366 bool last_segment = (nfrags == 0); 4367 4368 des = dma_map_single(priv->device, skb->data, 4369 nopaged_len, DMA_TO_DEVICE); 4370 if (dma_mapping_error(priv->device, des)) 4371 goto dma_map_err; 4372 4373 tx_q->tx_skbuff_dma[first_entry].buf = des; 4374 tx_q->tx_skbuff_dma[first_entry].buf_type = STMMAC_TXBUF_T_SKB; 4375 tx_q->tx_skbuff_dma[first_entry].map_as_page = false; 4376 4377 stmmac_set_desc_addr(priv, first, des); 4378 4379 tx_q->tx_skbuff_dma[first_entry].len = nopaged_len; 4380 tx_q->tx_skbuff_dma[first_entry].last_segment = last_segment; 4381 4382 if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && 4383 priv->hwts_tx_en)) { 4384 /* declare that device is doing timestamping */ 4385 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; 4386 stmmac_enable_tx_timestamp(priv, first); 4387 } 4388 4389 /* Prepare the first descriptor setting the OWN bit too */ 4390 stmmac_prepare_tx_desc(priv, first, 1, nopaged_len, 4391 csum_insertion, priv->mode, 0, last_segment, 4392 skb->len); 4393 } 4394 4395 if (tx_q->tbs & STMMAC_TBS_EN) { 4396 struct timespec64 ts = ns_to_timespec64(skb->tstamp); 4397 4398 tbs_desc = &tx_q->dma_entx[first_entry]; 4399 stmmac_set_desc_tbs(priv, tbs_desc, ts.tv_sec, ts.tv_nsec); 4400 } 4401 4402 stmmac_set_tx_owner(priv, first); 4403 4404 netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len); 4405 4406 stmmac_enable_dma_transmission(priv, priv->ioaddr); 4407 4408 stmmac_flush_tx_descriptors(priv, queue); 4409 stmmac_tx_timer_arm(priv, queue); 4410 4411 return NETDEV_TX_OK; 4412 4413 dma_map_err: 4414 netdev_err(priv->dev, "Tx DMA map failed\n"); 4415 dev_kfree_skb(skb); 4416 priv->dev->stats.tx_dropped++; 4417 return NETDEV_TX_OK; 4418 } 4419 4420 static void stmmac_rx_vlan(struct net_device *dev, struct sk_buff *skb) 4421 { 4422 struct vlan_ethhdr *veth; 4423 __be16 vlan_proto; 4424 u16 vlanid; 4425 4426 veth = (struct vlan_ethhdr *)skb->data; 4427 vlan_proto = veth->h_vlan_proto; 4428 4429 if ((vlan_proto == htons(ETH_P_8021Q) && 4430 dev->features & NETIF_F_HW_VLAN_CTAG_RX) || 4431 (vlan_proto == htons(ETH_P_8021AD) && 4432 dev->features & NETIF_F_HW_VLAN_STAG_RX)) { 4433 /* pop the vlan tag */ 4434 vlanid = ntohs(veth->h_vlan_TCI); 4435 memmove(skb->data + VLAN_HLEN, veth, ETH_ALEN * 2); 4436 skb_pull(skb, VLAN_HLEN); 4437 __vlan_hwaccel_put_tag(skb, vlan_proto, vlanid); 4438 } 4439 } 4440 4441 /** 4442 * stmmac_rx_refill - refill used skb preallocated buffers 4443 * @priv: driver private structure 4444 * @queue: RX queue index 4445 * Description : this is to reallocate the skb for the reception process 4446 * that is based on zero-copy. 
4447 */ 4448 static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue) 4449 { 4450 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; 4451 int dirty = stmmac_rx_dirty(priv, queue); 4452 unsigned int entry = rx_q->dirty_rx; 4453 4454 while (dirty-- > 0) { 4455 struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry]; 4456 struct dma_desc *p; 4457 bool use_rx_wd; 4458 4459 if (priv->extend_desc) 4460 p = (struct dma_desc *)(rx_q->dma_erx + entry); 4461 else 4462 p = rx_q->dma_rx + entry; 4463 4464 if (!buf->page) { 4465 buf->page = page_pool_dev_alloc_pages(rx_q->page_pool); 4466 if (!buf->page) 4467 break; 4468 } 4469 4470 if (priv->sph && !buf->sec_page) { 4471 buf->sec_page = page_pool_dev_alloc_pages(rx_q->page_pool); 4472 if (!buf->sec_page) 4473 break; 4474 4475 buf->sec_addr = page_pool_get_dma_addr(buf->sec_page); 4476 } 4477 4478 buf->addr = page_pool_get_dma_addr(buf->page) + buf->page_offset; 4479 4480 stmmac_set_desc_addr(priv, p, buf->addr); 4481 if (priv->sph) 4482 stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true); 4483 else 4484 stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false); 4485 stmmac_refill_desc3(priv, rx_q, p); 4486 4487 rx_q->rx_count_frames++; 4488 rx_q->rx_count_frames += priv->rx_coal_frames[queue]; 4489 if (rx_q->rx_count_frames > priv->rx_coal_frames[queue]) 4490 rx_q->rx_count_frames = 0; 4491 4492 use_rx_wd = !priv->rx_coal_frames[queue]; 4493 use_rx_wd |= rx_q->rx_count_frames > 0; 4494 if (!priv->use_riwt) 4495 use_rx_wd = false; 4496 4497 dma_wmb(); 4498 stmmac_set_rx_owner(priv, p, use_rx_wd); 4499 4500 entry = STMMAC_GET_ENTRY(entry, priv->dma_rx_size); 4501 } 4502 rx_q->dirty_rx = entry; 4503 rx_q->rx_tail_addr = rx_q->dma_rx_phy + 4504 (rx_q->dirty_rx * sizeof(struct dma_desc)); 4505 stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue); 4506 } 4507 4508 static unsigned int stmmac_rx_buf1_len(struct stmmac_priv *priv, 4509 struct dma_desc *p, 4510 int status, unsigned int len) 4511 { 4512 unsigned int plen = 0, hlen = 0; 4513 int coe = priv->hw->rx_csum; 4514 4515 /* Not first descriptor, buffer is always zero */ 4516 if (priv->sph && len) 4517 return 0; 4518 4519 /* First descriptor, get split header length */ 4520 stmmac_get_rx_header_len(priv, p, &hlen); 4521 if (priv->sph && hlen) { 4522 priv->xstats.rx_split_hdr_pkt_n++; 4523 return hlen; 4524 } 4525 4526 /* First descriptor, not last descriptor and not split header */ 4527 if (status & rx_not_ls) 4528 return priv->dma_buf_sz; 4529 4530 plen = stmmac_get_rx_frame_len(priv, p, coe); 4531 4532 /* First descriptor and last descriptor and not split header */ 4533 return min_t(unsigned int, priv->dma_buf_sz, plen); 4534 } 4535 4536 static unsigned int stmmac_rx_buf2_len(struct stmmac_priv *priv, 4537 struct dma_desc *p, 4538 int status, unsigned int len) 4539 { 4540 int coe = priv->hw->rx_csum; 4541 unsigned int plen = 0; 4542 4543 /* Not split header, buffer is not available */ 4544 if (!priv->sph) 4545 return 0; 4546 4547 /* Not last descriptor */ 4548 if (status & rx_not_ls) 4549 return priv->dma_buf_sz; 4550 4551 plen = stmmac_get_rx_frame_len(priv, p, coe); 4552 4553 /* Last descriptor */ 4554 return plen - len; 4555 } 4556 4557 static int stmmac_xdp_xmit_xdpf(struct stmmac_priv *priv, int queue, 4558 struct xdp_frame *xdpf, bool dma_map) 4559 { 4560 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue]; 4561 unsigned int entry = tx_q->cur_tx; 4562 struct dma_desc *tx_desc; 4563 dma_addr_t dma_addr; 4564 bool set_ic; 4565 4566 if (stmmac_tx_avail(priv, 
queue) < STMMAC_TX_THRESH(priv)) 4567 return STMMAC_XDP_CONSUMED; 4568 4569 if (likely(priv->extend_desc)) 4570 tx_desc = (struct dma_desc *)(tx_q->dma_etx + entry); 4571 else if (tx_q->tbs & STMMAC_TBS_AVAIL) 4572 tx_desc = &tx_q->dma_entx[entry].basic; 4573 else 4574 tx_desc = tx_q->dma_tx + entry; 4575 4576 if (dma_map) { 4577 dma_addr = dma_map_single(priv->device, xdpf->data, 4578 xdpf->len, DMA_TO_DEVICE); 4579 if (dma_mapping_error(priv->device, dma_addr)) 4580 return STMMAC_XDP_CONSUMED; 4581 4582 tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XDP_NDO; 4583 } else { 4584 struct page *page = virt_to_page(xdpf->data); 4585 4586 dma_addr = page_pool_get_dma_addr(page) + sizeof(*xdpf) + 4587 xdpf->headroom; 4588 dma_sync_single_for_device(priv->device, dma_addr, 4589 xdpf->len, DMA_BIDIRECTIONAL); 4590 4591 tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XDP_TX; 4592 } 4593 4594 tx_q->tx_skbuff_dma[entry].buf = dma_addr; 4595 tx_q->tx_skbuff_dma[entry].map_as_page = false; 4596 tx_q->tx_skbuff_dma[entry].len = xdpf->len; 4597 tx_q->tx_skbuff_dma[entry].last_segment = true; 4598 tx_q->tx_skbuff_dma[entry].is_jumbo = false; 4599 4600 tx_q->xdpf[entry] = xdpf; 4601 4602 stmmac_set_desc_addr(priv, tx_desc, dma_addr); 4603 4604 stmmac_prepare_tx_desc(priv, tx_desc, 1, xdpf->len, 4605 true, priv->mode, true, true, 4606 xdpf->len); 4607 4608 tx_q->tx_count_frames++; 4609 4610 if (tx_q->tx_count_frames % priv->tx_coal_frames[queue] == 0) 4611 set_ic = true; 4612 else 4613 set_ic = false; 4614 4615 if (set_ic) { 4616 tx_q->tx_count_frames = 0; 4617 stmmac_set_tx_ic(priv, tx_desc); 4618 priv->xstats.tx_set_ic_bit++; 4619 } 4620 4621 stmmac_enable_dma_transmission(priv, priv->ioaddr); 4622 4623 entry = STMMAC_GET_ENTRY(entry, priv->dma_tx_size); 4624 tx_q->cur_tx = entry; 4625 4626 return STMMAC_XDP_TX; 4627 } 4628 4629 static int stmmac_xdp_get_tx_queue(struct stmmac_priv *priv, 4630 int cpu) 4631 { 4632 int index = cpu; 4633 4634 if (unlikely(index < 0)) 4635 index = 0; 4636 4637 while (index >= priv->plat->tx_queues_to_use) 4638 index -= priv->plat->tx_queues_to_use; 4639 4640 return index; 4641 } 4642 4643 static int stmmac_xdp_xmit_back(struct stmmac_priv *priv, 4644 struct xdp_buff *xdp) 4645 { 4646 struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp); 4647 int cpu = smp_processor_id(); 4648 struct netdev_queue *nq; 4649 int queue; 4650 int res; 4651 4652 if (unlikely(!xdpf)) 4653 return STMMAC_XDP_CONSUMED; 4654 4655 queue = stmmac_xdp_get_tx_queue(priv, cpu); 4656 nq = netdev_get_tx_queue(priv->dev, queue); 4657 4658 __netif_tx_lock(nq, cpu); 4659 /* Avoids TX time-out as we are sharing with slow path */ 4660 nq->trans_start = jiffies; 4661 4662 res = stmmac_xdp_xmit_xdpf(priv, queue, xdpf, false); 4663 if (res == STMMAC_XDP_TX) 4664 stmmac_flush_tx_descriptors(priv, queue); 4665 4666 __netif_tx_unlock(nq); 4667 4668 return res; 4669 } 4670 4671 static int __stmmac_xdp_run_prog(struct stmmac_priv *priv, 4672 struct bpf_prog *prog, 4673 struct xdp_buff *xdp) 4674 { 4675 u32 act; 4676 int res; 4677 4678 act = bpf_prog_run_xdp(prog, xdp); 4679 switch (act) { 4680 case XDP_PASS: 4681 res = STMMAC_XDP_PASS; 4682 break; 4683 case XDP_TX: 4684 res = stmmac_xdp_xmit_back(priv, xdp); 4685 break; 4686 case XDP_REDIRECT: 4687 if (xdp_do_redirect(priv->dev, xdp, prog) < 0) 4688 res = STMMAC_XDP_CONSUMED; 4689 else 4690 res = STMMAC_XDP_REDIRECT; 4691 break; 4692 default: 4693 bpf_warn_invalid_xdp_action(act); 4694 fallthrough; 4695 case XDP_ABORTED: 4696 trace_xdp_exception(priv->dev, 
prog, act); 4697 fallthrough; 4698 case XDP_DROP: 4699 res = STMMAC_XDP_CONSUMED; 4700 break; 4701 } 4702 4703 return res; 4704 } 4705 4706 static struct sk_buff *stmmac_xdp_run_prog(struct stmmac_priv *priv, 4707 struct xdp_buff *xdp) 4708 { 4709 struct bpf_prog *prog; 4710 int res; 4711 4712 prog = READ_ONCE(priv->xdp_prog); 4713 if (!prog) { 4714 res = STMMAC_XDP_PASS; 4715 goto out; 4716 } 4717 4718 res = __stmmac_xdp_run_prog(priv, prog, xdp); 4719 out: 4720 return ERR_PTR(-res); 4721 } 4722 4723 static void stmmac_finalize_xdp_rx(struct stmmac_priv *priv, 4724 int xdp_status) 4725 { 4726 int cpu = smp_processor_id(); 4727 int queue; 4728 4729 queue = stmmac_xdp_get_tx_queue(priv, cpu); 4730 4731 if (xdp_status & STMMAC_XDP_TX) 4732 stmmac_tx_timer_arm(priv, queue); 4733 4734 if (xdp_status & STMMAC_XDP_REDIRECT) 4735 xdp_do_flush(); 4736 } 4737 4738 static struct sk_buff *stmmac_construct_skb_zc(struct stmmac_channel *ch, 4739 struct xdp_buff *xdp) 4740 { 4741 unsigned int metasize = xdp->data - xdp->data_meta; 4742 unsigned int datasize = xdp->data_end - xdp->data; 4743 struct sk_buff *skb; 4744 4745 skb = __napi_alloc_skb(&ch->rxtx_napi, 4746 xdp->data_end - xdp->data_hard_start, 4747 GFP_ATOMIC | __GFP_NOWARN); 4748 if (unlikely(!skb)) 4749 return NULL; 4750 4751 skb_reserve(skb, xdp->data - xdp->data_hard_start); 4752 memcpy(__skb_put(skb, datasize), xdp->data, datasize); 4753 if (metasize) 4754 skb_metadata_set(skb, metasize); 4755 4756 return skb; 4757 } 4758 4759 static void stmmac_dispatch_skb_zc(struct stmmac_priv *priv, u32 queue, 4760 struct dma_desc *p, struct dma_desc *np, 4761 struct xdp_buff *xdp) 4762 { 4763 struct stmmac_channel *ch = &priv->channel[queue]; 4764 unsigned int len = xdp->data_end - xdp->data; 4765 enum pkt_hash_types hash_type; 4766 int coe = priv->hw->rx_csum; 4767 struct sk_buff *skb; 4768 u32 hash; 4769 4770 skb = stmmac_construct_skb_zc(ch, xdp); 4771 if (!skb) { 4772 priv->dev->stats.rx_dropped++; 4773 return; 4774 } 4775 4776 stmmac_get_rx_hwtstamp(priv, p, np, skb); 4777 stmmac_rx_vlan(priv->dev, skb); 4778 skb->protocol = eth_type_trans(skb, priv->dev); 4779 4780 if (unlikely(!coe)) 4781 skb_checksum_none_assert(skb); 4782 else 4783 skb->ip_summed = CHECKSUM_UNNECESSARY; 4784 4785 if (!stmmac_get_rx_hash(priv, p, &hash, &hash_type)) 4786 skb_set_hash(skb, hash, hash_type); 4787 4788 skb_record_rx_queue(skb, queue); 4789 napi_gro_receive(&ch->rxtx_napi, skb); 4790 4791 priv->dev->stats.rx_packets++; 4792 priv->dev->stats.rx_bytes += len; 4793 } 4794 4795 static bool stmmac_rx_refill_zc(struct stmmac_priv *priv, u32 queue, u32 budget) 4796 { 4797 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; 4798 unsigned int entry = rx_q->dirty_rx; 4799 struct dma_desc *rx_desc = NULL; 4800 bool ret = true; 4801 4802 budget = min(budget, stmmac_rx_dirty(priv, queue)); 4803 4804 while (budget-- > 0 && entry != rx_q->cur_rx) { 4805 struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry]; 4806 dma_addr_t dma_addr; 4807 bool use_rx_wd; 4808 4809 if (!buf->xdp) { 4810 buf->xdp = xsk_buff_alloc(rx_q->xsk_pool); 4811 if (!buf->xdp) { 4812 ret = false; 4813 break; 4814 } 4815 } 4816 4817 if (priv->extend_desc) 4818 rx_desc = (struct dma_desc *)(rx_q->dma_erx + entry); 4819 else 4820 rx_desc = rx_q->dma_rx + entry; 4821 4822 dma_addr = xsk_buff_xdp_get_dma(buf->xdp); 4823 stmmac_set_desc_addr(priv, rx_desc, dma_addr); 4824 stmmac_set_desc_sec_addr(priv, rx_desc, 0, false); 4825 stmmac_refill_desc3(priv, rx_q, rx_desc); 4826 4827 rx_q->rx_count_frames++; 4828 
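/* Fold the frame coalescing budget into the counter used below to
* decide whether the RX watchdog bit is set on this descriptor.
*/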
rx_q->rx_count_frames += priv->rx_coal_frames[queue];
4829 if (rx_q->rx_count_frames > priv->rx_coal_frames[queue])
4830 rx_q->rx_count_frames = 0;
4831
4832 use_rx_wd = !priv->rx_coal_frames[queue];
4833 use_rx_wd |= rx_q->rx_count_frames > 0;
4834 if (!priv->use_riwt)
4835 use_rx_wd = false;
4836
4837 dma_wmb();
4838 stmmac_set_rx_owner(priv, rx_desc, use_rx_wd);
4839
4840 entry = STMMAC_GET_ENTRY(entry, priv->dma_rx_size);
4841 }
4842
4843 if (rx_desc) {
4844 rx_q->dirty_rx = entry;
4845 rx_q->rx_tail_addr = rx_q->dma_rx_phy +
4846 (rx_q->dirty_rx * sizeof(struct dma_desc));
4847 stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue);
4848 }
4849
4850 return ret;
4851 }
4852
4853 static int stmmac_rx_zc(struct stmmac_priv *priv, int limit, u32 queue)
4854 {
4855 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
4856 unsigned int count = 0, error = 0, len = 0;
4857 int dirty = stmmac_rx_dirty(priv, queue);
4858 unsigned int next_entry = rx_q->cur_rx;
4859 unsigned int desc_size;
4860 struct bpf_prog *prog;
4861 bool failure = false;
4862 int xdp_status = 0;
4863 int status = 0;
4864
4865 if (netif_msg_rx_status(priv)) {
4866 void *rx_head;
4867
4868 netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
4869 if (priv->extend_desc) {
4870 rx_head = (void *)rx_q->dma_erx;
4871 desc_size = sizeof(struct dma_extended_desc);
4872 } else {
4873 rx_head = (void *)rx_q->dma_rx;
4874 desc_size = sizeof(struct dma_desc);
4875 }
4876
4877 stmmac_display_ring(priv, rx_head, priv->dma_rx_size, true,
4878 rx_q->dma_rx_phy, desc_size);
4879 }
4880 while (count < limit) {
4881 struct stmmac_rx_buffer *buf;
4882 unsigned int buf1_len = 0;
4883 struct dma_desc *np, *p;
4884 int entry;
4885 int res;
4886
4887 if (!count && rx_q->state_saved) {
4888 error = rx_q->state.error;
4889 len = rx_q->state.len;
4890 } else {
4891 rx_q->state_saved = false;
4892 error = 0;
4893 len = 0;
4894 }
4895
4896 if (count >= limit)
4897 break;
4898
4899 read_again:
4900 buf1_len = 0;
4901 entry = next_entry;
4902 buf = &rx_q->buf_pool[entry];
4903
4904 if (dirty >= STMMAC_RX_FILL_BATCH) {
4905 failure = failure ||
4906 !stmmac_rx_refill_zc(priv, queue, dirty);
4907 dirty = 0;
4908 }
4909
4910 if (priv->extend_desc)
4911 p = (struct dma_desc *)(rx_q->dma_erx + entry);
4912 else
4913 p = rx_q->dma_rx + entry;
4914
4915 /* read the status of the incoming frame */
4916 status = stmmac_rx_status(priv, &priv->dev->stats,
4917 &priv->xstats, p);
4918 /* check if managed by the DMA otherwise go ahead */
4919 if (unlikely(status & dma_own))
4920 break;
4921
4922 /* Prefetch the next RX descriptor */
4923 rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx,
4924 priv->dma_rx_size);
4925 next_entry = rx_q->cur_rx;
4926
4927 if (priv->extend_desc)
4928 np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
4929 else
4930 np = rx_q->dma_rx + next_entry;
4931
4932 prefetch(np);
4933
4934 /* Ensure a valid XSK buffer before proceeding */
4935 if (!buf->xdp)
4936 break;
4937
4938 if (priv->extend_desc)
4939 stmmac_rx_extended_status(priv, &priv->dev->stats,
4940 &priv->xstats,
4941 rx_q->dma_erx + entry);
4942 if (unlikely(status == discard_frame)) {
4943 xsk_buff_free(buf->xdp);
4944 buf->xdp = NULL;
4945 dirty++;
4946 error = 1;
4947 if (!priv->hwts_rx_en)
4948 priv->dev->stats.rx_errors++;
4949 }
4950
4951 if (unlikely(error && (status & rx_not_ls)))
4952 goto read_again;
4953 if (unlikely(error)) {
4954 count++;
4955 continue;
4956 }
4957
4958 /* The XSK pool expects each RX frame to map 1:1 to an XSK buffer */
4959 if (likely(status &
rx_not_ls)) {
4960 xsk_buff_free(buf->xdp);
4961 buf->xdp = NULL;
4962 dirty++;
4963 count++;
4964 goto read_again;
4965 }
4966
4967 /* XDP ZC frames only support primary buffers for now */
4968 buf1_len = stmmac_rx_buf1_len(priv, p, status, len);
4969 len += buf1_len;
4970
4971 /* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3
4972 * Type frames (LLC/LLC-SNAP)
4973 *
4974 * llc_snap is never checked in GMAC >= 4, so this ACS
4975 * feature is always disabled and packets need to be
4976 * stripped manually.
4977 */
4978 if (likely(!(status & rx_not_ls)) &&
4979 (likely(priv->synopsys_id >= DWMAC_CORE_4_00) ||
4980 unlikely(status != llc_snap))) {
4981 buf1_len -= ETH_FCS_LEN;
4982 len -= ETH_FCS_LEN;
4983 }
4984
4985 /* The RX buffer is good and fits into an XSK pool buffer */
4986 buf->xdp->data_end = buf->xdp->data + buf1_len;
4987 xsk_buff_dma_sync_for_cpu(buf->xdp, rx_q->xsk_pool);
4988
4989 prog = READ_ONCE(priv->xdp_prog);
4990 res = __stmmac_xdp_run_prog(priv, prog, buf->xdp);
4991
4992 switch (res) {
4993 case STMMAC_XDP_PASS:
4994 stmmac_dispatch_skb_zc(priv, queue, p, np, buf->xdp);
4995 xsk_buff_free(buf->xdp);
4996 break;
4997 case STMMAC_XDP_CONSUMED:
4998 xsk_buff_free(buf->xdp);
4999 priv->dev->stats.rx_dropped++;
5000 break;
5001 case STMMAC_XDP_TX:
5002 case STMMAC_XDP_REDIRECT:
5003 xdp_status |= res;
5004 break;
5005 }
5006
5007 buf->xdp = NULL;
5008 dirty++;
5009 count++;
5010 }
5011
5012 if (status & rx_not_ls) {
5013 rx_q->state_saved = true;
5014 rx_q->state.error = error;
5015 rx_q->state.len = len;
5016 }
5017
5018 stmmac_finalize_xdp_rx(priv, xdp_status);
5019
5020 priv->xstats.rx_pkt_n += count;
5021 priv->xstats.rxq_stats[queue].rx_pkt_n += count;
5022
5023 if (xsk_uses_need_wakeup(rx_q->xsk_pool)) {
5024 if (failure || stmmac_rx_dirty(priv, queue) > 0)
5025 xsk_set_rx_need_wakeup(rx_q->xsk_pool);
5026 else
5027 xsk_clear_rx_need_wakeup(rx_q->xsk_pool);
5028
5029 return (int)count;
5030 }
5031
5032 return failure ? limit : (int)count;
5033 }
5034
5035 /**
5036 * stmmac_rx - manage the receive process
5037 * @priv: driver private structure
5038 * @limit: napi budget
5039 * @queue: RX queue index.
5040 * Description : this is the function called by the napi poll method.
5041 * It gets all the frames inside the ring.
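* When a frame spans the NAPI budget boundary, the in-progress skb
* together with the error and length state is parked in rx_q->state
* and restored at the start of the next poll.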
5042 */ 5043 static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue) 5044 { 5045 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; 5046 struct stmmac_channel *ch = &priv->channel[queue]; 5047 unsigned int count = 0, error = 0, len = 0; 5048 int status = 0, coe = priv->hw->rx_csum; 5049 unsigned int next_entry = rx_q->cur_rx; 5050 enum dma_data_direction dma_dir; 5051 unsigned int desc_size; 5052 struct sk_buff *skb = NULL; 5053 struct xdp_buff xdp; 5054 int xdp_status = 0; 5055 int buf_sz; 5056 5057 dma_dir = page_pool_get_dma_dir(rx_q->page_pool); 5058 buf_sz = DIV_ROUND_UP(priv->dma_buf_sz, PAGE_SIZE) * PAGE_SIZE; 5059 5060 if (netif_msg_rx_status(priv)) { 5061 void *rx_head; 5062 5063 netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__); 5064 if (priv->extend_desc) { 5065 rx_head = (void *)rx_q->dma_erx; 5066 desc_size = sizeof(struct dma_extended_desc); 5067 } else { 5068 rx_head = (void *)rx_q->dma_rx; 5069 desc_size = sizeof(struct dma_desc); 5070 } 5071 5072 stmmac_display_ring(priv, rx_head, priv->dma_rx_size, true, 5073 rx_q->dma_rx_phy, desc_size); 5074 } 5075 while (count < limit) { 5076 unsigned int buf1_len = 0, buf2_len = 0; 5077 enum pkt_hash_types hash_type; 5078 struct stmmac_rx_buffer *buf; 5079 struct dma_desc *np, *p; 5080 int entry; 5081 u32 hash; 5082 5083 if (!count && rx_q->state_saved) { 5084 skb = rx_q->state.skb; 5085 error = rx_q->state.error; 5086 len = rx_q->state.len; 5087 } else { 5088 rx_q->state_saved = false; 5089 skb = NULL; 5090 error = 0; 5091 len = 0; 5092 } 5093 5094 if (count >= limit) 5095 break; 5096 5097 read_again: 5098 buf1_len = 0; 5099 buf2_len = 0; 5100 entry = next_entry; 5101 buf = &rx_q->buf_pool[entry]; 5102 5103 if (priv->extend_desc) 5104 p = (struct dma_desc *)(rx_q->dma_erx + entry); 5105 else 5106 p = rx_q->dma_rx + entry; 5107 5108 /* read the status of the incoming frame */ 5109 status = stmmac_rx_status(priv, &priv->dev->stats, 5110 &priv->xstats, p); 5111 /* check if managed by the DMA otherwise go ahead */ 5112 if (unlikely(status & dma_own)) 5113 break; 5114 5115 rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx, 5116 priv->dma_rx_size); 5117 next_entry = rx_q->cur_rx; 5118 5119 if (priv->extend_desc) 5120 np = (struct dma_desc *)(rx_q->dma_erx + next_entry); 5121 else 5122 np = rx_q->dma_rx + next_entry; 5123 5124 prefetch(np); 5125 5126 if (priv->extend_desc) 5127 stmmac_rx_extended_status(priv, &priv->dev->stats, 5128 &priv->xstats, rx_q->dma_erx + entry); 5129 if (unlikely(status == discard_frame)) { 5130 page_pool_recycle_direct(rx_q->page_pool, buf->page); 5131 buf->page = NULL; 5132 error = 1; 5133 if (!priv->hwts_rx_en) 5134 priv->dev->stats.rx_errors++; 5135 } 5136 5137 if (unlikely(error && (status & rx_not_ls))) 5138 goto read_again; 5139 if (unlikely(error)) { 5140 dev_kfree_skb(skb); 5141 skb = NULL; 5142 count++; 5143 continue; 5144 } 5145 5146 /* Buffer is good. Go on. */ 5147 5148 prefetch(page_address(buf->page) + buf->page_offset); 5149 if (buf->sec_page) 5150 prefetch(page_address(buf->sec_page)); 5151 5152 buf1_len = stmmac_rx_buf1_len(priv, p, status, len); 5153 len += buf1_len; 5154 buf2_len = stmmac_rx_buf2_len(priv, p, status, len); 5155 len += buf2_len; 5156 5157 /* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3 5158 * Type frames (LLC/LLC-SNAP) 5159 * 5160 * llc_snap is never checked in GMAC >= 4, so this ACS 5161 * feature is always disabled and packets need to be 5162 * stripped manually. 
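* In that case the 4 byte FCS is trimmed on the last descriptor:
* from buf2_len when a second buffer holds the tail of the frame,
* otherwise from buf1_len, and the running len drops accordingly.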
5163 */
5164 if (likely(!(status & rx_not_ls)) &&
5165 (likely(priv->synopsys_id >= DWMAC_CORE_4_00) ||
5166 unlikely(status != llc_snap))) {
5167 if (buf2_len)
5168 buf2_len -= ETH_FCS_LEN;
5169 else
5170 buf1_len -= ETH_FCS_LEN;
5171
5172 len -= ETH_FCS_LEN;
5173 }
5174
5175 if (!skb) {
5176 unsigned int pre_len, sync_len;
5177
5178 dma_sync_single_for_cpu(priv->device, buf->addr,
5179 buf1_len, dma_dir);
5180
5181 xdp_init_buff(&xdp, buf_sz, &rx_q->xdp_rxq);
5182 xdp_prepare_buff(&xdp, page_address(buf->page),
5183 buf->page_offset, buf1_len, false);
5184
5185 pre_len = xdp.data_end - xdp.data_hard_start -
5186 buf->page_offset;
5187 skb = stmmac_xdp_run_prog(priv, &xdp);
5188 /* Due to xdp_adjust_tail: the DMA sync for_device must
5189 * cover the max length the CPU may have touched.
5190 */
5191 sync_len = xdp.data_end - xdp.data_hard_start -
5192 buf->page_offset;
5193 sync_len = max(sync_len, pre_len);
5194
5195 /* For any verdict other than XDP_PASS */
5196 if (IS_ERR(skb)) {
5197 unsigned int xdp_res = -PTR_ERR(skb);
5198
5199 if (xdp_res & STMMAC_XDP_CONSUMED) {
5200 page_pool_put_page(rx_q->page_pool,
5201 virt_to_head_page(xdp.data),
5202 sync_len, true);
5203 buf->page = NULL;
5204 priv->dev->stats.rx_dropped++;
5205
5206 /* Clear skb, which was holding the XDP
5207 * verdict as an error pointer.
5208 */
5209 skb = NULL;
5210
5211 if (unlikely((status & rx_not_ls)))
5212 goto read_again;
5213
5214 count++;
5215 continue;
5216 } else if (xdp_res & (STMMAC_XDP_TX |
5217 STMMAC_XDP_REDIRECT)) {
5218 xdp_status |= xdp_res;
5219 buf->page = NULL;
5220 skb = NULL;
5221 count++;
5222 continue;
5223 }
5224 }
5225 }
5226
5227 if (!skb) {
5228 /* XDP program may expand or reduce tail */
5229 buf1_len = xdp.data_end - xdp.data;
5230
5231 skb = napi_alloc_skb(&ch->rx_napi, buf1_len);
5232 if (!skb) {
5233 priv->dev->stats.rx_dropped++;
5234 count++;
5235 goto drain_data;
5236 }
5237
5238 /* XDP program may adjust header */
5239 skb_copy_to_linear_data(skb, xdp.data, buf1_len);
5240 skb_put(skb, buf1_len);
5241
5242 /* Data payload copied into SKB, page ready for recycle */
5243 page_pool_recycle_direct(rx_q->page_pool, buf->page);
5244 buf->page = NULL;
5245 } else if (buf1_len) {
5246 dma_sync_single_for_cpu(priv->device, buf->addr,
5247 buf1_len, dma_dir);
5248 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
5249 buf->page, buf->page_offset, buf1_len,
5250 priv->dma_buf_sz);
5251
5252 /* Data payload appended into SKB */
5253 page_pool_release_page(rx_q->page_pool, buf->page);
5254 buf->page = NULL;
5255 }
5256
5257 if (buf2_len) {
5258 dma_sync_single_for_cpu(priv->device, buf->sec_addr,
5259 buf2_len, dma_dir);
5260 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
5261 buf->sec_page, 0, buf2_len,
5262 priv->dma_buf_sz);
5263
5264 /* Data payload appended into SKB */
5265 page_pool_release_page(rx_q->page_pool, buf->sec_page);
5266 buf->sec_page = NULL;
5267 }
5268
5269 drain_data:
5270 if (likely(status & rx_not_ls))
5271 goto read_again;
5272 if (!skb)
5273 continue;
5274
5275 /* Got entire packet into SKB. Finish it.
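* Fill in the receive metadata (hardware timestamp, VLAN tag,
* checksum status and RSS hash) and hand the skb to GRO.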
*/
5276
5277 stmmac_get_rx_hwtstamp(priv, p, np, skb);
5278 stmmac_rx_vlan(priv->dev, skb);
5279 skb->protocol = eth_type_trans(skb, priv->dev);
5280
5281 if (unlikely(!coe))
5282 skb_checksum_none_assert(skb);
5283 else
5284 skb->ip_summed = CHECKSUM_UNNECESSARY;
5285
5286 if (!stmmac_get_rx_hash(priv, p, &hash, &hash_type))
5287 skb_set_hash(skb, hash, hash_type);
5288
5289 skb_record_rx_queue(skb, queue);
5290 napi_gro_receive(&ch->rx_napi, skb);
5291 skb = NULL;
5292
5293 priv->dev->stats.rx_packets++;
5294 priv->dev->stats.rx_bytes += len;
5295 count++;
5296 }
5297
5298 if (status & rx_not_ls || skb) {
5299 rx_q->state_saved = true;
5300 rx_q->state.skb = skb;
5301 rx_q->state.error = error;
5302 rx_q->state.len = len;
5303 }
5304
5305 stmmac_finalize_xdp_rx(priv, xdp_status);
5306
5307 stmmac_rx_refill(priv, queue);
5308
5309 priv->xstats.rx_pkt_n += count;
5310 priv->xstats.rxq_stats[queue].rx_pkt_n += count;
5311
5312 return count;
5313 }
5314
5315 static int stmmac_napi_poll_rx(struct napi_struct *napi, int budget)
5316 {
5317 struct stmmac_channel *ch =
5318 container_of(napi, struct stmmac_channel, rx_napi);
5319 struct stmmac_priv *priv = ch->priv_data;
5320 u32 chan = ch->index;
5321 int work_done;
5322
5323 priv->xstats.napi_poll++;
5324
5325 work_done = stmmac_rx(priv, budget, chan);
5326 if (work_done < budget && napi_complete_done(napi, work_done)) {
5327 unsigned long flags;
5328
5329 spin_lock_irqsave(&ch->lock, flags);
5330 stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 0);
5331 spin_unlock_irqrestore(&ch->lock, flags);
5332 }
5333
5334 return work_done;
5335 }
5336
5337 static int stmmac_napi_poll_tx(struct napi_struct *napi, int budget)
5338 {
5339 struct stmmac_channel *ch =
5340 container_of(napi, struct stmmac_channel, tx_napi);
5341 struct stmmac_priv *priv = ch->priv_data;
5342 u32 chan = ch->index;
5343 int work_done;
5344
5345 priv->xstats.napi_poll++;
5346
5347 work_done = stmmac_tx_clean(priv, budget, chan);
5348 work_done = min(work_done, budget);
5349
5350 if (work_done < budget && napi_complete_done(napi, work_done)) {
5351 unsigned long flags;
5352
5353 spin_lock_irqsave(&ch->lock, flags);
5354 stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 0, 1);
5355 spin_unlock_irqrestore(&ch->lock, flags);
5356 }
5357
5358 return work_done;
5359 }
5360
5361 static int stmmac_napi_poll_rxtx(struct napi_struct *napi, int budget)
5362 {
5363 struct stmmac_channel *ch =
5364 container_of(napi, struct stmmac_channel, rxtx_napi);
5365 struct stmmac_priv *priv = ch->priv_data;
5366 int rx_done, tx_done, rxtx_done;
5367 u32 chan = ch->index;
5368
5369 priv->xstats.napi_poll++;
5370
5371 tx_done = stmmac_tx_clean(priv, budget, chan);
5372 tx_done = min(tx_done, budget);
5373
5374 rx_done = stmmac_rx_zc(priv, budget, chan);
5375
5376 rxtx_done = max(tx_done, rx_done);
5377
5378 /* If either TX or RX work is not complete, return budget
5379 * and keep polling
5380 */
5381 if (rxtx_done >= budget)
5382 return budget;
5383
5384 /* all work done, exit the polling mode */
5385 if (napi_complete_done(napi, rxtx_done)) {
5386 unsigned long flags;
5387
5388 spin_lock_irqsave(&ch->lock, flags);
5389 /* Both RX and TX work are complete,
5390 * so enable both RX & TX IRQs.
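* They were masked by stmmac_napi_check() when this channel's
* interrupt fired.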
5391 */ 5392 stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 1); 5393 spin_unlock_irqrestore(&ch->lock, flags); 5394 } 5395 5396 return min(rxtx_done, budget - 1); 5397 } 5398 5399 /** 5400 * stmmac_tx_timeout 5401 * @dev : Pointer to net device structure 5402 * @txqueue: the index of the hanging transmit queue 5403 * Description: this function is called when a packet transmission fails to 5404 * complete within a reasonable time. The driver will mark the error in the 5405 * netdev structure and arrange for the device to be reset to a sane state 5406 * in order to transmit a new packet. 5407 */ 5408 static void stmmac_tx_timeout(struct net_device *dev, unsigned int txqueue) 5409 { 5410 struct stmmac_priv *priv = netdev_priv(dev); 5411 5412 stmmac_global_err(priv); 5413 } 5414 5415 /** 5416 * stmmac_set_rx_mode - entry point for multicast addressing 5417 * @dev : pointer to the device structure 5418 * Description: 5419 * This function is a driver entry point which gets called by the kernel 5420 * whenever multicast addresses must be enabled/disabled. 5421 * Return value: 5422 * void. 5423 */ 5424 static void stmmac_set_rx_mode(struct net_device *dev) 5425 { 5426 struct stmmac_priv *priv = netdev_priv(dev); 5427 5428 stmmac_set_filter(priv, priv->hw, dev); 5429 } 5430 5431 /** 5432 * stmmac_change_mtu - entry point to change MTU size for the device. 5433 * @dev : device pointer. 5434 * @new_mtu : the new MTU size for the device. 5435 * Description: the Maximum Transfer Unit (MTU) is used by the network layer 5436 * to drive packet transmission. Ethernet has an MTU of 1500 octets 5437 * (ETH_DATA_LEN). This value can be changed with ifconfig. 5438 * Return value: 5439 * 0 on success and an appropriate (-)ve integer as defined in errno.h 5440 * file on failure. 5441 */ 5442 static int stmmac_change_mtu(struct net_device *dev, int new_mtu) 5443 { 5444 struct stmmac_priv *priv = netdev_priv(dev); 5445 int txfifosz = priv->plat->tx_fifo_size; 5446 const int mtu = new_mtu; 5447 5448 if (txfifosz == 0) 5449 txfifosz = priv->dma_cap.tx_fifo_size; 5450 5451 txfifosz /= priv->plat->tx_queues_to_use; 5452 5453 if (netif_running(dev)) { 5454 netdev_err(priv->dev, "must be stopped to change its MTU\n"); 5455 return -EBUSY; 5456 } 5457 5458 if (stmmac_xdp_is_enabled(priv) && new_mtu > ETH_DATA_LEN) { 5459 netdev_dbg(priv->dev, "Jumbo frames not supported for XDP\n"); 5460 return -EINVAL; 5461 } 5462 5463 new_mtu = STMMAC_ALIGN(new_mtu); 5464 5465 /* If condition true, FIFO is too small or MTU too large */ 5466 if ((txfifosz < new_mtu) || (new_mtu > BUF_SIZE_16KiB)) 5467 return -EINVAL; 5468 5469 dev->mtu = mtu; 5470 5471 netdev_update_features(dev); 5472 5473 return 0; 5474 } 5475 5476 static netdev_features_t stmmac_fix_features(struct net_device *dev, 5477 netdev_features_t features) 5478 { 5479 struct stmmac_priv *priv = netdev_priv(dev); 5480 5481 if (priv->plat->rx_coe == STMMAC_RX_COE_NONE) 5482 features &= ~NETIF_F_RXCSUM; 5483 5484 if (!priv->plat->tx_coe) 5485 features &= ~NETIF_F_CSUM_MASK; 5486 5487 /* Some GMAC devices have a bugged Jumbo frame support that 5488 * needs to have the Tx COE disabled for oversized frames 5489 * (due to limited buffer sizes). In this case we disable 5490 * the TX csum insertion in the TDES and not use SF. 
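* (SF is the MTL store-and-forward mode, SF_DMA_MODE.)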
5491 */
5492 if (priv->plat->bugged_jumbo && (dev->mtu > ETH_DATA_LEN))
5493 features &= ~NETIF_F_CSUM_MASK;
5494
5495 /* Disable tso if asked by ethtool */
5496 if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
5497 if (features & NETIF_F_TSO)
5498 priv->tso = true;
5499 else
5500 priv->tso = false;
5501 }
5502
5503 return features;
5504 }
5505
5506 static int stmmac_set_features(struct net_device *netdev,
5507 netdev_features_t features)
5508 {
5509 struct stmmac_priv *priv = netdev_priv(netdev);
5510 bool sph_en;
5511 u32 chan;
5512
5513 /* Keep the COE type if RX checksum offload is supported */
5514 if (features & NETIF_F_RXCSUM)
5515 priv->hw->rx_csum = priv->plat->rx_coe;
5516 else
5517 priv->hw->rx_csum = 0;
5518 /* No check needed because rx_coe has been set before and it will be
5519 * fixed in case of issue.
5520 */
5521 stmmac_rx_ipc(priv, priv->hw);
5522
5523 sph_en = (priv->hw->rx_csum > 0) && priv->sph;
5524
5525 for (chan = 0; chan < priv->plat->rx_queues_to_use; chan++)
5526 stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
5527
5528 return 0;
5529 }
5530
5531 static void stmmac_fpe_event_status(struct stmmac_priv *priv, int status)
5532 {
5533 struct stmmac_fpe_cfg *fpe_cfg = priv->plat->fpe_cfg;
5534 enum stmmac_fpe_state *lo_state = &fpe_cfg->lo_fpe_state;
5535 enum stmmac_fpe_state *lp_state = &fpe_cfg->lp_fpe_state;
5536 bool *hs_enable = &fpe_cfg->hs_enable;
5537
5538 if (status == FPE_EVENT_UNKNOWN || !*hs_enable)
5539 return;
5540
5541 /* If LP has sent verify mPacket, LP is FPE capable */
5542 if ((status & FPE_EVENT_RVER) == FPE_EVENT_RVER) {
5543 if (*lp_state < FPE_STATE_CAPABLE)
5544 *lp_state = FPE_STATE_CAPABLE;
5545
5546 /* If the user has requested FPE enable, respond quickly */
5547 if (*hs_enable)
5548 stmmac_fpe_send_mpacket(priv, priv->ioaddr,
5549 MPACKET_RESPONSE);
5550 }
5551
5552 /* If Local has sent verify mPacket, Local is FPE capable */
5553 if ((status & FPE_EVENT_TVER) == FPE_EVENT_TVER) {
5554 if (*lo_state < FPE_STATE_CAPABLE)
5555 *lo_state = FPE_STATE_CAPABLE;
5556 }
5557
5558 /* If LP has sent response mPacket, LP is entering FPE ON */
5559 if ((status & FPE_EVENT_RRSP) == FPE_EVENT_RRSP)
5560 *lp_state = FPE_STATE_ENTERING_ON;
5561
5562 /* If Local has sent response mPacket, Local is entering FPE ON */
5563 if ((status & FPE_EVENT_TRSP) == FPE_EVENT_TRSP)
5564 *lo_state = FPE_STATE_ENTERING_ON;
5565
5566 if (!test_bit(__FPE_REMOVING, &priv->fpe_task_state) &&
5567 !test_and_set_bit(__FPE_TASK_SCHED, &priv->fpe_task_state) &&
5568 priv->fpe_wq) {
5569 queue_work(priv->fpe_wq, &priv->fpe_task);
5570 }
5571 }
5572
5573 static void stmmac_common_interrupt(struct stmmac_priv *priv)
5574 {
5575 u32 rx_cnt = priv->plat->rx_queues_to_use;
5576 u32 tx_cnt = priv->plat->tx_queues_to_use;
5577 u32 queues_count;
5578 u32 queue;
5579 bool xmac;
5580
5581 xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
5582 queues_count = (rx_cnt > tx_cnt) ?
rx_cnt : tx_cnt; 5583 5584 if (priv->irq_wake) 5585 pm_wakeup_event(priv->device, 0); 5586 5587 if (priv->dma_cap.estsel) 5588 stmmac_est_irq_status(priv, priv->ioaddr, priv->dev, 5589 &priv->xstats, tx_cnt); 5590 5591 if (priv->dma_cap.fpesel) { 5592 int status = stmmac_fpe_irq_status(priv, priv->ioaddr, 5593 priv->dev); 5594 5595 stmmac_fpe_event_status(priv, status); 5596 } 5597 5598 /* To handle GMAC own interrupts */ 5599 if ((priv->plat->has_gmac) || xmac) { 5600 int status = stmmac_host_irq_status(priv, priv->hw, &priv->xstats); 5601 5602 if (unlikely(status)) { 5603 /* For LPI we need to save the tx status */ 5604 if (status & CORE_IRQ_TX_PATH_IN_LPI_MODE) 5605 priv->tx_path_in_lpi_mode = true; 5606 if (status & CORE_IRQ_TX_PATH_EXIT_LPI_MODE) 5607 priv->tx_path_in_lpi_mode = false; 5608 } 5609 5610 for (queue = 0; queue < queues_count; queue++) { 5611 status = stmmac_host_mtl_irq_status(priv, priv->hw, 5612 queue); 5613 } 5614 5615 /* PCS link status */ 5616 if (priv->hw->pcs) { 5617 if (priv->xstats.pcs_link) 5618 netif_carrier_on(priv->dev); 5619 else 5620 netif_carrier_off(priv->dev); 5621 } 5622 5623 stmmac_timestamp_interrupt(priv, priv); 5624 } 5625 } 5626 5627 /** 5628 * stmmac_interrupt - main ISR 5629 * @irq: interrupt number. 5630 * @dev_id: to pass the net device pointer. 5631 * Description: this is the main driver interrupt service routine. 5632 * It can call: 5633 * o DMA service routine (to manage incoming frame reception and transmission 5634 * status) 5635 * o Core interrupts to manage: remote wake-up, management counter, LPI 5636 * interrupts. 5637 */ 5638 static irqreturn_t stmmac_interrupt(int irq, void *dev_id) 5639 { 5640 struct net_device *dev = (struct net_device *)dev_id; 5641 struct stmmac_priv *priv = netdev_priv(dev); 5642 5643 /* Check if adapter is up */ 5644 if (test_bit(STMMAC_DOWN, &priv->state)) 5645 return IRQ_HANDLED; 5646 5647 /* Check if a fatal error happened */ 5648 if (stmmac_safety_feat_interrupt(priv)) 5649 return IRQ_HANDLED; 5650 5651 /* To handle Common interrupts */ 5652 stmmac_common_interrupt(priv); 5653 5654 /* To handle DMA interrupts */ 5655 stmmac_dma_interrupt(priv); 5656 5657 return IRQ_HANDLED; 5658 } 5659 5660 static irqreturn_t stmmac_mac_interrupt(int irq, void *dev_id) 5661 { 5662 struct net_device *dev = (struct net_device *)dev_id; 5663 struct stmmac_priv *priv = netdev_priv(dev); 5664 5665 if (unlikely(!dev)) { 5666 netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__); 5667 return IRQ_NONE; 5668 } 5669 5670 /* Check if adapter is up */ 5671 if (test_bit(STMMAC_DOWN, &priv->state)) 5672 return IRQ_HANDLED; 5673 5674 /* To handle Common interrupts */ 5675 stmmac_common_interrupt(priv); 5676 5677 return IRQ_HANDLED; 5678 } 5679 5680 static irqreturn_t stmmac_safety_interrupt(int irq, void *dev_id) 5681 { 5682 struct net_device *dev = (struct net_device *)dev_id; 5683 struct stmmac_priv *priv = netdev_priv(dev); 5684 5685 if (unlikely(!dev)) { 5686 netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__); 5687 return IRQ_NONE; 5688 } 5689 5690 /* Check if adapter is up */ 5691 if (test_bit(STMMAC_DOWN, &priv->state)) 5692 return IRQ_HANDLED; 5693 5694 /* Check if a fatal error happened */ 5695 stmmac_safety_feat_interrupt(priv); 5696 5697 return IRQ_HANDLED; 5698 } 5699 5700 static irqreturn_t stmmac_msi_intr_tx(int irq, void *data) 5701 { 5702 struct stmmac_tx_queue *tx_q = (struct stmmac_tx_queue *)data; 5703 int chan = tx_q->queue_index; 5704 struct stmmac_priv *priv; 5705 int status; 5706 5707 priv = 
container_of(tx_q, struct stmmac_priv, tx_queue[chan]);
5708
5709 if (unlikely(!data)) {
5710 netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
5711 return IRQ_NONE;
5712 }
5713
5714 /* Check if adapter is up */
5715 if (test_bit(STMMAC_DOWN, &priv->state))
5716 return IRQ_HANDLED;
5717
5718 status = stmmac_napi_check(priv, chan, DMA_DIR_TX);
5719
5720 if (unlikely(status & tx_hard_error_bump_tc)) {
5721 /* Try to bump up the dma threshold on this failure */
5722 if (unlikely(priv->xstats.threshold != SF_DMA_MODE) &&
5723 tc <= 256) {
5724 tc += 64;
5725 if (priv->plat->force_thresh_dma_mode)
5726 stmmac_set_dma_operation_mode(priv,
5727 tc,
5728 tc,
5729 chan);
5730 else
5731 stmmac_set_dma_operation_mode(priv,
5732 tc,
5733 SF_DMA_MODE,
5734 chan);
5735 priv->xstats.threshold = tc;
5736 }
5737 } else if (unlikely(status == tx_hard_error)) {
5738 stmmac_tx_err(priv, chan);
5739 }
5740
5741 return IRQ_HANDLED;
5742 }
5743
5744 static irqreturn_t stmmac_msi_intr_rx(int irq, void *data)
5745 {
5746 struct stmmac_rx_queue *rx_q = (struct stmmac_rx_queue *)data;
5747 int chan = rx_q->queue_index;
5748 struct stmmac_priv *priv;
5749
5750 priv = container_of(rx_q, struct stmmac_priv, rx_queue[chan]);
5751
5752 if (unlikely(!data)) {
5753 netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
5754 return IRQ_NONE;
5755 }
5756
5757 /* Check if adapter is up */
5758 if (test_bit(STMMAC_DOWN, &priv->state))
5759 return IRQ_HANDLED;
5760
5761 stmmac_napi_check(priv, chan, DMA_DIR_RX);
5762
5763 return IRQ_HANDLED;
5764 }
5765
5766 #ifdef CONFIG_NET_POLL_CONTROLLER
5767 /* Polling receive - used by NETCONSOLE and other diagnostic tools
5768 * to allow network I/O with interrupts disabled.
5769 */
5770 static void stmmac_poll_controller(struct net_device *dev)
5771 {
5772 struct stmmac_priv *priv = netdev_priv(dev);
5773 int i;
5774
5775 /* If adapter is down, do nothing */
5776 if (test_bit(STMMAC_DOWN, &priv->state))
5777 return;
5778
5779 if (priv->plat->multi_msi_en) {
5780 for (i = 0; i < priv->plat->rx_queues_to_use; i++)
5781 stmmac_msi_intr_rx(0, &priv->rx_queue[i]);
5782
5783 for (i = 0; i < priv->plat->tx_queues_to_use; i++)
5784 stmmac_msi_intr_tx(0, &priv->tx_queue[i]);
5785 } else {
5786 disable_irq(dev->irq);
5787 stmmac_interrupt(dev->irq, dev);
5788 enable_irq(dev->irq);
5789 }
5790 }
5791 #endif
5792
5793 /**
5794 * stmmac_ioctl - Entry point for the Ioctl
5795 * @dev: Device pointer.
5796 * @rq: An IOCTL specific structure, that can contain a pointer to
5797 * a proprietary structure used to pass information to the driver.
5798 * @cmd: IOCTL command
5799 * Description:
5800 * Currently it supports the phy_mii_ioctl(...) and HW time stamping.
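*
* A minimal user-space sketch of the SIOCSHWTSTAMP path (illustrative
* only: the interface name and socket setup are assumptions, and error
* handling is omitted):
*
*	struct hwtstamp_config cfg = {
*		.tx_type   = HWTSTAMP_TX_ON,
*		.rx_filter = HWTSTAMP_FILTER_ALL,
*	};
*	struct ifreq ifr = { 0 };
*
*	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
*	ifr.ifr_data = (void *)&cfg;
*	ioctl(sockfd, SIOCSHWTSTAMP, &ifr);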
5801 */ 5802 static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) 5803 { 5804 struct stmmac_priv *priv = netdev_priv(dev); 5805 int ret = -EOPNOTSUPP; 5806 5807 if (!netif_running(dev)) 5808 return -EINVAL; 5809 5810 switch (cmd) { 5811 case SIOCGMIIPHY: 5812 case SIOCGMIIREG: 5813 case SIOCSMIIREG: 5814 ret = phylink_mii_ioctl(priv->phylink, rq, cmd); 5815 break; 5816 case SIOCSHWTSTAMP: 5817 ret = stmmac_hwtstamp_set(dev, rq); 5818 break; 5819 case SIOCGHWTSTAMP: 5820 ret = stmmac_hwtstamp_get(dev, rq); 5821 break; 5822 default: 5823 break; 5824 } 5825 5826 return ret; 5827 } 5828 5829 static int stmmac_setup_tc_block_cb(enum tc_setup_type type, void *type_data, 5830 void *cb_priv) 5831 { 5832 struct stmmac_priv *priv = cb_priv; 5833 int ret = -EOPNOTSUPP; 5834 5835 if (!tc_cls_can_offload_and_chain0(priv->dev, type_data)) 5836 return ret; 5837 5838 __stmmac_disable_all_queues(priv); 5839 5840 switch (type) { 5841 case TC_SETUP_CLSU32: 5842 ret = stmmac_tc_setup_cls_u32(priv, priv, type_data); 5843 break; 5844 case TC_SETUP_CLSFLOWER: 5845 ret = stmmac_tc_setup_cls(priv, priv, type_data); 5846 break; 5847 default: 5848 break; 5849 } 5850 5851 stmmac_enable_all_queues(priv); 5852 return ret; 5853 } 5854 5855 static LIST_HEAD(stmmac_block_cb_list); 5856 5857 static int stmmac_setup_tc(struct net_device *ndev, enum tc_setup_type type, 5858 void *type_data) 5859 { 5860 struct stmmac_priv *priv = netdev_priv(ndev); 5861 5862 switch (type) { 5863 case TC_SETUP_BLOCK: 5864 return flow_block_cb_setup_simple(type_data, 5865 &stmmac_block_cb_list, 5866 stmmac_setup_tc_block_cb, 5867 priv, priv, true); 5868 case TC_SETUP_QDISC_CBS: 5869 return stmmac_tc_setup_cbs(priv, priv, type_data); 5870 case TC_SETUP_QDISC_TAPRIO: 5871 return stmmac_tc_setup_taprio(priv, priv, type_data); 5872 case TC_SETUP_QDISC_ETF: 5873 return stmmac_tc_setup_etf(priv, priv, type_data); 5874 default: 5875 return -EOPNOTSUPP; 5876 } 5877 } 5878 5879 static u16 stmmac_select_queue(struct net_device *dev, struct sk_buff *skb, 5880 struct net_device *sb_dev) 5881 { 5882 int gso = skb_shinfo(skb)->gso_type; 5883 5884 if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6 | SKB_GSO_UDP_L4)) { 5885 /* 5886 * There is no way to determine the number of TSO/USO 5887 * capable queues. Let's always use Queue 0 5888 * because if TSO/USO is supported then at least this 5889 * one will be capable.
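 * Queue 0 is therefore a safe choice for every GSO packet here.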
5890 */ 5891 return 0; 5892 } 5893 5894 return netdev_pick_tx(dev, skb, NULL) % dev->real_num_tx_queues; 5895 } 5896 5897 static int stmmac_set_mac_address(struct net_device *ndev, void *addr) 5898 { 5899 struct stmmac_priv *priv = netdev_priv(ndev); 5900 int ret = 0; 5901 5902 ret = pm_runtime_get_sync(priv->device); 5903 if (ret < 0) { 5904 pm_runtime_put_noidle(priv->device); 5905 return ret; 5906 } 5907 5908 ret = eth_mac_addr(ndev, addr); 5909 if (ret) 5910 goto set_mac_error; 5911 5912 stmmac_set_umac_addr(priv, priv->hw, ndev->dev_addr, 0); 5913 5914 set_mac_error: 5915 pm_runtime_put(priv->device); 5916 5917 return ret; 5918 } 5919 5920 #ifdef CONFIG_DEBUG_FS 5921 static struct dentry *stmmac_fs_dir; 5922 5923 static void sysfs_display_ring(void *head, int size, int extend_desc, 5924 struct seq_file *seq, dma_addr_t dma_phy_addr) 5925 { 5926 int i; 5927 struct dma_extended_desc *ep = (struct dma_extended_desc *)head; 5928 struct dma_desc *p = (struct dma_desc *)head; 5929 dma_addr_t dma_addr; 5930 5931 for (i = 0; i < size; i++) { 5932 if (extend_desc) { 5933 dma_addr = dma_phy_addr + i * sizeof(*ep); 5934 seq_printf(seq, "%d [%pad]: 0x%x 0x%x 0x%x 0x%x\n", 5935 i, &dma_addr, 5936 le32_to_cpu(ep->basic.des0), 5937 le32_to_cpu(ep->basic.des1), 5938 le32_to_cpu(ep->basic.des2), 5939 le32_to_cpu(ep->basic.des3)); 5940 ep++; 5941 } else { 5942 dma_addr = dma_phy_addr + i * sizeof(*p); 5943 seq_printf(seq, "%d [%pad]: 0x%x 0x%x 0x%x 0x%x\n", 5944 i, &dma_addr, 5945 le32_to_cpu(p->des0), le32_to_cpu(p->des1), 5946 le32_to_cpu(p->des2), le32_to_cpu(p->des3)); 5947 p++; 5948 } 5949 seq_printf(seq, "\n"); 5950 } 5951 } 5952 5953 static int stmmac_rings_status_show(struct seq_file *seq, void *v) 5954 { 5955 struct net_device *dev = seq->private; 5956 struct stmmac_priv *priv = netdev_priv(dev); 5957 u32 rx_count = priv->plat->rx_queues_to_use; 5958 u32 tx_count = priv->plat->tx_queues_to_use; 5959 u32 queue; 5960 5961 if ((dev->flags & IFF_UP) == 0) 5962 return 0; 5963 5964 for (queue = 0; queue < rx_count; queue++) { 5965 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; 5966 5967 seq_printf(seq, "RX Queue %d:\n", queue); 5968 5969 if (priv->extend_desc) { 5970 seq_printf(seq, "Extended descriptor ring:\n"); 5971 sysfs_display_ring((void *)rx_q->dma_erx, 5972 priv->dma_rx_size, 1, seq, rx_q->dma_rx_phy); 5973 } else { 5974 seq_printf(seq, "Descriptor ring:\n"); 5975 sysfs_display_ring((void *)rx_q->dma_rx, 5976 priv->dma_rx_size, 0, seq, rx_q->dma_rx_phy); 5977 } 5978 } 5979 5980 for (queue = 0; queue < tx_count; queue++) { 5981 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue]; 5982 5983 seq_printf(seq, "TX Queue %d:\n", queue); 5984 5985 if (priv->extend_desc) { 5986 seq_printf(seq, "Extended descriptor ring:\n"); 5987 sysfs_display_ring((void *)tx_q->dma_etx, 5988 priv->dma_tx_size, 1, seq, tx_q->dma_tx_phy); 5989 } else if (!(tx_q->tbs & STMMAC_TBS_AVAIL)) { 5990 seq_printf(seq, "Descriptor ring:\n"); 5991 sysfs_display_ring((void *)tx_q->dma_tx, 5992 priv->dma_tx_size, 0, seq, tx_q->dma_tx_phy); 5993 } 5994 } 5995 5996 return 0; 5997 } 5998 DEFINE_SHOW_ATTRIBUTE(stmmac_rings_status); 5999 6000 static int stmmac_dma_cap_show(struct seq_file *seq, void *v) 6001 { 6002 struct net_device *dev = seq->private; 6003 struct stmmac_priv *priv = netdev_priv(dev); 6004 6005 if (!priv->hw_cap_support) { 6006 seq_printf(seq, "DMA HW features not supported\n"); 6007 return 0; 6008 } 6009 6010 seq_printf(seq, "==============================\n"); 6011 seq_printf(seq, "\tDMA HW features\n"); 
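/* Each capability printed below mirrors a dma_cap field parsed from the HW feature registers at probe time. */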
6012 seq_printf(seq, "==============================\n"); 6013 6014 seq_printf(seq, "\t10/100 Mbps: %s\n", 6015 (priv->dma_cap.mbps_10_100) ? "Y" : "N"); 6016 seq_printf(seq, "\t1000 Mbps: %s\n", 6017 (priv->dma_cap.mbps_1000) ? "Y" : "N"); 6018 seq_printf(seq, "\tHalf duplex: %s\n", 6019 (priv->dma_cap.half_duplex) ? "Y" : "N"); 6020 seq_printf(seq, "\tHash Filter: %s\n", 6021 (priv->dma_cap.hash_filter) ? "Y" : "N"); 6022 seq_printf(seq, "\tMultiple MAC address registers: %s\n", 6023 (priv->dma_cap.multi_addr) ? "Y" : "N"); 6024 seq_printf(seq, "\tPCS (TBI/SGMII/RTBI PHY interfaces): %s\n", 6025 (priv->dma_cap.pcs) ? "Y" : "N"); 6026 seq_printf(seq, "\tSMA (MDIO) Interface: %s\n", 6027 (priv->dma_cap.sma_mdio) ? "Y" : "N"); 6028 seq_printf(seq, "\tPMT Remote wake up: %s\n", 6029 (priv->dma_cap.pmt_remote_wake_up) ? "Y" : "N"); 6030 seq_printf(seq, "\tPMT Magic Frame: %s\n", 6031 (priv->dma_cap.pmt_magic_frame) ? "Y" : "N"); 6032 seq_printf(seq, "\tRMON module: %s\n", 6033 (priv->dma_cap.rmon) ? "Y" : "N"); 6034 seq_printf(seq, "\tIEEE 1588-2002 Time Stamp: %s\n", 6035 (priv->dma_cap.time_stamp) ? "Y" : "N"); 6036 seq_printf(seq, "\tIEEE 1588-2008 Advanced Time Stamp: %s\n", 6037 (priv->dma_cap.atime_stamp) ? "Y" : "N"); 6038 seq_printf(seq, "\t802.3az - Energy-Efficient Ethernet (EEE): %s\n", 6039 (priv->dma_cap.eee) ? "Y" : "N"); 6040 seq_printf(seq, "\tAV features: %s\n", (priv->dma_cap.av) ? "Y" : "N"); 6041 seq_printf(seq, "\tChecksum Offload in TX: %s\n", 6042 (priv->dma_cap.tx_coe) ? "Y" : "N"); 6043 if (priv->synopsys_id >= DWMAC_CORE_4_00) { 6044 seq_printf(seq, "\tIP Checksum Offload in RX: %s\n", 6045 (priv->dma_cap.rx_coe) ? "Y" : "N"); 6046 } else { 6047 seq_printf(seq, "\tIP Checksum Offload (type1) in RX: %s\n", 6048 (priv->dma_cap.rx_coe_type1) ? "Y" : "N"); 6049 seq_printf(seq, "\tIP Checksum Offload (type2) in RX: %s\n", 6050 (priv->dma_cap.rx_coe_type2) ? "Y" : "N"); 6051 } 6052 seq_printf(seq, "\tRXFIFO > 2048bytes: %s\n", 6053 (priv->dma_cap.rxfifo_over_2048) ? "Y" : "N"); 6054 seq_printf(seq, "\tNumber of Additional RX channel: %d\n", 6055 priv->dma_cap.number_rx_channel); 6056 seq_printf(seq, "\tNumber of Additional TX channel: %d\n", 6057 priv->dma_cap.number_tx_channel); 6058 seq_printf(seq, "\tNumber of Additional RX queues: %d\n", 6059 priv->dma_cap.number_rx_queues); 6060 seq_printf(seq, "\tNumber of Additional TX queues: %d\n", 6061 priv->dma_cap.number_tx_queues); 6062 seq_printf(seq, "\tEnhanced descriptors: %s\n", 6063 (priv->dma_cap.enh_desc) ? "Y" : "N"); 6064 seq_printf(seq, "\tTX Fifo Size: %d\n", priv->dma_cap.tx_fifo_size); 6065 seq_printf(seq, "\tRX Fifo Size: %d\n", priv->dma_cap.rx_fifo_size); 6066 seq_printf(seq, "\tHash Table Size: %d\n", priv->dma_cap.hash_tb_sz); 6067 seq_printf(seq, "\tTSO: %s\n", priv->dma_cap.tsoen ? "Y" : "N"); 6068 seq_printf(seq, "\tNumber of PPS Outputs: %d\n", 6069 priv->dma_cap.pps_out_num); 6070 seq_printf(seq, "\tSafety Features: %s\n", 6071 priv->dma_cap.asp ? "Y" : "N"); 6072 seq_printf(seq, "\tFlexible RX Parser: %s\n", 6073 priv->dma_cap.frpsel ? "Y" : "N"); 6074 seq_printf(seq, "\tEnhanced Addressing: %d\n", 6075 priv->dma_cap.addr64); 6076 seq_printf(seq, "\tReceive Side Scaling: %s\n", 6077 priv->dma_cap.rssen ? "Y" : "N"); 6078 seq_printf(seq, "\tVLAN Hash Filtering: %s\n", 6079 priv->dma_cap.vlhash ? "Y" : "N"); 6080 seq_printf(seq, "\tSplit Header: %s\n", 6081 priv->dma_cap.sphen ? "Y" : "N"); 6082 seq_printf(seq, "\tVLAN TX Insertion: %s\n", 6083 priv->dma_cap.vlins ? 
"Y" : "N"); 6084 seq_printf(seq, "\tDouble VLAN: %s\n", 6085 priv->dma_cap.dvlan ? "Y" : "N"); 6086 seq_printf(seq, "\tNumber of L3/L4 Filters: %d\n", 6087 priv->dma_cap.l3l4fnum); 6088 seq_printf(seq, "\tARP Offloading: %s\n", 6089 priv->dma_cap.arpoffsel ? "Y" : "N"); 6090 seq_printf(seq, "\tEnhancements to Scheduled Traffic (EST): %s\n", 6091 priv->dma_cap.estsel ? "Y" : "N"); 6092 seq_printf(seq, "\tFrame Preemption (FPE): %s\n", 6093 priv->dma_cap.fpesel ? "Y" : "N"); 6094 seq_printf(seq, "\tTime-Based Scheduling (TBS): %s\n", 6095 priv->dma_cap.tbssel ? "Y" : "N"); 6096 return 0; 6097 } 6098 DEFINE_SHOW_ATTRIBUTE(stmmac_dma_cap); 6099 6100 /* Use network device events to rename debugfs file entries. 6101 */ 6102 static int stmmac_device_event(struct notifier_block *unused, 6103 unsigned long event, void *ptr) 6104 { 6105 struct net_device *dev = netdev_notifier_info_to_dev(ptr); 6106 struct stmmac_priv *priv = netdev_priv(dev); 6107 6108 if (dev->netdev_ops != &stmmac_netdev_ops) 6109 goto done; 6110 6111 switch (event) { 6112 case NETDEV_CHANGENAME: 6113 if (priv->dbgfs_dir) 6114 priv->dbgfs_dir = debugfs_rename(stmmac_fs_dir, 6115 priv->dbgfs_dir, 6116 stmmac_fs_dir, 6117 dev->name); 6118 break; 6119 } 6120 done: 6121 return NOTIFY_DONE; 6122 } 6123 6124 static struct notifier_block stmmac_notifier = { 6125 .notifier_call = stmmac_device_event, 6126 }; 6127 6128 static void stmmac_init_fs(struct net_device *dev) 6129 { 6130 struct stmmac_priv *priv = netdev_priv(dev); 6131 6132 rtnl_lock(); 6133 6134 /* Create per netdev entries */ 6135 priv->dbgfs_dir = debugfs_create_dir(dev->name, stmmac_fs_dir); 6136 6137 /* Entry to report DMA RX/TX rings */ 6138 debugfs_create_file("descriptors_status", 0444, priv->dbgfs_dir, dev, 6139 &stmmac_rings_status_fops); 6140 6141 /* Entry to report the DMA HW features */ 6142 debugfs_create_file("dma_cap", 0444, priv->dbgfs_dir, dev, 6143 &stmmac_dma_cap_fops); 6144 6145 rtnl_unlock(); 6146 } 6147 6148 static void stmmac_exit_fs(struct net_device *dev) 6149 { 6150 struct stmmac_priv *priv = netdev_priv(dev); 6151 6152 debugfs_remove_recursive(priv->dbgfs_dir); 6153 } 6154 #endif /* CONFIG_DEBUG_FS */ 6155 6156 static u32 stmmac_vid_crc32_le(__le16 vid_le) 6157 { 6158 unsigned char *data = (unsigned char *)&vid_le; 6159 unsigned char data_byte = 0; 6160 u32 crc = ~0x0; 6161 u32 temp = 0; 6162 int i, bits; 6163 6164 bits = get_bitmask_order(VLAN_VID_MASK); 6165 for (i = 0; i < bits; i++) { 6166 if ((i % 8) == 0) 6167 data_byte = data[i / 8]; 6168 6169 temp = ((crc & 1) ^ data_byte) & 1; 6170 crc >>= 1; 6171 data_byte >>= 1; 6172 6173 if (temp) 6174 crc ^= 0xedb88320; 6175 } 6176 6177 return crc; 6178 } 6179 6180 static int stmmac_vlan_update(struct stmmac_priv *priv, bool is_double) 6181 { 6182 u32 crc, hash = 0; 6183 __le16 pmatch = 0; 6184 int count = 0; 6185 u16 vid = 0; 6186 6187 for_each_set_bit(vid, priv->active_vlans, VLAN_N_VID) { 6188 __le16 vid_le = cpu_to_le16(vid); 6189 crc = bitrev32(~stmmac_vid_crc32_le(vid_le)) >> 28; 6190 hash |= (1 << crc); 6191 count++; 6192 } 6193 6194 if (!priv->dma_cap.vlhash) { 6195 if (count > 2) /* VID = 0 always passes filter */ 6196 return -EOPNOTSUPP; 6197 6198 pmatch = cpu_to_le16(vid); 6199 hash = 0; 6200 } 6201 6202 return stmmac_update_vlan_hash(priv, priv->hw, hash, pmatch, is_double); 6203 } 6204 6205 static int stmmac_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid) 6206 { 6207 struct stmmac_priv *priv = netdev_priv(ndev); 6208 bool is_double = false; 6209 int ret; 6210 6211 if 
(be16_to_cpu(proto) == ETH_P_8021AD) 6212 is_double = true; 6213 6214 set_bit(vid, priv->active_vlans); 6215 ret = stmmac_vlan_update(priv, is_double); 6216 if (ret) { 6217 clear_bit(vid, priv->active_vlans); 6218 return ret; 6219 } 6220 6221 if (priv->hw->num_vlan) { 6222 ret = stmmac_add_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid); 6223 if (ret) 6224 return ret; 6225 } 6226 6227 return 0; 6228 } 6229 6230 static int stmmac_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid) 6231 { 6232 struct stmmac_priv *priv = netdev_priv(ndev); 6233 bool is_double = false; 6234 int ret; 6235 6236 ret = pm_runtime_get_sync(priv->device); 6237 if (ret < 0) { 6238 pm_runtime_put_noidle(priv->device); 6239 return ret; 6240 } 6241 6242 if (be16_to_cpu(proto) == ETH_P_8021AD) 6243 is_double = true; 6244 6245 clear_bit(vid, priv->active_vlans); 6246 6247 if (priv->hw->num_vlan) { 6248 ret = stmmac_del_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid); 6249 if (ret) 6250 goto del_vlan_error; 6251 } 6252 6253 ret = stmmac_vlan_update(priv, is_double); 6254 6255 del_vlan_error: 6256 pm_runtime_put(priv->device); 6257 6258 return ret; 6259 } 6260 6261 static int stmmac_bpf(struct net_device *dev, struct netdev_bpf *bpf) 6262 { 6263 struct stmmac_priv *priv = netdev_priv(dev); 6264 6265 switch (bpf->command) { 6266 case XDP_SETUP_PROG: 6267 return stmmac_xdp_set_prog(priv, bpf->prog, bpf->extack); 6268 case XDP_SETUP_XSK_POOL: 6269 return stmmac_xdp_setup_pool(priv, bpf->xsk.pool, 6270 bpf->xsk.queue_id); 6271 default: 6272 return -EOPNOTSUPP; 6273 } 6274 } 6275 6276 static int stmmac_xdp_xmit(struct net_device *dev, int num_frames, 6277 struct xdp_frame **frames, u32 flags) 6278 { 6279 struct stmmac_priv *priv = netdev_priv(dev); 6280 int cpu = smp_processor_id(); 6281 struct netdev_queue *nq; 6282 int i, nxmit = 0; 6283 int queue; 6284 6285 if (unlikely(test_bit(STMMAC_DOWN, &priv->state))) 6286 return -ENETDOWN; 6287 6288 if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) 6289 return -EINVAL; 6290 6291 queue = stmmac_xdp_get_tx_queue(priv, cpu); 6292 nq = netdev_get_tx_queue(priv->dev, queue); 6293 6294 __netif_tx_lock(nq, cpu); 6295 /* Avoids TX time-out as we are sharing with slow path */ 6296 nq->trans_start = jiffies; 6297 6298 for (i = 0; i < num_frames; i++) { 6299 int res; 6300 6301 res = stmmac_xdp_xmit_xdpf(priv, queue, frames[i], true); 6302 if (res == STMMAC_XDP_CONSUMED) 6303 break; 6304 6305 nxmit++; 6306 } 6307 6308 if (flags & XDP_XMIT_FLUSH) { 6309 stmmac_flush_tx_descriptors(priv, queue); 6310 stmmac_tx_timer_arm(priv, queue); 6311 } 6312 6313 __netif_tx_unlock(nq); 6314 6315 return nxmit; 6316 } 6317 6318 void stmmac_disable_rx_queue(struct stmmac_priv *priv, u32 queue) 6319 { 6320 struct stmmac_channel *ch = &priv->channel[queue]; 6321 unsigned long flags; 6322 6323 spin_lock_irqsave(&ch->lock, flags); 6324 stmmac_disable_dma_irq(priv, priv->ioaddr, queue, 1, 0); 6325 spin_unlock_irqrestore(&ch->lock, flags); 6326 6327 stmmac_stop_rx_dma(priv, queue); 6328 __free_dma_rx_desc_resources(priv, queue); 6329 } 6330 6331 void stmmac_enable_rx_queue(struct stmmac_priv *priv, u32 queue) 6332 { 6333 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; 6334 struct stmmac_channel *ch = &priv->channel[queue]; 6335 unsigned long flags; 6336 u32 buf_size; 6337 int ret; 6338 6339 ret = __alloc_dma_rx_desc_resources(priv, queue); 6340 if (ret) { 6341 netdev_err(priv->dev, "Failed to alloc RX desc.\n"); 6342 return; 6343 } 6344 6345 ret = __init_dma_rx_desc_rings(priv, queue, GFP_KERNEL); 6346 
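/* On error, undo the descriptor allocation done just above. */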
if (ret) { 6347 __free_dma_rx_desc_resources(priv, queue); 6348 netdev_err(priv->dev, "Failed to init RX desc.\n"); 6349 return; 6350 } 6351 6352 stmmac_clear_rx_descriptors(priv, queue); 6353 6354 stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg, 6355 rx_q->dma_rx_phy, rx_q->queue_index); 6356 6357 rx_q->rx_tail_addr = rx_q->dma_rx_phy + (rx_q->buf_alloc_num * 6358 sizeof(struct dma_desc)); 6359 stmmac_set_rx_tail_ptr(priv, priv->ioaddr, 6360 rx_q->rx_tail_addr, rx_q->queue_index); 6361 6362 if (rx_q->xsk_pool && rx_q->buf_alloc_num) { 6363 buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool); 6364 stmmac_set_dma_bfsize(priv, priv->ioaddr, 6365 buf_size, 6366 rx_q->queue_index); 6367 } else { 6368 stmmac_set_dma_bfsize(priv, priv->ioaddr, 6369 priv->dma_buf_sz, 6370 rx_q->queue_index); 6371 } 6372 6373 stmmac_start_rx_dma(priv, queue); 6374 6375 spin_lock_irqsave(&ch->lock, flags); 6376 stmmac_enable_dma_irq(priv, priv->ioaddr, queue, 1, 0); 6377 spin_unlock_irqrestore(&ch->lock, flags); 6378 } 6379 6380 void stmmac_disable_tx_queue(struct stmmac_priv *priv, u32 queue) 6381 { 6382 struct stmmac_channel *ch = &priv->channel[queue]; 6383 unsigned long flags; 6384 6385 spin_lock_irqsave(&ch->lock, flags); 6386 stmmac_disable_dma_irq(priv, priv->ioaddr, queue, 0, 1); 6387 spin_unlock_irqrestore(&ch->lock, flags); 6388 6389 stmmac_stop_tx_dma(priv, queue); 6390 __free_dma_tx_desc_resources(priv, queue); 6391 } 6392 6393 void stmmac_enable_tx_queue(struct stmmac_priv *priv, u32 queue) 6394 { 6395 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue]; 6396 struct stmmac_channel *ch = &priv->channel[queue]; 6397 unsigned long flags; 6398 int ret; 6399 6400 ret = __alloc_dma_tx_desc_resources(priv, queue); 6401 if (ret) { 6402 netdev_err(priv->dev, "Failed to alloc TX desc.\n"); 6403 return; 6404 } 6405 6406 ret = __init_dma_tx_desc_rings(priv, queue); 6407 if (ret) { 6408 __free_dma_tx_desc_resources(priv, queue); 6409 netdev_err(priv->dev, "Failed to init TX desc.\n"); 6410 return; 6411 } 6412 6413 stmmac_clear_tx_descriptors(priv, queue); 6414 6415 stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg, 6416 tx_q->dma_tx_phy, tx_q->queue_index); 6417 6418 if (tx_q->tbs & STMMAC_TBS_AVAIL) 6419 stmmac_enable_tbs(priv, priv->ioaddr, 1, tx_q->queue_index); 6420 6421 tx_q->tx_tail_addr = tx_q->dma_tx_phy; 6422 stmmac_set_tx_tail_ptr(priv, priv->ioaddr, 6423 tx_q->tx_tail_addr, tx_q->queue_index); 6424 6425 stmmac_start_tx_dma(priv, queue); 6426 6427 spin_lock_irqsave(&ch->lock, flags); 6428 stmmac_enable_dma_irq(priv, priv->ioaddr, queue, 0, 1); 6429 spin_unlock_irqrestore(&ch->lock, flags); 6430 } 6431 6432 int stmmac_xsk_wakeup(struct net_device *dev, u32 queue, u32 flags) 6433 { 6434 struct stmmac_priv *priv = netdev_priv(dev); 6435 struct stmmac_rx_queue *rx_q; 6436 struct stmmac_tx_queue *tx_q; 6437 struct stmmac_channel *ch; 6438 6439 if (test_bit(STMMAC_DOWN, &priv->state) || 6440 !netif_carrier_ok(priv->dev)) 6441 return -ENETDOWN; 6442 6443 if (!stmmac_xdp_is_enabled(priv)) 6444 return -ENXIO; 6445 6446 if (queue >= priv->plat->rx_queues_to_use || 6447 queue >= priv->plat->tx_queues_to_use) 6448 return -EINVAL; 6449 6450 rx_q = &priv->rx_queue[queue]; 6451 tx_q = &priv->tx_queue[queue]; 6452 ch = &priv->channel[queue]; 6453 6454 if (!rx_q->xsk_pool && !tx_q->xsk_pool) 6455 return -ENXIO; 6456 6457 if (!napi_if_scheduled_mark_missed(&ch->rxtx_napi)) { 6458 /* EQoS does not have per-DMA channel SW interrupt, 6459 * so we schedule RX Napi straight-away. 
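 * The rxtx NAPI instance then polls both RX and TX for this channel.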
6460 */ 6461 if (likely(napi_schedule_prep(&ch->rxtx_napi))) 6462 __napi_schedule(&ch->rxtx_napi); 6463 } 6464 6465 return 0; 6466 } 6467 6468 static const struct net_device_ops stmmac_netdev_ops = { 6469 .ndo_open = stmmac_open, 6470 .ndo_start_xmit = stmmac_xmit, 6471 .ndo_stop = stmmac_release, 6472 .ndo_change_mtu = stmmac_change_mtu, 6473 .ndo_fix_features = stmmac_fix_features, 6474 .ndo_set_features = stmmac_set_features, 6475 .ndo_set_rx_mode = stmmac_set_rx_mode, 6476 .ndo_tx_timeout = stmmac_tx_timeout, 6477 .ndo_eth_ioctl = stmmac_ioctl, 6478 .ndo_setup_tc = stmmac_setup_tc, 6479 .ndo_select_queue = stmmac_select_queue, 6480 #ifdef CONFIG_NET_POLL_CONTROLLER 6481 .ndo_poll_controller = stmmac_poll_controller, 6482 #endif 6483 .ndo_set_mac_address = stmmac_set_mac_address, 6484 .ndo_vlan_rx_add_vid = stmmac_vlan_rx_add_vid, 6485 .ndo_vlan_rx_kill_vid = stmmac_vlan_rx_kill_vid, 6486 .ndo_bpf = stmmac_bpf, 6487 .ndo_xdp_xmit = stmmac_xdp_xmit, 6488 .ndo_xsk_wakeup = stmmac_xsk_wakeup, 6489 }; 6490 6491 static void stmmac_reset_subtask(struct stmmac_priv *priv) 6492 { 6493 if (!test_and_clear_bit(STMMAC_RESET_REQUESTED, &priv->state)) 6494 return; 6495 if (test_bit(STMMAC_DOWN, &priv->state)) 6496 return; 6497 6498 netdev_err(priv->dev, "Reset adapter.\n"); 6499 6500 rtnl_lock(); 6501 netif_trans_update(priv->dev); 6502 while (test_and_set_bit(STMMAC_RESETING, &priv->state)) 6503 usleep_range(1000, 2000); 6504 6505 set_bit(STMMAC_DOWN, &priv->state); 6506 dev_close(priv->dev); 6507 dev_open(priv->dev, NULL); 6508 clear_bit(STMMAC_DOWN, &priv->state); 6509 clear_bit(STMMAC_RESETING, &priv->state); 6510 rtnl_unlock(); 6511 } 6512 6513 static void stmmac_service_task(struct work_struct *work) 6514 { 6515 struct stmmac_priv *priv = container_of(work, struct stmmac_priv, 6516 service_task); 6517 6518 stmmac_reset_subtask(priv); 6519 clear_bit(STMMAC_SERVICE_SCHED, &priv->state); 6520 } 6521 6522 /** 6523 * stmmac_hw_init - Init the MAC device 6524 * @priv: driver private structure 6525 * Description: this function is to configure the MAC device according to 6526 * some platform parameters or the HW capability register. It prepares the 6527 * driver to use either ring or chain modes and to set up either enhanced or 6528 * normal descriptors. 6529 */ 6530 static int stmmac_hw_init(struct stmmac_priv *priv) 6531 { 6532 int ret; 6533 6534 /* dwmac-sun8i only works in chain mode */ 6535 if (priv->plat->has_sun8i) 6536 chain_mode = 1; 6537 priv->chain_mode = chain_mode; 6538 6539 /* Initialize HW Interface */ 6540 ret = stmmac_hwif_init(priv); 6541 if (ret) 6542 return ret; 6543 6544 /* Get the HW capability (new GMAC newer than 3.50a) */ 6545 priv->hw_cap_support = stmmac_get_hw_features(priv); 6546 if (priv->hw_cap_support) { 6547 dev_info(priv->device, "DMA HW capability register supported\n"); 6548 6549 /* Some gmac/dma configuration fields (e.g. enh_desc, tx_coe) 6550 * that are passed through the platform can be overridden 6551 * with the values from the HW capability 6552 * register (if supported).
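 * Platform-provided values are kept as-is when that register is absent.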
6553 */ 6554 priv->plat->enh_desc = priv->dma_cap.enh_desc; 6555 priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up && 6556 !priv->plat->use_phy_wol; 6557 priv->hw->pmt = priv->plat->pmt; 6558 if (priv->dma_cap.hash_tb_sz) { 6559 priv->hw->multicast_filter_bins = 6560 (BIT(priv->dma_cap.hash_tb_sz) << 5); 6561 priv->hw->mcast_bits_log2 = 6562 ilog2(priv->hw->multicast_filter_bins); 6563 } 6564 6565 /* TXCOE doesn't work in thresh DMA mode */ 6566 if (priv->plat->force_thresh_dma_mode) 6567 priv->plat->tx_coe = 0; 6568 else 6569 priv->plat->tx_coe = priv->dma_cap.tx_coe; 6570 6571 /* In case of GMAC4 rx_coe is from HW cap register. */ 6572 priv->plat->rx_coe = priv->dma_cap.rx_coe; 6573 6574 if (priv->dma_cap.rx_coe_type2) 6575 priv->plat->rx_coe = STMMAC_RX_COE_TYPE2; 6576 else if (priv->dma_cap.rx_coe_type1) 6577 priv->plat->rx_coe = STMMAC_RX_COE_TYPE1; 6578 6579 } else { 6580 dev_info(priv->device, "No HW DMA feature register supported\n"); 6581 } 6582 6583 if (priv->plat->rx_coe) { 6584 priv->hw->rx_csum = priv->plat->rx_coe; 6585 dev_info(priv->device, "RX Checksum Offload Engine supported\n"); 6586 if (priv->synopsys_id < DWMAC_CORE_4_00) 6587 dev_info(priv->device, "COE Type %d\n", priv->hw->rx_csum); 6588 } 6589 if (priv->plat->tx_coe) 6590 dev_info(priv->device, "TX Checksum insertion supported\n"); 6591 6592 if (priv->plat->pmt) { 6593 dev_info(priv->device, "Wake-Up On LAN supported\n"); 6594 device_set_wakeup_capable(priv->device, 1); 6595 } 6596 6597 if (priv->dma_cap.tsoen) 6598 dev_info(priv->device, "TSO supported\n"); 6599 6600 priv->hw->vlan_fail_q_en = priv->plat->vlan_fail_q_en; 6601 priv->hw->vlan_fail_q = priv->plat->vlan_fail_q; 6602 6603 /* Run HW quirks, if any */ 6604 if (priv->hwif_quirks) { 6605 ret = priv->hwif_quirks(priv); 6606 if (ret) 6607 return ret; 6608 } 6609 6610 /* Rx Watchdog is available in cores newer than 3.40. 6611 * In some cases, for example on buggy HW, this feature 6612 * has to be disabled; this can be done by passing the 6613 * riwt_off field from the platform.
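 * When use_riwt is set, RX interrupts are mitigated by this HW watchdog timer.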
6614 */ 6615 if (((priv->synopsys_id >= DWMAC_CORE_3_50) || 6616 (priv->plat->has_xgmac)) && (!priv->plat->riwt_off)) { 6617 priv->use_riwt = 1; 6618 dev_info(priv->device, 6619 "Enable RX Mitigation via HW Watchdog Timer\n"); 6620 } 6621 6622 return 0; 6623 } 6624 6625 static void stmmac_napi_add(struct net_device *dev) 6626 { 6627 struct stmmac_priv *priv = netdev_priv(dev); 6628 u32 queue, maxq; 6629 6630 maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use); 6631 6632 for (queue = 0; queue < maxq; queue++) { 6633 struct stmmac_channel *ch = &priv->channel[queue]; 6634 6635 ch->priv_data = priv; 6636 ch->index = queue; 6637 spin_lock_init(&ch->lock); 6638 6639 if (queue < priv->plat->rx_queues_to_use) { 6640 netif_napi_add(dev, &ch->rx_napi, stmmac_napi_poll_rx, 6641 NAPI_POLL_WEIGHT); 6642 } 6643 if (queue < priv->plat->tx_queues_to_use) { 6644 netif_tx_napi_add(dev, &ch->tx_napi, 6645 stmmac_napi_poll_tx, 6646 NAPI_POLL_WEIGHT); 6647 } 6648 if (queue < priv->plat->rx_queues_to_use && 6649 queue < priv->plat->tx_queues_to_use) { 6650 netif_napi_add(dev, &ch->rxtx_napi, 6651 stmmac_napi_poll_rxtx, 6652 NAPI_POLL_WEIGHT); 6653 } 6654 } 6655 } 6656 6657 static void stmmac_napi_del(struct net_device *dev) 6658 { 6659 struct stmmac_priv *priv = netdev_priv(dev); 6660 u32 queue, maxq; 6661 6662 maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use); 6663 6664 for (queue = 0; queue < maxq; queue++) { 6665 struct stmmac_channel *ch = &priv->channel[queue]; 6666 6667 if (queue < priv->plat->rx_queues_to_use) 6668 netif_napi_del(&ch->rx_napi); 6669 if (queue < priv->plat->tx_queues_to_use) 6670 netif_napi_del(&ch->tx_napi); 6671 if (queue < priv->plat->rx_queues_to_use && 6672 queue < priv->plat->tx_queues_to_use) { 6673 netif_napi_del(&ch->rxtx_napi); 6674 } 6675 } 6676 } 6677 6678 int stmmac_reinit_queues(struct net_device *dev, u32 rx_cnt, u32 tx_cnt) 6679 { 6680 struct stmmac_priv *priv = netdev_priv(dev); 6681 int ret = 0; 6682 6683 if (netif_running(dev)) 6684 stmmac_release(dev); 6685 6686 stmmac_napi_del(dev); 6687 6688 priv->plat->rx_queues_to_use = rx_cnt; 6689 priv->plat->tx_queues_to_use = tx_cnt; 6690 6691 stmmac_napi_add(dev); 6692 6693 if (netif_running(dev)) 6694 ret = stmmac_open(dev); 6695 6696 return ret; 6697 } 6698 6699 int stmmac_reinit_ringparam(struct net_device *dev, u32 rx_size, u32 tx_size) 6700 { 6701 struct stmmac_priv *priv = netdev_priv(dev); 6702 int ret = 0; 6703 6704 if (netif_running(dev)) 6705 stmmac_release(dev); 6706 6707 priv->dma_rx_size = rx_size; 6708 priv->dma_tx_size = tx_size; 6709 6710 if (netif_running(dev)) 6711 ret = stmmac_open(dev); 6712 6713 return ret; 6714 } 6715 6716 #define SEND_VERIFY_MPACKET_FMT "Send Verify mPacket lo_state=%d lp_state=%d\n" 6717 static void stmmac_fpe_lp_task(struct work_struct *work) 6718 { 6719 struct stmmac_priv *priv = container_of(work, struct stmmac_priv, 6720 fpe_task); 6721 struct stmmac_fpe_cfg *fpe_cfg = priv->plat->fpe_cfg; 6722 enum stmmac_fpe_state *lo_state = &fpe_cfg->lo_fpe_state; 6723 enum stmmac_fpe_state *lp_state = &fpe_cfg->lp_fpe_state; 6724 bool *hs_enable = &fpe_cfg->hs_enable; 6725 bool *enable = &fpe_cfg->enable; 6726 int retries = 20; 6727 6728 while (retries-- > 0) { 6729 /* Bail out immediately if FPE handshake is OFF */ 6730 if (*lo_state == FPE_STATE_OFF || !*hs_enable) 6731 break; 6732 6733 if (*lo_state == FPE_STATE_ENTERING_ON && 6734 *lp_state == FPE_STATE_ENTERING_ON) { 6735 stmmac_fpe_configure(priv, priv->ioaddr, 6736 priv->plat->tx_queues_to_use,
priv->plat->rx_queues_to_use, 6738 *enable); 6739 6740 netdev_info(priv->dev, "configured FPE\n"); 6741 6742 *lo_state = FPE_STATE_ON; 6743 *lp_state = FPE_STATE_ON; 6744 netdev_info(priv->dev, "!!! BOTH FPE stations ON\n"); 6745 break; 6746 } 6747 6748 if ((*lo_state == FPE_STATE_CAPABLE || 6749 *lo_state == FPE_STATE_ENTERING_ON) && 6750 *lp_state != FPE_STATE_ON) { 6751 netdev_info(priv->dev, SEND_VERIFY_MPACKET_FMT, 6752 *lo_state, *lp_state); 6753 stmmac_fpe_send_mpacket(priv, priv->ioaddr, 6754 MPACKET_VERIFY); 6755 } 6756 /* Sleep then retry */ 6757 msleep(500); 6758 } 6759 6760 clear_bit(__FPE_TASK_SCHED, &priv->fpe_task_state); 6761 } 6762 6763 void stmmac_fpe_handshake(struct stmmac_priv *priv, bool enable) 6764 { 6765 if (priv->plat->fpe_cfg->hs_enable != enable) { 6766 if (enable) { 6767 stmmac_fpe_send_mpacket(priv, priv->ioaddr, 6768 MPACKET_VERIFY); 6769 } else { 6770 priv->plat->fpe_cfg->lo_fpe_state = FPE_STATE_OFF; 6771 priv->plat->fpe_cfg->lp_fpe_state = FPE_STATE_OFF; 6772 } 6773 6774 priv->plat->fpe_cfg->hs_enable = enable; 6775 } 6776 } 6777 6778 /** 6779 * stmmac_dvr_probe 6780 * @device: device pointer 6781 * @plat_dat: platform data pointer 6782 * @res: stmmac resource pointer 6783 * Description: this is the main probe function, used to 6784 * call alloc_etherdev and allocate the private structure. 6785 * Return: 6786 * returns 0 on success, otherwise errno. 6787 */ 6788 int stmmac_dvr_probe(struct device *device, 6789 struct plat_stmmacenet_data *plat_dat, 6790 struct stmmac_resources *res) 6791 { 6792 struct net_device *ndev = NULL; 6793 struct stmmac_priv *priv; 6794 u32 rxq; 6795 int i, ret = 0; 6796 6797 ndev = devm_alloc_etherdev_mqs(device, sizeof(struct stmmac_priv), 6798 MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES); 6799 if (!ndev) 6800 return -ENOMEM; 6801 6802 SET_NETDEV_DEV(ndev, device); 6803 6804 priv = netdev_priv(ndev); 6805 priv->device = device; 6806 priv->dev = ndev; 6807 6808 stmmac_set_ethtool_ops(ndev); 6809 priv->pause = pause; 6810 priv->plat = plat_dat; 6811 priv->ioaddr = res->addr; 6812 priv->dev->base_addr = (unsigned long)res->addr; 6813 priv->plat->dma_cfg->multi_msi_en = priv->plat->multi_msi_en; 6814 6815 priv->dev->irq = res->irq; 6816 priv->wol_irq = res->wol_irq; 6817 priv->lpi_irq = res->lpi_irq; 6818 priv->sfty_ce_irq = res->sfty_ce_irq; 6819 priv->sfty_ue_irq = res->sfty_ue_irq; 6820 for (i = 0; i < MTL_MAX_RX_QUEUES; i++) 6821 priv->rx_irq[i] = res->rx_irq[i]; 6822 for (i = 0; i < MTL_MAX_TX_QUEUES; i++) 6823 priv->tx_irq[i] = res->tx_irq[i]; 6824 6825 if (!is_zero_ether_addr(res->mac)) 6826 eth_hw_addr_set(priv->dev, res->mac); 6827 6828 dev_set_drvdata(device, priv->dev); 6829 6830 /* Verify driver arguments */ 6831 stmmac_verify_args(); 6832 6833 priv->af_xdp_zc_qps = bitmap_zalloc(MTL_MAX_TX_QUEUES, GFP_KERNEL); 6834 if (!priv->af_xdp_zc_qps) 6835 return -ENOMEM; 6836 6837 /* Allocate workqueue */ 6838 priv->wq = create_singlethread_workqueue("stmmac_wq"); 6839 if (!priv->wq) { 6840 dev_err(priv->device, "failed to create workqueue\n"); 6841 return -ENOMEM; 6842 } 6843 6844 INIT_WORK(&priv->service_task, stmmac_service_task); 6845 6846 /* Initialize Link Partner FPE workqueue */ 6847 INIT_WORK(&priv->fpe_task, stmmac_fpe_lp_task); 6848 6849 /* Override with kernel parameters if supplied XXX CRS XXX 6850 * this needs to have multiple instances 6851 */ 6852 if ((phyaddr >= 0) && (phyaddr <= 31)) 6853 priv->plat->phy_addr = phyaddr; 6854 6855 if (priv->plat->stmmac_rst) { 6856 ret = reset_control_assert(priv->plat->stmmac_rst);
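/* Pulse the reset line: assert, then immediately deassert. */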
reset_control_deassert(priv->plat->stmmac_rst); 6858 /* Some reset controllers have only a reset callback instead of 6859 * an assert + deassert callback pair. 6860 */ 6861 if (ret == -ENOTSUPP) 6862 reset_control_reset(priv->plat->stmmac_rst); 6863 } 6864 6865 ret = reset_control_deassert(priv->plat->stmmac_ahb_rst); 6866 if (ret == -ENOTSUPP) 6867 dev_err(priv->device, "unable to bring out of ahb reset: %pe\n", 6868 ERR_PTR(ret)); 6869 6870 /* Init MAC and get the capabilities */ 6871 ret = stmmac_hw_init(priv); 6872 if (ret) 6873 goto error_hw_init; 6874 6875 /* Only DWMAC core version 5.20 onwards supports HW descriptor prefetch. 6876 */ 6877 if (priv->synopsys_id < DWMAC_CORE_5_20) 6878 priv->plat->dma_cfg->dche = false; 6879 6880 stmmac_check_ether_addr(priv); 6881 6882 ndev->netdev_ops = &stmmac_netdev_ops; 6883 6884 ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | 6885 NETIF_F_RXCSUM; 6886 6887 ret = stmmac_tc_init(priv, priv); 6888 if (!ret) { 6889 ndev->hw_features |= NETIF_F_HW_TC; 6890 } 6891 6892 if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) { 6893 ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6; 6894 if (priv->plat->has_gmac4) 6895 ndev->hw_features |= NETIF_F_GSO_UDP_L4; 6896 priv->tso = true; 6897 dev_info(priv->device, "TSO feature enabled\n"); 6898 } 6899 6900 if (priv->dma_cap.sphen) { 6901 ndev->hw_features |= NETIF_F_GRO; 6902 priv->sph_cap = true; 6903 priv->sph = priv->sph_cap; 6904 dev_info(priv->device, "SPH feature enabled\n"); 6905 } 6906 6907 /* The IP register MAC_HW_Feature1[ADDR64] only defines 32/40/64-bit 6908 * widths, but some SoCs differ: e.g. the i.MX8MP supports 34 bits, 6909 * which maps to the 40-bit width in MAC_HW_Feature1[ADDR64]. 6910 * So overwrite dma_cap.addr64 according to the real HW design. 6911 */ 6912 if (priv->plat->addr64) 6913 priv->dma_cap.addr64 = priv->plat->addr64; 6914 6915 if (priv->dma_cap.addr64) { 6916 ret = dma_set_mask_and_coherent(device, 6917 DMA_BIT_MASK(priv->dma_cap.addr64)); 6918 if (!ret) { 6919 dev_info(priv->device, "Using %d bits DMA width\n", 6920 priv->dma_cap.addr64); 6921 6922 /* 6923 * If more than 32 bits can be addressed, make sure to 6924 * enable enhanced addressing mode.
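 * (eame enables the DMA's enhanced addressing mode for buffers
 * above the 32-bit boundary.)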
6925 */ 6926 if (IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT)) 6927 priv->plat->dma_cfg->eame = true; 6928 } else { 6929 ret = dma_set_mask_and_coherent(device, DMA_BIT_MASK(32)); 6930 if (ret) { 6931 dev_err(priv->device, "Failed to set DMA Mask\n"); 6932 goto error_hw_init; 6933 } 6934 6935 priv->dma_cap.addr64 = 32; 6936 } 6937 } 6938 6939 ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA; 6940 ndev->watchdog_timeo = msecs_to_jiffies(watchdog); 6941 #ifdef STMMAC_VLAN_TAG_USED 6942 /* Both mac100 and gmac support receive VLAN tag detection */ 6943 ndev->features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX; 6944 if (priv->dma_cap.vlhash) { 6945 ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER; 6946 ndev->features |= NETIF_F_HW_VLAN_STAG_FILTER; 6947 } 6948 if (priv->dma_cap.vlins) { 6949 ndev->features |= NETIF_F_HW_VLAN_CTAG_TX; 6950 if (priv->dma_cap.dvlan) 6951 ndev->features |= NETIF_F_HW_VLAN_STAG_TX; 6952 } 6953 #endif 6954 priv->msg_enable = netif_msg_init(debug, default_msg_level); 6955 6956 /* Initialize RSS */ 6957 rxq = priv->plat->rx_queues_to_use; 6958 netdev_rss_key_fill(priv->rss.key, sizeof(priv->rss.key)); 6959 for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++) 6960 priv->rss.table[i] = ethtool_rxfh_indir_default(i, rxq); 6961 6962 if (priv->dma_cap.rssen && priv->plat->rss_en) 6963 ndev->features |= NETIF_F_RXHASH; 6964 6965 /* MTU range: 46 - hw-specific max */ 6966 ndev->min_mtu = ETH_ZLEN - ETH_HLEN; 6967 if (priv->plat->has_xgmac) 6968 ndev->max_mtu = XGMAC_JUMBO_LEN; 6969 else if ((priv->plat->enh_desc) || (priv->synopsys_id >= DWMAC_CORE_4_00)) 6970 ndev->max_mtu = JUMBO_LEN; 6971 else 6972 ndev->max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN); 6973 /* Do not overwrite ndev->max_mtu if plat->maxmtu > ndev->max_mtu, 6974 * or if plat->maxmtu < ndev->min_mtu, which is an invalid range. 6975 */ 6976 if ((priv->plat->maxmtu < ndev->max_mtu) && 6977 (priv->plat->maxmtu >= ndev->min_mtu)) 6978 ndev->max_mtu = priv->plat->maxmtu; 6979 else if (priv->plat->maxmtu < ndev->min_mtu) 6980 dev_warn(priv->device, 6981 "%s: warning: maxmtu has an invalid value (%d)\n", 6982 __func__, priv->plat->maxmtu); 6983 6984 if (flow_ctrl) 6985 priv->flow_ctrl = FLOW_AUTO; /* RX/TX pause on */ 6986 6987 /* Setup channels NAPI */ 6988 stmmac_napi_add(ndev); 6989 6990 mutex_init(&priv->lock); 6991 6992 /* If a specific clk_csr value is passed from the platform, 6993 * the CSR Clock Range selection cannot be 6994 * changed at run-time and it is fixed. Otherwise, the driver will try to 6995 * set the MDC clock dynamically according to the actual csr 6996 * clock input.
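 * The dynamic selection is performed by stmmac_clk_csr_set() below.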
6997 */ 6998 if (priv->plat->clk_csr >= 0) 6999 priv->clk_csr = priv->plat->clk_csr; 7000 else 7001 stmmac_clk_csr_set(priv); 7002 7003 stmmac_check_pcs_mode(priv); 7004 7005 pm_runtime_get_noresume(device); 7006 pm_runtime_set_active(device); 7007 pm_runtime_enable(device); 7008 7009 if (priv->hw->pcs != STMMAC_PCS_TBI && 7010 priv->hw->pcs != STMMAC_PCS_RTBI) { 7011 /* MDIO bus Registration */ 7012 ret = stmmac_mdio_register(ndev); 7013 if (ret < 0) { 7014 dev_err(priv->device, 7015 "%s: MDIO bus (id: %d) registration failed", 7016 __func__, priv->plat->bus_id); 7017 goto error_mdio_register; 7018 } 7019 } 7020 7021 if (priv->plat->speed_mode_2500) 7022 priv->plat->speed_mode_2500(ndev, priv->plat->bsp_priv); 7023 7024 if (priv->plat->mdio_bus_data && priv->plat->mdio_bus_data->has_xpcs) { 7025 ret = stmmac_xpcs_setup(priv->mii); 7026 if (ret) 7027 goto error_xpcs_setup; 7028 } 7029 7030 ret = stmmac_phy_setup(priv); 7031 if (ret) { 7032 netdev_err(ndev, "failed to setup phy (%d)\n", ret); 7033 goto error_phy_setup; 7034 } 7035 7036 ret = register_netdev(ndev); 7037 if (ret) { 7038 dev_err(priv->device, "%s: ERROR %i registering the device\n", 7039 __func__, ret); 7040 goto error_netdev_register; 7041 } 7042 7043 if (priv->plat->serdes_powerup) { 7044 ret = priv->plat->serdes_powerup(ndev, 7045 priv->plat->bsp_priv); 7046 7047 if (ret < 0) 7048 goto error_serdes_powerup; 7049 } 7050 7051 #ifdef CONFIG_DEBUG_FS 7052 stmmac_init_fs(ndev); 7053 #endif 7054 7055 /* Let pm_runtime_put() disable the clocks. 7056 * If CONFIG_PM is not enabled, the clocks will stay powered. 7057 */ 7058 pm_runtime_put(device); 7059 7060 return ret; 7061 7062 error_serdes_powerup: 7063 unregister_netdev(ndev); 7064 error_netdev_register: 7065 phylink_destroy(priv->phylink); 7066 error_xpcs_setup: 7067 error_phy_setup: 7068 if (priv->hw->pcs != STMMAC_PCS_TBI && 7069 priv->hw->pcs != STMMAC_PCS_RTBI) 7070 stmmac_mdio_unregister(ndev); 7071 error_mdio_register: 7072 stmmac_napi_del(ndev); 7073 error_hw_init: 7074 destroy_workqueue(priv->wq); 7075 bitmap_free(priv->af_xdp_zc_qps); 7076 7077 return ret; 7078 } 7079 EXPORT_SYMBOL_GPL(stmmac_dvr_probe); 7080 7081 /** 7082 * stmmac_dvr_remove 7083 * @dev: device pointer 7084 * Description: this function resets the TX/RX processes, disables the MAC RX/TX, 7085 * changes the link status, and releases the DMA descriptor rings. 7086 */ 7087 int stmmac_dvr_remove(struct device *dev) 7088 { 7089 struct net_device *ndev = dev_get_drvdata(dev); 7090 struct stmmac_priv *priv = netdev_priv(ndev); 7091 7092 netdev_info(priv->dev, "%s: removing driver", __func__); 7093 7094 stmmac_stop_all_dma(priv); 7095 stmmac_mac_set(priv, priv->ioaddr, false); 7096 netif_carrier_off(ndev); 7097 unregister_netdev(ndev); 7098 7099 /* Serdes power down needs to happen after the VLAN filter 7100 * is deleted, which is triggered by unregister_netdev().
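 * The serdes therefore stays powered until unregistration has completed.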
7101 */ 7102 if (priv->plat->serdes_powerdown) 7103 priv->plat->serdes_powerdown(ndev, priv->plat->bsp_priv); 7104 7105 #ifdef CONFIG_DEBUG_FS 7106 stmmac_exit_fs(ndev); 7107 #endif 7108 phylink_destroy(priv->phylink); 7109 if (priv->plat->stmmac_rst) 7110 reset_control_assert(priv->plat->stmmac_rst); 7111 reset_control_assert(priv->plat->stmmac_ahb_rst); 7112 pm_runtime_put(dev); 7113 pm_runtime_disable(dev); 7114 if (priv->hw->pcs != STMMAC_PCS_TBI && 7115 priv->hw->pcs != STMMAC_PCS_RTBI) 7116 stmmac_mdio_unregister(ndev); 7117 destroy_workqueue(priv->wq); 7118 mutex_destroy(&priv->lock); 7119 bitmap_free(priv->af_xdp_zc_qps); 7120 7121 return 0; 7122 } 7123 EXPORT_SYMBOL_GPL(stmmac_dvr_remove); 7124 7125 /** 7126 * stmmac_suspend - suspend callback 7127 * @dev: device pointer 7128 * Description: this is the function to suspend the device and it is called 7129 * by the platform driver to stop the network queue, release the resources, 7130 * program the PMT register (for WoL), clean and release driver resources. 7131 */ 7132 int stmmac_suspend(struct device *dev) 7133 { 7134 struct net_device *ndev = dev_get_drvdata(dev); 7135 struct stmmac_priv *priv = netdev_priv(ndev); 7136 u32 chan; 7137 7138 if (!ndev || !netif_running(ndev)) 7139 return 0; 7140 7141 mutex_lock(&priv->lock); 7142 7143 netif_device_detach(ndev); 7144 7145 stmmac_disable_all_queues(priv); 7146 7147 for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) 7148 hrtimer_cancel(&priv->tx_queue[chan].txtimer); 7149 7150 if (priv->eee_enabled) { 7151 priv->tx_path_in_lpi_mode = false; 7152 del_timer_sync(&priv->eee_ctrl_timer); 7153 } 7154 7155 /* Stop TX/RX DMA */ 7156 stmmac_stop_all_dma(priv); 7157 7158 if (priv->plat->serdes_powerdown) 7159 priv->plat->serdes_powerdown(ndev, priv->plat->bsp_priv); 7160 7161 /* Enable Power down mode by programming the PMT regs */ 7162 if (device_may_wakeup(priv->device) && priv->plat->pmt) { 7163 stmmac_pmt(priv, priv->hw, priv->wolopts); 7164 priv->irq_wake = 1; 7165 } else { 7166 stmmac_mac_set(priv, priv->ioaddr, false); 7167 pinctrl_pm_select_sleep_state(priv->device); 7168 } 7169 7170 mutex_unlock(&priv->lock); 7171 7172 rtnl_lock(); 7173 if (device_may_wakeup(priv->device) && priv->plat->pmt) { 7174 phylink_suspend(priv->phylink, true); 7175 } else { 7176 if (device_may_wakeup(priv->device)) 7177 phylink_speed_down(priv->phylink, false); 7178 phylink_suspend(priv->phylink, false); 7179 } 7180 rtnl_unlock(); 7181 7182 if (priv->dma_cap.fpesel) { 7183 /* Disable FPE */ 7184 stmmac_fpe_configure(priv, priv->ioaddr, 7185 priv->plat->tx_queues_to_use, 7186 priv->plat->rx_queues_to_use, false); 7187 7188 stmmac_fpe_handshake(priv, false); 7189 stmmac_fpe_stop_wq(priv); 7190 } 7191 7192 priv->speed = SPEED_UNKNOWN; 7193 return 0; 7194 } 7195 EXPORT_SYMBOL_GPL(stmmac_suspend); 7196 7197 /** 7198 * stmmac_reset_queues_param - reset queue parameters 7199 * @priv: device pointer 7200 */ 7201 static void stmmac_reset_queues_param(struct stmmac_priv *priv) 7202 { 7203 u32 rx_cnt = priv->plat->rx_queues_to_use; 7204 u32 tx_cnt = priv->plat->tx_queues_to_use; 7205 u32 queue; 7206 7207 for (queue = 0; queue < rx_cnt; queue++) { 7208 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; 7209 7210 rx_q->cur_rx = 0; 7211 rx_q->dirty_rx = 0; 7212 } 7213 7214 for (queue = 0; queue < tx_cnt; queue++) { 7215 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue]; 7216 7217 tx_q->cur_tx = 0; 7218 tx_q->dirty_tx = 0; 7219 tx_q->mss = 0; 7220 7221 netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, 
queue)); 7222 } 7223 } 7224 7225 /** 7226 * stmmac_resume - resume callback 7227 * @dev: device pointer 7228 * Description: on resume, this function is invoked to set up the DMA and CORE 7229 * in a usable state. 7230 */ 7231 int stmmac_resume(struct device *dev) 7232 { 7233 struct net_device *ndev = dev_get_drvdata(dev); 7234 struct stmmac_priv *priv = netdev_priv(ndev); 7235 int ret; 7236 7237 if (!netif_running(ndev)) 7238 return 0; 7239 7240 /* Power Down bit, into the PM register, is cleared 7241 * automatically as soon as a magic packet or a Wake-up frame 7242 * is received. Anyway, it's better to manually clear 7243 * this bit because it can generate problems while resuming 7244 * from other devices (e.g. a serial console). 7245 */ 7246 if (device_may_wakeup(priv->device) && priv->plat->pmt) { 7247 mutex_lock(&priv->lock); 7248 stmmac_pmt(priv, priv->hw, 0); 7249 mutex_unlock(&priv->lock); 7250 priv->irq_wake = 0; 7251 } else { 7252 pinctrl_pm_select_default_state(priv->device); 7253 /* reset the phy so that it's ready */ 7254 if (priv->mii) 7255 stmmac_mdio_reset(priv->mii); 7256 } 7257 7258 if (priv->plat->serdes_powerup) { 7259 ret = priv->plat->serdes_powerup(ndev, 7260 priv->plat->bsp_priv); 7261 7262 if (ret < 0) 7263 return ret; 7264 } 7265 7266 rtnl_lock(); 7267 if (device_may_wakeup(priv->device) && priv->plat->pmt) { 7268 phylink_resume(priv->phylink); 7269 } else { 7270 phylink_resume(priv->phylink); 7271 if (device_may_wakeup(priv->device)) 7272 phylink_speed_up(priv->phylink); 7273 } 7274 rtnl_unlock(); 7275 7276 rtnl_lock(); 7277 mutex_lock(&priv->lock); 7278 7279 stmmac_reset_queues_param(priv); 7280 7281 stmmac_free_tx_skbufs(priv); 7282 stmmac_clear_descriptors(priv); 7283 7284 stmmac_hw_setup(ndev, false); 7285 stmmac_init_coalesce(priv); 7286 stmmac_set_rx_mode(ndev); 7287 7288 stmmac_restore_hw_vlan_rx_fltr(priv, ndev, priv->hw); 7289 7290 stmmac_enable_all_queues(priv); 7291 7292 mutex_unlock(&priv->lock); 7293 rtnl_unlock(); 7294 7295 netif_device_attach(ndev); 7296 7297 return 0; 7298 } 7299 EXPORT_SYMBOL_GPL(stmmac_resume); 7300 7301 #ifndef MODULE 7302 static int __init stmmac_cmdline_opt(char *str) 7303 { 7304 char *opt; 7305 7306 if (!str || !*str) 7307 return -EINVAL; 7308 while ((opt = strsep(&str, ",")) != NULL) { 7309 if (!strncmp(opt, "debug:", 6)) { 7310 if (kstrtoint(opt + 6, 0, &debug)) 7311 goto err; 7312 } else if (!strncmp(opt, "phyaddr:", 8)) { 7313 if (kstrtoint(opt + 8, 0, &phyaddr)) 7314 goto err; 7315 } else if (!strncmp(opt, "buf_sz:", 7)) { 7316 if (kstrtoint(opt + 7, 0, &buf_sz)) 7317 goto err; 7318 } else if (!strncmp(opt, "tc:", 3)) { 7319 if (kstrtoint(opt + 3, 0, &tc)) 7320 goto err; 7321 } else if (!strncmp(opt, "watchdog:", 9)) { 7322 if (kstrtoint(opt + 9, 0, &watchdog)) 7323 goto err; 7324 } else if (!strncmp(opt, "flow_ctrl:", 10)) { 7325 if (kstrtoint(opt + 10, 0, &flow_ctrl)) 7326 goto err; 7327 } else if (!strncmp(opt, "pause:", 6)) { 7328 if (kstrtoint(opt + 6, 0, &pause)) 7329 goto err; 7330 } else if (!strncmp(opt, "eee_timer:", 10)) { 7331 if (kstrtoint(opt + 10, 0, &eee_timer)) 7332 goto err; 7333 } else if (!strncmp(opt, "chain_mode:", 11)) { 7334 if (kstrtoint(opt + 11, 0, &chain_mode)) 7335 goto err; 7336 } 7337 } 7338 return 0; 7339 7340 err: 7341 pr_err("%s: ERROR broken module parameter conversion", __func__); 7342 return -EINVAL; 7343 } 7344 7345 __setup("stmmaceth=", stmmac_cmdline_opt); 7346 #endif /* MODULE */ 7347 7348 static int __init stmmac_init(void) 7349 { 7350 #ifdef CONFIG_DEBUG_FS 7351 /* Create
debugfs main directory if it doesn't exist yet */ 7352 if (!stmmac_fs_dir) 7353 stmmac_fs_dir = debugfs_create_dir(STMMAC_RESOURCE_NAME, NULL); 7354 register_netdevice_notifier(&stmmac_notifier); 7355 #endif 7356 7357 return 0; 7358 } 7359 7360 static void __exit stmmac_exit(void) 7361 { 7362 #ifdef CONFIG_DEBUG_FS 7363 unregister_netdevice_notifier(&stmmac_notifier); 7364 debugfs_remove_recursive(stmmac_fs_dir); 7365 #endif 7366 } 7367 7368 module_init(stmmac_init) 7369 module_exit(stmmac_exit) 7370 7371 MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet device driver"); 7372 MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>"); 7373 MODULE_LICENSE("GPL"); 7374