// SPDX-License-Identifier: GPL-2.0-only
/*******************************************************************************
  This is the driver for the ST MAC 10/100/1000 on-chip Ethernet controllers.
  ST Ethernet IPs are built around a Synopsys IP Core.

	Copyright(C) 2007-2011 STMicroelectronics Ltd


  Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>

  Documentation available at:
	http://www.stlinux.com
  Support available at:
	https://bugzilla.stlinux.com/
*******************************************************************************/

#include <linux/clk.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/if_ether.h>
#include <linux/crc32.h>
#include <linux/mii.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/pm_runtime.h>
#include <linux/prefetch.h>
#include <linux/pinctrl/consumer.h>
#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#endif /* CONFIG_DEBUG_FS */
#include <linux/net_tstamp.h>
#include <linux/phylink.h>
#include <linux/udp.h>
#include <linux/bpf_trace.h>
#include <net/page_pool/helpers.h>
#include <net/pkt_cls.h>
#include <net/xdp_sock_drv.h>
#include "stmmac_ptp.h"
#include "stmmac.h"
#include "stmmac_xdp.h"
#include <linux/reset.h>
#include <linux/of_mdio.h>
#include "dwmac1000.h"
#include "dwxgmac2.h"
#include "hwif.h"

/* As long as the interface is active, we keep the timestamping counter enabled
 * with fine resolution and binary rollover. This avoids non-monotonic behavior
 * (clock jumps) when changing timestamping settings at runtime.
57 */ 58 #define STMMAC_HWTS_ACTIVE (PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | \ 59 PTP_TCR_TSCTRLSSR) 60 61 #define STMMAC_ALIGN(x) ALIGN(ALIGN(x, SMP_CACHE_BYTES), 16) 62 #define TSO_MAX_BUFF_SIZE (SZ_16K - 1) 63 64 /* Module parameters */ 65 #define TX_TIMEO 5000 66 static int watchdog = TX_TIMEO; 67 module_param(watchdog, int, 0644); 68 MODULE_PARM_DESC(watchdog, "Transmit timeout in milliseconds (default 5s)"); 69 70 static int debug = -1; 71 module_param(debug, int, 0644); 72 MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)"); 73 74 static int phyaddr = -1; 75 module_param(phyaddr, int, 0444); 76 MODULE_PARM_DESC(phyaddr, "Physical device address"); 77 78 #define STMMAC_TX_THRESH(x) ((x)->dma_conf.dma_tx_size / 4) 79 #define STMMAC_RX_THRESH(x) ((x)->dma_conf.dma_rx_size / 4) 80 81 /* Limit to make sure XDP TX and slow path can coexist */ 82 #define STMMAC_XSK_TX_BUDGET_MAX 256 83 #define STMMAC_TX_XSK_AVAIL 16 84 #define STMMAC_RX_FILL_BATCH 16 85 86 #define STMMAC_XDP_PASS 0 87 #define STMMAC_XDP_CONSUMED BIT(0) 88 #define STMMAC_XDP_TX BIT(1) 89 #define STMMAC_XDP_REDIRECT BIT(2) 90 91 static int flow_ctrl = FLOW_AUTO; 92 module_param(flow_ctrl, int, 0644); 93 MODULE_PARM_DESC(flow_ctrl, "Flow control ability [on/off]"); 94 95 static int pause = PAUSE_TIME; 96 module_param(pause, int, 0644); 97 MODULE_PARM_DESC(pause, "Flow Control Pause Time"); 98 99 #define TC_DEFAULT 64 100 static int tc = TC_DEFAULT; 101 module_param(tc, int, 0644); 102 MODULE_PARM_DESC(tc, "DMA threshold control value"); 103 104 #define DEFAULT_BUFSIZE 1536 105 static int buf_sz = DEFAULT_BUFSIZE; 106 module_param(buf_sz, int, 0644); 107 MODULE_PARM_DESC(buf_sz, "DMA buffer size"); 108 109 #define STMMAC_RX_COPYBREAK 256 110 111 static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE | 112 NETIF_MSG_LINK | NETIF_MSG_IFUP | 113 NETIF_MSG_IFDOWN | NETIF_MSG_TIMER); 114 115 #define STMMAC_DEFAULT_LPI_TIMER 1000 116 static int eee_timer = STMMAC_DEFAULT_LPI_TIMER; 117 module_param(eee_timer, int, 0644); 118 MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec"); 119 #define STMMAC_LPI_T(x) (jiffies + usecs_to_jiffies(x)) 120 121 /* By default the driver will use the ring mode to manage tx and rx descriptors, 122 * but allow user to force to use the chain instead of the ring 123 */ 124 static unsigned int chain_mode; 125 module_param(chain_mode, int, 0444); 126 MODULE_PARM_DESC(chain_mode, "To use chain instead of ring mode"); 127 128 static irqreturn_t stmmac_interrupt(int irq, void *dev_id); 129 /* For MSI interrupts handling */ 130 static irqreturn_t stmmac_mac_interrupt(int irq, void *dev_id); 131 static irqreturn_t stmmac_safety_interrupt(int irq, void *dev_id); 132 static irqreturn_t stmmac_msi_intr_tx(int irq, void *data); 133 static irqreturn_t stmmac_msi_intr_rx(int irq, void *data); 134 static void stmmac_reset_rx_queue(struct stmmac_priv *priv, u32 queue); 135 static void stmmac_reset_tx_queue(struct stmmac_priv *priv, u32 queue); 136 static void stmmac_reset_queues_param(struct stmmac_priv *priv); 137 static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue); 138 static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue); 139 static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode, 140 u32 rxmode, u32 chan); 141 142 #ifdef CONFIG_DEBUG_FS 143 static const struct net_device_ops stmmac_netdev_ops; 144 static void stmmac_init_fs(struct net_device *dev); 145 static void stmmac_exit_fs(struct net_device *dev); 146 
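/* Illustrative sketch, not part of the driver: the STMMAC_ALIGN() macro above
 * rounds a length up to the CPU cache-line size and then to a 16-byte
 * boundary. The hypothetical helper below mirrors it with the cache-line size
 * passed in explicitly; assuming a 64-byte SMP_CACHE_BYTES, a 1500-byte
 * Ethernet MTU rounds up to 1536, the same value as DEFAULT_BUFSIZE above.
 */
static inline unsigned int stmmac_align_example(unsigned int len,
						unsigned int cache_bytes)
{
	/* ALIGN(v, a) rounds v up to the next multiple of a (a power of two) */
	return ALIGN(ALIGN(len, cache_bytes), 16);
}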
#endif 147 148 #define STMMAC_COAL_TIMER(x) (ns_to_ktime((x) * NSEC_PER_USEC)) 149 150 int stmmac_bus_clks_config(struct stmmac_priv *priv, bool enabled) 151 { 152 int ret = 0; 153 154 if (enabled) { 155 ret = clk_prepare_enable(priv->plat->stmmac_clk); 156 if (ret) 157 return ret; 158 ret = clk_prepare_enable(priv->plat->pclk); 159 if (ret) { 160 clk_disable_unprepare(priv->plat->stmmac_clk); 161 return ret; 162 } 163 if (priv->plat->clks_config) { 164 ret = priv->plat->clks_config(priv->plat->bsp_priv, enabled); 165 if (ret) { 166 clk_disable_unprepare(priv->plat->stmmac_clk); 167 clk_disable_unprepare(priv->plat->pclk); 168 return ret; 169 } 170 } 171 } else { 172 clk_disable_unprepare(priv->plat->stmmac_clk); 173 clk_disable_unprepare(priv->plat->pclk); 174 if (priv->plat->clks_config) 175 priv->plat->clks_config(priv->plat->bsp_priv, enabled); 176 } 177 178 return ret; 179 } 180 EXPORT_SYMBOL_GPL(stmmac_bus_clks_config); 181 182 /** 183 * stmmac_verify_args - verify the driver parameters. 184 * Description: it checks the driver parameters and set a default in case of 185 * errors. 186 */ 187 static void stmmac_verify_args(void) 188 { 189 if (unlikely(watchdog < 0)) 190 watchdog = TX_TIMEO; 191 if (unlikely((buf_sz < DEFAULT_BUFSIZE) || (buf_sz > BUF_SIZE_16KiB))) 192 buf_sz = DEFAULT_BUFSIZE; 193 if (unlikely(flow_ctrl > 1)) 194 flow_ctrl = FLOW_AUTO; 195 else if (likely(flow_ctrl < 0)) 196 flow_ctrl = FLOW_OFF; 197 if (unlikely((pause < 0) || (pause > 0xffff))) 198 pause = PAUSE_TIME; 199 if (eee_timer < 0) 200 eee_timer = STMMAC_DEFAULT_LPI_TIMER; 201 } 202 203 static void __stmmac_disable_all_queues(struct stmmac_priv *priv) 204 { 205 u32 rx_queues_cnt = priv->plat->rx_queues_to_use; 206 u32 tx_queues_cnt = priv->plat->tx_queues_to_use; 207 u32 maxq = max(rx_queues_cnt, tx_queues_cnt); 208 u32 queue; 209 210 for (queue = 0; queue < maxq; queue++) { 211 struct stmmac_channel *ch = &priv->channel[queue]; 212 213 if (stmmac_xdp_is_enabled(priv) && 214 test_bit(queue, priv->af_xdp_zc_qps)) { 215 napi_disable(&ch->rxtx_napi); 216 continue; 217 } 218 219 if (queue < rx_queues_cnt) 220 napi_disable(&ch->rx_napi); 221 if (queue < tx_queues_cnt) 222 napi_disable(&ch->tx_napi); 223 } 224 } 225 226 /** 227 * stmmac_disable_all_queues - Disable all queues 228 * @priv: driver private structure 229 */ 230 static void stmmac_disable_all_queues(struct stmmac_priv *priv) 231 { 232 u32 rx_queues_cnt = priv->plat->rx_queues_to_use; 233 struct stmmac_rx_queue *rx_q; 234 u32 queue; 235 236 /* synchronize_rcu() needed for pending XDP buffers to drain */ 237 for (queue = 0; queue < rx_queues_cnt; queue++) { 238 rx_q = &priv->dma_conf.rx_queue[queue]; 239 if (rx_q->xsk_pool) { 240 synchronize_rcu(); 241 break; 242 } 243 } 244 245 __stmmac_disable_all_queues(priv); 246 } 247 248 /** 249 * stmmac_enable_all_queues - Enable all queues 250 * @priv: driver private structure 251 */ 252 static void stmmac_enable_all_queues(struct stmmac_priv *priv) 253 { 254 u32 rx_queues_cnt = priv->plat->rx_queues_to_use; 255 u32 tx_queues_cnt = priv->plat->tx_queues_to_use; 256 u32 maxq = max(rx_queues_cnt, tx_queues_cnt); 257 u32 queue; 258 259 for (queue = 0; queue < maxq; queue++) { 260 struct stmmac_channel *ch = &priv->channel[queue]; 261 262 if (stmmac_xdp_is_enabled(priv) && 263 test_bit(queue, priv->af_xdp_zc_qps)) { 264 napi_enable(&ch->rxtx_napi); 265 continue; 266 } 267 268 if (queue < rx_queues_cnt) 269 napi_enable(&ch->rx_napi); 270 if (queue < tx_queues_cnt) 271 napi_enable(&ch->tx_napi); 272 } 273 } 274 275 
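/* Minimal sketch with hypothetical names, not driver code: the
 * enable-with-rollback pattern used by stmmac_bus_clks_config() above. A clock
 * that fails to come up must not leave the previously enabled ones running,
 * and the disable path tears everything down again. The struct clk handles are
 * assumed to have been obtained elsewhere (e.g. via devm_clk_get()).
 */
static int example_bus_clks_enable(struct clk *bus_clk, struct clk *periph_clk)
{
	int ret;

	ret = clk_prepare_enable(bus_clk);
	if (ret)
		return ret;

	ret = clk_prepare_enable(periph_clk);
	if (ret) {
		/* Roll back the clock that did come up */
		clk_disable_unprepare(bus_clk);
		return ret;
	}

	return 0;
}

static void example_bus_clks_disable(struct clk *bus_clk, struct clk *periph_clk)
{
	clk_disable_unprepare(periph_clk);
	clk_disable_unprepare(bus_clk);
}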
static void stmmac_service_event_schedule(struct stmmac_priv *priv) 276 { 277 if (!test_bit(STMMAC_DOWN, &priv->state) && 278 !test_and_set_bit(STMMAC_SERVICE_SCHED, &priv->state)) 279 queue_work(priv->wq, &priv->service_task); 280 } 281 282 static void stmmac_global_err(struct stmmac_priv *priv) 283 { 284 netif_carrier_off(priv->dev); 285 set_bit(STMMAC_RESET_REQUESTED, &priv->state); 286 stmmac_service_event_schedule(priv); 287 } 288 289 /** 290 * stmmac_clk_csr_set - dynamically set the MDC clock 291 * @priv: driver private structure 292 * Description: this is to dynamically set the MDC clock according to the csr 293 * clock input. 294 * Note: 295 * If a specific clk_csr value is passed from the platform 296 * this means that the CSR Clock Range selection cannot be 297 * changed at run-time and it is fixed (as reported in the driver 298 * documentation). Viceversa the driver will try to set the MDC 299 * clock dynamically according to the actual clock input. 300 */ 301 static void stmmac_clk_csr_set(struct stmmac_priv *priv) 302 { 303 u32 clk_rate; 304 305 clk_rate = clk_get_rate(priv->plat->stmmac_clk); 306 307 /* Platform provided default clk_csr would be assumed valid 308 * for all other cases except for the below mentioned ones. 309 * For values higher than the IEEE 802.3 specified frequency 310 * we can not estimate the proper divider as it is not known 311 * the frequency of clk_csr_i. So we do not change the default 312 * divider. 313 */ 314 if (!(priv->clk_csr & MAC_CSR_H_FRQ_MASK)) { 315 if (clk_rate < CSR_F_35M) 316 priv->clk_csr = STMMAC_CSR_20_35M; 317 else if ((clk_rate >= CSR_F_35M) && (clk_rate < CSR_F_60M)) 318 priv->clk_csr = STMMAC_CSR_35_60M; 319 else if ((clk_rate >= CSR_F_60M) && (clk_rate < CSR_F_100M)) 320 priv->clk_csr = STMMAC_CSR_60_100M; 321 else if ((clk_rate >= CSR_F_100M) && (clk_rate < CSR_F_150M)) 322 priv->clk_csr = STMMAC_CSR_100_150M; 323 else if ((clk_rate >= CSR_F_150M) && (clk_rate < CSR_F_250M)) 324 priv->clk_csr = STMMAC_CSR_150_250M; 325 else if ((clk_rate >= CSR_F_250M) && (clk_rate <= CSR_F_300M)) 326 priv->clk_csr = STMMAC_CSR_250_300M; 327 } 328 329 if (priv->plat->flags & STMMAC_FLAG_HAS_SUN8I) { 330 if (clk_rate > 160000000) 331 priv->clk_csr = 0x03; 332 else if (clk_rate > 80000000) 333 priv->clk_csr = 0x02; 334 else if (clk_rate > 40000000) 335 priv->clk_csr = 0x01; 336 else 337 priv->clk_csr = 0; 338 } 339 340 if (priv->plat->has_xgmac) { 341 if (clk_rate > 400000000) 342 priv->clk_csr = 0x5; 343 else if (clk_rate > 350000000) 344 priv->clk_csr = 0x4; 345 else if (clk_rate > 300000000) 346 priv->clk_csr = 0x3; 347 else if (clk_rate > 250000000) 348 priv->clk_csr = 0x2; 349 else if (clk_rate > 150000000) 350 priv->clk_csr = 0x1; 351 else 352 priv->clk_csr = 0x0; 353 } 354 } 355 356 static void print_pkt(unsigned char *buf, int len) 357 { 358 pr_debug("len = %d byte, buf addr: 0x%p\n", len, buf); 359 print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, buf, len); 360 } 361 362 static inline u32 stmmac_tx_avail(struct stmmac_priv *priv, u32 queue) 363 { 364 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue]; 365 u32 avail; 366 367 if (tx_q->dirty_tx > tx_q->cur_tx) 368 avail = tx_q->dirty_tx - tx_q->cur_tx - 1; 369 else 370 avail = priv->dma_conf.dma_tx_size - tx_q->cur_tx + tx_q->dirty_tx - 1; 371 372 return avail; 373 } 374 375 /** 376 * stmmac_rx_dirty - Get RX queue dirty 377 * @priv: driver private structure 378 * @queue: RX queue index 379 */ 380 static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv, u32 queue) 381 { 382 
struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue]; 383 u32 dirty; 384 385 if (rx_q->dirty_rx <= rx_q->cur_rx) 386 dirty = rx_q->cur_rx - rx_q->dirty_rx; 387 else 388 dirty = priv->dma_conf.dma_rx_size - rx_q->dirty_rx + rx_q->cur_rx; 389 390 return dirty; 391 } 392 393 static void stmmac_lpi_entry_timer_config(struct stmmac_priv *priv, bool en) 394 { 395 int tx_lpi_timer; 396 397 /* Clear/set the SW EEE timer flag based on LPI ET enablement */ 398 priv->eee_sw_timer_en = en ? 0 : 1; 399 tx_lpi_timer = en ? priv->tx_lpi_timer : 0; 400 stmmac_set_eee_lpi_timer(priv, priv->hw, tx_lpi_timer); 401 } 402 403 /** 404 * stmmac_enable_eee_mode - check and enter in LPI mode 405 * @priv: driver private structure 406 * Description: this function is to verify and enter in LPI mode in case of 407 * EEE. 408 */ 409 static int stmmac_enable_eee_mode(struct stmmac_priv *priv) 410 { 411 u32 tx_cnt = priv->plat->tx_queues_to_use; 412 u32 queue; 413 414 /* check if all TX queues have the work finished */ 415 for (queue = 0; queue < tx_cnt; queue++) { 416 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue]; 417 418 if (tx_q->dirty_tx != tx_q->cur_tx) 419 return -EBUSY; /* still unfinished work */ 420 } 421 422 /* Check and enter in LPI mode */ 423 if (!priv->tx_path_in_lpi_mode) 424 stmmac_set_eee_mode(priv, priv->hw, 425 priv->plat->flags & STMMAC_FLAG_EN_TX_LPI_CLOCKGATING); 426 return 0; 427 } 428 429 /** 430 * stmmac_disable_eee_mode - disable and exit from LPI mode 431 * @priv: driver private structure 432 * Description: this function is to exit and disable EEE in case of 433 * LPI state is true. This is called by the xmit. 434 */ 435 void stmmac_disable_eee_mode(struct stmmac_priv *priv) 436 { 437 if (!priv->eee_sw_timer_en) { 438 stmmac_lpi_entry_timer_config(priv, 0); 439 return; 440 } 441 442 stmmac_reset_eee_mode(priv, priv->hw); 443 del_timer_sync(&priv->eee_ctrl_timer); 444 priv->tx_path_in_lpi_mode = false; 445 } 446 447 /** 448 * stmmac_eee_ctrl_timer - EEE TX SW timer. 449 * @t: timer_list struct containing private info 450 * Description: 451 * if there is no data transfer and if we are not in LPI state, 452 * then MAC Transmitter can be moved to LPI state. 453 */ 454 static void stmmac_eee_ctrl_timer(struct timer_list *t) 455 { 456 struct stmmac_priv *priv = from_timer(priv, t, eee_ctrl_timer); 457 458 if (stmmac_enable_eee_mode(priv)) 459 mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer)); 460 } 461 462 /** 463 * stmmac_eee_init - init EEE 464 * @priv: driver private structure 465 * Description: 466 * if the GMAC supports the EEE (from the HW cap reg) and the phy device 467 * can also manage EEE, this function enable the LPI state and start related 468 * timer. 469 */ 470 bool stmmac_eee_init(struct stmmac_priv *priv) 471 { 472 int eee_tw_timer = priv->eee_tw_timer; 473 474 /* Using PCS we cannot dial with the phy registers at this stage 475 * so we do not support extra feature like EEE. 476 */ 477 if (priv->hw->pcs == STMMAC_PCS_TBI || 478 priv->hw->pcs == STMMAC_PCS_RTBI) 479 return false; 480 481 /* Check if MAC core supports the EEE feature. 
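/* Illustrative sketch with a hypothetical helper, not driver code: the
 * circular-ring arithmetic behind stmmac_tx_avail() and stmmac_rx_dirty()
 * above. One slot is effectively kept in reserve (the "- 1") so that
 * cur == dirty can always be read as "nothing pending". Worked example:
 * size = 512, cur = 10, dirty = 5 -> 506 free slots.
 */
static inline u32 example_ring_free_slots(u32 dirty, u32 cur, u32 size)
{
	if (dirty > cur)
		return dirty - cur - 1;

	return size - cur + dirty - 1;
}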
*/ 482 if (!priv->dma_cap.eee) 483 return false; 484 485 mutex_lock(&priv->lock); 486 487 /* Check if it needs to be deactivated */ 488 if (!priv->eee_active) { 489 if (priv->eee_enabled) { 490 netdev_dbg(priv->dev, "disable EEE\n"); 491 stmmac_lpi_entry_timer_config(priv, 0); 492 del_timer_sync(&priv->eee_ctrl_timer); 493 stmmac_set_eee_timer(priv, priv->hw, 0, eee_tw_timer); 494 if (priv->hw->xpcs) 495 xpcs_config_eee(priv->hw->xpcs, 496 priv->plat->mult_fact_100ns, 497 false); 498 } 499 mutex_unlock(&priv->lock); 500 return false; 501 } 502 503 if (priv->eee_active && !priv->eee_enabled) { 504 timer_setup(&priv->eee_ctrl_timer, stmmac_eee_ctrl_timer, 0); 505 stmmac_set_eee_timer(priv, priv->hw, STMMAC_DEFAULT_LIT_LS, 506 eee_tw_timer); 507 if (priv->hw->xpcs) 508 xpcs_config_eee(priv->hw->xpcs, 509 priv->plat->mult_fact_100ns, 510 true); 511 } 512 513 if (priv->plat->has_gmac4 && priv->tx_lpi_timer <= STMMAC_ET_MAX) { 514 del_timer_sync(&priv->eee_ctrl_timer); 515 priv->tx_path_in_lpi_mode = false; 516 stmmac_lpi_entry_timer_config(priv, 1); 517 } else { 518 stmmac_lpi_entry_timer_config(priv, 0); 519 mod_timer(&priv->eee_ctrl_timer, 520 STMMAC_LPI_T(priv->tx_lpi_timer)); 521 } 522 523 mutex_unlock(&priv->lock); 524 netdev_dbg(priv->dev, "Energy-Efficient Ethernet initialized\n"); 525 return true; 526 } 527 528 /* stmmac_get_tx_hwtstamp - get HW TX timestamps 529 * @priv: driver private structure 530 * @p : descriptor pointer 531 * @skb : the socket buffer 532 * Description : 533 * This function will read timestamp from the descriptor & pass it to stack. 534 * and also perform some sanity checks. 535 */ 536 static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv, 537 struct dma_desc *p, struct sk_buff *skb) 538 { 539 struct skb_shared_hwtstamps shhwtstamp; 540 bool found = false; 541 u64 ns = 0; 542 543 if (!priv->hwts_tx_en) 544 return; 545 546 /* exit if skb doesn't support hw tstamp */ 547 if (likely(!skb || !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS))) 548 return; 549 550 /* check tx tstamp status */ 551 if (stmmac_get_tx_timestamp_status(priv, p)) { 552 stmmac_get_timestamp(priv, p, priv->adv_ts, &ns); 553 found = true; 554 } else if (!stmmac_get_mac_tx_timestamp(priv, priv->hw, &ns)) { 555 found = true; 556 } 557 558 if (found) { 559 ns -= priv->plat->cdc_error_adj; 560 561 memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps)); 562 shhwtstamp.hwtstamp = ns_to_ktime(ns); 563 564 netdev_dbg(priv->dev, "get valid TX hw timestamp %llu\n", ns); 565 /* pass tstamp to stack */ 566 skb_tstamp_tx(skb, &shhwtstamp); 567 } 568 } 569 570 /* stmmac_get_rx_hwtstamp - get HW RX timestamps 571 * @priv: driver private structure 572 * @p : descriptor pointer 573 * @np : next descriptor pointer 574 * @skb : the socket buffer 575 * Description : 576 * This function will read received packet's timestamp from the descriptor 577 * and pass it to stack. It also perform some sanity checks. 578 */ 579 static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p, 580 struct dma_desc *np, struct sk_buff *skb) 581 { 582 struct skb_shared_hwtstamps *shhwtstamp = NULL; 583 struct dma_desc *desc = p; 584 u64 ns = 0; 585 586 if (!priv->hwts_rx_en) 587 return; 588 /* For GMAC4, the valid timestamp is from CTX next desc. 
*/ 589 if (priv->plat->has_gmac4 || priv->plat->has_xgmac) 590 desc = np; 591 592 /* Check if timestamp is available */ 593 if (stmmac_get_rx_timestamp_status(priv, p, np, priv->adv_ts)) { 594 stmmac_get_timestamp(priv, desc, priv->adv_ts, &ns); 595 596 ns -= priv->plat->cdc_error_adj; 597 598 netdev_dbg(priv->dev, "get valid RX hw timestamp %llu\n", ns); 599 shhwtstamp = skb_hwtstamps(skb); 600 memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps)); 601 shhwtstamp->hwtstamp = ns_to_ktime(ns); 602 } else { 603 netdev_dbg(priv->dev, "cannot get RX hw timestamp\n"); 604 } 605 } 606 607 /** 608 * stmmac_hwtstamp_set - control hardware timestamping. 609 * @dev: device pointer. 610 * @ifr: An IOCTL specific structure, that can contain a pointer to 611 * a proprietary structure used to pass information to the driver. 612 * Description: 613 * This function configures the MAC to enable/disable both outgoing(TX) 614 * and incoming(RX) packets time stamping based on user input. 615 * Return Value: 616 * 0 on success and an appropriate -ve integer on failure. 617 */ 618 static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr) 619 { 620 struct stmmac_priv *priv = netdev_priv(dev); 621 struct hwtstamp_config config; 622 u32 ptp_v2 = 0; 623 u32 tstamp_all = 0; 624 u32 ptp_over_ipv4_udp = 0; 625 u32 ptp_over_ipv6_udp = 0; 626 u32 ptp_over_ethernet = 0; 627 u32 snap_type_sel = 0; 628 u32 ts_master_en = 0; 629 u32 ts_event_en = 0; 630 631 if (!(priv->dma_cap.time_stamp || priv->adv_ts)) { 632 netdev_alert(priv->dev, "No support for HW time stamping\n"); 633 priv->hwts_tx_en = 0; 634 priv->hwts_rx_en = 0; 635 636 return -EOPNOTSUPP; 637 } 638 639 if (copy_from_user(&config, ifr->ifr_data, 640 sizeof(config))) 641 return -EFAULT; 642 643 netdev_dbg(priv->dev, "%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n", 644 __func__, config.flags, config.tx_type, config.rx_filter); 645 646 if (config.tx_type != HWTSTAMP_TX_OFF && 647 config.tx_type != HWTSTAMP_TX_ON) 648 return -ERANGE; 649 650 if (priv->adv_ts) { 651 switch (config.rx_filter) { 652 case HWTSTAMP_FILTER_NONE: 653 /* time stamp no incoming packet at all */ 654 config.rx_filter = HWTSTAMP_FILTER_NONE; 655 break; 656 657 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: 658 /* PTP v1, UDP, any kind of event packet */ 659 config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT; 660 /* 'xmac' hardware can support Sync, Pdelay_Req and 661 * Pdelay_resp by setting bit14 and bits17/16 to 01 662 * This leaves Delay_Req timestamps out. 
663 * Enable all events *and* general purpose message 664 * timestamping 665 */ 666 snap_type_sel = PTP_TCR_SNAPTYPSEL_1; 667 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA; 668 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA; 669 break; 670 671 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: 672 /* PTP v1, UDP, Sync packet */ 673 config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC; 674 /* take time stamp for SYNC messages only */ 675 ts_event_en = PTP_TCR_TSEVNTENA; 676 677 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA; 678 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA; 679 break; 680 681 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: 682 /* PTP v1, UDP, Delay_req packet */ 683 config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ; 684 /* take time stamp for Delay_Req messages only */ 685 ts_master_en = PTP_TCR_TSMSTRENA; 686 ts_event_en = PTP_TCR_TSEVNTENA; 687 688 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA; 689 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA; 690 break; 691 692 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: 693 /* PTP v2, UDP, any kind of event packet */ 694 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT; 695 ptp_v2 = PTP_TCR_TSVER2ENA; 696 /* take time stamp for all event messages */ 697 snap_type_sel = PTP_TCR_SNAPTYPSEL_1; 698 699 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA; 700 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA; 701 break; 702 703 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: 704 /* PTP v2, UDP, Sync packet */ 705 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC; 706 ptp_v2 = PTP_TCR_TSVER2ENA; 707 /* take time stamp for SYNC messages only */ 708 ts_event_en = PTP_TCR_TSEVNTENA; 709 710 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA; 711 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA; 712 break; 713 714 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: 715 /* PTP v2, UDP, Delay_req packet */ 716 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ; 717 ptp_v2 = PTP_TCR_TSVER2ENA; 718 /* take time stamp for Delay_Req messages only */ 719 ts_master_en = PTP_TCR_TSMSTRENA; 720 ts_event_en = PTP_TCR_TSEVNTENA; 721 722 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA; 723 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA; 724 break; 725 726 case HWTSTAMP_FILTER_PTP_V2_EVENT: 727 /* PTP v2/802.AS1 any layer, any kind of event packet */ 728 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT; 729 ptp_v2 = PTP_TCR_TSVER2ENA; 730 snap_type_sel = PTP_TCR_SNAPTYPSEL_1; 731 if (priv->synopsys_id < DWMAC_CORE_4_10) 732 ts_event_en = PTP_TCR_TSEVNTENA; 733 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA; 734 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA; 735 ptp_over_ethernet = PTP_TCR_TSIPENA; 736 break; 737 738 case HWTSTAMP_FILTER_PTP_V2_SYNC: 739 /* PTP v2/802.AS1, any layer, Sync packet */ 740 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC; 741 ptp_v2 = PTP_TCR_TSVER2ENA; 742 /* take time stamp for SYNC messages only */ 743 ts_event_en = PTP_TCR_TSEVNTENA; 744 745 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA; 746 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA; 747 ptp_over_ethernet = PTP_TCR_TSIPENA; 748 break; 749 750 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: 751 /* PTP v2/802.AS1, any layer, Delay_req packet */ 752 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ; 753 ptp_v2 = PTP_TCR_TSVER2ENA; 754 /* take time stamp for Delay_Req messages only */ 755 ts_master_en = PTP_TCR_TSMSTRENA; 756 ts_event_en = PTP_TCR_TSEVNTENA; 757 758 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA; 759 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA; 760 ptp_over_ethernet = PTP_TCR_TSIPENA; 761 break; 762 763 case HWTSTAMP_FILTER_NTP_ALL: 764 case HWTSTAMP_FILTER_ALL: 765 /* time stamp any incoming packet */ 766 config.rx_filter = HWTSTAMP_FILTER_ALL; 767 tstamp_all = 
PTP_TCR_TSENALL; 768 break; 769 770 default: 771 return -ERANGE; 772 } 773 } else { 774 switch (config.rx_filter) { 775 case HWTSTAMP_FILTER_NONE: 776 config.rx_filter = HWTSTAMP_FILTER_NONE; 777 break; 778 default: 779 /* PTP v1, UDP, any kind of event packet */ 780 config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT; 781 break; 782 } 783 } 784 priv->hwts_rx_en = ((config.rx_filter == HWTSTAMP_FILTER_NONE) ? 0 : 1); 785 priv->hwts_tx_en = config.tx_type == HWTSTAMP_TX_ON; 786 787 priv->systime_flags = STMMAC_HWTS_ACTIVE; 788 789 if (priv->hwts_tx_en || priv->hwts_rx_en) { 790 priv->systime_flags |= tstamp_all | ptp_v2 | 791 ptp_over_ethernet | ptp_over_ipv6_udp | 792 ptp_over_ipv4_udp | ts_event_en | 793 ts_master_en | snap_type_sel; 794 } 795 796 stmmac_config_hw_tstamping(priv, priv->ptpaddr, priv->systime_flags); 797 798 memcpy(&priv->tstamp_config, &config, sizeof(config)); 799 800 return copy_to_user(ifr->ifr_data, &config, 801 sizeof(config)) ? -EFAULT : 0; 802 } 803 804 /** 805 * stmmac_hwtstamp_get - read hardware timestamping. 806 * @dev: device pointer. 807 * @ifr: An IOCTL specific structure, that can contain a pointer to 808 * a proprietary structure used to pass information to the driver. 809 * Description: 810 * This function obtain the current hardware timestamping settings 811 * as requested. 812 */ 813 static int stmmac_hwtstamp_get(struct net_device *dev, struct ifreq *ifr) 814 { 815 struct stmmac_priv *priv = netdev_priv(dev); 816 struct hwtstamp_config *config = &priv->tstamp_config; 817 818 if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp)) 819 return -EOPNOTSUPP; 820 821 return copy_to_user(ifr->ifr_data, config, 822 sizeof(*config)) ? -EFAULT : 0; 823 } 824 825 /** 826 * stmmac_init_tstamp_counter - init hardware timestamping counter 827 * @priv: driver private structure 828 * @systime_flags: timestamping flags 829 * Description: 830 * Initialize hardware counter for packet timestamping. 831 * This is valid as long as the interface is open and not suspended. 832 * Will be rerun after resuming from suspend, case in which the timestamping 833 * flags updated by stmmac_hwtstamp_set() also need to be restored. 
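/* Illustrative sketch with a hypothetical helper, not driver code: the addend
 * value computed by stmmac_init_tstamp_counter() below. In the fine-update
 * method the timestamp unit adds 'addend' to a 32-bit accumulator once per
 * PTP clock cycle and, on overflow, advances the sub-second register by
 * sec_inc nanoseconds, so addend = 2^32 * (10^9 / sec_inc) / clk_ptp_rate.
 * Assumed example values: clk_ptp_rate = 50 MHz and sec_inc = 40 ns give
 * addend = 0x80000000, i.e. an overflow every second clock cycle.
 */
static inline u32 example_ptp_addend(u32 sec_inc, u32 clk_ptp_rate)
{
	/* 10^9 / sec_inc is the tick rate (in Hz) implied by the increment */
	u64 tick_rate = div_u64(1000000000ULL, sec_inc);

	return div_u64(tick_rate << 32, clk_ptp_rate);
}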
834 */ 835 int stmmac_init_tstamp_counter(struct stmmac_priv *priv, u32 systime_flags) 836 { 837 bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac; 838 struct timespec64 now; 839 u32 sec_inc = 0; 840 u64 temp = 0; 841 842 if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp)) 843 return -EOPNOTSUPP; 844 845 stmmac_config_hw_tstamping(priv, priv->ptpaddr, systime_flags); 846 priv->systime_flags = systime_flags; 847 848 /* program Sub Second Increment reg */ 849 stmmac_config_sub_second_increment(priv, priv->ptpaddr, 850 priv->plat->clk_ptp_rate, 851 xmac, &sec_inc); 852 temp = div_u64(1000000000ULL, sec_inc); 853 854 /* Store sub second increment for later use */ 855 priv->sub_second_inc = sec_inc; 856 857 /* calculate default added value: 858 * formula is : 859 * addend = (2^32)/freq_div_ratio; 860 * where, freq_div_ratio = 1e9ns/sec_inc 861 */ 862 temp = (u64)(temp << 32); 863 priv->default_addend = div_u64(temp, priv->plat->clk_ptp_rate); 864 stmmac_config_addend(priv, priv->ptpaddr, priv->default_addend); 865 866 /* initialize system time */ 867 ktime_get_real_ts64(&now); 868 869 /* lower 32 bits of tv_sec are safe until y2106 */ 870 stmmac_init_systime(priv, priv->ptpaddr, (u32)now.tv_sec, now.tv_nsec); 871 872 return 0; 873 } 874 EXPORT_SYMBOL_GPL(stmmac_init_tstamp_counter); 875 876 /** 877 * stmmac_init_ptp - init PTP 878 * @priv: driver private structure 879 * Description: this is to verify if the HW supports the PTPv1 or PTPv2. 880 * This is done by looking at the HW cap. register. 881 * This function also registers the ptp driver. 882 */ 883 static int stmmac_init_ptp(struct stmmac_priv *priv) 884 { 885 bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac; 886 int ret; 887 888 if (priv->plat->ptp_clk_freq_config) 889 priv->plat->ptp_clk_freq_config(priv); 890 891 ret = stmmac_init_tstamp_counter(priv, STMMAC_HWTS_ACTIVE); 892 if (ret) 893 return ret; 894 895 priv->adv_ts = 0; 896 /* Check if adv_ts can be enabled for dwmac 4.x / xgmac core */ 897 if (xmac && priv->dma_cap.atime_stamp) 898 priv->adv_ts = 1; 899 /* Dwmac 3.x core with extend_desc can support adv_ts */ 900 else if (priv->extend_desc && priv->dma_cap.atime_stamp) 901 priv->adv_ts = 1; 902 903 if (priv->dma_cap.time_stamp) 904 netdev_info(priv->dev, "IEEE 1588-2002 Timestamp supported\n"); 905 906 if (priv->adv_ts) 907 netdev_info(priv->dev, 908 "IEEE 1588-2008 Advanced Timestamp supported\n"); 909 910 priv->hwts_tx_en = 0; 911 priv->hwts_rx_en = 0; 912 913 if (priv->plat->flags & STMMAC_FLAG_HWTSTAMP_CORRECT_LATENCY) 914 stmmac_hwtstamp_correct_latency(priv, priv); 915 916 return 0; 917 } 918 919 static void stmmac_release_ptp(struct stmmac_priv *priv) 920 { 921 clk_disable_unprepare(priv->plat->clk_ptp_ref); 922 stmmac_ptp_unregister(priv); 923 } 924 925 /** 926 * stmmac_mac_flow_ctrl - Configure flow control in all queues 927 * @priv: driver private structure 928 * @duplex: duplex passed to the next function 929 * Description: It is used for configuring the flow control in all queues 930 */ 931 static void stmmac_mac_flow_ctrl(struct stmmac_priv *priv, u32 duplex) 932 { 933 u32 tx_cnt = priv->plat->tx_queues_to_use; 934 935 stmmac_flow_ctrl(priv, priv->hw, duplex, priv->flow_ctrl, 936 priv->pause, tx_cnt); 937 } 938 939 static struct phylink_pcs *stmmac_mac_select_pcs(struct phylink_config *config, 940 phy_interface_t interface) 941 { 942 struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev)); 943 944 if (priv->hw->xpcs) 945 return &priv->hw->xpcs->pcs; 946 947 if (priv->hw->lynx_pcs) 
948 return priv->hw->lynx_pcs; 949 950 return NULL; 951 } 952 953 static void stmmac_mac_config(struct phylink_config *config, unsigned int mode, 954 const struct phylink_link_state *state) 955 { 956 /* Nothing to do, xpcs_config() handles everything */ 957 } 958 959 static void stmmac_fpe_link_state_handle(struct stmmac_priv *priv, bool is_up) 960 { 961 struct stmmac_fpe_cfg *fpe_cfg = priv->plat->fpe_cfg; 962 enum stmmac_fpe_state *lo_state = &fpe_cfg->lo_fpe_state; 963 enum stmmac_fpe_state *lp_state = &fpe_cfg->lp_fpe_state; 964 bool *hs_enable = &fpe_cfg->hs_enable; 965 966 if (is_up && *hs_enable) { 967 stmmac_fpe_send_mpacket(priv, priv->ioaddr, fpe_cfg, 968 MPACKET_VERIFY); 969 } else { 970 *lo_state = FPE_STATE_OFF; 971 *lp_state = FPE_STATE_OFF; 972 } 973 } 974 975 static void stmmac_mac_link_down(struct phylink_config *config, 976 unsigned int mode, phy_interface_t interface) 977 { 978 struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev)); 979 980 stmmac_mac_set(priv, priv->ioaddr, false); 981 priv->eee_active = false; 982 priv->tx_lpi_enabled = false; 983 priv->eee_enabled = stmmac_eee_init(priv); 984 stmmac_set_eee_pls(priv, priv->hw, false); 985 986 if (priv->dma_cap.fpesel) 987 stmmac_fpe_link_state_handle(priv, false); 988 } 989 990 static void stmmac_mac_link_up(struct phylink_config *config, 991 struct phy_device *phy, 992 unsigned int mode, phy_interface_t interface, 993 int speed, int duplex, 994 bool tx_pause, bool rx_pause) 995 { 996 struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev)); 997 u32 old_ctrl, ctrl; 998 999 if ((priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP) && 1000 priv->plat->serdes_powerup) 1001 priv->plat->serdes_powerup(priv->dev, priv->plat->bsp_priv); 1002 1003 old_ctrl = readl(priv->ioaddr + MAC_CTRL_REG); 1004 ctrl = old_ctrl & ~priv->hw->link.speed_mask; 1005 1006 if (interface == PHY_INTERFACE_MODE_USXGMII) { 1007 switch (speed) { 1008 case SPEED_10000: 1009 ctrl |= priv->hw->link.xgmii.speed10000; 1010 break; 1011 case SPEED_5000: 1012 ctrl |= priv->hw->link.xgmii.speed5000; 1013 break; 1014 case SPEED_2500: 1015 ctrl |= priv->hw->link.xgmii.speed2500; 1016 break; 1017 default: 1018 return; 1019 } 1020 } else if (interface == PHY_INTERFACE_MODE_XLGMII) { 1021 switch (speed) { 1022 case SPEED_100000: 1023 ctrl |= priv->hw->link.xlgmii.speed100000; 1024 break; 1025 case SPEED_50000: 1026 ctrl |= priv->hw->link.xlgmii.speed50000; 1027 break; 1028 case SPEED_40000: 1029 ctrl |= priv->hw->link.xlgmii.speed40000; 1030 break; 1031 case SPEED_25000: 1032 ctrl |= priv->hw->link.xlgmii.speed25000; 1033 break; 1034 case SPEED_10000: 1035 ctrl |= priv->hw->link.xgmii.speed10000; 1036 break; 1037 case SPEED_2500: 1038 ctrl |= priv->hw->link.speed2500; 1039 break; 1040 case SPEED_1000: 1041 ctrl |= priv->hw->link.speed1000; 1042 break; 1043 default: 1044 return; 1045 } 1046 } else { 1047 switch (speed) { 1048 case SPEED_2500: 1049 ctrl |= priv->hw->link.speed2500; 1050 break; 1051 case SPEED_1000: 1052 ctrl |= priv->hw->link.speed1000; 1053 break; 1054 case SPEED_100: 1055 ctrl |= priv->hw->link.speed100; 1056 break; 1057 case SPEED_10: 1058 ctrl |= priv->hw->link.speed10; 1059 break; 1060 default: 1061 return; 1062 } 1063 } 1064 1065 priv->speed = speed; 1066 1067 if (priv->plat->fix_mac_speed) 1068 priv->plat->fix_mac_speed(priv->plat->bsp_priv, speed, mode); 1069 1070 if (!duplex) 1071 ctrl &= ~priv->hw->link.duplex; 1072 else 1073 ctrl |= priv->hw->link.duplex; 1074 1075 /* Flow Control operation */ 1076 if (rx_pause && 
tx_pause) 1077 priv->flow_ctrl = FLOW_AUTO; 1078 else if (rx_pause && !tx_pause) 1079 priv->flow_ctrl = FLOW_RX; 1080 else if (!rx_pause && tx_pause) 1081 priv->flow_ctrl = FLOW_TX; 1082 else 1083 priv->flow_ctrl = FLOW_OFF; 1084 1085 stmmac_mac_flow_ctrl(priv, duplex); 1086 1087 if (ctrl != old_ctrl) 1088 writel(ctrl, priv->ioaddr + MAC_CTRL_REG); 1089 1090 stmmac_mac_set(priv, priv->ioaddr, true); 1091 if (phy && priv->dma_cap.eee) { 1092 priv->eee_active = 1093 phy_init_eee(phy, !(priv->plat->flags & 1094 STMMAC_FLAG_RX_CLK_RUNS_IN_LPI)) >= 0; 1095 priv->eee_enabled = stmmac_eee_init(priv); 1096 priv->tx_lpi_enabled = priv->eee_enabled; 1097 stmmac_set_eee_pls(priv, priv->hw, true); 1098 } 1099 1100 if (priv->dma_cap.fpesel) 1101 stmmac_fpe_link_state_handle(priv, true); 1102 1103 if (priv->plat->flags & STMMAC_FLAG_HWTSTAMP_CORRECT_LATENCY) 1104 stmmac_hwtstamp_correct_latency(priv, priv); 1105 } 1106 1107 static const struct phylink_mac_ops stmmac_phylink_mac_ops = { 1108 .mac_select_pcs = stmmac_mac_select_pcs, 1109 .mac_config = stmmac_mac_config, 1110 .mac_link_down = stmmac_mac_link_down, 1111 .mac_link_up = stmmac_mac_link_up, 1112 }; 1113 1114 /** 1115 * stmmac_check_pcs_mode - verify if RGMII/SGMII is supported 1116 * @priv: driver private structure 1117 * Description: this is to verify if the HW supports the PCS. 1118 * Physical Coding Sublayer (PCS) interface that can be used when the MAC is 1119 * configured for the TBI, RTBI, or SGMII PHY interface. 1120 */ 1121 static void stmmac_check_pcs_mode(struct stmmac_priv *priv) 1122 { 1123 int interface = priv->plat->mac_interface; 1124 1125 if (priv->dma_cap.pcs) { 1126 if ((interface == PHY_INTERFACE_MODE_RGMII) || 1127 (interface == PHY_INTERFACE_MODE_RGMII_ID) || 1128 (interface == PHY_INTERFACE_MODE_RGMII_RXID) || 1129 (interface == PHY_INTERFACE_MODE_RGMII_TXID)) { 1130 netdev_dbg(priv->dev, "PCS RGMII support enabled\n"); 1131 priv->hw->pcs = STMMAC_PCS_RGMII; 1132 } else if (interface == PHY_INTERFACE_MODE_SGMII) { 1133 netdev_dbg(priv->dev, "PCS SGMII support enabled\n"); 1134 priv->hw->pcs = STMMAC_PCS_SGMII; 1135 } 1136 } 1137 } 1138 1139 /** 1140 * stmmac_init_phy - PHY initialization 1141 * @dev: net device structure 1142 * Description: it initializes the driver's PHY state, and attaches the PHY 1143 * to the mac driver. 1144 * Return value: 1145 * 0 on success 1146 */ 1147 static int stmmac_init_phy(struct net_device *dev) 1148 { 1149 struct stmmac_priv *priv = netdev_priv(dev); 1150 struct fwnode_handle *phy_fwnode; 1151 struct fwnode_handle *fwnode; 1152 int ret; 1153 1154 if (!phylink_expects_phy(priv->phylink)) 1155 return 0; 1156 1157 fwnode = priv->plat->port_node; 1158 if (!fwnode) 1159 fwnode = dev_fwnode(priv->device); 1160 1161 if (fwnode) 1162 phy_fwnode = fwnode_get_phy_node(fwnode); 1163 else 1164 phy_fwnode = NULL; 1165 1166 /* Some DT bindings do not set-up the PHY handle. 
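/* Minimal sketch with a hypothetical helper, not driver code: the pause
 * resolution performed in stmmac_mac_link_up() above. phylink hands the driver
 * the negotiated RX and TX pause abilities and they are folded into a single
 * flow-control mode before stmmac_mac_flow_ctrl() programs the MAC.
 */
static inline unsigned int example_resolve_flow_ctrl(bool rx_pause, bool tx_pause)
{
	if (rx_pause && tx_pause)
		return FLOW_AUTO;
	if (rx_pause)
		return FLOW_RX;
	if (tx_pause)
		return FLOW_TX;

	return FLOW_OFF;
}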
Let's try to 1167 * manually parse it 1168 */ 1169 if (!phy_fwnode || IS_ERR(phy_fwnode)) { 1170 int addr = priv->plat->phy_addr; 1171 struct phy_device *phydev; 1172 1173 if (addr < 0) { 1174 netdev_err(priv->dev, "no phy found\n"); 1175 return -ENODEV; 1176 } 1177 1178 phydev = mdiobus_get_phy(priv->mii, addr); 1179 if (!phydev) { 1180 netdev_err(priv->dev, "no phy at addr %d\n", addr); 1181 return -ENODEV; 1182 } 1183 1184 ret = phylink_connect_phy(priv->phylink, phydev); 1185 } else { 1186 fwnode_handle_put(phy_fwnode); 1187 ret = phylink_fwnode_phy_connect(priv->phylink, fwnode, 0); 1188 } 1189 1190 if (!priv->plat->pmt) { 1191 struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL }; 1192 1193 phylink_ethtool_get_wol(priv->phylink, &wol); 1194 device_set_wakeup_capable(priv->device, !!wol.supported); 1195 device_set_wakeup_enable(priv->device, !!wol.wolopts); 1196 } 1197 1198 return ret; 1199 } 1200 1201 static void stmmac_set_half_duplex(struct stmmac_priv *priv) 1202 { 1203 /* Half-Duplex can only work with single tx queue */ 1204 if (priv->plat->tx_queues_to_use > 1) 1205 priv->phylink_config.mac_capabilities &= 1206 ~(MAC_10HD | MAC_100HD | MAC_1000HD); 1207 else 1208 priv->phylink_config.mac_capabilities |= 1209 (MAC_10HD | MAC_100HD | MAC_1000HD); 1210 } 1211 1212 static int stmmac_phy_setup(struct stmmac_priv *priv) 1213 { 1214 struct stmmac_mdio_bus_data *mdio_bus_data; 1215 int mode = priv->plat->phy_interface; 1216 struct fwnode_handle *fwnode; 1217 struct phylink *phylink; 1218 int max_speed; 1219 1220 priv->phylink_config.dev = &priv->dev->dev; 1221 priv->phylink_config.type = PHYLINK_NETDEV; 1222 priv->phylink_config.mac_managed_pm = true; 1223 1224 mdio_bus_data = priv->plat->mdio_bus_data; 1225 if (mdio_bus_data) 1226 priv->phylink_config.ovr_an_inband = 1227 mdio_bus_data->xpcs_an_inband; 1228 1229 /* Set the platform/firmware specified interface mode. Note, phylink 1230 * deals with the PHY interface mode, not the MAC interface mode. 1231 */ 1232 __set_bit(mode, priv->phylink_config.supported_interfaces); 1233 1234 /* If we have an xpcs, it defines which PHY interfaces are supported. 
*/ 1235 if (priv->hw->xpcs) 1236 xpcs_get_interfaces(priv->hw->xpcs, 1237 priv->phylink_config.supported_interfaces); 1238 1239 priv->phylink_config.mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE | 1240 MAC_10FD | MAC_100FD | 1241 MAC_1000FD; 1242 1243 stmmac_set_half_duplex(priv); 1244 1245 /* Get the MAC specific capabilities */ 1246 stmmac_mac_phylink_get_caps(priv); 1247 1248 max_speed = priv->plat->max_speed; 1249 if (max_speed) 1250 phylink_limit_mac_speed(&priv->phylink_config, max_speed); 1251 1252 fwnode = priv->plat->port_node; 1253 if (!fwnode) 1254 fwnode = dev_fwnode(priv->device); 1255 1256 phylink = phylink_create(&priv->phylink_config, fwnode, 1257 mode, &stmmac_phylink_mac_ops); 1258 if (IS_ERR(phylink)) 1259 return PTR_ERR(phylink); 1260 1261 priv->phylink = phylink; 1262 return 0; 1263 } 1264 1265 static void stmmac_display_rx_rings(struct stmmac_priv *priv, 1266 struct stmmac_dma_conf *dma_conf) 1267 { 1268 u32 rx_cnt = priv->plat->rx_queues_to_use; 1269 unsigned int desc_size; 1270 void *head_rx; 1271 u32 queue; 1272 1273 /* Display RX rings */ 1274 for (queue = 0; queue < rx_cnt; queue++) { 1275 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue]; 1276 1277 pr_info("\tRX Queue %u rings\n", queue); 1278 1279 if (priv->extend_desc) { 1280 head_rx = (void *)rx_q->dma_erx; 1281 desc_size = sizeof(struct dma_extended_desc); 1282 } else { 1283 head_rx = (void *)rx_q->dma_rx; 1284 desc_size = sizeof(struct dma_desc); 1285 } 1286 1287 /* Display RX ring */ 1288 stmmac_display_ring(priv, head_rx, dma_conf->dma_rx_size, true, 1289 rx_q->dma_rx_phy, desc_size); 1290 } 1291 } 1292 1293 static void stmmac_display_tx_rings(struct stmmac_priv *priv, 1294 struct stmmac_dma_conf *dma_conf) 1295 { 1296 u32 tx_cnt = priv->plat->tx_queues_to_use; 1297 unsigned int desc_size; 1298 void *head_tx; 1299 u32 queue; 1300 1301 /* Display TX rings */ 1302 for (queue = 0; queue < tx_cnt; queue++) { 1303 struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue]; 1304 1305 pr_info("\tTX Queue %d rings\n", queue); 1306 1307 if (priv->extend_desc) { 1308 head_tx = (void *)tx_q->dma_etx; 1309 desc_size = sizeof(struct dma_extended_desc); 1310 } else if (tx_q->tbs & STMMAC_TBS_AVAIL) { 1311 head_tx = (void *)tx_q->dma_entx; 1312 desc_size = sizeof(struct dma_edesc); 1313 } else { 1314 head_tx = (void *)tx_q->dma_tx; 1315 desc_size = sizeof(struct dma_desc); 1316 } 1317 1318 stmmac_display_ring(priv, head_tx, dma_conf->dma_tx_size, false, 1319 tx_q->dma_tx_phy, desc_size); 1320 } 1321 } 1322 1323 static void stmmac_display_rings(struct stmmac_priv *priv, 1324 struct stmmac_dma_conf *dma_conf) 1325 { 1326 /* Display RX ring */ 1327 stmmac_display_rx_rings(priv, dma_conf); 1328 1329 /* Display TX ring */ 1330 stmmac_display_tx_rings(priv, dma_conf); 1331 } 1332 1333 static int stmmac_set_bfsize(int mtu, int bufsize) 1334 { 1335 int ret = bufsize; 1336 1337 if (mtu >= BUF_SIZE_8KiB) 1338 ret = BUF_SIZE_16KiB; 1339 else if (mtu >= BUF_SIZE_4KiB) 1340 ret = BUF_SIZE_8KiB; 1341 else if (mtu >= BUF_SIZE_2KiB) 1342 ret = BUF_SIZE_4KiB; 1343 else if (mtu > DEFAULT_BUFSIZE) 1344 ret = BUF_SIZE_2KiB; 1345 else 1346 ret = DEFAULT_BUFSIZE; 1347 1348 return ret; 1349 } 1350 1351 /** 1352 * stmmac_clear_rx_descriptors - clear RX descriptors 1353 * @priv: driver private structure 1354 * @dma_conf: structure to take the dma data 1355 * @queue: RX queue index 1356 * Description: this function is called to clear the RX descriptors 1357 * in case of both basic and extended descriptors are used. 
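/* Minimal sketch with a hypothetical helper, not driver code: the same
 * MTU-to-buffer-size ladder as stmmac_set_bfsize() above, written as a table
 * walk. For example, an MTU of 1500 stays on DEFAULT_BUFSIZE (1536 bytes),
 * 3000 selects BUF_SIZE_4KiB and 9000 selects BUF_SIZE_16KiB.
 */
static int example_set_bfsize(int mtu)
{
	static const struct {
		int min_mtu;
		int bufsize;
	} map[] = {
		{ BUF_SIZE_8KiB,       BUF_SIZE_16KiB },
		{ BUF_SIZE_4KiB,       BUF_SIZE_8KiB },
		{ BUF_SIZE_2KiB,       BUF_SIZE_4KiB },
		{ DEFAULT_BUFSIZE + 1, BUF_SIZE_2KiB },
	};
	int i;

	for (i = 0; i < ARRAY_SIZE(map); i++)
		if (mtu >= map[i].min_mtu)
			return map[i].bufsize;

	return DEFAULT_BUFSIZE;
}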
1358 */ 1359 static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv, 1360 struct stmmac_dma_conf *dma_conf, 1361 u32 queue) 1362 { 1363 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue]; 1364 int i; 1365 1366 /* Clear the RX descriptors */ 1367 for (i = 0; i < dma_conf->dma_rx_size; i++) 1368 if (priv->extend_desc) 1369 stmmac_init_rx_desc(priv, &rx_q->dma_erx[i].basic, 1370 priv->use_riwt, priv->mode, 1371 (i == dma_conf->dma_rx_size - 1), 1372 dma_conf->dma_buf_sz); 1373 else 1374 stmmac_init_rx_desc(priv, &rx_q->dma_rx[i], 1375 priv->use_riwt, priv->mode, 1376 (i == dma_conf->dma_rx_size - 1), 1377 dma_conf->dma_buf_sz); 1378 } 1379 1380 /** 1381 * stmmac_clear_tx_descriptors - clear tx descriptors 1382 * @priv: driver private structure 1383 * @dma_conf: structure to take the dma data 1384 * @queue: TX queue index. 1385 * Description: this function is called to clear the TX descriptors 1386 * in case of both basic and extended descriptors are used. 1387 */ 1388 static void stmmac_clear_tx_descriptors(struct stmmac_priv *priv, 1389 struct stmmac_dma_conf *dma_conf, 1390 u32 queue) 1391 { 1392 struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue]; 1393 int i; 1394 1395 /* Clear the TX descriptors */ 1396 for (i = 0; i < dma_conf->dma_tx_size; i++) { 1397 int last = (i == (dma_conf->dma_tx_size - 1)); 1398 struct dma_desc *p; 1399 1400 if (priv->extend_desc) 1401 p = &tx_q->dma_etx[i].basic; 1402 else if (tx_q->tbs & STMMAC_TBS_AVAIL) 1403 p = &tx_q->dma_entx[i].basic; 1404 else 1405 p = &tx_q->dma_tx[i]; 1406 1407 stmmac_init_tx_desc(priv, p, priv->mode, last); 1408 } 1409 } 1410 1411 /** 1412 * stmmac_clear_descriptors - clear descriptors 1413 * @priv: driver private structure 1414 * @dma_conf: structure to take the dma data 1415 * Description: this function is called to clear the TX and RX descriptors 1416 * in case of both basic and extended descriptors are used. 1417 */ 1418 static void stmmac_clear_descriptors(struct stmmac_priv *priv, 1419 struct stmmac_dma_conf *dma_conf) 1420 { 1421 u32 rx_queue_cnt = priv->plat->rx_queues_to_use; 1422 u32 tx_queue_cnt = priv->plat->tx_queues_to_use; 1423 u32 queue; 1424 1425 /* Clear the RX descriptors */ 1426 for (queue = 0; queue < rx_queue_cnt; queue++) 1427 stmmac_clear_rx_descriptors(priv, dma_conf, queue); 1428 1429 /* Clear the TX descriptors */ 1430 for (queue = 0; queue < tx_queue_cnt; queue++) 1431 stmmac_clear_tx_descriptors(priv, dma_conf, queue); 1432 } 1433 1434 /** 1435 * stmmac_init_rx_buffers - init the RX descriptor buffer. 1436 * @priv: driver private structure 1437 * @dma_conf: structure to take the dma data 1438 * @p: descriptor pointer 1439 * @i: descriptor index 1440 * @flags: gfp flag 1441 * @queue: RX queue index 1442 * Description: this function is called to allocate a receive buffer, perform 1443 * the DMA mapping and init the descriptor. 
1444 */ 1445 static int stmmac_init_rx_buffers(struct stmmac_priv *priv, 1446 struct stmmac_dma_conf *dma_conf, 1447 struct dma_desc *p, 1448 int i, gfp_t flags, u32 queue) 1449 { 1450 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue]; 1451 struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i]; 1452 gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN); 1453 1454 if (priv->dma_cap.host_dma_width <= 32) 1455 gfp |= GFP_DMA32; 1456 1457 if (!buf->page) { 1458 buf->page = page_pool_alloc_pages(rx_q->page_pool, gfp); 1459 if (!buf->page) 1460 return -ENOMEM; 1461 buf->page_offset = stmmac_rx_offset(priv); 1462 } 1463 1464 if (priv->sph && !buf->sec_page) { 1465 buf->sec_page = page_pool_alloc_pages(rx_q->page_pool, gfp); 1466 if (!buf->sec_page) 1467 return -ENOMEM; 1468 1469 buf->sec_addr = page_pool_get_dma_addr(buf->sec_page); 1470 stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true); 1471 } else { 1472 buf->sec_page = NULL; 1473 stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false); 1474 } 1475 1476 buf->addr = page_pool_get_dma_addr(buf->page) + buf->page_offset; 1477 1478 stmmac_set_desc_addr(priv, p, buf->addr); 1479 if (dma_conf->dma_buf_sz == BUF_SIZE_16KiB) 1480 stmmac_init_desc3(priv, p); 1481 1482 return 0; 1483 } 1484 1485 /** 1486 * stmmac_free_rx_buffer - free RX dma buffers 1487 * @priv: private structure 1488 * @rx_q: RX queue 1489 * @i: buffer index. 1490 */ 1491 static void stmmac_free_rx_buffer(struct stmmac_priv *priv, 1492 struct stmmac_rx_queue *rx_q, 1493 int i) 1494 { 1495 struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i]; 1496 1497 if (buf->page) 1498 page_pool_put_full_page(rx_q->page_pool, buf->page, false); 1499 buf->page = NULL; 1500 1501 if (buf->sec_page) 1502 page_pool_put_full_page(rx_q->page_pool, buf->sec_page, false); 1503 buf->sec_page = NULL; 1504 } 1505 1506 /** 1507 * stmmac_free_tx_buffer - free RX dma buffers 1508 * @priv: private structure 1509 * @dma_conf: structure to take the dma data 1510 * @queue: RX queue index 1511 * @i: buffer index. 
1512 */ 1513 static void stmmac_free_tx_buffer(struct stmmac_priv *priv, 1514 struct stmmac_dma_conf *dma_conf, 1515 u32 queue, int i) 1516 { 1517 struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue]; 1518 1519 if (tx_q->tx_skbuff_dma[i].buf && 1520 tx_q->tx_skbuff_dma[i].buf_type != STMMAC_TXBUF_T_XDP_TX) { 1521 if (tx_q->tx_skbuff_dma[i].map_as_page) 1522 dma_unmap_page(priv->device, 1523 tx_q->tx_skbuff_dma[i].buf, 1524 tx_q->tx_skbuff_dma[i].len, 1525 DMA_TO_DEVICE); 1526 else 1527 dma_unmap_single(priv->device, 1528 tx_q->tx_skbuff_dma[i].buf, 1529 tx_q->tx_skbuff_dma[i].len, 1530 DMA_TO_DEVICE); 1531 } 1532 1533 if (tx_q->xdpf[i] && 1534 (tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XDP_TX || 1535 tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XDP_NDO)) { 1536 xdp_return_frame(tx_q->xdpf[i]); 1537 tx_q->xdpf[i] = NULL; 1538 } 1539 1540 if (tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XSK_TX) 1541 tx_q->xsk_frames_done++; 1542 1543 if (tx_q->tx_skbuff[i] && 1544 tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_SKB) { 1545 dev_kfree_skb_any(tx_q->tx_skbuff[i]); 1546 tx_q->tx_skbuff[i] = NULL; 1547 } 1548 1549 tx_q->tx_skbuff_dma[i].buf = 0; 1550 tx_q->tx_skbuff_dma[i].map_as_page = false; 1551 } 1552 1553 /** 1554 * dma_free_rx_skbufs - free RX dma buffers 1555 * @priv: private structure 1556 * @dma_conf: structure to take the dma data 1557 * @queue: RX queue index 1558 */ 1559 static void dma_free_rx_skbufs(struct stmmac_priv *priv, 1560 struct stmmac_dma_conf *dma_conf, 1561 u32 queue) 1562 { 1563 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue]; 1564 int i; 1565 1566 for (i = 0; i < dma_conf->dma_rx_size; i++) 1567 stmmac_free_rx_buffer(priv, rx_q, i); 1568 } 1569 1570 static int stmmac_alloc_rx_buffers(struct stmmac_priv *priv, 1571 struct stmmac_dma_conf *dma_conf, 1572 u32 queue, gfp_t flags) 1573 { 1574 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue]; 1575 int i; 1576 1577 for (i = 0; i < dma_conf->dma_rx_size; i++) { 1578 struct dma_desc *p; 1579 int ret; 1580 1581 if (priv->extend_desc) 1582 p = &((rx_q->dma_erx + i)->basic); 1583 else 1584 p = rx_q->dma_rx + i; 1585 1586 ret = stmmac_init_rx_buffers(priv, dma_conf, p, i, flags, 1587 queue); 1588 if (ret) 1589 return ret; 1590 1591 rx_q->buf_alloc_num++; 1592 } 1593 1594 return 0; 1595 } 1596 1597 /** 1598 * dma_free_rx_xskbufs - free RX dma buffers from XSK pool 1599 * @priv: private structure 1600 * @dma_conf: structure to take the dma data 1601 * @queue: RX queue index 1602 */ 1603 static void dma_free_rx_xskbufs(struct stmmac_priv *priv, 1604 struct stmmac_dma_conf *dma_conf, 1605 u32 queue) 1606 { 1607 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue]; 1608 int i; 1609 1610 for (i = 0; i < dma_conf->dma_rx_size; i++) { 1611 struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i]; 1612 1613 if (!buf->xdp) 1614 continue; 1615 1616 xsk_buff_free(buf->xdp); 1617 buf->xdp = NULL; 1618 } 1619 } 1620 1621 static int stmmac_alloc_rx_buffers_zc(struct stmmac_priv *priv, 1622 struct stmmac_dma_conf *dma_conf, 1623 u32 queue) 1624 { 1625 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue]; 1626 int i; 1627 1628 /* struct stmmac_xdp_buff is using cb field (maximum size of 24 bytes) 1629 * in struct xdp_buff_xsk to stash driver specific information. Thus, 1630 * use this macro to make sure no size violations. 
1631 */ 1632 XSK_CHECK_PRIV_TYPE(struct stmmac_xdp_buff); 1633 1634 for (i = 0; i < dma_conf->dma_rx_size; i++) { 1635 struct stmmac_rx_buffer *buf; 1636 dma_addr_t dma_addr; 1637 struct dma_desc *p; 1638 1639 if (priv->extend_desc) 1640 p = (struct dma_desc *)(rx_q->dma_erx + i); 1641 else 1642 p = rx_q->dma_rx + i; 1643 1644 buf = &rx_q->buf_pool[i]; 1645 1646 buf->xdp = xsk_buff_alloc(rx_q->xsk_pool); 1647 if (!buf->xdp) 1648 return -ENOMEM; 1649 1650 dma_addr = xsk_buff_xdp_get_dma(buf->xdp); 1651 stmmac_set_desc_addr(priv, p, dma_addr); 1652 rx_q->buf_alloc_num++; 1653 } 1654 1655 return 0; 1656 } 1657 1658 static struct xsk_buff_pool *stmmac_get_xsk_pool(struct stmmac_priv *priv, u32 queue) 1659 { 1660 if (!stmmac_xdp_is_enabled(priv) || !test_bit(queue, priv->af_xdp_zc_qps)) 1661 return NULL; 1662 1663 return xsk_get_pool_from_qid(priv->dev, queue); 1664 } 1665 1666 /** 1667 * __init_dma_rx_desc_rings - init the RX descriptor ring (per queue) 1668 * @priv: driver private structure 1669 * @dma_conf: structure to take the dma data 1670 * @queue: RX queue index 1671 * @flags: gfp flag. 1672 * Description: this function initializes the DMA RX descriptors 1673 * and allocates the socket buffers. It supports the chained and ring 1674 * modes. 1675 */ 1676 static int __init_dma_rx_desc_rings(struct stmmac_priv *priv, 1677 struct stmmac_dma_conf *dma_conf, 1678 u32 queue, gfp_t flags) 1679 { 1680 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue]; 1681 int ret; 1682 1683 netif_dbg(priv, probe, priv->dev, 1684 "(%s) dma_rx_phy=0x%08x\n", __func__, 1685 (u32)rx_q->dma_rx_phy); 1686 1687 stmmac_clear_rx_descriptors(priv, dma_conf, queue); 1688 1689 xdp_rxq_info_unreg_mem_model(&rx_q->xdp_rxq); 1690 1691 rx_q->xsk_pool = stmmac_get_xsk_pool(priv, queue); 1692 1693 if (rx_q->xsk_pool) { 1694 WARN_ON(xdp_rxq_info_reg_mem_model(&rx_q->xdp_rxq, 1695 MEM_TYPE_XSK_BUFF_POOL, 1696 NULL)); 1697 netdev_info(priv->dev, 1698 "Register MEM_TYPE_XSK_BUFF_POOL RxQ-%d\n", 1699 rx_q->queue_index); 1700 xsk_pool_set_rxq_info(rx_q->xsk_pool, &rx_q->xdp_rxq); 1701 } else { 1702 WARN_ON(xdp_rxq_info_reg_mem_model(&rx_q->xdp_rxq, 1703 MEM_TYPE_PAGE_POOL, 1704 rx_q->page_pool)); 1705 netdev_info(priv->dev, 1706 "Register MEM_TYPE_PAGE_POOL RxQ-%d\n", 1707 rx_q->queue_index); 1708 } 1709 1710 if (rx_q->xsk_pool) { 1711 /* RX XDP ZC buffer pool may not be populated, e.g. 1712 * xdpsock TX-only. 
1713 */ 1714 stmmac_alloc_rx_buffers_zc(priv, dma_conf, queue); 1715 } else { 1716 ret = stmmac_alloc_rx_buffers(priv, dma_conf, queue, flags); 1717 if (ret < 0) 1718 return -ENOMEM; 1719 } 1720 1721 /* Setup the chained descriptor addresses */ 1722 if (priv->mode == STMMAC_CHAIN_MODE) { 1723 if (priv->extend_desc) 1724 stmmac_mode_init(priv, rx_q->dma_erx, 1725 rx_q->dma_rx_phy, 1726 dma_conf->dma_rx_size, 1); 1727 else 1728 stmmac_mode_init(priv, rx_q->dma_rx, 1729 rx_q->dma_rx_phy, 1730 dma_conf->dma_rx_size, 0); 1731 } 1732 1733 return 0; 1734 } 1735 1736 static int init_dma_rx_desc_rings(struct net_device *dev, 1737 struct stmmac_dma_conf *dma_conf, 1738 gfp_t flags) 1739 { 1740 struct stmmac_priv *priv = netdev_priv(dev); 1741 u32 rx_count = priv->plat->rx_queues_to_use; 1742 int queue; 1743 int ret; 1744 1745 /* RX INITIALIZATION */ 1746 netif_dbg(priv, probe, priv->dev, 1747 "SKB addresses:\nskb\t\tskb data\tdma data\n"); 1748 1749 for (queue = 0; queue < rx_count; queue++) { 1750 ret = __init_dma_rx_desc_rings(priv, dma_conf, queue, flags); 1751 if (ret) 1752 goto err_init_rx_buffers; 1753 } 1754 1755 return 0; 1756 1757 err_init_rx_buffers: 1758 while (queue >= 0) { 1759 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue]; 1760 1761 if (rx_q->xsk_pool) 1762 dma_free_rx_xskbufs(priv, dma_conf, queue); 1763 else 1764 dma_free_rx_skbufs(priv, dma_conf, queue); 1765 1766 rx_q->buf_alloc_num = 0; 1767 rx_q->xsk_pool = NULL; 1768 1769 queue--; 1770 } 1771 1772 return ret; 1773 } 1774 1775 /** 1776 * __init_dma_tx_desc_rings - init the TX descriptor ring (per queue) 1777 * @priv: driver private structure 1778 * @dma_conf: structure to take the dma data 1779 * @queue: TX queue index 1780 * Description: this function initializes the DMA TX descriptors 1781 * and allocates the socket buffers. It supports the chained and ring 1782 * modes. 
1783 */ 1784 static int __init_dma_tx_desc_rings(struct stmmac_priv *priv, 1785 struct stmmac_dma_conf *dma_conf, 1786 u32 queue) 1787 { 1788 struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue]; 1789 int i; 1790 1791 netif_dbg(priv, probe, priv->dev, 1792 "(%s) dma_tx_phy=0x%08x\n", __func__, 1793 (u32)tx_q->dma_tx_phy); 1794 1795 /* Setup the chained descriptor addresses */ 1796 if (priv->mode == STMMAC_CHAIN_MODE) { 1797 if (priv->extend_desc) 1798 stmmac_mode_init(priv, tx_q->dma_etx, 1799 tx_q->dma_tx_phy, 1800 dma_conf->dma_tx_size, 1); 1801 else if (!(tx_q->tbs & STMMAC_TBS_AVAIL)) 1802 stmmac_mode_init(priv, tx_q->dma_tx, 1803 tx_q->dma_tx_phy, 1804 dma_conf->dma_tx_size, 0); 1805 } 1806 1807 tx_q->xsk_pool = stmmac_get_xsk_pool(priv, queue); 1808 1809 for (i = 0; i < dma_conf->dma_tx_size; i++) { 1810 struct dma_desc *p; 1811 1812 if (priv->extend_desc) 1813 p = &((tx_q->dma_etx + i)->basic); 1814 else if (tx_q->tbs & STMMAC_TBS_AVAIL) 1815 p = &((tx_q->dma_entx + i)->basic); 1816 else 1817 p = tx_q->dma_tx + i; 1818 1819 stmmac_clear_desc(priv, p); 1820 1821 tx_q->tx_skbuff_dma[i].buf = 0; 1822 tx_q->tx_skbuff_dma[i].map_as_page = false; 1823 tx_q->tx_skbuff_dma[i].len = 0; 1824 tx_q->tx_skbuff_dma[i].last_segment = false; 1825 tx_q->tx_skbuff[i] = NULL; 1826 } 1827 1828 return 0; 1829 } 1830 1831 static int init_dma_tx_desc_rings(struct net_device *dev, 1832 struct stmmac_dma_conf *dma_conf) 1833 { 1834 struct stmmac_priv *priv = netdev_priv(dev); 1835 u32 tx_queue_cnt; 1836 u32 queue; 1837 1838 tx_queue_cnt = priv->plat->tx_queues_to_use; 1839 1840 for (queue = 0; queue < tx_queue_cnt; queue++) 1841 __init_dma_tx_desc_rings(priv, dma_conf, queue); 1842 1843 return 0; 1844 } 1845 1846 /** 1847 * init_dma_desc_rings - init the RX/TX descriptor rings 1848 * @dev: net device structure 1849 * @dma_conf: structure to take the dma data 1850 * @flags: gfp flag. 1851 * Description: this function initializes the DMA RX/TX descriptors 1852 * and allocates the socket buffers. It supports the chained and ring 1853 * modes. 
1854 */ 1855 static int init_dma_desc_rings(struct net_device *dev, 1856 struct stmmac_dma_conf *dma_conf, 1857 gfp_t flags) 1858 { 1859 struct stmmac_priv *priv = netdev_priv(dev); 1860 int ret; 1861 1862 ret = init_dma_rx_desc_rings(dev, dma_conf, flags); 1863 if (ret) 1864 return ret; 1865 1866 ret = init_dma_tx_desc_rings(dev, dma_conf); 1867 1868 stmmac_clear_descriptors(priv, dma_conf); 1869 1870 if (netif_msg_hw(priv)) 1871 stmmac_display_rings(priv, dma_conf); 1872 1873 return ret; 1874 } 1875 1876 /** 1877 * dma_free_tx_skbufs - free TX dma buffers 1878 * @priv: private structure 1879 * @dma_conf: structure to take the dma data 1880 * @queue: TX queue index 1881 */ 1882 static void dma_free_tx_skbufs(struct stmmac_priv *priv, 1883 struct stmmac_dma_conf *dma_conf, 1884 u32 queue) 1885 { 1886 struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue]; 1887 int i; 1888 1889 tx_q->xsk_frames_done = 0; 1890 1891 for (i = 0; i < dma_conf->dma_tx_size; i++) 1892 stmmac_free_tx_buffer(priv, dma_conf, queue, i); 1893 1894 if (tx_q->xsk_pool && tx_q->xsk_frames_done) { 1895 xsk_tx_completed(tx_q->xsk_pool, tx_q->xsk_frames_done); 1896 tx_q->xsk_frames_done = 0; 1897 tx_q->xsk_pool = NULL; 1898 } 1899 } 1900 1901 /** 1902 * stmmac_free_tx_skbufs - free TX skb buffers 1903 * @priv: private structure 1904 */ 1905 static void stmmac_free_tx_skbufs(struct stmmac_priv *priv) 1906 { 1907 u32 tx_queue_cnt = priv->plat->tx_queues_to_use; 1908 u32 queue; 1909 1910 for (queue = 0; queue < tx_queue_cnt; queue++) 1911 dma_free_tx_skbufs(priv, &priv->dma_conf, queue); 1912 } 1913 1914 /** 1915 * __free_dma_rx_desc_resources - free RX dma desc resources (per queue) 1916 * @priv: private structure 1917 * @dma_conf: structure to take the dma data 1918 * @queue: RX queue index 1919 */ 1920 static void __free_dma_rx_desc_resources(struct stmmac_priv *priv, 1921 struct stmmac_dma_conf *dma_conf, 1922 u32 queue) 1923 { 1924 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue]; 1925 1926 /* Release the DMA RX socket buffers */ 1927 if (rx_q->xsk_pool) 1928 dma_free_rx_xskbufs(priv, dma_conf, queue); 1929 else 1930 dma_free_rx_skbufs(priv, dma_conf, queue); 1931 1932 rx_q->buf_alloc_num = 0; 1933 rx_q->xsk_pool = NULL; 1934 1935 /* Free DMA regions of consistent memory previously allocated */ 1936 if (!priv->extend_desc) 1937 dma_free_coherent(priv->device, dma_conf->dma_rx_size * 1938 sizeof(struct dma_desc), 1939 rx_q->dma_rx, rx_q->dma_rx_phy); 1940 else 1941 dma_free_coherent(priv->device, dma_conf->dma_rx_size * 1942 sizeof(struct dma_extended_desc), 1943 rx_q->dma_erx, rx_q->dma_rx_phy); 1944 1945 if (xdp_rxq_info_is_reg(&rx_q->xdp_rxq)) 1946 xdp_rxq_info_unreg(&rx_q->xdp_rxq); 1947 1948 kfree(rx_q->buf_pool); 1949 if (rx_q->page_pool) 1950 page_pool_destroy(rx_q->page_pool); 1951 } 1952 1953 static void free_dma_rx_desc_resources(struct stmmac_priv *priv, 1954 struct stmmac_dma_conf *dma_conf) 1955 { 1956 u32 rx_count = priv->plat->rx_queues_to_use; 1957 u32 queue; 1958 1959 /* Free RX queue resources */ 1960 for (queue = 0; queue < rx_count; queue++) 1961 __free_dma_rx_desc_resources(priv, dma_conf, queue); 1962 } 1963 1964 /** 1965 * __free_dma_tx_desc_resources - free TX dma desc resources (per queue) 1966 * @priv: private structure 1967 * @dma_conf: structure to take the dma data 1968 * @queue: TX queue index 1969 */ 1970 static void __free_dma_tx_desc_resources(struct stmmac_priv *priv, 1971 struct stmmac_dma_conf *dma_conf, 1972 u32 queue) 1973 { 1974 struct stmmac_tx_queue *tx_q = 
&dma_conf->tx_queue[queue]; 1975 size_t size; 1976 void *addr; 1977 1978 /* Release the DMA TX socket buffers */ 1979 dma_free_tx_skbufs(priv, dma_conf, queue); 1980 1981 if (priv->extend_desc) { 1982 size = sizeof(struct dma_extended_desc); 1983 addr = tx_q->dma_etx; 1984 } else if (tx_q->tbs & STMMAC_TBS_AVAIL) { 1985 size = sizeof(struct dma_edesc); 1986 addr = tx_q->dma_entx; 1987 } else { 1988 size = sizeof(struct dma_desc); 1989 addr = tx_q->dma_tx; 1990 } 1991 1992 size *= dma_conf->dma_tx_size; 1993 1994 dma_free_coherent(priv->device, size, addr, tx_q->dma_tx_phy); 1995 1996 kfree(tx_q->tx_skbuff_dma); 1997 kfree(tx_q->tx_skbuff); 1998 } 1999 2000 static void free_dma_tx_desc_resources(struct stmmac_priv *priv, 2001 struct stmmac_dma_conf *dma_conf) 2002 { 2003 u32 tx_count = priv->plat->tx_queues_to_use; 2004 u32 queue; 2005 2006 /* Free TX queue resources */ 2007 for (queue = 0; queue < tx_count; queue++) 2008 __free_dma_tx_desc_resources(priv, dma_conf, queue); 2009 } 2010 2011 /** 2012 * __alloc_dma_rx_desc_resources - alloc RX resources (per queue). 2013 * @priv: private structure 2014 * @dma_conf: structure to take the dma data 2015 * @queue: RX queue index 2016 * Description: according to which descriptor can be used (extend or basic) 2017 * this function allocates the resources for TX and RX paths. In case of 2018 * reception, for example, it pre-allocated the RX socket buffer in order to 2019 * allow zero-copy mechanism. 2020 */ 2021 static int __alloc_dma_rx_desc_resources(struct stmmac_priv *priv, 2022 struct stmmac_dma_conf *dma_conf, 2023 u32 queue) 2024 { 2025 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue]; 2026 struct stmmac_channel *ch = &priv->channel[queue]; 2027 bool xdp_prog = stmmac_xdp_is_enabled(priv); 2028 struct page_pool_params pp_params = { 0 }; 2029 unsigned int num_pages; 2030 unsigned int napi_id; 2031 int ret; 2032 2033 rx_q->queue_index = queue; 2034 rx_q->priv_data = priv; 2035 2036 pp_params.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV; 2037 pp_params.pool_size = dma_conf->dma_rx_size; 2038 num_pages = DIV_ROUND_UP(dma_conf->dma_buf_sz, PAGE_SIZE); 2039 pp_params.order = ilog2(num_pages); 2040 pp_params.nid = dev_to_node(priv->device); 2041 pp_params.dev = priv->device; 2042 pp_params.dma_dir = xdp_prog ? 
DMA_BIDIRECTIONAL : DMA_FROM_DEVICE; 2043 pp_params.offset = stmmac_rx_offset(priv); 2044 pp_params.max_len = STMMAC_MAX_RX_BUF_SIZE(num_pages); 2045 2046 rx_q->page_pool = page_pool_create(&pp_params); 2047 if (IS_ERR(rx_q->page_pool)) { 2048 ret = PTR_ERR(rx_q->page_pool); 2049 rx_q->page_pool = NULL; 2050 return ret; 2051 } 2052 2053 rx_q->buf_pool = kcalloc(dma_conf->dma_rx_size, 2054 sizeof(*rx_q->buf_pool), 2055 GFP_KERNEL); 2056 if (!rx_q->buf_pool) 2057 return -ENOMEM; 2058 2059 if (priv->extend_desc) { 2060 rx_q->dma_erx = dma_alloc_coherent(priv->device, 2061 dma_conf->dma_rx_size * 2062 sizeof(struct dma_extended_desc), 2063 &rx_q->dma_rx_phy, 2064 GFP_KERNEL); 2065 if (!rx_q->dma_erx) 2066 return -ENOMEM; 2067 2068 } else { 2069 rx_q->dma_rx = dma_alloc_coherent(priv->device, 2070 dma_conf->dma_rx_size * 2071 sizeof(struct dma_desc), 2072 &rx_q->dma_rx_phy, 2073 GFP_KERNEL); 2074 if (!rx_q->dma_rx) 2075 return -ENOMEM; 2076 } 2077 2078 if (stmmac_xdp_is_enabled(priv) && 2079 test_bit(queue, priv->af_xdp_zc_qps)) 2080 napi_id = ch->rxtx_napi.napi_id; 2081 else 2082 napi_id = ch->rx_napi.napi_id; 2083 2084 ret = xdp_rxq_info_reg(&rx_q->xdp_rxq, priv->dev, 2085 rx_q->queue_index, 2086 napi_id); 2087 if (ret) { 2088 netdev_err(priv->dev, "Failed to register xdp rxq info\n"); 2089 return -EINVAL; 2090 } 2091 2092 return 0; 2093 } 2094 2095 static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv, 2096 struct stmmac_dma_conf *dma_conf) 2097 { 2098 u32 rx_count = priv->plat->rx_queues_to_use; 2099 u32 queue; 2100 int ret; 2101 2102 /* RX queues buffers and DMA */ 2103 for (queue = 0; queue < rx_count; queue++) { 2104 ret = __alloc_dma_rx_desc_resources(priv, dma_conf, queue); 2105 if (ret) 2106 goto err_dma; 2107 } 2108 2109 return 0; 2110 2111 err_dma: 2112 free_dma_rx_desc_resources(priv, dma_conf); 2113 2114 return ret; 2115 } 2116 2117 /** 2118 * __alloc_dma_tx_desc_resources - alloc TX resources (per queue). 2119 * @priv: private structure 2120 * @dma_conf: structure to take the dma data 2121 * @queue: TX queue index 2122 * Description: according to which descriptor can be used (extend or basic) 2123 * this function allocates the resources for TX and RX paths. In case of 2124 * reception, for example, it pre-allocated the RX socket buffer in order to 2125 * allow zero-copy mechanism. 
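 * For this TX variant that means the tx_skbuff/tx_skbuff_dma arrays plus
 * the descriptor ring itself, whose element size depends on whether extended
 * descriptors or TBS (enhanced) descriptors are in use.
 * Return value: 0 on success, -ENOMEM on allocation failure.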
2126 */ 2127 static int __alloc_dma_tx_desc_resources(struct stmmac_priv *priv, 2128 struct stmmac_dma_conf *dma_conf, 2129 u32 queue) 2130 { 2131 struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue]; 2132 size_t size; 2133 void *addr; 2134 2135 tx_q->queue_index = queue; 2136 tx_q->priv_data = priv; 2137 2138 tx_q->tx_skbuff_dma = kcalloc(dma_conf->dma_tx_size, 2139 sizeof(*tx_q->tx_skbuff_dma), 2140 GFP_KERNEL); 2141 if (!tx_q->tx_skbuff_dma) 2142 return -ENOMEM; 2143 2144 tx_q->tx_skbuff = kcalloc(dma_conf->dma_tx_size, 2145 sizeof(struct sk_buff *), 2146 GFP_KERNEL); 2147 if (!tx_q->tx_skbuff) 2148 return -ENOMEM; 2149 2150 if (priv->extend_desc) 2151 size = sizeof(struct dma_extended_desc); 2152 else if (tx_q->tbs & STMMAC_TBS_AVAIL) 2153 size = sizeof(struct dma_edesc); 2154 else 2155 size = sizeof(struct dma_desc); 2156 2157 size *= dma_conf->dma_tx_size; 2158 2159 addr = dma_alloc_coherent(priv->device, size, 2160 &tx_q->dma_tx_phy, GFP_KERNEL); 2161 if (!addr) 2162 return -ENOMEM; 2163 2164 if (priv->extend_desc) 2165 tx_q->dma_etx = addr; 2166 else if (tx_q->tbs & STMMAC_TBS_AVAIL) 2167 tx_q->dma_entx = addr; 2168 else 2169 tx_q->dma_tx = addr; 2170 2171 return 0; 2172 } 2173 2174 static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv, 2175 struct stmmac_dma_conf *dma_conf) 2176 { 2177 u32 tx_count = priv->plat->tx_queues_to_use; 2178 u32 queue; 2179 int ret; 2180 2181 /* TX queues buffers and DMA */ 2182 for (queue = 0; queue < tx_count; queue++) { 2183 ret = __alloc_dma_tx_desc_resources(priv, dma_conf, queue); 2184 if (ret) 2185 goto err_dma; 2186 } 2187 2188 return 0; 2189 2190 err_dma: 2191 free_dma_tx_desc_resources(priv, dma_conf); 2192 return ret; 2193 } 2194 2195 /** 2196 * alloc_dma_desc_resources - alloc TX/RX resources. 2197 * @priv: private structure 2198 * @dma_conf: structure to take the dma data 2199 * Description: according to which descriptor can be used (extend or basic) 2200 * this function allocates the resources for TX and RX paths. In case of 2201 * reception, for example, it pre-allocated the RX socket buffer in order to 2202 * allow zero-copy mechanism. 2203 */ 2204 static int alloc_dma_desc_resources(struct stmmac_priv *priv, 2205 struct stmmac_dma_conf *dma_conf) 2206 { 2207 /* RX Allocation */ 2208 int ret = alloc_dma_rx_desc_resources(priv, dma_conf); 2209 2210 if (ret) 2211 return ret; 2212 2213 ret = alloc_dma_tx_desc_resources(priv, dma_conf); 2214 2215 return ret; 2216 } 2217 2218 /** 2219 * free_dma_desc_resources - free dma desc resources 2220 * @priv: private structure 2221 * @dma_conf: structure to take the dma data 2222 */ 2223 static void free_dma_desc_resources(struct stmmac_priv *priv, 2224 struct stmmac_dma_conf *dma_conf) 2225 { 2226 /* Release the DMA TX socket buffers */ 2227 free_dma_tx_desc_resources(priv, dma_conf); 2228 2229 /* Release the DMA RX socket buffers later 2230 * to ensure all pending XDP_TX buffers are returned. 
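 * XDP_TX frames still queued on the TX rings reference pages owned by the
 * RX page_pool, so the pool must stay alive until the TX side has been
 * cleaned up above.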
2231 */ 2232 free_dma_rx_desc_resources(priv, dma_conf); 2233 } 2234 2235 /** 2236 * stmmac_mac_enable_rx_queues - Enable MAC rx queues 2237 * @priv: driver private structure 2238 * Description: It is used for enabling the rx queues in the MAC 2239 */ 2240 static void stmmac_mac_enable_rx_queues(struct stmmac_priv *priv) 2241 { 2242 u32 rx_queues_count = priv->plat->rx_queues_to_use; 2243 int queue; 2244 u8 mode; 2245 2246 for (queue = 0; queue < rx_queues_count; queue++) { 2247 mode = priv->plat->rx_queues_cfg[queue].mode_to_use; 2248 stmmac_rx_queue_enable(priv, priv->hw, mode, queue); 2249 } 2250 } 2251 2252 /** 2253 * stmmac_start_rx_dma - start RX DMA channel 2254 * @priv: driver private structure 2255 * @chan: RX channel index 2256 * Description: 2257 * This starts a RX DMA channel 2258 */ 2259 static void stmmac_start_rx_dma(struct stmmac_priv *priv, u32 chan) 2260 { 2261 netdev_dbg(priv->dev, "DMA RX processes started in channel %d\n", chan); 2262 stmmac_start_rx(priv, priv->ioaddr, chan); 2263 } 2264 2265 /** 2266 * stmmac_start_tx_dma - start TX DMA channel 2267 * @priv: driver private structure 2268 * @chan: TX channel index 2269 * Description: 2270 * This starts a TX DMA channel 2271 */ 2272 static void stmmac_start_tx_dma(struct stmmac_priv *priv, u32 chan) 2273 { 2274 netdev_dbg(priv->dev, "DMA TX processes started in channel %d\n", chan); 2275 stmmac_start_tx(priv, priv->ioaddr, chan); 2276 } 2277 2278 /** 2279 * stmmac_stop_rx_dma - stop RX DMA channel 2280 * @priv: driver private structure 2281 * @chan: RX channel index 2282 * Description: 2283 * This stops a RX DMA channel 2284 */ 2285 static void stmmac_stop_rx_dma(struct stmmac_priv *priv, u32 chan) 2286 { 2287 netdev_dbg(priv->dev, "DMA RX processes stopped in channel %d\n", chan); 2288 stmmac_stop_rx(priv, priv->ioaddr, chan); 2289 } 2290 2291 /** 2292 * stmmac_stop_tx_dma - stop TX DMA channel 2293 * @priv: driver private structure 2294 * @chan: TX channel index 2295 * Description: 2296 * This stops a TX DMA channel 2297 */ 2298 static void stmmac_stop_tx_dma(struct stmmac_priv *priv, u32 chan) 2299 { 2300 netdev_dbg(priv->dev, "DMA TX processes stopped in channel %d\n", chan); 2301 stmmac_stop_tx(priv, priv->ioaddr, chan); 2302 } 2303 2304 static void stmmac_enable_all_dma_irq(struct stmmac_priv *priv) 2305 { 2306 u32 rx_channels_count = priv->plat->rx_queues_to_use; 2307 u32 tx_channels_count = priv->plat->tx_queues_to_use; 2308 u32 dma_csr_ch = max(rx_channels_count, tx_channels_count); 2309 u32 chan; 2310 2311 for (chan = 0; chan < dma_csr_ch; chan++) { 2312 struct stmmac_channel *ch = &priv->channel[chan]; 2313 unsigned long flags; 2314 2315 spin_lock_irqsave(&ch->lock, flags); 2316 stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 1); 2317 spin_unlock_irqrestore(&ch->lock, flags); 2318 } 2319 } 2320 2321 /** 2322 * stmmac_start_all_dma - start all RX and TX DMA channels 2323 * @priv: driver private structure 2324 * Description: 2325 * This starts all the RX and TX DMA channels 2326 */ 2327 static void stmmac_start_all_dma(struct stmmac_priv *priv) 2328 { 2329 u32 rx_channels_count = priv->plat->rx_queues_to_use; 2330 u32 tx_channels_count = priv->plat->tx_queues_to_use; 2331 u32 chan = 0; 2332 2333 for (chan = 0; chan < rx_channels_count; chan++) 2334 stmmac_start_rx_dma(priv, chan); 2335 2336 for (chan = 0; chan < tx_channels_count; chan++) 2337 stmmac_start_tx_dma(priv, chan); 2338 } 2339 2340 /** 2341 * stmmac_stop_all_dma - stop all RX and TX DMA channels 2342 * @priv: driver private structure 2343 * 
Description: 2344 * This stops all the RX and TX DMA channels 2345 */ 2346 static void stmmac_stop_all_dma(struct stmmac_priv *priv) 2347 { 2348 u32 rx_channels_count = priv->plat->rx_queues_to_use; 2349 u32 tx_channels_count = priv->plat->tx_queues_to_use; 2350 u32 chan = 0; 2351 2352 for (chan = 0; chan < rx_channels_count; chan++) 2353 stmmac_stop_rx_dma(priv, chan); 2354 2355 for (chan = 0; chan < tx_channels_count; chan++) 2356 stmmac_stop_tx_dma(priv, chan); 2357 } 2358 2359 /** 2360 * stmmac_dma_operation_mode - HW DMA operation mode 2361 * @priv: driver private structure 2362 * Description: it is used for configuring the DMA operation mode register in 2363 * order to program the tx/rx DMA thresholds or Store-And-Forward mode. 2364 */ 2365 static void stmmac_dma_operation_mode(struct stmmac_priv *priv) 2366 { 2367 u32 rx_channels_count = priv->plat->rx_queues_to_use; 2368 u32 tx_channels_count = priv->plat->tx_queues_to_use; 2369 int rxfifosz = priv->plat->rx_fifo_size; 2370 int txfifosz = priv->plat->tx_fifo_size; 2371 u32 txmode = 0; 2372 u32 rxmode = 0; 2373 u32 chan = 0; 2374 u8 qmode = 0; 2375 2376 if (rxfifosz == 0) 2377 rxfifosz = priv->dma_cap.rx_fifo_size; 2378 if (txfifosz == 0) 2379 txfifosz = priv->dma_cap.tx_fifo_size; 2380 2381 /* Adjust for real per queue fifo size */ 2382 rxfifosz /= rx_channels_count; 2383 txfifosz /= tx_channels_count; 2384 2385 if (priv->plat->force_thresh_dma_mode) { 2386 txmode = tc; 2387 rxmode = tc; 2388 } else if (priv->plat->force_sf_dma_mode || priv->plat->tx_coe) { 2389 /* 2390 * In case of GMAC, SF mode can be enabled 2391 * to perform the TX COE in HW. This depends on: 2392 * 1) TX COE is actually supported 2393 * 2) there is no buggy Jumbo frame support 2394 * that requires not inserting the csum in the TDES.
2395 */ 2396 txmode = SF_DMA_MODE; 2397 rxmode = SF_DMA_MODE; 2398 priv->xstats.threshold = SF_DMA_MODE; 2399 } else { 2400 txmode = tc; 2401 rxmode = SF_DMA_MODE; 2402 } 2403 2404 /* configure all channels */ 2405 for (chan = 0; chan < rx_channels_count; chan++) { 2406 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[chan]; 2407 u32 buf_size; 2408 2409 qmode = priv->plat->rx_queues_cfg[chan].mode_to_use; 2410 2411 stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan, 2412 rxfifosz, qmode); 2413 2414 if (rx_q->xsk_pool) { 2415 buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool); 2416 stmmac_set_dma_bfsize(priv, priv->ioaddr, 2417 buf_size, 2418 chan); 2419 } else { 2420 stmmac_set_dma_bfsize(priv, priv->ioaddr, 2421 priv->dma_conf.dma_buf_sz, 2422 chan); 2423 } 2424 } 2425 2426 for (chan = 0; chan < tx_channels_count; chan++) { 2427 qmode = priv->plat->tx_queues_cfg[chan].mode_to_use; 2428 2429 stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan, 2430 txfifosz, qmode); 2431 } 2432 } 2433 2434 static bool stmmac_xdp_xmit_zc(struct stmmac_priv *priv, u32 queue, u32 budget) 2435 { 2436 struct netdev_queue *nq = netdev_get_tx_queue(priv->dev, queue); 2437 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue]; 2438 struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[queue]; 2439 struct xsk_buff_pool *pool = tx_q->xsk_pool; 2440 unsigned int entry = tx_q->cur_tx; 2441 struct dma_desc *tx_desc = NULL; 2442 struct xdp_desc xdp_desc; 2443 bool work_done = true; 2444 u32 tx_set_ic_bit = 0; 2445 unsigned long flags; 2446 2447 /* Avoids TX time-out as we are sharing with slow path */ 2448 txq_trans_cond_update(nq); 2449 2450 budget = min(budget, stmmac_tx_avail(priv, queue)); 2451 2452 while (budget-- > 0) { 2453 dma_addr_t dma_addr; 2454 bool set_ic; 2455 2456 /* We are sharing with slow path and stop XSK TX desc submission when 2457 * available TX ring is less than threshold. 2458 */ 2459 if (unlikely(stmmac_tx_avail(priv, queue) < STMMAC_TX_XSK_AVAIL) || 2460 !netif_carrier_ok(priv->dev)) { 2461 work_done = false; 2462 break; 2463 } 2464 2465 if (!xsk_tx_peek_desc(pool, &xdp_desc)) 2466 break; 2467 2468 if (likely(priv->extend_desc)) 2469 tx_desc = (struct dma_desc *)(tx_q->dma_etx + entry); 2470 else if (tx_q->tbs & STMMAC_TBS_AVAIL) 2471 tx_desc = &tx_q->dma_entx[entry].basic; 2472 else 2473 tx_desc = tx_q->dma_tx + entry; 2474 2475 dma_addr = xsk_buff_raw_get_dma(pool, xdp_desc.addr); 2476 xsk_buff_raw_dma_sync_for_device(pool, dma_addr, xdp_desc.len); 2477 2478 tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XSK_TX; 2479 2480 /* To return XDP buffer to XSK pool, we simple call 2481 * xsk_tx_completed(), so we don't need to fill up 2482 * 'buf' and 'xdpf'. 
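 * The TX clean path only counts these entries via xsk_frames_done and
 * reports them with a single xsk_tx_completed() call, so no DMA unmap or
 * frame pointer is needed for XSK_TX descriptors.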
2483 */ 2484 tx_q->tx_skbuff_dma[entry].buf = 0; 2485 tx_q->xdpf[entry] = NULL; 2486 2487 tx_q->tx_skbuff_dma[entry].map_as_page = false; 2488 tx_q->tx_skbuff_dma[entry].len = xdp_desc.len; 2489 tx_q->tx_skbuff_dma[entry].last_segment = true; 2490 tx_q->tx_skbuff_dma[entry].is_jumbo = false; 2491 2492 stmmac_set_desc_addr(priv, tx_desc, dma_addr); 2493 2494 tx_q->tx_count_frames++; 2495 2496 if (!priv->tx_coal_frames[queue]) 2497 set_ic = false; 2498 else if (tx_q->tx_count_frames % priv->tx_coal_frames[queue] == 0) 2499 set_ic = true; 2500 else 2501 set_ic = false; 2502 2503 if (set_ic) { 2504 tx_q->tx_count_frames = 0; 2505 stmmac_set_tx_ic(priv, tx_desc); 2506 tx_set_ic_bit++; 2507 } 2508 2509 stmmac_prepare_tx_desc(priv, tx_desc, 1, xdp_desc.len, 2510 true, priv->mode, true, true, 2511 xdp_desc.len); 2512 2513 stmmac_enable_dma_transmission(priv, priv->ioaddr); 2514 2515 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size); 2516 entry = tx_q->cur_tx; 2517 } 2518 flags = u64_stats_update_begin_irqsave(&txq_stats->syncp); 2519 txq_stats->tx_set_ic_bit += tx_set_ic_bit; 2520 u64_stats_update_end_irqrestore(&txq_stats->syncp, flags); 2521 2522 if (tx_desc) { 2523 stmmac_flush_tx_descriptors(priv, queue); 2524 xsk_tx_release(pool); 2525 } 2526 2527 /* Return true if all of the 3 conditions are met 2528 * a) TX Budget is still available 2529 * b) work_done = true when XSK TX desc peek is empty (no more 2530 * pending XSK TX for transmission) 2531 */ 2532 return !!budget && work_done; 2533 } 2534 2535 static void stmmac_bump_dma_threshold(struct stmmac_priv *priv, u32 chan) 2536 { 2537 if (unlikely(priv->xstats.threshold != SF_DMA_MODE) && tc <= 256) { 2538 tc += 64; 2539 2540 if (priv->plat->force_thresh_dma_mode) 2541 stmmac_set_dma_operation_mode(priv, tc, tc, chan); 2542 else 2543 stmmac_set_dma_operation_mode(priv, tc, SF_DMA_MODE, 2544 chan); 2545 2546 priv->xstats.threshold = tc; 2547 } 2548 } 2549 2550 /** 2551 * stmmac_tx_clean - to manage the transmission completion 2552 * @priv: driver private structure 2553 * @budget: napi budget limiting this functions packet handling 2554 * @queue: TX queue index 2555 * Description: it reclaims the transmit resources after transmission completes. 
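 * Return value: the number of reclaimed descriptors, combined with the XSK
 * TX result, so that the NAPI TX poll can decide whether to re-enable the
 * channel interrupt.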
2556 */ 2557 static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue) 2558 { 2559 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue]; 2560 struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[queue]; 2561 unsigned int bytes_compl = 0, pkts_compl = 0; 2562 unsigned int entry, xmits = 0, count = 0; 2563 u32 tx_packets = 0, tx_errors = 0; 2564 unsigned long flags; 2565 2566 __netif_tx_lock_bh(netdev_get_tx_queue(priv->dev, queue)); 2567 2568 tx_q->xsk_frames_done = 0; 2569 2570 entry = tx_q->dirty_tx; 2571 2572 /* Try to clean all TX complete frame in 1 shot */ 2573 while ((entry != tx_q->cur_tx) && count < priv->dma_conf.dma_tx_size) { 2574 struct xdp_frame *xdpf; 2575 struct sk_buff *skb; 2576 struct dma_desc *p; 2577 int status; 2578 2579 if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_TX || 2580 tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_NDO) { 2581 xdpf = tx_q->xdpf[entry]; 2582 skb = NULL; 2583 } else if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_SKB) { 2584 xdpf = NULL; 2585 skb = tx_q->tx_skbuff[entry]; 2586 } else { 2587 xdpf = NULL; 2588 skb = NULL; 2589 } 2590 2591 if (priv->extend_desc) 2592 p = (struct dma_desc *)(tx_q->dma_etx + entry); 2593 else if (tx_q->tbs & STMMAC_TBS_AVAIL) 2594 p = &tx_q->dma_entx[entry].basic; 2595 else 2596 p = tx_q->dma_tx + entry; 2597 2598 status = stmmac_tx_status(priv, &priv->xstats, p, priv->ioaddr); 2599 /* Check if the descriptor is owned by the DMA */ 2600 if (unlikely(status & tx_dma_own)) 2601 break; 2602 2603 count++; 2604 2605 /* Make sure descriptor fields are read after reading 2606 * the own bit. 2607 */ 2608 dma_rmb(); 2609 2610 /* Just consider the last segment and ...*/ 2611 if (likely(!(status & tx_not_ls))) { 2612 /* ... 
verify the status error condition */ 2613 if (unlikely(status & tx_err)) { 2614 tx_errors++; 2615 if (unlikely(status & tx_err_bump_tc)) 2616 stmmac_bump_dma_threshold(priv, queue); 2617 } else { 2618 tx_packets++; 2619 } 2620 if (skb) 2621 stmmac_get_tx_hwtstamp(priv, p, skb); 2622 } 2623 2624 if (likely(tx_q->tx_skbuff_dma[entry].buf && 2625 tx_q->tx_skbuff_dma[entry].buf_type != STMMAC_TXBUF_T_XDP_TX)) { 2626 if (tx_q->tx_skbuff_dma[entry].map_as_page) 2627 dma_unmap_page(priv->device, 2628 tx_q->tx_skbuff_dma[entry].buf, 2629 tx_q->tx_skbuff_dma[entry].len, 2630 DMA_TO_DEVICE); 2631 else 2632 dma_unmap_single(priv->device, 2633 tx_q->tx_skbuff_dma[entry].buf, 2634 tx_q->tx_skbuff_dma[entry].len, 2635 DMA_TO_DEVICE); 2636 tx_q->tx_skbuff_dma[entry].buf = 0; 2637 tx_q->tx_skbuff_dma[entry].len = 0; 2638 tx_q->tx_skbuff_dma[entry].map_as_page = false; 2639 } 2640 2641 stmmac_clean_desc3(priv, tx_q, p); 2642 2643 tx_q->tx_skbuff_dma[entry].last_segment = false; 2644 tx_q->tx_skbuff_dma[entry].is_jumbo = false; 2645 2646 if (xdpf && 2647 tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_TX) { 2648 xdp_return_frame_rx_napi(xdpf); 2649 tx_q->xdpf[entry] = NULL; 2650 } 2651 2652 if (xdpf && 2653 tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_NDO) { 2654 xdp_return_frame(xdpf); 2655 tx_q->xdpf[entry] = NULL; 2656 } 2657 2658 if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XSK_TX) 2659 tx_q->xsk_frames_done++; 2660 2661 if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_SKB) { 2662 if (likely(skb)) { 2663 pkts_compl++; 2664 bytes_compl += skb->len; 2665 dev_consume_skb_any(skb); 2666 tx_q->tx_skbuff[entry] = NULL; 2667 } 2668 } 2669 2670 stmmac_release_tx_desc(priv, p, priv->mode); 2671 2672 entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size); 2673 } 2674 tx_q->dirty_tx = entry; 2675 2676 netdev_tx_completed_queue(netdev_get_tx_queue(priv->dev, queue), 2677 pkts_compl, bytes_compl); 2678 2679 if (unlikely(netif_tx_queue_stopped(netdev_get_tx_queue(priv->dev, 2680 queue))) && 2681 stmmac_tx_avail(priv, queue) > STMMAC_TX_THRESH(priv)) { 2682 2683 netif_dbg(priv, tx_done, priv->dev, 2684 "%s: restart transmit\n", __func__); 2685 netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, queue)); 2686 } 2687 2688 if (tx_q->xsk_pool) { 2689 bool work_done; 2690 2691 if (tx_q->xsk_frames_done) 2692 xsk_tx_completed(tx_q->xsk_pool, tx_q->xsk_frames_done); 2693 2694 if (xsk_uses_need_wakeup(tx_q->xsk_pool)) 2695 xsk_set_tx_need_wakeup(tx_q->xsk_pool); 2696 2697 /* For XSK TX, we try to send as many as possible. 2698 * If XSK work done (XSK TX desc empty and budget still 2699 * available), return "budget - 1" to reenable TX IRQ. 2700 * Else, return "budget" to make NAPI continue polling. 
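 * NAPI treats a return value below the budget as "all work done", which
 * is what lets the caller re-arm the TX interrupt.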
2701 */ 2702 work_done = stmmac_xdp_xmit_zc(priv, queue, 2703 STMMAC_XSK_TX_BUDGET_MAX); 2704 if (work_done) 2705 xmits = budget - 1; 2706 else 2707 xmits = budget; 2708 } 2709 2710 if (priv->eee_enabled && !priv->tx_path_in_lpi_mode && 2711 priv->eee_sw_timer_en) { 2712 if (stmmac_enable_eee_mode(priv)) 2713 mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer)); 2714 } 2715 2716 /* We still have pending packets, let's call for a new scheduling */ 2717 if (tx_q->dirty_tx != tx_q->cur_tx) 2718 stmmac_tx_timer_arm(priv, queue); 2719 2720 flags = u64_stats_update_begin_irqsave(&txq_stats->syncp); 2721 txq_stats->tx_packets += tx_packets; 2722 txq_stats->tx_pkt_n += tx_packets; 2723 txq_stats->tx_clean++; 2724 u64_stats_update_end_irqrestore(&txq_stats->syncp, flags); 2725 2726 priv->xstats.tx_errors += tx_errors; 2727 2728 __netif_tx_unlock_bh(netdev_get_tx_queue(priv->dev, queue)); 2729 2730 /* Combine decisions from TX clean and XSK TX */ 2731 return max(count, xmits); 2732 } 2733 2734 /** 2735 * stmmac_tx_err - to manage the tx error 2736 * @priv: driver private structure 2737 * @chan: channel index 2738 * Description: it cleans the descriptors and restarts the transmission 2739 * in case of transmission errors. 2740 */ 2741 static void stmmac_tx_err(struct stmmac_priv *priv, u32 chan) 2742 { 2743 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan]; 2744 2745 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, chan)); 2746 2747 stmmac_stop_tx_dma(priv, chan); 2748 dma_free_tx_skbufs(priv, &priv->dma_conf, chan); 2749 stmmac_clear_tx_descriptors(priv, &priv->dma_conf, chan); 2750 stmmac_reset_tx_queue(priv, chan); 2751 stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg, 2752 tx_q->dma_tx_phy, chan); 2753 stmmac_start_tx_dma(priv, chan); 2754 2755 priv->xstats.tx_errors++; 2756 netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, chan)); 2757 } 2758 2759 /** 2760 * stmmac_set_dma_operation_mode - Set DMA operation mode by channel 2761 * @priv: driver private structure 2762 * @txmode: TX operating mode 2763 * @rxmode: RX operating mode 2764 * @chan: channel index 2765 * Description: it is used for configuring of the DMA operation mode in 2766 * runtime in order to program the tx/rx DMA thresholds or Store-And-Forward 2767 * mode. 
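 * The per-channel FIFO sizes are derived by dividing the total RX/TX FIFO
 * size by the number of channels in use, mirroring stmmac_dma_operation_mode().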
2768 */ 2769 static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode, 2770 u32 rxmode, u32 chan) 2771 { 2772 u8 rxqmode = priv->plat->rx_queues_cfg[chan].mode_to_use; 2773 u8 txqmode = priv->plat->tx_queues_cfg[chan].mode_to_use; 2774 u32 rx_channels_count = priv->plat->rx_queues_to_use; 2775 u32 tx_channels_count = priv->plat->tx_queues_to_use; 2776 int rxfifosz = priv->plat->rx_fifo_size; 2777 int txfifosz = priv->plat->tx_fifo_size; 2778 2779 if (rxfifosz == 0) 2780 rxfifosz = priv->dma_cap.rx_fifo_size; 2781 if (txfifosz == 0) 2782 txfifosz = priv->dma_cap.tx_fifo_size; 2783 2784 /* Adjust for real per queue fifo size */ 2785 rxfifosz /= rx_channels_count; 2786 txfifosz /= tx_channels_count; 2787 2788 stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan, rxfifosz, rxqmode); 2789 stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan, txfifosz, txqmode); 2790 } 2791 2792 static bool stmmac_safety_feat_interrupt(struct stmmac_priv *priv) 2793 { 2794 int ret; 2795 2796 ret = stmmac_safety_feat_irq_status(priv, priv->dev, 2797 priv->ioaddr, priv->dma_cap.asp, &priv->sstats); 2798 if (ret && (ret != -EINVAL)) { 2799 stmmac_global_err(priv); 2800 return true; 2801 } 2802 2803 return false; 2804 } 2805 2806 static int stmmac_napi_check(struct stmmac_priv *priv, u32 chan, u32 dir) 2807 { 2808 int status = stmmac_dma_interrupt_status(priv, priv->ioaddr, 2809 &priv->xstats, chan, dir); 2810 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[chan]; 2811 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan]; 2812 struct stmmac_channel *ch = &priv->channel[chan]; 2813 struct napi_struct *rx_napi; 2814 struct napi_struct *tx_napi; 2815 unsigned long flags; 2816 2817 rx_napi = rx_q->xsk_pool ? &ch->rxtx_napi : &ch->rx_napi; 2818 tx_napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi; 2819 2820 if ((status & handle_rx) && (chan < priv->plat->rx_queues_to_use)) { 2821 if (napi_schedule_prep(rx_napi)) { 2822 spin_lock_irqsave(&ch->lock, flags); 2823 stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 0); 2824 spin_unlock_irqrestore(&ch->lock, flags); 2825 __napi_schedule(rx_napi); 2826 } 2827 } 2828 2829 if ((status & handle_tx) && (chan < priv->plat->tx_queues_to_use)) { 2830 if (napi_schedule_prep(tx_napi)) { 2831 spin_lock_irqsave(&ch->lock, flags); 2832 stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 0, 1); 2833 spin_unlock_irqrestore(&ch->lock, flags); 2834 __napi_schedule(tx_napi); 2835 } 2836 } 2837 2838 return status; 2839 } 2840 2841 /** 2842 * stmmac_dma_interrupt - DMA ISR 2843 * @priv: driver private structure 2844 * Description: this is the DMA ISR. It is called by the main ISR. 2845 * It calls the dwmac dma routine and schedule poll method in case of some 2846 * work can be done. 2847 */ 2848 static void stmmac_dma_interrupt(struct stmmac_priv *priv) 2849 { 2850 u32 tx_channel_count = priv->plat->tx_queues_to_use; 2851 u32 rx_channel_count = priv->plat->rx_queues_to_use; 2852 u32 channels_to_check = tx_channel_count > rx_channel_count ? 2853 tx_channel_count : rx_channel_count; 2854 u32 chan; 2855 int status[max_t(u32, MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES)]; 2856 2857 /* Make sure we never check beyond our status buffer. 
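 * A platform reporting more channels than MTL_MAX_TX_QUEUES/MTL_MAX_RX_QUEUES
 * would be a configuration bug, hence the WARN_ON_ONCE below.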
*/ 2858 if (WARN_ON_ONCE(channels_to_check > ARRAY_SIZE(status))) 2859 channels_to_check = ARRAY_SIZE(status); 2860 2861 for (chan = 0; chan < channels_to_check; chan++) 2862 status[chan] = stmmac_napi_check(priv, chan, 2863 DMA_DIR_RXTX); 2864 2865 for (chan = 0; chan < tx_channel_count; chan++) { 2866 if (unlikely(status[chan] & tx_hard_error_bump_tc)) { 2867 /* Try to bump up the dma threshold on this failure */ 2868 stmmac_bump_dma_threshold(priv, chan); 2869 } else if (unlikely(status[chan] == tx_hard_error)) { 2870 stmmac_tx_err(priv, chan); 2871 } 2872 } 2873 } 2874 2875 /** 2876 * stmmac_mmc_setup: setup the Mac Management Counters (MMC) 2877 * @priv: driver private structure 2878 * Description: this masks the MMC irq, in fact, the counters are managed in SW. 2879 */ 2880 static void stmmac_mmc_setup(struct stmmac_priv *priv) 2881 { 2882 unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET | 2883 MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET; 2884 2885 stmmac_mmc_intr_all_mask(priv, priv->mmcaddr); 2886 2887 if (priv->dma_cap.rmon) { 2888 stmmac_mmc_ctrl(priv, priv->mmcaddr, mode); 2889 memset(&priv->mmc, 0, sizeof(struct stmmac_counters)); 2890 } else 2891 netdev_info(priv->dev, "No MAC Management Counters available\n"); 2892 } 2893 2894 /** 2895 * stmmac_get_hw_features - get MAC capabilities from the HW cap. register. 2896 * @priv: driver private structure 2897 * Description: 2898 * new GMAC chip generations have a new register to indicate the 2899 * presence of the optional feature/functions. 2900 * This can be also used to override the value passed through the 2901 * platform and necessary for old MAC10/100 and GMAC chips. 2902 */ 2903 static int stmmac_get_hw_features(struct stmmac_priv *priv) 2904 { 2905 return stmmac_get_hw_feature(priv, priv->ioaddr, &priv->dma_cap) == 0; 2906 } 2907 2908 /** 2909 * stmmac_check_ether_addr - check if the MAC addr is valid 2910 * @priv: driver private structure 2911 * Description: 2912 * it is to verify if the MAC address is valid, in case of failures it 2913 * generates a random MAC address 2914 */ 2915 static void stmmac_check_ether_addr(struct stmmac_priv *priv) 2916 { 2917 u8 addr[ETH_ALEN]; 2918 2919 if (!is_valid_ether_addr(priv->dev->dev_addr)) { 2920 stmmac_get_umac_addr(priv, priv->hw, addr, 0); 2921 if (is_valid_ether_addr(addr)) 2922 eth_hw_addr_set(priv->dev, addr); 2923 else 2924 eth_hw_addr_random(priv->dev); 2925 dev_info(priv->device, "device MAC address %pM\n", 2926 priv->dev->dev_addr); 2927 } 2928 } 2929 2930 /** 2931 * stmmac_init_dma_engine - DMA init. 2932 * @priv: driver private structure 2933 * Description: 2934 * It inits the DMA invoking the specific MAC/GMAC callback. 2935 * Some DMA parameters can be passed from the platform; 2936 * in case of these are not passed a default is kept for the MAC or GMAC. 
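 * Return value: 0 on success, -EINVAL for an invalid DMA configuration, or
 * the error returned by the DMA reset.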
2937 */ 2938 static int stmmac_init_dma_engine(struct stmmac_priv *priv) 2939 { 2940 u32 rx_channels_count = priv->plat->rx_queues_to_use; 2941 u32 tx_channels_count = priv->plat->tx_queues_to_use; 2942 u32 dma_csr_ch = max(rx_channels_count, tx_channels_count); 2943 struct stmmac_rx_queue *rx_q; 2944 struct stmmac_tx_queue *tx_q; 2945 u32 chan = 0; 2946 int atds = 0; 2947 int ret = 0; 2948 2949 if (!priv->plat->dma_cfg || !priv->plat->dma_cfg->pbl) { 2950 dev_err(priv->device, "Invalid DMA configuration\n"); 2951 return -EINVAL; 2952 } 2953 2954 if (priv->extend_desc && (priv->mode == STMMAC_RING_MODE)) 2955 atds = 1; 2956 2957 ret = stmmac_reset(priv, priv->ioaddr); 2958 if (ret) { 2959 dev_err(priv->device, "Failed to reset the dma\n"); 2960 return ret; 2961 } 2962 2963 /* DMA Configuration */ 2964 stmmac_dma_init(priv, priv->ioaddr, priv->plat->dma_cfg, atds); 2965 2966 if (priv->plat->axi) 2967 stmmac_axi(priv, priv->ioaddr, priv->plat->axi); 2968 2969 /* DMA CSR Channel configuration */ 2970 for (chan = 0; chan < dma_csr_ch; chan++) { 2971 stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan); 2972 stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 1); 2973 } 2974 2975 /* DMA RX Channel Configuration */ 2976 for (chan = 0; chan < rx_channels_count; chan++) { 2977 rx_q = &priv->dma_conf.rx_queue[chan]; 2978 2979 stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg, 2980 rx_q->dma_rx_phy, chan); 2981 2982 rx_q->rx_tail_addr = rx_q->dma_rx_phy + 2983 (rx_q->buf_alloc_num * 2984 sizeof(struct dma_desc)); 2985 stmmac_set_rx_tail_ptr(priv, priv->ioaddr, 2986 rx_q->rx_tail_addr, chan); 2987 } 2988 2989 /* DMA TX Channel Configuration */ 2990 for (chan = 0; chan < tx_channels_count; chan++) { 2991 tx_q = &priv->dma_conf.tx_queue[chan]; 2992 2993 stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg, 2994 tx_q->dma_tx_phy, chan); 2995 2996 tx_q->tx_tail_addr = tx_q->dma_tx_phy; 2997 stmmac_set_tx_tail_ptr(priv, priv->ioaddr, 2998 tx_q->tx_tail_addr, chan); 2999 } 3000 3001 return ret; 3002 } 3003 3004 static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue) 3005 { 3006 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue]; 3007 u32 tx_coal_timer = priv->tx_coal_timer[queue]; 3008 3009 if (!tx_coal_timer) 3010 return; 3011 3012 hrtimer_start(&tx_q->txtimer, 3013 STMMAC_COAL_TIMER(tx_coal_timer), 3014 HRTIMER_MODE_REL); 3015 } 3016 3017 /** 3018 * stmmac_tx_timer - mitigation sw timer for tx. 3019 * @t: data pointer 3020 * Description: 3021 * This is the timer handler to directly invoke the stmmac_tx_clean. 3022 */ 3023 static enum hrtimer_restart stmmac_tx_timer(struct hrtimer *t) 3024 { 3025 struct stmmac_tx_queue *tx_q = container_of(t, struct stmmac_tx_queue, txtimer); 3026 struct stmmac_priv *priv = tx_q->priv_data; 3027 struct stmmac_channel *ch; 3028 struct napi_struct *napi; 3029 3030 ch = &priv->channel[tx_q->queue_index]; 3031 napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi; 3032 3033 if (likely(napi_schedule_prep(napi))) { 3034 unsigned long flags; 3035 3036 spin_lock_irqsave(&ch->lock, flags); 3037 stmmac_disable_dma_irq(priv, priv->ioaddr, ch->index, 0, 1); 3038 spin_unlock_irqrestore(&ch->lock, flags); 3039 __napi_schedule(napi); 3040 } 3041 3042 return HRTIMER_NORESTART; 3043 } 3044 3045 /** 3046 * stmmac_init_coalesce - init mitigation options. 3047 * @priv: driver private structure 3048 * Description: 3049 * This inits the coalesce parameters: i.e. 
timer rate, 3050 * timer handler and default threshold used for enabling the 3051 * interrupt on completion bit. 3052 */ 3053 static void stmmac_init_coalesce(struct stmmac_priv *priv) 3054 { 3055 u32 tx_channel_count = priv->plat->tx_queues_to_use; 3056 u32 rx_channel_count = priv->plat->rx_queues_to_use; 3057 u32 chan; 3058 3059 for (chan = 0; chan < tx_channel_count; chan++) { 3060 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan]; 3061 3062 priv->tx_coal_frames[chan] = STMMAC_TX_FRAMES; 3063 priv->tx_coal_timer[chan] = STMMAC_COAL_TX_TIMER; 3064 3065 hrtimer_init(&tx_q->txtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); 3066 tx_q->txtimer.function = stmmac_tx_timer; 3067 } 3068 3069 for (chan = 0; chan < rx_channel_count; chan++) 3070 priv->rx_coal_frames[chan] = STMMAC_RX_FRAMES; 3071 } 3072 3073 static void stmmac_set_rings_length(struct stmmac_priv *priv) 3074 { 3075 u32 rx_channels_count = priv->plat->rx_queues_to_use; 3076 u32 tx_channels_count = priv->plat->tx_queues_to_use; 3077 u32 chan; 3078 3079 /* set TX ring length */ 3080 for (chan = 0; chan < tx_channels_count; chan++) 3081 stmmac_set_tx_ring_len(priv, priv->ioaddr, 3082 (priv->dma_conf.dma_tx_size - 1), chan); 3083 3084 /* set RX ring length */ 3085 for (chan = 0; chan < rx_channels_count; chan++) 3086 stmmac_set_rx_ring_len(priv, priv->ioaddr, 3087 (priv->dma_conf.dma_rx_size - 1), chan); 3088 } 3089 3090 /** 3091 * stmmac_set_tx_queue_weight - Set TX queue weight 3092 * @priv: driver private structure 3093 * Description: It is used for setting TX queues weight 3094 */ 3095 static void stmmac_set_tx_queue_weight(struct stmmac_priv *priv) 3096 { 3097 u32 tx_queues_count = priv->plat->tx_queues_to_use; 3098 u32 weight; 3099 u32 queue; 3100 3101 for (queue = 0; queue < tx_queues_count; queue++) { 3102 weight = priv->plat->tx_queues_cfg[queue].weight; 3103 stmmac_set_mtl_tx_queue_weight(priv, priv->hw, weight, queue); 3104 } 3105 } 3106 3107 /** 3108 * stmmac_configure_cbs - Configure CBS in TX queue 3109 * @priv: driver private structure 3110 * Description: It is used for configuring CBS in AVB TX queues 3111 */ 3112 static void stmmac_configure_cbs(struct stmmac_priv *priv) 3113 { 3114 u32 tx_queues_count = priv->plat->tx_queues_to_use; 3115 u32 mode_to_use; 3116 u32 queue; 3117 3118 /* queue 0 is reserved for legacy traffic */ 3119 for (queue = 1; queue < tx_queues_count; queue++) { 3120 mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use; 3121 if (mode_to_use == MTL_QUEUE_DCB) 3122 continue; 3123 3124 stmmac_config_cbs(priv, priv->hw, 3125 priv->plat->tx_queues_cfg[queue].send_slope, 3126 priv->plat->tx_queues_cfg[queue].idle_slope, 3127 priv->plat->tx_queues_cfg[queue].high_credit, 3128 priv->plat->tx_queues_cfg[queue].low_credit, 3129 queue); 3130 } 3131 } 3132 3133 /** 3134 * stmmac_rx_queue_dma_chan_map - Map RX queue to RX dma channel 3135 * @priv: driver private structure 3136 * Description: It is used for mapping RX queues to RX dma channels 3137 */ 3138 static void stmmac_rx_queue_dma_chan_map(struct stmmac_priv *priv) 3139 { 3140 u32 rx_queues_count = priv->plat->rx_queues_to_use; 3141 u32 queue; 3142 u32 chan; 3143 3144 for (queue = 0; queue < rx_queues_count; queue++) { 3145 chan = priv->plat->rx_queues_cfg[queue].chan; 3146 stmmac_map_mtl_to_dma(priv, priv->hw, queue, chan); 3147 } 3148 } 3149 3150 /** 3151 * stmmac_mac_config_rx_queues_prio - Configure RX Queue priority 3152 * @priv: driver private structure 3153 * Description: It is used for configuring the RX Queue Priority 3154 */ 3155 
static void stmmac_mac_config_rx_queues_prio(struct stmmac_priv *priv) 3156 { 3157 u32 rx_queues_count = priv->plat->rx_queues_to_use; 3158 u32 queue; 3159 u32 prio; 3160 3161 for (queue = 0; queue < rx_queues_count; queue++) { 3162 if (!priv->plat->rx_queues_cfg[queue].use_prio) 3163 continue; 3164 3165 prio = priv->plat->rx_queues_cfg[queue].prio; 3166 stmmac_rx_queue_prio(priv, priv->hw, prio, queue); 3167 } 3168 } 3169 3170 /** 3171 * stmmac_mac_config_tx_queues_prio - Configure TX Queue priority 3172 * @priv: driver private structure 3173 * Description: It is used for configuring the TX Queue Priority 3174 */ 3175 static void stmmac_mac_config_tx_queues_prio(struct stmmac_priv *priv) 3176 { 3177 u32 tx_queues_count = priv->plat->tx_queues_to_use; 3178 u32 queue; 3179 u32 prio; 3180 3181 for (queue = 0; queue < tx_queues_count; queue++) { 3182 if (!priv->plat->tx_queues_cfg[queue].use_prio) 3183 continue; 3184 3185 prio = priv->plat->tx_queues_cfg[queue].prio; 3186 stmmac_tx_queue_prio(priv, priv->hw, prio, queue); 3187 } 3188 } 3189 3190 /** 3191 * stmmac_mac_config_rx_queues_routing - Configure RX Queue Routing 3192 * @priv: driver private structure 3193 * Description: It is used for configuring the RX queue routing 3194 */ 3195 static void stmmac_mac_config_rx_queues_routing(struct stmmac_priv *priv) 3196 { 3197 u32 rx_queues_count = priv->plat->rx_queues_to_use; 3198 u32 queue; 3199 u8 packet; 3200 3201 for (queue = 0; queue < rx_queues_count; queue++) { 3202 /* no specific packet type routing specified for the queue */ 3203 if (priv->plat->rx_queues_cfg[queue].pkt_route == 0x0) 3204 continue; 3205 3206 packet = priv->plat->rx_queues_cfg[queue].pkt_route; 3207 stmmac_rx_queue_routing(priv, priv->hw, packet, queue); 3208 } 3209 } 3210 3211 static void stmmac_mac_config_rss(struct stmmac_priv *priv) 3212 { 3213 if (!priv->dma_cap.rssen || !priv->plat->rss_en) { 3214 priv->rss.enable = false; 3215 return; 3216 } 3217 3218 if (priv->dev->features & NETIF_F_RXHASH) 3219 priv->rss.enable = true; 3220 else 3221 priv->rss.enable = false; 3222 3223 stmmac_rss_configure(priv, priv->hw, &priv->rss, 3224 priv->plat->rx_queues_to_use); 3225 } 3226 3227 /** 3228 * stmmac_mtl_configuration - Configure MTL 3229 * @priv: driver private structure 3230 * Description: It is used for configurring MTL 3231 */ 3232 static void stmmac_mtl_configuration(struct stmmac_priv *priv) 3233 { 3234 u32 rx_queues_count = priv->plat->rx_queues_to_use; 3235 u32 tx_queues_count = priv->plat->tx_queues_to_use; 3236 3237 if (tx_queues_count > 1) 3238 stmmac_set_tx_queue_weight(priv); 3239 3240 /* Configure MTL RX algorithms */ 3241 if (rx_queues_count > 1) 3242 stmmac_prog_mtl_rx_algorithms(priv, priv->hw, 3243 priv->plat->rx_sched_algorithm); 3244 3245 /* Configure MTL TX algorithms */ 3246 if (tx_queues_count > 1) 3247 stmmac_prog_mtl_tx_algorithms(priv, priv->hw, 3248 priv->plat->tx_sched_algorithm); 3249 3250 /* Configure CBS in AVB TX queues */ 3251 if (tx_queues_count > 1) 3252 stmmac_configure_cbs(priv); 3253 3254 /* Map RX MTL to DMA channels */ 3255 stmmac_rx_queue_dma_chan_map(priv); 3256 3257 /* Enable MAC RX Queues */ 3258 stmmac_mac_enable_rx_queues(priv); 3259 3260 /* Set RX priorities */ 3261 if (rx_queues_count > 1) 3262 stmmac_mac_config_rx_queues_prio(priv); 3263 3264 /* Set TX priorities */ 3265 if (tx_queues_count > 1) 3266 stmmac_mac_config_tx_queues_prio(priv); 3267 3268 /* Set RX routing */ 3269 if (rx_queues_count > 1) 3270 stmmac_mac_config_rx_queues_routing(priv); 3271 3272 /* Receive Side 
Scaling */ 3273 if (rx_queues_count > 1) 3274 stmmac_mac_config_rss(priv); 3275 } 3276 3277 static void stmmac_safety_feat_configuration(struct stmmac_priv *priv) 3278 { 3279 if (priv->dma_cap.asp) { 3280 netdev_info(priv->dev, "Enabling Safety Features\n"); 3281 stmmac_safety_feat_config(priv, priv->ioaddr, priv->dma_cap.asp, 3282 priv->plat->safety_feat_cfg); 3283 } else { 3284 netdev_info(priv->dev, "No Safety Features support found\n"); 3285 } 3286 } 3287 3288 static int stmmac_fpe_start_wq(struct stmmac_priv *priv) 3289 { 3290 char *name; 3291 3292 clear_bit(__FPE_TASK_SCHED, &priv->fpe_task_state); 3293 clear_bit(__FPE_REMOVING, &priv->fpe_task_state); 3294 3295 name = priv->wq_name; 3296 sprintf(name, "%s-fpe", priv->dev->name); 3297 3298 priv->fpe_wq = create_singlethread_workqueue(name); 3299 if (!priv->fpe_wq) { 3300 netdev_err(priv->dev, "%s: Failed to create workqueue\n", name); 3301 3302 return -ENOMEM; 3303 } 3304 netdev_info(priv->dev, "FPE workqueue start"); 3305 3306 return 0; 3307 } 3308 3309 /** 3310 * stmmac_hw_setup - setup mac in a usable state. 3311 * @dev : pointer to the device structure. 3312 * @ptp_register: register PTP if set 3313 * Description: 3314 * this is the main function to setup the HW in a usable state because the 3315 * dma engine is reset, the core registers are configured (e.g. AXI, 3316 * Checksum features, timers). The DMA is ready to start receiving and 3317 * transmitting. 3318 * Return value: 3319 * 0 on success and an appropriate (-)ve integer as defined in errno.h 3320 * file on failure. 3321 */ 3322 static int stmmac_hw_setup(struct net_device *dev, bool ptp_register) 3323 { 3324 struct stmmac_priv *priv = netdev_priv(dev); 3325 u32 rx_cnt = priv->plat->rx_queues_to_use; 3326 u32 tx_cnt = priv->plat->tx_queues_to_use; 3327 bool sph_en; 3328 u32 chan; 3329 int ret; 3330 3331 /* DMA initialization and SW reset */ 3332 ret = stmmac_init_dma_engine(priv); 3333 if (ret < 0) { 3334 netdev_err(priv->dev, "%s: DMA engine initialization failed\n", 3335 __func__); 3336 return ret; 3337 } 3338 3339 /* Copy the MAC addr into the HW */ 3340 stmmac_set_umac_addr(priv, priv->hw, dev->dev_addr, 0); 3341 3342 /* PS and related bits will be programmed according to the speed */ 3343 if (priv->hw->pcs) { 3344 int speed = priv->plat->mac_port_sel_speed; 3345 3346 if ((speed == SPEED_10) || (speed == SPEED_100) || 3347 (speed == SPEED_1000)) { 3348 priv->hw->ps = speed; 3349 } else { 3350 dev_warn(priv->device, "invalid port speed\n"); 3351 priv->hw->ps = 0; 3352 } 3353 } 3354 3355 /* Initialize the MAC Core */ 3356 stmmac_core_init(priv, priv->hw, dev); 3357 3358 /* Initialize MTL*/ 3359 stmmac_mtl_configuration(priv); 3360 3361 /* Initialize Safety Features */ 3362 stmmac_safety_feat_configuration(priv); 3363 3364 ret = stmmac_rx_ipc(priv, priv->hw); 3365 if (!ret) { 3366 netdev_warn(priv->dev, "RX IPC Checksum Offload disabled\n"); 3367 priv->plat->rx_coe = STMMAC_RX_COE_NONE; 3368 priv->hw->rx_csum = 0; 3369 } 3370 3371 /* Enable the MAC Rx/Tx */ 3372 stmmac_mac_set(priv, priv->ioaddr, true); 3373 3374 /* Set the HW DMA mode and the COE */ 3375 stmmac_dma_operation_mode(priv); 3376 3377 stmmac_mmc_setup(priv); 3378 3379 if (ptp_register) { 3380 ret = clk_prepare_enable(priv->plat->clk_ptp_ref); 3381 if (ret < 0) 3382 netdev_warn(priv->dev, 3383 "failed to enable PTP reference clock: %pe\n", 3384 ERR_PTR(ret)); 3385 } 3386 3387 ret = stmmac_init_ptp(priv); 3388 if (ret == -EOPNOTSUPP) 3389 netdev_info(priv->dev, "PTP not supported by HW\n"); 3390 else if (ret) 
3391 netdev_warn(priv->dev, "PTP init failed\n"); 3392 else if (ptp_register) 3393 stmmac_ptp_register(priv); 3394 3395 priv->eee_tw_timer = STMMAC_DEFAULT_TWT_LS; 3396 3397 /* Convert the timer from msec to usec */ 3398 if (!priv->tx_lpi_timer) 3399 priv->tx_lpi_timer = eee_timer * 1000; 3400 3401 if (priv->use_riwt) { 3402 u32 queue; 3403 3404 for (queue = 0; queue < rx_cnt; queue++) { 3405 if (!priv->rx_riwt[queue]) 3406 priv->rx_riwt[queue] = DEF_DMA_RIWT; 3407 3408 stmmac_rx_watchdog(priv, priv->ioaddr, 3409 priv->rx_riwt[queue], queue); 3410 } 3411 } 3412 3413 if (priv->hw->pcs) 3414 stmmac_pcs_ctrl_ane(priv, priv->ioaddr, 1, priv->hw->ps, 0); 3415 3416 /* set TX and RX rings length */ 3417 stmmac_set_rings_length(priv); 3418 3419 /* Enable TSO */ 3420 if (priv->tso) { 3421 for (chan = 0; chan < tx_cnt; chan++) { 3422 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan]; 3423 3424 /* TSO and TBS cannot co-exist */ 3425 if (tx_q->tbs & STMMAC_TBS_AVAIL) 3426 continue; 3427 3428 stmmac_enable_tso(priv, priv->ioaddr, 1, chan); 3429 } 3430 } 3431 3432 /* Enable Split Header */ 3433 sph_en = (priv->hw->rx_csum > 0) && priv->sph; 3434 for (chan = 0; chan < rx_cnt; chan++) 3435 stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan); 3436 3437 3438 /* VLAN Tag Insertion */ 3439 if (priv->dma_cap.vlins) 3440 stmmac_enable_vlan(priv, priv->hw, STMMAC_VLAN_INSERT); 3441 3442 /* TBS */ 3443 for (chan = 0; chan < tx_cnt; chan++) { 3444 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan]; 3445 int enable = tx_q->tbs & STMMAC_TBS_AVAIL; 3446 3447 stmmac_enable_tbs(priv, priv->ioaddr, enable, chan); 3448 } 3449 3450 /* Configure real RX and TX queues */ 3451 netif_set_real_num_rx_queues(dev, priv->plat->rx_queues_to_use); 3452 netif_set_real_num_tx_queues(dev, priv->plat->tx_queues_to_use); 3453 3454 /* Start the ball rolling... 
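 * i.e. enable all the RX and TX DMA channels; from this point on the
 * hardware can start fetching and processing descriptors.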
*/ 3455 stmmac_start_all_dma(priv); 3456 3457 if (priv->dma_cap.fpesel) { 3458 stmmac_fpe_start_wq(priv); 3459 3460 if (priv->plat->fpe_cfg->enable) 3461 stmmac_fpe_handshake(priv, true); 3462 } 3463 3464 return 0; 3465 } 3466 3467 static void stmmac_hw_teardown(struct net_device *dev) 3468 { 3469 struct stmmac_priv *priv = netdev_priv(dev); 3470 3471 clk_disable_unprepare(priv->plat->clk_ptp_ref); 3472 } 3473 3474 static void stmmac_free_irq(struct net_device *dev, 3475 enum request_irq_err irq_err, int irq_idx) 3476 { 3477 struct stmmac_priv *priv = netdev_priv(dev); 3478 int j; 3479 3480 switch (irq_err) { 3481 case REQ_IRQ_ERR_ALL: 3482 irq_idx = priv->plat->tx_queues_to_use; 3483 fallthrough; 3484 case REQ_IRQ_ERR_TX: 3485 for (j = irq_idx - 1; j >= 0; j--) { 3486 if (priv->tx_irq[j] > 0) { 3487 irq_set_affinity_hint(priv->tx_irq[j], NULL); 3488 free_irq(priv->tx_irq[j], &priv->dma_conf.tx_queue[j]); 3489 } 3490 } 3491 irq_idx = priv->plat->rx_queues_to_use; 3492 fallthrough; 3493 case REQ_IRQ_ERR_RX: 3494 for (j = irq_idx - 1; j >= 0; j--) { 3495 if (priv->rx_irq[j] > 0) { 3496 irq_set_affinity_hint(priv->rx_irq[j], NULL); 3497 free_irq(priv->rx_irq[j], &priv->dma_conf.rx_queue[j]); 3498 } 3499 } 3500 3501 if (priv->sfty_ue_irq > 0 && priv->sfty_ue_irq != dev->irq) 3502 free_irq(priv->sfty_ue_irq, dev); 3503 fallthrough; 3504 case REQ_IRQ_ERR_SFTY_UE: 3505 if (priv->sfty_ce_irq > 0 && priv->sfty_ce_irq != dev->irq) 3506 free_irq(priv->sfty_ce_irq, dev); 3507 fallthrough; 3508 case REQ_IRQ_ERR_SFTY_CE: 3509 if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq) 3510 free_irq(priv->lpi_irq, dev); 3511 fallthrough; 3512 case REQ_IRQ_ERR_LPI: 3513 if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) 3514 free_irq(priv->wol_irq, dev); 3515 fallthrough; 3516 case REQ_IRQ_ERR_WOL: 3517 free_irq(dev->irq, dev); 3518 fallthrough; 3519 case REQ_IRQ_ERR_MAC: 3520 case REQ_IRQ_ERR_NO: 3521 /* If MAC IRQ request error, no more IRQ to free */ 3522 break; 3523 } 3524 } 3525 3526 static int stmmac_request_irq_multi_msi(struct net_device *dev) 3527 { 3528 struct stmmac_priv *priv = netdev_priv(dev); 3529 enum request_irq_err irq_err; 3530 cpumask_t cpu_mask; 3531 int irq_idx = 0; 3532 char *int_name; 3533 int ret; 3534 int i; 3535 3536 /* For common interrupt */ 3537 int_name = priv->int_name_mac; 3538 sprintf(int_name, "%s:%s", dev->name, "mac"); 3539 ret = request_irq(dev->irq, stmmac_mac_interrupt, 3540 0, int_name, dev); 3541 if (unlikely(ret < 0)) { 3542 netdev_err(priv->dev, 3543 "%s: alloc mac MSI %d (error: %d)\n", 3544 __func__, dev->irq, ret); 3545 irq_err = REQ_IRQ_ERR_MAC; 3546 goto irq_error; 3547 } 3548 3549 /* Request the Wake IRQ in case of another line 3550 * is used for WoL 3551 */ 3552 priv->wol_irq_disabled = true; 3553 if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) { 3554 int_name = priv->int_name_wol; 3555 sprintf(int_name, "%s:%s", dev->name, "wol"); 3556 ret = request_irq(priv->wol_irq, 3557 stmmac_mac_interrupt, 3558 0, int_name, dev); 3559 if (unlikely(ret < 0)) { 3560 netdev_err(priv->dev, 3561 "%s: alloc wol MSI %d (error: %d)\n", 3562 __func__, priv->wol_irq, ret); 3563 irq_err = REQ_IRQ_ERR_WOL; 3564 goto irq_error; 3565 } 3566 } 3567 3568 /* Request the LPI IRQ in case of another line 3569 * is used for LPI 3570 */ 3571 if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq) { 3572 int_name = priv->int_name_lpi; 3573 sprintf(int_name, "%s:%s", dev->name, "lpi"); 3574 ret = request_irq(priv->lpi_irq, 3575 stmmac_mac_interrupt, 3576 0, int_name, dev); 3577 if (unlikely(ret < 
0)) { 3578 netdev_err(priv->dev, 3579 "%s: alloc lpi MSI %d (error: %d)\n", 3580 __func__, priv->lpi_irq, ret); 3581 irq_err = REQ_IRQ_ERR_LPI; 3582 goto irq_error; 3583 } 3584 } 3585 3586 /* Request the Safety Feature Correctible Error line in 3587 * case of another line is used 3588 */ 3589 if (priv->sfty_ce_irq > 0 && priv->sfty_ce_irq != dev->irq) { 3590 int_name = priv->int_name_sfty_ce; 3591 sprintf(int_name, "%s:%s", dev->name, "safety-ce"); 3592 ret = request_irq(priv->sfty_ce_irq, 3593 stmmac_safety_interrupt, 3594 0, int_name, dev); 3595 if (unlikely(ret < 0)) { 3596 netdev_err(priv->dev, 3597 "%s: alloc sfty ce MSI %d (error: %d)\n", 3598 __func__, priv->sfty_ce_irq, ret); 3599 irq_err = REQ_IRQ_ERR_SFTY_CE; 3600 goto irq_error; 3601 } 3602 } 3603 3604 /* Request the Safety Feature Uncorrectible Error line in 3605 * case of another line is used 3606 */ 3607 if (priv->sfty_ue_irq > 0 && priv->sfty_ue_irq != dev->irq) { 3608 int_name = priv->int_name_sfty_ue; 3609 sprintf(int_name, "%s:%s", dev->name, "safety-ue"); 3610 ret = request_irq(priv->sfty_ue_irq, 3611 stmmac_safety_interrupt, 3612 0, int_name, dev); 3613 if (unlikely(ret < 0)) { 3614 netdev_err(priv->dev, 3615 "%s: alloc sfty ue MSI %d (error: %d)\n", 3616 __func__, priv->sfty_ue_irq, ret); 3617 irq_err = REQ_IRQ_ERR_SFTY_UE; 3618 goto irq_error; 3619 } 3620 } 3621 3622 /* Request Rx MSI irq */ 3623 for (i = 0; i < priv->plat->rx_queues_to_use; i++) { 3624 if (i >= MTL_MAX_RX_QUEUES) 3625 break; 3626 if (priv->rx_irq[i] == 0) 3627 continue; 3628 3629 int_name = priv->int_name_rx_irq[i]; 3630 sprintf(int_name, "%s:%s-%d", dev->name, "rx", i); 3631 ret = request_irq(priv->rx_irq[i], 3632 stmmac_msi_intr_rx, 3633 0, int_name, &priv->dma_conf.rx_queue[i]); 3634 if (unlikely(ret < 0)) { 3635 netdev_err(priv->dev, 3636 "%s: alloc rx-%d MSI %d (error: %d)\n", 3637 __func__, i, priv->rx_irq[i], ret); 3638 irq_err = REQ_IRQ_ERR_RX; 3639 irq_idx = i; 3640 goto irq_error; 3641 } 3642 cpumask_clear(&cpu_mask); 3643 cpumask_set_cpu(i % num_online_cpus(), &cpu_mask); 3644 irq_set_affinity_hint(priv->rx_irq[i], &cpu_mask); 3645 } 3646 3647 /* Request Tx MSI irq */ 3648 for (i = 0; i < priv->plat->tx_queues_to_use; i++) { 3649 if (i >= MTL_MAX_TX_QUEUES) 3650 break; 3651 if (priv->tx_irq[i] == 0) 3652 continue; 3653 3654 int_name = priv->int_name_tx_irq[i]; 3655 sprintf(int_name, "%s:%s-%d", dev->name, "tx", i); 3656 ret = request_irq(priv->tx_irq[i], 3657 stmmac_msi_intr_tx, 3658 0, int_name, &priv->dma_conf.tx_queue[i]); 3659 if (unlikely(ret < 0)) { 3660 netdev_err(priv->dev, 3661 "%s: alloc tx-%d MSI %d (error: %d)\n", 3662 __func__, i, priv->tx_irq[i], ret); 3663 irq_err = REQ_IRQ_ERR_TX; 3664 irq_idx = i; 3665 goto irq_error; 3666 } 3667 cpumask_clear(&cpu_mask); 3668 cpumask_set_cpu(i % num_online_cpus(), &cpu_mask); 3669 irq_set_affinity_hint(priv->tx_irq[i], &cpu_mask); 3670 } 3671 3672 return 0; 3673 3674 irq_error: 3675 stmmac_free_irq(dev, irq_err, irq_idx); 3676 return ret; 3677 } 3678 3679 static int stmmac_request_irq_single(struct net_device *dev) 3680 { 3681 struct stmmac_priv *priv = netdev_priv(dev); 3682 enum request_irq_err irq_err; 3683 int ret; 3684 3685 ret = request_irq(dev->irq, stmmac_interrupt, 3686 IRQF_SHARED, dev->name, dev); 3687 if (unlikely(ret < 0)) { 3688 netdev_err(priv->dev, 3689 "%s: ERROR: allocating the IRQ %d (error: %d)\n", 3690 __func__, dev->irq, ret); 3691 irq_err = REQ_IRQ_ERR_MAC; 3692 goto irq_error; 3693 } 3694 3695 /* Request the Wake IRQ in case of another line 3696 * is used for WoL 
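 * (if the WoL interrupt is shared with the main MAC IRQ, the request
 * below is skipped).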
3697 */ 3698 if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) { 3699 ret = request_irq(priv->wol_irq, stmmac_interrupt, 3700 IRQF_SHARED, dev->name, dev); 3701 if (unlikely(ret < 0)) { 3702 netdev_err(priv->dev, 3703 "%s: ERROR: allocating the WoL IRQ %d (%d)\n", 3704 __func__, priv->wol_irq, ret); 3705 irq_err = REQ_IRQ_ERR_WOL; 3706 goto irq_error; 3707 } 3708 } 3709 3710 /* Request the IRQ lines */ 3711 if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq) { 3712 ret = request_irq(priv->lpi_irq, stmmac_interrupt, 3713 IRQF_SHARED, dev->name, dev); 3714 if (unlikely(ret < 0)) { 3715 netdev_err(priv->dev, 3716 "%s: ERROR: allocating the LPI IRQ %d (%d)\n", 3717 __func__, priv->lpi_irq, ret); 3718 irq_err = REQ_IRQ_ERR_LPI; 3719 goto irq_error; 3720 } 3721 } 3722 3723 return 0; 3724 3725 irq_error: 3726 stmmac_free_irq(dev, irq_err, 0); 3727 return ret; 3728 } 3729 3730 static int stmmac_request_irq(struct net_device *dev) 3731 { 3732 struct stmmac_priv *priv = netdev_priv(dev); 3733 int ret; 3734 3735 /* Request the IRQ lines */ 3736 if (priv->plat->flags & STMMAC_FLAG_MULTI_MSI_EN) 3737 ret = stmmac_request_irq_multi_msi(dev); 3738 else 3739 ret = stmmac_request_irq_single(dev); 3740 3741 return ret; 3742 } 3743 3744 /** 3745 * stmmac_setup_dma_desc - Generate a dma_conf and allocate DMA queue 3746 * @priv: driver private structure 3747 * @mtu: MTU to setup the dma queue and buf with 3748 * Description: Allocate and generate a dma_conf based on the provided MTU. 3749 * Allocate the Tx/Rx DMA queue and init them. 3750 * Return value: 3751 * the dma_conf allocated struct on success and an appropriate ERR_PTR on failure. 3752 */ 3753 static struct stmmac_dma_conf * 3754 stmmac_setup_dma_desc(struct stmmac_priv *priv, unsigned int mtu) 3755 { 3756 struct stmmac_dma_conf *dma_conf; 3757 int chan, bfsize, ret; 3758 3759 dma_conf = kzalloc(sizeof(*dma_conf), GFP_KERNEL); 3760 if (!dma_conf) { 3761 netdev_err(priv->dev, "%s: DMA conf allocation failed\n", 3762 __func__); 3763 return ERR_PTR(-ENOMEM); 3764 } 3765 3766 bfsize = stmmac_set_16kib_bfsize(priv, mtu); 3767 if (bfsize < 0) 3768 bfsize = 0; 3769 3770 if (bfsize < BUF_SIZE_16KiB) 3771 bfsize = stmmac_set_bfsize(mtu, 0); 3772 3773 dma_conf->dma_buf_sz = bfsize; 3774 /* Chose the tx/rx size from the already defined one in the 3775 * priv struct. (if defined) 3776 */ 3777 dma_conf->dma_tx_size = priv->dma_conf.dma_tx_size; 3778 dma_conf->dma_rx_size = priv->dma_conf.dma_rx_size; 3779 3780 if (!dma_conf->dma_tx_size) 3781 dma_conf->dma_tx_size = DMA_DEFAULT_TX_SIZE; 3782 if (!dma_conf->dma_rx_size) 3783 dma_conf->dma_rx_size = DMA_DEFAULT_RX_SIZE; 3784 3785 /* Earlier check for TBS */ 3786 for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) { 3787 struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[chan]; 3788 int tbs_en = priv->plat->tx_queues_cfg[chan].tbs_en; 3789 3790 /* Setup per-TXQ tbs flag before TX descriptor alloc */ 3791 tx_q->tbs |= tbs_en ? 
STMMAC_TBS_AVAIL : 0; 3792 } 3793 3794 ret = alloc_dma_desc_resources(priv, dma_conf); 3795 if (ret < 0) { 3796 netdev_err(priv->dev, "%s: DMA descriptors allocation failed\n", 3797 __func__); 3798 goto alloc_error; 3799 } 3800 3801 ret = init_dma_desc_rings(priv->dev, dma_conf, GFP_KERNEL); 3802 if (ret < 0) { 3803 netdev_err(priv->dev, "%s: DMA descriptors initialization failed\n", 3804 __func__); 3805 goto init_error; 3806 } 3807 3808 return dma_conf; 3809 3810 init_error: 3811 free_dma_desc_resources(priv, dma_conf); 3812 alloc_error: 3813 kfree(dma_conf); 3814 return ERR_PTR(ret); 3815 } 3816 3817 /** 3818 * __stmmac_open - open entry point of the driver 3819 * @dev : pointer to the device structure. 3820 * @dma_conf : structure to take the dma data 3821 * Description: 3822 * This function is the open entry point of the driver. 3823 * Return value: 3824 * 0 on success and an appropriate (-)ve integer as defined in errno.h 3825 * file on failure. 3826 */ 3827 static int __stmmac_open(struct net_device *dev, 3828 struct stmmac_dma_conf *dma_conf) 3829 { 3830 struct stmmac_priv *priv = netdev_priv(dev); 3831 int mode = priv->plat->phy_interface; 3832 u32 chan; 3833 int ret; 3834 3835 ret = pm_runtime_resume_and_get(priv->device); 3836 if (ret < 0) 3837 return ret; 3838 3839 if (priv->hw->pcs != STMMAC_PCS_TBI && 3840 priv->hw->pcs != STMMAC_PCS_RTBI && 3841 (!priv->hw->xpcs || 3842 xpcs_get_an_mode(priv->hw->xpcs, mode) != DW_AN_C73) && 3843 !priv->hw->lynx_pcs) { 3844 ret = stmmac_init_phy(dev); 3845 if (ret) { 3846 netdev_err(priv->dev, 3847 "%s: Cannot attach to PHY (error: %d)\n", 3848 __func__, ret); 3849 goto init_phy_error; 3850 } 3851 } 3852 3853 priv->rx_copybreak = STMMAC_RX_COPYBREAK; 3854 3855 buf_sz = dma_conf->dma_buf_sz; 3856 memcpy(&priv->dma_conf, dma_conf, sizeof(*dma_conf)); 3857 3858 stmmac_reset_queues_param(priv); 3859 3860 if (!(priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP) && 3861 priv->plat->serdes_powerup) { 3862 ret = priv->plat->serdes_powerup(dev, priv->plat->bsp_priv); 3863 if (ret < 0) { 3864 netdev_err(priv->dev, "%s: Serdes powerup failed\n", 3865 __func__); 3866 goto init_error; 3867 } 3868 } 3869 3870 ret = stmmac_hw_setup(dev, true); 3871 if (ret < 0) { 3872 netdev_err(priv->dev, "%s: Hw setup failed\n", __func__); 3873 goto init_error; 3874 } 3875 3876 stmmac_init_coalesce(priv); 3877 3878 phylink_start(priv->phylink); 3879 /* We may have called phylink_speed_down before */ 3880 phylink_speed_up(priv->phylink); 3881 3882 ret = stmmac_request_irq(dev); 3883 if (ret) 3884 goto irq_error; 3885 3886 stmmac_enable_all_queues(priv); 3887 netif_tx_start_all_queues(priv->dev); 3888 stmmac_enable_all_dma_irq(priv); 3889 3890 return 0; 3891 3892 irq_error: 3893 phylink_stop(priv->phylink); 3894 3895 for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) 3896 hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer); 3897 3898 stmmac_hw_teardown(dev); 3899 init_error: 3900 phylink_disconnect_phy(priv->phylink); 3901 init_phy_error: 3902 pm_runtime_put(priv->device); 3903 return ret; 3904 } 3905 3906 static int stmmac_open(struct net_device *dev) 3907 { 3908 struct stmmac_priv *priv = netdev_priv(dev); 3909 struct stmmac_dma_conf *dma_conf; 3910 int ret; 3911 3912 dma_conf = stmmac_setup_dma_desc(priv, dev->mtu); 3913 if (IS_ERR(dma_conf)) 3914 return PTR_ERR(dma_conf); 3915 3916 ret = __stmmac_open(dev, dma_conf); 3917 if (ret) 3918 free_dma_desc_resources(priv, dma_conf); 3919 3920 kfree(dma_conf); 3921 return ret; 3922 } 3923 3924 static void 
stmmac_fpe_stop_wq(struct stmmac_priv *priv) 3925 { 3926 set_bit(__FPE_REMOVING, &priv->fpe_task_state); 3927 3928 if (priv->fpe_wq) 3929 destroy_workqueue(priv->fpe_wq); 3930 3931 netdev_info(priv->dev, "FPE workqueue stop"); 3932 } 3933 3934 /** 3935 * stmmac_release - close entry point of the driver 3936 * @dev : device pointer. 3937 * Description: 3938 * This is the stop entry point of the driver. 3939 */ 3940 static int stmmac_release(struct net_device *dev) 3941 { 3942 struct stmmac_priv *priv = netdev_priv(dev); 3943 u32 chan; 3944 3945 if (device_may_wakeup(priv->device)) 3946 phylink_speed_down(priv->phylink, false); 3947 /* Stop and disconnect the PHY */ 3948 phylink_stop(priv->phylink); 3949 phylink_disconnect_phy(priv->phylink); 3950 3951 stmmac_disable_all_queues(priv); 3952 3953 for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) 3954 hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer); 3955 3956 netif_tx_disable(dev); 3957 3958 /* Free the IRQ lines */ 3959 stmmac_free_irq(dev, REQ_IRQ_ERR_ALL, 0); 3960 3961 if (priv->eee_enabled) { 3962 priv->tx_path_in_lpi_mode = false; 3963 del_timer_sync(&priv->eee_ctrl_timer); 3964 } 3965 3966 /* Stop TX/RX DMA and clear the descriptors */ 3967 stmmac_stop_all_dma(priv); 3968 3969 /* Release and free the Rx/Tx resources */ 3970 free_dma_desc_resources(priv, &priv->dma_conf); 3971 3972 /* Disable the MAC Rx/Tx */ 3973 stmmac_mac_set(priv, priv->ioaddr, false); 3974 3975 /* Powerdown Serdes if there is */ 3976 if (priv->plat->serdes_powerdown) 3977 priv->plat->serdes_powerdown(dev, priv->plat->bsp_priv); 3978 3979 netif_carrier_off(dev); 3980 3981 stmmac_release_ptp(priv); 3982 3983 pm_runtime_put(priv->device); 3984 3985 if (priv->dma_cap.fpesel) 3986 stmmac_fpe_stop_wq(priv); 3987 3988 return 0; 3989 } 3990 3991 static bool stmmac_vlan_insert(struct stmmac_priv *priv, struct sk_buff *skb, 3992 struct stmmac_tx_queue *tx_q) 3993 { 3994 u16 tag = 0x0, inner_tag = 0x0; 3995 u32 inner_type = 0x0; 3996 struct dma_desc *p; 3997 3998 if (!priv->dma_cap.vlins) 3999 return false; 4000 if (!skb_vlan_tag_present(skb)) 4001 return false; 4002 if (skb->vlan_proto == htons(ETH_P_8021AD)) { 4003 inner_tag = skb_vlan_tag_get(skb); 4004 inner_type = STMMAC_VLAN_INSERT; 4005 } 4006 4007 tag = skb_vlan_tag_get(skb); 4008 4009 if (tx_q->tbs & STMMAC_TBS_AVAIL) 4010 p = &tx_q->dma_entx[tx_q->cur_tx].basic; 4011 else 4012 p = &tx_q->dma_tx[tx_q->cur_tx]; 4013 4014 if (stmmac_set_desc_vlan_tag(priv, p, tag, inner_tag, inner_type)) 4015 return false; 4016 4017 stmmac_set_tx_owner(priv, p); 4018 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size); 4019 return true; 4020 } 4021 4022 /** 4023 * stmmac_tso_allocator - close entry point of the driver 4024 * @priv: driver private structure 4025 * @des: buffer start address 4026 * @total_len: total length to fill in descriptors 4027 * @last_segment: condition for the last descriptor 4028 * @queue: TX queue index 4029 * Description: 4030 * This function fills descriptor and request new descriptors according to 4031 * buffer length to fill 4032 */ 4033 static void stmmac_tso_allocator(struct stmmac_priv *priv, dma_addr_t des, 4034 int total_len, bool last_segment, u32 queue) 4035 { 4036 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue]; 4037 struct dma_desc *desc; 4038 u32 buff_size; 4039 int tmp_len; 4040 4041 tmp_len = total_len; 4042 4043 while (tmp_len > 0) { 4044 dma_addr_t curr_addr; 4045 4046 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, 4047 
priv->dma_conf.dma_tx_size); 4048 WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]); 4049 4050 if (tx_q->tbs & STMMAC_TBS_AVAIL) 4051 desc = &tx_q->dma_entx[tx_q->cur_tx].basic; 4052 else 4053 desc = &tx_q->dma_tx[tx_q->cur_tx]; 4054 4055 curr_addr = des + (total_len - tmp_len); 4056 if (priv->dma_cap.addr64 <= 32) 4057 desc->des0 = cpu_to_le32(curr_addr); 4058 else 4059 stmmac_set_desc_addr(priv, desc, curr_addr); 4060 4061 buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ? 4062 TSO_MAX_BUFF_SIZE : tmp_len; 4063 4064 stmmac_prepare_tso_tx_desc(priv, desc, 0, buff_size, 4065 0, 1, 4066 (last_segment) && (tmp_len <= TSO_MAX_BUFF_SIZE), 4067 0, 0); 4068 4069 tmp_len -= TSO_MAX_BUFF_SIZE; 4070 } 4071 } 4072 4073 static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue) 4074 { 4075 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue]; 4076 int desc_size; 4077 4078 if (likely(priv->extend_desc)) 4079 desc_size = sizeof(struct dma_extended_desc); 4080 else if (tx_q->tbs & STMMAC_TBS_AVAIL) 4081 desc_size = sizeof(struct dma_edesc); 4082 else 4083 desc_size = sizeof(struct dma_desc); 4084 4085 /* The own bit must be the latest setting done when prepare the 4086 * descriptor and then barrier is needed to make sure that 4087 * all is coherent before granting the DMA engine. 4088 */ 4089 wmb(); 4090 4091 tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * desc_size); 4092 stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue); 4093 } 4094 4095 /** 4096 * stmmac_tso_xmit - Tx entry point of the driver for oversized frames (TSO) 4097 * @skb : the socket buffer 4098 * @dev : device pointer 4099 * Description: this is the transmit function that is called on TSO frames 4100 * (support available on GMAC4 and newer chips). 4101 * Diagram below show the ring programming in case of TSO frames: 4102 * 4103 * First Descriptor 4104 * -------- 4105 * | DES0 |---> buffer1 = L2/L3/L4 header 4106 * | DES1 |---> TCP Payload (can continue on next descr...) 4107 * | DES2 |---> buffer 1 and 2 len 4108 * | DES3 |---> must set TSE, TCP hdr len-> [22:19]. TCP payload len [17:0] 4109 * -------- 4110 * | 4111 * ... 4112 * | 4113 * -------- 4114 * | DES0 | --| Split TCP Payload on Buffers 1 and 2 4115 * | DES1 | --| 4116 * | DES2 | --> buffer 1 and 2 len 4117 * | DES3 | 4118 * -------- 4119 * 4120 * mss is fixed when enable tso, so w/o programming the TDES3 ctx field. 
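 *
 * Purely as an illustration of the availability heuristic used below (the
 * helper name is hypothetical, not part of the driver):
 *
 *	static unsigned int tso_desc_budget(unsigned int skb_len,
 *					    unsigned int proto_hdr_len)
 *	{
 *		return (skb_len - proto_hdr_len) / TSO_MAX_BUFF_SIZE + 1;
 *	}
 *
 * e.g. with roughly 48 KiB of TCP payload the transmit path wants
 * 49152 / 16383 + 1 = 4 free descriptors before accepting the frame;
 * stmmac_tso_allocator() then slices the payload into chunks of at most
 * TSO_MAX_BUFF_SIZE bytes, one chunk per descriptor.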
4121 */ 4122 static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev) 4123 { 4124 struct dma_desc *desc, *first, *mss_desc = NULL; 4125 struct stmmac_priv *priv = netdev_priv(dev); 4126 int nfrags = skb_shinfo(skb)->nr_frags; 4127 u32 queue = skb_get_queue_mapping(skb); 4128 unsigned int first_entry, tx_packets; 4129 struct stmmac_txq_stats *txq_stats; 4130 int tmp_pay_len = 0, first_tx; 4131 struct stmmac_tx_queue *tx_q; 4132 bool has_vlan, set_ic; 4133 u8 proto_hdr_len, hdr; 4134 unsigned long flags; 4135 u32 pay_len, mss; 4136 dma_addr_t des; 4137 int i; 4138 4139 tx_q = &priv->dma_conf.tx_queue[queue]; 4140 txq_stats = &priv->xstats.txq_stats[queue]; 4141 first_tx = tx_q->cur_tx; 4142 4143 /* Compute header lengths */ 4144 if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) { 4145 proto_hdr_len = skb_transport_offset(skb) + sizeof(struct udphdr); 4146 hdr = sizeof(struct udphdr); 4147 } else { 4148 proto_hdr_len = skb_tcp_all_headers(skb); 4149 hdr = tcp_hdrlen(skb); 4150 } 4151 4152 /* Desc availability based on threshold should be enough safe */ 4153 if (unlikely(stmmac_tx_avail(priv, queue) < 4154 (((skb->len - proto_hdr_len) / TSO_MAX_BUFF_SIZE + 1)))) { 4155 if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) { 4156 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, 4157 queue)); 4158 /* This is a hard error, log it. */ 4159 netdev_err(priv->dev, 4160 "%s: Tx Ring full when queue awake\n", 4161 __func__); 4162 } 4163 return NETDEV_TX_BUSY; 4164 } 4165 4166 pay_len = skb_headlen(skb) - proto_hdr_len; /* no frags */ 4167 4168 mss = skb_shinfo(skb)->gso_size; 4169 4170 /* set new MSS value if needed */ 4171 if (mss != tx_q->mss) { 4172 if (tx_q->tbs & STMMAC_TBS_AVAIL) 4173 mss_desc = &tx_q->dma_entx[tx_q->cur_tx].basic; 4174 else 4175 mss_desc = &tx_q->dma_tx[tx_q->cur_tx]; 4176 4177 stmmac_set_mss(priv, mss_desc, mss); 4178 tx_q->mss = mss; 4179 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, 4180 priv->dma_conf.dma_tx_size); 4181 WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]); 4182 } 4183 4184 if (netif_msg_tx_queued(priv)) { 4185 pr_info("%s: hdrlen %d, hdr_len %d, pay_len %d, mss %d\n", 4186 __func__, hdr, proto_hdr_len, pay_len, mss); 4187 pr_info("\tskb->len %d, skb->data_len %d\n", skb->len, 4188 skb->data_len); 4189 } 4190 4191 /* Check if VLAN can be inserted by HW */ 4192 has_vlan = stmmac_vlan_insert(priv, skb, tx_q); 4193 4194 first_entry = tx_q->cur_tx; 4195 WARN_ON(tx_q->tx_skbuff[first_entry]); 4196 4197 if (tx_q->tbs & STMMAC_TBS_AVAIL) 4198 desc = &tx_q->dma_entx[first_entry].basic; 4199 else 4200 desc = &tx_q->dma_tx[first_entry]; 4201 first = desc; 4202 4203 if (has_vlan) 4204 stmmac_set_desc_vlan(priv, first, STMMAC_VLAN_INSERT); 4205 4206 /* first descriptor: fill Headers on Buf1 */ 4207 des = dma_map_single(priv->device, skb->data, skb_headlen(skb), 4208 DMA_TO_DEVICE); 4209 if (dma_mapping_error(priv->device, des)) 4210 goto dma_map_err; 4211 4212 tx_q->tx_skbuff_dma[first_entry].buf = des; 4213 tx_q->tx_skbuff_dma[first_entry].len = skb_headlen(skb); 4214 tx_q->tx_skbuff_dma[first_entry].map_as_page = false; 4215 tx_q->tx_skbuff_dma[first_entry].buf_type = STMMAC_TXBUF_T_SKB; 4216 4217 if (priv->dma_cap.addr64 <= 32) { 4218 first->des0 = cpu_to_le32(des); 4219 4220 /* Fill start of payload in buff2 of first descriptor */ 4221 if (pay_len) 4222 first->des1 = cpu_to_le32(des + proto_hdr_len); 4223 4224 /* If needed take extra descriptors to fill the remaining payload */ 4225 tmp_pay_len = pay_len - TSO_MAX_BUFF_SIZE; 4226 } else { 4227 
stmmac_set_desc_addr(priv, first, des); 4228 tmp_pay_len = pay_len; 4229 des += proto_hdr_len; 4230 pay_len = 0; 4231 } 4232 4233 stmmac_tso_allocator(priv, des, tmp_pay_len, (nfrags == 0), queue); 4234 4235 /* Prepare fragments */ 4236 for (i = 0; i < nfrags; i++) { 4237 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 4238 4239 des = skb_frag_dma_map(priv->device, frag, 0, 4240 skb_frag_size(frag), 4241 DMA_TO_DEVICE); 4242 if (dma_mapping_error(priv->device, des)) 4243 goto dma_map_err; 4244 4245 stmmac_tso_allocator(priv, des, skb_frag_size(frag), 4246 (i == nfrags - 1), queue); 4247 4248 tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des; 4249 tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_frag_size(frag); 4250 tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = true; 4251 tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB; 4252 } 4253 4254 tx_q->tx_skbuff_dma[tx_q->cur_tx].last_segment = true; 4255 4256 /* Only the last descriptor gets to point to the skb. */ 4257 tx_q->tx_skbuff[tx_q->cur_tx] = skb; 4258 tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB; 4259 4260 /* Manage tx mitigation */ 4261 tx_packets = (tx_q->cur_tx + 1) - first_tx; 4262 tx_q->tx_count_frames += tx_packets; 4263 4264 if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en) 4265 set_ic = true; 4266 else if (!priv->tx_coal_frames[queue]) 4267 set_ic = false; 4268 else if (tx_packets > priv->tx_coal_frames[queue]) 4269 set_ic = true; 4270 else if ((tx_q->tx_count_frames % 4271 priv->tx_coal_frames[queue]) < tx_packets) 4272 set_ic = true; 4273 else 4274 set_ic = false; 4275 4276 if (set_ic) { 4277 if (tx_q->tbs & STMMAC_TBS_AVAIL) 4278 desc = &tx_q->dma_entx[tx_q->cur_tx].basic; 4279 else 4280 desc = &tx_q->dma_tx[tx_q->cur_tx]; 4281 4282 tx_q->tx_count_frames = 0; 4283 stmmac_set_tx_ic(priv, desc); 4284 } 4285 4286 /* We've used all descriptors we need for this skb, however, 4287 * advance cur_tx so that it references a fresh descriptor. 4288 * ndo_start_xmit will fill this descriptor the next time it's 4289 * called and stmmac_tx_clean may clean up to this descriptor. 4290 */ 4291 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size); 4292 4293 if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) { 4294 netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n", 4295 __func__); 4296 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue)); 4297 } 4298 4299 flags = u64_stats_update_begin_irqsave(&txq_stats->syncp); 4300 txq_stats->tx_bytes += skb->len; 4301 txq_stats->tx_tso_frames++; 4302 txq_stats->tx_tso_nfrags += nfrags; 4303 if (set_ic) 4304 txq_stats->tx_set_ic_bit++; 4305 u64_stats_update_end_irqrestore(&txq_stats->syncp, flags); 4306 4307 if (priv->sarc_type) 4308 stmmac_set_desc_sarc(priv, first, priv->sarc_type); 4309 4310 skb_tx_timestamp(skb); 4311 4312 if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && 4313 priv->hwts_tx_en)) { 4314 /* declare that device is doing timestamping */ 4315 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; 4316 stmmac_enable_tx_timestamp(priv, first); 4317 } 4318 4319 /* Complete the first descriptor before granting the DMA */ 4320 stmmac_prepare_tso_tx_desc(priv, first, 1, 4321 proto_hdr_len, 4322 pay_len, 4323 1, tx_q->tx_skbuff_dma[first_entry].last_segment, 4324 hdr / 4, (skb->len - proto_hdr_len)); 4325 4326 /* If context desc is used to change MSS */ 4327 if (mss_desc) { 4328 /* Make sure that first descriptor has been completely 4329 * written, including its own bit. 
This is because MSS is 4330 * actually before first descriptor, so we need to make 4331 * sure that MSS's own bit is the last thing written. 4332 */ 4333 dma_wmb(); 4334 stmmac_set_tx_owner(priv, mss_desc); 4335 } 4336 4337 if (netif_msg_pktdata(priv)) { 4338 pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n", 4339 __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry, 4340 tx_q->cur_tx, first, nfrags); 4341 pr_info(">>> frame to be transmitted: "); 4342 print_pkt(skb->data, skb_headlen(skb)); 4343 } 4344 4345 netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len); 4346 4347 stmmac_flush_tx_descriptors(priv, queue); 4348 stmmac_tx_timer_arm(priv, queue); 4349 4350 return NETDEV_TX_OK; 4351 4352 dma_map_err: 4353 dev_err(priv->device, "Tx dma map failed\n"); 4354 dev_kfree_skb(skb); 4355 priv->xstats.tx_dropped++; 4356 return NETDEV_TX_OK; 4357 } 4358 4359 /** 4360 * stmmac_xmit - Tx entry point of the driver 4361 * @skb : the socket buffer 4362 * @dev : device pointer 4363 * Description : this is the tx entry point of the driver. 4364 * It programs the chain or the ring and supports oversized frames 4365 * and SG feature. 4366 */ 4367 static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) 4368 { 4369 unsigned int first_entry, tx_packets, enh_desc; 4370 struct stmmac_priv *priv = netdev_priv(dev); 4371 unsigned int nopaged_len = skb_headlen(skb); 4372 int i, csum_insertion = 0, is_jumbo = 0; 4373 u32 queue = skb_get_queue_mapping(skb); 4374 int nfrags = skb_shinfo(skb)->nr_frags; 4375 int gso = skb_shinfo(skb)->gso_type; 4376 struct stmmac_txq_stats *txq_stats; 4377 struct dma_edesc *tbs_desc = NULL; 4378 struct dma_desc *desc, *first; 4379 struct stmmac_tx_queue *tx_q; 4380 bool has_vlan, set_ic; 4381 int entry, first_tx; 4382 unsigned long flags; 4383 dma_addr_t des; 4384 4385 tx_q = &priv->dma_conf.tx_queue[queue]; 4386 txq_stats = &priv->xstats.txq_stats[queue]; 4387 first_tx = tx_q->cur_tx; 4388 4389 if (priv->tx_path_in_lpi_mode && priv->eee_sw_timer_en) 4390 stmmac_disable_eee_mode(priv); 4391 4392 /* Manage oversized TCP frames for GMAC4 device */ 4393 if (skb_is_gso(skb) && priv->tso) { 4394 if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) 4395 return stmmac_tso_xmit(skb, dev); 4396 if (priv->plat->has_gmac4 && (gso & SKB_GSO_UDP_L4)) 4397 return stmmac_tso_xmit(skb, dev); 4398 } 4399 4400 if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) { 4401 if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) { 4402 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, 4403 queue)); 4404 /* This is a hard error, log it. 
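 *
 * The queue is stopped because fewer than the descriptors this skb needs
 * were free; as a rough, illustrative bound:
 *
 *	needed = skb_shinfo(skb)->nr_frags + 1;	(linear head + fragments)
 *
 * stmmac_tx_clean() reclaims descriptors on completion and wakes the queue
 * once enough of them are available again.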
*/ 4405 netdev_err(priv->dev, 4406 "%s: Tx Ring full when queue awake\n", 4407 __func__); 4408 } 4409 return NETDEV_TX_BUSY; 4410 } 4411 4412 /* Check if VLAN can be inserted by HW */ 4413 has_vlan = stmmac_vlan_insert(priv, skb, tx_q); 4414 4415 entry = tx_q->cur_tx; 4416 first_entry = entry; 4417 WARN_ON(tx_q->tx_skbuff[first_entry]); 4418 4419 csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL); 4420 4421 if (likely(priv->extend_desc)) 4422 desc = (struct dma_desc *)(tx_q->dma_etx + entry); 4423 else if (tx_q->tbs & STMMAC_TBS_AVAIL) 4424 desc = &tx_q->dma_entx[entry].basic; 4425 else 4426 desc = tx_q->dma_tx + entry; 4427 4428 first = desc; 4429 4430 if (has_vlan) 4431 stmmac_set_desc_vlan(priv, first, STMMAC_VLAN_INSERT); 4432 4433 enh_desc = priv->plat->enh_desc; 4434 /* To program the descriptors according to the size of the frame */ 4435 if (enh_desc) 4436 is_jumbo = stmmac_is_jumbo_frm(priv, skb->len, enh_desc); 4437 4438 if (unlikely(is_jumbo)) { 4439 entry = stmmac_jumbo_frm(priv, tx_q, skb, csum_insertion); 4440 if (unlikely(entry < 0) && (entry != -EINVAL)) 4441 goto dma_map_err; 4442 } 4443 4444 for (i = 0; i < nfrags; i++) { 4445 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 4446 int len = skb_frag_size(frag); 4447 bool last_segment = (i == (nfrags - 1)); 4448 4449 entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size); 4450 WARN_ON(tx_q->tx_skbuff[entry]); 4451 4452 if (likely(priv->extend_desc)) 4453 desc = (struct dma_desc *)(tx_q->dma_etx + entry); 4454 else if (tx_q->tbs & STMMAC_TBS_AVAIL) 4455 desc = &tx_q->dma_entx[entry].basic; 4456 else 4457 desc = tx_q->dma_tx + entry; 4458 4459 des = skb_frag_dma_map(priv->device, frag, 0, len, 4460 DMA_TO_DEVICE); 4461 if (dma_mapping_error(priv->device, des)) 4462 goto dma_map_err; /* should reuse desc w/o issues */ 4463 4464 tx_q->tx_skbuff_dma[entry].buf = des; 4465 4466 stmmac_set_desc_addr(priv, desc, des); 4467 4468 tx_q->tx_skbuff_dma[entry].map_as_page = true; 4469 tx_q->tx_skbuff_dma[entry].len = len; 4470 tx_q->tx_skbuff_dma[entry].last_segment = last_segment; 4471 tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_SKB; 4472 4473 /* Prepare the descriptor and set the own bit too */ 4474 stmmac_prepare_tx_desc(priv, desc, 0, len, csum_insertion, 4475 priv->mode, 1, last_segment, skb->len); 4476 } 4477 4478 /* Only the last descriptor gets to point to the skb. */ 4479 tx_q->tx_skbuff[entry] = skb; 4480 tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_SKB; 4481 4482 /* According to the coalesce parameter the IC bit for the latest 4483 * segment is reset and the timer re-started to clean the tx status. 4484 * This approach takes care about the fragments: desc is the first 4485 * element in case of no SG. 
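 *
 * A condensed sketch of the decision taken just below (illustrative only;
 * the real code also forces an interrupt for hardware-timestamped skbs):
 *
 *	if (!priv->tx_coal_frames[queue])
 *		set_ic = false;		<- coalescing disabled
 *	else if (tx_packets > priv->tx_coal_frames[queue])
 *		set_ic = true;		<- burst larger than the threshold
 *	else if ((tx_q->tx_count_frames %
 *		  priv->tx_coal_frames[queue]) < tx_packets)
 *		set_ic = true;		<- threshold crossed by this skb
 *	else
 *		set_ic = false;		<- rely on the tx coalesce timer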
4486 */ 4487 tx_packets = (entry + 1) - first_tx; 4488 tx_q->tx_count_frames += tx_packets; 4489 4490 if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en) 4491 set_ic = true; 4492 else if (!priv->tx_coal_frames[queue]) 4493 set_ic = false; 4494 else if (tx_packets > priv->tx_coal_frames[queue]) 4495 set_ic = true; 4496 else if ((tx_q->tx_count_frames % 4497 priv->tx_coal_frames[queue]) < tx_packets) 4498 set_ic = true; 4499 else 4500 set_ic = false; 4501 4502 if (set_ic) { 4503 if (likely(priv->extend_desc)) 4504 desc = &tx_q->dma_etx[entry].basic; 4505 else if (tx_q->tbs & STMMAC_TBS_AVAIL) 4506 desc = &tx_q->dma_entx[entry].basic; 4507 else 4508 desc = &tx_q->dma_tx[entry]; 4509 4510 tx_q->tx_count_frames = 0; 4511 stmmac_set_tx_ic(priv, desc); 4512 } 4513 4514 /* We've used all descriptors we need for this skb, however, 4515 * advance cur_tx so that it references a fresh descriptor. 4516 * ndo_start_xmit will fill this descriptor the next time it's 4517 * called and stmmac_tx_clean may clean up to this descriptor. 4518 */ 4519 entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size); 4520 tx_q->cur_tx = entry; 4521 4522 if (netif_msg_pktdata(priv)) { 4523 netdev_dbg(priv->dev, 4524 "%s: curr=%d dirty=%d f=%d, e=%d, first=%p, nfrags=%d", 4525 __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry, 4526 entry, first, nfrags); 4527 4528 netdev_dbg(priv->dev, ">>> frame to be transmitted: "); 4529 print_pkt(skb->data, skb->len); 4530 } 4531 4532 if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) { 4533 netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n", 4534 __func__); 4535 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue)); 4536 } 4537 4538 flags = u64_stats_update_begin_irqsave(&txq_stats->syncp); 4539 txq_stats->tx_bytes += skb->len; 4540 if (set_ic) 4541 txq_stats->tx_set_ic_bit++; 4542 u64_stats_update_end_irqrestore(&txq_stats->syncp, flags); 4543 4544 if (priv->sarc_type) 4545 stmmac_set_desc_sarc(priv, first, priv->sarc_type); 4546 4547 skb_tx_timestamp(skb); 4548 4549 /* Ready to fill the first descriptor and set the OWN bit w/o any 4550 * problems because all the descriptors are actually ready to be 4551 * passed to the DMA engine. 
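 *
 * The ordering that makes this safe: the fragment descriptors above were
 * fully prepared (own bit included) first, the first descriptor is
 * completed last and its own bit is only set by stmmac_set_tx_owner()
 * further down, and stmmac_flush_tx_descriptors() issues a wmb() before
 * moving the tail pointer, so the DMA engine never sees a half-written
 * chain.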
4552 */ 4553 if (likely(!is_jumbo)) { 4554 bool last_segment = (nfrags == 0); 4555 4556 des = dma_map_single(priv->device, skb->data, 4557 nopaged_len, DMA_TO_DEVICE); 4558 if (dma_mapping_error(priv->device, des)) 4559 goto dma_map_err; 4560 4561 tx_q->tx_skbuff_dma[first_entry].buf = des; 4562 tx_q->tx_skbuff_dma[first_entry].buf_type = STMMAC_TXBUF_T_SKB; 4563 tx_q->tx_skbuff_dma[first_entry].map_as_page = false; 4564 4565 stmmac_set_desc_addr(priv, first, des); 4566 4567 tx_q->tx_skbuff_dma[first_entry].len = nopaged_len; 4568 tx_q->tx_skbuff_dma[first_entry].last_segment = last_segment; 4569 4570 if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && 4571 priv->hwts_tx_en)) { 4572 /* declare that device is doing timestamping */ 4573 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; 4574 stmmac_enable_tx_timestamp(priv, first); 4575 } 4576 4577 /* Prepare the first descriptor setting the OWN bit too */ 4578 stmmac_prepare_tx_desc(priv, first, 1, nopaged_len, 4579 csum_insertion, priv->mode, 0, last_segment, 4580 skb->len); 4581 } 4582 4583 if (tx_q->tbs & STMMAC_TBS_EN) { 4584 struct timespec64 ts = ns_to_timespec64(skb->tstamp); 4585 4586 tbs_desc = &tx_q->dma_entx[first_entry]; 4587 stmmac_set_desc_tbs(priv, tbs_desc, ts.tv_sec, ts.tv_nsec); 4588 } 4589 4590 stmmac_set_tx_owner(priv, first); 4591 4592 netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len); 4593 4594 stmmac_enable_dma_transmission(priv, priv->ioaddr); 4595 4596 stmmac_flush_tx_descriptors(priv, queue); 4597 stmmac_tx_timer_arm(priv, queue); 4598 4599 return NETDEV_TX_OK; 4600 4601 dma_map_err: 4602 netdev_err(priv->dev, "Tx DMA map failed\n"); 4603 dev_kfree_skb(skb); 4604 priv->xstats.tx_dropped++; 4605 return NETDEV_TX_OK; 4606 } 4607 4608 static void stmmac_rx_vlan(struct net_device *dev, struct sk_buff *skb) 4609 { 4610 struct vlan_ethhdr *veth = skb_vlan_eth_hdr(skb); 4611 __be16 vlan_proto = veth->h_vlan_proto; 4612 u16 vlanid; 4613 4614 if ((vlan_proto == htons(ETH_P_8021Q) && 4615 dev->features & NETIF_F_HW_VLAN_CTAG_RX) || 4616 (vlan_proto == htons(ETH_P_8021AD) && 4617 dev->features & NETIF_F_HW_VLAN_STAG_RX)) { 4618 /* pop the vlan tag */ 4619 vlanid = ntohs(veth->h_vlan_TCI); 4620 memmove(skb->data + VLAN_HLEN, veth, ETH_ALEN * 2); 4621 skb_pull(skb, VLAN_HLEN); 4622 __vlan_hwaccel_put_tag(skb, vlan_proto, vlanid); 4623 } 4624 } 4625 4626 /** 4627 * stmmac_rx_refill - refill used skb preallocated buffers 4628 * @priv: driver private structure 4629 * @queue: RX queue index 4630 * Description : this is to reallocate the skb for the reception process 4631 * that is based on zero-copy. 
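 *
 * Each refilled descriptor also decides whether to request the RX interrupt
 * watchdog; a condensed view of the logic used in the loop below
 * (illustrative only):
 *
 *	use_rx_wd = !priv->rx_coal_frames[queue] ||
 *		    rx_q->rx_count_frames > 0;
 *	if (!priv->use_riwt)
 *		use_rx_wd = false;
 *
 * i.e. the per-descriptor watchdog request is only honoured when RIWT is
 * actually in use.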
4632 */ 4633 static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue) 4634 { 4635 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue]; 4636 int dirty = stmmac_rx_dirty(priv, queue); 4637 unsigned int entry = rx_q->dirty_rx; 4638 gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN); 4639 4640 if (priv->dma_cap.host_dma_width <= 32) 4641 gfp |= GFP_DMA32; 4642 4643 while (dirty-- > 0) { 4644 struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry]; 4645 struct dma_desc *p; 4646 bool use_rx_wd; 4647 4648 if (priv->extend_desc) 4649 p = (struct dma_desc *)(rx_q->dma_erx + entry); 4650 else 4651 p = rx_q->dma_rx + entry; 4652 4653 if (!buf->page) { 4654 buf->page = page_pool_alloc_pages(rx_q->page_pool, gfp); 4655 if (!buf->page) 4656 break; 4657 } 4658 4659 if (priv->sph && !buf->sec_page) { 4660 buf->sec_page = page_pool_alloc_pages(rx_q->page_pool, gfp); 4661 if (!buf->sec_page) 4662 break; 4663 4664 buf->sec_addr = page_pool_get_dma_addr(buf->sec_page); 4665 } 4666 4667 buf->addr = page_pool_get_dma_addr(buf->page) + buf->page_offset; 4668 4669 stmmac_set_desc_addr(priv, p, buf->addr); 4670 if (priv->sph) 4671 stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true); 4672 else 4673 stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false); 4674 stmmac_refill_desc3(priv, rx_q, p); 4675 4676 rx_q->rx_count_frames++; 4677 rx_q->rx_count_frames += priv->rx_coal_frames[queue]; 4678 if (rx_q->rx_count_frames > priv->rx_coal_frames[queue]) 4679 rx_q->rx_count_frames = 0; 4680 4681 use_rx_wd = !priv->rx_coal_frames[queue]; 4682 use_rx_wd |= rx_q->rx_count_frames > 0; 4683 if (!priv->use_riwt) 4684 use_rx_wd = false; 4685 4686 dma_wmb(); 4687 stmmac_set_rx_owner(priv, p, use_rx_wd); 4688 4689 entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_rx_size); 4690 } 4691 rx_q->dirty_rx = entry; 4692 rx_q->rx_tail_addr = rx_q->dma_rx_phy + 4693 (rx_q->dirty_rx * sizeof(struct dma_desc)); 4694 stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue); 4695 } 4696 4697 static unsigned int stmmac_rx_buf1_len(struct stmmac_priv *priv, 4698 struct dma_desc *p, 4699 int status, unsigned int len) 4700 { 4701 unsigned int plen = 0, hlen = 0; 4702 int coe = priv->hw->rx_csum; 4703 4704 /* Not first descriptor, buffer is always zero */ 4705 if (priv->sph && len) 4706 return 0; 4707 4708 /* First descriptor, get split header length */ 4709 stmmac_get_rx_header_len(priv, p, &hlen); 4710 if (priv->sph && hlen) { 4711 priv->xstats.rx_split_hdr_pkt_n++; 4712 return hlen; 4713 } 4714 4715 /* First descriptor, not last descriptor and not split header */ 4716 if (status & rx_not_ls) 4717 return priv->dma_conf.dma_buf_sz; 4718 4719 plen = stmmac_get_rx_frame_len(priv, p, coe); 4720 4721 /* First descriptor and last descriptor and not split header */ 4722 return min_t(unsigned int, priv->dma_conf.dma_buf_sz, plen); 4723 } 4724 4725 static unsigned int stmmac_rx_buf2_len(struct stmmac_priv *priv, 4726 struct dma_desc *p, 4727 int status, unsigned int len) 4728 { 4729 int coe = priv->hw->rx_csum; 4730 unsigned int plen = 0; 4731 4732 /* Not split header, buffer is not available */ 4733 if (!priv->sph) 4734 return 0; 4735 4736 /* Not last descriptor */ 4737 if (status & rx_not_ls) 4738 return priv->dma_conf.dma_buf_sz; 4739 4740 plen = stmmac_get_rx_frame_len(priv, p, coe); 4741 4742 /* Last descriptor */ 4743 return plen - len; 4744 } 4745 4746 static int stmmac_xdp_xmit_xdpf(struct stmmac_priv *priv, int queue, 4747 struct xdp_frame *xdpf, bool dma_map) 4748 { 4749 struct stmmac_txq_stats *txq_stats = 
&priv->xstats.txq_stats[queue]; 4750 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue]; 4751 unsigned int entry = tx_q->cur_tx; 4752 struct dma_desc *tx_desc; 4753 dma_addr_t dma_addr; 4754 bool set_ic; 4755 4756 if (stmmac_tx_avail(priv, queue) < STMMAC_TX_THRESH(priv)) 4757 return STMMAC_XDP_CONSUMED; 4758 4759 if (likely(priv->extend_desc)) 4760 tx_desc = (struct dma_desc *)(tx_q->dma_etx + entry); 4761 else if (tx_q->tbs & STMMAC_TBS_AVAIL) 4762 tx_desc = &tx_q->dma_entx[entry].basic; 4763 else 4764 tx_desc = tx_q->dma_tx + entry; 4765 4766 if (dma_map) { 4767 dma_addr = dma_map_single(priv->device, xdpf->data, 4768 xdpf->len, DMA_TO_DEVICE); 4769 if (dma_mapping_error(priv->device, dma_addr)) 4770 return STMMAC_XDP_CONSUMED; 4771 4772 tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XDP_NDO; 4773 } else { 4774 struct page *page = virt_to_page(xdpf->data); 4775 4776 dma_addr = page_pool_get_dma_addr(page) + sizeof(*xdpf) + 4777 xdpf->headroom; 4778 dma_sync_single_for_device(priv->device, dma_addr, 4779 xdpf->len, DMA_BIDIRECTIONAL); 4780 4781 tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XDP_TX; 4782 } 4783 4784 tx_q->tx_skbuff_dma[entry].buf = dma_addr; 4785 tx_q->tx_skbuff_dma[entry].map_as_page = false; 4786 tx_q->tx_skbuff_dma[entry].len = xdpf->len; 4787 tx_q->tx_skbuff_dma[entry].last_segment = true; 4788 tx_q->tx_skbuff_dma[entry].is_jumbo = false; 4789 4790 tx_q->xdpf[entry] = xdpf; 4791 4792 stmmac_set_desc_addr(priv, tx_desc, dma_addr); 4793 4794 stmmac_prepare_tx_desc(priv, tx_desc, 1, xdpf->len, 4795 true, priv->mode, true, true, 4796 xdpf->len); 4797 4798 tx_q->tx_count_frames++; 4799 4800 if (tx_q->tx_count_frames % priv->tx_coal_frames[queue] == 0) 4801 set_ic = true; 4802 else 4803 set_ic = false; 4804 4805 if (set_ic) { 4806 unsigned long flags; 4807 tx_q->tx_count_frames = 0; 4808 stmmac_set_tx_ic(priv, tx_desc); 4809 flags = u64_stats_update_begin_irqsave(&txq_stats->syncp); 4810 txq_stats->tx_set_ic_bit++; 4811 u64_stats_update_end_irqrestore(&txq_stats->syncp, flags); 4812 } 4813 4814 stmmac_enable_dma_transmission(priv, priv->ioaddr); 4815 4816 entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size); 4817 tx_q->cur_tx = entry; 4818 4819 return STMMAC_XDP_TX; 4820 } 4821 4822 static int stmmac_xdp_get_tx_queue(struct stmmac_priv *priv, 4823 int cpu) 4824 { 4825 int index = cpu; 4826 4827 if (unlikely(index < 0)) 4828 index = 0; 4829 4830 while (index >= priv->plat->tx_queues_to_use) 4831 index -= priv->plat->tx_queues_to_use; 4832 4833 return index; 4834 } 4835 4836 static int stmmac_xdp_xmit_back(struct stmmac_priv *priv, 4837 struct xdp_buff *xdp) 4838 { 4839 struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp); 4840 int cpu = smp_processor_id(); 4841 struct netdev_queue *nq; 4842 int queue; 4843 int res; 4844 4845 if (unlikely(!xdpf)) 4846 return STMMAC_XDP_CONSUMED; 4847 4848 queue = stmmac_xdp_get_tx_queue(priv, cpu); 4849 nq = netdev_get_tx_queue(priv->dev, queue); 4850 4851 __netif_tx_lock(nq, cpu); 4852 /* Avoids TX time-out as we are sharing with slow path */ 4853 txq_trans_cond_update(nq); 4854 4855 res = stmmac_xdp_xmit_xdpf(priv, queue, xdpf, false); 4856 if (res == STMMAC_XDP_TX) 4857 stmmac_flush_tx_descriptors(priv, queue); 4858 4859 __netif_tx_unlock(nq); 4860 4861 return res; 4862 } 4863 4864 static int __stmmac_xdp_run_prog(struct stmmac_priv *priv, 4865 struct bpf_prog *prog, 4866 struct xdp_buff *xdp) 4867 { 4868 u32 act; 4869 int res; 4870 4871 act = bpf_prog_run_xdp(prog, xdp); 4872 switch (act) { 4873 case 
XDP_PASS: 4874 res = STMMAC_XDP_PASS; 4875 break; 4876 case XDP_TX: 4877 res = stmmac_xdp_xmit_back(priv, xdp); 4878 break; 4879 case XDP_REDIRECT: 4880 if (xdp_do_redirect(priv->dev, xdp, prog) < 0) 4881 res = STMMAC_XDP_CONSUMED; 4882 else 4883 res = STMMAC_XDP_REDIRECT; 4884 break; 4885 default: 4886 bpf_warn_invalid_xdp_action(priv->dev, prog, act); 4887 fallthrough; 4888 case XDP_ABORTED: 4889 trace_xdp_exception(priv->dev, prog, act); 4890 fallthrough; 4891 case XDP_DROP: 4892 res = STMMAC_XDP_CONSUMED; 4893 break; 4894 } 4895 4896 return res; 4897 } 4898 4899 static struct sk_buff *stmmac_xdp_run_prog(struct stmmac_priv *priv, 4900 struct xdp_buff *xdp) 4901 { 4902 struct bpf_prog *prog; 4903 int res; 4904 4905 prog = READ_ONCE(priv->xdp_prog); 4906 if (!prog) { 4907 res = STMMAC_XDP_PASS; 4908 goto out; 4909 } 4910 4911 res = __stmmac_xdp_run_prog(priv, prog, xdp); 4912 out: 4913 return ERR_PTR(-res); 4914 } 4915 4916 static void stmmac_finalize_xdp_rx(struct stmmac_priv *priv, 4917 int xdp_status) 4918 { 4919 int cpu = smp_processor_id(); 4920 int queue; 4921 4922 queue = stmmac_xdp_get_tx_queue(priv, cpu); 4923 4924 if (xdp_status & STMMAC_XDP_TX) 4925 stmmac_tx_timer_arm(priv, queue); 4926 4927 if (xdp_status & STMMAC_XDP_REDIRECT) 4928 xdp_do_flush(); 4929 } 4930 4931 static struct sk_buff *stmmac_construct_skb_zc(struct stmmac_channel *ch, 4932 struct xdp_buff *xdp) 4933 { 4934 unsigned int metasize = xdp->data - xdp->data_meta; 4935 unsigned int datasize = xdp->data_end - xdp->data; 4936 struct sk_buff *skb; 4937 4938 skb = __napi_alloc_skb(&ch->rxtx_napi, 4939 xdp->data_end - xdp->data_hard_start, 4940 GFP_ATOMIC | __GFP_NOWARN); 4941 if (unlikely(!skb)) 4942 return NULL; 4943 4944 skb_reserve(skb, xdp->data - xdp->data_hard_start); 4945 memcpy(__skb_put(skb, datasize), xdp->data, datasize); 4946 if (metasize) 4947 skb_metadata_set(skb, metasize); 4948 4949 return skb; 4950 } 4951 4952 static void stmmac_dispatch_skb_zc(struct stmmac_priv *priv, u32 queue, 4953 struct dma_desc *p, struct dma_desc *np, 4954 struct xdp_buff *xdp) 4955 { 4956 struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[queue]; 4957 struct stmmac_channel *ch = &priv->channel[queue]; 4958 unsigned int len = xdp->data_end - xdp->data; 4959 enum pkt_hash_types hash_type; 4960 int coe = priv->hw->rx_csum; 4961 unsigned long flags; 4962 struct sk_buff *skb; 4963 u32 hash; 4964 4965 skb = stmmac_construct_skb_zc(ch, xdp); 4966 if (!skb) { 4967 priv->xstats.rx_dropped++; 4968 return; 4969 } 4970 4971 stmmac_get_rx_hwtstamp(priv, p, np, skb); 4972 stmmac_rx_vlan(priv->dev, skb); 4973 skb->protocol = eth_type_trans(skb, priv->dev); 4974 4975 if (unlikely(!coe)) 4976 skb_checksum_none_assert(skb); 4977 else 4978 skb->ip_summed = CHECKSUM_UNNECESSARY; 4979 4980 if (!stmmac_get_rx_hash(priv, p, &hash, &hash_type)) 4981 skb_set_hash(skb, hash, hash_type); 4982 4983 skb_record_rx_queue(skb, queue); 4984 napi_gro_receive(&ch->rxtx_napi, skb); 4985 4986 flags = u64_stats_update_begin_irqsave(&rxq_stats->syncp); 4987 rxq_stats->rx_pkt_n++; 4988 rxq_stats->rx_bytes += len; 4989 u64_stats_update_end_irqrestore(&rxq_stats->syncp, flags); 4990 } 4991 4992 static bool stmmac_rx_refill_zc(struct stmmac_priv *priv, u32 queue, u32 budget) 4993 { 4994 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue]; 4995 unsigned int entry = rx_q->dirty_rx; 4996 struct dma_desc *rx_desc = NULL; 4997 bool ret = true; 4998 4999 budget = min(budget, stmmac_rx_dirty(priv, queue)); 5000 5001 while (budget-- > 0 && entry != 
rx_q->cur_rx) { 5002 struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry]; 5003 dma_addr_t dma_addr; 5004 bool use_rx_wd; 5005 5006 if (!buf->xdp) { 5007 buf->xdp = xsk_buff_alloc(rx_q->xsk_pool); 5008 if (!buf->xdp) { 5009 ret = false; 5010 break; 5011 } 5012 } 5013 5014 if (priv->extend_desc) 5015 rx_desc = (struct dma_desc *)(rx_q->dma_erx + entry); 5016 else 5017 rx_desc = rx_q->dma_rx + entry; 5018 5019 dma_addr = xsk_buff_xdp_get_dma(buf->xdp); 5020 stmmac_set_desc_addr(priv, rx_desc, dma_addr); 5021 stmmac_set_desc_sec_addr(priv, rx_desc, 0, false); 5022 stmmac_refill_desc3(priv, rx_q, rx_desc); 5023 5024 rx_q->rx_count_frames++; 5025 rx_q->rx_count_frames += priv->rx_coal_frames[queue]; 5026 if (rx_q->rx_count_frames > priv->rx_coal_frames[queue]) 5027 rx_q->rx_count_frames = 0; 5028 5029 use_rx_wd = !priv->rx_coal_frames[queue]; 5030 use_rx_wd |= rx_q->rx_count_frames > 0; 5031 if (!priv->use_riwt) 5032 use_rx_wd = false; 5033 5034 dma_wmb(); 5035 stmmac_set_rx_owner(priv, rx_desc, use_rx_wd); 5036 5037 entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_rx_size); 5038 } 5039 5040 if (rx_desc) { 5041 rx_q->dirty_rx = entry; 5042 rx_q->rx_tail_addr = rx_q->dma_rx_phy + 5043 (rx_q->dirty_rx * sizeof(struct dma_desc)); 5044 stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue); 5045 } 5046 5047 return ret; 5048 } 5049 5050 static struct stmmac_xdp_buff *xsk_buff_to_stmmac_ctx(struct xdp_buff *xdp) 5051 { 5052 /* In XDP zero copy data path, xdp field in struct xdp_buff_xsk is used 5053 * to represent incoming packet, whereas cb field in the same structure 5054 * is used to store driver specific info. Thus, struct stmmac_xdp_buff 5055 * is laid on top of xdp and cb fields of struct xdp_buff_xsk. 5056 */ 5057 return (struct stmmac_xdp_buff *)xdp; 5058 } 5059 5060 static int stmmac_rx_zc(struct stmmac_priv *priv, int limit, u32 queue) 5061 { 5062 struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[queue]; 5063 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue]; 5064 unsigned int count = 0, error = 0, len = 0; 5065 int dirty = stmmac_rx_dirty(priv, queue); 5066 unsigned int next_entry = rx_q->cur_rx; 5067 u32 rx_errors = 0, rx_dropped = 0; 5068 unsigned int desc_size; 5069 struct bpf_prog *prog; 5070 bool failure = false; 5071 unsigned long flags; 5072 int xdp_status = 0; 5073 int status = 0; 5074 5075 if (netif_msg_rx_status(priv)) { 5076 void *rx_head; 5077 5078 netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__); 5079 if (priv->extend_desc) { 5080 rx_head = (void *)rx_q->dma_erx; 5081 desc_size = sizeof(struct dma_extended_desc); 5082 } else { 5083 rx_head = (void *)rx_q->dma_rx; 5084 desc_size = sizeof(struct dma_desc); 5085 } 5086 5087 stmmac_display_ring(priv, rx_head, priv->dma_conf.dma_rx_size, true, 5088 rx_q->dma_rx_phy, desc_size); 5089 } 5090 while (count < limit) { 5091 struct stmmac_rx_buffer *buf; 5092 struct stmmac_xdp_buff *ctx; 5093 unsigned int buf1_len = 0; 5094 struct dma_desc *np, *p; 5095 int entry; 5096 int res; 5097 5098 if (!count && rx_q->state_saved) { 5099 error = rx_q->state.error; 5100 len = rx_q->state.len; 5101 } else { 5102 rx_q->state_saved = false; 5103 error = 0; 5104 len = 0; 5105 } 5106 5107 if (count >= limit) 5108 break; 5109 5110 read_again: 5111 buf1_len = 0; 5112 entry = next_entry; 5113 buf = &rx_q->buf_pool[entry]; 5114 5115 if (dirty >= STMMAC_RX_FILL_BATCH) { 5116 failure = failure || 5117 !stmmac_rx_refill_zc(priv, queue, dirty); 5118 dirty = 0; 5119 } 5120 5121 if (priv->extend_desc) 5122 p = 
(struct dma_desc *)(rx_q->dma_erx + entry); 5123 else 5124 p = rx_q->dma_rx + entry; 5125 5126 /* read the status of the incoming frame */ 5127 status = stmmac_rx_status(priv, &priv->xstats, p); 5128 /* check if managed by the DMA otherwise go ahead */ 5129 if (unlikely(status & dma_own)) 5130 break; 5131 5132 /* Prefetch the next RX descriptor */ 5133 rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx, 5134 priv->dma_conf.dma_rx_size); 5135 next_entry = rx_q->cur_rx; 5136 5137 if (priv->extend_desc) 5138 np = (struct dma_desc *)(rx_q->dma_erx + next_entry); 5139 else 5140 np = rx_q->dma_rx + next_entry; 5141 5142 prefetch(np); 5143 5144 /* Ensure a valid XSK buffer before proceed */ 5145 if (!buf->xdp) 5146 break; 5147 5148 if (priv->extend_desc) 5149 stmmac_rx_extended_status(priv, &priv->xstats, 5150 rx_q->dma_erx + entry); 5151 if (unlikely(status == discard_frame)) { 5152 xsk_buff_free(buf->xdp); 5153 buf->xdp = NULL; 5154 dirty++; 5155 error = 1; 5156 if (!priv->hwts_rx_en) 5157 rx_errors++; 5158 } 5159 5160 if (unlikely(error && (status & rx_not_ls))) 5161 goto read_again; 5162 if (unlikely(error)) { 5163 count++; 5164 continue; 5165 } 5166 5167 /* XSK pool expects RX frame 1:1 mapped to XSK buffer */ 5168 if (likely(status & rx_not_ls)) { 5169 xsk_buff_free(buf->xdp); 5170 buf->xdp = NULL; 5171 dirty++; 5172 count++; 5173 goto read_again; 5174 } 5175 5176 ctx = xsk_buff_to_stmmac_ctx(buf->xdp); 5177 ctx->priv = priv; 5178 ctx->desc = p; 5179 ctx->ndesc = np; 5180 5181 /* XDP ZC Frame only support primary buffers for now */ 5182 buf1_len = stmmac_rx_buf1_len(priv, p, status, len); 5183 len += buf1_len; 5184 5185 /* ACS is disabled; strip manually. */ 5186 if (likely(!(status & rx_not_ls))) { 5187 buf1_len -= ETH_FCS_LEN; 5188 len -= ETH_FCS_LEN; 5189 } 5190 5191 /* RX buffer is good and fit into a XSK pool buffer */ 5192 buf->xdp->data_end = buf->xdp->data + buf1_len; 5193 xsk_buff_dma_sync_for_cpu(buf->xdp, rx_q->xsk_pool); 5194 5195 prog = READ_ONCE(priv->xdp_prog); 5196 res = __stmmac_xdp_run_prog(priv, prog, buf->xdp); 5197 5198 switch (res) { 5199 case STMMAC_XDP_PASS: 5200 stmmac_dispatch_skb_zc(priv, queue, p, np, buf->xdp); 5201 xsk_buff_free(buf->xdp); 5202 break; 5203 case STMMAC_XDP_CONSUMED: 5204 xsk_buff_free(buf->xdp); 5205 rx_dropped++; 5206 break; 5207 case STMMAC_XDP_TX: 5208 case STMMAC_XDP_REDIRECT: 5209 xdp_status |= res; 5210 break; 5211 } 5212 5213 buf->xdp = NULL; 5214 dirty++; 5215 count++; 5216 } 5217 5218 if (status & rx_not_ls) { 5219 rx_q->state_saved = true; 5220 rx_q->state.error = error; 5221 rx_q->state.len = len; 5222 } 5223 5224 stmmac_finalize_xdp_rx(priv, xdp_status); 5225 5226 flags = u64_stats_update_begin_irqsave(&rxq_stats->syncp); 5227 rxq_stats->rx_pkt_n += count; 5228 u64_stats_update_end_irqrestore(&rxq_stats->syncp, flags); 5229 5230 priv->xstats.rx_dropped += rx_dropped; 5231 priv->xstats.rx_errors += rx_errors; 5232 5233 if (xsk_uses_need_wakeup(rx_q->xsk_pool)) { 5234 if (failure || stmmac_rx_dirty(priv, queue) > 0) 5235 xsk_set_rx_need_wakeup(rx_q->xsk_pool); 5236 else 5237 xsk_clear_rx_need_wakeup(rx_q->xsk_pool); 5238 5239 return (int)count; 5240 } 5241 5242 return failure ? limit : (int)count; 5243 } 5244 5245 /** 5246 * stmmac_rx - manage the receive process 5247 * @priv: driver private structure 5248 * @limit: napi bugget 5249 * @queue: RX queue index. 5250 * Description : this the function called by the napi poll method. 5251 * It gets all the frames inside the ring. 
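 *
 * A frame may span several descriptors and the NAPI budget may run out in
 * the middle of it, so the queue keeps a small resume state. A sketch of
 * the pattern used at the top and bottom of the loop below:
 *
 *	if (!count && rx_q->state_saved) {
 *		skb   = rx_q->state.skb;	resume the partial frame
 *		error = rx_q->state.error;
 *		len   = rx_q->state.len;
 *	}
 *	...
 *	if (status & rx_not_ls || skb) {
 *		rx_q->state_saved = true;	park it for the next poll
 *		rx_q->state.skb   = skb;
 *	}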
5252 */ 5253 static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue) 5254 { 5255 u32 rx_errors = 0, rx_dropped = 0, rx_bytes = 0, rx_packets = 0; 5256 struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[queue]; 5257 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue]; 5258 struct stmmac_channel *ch = &priv->channel[queue]; 5259 unsigned int count = 0, error = 0, len = 0; 5260 int status = 0, coe = priv->hw->rx_csum; 5261 unsigned int next_entry = rx_q->cur_rx; 5262 enum dma_data_direction dma_dir; 5263 unsigned int desc_size; 5264 struct sk_buff *skb = NULL; 5265 struct stmmac_xdp_buff ctx; 5266 unsigned long flags; 5267 int xdp_status = 0; 5268 int buf_sz; 5269 5270 dma_dir = page_pool_get_dma_dir(rx_q->page_pool); 5271 buf_sz = DIV_ROUND_UP(priv->dma_conf.dma_buf_sz, PAGE_SIZE) * PAGE_SIZE; 5272 limit = min(priv->dma_conf.dma_rx_size - 1, (unsigned int)limit); 5273 5274 if (netif_msg_rx_status(priv)) { 5275 void *rx_head; 5276 5277 netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__); 5278 if (priv->extend_desc) { 5279 rx_head = (void *)rx_q->dma_erx; 5280 desc_size = sizeof(struct dma_extended_desc); 5281 } else { 5282 rx_head = (void *)rx_q->dma_rx; 5283 desc_size = sizeof(struct dma_desc); 5284 } 5285 5286 stmmac_display_ring(priv, rx_head, priv->dma_conf.dma_rx_size, true, 5287 rx_q->dma_rx_phy, desc_size); 5288 } 5289 while (count < limit) { 5290 unsigned int buf1_len = 0, buf2_len = 0; 5291 enum pkt_hash_types hash_type; 5292 struct stmmac_rx_buffer *buf; 5293 struct dma_desc *np, *p; 5294 int entry; 5295 u32 hash; 5296 5297 if (!count && rx_q->state_saved) { 5298 skb = rx_q->state.skb; 5299 error = rx_q->state.error; 5300 len = rx_q->state.len; 5301 } else { 5302 rx_q->state_saved = false; 5303 skb = NULL; 5304 error = 0; 5305 len = 0; 5306 } 5307 5308 read_again: 5309 if (count >= limit) 5310 break; 5311 5312 buf1_len = 0; 5313 buf2_len = 0; 5314 entry = next_entry; 5315 buf = &rx_q->buf_pool[entry]; 5316 5317 if (priv->extend_desc) 5318 p = (struct dma_desc *)(rx_q->dma_erx + entry); 5319 else 5320 p = rx_q->dma_rx + entry; 5321 5322 /* read the status of the incoming frame */ 5323 status = stmmac_rx_status(priv, &priv->xstats, p); 5324 /* check if managed by the DMA otherwise go ahead */ 5325 if (unlikely(status & dma_own)) 5326 break; 5327 5328 rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx, 5329 priv->dma_conf.dma_rx_size); 5330 next_entry = rx_q->cur_rx; 5331 5332 if (priv->extend_desc) 5333 np = (struct dma_desc *)(rx_q->dma_erx + next_entry); 5334 else 5335 np = rx_q->dma_rx + next_entry; 5336 5337 prefetch(np); 5338 5339 if (priv->extend_desc) 5340 stmmac_rx_extended_status(priv, &priv->xstats, rx_q->dma_erx + entry); 5341 if (unlikely(status == discard_frame)) { 5342 page_pool_recycle_direct(rx_q->page_pool, buf->page); 5343 buf->page = NULL; 5344 error = 1; 5345 if (!priv->hwts_rx_en) 5346 rx_errors++; 5347 } 5348 5349 if (unlikely(error && (status & rx_not_ls))) 5350 goto read_again; 5351 if (unlikely(error)) { 5352 dev_kfree_skb(skb); 5353 skb = NULL; 5354 count++; 5355 continue; 5356 } 5357 5358 /* Buffer is good. Go on. */ 5359 5360 prefetch(page_address(buf->page) + buf->page_offset); 5361 if (buf->sec_page) 5362 prefetch(page_address(buf->sec_page)); 5363 5364 buf1_len = stmmac_rx_buf1_len(priv, p, status, len); 5365 len += buf1_len; 5366 buf2_len = stmmac_rx_buf2_len(priv, p, status, len); 5367 len += buf2_len; 5368 5369 /* ACS is disabled; strip manually. 
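 *
 * The 4-byte FCS (ETH_FCS_LEN) is removed from whichever buffer holds the
 * tail of the frame: buffer 2 when it carries data, otherwise buffer 1,
 * and the running frame length is trimmed to match.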
*/ 5370 if (likely(!(status & rx_not_ls))) { 5371 if (buf2_len) { 5372 buf2_len -= ETH_FCS_LEN; 5373 len -= ETH_FCS_LEN; 5374 } else if (buf1_len) { 5375 buf1_len -= ETH_FCS_LEN; 5376 len -= ETH_FCS_LEN; 5377 } 5378 } 5379 5380 if (!skb) { 5381 unsigned int pre_len, sync_len; 5382 5383 dma_sync_single_for_cpu(priv->device, buf->addr, 5384 buf1_len, dma_dir); 5385 5386 xdp_init_buff(&ctx.xdp, buf_sz, &rx_q->xdp_rxq); 5387 xdp_prepare_buff(&ctx.xdp, page_address(buf->page), 5388 buf->page_offset, buf1_len, true); 5389 5390 pre_len = ctx.xdp.data_end - ctx.xdp.data_hard_start - 5391 buf->page_offset; 5392 5393 ctx.priv = priv; 5394 ctx.desc = p; 5395 ctx.ndesc = np; 5396 5397 skb = stmmac_xdp_run_prog(priv, &ctx.xdp); 5398 /* Due xdp_adjust_tail: DMA sync for_device 5399 * cover max len CPU touch 5400 */ 5401 sync_len = ctx.xdp.data_end - ctx.xdp.data_hard_start - 5402 buf->page_offset; 5403 sync_len = max(sync_len, pre_len); 5404 5405 /* For Not XDP_PASS verdict */ 5406 if (IS_ERR(skb)) { 5407 unsigned int xdp_res = -PTR_ERR(skb); 5408 5409 if (xdp_res & STMMAC_XDP_CONSUMED) { 5410 page_pool_put_page(rx_q->page_pool, 5411 virt_to_head_page(ctx.xdp.data), 5412 sync_len, true); 5413 buf->page = NULL; 5414 rx_dropped++; 5415 5416 /* Clear skb as it was set as 5417 * status by XDP program. 5418 */ 5419 skb = NULL; 5420 5421 if (unlikely((status & rx_not_ls))) 5422 goto read_again; 5423 5424 count++; 5425 continue; 5426 } else if (xdp_res & (STMMAC_XDP_TX | 5427 STMMAC_XDP_REDIRECT)) { 5428 xdp_status |= xdp_res; 5429 buf->page = NULL; 5430 skb = NULL; 5431 count++; 5432 continue; 5433 } 5434 } 5435 } 5436 5437 if (!skb) { 5438 /* XDP program may expand or reduce tail */ 5439 buf1_len = ctx.xdp.data_end - ctx.xdp.data; 5440 5441 skb = napi_alloc_skb(&ch->rx_napi, buf1_len); 5442 if (!skb) { 5443 rx_dropped++; 5444 count++; 5445 goto drain_data; 5446 } 5447 5448 /* XDP program may adjust header */ 5449 skb_copy_to_linear_data(skb, ctx.xdp.data, buf1_len); 5450 skb_put(skb, buf1_len); 5451 5452 /* Data payload copied into SKB, page ready for recycle */ 5453 page_pool_recycle_direct(rx_q->page_pool, buf->page); 5454 buf->page = NULL; 5455 } else if (buf1_len) { 5456 dma_sync_single_for_cpu(priv->device, buf->addr, 5457 buf1_len, dma_dir); 5458 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, 5459 buf->page, buf->page_offset, buf1_len, 5460 priv->dma_conf.dma_buf_sz); 5461 5462 /* Data payload appended into SKB */ 5463 skb_mark_for_recycle(skb); 5464 buf->page = NULL; 5465 } 5466 5467 if (buf2_len) { 5468 dma_sync_single_for_cpu(priv->device, buf->sec_addr, 5469 buf2_len, dma_dir); 5470 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, 5471 buf->sec_page, 0, buf2_len, 5472 priv->dma_conf.dma_buf_sz); 5473 5474 /* Data payload appended into SKB */ 5475 skb_mark_for_recycle(skb); 5476 buf->sec_page = NULL; 5477 } 5478 5479 drain_data: 5480 if (likely(status & rx_not_ls)) 5481 goto read_again; 5482 if (!skb) 5483 continue; 5484 5485 /* Got entire packet into SKB. Finish it. 
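 *
 * At this point the skb was assembled as follows: the (possibly
 * XDP-adjusted) head buffer was copied into the linear area of a freshly
 * allocated skb and its page recycled directly, while buffer 1 data of any
 * follow-up descriptors and buffer 2 (split header) data were attached as
 * page fragments with skb_mark_for_recycle(), so their pages return to the
 * page_pool once the skb is consumed.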
*/ 5486 5487 stmmac_get_rx_hwtstamp(priv, p, np, skb); 5488 stmmac_rx_vlan(priv->dev, skb); 5489 skb->protocol = eth_type_trans(skb, priv->dev); 5490 5491 if (unlikely(!coe)) 5492 skb_checksum_none_assert(skb); 5493 else 5494 skb->ip_summed = CHECKSUM_UNNECESSARY; 5495 5496 if (!stmmac_get_rx_hash(priv, p, &hash, &hash_type)) 5497 skb_set_hash(skb, hash, hash_type); 5498 5499 skb_record_rx_queue(skb, queue); 5500 napi_gro_receive(&ch->rx_napi, skb); 5501 skb = NULL; 5502 5503 rx_packets++; 5504 rx_bytes += len; 5505 count++; 5506 } 5507 5508 if (status & rx_not_ls || skb) { 5509 rx_q->state_saved = true; 5510 rx_q->state.skb = skb; 5511 rx_q->state.error = error; 5512 rx_q->state.len = len; 5513 } 5514 5515 stmmac_finalize_xdp_rx(priv, xdp_status); 5516 5517 stmmac_rx_refill(priv, queue); 5518 5519 flags = u64_stats_update_begin_irqsave(&rxq_stats->syncp); 5520 rxq_stats->rx_packets += rx_packets; 5521 rxq_stats->rx_bytes += rx_bytes; 5522 rxq_stats->rx_pkt_n += count; 5523 u64_stats_update_end_irqrestore(&rxq_stats->syncp, flags); 5524 5525 priv->xstats.rx_dropped += rx_dropped; 5526 priv->xstats.rx_errors += rx_errors; 5527 5528 return count; 5529 } 5530 5531 static int stmmac_napi_poll_rx(struct napi_struct *napi, int budget) 5532 { 5533 struct stmmac_channel *ch = 5534 container_of(napi, struct stmmac_channel, rx_napi); 5535 struct stmmac_priv *priv = ch->priv_data; 5536 struct stmmac_rxq_stats *rxq_stats; 5537 u32 chan = ch->index; 5538 unsigned long flags; 5539 int work_done; 5540 5541 rxq_stats = &priv->xstats.rxq_stats[chan]; 5542 flags = u64_stats_update_begin_irqsave(&rxq_stats->syncp); 5543 rxq_stats->napi_poll++; 5544 u64_stats_update_end_irqrestore(&rxq_stats->syncp, flags); 5545 5546 work_done = stmmac_rx(priv, budget, chan); 5547 if (work_done < budget && napi_complete_done(napi, work_done)) { 5548 unsigned long flags; 5549 5550 spin_lock_irqsave(&ch->lock, flags); 5551 stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 0); 5552 spin_unlock_irqrestore(&ch->lock, flags); 5553 } 5554 5555 return work_done; 5556 } 5557 5558 static int stmmac_napi_poll_tx(struct napi_struct *napi, int budget) 5559 { 5560 struct stmmac_channel *ch = 5561 container_of(napi, struct stmmac_channel, tx_napi); 5562 struct stmmac_priv *priv = ch->priv_data; 5563 struct stmmac_txq_stats *txq_stats; 5564 u32 chan = ch->index; 5565 unsigned long flags; 5566 int work_done; 5567 5568 txq_stats = &priv->xstats.txq_stats[chan]; 5569 flags = u64_stats_update_begin_irqsave(&txq_stats->syncp); 5570 txq_stats->napi_poll++; 5571 u64_stats_update_end_irqrestore(&txq_stats->syncp, flags); 5572 5573 work_done = stmmac_tx_clean(priv, budget, chan); 5574 work_done = min(work_done, budget); 5575 5576 if (work_done < budget && napi_complete_done(napi, work_done)) { 5577 unsigned long flags; 5578 5579 spin_lock_irqsave(&ch->lock, flags); 5580 stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 0, 1); 5581 spin_unlock_irqrestore(&ch->lock, flags); 5582 } 5583 5584 return work_done; 5585 } 5586 5587 static int stmmac_napi_poll_rxtx(struct napi_struct *napi, int budget) 5588 { 5589 struct stmmac_channel *ch = 5590 container_of(napi, struct stmmac_channel, rxtx_napi); 5591 struct stmmac_priv *priv = ch->priv_data; 5592 int rx_done, tx_done, rxtx_done; 5593 struct stmmac_rxq_stats *rxq_stats; 5594 struct stmmac_txq_stats *txq_stats; 5595 u32 chan = ch->index; 5596 unsigned long flags; 5597 5598 rxq_stats = &priv->xstats.rxq_stats[chan]; 5599 flags = u64_stats_update_begin_irqsave(&rxq_stats->syncp); 5600 rxq_stats->napi_poll++; 
5601 u64_stats_update_end_irqrestore(&rxq_stats->syncp, flags); 5602 5603 txq_stats = &priv->xstats.txq_stats[chan]; 5604 flags = u64_stats_update_begin_irqsave(&txq_stats->syncp); 5605 txq_stats->napi_poll++; 5606 u64_stats_update_end_irqrestore(&txq_stats->syncp, flags); 5607 5608 tx_done = stmmac_tx_clean(priv, budget, chan); 5609 tx_done = min(tx_done, budget); 5610 5611 rx_done = stmmac_rx_zc(priv, budget, chan); 5612 5613 rxtx_done = max(tx_done, rx_done); 5614 5615 /* If either TX or RX work is not complete, return budget 5616 * and keep polling 5617 */ 5618 if (rxtx_done >= budget) 5619 return budget; 5620 5621 /* all work done, exit the polling mode */ 5622 if (napi_complete_done(napi, rxtx_done)) { 5623 unsigned long flags; 5624 5625 spin_lock_irqsave(&ch->lock, flags); 5626 /* Both RX and TX work are complete, 5627 * so enable both RX & TX IRQs. 5628 */ 5629 stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 1); 5630 spin_unlock_irqrestore(&ch->lock, flags); 5631 } 5632 5633 return min(rxtx_done, budget - 1); 5634 } 5635 5636 /** 5637 * stmmac_tx_timeout 5638 * @dev : Pointer to net device structure 5639 * @txqueue: the index of the hanging transmit queue 5640 * Description: this function is called when a packet transmission fails to 5641 * complete within a reasonable time. The driver will mark the error in the 5642 * netdev structure and arrange for the device to be reset to a sane state 5643 * in order to transmit a new packet. 5644 */ 5645 static void stmmac_tx_timeout(struct net_device *dev, unsigned int txqueue) 5646 { 5647 struct stmmac_priv *priv = netdev_priv(dev); 5648 5649 stmmac_global_err(priv); 5650 } 5651 5652 /** 5653 * stmmac_set_rx_mode - entry point for multicast addressing 5654 * @dev : pointer to the device structure 5655 * Description: 5656 * This function is a driver entry point which gets called by the kernel 5657 * whenever multicast addresses must be enabled/disabled. 5658 * Return value: 5659 * void. 5660 */ 5661 static void stmmac_set_rx_mode(struct net_device *dev) 5662 { 5663 struct stmmac_priv *priv = netdev_priv(dev); 5664 5665 stmmac_set_filter(priv, priv->hw, dev); 5666 } 5667 5668 /** 5669 * stmmac_change_mtu - entry point to change MTU size for the device. 5670 * @dev : device pointer. 5671 * @new_mtu : the new MTU size for the device. 5672 * Description: the Maximum Transmission Unit (MTU) is used by the network layer 5673 * to drive packet transmission. Ethernet has an MTU of 1500 octets 5674 * (ETH_DATA_LEN). This value can be changed with ifconfig. 5675 * Return value: 5676 * 0 on success and an appropriate (-)ve integer as defined in errno.h 5677 * file on failure.
5678 */ 5679 static int stmmac_change_mtu(struct net_device *dev, int new_mtu) 5680 { 5681 struct stmmac_priv *priv = netdev_priv(dev); 5682 int txfifosz = priv->plat->tx_fifo_size; 5683 struct stmmac_dma_conf *dma_conf; 5684 const int mtu = new_mtu; 5685 int ret; 5686 5687 if (txfifosz == 0) 5688 txfifosz = priv->dma_cap.tx_fifo_size; 5689 5690 txfifosz /= priv->plat->tx_queues_to_use; 5691 5692 if (stmmac_xdp_is_enabled(priv) && new_mtu > ETH_DATA_LEN) { 5693 netdev_dbg(priv->dev, "Jumbo frames not supported for XDP\n"); 5694 return -EINVAL; 5695 } 5696 5697 new_mtu = STMMAC_ALIGN(new_mtu); 5698 5699 /* If condition true, FIFO is too small or MTU too large */ 5700 if ((txfifosz < new_mtu) || (new_mtu > BUF_SIZE_16KiB)) 5701 return -EINVAL; 5702 5703 if (netif_running(dev)) { 5704 netdev_dbg(priv->dev, "restarting interface to change its MTU\n"); 5705 /* Try to allocate the new DMA conf with the new mtu */ 5706 dma_conf = stmmac_setup_dma_desc(priv, mtu); 5707 if (IS_ERR(dma_conf)) { 5708 netdev_err(priv->dev, "failed allocating new dma conf for new MTU %d\n", 5709 mtu); 5710 return PTR_ERR(dma_conf); 5711 } 5712 5713 stmmac_release(dev); 5714 5715 ret = __stmmac_open(dev, dma_conf); 5716 if (ret) { 5717 free_dma_desc_resources(priv, dma_conf); 5718 kfree(dma_conf); 5719 netdev_err(priv->dev, "failed reopening the interface after MTU change\n"); 5720 return ret; 5721 } 5722 5723 kfree(dma_conf); 5724 5725 stmmac_set_rx_mode(dev); 5726 } 5727 5728 dev->mtu = mtu; 5729 netdev_update_features(dev); 5730 5731 return 0; 5732 } 5733 5734 static netdev_features_t stmmac_fix_features(struct net_device *dev, 5735 netdev_features_t features) 5736 { 5737 struct stmmac_priv *priv = netdev_priv(dev); 5738 5739 if (priv->plat->rx_coe == STMMAC_RX_COE_NONE) 5740 features &= ~NETIF_F_RXCSUM; 5741 5742 if (!priv->plat->tx_coe) 5743 features &= ~NETIF_F_CSUM_MASK; 5744 5745 /* Some GMAC devices have a bugged Jumbo frame support that 5746 * needs to have the Tx COE disabled for oversized frames 5747 * (due to limited buffer sizes). In this case we disable 5748 * the TX csum insertion in the TDES and not use SF. 5749 */ 5750 if (priv->plat->bugged_jumbo && (dev->mtu > ETH_DATA_LEN)) 5751 features &= ~NETIF_F_CSUM_MASK; 5752 5753 /* Disable tso if asked by ethtool */ 5754 if ((priv->plat->flags & STMMAC_FLAG_TSO_EN) && (priv->dma_cap.tsoen)) { 5755 if (features & NETIF_F_TSO) 5756 priv->tso = true; 5757 else 5758 priv->tso = false; 5759 } 5760 5761 return features; 5762 } 5763 5764 static int stmmac_set_features(struct net_device *netdev, 5765 netdev_features_t features) 5766 { 5767 struct stmmac_priv *priv = netdev_priv(netdev); 5768 5769 /* Keep the COE Type in case of csum is supporting */ 5770 if (features & NETIF_F_RXCSUM) 5771 priv->hw->rx_csum = priv->plat->rx_coe; 5772 else 5773 priv->hw->rx_csum = 0; 5774 /* No check needed because rx_coe has been set before and it will be 5775 * fixed in case of issue. 
5776 */ 5777 stmmac_rx_ipc(priv, priv->hw); 5778 5779 if (priv->sph_cap) { 5780 bool sph_en = (priv->hw->rx_csum > 0) && priv->sph; 5781 u32 chan; 5782 5783 for (chan = 0; chan < priv->plat->rx_queues_to_use; chan++) 5784 stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan); 5785 } 5786 5787 return 0; 5788 } 5789 5790 static void stmmac_fpe_event_status(struct stmmac_priv *priv, int status) 5791 { 5792 struct stmmac_fpe_cfg *fpe_cfg = priv->plat->fpe_cfg; 5793 enum stmmac_fpe_state *lo_state = &fpe_cfg->lo_fpe_state; 5794 enum stmmac_fpe_state *lp_state = &fpe_cfg->lp_fpe_state; 5795 bool *hs_enable = &fpe_cfg->hs_enable; 5796 5797 if (status == FPE_EVENT_UNKNOWN || !*hs_enable) 5798 return; 5799 5800 /* If LP has sent verify mPacket, LP is FPE capable */ 5801 if ((status & FPE_EVENT_RVER) == FPE_EVENT_RVER) { 5802 if (*lp_state < FPE_STATE_CAPABLE) 5803 *lp_state = FPE_STATE_CAPABLE; 5804 5805 /* If user has requested FPE enable, quickly response */ 5806 if (*hs_enable) 5807 stmmac_fpe_send_mpacket(priv, priv->ioaddr, 5808 fpe_cfg, 5809 MPACKET_RESPONSE); 5810 } 5811 5812 /* If Local has sent verify mPacket, Local is FPE capable */ 5813 if ((status & FPE_EVENT_TVER) == FPE_EVENT_TVER) { 5814 if (*lo_state < FPE_STATE_CAPABLE) 5815 *lo_state = FPE_STATE_CAPABLE; 5816 } 5817 5818 /* If LP has sent response mPacket, LP is entering FPE ON */ 5819 if ((status & FPE_EVENT_RRSP) == FPE_EVENT_RRSP) 5820 *lp_state = FPE_STATE_ENTERING_ON; 5821 5822 /* If Local has sent response mPacket, Local is entering FPE ON */ 5823 if ((status & FPE_EVENT_TRSP) == FPE_EVENT_TRSP) 5824 *lo_state = FPE_STATE_ENTERING_ON; 5825 5826 if (!test_bit(__FPE_REMOVING, &priv->fpe_task_state) && 5827 !test_and_set_bit(__FPE_TASK_SCHED, &priv->fpe_task_state) && 5828 priv->fpe_wq) { 5829 queue_work(priv->fpe_wq, &priv->fpe_task); 5830 } 5831 } 5832 5833 static void stmmac_common_interrupt(struct stmmac_priv *priv) 5834 { 5835 u32 rx_cnt = priv->plat->rx_queues_to_use; 5836 u32 tx_cnt = priv->plat->tx_queues_to_use; 5837 u32 queues_count; 5838 u32 queue; 5839 bool xmac; 5840 5841 xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac; 5842 queues_count = (rx_cnt > tx_cnt) ? rx_cnt : tx_cnt; 5843 5844 if (priv->irq_wake) 5845 pm_wakeup_event(priv->device, 0); 5846 5847 if (priv->dma_cap.estsel) 5848 stmmac_est_irq_status(priv, priv->ioaddr, priv->dev, 5849 &priv->xstats, tx_cnt); 5850 5851 if (priv->dma_cap.fpesel) { 5852 int status = stmmac_fpe_irq_status(priv, priv->ioaddr, 5853 priv->dev); 5854 5855 stmmac_fpe_event_status(priv, status); 5856 } 5857 5858 /* To handle GMAC own interrupts */ 5859 if ((priv->plat->has_gmac) || xmac) { 5860 int status = stmmac_host_irq_status(priv, priv->hw, &priv->xstats); 5861 5862 if (unlikely(status)) { 5863 /* For LPI we need to save the tx status */ 5864 if (status & CORE_IRQ_TX_PATH_IN_LPI_MODE) 5865 priv->tx_path_in_lpi_mode = true; 5866 if (status & CORE_IRQ_TX_PATH_EXIT_LPI_MODE) 5867 priv->tx_path_in_lpi_mode = false; 5868 } 5869 5870 for (queue = 0; queue < queues_count; queue++) { 5871 status = stmmac_host_mtl_irq_status(priv, priv->hw, 5872 queue); 5873 } 5874 5875 /* PCS link status */ 5876 if (priv->hw->pcs && 5877 !(priv->plat->flags & STMMAC_FLAG_HAS_INTEGRATED_PCS)) { 5878 if (priv->xstats.pcs_link) 5879 netif_carrier_on(priv->dev); 5880 else 5881 netif_carrier_off(priv->dev); 5882 } 5883 5884 stmmac_timestamp_interrupt(priv, priv); 5885 } 5886 } 5887 5888 /** 5889 * stmmac_interrupt - main ISR 5890 * @irq: interrupt number. 5891 * @dev_id: to pass the net device pointer. 
5892 * Description: this is the main driver interrupt service routine. 5893 * It can call: 5894 * o DMA service routine (to manage incoming frame reception and transmission 5895 * status) 5896 * o Core interrupts to manage: remote wake-up, management counter, LPI 5897 * interrupts. 5898 */ 5899 static irqreturn_t stmmac_interrupt(int irq, void *dev_id) 5900 { 5901 struct net_device *dev = (struct net_device *)dev_id; 5902 struct stmmac_priv *priv = netdev_priv(dev); 5903 5904 /* Check if adapter is up */ 5905 if (test_bit(STMMAC_DOWN, &priv->state)) 5906 return IRQ_HANDLED; 5907 5908 /* Check if a fatal error happened */ 5909 if (stmmac_safety_feat_interrupt(priv)) 5910 return IRQ_HANDLED; 5911 5912 /* To handle Common interrupts */ 5913 stmmac_common_interrupt(priv); 5914 5915 /* To handle DMA interrupts */ 5916 stmmac_dma_interrupt(priv); 5917 5918 return IRQ_HANDLED; 5919 } 5920 5921 static irqreturn_t stmmac_mac_interrupt(int irq, void *dev_id) 5922 { 5923 struct net_device *dev = (struct net_device *)dev_id; 5924 struct stmmac_priv *priv = netdev_priv(dev); 5925 5926 if (unlikely(!dev)) { 5927 netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__); 5928 return IRQ_NONE; 5929 } 5930 5931 /* Check if adapter is up */ 5932 if (test_bit(STMMAC_DOWN, &priv->state)) 5933 return IRQ_HANDLED; 5934 5935 /* To handle Common interrupts */ 5936 stmmac_common_interrupt(priv); 5937 5938 return IRQ_HANDLED; 5939 } 5940 5941 static irqreturn_t stmmac_safety_interrupt(int irq, void *dev_id) 5942 { 5943 struct net_device *dev = (struct net_device *)dev_id; 5944 struct stmmac_priv *priv = netdev_priv(dev); 5945 5946 if (unlikely(!dev)) { 5947 netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__); 5948 return IRQ_NONE; 5949 } 5950 5951 /* Check if adapter is up */ 5952 if (test_bit(STMMAC_DOWN, &priv->state)) 5953 return IRQ_HANDLED; 5954 5955 /* Check if a fatal error happened */ 5956 stmmac_safety_feat_interrupt(priv); 5957 5958 return IRQ_HANDLED; 5959 } 5960 5961 static irqreturn_t stmmac_msi_intr_tx(int irq, void *data) 5962 { 5963 struct stmmac_tx_queue *tx_q = (struct stmmac_tx_queue *)data; 5964 struct stmmac_dma_conf *dma_conf; 5965 int chan = tx_q->queue_index; 5966 struct stmmac_priv *priv; 5967 int status; 5968 5969 dma_conf = container_of(tx_q, struct stmmac_dma_conf, tx_queue[chan]); 5970 priv = container_of(dma_conf, struct stmmac_priv, dma_conf); 5971 5972 if (unlikely(!data)) { 5973 netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__); 5974 return IRQ_NONE; 5975 } 5976 5977 /* Check if adapter is up */ 5978 if (test_bit(STMMAC_DOWN, &priv->state)) 5979 return IRQ_HANDLED; 5980 5981 status = stmmac_napi_check(priv, chan, DMA_DIR_TX); 5982 5983 if (unlikely(status & tx_hard_error_bump_tc)) { 5984 /* Try to bump up the dma threshold on this failure */ 5985 stmmac_bump_dma_threshold(priv, chan); 5986 } else if (unlikely(status == tx_hard_error)) { 5987 stmmac_tx_err(priv, chan); 5988 } 5989 5990 return IRQ_HANDLED; 5991 } 5992 5993 static irqreturn_t stmmac_msi_intr_rx(int irq, void *data) 5994 { 5995 struct stmmac_rx_queue *rx_q = (struct stmmac_rx_queue *)data; 5996 struct stmmac_dma_conf *dma_conf; 5997 int chan = rx_q->queue_index; 5998 struct stmmac_priv *priv; 5999 6000 dma_conf = container_of(rx_q, struct stmmac_dma_conf, rx_queue[chan]); 6001 priv = container_of(dma_conf, struct stmmac_priv, dma_conf); 6002 6003 if (unlikely(!data)) { 6004 netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__); 6005 return IRQ_NONE; 6006 } 6007 6008 /* Check if adapter is 
up */ 6009 if (test_bit(STMMAC_DOWN, &priv->state)) 6010 return IRQ_HANDLED; 6011 6012 stmmac_napi_check(priv, chan, DMA_DIR_RX); 6013 6014 return IRQ_HANDLED; 6015 } 6016 6017 /** 6018 * stmmac_ioctl - Entry point for the Ioctl 6019 * @dev: Device pointer. 6020 * @rq: An IOCTL specific structure, that can contain a pointer to 6021 * a proprietary structure used to pass information to the driver. 6022 * @cmd: IOCTL command 6023 * Description: 6024 * Currently it supports the phy_mii_ioctl(...) and HW time stamping. 6025 */ 6026 static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) 6027 { 6028 struct stmmac_priv *priv = netdev_priv(dev); 6029 int ret = -EOPNOTSUPP; 6030 6031 if (!netif_running(dev)) 6032 return -EINVAL; 6033 6034 switch (cmd) { 6035 case SIOCGMIIPHY: 6036 case SIOCGMIIREG: 6037 case SIOCSMIIREG: 6038 ret = phylink_mii_ioctl(priv->phylink, rq, cmd); 6039 break; 6040 case SIOCSHWTSTAMP: 6041 ret = stmmac_hwtstamp_set(dev, rq); 6042 break; 6043 case SIOCGHWTSTAMP: 6044 ret = stmmac_hwtstamp_get(dev, rq); 6045 break; 6046 default: 6047 break; 6048 } 6049 6050 return ret; 6051 } 6052 6053 static int stmmac_setup_tc_block_cb(enum tc_setup_type type, void *type_data, 6054 void *cb_priv) 6055 { 6056 struct stmmac_priv *priv = cb_priv; 6057 int ret = -EOPNOTSUPP; 6058 6059 if (!tc_cls_can_offload_and_chain0(priv->dev, type_data)) 6060 return ret; 6061 6062 __stmmac_disable_all_queues(priv); 6063 6064 switch (type) { 6065 case TC_SETUP_CLSU32: 6066 ret = stmmac_tc_setup_cls_u32(priv, priv, type_data); 6067 break; 6068 case TC_SETUP_CLSFLOWER: 6069 ret = stmmac_tc_setup_cls(priv, priv, type_data); 6070 break; 6071 default: 6072 break; 6073 } 6074 6075 stmmac_enable_all_queues(priv); 6076 return ret; 6077 } 6078 6079 static LIST_HEAD(stmmac_block_cb_list); 6080 6081 static int stmmac_setup_tc(struct net_device *ndev, enum tc_setup_type type, 6082 void *type_data) 6083 { 6084 struct stmmac_priv *priv = netdev_priv(ndev); 6085 6086 switch (type) { 6087 case TC_QUERY_CAPS: 6088 return stmmac_tc_query_caps(priv, priv, type_data); 6089 case TC_SETUP_BLOCK: 6090 return flow_block_cb_setup_simple(type_data, 6091 &stmmac_block_cb_list, 6092 stmmac_setup_tc_block_cb, 6093 priv, priv, true); 6094 case TC_SETUP_QDISC_CBS: 6095 return stmmac_tc_setup_cbs(priv, priv, type_data); 6096 case TC_SETUP_QDISC_TAPRIO: 6097 return stmmac_tc_setup_taprio(priv, priv, type_data); 6098 case TC_SETUP_QDISC_ETF: 6099 return stmmac_tc_setup_etf(priv, priv, type_data); 6100 default: 6101 return -EOPNOTSUPP; 6102 } 6103 } 6104 6105 static u16 stmmac_select_queue(struct net_device *dev, struct sk_buff *skb, 6106 struct net_device *sb_dev) 6107 { 6108 int gso = skb_shinfo(skb)->gso_type; 6109 6110 if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6 | SKB_GSO_UDP_L4)) { 6111 /* 6112 * There is no way to determine the number of TSO/USO 6113 * capable Queues. Let's always use Queue 0 6114 * because if TSO/USO is supported then at least this 6115 * one will be capable.
6116 */ 6117 return 0; 6118 } 6119 6120 return netdev_pick_tx(dev, skb, NULL) % dev->real_num_tx_queues; 6121 } 6122 6123 static int stmmac_set_mac_address(struct net_device *ndev, void *addr) 6124 { 6125 struct stmmac_priv *priv = netdev_priv(ndev); 6126 int ret = 0; 6127 6128 ret = pm_runtime_resume_and_get(priv->device); 6129 if (ret < 0) 6130 return ret; 6131 6132 ret = eth_mac_addr(ndev, addr); 6133 if (ret) 6134 goto set_mac_error; 6135 6136 stmmac_set_umac_addr(priv, priv->hw, ndev->dev_addr, 0); 6137 6138 set_mac_error: 6139 pm_runtime_put(priv->device); 6140 6141 return ret; 6142 } 6143 6144 #ifdef CONFIG_DEBUG_FS 6145 static struct dentry *stmmac_fs_dir; 6146 6147 static void sysfs_display_ring(void *head, int size, int extend_desc, 6148 struct seq_file *seq, dma_addr_t dma_phy_addr) 6149 { 6150 int i; 6151 struct dma_extended_desc *ep = (struct dma_extended_desc *)head; 6152 struct dma_desc *p = (struct dma_desc *)head; 6153 dma_addr_t dma_addr; 6154 6155 for (i = 0; i < size; i++) { 6156 if (extend_desc) { 6157 dma_addr = dma_phy_addr + i * sizeof(*ep); 6158 seq_printf(seq, "%d [%pad]: 0x%x 0x%x 0x%x 0x%x\n", 6159 i, &dma_addr, 6160 le32_to_cpu(ep->basic.des0), 6161 le32_to_cpu(ep->basic.des1), 6162 le32_to_cpu(ep->basic.des2), 6163 le32_to_cpu(ep->basic.des3)); 6164 ep++; 6165 } else { 6166 dma_addr = dma_phy_addr + i * sizeof(*p); 6167 seq_printf(seq, "%d [%pad]: 0x%x 0x%x 0x%x 0x%x\n", 6168 i, &dma_addr, 6169 le32_to_cpu(p->des0), le32_to_cpu(p->des1), 6170 le32_to_cpu(p->des2), le32_to_cpu(p->des3)); 6171 p++; 6172 } 6173 seq_printf(seq, "\n"); 6174 } 6175 } 6176 6177 static int stmmac_rings_status_show(struct seq_file *seq, void *v) 6178 { 6179 struct net_device *dev = seq->private; 6180 struct stmmac_priv *priv = netdev_priv(dev); 6181 u32 rx_count = priv->plat->rx_queues_to_use; 6182 u32 tx_count = priv->plat->tx_queues_to_use; 6183 u32 queue; 6184 6185 if ((dev->flags & IFF_UP) == 0) 6186 return 0; 6187 6188 for (queue = 0; queue < rx_count; queue++) { 6189 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue]; 6190 6191 seq_printf(seq, "RX Queue %d:\n", queue); 6192 6193 if (priv->extend_desc) { 6194 seq_printf(seq, "Extended descriptor ring:\n"); 6195 sysfs_display_ring((void *)rx_q->dma_erx, 6196 priv->dma_conf.dma_rx_size, 1, seq, rx_q->dma_rx_phy); 6197 } else { 6198 seq_printf(seq, "Descriptor ring:\n"); 6199 sysfs_display_ring((void *)rx_q->dma_rx, 6200 priv->dma_conf.dma_rx_size, 0, seq, rx_q->dma_rx_phy); 6201 } 6202 } 6203 6204 for (queue = 0; queue < tx_count; queue++) { 6205 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue]; 6206 6207 seq_printf(seq, "TX Queue %d:\n", queue); 6208 6209 if (priv->extend_desc) { 6210 seq_printf(seq, "Extended descriptor ring:\n"); 6211 sysfs_display_ring((void *)tx_q->dma_etx, 6212 priv->dma_conf.dma_tx_size, 1, seq, tx_q->dma_tx_phy); 6213 } else if (!(tx_q->tbs & STMMAC_TBS_AVAIL)) { 6214 seq_printf(seq, "Descriptor ring:\n"); 6215 sysfs_display_ring((void *)tx_q->dma_tx, 6216 priv->dma_conf.dma_tx_size, 0, seq, tx_q->dma_tx_phy); 6217 } 6218 } 6219 6220 return 0; 6221 } 6222 DEFINE_SHOW_ATTRIBUTE(stmmac_rings_status); 6223 6224 static int stmmac_dma_cap_show(struct seq_file *seq, void *v) 6225 { 6226 static const char * const dwxgmac_timestamp_source[] = { 6227 "None", 6228 "Internal", 6229 "External", 6230 "Both", 6231 }; 6232 static const char * const dwxgmac_safety_feature_desc[] = { 6233 "No", 6234 "All Safety Features with ECC and Parity", 6235 "All Safety Features without ECC or Parity", 6236 
"All Safety Features with Parity Only", 6237 "ECC Only", 6238 "UNDEFINED", 6239 "UNDEFINED", 6240 "UNDEFINED", 6241 }; 6242 struct net_device *dev = seq->private; 6243 struct stmmac_priv *priv = netdev_priv(dev); 6244 6245 if (!priv->hw_cap_support) { 6246 seq_printf(seq, "DMA HW features not supported\n"); 6247 return 0; 6248 } 6249 6250 seq_printf(seq, "==============================\n"); 6251 seq_printf(seq, "\tDMA HW features\n"); 6252 seq_printf(seq, "==============================\n"); 6253 6254 seq_printf(seq, "\t10/100 Mbps: %s\n", 6255 (priv->dma_cap.mbps_10_100) ? "Y" : "N"); 6256 seq_printf(seq, "\t1000 Mbps: %s\n", 6257 (priv->dma_cap.mbps_1000) ? "Y" : "N"); 6258 seq_printf(seq, "\tHalf duplex: %s\n", 6259 (priv->dma_cap.half_duplex) ? "Y" : "N"); 6260 if (priv->plat->has_xgmac) { 6261 seq_printf(seq, 6262 "\tNumber of Additional MAC address registers: %d\n", 6263 priv->dma_cap.multi_addr); 6264 } else { 6265 seq_printf(seq, "\tHash Filter: %s\n", 6266 (priv->dma_cap.hash_filter) ? "Y" : "N"); 6267 seq_printf(seq, "\tMultiple MAC address registers: %s\n", 6268 (priv->dma_cap.multi_addr) ? "Y" : "N"); 6269 } 6270 seq_printf(seq, "\tPCS (TBI/SGMII/RTBI PHY interfaces): %s\n", 6271 (priv->dma_cap.pcs) ? "Y" : "N"); 6272 seq_printf(seq, "\tSMA (MDIO) Interface: %s\n", 6273 (priv->dma_cap.sma_mdio) ? "Y" : "N"); 6274 seq_printf(seq, "\tPMT Remote wake up: %s\n", 6275 (priv->dma_cap.pmt_remote_wake_up) ? "Y" : "N"); 6276 seq_printf(seq, "\tPMT Magic Frame: %s\n", 6277 (priv->dma_cap.pmt_magic_frame) ? "Y" : "N"); 6278 seq_printf(seq, "\tRMON module: %s\n", 6279 (priv->dma_cap.rmon) ? "Y" : "N"); 6280 seq_printf(seq, "\tIEEE 1588-2002 Time Stamp: %s\n", 6281 (priv->dma_cap.time_stamp) ? "Y" : "N"); 6282 seq_printf(seq, "\tIEEE 1588-2008 Advanced Time Stamp: %s\n", 6283 (priv->dma_cap.atime_stamp) ? "Y" : "N"); 6284 if (priv->plat->has_xgmac) 6285 seq_printf(seq, "\tTimestamp System Time Source: %s\n", 6286 dwxgmac_timestamp_source[priv->dma_cap.tssrc]); 6287 seq_printf(seq, "\t802.3az - Energy-Efficient Ethernet (EEE): %s\n", 6288 (priv->dma_cap.eee) ? "Y" : "N"); 6289 seq_printf(seq, "\tAV features: %s\n", (priv->dma_cap.av) ? "Y" : "N"); 6290 seq_printf(seq, "\tChecksum Offload in TX: %s\n", 6291 (priv->dma_cap.tx_coe) ? "Y" : "N"); 6292 if (priv->synopsys_id >= DWMAC_CORE_4_00 || 6293 priv->plat->has_xgmac) { 6294 seq_printf(seq, "\tIP Checksum Offload in RX: %s\n", 6295 (priv->dma_cap.rx_coe) ? "Y" : "N"); 6296 } else { 6297 seq_printf(seq, "\tIP Checksum Offload (type1) in RX: %s\n", 6298 (priv->dma_cap.rx_coe_type1) ? "Y" : "N"); 6299 seq_printf(seq, "\tIP Checksum Offload (type2) in RX: %s\n", 6300 (priv->dma_cap.rx_coe_type2) ? "Y" : "N"); 6301 seq_printf(seq, "\tRXFIFO > 2048bytes: %s\n", 6302 (priv->dma_cap.rxfifo_over_2048) ? "Y" : "N"); 6303 } 6304 seq_printf(seq, "\tNumber of Additional RX channel: %d\n", 6305 priv->dma_cap.number_rx_channel); 6306 seq_printf(seq, "\tNumber of Additional TX channel: %d\n", 6307 priv->dma_cap.number_tx_channel); 6308 seq_printf(seq, "\tNumber of Additional RX queues: %d\n", 6309 priv->dma_cap.number_rx_queues); 6310 seq_printf(seq, "\tNumber of Additional TX queues: %d\n", 6311 priv->dma_cap.number_tx_queues); 6312 seq_printf(seq, "\tEnhanced descriptors: %s\n", 6313 (priv->dma_cap.enh_desc) ? "Y" : "N"); 6314 seq_printf(seq, "\tTX Fifo Size: %d\n", priv->dma_cap.tx_fifo_size); 6315 seq_printf(seq, "\tRX Fifo Size: %d\n", priv->dma_cap.rx_fifo_size); 6316 seq_printf(seq, "\tHash Table Size: %lu\n", priv->dma_cap.hash_tb_sz ? 
6317 (BIT(priv->dma_cap.hash_tb_sz) << 5) : 0); 6318 seq_printf(seq, "\tTSO: %s\n", priv->dma_cap.tsoen ? "Y" : "N"); 6319 seq_printf(seq, "\tNumber of PPS Outputs: %d\n", 6320 priv->dma_cap.pps_out_num); 6321 seq_printf(seq, "\tSafety Features: %s\n", 6322 dwxgmac_safety_feature_desc[priv->dma_cap.asp]); 6323 seq_printf(seq, "\tFlexible RX Parser: %s\n", 6324 priv->dma_cap.frpsel ? "Y" : "N"); 6325 seq_printf(seq, "\tEnhanced Addressing: %d\n", 6326 priv->dma_cap.host_dma_width); 6327 seq_printf(seq, "\tReceive Side Scaling: %s\n", 6328 priv->dma_cap.rssen ? "Y" : "N"); 6329 seq_printf(seq, "\tVLAN Hash Filtering: %s\n", 6330 priv->dma_cap.vlhash ? "Y" : "N"); 6331 seq_printf(seq, "\tSplit Header: %s\n", 6332 priv->dma_cap.sphen ? "Y" : "N"); 6333 seq_printf(seq, "\tVLAN TX Insertion: %s\n", 6334 priv->dma_cap.vlins ? "Y" : "N"); 6335 seq_printf(seq, "\tDouble VLAN: %s\n", 6336 priv->dma_cap.dvlan ? "Y" : "N"); 6337 seq_printf(seq, "\tNumber of L3/L4 Filters: %d\n", 6338 priv->dma_cap.l3l4fnum); 6339 seq_printf(seq, "\tARP Offloading: %s\n", 6340 priv->dma_cap.arpoffsel ? "Y" : "N"); 6341 seq_printf(seq, "\tEnhancements to Scheduled Traffic (EST): %s\n", 6342 priv->dma_cap.estsel ? "Y" : "N"); 6343 seq_printf(seq, "\tFrame Preemption (FPE): %s\n", 6344 priv->dma_cap.fpesel ? "Y" : "N"); 6345 seq_printf(seq, "\tTime-Based Scheduling (TBS): %s\n", 6346 priv->dma_cap.tbssel ? "Y" : "N"); 6347 seq_printf(seq, "\tNumber of DMA Channels Enabled for TBS: %d\n", 6348 priv->dma_cap.tbs_ch_num); 6349 seq_printf(seq, "\tPer-Stream Filtering: %s\n", 6350 priv->dma_cap.sgfsel ? "Y" : "N"); 6351 seq_printf(seq, "\tTX Timestamp FIFO Depth: %lu\n", 6352 BIT(priv->dma_cap.ttsfd) >> 1); 6353 seq_printf(seq, "\tNumber of Traffic Classes: %d\n", 6354 priv->dma_cap.numtc); 6355 seq_printf(seq, "\tDCB Feature: %s\n", 6356 priv->dma_cap.dcben ? "Y" : "N"); 6357 seq_printf(seq, "\tIEEE 1588 High Word Register: %s\n", 6358 priv->dma_cap.advthword ? "Y" : "N"); 6359 seq_printf(seq, "\tPTP Offload: %s\n", 6360 priv->dma_cap.ptoen ? "Y" : "N"); 6361 seq_printf(seq, "\tOne-Step Timestamping: %s\n", 6362 priv->dma_cap.osten ? "Y" : "N"); 6363 seq_printf(seq, "\tPriority-Based Flow Control: %s\n", 6364 priv->dma_cap.pfcen ? "Y" : "N"); 6365 seq_printf(seq, "\tNumber of Flexible RX Parser Instructions: %lu\n", 6366 BIT(priv->dma_cap.frpes) << 6); 6367 seq_printf(seq, "\tNumber of Flexible RX Parser Parsable Bytes: %lu\n", 6368 BIT(priv->dma_cap.frpbs) << 6); 6369 seq_printf(seq, "\tParallel Instruction Processor Engines: %d\n", 6370 priv->dma_cap.frppipe_num); 6371 seq_printf(seq, "\tNumber of Extended VLAN Tag Filters: %lu\n", 6372 priv->dma_cap.nrvf_num ? 6373 (BIT(priv->dma_cap.nrvf_num) << 1) : 0); 6374 seq_printf(seq, "\tWidth of the Time Interval Field in GCL: %d\n", 6375 priv->dma_cap.estwid ? 4 * priv->dma_cap.estwid + 12 : 0); 6376 seq_printf(seq, "\tDepth of GCL: %lu\n", 6377 priv->dma_cap.estdep ? (BIT(priv->dma_cap.estdep) << 5) : 0); 6378 seq_printf(seq, "\tQueue/Channel-Based VLAN Tag Insertion on TX: %s\n", 6379 priv->dma_cap.cbtisel ? "Y" : "N"); 6380 seq_printf(seq, "\tNumber of Auxiliary Snapshot Inputs: %d\n", 6381 priv->dma_cap.aux_snapshot_n); 6382 seq_printf(seq, "\tOne-Step Timestamping for PTP over UDP/IP: %s\n", 6383 priv->dma_cap.pou_ost_en ? "Y" : "N"); 6384 seq_printf(seq, "\tEnhanced DMA: %s\n", 6385 priv->dma_cap.edma ? "Y" : "N"); 6386 seq_printf(seq, "\tDifferent Descriptor Cache: %s\n", 6387 priv->dma_cap.ediffc ? 
"Y" : "N"); 6388 seq_printf(seq, "\tVxLAN/NVGRE: %s\n", 6389 priv->dma_cap.vxn ? "Y" : "N"); 6390 seq_printf(seq, "\tDebug Memory Interface: %s\n", 6391 priv->dma_cap.dbgmem ? "Y" : "N"); 6392 seq_printf(seq, "\tNumber of Policing Counters: %lu\n", 6393 priv->dma_cap.pcsel ? BIT(priv->dma_cap.pcsel + 3) : 0); 6394 return 0; 6395 } 6396 DEFINE_SHOW_ATTRIBUTE(stmmac_dma_cap); 6397 6398 /* Use network device events to rename debugfs file entries. 6399 */ 6400 static int stmmac_device_event(struct notifier_block *unused, 6401 unsigned long event, void *ptr) 6402 { 6403 struct net_device *dev = netdev_notifier_info_to_dev(ptr); 6404 struct stmmac_priv *priv = netdev_priv(dev); 6405 6406 if (dev->netdev_ops != &stmmac_netdev_ops) 6407 goto done; 6408 6409 switch (event) { 6410 case NETDEV_CHANGENAME: 6411 if (priv->dbgfs_dir) 6412 priv->dbgfs_dir = debugfs_rename(stmmac_fs_dir, 6413 priv->dbgfs_dir, 6414 stmmac_fs_dir, 6415 dev->name); 6416 break; 6417 } 6418 done: 6419 return NOTIFY_DONE; 6420 } 6421 6422 static struct notifier_block stmmac_notifier = { 6423 .notifier_call = stmmac_device_event, 6424 }; 6425 6426 static void stmmac_init_fs(struct net_device *dev) 6427 { 6428 struct stmmac_priv *priv = netdev_priv(dev); 6429 6430 rtnl_lock(); 6431 6432 /* Create per netdev entries */ 6433 priv->dbgfs_dir = debugfs_create_dir(dev->name, stmmac_fs_dir); 6434 6435 /* Entry to report DMA RX/TX rings */ 6436 debugfs_create_file("descriptors_status", 0444, priv->dbgfs_dir, dev, 6437 &stmmac_rings_status_fops); 6438 6439 /* Entry to report the DMA HW features */ 6440 debugfs_create_file("dma_cap", 0444, priv->dbgfs_dir, dev, 6441 &stmmac_dma_cap_fops); 6442 6443 rtnl_unlock(); 6444 } 6445 6446 static void stmmac_exit_fs(struct net_device *dev) 6447 { 6448 struct stmmac_priv *priv = netdev_priv(dev); 6449 6450 debugfs_remove_recursive(priv->dbgfs_dir); 6451 } 6452 #endif /* CONFIG_DEBUG_FS */ 6453 6454 static u32 stmmac_vid_crc32_le(__le16 vid_le) 6455 { 6456 unsigned char *data = (unsigned char *)&vid_le; 6457 unsigned char data_byte = 0; 6458 u32 crc = ~0x0; 6459 u32 temp = 0; 6460 int i, bits; 6461 6462 bits = get_bitmask_order(VLAN_VID_MASK); 6463 for (i = 0; i < bits; i++) { 6464 if ((i % 8) == 0) 6465 data_byte = data[i / 8]; 6466 6467 temp = ((crc & 1) ^ data_byte) & 1; 6468 crc >>= 1; 6469 data_byte >>= 1; 6470 6471 if (temp) 6472 crc ^= 0xedb88320; 6473 } 6474 6475 return crc; 6476 } 6477 6478 static int stmmac_vlan_update(struct stmmac_priv *priv, bool is_double) 6479 { 6480 u32 crc, hash = 0; 6481 __le16 pmatch = 0; 6482 int count = 0; 6483 u16 vid = 0; 6484 6485 for_each_set_bit(vid, priv->active_vlans, VLAN_N_VID) { 6486 __le16 vid_le = cpu_to_le16(vid); 6487 crc = bitrev32(~stmmac_vid_crc32_le(vid_le)) >> 28; 6488 hash |= (1 << crc); 6489 count++; 6490 } 6491 6492 if (!priv->dma_cap.vlhash) { 6493 if (count > 2) /* VID = 0 always passes filter */ 6494 return -EOPNOTSUPP; 6495 6496 pmatch = cpu_to_le16(vid); 6497 hash = 0; 6498 } 6499 6500 return stmmac_update_vlan_hash(priv, priv->hw, hash, pmatch, is_double); 6501 } 6502 6503 static int stmmac_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid) 6504 { 6505 struct stmmac_priv *priv = netdev_priv(ndev); 6506 bool is_double = false; 6507 int ret; 6508 6509 ret = pm_runtime_resume_and_get(priv->device); 6510 if (ret < 0) 6511 return ret; 6512 6513 if (be16_to_cpu(proto) == ETH_P_8021AD) 6514 is_double = true; 6515 6516 set_bit(vid, priv->active_vlans); 6517 ret = stmmac_vlan_update(priv, is_double); 6518 if (ret) { 6519 
clear_bit(vid, priv->active_vlans); 6520 goto err_pm_put; 6521 } 6522 6523 if (priv->hw->num_vlan) { 6524 ret = stmmac_add_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid); 6525 if (ret) 6526 goto err_pm_put; 6527 } 6528 err_pm_put: 6529 pm_runtime_put(priv->device); 6530 6531 return ret; 6532 } 6533 6534 static int stmmac_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid) 6535 { 6536 struct stmmac_priv *priv = netdev_priv(ndev); 6537 bool is_double = false; 6538 int ret; 6539 6540 ret = pm_runtime_resume_and_get(priv->device); 6541 if (ret < 0) 6542 return ret; 6543 6544 if (be16_to_cpu(proto) == ETH_P_8021AD) 6545 is_double = true; 6546 6547 clear_bit(vid, priv->active_vlans); 6548 6549 if (priv->hw->num_vlan) { 6550 ret = stmmac_del_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid); 6551 if (ret) 6552 goto del_vlan_error; 6553 } 6554 6555 ret = stmmac_vlan_update(priv, is_double); 6556 6557 del_vlan_error: 6558 pm_runtime_put(priv->device); 6559 6560 return ret; 6561 } 6562 6563 static int stmmac_bpf(struct net_device *dev, struct netdev_bpf *bpf) 6564 { 6565 struct stmmac_priv *priv = netdev_priv(dev); 6566 6567 switch (bpf->command) { 6568 case XDP_SETUP_PROG: 6569 return stmmac_xdp_set_prog(priv, bpf->prog, bpf->extack); 6570 case XDP_SETUP_XSK_POOL: 6571 return stmmac_xdp_setup_pool(priv, bpf->xsk.pool, 6572 bpf->xsk.queue_id); 6573 default: 6574 return -EOPNOTSUPP; 6575 } 6576 } 6577 6578 static int stmmac_xdp_xmit(struct net_device *dev, int num_frames, 6579 struct xdp_frame **frames, u32 flags) 6580 { 6581 struct stmmac_priv *priv = netdev_priv(dev); 6582 int cpu = smp_processor_id(); 6583 struct netdev_queue *nq; 6584 int i, nxmit = 0; 6585 int queue; 6586 6587 if (unlikely(test_bit(STMMAC_DOWN, &priv->state))) 6588 return -ENETDOWN; 6589 6590 if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) 6591 return -EINVAL; 6592 6593 queue = stmmac_xdp_get_tx_queue(priv, cpu); 6594 nq = netdev_get_tx_queue(priv->dev, queue); 6595 6596 __netif_tx_lock(nq, cpu); 6597 /* Avoids TX time-out as we are sharing with slow path */ 6598 txq_trans_cond_update(nq); 6599 6600 for (i = 0; i < num_frames; i++) { 6601 int res; 6602 6603 res = stmmac_xdp_xmit_xdpf(priv, queue, frames[i], true); 6604 if (res == STMMAC_XDP_CONSUMED) 6605 break; 6606 6607 nxmit++; 6608 } 6609 6610 if (flags & XDP_XMIT_FLUSH) { 6611 stmmac_flush_tx_descriptors(priv, queue); 6612 stmmac_tx_timer_arm(priv, queue); 6613 } 6614 6615 __netif_tx_unlock(nq); 6616 6617 return nxmit; 6618 } 6619 6620 void stmmac_disable_rx_queue(struct stmmac_priv *priv, u32 queue) 6621 { 6622 struct stmmac_channel *ch = &priv->channel[queue]; 6623 unsigned long flags; 6624 6625 spin_lock_irqsave(&ch->lock, flags); 6626 stmmac_disable_dma_irq(priv, priv->ioaddr, queue, 1, 0); 6627 spin_unlock_irqrestore(&ch->lock, flags); 6628 6629 stmmac_stop_rx_dma(priv, queue); 6630 __free_dma_rx_desc_resources(priv, &priv->dma_conf, queue); 6631 } 6632 6633 void stmmac_enable_rx_queue(struct stmmac_priv *priv, u32 queue) 6634 { 6635 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue]; 6636 struct stmmac_channel *ch = &priv->channel[queue]; 6637 unsigned long flags; 6638 u32 buf_size; 6639 int ret; 6640 6641 ret = __alloc_dma_rx_desc_resources(priv, &priv->dma_conf, queue); 6642 if (ret) { 6643 netdev_err(priv->dev, "Failed to alloc RX desc.\n"); 6644 return; 6645 } 6646 6647 ret = __init_dma_rx_desc_rings(priv, &priv->dma_conf, queue, GFP_KERNEL); 6648 if (ret) { 6649 __free_dma_rx_desc_resources(priv, &priv->dma_conf, queue); 6650 
netdev_err(priv->dev, "Failed to init RX desc.\n"); 6651 return; 6652 } 6653 6654 stmmac_reset_rx_queue(priv, queue); 6655 stmmac_clear_rx_descriptors(priv, &priv->dma_conf, queue); 6656 6657 stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg, 6658 rx_q->dma_rx_phy, rx_q->queue_index); 6659 6660 rx_q->rx_tail_addr = rx_q->dma_rx_phy + (rx_q->buf_alloc_num * 6661 sizeof(struct dma_desc)); 6662 stmmac_set_rx_tail_ptr(priv, priv->ioaddr, 6663 rx_q->rx_tail_addr, rx_q->queue_index); 6664 6665 if (rx_q->xsk_pool && rx_q->buf_alloc_num) { 6666 buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool); 6667 stmmac_set_dma_bfsize(priv, priv->ioaddr, 6668 buf_size, 6669 rx_q->queue_index); 6670 } else { 6671 stmmac_set_dma_bfsize(priv, priv->ioaddr, 6672 priv->dma_conf.dma_buf_sz, 6673 rx_q->queue_index); 6674 } 6675 6676 stmmac_start_rx_dma(priv, queue); 6677 6678 spin_lock_irqsave(&ch->lock, flags); 6679 stmmac_enable_dma_irq(priv, priv->ioaddr, queue, 1, 0); 6680 spin_unlock_irqrestore(&ch->lock, flags); 6681 } 6682 6683 void stmmac_disable_tx_queue(struct stmmac_priv *priv, u32 queue) 6684 { 6685 struct stmmac_channel *ch = &priv->channel[queue]; 6686 unsigned long flags; 6687 6688 spin_lock_irqsave(&ch->lock, flags); 6689 stmmac_disable_dma_irq(priv, priv->ioaddr, queue, 0, 1); 6690 spin_unlock_irqrestore(&ch->lock, flags); 6691 6692 stmmac_stop_tx_dma(priv, queue); 6693 __free_dma_tx_desc_resources(priv, &priv->dma_conf, queue); 6694 } 6695 6696 void stmmac_enable_tx_queue(struct stmmac_priv *priv, u32 queue) 6697 { 6698 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue]; 6699 struct stmmac_channel *ch = &priv->channel[queue]; 6700 unsigned long flags; 6701 int ret; 6702 6703 ret = __alloc_dma_tx_desc_resources(priv, &priv->dma_conf, queue); 6704 if (ret) { 6705 netdev_err(priv->dev, "Failed to alloc TX desc.\n"); 6706 return; 6707 } 6708 6709 ret = __init_dma_tx_desc_rings(priv, &priv->dma_conf, queue); 6710 if (ret) { 6711 __free_dma_tx_desc_resources(priv, &priv->dma_conf, queue); 6712 netdev_err(priv->dev, "Failed to init TX desc.\n"); 6713 return; 6714 } 6715 6716 stmmac_reset_tx_queue(priv, queue); 6717 stmmac_clear_tx_descriptors(priv, &priv->dma_conf, queue); 6718 6719 stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg, 6720 tx_q->dma_tx_phy, tx_q->queue_index); 6721 6722 if (tx_q->tbs & STMMAC_TBS_AVAIL) 6723 stmmac_enable_tbs(priv, priv->ioaddr, 1, tx_q->queue_index); 6724 6725 tx_q->tx_tail_addr = tx_q->dma_tx_phy; 6726 stmmac_set_tx_tail_ptr(priv, priv->ioaddr, 6727 tx_q->tx_tail_addr, tx_q->queue_index); 6728 6729 stmmac_start_tx_dma(priv, queue); 6730 6731 spin_lock_irqsave(&ch->lock, flags); 6732 stmmac_enable_dma_irq(priv, priv->ioaddr, queue, 0, 1); 6733 spin_unlock_irqrestore(&ch->lock, flags); 6734 } 6735 6736 void stmmac_xdp_release(struct net_device *dev) 6737 { 6738 struct stmmac_priv *priv = netdev_priv(dev); 6739 u32 chan; 6740 6741 /* Ensure tx function is not running */ 6742 netif_tx_disable(dev); 6743 6744 /* Disable NAPI process */ 6745 stmmac_disable_all_queues(priv); 6746 6747 for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) 6748 hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer); 6749 6750 /* Free the IRQ lines */ 6751 stmmac_free_irq(dev, REQ_IRQ_ERR_ALL, 0); 6752 6753 /* Stop TX/RX DMA channels */ 6754 stmmac_stop_all_dma(priv); 6755 6756 /* Release and free the Rx/Tx resources */ 6757 free_dma_desc_resources(priv, &priv->dma_conf); 6758 6759 /* Disable the MAC Rx/Tx */ 6760 stmmac_mac_set(priv, priv->ioaddr, false); 
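/* At this point the interface is fully quiesced: NAPI is disabled, the IRQ lines are freed, the DMA channels are stopped and the MAC is switched off; only the software state below remains to be updated. */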
6761 6762 /* set trans_start so we don't get spurious 6763 * watchdogs during reset 6764 */ 6765 netif_trans_update(dev); 6766 netif_carrier_off(dev); 6767 } 6768 6769 int stmmac_xdp_open(struct net_device *dev) 6770 { 6771 struct stmmac_priv *priv = netdev_priv(dev); 6772 u32 rx_cnt = priv->plat->rx_queues_to_use; 6773 u32 tx_cnt = priv->plat->tx_queues_to_use; 6774 u32 dma_csr_ch = max(rx_cnt, tx_cnt); 6775 struct stmmac_rx_queue *rx_q; 6776 struct stmmac_tx_queue *tx_q; 6777 u32 buf_size; 6778 bool sph_en; 6779 u32 chan; 6780 int ret; 6781 6782 ret = alloc_dma_desc_resources(priv, &priv->dma_conf); 6783 if (ret < 0) { 6784 netdev_err(dev, "%s: DMA descriptors allocation failed\n", 6785 __func__); 6786 goto dma_desc_error; 6787 } 6788 6789 ret = init_dma_desc_rings(dev, &priv->dma_conf, GFP_KERNEL); 6790 if (ret < 0) { 6791 netdev_err(dev, "%s: DMA descriptors initialization failed\n", 6792 __func__); 6793 goto init_error; 6794 } 6795 6796 stmmac_reset_queues_param(priv); 6797 6798 /* DMA CSR Channel configuration */ 6799 for (chan = 0; chan < dma_csr_ch; chan++) { 6800 stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan); 6801 stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 1); 6802 } 6803 6804 /* Adjust Split header */ 6805 sph_en = (priv->hw->rx_csum > 0) && priv->sph; 6806 6807 /* DMA RX Channel Configuration */ 6808 for (chan = 0; chan < rx_cnt; chan++) { 6809 rx_q = &priv->dma_conf.rx_queue[chan]; 6810 6811 stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg, 6812 rx_q->dma_rx_phy, chan); 6813 6814 rx_q->rx_tail_addr = rx_q->dma_rx_phy + 6815 (rx_q->buf_alloc_num * 6816 sizeof(struct dma_desc)); 6817 stmmac_set_rx_tail_ptr(priv, priv->ioaddr, 6818 rx_q->rx_tail_addr, chan); 6819 6820 if (rx_q->xsk_pool && rx_q->buf_alloc_num) { 6821 buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool); 6822 stmmac_set_dma_bfsize(priv, priv->ioaddr, 6823 buf_size, 6824 rx_q->queue_index); 6825 } else { 6826 stmmac_set_dma_bfsize(priv, priv->ioaddr, 6827 priv->dma_conf.dma_buf_sz, 6828 rx_q->queue_index); 6829 } 6830 6831 stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan); 6832 } 6833 6834 /* DMA TX Channel Configuration */ 6835 for (chan = 0; chan < tx_cnt; chan++) { 6836 tx_q = &priv->dma_conf.tx_queue[chan]; 6837 6838 stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg, 6839 tx_q->dma_tx_phy, chan); 6840 6841 tx_q->tx_tail_addr = tx_q->dma_tx_phy; 6842 stmmac_set_tx_tail_ptr(priv, priv->ioaddr, 6843 tx_q->tx_tail_addr, chan); 6844 6845 hrtimer_init(&tx_q->txtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); 6846 tx_q->txtimer.function = stmmac_tx_timer; 6847 } 6848 6849 /* Enable the MAC Rx/Tx */ 6850 stmmac_mac_set(priv, priv->ioaddr, true); 6851 6852 /* Start Rx & Tx DMA Channels */ 6853 stmmac_start_all_dma(priv); 6854 6855 ret = stmmac_request_irq(dev); 6856 if (ret) 6857 goto irq_error; 6858 6859 /* Enable NAPI process*/ 6860 stmmac_enable_all_queues(priv); 6861 netif_carrier_on(dev); 6862 netif_tx_start_all_queues(dev); 6863 stmmac_enable_all_dma_irq(priv); 6864 6865 return 0; 6866 6867 irq_error: 6868 for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) 6869 hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer); 6870 6871 stmmac_hw_teardown(dev); 6872 init_error: 6873 free_dma_desc_resources(priv, &priv->dma_conf); 6874 dma_desc_error: 6875 return ret; 6876 } 6877 6878 int stmmac_xsk_wakeup(struct net_device *dev, u32 queue, u32 flags) 6879 { 6880 struct stmmac_priv *priv = netdev_priv(dev); 6881 struct stmmac_rx_queue *rx_q; 6882 struct stmmac_tx_queue 
*tx_q; 6883 struct stmmac_channel *ch; 6884 6885 if (test_bit(STMMAC_DOWN, &priv->state) || 6886 !netif_carrier_ok(priv->dev)) 6887 return -ENETDOWN; 6888 6889 if (!stmmac_xdp_is_enabled(priv)) 6890 return -EINVAL; 6891 6892 if (queue >= priv->plat->rx_queues_to_use || 6893 queue >= priv->plat->tx_queues_to_use) 6894 return -EINVAL; 6895 6896 rx_q = &priv->dma_conf.rx_queue[queue]; 6897 tx_q = &priv->dma_conf.tx_queue[queue]; 6898 ch = &priv->channel[queue]; 6899 6900 if (!rx_q->xsk_pool && !tx_q->xsk_pool) 6901 return -EINVAL; 6902 6903 if (!napi_if_scheduled_mark_missed(&ch->rxtx_napi)) { 6904 /* EQoS does not have per-DMA channel SW interrupt, 6905 * so we schedule RX Napi straight-away. 6906 */ 6907 if (likely(napi_schedule_prep(&ch->rxtx_napi))) 6908 __napi_schedule(&ch->rxtx_napi); 6909 } 6910 6911 return 0; 6912 } 6913 6914 static void stmmac_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) 6915 { 6916 struct stmmac_priv *priv = netdev_priv(dev); 6917 u32 tx_cnt = priv->plat->tx_queues_to_use; 6918 u32 rx_cnt = priv->plat->rx_queues_to_use; 6919 unsigned int start; 6920 int q; 6921 6922 for (q = 0; q < tx_cnt; q++) { 6923 struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[q]; 6924 u64 tx_packets; 6925 u64 tx_bytes; 6926 6927 do { 6928 start = u64_stats_fetch_begin(&txq_stats->syncp); 6929 tx_packets = txq_stats->tx_packets; 6930 tx_bytes = txq_stats->tx_bytes; 6931 } while (u64_stats_fetch_retry(&txq_stats->syncp, start)); 6932 6933 stats->tx_packets += tx_packets; 6934 stats->tx_bytes += tx_bytes; 6935 } 6936 6937 for (q = 0; q < rx_cnt; q++) { 6938 struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[q]; 6939 u64 rx_packets; 6940 u64 rx_bytes; 6941 6942 do { 6943 start = u64_stats_fetch_begin(&rxq_stats->syncp); 6944 rx_packets = rxq_stats->rx_packets; 6945 rx_bytes = rxq_stats->rx_bytes; 6946 } while (u64_stats_fetch_retry(&rxq_stats->syncp, start)); 6947 6948 stats->rx_packets += rx_packets; 6949 stats->rx_bytes += rx_bytes; 6950 } 6951 6952 stats->rx_dropped = priv->xstats.rx_dropped; 6953 stats->rx_errors = priv->xstats.rx_errors; 6954 stats->tx_dropped = priv->xstats.tx_dropped; 6955 stats->tx_errors = priv->xstats.tx_errors; 6956 stats->tx_carrier_errors = priv->xstats.tx_losscarrier + priv->xstats.tx_carrier; 6957 stats->collisions = priv->xstats.tx_collision + priv->xstats.rx_collision; 6958 stats->rx_length_errors = priv->xstats.rx_length; 6959 stats->rx_crc_errors = priv->xstats.rx_crc_errors; 6960 stats->rx_over_errors = priv->xstats.rx_overflow_cntr; 6961 stats->rx_missed_errors = priv->xstats.rx_missed_cntr; 6962 } 6963 6964 static const struct net_device_ops stmmac_netdev_ops = { 6965 .ndo_open = stmmac_open, 6966 .ndo_start_xmit = stmmac_xmit, 6967 .ndo_stop = stmmac_release, 6968 .ndo_change_mtu = stmmac_change_mtu, 6969 .ndo_fix_features = stmmac_fix_features, 6970 .ndo_set_features = stmmac_set_features, 6971 .ndo_set_rx_mode = stmmac_set_rx_mode, 6972 .ndo_tx_timeout = stmmac_tx_timeout, 6973 .ndo_eth_ioctl = stmmac_ioctl, 6974 .ndo_get_stats64 = stmmac_get_stats64, 6975 .ndo_setup_tc = stmmac_setup_tc, 6976 .ndo_select_queue = stmmac_select_queue, 6977 .ndo_set_mac_address = stmmac_set_mac_address, 6978 .ndo_vlan_rx_add_vid = stmmac_vlan_rx_add_vid, 6979 .ndo_vlan_rx_kill_vid = stmmac_vlan_rx_kill_vid, 6980 .ndo_bpf = stmmac_bpf, 6981 .ndo_xdp_xmit = stmmac_xdp_xmit, 6982 .ndo_xsk_wakeup = stmmac_xsk_wakeup, 6983 }; 6984 6985 static void stmmac_reset_subtask(struct stmmac_priv *priv) 6986 { 6987 if 
(!test_and_clear_bit(STMMAC_RESET_REQUESTED, &priv->state)) 6988 return; 6989 if (test_bit(STMMAC_DOWN, &priv->state)) 6990 return; 6991 6992 netdev_err(priv->dev, "Reset adapter.\n"); 6993 6994 rtnl_lock(); 6995 netif_trans_update(priv->dev); 6996 while (test_and_set_bit(STMMAC_RESETING, &priv->state)) 6997 usleep_range(1000, 2000); 6998 6999 set_bit(STMMAC_DOWN, &priv->state); 7000 dev_close(priv->dev); 7001 dev_open(priv->dev, NULL); 7002 clear_bit(STMMAC_DOWN, &priv->state); 7003 clear_bit(STMMAC_RESETING, &priv->state); 7004 rtnl_unlock(); 7005 } 7006 7007 static void stmmac_service_task(struct work_struct *work) 7008 { 7009 struct stmmac_priv *priv = container_of(work, struct stmmac_priv, 7010 service_task); 7011 7012 stmmac_reset_subtask(priv); 7013 clear_bit(STMMAC_SERVICE_SCHED, &priv->state); 7014 } 7015 7016 /** 7017 * stmmac_hw_init - Init the MAC device 7018 * @priv: driver private structure 7019 * Description: this function is to configure the MAC device according to 7020 * some platform parameters or the HW capability register. It prepares the 7021 * driver to use either ring or chain modes and to setup either enhanced or 7022 * normal descriptors. 7023 */ 7024 static int stmmac_hw_init(struct stmmac_priv *priv) 7025 { 7026 int ret; 7027 7028 /* dwmac-sun8i only work in chain mode */ 7029 if (priv->plat->flags & STMMAC_FLAG_HAS_SUN8I) 7030 chain_mode = 1; 7031 priv->chain_mode = chain_mode; 7032 7033 /* Initialize HW Interface */ 7034 ret = stmmac_hwif_init(priv); 7035 if (ret) 7036 return ret; 7037 7038 /* Get the HW capability (new GMAC newer than 3.50a) */ 7039 priv->hw_cap_support = stmmac_get_hw_features(priv); 7040 if (priv->hw_cap_support) { 7041 dev_info(priv->device, "DMA HW capability register supported\n"); 7042 7043 /* We can override some gmac/dma configuration fields: e.g. 7044 * enh_desc, tx_coe (e.g. that are passed through the 7045 * platform) with the values from the HW capability 7046 * register (if supported). 7047 */ 7048 priv->plat->enh_desc = priv->dma_cap.enh_desc; 7049 priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up && 7050 !(priv->plat->flags & STMMAC_FLAG_USE_PHY_WOL); 7051 priv->hw->pmt = priv->plat->pmt; 7052 if (priv->dma_cap.hash_tb_sz) { 7053 priv->hw->multicast_filter_bins = 7054 (BIT(priv->dma_cap.hash_tb_sz) << 5); 7055 priv->hw->mcast_bits_log2 = 7056 ilog2(priv->hw->multicast_filter_bins); 7057 } 7058 7059 /* TXCOE doesn't work in thresh DMA mode */ 7060 if (priv->plat->force_thresh_dma_mode) 7061 priv->plat->tx_coe = 0; 7062 else 7063 priv->plat->tx_coe = priv->dma_cap.tx_coe; 7064 7065 /* In case of GMAC4 rx_coe is from HW cap register. 
*/ 7066 priv->plat->rx_coe = priv->dma_cap.rx_coe; 7067 7068 if (priv->dma_cap.rx_coe_type2) 7069 priv->plat->rx_coe = STMMAC_RX_COE_TYPE2; 7070 else if (priv->dma_cap.rx_coe_type1) 7071 priv->plat->rx_coe = STMMAC_RX_COE_TYPE1; 7072 7073 } else { 7074 dev_info(priv->device, "No HW DMA feature register supported\n"); 7075 } 7076 7077 if (priv->plat->rx_coe) { 7078 priv->hw->rx_csum = priv->plat->rx_coe; 7079 dev_info(priv->device, "RX Checksum Offload Engine supported\n"); 7080 if (priv->synopsys_id < DWMAC_CORE_4_00) 7081 dev_info(priv->device, "COE Type %d\n", priv->hw->rx_csum); 7082 } 7083 if (priv->plat->tx_coe) 7084 dev_info(priv->device, "TX Checksum insertion supported\n"); 7085 7086 if (priv->plat->pmt) { 7087 dev_info(priv->device, "Wake-Up On LAN supported\n"); 7088 device_set_wakeup_capable(priv->device, 1); 7089 } 7090 7091 if (priv->dma_cap.tsoen) 7092 dev_info(priv->device, "TSO supported\n"); 7093 7094 priv->hw->vlan_fail_q_en = 7095 (priv->plat->flags & STMMAC_FLAG_VLAN_FAIL_Q_EN); 7096 priv->hw->vlan_fail_q = priv->plat->vlan_fail_q; 7097 7098 /* Run HW quirks, if any */ 7099 if (priv->hwif_quirks) { 7100 ret = priv->hwif_quirks(priv); 7101 if (ret) 7102 return ret; 7103 } 7104 7105 /* Rx Watchdog is available in cores newer than 3.40. 7106 * In some cases, for example on buggy HW, this feature 7107 * has to be disabled; this can be done by passing the 7108 * riwt_off field from the platform. 7109 */ 7110 if (((priv->synopsys_id >= DWMAC_CORE_3_50) || 7111 (priv->plat->has_xgmac)) && (!priv->plat->riwt_off)) { 7112 priv->use_riwt = 1; 7113 dev_info(priv->device, 7114 "Enable RX Mitigation via HW Watchdog Timer\n"); 7115 } 7116 7117 return 0; 7118 } 7119 7120 static void stmmac_napi_add(struct net_device *dev) 7121 { 7122 struct stmmac_priv *priv = netdev_priv(dev); 7123 u32 queue, maxq; 7124 7125 maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use); 7126 7127 for (queue = 0; queue < maxq; queue++) { 7128 struct stmmac_channel *ch = &priv->channel[queue]; 7129 7130 ch->priv_data = priv; 7131 ch->index = queue; 7132 spin_lock_init(&ch->lock); 7133 7134 if (queue < priv->plat->rx_queues_to_use) { 7135 netif_napi_add(dev, &ch->rx_napi, stmmac_napi_poll_rx); 7136 } 7137 if (queue < priv->plat->tx_queues_to_use) { 7138 netif_napi_add_tx(dev, &ch->tx_napi, 7139 stmmac_napi_poll_tx); 7140 } 7141 if (queue < priv->plat->rx_queues_to_use && 7142 queue < priv->plat->tx_queues_to_use) { 7143 netif_napi_add(dev, &ch->rxtx_napi, 7144 stmmac_napi_poll_rxtx); 7145 } 7146 } 7147 } 7148 7149 static void stmmac_napi_del(struct net_device *dev) 7150 { 7151 struct stmmac_priv *priv = netdev_priv(dev); 7152 u32 queue, maxq; 7153 7154 maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use); 7155 7156 for (queue = 0; queue < maxq; queue++) { 7157 struct stmmac_channel *ch = &priv->channel[queue]; 7158 7159 if (queue < priv->plat->rx_queues_to_use) 7160 netif_napi_del(&ch->rx_napi); 7161 if (queue < priv->plat->tx_queues_to_use) 7162 netif_napi_del(&ch->tx_napi); 7163 if (queue < priv->plat->rx_queues_to_use && 7164 queue < priv->plat->tx_queues_to_use) { 7165 netif_napi_del(&ch->rxtx_napi); 7166 } 7167 } 7168 } 7169 7170 int stmmac_reinit_queues(struct net_device *dev, u32 rx_cnt, u32 tx_cnt) 7171 { 7172 struct stmmac_priv *priv = netdev_priv(dev); 7173 int ret = 0, i; 7174 7175 if (netif_running(dev)) 7176 stmmac_release(dev); 7177 7178 stmmac_napi_del(dev); 7179 7180 priv->plat->rx_queues_to_use = rx_cnt; 7181 priv->plat->tx_queues_to_use = tx_cnt;
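/* Unless the user has explicitly configured the RSS indirection table via ethtool, re-seed it with the default spread for the new RX queue count. */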
7182 if (!netif_is_rxfh_configured(dev)) 7183 for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++) 7184 priv->rss.table[i] = ethtool_rxfh_indir_default(i, 7185 rx_cnt); 7186 7187 stmmac_set_half_duplex(priv); 7188 stmmac_napi_add(dev); 7189 7190 if (netif_running(dev)) 7191 ret = stmmac_open(dev); 7192 7193 return ret; 7194 } 7195 7196 int stmmac_reinit_ringparam(struct net_device *dev, u32 rx_size, u32 tx_size) 7197 { 7198 struct stmmac_priv *priv = netdev_priv(dev); 7199 int ret = 0; 7200 7201 if (netif_running(dev)) 7202 stmmac_release(dev); 7203 7204 priv->dma_conf.dma_rx_size = rx_size; 7205 priv->dma_conf.dma_tx_size = tx_size; 7206 7207 if (netif_running(dev)) 7208 ret = stmmac_open(dev); 7209 7210 return ret; 7211 } 7212 7213 #define SEND_VERIFY_MPAKCET_FMT "Send Verify mPacket lo_state=%d lp_state=%d\n" 7214 static void stmmac_fpe_lp_task(struct work_struct *work) 7215 { 7216 struct stmmac_priv *priv = container_of(work, struct stmmac_priv, 7217 fpe_task); 7218 struct stmmac_fpe_cfg *fpe_cfg = priv->plat->fpe_cfg; 7219 enum stmmac_fpe_state *lo_state = &fpe_cfg->lo_fpe_state; 7220 enum stmmac_fpe_state *lp_state = &fpe_cfg->lp_fpe_state; 7221 bool *hs_enable = &fpe_cfg->hs_enable; 7222 bool *enable = &fpe_cfg->enable; 7223 int retries = 20; 7224 7225 while (retries-- > 0) { 7226 /* Bail out immediately if FPE handshake is OFF */ 7227 if (*lo_state == FPE_STATE_OFF || !*hs_enable) 7228 break; 7229 7230 if (*lo_state == FPE_STATE_ENTERING_ON && 7231 *lp_state == FPE_STATE_ENTERING_ON) { 7232 stmmac_fpe_configure(priv, priv->ioaddr, 7233 fpe_cfg, 7234 priv->plat->tx_queues_to_use, 7235 priv->plat->rx_queues_to_use, 7236 *enable); 7237 7238 netdev_info(priv->dev, "configured FPE\n"); 7239 7240 *lo_state = FPE_STATE_ON; 7241 *lp_state = FPE_STATE_ON; 7242 netdev_info(priv->dev, "!!! BOTH FPE stations ON\n"); 7243 break; 7244 } 7245 7246 if ((*lo_state == FPE_STATE_CAPABLE || 7247 *lo_state == FPE_STATE_ENTERING_ON) && 7248 *lp_state != FPE_STATE_ON) { 7249 netdev_info(priv->dev, SEND_VERIFY_MPAKCET_FMT, 7250 *lo_state, *lp_state); 7251 stmmac_fpe_send_mpacket(priv, priv->ioaddr, 7252 fpe_cfg, 7253 MPACKET_VERIFY); 7254 } 7255 /* Sleep then retry */ 7256 msleep(500); 7257 } 7258 7259 clear_bit(__FPE_TASK_SCHED, &priv->fpe_task_state); 7260 } 7261 7262 void stmmac_fpe_handshake(struct stmmac_priv *priv, bool enable) 7263 { 7264 if (priv->plat->fpe_cfg->hs_enable != enable) { 7265 if (enable) { 7266 stmmac_fpe_send_mpacket(priv, priv->ioaddr, 7267 priv->plat->fpe_cfg, 7268 MPACKET_VERIFY); 7269 } else { 7270 priv->plat->fpe_cfg->lo_fpe_state = FPE_STATE_OFF; 7271 priv->plat->fpe_cfg->lp_fpe_state = FPE_STATE_OFF; 7272 } 7273 7274 priv->plat->fpe_cfg->hs_enable = enable; 7275 } 7276 } 7277 7278 static int stmmac_xdp_rx_timestamp(const struct xdp_md *_ctx, u64 *timestamp) 7279 { 7280 const struct stmmac_xdp_buff *ctx = (void *)_ctx; 7281 struct dma_desc *desc_contains_ts = ctx->desc; 7282 struct stmmac_priv *priv = ctx->priv; 7283 struct dma_desc *ndesc = ctx->ndesc; 7284 struct dma_desc *desc = ctx->desc; 7285 u64 ns = 0; 7286 7287 if (!priv->hwts_rx_en) 7288 return -ENODATA; 7289 7290 /* For GMAC4, the valid timestamp is from CTX next desc. 
*/ 7291 if (priv->plat->has_gmac4 || priv->plat->has_xgmac) 7292 desc_contains_ts = ndesc; 7293 7294 /* Check if timestamp is available */ 7295 if (stmmac_get_rx_timestamp_status(priv, desc, ndesc, priv->adv_ts)) { 7296 stmmac_get_timestamp(priv, desc_contains_ts, priv->adv_ts, &ns); 7297 ns -= priv->plat->cdc_error_adj; 7298 *timestamp = ns_to_ktime(ns); 7299 return 0; 7300 } 7301 7302 return -ENODATA; 7303 } 7304 7305 static const struct xdp_metadata_ops stmmac_xdp_metadata_ops = { 7306 .xmo_rx_timestamp = stmmac_xdp_rx_timestamp, 7307 }; 7308 7309 /** 7310 * stmmac_dvr_probe 7311 * @device: device pointer 7312 * @plat_dat: platform data pointer 7313 * @res: stmmac resource pointer 7314 * Description: this is the main probe function used to 7315 * call the alloc_etherdev, allocate the priv structure. 7316 * Return: 7317 * returns 0 on success, otherwise errno. 7318 */ 7319 int stmmac_dvr_probe(struct device *device, 7320 struct plat_stmmacenet_data *plat_dat, 7321 struct stmmac_resources *res) 7322 { 7323 struct net_device *ndev = NULL; 7324 struct stmmac_priv *priv; 7325 u32 rxq; 7326 int i, ret = 0; 7327 7328 ndev = devm_alloc_etherdev_mqs(device, sizeof(struct stmmac_priv), 7329 MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES); 7330 if (!ndev) 7331 return -ENOMEM; 7332 7333 SET_NETDEV_DEV(ndev, device); 7334 7335 priv = netdev_priv(ndev); 7336 priv->device = device; 7337 priv->dev = ndev; 7338 7339 for (i = 0; i < MTL_MAX_RX_QUEUES; i++) 7340 u64_stats_init(&priv->xstats.rxq_stats[i].syncp); 7341 for (i = 0; i < MTL_MAX_TX_QUEUES; i++) 7342 u64_stats_init(&priv->xstats.txq_stats[i].syncp); 7343 7344 stmmac_set_ethtool_ops(ndev); 7345 priv->pause = pause; 7346 priv->plat = plat_dat; 7347 priv->ioaddr = res->addr; 7348 priv->dev->base_addr = (unsigned long)res->addr; 7349 priv->plat->dma_cfg->multi_msi_en = 7350 (priv->plat->flags & STMMAC_FLAG_MULTI_MSI_EN); 7351 7352 priv->dev->irq = res->irq; 7353 priv->wol_irq = res->wol_irq; 7354 priv->lpi_irq = res->lpi_irq; 7355 priv->sfty_ce_irq = res->sfty_ce_irq; 7356 priv->sfty_ue_irq = res->sfty_ue_irq; 7357 for (i = 0; i < MTL_MAX_RX_QUEUES; i++) 7358 priv->rx_irq[i] = res->rx_irq[i]; 7359 for (i = 0; i < MTL_MAX_TX_QUEUES; i++) 7360 priv->tx_irq[i] = res->tx_irq[i]; 7361 7362 if (!is_zero_ether_addr(res->mac)) 7363 eth_hw_addr_set(priv->dev, res->mac); 7364 7365 dev_set_drvdata(device, priv->dev); 7366 7367 /* Verify driver arguments */ 7368 stmmac_verify_args(); 7369 7370 priv->af_xdp_zc_qps = bitmap_zalloc(MTL_MAX_TX_QUEUES, GFP_KERNEL); 7371 if (!priv->af_xdp_zc_qps) 7372 return -ENOMEM; 7373 7374 /* Allocate workqueue */ 7375 priv->wq = create_singlethread_workqueue("stmmac_wq"); 7376 if (!priv->wq) { 7377 dev_err(priv->device, "failed to create workqueue\n"); 7378 ret = -ENOMEM; 7379 goto error_wq_init; 7380 } 7381 7382 INIT_WORK(&priv->service_task, stmmac_service_task); 7383 7384 /* Initialize Link Partner FPE workqueue */ 7385 INIT_WORK(&priv->fpe_task, stmmac_fpe_lp_task); 7386 7387 /* Override with kernel parameters if supplied XXX CRS XXX 7388 * this needs to have multiple instances 7389 */ 7390 if ((phyaddr >= 0) && (phyaddr <= 31)) 7391 priv->plat->phy_addr = phyaddr; 7392 7393 if (priv->plat->stmmac_rst) { 7394 ret = reset_control_assert(priv->plat->stmmac_rst); 7395 reset_control_deassert(priv->plat->stmmac_rst); 7396 /* Some reset controllers have only reset callback instead of 7397 * assert + deassert callbacks pair. 
7398 */ 7399 if (ret == -ENOTSUPP) 7400 reset_control_reset(priv->plat->stmmac_rst); 7401 } 7402 7403 ret = reset_control_deassert(priv->plat->stmmac_ahb_rst); 7404 if (ret == -ENOTSUPP) 7405 dev_err(priv->device, "unable to bring out of ahb reset: %pe\n", 7406 ERR_PTR(ret)); 7407 7408 /* Init MAC and get the capabilities */ 7409 ret = stmmac_hw_init(priv); 7410 if (ret) 7411 goto error_hw_init; 7412 7413 /* Only DWMAC core version 5.20 onwards supports HW descriptor prefetch. 7414 */ 7415 if (priv->synopsys_id < DWMAC_CORE_5_20) 7416 priv->plat->dma_cfg->dche = false; 7417 7418 stmmac_check_ether_addr(priv); 7419 7420 ndev->netdev_ops = &stmmac_netdev_ops; 7421 7422 ndev->xdp_metadata_ops = &stmmac_xdp_metadata_ops; 7423 7424 ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | 7425 NETIF_F_RXCSUM; 7426 ndev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT | 7427 NETDEV_XDP_ACT_XSK_ZEROCOPY; 7428 7429 ret = stmmac_tc_init(priv, priv); 7430 if (!ret) { 7431 ndev->hw_features |= NETIF_F_HW_TC; 7432 } 7433 7434 if ((priv->plat->flags & STMMAC_FLAG_TSO_EN) && (priv->dma_cap.tsoen)) { 7435 ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6; 7436 if (priv->plat->has_gmac4) 7437 ndev->hw_features |= NETIF_F_GSO_UDP_L4; 7438 priv->tso = true; 7439 dev_info(priv->device, "TSO feature enabled\n"); 7440 } 7441 7442 if (priv->dma_cap.sphen && 7443 !(priv->plat->flags & STMMAC_FLAG_SPH_DISABLE)) { 7444 ndev->hw_features |= NETIF_F_GRO; 7445 priv->sph_cap = true; 7446 priv->sph = priv->sph_cap; 7447 dev_info(priv->device, "SPH feature enabled\n"); 7448 } 7449 7450 /* Ideally our host DMA address width is the same as for the 7451 * device. However, it may differ and then we have to use our 7452 * host DMA width for allocation and the device DMA width for 7453 * register handling. 7454 */ 7455 if (priv->plat->host_dma_width) 7456 priv->dma_cap.host_dma_width = priv->plat->host_dma_width; 7457 else 7458 priv->dma_cap.host_dma_width = priv->dma_cap.addr64; 7459 7460 if (priv->dma_cap.host_dma_width) { 7461 ret = dma_set_mask_and_coherent(device, 7462 DMA_BIT_MASK(priv->dma_cap.host_dma_width)); 7463 if (!ret) { 7464 dev_info(priv->device, "Using %d/%d bits DMA host/device width\n", 7465 priv->dma_cap.host_dma_width, priv->dma_cap.addr64); 7466 7467 /* 7468 * If more than 32 bits can be addressed, make sure to 7469 * enable enhanced addressing mode. 
7470 */ 7471 if (IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT)) 7472 priv->plat->dma_cfg->eame = true; 7473 } else { 7474 ret = dma_set_mask_and_coherent(device, DMA_BIT_MASK(32)); 7475 if (ret) { 7476 dev_err(priv->device, "Failed to set DMA Mask\n"); 7477 goto error_hw_init; 7478 } 7479 7480 priv->dma_cap.host_dma_width = 32; 7481 } 7482 } 7483 7484 ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA; 7485 ndev->watchdog_timeo = msecs_to_jiffies(watchdog); 7486 #ifdef STMMAC_VLAN_TAG_USED 7487 /* Both mac100 and gmac support receive VLAN tag detection */ 7488 ndev->features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX; 7489 if (priv->dma_cap.vlhash) { 7490 ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER; 7491 ndev->features |= NETIF_F_HW_VLAN_STAG_FILTER; 7492 } 7493 if (priv->dma_cap.vlins) { 7494 ndev->features |= NETIF_F_HW_VLAN_CTAG_TX; 7495 if (priv->dma_cap.dvlan) 7496 ndev->features |= NETIF_F_HW_VLAN_STAG_TX; 7497 } 7498 #endif 7499 priv->msg_enable = netif_msg_init(debug, default_msg_level); 7500 7501 priv->xstats.threshold = tc; 7502 7503 /* Initialize RSS */ 7504 rxq = priv->plat->rx_queues_to_use; 7505 netdev_rss_key_fill(priv->rss.key, sizeof(priv->rss.key)); 7506 for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++) 7507 priv->rss.table[i] = ethtool_rxfh_indir_default(i, rxq); 7508 7509 if (priv->dma_cap.rssen && priv->plat->rss_en) 7510 ndev->features |= NETIF_F_RXHASH; 7511 7512 ndev->vlan_features |= ndev->features; 7513 /* TSO doesn't work on VLANs yet */ 7514 ndev->vlan_features &= ~NETIF_F_TSO; 7515 7516 /* MTU range: 46 - hw-specific max */ 7517 ndev->min_mtu = ETH_ZLEN - ETH_HLEN; 7518 if (priv->plat->has_xgmac) 7519 ndev->max_mtu = XGMAC_JUMBO_LEN; 7520 else if ((priv->plat->enh_desc) || (priv->synopsys_id >= DWMAC_CORE_4_00)) 7521 ndev->max_mtu = JUMBO_LEN; 7522 else 7523 ndev->max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN); 7524 /* Will not overwrite ndev->max_mtu if plat->maxmtu > ndev->max_mtu 7525 * as well as plat->maxmtu < ndev->min_mtu which is a invalid range. 7526 */ 7527 if ((priv->plat->maxmtu < ndev->max_mtu) && 7528 (priv->plat->maxmtu >= ndev->min_mtu)) 7529 ndev->max_mtu = priv->plat->maxmtu; 7530 else if (priv->plat->maxmtu < ndev->min_mtu) 7531 dev_warn(priv->device, 7532 "%s: warning: maxmtu having invalid value (%d)\n", 7533 __func__, priv->plat->maxmtu); 7534 7535 if (flow_ctrl) 7536 priv->flow_ctrl = FLOW_AUTO; /* RX/TX pause on */ 7537 7538 ndev->priv_flags |= IFF_LIVE_ADDR_CHANGE; 7539 7540 /* Setup channels NAPI */ 7541 stmmac_napi_add(ndev); 7542 7543 mutex_init(&priv->lock); 7544 7545 /* If a specific clk_csr value is passed from the platform 7546 * this means that the CSR Clock Range selection cannot be 7547 * changed at run-time and it is fixed. Viceversa the driver'll try to 7548 * set the MDC clock dynamically according to the csr actual 7549 * clock input. 
7550 */ 7551 if (priv->plat->clk_csr >= 0) 7552 priv->clk_csr = priv->plat->clk_csr; 7553 else 7554 stmmac_clk_csr_set(priv); 7555 7556 stmmac_check_pcs_mode(priv); 7557 7558 pm_runtime_get_noresume(device); 7559 pm_runtime_set_active(device); 7560 if (!pm_runtime_enabled(device)) 7561 pm_runtime_enable(device); 7562 7563 if (priv->hw->pcs != STMMAC_PCS_TBI && 7564 priv->hw->pcs != STMMAC_PCS_RTBI) { 7565 /* MDIO bus Registration */ 7566 ret = stmmac_mdio_register(ndev); 7567 if (ret < 0) { 7568 dev_err_probe(priv->device, ret, 7569 "%s: MDIO bus (id: %d) registration failed\n", 7570 __func__, priv->plat->bus_id); 7571 goto error_mdio_register; 7572 } 7573 } 7574 7575 if (priv->plat->speed_mode_2500) 7576 priv->plat->speed_mode_2500(ndev, priv->plat->bsp_priv); 7577 7578 if (priv->plat->mdio_bus_data && priv->plat->mdio_bus_data->has_xpcs) { 7579 ret = stmmac_xpcs_setup(priv->mii); 7580 if (ret) 7581 goto error_xpcs_setup; 7582 } 7583 7584 ret = stmmac_phy_setup(priv); 7585 if (ret) { 7586 netdev_err(ndev, "failed to setup phy (%d)\n", ret); 7587 goto error_phy_setup; 7588 } 7589 7590 ret = register_netdev(ndev); 7591 if (ret) { 7592 dev_err(priv->device, "%s: ERROR %i registering the device\n", 7593 __func__, ret); 7594 goto error_netdev_register; 7595 } 7596 7597 #ifdef CONFIG_DEBUG_FS 7598 stmmac_init_fs(ndev); 7599 #endif 7600 7601 if (priv->plat->dump_debug_regs) 7602 priv->plat->dump_debug_regs(priv->plat->bsp_priv); 7603 7604 /* Let pm_runtime_put() disable the clocks. 7605 * If CONFIG_PM is not enabled, the clocks will stay powered. 7606 */ 7607 pm_runtime_put(device); 7608 7609 return ret; 7610 7611 error_netdev_register: 7612 phylink_destroy(priv->phylink); 7613 error_xpcs_setup: 7614 error_phy_setup: 7615 if (priv->hw->pcs != STMMAC_PCS_TBI && 7616 priv->hw->pcs != STMMAC_PCS_RTBI) 7617 stmmac_mdio_unregister(ndev); 7618 error_mdio_register: 7619 stmmac_napi_del(ndev); 7620 error_hw_init: 7621 destroy_workqueue(priv->wq); 7622 error_wq_init: 7623 bitmap_free(priv->af_xdp_zc_qps); 7624 7625 return ret; 7626 } 7627 EXPORT_SYMBOL_GPL(stmmac_dvr_probe); 7628 7629 /** 7630 * stmmac_dvr_remove 7631 * @dev: device pointer 7632 * Description: this function resets the TX/RX processes, disables the MAC RX/TX 7633 * changes the link status, releases the DMA descriptor rings. 
7634 */ 7635 void stmmac_dvr_remove(struct device *dev) 7636 { 7637 struct net_device *ndev = dev_get_drvdata(dev); 7638 struct stmmac_priv *priv = netdev_priv(ndev); 7639 7640 netdev_info(priv->dev, "%s: removing driver", __func__); 7641 7642 pm_runtime_get_sync(dev); 7643 7644 stmmac_stop_all_dma(priv); 7645 stmmac_mac_set(priv, priv->ioaddr, false); 7646 netif_carrier_off(ndev); 7647 unregister_netdev(ndev); 7648 7649 #ifdef CONFIG_DEBUG_FS 7650 stmmac_exit_fs(ndev); 7651 #endif 7652 phylink_destroy(priv->phylink); 7653 if (priv->plat->stmmac_rst) 7654 reset_control_assert(priv->plat->stmmac_rst); 7655 reset_control_assert(priv->plat->stmmac_ahb_rst); 7656 if (priv->hw->pcs != STMMAC_PCS_TBI && 7657 priv->hw->pcs != STMMAC_PCS_RTBI) 7658 stmmac_mdio_unregister(ndev); 7659 destroy_workqueue(priv->wq); 7660 mutex_destroy(&priv->lock); 7661 bitmap_free(priv->af_xdp_zc_qps); 7662 7663 pm_runtime_disable(dev); 7664 pm_runtime_put_noidle(dev); 7665 } 7666 EXPORT_SYMBOL_GPL(stmmac_dvr_remove); 7667 7668 /** 7669 * stmmac_suspend - suspend callback 7670 * @dev: device pointer 7671 * Description: this is the function to suspend the device and it is called 7672 * by the platform driver to stop the network queue, release the resources, 7673 * program the PMT register (for WoL), clean and release driver resources. 7674 */ 7675 int stmmac_suspend(struct device *dev) 7676 { 7677 struct net_device *ndev = dev_get_drvdata(dev); 7678 struct stmmac_priv *priv = netdev_priv(ndev); 7679 u32 chan; 7680 7681 if (!ndev || !netif_running(ndev)) 7682 return 0; 7683 7684 mutex_lock(&priv->lock); 7685 7686 netif_device_detach(ndev); 7687 7688 stmmac_disable_all_queues(priv); 7689 7690 for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) 7691 hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer); 7692 7693 if (priv->eee_enabled) { 7694 priv->tx_path_in_lpi_mode = false; 7695 del_timer_sync(&priv->eee_ctrl_timer); 7696 } 7697 7698 /* Stop TX/RX DMA */ 7699 stmmac_stop_all_dma(priv); 7700 7701 if (priv->plat->serdes_powerdown) 7702 priv->plat->serdes_powerdown(ndev, priv->plat->bsp_priv); 7703 7704 /* Enable Power down mode by programming the PMT regs */ 7705 if (device_may_wakeup(priv->device) && priv->plat->pmt) { 7706 stmmac_pmt(priv, priv->hw, priv->wolopts); 7707 priv->irq_wake = 1; 7708 } else { 7709 stmmac_mac_set(priv, priv->ioaddr, false); 7710 pinctrl_pm_select_sleep_state(priv->device); 7711 } 7712 7713 mutex_unlock(&priv->lock); 7714 7715 rtnl_lock(); 7716 if (device_may_wakeup(priv->device) && priv->plat->pmt) { 7717 phylink_suspend(priv->phylink, true); 7718 } else { 7719 if (device_may_wakeup(priv->device)) 7720 phylink_speed_down(priv->phylink, false); 7721 phylink_suspend(priv->phylink, false); 7722 } 7723 rtnl_unlock(); 7724 7725 if (priv->dma_cap.fpesel) { 7726 /* Disable FPE */ 7727 stmmac_fpe_configure(priv, priv->ioaddr, 7728 priv->plat->fpe_cfg, 7729 priv->plat->tx_queues_to_use, 7730 priv->plat->rx_queues_to_use, false); 7731 7732 stmmac_fpe_handshake(priv, false); 7733 stmmac_fpe_stop_wq(priv); 7734 } 7735 7736 priv->speed = SPEED_UNKNOWN; 7737 return 0; 7738 } 7739 EXPORT_SYMBOL_GPL(stmmac_suspend); 7740 7741 static void stmmac_reset_rx_queue(struct stmmac_priv *priv, u32 queue) 7742 { 7743 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue]; 7744 7745 rx_q->cur_rx = 0; 7746 rx_q->dirty_rx = 0; 7747 } 7748 7749 static void stmmac_reset_tx_queue(struct stmmac_priv *priv, u32 queue) 7750 { 7751 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue]; 7752 7753 
	tx_q->cur_tx = 0;
	tx_q->dirty_tx = 0;
	tx_q->mss = 0;

	netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue));
}

/**
 * stmmac_reset_queues_param - reset queue parameters
 * @priv: device pointer
 */
static void stmmac_reset_queues_param(struct stmmac_priv *priv)
{
	u32 rx_cnt = priv->plat->rx_queues_to_use;
	u32 tx_cnt = priv->plat->tx_queues_to_use;
	u32 queue;

	for (queue = 0; queue < rx_cnt; queue++)
		stmmac_reset_rx_queue(priv, queue);

	for (queue = 0; queue < tx_cnt; queue++)
		stmmac_reset_tx_queue(priv, queue);
}

/**
 * stmmac_resume - resume callback
 * @dev: device pointer
 * Description: when resuming, this function is invoked to set up the DMA and
 * CORE in a usable state.
 */
int stmmac_resume(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	struct stmmac_priv *priv = netdev_priv(ndev);
	int ret;

	if (!netif_running(ndev))
		return 0;

	/* Power Down bit, into the PM register, is cleared
	 * automatically as soon as a magic packet or a Wake-up frame
	 * is received. Anyway, it's better to manually clear
	 * this bit because it can generate problems while resuming
	 * from other devices (e.g. serial console).
	 */
	if (device_may_wakeup(priv->device) && priv->plat->pmt) {
		mutex_lock(&priv->lock);
		stmmac_pmt(priv, priv->hw, 0);
		mutex_unlock(&priv->lock);
		priv->irq_wake = 0;
	} else {
		pinctrl_pm_select_default_state(priv->device);
		/* reset the phy so that it's ready */
		if (priv->mii)
			stmmac_mdio_reset(priv->mii);
	}

	if (!(priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP) &&
	    priv->plat->serdes_powerup) {
		ret = priv->plat->serdes_powerup(ndev,
						 priv->plat->bsp_priv);

		if (ret < 0)
			return ret;
	}

	rtnl_lock();
	if (device_may_wakeup(priv->device) && priv->plat->pmt) {
		phylink_resume(priv->phylink);
	} else {
		phylink_resume(priv->phylink);
		if (device_may_wakeup(priv->device))
			phylink_speed_up(priv->phylink);
	}
	rtnl_unlock();

	rtnl_lock();
	mutex_lock(&priv->lock);

	stmmac_reset_queues_param(priv);

	stmmac_free_tx_skbufs(priv);
	stmmac_clear_descriptors(priv, &priv->dma_conf);

	stmmac_hw_setup(ndev, false);
	stmmac_init_coalesce(priv);
	stmmac_set_rx_mode(ndev);

	stmmac_restore_hw_vlan_rx_fltr(priv, ndev, priv->hw);

	stmmac_enable_all_queues(priv);
	stmmac_enable_all_dma_irq(priv);

	mutex_unlock(&priv->lock);
	rtnl_unlock();

	netif_device_attach(ndev);

	return 0;
}
EXPORT_SYMBOL_GPL(stmmac_resume);

#ifndef MODULE
static int __init stmmac_cmdline_opt(char *str)
{
	char *opt;

	if (!str || !*str)
		return 1;
	while ((opt = strsep(&str, ",")) != NULL) {
		if (!strncmp(opt, "debug:", 6)) {
			if (kstrtoint(opt + 6, 0, &debug))
				goto err;
		} else if (!strncmp(opt, "phyaddr:", 8)) {
			if (kstrtoint(opt + 8, 0, &phyaddr))
				goto err;
		} else if (!strncmp(opt, "buf_sz:", 7)) {
			if (kstrtoint(opt + 7, 0, &buf_sz))
				goto err;
		} else if (!strncmp(opt, "tc:", 3)) {
			if (kstrtoint(opt + 3, 0, &tc))
				goto err;
		} else if (!strncmp(opt, "watchdog:", 9)) {
			if (kstrtoint(opt + 9, 0, &watchdog))
				goto err;
		} else if (!strncmp(opt, "flow_ctrl:", 10)) {
			if (kstrtoint(opt + 10, 0, &flow_ctrl))
				goto err;
		} else if (!strncmp(opt, "pause:", 6)) {
			if (kstrtoint(opt + 6, 0, &pause))
				goto err;
		} else if (!strncmp(opt, "eee_timer:", 10)) {
			if (kstrtoint(opt + 10, 0, &eee_timer))
				goto err;
		} else if (!strncmp(opt, "chain_mode:", 11)) {
			if (kstrtoint(opt + 11, 0, &chain_mode))
				goto err;
		}
	}
	return 1;

err:
	pr_err("%s: ERROR broken module parameter conversion", __func__);
	return 1;
}

__setup("stmmaceth=", stmmac_cmdline_opt);
#endif /* MODULE */

static int __init stmmac_init(void)
{
#ifdef CONFIG_DEBUG_FS
	/* Create debugfs main directory if it doesn't exist yet */
	if (!stmmac_fs_dir)
		stmmac_fs_dir = debugfs_create_dir(STMMAC_RESOURCE_NAME, NULL);
	register_netdevice_notifier(&stmmac_notifier);
#endif

	return 0;
}

static void __exit stmmac_exit(void)
{
#ifdef CONFIG_DEBUG_FS
	unregister_netdevice_notifier(&stmmac_notifier);
	debugfs_remove_recursive(stmmac_fs_dir);
#endif
}

module_init(stmmac_init)
module_exit(stmmac_exit)

MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet device driver");
MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
MODULE_LICENSE("GPL");