// SPDX-License-Identifier: GPL-2.0-only
/*******************************************************************************
  This is the driver for the ST MAC 10/100/1000 on-chip Ethernet controllers.
  ST Ethernet IPs are built around a Synopsys IP Core.

	Copyright(C) 2007-2011 STMicroelectronics Ltd


  Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>

  Documentation available at:
	http://www.stlinux.com
  Support available at:
	https://bugzilla.stlinux.com/
*******************************************************************************/

#include <linux/clk.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/if_ether.h>
#include <linux/crc32.h>
#include <linux/mii.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/pm_runtime.h>
#include <linux/prefetch.h>
#include <linux/pinctrl/consumer.h>
#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#endif /* CONFIG_DEBUG_FS */
#include <linux/net_tstamp.h>
#include <linux/phylink.h>
#include <linux/udp.h>
#include <linux/bpf_trace.h>
#include <net/page_pool/helpers.h>
#include <net/pkt_cls.h>
#include <net/xdp_sock_drv.h>
#include "stmmac_ptp.h"
#include "stmmac.h"
#include "stmmac_xdp.h"
#include <linux/reset.h>
#include <linux/of_mdio.h>
#include "dwmac1000.h"
#include "dwxgmac2.h"
#include "hwif.h"

/* As long as the interface is active, we keep the timestamping counter enabled
 * with fine resolution and binary rollover. This avoids non-monotonic behavior
 * (clock jumps) when changing timestamping settings at runtime.
 */
#define	STMMAC_HWTS_ACTIVE	(PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | \
				 PTP_TCR_TSCTRLSSR)

#define	STMMAC_ALIGN(x)		ALIGN(ALIGN(x, SMP_CACHE_BYTES), 16)
#define	TSO_MAX_BUFF_SIZE	(SZ_16K - 1)

/* Module parameters */
#define TX_TIMEO	5000
static int watchdog = TX_TIMEO;
module_param(watchdog, int, 0644);
MODULE_PARM_DESC(watchdog, "Transmit timeout in milliseconds (default 5s)");

static int debug = -1;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)");

static int phyaddr = -1;
module_param(phyaddr, int, 0444);
MODULE_PARM_DESC(phyaddr, "Physical device address");

#define STMMAC_TX_THRESH(x)	((x)->dma_conf.dma_tx_size / 4)
#define STMMAC_RX_THRESH(x)	((x)->dma_conf.dma_rx_size / 4)

/* Limit to make sure XDP TX and slow path can coexist */
#define STMMAC_XSK_TX_BUDGET_MAX	256
#define STMMAC_TX_XSK_AVAIL		16
#define STMMAC_RX_FILL_BATCH		16

#define STMMAC_XDP_PASS		0
#define STMMAC_XDP_CONSUMED	BIT(0)
#define STMMAC_XDP_TX		BIT(1)
#define STMMAC_XDP_REDIRECT	BIT(2)

static int flow_ctrl = FLOW_AUTO;
module_param(flow_ctrl, int, 0644);
MODULE_PARM_DESC(flow_ctrl, "Flow control ability [on/off]");

static int pause = PAUSE_TIME;
module_param(pause, int, 0644);
MODULE_PARM_DESC(pause, "Flow Control Pause Time");

#define TC_DEFAULT 64
static int tc = TC_DEFAULT;
module_param(tc, int, 0644);
MODULE_PARM_DESC(tc, "DMA threshold control value");

#define	DEFAULT_BUFSIZE	1536
static int buf_sz = DEFAULT_BUFSIZE;
module_param(buf_sz, int, 0644);
MODULE_PARM_DESC(buf_sz, "DMA buffer size");

#define	STMMAC_RX_COPYBREAK	256

static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
				      NETIF_MSG_LINK | NETIF_MSG_IFUP |
				      NETIF_MSG_IFDOWN | NETIF_MSG_TIMER);

#define STMMAC_DEFAULT_LPI_TIMER	1000
static int eee_timer = STMMAC_DEFAULT_LPI_TIMER;
module_param(eee_timer, int, 0644);
MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec");
#define STMMAC_LPI_T(x) (jiffies + usecs_to_jiffies(x))

/* By default the driver will use the ring mode to manage tx and rx descriptors,
 * but allow user to force to use the chain instead of the ring
 */
static unsigned int chain_mode;
module_param(chain_mode, int, 0444);
MODULE_PARM_DESC(chain_mode, "To use chain instead of ring mode");

static irqreturn_t stmmac_interrupt(int irq, void *dev_id);
/* For MSI interrupts handling */
static irqreturn_t stmmac_mac_interrupt(int irq, void *dev_id);
static irqreturn_t stmmac_safety_interrupt(int irq, void *dev_id);
static irqreturn_t stmmac_msi_intr_tx(int irq, void *data);
static irqreturn_t stmmac_msi_intr_rx(int irq, void *data);
static void stmmac_reset_rx_queue(struct stmmac_priv *priv, u32 queue);
static void stmmac_reset_tx_queue(struct stmmac_priv *priv, u32 queue);
static void stmmac_reset_queues_param(struct stmmac_priv *priv);
static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue);
static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue);
static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
					  u32 rxmode, u32 chan);

#ifdef CONFIG_DEBUG_FS
static const struct net_device_ops stmmac_netdev_ops;
static void stmmac_init_fs(struct net_device *dev);
static void stmmac_exit_fs(struct net_device *dev);
#endif

#define STMMAC_COAL_TIMER(x) (ns_to_ktime((x) * NSEC_PER_USEC))

int stmmac_bus_clks_config(struct stmmac_priv *priv, bool enabled)
{
	int ret = 0;

	if (enabled) {
		ret = clk_prepare_enable(priv->plat->stmmac_clk);
		if (ret)
			return ret;
		ret = clk_prepare_enable(priv->plat->pclk);
		if (ret) {
			clk_disable_unprepare(priv->plat->stmmac_clk);
			return ret;
		}
		if (priv->plat->clks_config) {
			ret = priv->plat->clks_config(priv->plat->bsp_priv, enabled);
			if (ret) {
				clk_disable_unprepare(priv->plat->stmmac_clk);
				clk_disable_unprepare(priv->plat->pclk);
				return ret;
			}
		}
	} else {
		clk_disable_unprepare(priv->plat->stmmac_clk);
		clk_disable_unprepare(priv->plat->pclk);
		if (priv->plat->clks_config)
			priv->plat->clks_config(priv->plat->bsp_priv, enabled);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(stmmac_bus_clks_config);

/**
 * stmmac_verify_args - verify the driver parameters.
 * Description: it checks the driver parameters and sets a default in case of
 * errors.
 */
static void stmmac_verify_args(void)
{
	if (unlikely(watchdog < 0))
		watchdog = TX_TIMEO;
	if (unlikely((buf_sz < DEFAULT_BUFSIZE) || (buf_sz > BUF_SIZE_16KiB)))
		buf_sz = DEFAULT_BUFSIZE;
	if (unlikely(flow_ctrl > 1))
		flow_ctrl = FLOW_AUTO;
	else if (likely(flow_ctrl < 0))
		flow_ctrl = FLOW_OFF;
	if (unlikely((pause < 0) || (pause > 0xffff)))
		pause = PAUSE_TIME;
	if (eee_timer < 0)
		eee_timer = STMMAC_DEFAULT_LPI_TIMER;
}

static void __stmmac_disable_all_queues(struct stmmac_priv *priv)
{
	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
	u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
	u32 queue;

	for (queue = 0; queue < maxq; queue++) {
		struct stmmac_channel *ch = &priv->channel[queue];

		if (stmmac_xdp_is_enabled(priv) &&
		    test_bit(queue, priv->af_xdp_zc_qps)) {
			napi_disable(&ch->rxtx_napi);
			continue;
		}

		if (queue < rx_queues_cnt)
			napi_disable(&ch->rx_napi);
		if (queue < tx_queues_cnt)
			napi_disable(&ch->tx_napi);
	}
}

/**
 * stmmac_disable_all_queues - Disable all queues
 * @priv: driver private structure
 */
static void stmmac_disable_all_queues(struct stmmac_priv *priv)
{
	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
	struct stmmac_rx_queue *rx_q;
	u32 queue;

	/* synchronize_rcu() needed for pending XDP buffers to drain */
	for (queue = 0; queue < rx_queues_cnt; queue++) {
		rx_q = &priv->dma_conf.rx_queue[queue];
		if (rx_q->xsk_pool) {
			synchronize_rcu();
			break;
		}
	}

	__stmmac_disable_all_queues(priv);
}

/**
 * stmmac_enable_all_queues - Enable all queues
 * @priv: driver private structure
 */
static void stmmac_enable_all_queues(struct stmmac_priv *priv)
{
	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
	u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
	u32 queue;

	for (queue = 0; queue < maxq; queue++) {
		struct stmmac_channel *ch = &priv->channel[queue];

		if (stmmac_xdp_is_enabled(priv) &&
		    test_bit(queue, priv->af_xdp_zc_qps)) {
			napi_enable(&ch->rxtx_napi);
			continue;
		}

		if (queue < rx_queues_cnt)
			napi_enable(&ch->rx_napi);
		if (queue < tx_queues_cnt)
			napi_enable(&ch->tx_napi);
	}
}

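/* stmmac_service_event_schedule() defers error handling to the driver's
 * service workqueue; the STMMAC_SERVICE_SCHED bit keeps the work item from
 * being queued twice, and nothing is scheduled once STMMAC_DOWN is set.
 */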
static void stmmac_service_event_schedule(struct stmmac_priv *priv)
{
	if (!test_bit(STMMAC_DOWN, &priv->state) &&
	    !test_and_set_bit(STMMAC_SERVICE_SCHED, &priv->state))
		queue_work(priv->wq, &priv->service_task);
}

static void stmmac_global_err(struct stmmac_priv *priv)
{
	netif_carrier_off(priv->dev);
	set_bit(STMMAC_RESET_REQUESTED, &priv->state);
	stmmac_service_event_schedule(priv);
}

/**
 * stmmac_clk_csr_set - dynamically set the MDC clock
 * @priv: driver private structure
 * Description: this is to dynamically set the MDC clock according to the csr
 * clock input.
 * Note:
 *	If a specific clk_csr value is passed from the platform
 *	this means that the CSR Clock Range selection cannot be
 *	changed at run-time and it is fixed (as reported in the driver
 *	documentation). Otherwise, the driver will try to set the MDC
 *	clock dynamically according to the actual clock input.
 */
static void stmmac_clk_csr_set(struct stmmac_priv *priv)
{
	u32 clk_rate;

	clk_rate = clk_get_rate(priv->plat->stmmac_clk);

	/* Platform provided default clk_csr would be assumed valid
	 * for all other cases except for the below mentioned ones.
	 * For values higher than the IEEE 802.3 specified frequency
	 * we cannot estimate the proper divider as the frequency of
	 * clk_csr_i is not known. So we do not change the default
	 * divider.
	 */
	if (!(priv->clk_csr & MAC_CSR_H_FRQ_MASK)) {
		if (clk_rate < CSR_F_35M)
			priv->clk_csr = STMMAC_CSR_20_35M;
		else if ((clk_rate >= CSR_F_35M) && (clk_rate < CSR_F_60M))
			priv->clk_csr = STMMAC_CSR_35_60M;
		else if ((clk_rate >= CSR_F_60M) && (clk_rate < CSR_F_100M))
			priv->clk_csr = STMMAC_CSR_60_100M;
		else if ((clk_rate >= CSR_F_100M) && (clk_rate < CSR_F_150M))
			priv->clk_csr = STMMAC_CSR_100_150M;
		else if ((clk_rate >= CSR_F_150M) && (clk_rate < CSR_F_250M))
			priv->clk_csr = STMMAC_CSR_150_250M;
		else if ((clk_rate >= CSR_F_250M) && (clk_rate <= CSR_F_300M))
			priv->clk_csr = STMMAC_CSR_250_300M;
	}

	if (priv->plat->flags & STMMAC_FLAG_HAS_SUN8I) {
		if (clk_rate > 160000000)
			priv->clk_csr = 0x03;
		else if (clk_rate > 80000000)
			priv->clk_csr = 0x02;
		else if (clk_rate > 40000000)
			priv->clk_csr = 0x01;
		else
			priv->clk_csr = 0;
	}

	if (priv->plat->has_xgmac) {
		if (clk_rate > 400000000)
			priv->clk_csr = 0x5;
		else if (clk_rate > 350000000)
			priv->clk_csr = 0x4;
		else if (clk_rate > 300000000)
			priv->clk_csr = 0x3;
		else if (clk_rate > 250000000)
			priv->clk_csr = 0x2;
		else if (clk_rate > 150000000)
			priv->clk_csr = 0x1;
		else
			priv->clk_csr = 0x0;
	}
}

static void print_pkt(unsigned char *buf, int len)
{
	pr_debug("len = %d byte, buf addr: 0x%p\n", len, buf);
	print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, buf, len);
}

static inline u32 stmmac_tx_avail(struct stmmac_priv *priv, u32 queue)
{
	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
	u32 avail;

	if (tx_q->dirty_tx > tx_q->cur_tx)
		avail = tx_q->dirty_tx - tx_q->cur_tx - 1;
	else
		avail = priv->dma_conf.dma_tx_size - tx_q->cur_tx + tx_q->dirty_tx - 1;

	return avail;
}

/**
 * stmmac_rx_dirty - Get RX queue dirty
 * @priv: driver private structure
 * @queue: RX queue index
 */
static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv, u32 queue)
{
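	/* cur_rx and dirty_rx are indices into the circular descriptor ring;
	 * the else branch below handles the case where cur_rx has wrapped
	 * around the end of the ring while dirty_rx has not caught up yet.
	 */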
	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
	u32 dirty;

	if (rx_q->dirty_rx <= rx_q->cur_rx)
		dirty = rx_q->cur_rx - rx_q->dirty_rx;
	else
		dirty = priv->dma_conf.dma_rx_size - rx_q->dirty_rx + rx_q->cur_rx;

	return dirty;
}

static void stmmac_lpi_entry_timer_config(struct stmmac_priv *priv, bool en)
{
	int tx_lpi_timer;

	/* Clear/set the SW EEE timer flag based on LPI ET enablement */
	priv->eee_sw_timer_en = en ? 0 : 1;
	tx_lpi_timer = en ? priv->tx_lpi_timer : 0;
	stmmac_set_eee_lpi_timer(priv, priv->hw, tx_lpi_timer);
}

/**
 * stmmac_enable_eee_mode - check and enter LPI mode
 * @priv: driver private structure
 * Description: this function is to verify and enter LPI mode in case of
 * EEE.
 */
static int stmmac_enable_eee_mode(struct stmmac_priv *priv)
{
	u32 tx_cnt = priv->plat->tx_queues_to_use;
	u32 queue;

	/* check if all TX queues have the work finished */
	for (queue = 0; queue < tx_cnt; queue++) {
		struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];

		if (tx_q->dirty_tx != tx_q->cur_tx)
			return -EBUSY; /* still unfinished work */
	}

	/* Check and enter in LPI mode */
	if (!priv->tx_path_in_lpi_mode)
		stmmac_set_eee_mode(priv, priv->hw,
			priv->plat->flags & STMMAC_FLAG_EN_TX_LPI_CLOCKGATING);
	return 0;
}

/**
 * stmmac_disable_eee_mode - disable and exit from LPI mode
 * @priv: driver private structure
 * Description: this function is to exit and disable EEE in case the
 * LPI state is true. This is called by the xmit.
 */
void stmmac_disable_eee_mode(struct stmmac_priv *priv)
{
	if (!priv->eee_sw_timer_en) {
		stmmac_lpi_entry_timer_config(priv, 0);
		return;
	}

	stmmac_reset_eee_mode(priv, priv->hw);
	del_timer_sync(&priv->eee_ctrl_timer);
	priv->tx_path_in_lpi_mode = false;
}

/**
 * stmmac_eee_ctrl_timer - EEE TX SW timer.
 * @t: timer_list struct containing private info
 * Description:
 * if there is no data transfer and if we are not in LPI state,
 * then MAC Transmitter can be moved to LPI state.
 */
static void stmmac_eee_ctrl_timer(struct timer_list *t)
{
	struct stmmac_priv *priv = from_timer(priv, t, eee_ctrl_timer);

	if (stmmac_enable_eee_mode(priv))
		mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer));
}

/**
 * stmmac_eee_init - init EEE
 * @priv: driver private structure
 * Description:
 * if the GMAC supports the EEE (from the HW cap reg) and the phy device
 * can also manage EEE, this function enables the LPI state and starts the
 * related timer.
 */
bool stmmac_eee_init(struct stmmac_priv *priv)
{
	int eee_tw_timer = priv->eee_tw_timer;

	/* Using PCS we cannot deal with the PHY registers at this stage
	 * so we do not support extra features like EEE.
	 */
	if (priv->hw->pcs == STMMAC_PCS_TBI ||
	    priv->hw->pcs == STMMAC_PCS_RTBI)
		return false;

	/* Check if MAC core supports the EEE feature.
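	 * (dma_cap.eee is typically populated from the hardware feature
	 * register read at probe time; cores without the capability simply
	 * leave the bit at zero.)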
	 */
	if (!priv->dma_cap.eee)
		return false;

	mutex_lock(&priv->lock);

	/* Check if it needs to be deactivated */
	if (!priv->eee_active) {
		if (priv->eee_enabled) {
			netdev_dbg(priv->dev, "disable EEE\n");
			stmmac_lpi_entry_timer_config(priv, 0);
			del_timer_sync(&priv->eee_ctrl_timer);
			stmmac_set_eee_timer(priv, priv->hw, 0, eee_tw_timer);
			if (priv->hw->xpcs)
				xpcs_config_eee(priv->hw->xpcs,
						priv->plat->mult_fact_100ns,
						false);
		}
		mutex_unlock(&priv->lock);
		return false;
	}

	if (priv->eee_active && !priv->eee_enabled) {
		timer_setup(&priv->eee_ctrl_timer, stmmac_eee_ctrl_timer, 0);
		stmmac_set_eee_timer(priv, priv->hw, STMMAC_DEFAULT_LIT_LS,
				     eee_tw_timer);
		if (priv->hw->xpcs)
			xpcs_config_eee(priv->hw->xpcs,
					priv->plat->mult_fact_100ns,
					true);
	}

	if (priv->plat->has_gmac4 && priv->tx_lpi_timer <= STMMAC_ET_MAX) {
		del_timer_sync(&priv->eee_ctrl_timer);
		priv->tx_path_in_lpi_mode = false;
		stmmac_lpi_entry_timer_config(priv, 1);
	} else {
		stmmac_lpi_entry_timer_config(priv, 0);
		mod_timer(&priv->eee_ctrl_timer,
			  STMMAC_LPI_T(priv->tx_lpi_timer));
	}

	mutex_unlock(&priv->lock);
	netdev_dbg(priv->dev, "Energy-Efficient Ethernet initialized\n");
	return true;
}

/* stmmac_get_tx_hwtstamp - get HW TX timestamps
 * @priv: driver private structure
 * @p : descriptor pointer
 * @skb : the socket buffer
 * Description :
 * This function will read the timestamp from the descriptor and pass it to
 * the stack, and also perform some sanity checks.
 */
static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv,
				   struct dma_desc *p, struct sk_buff *skb)
{
	struct skb_shared_hwtstamps shhwtstamp;
	bool found = false;
	u64 ns = 0;

	if (!priv->hwts_tx_en)
		return;

	/* exit if skb doesn't support hw tstamp */
	if (likely(!skb || !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)))
		return;

	/* check tx tstamp status */
	if (stmmac_get_tx_timestamp_status(priv, p)) {
		stmmac_get_timestamp(priv, p, priv->adv_ts, &ns);
		found = true;
	} else if (!stmmac_get_mac_tx_timestamp(priv, priv->hw, &ns)) {
		found = true;
	}

	if (found) {
		ns -= priv->plat->cdc_error_adj;

		memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
		shhwtstamp.hwtstamp = ns_to_ktime(ns);

		netdev_dbg(priv->dev, "get valid TX hw timestamp %llu\n", ns);
		/* pass tstamp to stack */
		skb_tstamp_tx(skb, &shhwtstamp);
	}
}

/* stmmac_get_rx_hwtstamp - get HW RX timestamps
 * @priv: driver private structure
 * @p : descriptor pointer
 * @np : next descriptor pointer
 * @skb : the socket buffer
 * Description :
 * This function will read the received packet's timestamp from the descriptor
 * and pass it to the stack. It also performs some sanity checks.
 */
static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p,
				   struct dma_desc *np, struct sk_buff *skb)
{
	struct skb_shared_hwtstamps *shhwtstamp = NULL;
	struct dma_desc *desc = p;
	u64 ns = 0;

	if (!priv->hwts_rx_en)
		return;
	/* For GMAC4, the valid timestamp is from CTX next desc.
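	 * The code below therefore reads the stamp from @np rather than @p
	 * on GMAC4 and XGMAC cores.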
	 */
	if (priv->plat->has_gmac4 || priv->plat->has_xgmac)
		desc = np;

	/* Check if timestamp is available */
	if (stmmac_get_rx_timestamp_status(priv, p, np, priv->adv_ts)) {
		stmmac_get_timestamp(priv, desc, priv->adv_ts, &ns);

		ns -= priv->plat->cdc_error_adj;

		netdev_dbg(priv->dev, "get valid RX hw timestamp %llu\n", ns);
		shhwtstamp = skb_hwtstamps(skb);
		memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
		shhwtstamp->hwtstamp = ns_to_ktime(ns);
	} else {
		netdev_dbg(priv->dev, "cannot get RX hw timestamp\n");
	}
}

/**
 * stmmac_hwtstamp_set - control hardware timestamping.
 * @dev: device pointer.
 * @ifr: An IOCTL specific structure, that can contain a pointer to
 * a proprietary structure used to pass information to the driver.
 * Description:
 * This function configures the MAC to enable/disable both outgoing(TX)
 * and incoming(RX) packets time stamping based on user input.
 * Return Value:
 * 0 on success and an appropriate -ve integer on failure.
 */
static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	struct hwtstamp_config config;
	u32 ptp_v2 = 0;
	u32 tstamp_all = 0;
	u32 ptp_over_ipv4_udp = 0;
	u32 ptp_over_ipv6_udp = 0;
	u32 ptp_over_ethernet = 0;
	u32 snap_type_sel = 0;
	u32 ts_master_en = 0;
	u32 ts_event_en = 0;

	if (!(priv->dma_cap.time_stamp || priv->adv_ts)) {
		netdev_alert(priv->dev, "No support for HW time stamping\n");
		priv->hwts_tx_en = 0;
		priv->hwts_rx_en = 0;

		return -EOPNOTSUPP;
	}

	if (copy_from_user(&config, ifr->ifr_data,
			   sizeof(config)))
		return -EFAULT;

	netdev_dbg(priv->dev, "%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n",
		   __func__, config.flags, config.tx_type, config.rx_filter);

	if (config.tx_type != HWTSTAMP_TX_OFF &&
	    config.tx_type != HWTSTAMP_TX_ON)
		return -ERANGE;

	if (priv->adv_ts) {
		switch (config.rx_filter) {
		case HWTSTAMP_FILTER_NONE:
			/* time stamp no incoming packet at all */
			config.rx_filter = HWTSTAMP_FILTER_NONE;
			break;

		case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
			/* PTP v1, UDP, any kind of event packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
			/* 'xmac' hardware can support Sync, Pdelay_Req and
			 * Pdelay_resp by setting bit14 and bits17/16 to 01
			 * This leaves Delay_Req timestamps out.
			 * Enable all events *and* general purpose message
			 * timestamping
			 */
			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
			/* PTP v1, UDP, Sync packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
			/* take time stamp for SYNC messages only */
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
			/* PTP v1, UDP, Delay_req packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
			/* take time stamp for Delay_Req messages only */
			ts_master_en = PTP_TCR_TSMSTRENA;
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
			/* PTP v2, UDP, any kind of event packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for all event messages */
			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
			/* PTP v2, UDP, Sync packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for SYNC messages only */
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
			/* PTP v2, UDP, Delay_req packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for Delay_Req messages only */
			ts_master_en = PTP_TCR_TSMSTRENA;
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_EVENT:
			/* PTP v2/802.AS1 any layer, any kind of event packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
			if (priv->synopsys_id < DWMAC_CORE_4_10)
				ts_event_en = PTP_TCR_TSEVNTENA;
			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			ptp_over_ethernet = PTP_TCR_TSIPENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_SYNC:
			/* PTP v2/802.AS1, any layer, Sync packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for SYNC messages only */
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			ptp_over_ethernet = PTP_TCR_TSIPENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
			/* PTP v2/802.AS1, any layer, Delay_req packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for Delay_Req messages only */
			ts_master_en = PTP_TCR_TSMSTRENA;
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			ptp_over_ethernet = PTP_TCR_TSIPENA;
			break;

		case HWTSTAMP_FILTER_NTP_ALL:
		case HWTSTAMP_FILTER_ALL:
			/* time stamp any incoming packet */
			config.rx_filter = HWTSTAMP_FILTER_ALL;
			tstamp_all = PTP_TCR_TSENALL;
			break;

		default:
			return -ERANGE;
		}
	} else {
		switch (config.rx_filter) {
		case HWTSTAMP_FILTER_NONE:
			config.rx_filter = HWTSTAMP_FILTER_NONE;
			break;
		default:
			/* PTP v1, UDP, any kind of event packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
			break;
		}
	}
	priv->hwts_rx_en = ((config.rx_filter == HWTSTAMP_FILTER_NONE) ? 0 : 1);
	priv->hwts_tx_en = config.tx_type == HWTSTAMP_TX_ON;

	priv->systime_flags = STMMAC_HWTS_ACTIVE;

	if (priv->hwts_tx_en || priv->hwts_rx_en) {
		priv->systime_flags |= tstamp_all | ptp_v2 |
				       ptp_over_ethernet | ptp_over_ipv6_udp |
				       ptp_over_ipv4_udp | ts_event_en |
				       ts_master_en | snap_type_sel;
	}

	stmmac_config_hw_tstamping(priv, priv->ptpaddr, priv->systime_flags);

	memcpy(&priv->tstamp_config, &config, sizeof(config));

	return copy_to_user(ifr->ifr_data, &config,
			    sizeof(config)) ? -EFAULT : 0;
}

/**
 * stmmac_hwtstamp_get - read hardware timestamping.
 * @dev: device pointer.
 * @ifr: An IOCTL specific structure, that can contain a pointer to
 * a proprietary structure used to pass information to the driver.
 * Description:
 * This function obtains the current hardware timestamping settings
 * as requested.
 */
static int stmmac_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	struct hwtstamp_config *config = &priv->tstamp_config;

	if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
		return -EOPNOTSUPP;

	return copy_to_user(ifr->ifr_data, config,
			    sizeof(*config)) ? -EFAULT : 0;
}

/**
 * stmmac_init_tstamp_counter - init hardware timestamping counter
 * @priv: driver private structure
 * @systime_flags: timestamping flags
 * Description:
 * Initialize hardware counter for packet timestamping.
 * This is valid as long as the interface is open and not suspended.
 * Will be rerun after resuming from suspend, case in which the timestamping
 * flags updated by stmmac_hwtstamp_set() also need to be restored.
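 *
 * Rough worked example of the addend computation below (assumed numbers,
 * for illustration only): with clk_ptp_rate = 100 MHz and a programmed
 * sub-second increment of 20 ns, addend = ((10^9 / 20) << 32) / 100000000
 * = 2^31.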
 */
int stmmac_init_tstamp_counter(struct stmmac_priv *priv, u32 systime_flags)
{
	bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
	struct timespec64 now;
	u32 sec_inc = 0;
	u64 temp = 0;

	if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
		return -EOPNOTSUPP;

	stmmac_config_hw_tstamping(priv, priv->ptpaddr, systime_flags);
	priv->systime_flags = systime_flags;

	/* program Sub Second Increment reg */
	stmmac_config_sub_second_increment(priv, priv->ptpaddr,
					   priv->plat->clk_ptp_rate,
					   xmac, &sec_inc);
	temp = div_u64(1000000000ULL, sec_inc);

	/* Store sub second increment for later use */
	priv->sub_second_inc = sec_inc;

	/* calculate default added value:
	 * formula is :
	 * addend = (2^32)/freq_div_ratio;
	 * where, freq_div_ratio = 1e9ns/sec_inc
	 */
	temp = (u64)(temp << 32);
	priv->default_addend = div_u64(temp, priv->plat->clk_ptp_rate);
	stmmac_config_addend(priv, priv->ptpaddr, priv->default_addend);

	/* initialize system time */
	ktime_get_real_ts64(&now);

	/* lower 32 bits of tv_sec are safe until y2106 */
	stmmac_init_systime(priv, priv->ptpaddr, (u32)now.tv_sec, now.tv_nsec);

	return 0;
}
EXPORT_SYMBOL_GPL(stmmac_init_tstamp_counter);

/**
 * stmmac_init_ptp - init PTP
 * @priv: driver private structure
 * Description: this is to verify if the HW supports the PTPv1 or PTPv2.
 * This is done by looking at the HW cap. register.
 * This function also registers the ptp driver.
 */
static int stmmac_init_ptp(struct stmmac_priv *priv)
{
	bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
	int ret;

	if (priv->plat->ptp_clk_freq_config)
		priv->plat->ptp_clk_freq_config(priv);

	ret = stmmac_init_tstamp_counter(priv, STMMAC_HWTS_ACTIVE);
	if (ret)
		return ret;

	priv->adv_ts = 0;
	/* Check if adv_ts can be enabled for dwmac 4.x / xgmac core */
	if (xmac && priv->dma_cap.atime_stamp)
		priv->adv_ts = 1;
	/* Dwmac 3.x core with extend_desc can support adv_ts */
	else if (priv->extend_desc && priv->dma_cap.atime_stamp)
		priv->adv_ts = 1;

	if (priv->dma_cap.time_stamp)
		netdev_info(priv->dev, "IEEE 1588-2002 Timestamp supported\n");

	if (priv->adv_ts)
		netdev_info(priv->dev,
			    "IEEE 1588-2008 Advanced Timestamp supported\n");

	priv->hwts_tx_en = 0;
	priv->hwts_rx_en = 0;

	if (priv->plat->flags & STMMAC_FLAG_HWTSTAMP_CORRECT_LATENCY)
		stmmac_hwtstamp_correct_latency(priv, priv);

	return 0;
}

static void stmmac_release_ptp(struct stmmac_priv *priv)
{
	clk_disable_unprepare(priv->plat->clk_ptp_ref);
	stmmac_ptp_unregister(priv);
}

/**
 * stmmac_mac_flow_ctrl - Configure flow control in all queues
 * @priv: driver private structure
 * @duplex: duplex passed to the next function
 * Description: It is used for configuring the flow control in all queues
 */
static void stmmac_mac_flow_ctrl(struct stmmac_priv *priv, u32 duplex)
{
	u32 tx_cnt = priv->plat->tx_queues_to_use;

	stmmac_flow_ctrl(priv, priv->hw, duplex, priv->flow_ctrl,
			 priv->pause, tx_cnt);
}

static struct phylink_pcs *stmmac_mac_select_pcs(struct phylink_config *config,
						 phy_interface_t interface)
{
	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));

	if (priv->hw->xpcs)
		return &priv->hw->xpcs->pcs;

	if (priv->hw->lynx_pcs)
		return priv->hw->lynx_pcs;

	return NULL;
}

static void stmmac_mac_config(struct phylink_config *config, unsigned int mode,
			      const struct phylink_link_state *state)
{
	/* Nothing to do, xpcs_config() handles everything */
}

static void stmmac_fpe_link_state_handle(struct stmmac_priv *priv, bool is_up)
{
	struct stmmac_fpe_cfg *fpe_cfg = priv->plat->fpe_cfg;
	enum stmmac_fpe_state *lo_state = &fpe_cfg->lo_fpe_state;
	enum stmmac_fpe_state *lp_state = &fpe_cfg->lp_fpe_state;
	bool *hs_enable = &fpe_cfg->hs_enable;

	if (is_up && *hs_enable) {
		stmmac_fpe_send_mpacket(priv, priv->ioaddr, fpe_cfg,
					MPACKET_VERIFY);
	} else {
		*lo_state = FPE_STATE_OFF;
		*lp_state = FPE_STATE_OFF;
	}
}

static void stmmac_mac_link_down(struct phylink_config *config,
				 unsigned int mode, phy_interface_t interface)
{
	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));

	stmmac_mac_set(priv, priv->ioaddr, false);
	priv->eee_active = false;
	priv->tx_lpi_enabled = false;
	priv->eee_enabled = stmmac_eee_init(priv);
	stmmac_set_eee_pls(priv, priv->hw, false);

	if (priv->dma_cap.fpesel)
		stmmac_fpe_link_state_handle(priv, false);
}

static void stmmac_mac_link_up(struct phylink_config *config,
			       struct phy_device *phy,
			       unsigned int mode, phy_interface_t interface,
			       int speed, int duplex,
			       bool tx_pause, bool rx_pause)
{
	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
	u32 old_ctrl, ctrl;

	if ((priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP) &&
	    priv->plat->serdes_powerup)
		priv->plat->serdes_powerup(priv->dev, priv->plat->bsp_priv);

	old_ctrl = readl(priv->ioaddr + MAC_CTRL_REG);
	ctrl = old_ctrl & ~priv->hw->link.speed_mask;

	if (interface == PHY_INTERFACE_MODE_USXGMII) {
		switch (speed) {
		case SPEED_10000:
			ctrl |= priv->hw->link.xgmii.speed10000;
			break;
		case SPEED_5000:
			ctrl |= priv->hw->link.xgmii.speed5000;
			break;
		case SPEED_2500:
			ctrl |= priv->hw->link.xgmii.speed2500;
			break;
		default:
			return;
		}
	} else if (interface == PHY_INTERFACE_MODE_XLGMII) {
		switch (speed) {
		case SPEED_100000:
			ctrl |= priv->hw->link.xlgmii.speed100000;
			break;
		case SPEED_50000:
			ctrl |= priv->hw->link.xlgmii.speed50000;
			break;
		case SPEED_40000:
			ctrl |= priv->hw->link.xlgmii.speed40000;
			break;
		case SPEED_25000:
			ctrl |= priv->hw->link.xlgmii.speed25000;
			break;
		case SPEED_10000:
			ctrl |= priv->hw->link.xgmii.speed10000;
			break;
		case SPEED_2500:
			ctrl |= priv->hw->link.speed2500;
			break;
		case SPEED_1000:
			ctrl |= priv->hw->link.speed1000;
			break;
		default:
			return;
		}
	} else {
		switch (speed) {
		case SPEED_2500:
			ctrl |= priv->hw->link.speed2500;
			break;
		case SPEED_1000:
			ctrl |= priv->hw->link.speed1000;
			break;
		case SPEED_100:
			ctrl |= priv->hw->link.speed100;
			break;
		case SPEED_10:
			ctrl |= priv->hw->link.speed10;
			break;
		default:
			return;
		}
	}

	priv->speed = speed;

	if (priv->plat->fix_mac_speed)
		priv->plat->fix_mac_speed(priv->plat->bsp_priv, speed, mode);

	if (!duplex)
		ctrl &= ~priv->hw->link.duplex;
	else
		ctrl |= priv->hw->link.duplex;

	/* Flow Control operation */
	if (rx_pause && tx_pause)
		priv->flow_ctrl = FLOW_AUTO;
	else if (rx_pause && !tx_pause)
		priv->flow_ctrl = FLOW_RX;
	else if (!rx_pause && tx_pause)
		priv->flow_ctrl = FLOW_TX;
	else
		priv->flow_ctrl = FLOW_OFF;

	stmmac_mac_flow_ctrl(priv, duplex);

	if (ctrl != old_ctrl)
		writel(ctrl, priv->ioaddr + MAC_CTRL_REG);

	stmmac_mac_set(priv, priv->ioaddr, true);
	if (phy && priv->dma_cap.eee) {
		priv->eee_active =
			phy_init_eee(phy, !(priv->plat->flags &
				STMMAC_FLAG_RX_CLK_RUNS_IN_LPI)) >= 0;
		priv->eee_enabled = stmmac_eee_init(priv);
		priv->tx_lpi_enabled = priv->eee_enabled;
		stmmac_set_eee_pls(priv, priv->hw, true);
	}

	if (priv->dma_cap.fpesel)
		stmmac_fpe_link_state_handle(priv, true);

	if (priv->plat->flags & STMMAC_FLAG_HWTSTAMP_CORRECT_LATENCY)
		stmmac_hwtstamp_correct_latency(priv, priv);
}

static const struct phylink_mac_ops stmmac_phylink_mac_ops = {
	.mac_select_pcs = stmmac_mac_select_pcs,
	.mac_config = stmmac_mac_config,
	.mac_link_down = stmmac_mac_link_down,
	.mac_link_up = stmmac_mac_link_up,
};

/**
 * stmmac_check_pcs_mode - verify if RGMII/SGMII is supported
 * @priv: driver private structure
 * Description: this is to verify if the HW supports the PCS.
 * Physical Coding Sublayer (PCS) interface that can be used when the MAC is
 * configured for the TBI, RTBI, or SGMII PHY interface.
 */
static void stmmac_check_pcs_mode(struct stmmac_priv *priv)
{
	int interface = priv->plat->mac_interface;

	if (priv->dma_cap.pcs) {
		if ((interface == PHY_INTERFACE_MODE_RGMII) ||
		    (interface == PHY_INTERFACE_MODE_RGMII_ID) ||
		    (interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
		    (interface == PHY_INTERFACE_MODE_RGMII_TXID)) {
			netdev_dbg(priv->dev, "PCS RGMII support enabled\n");
			priv->hw->pcs = STMMAC_PCS_RGMII;
		} else if (interface == PHY_INTERFACE_MODE_SGMII) {
			netdev_dbg(priv->dev, "PCS SGMII support enabled\n");
			priv->hw->pcs = STMMAC_PCS_SGMII;
		}
	}
}

/**
 * stmmac_init_phy - PHY initialization
 * @dev: net device structure
 * Description: it initializes the driver's PHY state, and attaches the PHY
 * to the mac driver.
 * Return value:
 * 0 on success
 */
static int stmmac_init_phy(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	struct fwnode_handle *phy_fwnode;
	struct fwnode_handle *fwnode;
	int ret;

	if (!phylink_expects_phy(priv->phylink))
		return 0;

	fwnode = priv->plat->port_node;
	if (!fwnode)
		fwnode = dev_fwnode(priv->device);

	if (fwnode)
		phy_fwnode = fwnode_get_phy_node(fwnode);
	else
		phy_fwnode = NULL;

	/* Some DT bindings do not set-up the PHY handle. Let's try to
	 * manually parse it
	 */
	if (!phy_fwnode || IS_ERR(phy_fwnode)) {
		int addr = priv->plat->phy_addr;
		struct phy_device *phydev;

		if (addr < 0) {
			netdev_err(priv->dev, "no phy found\n");
			return -ENODEV;
		}

		phydev = mdiobus_get_phy(priv->mii, addr);
		if (!phydev) {
			netdev_err(priv->dev, "no phy at addr %d\n", addr);
			return -ENODEV;
		}

		ret = phylink_connect_phy(priv->phylink, phydev);
	} else {
		fwnode_handle_put(phy_fwnode);
		ret = phylink_fwnode_phy_connect(priv->phylink, fwnode, 0);
	}

	if (!priv->plat->pmt) {
		struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };

		phylink_ethtool_get_wol(priv->phylink, &wol);
		device_set_wakeup_capable(priv->device, !!wol.supported);
		device_set_wakeup_enable(priv->device, !!wol.wolopts);
	}

	return ret;
}

static void stmmac_set_half_duplex(struct stmmac_priv *priv)
{
	/* Half-Duplex can only work with single tx queue */
	if (priv->plat->tx_queues_to_use > 1)
		priv->phylink_config.mac_capabilities &=
			~(MAC_10HD | MAC_100HD | MAC_1000HD);
	else
		priv->phylink_config.mac_capabilities |=
			(MAC_10HD | MAC_100HD | MAC_1000HD);
}

static int stmmac_phy_setup(struct stmmac_priv *priv)
{
	struct stmmac_mdio_bus_data *mdio_bus_data;
	int mode = priv->plat->phy_interface;
	struct fwnode_handle *fwnode;
	struct phylink *phylink;
	int max_speed;

	priv->phylink_config.dev = &priv->dev->dev;
	priv->phylink_config.type = PHYLINK_NETDEV;
	priv->phylink_config.mac_managed_pm = true;

	mdio_bus_data = priv->plat->mdio_bus_data;
	if (mdio_bus_data)
		priv->phylink_config.ovr_an_inband =
			mdio_bus_data->xpcs_an_inband;

	/* Set the platform/firmware specified interface mode. Note, phylink
	 * deals with the PHY interface mode, not the MAC interface mode.
	 */
	__set_bit(mode, priv->phylink_config.supported_interfaces);

	/* If we have an xpcs, it defines which PHY interfaces are supported.
	 */
	if (priv->hw->xpcs)
		xpcs_get_interfaces(priv->hw->xpcs,
				    priv->phylink_config.supported_interfaces);

	priv->phylink_config.mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
						MAC_10FD | MAC_100FD |
						MAC_1000FD;

	stmmac_set_half_duplex(priv);

	/* Get the MAC specific capabilities */
	stmmac_mac_phylink_get_caps(priv);

	max_speed = priv->plat->max_speed;
	if (max_speed)
		phylink_limit_mac_speed(&priv->phylink_config, max_speed);

	fwnode = priv->plat->port_node;
	if (!fwnode)
		fwnode = dev_fwnode(priv->device);

	phylink = phylink_create(&priv->phylink_config, fwnode,
				 mode, &stmmac_phylink_mac_ops);
	if (IS_ERR(phylink))
		return PTR_ERR(phylink);

	priv->phylink = phylink;
	return 0;
}

static void stmmac_display_rx_rings(struct stmmac_priv *priv,
				    struct stmmac_dma_conf *dma_conf)
{
	u32 rx_cnt = priv->plat->rx_queues_to_use;
	unsigned int desc_size;
	void *head_rx;
	u32 queue;

	/* Display RX rings */
	for (queue = 0; queue < rx_cnt; queue++) {
		struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];

		pr_info("\tRX Queue %u rings\n", queue);

		if (priv->extend_desc) {
			head_rx = (void *)rx_q->dma_erx;
			desc_size = sizeof(struct dma_extended_desc);
		} else {
			head_rx = (void *)rx_q->dma_rx;
			desc_size = sizeof(struct dma_desc);
		}

		/* Display RX ring */
		stmmac_display_ring(priv, head_rx, dma_conf->dma_rx_size, true,
				    rx_q->dma_rx_phy, desc_size);
	}
}

static void stmmac_display_tx_rings(struct stmmac_priv *priv,
				    struct stmmac_dma_conf *dma_conf)
{
	u32 tx_cnt = priv->plat->tx_queues_to_use;
	unsigned int desc_size;
	void *head_tx;
	u32 queue;

	/* Display TX rings */
	for (queue = 0; queue < tx_cnt; queue++) {
		struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];

		pr_info("\tTX Queue %d rings\n", queue);

		if (priv->extend_desc) {
			head_tx = (void *)tx_q->dma_etx;
			desc_size = sizeof(struct dma_extended_desc);
		} else if (tx_q->tbs & STMMAC_TBS_AVAIL) {
			head_tx = (void *)tx_q->dma_entx;
			desc_size = sizeof(struct dma_edesc);
		} else {
			head_tx = (void *)tx_q->dma_tx;
			desc_size = sizeof(struct dma_desc);
		}

		stmmac_display_ring(priv, head_tx, dma_conf->dma_tx_size, false,
				    tx_q->dma_tx_phy, desc_size);
	}
}

static void stmmac_display_rings(struct stmmac_priv *priv,
				 struct stmmac_dma_conf *dma_conf)
{
	/* Display RX ring */
	stmmac_display_rx_rings(priv, dma_conf);

	/* Display TX ring */
	stmmac_display_tx_rings(priv, dma_conf);
}

static int stmmac_set_bfsize(int mtu, int bufsize)
{
	int ret = bufsize;

	if (mtu >= BUF_SIZE_8KiB)
		ret = BUF_SIZE_16KiB;
	else if (mtu >= BUF_SIZE_4KiB)
		ret = BUF_SIZE_8KiB;
	else if (mtu >= BUF_SIZE_2KiB)
		ret = BUF_SIZE_4KiB;
	else if (mtu > DEFAULT_BUFSIZE)
		ret = BUF_SIZE_2KiB;
	else
		ret = DEFAULT_BUFSIZE;

	return ret;
}

/**
 * stmmac_clear_rx_descriptors - clear RX descriptors
 * @priv: driver private structure
 * @dma_conf: structure to take the dma data
 * @queue: RX queue index
 * Description: this function is called to clear the RX descriptors
 * whether basic or extended descriptors are in use.
 */
static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv,
					struct stmmac_dma_conf *dma_conf,
					u32 queue)
{
	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
	int i;

	/* Clear the RX descriptors */
	for (i = 0; i < dma_conf->dma_rx_size; i++)
		if (priv->extend_desc)
			stmmac_init_rx_desc(priv, &rx_q->dma_erx[i].basic,
					    priv->use_riwt, priv->mode,
					    (i == dma_conf->dma_rx_size - 1),
					    dma_conf->dma_buf_sz);
		else
			stmmac_init_rx_desc(priv, &rx_q->dma_rx[i],
					    priv->use_riwt, priv->mode,
					    (i == dma_conf->dma_rx_size - 1),
					    dma_conf->dma_buf_sz);
}

/**
 * stmmac_clear_tx_descriptors - clear tx descriptors
 * @priv: driver private structure
 * @dma_conf: structure to take the dma data
 * @queue: TX queue index.
 * Description: this function is called to clear the TX descriptors
 * whether basic or extended descriptors are in use.
 */
static void stmmac_clear_tx_descriptors(struct stmmac_priv *priv,
					struct stmmac_dma_conf *dma_conf,
					u32 queue)
{
	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
	int i;

	/* Clear the TX descriptors */
	for (i = 0; i < dma_conf->dma_tx_size; i++) {
		int last = (i == (dma_conf->dma_tx_size - 1));
		struct dma_desc *p;

		if (priv->extend_desc)
			p = &tx_q->dma_etx[i].basic;
		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
			p = &tx_q->dma_entx[i].basic;
		else
			p = &tx_q->dma_tx[i];

		stmmac_init_tx_desc(priv, p, priv->mode, last);
	}
}

/**
 * stmmac_clear_descriptors - clear descriptors
 * @priv: driver private structure
 * @dma_conf: structure to take the dma data
 * Description: this function is called to clear the TX and RX descriptors
 * whether basic or extended descriptors are in use.
 */
static void stmmac_clear_descriptors(struct stmmac_priv *priv,
				     struct stmmac_dma_conf *dma_conf)
{
	u32 rx_queue_cnt = priv->plat->rx_queues_to_use;
	u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
	u32 queue;

	/* Clear the RX descriptors */
	for (queue = 0; queue < rx_queue_cnt; queue++)
		stmmac_clear_rx_descriptors(priv, dma_conf, queue);

	/* Clear the TX descriptors */
	for (queue = 0; queue < tx_queue_cnt; queue++)
		stmmac_clear_tx_descriptors(priv, dma_conf, queue);
}

/**
 * stmmac_init_rx_buffers - init the RX descriptor buffer.
 * @priv: driver private structure
 * @dma_conf: structure to take the dma data
 * @p: descriptor pointer
 * @i: descriptor index
 * @flags: gfp flag
 * @queue: RX queue index
 * Description: this function is called to allocate a receive buffer, perform
 * the DMA mapping and init the descriptor.
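 * Buffers come from the queue's page_pool (which also supplies the DMA
 * address); when Split Header (SPH) is enabled a second page is allocated
 * and programmed as the secondary buffer address.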
 */
static int stmmac_init_rx_buffers(struct stmmac_priv *priv,
				  struct stmmac_dma_conf *dma_conf,
				  struct dma_desc *p,
				  int i, gfp_t flags, u32 queue)
{
	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
	struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
	gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);

	if (priv->dma_cap.host_dma_width <= 32)
		gfp |= GFP_DMA32;

	if (!buf->page) {
		buf->page = page_pool_alloc_pages(rx_q->page_pool, gfp);
		if (!buf->page)
			return -ENOMEM;
		buf->page_offset = stmmac_rx_offset(priv);
	}

	if (priv->sph && !buf->sec_page) {
		buf->sec_page = page_pool_alloc_pages(rx_q->page_pool, gfp);
		if (!buf->sec_page)
			return -ENOMEM;

		buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
		stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true);
	} else {
		buf->sec_page = NULL;
		stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false);
	}

	buf->addr = page_pool_get_dma_addr(buf->page) + buf->page_offset;

	stmmac_set_desc_addr(priv, p, buf->addr);
	if (dma_conf->dma_buf_sz == BUF_SIZE_16KiB)
		stmmac_init_desc3(priv, p);

	return 0;
}

/**
 * stmmac_free_rx_buffer - free RX dma buffers
 * @priv: private structure
 * @rx_q: RX queue
 * @i: buffer index.
 */
static void stmmac_free_rx_buffer(struct stmmac_priv *priv,
				  struct stmmac_rx_queue *rx_q,
				  int i)
{
	struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];

	if (buf->page)
		page_pool_put_full_page(rx_q->page_pool, buf->page, false);
	buf->page = NULL;

	if (buf->sec_page)
		page_pool_put_full_page(rx_q->page_pool, buf->sec_page, false);
	buf->sec_page = NULL;
}

/**
 * stmmac_free_tx_buffer - free TX dma buffers
 * @priv: private structure
 * @dma_conf: structure to take the dma data
 * @queue: TX queue index
 * @i: buffer index.
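 * The buffer may back a regular SKB, an XDP_TX/XDP_NDO frame or an XSK
 * descriptor; each type is unmapped and released through its own path below.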
 */
static void stmmac_free_tx_buffer(struct stmmac_priv *priv,
				  struct stmmac_dma_conf *dma_conf,
				  u32 queue, int i)
{
	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];

	if (tx_q->tx_skbuff_dma[i].buf &&
	    tx_q->tx_skbuff_dma[i].buf_type != STMMAC_TXBUF_T_XDP_TX) {
		if (tx_q->tx_skbuff_dma[i].map_as_page)
			dma_unmap_page(priv->device,
				       tx_q->tx_skbuff_dma[i].buf,
				       tx_q->tx_skbuff_dma[i].len,
				       DMA_TO_DEVICE);
		else
			dma_unmap_single(priv->device,
					 tx_q->tx_skbuff_dma[i].buf,
					 tx_q->tx_skbuff_dma[i].len,
					 DMA_TO_DEVICE);
	}

	if (tx_q->xdpf[i] &&
	    (tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XDP_TX ||
	     tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XDP_NDO)) {
		xdp_return_frame(tx_q->xdpf[i]);
		tx_q->xdpf[i] = NULL;
	}

	if (tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XSK_TX)
		tx_q->xsk_frames_done++;

	if (tx_q->tx_skbuff[i] &&
	    tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_SKB) {
		dev_kfree_skb_any(tx_q->tx_skbuff[i]);
		tx_q->tx_skbuff[i] = NULL;
	}

	tx_q->tx_skbuff_dma[i].buf = 0;
	tx_q->tx_skbuff_dma[i].map_as_page = false;
}

/**
 * dma_free_rx_skbufs - free RX dma buffers
 * @priv: private structure
 * @dma_conf: structure to take the dma data
 * @queue: RX queue index
 */
static void dma_free_rx_skbufs(struct stmmac_priv *priv,
			       struct stmmac_dma_conf *dma_conf,
			       u32 queue)
{
	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
	int i;

	for (i = 0; i < dma_conf->dma_rx_size; i++)
		stmmac_free_rx_buffer(priv, rx_q, i);
}

static int stmmac_alloc_rx_buffers(struct stmmac_priv *priv,
				   struct stmmac_dma_conf *dma_conf,
				   u32 queue, gfp_t flags)
{
	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
	int i;

	for (i = 0; i < dma_conf->dma_rx_size; i++) {
		struct dma_desc *p;
		int ret;

		if (priv->extend_desc)
			p = &((rx_q->dma_erx + i)->basic);
		else
			p = rx_q->dma_rx + i;

		ret = stmmac_init_rx_buffers(priv, dma_conf, p, i, flags,
					     queue);
		if (ret)
			return ret;

		rx_q->buf_alloc_num++;
	}

	return 0;
}

/**
 * dma_free_rx_xskbufs - free RX dma buffers from XSK pool
 * @priv: private structure
 * @dma_conf: structure to take the dma data
 * @queue: RX queue index
 */
static void dma_free_rx_xskbufs(struct stmmac_priv *priv,
				struct stmmac_dma_conf *dma_conf,
				u32 queue)
{
	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
	int i;

	for (i = 0; i < dma_conf->dma_rx_size; i++) {
		struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];

		if (!buf->xdp)
			continue;

		xsk_buff_free(buf->xdp);
		buf->xdp = NULL;
	}
}

static int stmmac_alloc_rx_buffers_zc(struct stmmac_priv *priv,
				      struct stmmac_dma_conf *dma_conf,
				      u32 queue)
{
	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
	int i;

	/* struct stmmac_xdp_buff is using cb field (maximum size of 24 bytes)
	 * in struct xdp_buff_xsk to stash driver specific information. Thus,
	 * use this macro to make sure no size violations.
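	 * (XSK_CHECK_PRIV_TYPE() is a compile-time assertion, so a violation
	 * shows up as a build failure rather than at runtime.)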
	 */
	XSK_CHECK_PRIV_TYPE(struct stmmac_xdp_buff);

	for (i = 0; i < dma_conf->dma_rx_size; i++) {
		struct stmmac_rx_buffer *buf;
		dma_addr_t dma_addr;
		struct dma_desc *p;

		if (priv->extend_desc)
			p = (struct dma_desc *)(rx_q->dma_erx + i);
		else
			p = rx_q->dma_rx + i;

		buf = &rx_q->buf_pool[i];

		buf->xdp = xsk_buff_alloc(rx_q->xsk_pool);
		if (!buf->xdp)
			return -ENOMEM;

		dma_addr = xsk_buff_xdp_get_dma(buf->xdp);
		stmmac_set_desc_addr(priv, p, dma_addr);
		rx_q->buf_alloc_num++;
	}

	return 0;
}

static struct xsk_buff_pool *stmmac_get_xsk_pool(struct stmmac_priv *priv, u32 queue)
{
	if (!stmmac_xdp_is_enabled(priv) || !test_bit(queue, priv->af_xdp_zc_qps))
		return NULL;

	return xsk_get_pool_from_qid(priv->dev, queue);
}

/**
 * __init_dma_rx_desc_rings - init the RX descriptor ring (per queue)
 * @priv: driver private structure
 * @dma_conf: structure to take the dma data
 * @queue: RX queue index
 * @flags: gfp flag.
 * Description: this function initializes the DMA RX descriptors
 * and allocates the socket buffers. It supports the chained and ring
 * modes.
 */
static int __init_dma_rx_desc_rings(struct stmmac_priv *priv,
				    struct stmmac_dma_conf *dma_conf,
				    u32 queue, gfp_t flags)
{
	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
	int ret;

	netif_dbg(priv, probe, priv->dev,
		  "(%s) dma_rx_phy=0x%08x\n", __func__,
		  (u32)rx_q->dma_rx_phy);

	stmmac_clear_rx_descriptors(priv, dma_conf, queue);

	xdp_rxq_info_unreg_mem_model(&rx_q->xdp_rxq);

	rx_q->xsk_pool = stmmac_get_xsk_pool(priv, queue);

	if (rx_q->xsk_pool) {
		WARN_ON(xdp_rxq_info_reg_mem_model(&rx_q->xdp_rxq,
						   MEM_TYPE_XSK_BUFF_POOL,
						   NULL));
		netdev_info(priv->dev,
			    "Register MEM_TYPE_XSK_BUFF_POOL RxQ-%d\n",
			    rx_q->queue_index);
		xsk_pool_set_rxq_info(rx_q->xsk_pool, &rx_q->xdp_rxq);
	} else {
		WARN_ON(xdp_rxq_info_reg_mem_model(&rx_q->xdp_rxq,
						   MEM_TYPE_PAGE_POOL,
						   rx_q->page_pool));
		netdev_info(priv->dev,
			    "Register MEM_TYPE_PAGE_POOL RxQ-%d\n",
			    rx_q->queue_index);
	}

	if (rx_q->xsk_pool) {
		/* RX XDP ZC buffer pool may not be populated, e.g.
		 * xdpsock TX-only.
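		 * That is why the return value of the allocation below is
		 * deliberately not checked here.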
		 */
		stmmac_alloc_rx_buffers_zc(priv, dma_conf, queue);
	} else {
		ret = stmmac_alloc_rx_buffers(priv, dma_conf, queue, flags);
		if (ret < 0)
			return -ENOMEM;
	}

	/* Setup the chained descriptor addresses */
	if (priv->mode == STMMAC_CHAIN_MODE) {
		if (priv->extend_desc)
			stmmac_mode_init(priv, rx_q->dma_erx,
					 rx_q->dma_rx_phy,
					 dma_conf->dma_rx_size, 1);
		else
			stmmac_mode_init(priv, rx_q->dma_rx,
					 rx_q->dma_rx_phy,
					 dma_conf->dma_rx_size, 0);
	}

	return 0;
}

static int init_dma_rx_desc_rings(struct net_device *dev,
				  struct stmmac_dma_conf *dma_conf,
				  gfp_t flags)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	u32 rx_count = priv->plat->rx_queues_to_use;
	int queue;
	int ret;

	/* RX INITIALIZATION */
	netif_dbg(priv, probe, priv->dev,
		  "SKB addresses:\nskb\t\tskb data\tdma data\n");

	for (queue = 0; queue < rx_count; queue++) {
		ret = __init_dma_rx_desc_rings(priv, dma_conf, queue, flags);
		if (ret)
			goto err_init_rx_buffers;
	}

	return 0;

err_init_rx_buffers:
	while (queue >= 0) {
		struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];

		if (rx_q->xsk_pool)
			dma_free_rx_xskbufs(priv, dma_conf, queue);
		else
			dma_free_rx_skbufs(priv, dma_conf, queue);

		rx_q->buf_alloc_num = 0;
		rx_q->xsk_pool = NULL;

		queue--;
	}

	return ret;
}

/**
 * __init_dma_tx_desc_rings - init the TX descriptor ring (per queue)
 * @priv: driver private structure
 * @dma_conf: structure to take the dma data
 * @queue: TX queue index
 * Description: this function initializes the DMA TX descriptors
 * and allocates the socket buffers. It supports the chained and ring
 * modes.
 */
static int __init_dma_tx_desc_rings(struct stmmac_priv *priv,
				    struct stmmac_dma_conf *dma_conf,
				    u32 queue)
{
	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
	int i;

	netif_dbg(priv, probe, priv->dev,
		  "(%s) dma_tx_phy=0x%08x\n", __func__,
		  (u32)tx_q->dma_tx_phy);

	/* Setup the chained descriptor addresses */
	if (priv->mode == STMMAC_CHAIN_MODE) {
		if (priv->extend_desc)
			stmmac_mode_init(priv, tx_q->dma_etx,
					 tx_q->dma_tx_phy,
					 dma_conf->dma_tx_size, 1);
		else if (!(tx_q->tbs & STMMAC_TBS_AVAIL))
			stmmac_mode_init(priv, tx_q->dma_tx,
					 tx_q->dma_tx_phy,
					 dma_conf->dma_tx_size, 0);
	}

	tx_q->xsk_pool = stmmac_get_xsk_pool(priv, queue);

	for (i = 0; i < dma_conf->dma_tx_size; i++) {
		struct dma_desc *p;

		if (priv->extend_desc)
			p = &((tx_q->dma_etx + i)->basic);
		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
			p = &((tx_q->dma_entx + i)->basic);
		else
			p = tx_q->dma_tx + i;

		stmmac_clear_desc(priv, p);

		tx_q->tx_skbuff_dma[i].buf = 0;
		tx_q->tx_skbuff_dma[i].map_as_page = false;
		tx_q->tx_skbuff_dma[i].len = 0;
		tx_q->tx_skbuff_dma[i].last_segment = false;
		tx_q->tx_skbuff[i] = NULL;
	}

	return 0;
}

static int init_dma_tx_desc_rings(struct net_device *dev,
				  struct stmmac_dma_conf *dma_conf)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	u32 tx_queue_cnt;
	u32 queue;

	tx_queue_cnt = priv->plat->tx_queues_to_use;

	for (queue = 0; queue < tx_queue_cnt; queue++)
		__init_dma_tx_desc_rings(priv, dma_conf, queue);

	return 0;
}

/**
 * init_dma_desc_rings - init the RX/TX descriptor rings
 * @dev: net device structure
 * @dma_conf: structure to take the dma data
 * @flags: gfp flag.
 * Description: this function initializes the DMA RX/TX descriptors
 * and allocates the socket buffers. It supports the chained and ring
 * modes.
1854 */ 1855 static int init_dma_desc_rings(struct net_device *dev, 1856 struct stmmac_dma_conf *dma_conf, 1857 gfp_t flags) 1858 { 1859 struct stmmac_priv *priv = netdev_priv(dev); 1860 int ret; 1861 1862 ret = init_dma_rx_desc_rings(dev, dma_conf, flags); 1863 if (ret) 1864 return ret; 1865 1866 ret = init_dma_tx_desc_rings(dev, dma_conf); 1867 1868 stmmac_clear_descriptors(priv, dma_conf); 1869 1870 if (netif_msg_hw(priv)) 1871 stmmac_display_rings(priv, dma_conf); 1872 1873 return ret; 1874 } 1875 1876 /** 1877 * dma_free_tx_skbufs - free TX dma buffers 1878 * @priv: private structure 1879 * @dma_conf: structure to take the dma data 1880 * @queue: TX queue index 1881 */ 1882 static void dma_free_tx_skbufs(struct stmmac_priv *priv, 1883 struct stmmac_dma_conf *dma_conf, 1884 u32 queue) 1885 { 1886 struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue]; 1887 int i; 1888 1889 tx_q->xsk_frames_done = 0; 1890 1891 for (i = 0; i < dma_conf->dma_tx_size; i++) 1892 stmmac_free_tx_buffer(priv, dma_conf, queue, i); 1893 1894 if (tx_q->xsk_pool && tx_q->xsk_frames_done) { 1895 xsk_tx_completed(tx_q->xsk_pool, tx_q->xsk_frames_done); 1896 tx_q->xsk_frames_done = 0; 1897 tx_q->xsk_pool = NULL; 1898 } 1899 } 1900 1901 /** 1902 * stmmac_free_tx_skbufs - free TX skb buffers 1903 * @priv: private structure 1904 */ 1905 static void stmmac_free_tx_skbufs(struct stmmac_priv *priv) 1906 { 1907 u32 tx_queue_cnt = priv->plat->tx_queues_to_use; 1908 u32 queue; 1909 1910 for (queue = 0; queue < tx_queue_cnt; queue++) 1911 dma_free_tx_skbufs(priv, &priv->dma_conf, queue); 1912 } 1913 1914 /** 1915 * __free_dma_rx_desc_resources - free RX dma desc resources (per queue) 1916 * @priv: private structure 1917 * @dma_conf: structure to take the dma data 1918 * @queue: RX queue index 1919 */ 1920 static void __free_dma_rx_desc_resources(struct stmmac_priv *priv, 1921 struct stmmac_dma_conf *dma_conf, 1922 u32 queue) 1923 { 1924 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue]; 1925 1926 /* Release the DMA RX socket buffers */ 1927 if (rx_q->xsk_pool) 1928 dma_free_rx_xskbufs(priv, dma_conf, queue); 1929 else 1930 dma_free_rx_skbufs(priv, dma_conf, queue); 1931 1932 rx_q->buf_alloc_num = 0; 1933 rx_q->xsk_pool = NULL; 1934 1935 /* Free DMA regions of consistent memory previously allocated */ 1936 if (!priv->extend_desc) 1937 dma_free_coherent(priv->device, dma_conf->dma_rx_size * 1938 sizeof(struct dma_desc), 1939 rx_q->dma_rx, rx_q->dma_rx_phy); 1940 else 1941 dma_free_coherent(priv->device, dma_conf->dma_rx_size * 1942 sizeof(struct dma_extended_desc), 1943 rx_q->dma_erx, rx_q->dma_rx_phy); 1944 1945 if (xdp_rxq_info_is_reg(&rx_q->xdp_rxq)) 1946 xdp_rxq_info_unreg(&rx_q->xdp_rxq); 1947 1948 kfree(rx_q->buf_pool); 1949 if (rx_q->page_pool) 1950 page_pool_destroy(rx_q->page_pool); 1951 } 1952 1953 static void free_dma_rx_desc_resources(struct stmmac_priv *priv, 1954 struct stmmac_dma_conf *dma_conf) 1955 { 1956 u32 rx_count = priv->plat->rx_queues_to_use; 1957 u32 queue; 1958 1959 /* Free RX queue resources */ 1960 for (queue = 0; queue < rx_count; queue++) 1961 __free_dma_rx_desc_resources(priv, dma_conf, queue); 1962 } 1963 1964 /** 1965 * __free_dma_tx_desc_resources - free TX dma desc resources (per queue) 1966 * @priv: private structure 1967 * @dma_conf: structure to take the dma data 1968 * @queue: TX queue index 1969 */ 1970 static void __free_dma_tx_desc_resources(struct stmmac_priv *priv, 1971 struct stmmac_dma_conf *dma_conf, 1972 u32 queue) 1973 { 1974 struct stmmac_tx_queue *tx_q = 
&dma_conf->tx_queue[queue]; 1975 size_t size; 1976 void *addr; 1977 1978 /* Release the DMA TX socket buffers */ 1979 dma_free_tx_skbufs(priv, dma_conf, queue); 1980 1981 if (priv->extend_desc) { 1982 size = sizeof(struct dma_extended_desc); 1983 addr = tx_q->dma_etx; 1984 } else if (tx_q->tbs & STMMAC_TBS_AVAIL) { 1985 size = sizeof(struct dma_edesc); 1986 addr = tx_q->dma_entx; 1987 } else { 1988 size = sizeof(struct dma_desc); 1989 addr = tx_q->dma_tx; 1990 } 1991 1992 size *= dma_conf->dma_tx_size; 1993 1994 dma_free_coherent(priv->device, size, addr, tx_q->dma_tx_phy); 1995 1996 kfree(tx_q->tx_skbuff_dma); 1997 kfree(tx_q->tx_skbuff); 1998 } 1999 2000 static void free_dma_tx_desc_resources(struct stmmac_priv *priv, 2001 struct stmmac_dma_conf *dma_conf) 2002 { 2003 u32 tx_count = priv->plat->tx_queues_to_use; 2004 u32 queue; 2005 2006 /* Free TX queue resources */ 2007 for (queue = 0; queue < tx_count; queue++) 2008 __free_dma_tx_desc_resources(priv, dma_conf, queue); 2009 } 2010 2011 /** 2012 * __alloc_dma_rx_desc_resources - alloc RX resources (per queue). 2013 * @priv: private structure 2014 * @dma_conf: structure to take the dma data 2015 * @queue: RX queue index 2016 * Description: according to which descriptor can be used (extend or basic) 2017 * this function allocates the resources for TX and RX paths. In case of 2018 * reception, for example, it pre-allocated the RX socket buffer in order to 2019 * allow zero-copy mechanism. 2020 */ 2021 static int __alloc_dma_rx_desc_resources(struct stmmac_priv *priv, 2022 struct stmmac_dma_conf *dma_conf, 2023 u32 queue) 2024 { 2025 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue]; 2026 struct stmmac_channel *ch = &priv->channel[queue]; 2027 bool xdp_prog = stmmac_xdp_is_enabled(priv); 2028 struct page_pool_params pp_params = { 0 }; 2029 unsigned int num_pages; 2030 unsigned int napi_id; 2031 int ret; 2032 2033 rx_q->queue_index = queue; 2034 rx_q->priv_data = priv; 2035 2036 pp_params.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV; 2037 pp_params.pool_size = dma_conf->dma_rx_size; 2038 num_pages = DIV_ROUND_UP(dma_conf->dma_buf_sz, PAGE_SIZE); 2039 pp_params.order = ilog2(num_pages); 2040 pp_params.nid = dev_to_node(priv->device); 2041 pp_params.dev = priv->device; 2042 pp_params.dma_dir = xdp_prog ? 
DMA_BIDIRECTIONAL : DMA_FROM_DEVICE; 2043 pp_params.offset = stmmac_rx_offset(priv); 2044 pp_params.max_len = STMMAC_MAX_RX_BUF_SIZE(num_pages); 2045 2046 rx_q->page_pool = page_pool_create(&pp_params); 2047 if (IS_ERR(rx_q->page_pool)) { 2048 ret = PTR_ERR(rx_q->page_pool); 2049 rx_q->page_pool = NULL; 2050 return ret; 2051 } 2052 2053 rx_q->buf_pool = kcalloc(dma_conf->dma_rx_size, 2054 sizeof(*rx_q->buf_pool), 2055 GFP_KERNEL); 2056 if (!rx_q->buf_pool) 2057 return -ENOMEM; 2058 2059 if (priv->extend_desc) { 2060 rx_q->dma_erx = dma_alloc_coherent(priv->device, 2061 dma_conf->dma_rx_size * 2062 sizeof(struct dma_extended_desc), 2063 &rx_q->dma_rx_phy, 2064 GFP_KERNEL); 2065 if (!rx_q->dma_erx) 2066 return -ENOMEM; 2067 2068 } else { 2069 rx_q->dma_rx = dma_alloc_coherent(priv->device, 2070 dma_conf->dma_rx_size * 2071 sizeof(struct dma_desc), 2072 &rx_q->dma_rx_phy, 2073 GFP_KERNEL); 2074 if (!rx_q->dma_rx) 2075 return -ENOMEM; 2076 } 2077 2078 if (stmmac_xdp_is_enabled(priv) && 2079 test_bit(queue, priv->af_xdp_zc_qps)) 2080 napi_id = ch->rxtx_napi.napi_id; 2081 else 2082 napi_id = ch->rx_napi.napi_id; 2083 2084 ret = xdp_rxq_info_reg(&rx_q->xdp_rxq, priv->dev, 2085 rx_q->queue_index, 2086 napi_id); 2087 if (ret) { 2088 netdev_err(priv->dev, "Failed to register xdp rxq info\n"); 2089 return -EINVAL; 2090 } 2091 2092 return 0; 2093 } 2094 2095 static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv, 2096 struct stmmac_dma_conf *dma_conf) 2097 { 2098 u32 rx_count = priv->plat->rx_queues_to_use; 2099 u32 queue; 2100 int ret; 2101 2102 /* RX queues buffers and DMA */ 2103 for (queue = 0; queue < rx_count; queue++) { 2104 ret = __alloc_dma_rx_desc_resources(priv, dma_conf, queue); 2105 if (ret) 2106 goto err_dma; 2107 } 2108 2109 return 0; 2110 2111 err_dma: 2112 free_dma_rx_desc_resources(priv, dma_conf); 2113 2114 return ret; 2115 } 2116 2117 /** 2118 * __alloc_dma_tx_desc_resources - alloc TX resources (per queue). 2119 * @priv: private structure 2120 * @dma_conf: structure to take the dma data 2121 * @queue: TX queue index 2122 * Description: according to which descriptor can be used (extend or basic) 2123 * this function allocates the resources for TX and RX paths. In case of 2124 * reception, for example, it pre-allocated the RX socket buffer in order to 2125 * allow zero-copy mechanism. 
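 *
 * Each TX queue gets two kcalloc'ed bookkeeping arrays (tx_skbuff_dma and
 * tx_skbuff) plus one coherent DMA region holding the whole descriptor
 * ring, sized as sizeof(<descriptor variant>) * dma_tx_size. As a rough
 * worked example (assuming a hypothetical 512-entry ring and the
 * four-word, 16-byte basic descriptor), that is 512 * 16 = 8 KiB of
 * coherent memory per queue; the extended and enhanced (TBS) variants are
 * proportionally larger.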
2126 */ 2127 static int __alloc_dma_tx_desc_resources(struct stmmac_priv *priv, 2128 struct stmmac_dma_conf *dma_conf, 2129 u32 queue) 2130 { 2131 struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue]; 2132 size_t size; 2133 void *addr; 2134 2135 tx_q->queue_index = queue; 2136 tx_q->priv_data = priv; 2137 2138 tx_q->tx_skbuff_dma = kcalloc(dma_conf->dma_tx_size, 2139 sizeof(*tx_q->tx_skbuff_dma), 2140 GFP_KERNEL); 2141 if (!tx_q->tx_skbuff_dma) 2142 return -ENOMEM; 2143 2144 tx_q->tx_skbuff = kcalloc(dma_conf->dma_tx_size, 2145 sizeof(struct sk_buff *), 2146 GFP_KERNEL); 2147 if (!tx_q->tx_skbuff) 2148 return -ENOMEM; 2149 2150 if (priv->extend_desc) 2151 size = sizeof(struct dma_extended_desc); 2152 else if (tx_q->tbs & STMMAC_TBS_AVAIL) 2153 size = sizeof(struct dma_edesc); 2154 else 2155 size = sizeof(struct dma_desc); 2156 2157 size *= dma_conf->dma_tx_size; 2158 2159 addr = dma_alloc_coherent(priv->device, size, 2160 &tx_q->dma_tx_phy, GFP_KERNEL); 2161 if (!addr) 2162 return -ENOMEM; 2163 2164 if (priv->extend_desc) 2165 tx_q->dma_etx = addr; 2166 else if (tx_q->tbs & STMMAC_TBS_AVAIL) 2167 tx_q->dma_entx = addr; 2168 else 2169 tx_q->dma_tx = addr; 2170 2171 return 0; 2172 } 2173 2174 static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv, 2175 struct stmmac_dma_conf *dma_conf) 2176 { 2177 u32 tx_count = priv->plat->tx_queues_to_use; 2178 u32 queue; 2179 int ret; 2180 2181 /* TX queues buffers and DMA */ 2182 for (queue = 0; queue < tx_count; queue++) { 2183 ret = __alloc_dma_tx_desc_resources(priv, dma_conf, queue); 2184 if (ret) 2185 goto err_dma; 2186 } 2187 2188 return 0; 2189 2190 err_dma: 2191 free_dma_tx_desc_resources(priv, dma_conf); 2192 return ret; 2193 } 2194 2195 /** 2196 * alloc_dma_desc_resources - alloc TX/RX resources. 2197 * @priv: private structure 2198 * @dma_conf: structure to take the dma data 2199 * Description: according to which descriptor can be used (extend or basic) 2200 * this function allocates the resources for TX and RX paths. In case of 2201 * reception, for example, it pre-allocated the RX socket buffer in order to 2202 * allow zero-copy mechanism. 2203 */ 2204 static int alloc_dma_desc_resources(struct stmmac_priv *priv, 2205 struct stmmac_dma_conf *dma_conf) 2206 { 2207 /* RX Allocation */ 2208 int ret = alloc_dma_rx_desc_resources(priv, dma_conf); 2209 2210 if (ret) 2211 return ret; 2212 2213 ret = alloc_dma_tx_desc_resources(priv, dma_conf); 2214 2215 return ret; 2216 } 2217 2218 /** 2219 * free_dma_desc_resources - free dma desc resources 2220 * @priv: private structure 2221 * @dma_conf: structure to take the dma data 2222 */ 2223 static void free_dma_desc_resources(struct stmmac_priv *priv, 2224 struct stmmac_dma_conf *dma_conf) 2225 { 2226 /* Release the DMA TX socket buffers */ 2227 free_dma_tx_desc_resources(priv, dma_conf); 2228 2229 /* Release the DMA RX socket buffers later 2230 * to ensure all pending XDP_TX buffers are returned. 
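 * Any XDP_TX frame still sitting on a TX ring points at a page owned by
 * the RX page_pool, so the TX side is torn down first and the pool is
 * only destroyed afterwards.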
2231 */ 2232 free_dma_rx_desc_resources(priv, dma_conf); 2233 } 2234 2235 /** 2236 * stmmac_mac_enable_rx_queues - Enable MAC rx queues 2237 * @priv: driver private structure 2238 * Description: It is used for enabling the rx queues in the MAC 2239 */ 2240 static void stmmac_mac_enable_rx_queues(struct stmmac_priv *priv) 2241 { 2242 u32 rx_queues_count = priv->plat->rx_queues_to_use; 2243 int queue; 2244 u8 mode; 2245 2246 for (queue = 0; queue < rx_queues_count; queue++) { 2247 mode = priv->plat->rx_queues_cfg[queue].mode_to_use; 2248 stmmac_rx_queue_enable(priv, priv->hw, mode, queue); 2249 } 2250 } 2251 2252 /** 2253 * stmmac_start_rx_dma - start RX DMA channel 2254 * @priv: driver private structure 2255 * @chan: RX channel index 2256 * Description: 2257 * This starts a RX DMA channel 2258 */ 2259 static void stmmac_start_rx_dma(struct stmmac_priv *priv, u32 chan) 2260 { 2261 netdev_dbg(priv->dev, "DMA RX processes started in channel %d\n", chan); 2262 stmmac_start_rx(priv, priv->ioaddr, chan); 2263 } 2264 2265 /** 2266 * stmmac_start_tx_dma - start TX DMA channel 2267 * @priv: driver private structure 2268 * @chan: TX channel index 2269 * Description: 2270 * This starts a TX DMA channel 2271 */ 2272 static void stmmac_start_tx_dma(struct stmmac_priv *priv, u32 chan) 2273 { 2274 netdev_dbg(priv->dev, "DMA TX processes started in channel %d\n", chan); 2275 stmmac_start_tx(priv, priv->ioaddr, chan); 2276 } 2277 2278 /** 2279 * stmmac_stop_rx_dma - stop RX DMA channel 2280 * @priv: driver private structure 2281 * @chan: RX channel index 2282 * Description: 2283 * This stops a RX DMA channel 2284 */ 2285 static void stmmac_stop_rx_dma(struct stmmac_priv *priv, u32 chan) 2286 { 2287 netdev_dbg(priv->dev, "DMA RX processes stopped in channel %d\n", chan); 2288 stmmac_stop_rx(priv, priv->ioaddr, chan); 2289 } 2290 2291 /** 2292 * stmmac_stop_tx_dma - stop TX DMA channel 2293 * @priv: driver private structure 2294 * @chan: TX channel index 2295 * Description: 2296 * This stops a TX DMA channel 2297 */ 2298 static void stmmac_stop_tx_dma(struct stmmac_priv *priv, u32 chan) 2299 { 2300 netdev_dbg(priv->dev, "DMA TX processes stopped in channel %d\n", chan); 2301 stmmac_stop_tx(priv, priv->ioaddr, chan); 2302 } 2303 2304 static void stmmac_enable_all_dma_irq(struct stmmac_priv *priv) 2305 { 2306 u32 rx_channels_count = priv->plat->rx_queues_to_use; 2307 u32 tx_channels_count = priv->plat->tx_queues_to_use; 2308 u32 dma_csr_ch = max(rx_channels_count, tx_channels_count); 2309 u32 chan; 2310 2311 for (chan = 0; chan < dma_csr_ch; chan++) { 2312 struct stmmac_channel *ch = &priv->channel[chan]; 2313 unsigned long flags; 2314 2315 spin_lock_irqsave(&ch->lock, flags); 2316 stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 1); 2317 spin_unlock_irqrestore(&ch->lock, flags); 2318 } 2319 } 2320 2321 /** 2322 * stmmac_start_all_dma - start all RX and TX DMA channels 2323 * @priv: driver private structure 2324 * Description: 2325 * This starts all the RX and TX DMA channels 2326 */ 2327 static void stmmac_start_all_dma(struct stmmac_priv *priv) 2328 { 2329 u32 rx_channels_count = priv->plat->rx_queues_to_use; 2330 u32 tx_channels_count = priv->plat->tx_queues_to_use; 2331 u32 chan = 0; 2332 2333 for (chan = 0; chan < rx_channels_count; chan++) 2334 stmmac_start_rx_dma(priv, chan); 2335 2336 for (chan = 0; chan < tx_channels_count; chan++) 2337 stmmac_start_tx_dma(priv, chan); 2338 } 2339 2340 /** 2341 * stmmac_stop_all_dma - stop all RX and TX DMA channels 2342 * @priv: driver private structure 2343 * 
Description: 2344 * This stops the RX and TX DMA channels 2345 */ 2346 static void stmmac_stop_all_dma(struct stmmac_priv *priv) 2347 { 2348 u32 rx_channels_count = priv->plat->rx_queues_to_use; 2349 u32 tx_channels_count = priv->plat->tx_queues_to_use; 2350 u32 chan = 0; 2351 2352 for (chan = 0; chan < rx_channels_count; chan++) 2353 stmmac_stop_rx_dma(priv, chan); 2354 2355 for (chan = 0; chan < tx_channels_count; chan++) 2356 stmmac_stop_tx_dma(priv, chan); 2357 } 2358 2359 /** 2360 * stmmac_dma_operation_mode - HW DMA operation mode 2361 * @priv: driver private structure 2362 * Description: it is used for configuring the DMA operation mode register in 2363 * order to program the tx/rx DMA thresholds or Store-And-Forward mode. 2364 */ 2365 static void stmmac_dma_operation_mode(struct stmmac_priv *priv) 2366 { 2367 u32 rx_channels_count = priv->plat->rx_queues_to_use; 2368 u32 tx_channels_count = priv->plat->tx_queues_to_use; 2369 int rxfifosz = priv->plat->rx_fifo_size; 2370 int txfifosz = priv->plat->tx_fifo_size; 2371 u32 txmode = 0; 2372 u32 rxmode = 0; 2373 u32 chan = 0; 2374 u8 qmode = 0; 2375 2376 if (rxfifosz == 0) 2377 rxfifosz = priv->dma_cap.rx_fifo_size; 2378 if (txfifosz == 0) 2379 txfifosz = priv->dma_cap.tx_fifo_size; 2380 2381 /* Adjust for the real per-queue fifo size */ 2382 rxfifosz /= rx_channels_count; 2383 txfifosz /= tx_channels_count; 2384 2385 if (priv->plat->force_thresh_dma_mode) { 2386 txmode = tc; 2387 rxmode = tc; 2388 } else if (priv->plat->force_sf_dma_mode || priv->plat->tx_coe) { 2389 /* 2390 * On GMAC, Store-And-Forward (SF) mode can be enabled so that 2391 * the TX checksum offload (COE) is performed in HW. This requires: 2392 * 1) TX COE is actually supported, and 2393 * 2) the Jumbo frame support is not the buggy variant that 2394 * must not insert the checksum in the TDES.
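 *
 * Summary of the resulting (txmode, rxmode) selection:
 *	force_thresh_dma_mode		-> (tc, tc)
 *	force_sf_dma_mode || tx_coe	-> (SF_DMA_MODE, SF_DMA_MODE)
 *	otherwise			-> (tc, SF_DMA_MODE)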
2395 */ 2396 txmode = SF_DMA_MODE; 2397 rxmode = SF_DMA_MODE; 2398 priv->xstats.threshold = SF_DMA_MODE; 2399 } else { 2400 txmode = tc; 2401 rxmode = SF_DMA_MODE; 2402 } 2403 2404 /* configure all channels */ 2405 for (chan = 0; chan < rx_channels_count; chan++) { 2406 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[chan]; 2407 u32 buf_size; 2408 2409 qmode = priv->plat->rx_queues_cfg[chan].mode_to_use; 2410 2411 stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan, 2412 rxfifosz, qmode); 2413 2414 if (rx_q->xsk_pool) { 2415 buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool); 2416 stmmac_set_dma_bfsize(priv, priv->ioaddr, 2417 buf_size, 2418 chan); 2419 } else { 2420 stmmac_set_dma_bfsize(priv, priv->ioaddr, 2421 priv->dma_conf.dma_buf_sz, 2422 chan); 2423 } 2424 } 2425 2426 for (chan = 0; chan < tx_channels_count; chan++) { 2427 qmode = priv->plat->tx_queues_cfg[chan].mode_to_use; 2428 2429 stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan, 2430 txfifosz, qmode); 2431 } 2432 } 2433 2434 static bool stmmac_xdp_xmit_zc(struct stmmac_priv *priv, u32 queue, u32 budget) 2435 { 2436 struct netdev_queue *nq = netdev_get_tx_queue(priv->dev, queue); 2437 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue]; 2438 struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[queue]; 2439 struct xsk_buff_pool *pool = tx_q->xsk_pool; 2440 unsigned int entry = tx_q->cur_tx; 2441 struct dma_desc *tx_desc = NULL; 2442 struct xdp_desc xdp_desc; 2443 bool work_done = true; 2444 u32 tx_set_ic_bit = 0; 2445 unsigned long flags; 2446 2447 /* Avoids TX time-out as we are sharing with slow path */ 2448 txq_trans_cond_update(nq); 2449 2450 budget = min(budget, stmmac_tx_avail(priv, queue)); 2451 2452 while (budget-- > 0) { 2453 dma_addr_t dma_addr; 2454 bool set_ic; 2455 2456 /* We are sharing with slow path and stop XSK TX desc submission when 2457 * available TX ring is less than threshold. 2458 */ 2459 if (unlikely(stmmac_tx_avail(priv, queue) < STMMAC_TX_XSK_AVAIL) || 2460 !netif_carrier_ok(priv->dev)) { 2461 work_done = false; 2462 break; 2463 } 2464 2465 if (!xsk_tx_peek_desc(pool, &xdp_desc)) 2466 break; 2467 2468 if (likely(priv->extend_desc)) 2469 tx_desc = (struct dma_desc *)(tx_q->dma_etx + entry); 2470 else if (tx_q->tbs & STMMAC_TBS_AVAIL) 2471 tx_desc = &tx_q->dma_entx[entry].basic; 2472 else 2473 tx_desc = tx_q->dma_tx + entry; 2474 2475 dma_addr = xsk_buff_raw_get_dma(pool, xdp_desc.addr); 2476 xsk_buff_raw_dma_sync_for_device(pool, dma_addr, xdp_desc.len); 2477 2478 tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XSK_TX; 2479 2480 /* To return XDP buffer to XSK pool, we simple call 2481 * xsk_tx_completed(), so we don't need to fill up 2482 * 'buf' and 'xdpf'. 
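 * The entry is only tagged as STMMAC_TXBUF_T_XSK_TX here; both
 * stmmac_tx_clean() and dma_free_tx_skbufs() count such entries in
 * xsk_frames_done and report them back to the pool in one batch via
 * xsk_tx_completed().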
2483 */ 2484 tx_q->tx_skbuff_dma[entry].buf = 0; 2485 tx_q->xdpf[entry] = NULL; 2486 2487 tx_q->tx_skbuff_dma[entry].map_as_page = false; 2488 tx_q->tx_skbuff_dma[entry].len = xdp_desc.len; 2489 tx_q->tx_skbuff_dma[entry].last_segment = true; 2490 tx_q->tx_skbuff_dma[entry].is_jumbo = false; 2491 2492 stmmac_set_desc_addr(priv, tx_desc, dma_addr); 2493 2494 tx_q->tx_count_frames++; 2495 2496 if (!priv->tx_coal_frames[queue]) 2497 set_ic = false; 2498 else if (tx_q->tx_count_frames % priv->tx_coal_frames[queue] == 0) 2499 set_ic = true; 2500 else 2501 set_ic = false; 2502 2503 if (set_ic) { 2504 tx_q->tx_count_frames = 0; 2505 stmmac_set_tx_ic(priv, tx_desc); 2506 tx_set_ic_bit++; 2507 } 2508 2509 stmmac_prepare_tx_desc(priv, tx_desc, 1, xdp_desc.len, 2510 true, priv->mode, true, true, 2511 xdp_desc.len); 2512 2513 stmmac_enable_dma_transmission(priv, priv->ioaddr); 2514 2515 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size); 2516 entry = tx_q->cur_tx; 2517 } 2518 flags = u64_stats_update_begin_irqsave(&txq_stats->syncp); 2519 txq_stats->tx_set_ic_bit += tx_set_ic_bit; 2520 u64_stats_update_end_irqrestore(&txq_stats->syncp, flags); 2521 2522 if (tx_desc) { 2523 stmmac_flush_tx_descriptors(priv, queue); 2524 xsk_tx_release(pool); 2525 } 2526 2527 /* Return true if all of the 3 conditions are met 2528 * a) TX Budget is still available 2529 * b) work_done = true when XSK TX desc peek is empty (no more 2530 * pending XSK TX for transmission) 2531 */ 2532 return !!budget && work_done; 2533 } 2534 2535 static void stmmac_bump_dma_threshold(struct stmmac_priv *priv, u32 chan) 2536 { 2537 if (unlikely(priv->xstats.threshold != SF_DMA_MODE) && tc <= 256) { 2538 tc += 64; 2539 2540 if (priv->plat->force_thresh_dma_mode) 2541 stmmac_set_dma_operation_mode(priv, tc, tc, chan); 2542 else 2543 stmmac_set_dma_operation_mode(priv, tc, SF_DMA_MODE, 2544 chan); 2545 2546 priv->xstats.threshold = tc; 2547 } 2548 } 2549 2550 /** 2551 * stmmac_tx_clean - to manage the transmission completion 2552 * @priv: driver private structure 2553 * @budget: napi budget limiting this functions packet handling 2554 * @queue: TX queue index 2555 * Description: it reclaims the transmit resources after transmission completes. 
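 *
 * The clean loop walks the ring from dirty_tx towards cur_tx and stops at
 * the first descriptor still owned by the DMA. A condensed, illustrative
 * sketch of that walk (it mirrors the code below):
 *
 *	entry = tx_q->dirty_tx;
 *	while (entry != tx_q->cur_tx && count < priv->dma_conf.dma_tx_size) {
 *		if (stmmac_tx_status(...) & tx_dma_own)
 *			break;	(descriptor still owned by the DMA)
 *		... unmap the buffer, free the skb/xdpf, release the desc ...
 *		entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
 *	}
 *	tx_q->dirty_tx = entry;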
2556 */ 2557 static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue) 2558 { 2559 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue]; 2560 struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[queue]; 2561 unsigned int bytes_compl = 0, pkts_compl = 0; 2562 unsigned int entry, xmits = 0, count = 0; 2563 u32 tx_packets = 0, tx_errors = 0; 2564 unsigned long flags; 2565 2566 __netif_tx_lock_bh(netdev_get_tx_queue(priv->dev, queue)); 2567 2568 tx_q->xsk_frames_done = 0; 2569 2570 entry = tx_q->dirty_tx; 2571 2572 /* Try to clean all TX complete frame in 1 shot */ 2573 while ((entry != tx_q->cur_tx) && count < priv->dma_conf.dma_tx_size) { 2574 struct xdp_frame *xdpf; 2575 struct sk_buff *skb; 2576 struct dma_desc *p; 2577 int status; 2578 2579 if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_TX || 2580 tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_NDO) { 2581 xdpf = tx_q->xdpf[entry]; 2582 skb = NULL; 2583 } else if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_SKB) { 2584 xdpf = NULL; 2585 skb = tx_q->tx_skbuff[entry]; 2586 } else { 2587 xdpf = NULL; 2588 skb = NULL; 2589 } 2590 2591 if (priv->extend_desc) 2592 p = (struct dma_desc *)(tx_q->dma_etx + entry); 2593 else if (tx_q->tbs & STMMAC_TBS_AVAIL) 2594 p = &tx_q->dma_entx[entry].basic; 2595 else 2596 p = tx_q->dma_tx + entry; 2597 2598 status = stmmac_tx_status(priv, &priv->xstats, p, priv->ioaddr); 2599 /* Check if the descriptor is owned by the DMA */ 2600 if (unlikely(status & tx_dma_own)) 2601 break; 2602 2603 count++; 2604 2605 /* Make sure descriptor fields are read after reading 2606 * the own bit. 2607 */ 2608 dma_rmb(); 2609 2610 /* Just consider the last segment and ...*/ 2611 if (likely(!(status & tx_not_ls))) { 2612 /* ... 
verify the status error condition */ 2613 if (unlikely(status & tx_err)) { 2614 tx_errors++; 2615 if (unlikely(status & tx_err_bump_tc)) 2616 stmmac_bump_dma_threshold(priv, queue); 2617 } else { 2618 tx_packets++; 2619 } 2620 if (skb) 2621 stmmac_get_tx_hwtstamp(priv, p, skb); 2622 } 2623 2624 if (likely(tx_q->tx_skbuff_dma[entry].buf && 2625 tx_q->tx_skbuff_dma[entry].buf_type != STMMAC_TXBUF_T_XDP_TX)) { 2626 if (tx_q->tx_skbuff_dma[entry].map_as_page) 2627 dma_unmap_page(priv->device, 2628 tx_q->tx_skbuff_dma[entry].buf, 2629 tx_q->tx_skbuff_dma[entry].len, 2630 DMA_TO_DEVICE); 2631 else 2632 dma_unmap_single(priv->device, 2633 tx_q->tx_skbuff_dma[entry].buf, 2634 tx_q->tx_skbuff_dma[entry].len, 2635 DMA_TO_DEVICE); 2636 tx_q->tx_skbuff_dma[entry].buf = 0; 2637 tx_q->tx_skbuff_dma[entry].len = 0; 2638 tx_q->tx_skbuff_dma[entry].map_as_page = false; 2639 } 2640 2641 stmmac_clean_desc3(priv, tx_q, p); 2642 2643 tx_q->tx_skbuff_dma[entry].last_segment = false; 2644 tx_q->tx_skbuff_dma[entry].is_jumbo = false; 2645 2646 if (xdpf && 2647 tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_TX) { 2648 xdp_return_frame_rx_napi(xdpf); 2649 tx_q->xdpf[entry] = NULL; 2650 } 2651 2652 if (xdpf && 2653 tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_NDO) { 2654 xdp_return_frame(xdpf); 2655 tx_q->xdpf[entry] = NULL; 2656 } 2657 2658 if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XSK_TX) 2659 tx_q->xsk_frames_done++; 2660 2661 if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_SKB) { 2662 if (likely(skb)) { 2663 pkts_compl++; 2664 bytes_compl += skb->len; 2665 dev_consume_skb_any(skb); 2666 tx_q->tx_skbuff[entry] = NULL; 2667 } 2668 } 2669 2670 stmmac_release_tx_desc(priv, p, priv->mode); 2671 2672 entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size); 2673 } 2674 tx_q->dirty_tx = entry; 2675 2676 netdev_tx_completed_queue(netdev_get_tx_queue(priv->dev, queue), 2677 pkts_compl, bytes_compl); 2678 2679 if (unlikely(netif_tx_queue_stopped(netdev_get_tx_queue(priv->dev, 2680 queue))) && 2681 stmmac_tx_avail(priv, queue) > STMMAC_TX_THRESH(priv)) { 2682 2683 netif_dbg(priv, tx_done, priv->dev, 2684 "%s: restart transmit\n", __func__); 2685 netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, queue)); 2686 } 2687 2688 if (tx_q->xsk_pool) { 2689 bool work_done; 2690 2691 if (tx_q->xsk_frames_done) 2692 xsk_tx_completed(tx_q->xsk_pool, tx_q->xsk_frames_done); 2693 2694 if (xsk_uses_need_wakeup(tx_q->xsk_pool)) 2695 xsk_set_tx_need_wakeup(tx_q->xsk_pool); 2696 2697 /* For XSK TX, we try to send as many as possible. 2698 * If XSK work done (XSK TX desc empty and budget still 2699 * available), return "budget - 1" to reenable TX IRQ. 2700 * Else, return "budget" to make NAPI continue polling. 
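 * In the NAPI poll handler this follows the usual convention: a return
 * value below the budget allows napi_complete_done() to run and re-arm
 * the TX interrupt, while returning the full budget keeps the poller
 * scheduled.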
2701 */ 2702 work_done = stmmac_xdp_xmit_zc(priv, queue, 2703 STMMAC_XSK_TX_BUDGET_MAX); 2704 if (work_done) 2705 xmits = budget - 1; 2706 else 2707 xmits = budget; 2708 } 2709 2710 if (priv->eee_enabled && !priv->tx_path_in_lpi_mode && 2711 priv->eee_sw_timer_en) { 2712 if (stmmac_enable_eee_mode(priv)) 2713 mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer)); 2714 } 2715 2716 /* We still have pending packets, let's call for a new scheduling */ 2717 if (tx_q->dirty_tx != tx_q->cur_tx) 2718 stmmac_tx_timer_arm(priv, queue); 2719 2720 flags = u64_stats_update_begin_irqsave(&txq_stats->syncp); 2721 txq_stats->tx_packets += tx_packets; 2722 txq_stats->tx_pkt_n += tx_packets; 2723 txq_stats->tx_clean++; 2724 u64_stats_update_end_irqrestore(&txq_stats->syncp, flags); 2725 2726 priv->xstats.tx_errors += tx_errors; 2727 2728 __netif_tx_unlock_bh(netdev_get_tx_queue(priv->dev, queue)); 2729 2730 /* Combine decisions from TX clean and XSK TX */ 2731 return max(count, xmits); 2732 } 2733 2734 /** 2735 * stmmac_tx_err - to manage the tx error 2736 * @priv: driver private structure 2737 * @chan: channel index 2738 * Description: it cleans the descriptors and restarts the transmission 2739 * in case of transmission errors. 2740 */ 2741 static void stmmac_tx_err(struct stmmac_priv *priv, u32 chan) 2742 { 2743 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan]; 2744 2745 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, chan)); 2746 2747 stmmac_stop_tx_dma(priv, chan); 2748 dma_free_tx_skbufs(priv, &priv->dma_conf, chan); 2749 stmmac_clear_tx_descriptors(priv, &priv->dma_conf, chan); 2750 stmmac_reset_tx_queue(priv, chan); 2751 stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg, 2752 tx_q->dma_tx_phy, chan); 2753 stmmac_start_tx_dma(priv, chan); 2754 2755 priv->xstats.tx_errors++; 2756 netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, chan)); 2757 } 2758 2759 /** 2760 * stmmac_set_dma_operation_mode - Set DMA operation mode by channel 2761 * @priv: driver private structure 2762 * @txmode: TX operating mode 2763 * @rxmode: RX operating mode 2764 * @chan: channel index 2765 * Description: it is used for configuring of the DMA operation mode in 2766 * runtime in order to program the tx/rx DMA thresholds or Store-And-Forward 2767 * mode. 
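 *
 * Note that the platform/capability FIFO sizes describe the whole FIFO:
 * they are divided by the number of channels in use before being
 * programmed. For example, a (hypothetical) 8192-byte RX FIFO shared by
 * four channels is programmed as 2048 bytes per channel.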
2768 */ 2769 static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode, 2770 u32 rxmode, u32 chan) 2771 { 2772 u8 rxqmode = priv->plat->rx_queues_cfg[chan].mode_to_use; 2773 u8 txqmode = priv->plat->tx_queues_cfg[chan].mode_to_use; 2774 u32 rx_channels_count = priv->plat->rx_queues_to_use; 2775 u32 tx_channels_count = priv->plat->tx_queues_to_use; 2776 int rxfifosz = priv->plat->rx_fifo_size; 2777 int txfifosz = priv->plat->tx_fifo_size; 2778 2779 if (rxfifosz == 0) 2780 rxfifosz = priv->dma_cap.rx_fifo_size; 2781 if (txfifosz == 0) 2782 txfifosz = priv->dma_cap.tx_fifo_size; 2783 2784 /* Adjust for real per queue fifo size */ 2785 rxfifosz /= rx_channels_count; 2786 txfifosz /= tx_channels_count; 2787 2788 stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan, rxfifosz, rxqmode); 2789 stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan, txfifosz, txqmode); 2790 } 2791 2792 static bool stmmac_safety_feat_interrupt(struct stmmac_priv *priv) 2793 { 2794 int ret; 2795 2796 ret = stmmac_safety_feat_irq_status(priv, priv->dev, 2797 priv->ioaddr, priv->dma_cap.asp, &priv->sstats); 2798 if (ret && (ret != -EINVAL)) { 2799 stmmac_global_err(priv); 2800 return true; 2801 } 2802 2803 return false; 2804 } 2805 2806 static int stmmac_napi_check(struct stmmac_priv *priv, u32 chan, u32 dir) 2807 { 2808 int status = stmmac_dma_interrupt_status(priv, priv->ioaddr, 2809 &priv->xstats, chan, dir); 2810 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[chan]; 2811 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan]; 2812 struct stmmac_channel *ch = &priv->channel[chan]; 2813 struct napi_struct *rx_napi; 2814 struct napi_struct *tx_napi; 2815 unsigned long flags; 2816 2817 rx_napi = rx_q->xsk_pool ? &ch->rxtx_napi : &ch->rx_napi; 2818 tx_napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi; 2819 2820 if ((status & handle_rx) && (chan < priv->plat->rx_queues_to_use)) { 2821 if (napi_schedule_prep(rx_napi)) { 2822 spin_lock_irqsave(&ch->lock, flags); 2823 stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 0); 2824 spin_unlock_irqrestore(&ch->lock, flags); 2825 __napi_schedule(rx_napi); 2826 } 2827 } 2828 2829 if ((status & handle_tx) && (chan < priv->plat->tx_queues_to_use)) { 2830 if (napi_schedule_prep(tx_napi)) { 2831 spin_lock_irqsave(&ch->lock, flags); 2832 stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 0, 1); 2833 spin_unlock_irqrestore(&ch->lock, flags); 2834 __napi_schedule(tx_napi); 2835 } 2836 } 2837 2838 return status; 2839 } 2840 2841 /** 2842 * stmmac_dma_interrupt - DMA ISR 2843 * @priv: driver private structure 2844 * Description: this is the DMA ISR. It is called by the main ISR. 2845 * It calls the dwmac dma routine and schedule poll method in case of some 2846 * work can be done. 2847 */ 2848 static void stmmac_dma_interrupt(struct stmmac_priv *priv) 2849 { 2850 u32 tx_channel_count = priv->plat->tx_queues_to_use; 2851 u32 rx_channel_count = priv->plat->rx_queues_to_use; 2852 u32 channels_to_check = tx_channel_count > rx_channel_count ? 2853 tx_channel_count : rx_channel_count; 2854 u32 chan; 2855 int status[max_t(u32, MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES)]; 2856 2857 /* Make sure we never check beyond our status buffer. 
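 * status[] is dimensioned for max(MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES)
 * entries, so this clamp can only fire if the platform ever reports more
 * channels than those compile-time maxima.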
*/ 2858 if (WARN_ON_ONCE(channels_to_check > ARRAY_SIZE(status))) 2859 channels_to_check = ARRAY_SIZE(status); 2860 2861 for (chan = 0; chan < channels_to_check; chan++) 2862 status[chan] = stmmac_napi_check(priv, chan, 2863 DMA_DIR_RXTX); 2864 2865 for (chan = 0; chan < tx_channel_count; chan++) { 2866 if (unlikely(status[chan] & tx_hard_error_bump_tc)) { 2867 /* Try to bump up the dma threshold on this failure */ 2868 stmmac_bump_dma_threshold(priv, chan); 2869 } else if (unlikely(status[chan] == tx_hard_error)) { 2870 stmmac_tx_err(priv, chan); 2871 } 2872 } 2873 } 2874 2875 /** 2876 * stmmac_mmc_setup: setup the Mac Management Counters (MMC) 2877 * @priv: driver private structure 2878 * Description: this masks the MMC irq, in fact, the counters are managed in SW. 2879 */ 2880 static void stmmac_mmc_setup(struct stmmac_priv *priv) 2881 { 2882 unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET | 2883 MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET; 2884 2885 stmmac_mmc_intr_all_mask(priv, priv->mmcaddr); 2886 2887 if (priv->dma_cap.rmon) { 2888 stmmac_mmc_ctrl(priv, priv->mmcaddr, mode); 2889 memset(&priv->mmc, 0, sizeof(struct stmmac_counters)); 2890 } else 2891 netdev_info(priv->dev, "No MAC Management Counters available\n"); 2892 } 2893 2894 /** 2895 * stmmac_get_hw_features - get MAC capabilities from the HW cap. register. 2896 * @priv: driver private structure 2897 * Description: 2898 * new GMAC chip generations have a new register to indicate the 2899 * presence of the optional feature/functions. 2900 * This can be also used to override the value passed through the 2901 * platform and necessary for old MAC10/100 and GMAC chips. 2902 */ 2903 static int stmmac_get_hw_features(struct stmmac_priv *priv) 2904 { 2905 return stmmac_get_hw_feature(priv, priv->ioaddr, &priv->dma_cap) == 0; 2906 } 2907 2908 /** 2909 * stmmac_check_ether_addr - check if the MAC addr is valid 2910 * @priv: driver private structure 2911 * Description: 2912 * it is to verify if the MAC address is valid, in case of failures it 2913 * generates a random MAC address 2914 */ 2915 static void stmmac_check_ether_addr(struct stmmac_priv *priv) 2916 { 2917 u8 addr[ETH_ALEN]; 2918 2919 if (!is_valid_ether_addr(priv->dev->dev_addr)) { 2920 stmmac_get_umac_addr(priv, priv->hw, addr, 0); 2921 if (is_valid_ether_addr(addr)) 2922 eth_hw_addr_set(priv->dev, addr); 2923 else 2924 eth_hw_addr_random(priv->dev); 2925 dev_info(priv->device, "device MAC address %pM\n", 2926 priv->dev->dev_addr); 2927 } 2928 } 2929 2930 /** 2931 * stmmac_init_dma_engine - DMA init. 2932 * @priv: driver private structure 2933 * Description: 2934 * It inits the DMA invoking the specific MAC/GMAC callback. 2935 * Some DMA parameters can be passed from the platform; 2936 * in case of these are not passed a default is kept for the MAC or GMAC. 
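 *
 * Condensed view of the sequence performed below:
 *
 *	stmmac_reset()				(SW reset of the DMA)
 *	stmmac_dma_init() and, if provided, stmmac_axi()
 *	for each DMA CSR channel: stmmac_init_chan() + disable its IRQs
 *	for each RX channel:      stmmac_init_rx_chan() + set RX tail pointer
 *	for each TX channel:      stmmac_init_tx_chan() + set TX tail pointer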
2937 */ 2938 static int stmmac_init_dma_engine(struct stmmac_priv *priv) 2939 { 2940 u32 rx_channels_count = priv->plat->rx_queues_to_use; 2941 u32 tx_channels_count = priv->plat->tx_queues_to_use; 2942 u32 dma_csr_ch = max(rx_channels_count, tx_channels_count); 2943 struct stmmac_rx_queue *rx_q; 2944 struct stmmac_tx_queue *tx_q; 2945 u32 chan = 0; 2946 int atds = 0; 2947 int ret = 0; 2948 2949 if (!priv->plat->dma_cfg || !priv->plat->dma_cfg->pbl) { 2950 dev_err(priv->device, "Invalid DMA configuration\n"); 2951 return -EINVAL; 2952 } 2953 2954 if (priv->extend_desc && (priv->mode == STMMAC_RING_MODE)) 2955 atds = 1; 2956 2957 ret = stmmac_reset(priv, priv->ioaddr); 2958 if (ret) { 2959 dev_err(priv->device, "Failed to reset the dma\n"); 2960 return ret; 2961 } 2962 2963 /* DMA Configuration */ 2964 stmmac_dma_init(priv, priv->ioaddr, priv->plat->dma_cfg, atds); 2965 2966 if (priv->plat->axi) 2967 stmmac_axi(priv, priv->ioaddr, priv->plat->axi); 2968 2969 /* DMA CSR Channel configuration */ 2970 for (chan = 0; chan < dma_csr_ch; chan++) { 2971 stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan); 2972 stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 1); 2973 } 2974 2975 /* DMA RX Channel Configuration */ 2976 for (chan = 0; chan < rx_channels_count; chan++) { 2977 rx_q = &priv->dma_conf.rx_queue[chan]; 2978 2979 stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg, 2980 rx_q->dma_rx_phy, chan); 2981 2982 rx_q->rx_tail_addr = rx_q->dma_rx_phy + 2983 (rx_q->buf_alloc_num * 2984 sizeof(struct dma_desc)); 2985 stmmac_set_rx_tail_ptr(priv, priv->ioaddr, 2986 rx_q->rx_tail_addr, chan); 2987 } 2988 2989 /* DMA TX Channel Configuration */ 2990 for (chan = 0; chan < tx_channels_count; chan++) { 2991 tx_q = &priv->dma_conf.tx_queue[chan]; 2992 2993 stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg, 2994 tx_q->dma_tx_phy, chan); 2995 2996 tx_q->tx_tail_addr = tx_q->dma_tx_phy; 2997 stmmac_set_tx_tail_ptr(priv, priv->ioaddr, 2998 tx_q->tx_tail_addr, chan); 2999 } 3000 3001 return ret; 3002 } 3003 3004 static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue) 3005 { 3006 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue]; 3007 u32 tx_coal_timer = priv->tx_coal_timer[queue]; 3008 3009 if (!tx_coal_timer) 3010 return; 3011 3012 hrtimer_start(&tx_q->txtimer, 3013 STMMAC_COAL_TIMER(tx_coal_timer), 3014 HRTIMER_MODE_REL); 3015 } 3016 3017 /** 3018 * stmmac_tx_timer - mitigation sw timer for tx. 3019 * @t: data pointer 3020 * Description: 3021 * This is the timer handler to directly invoke the stmmac_tx_clean. 3022 */ 3023 static enum hrtimer_restart stmmac_tx_timer(struct hrtimer *t) 3024 { 3025 struct stmmac_tx_queue *tx_q = container_of(t, struct stmmac_tx_queue, txtimer); 3026 struct stmmac_priv *priv = tx_q->priv_data; 3027 struct stmmac_channel *ch; 3028 struct napi_struct *napi; 3029 3030 ch = &priv->channel[tx_q->queue_index]; 3031 napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi; 3032 3033 if (likely(napi_schedule_prep(napi))) { 3034 unsigned long flags; 3035 3036 spin_lock_irqsave(&ch->lock, flags); 3037 stmmac_disable_dma_irq(priv, priv->ioaddr, ch->index, 0, 1); 3038 spin_unlock_irqrestore(&ch->lock, flags); 3039 __napi_schedule(napi); 3040 } 3041 3042 return HRTIMER_NORESTART; 3043 } 3044 3045 /** 3046 * stmmac_init_coalesce - init mitigation options. 3047 * @priv: driver private structure 3048 * Description: 3049 * This inits the coalesce parameters: i.e. 
timer rate, 3050 * timer handler and default threshold used for enabling the 3051 * interrupt on completion bit. 3052 */ 3053 static void stmmac_init_coalesce(struct stmmac_priv *priv) 3054 { 3055 u32 tx_channel_count = priv->plat->tx_queues_to_use; 3056 u32 rx_channel_count = priv->plat->rx_queues_to_use; 3057 u32 chan; 3058 3059 for (chan = 0; chan < tx_channel_count; chan++) { 3060 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan]; 3061 3062 priv->tx_coal_frames[chan] = STMMAC_TX_FRAMES; 3063 priv->tx_coal_timer[chan] = STMMAC_COAL_TX_TIMER; 3064 3065 hrtimer_init(&tx_q->txtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); 3066 tx_q->txtimer.function = stmmac_tx_timer; 3067 } 3068 3069 for (chan = 0; chan < rx_channel_count; chan++) 3070 priv->rx_coal_frames[chan] = STMMAC_RX_FRAMES; 3071 } 3072 3073 static void stmmac_set_rings_length(struct stmmac_priv *priv) 3074 { 3075 u32 rx_channels_count = priv->plat->rx_queues_to_use; 3076 u32 tx_channels_count = priv->plat->tx_queues_to_use; 3077 u32 chan; 3078 3079 /* set TX ring length */ 3080 for (chan = 0; chan < tx_channels_count; chan++) 3081 stmmac_set_tx_ring_len(priv, priv->ioaddr, 3082 (priv->dma_conf.dma_tx_size - 1), chan); 3083 3084 /* set RX ring length */ 3085 for (chan = 0; chan < rx_channels_count; chan++) 3086 stmmac_set_rx_ring_len(priv, priv->ioaddr, 3087 (priv->dma_conf.dma_rx_size - 1), chan); 3088 } 3089 3090 /** 3091 * stmmac_set_tx_queue_weight - Set TX queue weight 3092 * @priv: driver private structure 3093 * Description: It is used for setting TX queues weight 3094 */ 3095 static void stmmac_set_tx_queue_weight(struct stmmac_priv *priv) 3096 { 3097 u32 tx_queues_count = priv->plat->tx_queues_to_use; 3098 u32 weight; 3099 u32 queue; 3100 3101 for (queue = 0; queue < tx_queues_count; queue++) { 3102 weight = priv->plat->tx_queues_cfg[queue].weight; 3103 stmmac_set_mtl_tx_queue_weight(priv, priv->hw, weight, queue); 3104 } 3105 } 3106 3107 /** 3108 * stmmac_configure_cbs - Configure CBS in TX queue 3109 * @priv: driver private structure 3110 * Description: It is used for configuring CBS in AVB TX queues 3111 */ 3112 static void stmmac_configure_cbs(struct stmmac_priv *priv) 3113 { 3114 u32 tx_queues_count = priv->plat->tx_queues_to_use; 3115 u32 mode_to_use; 3116 u32 queue; 3117 3118 /* queue 0 is reserved for legacy traffic */ 3119 for (queue = 1; queue < tx_queues_count; queue++) { 3120 mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use; 3121 if (mode_to_use == MTL_QUEUE_DCB) 3122 continue; 3123 3124 stmmac_config_cbs(priv, priv->hw, 3125 priv->plat->tx_queues_cfg[queue].send_slope, 3126 priv->plat->tx_queues_cfg[queue].idle_slope, 3127 priv->plat->tx_queues_cfg[queue].high_credit, 3128 priv->plat->tx_queues_cfg[queue].low_credit, 3129 queue); 3130 } 3131 } 3132 3133 /** 3134 * stmmac_rx_queue_dma_chan_map - Map RX queue to RX dma channel 3135 * @priv: driver private structure 3136 * Description: It is used for mapping RX queues to RX dma channels 3137 */ 3138 static void stmmac_rx_queue_dma_chan_map(struct stmmac_priv *priv) 3139 { 3140 u32 rx_queues_count = priv->plat->rx_queues_to_use; 3141 u32 queue; 3142 u32 chan; 3143 3144 for (queue = 0; queue < rx_queues_count; queue++) { 3145 chan = priv->plat->rx_queues_cfg[queue].chan; 3146 stmmac_map_mtl_to_dma(priv, priv->hw, queue, chan); 3147 } 3148 } 3149 3150 /** 3151 * stmmac_mac_config_rx_queues_prio - Configure RX Queue priority 3152 * @priv: driver private structure 3153 * Description: It is used for configuring the RX Queue Priority 3154 */ 3155 
static void stmmac_mac_config_rx_queues_prio(struct stmmac_priv *priv) 3156 { 3157 u32 rx_queues_count = priv->plat->rx_queues_to_use; 3158 u32 queue; 3159 u32 prio; 3160 3161 for (queue = 0; queue < rx_queues_count; queue++) { 3162 if (!priv->plat->rx_queues_cfg[queue].use_prio) 3163 continue; 3164 3165 prio = priv->plat->rx_queues_cfg[queue].prio; 3166 stmmac_rx_queue_prio(priv, priv->hw, prio, queue); 3167 } 3168 } 3169 3170 /** 3171 * stmmac_mac_config_tx_queues_prio - Configure TX Queue priority 3172 * @priv: driver private structure 3173 * Description: It is used for configuring the TX Queue Priority 3174 */ 3175 static void stmmac_mac_config_tx_queues_prio(struct stmmac_priv *priv) 3176 { 3177 u32 tx_queues_count = priv->plat->tx_queues_to_use; 3178 u32 queue; 3179 u32 prio; 3180 3181 for (queue = 0; queue < tx_queues_count; queue++) { 3182 if (!priv->plat->tx_queues_cfg[queue].use_prio) 3183 continue; 3184 3185 prio = priv->plat->tx_queues_cfg[queue].prio; 3186 stmmac_tx_queue_prio(priv, priv->hw, prio, queue); 3187 } 3188 } 3189 3190 /** 3191 * stmmac_mac_config_rx_queues_routing - Configure RX Queue Routing 3192 * @priv: driver private structure 3193 * Description: It is used for configuring the RX queue routing 3194 */ 3195 static void stmmac_mac_config_rx_queues_routing(struct stmmac_priv *priv) 3196 { 3197 u32 rx_queues_count = priv->plat->rx_queues_to_use; 3198 u32 queue; 3199 u8 packet; 3200 3201 for (queue = 0; queue < rx_queues_count; queue++) { 3202 /* no specific packet type routing specified for the queue */ 3203 if (priv->plat->rx_queues_cfg[queue].pkt_route == 0x0) 3204 continue; 3205 3206 packet = priv->plat->rx_queues_cfg[queue].pkt_route; 3207 stmmac_rx_queue_routing(priv, priv->hw, packet, queue); 3208 } 3209 } 3210 3211 static void stmmac_mac_config_rss(struct stmmac_priv *priv) 3212 { 3213 if (!priv->dma_cap.rssen || !priv->plat->rss_en) { 3214 priv->rss.enable = false; 3215 return; 3216 } 3217 3218 if (priv->dev->features & NETIF_F_RXHASH) 3219 priv->rss.enable = true; 3220 else 3221 priv->rss.enable = false; 3222 3223 stmmac_rss_configure(priv, priv->hw, &priv->rss, 3224 priv->plat->rx_queues_to_use); 3225 } 3226 3227 /** 3228 * stmmac_mtl_configuration - Configure MTL 3229 * @priv: driver private structure 3230 * Description: It is used for configurring MTL 3231 */ 3232 static void stmmac_mtl_configuration(struct stmmac_priv *priv) 3233 { 3234 u32 rx_queues_count = priv->plat->rx_queues_to_use; 3235 u32 tx_queues_count = priv->plat->tx_queues_to_use; 3236 3237 if (tx_queues_count > 1) 3238 stmmac_set_tx_queue_weight(priv); 3239 3240 /* Configure MTL RX algorithms */ 3241 if (rx_queues_count > 1) 3242 stmmac_prog_mtl_rx_algorithms(priv, priv->hw, 3243 priv->plat->rx_sched_algorithm); 3244 3245 /* Configure MTL TX algorithms */ 3246 if (tx_queues_count > 1) 3247 stmmac_prog_mtl_tx_algorithms(priv, priv->hw, 3248 priv->plat->tx_sched_algorithm); 3249 3250 /* Configure CBS in AVB TX queues */ 3251 if (tx_queues_count > 1) 3252 stmmac_configure_cbs(priv); 3253 3254 /* Map RX MTL to DMA channels */ 3255 stmmac_rx_queue_dma_chan_map(priv); 3256 3257 /* Enable MAC RX Queues */ 3258 stmmac_mac_enable_rx_queues(priv); 3259 3260 /* Set RX priorities */ 3261 if (rx_queues_count > 1) 3262 stmmac_mac_config_rx_queues_prio(priv); 3263 3264 /* Set TX priorities */ 3265 if (tx_queues_count > 1) 3266 stmmac_mac_config_tx_queues_prio(priv); 3267 3268 /* Set RX routing */ 3269 if (rx_queues_count > 1) 3270 stmmac_mac_config_rx_queues_routing(priv); 3271 3272 /* Receive Side 
Scaling */ 3273 if (rx_queues_count > 1) 3274 stmmac_mac_config_rss(priv); 3275 } 3276 3277 static void stmmac_safety_feat_configuration(struct stmmac_priv *priv) 3278 { 3279 if (priv->dma_cap.asp) { 3280 netdev_info(priv->dev, "Enabling Safety Features\n"); 3281 stmmac_safety_feat_config(priv, priv->ioaddr, priv->dma_cap.asp, 3282 priv->plat->safety_feat_cfg); 3283 } else { 3284 netdev_info(priv->dev, "No Safety Features support found\n"); 3285 } 3286 } 3287 3288 static int stmmac_fpe_start_wq(struct stmmac_priv *priv) 3289 { 3290 char *name; 3291 3292 clear_bit(__FPE_TASK_SCHED, &priv->fpe_task_state); 3293 clear_bit(__FPE_REMOVING, &priv->fpe_task_state); 3294 3295 name = priv->wq_name; 3296 sprintf(name, "%s-fpe", priv->dev->name); 3297 3298 priv->fpe_wq = create_singlethread_workqueue(name); 3299 if (!priv->fpe_wq) { 3300 netdev_err(priv->dev, "%s: Failed to create workqueue\n", name); 3301 3302 return -ENOMEM; 3303 } 3304 netdev_info(priv->dev, "FPE workqueue start"); 3305 3306 return 0; 3307 } 3308 3309 /** 3310 * stmmac_hw_setup - setup mac in a usable state. 3311 * @dev : pointer to the device structure. 3312 * @ptp_register: register PTP if set 3313 * Description: 3314 * this is the main function to setup the HW in a usable state because the 3315 * dma engine is reset, the core registers are configured (e.g. AXI, 3316 * Checksum features, timers). The DMA is ready to start receiving and 3317 * transmitting. 3318 * Return value: 3319 * 0 on success and an appropriate (-)ve integer as defined in errno.h 3320 * file on failure. 3321 */ 3322 static int stmmac_hw_setup(struct net_device *dev, bool ptp_register) 3323 { 3324 struct stmmac_priv *priv = netdev_priv(dev); 3325 u32 rx_cnt = priv->plat->rx_queues_to_use; 3326 u32 tx_cnt = priv->plat->tx_queues_to_use; 3327 bool sph_en; 3328 u32 chan; 3329 int ret; 3330 3331 /* DMA initialization and SW reset */ 3332 ret = stmmac_init_dma_engine(priv); 3333 if (ret < 0) { 3334 netdev_err(priv->dev, "%s: DMA engine initialization failed\n", 3335 __func__); 3336 return ret; 3337 } 3338 3339 /* Copy the MAC addr into the HW */ 3340 stmmac_set_umac_addr(priv, priv->hw, dev->dev_addr, 0); 3341 3342 /* PS and related bits will be programmed according to the speed */ 3343 if (priv->hw->pcs) { 3344 int speed = priv->plat->mac_port_sel_speed; 3345 3346 if ((speed == SPEED_10) || (speed == SPEED_100) || 3347 (speed == SPEED_1000)) { 3348 priv->hw->ps = speed; 3349 } else { 3350 dev_warn(priv->device, "invalid port speed\n"); 3351 priv->hw->ps = 0; 3352 } 3353 } 3354 3355 /* Initialize the MAC Core */ 3356 stmmac_core_init(priv, priv->hw, dev); 3357 3358 /* Initialize MTL*/ 3359 stmmac_mtl_configuration(priv); 3360 3361 /* Initialize Safety Features */ 3362 stmmac_safety_feat_configuration(priv); 3363 3364 ret = stmmac_rx_ipc(priv, priv->hw); 3365 if (!ret) { 3366 netdev_warn(priv->dev, "RX IPC Checksum Offload disabled\n"); 3367 priv->plat->rx_coe = STMMAC_RX_COE_NONE; 3368 priv->hw->rx_csum = 0; 3369 } 3370 3371 /* Enable the MAC Rx/Tx */ 3372 stmmac_mac_set(priv, priv->ioaddr, true); 3373 3374 /* Set the HW DMA mode and the COE */ 3375 stmmac_dma_operation_mode(priv); 3376 3377 stmmac_mmc_setup(priv); 3378 3379 if (ptp_register) { 3380 ret = clk_prepare_enable(priv->plat->clk_ptp_ref); 3381 if (ret < 0) 3382 netdev_warn(priv->dev, 3383 "failed to enable PTP reference clock: %pe\n", 3384 ERR_PTR(ret)); 3385 } 3386 3387 ret = stmmac_init_ptp(priv); 3388 if (ret == -EOPNOTSUPP) 3389 netdev_info(priv->dev, "PTP not supported by HW\n"); 3390 else if (ret) 
3391 netdev_warn(priv->dev, "PTP init failed\n"); 3392 else if (ptp_register) 3393 stmmac_ptp_register(priv); 3394 3395 priv->eee_tw_timer = STMMAC_DEFAULT_TWT_LS; 3396 3397 /* Convert the timer from msec to usec */ 3398 if (!priv->tx_lpi_timer) 3399 priv->tx_lpi_timer = eee_timer * 1000; 3400 3401 if (priv->use_riwt) { 3402 u32 queue; 3403 3404 for (queue = 0; queue < rx_cnt; queue++) { 3405 if (!priv->rx_riwt[queue]) 3406 priv->rx_riwt[queue] = DEF_DMA_RIWT; 3407 3408 stmmac_rx_watchdog(priv, priv->ioaddr, 3409 priv->rx_riwt[queue], queue); 3410 } 3411 } 3412 3413 if (priv->hw->pcs) 3414 stmmac_pcs_ctrl_ane(priv, priv->ioaddr, 1, priv->hw->ps, 0); 3415 3416 /* set TX and RX rings length */ 3417 stmmac_set_rings_length(priv); 3418 3419 /* Enable TSO */ 3420 if (priv->tso) { 3421 for (chan = 0; chan < tx_cnt; chan++) { 3422 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan]; 3423 3424 /* TSO and TBS cannot co-exist */ 3425 if (tx_q->tbs & STMMAC_TBS_AVAIL) 3426 continue; 3427 3428 stmmac_enable_tso(priv, priv->ioaddr, 1, chan); 3429 } 3430 } 3431 3432 /* Enable Split Header */ 3433 sph_en = (priv->hw->rx_csum > 0) && priv->sph; 3434 for (chan = 0; chan < rx_cnt; chan++) 3435 stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan); 3436 3437 3438 /* VLAN Tag Insertion */ 3439 if (priv->dma_cap.vlins) 3440 stmmac_enable_vlan(priv, priv->hw, STMMAC_VLAN_INSERT); 3441 3442 /* TBS */ 3443 for (chan = 0; chan < tx_cnt; chan++) { 3444 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan]; 3445 int enable = tx_q->tbs & STMMAC_TBS_AVAIL; 3446 3447 stmmac_enable_tbs(priv, priv->ioaddr, enable, chan); 3448 } 3449 3450 /* Configure real RX and TX queues */ 3451 netif_set_real_num_rx_queues(dev, priv->plat->rx_queues_to_use); 3452 netif_set_real_num_tx_queues(dev, priv->plat->tx_queues_to_use); 3453 3454 /* Start the ball rolling... 
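 * At this point the DMA engine, MAC core, MTL queues, DMA operation mode,
 * ring lengths and the optional TSO/SPH/VLAN/TBS features have all been
 * programmed, so the RX/TX DMA channels can be started.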
*/ 3455 stmmac_start_all_dma(priv); 3456 3457 if (priv->dma_cap.fpesel) { 3458 stmmac_fpe_start_wq(priv); 3459 3460 if (priv->plat->fpe_cfg->enable) 3461 stmmac_fpe_handshake(priv, true); 3462 } 3463 3464 return 0; 3465 } 3466 3467 static void stmmac_hw_teardown(struct net_device *dev) 3468 { 3469 struct stmmac_priv *priv = netdev_priv(dev); 3470 3471 clk_disable_unprepare(priv->plat->clk_ptp_ref); 3472 } 3473 3474 static void stmmac_free_irq(struct net_device *dev, 3475 enum request_irq_err irq_err, int irq_idx) 3476 { 3477 struct stmmac_priv *priv = netdev_priv(dev); 3478 int j; 3479 3480 switch (irq_err) { 3481 case REQ_IRQ_ERR_ALL: 3482 irq_idx = priv->plat->tx_queues_to_use; 3483 fallthrough; 3484 case REQ_IRQ_ERR_TX: 3485 for (j = irq_idx - 1; j >= 0; j--) { 3486 if (priv->tx_irq[j] > 0) { 3487 irq_set_affinity_hint(priv->tx_irq[j], NULL); 3488 free_irq(priv->tx_irq[j], &priv->dma_conf.tx_queue[j]); 3489 } 3490 } 3491 irq_idx = priv->plat->rx_queues_to_use; 3492 fallthrough; 3493 case REQ_IRQ_ERR_RX: 3494 for (j = irq_idx - 1; j >= 0; j--) { 3495 if (priv->rx_irq[j] > 0) { 3496 irq_set_affinity_hint(priv->rx_irq[j], NULL); 3497 free_irq(priv->rx_irq[j], &priv->dma_conf.rx_queue[j]); 3498 } 3499 } 3500 3501 if (priv->sfty_ue_irq > 0 && priv->sfty_ue_irq != dev->irq) 3502 free_irq(priv->sfty_ue_irq, dev); 3503 fallthrough; 3504 case REQ_IRQ_ERR_SFTY_UE: 3505 if (priv->sfty_ce_irq > 0 && priv->sfty_ce_irq != dev->irq) 3506 free_irq(priv->sfty_ce_irq, dev); 3507 fallthrough; 3508 case REQ_IRQ_ERR_SFTY_CE: 3509 if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq) 3510 free_irq(priv->lpi_irq, dev); 3511 fallthrough; 3512 case REQ_IRQ_ERR_LPI: 3513 if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) 3514 free_irq(priv->wol_irq, dev); 3515 fallthrough; 3516 case REQ_IRQ_ERR_WOL: 3517 free_irq(dev->irq, dev); 3518 fallthrough; 3519 case REQ_IRQ_ERR_MAC: 3520 case REQ_IRQ_ERR_NO: 3521 /* If MAC IRQ request error, no more IRQ to free */ 3522 break; 3523 } 3524 } 3525 3526 static int stmmac_request_irq_multi_msi(struct net_device *dev) 3527 { 3528 struct stmmac_priv *priv = netdev_priv(dev); 3529 enum request_irq_err irq_err; 3530 cpumask_t cpu_mask; 3531 int irq_idx = 0; 3532 char *int_name; 3533 int ret; 3534 int i; 3535 3536 /* For common interrupt */ 3537 int_name = priv->int_name_mac; 3538 sprintf(int_name, "%s:%s", dev->name, "mac"); 3539 ret = request_irq(dev->irq, stmmac_mac_interrupt, 3540 0, int_name, dev); 3541 if (unlikely(ret < 0)) { 3542 netdev_err(priv->dev, 3543 "%s: alloc mac MSI %d (error: %d)\n", 3544 __func__, dev->irq, ret); 3545 irq_err = REQ_IRQ_ERR_MAC; 3546 goto irq_error; 3547 } 3548 3549 /* Request the Wake IRQ in case of another line 3550 * is used for WoL 3551 */ 3552 if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) { 3553 int_name = priv->int_name_wol; 3554 sprintf(int_name, "%s:%s", dev->name, "wol"); 3555 ret = request_irq(priv->wol_irq, 3556 stmmac_mac_interrupt, 3557 0, int_name, dev); 3558 if (unlikely(ret < 0)) { 3559 netdev_err(priv->dev, 3560 "%s: alloc wol MSI %d (error: %d)\n", 3561 __func__, priv->wol_irq, ret); 3562 irq_err = REQ_IRQ_ERR_WOL; 3563 goto irq_error; 3564 } 3565 } 3566 3567 /* Request the LPI IRQ in case of another line 3568 * is used for LPI 3569 */ 3570 if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq) { 3571 int_name = priv->int_name_lpi; 3572 sprintf(int_name, "%s:%s", dev->name, "lpi"); 3573 ret = request_irq(priv->lpi_irq, 3574 stmmac_mac_interrupt, 3575 0, int_name, dev); 3576 if (unlikely(ret < 0)) { 3577 netdev_err(priv->dev, 
3578 "%s: alloc lpi MSI %d (error: %d)\n", 3579 __func__, priv->lpi_irq, ret); 3580 irq_err = REQ_IRQ_ERR_LPI; 3581 goto irq_error; 3582 } 3583 } 3584 3585 /* Request the Safety Feature Correctible Error line in 3586 * case of another line is used 3587 */ 3588 if (priv->sfty_ce_irq > 0 && priv->sfty_ce_irq != dev->irq) { 3589 int_name = priv->int_name_sfty_ce; 3590 sprintf(int_name, "%s:%s", dev->name, "safety-ce"); 3591 ret = request_irq(priv->sfty_ce_irq, 3592 stmmac_safety_interrupt, 3593 0, int_name, dev); 3594 if (unlikely(ret < 0)) { 3595 netdev_err(priv->dev, 3596 "%s: alloc sfty ce MSI %d (error: %d)\n", 3597 __func__, priv->sfty_ce_irq, ret); 3598 irq_err = REQ_IRQ_ERR_SFTY_CE; 3599 goto irq_error; 3600 } 3601 } 3602 3603 /* Request the Safety Feature Uncorrectible Error line in 3604 * case of another line is used 3605 */ 3606 if (priv->sfty_ue_irq > 0 && priv->sfty_ue_irq != dev->irq) { 3607 int_name = priv->int_name_sfty_ue; 3608 sprintf(int_name, "%s:%s", dev->name, "safety-ue"); 3609 ret = request_irq(priv->sfty_ue_irq, 3610 stmmac_safety_interrupt, 3611 0, int_name, dev); 3612 if (unlikely(ret < 0)) { 3613 netdev_err(priv->dev, 3614 "%s: alloc sfty ue MSI %d (error: %d)\n", 3615 __func__, priv->sfty_ue_irq, ret); 3616 irq_err = REQ_IRQ_ERR_SFTY_UE; 3617 goto irq_error; 3618 } 3619 } 3620 3621 /* Request Rx MSI irq */ 3622 for (i = 0; i < priv->plat->rx_queues_to_use; i++) { 3623 if (i >= MTL_MAX_RX_QUEUES) 3624 break; 3625 if (priv->rx_irq[i] == 0) 3626 continue; 3627 3628 int_name = priv->int_name_rx_irq[i]; 3629 sprintf(int_name, "%s:%s-%d", dev->name, "rx", i); 3630 ret = request_irq(priv->rx_irq[i], 3631 stmmac_msi_intr_rx, 3632 0, int_name, &priv->dma_conf.rx_queue[i]); 3633 if (unlikely(ret < 0)) { 3634 netdev_err(priv->dev, 3635 "%s: alloc rx-%d MSI %d (error: %d)\n", 3636 __func__, i, priv->rx_irq[i], ret); 3637 irq_err = REQ_IRQ_ERR_RX; 3638 irq_idx = i; 3639 goto irq_error; 3640 } 3641 cpumask_clear(&cpu_mask); 3642 cpumask_set_cpu(i % num_online_cpus(), &cpu_mask); 3643 irq_set_affinity_hint(priv->rx_irq[i], &cpu_mask); 3644 } 3645 3646 /* Request Tx MSI irq */ 3647 for (i = 0; i < priv->plat->tx_queues_to_use; i++) { 3648 if (i >= MTL_MAX_TX_QUEUES) 3649 break; 3650 if (priv->tx_irq[i] == 0) 3651 continue; 3652 3653 int_name = priv->int_name_tx_irq[i]; 3654 sprintf(int_name, "%s:%s-%d", dev->name, "tx", i); 3655 ret = request_irq(priv->tx_irq[i], 3656 stmmac_msi_intr_tx, 3657 0, int_name, &priv->dma_conf.tx_queue[i]); 3658 if (unlikely(ret < 0)) { 3659 netdev_err(priv->dev, 3660 "%s: alloc tx-%d MSI %d (error: %d)\n", 3661 __func__, i, priv->tx_irq[i], ret); 3662 irq_err = REQ_IRQ_ERR_TX; 3663 irq_idx = i; 3664 goto irq_error; 3665 } 3666 cpumask_clear(&cpu_mask); 3667 cpumask_set_cpu(i % num_online_cpus(), &cpu_mask); 3668 irq_set_affinity_hint(priv->tx_irq[i], &cpu_mask); 3669 } 3670 3671 return 0; 3672 3673 irq_error: 3674 stmmac_free_irq(dev, irq_err, irq_idx); 3675 return ret; 3676 } 3677 3678 static int stmmac_request_irq_single(struct net_device *dev) 3679 { 3680 struct stmmac_priv *priv = netdev_priv(dev); 3681 enum request_irq_err irq_err; 3682 int ret; 3683 3684 ret = request_irq(dev->irq, stmmac_interrupt, 3685 IRQF_SHARED, dev->name, dev); 3686 if (unlikely(ret < 0)) { 3687 netdev_err(priv->dev, 3688 "%s: ERROR: allocating the IRQ %d (error: %d)\n", 3689 __func__, dev->irq, ret); 3690 irq_err = REQ_IRQ_ERR_MAC; 3691 goto irq_error; 3692 } 3693 3694 /* Request the Wake IRQ in case of another line 3695 * is used for WoL 3696 */ 3697 if (priv->wol_irq > 0 
&& priv->wol_irq != dev->irq) { 3698 ret = request_irq(priv->wol_irq, stmmac_interrupt, 3699 IRQF_SHARED, dev->name, dev); 3700 if (unlikely(ret < 0)) { 3701 netdev_err(priv->dev, 3702 "%s: ERROR: allocating the WoL IRQ %d (%d)\n", 3703 __func__, priv->wol_irq, ret); 3704 irq_err = REQ_IRQ_ERR_WOL; 3705 goto irq_error; 3706 } 3707 } 3708 3709 /* Request the IRQ lines */ 3710 if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq) { 3711 ret = request_irq(priv->lpi_irq, stmmac_interrupt, 3712 IRQF_SHARED, dev->name, dev); 3713 if (unlikely(ret < 0)) { 3714 netdev_err(priv->dev, 3715 "%s: ERROR: allocating the LPI IRQ %d (%d)\n", 3716 __func__, priv->lpi_irq, ret); 3717 irq_err = REQ_IRQ_ERR_LPI; 3718 goto irq_error; 3719 } 3720 } 3721 3722 return 0; 3723 3724 irq_error: 3725 stmmac_free_irq(dev, irq_err, 0); 3726 return ret; 3727 } 3728 3729 static int stmmac_request_irq(struct net_device *dev) 3730 { 3731 struct stmmac_priv *priv = netdev_priv(dev); 3732 int ret; 3733 3734 /* Request the IRQ lines */ 3735 if (priv->plat->flags & STMMAC_FLAG_MULTI_MSI_EN) 3736 ret = stmmac_request_irq_multi_msi(dev); 3737 else 3738 ret = stmmac_request_irq_single(dev); 3739 3740 return ret; 3741 } 3742 3743 /** 3744 * stmmac_setup_dma_desc - Generate a dma_conf and allocate DMA queue 3745 * @priv: driver private structure 3746 * @mtu: MTU to setup the dma queue and buf with 3747 * Description: Allocate and generate a dma_conf based on the provided MTU. 3748 * Allocate the Tx/Rx DMA queue and init them. 3749 * Return value: 3750 * the dma_conf allocated struct on success and an appropriate ERR_PTR on failure. 3751 */ 3752 static struct stmmac_dma_conf * 3753 stmmac_setup_dma_desc(struct stmmac_priv *priv, unsigned int mtu) 3754 { 3755 struct stmmac_dma_conf *dma_conf; 3756 int chan, bfsize, ret; 3757 3758 dma_conf = kzalloc(sizeof(*dma_conf), GFP_KERNEL); 3759 if (!dma_conf) { 3760 netdev_err(priv->dev, "%s: DMA conf allocation failed\n", 3761 __func__); 3762 return ERR_PTR(-ENOMEM); 3763 } 3764 3765 bfsize = stmmac_set_16kib_bfsize(priv, mtu); 3766 if (bfsize < 0) 3767 bfsize = 0; 3768 3769 if (bfsize < BUF_SIZE_16KiB) 3770 bfsize = stmmac_set_bfsize(mtu, 0); 3771 3772 dma_conf->dma_buf_sz = bfsize; 3773 /* Chose the tx/rx size from the already defined one in the 3774 * priv struct. (if defined) 3775 */ 3776 dma_conf->dma_tx_size = priv->dma_conf.dma_tx_size; 3777 dma_conf->dma_rx_size = priv->dma_conf.dma_rx_size; 3778 3779 if (!dma_conf->dma_tx_size) 3780 dma_conf->dma_tx_size = DMA_DEFAULT_TX_SIZE; 3781 if (!dma_conf->dma_rx_size) 3782 dma_conf->dma_rx_size = DMA_DEFAULT_RX_SIZE; 3783 3784 /* Earlier check for TBS */ 3785 for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) { 3786 struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[chan]; 3787 int tbs_en = priv->plat->tx_queues_cfg[chan].tbs_en; 3788 3789 /* Setup per-TXQ tbs flag before TX descriptor alloc */ 3790 tx_q->tbs |= tbs_en ? 
STMMAC_TBS_AVAIL : 0; 3791 } 3792 3793 ret = alloc_dma_desc_resources(priv, dma_conf); 3794 if (ret < 0) { 3795 netdev_err(priv->dev, "%s: DMA descriptors allocation failed\n", 3796 __func__); 3797 goto alloc_error; 3798 } 3799 3800 ret = init_dma_desc_rings(priv->dev, dma_conf, GFP_KERNEL); 3801 if (ret < 0) { 3802 netdev_err(priv->dev, "%s: DMA descriptors initialization failed\n", 3803 __func__); 3804 goto init_error; 3805 } 3806 3807 return dma_conf; 3808 3809 init_error: 3810 free_dma_desc_resources(priv, dma_conf); 3811 alloc_error: 3812 kfree(dma_conf); 3813 return ERR_PTR(ret); 3814 } 3815 3816 /** 3817 * __stmmac_open - open entry point of the driver 3818 * @dev : pointer to the device structure. 3819 * @dma_conf : structure to take the dma data 3820 * Description: 3821 * This function is the open entry point of the driver. 3822 * Return value: 3823 * 0 on success and an appropriate (-)ve integer as defined in errno.h 3824 * file on failure. 3825 */ 3826 static int __stmmac_open(struct net_device *dev, 3827 struct stmmac_dma_conf *dma_conf) 3828 { 3829 struct stmmac_priv *priv = netdev_priv(dev); 3830 int mode = priv->plat->phy_interface; 3831 u32 chan; 3832 int ret; 3833 3834 ret = pm_runtime_resume_and_get(priv->device); 3835 if (ret < 0) 3836 return ret; 3837 3838 if (priv->hw->pcs != STMMAC_PCS_TBI && 3839 priv->hw->pcs != STMMAC_PCS_RTBI && 3840 (!priv->hw->xpcs || 3841 xpcs_get_an_mode(priv->hw->xpcs, mode) != DW_AN_C73) && 3842 !priv->hw->lynx_pcs) { 3843 ret = stmmac_init_phy(dev); 3844 if (ret) { 3845 netdev_err(priv->dev, 3846 "%s: Cannot attach to PHY (error: %d)\n", 3847 __func__, ret); 3848 goto init_phy_error; 3849 } 3850 } 3851 3852 priv->rx_copybreak = STMMAC_RX_COPYBREAK; 3853 3854 buf_sz = dma_conf->dma_buf_sz; 3855 memcpy(&priv->dma_conf, dma_conf, sizeof(*dma_conf)); 3856 3857 stmmac_reset_queues_param(priv); 3858 3859 if (!(priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP) && 3860 priv->plat->serdes_powerup) { 3861 ret = priv->plat->serdes_powerup(dev, priv->plat->bsp_priv); 3862 if (ret < 0) { 3863 netdev_err(priv->dev, "%s: Serdes powerup failed\n", 3864 __func__); 3865 goto init_error; 3866 } 3867 } 3868 3869 ret = stmmac_hw_setup(dev, true); 3870 if (ret < 0) { 3871 netdev_err(priv->dev, "%s: Hw setup failed\n", __func__); 3872 goto init_error; 3873 } 3874 3875 stmmac_init_coalesce(priv); 3876 3877 phylink_start(priv->phylink); 3878 /* We may have called phylink_speed_down before */ 3879 phylink_speed_up(priv->phylink); 3880 3881 ret = stmmac_request_irq(dev); 3882 if (ret) 3883 goto irq_error; 3884 3885 stmmac_enable_all_queues(priv); 3886 netif_tx_start_all_queues(priv->dev); 3887 stmmac_enable_all_dma_irq(priv); 3888 3889 return 0; 3890 3891 irq_error: 3892 phylink_stop(priv->phylink); 3893 3894 for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) 3895 hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer); 3896 3897 stmmac_hw_teardown(dev); 3898 init_error: 3899 phylink_disconnect_phy(priv->phylink); 3900 init_phy_error: 3901 pm_runtime_put(priv->device); 3902 return ret; 3903 } 3904 3905 static int stmmac_open(struct net_device *dev) 3906 { 3907 struct stmmac_priv *priv = netdev_priv(dev); 3908 struct stmmac_dma_conf *dma_conf; 3909 int ret; 3910 3911 dma_conf = stmmac_setup_dma_desc(priv, dev->mtu); 3912 if (IS_ERR(dma_conf)) 3913 return PTR_ERR(dma_conf); 3914 3915 ret = __stmmac_open(dev, dma_conf); 3916 if (ret) 3917 free_dma_desc_resources(priv, dma_conf); 3918 3919 kfree(dma_conf); 3920 return ret; 3921 } 3922 3923 static void 
stmmac_fpe_stop_wq(struct stmmac_priv *priv)
3924 {
3925 set_bit(__FPE_REMOVING, &priv->fpe_task_state);
3926
3927 if (priv->fpe_wq)
3928 destroy_workqueue(priv->fpe_wq);
3929
3930 netdev_info(priv->dev, "FPE workqueue stop");
3931 }
3932
3933 /**
3934 * stmmac_release - close entry point of the driver
3935 * @dev : device pointer.
3936 * Description:
3937 * This is the stop entry point of the driver.
3938 */
3939 static int stmmac_release(struct net_device *dev)
3940 {
3941 struct stmmac_priv *priv = netdev_priv(dev);
3942 u32 chan;
3943
3944 if (device_may_wakeup(priv->device))
3945 phylink_speed_down(priv->phylink, false);
3946 /* Stop and disconnect the PHY */
3947 phylink_stop(priv->phylink);
3948 phylink_disconnect_phy(priv->phylink);
3949
3950 stmmac_disable_all_queues(priv);
3951
3952 for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
3953 hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
3954
3955 netif_tx_disable(dev);
3956
3957 /* Free the IRQ lines */
3958 stmmac_free_irq(dev, REQ_IRQ_ERR_ALL, 0);
3959
3960 if (priv->eee_enabled) {
3961 priv->tx_path_in_lpi_mode = false;
3962 del_timer_sync(&priv->eee_ctrl_timer);
3963 }
3964
3965 /* Stop TX/RX DMA and clear the descriptors */
3966 stmmac_stop_all_dma(priv);
3967
3968 /* Release and free the Rx/Tx resources */
3969 free_dma_desc_resources(priv, &priv->dma_conf);
3970
3971 /* Disable the MAC Rx/Tx */
3972 stmmac_mac_set(priv, priv->ioaddr, false);
3973
3974 /* Power down the SerDes if present */
3975 if (priv->plat->serdes_powerdown)
3976 priv->plat->serdes_powerdown(dev, priv->plat->bsp_priv);
3977
3978 netif_carrier_off(dev);
3979
3980 stmmac_release_ptp(priv);
3981
3982 pm_runtime_put(priv->device);
3983
3984 if (priv->dma_cap.fpesel)
3985 stmmac_fpe_stop_wq(priv);
3986
3987 return 0;
3988 }
3989
3990 static bool stmmac_vlan_insert(struct stmmac_priv *priv, struct sk_buff *skb,
3991 struct stmmac_tx_queue *tx_q)
3992 {
3993 u16 tag = 0x0, inner_tag = 0x0;
3994 u32 inner_type = 0x0;
3995 struct dma_desc *p;
3996
3997 if (!priv->dma_cap.vlins)
3998 return false;
3999 if (!skb_vlan_tag_present(skb))
4000 return false;
4001 if (skb->vlan_proto == htons(ETH_P_8021AD)) {
4002 inner_tag = skb_vlan_tag_get(skb);
4003 inner_type = STMMAC_VLAN_INSERT;
4004 }
4005
4006 tag = skb_vlan_tag_get(skb);
4007
4008 if (tx_q->tbs & STMMAC_TBS_AVAIL)
4009 p = &tx_q->dma_entx[tx_q->cur_tx].basic;
4010 else
4011 p = &tx_q->dma_tx[tx_q->cur_tx];
4012
4013 if (stmmac_set_desc_vlan_tag(priv, p, tag, inner_tag, inner_type))
4014 return false;
4015
4016 stmmac_set_tx_owner(priv, p);
4017 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
4018 return true;
4019 }
4020
4021 /**
4022 * stmmac_tso_allocator - Allocate TSO payload descriptors
4023 * @priv: driver private structure
4024 * @des: buffer start address
4025 * @total_len: total length to fill in descriptors
4026 * @last_segment: condition for the last descriptor
4027 * @queue: TX queue index
4028 * Description:
4029 * This function fills descriptors and requests new ones according to the
4030 * remaining buffer length to fill.
4031 */
4032 static void stmmac_tso_allocator(struct stmmac_priv *priv, dma_addr_t des,
4033 int total_len, bool last_segment, u32 queue)
4034 {
4035 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
4036 struct dma_desc *desc;
4037 u32 buff_size;
4038 int tmp_len;
4039
4040 tmp_len = total_len;
4041
4042 while (tmp_len > 0) {
4043 dma_addr_t curr_addr;
4044
4045 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx,
4046 priv->dma_conf.dma_tx_size);
4047 WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
4048
4049 if (tx_q->tbs & STMMAC_TBS_AVAIL)
4050 desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
4051 else
4052 desc = &tx_q->dma_tx[tx_q->cur_tx];
4053
4054 curr_addr = des + (total_len - tmp_len);
4055 if (priv->dma_cap.addr64 <= 32)
4056 desc->des0 = cpu_to_le32(curr_addr);
4057 else
4058 stmmac_set_desc_addr(priv, desc, curr_addr);
4059
4060 buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ?
4061 TSO_MAX_BUFF_SIZE : tmp_len;
4062
4063 stmmac_prepare_tso_tx_desc(priv, desc, 0, buff_size,
4064 0, 1,
4065 (last_segment) && (tmp_len <= TSO_MAX_BUFF_SIZE),
4066 0, 0);
4067
4068 tmp_len -= TSO_MAX_BUFF_SIZE;
4069 }
4070 }
4071
4072 static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue)
4073 {
4074 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
4075 int desc_size;
4076
4077 if (likely(priv->extend_desc))
4078 desc_size = sizeof(struct dma_extended_desc);
4079 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4080 desc_size = sizeof(struct dma_edesc);
4081 else
4082 desc_size = sizeof(struct dma_desc);
4083
4084 /* The OWN bit must be the last thing written when preparing the
4085 * descriptor, and a barrier is then needed to make sure that
4086 * everything is coherent before granting control to the DMA engine.
4087 */
4088 wmb();
4089
4090 tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * desc_size);
4091 stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);
4092 }
4093
4094 /**
4095 * stmmac_tso_xmit - Tx entry point of the driver for oversized frames (TSO)
4096 * @skb : the socket buffer
4097 * @dev : device pointer
4098 * Description: this is the transmit function that is called on TSO frames
4099 * (support available on GMAC4 and newer chips).
4100 * The diagram below shows the ring programming in case of TSO frames:
4101 *
4102 * First Descriptor
4103 * --------
4104 * | DES0 |---> buffer1 = L2/L3/L4 header
4105 * | DES1 |---> TCP Payload (can continue on next descr...)
4106 * | DES2 |---> buffer 1 and 2 len
4107 * | DES3 |---> must set TSE, TCP hdr len-> [22:19]. TCP payload len [17:0]
4108 * --------
4109 * |
4110 * ...
4111 * |
4112 * --------
4113 * | DES0 | --| Split TCP Payload on Buffers 1 and 2
4114 * | DES1 | --|
4115 * | DES2 | --> buffer 1 and 2 len
4116 * | DES3 |
4117 * --------
4118 *
4119 * The MSS is fixed while TSO is enabled, so the TDES3 context field only needs reprogramming when the MSS changes.
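 * Payload that is not carried by the first descriptor is handed to
 * stmmac_tso_allocator(), which carves it into chunks of at most
 * TSO_MAX_BUFF_SIZE (SZ_16K - 1) bytes, one descriptor per chunk
 * (purely as an illustration, 65482 bytes of remaining payload would
 * take four such descriptors).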
4120 */ 4121 static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev) 4122 { 4123 struct dma_desc *desc, *first, *mss_desc = NULL; 4124 struct stmmac_priv *priv = netdev_priv(dev); 4125 int nfrags = skb_shinfo(skb)->nr_frags; 4126 u32 queue = skb_get_queue_mapping(skb); 4127 unsigned int first_entry, tx_packets; 4128 struct stmmac_txq_stats *txq_stats; 4129 int tmp_pay_len = 0, first_tx; 4130 struct stmmac_tx_queue *tx_q; 4131 bool has_vlan, set_ic; 4132 u8 proto_hdr_len, hdr; 4133 unsigned long flags; 4134 u32 pay_len, mss; 4135 dma_addr_t des; 4136 int i; 4137 4138 tx_q = &priv->dma_conf.tx_queue[queue]; 4139 txq_stats = &priv->xstats.txq_stats[queue]; 4140 first_tx = tx_q->cur_tx; 4141 4142 /* Compute header lengths */ 4143 if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) { 4144 proto_hdr_len = skb_transport_offset(skb) + sizeof(struct udphdr); 4145 hdr = sizeof(struct udphdr); 4146 } else { 4147 proto_hdr_len = skb_tcp_all_headers(skb); 4148 hdr = tcp_hdrlen(skb); 4149 } 4150 4151 /* Desc availability based on threshold should be enough safe */ 4152 if (unlikely(stmmac_tx_avail(priv, queue) < 4153 (((skb->len - proto_hdr_len) / TSO_MAX_BUFF_SIZE + 1)))) { 4154 if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) { 4155 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, 4156 queue)); 4157 /* This is a hard error, log it. */ 4158 netdev_err(priv->dev, 4159 "%s: Tx Ring full when queue awake\n", 4160 __func__); 4161 } 4162 return NETDEV_TX_BUSY; 4163 } 4164 4165 pay_len = skb_headlen(skb) - proto_hdr_len; /* no frags */ 4166 4167 mss = skb_shinfo(skb)->gso_size; 4168 4169 /* set new MSS value if needed */ 4170 if (mss != tx_q->mss) { 4171 if (tx_q->tbs & STMMAC_TBS_AVAIL) 4172 mss_desc = &tx_q->dma_entx[tx_q->cur_tx].basic; 4173 else 4174 mss_desc = &tx_q->dma_tx[tx_q->cur_tx]; 4175 4176 stmmac_set_mss(priv, mss_desc, mss); 4177 tx_q->mss = mss; 4178 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, 4179 priv->dma_conf.dma_tx_size); 4180 WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]); 4181 } 4182 4183 if (netif_msg_tx_queued(priv)) { 4184 pr_info("%s: hdrlen %d, hdr_len %d, pay_len %d, mss %d\n", 4185 __func__, hdr, proto_hdr_len, pay_len, mss); 4186 pr_info("\tskb->len %d, skb->data_len %d\n", skb->len, 4187 skb->data_len); 4188 } 4189 4190 /* Check if VLAN can be inserted by HW */ 4191 has_vlan = stmmac_vlan_insert(priv, skb, tx_q); 4192 4193 first_entry = tx_q->cur_tx; 4194 WARN_ON(tx_q->tx_skbuff[first_entry]); 4195 4196 if (tx_q->tbs & STMMAC_TBS_AVAIL) 4197 desc = &tx_q->dma_entx[first_entry].basic; 4198 else 4199 desc = &tx_q->dma_tx[first_entry]; 4200 first = desc; 4201 4202 if (has_vlan) 4203 stmmac_set_desc_vlan(priv, first, STMMAC_VLAN_INSERT); 4204 4205 /* first descriptor: fill Headers on Buf1 */ 4206 des = dma_map_single(priv->device, skb->data, skb_headlen(skb), 4207 DMA_TO_DEVICE); 4208 if (dma_mapping_error(priv->device, des)) 4209 goto dma_map_err; 4210 4211 tx_q->tx_skbuff_dma[first_entry].buf = des; 4212 tx_q->tx_skbuff_dma[first_entry].len = skb_headlen(skb); 4213 tx_q->tx_skbuff_dma[first_entry].map_as_page = false; 4214 tx_q->tx_skbuff_dma[first_entry].buf_type = STMMAC_TXBUF_T_SKB; 4215 4216 if (priv->dma_cap.addr64 <= 32) { 4217 first->des0 = cpu_to_le32(des); 4218 4219 /* Fill start of payload in buff2 of first descriptor */ 4220 if (pay_len) 4221 first->des1 = cpu_to_le32(des + proto_hdr_len); 4222 4223 /* If needed take extra descriptors to fill the remaining payload */ 4224 tmp_pay_len = pay_len - TSO_MAX_BUFF_SIZE; 4225 } else { 4226 
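/* More than 32-bit addressing: program the buffer address into the first
 * descriptor and let stmmac_tso_allocator() below map the whole payload,
 * which starts right after the headers (des + proto_hdr_len).
 */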
stmmac_set_desc_addr(priv, first, des); 4227 tmp_pay_len = pay_len; 4228 des += proto_hdr_len; 4229 pay_len = 0; 4230 } 4231 4232 stmmac_tso_allocator(priv, des, tmp_pay_len, (nfrags == 0), queue); 4233 4234 /* Prepare fragments */ 4235 for (i = 0; i < nfrags; i++) { 4236 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 4237 4238 des = skb_frag_dma_map(priv->device, frag, 0, 4239 skb_frag_size(frag), 4240 DMA_TO_DEVICE); 4241 if (dma_mapping_error(priv->device, des)) 4242 goto dma_map_err; 4243 4244 stmmac_tso_allocator(priv, des, skb_frag_size(frag), 4245 (i == nfrags - 1), queue); 4246 4247 tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des; 4248 tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_frag_size(frag); 4249 tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = true; 4250 tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB; 4251 } 4252 4253 tx_q->tx_skbuff_dma[tx_q->cur_tx].last_segment = true; 4254 4255 /* Only the last descriptor gets to point to the skb. */ 4256 tx_q->tx_skbuff[tx_q->cur_tx] = skb; 4257 tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB; 4258 4259 /* Manage tx mitigation */ 4260 tx_packets = (tx_q->cur_tx + 1) - first_tx; 4261 tx_q->tx_count_frames += tx_packets; 4262 4263 if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en) 4264 set_ic = true; 4265 else if (!priv->tx_coal_frames[queue]) 4266 set_ic = false; 4267 else if (tx_packets > priv->tx_coal_frames[queue]) 4268 set_ic = true; 4269 else if ((tx_q->tx_count_frames % 4270 priv->tx_coal_frames[queue]) < tx_packets) 4271 set_ic = true; 4272 else 4273 set_ic = false; 4274 4275 if (set_ic) { 4276 if (tx_q->tbs & STMMAC_TBS_AVAIL) 4277 desc = &tx_q->dma_entx[tx_q->cur_tx].basic; 4278 else 4279 desc = &tx_q->dma_tx[tx_q->cur_tx]; 4280 4281 tx_q->tx_count_frames = 0; 4282 stmmac_set_tx_ic(priv, desc); 4283 } 4284 4285 /* We've used all descriptors we need for this skb, however, 4286 * advance cur_tx so that it references a fresh descriptor. 4287 * ndo_start_xmit will fill this descriptor the next time it's 4288 * called and stmmac_tx_clean may clean up to this descriptor. 4289 */ 4290 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size); 4291 4292 if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) { 4293 netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n", 4294 __func__); 4295 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue)); 4296 } 4297 4298 flags = u64_stats_update_begin_irqsave(&txq_stats->syncp); 4299 txq_stats->tx_bytes += skb->len; 4300 txq_stats->tx_tso_frames++; 4301 txq_stats->tx_tso_nfrags += nfrags; 4302 if (set_ic) 4303 txq_stats->tx_set_ic_bit++; 4304 u64_stats_update_end_irqrestore(&txq_stats->syncp, flags); 4305 4306 if (priv->sarc_type) 4307 stmmac_set_desc_sarc(priv, first, priv->sarc_type); 4308 4309 skb_tx_timestamp(skb); 4310 4311 if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && 4312 priv->hwts_tx_en)) { 4313 /* declare that device is doing timestamping */ 4314 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; 4315 stmmac_enable_tx_timestamp(priv, first); 4316 } 4317 4318 /* Complete the first descriptor before granting the DMA */ 4319 stmmac_prepare_tso_tx_desc(priv, first, 1, 4320 proto_hdr_len, 4321 pay_len, 4322 1, tx_q->tx_skbuff_dma[first_entry].last_segment, 4323 hdr / 4, (skb->len - proto_hdr_len)); 4324 4325 /* If context desc is used to change MSS */ 4326 if (mss_desc) { 4327 /* Make sure that first descriptor has been completely 4328 * written, including its own bit. 
This is because MSS is 4329 * actually before first descriptor, so we need to make 4330 * sure that MSS's own bit is the last thing written. 4331 */ 4332 dma_wmb(); 4333 stmmac_set_tx_owner(priv, mss_desc); 4334 } 4335 4336 if (netif_msg_pktdata(priv)) { 4337 pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n", 4338 __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry, 4339 tx_q->cur_tx, first, nfrags); 4340 pr_info(">>> frame to be transmitted: "); 4341 print_pkt(skb->data, skb_headlen(skb)); 4342 } 4343 4344 netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len); 4345 4346 stmmac_flush_tx_descriptors(priv, queue); 4347 stmmac_tx_timer_arm(priv, queue); 4348 4349 return NETDEV_TX_OK; 4350 4351 dma_map_err: 4352 dev_err(priv->device, "Tx dma map failed\n"); 4353 dev_kfree_skb(skb); 4354 priv->xstats.tx_dropped++; 4355 return NETDEV_TX_OK; 4356 } 4357 4358 /** 4359 * stmmac_xmit - Tx entry point of the driver 4360 * @skb : the socket buffer 4361 * @dev : device pointer 4362 * Description : this is the tx entry point of the driver. 4363 * It programs the chain or the ring and supports oversized frames 4364 * and SG feature. 4365 */ 4366 static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) 4367 { 4368 unsigned int first_entry, tx_packets, enh_desc; 4369 struct stmmac_priv *priv = netdev_priv(dev); 4370 unsigned int nopaged_len = skb_headlen(skb); 4371 int i, csum_insertion = 0, is_jumbo = 0; 4372 u32 queue = skb_get_queue_mapping(skb); 4373 int nfrags = skb_shinfo(skb)->nr_frags; 4374 int gso = skb_shinfo(skb)->gso_type; 4375 struct stmmac_txq_stats *txq_stats; 4376 struct dma_edesc *tbs_desc = NULL; 4377 struct dma_desc *desc, *first; 4378 struct stmmac_tx_queue *tx_q; 4379 bool has_vlan, set_ic; 4380 int entry, first_tx; 4381 unsigned long flags; 4382 dma_addr_t des; 4383 4384 tx_q = &priv->dma_conf.tx_queue[queue]; 4385 txq_stats = &priv->xstats.txq_stats[queue]; 4386 first_tx = tx_q->cur_tx; 4387 4388 if (priv->tx_path_in_lpi_mode && priv->eee_sw_timer_en) 4389 stmmac_disable_eee_mode(priv); 4390 4391 /* Manage oversized TCP frames for GMAC4 device */ 4392 if (skb_is_gso(skb) && priv->tso) { 4393 if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) 4394 return stmmac_tso_xmit(skb, dev); 4395 if (priv->plat->has_gmac4 && (gso & SKB_GSO_UDP_L4)) 4396 return stmmac_tso_xmit(skb, dev); 4397 } 4398 4399 if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) { 4400 if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) { 4401 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, 4402 queue)); 4403 /* This is a hard error, log it. 
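 * Getting here with the queue still awake normally points at a Tx
 * completion or descriptor accounting problem, since the queue should
 * have been stopped before the ring could fill up.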
*/ 4404 netdev_err(priv->dev, 4405 "%s: Tx Ring full when queue awake\n", 4406 __func__); 4407 } 4408 return NETDEV_TX_BUSY; 4409 } 4410 4411 /* Check if VLAN can be inserted by HW */ 4412 has_vlan = stmmac_vlan_insert(priv, skb, tx_q); 4413 4414 entry = tx_q->cur_tx; 4415 first_entry = entry; 4416 WARN_ON(tx_q->tx_skbuff[first_entry]); 4417 4418 csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL); 4419 4420 if (likely(priv->extend_desc)) 4421 desc = (struct dma_desc *)(tx_q->dma_etx + entry); 4422 else if (tx_q->tbs & STMMAC_TBS_AVAIL) 4423 desc = &tx_q->dma_entx[entry].basic; 4424 else 4425 desc = tx_q->dma_tx + entry; 4426 4427 first = desc; 4428 4429 if (has_vlan) 4430 stmmac_set_desc_vlan(priv, first, STMMAC_VLAN_INSERT); 4431 4432 enh_desc = priv->plat->enh_desc; 4433 /* To program the descriptors according to the size of the frame */ 4434 if (enh_desc) 4435 is_jumbo = stmmac_is_jumbo_frm(priv, skb->len, enh_desc); 4436 4437 if (unlikely(is_jumbo)) { 4438 entry = stmmac_jumbo_frm(priv, tx_q, skb, csum_insertion); 4439 if (unlikely(entry < 0) && (entry != -EINVAL)) 4440 goto dma_map_err; 4441 } 4442 4443 for (i = 0; i < nfrags; i++) { 4444 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 4445 int len = skb_frag_size(frag); 4446 bool last_segment = (i == (nfrags - 1)); 4447 4448 entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size); 4449 WARN_ON(tx_q->tx_skbuff[entry]); 4450 4451 if (likely(priv->extend_desc)) 4452 desc = (struct dma_desc *)(tx_q->dma_etx + entry); 4453 else if (tx_q->tbs & STMMAC_TBS_AVAIL) 4454 desc = &tx_q->dma_entx[entry].basic; 4455 else 4456 desc = tx_q->dma_tx + entry; 4457 4458 des = skb_frag_dma_map(priv->device, frag, 0, len, 4459 DMA_TO_DEVICE); 4460 if (dma_mapping_error(priv->device, des)) 4461 goto dma_map_err; /* should reuse desc w/o issues */ 4462 4463 tx_q->tx_skbuff_dma[entry].buf = des; 4464 4465 stmmac_set_desc_addr(priv, desc, des); 4466 4467 tx_q->tx_skbuff_dma[entry].map_as_page = true; 4468 tx_q->tx_skbuff_dma[entry].len = len; 4469 tx_q->tx_skbuff_dma[entry].last_segment = last_segment; 4470 tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_SKB; 4471 4472 /* Prepare the descriptor and set the own bit too */ 4473 stmmac_prepare_tx_desc(priv, desc, 0, len, csum_insertion, 4474 priv->mode, 1, last_segment, skb->len); 4475 } 4476 4477 /* Only the last descriptor gets to point to the skb. */ 4478 tx_q->tx_skbuff[entry] = skb; 4479 tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_SKB; 4480 4481 /* According to the coalesce parameter the IC bit for the latest 4482 * segment is reset and the timer re-started to clean the tx status. 4483 * This approach takes care about the fragments: desc is the first 4484 * element in case of no SG. 
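 * In practice the IC bit ends up being requested roughly once every
 * tx_coal_frames[queue] queued frames; an skb flagged for hardware
 * timestamping (SKBTX_HW_TSTAMP with hwts_tx_en set) always requests it.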
4485 */ 4486 tx_packets = (entry + 1) - first_tx; 4487 tx_q->tx_count_frames += tx_packets; 4488 4489 if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en) 4490 set_ic = true; 4491 else if (!priv->tx_coal_frames[queue]) 4492 set_ic = false; 4493 else if (tx_packets > priv->tx_coal_frames[queue]) 4494 set_ic = true; 4495 else if ((tx_q->tx_count_frames % 4496 priv->tx_coal_frames[queue]) < tx_packets) 4497 set_ic = true; 4498 else 4499 set_ic = false; 4500 4501 if (set_ic) { 4502 if (likely(priv->extend_desc)) 4503 desc = &tx_q->dma_etx[entry].basic; 4504 else if (tx_q->tbs & STMMAC_TBS_AVAIL) 4505 desc = &tx_q->dma_entx[entry].basic; 4506 else 4507 desc = &tx_q->dma_tx[entry]; 4508 4509 tx_q->tx_count_frames = 0; 4510 stmmac_set_tx_ic(priv, desc); 4511 } 4512 4513 /* We've used all descriptors we need for this skb, however, 4514 * advance cur_tx so that it references a fresh descriptor. 4515 * ndo_start_xmit will fill this descriptor the next time it's 4516 * called and stmmac_tx_clean may clean up to this descriptor. 4517 */ 4518 entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size); 4519 tx_q->cur_tx = entry; 4520 4521 if (netif_msg_pktdata(priv)) { 4522 netdev_dbg(priv->dev, 4523 "%s: curr=%d dirty=%d f=%d, e=%d, first=%p, nfrags=%d", 4524 __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry, 4525 entry, first, nfrags); 4526 4527 netdev_dbg(priv->dev, ">>> frame to be transmitted: "); 4528 print_pkt(skb->data, skb->len); 4529 } 4530 4531 if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) { 4532 netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n", 4533 __func__); 4534 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue)); 4535 } 4536 4537 flags = u64_stats_update_begin_irqsave(&txq_stats->syncp); 4538 txq_stats->tx_bytes += skb->len; 4539 if (set_ic) 4540 txq_stats->tx_set_ic_bit++; 4541 u64_stats_update_end_irqrestore(&txq_stats->syncp, flags); 4542 4543 if (priv->sarc_type) 4544 stmmac_set_desc_sarc(priv, first, priv->sarc_type); 4545 4546 skb_tx_timestamp(skb); 4547 4548 /* Ready to fill the first descriptor and set the OWN bit w/o any 4549 * problems because all the descriptors are actually ready to be 4550 * passed to the DMA engine. 
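 * The first descriptor's OWN bit is therefore set last, via
 * stmmac_set_tx_owner() below, once every other descriptor of the frame
 * has been prepared, so the DMA never sees a half-built chain.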
4551 */ 4552 if (likely(!is_jumbo)) { 4553 bool last_segment = (nfrags == 0); 4554 4555 des = dma_map_single(priv->device, skb->data, 4556 nopaged_len, DMA_TO_DEVICE); 4557 if (dma_mapping_error(priv->device, des)) 4558 goto dma_map_err; 4559 4560 tx_q->tx_skbuff_dma[first_entry].buf = des; 4561 tx_q->tx_skbuff_dma[first_entry].buf_type = STMMAC_TXBUF_T_SKB; 4562 tx_q->tx_skbuff_dma[first_entry].map_as_page = false; 4563 4564 stmmac_set_desc_addr(priv, first, des); 4565 4566 tx_q->tx_skbuff_dma[first_entry].len = nopaged_len; 4567 tx_q->tx_skbuff_dma[first_entry].last_segment = last_segment; 4568 4569 if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && 4570 priv->hwts_tx_en)) { 4571 /* declare that device is doing timestamping */ 4572 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; 4573 stmmac_enable_tx_timestamp(priv, first); 4574 } 4575 4576 /* Prepare the first descriptor setting the OWN bit too */ 4577 stmmac_prepare_tx_desc(priv, first, 1, nopaged_len, 4578 csum_insertion, priv->mode, 0, last_segment, 4579 skb->len); 4580 } 4581 4582 if (tx_q->tbs & STMMAC_TBS_EN) { 4583 struct timespec64 ts = ns_to_timespec64(skb->tstamp); 4584 4585 tbs_desc = &tx_q->dma_entx[first_entry]; 4586 stmmac_set_desc_tbs(priv, tbs_desc, ts.tv_sec, ts.tv_nsec); 4587 } 4588 4589 stmmac_set_tx_owner(priv, first); 4590 4591 netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len); 4592 4593 stmmac_enable_dma_transmission(priv, priv->ioaddr); 4594 4595 stmmac_flush_tx_descriptors(priv, queue); 4596 stmmac_tx_timer_arm(priv, queue); 4597 4598 return NETDEV_TX_OK; 4599 4600 dma_map_err: 4601 netdev_err(priv->dev, "Tx DMA map failed\n"); 4602 dev_kfree_skb(skb); 4603 priv->xstats.tx_dropped++; 4604 return NETDEV_TX_OK; 4605 } 4606 4607 static void stmmac_rx_vlan(struct net_device *dev, struct sk_buff *skb) 4608 { 4609 struct vlan_ethhdr *veth = skb_vlan_eth_hdr(skb); 4610 __be16 vlan_proto = veth->h_vlan_proto; 4611 u16 vlanid; 4612 4613 if ((vlan_proto == htons(ETH_P_8021Q) && 4614 dev->features & NETIF_F_HW_VLAN_CTAG_RX) || 4615 (vlan_proto == htons(ETH_P_8021AD) && 4616 dev->features & NETIF_F_HW_VLAN_STAG_RX)) { 4617 /* pop the vlan tag */ 4618 vlanid = ntohs(veth->h_vlan_TCI); 4619 memmove(skb->data + VLAN_HLEN, veth, ETH_ALEN * 2); 4620 skb_pull(skb, VLAN_HLEN); 4621 __vlan_hwaccel_put_tag(skb, vlan_proto, vlanid); 4622 } 4623 } 4624 4625 /** 4626 * stmmac_rx_refill - refill used skb preallocated buffers 4627 * @priv: driver private structure 4628 * @queue: RX queue index 4629 * Description : this is to reallocate the skb for the reception process 4630 * that is based on zero-copy. 
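 * Buffers are taken from the per-queue page_pool; when Split Header (sph)
 * is enabled a second page (sec_page) is allocated for the payload as well.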
4631 */ 4632 static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue) 4633 { 4634 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue]; 4635 int dirty = stmmac_rx_dirty(priv, queue); 4636 unsigned int entry = rx_q->dirty_rx; 4637 gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN); 4638 4639 if (priv->dma_cap.host_dma_width <= 32) 4640 gfp |= GFP_DMA32; 4641 4642 while (dirty-- > 0) { 4643 struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry]; 4644 struct dma_desc *p; 4645 bool use_rx_wd; 4646 4647 if (priv->extend_desc) 4648 p = (struct dma_desc *)(rx_q->dma_erx + entry); 4649 else 4650 p = rx_q->dma_rx + entry; 4651 4652 if (!buf->page) { 4653 buf->page = page_pool_alloc_pages(rx_q->page_pool, gfp); 4654 if (!buf->page) 4655 break; 4656 } 4657 4658 if (priv->sph && !buf->sec_page) { 4659 buf->sec_page = page_pool_alloc_pages(rx_q->page_pool, gfp); 4660 if (!buf->sec_page) 4661 break; 4662 4663 buf->sec_addr = page_pool_get_dma_addr(buf->sec_page); 4664 } 4665 4666 buf->addr = page_pool_get_dma_addr(buf->page) + buf->page_offset; 4667 4668 stmmac_set_desc_addr(priv, p, buf->addr); 4669 if (priv->sph) 4670 stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true); 4671 else 4672 stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false); 4673 stmmac_refill_desc3(priv, rx_q, p); 4674 4675 rx_q->rx_count_frames++; 4676 rx_q->rx_count_frames += priv->rx_coal_frames[queue]; 4677 if (rx_q->rx_count_frames > priv->rx_coal_frames[queue]) 4678 rx_q->rx_count_frames = 0; 4679 4680 use_rx_wd = !priv->rx_coal_frames[queue]; 4681 use_rx_wd |= rx_q->rx_count_frames > 0; 4682 if (!priv->use_riwt) 4683 use_rx_wd = false; 4684 4685 dma_wmb(); 4686 stmmac_set_rx_owner(priv, p, use_rx_wd); 4687 4688 entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_rx_size); 4689 } 4690 rx_q->dirty_rx = entry; 4691 rx_q->rx_tail_addr = rx_q->dma_rx_phy + 4692 (rx_q->dirty_rx * sizeof(struct dma_desc)); 4693 stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue); 4694 } 4695 4696 static unsigned int stmmac_rx_buf1_len(struct stmmac_priv *priv, 4697 struct dma_desc *p, 4698 int status, unsigned int len) 4699 { 4700 unsigned int plen = 0, hlen = 0; 4701 int coe = priv->hw->rx_csum; 4702 4703 /* Not first descriptor, buffer is always zero */ 4704 if (priv->sph && len) 4705 return 0; 4706 4707 /* First descriptor, get split header length */ 4708 stmmac_get_rx_header_len(priv, p, &hlen); 4709 if (priv->sph && hlen) { 4710 priv->xstats.rx_split_hdr_pkt_n++; 4711 return hlen; 4712 } 4713 4714 /* First descriptor, not last descriptor and not split header */ 4715 if (status & rx_not_ls) 4716 return priv->dma_conf.dma_buf_sz; 4717 4718 plen = stmmac_get_rx_frame_len(priv, p, coe); 4719 4720 /* First descriptor and last descriptor and not split header */ 4721 return min_t(unsigned int, priv->dma_conf.dma_buf_sz, plen); 4722 } 4723 4724 static unsigned int stmmac_rx_buf2_len(struct stmmac_priv *priv, 4725 struct dma_desc *p, 4726 int status, unsigned int len) 4727 { 4728 int coe = priv->hw->rx_csum; 4729 unsigned int plen = 0; 4730 4731 /* Not split header, buffer is not available */ 4732 if (!priv->sph) 4733 return 0; 4734 4735 /* Not last descriptor */ 4736 if (status & rx_not_ls) 4737 return priv->dma_conf.dma_buf_sz; 4738 4739 plen = stmmac_get_rx_frame_len(priv, p, coe); 4740 4741 /* Last descriptor */ 4742 return plen - len; 4743 } 4744 4745 static int stmmac_xdp_xmit_xdpf(struct stmmac_priv *priv, int queue, 4746 struct xdp_frame *xdpf, bool dma_map) 4747 { 4748 struct stmmac_txq_stats *txq_stats = 
&priv->xstats.txq_stats[queue]; 4749 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue]; 4750 unsigned int entry = tx_q->cur_tx; 4751 struct dma_desc *tx_desc; 4752 dma_addr_t dma_addr; 4753 bool set_ic; 4754 4755 if (stmmac_tx_avail(priv, queue) < STMMAC_TX_THRESH(priv)) 4756 return STMMAC_XDP_CONSUMED; 4757 4758 if (likely(priv->extend_desc)) 4759 tx_desc = (struct dma_desc *)(tx_q->dma_etx + entry); 4760 else if (tx_q->tbs & STMMAC_TBS_AVAIL) 4761 tx_desc = &tx_q->dma_entx[entry].basic; 4762 else 4763 tx_desc = tx_q->dma_tx + entry; 4764 4765 if (dma_map) { 4766 dma_addr = dma_map_single(priv->device, xdpf->data, 4767 xdpf->len, DMA_TO_DEVICE); 4768 if (dma_mapping_error(priv->device, dma_addr)) 4769 return STMMAC_XDP_CONSUMED; 4770 4771 tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XDP_NDO; 4772 } else { 4773 struct page *page = virt_to_page(xdpf->data); 4774 4775 dma_addr = page_pool_get_dma_addr(page) + sizeof(*xdpf) + 4776 xdpf->headroom; 4777 dma_sync_single_for_device(priv->device, dma_addr, 4778 xdpf->len, DMA_BIDIRECTIONAL); 4779 4780 tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XDP_TX; 4781 } 4782 4783 tx_q->tx_skbuff_dma[entry].buf = dma_addr; 4784 tx_q->tx_skbuff_dma[entry].map_as_page = false; 4785 tx_q->tx_skbuff_dma[entry].len = xdpf->len; 4786 tx_q->tx_skbuff_dma[entry].last_segment = true; 4787 tx_q->tx_skbuff_dma[entry].is_jumbo = false; 4788 4789 tx_q->xdpf[entry] = xdpf; 4790 4791 stmmac_set_desc_addr(priv, tx_desc, dma_addr); 4792 4793 stmmac_prepare_tx_desc(priv, tx_desc, 1, xdpf->len, 4794 true, priv->mode, true, true, 4795 xdpf->len); 4796 4797 tx_q->tx_count_frames++; 4798 4799 if (tx_q->tx_count_frames % priv->tx_coal_frames[queue] == 0) 4800 set_ic = true; 4801 else 4802 set_ic = false; 4803 4804 if (set_ic) { 4805 unsigned long flags; 4806 tx_q->tx_count_frames = 0; 4807 stmmac_set_tx_ic(priv, tx_desc); 4808 flags = u64_stats_update_begin_irqsave(&txq_stats->syncp); 4809 txq_stats->tx_set_ic_bit++; 4810 u64_stats_update_end_irqrestore(&txq_stats->syncp, flags); 4811 } 4812 4813 stmmac_enable_dma_transmission(priv, priv->ioaddr); 4814 4815 entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size); 4816 tx_q->cur_tx = entry; 4817 4818 return STMMAC_XDP_TX; 4819 } 4820 4821 static int stmmac_xdp_get_tx_queue(struct stmmac_priv *priv, 4822 int cpu) 4823 { 4824 int index = cpu; 4825 4826 if (unlikely(index < 0)) 4827 index = 0; 4828 4829 while (index >= priv->plat->tx_queues_to_use) 4830 index -= priv->plat->tx_queues_to_use; 4831 4832 return index; 4833 } 4834 4835 static int stmmac_xdp_xmit_back(struct stmmac_priv *priv, 4836 struct xdp_buff *xdp) 4837 { 4838 struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp); 4839 int cpu = smp_processor_id(); 4840 struct netdev_queue *nq; 4841 int queue; 4842 int res; 4843 4844 if (unlikely(!xdpf)) 4845 return STMMAC_XDP_CONSUMED; 4846 4847 queue = stmmac_xdp_get_tx_queue(priv, cpu); 4848 nq = netdev_get_tx_queue(priv->dev, queue); 4849 4850 __netif_tx_lock(nq, cpu); 4851 /* Avoids TX time-out as we are sharing with slow path */ 4852 txq_trans_cond_update(nq); 4853 4854 res = stmmac_xdp_xmit_xdpf(priv, queue, xdpf, false); 4855 if (res == STMMAC_XDP_TX) 4856 stmmac_flush_tx_descriptors(priv, queue); 4857 4858 __netif_tx_unlock(nq); 4859 4860 return res; 4861 } 4862 4863 static int __stmmac_xdp_run_prog(struct stmmac_priv *priv, 4864 struct bpf_prog *prog, 4865 struct xdp_buff *xdp) 4866 { 4867 u32 act; 4868 int res; 4869 4870 act = bpf_prog_run_xdp(prog, xdp); 4871 switch (act) { 4872 case 
XDP_PASS: 4873 res = STMMAC_XDP_PASS; 4874 break; 4875 case XDP_TX: 4876 res = stmmac_xdp_xmit_back(priv, xdp); 4877 break; 4878 case XDP_REDIRECT: 4879 if (xdp_do_redirect(priv->dev, xdp, prog) < 0) 4880 res = STMMAC_XDP_CONSUMED; 4881 else 4882 res = STMMAC_XDP_REDIRECT; 4883 break; 4884 default: 4885 bpf_warn_invalid_xdp_action(priv->dev, prog, act); 4886 fallthrough; 4887 case XDP_ABORTED: 4888 trace_xdp_exception(priv->dev, prog, act); 4889 fallthrough; 4890 case XDP_DROP: 4891 res = STMMAC_XDP_CONSUMED; 4892 break; 4893 } 4894 4895 return res; 4896 } 4897 4898 static struct sk_buff *stmmac_xdp_run_prog(struct stmmac_priv *priv, 4899 struct xdp_buff *xdp) 4900 { 4901 struct bpf_prog *prog; 4902 int res; 4903 4904 prog = READ_ONCE(priv->xdp_prog); 4905 if (!prog) { 4906 res = STMMAC_XDP_PASS; 4907 goto out; 4908 } 4909 4910 res = __stmmac_xdp_run_prog(priv, prog, xdp); 4911 out: 4912 return ERR_PTR(-res); 4913 } 4914 4915 static void stmmac_finalize_xdp_rx(struct stmmac_priv *priv, 4916 int xdp_status) 4917 { 4918 int cpu = smp_processor_id(); 4919 int queue; 4920 4921 queue = stmmac_xdp_get_tx_queue(priv, cpu); 4922 4923 if (xdp_status & STMMAC_XDP_TX) 4924 stmmac_tx_timer_arm(priv, queue); 4925 4926 if (xdp_status & STMMAC_XDP_REDIRECT) 4927 xdp_do_flush(); 4928 } 4929 4930 static struct sk_buff *stmmac_construct_skb_zc(struct stmmac_channel *ch, 4931 struct xdp_buff *xdp) 4932 { 4933 unsigned int metasize = xdp->data - xdp->data_meta; 4934 unsigned int datasize = xdp->data_end - xdp->data; 4935 struct sk_buff *skb; 4936 4937 skb = __napi_alloc_skb(&ch->rxtx_napi, 4938 xdp->data_end - xdp->data_hard_start, 4939 GFP_ATOMIC | __GFP_NOWARN); 4940 if (unlikely(!skb)) 4941 return NULL; 4942 4943 skb_reserve(skb, xdp->data - xdp->data_hard_start); 4944 memcpy(__skb_put(skb, datasize), xdp->data, datasize); 4945 if (metasize) 4946 skb_metadata_set(skb, metasize); 4947 4948 return skb; 4949 } 4950 4951 static void stmmac_dispatch_skb_zc(struct stmmac_priv *priv, u32 queue, 4952 struct dma_desc *p, struct dma_desc *np, 4953 struct xdp_buff *xdp) 4954 { 4955 struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[queue]; 4956 struct stmmac_channel *ch = &priv->channel[queue]; 4957 unsigned int len = xdp->data_end - xdp->data; 4958 enum pkt_hash_types hash_type; 4959 int coe = priv->hw->rx_csum; 4960 unsigned long flags; 4961 struct sk_buff *skb; 4962 u32 hash; 4963 4964 skb = stmmac_construct_skb_zc(ch, xdp); 4965 if (!skb) { 4966 priv->xstats.rx_dropped++; 4967 return; 4968 } 4969 4970 stmmac_get_rx_hwtstamp(priv, p, np, skb); 4971 stmmac_rx_vlan(priv->dev, skb); 4972 skb->protocol = eth_type_trans(skb, priv->dev); 4973 4974 if (unlikely(!coe)) 4975 skb_checksum_none_assert(skb); 4976 else 4977 skb->ip_summed = CHECKSUM_UNNECESSARY; 4978 4979 if (!stmmac_get_rx_hash(priv, p, &hash, &hash_type)) 4980 skb_set_hash(skb, hash, hash_type); 4981 4982 skb_record_rx_queue(skb, queue); 4983 napi_gro_receive(&ch->rxtx_napi, skb); 4984 4985 flags = u64_stats_update_begin_irqsave(&rxq_stats->syncp); 4986 rxq_stats->rx_pkt_n++; 4987 rxq_stats->rx_bytes += len; 4988 u64_stats_update_end_irqrestore(&rxq_stats->syncp, flags); 4989 } 4990 4991 static bool stmmac_rx_refill_zc(struct stmmac_priv *priv, u32 queue, u32 budget) 4992 { 4993 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue]; 4994 unsigned int entry = rx_q->dirty_rx; 4995 struct dma_desc *rx_desc = NULL; 4996 bool ret = true; 4997 4998 budget = min(budget, stmmac_rx_dirty(priv, queue)); 4999 5000 while (budget-- > 0 && entry != 
rx_q->cur_rx) { 5001 struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry]; 5002 dma_addr_t dma_addr; 5003 bool use_rx_wd; 5004 5005 if (!buf->xdp) { 5006 buf->xdp = xsk_buff_alloc(rx_q->xsk_pool); 5007 if (!buf->xdp) { 5008 ret = false; 5009 break; 5010 } 5011 } 5012 5013 if (priv->extend_desc) 5014 rx_desc = (struct dma_desc *)(rx_q->dma_erx + entry); 5015 else 5016 rx_desc = rx_q->dma_rx + entry; 5017 5018 dma_addr = xsk_buff_xdp_get_dma(buf->xdp); 5019 stmmac_set_desc_addr(priv, rx_desc, dma_addr); 5020 stmmac_set_desc_sec_addr(priv, rx_desc, 0, false); 5021 stmmac_refill_desc3(priv, rx_q, rx_desc); 5022 5023 rx_q->rx_count_frames++; 5024 rx_q->rx_count_frames += priv->rx_coal_frames[queue]; 5025 if (rx_q->rx_count_frames > priv->rx_coal_frames[queue]) 5026 rx_q->rx_count_frames = 0; 5027 5028 use_rx_wd = !priv->rx_coal_frames[queue]; 5029 use_rx_wd |= rx_q->rx_count_frames > 0; 5030 if (!priv->use_riwt) 5031 use_rx_wd = false; 5032 5033 dma_wmb(); 5034 stmmac_set_rx_owner(priv, rx_desc, use_rx_wd); 5035 5036 entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_rx_size); 5037 } 5038 5039 if (rx_desc) { 5040 rx_q->dirty_rx = entry; 5041 rx_q->rx_tail_addr = rx_q->dma_rx_phy + 5042 (rx_q->dirty_rx * sizeof(struct dma_desc)); 5043 stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue); 5044 } 5045 5046 return ret; 5047 } 5048 5049 static struct stmmac_xdp_buff *xsk_buff_to_stmmac_ctx(struct xdp_buff *xdp) 5050 { 5051 /* In XDP zero copy data path, xdp field in struct xdp_buff_xsk is used 5052 * to represent incoming packet, whereas cb field in the same structure 5053 * is used to store driver specific info. Thus, struct stmmac_xdp_buff 5054 * is laid on top of xdp and cb fields of struct xdp_buff_xsk. 5055 */ 5056 return (struct stmmac_xdp_buff *)xdp; 5057 } 5058 5059 static int stmmac_rx_zc(struct stmmac_priv *priv, int limit, u32 queue) 5060 { 5061 struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[queue]; 5062 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue]; 5063 unsigned int count = 0, error = 0, len = 0; 5064 int dirty = stmmac_rx_dirty(priv, queue); 5065 unsigned int next_entry = rx_q->cur_rx; 5066 u32 rx_errors = 0, rx_dropped = 0; 5067 unsigned int desc_size; 5068 struct bpf_prog *prog; 5069 bool failure = false; 5070 unsigned long flags; 5071 int xdp_status = 0; 5072 int status = 0; 5073 5074 if (netif_msg_rx_status(priv)) { 5075 void *rx_head; 5076 5077 netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__); 5078 if (priv->extend_desc) { 5079 rx_head = (void *)rx_q->dma_erx; 5080 desc_size = sizeof(struct dma_extended_desc); 5081 } else { 5082 rx_head = (void *)rx_q->dma_rx; 5083 desc_size = sizeof(struct dma_desc); 5084 } 5085 5086 stmmac_display_ring(priv, rx_head, priv->dma_conf.dma_rx_size, true, 5087 rx_q->dma_rx_phy, desc_size); 5088 } 5089 while (count < limit) { 5090 struct stmmac_rx_buffer *buf; 5091 struct stmmac_xdp_buff *ctx; 5092 unsigned int buf1_len = 0; 5093 struct dma_desc *np, *p; 5094 int entry; 5095 int res; 5096 5097 if (!count && rx_q->state_saved) { 5098 error = rx_q->state.error; 5099 len = rx_q->state.len; 5100 } else { 5101 rx_q->state_saved = false; 5102 error = 0; 5103 len = 0; 5104 } 5105 5106 if (count >= limit) 5107 break; 5108 5109 read_again: 5110 buf1_len = 0; 5111 entry = next_entry; 5112 buf = &rx_q->buf_pool[entry]; 5113 5114 if (dirty >= STMMAC_RX_FILL_BATCH) { 5115 failure = failure || 5116 !stmmac_rx_refill_zc(priv, queue, dirty); 5117 dirty = 0; 5118 } 5119 5120 if (priv->extend_desc) 5121 p = 
(struct dma_desc *)(rx_q->dma_erx + entry); 5122 else 5123 p = rx_q->dma_rx + entry; 5124 5125 /* read the status of the incoming frame */ 5126 status = stmmac_rx_status(priv, &priv->xstats, p); 5127 /* check if managed by the DMA otherwise go ahead */ 5128 if (unlikely(status & dma_own)) 5129 break; 5130 5131 /* Prefetch the next RX descriptor */ 5132 rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx, 5133 priv->dma_conf.dma_rx_size); 5134 next_entry = rx_q->cur_rx; 5135 5136 if (priv->extend_desc) 5137 np = (struct dma_desc *)(rx_q->dma_erx + next_entry); 5138 else 5139 np = rx_q->dma_rx + next_entry; 5140 5141 prefetch(np); 5142 5143 /* Ensure a valid XSK buffer before proceed */ 5144 if (!buf->xdp) 5145 break; 5146 5147 if (priv->extend_desc) 5148 stmmac_rx_extended_status(priv, &priv->xstats, 5149 rx_q->dma_erx + entry); 5150 if (unlikely(status == discard_frame)) { 5151 xsk_buff_free(buf->xdp); 5152 buf->xdp = NULL; 5153 dirty++; 5154 error = 1; 5155 if (!priv->hwts_rx_en) 5156 rx_errors++; 5157 } 5158 5159 if (unlikely(error && (status & rx_not_ls))) 5160 goto read_again; 5161 if (unlikely(error)) { 5162 count++; 5163 continue; 5164 } 5165 5166 /* XSK pool expects RX frame 1:1 mapped to XSK buffer */ 5167 if (likely(status & rx_not_ls)) { 5168 xsk_buff_free(buf->xdp); 5169 buf->xdp = NULL; 5170 dirty++; 5171 count++; 5172 goto read_again; 5173 } 5174 5175 ctx = xsk_buff_to_stmmac_ctx(buf->xdp); 5176 ctx->priv = priv; 5177 ctx->desc = p; 5178 ctx->ndesc = np; 5179 5180 /* XDP ZC Frame only support primary buffers for now */ 5181 buf1_len = stmmac_rx_buf1_len(priv, p, status, len); 5182 len += buf1_len; 5183 5184 /* ACS is disabled; strip manually. */ 5185 if (likely(!(status & rx_not_ls))) { 5186 buf1_len -= ETH_FCS_LEN; 5187 len -= ETH_FCS_LEN; 5188 } 5189 5190 /* RX buffer is good and fit into a XSK pool buffer */ 5191 buf->xdp->data_end = buf->xdp->data + buf1_len; 5192 xsk_buff_dma_sync_for_cpu(buf->xdp, rx_q->xsk_pool); 5193 5194 prog = READ_ONCE(priv->xdp_prog); 5195 res = __stmmac_xdp_run_prog(priv, prog, buf->xdp); 5196 5197 switch (res) { 5198 case STMMAC_XDP_PASS: 5199 stmmac_dispatch_skb_zc(priv, queue, p, np, buf->xdp); 5200 xsk_buff_free(buf->xdp); 5201 break; 5202 case STMMAC_XDP_CONSUMED: 5203 xsk_buff_free(buf->xdp); 5204 rx_dropped++; 5205 break; 5206 case STMMAC_XDP_TX: 5207 case STMMAC_XDP_REDIRECT: 5208 xdp_status |= res; 5209 break; 5210 } 5211 5212 buf->xdp = NULL; 5213 dirty++; 5214 count++; 5215 } 5216 5217 if (status & rx_not_ls) { 5218 rx_q->state_saved = true; 5219 rx_q->state.error = error; 5220 rx_q->state.len = len; 5221 } 5222 5223 stmmac_finalize_xdp_rx(priv, xdp_status); 5224 5225 flags = u64_stats_update_begin_irqsave(&rxq_stats->syncp); 5226 rxq_stats->rx_pkt_n += count; 5227 u64_stats_update_end_irqrestore(&rxq_stats->syncp, flags); 5228 5229 priv->xstats.rx_dropped += rx_dropped; 5230 priv->xstats.rx_errors += rx_errors; 5231 5232 if (xsk_uses_need_wakeup(rx_q->xsk_pool)) { 5233 if (failure || stmmac_rx_dirty(priv, queue) > 0) 5234 xsk_set_rx_need_wakeup(rx_q->xsk_pool); 5235 else 5236 xsk_clear_rx_need_wakeup(rx_q->xsk_pool); 5237 5238 return (int)count; 5239 } 5240 5241 return failure ? limit : (int)count; 5242 } 5243 5244 /** 5245 * stmmac_rx - manage the receive process 5246 * @priv: driver private structure 5247 * @limit: napi bugget 5248 * @queue: RX queue index. 5249 * Description : this the function called by the napi poll method. 5250 * It gets all the frames inside the ring. 
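 * Processing stops once @limit frames have been handled or a descriptor
 * still owned by the DMA is found; the state of a partially received frame
 * is saved in rx_q->state so the next poll can resume it.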
5251 */ 5252 static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue) 5253 { 5254 u32 rx_errors = 0, rx_dropped = 0, rx_bytes = 0, rx_packets = 0; 5255 struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[queue]; 5256 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue]; 5257 struct stmmac_channel *ch = &priv->channel[queue]; 5258 unsigned int count = 0, error = 0, len = 0; 5259 int status = 0, coe = priv->hw->rx_csum; 5260 unsigned int next_entry = rx_q->cur_rx; 5261 enum dma_data_direction dma_dir; 5262 unsigned int desc_size; 5263 struct sk_buff *skb = NULL; 5264 struct stmmac_xdp_buff ctx; 5265 unsigned long flags; 5266 int xdp_status = 0; 5267 int buf_sz; 5268 5269 dma_dir = page_pool_get_dma_dir(rx_q->page_pool); 5270 buf_sz = DIV_ROUND_UP(priv->dma_conf.dma_buf_sz, PAGE_SIZE) * PAGE_SIZE; 5271 limit = min(priv->dma_conf.dma_rx_size - 1, (unsigned int)limit); 5272 5273 if (netif_msg_rx_status(priv)) { 5274 void *rx_head; 5275 5276 netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__); 5277 if (priv->extend_desc) { 5278 rx_head = (void *)rx_q->dma_erx; 5279 desc_size = sizeof(struct dma_extended_desc); 5280 } else { 5281 rx_head = (void *)rx_q->dma_rx; 5282 desc_size = sizeof(struct dma_desc); 5283 } 5284 5285 stmmac_display_ring(priv, rx_head, priv->dma_conf.dma_rx_size, true, 5286 rx_q->dma_rx_phy, desc_size); 5287 } 5288 while (count < limit) { 5289 unsigned int buf1_len = 0, buf2_len = 0; 5290 enum pkt_hash_types hash_type; 5291 struct stmmac_rx_buffer *buf; 5292 struct dma_desc *np, *p; 5293 int entry; 5294 u32 hash; 5295 5296 if (!count && rx_q->state_saved) { 5297 skb = rx_q->state.skb; 5298 error = rx_q->state.error; 5299 len = rx_q->state.len; 5300 } else { 5301 rx_q->state_saved = false; 5302 skb = NULL; 5303 error = 0; 5304 len = 0; 5305 } 5306 5307 read_again: 5308 if (count >= limit) 5309 break; 5310 5311 buf1_len = 0; 5312 buf2_len = 0; 5313 entry = next_entry; 5314 buf = &rx_q->buf_pool[entry]; 5315 5316 if (priv->extend_desc) 5317 p = (struct dma_desc *)(rx_q->dma_erx + entry); 5318 else 5319 p = rx_q->dma_rx + entry; 5320 5321 /* read the status of the incoming frame */ 5322 status = stmmac_rx_status(priv, &priv->xstats, p); 5323 /* check if managed by the DMA otherwise go ahead */ 5324 if (unlikely(status & dma_own)) 5325 break; 5326 5327 rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx, 5328 priv->dma_conf.dma_rx_size); 5329 next_entry = rx_q->cur_rx; 5330 5331 if (priv->extend_desc) 5332 np = (struct dma_desc *)(rx_q->dma_erx + next_entry); 5333 else 5334 np = rx_q->dma_rx + next_entry; 5335 5336 prefetch(np); 5337 5338 if (priv->extend_desc) 5339 stmmac_rx_extended_status(priv, &priv->xstats, rx_q->dma_erx + entry); 5340 if (unlikely(status == discard_frame)) { 5341 page_pool_recycle_direct(rx_q->page_pool, buf->page); 5342 buf->page = NULL; 5343 error = 1; 5344 if (!priv->hwts_rx_en) 5345 rx_errors++; 5346 } 5347 5348 if (unlikely(error && (status & rx_not_ls))) 5349 goto read_again; 5350 if (unlikely(error)) { 5351 dev_kfree_skb(skb); 5352 skb = NULL; 5353 count++; 5354 continue; 5355 } 5356 5357 /* Buffer is good. Go on. */ 5358 5359 prefetch(page_address(buf->page) + buf->page_offset); 5360 if (buf->sec_page) 5361 prefetch(page_address(buf->sec_page)); 5362 5363 buf1_len = stmmac_rx_buf1_len(priv, p, status, len); 5364 len += buf1_len; 5365 buf2_len = stmmac_rx_buf2_len(priv, p, status, len); 5366 len += buf2_len; 5367 5368 /* ACS is disabled; strip manually. 
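 * The 4-byte FCS (ETH_FCS_LEN) is removed only on the last segment of a
 * frame, from whichever buffer (buf2 if used, otherwise buf1) holds the
 * tail of the frame.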
*/ 5369 if (likely(!(status & rx_not_ls))) { 5370 if (buf2_len) { 5371 buf2_len -= ETH_FCS_LEN; 5372 len -= ETH_FCS_LEN; 5373 } else if (buf1_len) { 5374 buf1_len -= ETH_FCS_LEN; 5375 len -= ETH_FCS_LEN; 5376 } 5377 } 5378 5379 if (!skb) { 5380 unsigned int pre_len, sync_len; 5381 5382 dma_sync_single_for_cpu(priv->device, buf->addr, 5383 buf1_len, dma_dir); 5384 5385 xdp_init_buff(&ctx.xdp, buf_sz, &rx_q->xdp_rxq); 5386 xdp_prepare_buff(&ctx.xdp, page_address(buf->page), 5387 buf->page_offset, buf1_len, true); 5388 5389 pre_len = ctx.xdp.data_end - ctx.xdp.data_hard_start - 5390 buf->page_offset; 5391 5392 ctx.priv = priv; 5393 ctx.desc = p; 5394 ctx.ndesc = np; 5395 5396 skb = stmmac_xdp_run_prog(priv, &ctx.xdp); 5397 /* Due xdp_adjust_tail: DMA sync for_device 5398 * cover max len CPU touch 5399 */ 5400 sync_len = ctx.xdp.data_end - ctx.xdp.data_hard_start - 5401 buf->page_offset; 5402 sync_len = max(sync_len, pre_len); 5403 5404 /* For Not XDP_PASS verdict */ 5405 if (IS_ERR(skb)) { 5406 unsigned int xdp_res = -PTR_ERR(skb); 5407 5408 if (xdp_res & STMMAC_XDP_CONSUMED) { 5409 page_pool_put_page(rx_q->page_pool, 5410 virt_to_head_page(ctx.xdp.data), 5411 sync_len, true); 5412 buf->page = NULL; 5413 rx_dropped++; 5414 5415 /* Clear skb as it was set as 5416 * status by XDP program. 5417 */ 5418 skb = NULL; 5419 5420 if (unlikely((status & rx_not_ls))) 5421 goto read_again; 5422 5423 count++; 5424 continue; 5425 } else if (xdp_res & (STMMAC_XDP_TX | 5426 STMMAC_XDP_REDIRECT)) { 5427 xdp_status |= xdp_res; 5428 buf->page = NULL; 5429 skb = NULL; 5430 count++; 5431 continue; 5432 } 5433 } 5434 } 5435 5436 if (!skb) { 5437 /* XDP program may expand or reduce tail */ 5438 buf1_len = ctx.xdp.data_end - ctx.xdp.data; 5439 5440 skb = napi_alloc_skb(&ch->rx_napi, buf1_len); 5441 if (!skb) { 5442 rx_dropped++; 5443 count++; 5444 goto drain_data; 5445 } 5446 5447 /* XDP program may adjust header */ 5448 skb_copy_to_linear_data(skb, ctx.xdp.data, buf1_len); 5449 skb_put(skb, buf1_len); 5450 5451 /* Data payload copied into SKB, page ready for recycle */ 5452 page_pool_recycle_direct(rx_q->page_pool, buf->page); 5453 buf->page = NULL; 5454 } else if (buf1_len) { 5455 dma_sync_single_for_cpu(priv->device, buf->addr, 5456 buf1_len, dma_dir); 5457 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, 5458 buf->page, buf->page_offset, buf1_len, 5459 priv->dma_conf.dma_buf_sz); 5460 5461 /* Data payload appended into SKB */ 5462 skb_mark_for_recycle(skb); 5463 buf->page = NULL; 5464 } 5465 5466 if (buf2_len) { 5467 dma_sync_single_for_cpu(priv->device, buf->sec_addr, 5468 buf2_len, dma_dir); 5469 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, 5470 buf->sec_page, 0, buf2_len, 5471 priv->dma_conf.dma_buf_sz); 5472 5473 /* Data payload appended into SKB */ 5474 skb_mark_for_recycle(skb); 5475 buf->sec_page = NULL; 5476 } 5477 5478 drain_data: 5479 if (likely(status & rx_not_ls)) 5480 goto read_again; 5481 if (!skb) 5482 continue; 5483 5484 /* Got entire packet into SKB. Finish it. 
*/ 5485 5486 stmmac_get_rx_hwtstamp(priv, p, np, skb); 5487 stmmac_rx_vlan(priv->dev, skb); 5488 skb->protocol = eth_type_trans(skb, priv->dev); 5489 5490 if (unlikely(!coe)) 5491 skb_checksum_none_assert(skb); 5492 else 5493 skb->ip_summed = CHECKSUM_UNNECESSARY; 5494 5495 if (!stmmac_get_rx_hash(priv, p, &hash, &hash_type)) 5496 skb_set_hash(skb, hash, hash_type); 5497 5498 skb_record_rx_queue(skb, queue); 5499 napi_gro_receive(&ch->rx_napi, skb); 5500 skb = NULL; 5501 5502 rx_packets++; 5503 rx_bytes += len; 5504 count++; 5505 } 5506 5507 if (status & rx_not_ls || skb) { 5508 rx_q->state_saved = true; 5509 rx_q->state.skb = skb; 5510 rx_q->state.error = error; 5511 rx_q->state.len = len; 5512 } 5513 5514 stmmac_finalize_xdp_rx(priv, xdp_status); 5515 5516 stmmac_rx_refill(priv, queue); 5517 5518 flags = u64_stats_update_begin_irqsave(&rxq_stats->syncp); 5519 rxq_stats->rx_packets += rx_packets; 5520 rxq_stats->rx_bytes += rx_bytes; 5521 rxq_stats->rx_pkt_n += count; 5522 u64_stats_update_end_irqrestore(&rxq_stats->syncp, flags); 5523 5524 priv->xstats.rx_dropped += rx_dropped; 5525 priv->xstats.rx_errors += rx_errors; 5526 5527 return count; 5528 } 5529 5530 static int stmmac_napi_poll_rx(struct napi_struct *napi, int budget) 5531 { 5532 struct stmmac_channel *ch = 5533 container_of(napi, struct stmmac_channel, rx_napi); 5534 struct stmmac_priv *priv = ch->priv_data; 5535 struct stmmac_rxq_stats *rxq_stats; 5536 u32 chan = ch->index; 5537 unsigned long flags; 5538 int work_done; 5539 5540 rxq_stats = &priv->xstats.rxq_stats[chan]; 5541 flags = u64_stats_update_begin_irqsave(&rxq_stats->syncp); 5542 rxq_stats->napi_poll++; 5543 u64_stats_update_end_irqrestore(&rxq_stats->syncp, flags); 5544 5545 work_done = stmmac_rx(priv, budget, chan); 5546 if (work_done < budget && napi_complete_done(napi, work_done)) { 5547 unsigned long flags; 5548 5549 spin_lock_irqsave(&ch->lock, flags); 5550 stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 0); 5551 spin_unlock_irqrestore(&ch->lock, flags); 5552 } 5553 5554 return work_done; 5555 } 5556 5557 static int stmmac_napi_poll_tx(struct napi_struct *napi, int budget) 5558 { 5559 struct stmmac_channel *ch = 5560 container_of(napi, struct stmmac_channel, tx_napi); 5561 struct stmmac_priv *priv = ch->priv_data; 5562 struct stmmac_txq_stats *txq_stats; 5563 u32 chan = ch->index; 5564 unsigned long flags; 5565 int work_done; 5566 5567 txq_stats = &priv->xstats.txq_stats[chan]; 5568 flags = u64_stats_update_begin_irqsave(&txq_stats->syncp); 5569 txq_stats->napi_poll++; 5570 u64_stats_update_end_irqrestore(&txq_stats->syncp, flags); 5571 5572 work_done = stmmac_tx_clean(priv, budget, chan); 5573 work_done = min(work_done, budget); 5574 5575 if (work_done < budget && napi_complete_done(napi, work_done)) { 5576 unsigned long flags; 5577 5578 spin_lock_irqsave(&ch->lock, flags); 5579 stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 0, 1); 5580 spin_unlock_irqrestore(&ch->lock, flags); 5581 } 5582 5583 return work_done; 5584 } 5585 5586 static int stmmac_napi_poll_rxtx(struct napi_struct *napi, int budget) 5587 { 5588 struct stmmac_channel *ch = 5589 container_of(napi, struct stmmac_channel, rxtx_napi); 5590 struct stmmac_priv *priv = ch->priv_data; 5591 int rx_done, tx_done, rxtx_done; 5592 struct stmmac_rxq_stats *rxq_stats; 5593 struct stmmac_txq_stats *txq_stats; 5594 u32 chan = ch->index; 5595 unsigned long flags; 5596 5597 rxq_stats = &priv->xstats.rxq_stats[chan]; 5598 flags = u64_stats_update_begin_irqsave(&rxq_stats->syncp); 5599 rxq_stats->napi_poll++; 
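/* The rxtx NAPI handler drives the XSK zero-copy path (see stmmac_rx_zc()
 * below), so the poll is accounted on both the RX and TX queue statistics.
 */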
5600 u64_stats_update_end_irqrestore(&rxq_stats->syncp, flags); 5601 5602 txq_stats = &priv->xstats.txq_stats[chan]; 5603 flags = u64_stats_update_begin_irqsave(&txq_stats->syncp); 5604 txq_stats->napi_poll++; 5605 u64_stats_update_end_irqrestore(&txq_stats->syncp, flags); 5606 5607 tx_done = stmmac_tx_clean(priv, budget, chan); 5608 tx_done = min(tx_done, budget); 5609 5610 rx_done = stmmac_rx_zc(priv, budget, chan); 5611 5612 rxtx_done = max(tx_done, rx_done); 5613 5614 /* If either TX or RX work is not complete, return budget 5615 * and keep polling 5616 */ 5617 if (rxtx_done >= budget) 5618 return budget; 5619 5620 /* all work done, exit the polling mode */ 5621 if (napi_complete_done(napi, rxtx_done)) { 5622 unsigned long flags; 5623 5624 spin_lock_irqsave(&ch->lock, flags); 5625 /* Both RX and TX work are complete, 5626 * so enable both RX & TX IRQs. 5627 */ 5628 stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 1); 5629 spin_unlock_irqrestore(&ch->lock, flags); 5630 } 5631 5632 return min(rxtx_done, budget - 1); 5633 } 5634 5635 /** 5636 * stmmac_tx_timeout 5637 * @dev : Pointer to net device structure 5638 * @txqueue: the index of the hanging transmit queue 5639 * Description: this function is called when a packet transmission fails to 5640 * complete within a reasonable time. The driver will mark the error in the 5641 * netdev structure and arrange for the device to be reset to a sane state 5642 * in order to transmit a new packet. 5643 */ 5644 static void stmmac_tx_timeout(struct net_device *dev, unsigned int txqueue) 5645 { 5646 struct stmmac_priv *priv = netdev_priv(dev); 5647 5648 stmmac_global_err(priv); 5649 } 5650 5651 /** 5652 * stmmac_set_rx_mode - entry point for multicast addressing 5653 * @dev : pointer to the device structure 5654 * Description: 5655 * This function is a driver entry point which gets called by the kernel 5656 * whenever multicast addresses must be enabled/disabled. 5657 * Return value: 5658 * void. 5659 */ 5660 static void stmmac_set_rx_mode(struct net_device *dev) 5661 { 5662 struct stmmac_priv *priv = netdev_priv(dev); 5663 5664 stmmac_set_filter(priv, priv->hw, dev); 5665 } 5666 5667 /** 5668 * stmmac_change_mtu - entry point to change MTU size for the device. 5669 * @dev : device pointer. 5670 * @new_mtu : the new MTU size for the device. 5671 * Description: the Maximum Transfer Unit (MTU) is used by the network layer 5672 * to drive packet transmission. Ethernet has an MTU of 1500 octets 5673 * (ETH_DATA_LEN). This value can be changed with ifconfig. 5674 * Return value: 5675 * 0 on success or a negative errno value (as defined in errno.h) 5676 * on failure.
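 * If the interface is running, the new MTU is applied by allocating a new
 * DMA descriptor configuration and reopening the device with it, so a
 * runtime MTU change briefly restarts the interface.
 * Illustrative example (interface name is just an example):
 *   ip link set dev eth0 mtu 1500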
5677 */ 5678 static int stmmac_change_mtu(struct net_device *dev, int new_mtu) 5679 { 5680 struct stmmac_priv *priv = netdev_priv(dev); 5681 int txfifosz = priv->plat->tx_fifo_size; 5682 struct stmmac_dma_conf *dma_conf; 5683 const int mtu = new_mtu; 5684 int ret; 5685 5686 if (txfifosz == 0) 5687 txfifosz = priv->dma_cap.tx_fifo_size; 5688 5689 txfifosz /= priv->plat->tx_queues_to_use; 5690 5691 if (stmmac_xdp_is_enabled(priv) && new_mtu > ETH_DATA_LEN) { 5692 netdev_dbg(priv->dev, "Jumbo frames not supported for XDP\n"); 5693 return -EINVAL; 5694 } 5695 5696 new_mtu = STMMAC_ALIGN(new_mtu); 5697 5698 /* If condition true, FIFO is too small or MTU too large */ 5699 if ((txfifosz < new_mtu) || (new_mtu > BUF_SIZE_16KiB)) 5700 return -EINVAL; 5701 5702 if (netif_running(dev)) { 5703 netdev_dbg(priv->dev, "restarting interface to change its MTU\n"); 5704 /* Try to allocate the new DMA conf with the new mtu */ 5705 dma_conf = stmmac_setup_dma_desc(priv, mtu); 5706 if (IS_ERR(dma_conf)) { 5707 netdev_err(priv->dev, "failed allocating new dma conf for new MTU %d\n", 5708 mtu); 5709 return PTR_ERR(dma_conf); 5710 } 5711 5712 stmmac_release(dev); 5713 5714 ret = __stmmac_open(dev, dma_conf); 5715 if (ret) { 5716 free_dma_desc_resources(priv, dma_conf); 5717 kfree(dma_conf); 5718 netdev_err(priv->dev, "failed reopening the interface after MTU change\n"); 5719 return ret; 5720 } 5721 5722 kfree(dma_conf); 5723 5724 stmmac_set_rx_mode(dev); 5725 } 5726 5727 dev->mtu = mtu; 5728 netdev_update_features(dev); 5729 5730 return 0; 5731 } 5732 5733 static netdev_features_t stmmac_fix_features(struct net_device *dev, 5734 netdev_features_t features) 5735 { 5736 struct stmmac_priv *priv = netdev_priv(dev); 5737 5738 if (priv->plat->rx_coe == STMMAC_RX_COE_NONE) 5739 features &= ~NETIF_F_RXCSUM; 5740 5741 if (!priv->plat->tx_coe) 5742 features &= ~NETIF_F_CSUM_MASK; 5743 5744 /* Some GMAC devices have a bugged Jumbo frame support that 5745 * needs to have the Tx COE disabled for oversized frames 5746 * (due to limited buffer sizes). In this case we disable 5747 * the TX csum insertion in the TDES and not use SF. 5748 */ 5749 if (priv->plat->bugged_jumbo && (dev->mtu > ETH_DATA_LEN)) 5750 features &= ~NETIF_F_CSUM_MASK; 5751 5752 /* Disable tso if asked by ethtool */ 5753 if ((priv->plat->flags & STMMAC_FLAG_TSO_EN) && (priv->dma_cap.tsoen)) { 5754 if (features & NETIF_F_TSO) 5755 priv->tso = true; 5756 else 5757 priv->tso = false; 5758 } 5759 5760 return features; 5761 } 5762 5763 static int stmmac_set_features(struct net_device *netdev, 5764 netdev_features_t features) 5765 { 5766 struct stmmac_priv *priv = netdev_priv(netdev); 5767 5768 /* Keep the COE Type in case of csum is supporting */ 5769 if (features & NETIF_F_RXCSUM) 5770 priv->hw->rx_csum = priv->plat->rx_coe; 5771 else 5772 priv->hw->rx_csum = 0; 5773 /* No check needed because rx_coe has been set before and it will be 5774 * fixed in case of issue. 
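 * stmmac_rx_ipc() below programs the MAC with the updated rx_csum value,
 * and Split Header is re-applied per RX channel because SPH depends on
 * RX checksum offload being enabled.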
5775 */ 5776 stmmac_rx_ipc(priv, priv->hw); 5777 5778 if (priv->sph_cap) { 5779 bool sph_en = (priv->hw->rx_csum > 0) && priv->sph; 5780 u32 chan; 5781 5782 for (chan = 0; chan < priv->plat->rx_queues_to_use; chan++) 5783 stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan); 5784 } 5785 5786 return 0; 5787 } 5788 5789 static void stmmac_fpe_event_status(struct stmmac_priv *priv, int status) 5790 { 5791 struct stmmac_fpe_cfg *fpe_cfg = priv->plat->fpe_cfg; 5792 enum stmmac_fpe_state *lo_state = &fpe_cfg->lo_fpe_state; 5793 enum stmmac_fpe_state *lp_state = &fpe_cfg->lp_fpe_state; 5794 bool *hs_enable = &fpe_cfg->hs_enable; 5795 5796 if (status == FPE_EVENT_UNKNOWN || !*hs_enable) 5797 return; 5798 5799 /* If LP has sent verify mPacket, LP is FPE capable */ 5800 if ((status & FPE_EVENT_RVER) == FPE_EVENT_RVER) { 5801 if (*lp_state < FPE_STATE_CAPABLE) 5802 *lp_state = FPE_STATE_CAPABLE; 5803 5804 /* If user has requested FPE enable, quickly response */ 5805 if (*hs_enable) 5806 stmmac_fpe_send_mpacket(priv, priv->ioaddr, 5807 fpe_cfg, 5808 MPACKET_RESPONSE); 5809 } 5810 5811 /* If Local has sent verify mPacket, Local is FPE capable */ 5812 if ((status & FPE_EVENT_TVER) == FPE_EVENT_TVER) { 5813 if (*lo_state < FPE_STATE_CAPABLE) 5814 *lo_state = FPE_STATE_CAPABLE; 5815 } 5816 5817 /* If LP has sent response mPacket, LP is entering FPE ON */ 5818 if ((status & FPE_EVENT_RRSP) == FPE_EVENT_RRSP) 5819 *lp_state = FPE_STATE_ENTERING_ON; 5820 5821 /* If Local has sent response mPacket, Local is entering FPE ON */ 5822 if ((status & FPE_EVENT_TRSP) == FPE_EVENT_TRSP) 5823 *lo_state = FPE_STATE_ENTERING_ON; 5824 5825 if (!test_bit(__FPE_REMOVING, &priv->fpe_task_state) && 5826 !test_and_set_bit(__FPE_TASK_SCHED, &priv->fpe_task_state) && 5827 priv->fpe_wq) { 5828 queue_work(priv->fpe_wq, &priv->fpe_task); 5829 } 5830 } 5831 5832 static void stmmac_common_interrupt(struct stmmac_priv *priv) 5833 { 5834 u32 rx_cnt = priv->plat->rx_queues_to_use; 5835 u32 tx_cnt = priv->plat->tx_queues_to_use; 5836 u32 queues_count; 5837 u32 queue; 5838 bool xmac; 5839 5840 xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac; 5841 queues_count = (rx_cnt > tx_cnt) ? rx_cnt : tx_cnt; 5842 5843 if (priv->irq_wake) 5844 pm_wakeup_event(priv->device, 0); 5845 5846 if (priv->dma_cap.estsel) 5847 stmmac_est_irq_status(priv, priv->ioaddr, priv->dev, 5848 &priv->xstats, tx_cnt); 5849 5850 if (priv->dma_cap.fpesel) { 5851 int status = stmmac_fpe_irq_status(priv, priv->ioaddr, 5852 priv->dev); 5853 5854 stmmac_fpe_event_status(priv, status); 5855 } 5856 5857 /* To handle GMAC own interrupts */ 5858 if ((priv->plat->has_gmac) || xmac) { 5859 int status = stmmac_host_irq_status(priv, priv->hw, &priv->xstats); 5860 5861 if (unlikely(status)) { 5862 /* For LPI we need to save the tx status */ 5863 if (status & CORE_IRQ_TX_PATH_IN_LPI_MODE) 5864 priv->tx_path_in_lpi_mode = true; 5865 if (status & CORE_IRQ_TX_PATH_EXIT_LPI_MODE) 5866 priv->tx_path_in_lpi_mode = false; 5867 } 5868 5869 for (queue = 0; queue < queues_count; queue++) { 5870 status = stmmac_host_mtl_irq_status(priv, priv->hw, 5871 queue); 5872 } 5873 5874 /* PCS link status */ 5875 if (priv->hw->pcs && 5876 !(priv->plat->flags & STMMAC_FLAG_HAS_INTEGRATED_PCS)) { 5877 if (priv->xstats.pcs_link) 5878 netif_carrier_on(priv->dev); 5879 else 5880 netif_carrier_off(priv->dev); 5881 } 5882 5883 stmmac_timestamp_interrupt(priv, priv); 5884 } 5885 } 5886 5887 /** 5888 * stmmac_interrupt - main ISR 5889 * @irq: interrupt number. 5890 * @dev_id: to pass the net device pointer. 
5891 * Description: this is the main driver interrupt service routine. 5892 * It can call: 5893 * o DMA service routine (to manage incoming frame reception and transmission 5894 * status) 5895 * o Core interrupts to manage: remote wake-up, management counter, LPI 5896 * interrupts. 5897 */ 5898 static irqreturn_t stmmac_interrupt(int irq, void *dev_id) 5899 { 5900 struct net_device *dev = (struct net_device *)dev_id; 5901 struct stmmac_priv *priv = netdev_priv(dev); 5902 5903 /* Check if adapter is up */ 5904 if (test_bit(STMMAC_DOWN, &priv->state)) 5905 return IRQ_HANDLED; 5906 5907 /* Check if a fatal error happened */ 5908 if (stmmac_safety_feat_interrupt(priv)) 5909 return IRQ_HANDLED; 5910 5911 /* To handle Common interrupts */ 5912 stmmac_common_interrupt(priv); 5913 5914 /* To handle DMA interrupts */ 5915 stmmac_dma_interrupt(priv); 5916 5917 return IRQ_HANDLED; 5918 } 5919 5920 static irqreturn_t stmmac_mac_interrupt(int irq, void *dev_id) 5921 { 5922 struct net_device *dev = (struct net_device *)dev_id; 5923 struct stmmac_priv *priv = netdev_priv(dev); 5924 5925 if (unlikely(!dev)) { 5926 netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__); 5927 return IRQ_NONE; 5928 } 5929 5930 /* Check if adapter is up */ 5931 if (test_bit(STMMAC_DOWN, &priv->state)) 5932 return IRQ_HANDLED; 5933 5934 /* To handle Common interrupts */ 5935 stmmac_common_interrupt(priv); 5936 5937 return IRQ_HANDLED; 5938 } 5939 5940 static irqreturn_t stmmac_safety_interrupt(int irq, void *dev_id) 5941 { 5942 struct net_device *dev = (struct net_device *)dev_id; 5943 struct stmmac_priv *priv = netdev_priv(dev); 5944 5945 if (unlikely(!dev)) { 5946 netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__); 5947 return IRQ_NONE; 5948 } 5949 5950 /* Check if adapter is up */ 5951 if (test_bit(STMMAC_DOWN, &priv->state)) 5952 return IRQ_HANDLED; 5953 5954 /* Check if a fatal error happened */ 5955 stmmac_safety_feat_interrupt(priv); 5956 5957 return IRQ_HANDLED; 5958 } 5959 5960 static irqreturn_t stmmac_msi_intr_tx(int irq, void *data) 5961 { 5962 struct stmmac_tx_queue *tx_q = (struct stmmac_tx_queue *)data; 5963 struct stmmac_dma_conf *dma_conf; 5964 int chan = tx_q->queue_index; 5965 struct stmmac_priv *priv; 5966 int status; 5967 5968 dma_conf = container_of(tx_q, struct stmmac_dma_conf, tx_queue[chan]); 5969 priv = container_of(dma_conf, struct stmmac_priv, dma_conf); 5970 5971 if (unlikely(!data)) { 5972 netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__); 5973 return IRQ_NONE; 5974 } 5975 5976 /* Check if adapter is up */ 5977 if (test_bit(STMMAC_DOWN, &priv->state)) 5978 return IRQ_HANDLED; 5979 5980 status = stmmac_napi_check(priv, chan, DMA_DIR_TX); 5981 5982 if (unlikely(status & tx_hard_error_bump_tc)) { 5983 /* Try to bump up the dma threshold on this failure */ 5984 stmmac_bump_dma_threshold(priv, chan); 5985 } else if (unlikely(status == tx_hard_error)) { 5986 stmmac_tx_err(priv, chan); 5987 } 5988 5989 return IRQ_HANDLED; 5990 } 5991 5992 static irqreturn_t stmmac_msi_intr_rx(int irq, void *data) 5993 { 5994 struct stmmac_rx_queue *rx_q = (struct stmmac_rx_queue *)data; 5995 struct stmmac_dma_conf *dma_conf; 5996 int chan = rx_q->queue_index; 5997 struct stmmac_priv *priv; 5998 5999 dma_conf = container_of(rx_q, struct stmmac_dma_conf, rx_queue[chan]); 6000 priv = container_of(dma_conf, struct stmmac_priv, dma_conf); 6001 6002 if (unlikely(!data)) { 6003 netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__); 6004 return IRQ_NONE; 6005 } 6006 6007 /* Check if adapter is 
up */ 6008 if (test_bit(STMMAC_DOWN, &priv->state)) 6009 return IRQ_HANDLED; 6010 6011 stmmac_napi_check(priv, chan, DMA_DIR_RX); 6012 6013 return IRQ_HANDLED; 6014 } 6015 6016 /** 6017 * stmmac_ioctl - Entry point for the Ioctl 6018 * @dev: Device pointer. 6019 * @rq: An IOCTL specefic structure, that can contain a pointer to 6020 * a proprietary structure used to pass information to the driver. 6021 * @cmd: IOCTL command 6022 * Description: 6023 * Currently it supports the phy_mii_ioctl(...) and HW time stamping. 6024 */ 6025 static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) 6026 { 6027 struct stmmac_priv *priv = netdev_priv (dev); 6028 int ret = -EOPNOTSUPP; 6029 6030 if (!netif_running(dev)) 6031 return -EINVAL; 6032 6033 switch (cmd) { 6034 case SIOCGMIIPHY: 6035 case SIOCGMIIREG: 6036 case SIOCSMIIREG: 6037 ret = phylink_mii_ioctl(priv->phylink, rq, cmd); 6038 break; 6039 case SIOCSHWTSTAMP: 6040 ret = stmmac_hwtstamp_set(dev, rq); 6041 break; 6042 case SIOCGHWTSTAMP: 6043 ret = stmmac_hwtstamp_get(dev, rq); 6044 break; 6045 default: 6046 break; 6047 } 6048 6049 return ret; 6050 } 6051 6052 static int stmmac_setup_tc_block_cb(enum tc_setup_type type, void *type_data, 6053 void *cb_priv) 6054 { 6055 struct stmmac_priv *priv = cb_priv; 6056 int ret = -EOPNOTSUPP; 6057 6058 if (!tc_cls_can_offload_and_chain0(priv->dev, type_data)) 6059 return ret; 6060 6061 __stmmac_disable_all_queues(priv); 6062 6063 switch (type) { 6064 case TC_SETUP_CLSU32: 6065 ret = stmmac_tc_setup_cls_u32(priv, priv, type_data); 6066 break; 6067 case TC_SETUP_CLSFLOWER: 6068 ret = stmmac_tc_setup_cls(priv, priv, type_data); 6069 break; 6070 default: 6071 break; 6072 } 6073 6074 stmmac_enable_all_queues(priv); 6075 return ret; 6076 } 6077 6078 static LIST_HEAD(stmmac_block_cb_list); 6079 6080 static int stmmac_setup_tc(struct net_device *ndev, enum tc_setup_type type, 6081 void *type_data) 6082 { 6083 struct stmmac_priv *priv = netdev_priv(ndev); 6084 6085 switch (type) { 6086 case TC_QUERY_CAPS: 6087 return stmmac_tc_query_caps(priv, priv, type_data); 6088 case TC_SETUP_BLOCK: 6089 return flow_block_cb_setup_simple(type_data, 6090 &stmmac_block_cb_list, 6091 stmmac_setup_tc_block_cb, 6092 priv, priv, true); 6093 case TC_SETUP_QDISC_CBS: 6094 return stmmac_tc_setup_cbs(priv, priv, type_data); 6095 case TC_SETUP_QDISC_TAPRIO: 6096 return stmmac_tc_setup_taprio(priv, priv, type_data); 6097 case TC_SETUP_QDISC_ETF: 6098 return stmmac_tc_setup_etf(priv, priv, type_data); 6099 default: 6100 return -EOPNOTSUPP; 6101 } 6102 } 6103 6104 static u16 stmmac_select_queue(struct net_device *dev, struct sk_buff *skb, 6105 struct net_device *sb_dev) 6106 { 6107 int gso = skb_shinfo(skb)->gso_type; 6108 6109 if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6 | SKB_GSO_UDP_L4)) { 6110 /* 6111 * There is no way to determine the number of TSO/USO 6112 * capable Queues. Let's use always the Queue 0 6113 * because if TSO/USO is supported then at least this 6114 * one will be capable. 
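 * Non-TSO/USO traffic falls through to netdev_pick_tx() below, bounded
 * by the real number of TX queues.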
6115 */ 6116 return 0; 6117 } 6118 6119 return netdev_pick_tx(dev, skb, NULL) % dev->real_num_tx_queues; 6120 } 6121 6122 static int stmmac_set_mac_address(struct net_device *ndev, void *addr) 6123 { 6124 struct stmmac_priv *priv = netdev_priv(ndev); 6125 int ret = 0; 6126 6127 ret = pm_runtime_resume_and_get(priv->device); 6128 if (ret < 0) 6129 return ret; 6130 6131 ret = eth_mac_addr(ndev, addr); 6132 if (ret) 6133 goto set_mac_error; 6134 6135 stmmac_set_umac_addr(priv, priv->hw, ndev->dev_addr, 0); 6136 6137 set_mac_error: 6138 pm_runtime_put(priv->device); 6139 6140 return ret; 6141 } 6142 6143 #ifdef CONFIG_DEBUG_FS 6144 static struct dentry *stmmac_fs_dir; 6145 6146 static void sysfs_display_ring(void *head, int size, int extend_desc, 6147 struct seq_file *seq, dma_addr_t dma_phy_addr) 6148 { 6149 int i; 6150 struct dma_extended_desc *ep = (struct dma_extended_desc *)head; 6151 struct dma_desc *p = (struct dma_desc *)head; 6152 dma_addr_t dma_addr; 6153 6154 for (i = 0; i < size; i++) { 6155 if (extend_desc) { 6156 dma_addr = dma_phy_addr + i * sizeof(*ep); 6157 seq_printf(seq, "%d [%pad]: 0x%x 0x%x 0x%x 0x%x\n", 6158 i, &dma_addr, 6159 le32_to_cpu(ep->basic.des0), 6160 le32_to_cpu(ep->basic.des1), 6161 le32_to_cpu(ep->basic.des2), 6162 le32_to_cpu(ep->basic.des3)); 6163 ep++; 6164 } else { 6165 dma_addr = dma_phy_addr + i * sizeof(*p); 6166 seq_printf(seq, "%d [%pad]: 0x%x 0x%x 0x%x 0x%x\n", 6167 i, &dma_addr, 6168 le32_to_cpu(p->des0), le32_to_cpu(p->des1), 6169 le32_to_cpu(p->des2), le32_to_cpu(p->des3)); 6170 p++; 6171 } 6172 seq_printf(seq, "\n"); 6173 } 6174 } 6175 6176 static int stmmac_rings_status_show(struct seq_file *seq, void *v) 6177 { 6178 struct net_device *dev = seq->private; 6179 struct stmmac_priv *priv = netdev_priv(dev); 6180 u32 rx_count = priv->plat->rx_queues_to_use; 6181 u32 tx_count = priv->plat->tx_queues_to_use; 6182 u32 queue; 6183 6184 if ((dev->flags & IFF_UP) == 0) 6185 return 0; 6186 6187 for (queue = 0; queue < rx_count; queue++) { 6188 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue]; 6189 6190 seq_printf(seq, "RX Queue %d:\n", queue); 6191 6192 if (priv->extend_desc) { 6193 seq_printf(seq, "Extended descriptor ring:\n"); 6194 sysfs_display_ring((void *)rx_q->dma_erx, 6195 priv->dma_conf.dma_rx_size, 1, seq, rx_q->dma_rx_phy); 6196 } else { 6197 seq_printf(seq, "Descriptor ring:\n"); 6198 sysfs_display_ring((void *)rx_q->dma_rx, 6199 priv->dma_conf.dma_rx_size, 0, seq, rx_q->dma_rx_phy); 6200 } 6201 } 6202 6203 for (queue = 0; queue < tx_count; queue++) { 6204 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue]; 6205 6206 seq_printf(seq, "TX Queue %d:\n", queue); 6207 6208 if (priv->extend_desc) { 6209 seq_printf(seq, "Extended descriptor ring:\n"); 6210 sysfs_display_ring((void *)tx_q->dma_etx, 6211 priv->dma_conf.dma_tx_size, 1, seq, tx_q->dma_tx_phy); 6212 } else if (!(tx_q->tbs & STMMAC_TBS_AVAIL)) { 6213 seq_printf(seq, "Descriptor ring:\n"); 6214 sysfs_display_ring((void *)tx_q->dma_tx, 6215 priv->dma_conf.dma_tx_size, 0, seq, tx_q->dma_tx_phy); 6216 } 6217 } 6218 6219 return 0; 6220 } 6221 DEFINE_SHOW_ATTRIBUTE(stmmac_rings_status); 6222 6223 static int stmmac_dma_cap_show(struct seq_file *seq, void *v) 6224 { 6225 static const char * const dwxgmac_timestamp_source[] = { 6226 "None", 6227 "Internal", 6228 "External", 6229 "Both", 6230 }; 6231 static const char * const dwxgmac_safety_feature_desc[] = { 6232 "No", 6233 "All Safety Features with ECC and Parity", 6234 "All Safety Features without ECC or Parity", 6235 
"All Safety Features with Parity Only", 6236 "ECC Only", 6237 "UNDEFINED", 6238 "UNDEFINED", 6239 "UNDEFINED", 6240 }; 6241 struct net_device *dev = seq->private; 6242 struct stmmac_priv *priv = netdev_priv(dev); 6243 6244 if (!priv->hw_cap_support) { 6245 seq_printf(seq, "DMA HW features not supported\n"); 6246 return 0; 6247 } 6248 6249 seq_printf(seq, "==============================\n"); 6250 seq_printf(seq, "\tDMA HW features\n"); 6251 seq_printf(seq, "==============================\n"); 6252 6253 seq_printf(seq, "\t10/100 Mbps: %s\n", 6254 (priv->dma_cap.mbps_10_100) ? "Y" : "N"); 6255 seq_printf(seq, "\t1000 Mbps: %s\n", 6256 (priv->dma_cap.mbps_1000) ? "Y" : "N"); 6257 seq_printf(seq, "\tHalf duplex: %s\n", 6258 (priv->dma_cap.half_duplex) ? "Y" : "N"); 6259 if (priv->plat->has_xgmac) { 6260 seq_printf(seq, 6261 "\tNumber of Additional MAC address registers: %d\n", 6262 priv->dma_cap.multi_addr); 6263 } else { 6264 seq_printf(seq, "\tHash Filter: %s\n", 6265 (priv->dma_cap.hash_filter) ? "Y" : "N"); 6266 seq_printf(seq, "\tMultiple MAC address registers: %s\n", 6267 (priv->dma_cap.multi_addr) ? "Y" : "N"); 6268 } 6269 seq_printf(seq, "\tPCS (TBI/SGMII/RTBI PHY interfaces): %s\n", 6270 (priv->dma_cap.pcs) ? "Y" : "N"); 6271 seq_printf(seq, "\tSMA (MDIO) Interface: %s\n", 6272 (priv->dma_cap.sma_mdio) ? "Y" : "N"); 6273 seq_printf(seq, "\tPMT Remote wake up: %s\n", 6274 (priv->dma_cap.pmt_remote_wake_up) ? "Y" : "N"); 6275 seq_printf(seq, "\tPMT Magic Frame: %s\n", 6276 (priv->dma_cap.pmt_magic_frame) ? "Y" : "N"); 6277 seq_printf(seq, "\tRMON module: %s\n", 6278 (priv->dma_cap.rmon) ? "Y" : "N"); 6279 seq_printf(seq, "\tIEEE 1588-2002 Time Stamp: %s\n", 6280 (priv->dma_cap.time_stamp) ? "Y" : "N"); 6281 seq_printf(seq, "\tIEEE 1588-2008 Advanced Time Stamp: %s\n", 6282 (priv->dma_cap.atime_stamp) ? "Y" : "N"); 6283 if (priv->plat->has_xgmac) 6284 seq_printf(seq, "\tTimestamp System Time Source: %s\n", 6285 dwxgmac_timestamp_source[priv->dma_cap.tssrc]); 6286 seq_printf(seq, "\t802.3az - Energy-Efficient Ethernet (EEE): %s\n", 6287 (priv->dma_cap.eee) ? "Y" : "N"); 6288 seq_printf(seq, "\tAV features: %s\n", (priv->dma_cap.av) ? "Y" : "N"); 6289 seq_printf(seq, "\tChecksum Offload in TX: %s\n", 6290 (priv->dma_cap.tx_coe) ? "Y" : "N"); 6291 if (priv->synopsys_id >= DWMAC_CORE_4_00 || 6292 priv->plat->has_xgmac) { 6293 seq_printf(seq, "\tIP Checksum Offload in RX: %s\n", 6294 (priv->dma_cap.rx_coe) ? "Y" : "N"); 6295 } else { 6296 seq_printf(seq, "\tIP Checksum Offload (type1) in RX: %s\n", 6297 (priv->dma_cap.rx_coe_type1) ? "Y" : "N"); 6298 seq_printf(seq, "\tIP Checksum Offload (type2) in RX: %s\n", 6299 (priv->dma_cap.rx_coe_type2) ? "Y" : "N"); 6300 seq_printf(seq, "\tRXFIFO > 2048bytes: %s\n", 6301 (priv->dma_cap.rxfifo_over_2048) ? "Y" : "N"); 6302 } 6303 seq_printf(seq, "\tNumber of Additional RX channel: %d\n", 6304 priv->dma_cap.number_rx_channel); 6305 seq_printf(seq, "\tNumber of Additional TX channel: %d\n", 6306 priv->dma_cap.number_tx_channel); 6307 seq_printf(seq, "\tNumber of Additional RX queues: %d\n", 6308 priv->dma_cap.number_rx_queues); 6309 seq_printf(seq, "\tNumber of Additional TX queues: %d\n", 6310 priv->dma_cap.number_tx_queues); 6311 seq_printf(seq, "\tEnhanced descriptors: %s\n", 6312 (priv->dma_cap.enh_desc) ? "Y" : "N"); 6313 seq_printf(seq, "\tTX Fifo Size: %d\n", priv->dma_cap.tx_fifo_size); 6314 seq_printf(seq, "\tRX Fifo Size: %d\n", priv->dma_cap.rx_fifo_size); 6315 seq_printf(seq, "\tHash Table Size: %lu\n", priv->dma_cap.hash_tb_sz ? 
6316 (BIT(priv->dma_cap.hash_tb_sz) << 5) : 0); 6317 seq_printf(seq, "\tTSO: %s\n", priv->dma_cap.tsoen ? "Y" : "N"); 6318 seq_printf(seq, "\tNumber of PPS Outputs: %d\n", 6319 priv->dma_cap.pps_out_num); 6320 seq_printf(seq, "\tSafety Features: %s\n", 6321 dwxgmac_safety_feature_desc[priv->dma_cap.asp]); 6322 seq_printf(seq, "\tFlexible RX Parser: %s\n", 6323 priv->dma_cap.frpsel ? "Y" : "N"); 6324 seq_printf(seq, "\tEnhanced Addressing: %d\n", 6325 priv->dma_cap.host_dma_width); 6326 seq_printf(seq, "\tReceive Side Scaling: %s\n", 6327 priv->dma_cap.rssen ? "Y" : "N"); 6328 seq_printf(seq, "\tVLAN Hash Filtering: %s\n", 6329 priv->dma_cap.vlhash ? "Y" : "N"); 6330 seq_printf(seq, "\tSplit Header: %s\n", 6331 priv->dma_cap.sphen ? "Y" : "N"); 6332 seq_printf(seq, "\tVLAN TX Insertion: %s\n", 6333 priv->dma_cap.vlins ? "Y" : "N"); 6334 seq_printf(seq, "\tDouble VLAN: %s\n", 6335 priv->dma_cap.dvlan ? "Y" : "N"); 6336 seq_printf(seq, "\tNumber of L3/L4 Filters: %d\n", 6337 priv->dma_cap.l3l4fnum); 6338 seq_printf(seq, "\tARP Offloading: %s\n", 6339 priv->dma_cap.arpoffsel ? "Y" : "N"); 6340 seq_printf(seq, "\tEnhancements to Scheduled Traffic (EST): %s\n", 6341 priv->dma_cap.estsel ? "Y" : "N"); 6342 seq_printf(seq, "\tFrame Preemption (FPE): %s\n", 6343 priv->dma_cap.fpesel ? "Y" : "N"); 6344 seq_printf(seq, "\tTime-Based Scheduling (TBS): %s\n", 6345 priv->dma_cap.tbssel ? "Y" : "N"); 6346 seq_printf(seq, "\tNumber of DMA Channels Enabled for TBS: %d\n", 6347 priv->dma_cap.tbs_ch_num); 6348 seq_printf(seq, "\tPer-Stream Filtering: %s\n", 6349 priv->dma_cap.sgfsel ? "Y" : "N"); 6350 seq_printf(seq, "\tTX Timestamp FIFO Depth: %lu\n", 6351 BIT(priv->dma_cap.ttsfd) >> 1); 6352 seq_printf(seq, "\tNumber of Traffic Classes: %d\n", 6353 priv->dma_cap.numtc); 6354 seq_printf(seq, "\tDCB Feature: %s\n", 6355 priv->dma_cap.dcben ? "Y" : "N"); 6356 seq_printf(seq, "\tIEEE 1588 High Word Register: %s\n", 6357 priv->dma_cap.advthword ? "Y" : "N"); 6358 seq_printf(seq, "\tPTP Offload: %s\n", 6359 priv->dma_cap.ptoen ? "Y" : "N"); 6360 seq_printf(seq, "\tOne-Step Timestamping: %s\n", 6361 priv->dma_cap.osten ? "Y" : "N"); 6362 seq_printf(seq, "\tPriority-Based Flow Control: %s\n", 6363 priv->dma_cap.pfcen ? "Y" : "N"); 6364 seq_printf(seq, "\tNumber of Flexible RX Parser Instructions: %lu\n", 6365 BIT(priv->dma_cap.frpes) << 6); 6366 seq_printf(seq, "\tNumber of Flexible RX Parser Parsable Bytes: %lu\n", 6367 BIT(priv->dma_cap.frpbs) << 6); 6368 seq_printf(seq, "\tParallel Instruction Processor Engines: %d\n", 6369 priv->dma_cap.frppipe_num); 6370 seq_printf(seq, "\tNumber of Extended VLAN Tag Filters: %lu\n", 6371 priv->dma_cap.nrvf_num ? 6372 (BIT(priv->dma_cap.nrvf_num) << 1) : 0); 6373 seq_printf(seq, "\tWidth of the Time Interval Field in GCL: %d\n", 6374 priv->dma_cap.estwid ? 4 * priv->dma_cap.estwid + 12 : 0); 6375 seq_printf(seq, "\tDepth of GCL: %lu\n", 6376 priv->dma_cap.estdep ? (BIT(priv->dma_cap.estdep) << 5) : 0); 6377 seq_printf(seq, "\tQueue/Channel-Based VLAN Tag Insertion on TX: %s\n", 6378 priv->dma_cap.cbtisel ? "Y" : "N"); 6379 seq_printf(seq, "\tNumber of Auxiliary Snapshot Inputs: %d\n", 6380 priv->dma_cap.aux_snapshot_n); 6381 seq_printf(seq, "\tOne-Step Timestamping for PTP over UDP/IP: %s\n", 6382 priv->dma_cap.pou_ost_en ? "Y" : "N"); 6383 seq_printf(seq, "\tEnhanced DMA: %s\n", 6384 priv->dma_cap.edma ? "Y" : "N"); 6385 seq_printf(seq, "\tDifferent Descriptor Cache: %s\n", 6386 priv->dma_cap.ediffc ? 
"Y" : "N"); 6387 seq_printf(seq, "\tVxLAN/NVGRE: %s\n", 6388 priv->dma_cap.vxn ? "Y" : "N"); 6389 seq_printf(seq, "\tDebug Memory Interface: %s\n", 6390 priv->dma_cap.dbgmem ? "Y" : "N"); 6391 seq_printf(seq, "\tNumber of Policing Counters: %lu\n", 6392 priv->dma_cap.pcsel ? BIT(priv->dma_cap.pcsel + 3) : 0); 6393 return 0; 6394 } 6395 DEFINE_SHOW_ATTRIBUTE(stmmac_dma_cap); 6396 6397 /* Use network device events to rename debugfs file entries. 6398 */ 6399 static int stmmac_device_event(struct notifier_block *unused, 6400 unsigned long event, void *ptr) 6401 { 6402 struct net_device *dev = netdev_notifier_info_to_dev(ptr); 6403 struct stmmac_priv *priv = netdev_priv(dev); 6404 6405 if (dev->netdev_ops != &stmmac_netdev_ops) 6406 goto done; 6407 6408 switch (event) { 6409 case NETDEV_CHANGENAME: 6410 if (priv->dbgfs_dir) 6411 priv->dbgfs_dir = debugfs_rename(stmmac_fs_dir, 6412 priv->dbgfs_dir, 6413 stmmac_fs_dir, 6414 dev->name); 6415 break; 6416 } 6417 done: 6418 return NOTIFY_DONE; 6419 } 6420 6421 static struct notifier_block stmmac_notifier = { 6422 .notifier_call = stmmac_device_event, 6423 }; 6424 6425 static void stmmac_init_fs(struct net_device *dev) 6426 { 6427 struct stmmac_priv *priv = netdev_priv(dev); 6428 6429 rtnl_lock(); 6430 6431 /* Create per netdev entries */ 6432 priv->dbgfs_dir = debugfs_create_dir(dev->name, stmmac_fs_dir); 6433 6434 /* Entry to report DMA RX/TX rings */ 6435 debugfs_create_file("descriptors_status", 0444, priv->dbgfs_dir, dev, 6436 &stmmac_rings_status_fops); 6437 6438 /* Entry to report the DMA HW features */ 6439 debugfs_create_file("dma_cap", 0444, priv->dbgfs_dir, dev, 6440 &stmmac_dma_cap_fops); 6441 6442 rtnl_unlock(); 6443 } 6444 6445 static void stmmac_exit_fs(struct net_device *dev) 6446 { 6447 struct stmmac_priv *priv = netdev_priv(dev); 6448 6449 debugfs_remove_recursive(priv->dbgfs_dir); 6450 } 6451 #endif /* CONFIG_DEBUG_FS */ 6452 6453 static u32 stmmac_vid_crc32_le(__le16 vid_le) 6454 { 6455 unsigned char *data = (unsigned char *)&vid_le; 6456 unsigned char data_byte = 0; 6457 u32 crc = ~0x0; 6458 u32 temp = 0; 6459 int i, bits; 6460 6461 bits = get_bitmask_order(VLAN_VID_MASK); 6462 for (i = 0; i < bits; i++) { 6463 if ((i % 8) == 0) 6464 data_byte = data[i / 8]; 6465 6466 temp = ((crc & 1) ^ data_byte) & 1; 6467 crc >>= 1; 6468 data_byte >>= 1; 6469 6470 if (temp) 6471 crc ^= 0xedb88320; 6472 } 6473 6474 return crc; 6475 } 6476 6477 static int stmmac_vlan_update(struct stmmac_priv *priv, bool is_double) 6478 { 6479 u32 crc, hash = 0; 6480 __le16 pmatch = 0; 6481 int count = 0; 6482 u16 vid = 0; 6483 6484 for_each_set_bit(vid, priv->active_vlans, VLAN_N_VID) { 6485 __le16 vid_le = cpu_to_le16(vid); 6486 crc = bitrev32(~stmmac_vid_crc32_le(vid_le)) >> 28; 6487 hash |= (1 << crc); 6488 count++; 6489 } 6490 6491 if (!priv->dma_cap.vlhash) { 6492 if (count > 2) /* VID = 0 always passes filter */ 6493 return -EOPNOTSUPP; 6494 6495 pmatch = cpu_to_le16(vid); 6496 hash = 0; 6497 } 6498 6499 return stmmac_update_vlan_hash(priv, priv->hw, hash, pmatch, is_double); 6500 } 6501 6502 static int stmmac_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid) 6503 { 6504 struct stmmac_priv *priv = netdev_priv(ndev); 6505 bool is_double = false; 6506 int ret; 6507 6508 ret = pm_runtime_resume_and_get(priv->device); 6509 if (ret < 0) 6510 return ret; 6511 6512 if (be16_to_cpu(proto) == ETH_P_8021AD) 6513 is_double = true; 6514 6515 set_bit(vid, priv->active_vlans); 6516 ret = stmmac_vlan_update(priv, is_double); 6517 if (ret) { 6518 
clear_bit(vid, priv->active_vlans); 6519 goto err_pm_put; 6520 } 6521 6522 if (priv->hw->num_vlan) { 6523 ret = stmmac_add_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid); 6524 if (ret) 6525 goto err_pm_put; 6526 } 6527 err_pm_put: 6528 pm_runtime_put(priv->device); 6529 6530 return ret; 6531 } 6532 6533 static int stmmac_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid) 6534 { 6535 struct stmmac_priv *priv = netdev_priv(ndev); 6536 bool is_double = false; 6537 int ret; 6538 6539 ret = pm_runtime_resume_and_get(priv->device); 6540 if (ret < 0) 6541 return ret; 6542 6543 if (be16_to_cpu(proto) == ETH_P_8021AD) 6544 is_double = true; 6545 6546 clear_bit(vid, priv->active_vlans); 6547 6548 if (priv->hw->num_vlan) { 6549 ret = stmmac_del_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid); 6550 if (ret) 6551 goto del_vlan_error; 6552 } 6553 6554 ret = stmmac_vlan_update(priv, is_double); 6555 6556 del_vlan_error: 6557 pm_runtime_put(priv->device); 6558 6559 return ret; 6560 } 6561 6562 static int stmmac_bpf(struct net_device *dev, struct netdev_bpf *bpf) 6563 { 6564 struct stmmac_priv *priv = netdev_priv(dev); 6565 6566 switch (bpf->command) { 6567 case XDP_SETUP_PROG: 6568 return stmmac_xdp_set_prog(priv, bpf->prog, bpf->extack); 6569 case XDP_SETUP_XSK_POOL: 6570 return stmmac_xdp_setup_pool(priv, bpf->xsk.pool, 6571 bpf->xsk.queue_id); 6572 default: 6573 return -EOPNOTSUPP; 6574 } 6575 } 6576 6577 static int stmmac_xdp_xmit(struct net_device *dev, int num_frames, 6578 struct xdp_frame **frames, u32 flags) 6579 { 6580 struct stmmac_priv *priv = netdev_priv(dev); 6581 int cpu = smp_processor_id(); 6582 struct netdev_queue *nq; 6583 int i, nxmit = 0; 6584 int queue; 6585 6586 if (unlikely(test_bit(STMMAC_DOWN, &priv->state))) 6587 return -ENETDOWN; 6588 6589 if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) 6590 return -EINVAL; 6591 6592 queue = stmmac_xdp_get_tx_queue(priv, cpu); 6593 nq = netdev_get_tx_queue(priv->dev, queue); 6594 6595 __netif_tx_lock(nq, cpu); 6596 /* Avoids TX time-out as we are sharing with slow path */ 6597 txq_trans_cond_update(nq); 6598 6599 for (i = 0; i < num_frames; i++) { 6600 int res; 6601 6602 res = stmmac_xdp_xmit_xdpf(priv, queue, frames[i], true); 6603 if (res == STMMAC_XDP_CONSUMED) 6604 break; 6605 6606 nxmit++; 6607 } 6608 6609 if (flags & XDP_XMIT_FLUSH) { 6610 stmmac_flush_tx_descriptors(priv, queue); 6611 stmmac_tx_timer_arm(priv, queue); 6612 } 6613 6614 __netif_tx_unlock(nq); 6615 6616 return nxmit; 6617 } 6618 6619 void stmmac_disable_rx_queue(struct stmmac_priv *priv, u32 queue) 6620 { 6621 struct stmmac_channel *ch = &priv->channel[queue]; 6622 unsigned long flags; 6623 6624 spin_lock_irqsave(&ch->lock, flags); 6625 stmmac_disable_dma_irq(priv, priv->ioaddr, queue, 1, 0); 6626 spin_unlock_irqrestore(&ch->lock, flags); 6627 6628 stmmac_stop_rx_dma(priv, queue); 6629 __free_dma_rx_desc_resources(priv, &priv->dma_conf, queue); 6630 } 6631 6632 void stmmac_enable_rx_queue(struct stmmac_priv *priv, u32 queue) 6633 { 6634 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue]; 6635 struct stmmac_channel *ch = &priv->channel[queue]; 6636 unsigned long flags; 6637 u32 buf_size; 6638 int ret; 6639 6640 ret = __alloc_dma_rx_desc_resources(priv, &priv->dma_conf, queue); 6641 if (ret) { 6642 netdev_err(priv->dev, "Failed to alloc RX desc.\n"); 6643 return; 6644 } 6645 6646 ret = __init_dma_rx_desc_rings(priv, &priv->dma_conf, queue, GFP_KERNEL); 6647 if (ret) { 6648 __free_dma_rx_desc_resources(priv, &priv->dma_conf, queue); 6649 
netdev_err(priv->dev, "Failed to init RX desc.\n"); 6650 return; 6651 } 6652 6653 stmmac_reset_rx_queue(priv, queue); 6654 stmmac_clear_rx_descriptors(priv, &priv->dma_conf, queue); 6655 6656 stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg, 6657 rx_q->dma_rx_phy, rx_q->queue_index); 6658 6659 rx_q->rx_tail_addr = rx_q->dma_rx_phy + (rx_q->buf_alloc_num * 6660 sizeof(struct dma_desc)); 6661 stmmac_set_rx_tail_ptr(priv, priv->ioaddr, 6662 rx_q->rx_tail_addr, rx_q->queue_index); 6663 6664 if (rx_q->xsk_pool && rx_q->buf_alloc_num) { 6665 buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool); 6666 stmmac_set_dma_bfsize(priv, priv->ioaddr, 6667 buf_size, 6668 rx_q->queue_index); 6669 } else { 6670 stmmac_set_dma_bfsize(priv, priv->ioaddr, 6671 priv->dma_conf.dma_buf_sz, 6672 rx_q->queue_index); 6673 } 6674 6675 stmmac_start_rx_dma(priv, queue); 6676 6677 spin_lock_irqsave(&ch->lock, flags); 6678 stmmac_enable_dma_irq(priv, priv->ioaddr, queue, 1, 0); 6679 spin_unlock_irqrestore(&ch->lock, flags); 6680 } 6681 6682 void stmmac_disable_tx_queue(struct stmmac_priv *priv, u32 queue) 6683 { 6684 struct stmmac_channel *ch = &priv->channel[queue]; 6685 unsigned long flags; 6686 6687 spin_lock_irqsave(&ch->lock, flags); 6688 stmmac_disable_dma_irq(priv, priv->ioaddr, queue, 0, 1); 6689 spin_unlock_irqrestore(&ch->lock, flags); 6690 6691 stmmac_stop_tx_dma(priv, queue); 6692 __free_dma_tx_desc_resources(priv, &priv->dma_conf, queue); 6693 } 6694 6695 void stmmac_enable_tx_queue(struct stmmac_priv *priv, u32 queue) 6696 { 6697 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue]; 6698 struct stmmac_channel *ch = &priv->channel[queue]; 6699 unsigned long flags; 6700 int ret; 6701 6702 ret = __alloc_dma_tx_desc_resources(priv, &priv->dma_conf, queue); 6703 if (ret) { 6704 netdev_err(priv->dev, "Failed to alloc TX desc.\n"); 6705 return; 6706 } 6707 6708 ret = __init_dma_tx_desc_rings(priv, &priv->dma_conf, queue); 6709 if (ret) { 6710 __free_dma_tx_desc_resources(priv, &priv->dma_conf, queue); 6711 netdev_err(priv->dev, "Failed to init TX desc.\n"); 6712 return; 6713 } 6714 6715 stmmac_reset_tx_queue(priv, queue); 6716 stmmac_clear_tx_descriptors(priv, &priv->dma_conf, queue); 6717 6718 stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg, 6719 tx_q->dma_tx_phy, tx_q->queue_index); 6720 6721 if (tx_q->tbs & STMMAC_TBS_AVAIL) 6722 stmmac_enable_tbs(priv, priv->ioaddr, 1, tx_q->queue_index); 6723 6724 tx_q->tx_tail_addr = tx_q->dma_tx_phy; 6725 stmmac_set_tx_tail_ptr(priv, priv->ioaddr, 6726 tx_q->tx_tail_addr, tx_q->queue_index); 6727 6728 stmmac_start_tx_dma(priv, queue); 6729 6730 spin_lock_irqsave(&ch->lock, flags); 6731 stmmac_enable_dma_irq(priv, priv->ioaddr, queue, 0, 1); 6732 spin_unlock_irqrestore(&ch->lock, flags); 6733 } 6734 6735 void stmmac_xdp_release(struct net_device *dev) 6736 { 6737 struct stmmac_priv *priv = netdev_priv(dev); 6738 u32 chan; 6739 6740 /* Ensure tx function is not running */ 6741 netif_tx_disable(dev); 6742 6743 /* Disable NAPI process */ 6744 stmmac_disable_all_queues(priv); 6745 6746 for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) 6747 hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer); 6748 6749 /* Free the IRQ lines */ 6750 stmmac_free_irq(dev, REQ_IRQ_ERR_ALL, 0); 6751 6752 /* Stop TX/RX DMA channels */ 6753 stmmac_stop_all_dma(priv); 6754 6755 /* Release and free the Rx/Tx resources */ 6756 free_dma_desc_resources(priv, &priv->dma_conf); 6757 6758 /* Disable the MAC Rx/Tx */ 6759 stmmac_mac_set(priv, priv->ioaddr, false); 
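/* At this point the MAC and all DMA channels are quiesced;
 * stmmac_xdp_open() below rebuilds the rings and re-enables them once the
 * new XDP setup is in place.
 */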
6760 6761 /* set trans_start so we don't get spurious 6762 * watchdogs during reset 6763 */ 6764 netif_trans_update(dev); 6765 netif_carrier_off(dev); 6766 } 6767 6768 int stmmac_xdp_open(struct net_device *dev) 6769 { 6770 struct stmmac_priv *priv = netdev_priv(dev); 6771 u32 rx_cnt = priv->plat->rx_queues_to_use; 6772 u32 tx_cnt = priv->plat->tx_queues_to_use; 6773 u32 dma_csr_ch = max(rx_cnt, tx_cnt); 6774 struct stmmac_rx_queue *rx_q; 6775 struct stmmac_tx_queue *tx_q; 6776 u32 buf_size; 6777 bool sph_en; 6778 u32 chan; 6779 int ret; 6780 6781 ret = alloc_dma_desc_resources(priv, &priv->dma_conf); 6782 if (ret < 0) { 6783 netdev_err(dev, "%s: DMA descriptors allocation failed\n", 6784 __func__); 6785 goto dma_desc_error; 6786 } 6787 6788 ret = init_dma_desc_rings(dev, &priv->dma_conf, GFP_KERNEL); 6789 if (ret < 0) { 6790 netdev_err(dev, "%s: DMA descriptors initialization failed\n", 6791 __func__); 6792 goto init_error; 6793 } 6794 6795 stmmac_reset_queues_param(priv); 6796 6797 /* DMA CSR Channel configuration */ 6798 for (chan = 0; chan < dma_csr_ch; chan++) { 6799 stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan); 6800 stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 1); 6801 } 6802 6803 /* Adjust Split header */ 6804 sph_en = (priv->hw->rx_csum > 0) && priv->sph; 6805 6806 /* DMA RX Channel Configuration */ 6807 for (chan = 0; chan < rx_cnt; chan++) { 6808 rx_q = &priv->dma_conf.rx_queue[chan]; 6809 6810 stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg, 6811 rx_q->dma_rx_phy, chan); 6812 6813 rx_q->rx_tail_addr = rx_q->dma_rx_phy + 6814 (rx_q->buf_alloc_num * 6815 sizeof(struct dma_desc)); 6816 stmmac_set_rx_tail_ptr(priv, priv->ioaddr, 6817 rx_q->rx_tail_addr, chan); 6818 6819 if (rx_q->xsk_pool && rx_q->buf_alloc_num) { 6820 buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool); 6821 stmmac_set_dma_bfsize(priv, priv->ioaddr, 6822 buf_size, 6823 rx_q->queue_index); 6824 } else { 6825 stmmac_set_dma_bfsize(priv, priv->ioaddr, 6826 priv->dma_conf.dma_buf_sz, 6827 rx_q->queue_index); 6828 } 6829 6830 stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan); 6831 } 6832 6833 /* DMA TX Channel Configuration */ 6834 for (chan = 0; chan < tx_cnt; chan++) { 6835 tx_q = &priv->dma_conf.tx_queue[chan]; 6836 6837 stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg, 6838 tx_q->dma_tx_phy, chan); 6839 6840 tx_q->tx_tail_addr = tx_q->dma_tx_phy; 6841 stmmac_set_tx_tail_ptr(priv, priv->ioaddr, 6842 tx_q->tx_tail_addr, chan); 6843 6844 hrtimer_init(&tx_q->txtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); 6845 tx_q->txtimer.function = stmmac_tx_timer; 6846 } 6847 6848 /* Enable the MAC Rx/Tx */ 6849 stmmac_mac_set(priv, priv->ioaddr, true); 6850 6851 /* Start Rx & Tx DMA Channels */ 6852 stmmac_start_all_dma(priv); 6853 6854 ret = stmmac_request_irq(dev); 6855 if (ret) 6856 goto irq_error; 6857 6858 /* Enable NAPI process*/ 6859 stmmac_enable_all_queues(priv); 6860 netif_carrier_on(dev); 6861 netif_tx_start_all_queues(dev); 6862 stmmac_enable_all_dma_irq(priv); 6863 6864 return 0; 6865 6866 irq_error: 6867 for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) 6868 hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer); 6869 6870 stmmac_hw_teardown(dev); 6871 init_error: 6872 free_dma_desc_resources(priv, &priv->dma_conf); 6873 dma_desc_error: 6874 return ret; 6875 } 6876 6877 int stmmac_xsk_wakeup(struct net_device *dev, u32 queue, u32 flags) 6878 { 6879 struct stmmac_priv *priv = netdev_priv(dev); 6880 struct stmmac_rx_queue *rx_q; 6881 struct stmmac_tx_queue 
*tx_q; 6882 struct stmmac_channel *ch; 6883 6884 if (test_bit(STMMAC_DOWN, &priv->state) || 6885 !netif_carrier_ok(priv->dev)) 6886 return -ENETDOWN; 6887 6888 if (!stmmac_xdp_is_enabled(priv)) 6889 return -EINVAL; 6890 6891 if (queue >= priv->plat->rx_queues_to_use || 6892 queue >= priv->plat->tx_queues_to_use) 6893 return -EINVAL; 6894 6895 rx_q = &priv->dma_conf.rx_queue[queue]; 6896 tx_q = &priv->dma_conf.tx_queue[queue]; 6897 ch = &priv->channel[queue]; 6898 6899 if (!rx_q->xsk_pool && !tx_q->xsk_pool) 6900 return -EINVAL; 6901 6902 if (!napi_if_scheduled_mark_missed(&ch->rxtx_napi)) { 6903 /* EQoS does not have per-DMA channel SW interrupt, 6904 * so we schedule RX Napi straight-away. 6905 */ 6906 if (likely(napi_schedule_prep(&ch->rxtx_napi))) 6907 __napi_schedule(&ch->rxtx_napi); 6908 } 6909 6910 return 0; 6911 } 6912 6913 static void stmmac_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) 6914 { 6915 struct stmmac_priv *priv = netdev_priv(dev); 6916 u32 tx_cnt = priv->plat->tx_queues_to_use; 6917 u32 rx_cnt = priv->plat->rx_queues_to_use; 6918 unsigned int start; 6919 int q; 6920 6921 for (q = 0; q < tx_cnt; q++) { 6922 struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[q]; 6923 u64 tx_packets; 6924 u64 tx_bytes; 6925 6926 do { 6927 start = u64_stats_fetch_begin(&txq_stats->syncp); 6928 tx_packets = txq_stats->tx_packets; 6929 tx_bytes = txq_stats->tx_bytes; 6930 } while (u64_stats_fetch_retry(&txq_stats->syncp, start)); 6931 6932 stats->tx_packets += tx_packets; 6933 stats->tx_bytes += tx_bytes; 6934 } 6935 6936 for (q = 0; q < rx_cnt; q++) { 6937 struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[q]; 6938 u64 rx_packets; 6939 u64 rx_bytes; 6940 6941 do { 6942 start = u64_stats_fetch_begin(&rxq_stats->syncp); 6943 rx_packets = rxq_stats->rx_packets; 6944 rx_bytes = rxq_stats->rx_bytes; 6945 } while (u64_stats_fetch_retry(&rxq_stats->syncp, start)); 6946 6947 stats->rx_packets += rx_packets; 6948 stats->rx_bytes += rx_bytes; 6949 } 6950 6951 stats->rx_dropped = priv->xstats.rx_dropped; 6952 stats->rx_errors = priv->xstats.rx_errors; 6953 stats->tx_dropped = priv->xstats.tx_dropped; 6954 stats->tx_errors = priv->xstats.tx_errors; 6955 stats->tx_carrier_errors = priv->xstats.tx_losscarrier + priv->xstats.tx_carrier; 6956 stats->collisions = priv->xstats.tx_collision + priv->xstats.rx_collision; 6957 stats->rx_length_errors = priv->xstats.rx_length; 6958 stats->rx_crc_errors = priv->xstats.rx_crc_errors; 6959 stats->rx_over_errors = priv->xstats.rx_overflow_cntr; 6960 stats->rx_missed_errors = priv->xstats.rx_missed_cntr; 6961 } 6962 6963 static const struct net_device_ops stmmac_netdev_ops = { 6964 .ndo_open = stmmac_open, 6965 .ndo_start_xmit = stmmac_xmit, 6966 .ndo_stop = stmmac_release, 6967 .ndo_change_mtu = stmmac_change_mtu, 6968 .ndo_fix_features = stmmac_fix_features, 6969 .ndo_set_features = stmmac_set_features, 6970 .ndo_set_rx_mode = stmmac_set_rx_mode, 6971 .ndo_tx_timeout = stmmac_tx_timeout, 6972 .ndo_eth_ioctl = stmmac_ioctl, 6973 .ndo_get_stats64 = stmmac_get_stats64, 6974 .ndo_setup_tc = stmmac_setup_tc, 6975 .ndo_select_queue = stmmac_select_queue, 6976 .ndo_set_mac_address = stmmac_set_mac_address, 6977 .ndo_vlan_rx_add_vid = stmmac_vlan_rx_add_vid, 6978 .ndo_vlan_rx_kill_vid = stmmac_vlan_rx_kill_vid, 6979 .ndo_bpf = stmmac_bpf, 6980 .ndo_xdp_xmit = stmmac_xdp_xmit, 6981 .ndo_xsk_wakeup = stmmac_xsk_wakeup, 6982 }; 6983 6984 static void stmmac_reset_subtask(struct stmmac_priv *priv) 6985 { 6986 if 
(!test_and_clear_bit(STMMAC_RESET_REQUESTED, &priv->state)) 6987 return; 6988 if (test_bit(STMMAC_DOWN, &priv->state)) 6989 return; 6990 6991 netdev_err(priv->dev, "Reset adapter.\n"); 6992 6993 rtnl_lock(); 6994 netif_trans_update(priv->dev); 6995 while (test_and_set_bit(STMMAC_RESETING, &priv->state)) 6996 usleep_range(1000, 2000); 6997 6998 set_bit(STMMAC_DOWN, &priv->state); 6999 dev_close(priv->dev); 7000 dev_open(priv->dev, NULL); 7001 clear_bit(STMMAC_DOWN, &priv->state); 7002 clear_bit(STMMAC_RESETING, &priv->state); 7003 rtnl_unlock(); 7004 } 7005 7006 static void stmmac_service_task(struct work_struct *work) 7007 { 7008 struct stmmac_priv *priv = container_of(work, struct stmmac_priv, 7009 service_task); 7010 7011 stmmac_reset_subtask(priv); 7012 clear_bit(STMMAC_SERVICE_SCHED, &priv->state); 7013 } 7014 7015 /** 7016 * stmmac_hw_init - Init the MAC device 7017 * @priv: driver private structure 7018 * Description: this function is to configure the MAC device according to 7019 * some platform parameters or the HW capability register. It prepares the 7020 * driver to use either ring or chain modes and to setup either enhanced or 7021 * normal descriptors. 7022 */ 7023 static int stmmac_hw_init(struct stmmac_priv *priv) 7024 { 7025 int ret; 7026 7027 /* dwmac-sun8i only work in chain mode */ 7028 if (priv->plat->flags & STMMAC_FLAG_HAS_SUN8I) 7029 chain_mode = 1; 7030 priv->chain_mode = chain_mode; 7031 7032 /* Initialize HW Interface */ 7033 ret = stmmac_hwif_init(priv); 7034 if (ret) 7035 return ret; 7036 7037 /* Get the HW capability (new GMAC newer than 3.50a) */ 7038 priv->hw_cap_support = stmmac_get_hw_features(priv); 7039 if (priv->hw_cap_support) { 7040 dev_info(priv->device, "DMA HW capability register supported\n"); 7041 7042 /* We can override some gmac/dma configuration fields: e.g. 7043 * enh_desc, tx_coe (e.g. that are passed through the 7044 * platform) with the values from the HW capability 7045 * register (if supported). 7046 */ 7047 priv->plat->enh_desc = priv->dma_cap.enh_desc; 7048 priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up && 7049 !(priv->plat->flags & STMMAC_FLAG_USE_PHY_WOL); 7050 priv->hw->pmt = priv->plat->pmt; 7051 if (priv->dma_cap.hash_tb_sz) { 7052 priv->hw->multicast_filter_bins = 7053 (BIT(priv->dma_cap.hash_tb_sz) << 5); 7054 priv->hw->mcast_bits_log2 = 7055 ilog2(priv->hw->multicast_filter_bins); 7056 } 7057 7058 /* TXCOE doesn't work in thresh DMA mode */ 7059 if (priv->plat->force_thresh_dma_mode) 7060 priv->plat->tx_coe = 0; 7061 else 7062 priv->plat->tx_coe = priv->dma_cap.tx_coe; 7063 7064 /* In case of GMAC4 rx_coe is from HW cap register. 
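 * On older cores the rx_coe_type2/type1 capability bits checked below
 * select the exact COE type instead.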
*/ 7065 priv->plat->rx_coe = priv->dma_cap.rx_coe; 7066 7067 if (priv->dma_cap.rx_coe_type2) 7068 priv->plat->rx_coe = STMMAC_RX_COE_TYPE2; 7069 else if (priv->dma_cap.rx_coe_type1) 7070 priv->plat->rx_coe = STMMAC_RX_COE_TYPE1; 7071 7072 } else { 7073 dev_info(priv->device, "No HW DMA feature register supported\n"); 7074 } 7075 7076 if (priv->plat->rx_coe) { 7077 priv->hw->rx_csum = priv->plat->rx_coe; 7078 dev_info(priv->device, "RX Checksum Offload Engine supported\n"); 7079 if (priv->synopsys_id < DWMAC_CORE_4_00) 7080 dev_info(priv->device, "COE Type %d\n", priv->hw->rx_csum); 7081 } 7082 if (priv->plat->tx_coe) 7083 dev_info(priv->device, "TX Checksum insertion supported\n"); 7084 7085 if (priv->plat->pmt) { 7086 dev_info(priv->device, "Wake-Up On Lan supported\n"); 7087 device_set_wakeup_capable(priv->device, 1); 7088 } 7089 7090 if (priv->dma_cap.tsoen) 7091 dev_info(priv->device, "TSO supported\n"); 7092 7093 priv->hw->vlan_fail_q_en = 7094 (priv->plat->flags & STMMAC_FLAG_VLAN_FAIL_Q_EN); 7095 priv->hw->vlan_fail_q = priv->plat->vlan_fail_q; 7096 7097 /* Run HW quirks, if any */ 7098 if (priv->hwif_quirks) { 7099 ret = priv->hwif_quirks(priv); 7100 if (ret) 7101 return ret; 7102 } 7103 7104 /* Rx Watchdog is available in the COREs newer than the 3.40. 7105 * In some case, for example on bugged HW this feature 7106 * has to be disable and this can be done by passing the 7107 * riwt_off field from the platform. 7108 */ 7109 if (((priv->synopsys_id >= DWMAC_CORE_3_50) || 7110 (priv->plat->has_xgmac)) && (!priv->plat->riwt_off)) { 7111 priv->use_riwt = 1; 7112 dev_info(priv->device, 7113 "Enable RX Mitigation via HW Watchdog Timer\n"); 7114 } 7115 7116 return 0; 7117 } 7118 7119 static void stmmac_napi_add(struct net_device *dev) 7120 { 7121 struct stmmac_priv *priv = netdev_priv(dev); 7122 u32 queue, maxq; 7123 7124 maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use); 7125 7126 for (queue = 0; queue < maxq; queue++) { 7127 struct stmmac_channel *ch = &priv->channel[queue]; 7128 7129 ch->priv_data = priv; 7130 ch->index = queue; 7131 spin_lock_init(&ch->lock); 7132 7133 if (queue < priv->plat->rx_queues_to_use) { 7134 netif_napi_add(dev, &ch->rx_napi, stmmac_napi_poll_rx); 7135 } 7136 if (queue < priv->plat->tx_queues_to_use) { 7137 netif_napi_add_tx(dev, &ch->tx_napi, 7138 stmmac_napi_poll_tx); 7139 } 7140 if (queue < priv->plat->rx_queues_to_use && 7141 queue < priv->plat->tx_queues_to_use) { 7142 netif_napi_add(dev, &ch->rxtx_napi, 7143 stmmac_napi_poll_rxtx); 7144 } 7145 } 7146 } 7147 7148 static void stmmac_napi_del(struct net_device *dev) 7149 { 7150 struct stmmac_priv *priv = netdev_priv(dev); 7151 u32 queue, maxq; 7152 7153 maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use); 7154 7155 for (queue = 0; queue < maxq; queue++) { 7156 struct stmmac_channel *ch = &priv->channel[queue]; 7157 7158 if (queue < priv->plat->rx_queues_to_use) 7159 netif_napi_del(&ch->rx_napi); 7160 if (queue < priv->plat->tx_queues_to_use) 7161 netif_napi_del(&ch->tx_napi); 7162 if (queue < priv->plat->rx_queues_to_use && 7163 queue < priv->plat->tx_queues_to_use) { 7164 netif_napi_del(&ch->rxtx_napi); 7165 } 7166 } 7167 } 7168 7169 int stmmac_reinit_queues(struct net_device *dev, u32 rx_cnt, u32 tx_cnt) 7170 { 7171 struct stmmac_priv *priv = netdev_priv(dev); 7172 int ret = 0, i; 7173 7174 if (netif_running(dev)) 7175 stmmac_release(dev); 7176 7177 stmmac_napi_del(dev); 7178 7179 priv->plat->rx_queues_to_use = rx_cnt; 7180 priv->plat->tx_queues_to_use = tx_cnt; 
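/* Re-seed the default RSS indirection table for the new RX queue count
 * unless the user has explicitly configured one.
 */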
7181 if (!netif_is_rxfh_configured(dev)) 7182 for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++) 7183 priv->rss.table[i] = ethtool_rxfh_indir_default(i, 7184 rx_cnt); 7185 7186 stmmac_set_half_duplex(priv); 7187 stmmac_napi_add(dev); 7188 7189 if (netif_running(dev)) 7190 ret = stmmac_open(dev); 7191 7192 return ret; 7193 } 7194 7195 int stmmac_reinit_ringparam(struct net_device *dev, u32 rx_size, u32 tx_size) 7196 { 7197 struct stmmac_priv *priv = netdev_priv(dev); 7198 int ret = 0; 7199 7200 if (netif_running(dev)) 7201 stmmac_release(dev); 7202 7203 priv->dma_conf.dma_rx_size = rx_size; 7204 priv->dma_conf.dma_tx_size = tx_size; 7205 7206 if (netif_running(dev)) 7207 ret = stmmac_open(dev); 7208 7209 return ret; 7210 } 7211 7212 #define SEND_VERIFY_MPAKCET_FMT "Send Verify mPacket lo_state=%d lp_state=%d\n" 7213 static void stmmac_fpe_lp_task(struct work_struct *work) 7214 { 7215 struct stmmac_priv *priv = container_of(work, struct stmmac_priv, 7216 fpe_task); 7217 struct stmmac_fpe_cfg *fpe_cfg = priv->plat->fpe_cfg; 7218 enum stmmac_fpe_state *lo_state = &fpe_cfg->lo_fpe_state; 7219 enum stmmac_fpe_state *lp_state = &fpe_cfg->lp_fpe_state; 7220 bool *hs_enable = &fpe_cfg->hs_enable; 7221 bool *enable = &fpe_cfg->enable; 7222 int retries = 20; 7223 7224 while (retries-- > 0) { 7225 /* Bail out immediately if FPE handshake is OFF */ 7226 if (*lo_state == FPE_STATE_OFF || !*hs_enable) 7227 break; 7228 7229 if (*lo_state == FPE_STATE_ENTERING_ON && 7230 *lp_state == FPE_STATE_ENTERING_ON) { 7231 stmmac_fpe_configure(priv, priv->ioaddr, 7232 fpe_cfg, 7233 priv->plat->tx_queues_to_use, 7234 priv->plat->rx_queues_to_use, 7235 *enable); 7236 7237 netdev_info(priv->dev, "configured FPE\n"); 7238 7239 *lo_state = FPE_STATE_ON; 7240 *lp_state = FPE_STATE_ON; 7241 netdev_info(priv->dev, "!!! BOTH FPE stations ON\n"); 7242 break; 7243 } 7244 7245 if ((*lo_state == FPE_STATE_CAPABLE || 7246 *lo_state == FPE_STATE_ENTERING_ON) && 7247 *lp_state != FPE_STATE_ON) { 7248 netdev_info(priv->dev, SEND_VERIFY_MPAKCET_FMT, 7249 *lo_state, *lp_state); 7250 stmmac_fpe_send_mpacket(priv, priv->ioaddr, 7251 fpe_cfg, 7252 MPACKET_VERIFY); 7253 } 7254 /* Sleep then retry */ 7255 msleep(500); 7256 } 7257 7258 clear_bit(__FPE_TASK_SCHED, &priv->fpe_task_state); 7259 } 7260 7261 void stmmac_fpe_handshake(struct stmmac_priv *priv, bool enable) 7262 { 7263 if (priv->plat->fpe_cfg->hs_enable != enable) { 7264 if (enable) { 7265 stmmac_fpe_send_mpacket(priv, priv->ioaddr, 7266 priv->plat->fpe_cfg, 7267 MPACKET_VERIFY); 7268 } else { 7269 priv->plat->fpe_cfg->lo_fpe_state = FPE_STATE_OFF; 7270 priv->plat->fpe_cfg->lp_fpe_state = FPE_STATE_OFF; 7271 } 7272 7273 priv->plat->fpe_cfg->hs_enable = enable; 7274 } 7275 } 7276 7277 static int stmmac_xdp_rx_timestamp(const struct xdp_md *_ctx, u64 *timestamp) 7278 { 7279 const struct stmmac_xdp_buff *ctx = (void *)_ctx; 7280 struct dma_desc *desc_contains_ts = ctx->desc; 7281 struct stmmac_priv *priv = ctx->priv; 7282 struct dma_desc *ndesc = ctx->ndesc; 7283 struct dma_desc *desc = ctx->desc; 7284 u64 ns = 0; 7285 7286 if (!priv->hwts_rx_en) 7287 return -ENODATA; 7288 7289 /* For GMAC4, the valid timestamp is from CTX next desc. 
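 * Older cores report the timestamp in the receive descriptor itself,
 * which is why desc_contains_ts defaults to ctx->desc.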
*/ 7290 if (priv->plat->has_gmac4 || priv->plat->has_xgmac) 7291 desc_contains_ts = ndesc; 7292 7293 /* Check if timestamp is available */ 7294 if (stmmac_get_rx_timestamp_status(priv, desc, ndesc, priv->adv_ts)) { 7295 stmmac_get_timestamp(priv, desc_contains_ts, priv->adv_ts, &ns); 7296 ns -= priv->plat->cdc_error_adj; 7297 *timestamp = ns_to_ktime(ns); 7298 return 0; 7299 } 7300 7301 return -ENODATA; 7302 } 7303 7304 static const struct xdp_metadata_ops stmmac_xdp_metadata_ops = { 7305 .xmo_rx_timestamp = stmmac_xdp_rx_timestamp, 7306 }; 7307 7308 /** 7309 * stmmac_dvr_probe 7310 * @device: device pointer 7311 * @plat_dat: platform data pointer 7312 * @res: stmmac resource pointer 7313 * Description: this is the main probe function used to 7314 * call the alloc_etherdev, allocate the priv structure. 7315 * Return: 7316 * returns 0 on success, otherwise errno. 7317 */ 7318 int stmmac_dvr_probe(struct device *device, 7319 struct plat_stmmacenet_data *plat_dat, 7320 struct stmmac_resources *res) 7321 { 7322 struct net_device *ndev = NULL; 7323 struct stmmac_priv *priv; 7324 u32 rxq; 7325 int i, ret = 0; 7326 7327 ndev = devm_alloc_etherdev_mqs(device, sizeof(struct stmmac_priv), 7328 MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES); 7329 if (!ndev) 7330 return -ENOMEM; 7331 7332 SET_NETDEV_DEV(ndev, device); 7333 7334 priv = netdev_priv(ndev); 7335 priv->device = device; 7336 priv->dev = ndev; 7337 7338 for (i = 0; i < MTL_MAX_RX_QUEUES; i++) 7339 u64_stats_init(&priv->xstats.rxq_stats[i].syncp); 7340 for (i = 0; i < MTL_MAX_TX_QUEUES; i++) 7341 u64_stats_init(&priv->xstats.txq_stats[i].syncp); 7342 7343 stmmac_set_ethtool_ops(ndev); 7344 priv->pause = pause; 7345 priv->plat = plat_dat; 7346 priv->ioaddr = res->addr; 7347 priv->dev->base_addr = (unsigned long)res->addr; 7348 priv->plat->dma_cfg->multi_msi_en = 7349 (priv->plat->flags & STMMAC_FLAG_MULTI_MSI_EN); 7350 7351 priv->dev->irq = res->irq; 7352 priv->wol_irq = res->wol_irq; 7353 priv->lpi_irq = res->lpi_irq; 7354 priv->sfty_ce_irq = res->sfty_ce_irq; 7355 priv->sfty_ue_irq = res->sfty_ue_irq; 7356 for (i = 0; i < MTL_MAX_RX_QUEUES; i++) 7357 priv->rx_irq[i] = res->rx_irq[i]; 7358 for (i = 0; i < MTL_MAX_TX_QUEUES; i++) 7359 priv->tx_irq[i] = res->tx_irq[i]; 7360 7361 if (!is_zero_ether_addr(res->mac)) 7362 eth_hw_addr_set(priv->dev, res->mac); 7363 7364 dev_set_drvdata(device, priv->dev); 7365 7366 /* Verify driver arguments */ 7367 stmmac_verify_args(); 7368 7369 priv->af_xdp_zc_qps = bitmap_zalloc(MTL_MAX_TX_QUEUES, GFP_KERNEL); 7370 if (!priv->af_xdp_zc_qps) 7371 return -ENOMEM; 7372 7373 /* Allocate workqueue */ 7374 priv->wq = create_singlethread_workqueue("stmmac_wq"); 7375 if (!priv->wq) { 7376 dev_err(priv->device, "failed to create workqueue\n"); 7377 ret = -ENOMEM; 7378 goto error_wq_init; 7379 } 7380 7381 INIT_WORK(&priv->service_task, stmmac_service_task); 7382 7383 /* Initialize Link Partner FPE workqueue */ 7384 INIT_WORK(&priv->fpe_task, stmmac_fpe_lp_task); 7385 7386 /* Override with kernel parameters if supplied XXX CRS XXX 7387 * this needs to have multiple instances 7388 */ 7389 if ((phyaddr >= 0) && (phyaddr <= 31)) 7390 priv->plat->phy_addr = phyaddr; 7391 7392 if (priv->plat->stmmac_rst) { 7393 ret = reset_control_assert(priv->plat->stmmac_rst); 7394 reset_control_deassert(priv->plat->stmmac_rst); 7395 /* Some reset controllers have only reset callback instead of 7396 * assert + deassert callbacks pair. 
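 * Such controllers return -ENOTSUPP from assert/deassert, so the code
 * below falls back to reset_control_reset().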
7397 */ 7398 if (ret == -ENOTSUPP) 7399 reset_control_reset(priv->plat->stmmac_rst); 7400 } 7401 7402 ret = reset_control_deassert(priv->plat->stmmac_ahb_rst); 7403 if (ret == -ENOTSUPP) 7404 dev_err(priv->device, "unable to bring out of ahb reset: %pe\n", 7405 ERR_PTR(ret)); 7406 7407 /* Init MAC and get the capabilities */ 7408 ret = stmmac_hw_init(priv); 7409 if (ret) 7410 goto error_hw_init; 7411 7412 /* Only DWMAC core version 5.20 onwards supports HW descriptor prefetch. 7413 */ 7414 if (priv->synopsys_id < DWMAC_CORE_5_20) 7415 priv->plat->dma_cfg->dche = false; 7416 7417 stmmac_check_ether_addr(priv); 7418 7419 ndev->netdev_ops = &stmmac_netdev_ops; 7420 7421 ndev->xdp_metadata_ops = &stmmac_xdp_metadata_ops; 7422 7423 ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | 7424 NETIF_F_RXCSUM; 7425 ndev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT | 7426 NETDEV_XDP_ACT_XSK_ZEROCOPY; 7427 7428 ret = stmmac_tc_init(priv, priv); 7429 if (!ret) { 7430 ndev->hw_features |= NETIF_F_HW_TC; 7431 } 7432 7433 if ((priv->plat->flags & STMMAC_FLAG_TSO_EN) && (priv->dma_cap.tsoen)) { 7434 ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6; 7435 if (priv->plat->has_gmac4) 7436 ndev->hw_features |= NETIF_F_GSO_UDP_L4; 7437 priv->tso = true; 7438 dev_info(priv->device, "TSO feature enabled\n"); 7439 } 7440 7441 if (priv->dma_cap.sphen && 7442 !(priv->plat->flags & STMMAC_FLAG_SPH_DISABLE)) { 7443 ndev->hw_features |= NETIF_F_GRO; 7444 priv->sph_cap = true; 7445 priv->sph = priv->sph_cap; 7446 dev_info(priv->device, "SPH feature enabled\n"); 7447 } 7448 7449 /* Ideally our host DMA address width is the same as for the 7450 * device. However, it may differ and then we have to use our 7451 * host DMA width for allocation and the device DMA width for 7452 * register handling. 7453 */ 7454 if (priv->plat->host_dma_width) 7455 priv->dma_cap.host_dma_width = priv->plat->host_dma_width; 7456 else 7457 priv->dma_cap.host_dma_width = priv->dma_cap.addr64; 7458 7459 if (priv->dma_cap.host_dma_width) { 7460 ret = dma_set_mask_and_coherent(device, 7461 DMA_BIT_MASK(priv->dma_cap.host_dma_width)); 7462 if (!ret) { 7463 dev_info(priv->device, "Using %d/%d bits DMA host/device width\n", 7464 priv->dma_cap.host_dma_width, priv->dma_cap.addr64); 7465 7466 /* 7467 * If more than 32 bits can be addressed, make sure to 7468 * enable enhanced addressing mode. 
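 * Enhanced addressing (dma_cfg->eame) lets the DMA engine emit addresses
 * wider than 32 bits, so it is only enabled when dma_addr_t is 64 bit.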
7469 */ 7470 if (IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT)) 7471 priv->plat->dma_cfg->eame = true; 7472 } else { 7473 ret = dma_set_mask_and_coherent(device, DMA_BIT_MASK(32)); 7474 if (ret) { 7475 dev_err(priv->device, "Failed to set DMA Mask\n"); 7476 goto error_hw_init; 7477 } 7478 7479 priv->dma_cap.host_dma_width = 32; 7480 } 7481 } 7482 7483 ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA; 7484 ndev->watchdog_timeo = msecs_to_jiffies(watchdog); 7485 #ifdef STMMAC_VLAN_TAG_USED 7486 /* Both mac100 and gmac support receive VLAN tag detection */ 7487 ndev->features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX; 7488 if (priv->dma_cap.vlhash) { 7489 ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER; 7490 ndev->features |= NETIF_F_HW_VLAN_STAG_FILTER; 7491 } 7492 if (priv->dma_cap.vlins) { 7493 ndev->features |= NETIF_F_HW_VLAN_CTAG_TX; 7494 if (priv->dma_cap.dvlan) 7495 ndev->features |= NETIF_F_HW_VLAN_STAG_TX; 7496 } 7497 #endif 7498 priv->msg_enable = netif_msg_init(debug, default_msg_level); 7499 7500 priv->xstats.threshold = tc; 7501 7502 /* Initialize RSS */ 7503 rxq = priv->plat->rx_queues_to_use; 7504 netdev_rss_key_fill(priv->rss.key, sizeof(priv->rss.key)); 7505 for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++) 7506 priv->rss.table[i] = ethtool_rxfh_indir_default(i, rxq); 7507 7508 if (priv->dma_cap.rssen && priv->plat->rss_en) 7509 ndev->features |= NETIF_F_RXHASH; 7510 7511 ndev->vlan_features |= ndev->features; 7512 /* TSO doesn't work on VLANs yet */ 7513 ndev->vlan_features &= ~NETIF_F_TSO; 7514 7515 /* MTU range: 46 - hw-specific max */ 7516 ndev->min_mtu = ETH_ZLEN - ETH_HLEN; 7517 if (priv->plat->has_xgmac) 7518 ndev->max_mtu = XGMAC_JUMBO_LEN; 7519 else if ((priv->plat->enh_desc) || (priv->synopsys_id >= DWMAC_CORE_4_00)) 7520 ndev->max_mtu = JUMBO_LEN; 7521 else 7522 ndev->max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN); 7523 /* Will not overwrite ndev->max_mtu if plat->maxmtu > ndev->max_mtu 7524 * as well as plat->maxmtu < ndev->min_mtu which is a invalid range. 7525 */ 7526 if ((priv->plat->maxmtu < ndev->max_mtu) && 7527 (priv->plat->maxmtu >= ndev->min_mtu)) 7528 ndev->max_mtu = priv->plat->maxmtu; 7529 else if (priv->plat->maxmtu < ndev->min_mtu) 7530 dev_warn(priv->device, 7531 "%s: warning: maxmtu having invalid value (%d)\n", 7532 __func__, priv->plat->maxmtu); 7533 7534 if (flow_ctrl) 7535 priv->flow_ctrl = FLOW_AUTO; /* RX/TX pause on */ 7536 7537 ndev->priv_flags |= IFF_LIVE_ADDR_CHANGE; 7538 7539 /* Setup channels NAPI */ 7540 stmmac_napi_add(ndev); 7541 7542 mutex_init(&priv->lock); 7543 7544 /* If a specific clk_csr value is passed from the platform 7545 * this means that the CSR Clock Range selection cannot be 7546 * changed at run-time and it is fixed. Viceversa the driver'll try to 7547 * set the MDC clock dynamically according to the csr actual 7548 * clock input. 
7549 */ 7550 if (priv->plat->clk_csr >= 0) 7551 priv->clk_csr = priv->plat->clk_csr; 7552 else 7553 stmmac_clk_csr_set(priv); 7554 7555 stmmac_check_pcs_mode(priv); 7556 7557 pm_runtime_get_noresume(device); 7558 pm_runtime_set_active(device); 7559 if (!pm_runtime_enabled(device)) 7560 pm_runtime_enable(device); 7561 7562 if (priv->hw->pcs != STMMAC_PCS_TBI && 7563 priv->hw->pcs != STMMAC_PCS_RTBI) { 7564 /* MDIO bus Registration */ 7565 ret = stmmac_mdio_register(ndev); 7566 if (ret < 0) { 7567 dev_err_probe(priv->device, ret, 7568 "%s: MDIO bus (id: %d) registration failed\n", 7569 __func__, priv->plat->bus_id); 7570 goto error_mdio_register; 7571 } 7572 } 7573 7574 if (priv->plat->speed_mode_2500) 7575 priv->plat->speed_mode_2500(ndev, priv->plat->bsp_priv); 7576 7577 if (priv->plat->mdio_bus_data && priv->plat->mdio_bus_data->has_xpcs) { 7578 ret = stmmac_xpcs_setup(priv->mii); 7579 if (ret) 7580 goto error_xpcs_setup; 7581 } 7582 7583 ret = stmmac_phy_setup(priv); 7584 if (ret) { 7585 netdev_err(ndev, "failed to setup phy (%d)\n", ret); 7586 goto error_phy_setup; 7587 } 7588 7589 ret = register_netdev(ndev); 7590 if (ret) { 7591 dev_err(priv->device, "%s: ERROR %i registering the device\n", 7592 __func__, ret); 7593 goto error_netdev_register; 7594 } 7595 7596 #ifdef CONFIG_DEBUG_FS 7597 stmmac_init_fs(ndev); 7598 #endif 7599 7600 if (priv->plat->dump_debug_regs) 7601 priv->plat->dump_debug_regs(priv->plat->bsp_priv); 7602 7603 /* Let pm_runtime_put() disable the clocks. 7604 * If CONFIG_PM is not enabled, the clocks will stay powered. 7605 */ 7606 pm_runtime_put(device); 7607 7608 return ret; 7609 7610 error_netdev_register: 7611 phylink_destroy(priv->phylink); 7612 error_xpcs_setup: 7613 error_phy_setup: 7614 if (priv->hw->pcs != STMMAC_PCS_TBI && 7615 priv->hw->pcs != STMMAC_PCS_RTBI) 7616 stmmac_mdio_unregister(ndev); 7617 error_mdio_register: 7618 stmmac_napi_del(ndev); 7619 error_hw_init: 7620 destroy_workqueue(priv->wq); 7621 error_wq_init: 7622 bitmap_free(priv->af_xdp_zc_qps); 7623 7624 return ret; 7625 } 7626 EXPORT_SYMBOL_GPL(stmmac_dvr_probe); 7627 7628 /** 7629 * stmmac_dvr_remove 7630 * @dev: device pointer 7631 * Description: this function resets the TX/RX processes, disables the MAC RX/TX 7632 * changes the link status, releases the DMA descriptor rings. 
7633 */ 7634 void stmmac_dvr_remove(struct device *dev) 7635 { 7636 struct net_device *ndev = dev_get_drvdata(dev); 7637 struct stmmac_priv *priv = netdev_priv(ndev); 7638 7639 netdev_info(priv->dev, "%s: removing driver", __func__); 7640 7641 pm_runtime_get_sync(dev); 7642 7643 stmmac_stop_all_dma(priv); 7644 stmmac_mac_set(priv, priv->ioaddr, false); 7645 netif_carrier_off(ndev); 7646 unregister_netdev(ndev); 7647 7648 #ifdef CONFIG_DEBUG_FS 7649 stmmac_exit_fs(ndev); 7650 #endif 7651 phylink_destroy(priv->phylink); 7652 if (priv->plat->stmmac_rst) 7653 reset_control_assert(priv->plat->stmmac_rst); 7654 reset_control_assert(priv->plat->stmmac_ahb_rst); 7655 if (priv->hw->pcs != STMMAC_PCS_TBI && 7656 priv->hw->pcs != STMMAC_PCS_RTBI) 7657 stmmac_mdio_unregister(ndev); 7658 destroy_workqueue(priv->wq); 7659 mutex_destroy(&priv->lock); 7660 bitmap_free(priv->af_xdp_zc_qps); 7661 7662 pm_runtime_disable(dev); 7663 pm_runtime_put_noidle(dev); 7664 } 7665 EXPORT_SYMBOL_GPL(stmmac_dvr_remove); 7666 7667 /** 7668 * stmmac_suspend - suspend callback 7669 * @dev: device pointer 7670 * Description: this is the function to suspend the device and it is called 7671 * by the platform driver to stop the network queue, release the resources, 7672 * program the PMT register (for WoL), clean and release driver resources. 7673 */ 7674 int stmmac_suspend(struct device *dev) 7675 { 7676 struct net_device *ndev = dev_get_drvdata(dev); 7677 struct stmmac_priv *priv = netdev_priv(ndev); 7678 u32 chan; 7679 7680 if (!ndev || !netif_running(ndev)) 7681 return 0; 7682 7683 mutex_lock(&priv->lock); 7684 7685 netif_device_detach(ndev); 7686 7687 stmmac_disable_all_queues(priv); 7688 7689 for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) 7690 hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer); 7691 7692 if (priv->eee_enabled) { 7693 priv->tx_path_in_lpi_mode = false; 7694 del_timer_sync(&priv->eee_ctrl_timer); 7695 } 7696 7697 /* Stop TX/RX DMA */ 7698 stmmac_stop_all_dma(priv); 7699 7700 if (priv->plat->serdes_powerdown) 7701 priv->plat->serdes_powerdown(ndev, priv->plat->bsp_priv); 7702 7703 /* Enable Power down mode by programming the PMT regs */ 7704 if (device_may_wakeup(priv->device) && priv->plat->pmt) { 7705 stmmac_pmt(priv, priv->hw, priv->wolopts); 7706 priv->irq_wake = 1; 7707 } else { 7708 stmmac_mac_set(priv, priv->ioaddr, false); 7709 pinctrl_pm_select_sleep_state(priv->device); 7710 } 7711 7712 mutex_unlock(&priv->lock); 7713 7714 rtnl_lock(); 7715 if (device_may_wakeup(priv->device) && priv->plat->pmt) { 7716 phylink_suspend(priv->phylink, true); 7717 } else { 7718 if (device_may_wakeup(priv->device)) 7719 phylink_speed_down(priv->phylink, false); 7720 phylink_suspend(priv->phylink, false); 7721 } 7722 rtnl_unlock(); 7723 7724 if (priv->dma_cap.fpesel) { 7725 /* Disable FPE */ 7726 stmmac_fpe_configure(priv, priv->ioaddr, 7727 priv->plat->fpe_cfg, 7728 priv->plat->tx_queues_to_use, 7729 priv->plat->rx_queues_to_use, false); 7730 7731 stmmac_fpe_handshake(priv, false); 7732 stmmac_fpe_stop_wq(priv); 7733 } 7734 7735 priv->speed = SPEED_UNKNOWN; 7736 return 0; 7737 } 7738 EXPORT_SYMBOL_GPL(stmmac_suspend); 7739 7740 static void stmmac_reset_rx_queue(struct stmmac_priv *priv, u32 queue) 7741 { 7742 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue]; 7743 7744 rx_q->cur_rx = 0; 7745 rx_q->dirty_rx = 0; 7746 } 7747 7748 static void stmmac_reset_tx_queue(struct stmmac_priv *priv, u32 queue) 7749 { 7750 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue]; 7751 7752 
/**
 * stmmac_dvr_remove
 * @dev: device pointer
 * Description: this function resets the TX/RX processes, disables the MAC
 * RX/TX, changes the link status and releases the DMA descriptor rings.
 */
void stmmac_dvr_remove(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	struct stmmac_priv *priv = netdev_priv(ndev);

	netdev_info(priv->dev, "%s: removing driver", __func__);

	pm_runtime_get_sync(dev);

	stmmac_stop_all_dma(priv);
	stmmac_mac_set(priv, priv->ioaddr, false);
	netif_carrier_off(ndev);
	unregister_netdev(ndev);

#ifdef CONFIG_DEBUG_FS
	stmmac_exit_fs(ndev);
#endif
	phylink_destroy(priv->phylink);
	if (priv->plat->stmmac_rst)
		reset_control_assert(priv->plat->stmmac_rst);
	reset_control_assert(priv->plat->stmmac_ahb_rst);
	if (priv->hw->pcs != STMMAC_PCS_TBI &&
	    priv->hw->pcs != STMMAC_PCS_RTBI)
		stmmac_mdio_unregister(ndev);
	destroy_workqueue(priv->wq);
	mutex_destroy(&priv->lock);
	bitmap_free(priv->af_xdp_zc_qps);

	pm_runtime_disable(dev);
	pm_runtime_put_noidle(dev);
}
EXPORT_SYMBOL_GPL(stmmac_dvr_remove);

/**
 * stmmac_suspend - suspend callback
 * @dev: device pointer
 * Description: this function suspends the device; it is called by the
 * platform driver to stop the network queue, release the resources,
 * program the PMT register (for WoL) and clean up driver resources.
 */
int stmmac_suspend(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	struct stmmac_priv *priv = netdev_priv(ndev);
	u32 chan;

	if (!ndev || !netif_running(ndev))
		return 0;

	mutex_lock(&priv->lock);

	netif_device_detach(ndev);

	stmmac_disable_all_queues(priv);

	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);

	if (priv->eee_enabled) {
		priv->tx_path_in_lpi_mode = false;
		del_timer_sync(&priv->eee_ctrl_timer);
	}

	/* Stop TX/RX DMA */
	stmmac_stop_all_dma(priv);

	if (priv->plat->serdes_powerdown)
		priv->plat->serdes_powerdown(ndev, priv->plat->bsp_priv);

	/* Enable Power down mode by programming the PMT regs */
	if (device_may_wakeup(priv->device) && priv->plat->pmt) {
		stmmac_pmt(priv, priv->hw, priv->wolopts);
		priv->irq_wake = 1;
	} else {
		stmmac_mac_set(priv, priv->ioaddr, false);
		pinctrl_pm_select_sleep_state(priv->device);
	}

	mutex_unlock(&priv->lock);

	rtnl_lock();
	if (device_may_wakeup(priv->device) && priv->plat->pmt) {
		phylink_suspend(priv->phylink, true);
	} else {
		if (device_may_wakeup(priv->device))
			phylink_speed_down(priv->phylink, false);
		phylink_suspend(priv->phylink, false);
	}
	rtnl_unlock();

	if (priv->dma_cap.fpesel) {
		/* Disable FPE */
		stmmac_fpe_configure(priv, priv->ioaddr,
				     priv->plat->fpe_cfg,
				     priv->plat->tx_queues_to_use,
				     priv->plat->rx_queues_to_use, false);

		stmmac_fpe_handshake(priv, false);
		stmmac_fpe_stop_wq(priv);
	}

	priv->speed = SPEED_UNKNOWN;
	return 0;
}
EXPORT_SYMBOL_GPL(stmmac_suspend);

static void stmmac_reset_rx_queue(struct stmmac_priv *priv, u32 queue)
{
	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];

	rx_q->cur_rx = 0;
	rx_q->dirty_rx = 0;
}

static void stmmac_reset_tx_queue(struct stmmac_priv *priv, u32 queue)
{
	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];

	tx_q->cur_tx = 0;
	tx_q->dirty_tx = 0;
	tx_q->mss = 0;

	netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue));
}

/**
 * stmmac_reset_queues_param - reset queue parameters
 * @priv: device pointer
 */
static void stmmac_reset_queues_param(struct stmmac_priv *priv)
{
	u32 rx_cnt = priv->plat->rx_queues_to_use;
	u32 tx_cnt = priv->plat->tx_queues_to_use;
	u32 queue;

	for (queue = 0; queue < rx_cnt; queue++)
		stmmac_reset_rx_queue(priv, queue);

	for (queue = 0; queue < tx_cnt; queue++)
		stmmac_reset_tx_queue(priv, queue);
}

/**
 * stmmac_resume - resume callback
 * @dev: device pointer
 * Description: when resuming, this function is invoked to set up the DMA and
 * CORE in a usable state.
 */
int stmmac_resume(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	struct stmmac_priv *priv = netdev_priv(ndev);
	int ret;

	if (!netif_running(ndev))
		return 0;

	/* The Power Down bit in the PM register is cleared automatically
	 * as soon as a magic packet or a Wake-up frame is received.
	 * Even so, it is better to clear this bit manually because it can
	 * cause problems while resuming from another device
	 * (e.g. a serial console).
	 */
	if (device_may_wakeup(priv->device) && priv->plat->pmt) {
		mutex_lock(&priv->lock);
		stmmac_pmt(priv, priv->hw, 0);
		mutex_unlock(&priv->lock);
		priv->irq_wake = 0;
	} else {
		pinctrl_pm_select_default_state(priv->device);
		/* reset the phy so that it's ready */
		if (priv->mii)
			stmmac_mdio_reset(priv->mii);
	}

	if (!(priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP) &&
	    priv->plat->serdes_powerup) {
		ret = priv->plat->serdes_powerup(ndev,
						 priv->plat->bsp_priv);

		if (ret < 0)
			return ret;
	}

	rtnl_lock();
	if (device_may_wakeup(priv->device) && priv->plat->pmt) {
		phylink_resume(priv->phylink);
	} else {
		phylink_resume(priv->phylink);
		if (device_may_wakeup(priv->device))
			phylink_speed_up(priv->phylink);
	}
	rtnl_unlock();

	rtnl_lock();
	mutex_lock(&priv->lock);

	stmmac_reset_queues_param(priv);

	stmmac_free_tx_skbufs(priv);
	stmmac_clear_descriptors(priv, &priv->dma_conf);

	stmmac_hw_setup(ndev, false);
	stmmac_init_coalesce(priv);
	stmmac_set_rx_mode(ndev);

	stmmac_restore_hw_vlan_rx_fltr(priv, ndev, priv->hw);

	stmmac_enable_all_queues(priv);
	stmmac_enable_all_dma_irq(priv);

	mutex_unlock(&priv->lock);
	rtnl_unlock();

	netif_device_attach(ndev);

	return 0;
}
EXPORT_SYMBOL_GPL(stmmac_resume);

#ifndef MODULE
static int __init stmmac_cmdline_opt(char *str)
{
	char *opt;

	if (!str || !*str)
		return 1;
	while ((opt = strsep(&str, ",")) != NULL) {
		if (!strncmp(opt, "debug:", 6)) {
			if (kstrtoint(opt + 6, 0, &debug))
				goto err;
		} else if (!strncmp(opt, "phyaddr:", 8)) {
			if (kstrtoint(opt + 8, 0, &phyaddr))
				goto err;
		} else if (!strncmp(opt, "buf_sz:", 7)) {
			if (kstrtoint(opt + 7, 0, &buf_sz))
				goto err;
		} else if (!strncmp(opt, "tc:", 3)) {
			if (kstrtoint(opt + 3, 0, &tc))
				goto err;
		} else if (!strncmp(opt, "watchdog:", 9)) {
			if (kstrtoint(opt + 9, 0, &watchdog))
				goto err;
		} else if (!strncmp(opt, "flow_ctrl:", 10)) {
			if (kstrtoint(opt + 10, 0, &flow_ctrl))
				goto err;
		} else if (!strncmp(opt, "pause:", 6)) {
			if (kstrtoint(opt + 6, 0, &pause))
				goto err;
		} else if (!strncmp(opt, "eee_timer:", 10)) {
			if (kstrtoint(opt + 10, 0, &eee_timer))
				goto err;
		} else if (!strncmp(opt, "chain_mode:", 11)) {
			if (kstrtoint(opt + 11, 0, &chain_mode))
				goto err;
		}
	}
	return 1;

err:
	pr_err("%s: ERROR broken module parameter conversion", __func__);
	return 1;
}

__setup("stmmaceth=", stmmac_cmdline_opt);
#endif /* MODULE */

static int __init stmmac_init(void)
{
#ifdef CONFIG_DEBUG_FS
	/* Create debugfs main directory if it doesn't exist yet */
	if (!stmmac_fs_dir)
		stmmac_fs_dir = debugfs_create_dir(STMMAC_RESOURCE_NAME, NULL);
	register_netdevice_notifier(&stmmac_notifier);
#endif

	return 0;
}

static void __exit stmmac_exit(void)
{
#ifdef CONFIG_DEBUG_FS
	unregister_netdevice_notifier(&stmmac_notifier);
	debugfs_remove_recursive(stmmac_fs_dir);
#endif
}

module_init(stmmac_init)
module_exit(stmmac_exit)

MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet device driver");
MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
MODULE_LICENSE("GPL");