// SPDX-License-Identifier: GPL-2.0-only
/*******************************************************************************
  This is the driver for the ST MAC 10/100/1000 on-chip Ethernet controllers.
  ST Ethernet IPs are built around a Synopsys IP Core.

	Copyright(C) 2007-2011 STMicroelectronics Ltd


  Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>

  Documentation available at:
	http://www.stlinux.com
  Support available at:
	https://bugzilla.stlinux.com/
*******************************************************************************/

#include <linux/clk.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/if_ether.h>
#include <linux/crc32.h>
#include <linux/mii.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/pm_runtime.h>
#include <linux/prefetch.h>
#include <linux/pinctrl/consumer.h>
#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#endif /* CONFIG_DEBUG_FS */
#include <linux/net_tstamp.h>
#include <linux/phylink.h>
#include <linux/udp.h>
#include <linux/bpf_trace.h>
#include <net/page_pool/helpers.h>
#include <net/pkt_cls.h>
#include <net/xdp_sock_drv.h>
#include "stmmac_ptp.h"
#include "stmmac.h"
#include "stmmac_xdp.h"
#include <linux/reset.h>
#include <linux/of_mdio.h>
#include "dwmac1000.h"
#include "dwxgmac2.h"
#include "hwif.h"

/* As long as the interface is active, we keep the timestamping counter enabled
 * with fine resolution and binary rollover. This avoids non-monotonic behavior
 * (clock jumps) when changing timestamping settings at runtime.
 */
#define STMMAC_HWTS_ACTIVE	(PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | \
				 PTP_TCR_TSCTRLSSR)

#define	STMMAC_ALIGN(x)		ALIGN(ALIGN(x, SMP_CACHE_BYTES), 16)
#define	TSO_MAX_BUFF_SIZE	(SZ_16K - 1)

/* Module parameters */
#define TX_TIMEO	5000
static int watchdog = TX_TIMEO;
module_param(watchdog, int, 0644);
MODULE_PARM_DESC(watchdog, "Transmit timeout in milliseconds (default 5s)");

static int debug = -1;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)");

static int phyaddr = -1;
module_param(phyaddr, int, 0444);
MODULE_PARM_DESC(phyaddr, "Physical device address");

#define STMMAC_TX_THRESH(x)	((x)->dma_conf.dma_tx_size / 4)
#define STMMAC_RX_THRESH(x)	((x)->dma_conf.dma_rx_size / 4)

/* Limit to make sure XDP TX and slow path can coexist */
#define STMMAC_XSK_TX_BUDGET_MAX	256
#define STMMAC_TX_XSK_AVAIL		16
#define STMMAC_RX_FILL_BATCH		16

#define STMMAC_XDP_PASS		0
#define STMMAC_XDP_CONSUMED	BIT(0)
#define STMMAC_XDP_TX		BIT(1)
#define STMMAC_XDP_REDIRECT	BIT(2)

static int flow_ctrl = FLOW_AUTO;
module_param(flow_ctrl, int, 0644);
MODULE_PARM_DESC(flow_ctrl, "Flow control ability [on/off]");

static int pause = PAUSE_TIME;
module_param(pause, int, 0644);
MODULE_PARM_DESC(pause, "Flow Control Pause Time");

#define TC_DEFAULT 64
static int tc = TC_DEFAULT;
module_param(tc, int, 0644);
MODULE_PARM_DESC(tc, "DMA threshold control value");

#define	DEFAULT_BUFSIZE	1536
static int buf_sz = DEFAULT_BUFSIZE;
module_param(buf_sz, int, 0644);
MODULE_PARM_DESC(buf_sz, "DMA buffer size");

#define	STMMAC_RX_COPYBREAK	256

static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
				      NETIF_MSG_LINK | NETIF_MSG_IFUP |
				      NETIF_MSG_IFDOWN | NETIF_MSG_TIMER);

#define STMMAC_DEFAULT_LPI_TIMER	1000
static int eee_timer = STMMAC_DEFAULT_LPI_TIMER;
module_param(eee_timer, int, 0644);
MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec");
#define STMMAC_LPI_T(x) (jiffies + usecs_to_jiffies(x))

/* By default the driver will use the ring mode to manage tx and rx descriptors,
 * but allow user to force to use the chain instead of the ring
 */
static unsigned int chain_mode;
module_param(chain_mode, int, 0444);
MODULE_PARM_DESC(chain_mode, "To use chain instead of ring mode");

static irqreturn_t stmmac_interrupt(int irq, void *dev_id);
/* For MSI interrupts handling */
static irqreturn_t stmmac_mac_interrupt(int irq, void *dev_id);
static irqreturn_t stmmac_safety_interrupt(int irq, void *dev_id);
static irqreturn_t stmmac_msi_intr_tx(int irq, void *data);
static irqreturn_t stmmac_msi_intr_rx(int irq, void *data);
static void stmmac_reset_rx_queue(struct stmmac_priv *priv, u32 queue);
static void stmmac_reset_tx_queue(struct stmmac_priv *priv, u32 queue);
static void stmmac_reset_queues_param(struct stmmac_priv *priv);
static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue);
static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue);
static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
					  u32 rxmode, u32 chan);

#ifdef CONFIG_DEBUG_FS
static const struct net_device_ops stmmac_netdev_ops;
static void stmmac_init_fs(struct net_device *dev);
static void stmmac_exit_fs(struct net_device *dev);
#endif

#define STMMAC_COAL_TIMER(x) (ns_to_ktime((x) * NSEC_PER_USEC))

int stmmac_bus_clks_config(struct stmmac_priv *priv, bool enabled)
{
	int ret = 0;

	if (enabled) {
		ret = clk_prepare_enable(priv->plat->stmmac_clk);
		if (ret)
			return ret;
		ret = clk_prepare_enable(priv->plat->pclk);
		if (ret) {
			clk_disable_unprepare(priv->plat->stmmac_clk);
			return ret;
		}
		if (priv->plat->clks_config) {
			ret = priv->plat->clks_config(priv->plat->bsp_priv, enabled);
			if (ret) {
				clk_disable_unprepare(priv->plat->stmmac_clk);
				clk_disable_unprepare(priv->plat->pclk);
				return ret;
			}
		}
	} else {
		clk_disable_unprepare(priv->plat->stmmac_clk);
		clk_disable_unprepare(priv->plat->pclk);
		if (priv->plat->clks_config)
			priv->plat->clks_config(priv->plat->bsp_priv, enabled);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(stmmac_bus_clks_config);

/**
 * stmmac_verify_args - verify the driver parameters.
 * Description: it checks the driver parameters and sets a default in case of
 * errors.
 */
static void stmmac_verify_args(void)
{
	if (unlikely(watchdog < 0))
		watchdog = TX_TIMEO;
	if (unlikely((buf_sz < DEFAULT_BUFSIZE) || (buf_sz > BUF_SIZE_16KiB)))
		buf_sz = DEFAULT_BUFSIZE;
	if (unlikely(flow_ctrl > 1))
		flow_ctrl = FLOW_AUTO;
	else if (likely(flow_ctrl < 0))
		flow_ctrl = FLOW_OFF;
	if (unlikely((pause < 0) || (pause > 0xffff)))
		pause = PAUSE_TIME;
	if (eee_timer < 0)
		eee_timer = STMMAC_DEFAULT_LPI_TIMER;
}

static void __stmmac_disable_all_queues(struct stmmac_priv *priv)
{
	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
	u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
	u32 queue;

	for (queue = 0; queue < maxq; queue++) {
		struct stmmac_channel *ch = &priv->channel[queue];

		if (stmmac_xdp_is_enabled(priv) &&
		    test_bit(queue, priv->af_xdp_zc_qps)) {
			napi_disable(&ch->rxtx_napi);
			continue;
		}

		if (queue < rx_queues_cnt)
			napi_disable(&ch->rx_napi);
		if (queue < tx_queues_cnt)
			napi_disable(&ch->tx_napi);
	}
}

/**
 * stmmac_disable_all_queues - Disable all queues
 * @priv: driver private structure
 */
static void stmmac_disable_all_queues(struct stmmac_priv *priv)
{
	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
	struct stmmac_rx_queue *rx_q;
	u32 queue;

	/* synchronize_rcu() needed for pending XDP buffers to drain */
	for (queue = 0; queue < rx_queues_cnt; queue++) {
		rx_q = &priv->dma_conf.rx_queue[queue];
		if (rx_q->xsk_pool) {
			synchronize_rcu();
			break;
		}
	}

	__stmmac_disable_all_queues(priv);
}

/**
 * stmmac_enable_all_queues - Enable all queues
 * @priv: driver private structure
 */
static void stmmac_enable_all_queues(struct stmmac_priv *priv)
{
	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
	u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
	u32 queue;

	for (queue = 0; queue < maxq; queue++) {
		struct stmmac_channel *ch = &priv->channel[queue];

		if (stmmac_xdp_is_enabled(priv) &&
		    test_bit(queue, priv->af_xdp_zc_qps)) {
			napi_enable(&ch->rxtx_napi);
			continue;
		}

		if (queue < rx_queues_cnt)
			napi_enable(&ch->rx_napi);
		if (queue < tx_queues_cnt)
			napi_enable(&ch->tx_napi);
	}
}

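/* Schedule the deferred service task, unless the interface is going down or a
 * run is already pending.
 */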
static void stmmac_service_event_schedule(struct stmmac_priv *priv)
{
	if (!test_bit(STMMAC_DOWN, &priv->state) &&
	    !test_and_set_bit(STMMAC_SERVICE_SCHED, &priv->state))
		queue_work(priv->wq, &priv->service_task);
}

static void stmmac_global_err(struct stmmac_priv *priv)
{
	netif_carrier_off(priv->dev);
	set_bit(STMMAC_RESET_REQUESTED, &priv->state);
	stmmac_service_event_schedule(priv);
}

/**
 * stmmac_clk_csr_set - dynamically set the MDC clock
 * @priv: driver private structure
 * Description: this is to dynamically set the MDC clock according to the csr
 * clock input.
 * Note:
 *	If a specific clk_csr value is passed from the platform
 *	this means that the CSR Clock Range selection cannot be
 *	changed at run-time and it is fixed (as reported in the driver
 *	documentation). Vice versa the driver will try to set the MDC
 *	clock dynamically according to the actual clock input.
 */
static void stmmac_clk_csr_set(struct stmmac_priv *priv)
{
	u32 clk_rate;

	clk_rate = clk_get_rate(priv->plat->stmmac_clk);

	/* Platform provided default clk_csr would be assumed valid
	 * for all other cases except for the below mentioned ones.
	 * For values higher than the IEEE 802.3 specified frequency
	 * we cannot estimate the proper divider as the frequency of
	 * clk_csr_i is not known. So we do not change the default
	 * divider.
	 */
	if (!(priv->clk_csr & MAC_CSR_H_FRQ_MASK)) {
		if (clk_rate < CSR_F_35M)
			priv->clk_csr = STMMAC_CSR_20_35M;
		else if ((clk_rate >= CSR_F_35M) && (clk_rate < CSR_F_60M))
			priv->clk_csr = STMMAC_CSR_35_60M;
		else if ((clk_rate >= CSR_F_60M) && (clk_rate < CSR_F_100M))
			priv->clk_csr = STMMAC_CSR_60_100M;
		else if ((clk_rate >= CSR_F_100M) && (clk_rate < CSR_F_150M))
			priv->clk_csr = STMMAC_CSR_100_150M;
		else if ((clk_rate >= CSR_F_150M) && (clk_rate < CSR_F_250M))
			priv->clk_csr = STMMAC_CSR_150_250M;
		else if ((clk_rate >= CSR_F_250M) && (clk_rate <= CSR_F_300M))
			priv->clk_csr = STMMAC_CSR_250_300M;
	}

	if (priv->plat->flags & STMMAC_FLAG_HAS_SUN8I) {
		if (clk_rate > 160000000)
			priv->clk_csr = 0x03;
		else if (clk_rate > 80000000)
			priv->clk_csr = 0x02;
		else if (clk_rate > 40000000)
			priv->clk_csr = 0x01;
		else
			priv->clk_csr = 0;
	}

	if (priv->plat->has_xgmac) {
		if (clk_rate > 400000000)
			priv->clk_csr = 0x5;
		else if (clk_rate > 350000000)
			priv->clk_csr = 0x4;
		else if (clk_rate > 300000000)
			priv->clk_csr = 0x3;
		else if (clk_rate > 250000000)
			priv->clk_csr = 0x2;
		else if (clk_rate > 150000000)
			priv->clk_csr = 0x1;
		else
			priv->clk_csr = 0x0;
	}
}

static void print_pkt(unsigned char *buf, int len)
{
	pr_debug("len = %d byte, buf addr: 0x%p\n", len, buf);
	print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, buf, len);
}

static inline u32 stmmac_tx_avail(struct stmmac_priv *priv, u32 queue)
{
	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
	u32 avail;

	if (tx_q->dirty_tx > tx_q->cur_tx)
		avail = tx_q->dirty_tx - tx_q->cur_tx - 1;
	else
		avail = priv->dma_conf.dma_tx_size - tx_q->cur_tx + tx_q->dirty_tx - 1;

	return avail;
}

/**
 * stmmac_rx_dirty - Get RX queue dirty
 * @priv: driver private structure
 * @queue: RX queue index
 */
static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv, u32 queue)
{
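	/* Number of RX descriptors the hardware has consumed and software has
	 * not yet refilled, taking ring wrap-around into account.
	 */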
	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
	u32 dirty;

	if (rx_q->dirty_rx <= rx_q->cur_rx)
		dirty = rx_q->cur_rx - rx_q->dirty_rx;
	else
		dirty = priv->dma_conf.dma_rx_size - rx_q->dirty_rx + rx_q->cur_rx;

	return dirty;
}

static void stmmac_lpi_entry_timer_config(struct stmmac_priv *priv, bool en)
{
	int tx_lpi_timer;

	/* Clear/set the SW EEE timer flag based on LPI ET enablement */
	priv->eee_sw_timer_en = en ? 0 : 1;
	tx_lpi_timer = en ? priv->tx_lpi_timer : 0;
	stmmac_set_eee_lpi_timer(priv, priv->hw, tx_lpi_timer);
}

/**
 * stmmac_enable_eee_mode - check and enter in LPI mode
 * @priv: driver private structure
 * Description: this function is to verify and enter in LPI mode in case of
 * EEE.
 */
static int stmmac_enable_eee_mode(struct stmmac_priv *priv)
{
	u32 tx_cnt = priv->plat->tx_queues_to_use;
	u32 queue;

	/* check if all TX queues have the work finished */
	for (queue = 0; queue < tx_cnt; queue++) {
		struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];

		if (tx_q->dirty_tx != tx_q->cur_tx)
			return -EBUSY; /* still unfinished work */
	}

	/* Check and enter in LPI mode */
	if (!priv->tx_path_in_lpi_mode)
		stmmac_set_eee_mode(priv, priv->hw,
			priv->plat->flags & STMMAC_FLAG_EN_TX_LPI_CLOCKGATING);
	return 0;
}

/**
 * stmmac_disable_eee_mode - disable and exit from LPI mode
 * @priv: driver private structure
 * Description: this function is to exit and disable EEE when the LPI
 * state is true. This is called by the xmit.
 */
void stmmac_disable_eee_mode(struct stmmac_priv *priv)
{
	if (!priv->eee_sw_timer_en) {
		stmmac_lpi_entry_timer_config(priv, 0);
		return;
	}

	stmmac_reset_eee_mode(priv, priv->hw);
	del_timer_sync(&priv->eee_ctrl_timer);
	priv->tx_path_in_lpi_mode = false;
}

/**
 * stmmac_eee_ctrl_timer - EEE TX SW timer.
 * @t: timer_list struct containing private info
 * Description:
 * if there is no data transfer and if we are not in LPI state,
 * then MAC Transmitter can be moved to LPI state.
 */
static void stmmac_eee_ctrl_timer(struct timer_list *t)
{
	struct stmmac_priv *priv = from_timer(priv, t, eee_ctrl_timer);

	if (stmmac_enable_eee_mode(priv))
		mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer));
}

/**
 * stmmac_eee_init - init EEE
 * @priv: driver private structure
 * Description:
 * if the GMAC supports the EEE (from the HW cap reg) and the phy device
 * can also manage EEE, this function enables the LPI state and starts the
 * related timer.
 */
bool stmmac_eee_init(struct stmmac_priv *priv)
{
	int eee_tw_timer = priv->eee_tw_timer;

	/* Using PCS we cannot deal with the phy registers at this stage
	 * so we do not support extra features like EEE.
	 */
	if (priv->hw->pcs == STMMAC_PCS_TBI ||
	    priv->hw->pcs == STMMAC_PCS_RTBI)
		return false;

	/* Check if MAC core supports the EEE feature. */
	if (!priv->dma_cap.eee)
		return false;

	mutex_lock(&priv->lock);

	/* Check if it needs to be deactivated */
	if (!priv->eee_active) {
		if (priv->eee_enabled) {
			netdev_dbg(priv->dev, "disable EEE\n");
			stmmac_lpi_entry_timer_config(priv, 0);
			del_timer_sync(&priv->eee_ctrl_timer);
			stmmac_set_eee_timer(priv, priv->hw, 0, eee_tw_timer);
			if (priv->hw->xpcs)
				xpcs_config_eee(priv->hw->xpcs,
						priv->plat->mult_fact_100ns,
						false);
		}
		mutex_unlock(&priv->lock);
		return false;
	}

	if (priv->eee_active && !priv->eee_enabled) {
		timer_setup(&priv->eee_ctrl_timer, stmmac_eee_ctrl_timer, 0);
		stmmac_set_eee_timer(priv, priv->hw, STMMAC_DEFAULT_LIT_LS,
				     eee_tw_timer);
		if (priv->hw->xpcs)
			xpcs_config_eee(priv->hw->xpcs,
					priv->plat->mult_fact_100ns,
					true);
	}

	if (priv->plat->has_gmac4 && priv->tx_lpi_timer <= STMMAC_ET_MAX) {
		del_timer_sync(&priv->eee_ctrl_timer);
		priv->tx_path_in_lpi_mode = false;
		stmmac_lpi_entry_timer_config(priv, 1);
	} else {
		stmmac_lpi_entry_timer_config(priv, 0);
		mod_timer(&priv->eee_ctrl_timer,
			  STMMAC_LPI_T(priv->tx_lpi_timer));
	}

	mutex_unlock(&priv->lock);
	netdev_dbg(priv->dev, "Energy-Efficient Ethernet initialized\n");
	return true;
}

/* stmmac_get_tx_hwtstamp - get HW TX timestamps
 * @priv: driver private structure
 * @p : descriptor pointer
 * @skb : the socket buffer
 * Description :
 * This function reads the timestamp from the descriptor, passes it to the
 * stack and also performs some sanity checks.
 */
static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv,
				   struct dma_desc *p, struct sk_buff *skb)
{
	struct skb_shared_hwtstamps shhwtstamp;
	bool found = false;
	u64 ns = 0;

	if (!priv->hwts_tx_en)
		return;

	/* exit if skb doesn't support hw tstamp */
	if (likely(!skb || !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)))
		return;

	/* check tx tstamp status */
	if (stmmac_get_tx_timestamp_status(priv, p)) {
		stmmac_get_timestamp(priv, p, priv->adv_ts, &ns);
		found = true;
	} else if (!stmmac_get_mac_tx_timestamp(priv, priv->hw, &ns)) {
		found = true;
	}

	if (found) {
		ns -= priv->plat->cdc_error_adj;

		memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
		shhwtstamp.hwtstamp = ns_to_ktime(ns);

		netdev_dbg(priv->dev, "get valid TX hw timestamp %llu\n", ns);
		/* pass tstamp to stack */
		skb_tstamp_tx(skb, &shhwtstamp);
	}
}

/* stmmac_get_rx_hwtstamp - get HW RX timestamps
 * @priv: driver private structure
 * @p : descriptor pointer
 * @np : next descriptor pointer
 * @skb : the socket buffer
 * Description :
 * This function reads the received packet's timestamp from the descriptor
 * and passes it to the stack. It also performs some sanity checks.
 */
static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p,
				   struct dma_desc *np, struct sk_buff *skb)
{
	struct skb_shared_hwtstamps *shhwtstamp = NULL;
	struct dma_desc *desc = p;
	u64 ns = 0;

	if (!priv->hwts_rx_en)
		return;
	/* For GMAC4, the valid timestamp is from CTX next desc. */
	if (priv->plat->has_gmac4 || priv->plat->has_xgmac)
		desc = np;

	/* Check if timestamp is available */
	if (stmmac_get_rx_timestamp_status(priv, p, np, priv->adv_ts)) {
		stmmac_get_timestamp(priv, desc, priv->adv_ts, &ns);

		ns -= priv->plat->cdc_error_adj;

		netdev_dbg(priv->dev, "get valid RX hw timestamp %llu\n", ns);
		shhwtstamp = skb_hwtstamps(skb);
		memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
		shhwtstamp->hwtstamp = ns_to_ktime(ns);
	} else {
		netdev_dbg(priv->dev, "cannot get RX hw timestamp\n");
	}
}

/**
 * stmmac_hwtstamp_set - control hardware timestamping.
 * @dev: device pointer.
 * @ifr: An IOCTL specific structure, that can contain a pointer to
 * a proprietary structure used to pass information to the driver.
 * Description:
 * This function configures the MAC to enable/disable both outgoing(TX)
 * and incoming(RX) packets time stamping based on user input.
 * Return Value:
 * 0 on success and an appropriate -ve integer on failure.
 */
static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	struct hwtstamp_config config;
	u32 ptp_v2 = 0;
	u32 tstamp_all = 0;
	u32 ptp_over_ipv4_udp = 0;
	u32 ptp_over_ipv6_udp = 0;
	u32 ptp_over_ethernet = 0;
	u32 snap_type_sel = 0;
	u32 ts_master_en = 0;
	u32 ts_event_en = 0;

	if (!(priv->dma_cap.time_stamp || priv->adv_ts)) {
		netdev_alert(priv->dev, "No support for HW time stamping\n");
		priv->hwts_tx_en = 0;
		priv->hwts_rx_en = 0;

		return -EOPNOTSUPP;
	}

	if (copy_from_user(&config, ifr->ifr_data,
			   sizeof(config)))
		return -EFAULT;

	netdev_dbg(priv->dev, "%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n",
		   __func__, config.flags, config.tx_type, config.rx_filter);

	if (config.tx_type != HWTSTAMP_TX_OFF &&
	    config.tx_type != HWTSTAMP_TX_ON)
		return -ERANGE;

	if (priv->adv_ts) {
		switch (config.rx_filter) {
		case HWTSTAMP_FILTER_NONE:
			/* time stamp no incoming packet at all */
			config.rx_filter = HWTSTAMP_FILTER_NONE;
			break;

		case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
			/* PTP v1, UDP, any kind of event packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
			/* 'xmac' hardware can support Sync, Pdelay_Req and
			 * Pdelay_resp by setting bit14 and bits17/16 to 01
			 * This leaves Delay_Req timestamps out.
			 * Enable all events *and* general purpose message
			 * timestamping
			 */
			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
			/* PTP v1, UDP, Sync packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
			/* take time stamp for SYNC messages only */
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
			/* PTP v1, UDP, Delay_req packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
			/* take time stamp for Delay_Req messages only */
			ts_master_en = PTP_TCR_TSMSTRENA;
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
			/* PTP v2, UDP, any kind of event packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for all event messages */
			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
			/* PTP v2, UDP, Sync packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for SYNC messages only */
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
			/* PTP v2, UDP, Delay_req packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for Delay_Req messages only */
			ts_master_en = PTP_TCR_TSMSTRENA;
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_EVENT:
			/* PTP v2/802.AS1 any layer, any kind of event packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
			if (priv->synopsys_id < DWMAC_CORE_4_10)
				ts_event_en = PTP_TCR_TSEVNTENA;
			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			ptp_over_ethernet = PTP_TCR_TSIPENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_SYNC:
			/* PTP v2/802.AS1, any layer, Sync packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for SYNC messages only */
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			ptp_over_ethernet = PTP_TCR_TSIPENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
			/* PTP v2/802.AS1, any layer, Delay_req packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for Delay_Req messages only */
			ts_master_en = PTP_TCR_TSMSTRENA;
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			ptp_over_ethernet = PTP_TCR_TSIPENA;
			break;

		case HWTSTAMP_FILTER_NTP_ALL:
		case HWTSTAMP_FILTER_ALL:
			/* time stamp any incoming packet */
			config.rx_filter = HWTSTAMP_FILTER_ALL;
			tstamp_all = PTP_TCR_TSENALL;
			break;

		default:
			return -ERANGE;
		}
	} else {
		switch (config.rx_filter) {
		case HWTSTAMP_FILTER_NONE:
			config.rx_filter = HWTSTAMP_FILTER_NONE;
			break;
		default:
			/* PTP v1, UDP, any kind of event packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
			break;
		}
	}
	priv->hwts_rx_en = ((config.rx_filter == HWTSTAMP_FILTER_NONE) ? 0 : 1);
	priv->hwts_tx_en = config.tx_type == HWTSTAMP_TX_ON;

	priv->systime_flags = STMMAC_HWTS_ACTIVE;

	if (priv->hwts_tx_en || priv->hwts_rx_en) {
		priv->systime_flags |= tstamp_all | ptp_v2 |
				       ptp_over_ethernet | ptp_over_ipv6_udp |
				       ptp_over_ipv4_udp | ts_event_en |
				       ts_master_en | snap_type_sel;
	}

	stmmac_config_hw_tstamping(priv, priv->ptpaddr, priv->systime_flags);

	memcpy(&priv->tstamp_config, &config, sizeof(config));

	return copy_to_user(ifr->ifr_data, &config,
			    sizeof(config)) ? -EFAULT : 0;
}

/**
 * stmmac_hwtstamp_get - read hardware timestamping.
 * @dev: device pointer.
 * @ifr: An IOCTL specific structure, that can contain a pointer to
 * a proprietary structure used to pass information to the driver.
 * Description:
 * This function obtains the current hardware timestamping settings
 * as requested.
 */
static int stmmac_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	struct hwtstamp_config *config = &priv->tstamp_config;

	if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
		return -EOPNOTSUPP;

	return copy_to_user(ifr->ifr_data, config,
			    sizeof(*config)) ? -EFAULT : 0;
}

/**
 * stmmac_init_tstamp_counter - init hardware timestamping counter
 * @priv: driver private structure
 * @systime_flags: timestamping flags
 * Description:
 * Initialize hardware counter for packet timestamping.
 * This is valid as long as the interface is open and not suspended.
 * Will be rerun after resuming from suspend, in which case the timestamping
 * flags updated by stmmac_hwtstamp_set() also need to be restored.
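 *
 * The default addend programmed below is 2^32 * (1e9 / sec_inc) / clk_ptp_rate,
 * which makes the hardware's 32-bit accumulator overflow once per sub-second
 * increment (every sec_inc nanoseconds).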
 */
int stmmac_init_tstamp_counter(struct stmmac_priv *priv, u32 systime_flags)
{
	bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
	struct timespec64 now;
	u32 sec_inc = 0;
	u64 temp = 0;

	if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
		return -EOPNOTSUPP;

	stmmac_config_hw_tstamping(priv, priv->ptpaddr, systime_flags);
	priv->systime_flags = systime_flags;

	/* program Sub Second Increment reg */
	stmmac_config_sub_second_increment(priv, priv->ptpaddr,
					   priv->plat->clk_ptp_rate,
					   xmac, &sec_inc);
	temp = div_u64(1000000000ULL, sec_inc);

	/* Store sub second increment for later use */
	priv->sub_second_inc = sec_inc;

	/* calculate default added value:
	 * formula is :
	 * addend = (2^32)/freq_div_ratio;
	 * where, freq_div_ratio = 1e9ns/sec_inc
	 */
	temp = (u64)(temp << 32);
	priv->default_addend = div_u64(temp, priv->plat->clk_ptp_rate);
	stmmac_config_addend(priv, priv->ptpaddr, priv->default_addend);

	/* initialize system time */
	ktime_get_real_ts64(&now);

	/* lower 32 bits of tv_sec are safe until y2106 */
	stmmac_init_systime(priv, priv->ptpaddr, (u32)now.tv_sec, now.tv_nsec);

	return 0;
}
EXPORT_SYMBOL_GPL(stmmac_init_tstamp_counter);

/**
 * stmmac_init_ptp - init PTP
 * @priv: driver private structure
 * Description: this is to verify if the HW supports the PTPv1 or PTPv2.
 * This is done by looking at the HW cap. register.
 * This function also registers the ptp driver.
 */
static int stmmac_init_ptp(struct stmmac_priv *priv)
{
	bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
	int ret;

	if (priv->plat->ptp_clk_freq_config)
		priv->plat->ptp_clk_freq_config(priv);

	ret = stmmac_init_tstamp_counter(priv, STMMAC_HWTS_ACTIVE);
	if (ret)
		return ret;

	priv->adv_ts = 0;
	/* Check if adv_ts can be enabled for dwmac 4.x / xgmac core */
	if (xmac && priv->dma_cap.atime_stamp)
		priv->adv_ts = 1;
	/* Dwmac 3.x core with extend_desc can support adv_ts */
	else if (priv->extend_desc && priv->dma_cap.atime_stamp)
		priv->adv_ts = 1;

	if (priv->dma_cap.time_stamp)
		netdev_info(priv->dev, "IEEE 1588-2002 Timestamp supported\n");

	if (priv->adv_ts)
		netdev_info(priv->dev,
			    "IEEE 1588-2008 Advanced Timestamp supported\n");

	priv->hwts_tx_en = 0;
	priv->hwts_rx_en = 0;

	if (priv->plat->flags & STMMAC_FLAG_HWTSTAMP_CORRECT_LATENCY)
		stmmac_hwtstamp_correct_latency(priv, priv);

	return 0;
}

static void stmmac_release_ptp(struct stmmac_priv *priv)
{
	clk_disable_unprepare(priv->plat->clk_ptp_ref);
	stmmac_ptp_unregister(priv);
}

/**
 * stmmac_mac_flow_ctrl - Configure flow control in all queues
 * @priv: driver private structure
 * @duplex: duplex passed to the next function
 * Description: It is used for configuring the flow control in all queues
 */
static void stmmac_mac_flow_ctrl(struct stmmac_priv *priv, u32 duplex)
{
	u32 tx_cnt = priv->plat->tx_queues_to_use;

	stmmac_flow_ctrl(priv, priv->hw, duplex, priv->flow_ctrl,
			 priv->pause, tx_cnt);
}

static struct phylink_pcs *stmmac_mac_select_pcs(struct phylink_config *config,
						 phy_interface_t interface)
{
	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));

	if (priv->hw->xpcs)
		return &priv->hw->xpcs->pcs;

	if (priv->hw->lynx_pcs)
		return priv->hw->lynx_pcs;

	return NULL;
}

static void stmmac_mac_config(struct phylink_config *config, unsigned int mode,
			      const struct phylink_link_state *state)
{
	/* Nothing to do, xpcs_config() handles everything */
}

static void stmmac_fpe_link_state_handle(struct stmmac_priv *priv, bool is_up)
{
	struct stmmac_fpe_cfg *fpe_cfg = priv->plat->fpe_cfg;
	enum stmmac_fpe_state *lo_state = &fpe_cfg->lo_fpe_state;
	enum stmmac_fpe_state *lp_state = &fpe_cfg->lp_fpe_state;
	bool *hs_enable = &fpe_cfg->hs_enable;

	if (is_up && *hs_enable) {
		stmmac_fpe_send_mpacket(priv, priv->ioaddr, fpe_cfg,
					MPACKET_VERIFY);
	} else {
		*lo_state = FPE_STATE_OFF;
		*lp_state = FPE_STATE_OFF;
	}
}

static void stmmac_mac_link_down(struct phylink_config *config,
				 unsigned int mode, phy_interface_t interface)
{
	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));

	stmmac_mac_set(priv, priv->ioaddr, false);
	priv->eee_active = false;
	priv->tx_lpi_enabled = false;
	priv->eee_enabled = stmmac_eee_init(priv);
	stmmac_set_eee_pls(priv, priv->hw, false);

	if (priv->dma_cap.fpesel)
		stmmac_fpe_link_state_handle(priv, false);
}

static void stmmac_mac_link_up(struct phylink_config *config,
			       struct phy_device *phy,
			       unsigned int mode, phy_interface_t interface,
			       int speed, int duplex,
			       bool tx_pause, bool rx_pause)
{
	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
	u32 old_ctrl, ctrl;

	if ((priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP) &&
	    priv->plat->serdes_powerup)
		priv->plat->serdes_powerup(priv->dev, priv->plat->bsp_priv);

	old_ctrl = readl(priv->ioaddr + MAC_CTRL_REG);
	ctrl = old_ctrl & ~priv->hw->link.speed_mask;

	if (interface == PHY_INTERFACE_MODE_USXGMII) {
		switch (speed) {
		case SPEED_10000:
			ctrl |= priv->hw->link.xgmii.speed10000;
			break;
		case SPEED_5000:
			ctrl |= priv->hw->link.xgmii.speed5000;
			break;
		case SPEED_2500:
			ctrl |= priv->hw->link.xgmii.speed2500;
			break;
		default:
			return;
		}
	} else if (interface == PHY_INTERFACE_MODE_XLGMII) {
		switch (speed) {
		case SPEED_100000:
			ctrl |= priv->hw->link.xlgmii.speed100000;
			break;
		case SPEED_50000:
			ctrl |= priv->hw->link.xlgmii.speed50000;
			break;
		case SPEED_40000:
			ctrl |= priv->hw->link.xlgmii.speed40000;
			break;
		case SPEED_25000:
			ctrl |= priv->hw->link.xlgmii.speed25000;
			break;
		case SPEED_10000:
			ctrl |= priv->hw->link.xgmii.speed10000;
			break;
		case SPEED_2500:
			ctrl |= priv->hw->link.speed2500;
			break;
		case SPEED_1000:
			ctrl |= priv->hw->link.speed1000;
			break;
		default:
			return;
		}
	} else {
		switch (speed) {
		case SPEED_2500:
			ctrl |= priv->hw->link.speed2500;
			break;
		case SPEED_1000:
			ctrl |= priv->hw->link.speed1000;
			break;
		case SPEED_100:
			ctrl |= priv->hw->link.speed100;
			break;
		case SPEED_10:
			ctrl |= priv->hw->link.speed10;
			break;
		default:
			return;
		}
	}

	priv->speed = speed;

	if (priv->plat->fix_mac_speed)
		priv->plat->fix_mac_speed(priv->plat->bsp_priv, speed, mode);

	if (!duplex)
		ctrl &= ~priv->hw->link.duplex;
	else
		ctrl |= priv->hw->link.duplex;

	/* Flow Control operation */
	if (rx_pause && tx_pause)
		priv->flow_ctrl = FLOW_AUTO;
	else if (rx_pause && !tx_pause)
		priv->flow_ctrl = FLOW_RX;
	else if (!rx_pause && tx_pause)
		priv->flow_ctrl = FLOW_TX;
	else
		priv->flow_ctrl = FLOW_OFF;

	stmmac_mac_flow_ctrl(priv, duplex);

	if (ctrl != old_ctrl)
		writel(ctrl, priv->ioaddr + MAC_CTRL_REG);

	stmmac_mac_set(priv, priv->ioaddr, true);
	if (phy && priv->dma_cap.eee) {
		priv->eee_active =
			phy_init_eee(phy, !(priv->plat->flags &
				STMMAC_FLAG_RX_CLK_RUNS_IN_LPI)) >= 0;
		priv->eee_enabled = stmmac_eee_init(priv);
		priv->tx_lpi_enabled = priv->eee_enabled;
		stmmac_set_eee_pls(priv, priv->hw, true);
	}

	if (priv->dma_cap.fpesel)
		stmmac_fpe_link_state_handle(priv, true);

	if (priv->plat->flags & STMMAC_FLAG_HWTSTAMP_CORRECT_LATENCY)
		stmmac_hwtstamp_correct_latency(priv, priv);
}

static const struct phylink_mac_ops stmmac_phylink_mac_ops = {
	.mac_select_pcs = stmmac_mac_select_pcs,
	.mac_config = stmmac_mac_config,
	.mac_link_down = stmmac_mac_link_down,
	.mac_link_up = stmmac_mac_link_up,
};

/**
 * stmmac_check_pcs_mode - verify if RGMII/SGMII is supported
 * @priv: driver private structure
 * Description: this is to verify if the HW supports the PCS.
 * Physical Coding Sublayer (PCS) interface that can be used when the MAC is
 * configured for the TBI, RTBI, or SGMII PHY interface.
 */
static void stmmac_check_pcs_mode(struct stmmac_priv *priv)
{
	int interface = priv->plat->mac_interface;

	if (priv->dma_cap.pcs) {
		if ((interface == PHY_INTERFACE_MODE_RGMII) ||
		    (interface == PHY_INTERFACE_MODE_RGMII_ID) ||
		    (interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
		    (interface == PHY_INTERFACE_MODE_RGMII_TXID)) {
			netdev_dbg(priv->dev, "PCS RGMII support enabled\n");
			priv->hw->pcs = STMMAC_PCS_RGMII;
		} else if (interface == PHY_INTERFACE_MODE_SGMII) {
			netdev_dbg(priv->dev, "PCS SGMII support enabled\n");
			priv->hw->pcs = STMMAC_PCS_SGMII;
		}
	}
}

/**
 * stmmac_init_phy - PHY initialization
 * @dev: net device structure
 * Description: it initializes the driver's PHY state, and attaches the PHY
 * to the mac driver.
 * Return value:
 * 0 on success
 */
static int stmmac_init_phy(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	struct fwnode_handle *phy_fwnode;
	struct fwnode_handle *fwnode;
	int ret;

	if (!phylink_expects_phy(priv->phylink))
		return 0;

	fwnode = priv->plat->port_node;
	if (!fwnode)
		fwnode = dev_fwnode(priv->device);

	if (fwnode)
		phy_fwnode = fwnode_get_phy_node(fwnode);
	else
		phy_fwnode = NULL;

	/* Some DT bindings do not set-up the PHY handle. Let's try to
	 * manually parse it
	 */
	if (!phy_fwnode || IS_ERR(phy_fwnode)) {
		int addr = priv->plat->phy_addr;
		struct phy_device *phydev;

		if (addr < 0) {
			netdev_err(priv->dev, "no phy found\n");
			return -ENODEV;
		}

		phydev = mdiobus_get_phy(priv->mii, addr);
		if (!phydev) {
			netdev_err(priv->dev, "no phy at addr %d\n", addr);
			return -ENODEV;
		}

		ret = phylink_connect_phy(priv->phylink, phydev);
	} else {
		fwnode_handle_put(phy_fwnode);
		ret = phylink_fwnode_phy_connect(priv->phylink, fwnode, 0);
	}

	if (!priv->plat->pmt) {
		struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };

		phylink_ethtool_get_wol(priv->phylink, &wol);
		device_set_wakeup_capable(priv->device, !!wol.supported);
		device_set_wakeup_enable(priv->device, !!wol.wolopts);
	}

	return ret;
}

static void stmmac_set_half_duplex(struct stmmac_priv *priv)
{
	/* Half-Duplex can only work with single tx queue */
	if (priv->plat->tx_queues_to_use > 1)
		priv->phylink_config.mac_capabilities &=
			~(MAC_10HD | MAC_100HD | MAC_1000HD);
	else
		priv->phylink_config.mac_capabilities |=
			(MAC_10HD | MAC_100HD | MAC_1000HD);
}

static int stmmac_phy_setup(struct stmmac_priv *priv)
{
	struct stmmac_mdio_bus_data *mdio_bus_data;
	int mode = priv->plat->phy_interface;
	struct fwnode_handle *fwnode;
	struct phylink *phylink;
	int max_speed;

	priv->phylink_config.dev = &priv->dev->dev;
	priv->phylink_config.type = PHYLINK_NETDEV;
	priv->phylink_config.mac_managed_pm = true;

	mdio_bus_data = priv->plat->mdio_bus_data;
	if (mdio_bus_data)
		priv->phylink_config.ovr_an_inband =
			mdio_bus_data->xpcs_an_inband;

	/* Set the platform/firmware specified interface mode. Note, phylink
	 * deals with the PHY interface mode, not the MAC interface mode.
	 */
	__set_bit(mode, priv->phylink_config.supported_interfaces);

	/* If we have an xpcs, it defines which PHY interfaces are supported. */
	if (priv->hw->xpcs)
		xpcs_get_interfaces(priv->hw->xpcs,
				    priv->phylink_config.supported_interfaces);

	priv->phylink_config.mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
						MAC_10FD | MAC_100FD |
						MAC_1000FD;

	stmmac_set_half_duplex(priv);

	/* Get the MAC specific capabilities */
	stmmac_mac_phylink_get_caps(priv);

	max_speed = priv->plat->max_speed;
	if (max_speed)
		phylink_limit_mac_speed(&priv->phylink_config, max_speed);

	fwnode = priv->plat->port_node;
	if (!fwnode)
		fwnode = dev_fwnode(priv->device);

	phylink = phylink_create(&priv->phylink_config, fwnode,
				 mode, &stmmac_phylink_mac_ops);
	if (IS_ERR(phylink))
		return PTR_ERR(phylink);

	priv->phylink = phylink;
	return 0;
}

static void stmmac_display_rx_rings(struct stmmac_priv *priv,
				    struct stmmac_dma_conf *dma_conf)
{
	u32 rx_cnt = priv->plat->rx_queues_to_use;
	unsigned int desc_size;
	void *head_rx;
	u32 queue;

	/* Display RX rings */
	for (queue = 0; queue < rx_cnt; queue++) {
		struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];

		pr_info("\tRX Queue %u rings\n", queue);

		if (priv->extend_desc) {
			head_rx = (void *)rx_q->dma_erx;
			desc_size = sizeof(struct dma_extended_desc);
		} else {
			head_rx = (void *)rx_q->dma_rx;
			desc_size = sizeof(struct dma_desc);
		}

		/* Display RX ring */
		stmmac_display_ring(priv, head_rx, dma_conf->dma_rx_size, true,
				    rx_q->dma_rx_phy, desc_size);
	}
}

static void stmmac_display_tx_rings(struct stmmac_priv *priv,
				    struct stmmac_dma_conf *dma_conf)
{
	u32 tx_cnt = priv->plat->tx_queues_to_use;
	unsigned int desc_size;
	void *head_tx;
	u32 queue;

	/* Display TX rings */
	for (queue = 0; queue < tx_cnt; queue++) {
		struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];

		pr_info("\tTX Queue %d rings\n", queue);

		if (priv->extend_desc) {
			head_tx = (void *)tx_q->dma_etx;
			desc_size = sizeof(struct dma_extended_desc);
		} else if (tx_q->tbs & STMMAC_TBS_AVAIL) {
			head_tx = (void *)tx_q->dma_entx;
			desc_size = sizeof(struct dma_edesc);
		} else {
			head_tx = (void *)tx_q->dma_tx;
			desc_size = sizeof(struct dma_desc);
		}

		stmmac_display_ring(priv, head_tx, dma_conf->dma_tx_size, false,
				    tx_q->dma_tx_phy, desc_size);
	}
}

static void stmmac_display_rings(struct stmmac_priv *priv,
				 struct stmmac_dma_conf *dma_conf)
{
	/* Display RX ring */
	stmmac_display_rx_rings(priv, dma_conf);

	/* Display TX ring */
	stmmac_display_tx_rings(priv, dma_conf);
}

static int stmmac_set_bfsize(int mtu, int bufsize)
{
	int ret = bufsize;

	if (mtu >= BUF_SIZE_8KiB)
		ret = BUF_SIZE_16KiB;
	else if (mtu >= BUF_SIZE_4KiB)
		ret = BUF_SIZE_8KiB;
	else if (mtu >= BUF_SIZE_2KiB)
		ret = BUF_SIZE_4KiB;
	else if (mtu > DEFAULT_BUFSIZE)
		ret = BUF_SIZE_2KiB;
	else
		ret = DEFAULT_BUFSIZE;

	return ret;
}

/**
 * stmmac_clear_rx_descriptors - clear RX descriptors
 * @priv: driver private structure
 * @dma_conf: structure to take the dma data
 * @queue: RX queue index
 * Description: this function is called to clear the RX descriptors,
 * whether basic or extended descriptors are used.
 */
static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv,
					struct stmmac_dma_conf *dma_conf,
					u32 queue)
{
	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
	int i;

	/* Clear the RX descriptors */
	for (i = 0; i < dma_conf->dma_rx_size; i++)
		if (priv->extend_desc)
			stmmac_init_rx_desc(priv, &rx_q->dma_erx[i].basic,
					    priv->use_riwt, priv->mode,
					    (i == dma_conf->dma_rx_size - 1),
					    dma_conf->dma_buf_sz);
		else
			stmmac_init_rx_desc(priv, &rx_q->dma_rx[i],
					    priv->use_riwt, priv->mode,
					    (i == dma_conf->dma_rx_size - 1),
					    dma_conf->dma_buf_sz);
}

/**
 * stmmac_clear_tx_descriptors - clear tx descriptors
 * @priv: driver private structure
 * @dma_conf: structure to take the dma data
 * @queue: TX queue index.
 * Description: this function is called to clear the TX descriptors,
 * whether basic or extended descriptors are used.
 */
static void stmmac_clear_tx_descriptors(struct stmmac_priv *priv,
					struct stmmac_dma_conf *dma_conf,
					u32 queue)
{
	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
	int i;

	/* Clear the TX descriptors */
	for (i = 0; i < dma_conf->dma_tx_size; i++) {
		int last = (i == (dma_conf->dma_tx_size - 1));
		struct dma_desc *p;

		if (priv->extend_desc)
			p = &tx_q->dma_etx[i].basic;
		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
			p = &tx_q->dma_entx[i].basic;
		else
			p = &tx_q->dma_tx[i];

		stmmac_init_tx_desc(priv, p, priv->mode, last);
	}
}

/**
 * stmmac_clear_descriptors - clear descriptors
 * @priv: driver private structure
 * @dma_conf: structure to take the dma data
 * Description: this function is called to clear the TX and RX descriptors,
 * whether basic or extended descriptors are used.
 */
static void stmmac_clear_descriptors(struct stmmac_priv *priv,
				     struct stmmac_dma_conf *dma_conf)
{
	u32 rx_queue_cnt = priv->plat->rx_queues_to_use;
	u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
	u32 queue;

	/* Clear the RX descriptors */
	for (queue = 0; queue < rx_queue_cnt; queue++)
		stmmac_clear_rx_descriptors(priv, dma_conf, queue);

	/* Clear the TX descriptors */
	for (queue = 0; queue < tx_queue_cnt; queue++)
		stmmac_clear_tx_descriptors(priv, dma_conf, queue);
}

/**
 * stmmac_init_rx_buffers - init the RX descriptor buffer.
 * @priv: driver private structure
 * @dma_conf: structure to take the dma data
 * @p: descriptor pointer
 * @i: descriptor index
 * @flags: gfp flag
 * @queue: RX queue index
 * Description: this function is called to allocate a receive buffer, perform
 * the DMA mapping and init the descriptor.
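 * Buffers are taken from the queue's page_pool; when the platform reports a
 * host DMA width of 32 bits or less, the allocation is restricted to GFP_DMA32.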
 */
static int stmmac_init_rx_buffers(struct stmmac_priv *priv,
				  struct stmmac_dma_conf *dma_conf,
				  struct dma_desc *p,
				  int i, gfp_t flags, u32 queue)
{
	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
	struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
	gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);

	if (priv->dma_cap.host_dma_width <= 32)
		gfp |= GFP_DMA32;

	if (!buf->page) {
		buf->page = page_pool_alloc_pages(rx_q->page_pool, gfp);
		if (!buf->page)
			return -ENOMEM;
		buf->page_offset = stmmac_rx_offset(priv);
	}

	if (priv->sph && !buf->sec_page) {
		buf->sec_page = page_pool_alloc_pages(rx_q->page_pool, gfp);
		if (!buf->sec_page)
			return -ENOMEM;

		buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
		stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true);
	} else {
		buf->sec_page = NULL;
		stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false);
	}

	buf->addr = page_pool_get_dma_addr(buf->page) + buf->page_offset;

	stmmac_set_desc_addr(priv, p, buf->addr);
	if (dma_conf->dma_buf_sz == BUF_SIZE_16KiB)
		stmmac_init_desc3(priv, p);

	return 0;
}

/**
 * stmmac_free_rx_buffer - free RX dma buffers
 * @priv: private structure
 * @rx_q: RX queue
 * @i: buffer index.
 */
static void stmmac_free_rx_buffer(struct stmmac_priv *priv,
				  struct stmmac_rx_queue *rx_q,
				  int i)
{
	struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];

	if (buf->page)
		page_pool_put_full_page(rx_q->page_pool, buf->page, false);
	buf->page = NULL;

	if (buf->sec_page)
		page_pool_put_full_page(rx_q->page_pool, buf->sec_page, false);
	buf->sec_page = NULL;
}

/**
 * stmmac_free_tx_buffer - free TX dma buffers
 * @priv: private structure
 * @dma_conf: structure to take the dma data
 * @queue: TX queue index
 * @i: buffer index.
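 * Description: unmaps the buffer's DMA mapping (if any) and releases the
 * associated skb, XDP frame or XSK descriptor according to the buffer type.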
 */
static void stmmac_free_tx_buffer(struct stmmac_priv *priv,
				  struct stmmac_dma_conf *dma_conf,
				  u32 queue, int i)
{
	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];

	if (tx_q->tx_skbuff_dma[i].buf &&
	    tx_q->tx_skbuff_dma[i].buf_type != STMMAC_TXBUF_T_XDP_TX) {
		if (tx_q->tx_skbuff_dma[i].map_as_page)
			dma_unmap_page(priv->device,
				       tx_q->tx_skbuff_dma[i].buf,
				       tx_q->tx_skbuff_dma[i].len,
				       DMA_TO_DEVICE);
		else
			dma_unmap_single(priv->device,
					 tx_q->tx_skbuff_dma[i].buf,
					 tx_q->tx_skbuff_dma[i].len,
					 DMA_TO_DEVICE);
	}

	if (tx_q->xdpf[i] &&
	    (tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XDP_TX ||
	     tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XDP_NDO)) {
		xdp_return_frame(tx_q->xdpf[i]);
		tx_q->xdpf[i] = NULL;
	}

	if (tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XSK_TX)
		tx_q->xsk_frames_done++;

	if (tx_q->tx_skbuff[i] &&
	    tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_SKB) {
		dev_kfree_skb_any(tx_q->tx_skbuff[i]);
		tx_q->tx_skbuff[i] = NULL;
	}

	tx_q->tx_skbuff_dma[i].buf = 0;
	tx_q->tx_skbuff_dma[i].map_as_page = false;
}

/**
 * dma_free_rx_skbufs - free RX dma buffers
 * @priv: private structure
 * @dma_conf: structure to take the dma data
 * @queue: RX queue index
 */
static void dma_free_rx_skbufs(struct stmmac_priv *priv,
			       struct stmmac_dma_conf *dma_conf,
			       u32 queue)
{
	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
	int i;

	for (i = 0; i < dma_conf->dma_rx_size; i++)
		stmmac_free_rx_buffer(priv, rx_q, i);
}

static int stmmac_alloc_rx_buffers(struct stmmac_priv *priv,
				   struct stmmac_dma_conf *dma_conf,
				   u32 queue, gfp_t flags)
{
	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
	int i;

	for (i = 0; i < dma_conf->dma_rx_size; i++) {
		struct dma_desc *p;
		int ret;

		if (priv->extend_desc)
			p = &((rx_q->dma_erx + i)->basic);
		else
			p = rx_q->dma_rx + i;

		ret = stmmac_init_rx_buffers(priv, dma_conf, p, i, flags,
					     queue);
		if (ret)
			return ret;

		rx_q->buf_alloc_num++;
	}

	return 0;
}

/**
 * dma_free_rx_xskbufs - free RX dma buffers from XSK pool
 * @priv: private structure
 * @dma_conf: structure to take the dma data
 * @queue: RX queue index
 */
static void dma_free_rx_xskbufs(struct stmmac_priv *priv,
				struct stmmac_dma_conf *dma_conf,
				u32 queue)
{
	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
	int i;

	for (i = 0; i < dma_conf->dma_rx_size; i++) {
		struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];

		if (!buf->xdp)
			continue;

		xsk_buff_free(buf->xdp);
		buf->xdp = NULL;
	}
}

static int stmmac_alloc_rx_buffers_zc(struct stmmac_priv *priv,
				      struct stmmac_dma_conf *dma_conf,
				      u32 queue)
{
	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
	int i;

	/* struct stmmac_xdp_buff is using cb field (maximum size of 24 bytes)
	 * in struct xdp_buff_xsk to stash driver specific information. Thus,
	 * use this macro to make sure no size violations.
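	 * The check is performed at build time, so an oversized private
	 * struct breaks the build instead of corrupting the XSK buffer.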
	 */
	XSK_CHECK_PRIV_TYPE(struct stmmac_xdp_buff);

	for (i = 0; i < dma_conf->dma_rx_size; i++) {
		struct stmmac_rx_buffer *buf;
		dma_addr_t dma_addr;
		struct dma_desc *p;

		if (priv->extend_desc)
			p = (struct dma_desc *)(rx_q->dma_erx + i);
		else
			p = rx_q->dma_rx + i;

		buf = &rx_q->buf_pool[i];

		buf->xdp = xsk_buff_alloc(rx_q->xsk_pool);
		if (!buf->xdp)
			return -ENOMEM;

		dma_addr = xsk_buff_xdp_get_dma(buf->xdp);
		stmmac_set_desc_addr(priv, p, dma_addr);
		rx_q->buf_alloc_num++;
	}

	return 0;
}

static struct xsk_buff_pool *stmmac_get_xsk_pool(struct stmmac_priv *priv, u32 queue)
{
	if (!stmmac_xdp_is_enabled(priv) || !test_bit(queue, priv->af_xdp_zc_qps))
		return NULL;

	return xsk_get_pool_from_qid(priv->dev, queue);
}

/**
 * __init_dma_rx_desc_rings - init the RX descriptor ring (per queue)
 * @priv: driver private structure
 * @dma_conf: structure to take the dma data
 * @queue: RX queue index
 * @flags: gfp flag.
 * Description: this function initializes the DMA RX descriptors
 * and allocates the socket buffers. It supports the chained and ring
 * modes.
 */
static int __init_dma_rx_desc_rings(struct stmmac_priv *priv,
				    struct stmmac_dma_conf *dma_conf,
				    u32 queue, gfp_t flags)
{
	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
	int ret;

	netif_dbg(priv, probe, priv->dev,
		  "(%s) dma_rx_phy=0x%08x\n", __func__,
		  (u32)rx_q->dma_rx_phy);

	stmmac_clear_rx_descriptors(priv, dma_conf, queue);

	xdp_rxq_info_unreg_mem_model(&rx_q->xdp_rxq);

	rx_q->xsk_pool = stmmac_get_xsk_pool(priv, queue);

	if (rx_q->xsk_pool) {
		WARN_ON(xdp_rxq_info_reg_mem_model(&rx_q->xdp_rxq,
						   MEM_TYPE_XSK_BUFF_POOL,
						   NULL));
		netdev_info(priv->dev,
			    "Register MEM_TYPE_XSK_BUFF_POOL RxQ-%d\n",
			    rx_q->queue_index);
		xsk_pool_set_rxq_info(rx_q->xsk_pool, &rx_q->xdp_rxq);
	} else {
		WARN_ON(xdp_rxq_info_reg_mem_model(&rx_q->xdp_rxq,
						   MEM_TYPE_PAGE_POOL,
						   rx_q->page_pool));
		netdev_info(priv->dev,
			    "Register MEM_TYPE_PAGE_POOL RxQ-%d\n",
			    rx_q->queue_index);
	}

	if (rx_q->xsk_pool) {
		/* RX XDP ZC buffer pool may not be populated, e.g.
		 * xdpsock TX-only.
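		 * In that case xsk_buff_alloc() can fail part way through and
		 * the return value of stmmac_alloc_rx_buffers_zc() is not
		 * checked here.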
		 */
		stmmac_alloc_rx_buffers_zc(priv, dma_conf, queue);
	} else {
		ret = stmmac_alloc_rx_buffers(priv, dma_conf, queue, flags);
		if (ret < 0)
			return -ENOMEM;
	}

	/* Setup the chained descriptor addresses */
	if (priv->mode == STMMAC_CHAIN_MODE) {
		if (priv->extend_desc)
			stmmac_mode_init(priv, rx_q->dma_erx,
					 rx_q->dma_rx_phy,
					 dma_conf->dma_rx_size, 1);
		else
			stmmac_mode_init(priv, rx_q->dma_rx,
					 rx_q->dma_rx_phy,
					 dma_conf->dma_rx_size, 0);
	}

	return 0;
}

static int init_dma_rx_desc_rings(struct net_device *dev,
				  struct stmmac_dma_conf *dma_conf,
				  gfp_t flags)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	u32 rx_count = priv->plat->rx_queues_to_use;
	int queue;
	int ret;

	/* RX INITIALIZATION */
	netif_dbg(priv, probe, priv->dev,
		  "SKB addresses:\nskb\t\tskb data\tdma data\n");

	for (queue = 0; queue < rx_count; queue++) {
		ret = __init_dma_rx_desc_rings(priv, dma_conf, queue, flags);
		if (ret)
			goto err_init_rx_buffers;
	}

	return 0;

err_init_rx_buffers:
	while (queue >= 0) {
		struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];

		if (rx_q->xsk_pool)
			dma_free_rx_xskbufs(priv, dma_conf, queue);
		else
			dma_free_rx_skbufs(priv, dma_conf, queue);

		rx_q->buf_alloc_num = 0;
		rx_q->xsk_pool = NULL;

		queue--;
	}

	return ret;
}

/**
 * __init_dma_tx_desc_rings - init the TX descriptor ring (per queue)
 * @priv: driver private structure
 * @dma_conf: structure to take the dma data
 * @queue: TX queue index
 * Description: this function initializes the DMA TX descriptors
 * and allocates the socket buffers. It supports the chained and ring
 * modes.
 */
static int __init_dma_tx_desc_rings(struct stmmac_priv *priv,
				    struct stmmac_dma_conf *dma_conf,
				    u32 queue)
{
	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
	int i;

	netif_dbg(priv, probe, priv->dev,
		  "(%s) dma_tx_phy=0x%08x\n", __func__,
		  (u32)tx_q->dma_tx_phy);

	/* Setup the chained descriptor addresses */
	if (priv->mode == STMMAC_CHAIN_MODE) {
		if (priv->extend_desc)
			stmmac_mode_init(priv, tx_q->dma_etx,
					 tx_q->dma_tx_phy,
					 dma_conf->dma_tx_size, 1);
		else if (!(tx_q->tbs & STMMAC_TBS_AVAIL))
			stmmac_mode_init(priv, tx_q->dma_tx,
					 tx_q->dma_tx_phy,
					 dma_conf->dma_tx_size, 0);
	}

	tx_q->xsk_pool = stmmac_get_xsk_pool(priv, queue);

	for (i = 0; i < dma_conf->dma_tx_size; i++) {
		struct dma_desc *p;

		if (priv->extend_desc)
			p = &((tx_q->dma_etx + i)->basic);
		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
			p = &((tx_q->dma_entx + i)->basic);
		else
			p = tx_q->dma_tx + i;

		stmmac_clear_desc(priv, p);

		tx_q->tx_skbuff_dma[i].buf = 0;
		tx_q->tx_skbuff_dma[i].map_as_page = false;
		tx_q->tx_skbuff_dma[i].len = 0;
		tx_q->tx_skbuff_dma[i].last_segment = false;
		tx_q->tx_skbuff[i] = NULL;
	}

	return 0;
}

static int init_dma_tx_desc_rings(struct net_device *dev,
				  struct stmmac_dma_conf *dma_conf)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	u32 tx_queue_cnt;
	u32 queue;

	tx_queue_cnt = priv->plat->tx_queues_to_use;

	for (queue = 0; queue < tx_queue_cnt; queue++)
		__init_dma_tx_desc_rings(priv, dma_conf, queue);

	return 0;
}

/**
 * init_dma_desc_rings - init the RX/TX descriptor rings
 * @dev: net device structure
 * @dma_conf: structure to take the dma data
 * @flags: gfp flag.
 * Description: this function initializes the DMA RX/TX descriptors
 * and allocates the socket buffers. It supports the chained and ring
 * modes.
1854 */ 1855 static int init_dma_desc_rings(struct net_device *dev, 1856 struct stmmac_dma_conf *dma_conf, 1857 gfp_t flags) 1858 { 1859 struct stmmac_priv *priv = netdev_priv(dev); 1860 int ret; 1861 1862 ret = init_dma_rx_desc_rings(dev, dma_conf, flags); 1863 if (ret) 1864 return ret; 1865 1866 ret = init_dma_tx_desc_rings(dev, dma_conf); 1867 1868 stmmac_clear_descriptors(priv, dma_conf); 1869 1870 if (netif_msg_hw(priv)) 1871 stmmac_display_rings(priv, dma_conf); 1872 1873 return ret; 1874 } 1875 1876 /** 1877 * dma_free_tx_skbufs - free TX dma buffers 1878 * @priv: private structure 1879 * @dma_conf: structure to take the dma data 1880 * @queue: TX queue index 1881 */ 1882 static void dma_free_tx_skbufs(struct stmmac_priv *priv, 1883 struct stmmac_dma_conf *dma_conf, 1884 u32 queue) 1885 { 1886 struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue]; 1887 int i; 1888 1889 tx_q->xsk_frames_done = 0; 1890 1891 for (i = 0; i < dma_conf->dma_tx_size; i++) 1892 stmmac_free_tx_buffer(priv, dma_conf, queue, i); 1893 1894 if (tx_q->xsk_pool && tx_q->xsk_frames_done) { 1895 xsk_tx_completed(tx_q->xsk_pool, tx_q->xsk_frames_done); 1896 tx_q->xsk_frames_done = 0; 1897 tx_q->xsk_pool = NULL; 1898 } 1899 } 1900 1901 /** 1902 * stmmac_free_tx_skbufs - free TX skb buffers 1903 * @priv: private structure 1904 */ 1905 static void stmmac_free_tx_skbufs(struct stmmac_priv *priv) 1906 { 1907 u32 tx_queue_cnt = priv->plat->tx_queues_to_use; 1908 u32 queue; 1909 1910 for (queue = 0; queue < tx_queue_cnt; queue++) 1911 dma_free_tx_skbufs(priv, &priv->dma_conf, queue); 1912 } 1913 1914 /** 1915 * __free_dma_rx_desc_resources - free RX dma desc resources (per queue) 1916 * @priv: private structure 1917 * @dma_conf: structure to take the dma data 1918 * @queue: RX queue index 1919 */ 1920 static void __free_dma_rx_desc_resources(struct stmmac_priv *priv, 1921 struct stmmac_dma_conf *dma_conf, 1922 u32 queue) 1923 { 1924 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue]; 1925 1926 /* Release the DMA RX socket buffers */ 1927 if (rx_q->xsk_pool) 1928 dma_free_rx_xskbufs(priv, dma_conf, queue); 1929 else 1930 dma_free_rx_skbufs(priv, dma_conf, queue); 1931 1932 rx_q->buf_alloc_num = 0; 1933 rx_q->xsk_pool = NULL; 1934 1935 /* Free DMA regions of consistent memory previously allocated */ 1936 if (!priv->extend_desc) 1937 dma_free_coherent(priv->device, dma_conf->dma_rx_size * 1938 sizeof(struct dma_desc), 1939 rx_q->dma_rx, rx_q->dma_rx_phy); 1940 else 1941 dma_free_coherent(priv->device, dma_conf->dma_rx_size * 1942 sizeof(struct dma_extended_desc), 1943 rx_q->dma_erx, rx_q->dma_rx_phy); 1944 1945 if (xdp_rxq_info_is_reg(&rx_q->xdp_rxq)) 1946 xdp_rxq_info_unreg(&rx_q->xdp_rxq); 1947 1948 kfree(rx_q->buf_pool); 1949 if (rx_q->page_pool) 1950 page_pool_destroy(rx_q->page_pool); 1951 } 1952 1953 static void free_dma_rx_desc_resources(struct stmmac_priv *priv, 1954 struct stmmac_dma_conf *dma_conf) 1955 { 1956 u32 rx_count = priv->plat->rx_queues_to_use; 1957 u32 queue; 1958 1959 /* Free RX queue resources */ 1960 for (queue = 0; queue < rx_count; queue++) 1961 __free_dma_rx_desc_resources(priv, dma_conf, queue); 1962 } 1963 1964 /** 1965 * __free_dma_tx_desc_resources - free TX dma desc resources (per queue) 1966 * @priv: private structure 1967 * @dma_conf: structure to take the dma data 1968 * @queue: TX queue index 1969 */ 1970 static void __free_dma_tx_desc_resources(struct stmmac_priv *priv, 1971 struct stmmac_dma_conf *dma_conf, 1972 u32 queue) 1973 { 1974 struct stmmac_tx_queue *tx_q = 
&dma_conf->tx_queue[queue];
	size_t size;
	void *addr;

	/* Release the DMA TX socket buffers */
	dma_free_tx_skbufs(priv, dma_conf, queue);

	if (priv->extend_desc) {
		size = sizeof(struct dma_extended_desc);
		addr = tx_q->dma_etx;
	} else if (tx_q->tbs & STMMAC_TBS_AVAIL) {
		size = sizeof(struct dma_edesc);
		addr = tx_q->dma_entx;
	} else {
		size = sizeof(struct dma_desc);
		addr = tx_q->dma_tx;
	}

	size *= dma_conf->dma_tx_size;

	dma_free_coherent(priv->device, size, addr, tx_q->dma_tx_phy);

	kfree(tx_q->tx_skbuff_dma);
	kfree(tx_q->tx_skbuff);
}

static void free_dma_tx_desc_resources(struct stmmac_priv *priv,
				       struct stmmac_dma_conf *dma_conf)
{
	u32 tx_count = priv->plat->tx_queues_to_use;
	u32 queue;

	/* Free TX queue resources */
	for (queue = 0; queue < tx_count; queue++)
		__free_dma_tx_desc_resources(priv, dma_conf, queue);
}

/**
 * __alloc_dma_rx_desc_resources - alloc RX resources (per queue).
 * @priv: private structure
 * @dma_conf: structure to take the dma data
 * @queue: RX queue index
 * Description: according to which descriptor type is in use (extended or
 * basic), this function allocates the resources for the RX path. For
 * example, it pre-allocates the RX buffers in order to allow a zero-copy
 * mechanism.
 */
static int __alloc_dma_rx_desc_resources(struct stmmac_priv *priv,
					 struct stmmac_dma_conf *dma_conf,
					 u32 queue)
{
	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
	struct stmmac_channel *ch = &priv->channel[queue];
	bool xdp_prog = stmmac_xdp_is_enabled(priv);
	struct page_pool_params pp_params = { 0 };
	unsigned int num_pages;
	unsigned int napi_id;
	int ret;

	rx_q->queue_index = queue;
	rx_q->priv_data = priv;

	pp_params.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
	pp_params.pool_size = dma_conf->dma_rx_size;
	num_pages = DIV_ROUND_UP(dma_conf->dma_buf_sz, PAGE_SIZE);
	pp_params.order = ilog2(num_pages);
	pp_params.nid = dev_to_node(priv->device);
	pp_params.dev = priv->device;
	pp_params.dma_dir = xdp_prog ?
DMA_BIDIRECTIONAL : DMA_FROM_DEVICE; 2043 pp_params.offset = stmmac_rx_offset(priv); 2044 pp_params.max_len = STMMAC_MAX_RX_BUF_SIZE(num_pages); 2045 2046 rx_q->page_pool = page_pool_create(&pp_params); 2047 if (IS_ERR(rx_q->page_pool)) { 2048 ret = PTR_ERR(rx_q->page_pool); 2049 rx_q->page_pool = NULL; 2050 return ret; 2051 } 2052 2053 rx_q->buf_pool = kcalloc(dma_conf->dma_rx_size, 2054 sizeof(*rx_q->buf_pool), 2055 GFP_KERNEL); 2056 if (!rx_q->buf_pool) 2057 return -ENOMEM; 2058 2059 if (priv->extend_desc) { 2060 rx_q->dma_erx = dma_alloc_coherent(priv->device, 2061 dma_conf->dma_rx_size * 2062 sizeof(struct dma_extended_desc), 2063 &rx_q->dma_rx_phy, 2064 GFP_KERNEL); 2065 if (!rx_q->dma_erx) 2066 return -ENOMEM; 2067 2068 } else { 2069 rx_q->dma_rx = dma_alloc_coherent(priv->device, 2070 dma_conf->dma_rx_size * 2071 sizeof(struct dma_desc), 2072 &rx_q->dma_rx_phy, 2073 GFP_KERNEL); 2074 if (!rx_q->dma_rx) 2075 return -ENOMEM; 2076 } 2077 2078 if (stmmac_xdp_is_enabled(priv) && 2079 test_bit(queue, priv->af_xdp_zc_qps)) 2080 napi_id = ch->rxtx_napi.napi_id; 2081 else 2082 napi_id = ch->rx_napi.napi_id; 2083 2084 ret = xdp_rxq_info_reg(&rx_q->xdp_rxq, priv->dev, 2085 rx_q->queue_index, 2086 napi_id); 2087 if (ret) { 2088 netdev_err(priv->dev, "Failed to register xdp rxq info\n"); 2089 return -EINVAL; 2090 } 2091 2092 return 0; 2093 } 2094 2095 static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv, 2096 struct stmmac_dma_conf *dma_conf) 2097 { 2098 u32 rx_count = priv->plat->rx_queues_to_use; 2099 u32 queue; 2100 int ret; 2101 2102 /* RX queues buffers and DMA */ 2103 for (queue = 0; queue < rx_count; queue++) { 2104 ret = __alloc_dma_rx_desc_resources(priv, dma_conf, queue); 2105 if (ret) 2106 goto err_dma; 2107 } 2108 2109 return 0; 2110 2111 err_dma: 2112 free_dma_rx_desc_resources(priv, dma_conf); 2113 2114 return ret; 2115 } 2116 2117 /** 2118 * __alloc_dma_tx_desc_resources - alloc TX resources (per queue). 2119 * @priv: private structure 2120 * @dma_conf: structure to take the dma data 2121 * @queue: TX queue index 2122 * Description: according to which descriptor can be used (extend or basic) 2123 * this function allocates the resources for TX and RX paths. In case of 2124 * reception, for example, it pre-allocated the RX socket buffer in order to 2125 * allow zero-copy mechanism. 
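 * For the TX path this means the tx_skbuff[] and tx_skbuff_dma[] arrays
 * plus the descriptor ring itself, whose element size depends on whether
 * extended descriptors or TBS are in use.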
2126 */ 2127 static int __alloc_dma_tx_desc_resources(struct stmmac_priv *priv, 2128 struct stmmac_dma_conf *dma_conf, 2129 u32 queue) 2130 { 2131 struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue]; 2132 size_t size; 2133 void *addr; 2134 2135 tx_q->queue_index = queue; 2136 tx_q->priv_data = priv; 2137 2138 tx_q->tx_skbuff_dma = kcalloc(dma_conf->dma_tx_size, 2139 sizeof(*tx_q->tx_skbuff_dma), 2140 GFP_KERNEL); 2141 if (!tx_q->tx_skbuff_dma) 2142 return -ENOMEM; 2143 2144 tx_q->tx_skbuff = kcalloc(dma_conf->dma_tx_size, 2145 sizeof(struct sk_buff *), 2146 GFP_KERNEL); 2147 if (!tx_q->tx_skbuff) 2148 return -ENOMEM; 2149 2150 if (priv->extend_desc) 2151 size = sizeof(struct dma_extended_desc); 2152 else if (tx_q->tbs & STMMAC_TBS_AVAIL) 2153 size = sizeof(struct dma_edesc); 2154 else 2155 size = sizeof(struct dma_desc); 2156 2157 size *= dma_conf->dma_tx_size; 2158 2159 addr = dma_alloc_coherent(priv->device, size, 2160 &tx_q->dma_tx_phy, GFP_KERNEL); 2161 if (!addr) 2162 return -ENOMEM; 2163 2164 if (priv->extend_desc) 2165 tx_q->dma_etx = addr; 2166 else if (tx_q->tbs & STMMAC_TBS_AVAIL) 2167 tx_q->dma_entx = addr; 2168 else 2169 tx_q->dma_tx = addr; 2170 2171 return 0; 2172 } 2173 2174 static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv, 2175 struct stmmac_dma_conf *dma_conf) 2176 { 2177 u32 tx_count = priv->plat->tx_queues_to_use; 2178 u32 queue; 2179 int ret; 2180 2181 /* TX queues buffers and DMA */ 2182 for (queue = 0; queue < tx_count; queue++) { 2183 ret = __alloc_dma_tx_desc_resources(priv, dma_conf, queue); 2184 if (ret) 2185 goto err_dma; 2186 } 2187 2188 return 0; 2189 2190 err_dma: 2191 free_dma_tx_desc_resources(priv, dma_conf); 2192 return ret; 2193 } 2194 2195 /** 2196 * alloc_dma_desc_resources - alloc TX/RX resources. 2197 * @priv: private structure 2198 * @dma_conf: structure to take the dma data 2199 * Description: according to which descriptor can be used (extend or basic) 2200 * this function allocates the resources for TX and RX paths. In case of 2201 * reception, for example, it pre-allocated the RX socket buffer in order to 2202 * allow zero-copy mechanism. 2203 */ 2204 static int alloc_dma_desc_resources(struct stmmac_priv *priv, 2205 struct stmmac_dma_conf *dma_conf) 2206 { 2207 /* RX Allocation */ 2208 int ret = alloc_dma_rx_desc_resources(priv, dma_conf); 2209 2210 if (ret) 2211 return ret; 2212 2213 ret = alloc_dma_tx_desc_resources(priv, dma_conf); 2214 2215 return ret; 2216 } 2217 2218 /** 2219 * free_dma_desc_resources - free dma desc resources 2220 * @priv: private structure 2221 * @dma_conf: structure to take the dma data 2222 */ 2223 static void free_dma_desc_resources(struct stmmac_priv *priv, 2224 struct stmmac_dma_conf *dma_conf) 2225 { 2226 /* Release the DMA TX socket buffers */ 2227 free_dma_tx_desc_resources(priv, dma_conf); 2228 2229 /* Release the DMA RX socket buffers later 2230 * to ensure all pending XDP_TX buffers are returned. 
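 * (Freeing TX first means any XDP_TX frame still queued on a TX ring has
 * been given back before the RX page_pool it came from is destroyed.)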
2231 */ 2232 free_dma_rx_desc_resources(priv, dma_conf); 2233 } 2234 2235 /** 2236 * stmmac_mac_enable_rx_queues - Enable MAC rx queues 2237 * @priv: driver private structure 2238 * Description: It is used for enabling the rx queues in the MAC 2239 */ 2240 static void stmmac_mac_enable_rx_queues(struct stmmac_priv *priv) 2241 { 2242 u32 rx_queues_count = priv->plat->rx_queues_to_use; 2243 int queue; 2244 u8 mode; 2245 2246 for (queue = 0; queue < rx_queues_count; queue++) { 2247 mode = priv->plat->rx_queues_cfg[queue].mode_to_use; 2248 stmmac_rx_queue_enable(priv, priv->hw, mode, queue); 2249 } 2250 } 2251 2252 /** 2253 * stmmac_start_rx_dma - start RX DMA channel 2254 * @priv: driver private structure 2255 * @chan: RX channel index 2256 * Description: 2257 * This starts a RX DMA channel 2258 */ 2259 static void stmmac_start_rx_dma(struct stmmac_priv *priv, u32 chan) 2260 { 2261 netdev_dbg(priv->dev, "DMA RX processes started in channel %d\n", chan); 2262 stmmac_start_rx(priv, priv->ioaddr, chan); 2263 } 2264 2265 /** 2266 * stmmac_start_tx_dma - start TX DMA channel 2267 * @priv: driver private structure 2268 * @chan: TX channel index 2269 * Description: 2270 * This starts a TX DMA channel 2271 */ 2272 static void stmmac_start_tx_dma(struct stmmac_priv *priv, u32 chan) 2273 { 2274 netdev_dbg(priv->dev, "DMA TX processes started in channel %d\n", chan); 2275 stmmac_start_tx(priv, priv->ioaddr, chan); 2276 } 2277 2278 /** 2279 * stmmac_stop_rx_dma - stop RX DMA channel 2280 * @priv: driver private structure 2281 * @chan: RX channel index 2282 * Description: 2283 * This stops a RX DMA channel 2284 */ 2285 static void stmmac_stop_rx_dma(struct stmmac_priv *priv, u32 chan) 2286 { 2287 netdev_dbg(priv->dev, "DMA RX processes stopped in channel %d\n", chan); 2288 stmmac_stop_rx(priv, priv->ioaddr, chan); 2289 } 2290 2291 /** 2292 * stmmac_stop_tx_dma - stop TX DMA channel 2293 * @priv: driver private structure 2294 * @chan: TX channel index 2295 * Description: 2296 * This stops a TX DMA channel 2297 */ 2298 static void stmmac_stop_tx_dma(struct stmmac_priv *priv, u32 chan) 2299 { 2300 netdev_dbg(priv->dev, "DMA TX processes stopped in channel %d\n", chan); 2301 stmmac_stop_tx(priv, priv->ioaddr, chan); 2302 } 2303 2304 static void stmmac_enable_all_dma_irq(struct stmmac_priv *priv) 2305 { 2306 u32 rx_channels_count = priv->plat->rx_queues_to_use; 2307 u32 tx_channels_count = priv->plat->tx_queues_to_use; 2308 u32 dma_csr_ch = max(rx_channels_count, tx_channels_count); 2309 u32 chan; 2310 2311 for (chan = 0; chan < dma_csr_ch; chan++) { 2312 struct stmmac_channel *ch = &priv->channel[chan]; 2313 unsigned long flags; 2314 2315 spin_lock_irqsave(&ch->lock, flags); 2316 stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 1); 2317 spin_unlock_irqrestore(&ch->lock, flags); 2318 } 2319 } 2320 2321 /** 2322 * stmmac_start_all_dma - start all RX and TX DMA channels 2323 * @priv: driver private structure 2324 * Description: 2325 * This starts all the RX and TX DMA channels 2326 */ 2327 static void stmmac_start_all_dma(struct stmmac_priv *priv) 2328 { 2329 u32 rx_channels_count = priv->plat->rx_queues_to_use; 2330 u32 tx_channels_count = priv->plat->tx_queues_to_use; 2331 u32 chan = 0; 2332 2333 for (chan = 0; chan < rx_channels_count; chan++) 2334 stmmac_start_rx_dma(priv, chan); 2335 2336 for (chan = 0; chan < tx_channels_count; chan++) 2337 stmmac_start_tx_dma(priv, chan); 2338 } 2339 2340 /** 2341 * stmmac_stop_all_dma - stop all RX and TX DMA channels 2342 * @priv: driver private structure 2343 * 
Description:
 * This stops all the RX and TX DMA channels
 */
static void stmmac_stop_all_dma(struct stmmac_priv *priv)
{
	u32 rx_channels_count = priv->plat->rx_queues_to_use;
	u32 tx_channels_count = priv->plat->tx_queues_to_use;
	u32 chan = 0;

	for (chan = 0; chan < rx_channels_count; chan++)
		stmmac_stop_rx_dma(priv, chan);

	for (chan = 0; chan < tx_channels_count; chan++)
		stmmac_stop_tx_dma(priv, chan);
}

/**
 * stmmac_dma_operation_mode - HW DMA operation mode
 * @priv: driver private structure
 * Description: it is used for configuring the DMA operation mode register in
 * order to program the tx/rx DMA thresholds or Store-And-Forward mode.
 */
static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
{
	u32 rx_channels_count = priv->plat->rx_queues_to_use;
	u32 tx_channels_count = priv->plat->tx_queues_to_use;
	int rxfifosz = priv->plat->rx_fifo_size;
	int txfifosz = priv->plat->tx_fifo_size;
	u32 txmode = 0;
	u32 rxmode = 0;
	u32 chan = 0;
	u8 qmode = 0;

	if (rxfifosz == 0)
		rxfifosz = priv->dma_cap.rx_fifo_size;
	if (txfifosz == 0)
		txfifosz = priv->dma_cap.tx_fifo_size;

	/* Adjust for the real per-queue FIFO size */
	rxfifosz /= rx_channels_count;
	txfifosz /= tx_channels_count;

	if (priv->plat->force_thresh_dma_mode) {
		txmode = tc;
		rxmode = tc;
	} else if (priv->plat->force_sf_dma_mode || priv->plat->tx_coe) {
		/* On GMAC, Store-And-Forward (SF) mode can be enabled so that
		 * the TX checksum offload (COE) is performed in HW. This
		 * requires:
		 * 1) TX COE to actually be supported, and
		 * 2) no buggy Jumbo frame support that forces the driver to
		 *    avoid inserting the checksum in the TDES.
2395 */ 2396 txmode = SF_DMA_MODE; 2397 rxmode = SF_DMA_MODE; 2398 priv->xstats.threshold = SF_DMA_MODE; 2399 } else { 2400 txmode = tc; 2401 rxmode = SF_DMA_MODE; 2402 } 2403 2404 /* configure all channels */ 2405 for (chan = 0; chan < rx_channels_count; chan++) { 2406 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[chan]; 2407 u32 buf_size; 2408 2409 qmode = priv->plat->rx_queues_cfg[chan].mode_to_use; 2410 2411 stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan, 2412 rxfifosz, qmode); 2413 2414 if (rx_q->xsk_pool) { 2415 buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool); 2416 stmmac_set_dma_bfsize(priv, priv->ioaddr, 2417 buf_size, 2418 chan); 2419 } else { 2420 stmmac_set_dma_bfsize(priv, priv->ioaddr, 2421 priv->dma_conf.dma_buf_sz, 2422 chan); 2423 } 2424 } 2425 2426 for (chan = 0; chan < tx_channels_count; chan++) { 2427 qmode = priv->plat->tx_queues_cfg[chan].mode_to_use; 2428 2429 stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan, 2430 txfifosz, qmode); 2431 } 2432 } 2433 2434 static bool stmmac_xdp_xmit_zc(struct stmmac_priv *priv, u32 queue, u32 budget) 2435 { 2436 struct netdev_queue *nq = netdev_get_tx_queue(priv->dev, queue); 2437 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue]; 2438 struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[queue]; 2439 struct xsk_buff_pool *pool = tx_q->xsk_pool; 2440 unsigned int entry = tx_q->cur_tx; 2441 struct dma_desc *tx_desc = NULL; 2442 struct xdp_desc xdp_desc; 2443 bool work_done = true; 2444 u32 tx_set_ic_bit = 0; 2445 unsigned long flags; 2446 2447 /* Avoids TX time-out as we are sharing with slow path */ 2448 txq_trans_cond_update(nq); 2449 2450 budget = min(budget, stmmac_tx_avail(priv, queue)); 2451 2452 while (budget-- > 0) { 2453 dma_addr_t dma_addr; 2454 bool set_ic; 2455 2456 /* We are sharing with slow path and stop XSK TX desc submission when 2457 * available TX ring is less than threshold. 2458 */ 2459 if (unlikely(stmmac_tx_avail(priv, queue) < STMMAC_TX_XSK_AVAIL) || 2460 !netif_carrier_ok(priv->dev)) { 2461 work_done = false; 2462 break; 2463 } 2464 2465 if (!xsk_tx_peek_desc(pool, &xdp_desc)) 2466 break; 2467 2468 if (likely(priv->extend_desc)) 2469 tx_desc = (struct dma_desc *)(tx_q->dma_etx + entry); 2470 else if (tx_q->tbs & STMMAC_TBS_AVAIL) 2471 tx_desc = &tx_q->dma_entx[entry].basic; 2472 else 2473 tx_desc = tx_q->dma_tx + entry; 2474 2475 dma_addr = xsk_buff_raw_get_dma(pool, xdp_desc.addr); 2476 xsk_buff_raw_dma_sync_for_device(pool, dma_addr, xdp_desc.len); 2477 2478 tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XSK_TX; 2479 2480 /* To return XDP buffer to XSK pool, we simple call 2481 * xsk_tx_completed(), so we don't need to fill up 2482 * 'buf' and 'xdpf'. 
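 * The bookkeeping below records the buffer type and length so the
 * cleanup path can account the frame through xsk_frames_done.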
2483 */ 2484 tx_q->tx_skbuff_dma[entry].buf = 0; 2485 tx_q->xdpf[entry] = NULL; 2486 2487 tx_q->tx_skbuff_dma[entry].map_as_page = false; 2488 tx_q->tx_skbuff_dma[entry].len = xdp_desc.len; 2489 tx_q->tx_skbuff_dma[entry].last_segment = true; 2490 tx_q->tx_skbuff_dma[entry].is_jumbo = false; 2491 2492 stmmac_set_desc_addr(priv, tx_desc, dma_addr); 2493 2494 tx_q->tx_count_frames++; 2495 2496 if (!priv->tx_coal_frames[queue]) 2497 set_ic = false; 2498 else if (tx_q->tx_count_frames % priv->tx_coal_frames[queue] == 0) 2499 set_ic = true; 2500 else 2501 set_ic = false; 2502 2503 if (set_ic) { 2504 tx_q->tx_count_frames = 0; 2505 stmmac_set_tx_ic(priv, tx_desc); 2506 tx_set_ic_bit++; 2507 } 2508 2509 stmmac_prepare_tx_desc(priv, tx_desc, 1, xdp_desc.len, 2510 true, priv->mode, true, true, 2511 xdp_desc.len); 2512 2513 stmmac_enable_dma_transmission(priv, priv->ioaddr); 2514 2515 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size); 2516 entry = tx_q->cur_tx; 2517 } 2518 flags = u64_stats_update_begin_irqsave(&txq_stats->syncp); 2519 txq_stats->tx_set_ic_bit += tx_set_ic_bit; 2520 u64_stats_update_end_irqrestore(&txq_stats->syncp, flags); 2521 2522 if (tx_desc) { 2523 stmmac_flush_tx_descriptors(priv, queue); 2524 xsk_tx_release(pool); 2525 } 2526 2527 /* Return true if all of the 3 conditions are met 2528 * a) TX Budget is still available 2529 * b) work_done = true when XSK TX desc peek is empty (no more 2530 * pending XSK TX for transmission) 2531 */ 2532 return !!budget && work_done; 2533 } 2534 2535 static void stmmac_bump_dma_threshold(struct stmmac_priv *priv, u32 chan) 2536 { 2537 if (unlikely(priv->xstats.threshold != SF_DMA_MODE) && tc <= 256) { 2538 tc += 64; 2539 2540 if (priv->plat->force_thresh_dma_mode) 2541 stmmac_set_dma_operation_mode(priv, tc, tc, chan); 2542 else 2543 stmmac_set_dma_operation_mode(priv, tc, SF_DMA_MODE, 2544 chan); 2545 2546 priv->xstats.threshold = tc; 2547 } 2548 } 2549 2550 /** 2551 * stmmac_tx_clean - to manage the transmission completion 2552 * @priv: driver private structure 2553 * @budget: napi budget limiting this functions packet handling 2554 * @queue: TX queue index 2555 * Description: it reclaims the transmit resources after transmission completes. 
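 * Runs under the netdev TX queue lock; the reclaim loop is bounded by the
 * TX ring size, and the return value feeds the NAPI poll logic.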
2556 */ 2557 static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue) 2558 { 2559 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue]; 2560 struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[queue]; 2561 unsigned int bytes_compl = 0, pkts_compl = 0; 2562 unsigned int entry, xmits = 0, count = 0; 2563 u32 tx_packets = 0, tx_errors = 0; 2564 unsigned long flags; 2565 2566 __netif_tx_lock_bh(netdev_get_tx_queue(priv->dev, queue)); 2567 2568 tx_q->xsk_frames_done = 0; 2569 2570 entry = tx_q->dirty_tx; 2571 2572 /* Try to clean all TX complete frame in 1 shot */ 2573 while ((entry != tx_q->cur_tx) && count < priv->dma_conf.dma_tx_size) { 2574 struct xdp_frame *xdpf; 2575 struct sk_buff *skb; 2576 struct dma_desc *p; 2577 int status; 2578 2579 if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_TX || 2580 tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_NDO) { 2581 xdpf = tx_q->xdpf[entry]; 2582 skb = NULL; 2583 } else if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_SKB) { 2584 xdpf = NULL; 2585 skb = tx_q->tx_skbuff[entry]; 2586 } else { 2587 xdpf = NULL; 2588 skb = NULL; 2589 } 2590 2591 if (priv->extend_desc) 2592 p = (struct dma_desc *)(tx_q->dma_etx + entry); 2593 else if (tx_q->tbs & STMMAC_TBS_AVAIL) 2594 p = &tx_q->dma_entx[entry].basic; 2595 else 2596 p = tx_q->dma_tx + entry; 2597 2598 status = stmmac_tx_status(priv, &priv->xstats, p, priv->ioaddr); 2599 /* Check if the descriptor is owned by the DMA */ 2600 if (unlikely(status & tx_dma_own)) 2601 break; 2602 2603 count++; 2604 2605 /* Make sure descriptor fields are read after reading 2606 * the own bit. 2607 */ 2608 dma_rmb(); 2609 2610 /* Just consider the last segment and ...*/ 2611 if (likely(!(status & tx_not_ls))) { 2612 /* ... 
verify the status error condition */ 2613 if (unlikely(status & tx_err)) { 2614 tx_errors++; 2615 if (unlikely(status & tx_err_bump_tc)) 2616 stmmac_bump_dma_threshold(priv, queue); 2617 } else { 2618 tx_packets++; 2619 } 2620 if (skb) 2621 stmmac_get_tx_hwtstamp(priv, p, skb); 2622 } 2623 2624 if (likely(tx_q->tx_skbuff_dma[entry].buf && 2625 tx_q->tx_skbuff_dma[entry].buf_type != STMMAC_TXBUF_T_XDP_TX)) { 2626 if (tx_q->tx_skbuff_dma[entry].map_as_page) 2627 dma_unmap_page(priv->device, 2628 tx_q->tx_skbuff_dma[entry].buf, 2629 tx_q->tx_skbuff_dma[entry].len, 2630 DMA_TO_DEVICE); 2631 else 2632 dma_unmap_single(priv->device, 2633 tx_q->tx_skbuff_dma[entry].buf, 2634 tx_q->tx_skbuff_dma[entry].len, 2635 DMA_TO_DEVICE); 2636 tx_q->tx_skbuff_dma[entry].buf = 0; 2637 tx_q->tx_skbuff_dma[entry].len = 0; 2638 tx_q->tx_skbuff_dma[entry].map_as_page = false; 2639 } 2640 2641 stmmac_clean_desc3(priv, tx_q, p); 2642 2643 tx_q->tx_skbuff_dma[entry].last_segment = false; 2644 tx_q->tx_skbuff_dma[entry].is_jumbo = false; 2645 2646 if (xdpf && 2647 tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_TX) { 2648 xdp_return_frame_rx_napi(xdpf); 2649 tx_q->xdpf[entry] = NULL; 2650 } 2651 2652 if (xdpf && 2653 tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_NDO) { 2654 xdp_return_frame(xdpf); 2655 tx_q->xdpf[entry] = NULL; 2656 } 2657 2658 if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XSK_TX) 2659 tx_q->xsk_frames_done++; 2660 2661 if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_SKB) { 2662 if (likely(skb)) { 2663 pkts_compl++; 2664 bytes_compl += skb->len; 2665 dev_consume_skb_any(skb); 2666 tx_q->tx_skbuff[entry] = NULL; 2667 } 2668 } 2669 2670 stmmac_release_tx_desc(priv, p, priv->mode); 2671 2672 entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size); 2673 } 2674 tx_q->dirty_tx = entry; 2675 2676 netdev_tx_completed_queue(netdev_get_tx_queue(priv->dev, queue), 2677 pkts_compl, bytes_compl); 2678 2679 if (unlikely(netif_tx_queue_stopped(netdev_get_tx_queue(priv->dev, 2680 queue))) && 2681 stmmac_tx_avail(priv, queue) > STMMAC_TX_THRESH(priv)) { 2682 2683 netif_dbg(priv, tx_done, priv->dev, 2684 "%s: restart transmit\n", __func__); 2685 netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, queue)); 2686 } 2687 2688 if (tx_q->xsk_pool) { 2689 bool work_done; 2690 2691 if (tx_q->xsk_frames_done) 2692 xsk_tx_completed(tx_q->xsk_pool, tx_q->xsk_frames_done); 2693 2694 if (xsk_uses_need_wakeup(tx_q->xsk_pool)) 2695 xsk_set_tx_need_wakeup(tx_q->xsk_pool); 2696 2697 /* For XSK TX, we try to send as many as possible. 2698 * If XSK work done (XSK TX desc empty and budget still 2699 * available), return "budget - 1" to reenable TX IRQ. 2700 * Else, return "budget" to make NAPI continue polling. 
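 * e.g. with budget == 64: all XSK work done -> xmits = 63 (NAPI can
 * complete and re-enable the TX IRQ); work left -> xmits = 64 (NAPI
 * keeps polling).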
2701 */ 2702 work_done = stmmac_xdp_xmit_zc(priv, queue, 2703 STMMAC_XSK_TX_BUDGET_MAX); 2704 if (work_done) 2705 xmits = budget - 1; 2706 else 2707 xmits = budget; 2708 } 2709 2710 if (priv->eee_enabled && !priv->tx_path_in_lpi_mode && 2711 priv->eee_sw_timer_en) { 2712 if (stmmac_enable_eee_mode(priv)) 2713 mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer)); 2714 } 2715 2716 /* We still have pending packets, let's call for a new scheduling */ 2717 if (tx_q->dirty_tx != tx_q->cur_tx) 2718 stmmac_tx_timer_arm(priv, queue); 2719 2720 flags = u64_stats_update_begin_irqsave(&txq_stats->syncp); 2721 txq_stats->tx_packets += tx_packets; 2722 txq_stats->tx_pkt_n += tx_packets; 2723 txq_stats->tx_clean++; 2724 u64_stats_update_end_irqrestore(&txq_stats->syncp, flags); 2725 2726 priv->xstats.tx_errors += tx_errors; 2727 2728 __netif_tx_unlock_bh(netdev_get_tx_queue(priv->dev, queue)); 2729 2730 /* Combine decisions from TX clean and XSK TX */ 2731 return max(count, xmits); 2732 } 2733 2734 /** 2735 * stmmac_tx_err - to manage the tx error 2736 * @priv: driver private structure 2737 * @chan: channel index 2738 * Description: it cleans the descriptors and restarts the transmission 2739 * in case of transmission errors. 2740 */ 2741 static void stmmac_tx_err(struct stmmac_priv *priv, u32 chan) 2742 { 2743 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan]; 2744 2745 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, chan)); 2746 2747 stmmac_stop_tx_dma(priv, chan); 2748 dma_free_tx_skbufs(priv, &priv->dma_conf, chan); 2749 stmmac_clear_tx_descriptors(priv, &priv->dma_conf, chan); 2750 stmmac_reset_tx_queue(priv, chan); 2751 stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg, 2752 tx_q->dma_tx_phy, chan); 2753 stmmac_start_tx_dma(priv, chan); 2754 2755 priv->xstats.tx_errors++; 2756 netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, chan)); 2757 } 2758 2759 /** 2760 * stmmac_set_dma_operation_mode - Set DMA operation mode by channel 2761 * @priv: driver private structure 2762 * @txmode: TX operating mode 2763 * @rxmode: RX operating mode 2764 * @chan: channel index 2765 * Description: it is used for configuring of the DMA operation mode in 2766 * runtime in order to program the tx/rx DMA thresholds or Store-And-Forward 2767 * mode. 
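 * As in stmmac_dma_operation_mode(), the per-queue FIFO sizes are derived
 * by dividing the total RX/TX FIFO size by the number of channels in use.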
2768 */ 2769 static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode, 2770 u32 rxmode, u32 chan) 2771 { 2772 u8 rxqmode = priv->plat->rx_queues_cfg[chan].mode_to_use; 2773 u8 txqmode = priv->plat->tx_queues_cfg[chan].mode_to_use; 2774 u32 rx_channels_count = priv->plat->rx_queues_to_use; 2775 u32 tx_channels_count = priv->plat->tx_queues_to_use; 2776 int rxfifosz = priv->plat->rx_fifo_size; 2777 int txfifosz = priv->plat->tx_fifo_size; 2778 2779 if (rxfifosz == 0) 2780 rxfifosz = priv->dma_cap.rx_fifo_size; 2781 if (txfifosz == 0) 2782 txfifosz = priv->dma_cap.tx_fifo_size; 2783 2784 /* Adjust for real per queue fifo size */ 2785 rxfifosz /= rx_channels_count; 2786 txfifosz /= tx_channels_count; 2787 2788 stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan, rxfifosz, rxqmode); 2789 stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan, txfifosz, txqmode); 2790 } 2791 2792 static bool stmmac_safety_feat_interrupt(struct stmmac_priv *priv) 2793 { 2794 int ret; 2795 2796 ret = stmmac_safety_feat_irq_status(priv, priv->dev, 2797 priv->ioaddr, priv->dma_cap.asp, &priv->sstats); 2798 if (ret && (ret != -EINVAL)) { 2799 stmmac_global_err(priv); 2800 return true; 2801 } 2802 2803 return false; 2804 } 2805 2806 static int stmmac_napi_check(struct stmmac_priv *priv, u32 chan, u32 dir) 2807 { 2808 int status = stmmac_dma_interrupt_status(priv, priv->ioaddr, 2809 &priv->xstats, chan, dir); 2810 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[chan]; 2811 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan]; 2812 struct stmmac_channel *ch = &priv->channel[chan]; 2813 struct napi_struct *rx_napi; 2814 struct napi_struct *tx_napi; 2815 unsigned long flags; 2816 2817 rx_napi = rx_q->xsk_pool ? &ch->rxtx_napi : &ch->rx_napi; 2818 tx_napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi; 2819 2820 if ((status & handle_rx) && (chan < priv->plat->rx_queues_to_use)) { 2821 if (napi_schedule_prep(rx_napi)) { 2822 spin_lock_irqsave(&ch->lock, flags); 2823 stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 0); 2824 spin_unlock_irqrestore(&ch->lock, flags); 2825 __napi_schedule(rx_napi); 2826 } 2827 } 2828 2829 if ((status & handle_tx) && (chan < priv->plat->tx_queues_to_use)) { 2830 if (napi_schedule_prep(tx_napi)) { 2831 spin_lock_irqsave(&ch->lock, flags); 2832 stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 0, 1); 2833 spin_unlock_irqrestore(&ch->lock, flags); 2834 __napi_schedule(tx_napi); 2835 } 2836 } 2837 2838 return status; 2839 } 2840 2841 /** 2842 * stmmac_dma_interrupt - DMA ISR 2843 * @priv: driver private structure 2844 * Description: this is the DMA ISR. It is called by the main ISR. 2845 * It calls the dwmac dma routine and schedule poll method in case of some 2846 * work can be done. 2847 */ 2848 static void stmmac_dma_interrupt(struct stmmac_priv *priv) 2849 { 2850 u32 tx_channel_count = priv->plat->tx_queues_to_use; 2851 u32 rx_channel_count = priv->plat->rx_queues_to_use; 2852 u32 channels_to_check = tx_channel_count > rx_channel_count ? 2853 tx_channel_count : rx_channel_count; 2854 u32 chan; 2855 int status[max_t(u32, MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES)]; 2856 2857 /* Make sure we never check beyond our status buffer. 
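 * (status[] is sized for the larger of MTL_MAX_TX_QUEUES and
 * MTL_MAX_RX_QUEUES, so this should only trigger on a misconfiguration.)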
*/ 2858 if (WARN_ON_ONCE(channels_to_check > ARRAY_SIZE(status))) 2859 channels_to_check = ARRAY_SIZE(status); 2860 2861 for (chan = 0; chan < channels_to_check; chan++) 2862 status[chan] = stmmac_napi_check(priv, chan, 2863 DMA_DIR_RXTX); 2864 2865 for (chan = 0; chan < tx_channel_count; chan++) { 2866 if (unlikely(status[chan] & tx_hard_error_bump_tc)) { 2867 /* Try to bump up the dma threshold on this failure */ 2868 stmmac_bump_dma_threshold(priv, chan); 2869 } else if (unlikely(status[chan] == tx_hard_error)) { 2870 stmmac_tx_err(priv, chan); 2871 } 2872 } 2873 } 2874 2875 /** 2876 * stmmac_mmc_setup: setup the Mac Management Counters (MMC) 2877 * @priv: driver private structure 2878 * Description: this masks the MMC irq, in fact, the counters are managed in SW. 2879 */ 2880 static void stmmac_mmc_setup(struct stmmac_priv *priv) 2881 { 2882 unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET | 2883 MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET; 2884 2885 stmmac_mmc_intr_all_mask(priv, priv->mmcaddr); 2886 2887 if (priv->dma_cap.rmon) { 2888 stmmac_mmc_ctrl(priv, priv->mmcaddr, mode); 2889 memset(&priv->mmc, 0, sizeof(struct stmmac_counters)); 2890 } else 2891 netdev_info(priv->dev, "No MAC Management Counters available\n"); 2892 } 2893 2894 /** 2895 * stmmac_get_hw_features - get MAC capabilities from the HW cap. register. 2896 * @priv: driver private structure 2897 * Description: 2898 * new GMAC chip generations have a new register to indicate the 2899 * presence of the optional feature/functions. 2900 * This can be also used to override the value passed through the 2901 * platform and necessary for old MAC10/100 and GMAC chips. 2902 */ 2903 static int stmmac_get_hw_features(struct stmmac_priv *priv) 2904 { 2905 return stmmac_get_hw_feature(priv, priv->ioaddr, &priv->dma_cap) == 0; 2906 } 2907 2908 /** 2909 * stmmac_check_ether_addr - check if the MAC addr is valid 2910 * @priv: driver private structure 2911 * Description: 2912 * it is to verify if the MAC address is valid, in case of failures it 2913 * generates a random MAC address 2914 */ 2915 static void stmmac_check_ether_addr(struct stmmac_priv *priv) 2916 { 2917 u8 addr[ETH_ALEN]; 2918 2919 if (!is_valid_ether_addr(priv->dev->dev_addr)) { 2920 stmmac_get_umac_addr(priv, priv->hw, addr, 0); 2921 if (is_valid_ether_addr(addr)) 2922 eth_hw_addr_set(priv->dev, addr); 2923 else 2924 eth_hw_addr_random(priv->dev); 2925 dev_info(priv->device, "device MAC address %pM\n", 2926 priv->dev->dev_addr); 2927 } 2928 } 2929 2930 /** 2931 * stmmac_init_dma_engine - DMA init. 2932 * @priv: driver private structure 2933 * Description: 2934 * It inits the DMA invoking the specific MAC/GMAC callback. 2935 * Some DMA parameters can be passed from the platform; 2936 * in case of these are not passed a default is kept for the MAC or GMAC. 
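 * It also programs the initial RX/TX tail pointers for every channel once
 * the per-channel DMA has been initialized.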
2937 */ 2938 static int stmmac_init_dma_engine(struct stmmac_priv *priv) 2939 { 2940 u32 rx_channels_count = priv->plat->rx_queues_to_use; 2941 u32 tx_channels_count = priv->plat->tx_queues_to_use; 2942 u32 dma_csr_ch = max(rx_channels_count, tx_channels_count); 2943 struct stmmac_rx_queue *rx_q; 2944 struct stmmac_tx_queue *tx_q; 2945 u32 chan = 0; 2946 int atds = 0; 2947 int ret = 0; 2948 2949 if (!priv->plat->dma_cfg || !priv->plat->dma_cfg->pbl) { 2950 dev_err(priv->device, "Invalid DMA configuration\n"); 2951 return -EINVAL; 2952 } 2953 2954 if (priv->extend_desc && (priv->mode == STMMAC_RING_MODE)) 2955 atds = 1; 2956 2957 ret = stmmac_reset(priv, priv->ioaddr); 2958 if (ret) { 2959 dev_err(priv->device, "Failed to reset the dma\n"); 2960 return ret; 2961 } 2962 2963 /* DMA Configuration */ 2964 stmmac_dma_init(priv, priv->ioaddr, priv->plat->dma_cfg, atds); 2965 2966 if (priv->plat->axi) 2967 stmmac_axi(priv, priv->ioaddr, priv->plat->axi); 2968 2969 /* DMA CSR Channel configuration */ 2970 for (chan = 0; chan < dma_csr_ch; chan++) { 2971 stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan); 2972 stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 1); 2973 } 2974 2975 /* DMA RX Channel Configuration */ 2976 for (chan = 0; chan < rx_channels_count; chan++) { 2977 rx_q = &priv->dma_conf.rx_queue[chan]; 2978 2979 stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg, 2980 rx_q->dma_rx_phy, chan); 2981 2982 rx_q->rx_tail_addr = rx_q->dma_rx_phy + 2983 (rx_q->buf_alloc_num * 2984 sizeof(struct dma_desc)); 2985 stmmac_set_rx_tail_ptr(priv, priv->ioaddr, 2986 rx_q->rx_tail_addr, chan); 2987 } 2988 2989 /* DMA TX Channel Configuration */ 2990 for (chan = 0; chan < tx_channels_count; chan++) { 2991 tx_q = &priv->dma_conf.tx_queue[chan]; 2992 2993 stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg, 2994 tx_q->dma_tx_phy, chan); 2995 2996 tx_q->tx_tail_addr = tx_q->dma_tx_phy; 2997 stmmac_set_tx_tail_ptr(priv, priv->ioaddr, 2998 tx_q->tx_tail_addr, chan); 2999 } 3000 3001 return ret; 3002 } 3003 3004 static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue) 3005 { 3006 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue]; 3007 u32 tx_coal_timer = priv->tx_coal_timer[queue]; 3008 3009 if (!tx_coal_timer) 3010 return; 3011 3012 hrtimer_start(&tx_q->txtimer, 3013 STMMAC_COAL_TIMER(tx_coal_timer), 3014 HRTIMER_MODE_REL); 3015 } 3016 3017 /** 3018 * stmmac_tx_timer - mitigation sw timer for tx. 3019 * @t: data pointer 3020 * Description: 3021 * This is the timer handler to directly invoke the stmmac_tx_clean. 3022 */ 3023 static enum hrtimer_restart stmmac_tx_timer(struct hrtimer *t) 3024 { 3025 struct stmmac_tx_queue *tx_q = container_of(t, struct stmmac_tx_queue, txtimer); 3026 struct stmmac_priv *priv = tx_q->priv_data; 3027 struct stmmac_channel *ch; 3028 struct napi_struct *napi; 3029 3030 ch = &priv->channel[tx_q->queue_index]; 3031 napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi; 3032 3033 if (likely(napi_schedule_prep(napi))) { 3034 unsigned long flags; 3035 3036 spin_lock_irqsave(&ch->lock, flags); 3037 stmmac_disable_dma_irq(priv, priv->ioaddr, ch->index, 0, 1); 3038 spin_unlock_irqrestore(&ch->lock, flags); 3039 __napi_schedule(napi); 3040 } 3041 3042 return HRTIMER_NORESTART; 3043 } 3044 3045 /** 3046 * stmmac_init_coalesce - init mitigation options. 3047 * @priv: driver private structure 3048 * Description: 3049 * This inits the coalesce parameters: i.e. 
timer rate, 3050 * timer handler and default threshold used for enabling the 3051 * interrupt on completion bit. 3052 */ 3053 static void stmmac_init_coalesce(struct stmmac_priv *priv) 3054 { 3055 u32 tx_channel_count = priv->plat->tx_queues_to_use; 3056 u32 rx_channel_count = priv->plat->rx_queues_to_use; 3057 u32 chan; 3058 3059 for (chan = 0; chan < tx_channel_count; chan++) { 3060 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan]; 3061 3062 priv->tx_coal_frames[chan] = STMMAC_TX_FRAMES; 3063 priv->tx_coal_timer[chan] = STMMAC_COAL_TX_TIMER; 3064 3065 hrtimer_init(&tx_q->txtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); 3066 tx_q->txtimer.function = stmmac_tx_timer; 3067 } 3068 3069 for (chan = 0; chan < rx_channel_count; chan++) 3070 priv->rx_coal_frames[chan] = STMMAC_RX_FRAMES; 3071 } 3072 3073 static void stmmac_set_rings_length(struct stmmac_priv *priv) 3074 { 3075 u32 rx_channels_count = priv->plat->rx_queues_to_use; 3076 u32 tx_channels_count = priv->plat->tx_queues_to_use; 3077 u32 chan; 3078 3079 /* set TX ring length */ 3080 for (chan = 0; chan < tx_channels_count; chan++) 3081 stmmac_set_tx_ring_len(priv, priv->ioaddr, 3082 (priv->dma_conf.dma_tx_size - 1), chan); 3083 3084 /* set RX ring length */ 3085 for (chan = 0; chan < rx_channels_count; chan++) 3086 stmmac_set_rx_ring_len(priv, priv->ioaddr, 3087 (priv->dma_conf.dma_rx_size - 1), chan); 3088 } 3089 3090 /** 3091 * stmmac_set_tx_queue_weight - Set TX queue weight 3092 * @priv: driver private structure 3093 * Description: It is used for setting TX queues weight 3094 */ 3095 static void stmmac_set_tx_queue_weight(struct stmmac_priv *priv) 3096 { 3097 u32 tx_queues_count = priv->plat->tx_queues_to_use; 3098 u32 weight; 3099 u32 queue; 3100 3101 for (queue = 0; queue < tx_queues_count; queue++) { 3102 weight = priv->plat->tx_queues_cfg[queue].weight; 3103 stmmac_set_mtl_tx_queue_weight(priv, priv->hw, weight, queue); 3104 } 3105 } 3106 3107 /** 3108 * stmmac_configure_cbs - Configure CBS in TX queue 3109 * @priv: driver private structure 3110 * Description: It is used for configuring CBS in AVB TX queues 3111 */ 3112 static void stmmac_configure_cbs(struct stmmac_priv *priv) 3113 { 3114 u32 tx_queues_count = priv->plat->tx_queues_to_use; 3115 u32 mode_to_use; 3116 u32 queue; 3117 3118 /* queue 0 is reserved for legacy traffic */ 3119 for (queue = 1; queue < tx_queues_count; queue++) { 3120 mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use; 3121 if (mode_to_use == MTL_QUEUE_DCB) 3122 continue; 3123 3124 stmmac_config_cbs(priv, priv->hw, 3125 priv->plat->tx_queues_cfg[queue].send_slope, 3126 priv->plat->tx_queues_cfg[queue].idle_slope, 3127 priv->plat->tx_queues_cfg[queue].high_credit, 3128 priv->plat->tx_queues_cfg[queue].low_credit, 3129 queue); 3130 } 3131 } 3132 3133 /** 3134 * stmmac_rx_queue_dma_chan_map - Map RX queue to RX dma channel 3135 * @priv: driver private structure 3136 * Description: It is used for mapping RX queues to RX dma channels 3137 */ 3138 static void stmmac_rx_queue_dma_chan_map(struct stmmac_priv *priv) 3139 { 3140 u32 rx_queues_count = priv->plat->rx_queues_to_use; 3141 u32 queue; 3142 u32 chan; 3143 3144 for (queue = 0; queue < rx_queues_count; queue++) { 3145 chan = priv->plat->rx_queues_cfg[queue].chan; 3146 stmmac_map_mtl_to_dma(priv, priv->hw, queue, chan); 3147 } 3148 } 3149 3150 /** 3151 * stmmac_mac_config_rx_queues_prio - Configure RX Queue priority 3152 * @priv: driver private structure 3153 * Description: It is used for configuring the RX Queue Priority 3154 */ 3155 
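/* Example (hypothetical platform data): a queue configured with
 *   rx_queues_cfg[1].use_prio = true;
 *   rx_queues_cfg[1].prio = 2;
 * gets priority 2 programmed below, while queues that leave use_prio
 * unset keep the hardware default.
 */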
static void stmmac_mac_config_rx_queues_prio(struct stmmac_priv *priv) 3156 { 3157 u32 rx_queues_count = priv->plat->rx_queues_to_use; 3158 u32 queue; 3159 u32 prio; 3160 3161 for (queue = 0; queue < rx_queues_count; queue++) { 3162 if (!priv->plat->rx_queues_cfg[queue].use_prio) 3163 continue; 3164 3165 prio = priv->plat->rx_queues_cfg[queue].prio; 3166 stmmac_rx_queue_prio(priv, priv->hw, prio, queue); 3167 } 3168 } 3169 3170 /** 3171 * stmmac_mac_config_tx_queues_prio - Configure TX Queue priority 3172 * @priv: driver private structure 3173 * Description: It is used for configuring the TX Queue Priority 3174 */ 3175 static void stmmac_mac_config_tx_queues_prio(struct stmmac_priv *priv) 3176 { 3177 u32 tx_queues_count = priv->plat->tx_queues_to_use; 3178 u32 queue; 3179 u32 prio; 3180 3181 for (queue = 0; queue < tx_queues_count; queue++) { 3182 if (!priv->plat->tx_queues_cfg[queue].use_prio) 3183 continue; 3184 3185 prio = priv->plat->tx_queues_cfg[queue].prio; 3186 stmmac_tx_queue_prio(priv, priv->hw, prio, queue); 3187 } 3188 } 3189 3190 /** 3191 * stmmac_mac_config_rx_queues_routing - Configure RX Queue Routing 3192 * @priv: driver private structure 3193 * Description: It is used for configuring the RX queue routing 3194 */ 3195 static void stmmac_mac_config_rx_queues_routing(struct stmmac_priv *priv) 3196 { 3197 u32 rx_queues_count = priv->plat->rx_queues_to_use; 3198 u32 queue; 3199 u8 packet; 3200 3201 for (queue = 0; queue < rx_queues_count; queue++) { 3202 /* no specific packet type routing specified for the queue */ 3203 if (priv->plat->rx_queues_cfg[queue].pkt_route == 0x0) 3204 continue; 3205 3206 packet = priv->plat->rx_queues_cfg[queue].pkt_route; 3207 stmmac_rx_queue_routing(priv, priv->hw, packet, queue); 3208 } 3209 } 3210 3211 static void stmmac_mac_config_rss(struct stmmac_priv *priv) 3212 { 3213 if (!priv->dma_cap.rssen || !priv->plat->rss_en) { 3214 priv->rss.enable = false; 3215 return; 3216 } 3217 3218 if (priv->dev->features & NETIF_F_RXHASH) 3219 priv->rss.enable = true; 3220 else 3221 priv->rss.enable = false; 3222 3223 stmmac_rss_configure(priv, priv->hw, &priv->rss, 3224 priv->plat->rx_queues_to_use); 3225 } 3226 3227 /** 3228 * stmmac_mtl_configuration - Configure MTL 3229 * @priv: driver private structure 3230 * Description: It is used for configurring MTL 3231 */ 3232 static void stmmac_mtl_configuration(struct stmmac_priv *priv) 3233 { 3234 u32 rx_queues_count = priv->plat->rx_queues_to_use; 3235 u32 tx_queues_count = priv->plat->tx_queues_to_use; 3236 3237 if (tx_queues_count > 1) 3238 stmmac_set_tx_queue_weight(priv); 3239 3240 /* Configure MTL RX algorithms */ 3241 if (rx_queues_count > 1) 3242 stmmac_prog_mtl_rx_algorithms(priv, priv->hw, 3243 priv->plat->rx_sched_algorithm); 3244 3245 /* Configure MTL TX algorithms */ 3246 if (tx_queues_count > 1) 3247 stmmac_prog_mtl_tx_algorithms(priv, priv->hw, 3248 priv->plat->tx_sched_algorithm); 3249 3250 /* Configure CBS in AVB TX queues */ 3251 if (tx_queues_count > 1) 3252 stmmac_configure_cbs(priv); 3253 3254 /* Map RX MTL to DMA channels */ 3255 stmmac_rx_queue_dma_chan_map(priv); 3256 3257 /* Enable MAC RX Queues */ 3258 stmmac_mac_enable_rx_queues(priv); 3259 3260 /* Set RX priorities */ 3261 if (rx_queues_count > 1) 3262 stmmac_mac_config_rx_queues_prio(priv); 3263 3264 /* Set TX priorities */ 3265 if (tx_queues_count > 1) 3266 stmmac_mac_config_tx_queues_prio(priv); 3267 3268 /* Set RX routing */ 3269 if (rx_queues_count > 1) 3270 stmmac_mac_config_rx_queues_routing(priv); 3271 3272 /* Receive Side 
Scaling */ 3273 if (rx_queues_count > 1) 3274 stmmac_mac_config_rss(priv); 3275 } 3276 3277 static void stmmac_safety_feat_configuration(struct stmmac_priv *priv) 3278 { 3279 if (priv->dma_cap.asp) { 3280 netdev_info(priv->dev, "Enabling Safety Features\n"); 3281 stmmac_safety_feat_config(priv, priv->ioaddr, priv->dma_cap.asp, 3282 priv->plat->safety_feat_cfg); 3283 } else { 3284 netdev_info(priv->dev, "No Safety Features support found\n"); 3285 } 3286 } 3287 3288 static int stmmac_fpe_start_wq(struct stmmac_priv *priv) 3289 { 3290 char *name; 3291 3292 clear_bit(__FPE_TASK_SCHED, &priv->fpe_task_state); 3293 clear_bit(__FPE_REMOVING, &priv->fpe_task_state); 3294 3295 name = priv->wq_name; 3296 sprintf(name, "%s-fpe", priv->dev->name); 3297 3298 priv->fpe_wq = create_singlethread_workqueue(name); 3299 if (!priv->fpe_wq) { 3300 netdev_err(priv->dev, "%s: Failed to create workqueue\n", name); 3301 3302 return -ENOMEM; 3303 } 3304 netdev_info(priv->dev, "FPE workqueue start"); 3305 3306 return 0; 3307 } 3308 3309 /** 3310 * stmmac_hw_setup - setup mac in a usable state. 3311 * @dev : pointer to the device structure. 3312 * @ptp_register: register PTP if set 3313 * Description: 3314 * this is the main function to setup the HW in a usable state because the 3315 * dma engine is reset, the core registers are configured (e.g. AXI, 3316 * Checksum features, timers). The DMA is ready to start receiving and 3317 * transmitting. 3318 * Return value: 3319 * 0 on success and an appropriate (-)ve integer as defined in errno.h 3320 * file on failure. 3321 */ 3322 static int stmmac_hw_setup(struct net_device *dev, bool ptp_register) 3323 { 3324 struct stmmac_priv *priv = netdev_priv(dev); 3325 u32 rx_cnt = priv->plat->rx_queues_to_use; 3326 u32 tx_cnt = priv->plat->tx_queues_to_use; 3327 bool sph_en; 3328 u32 chan; 3329 int ret; 3330 3331 /* DMA initialization and SW reset */ 3332 ret = stmmac_init_dma_engine(priv); 3333 if (ret < 0) { 3334 netdev_err(priv->dev, "%s: DMA engine initialization failed\n", 3335 __func__); 3336 return ret; 3337 } 3338 3339 /* Copy the MAC addr into the HW */ 3340 stmmac_set_umac_addr(priv, priv->hw, dev->dev_addr, 0); 3341 3342 /* PS and related bits will be programmed according to the speed */ 3343 if (priv->hw->pcs) { 3344 int speed = priv->plat->mac_port_sel_speed; 3345 3346 if ((speed == SPEED_10) || (speed == SPEED_100) || 3347 (speed == SPEED_1000)) { 3348 priv->hw->ps = speed; 3349 } else { 3350 dev_warn(priv->device, "invalid port speed\n"); 3351 priv->hw->ps = 0; 3352 } 3353 } 3354 3355 /* Initialize the MAC Core */ 3356 stmmac_core_init(priv, priv->hw, dev); 3357 3358 /* Initialize MTL*/ 3359 stmmac_mtl_configuration(priv); 3360 3361 /* Initialize Safety Features */ 3362 stmmac_safety_feat_configuration(priv); 3363 3364 ret = stmmac_rx_ipc(priv, priv->hw); 3365 if (!ret) { 3366 netdev_warn(priv->dev, "RX IPC Checksum Offload disabled\n"); 3367 priv->plat->rx_coe = STMMAC_RX_COE_NONE; 3368 priv->hw->rx_csum = 0; 3369 } 3370 3371 /* Enable the MAC Rx/Tx */ 3372 stmmac_mac_set(priv, priv->ioaddr, true); 3373 3374 /* Set the HW DMA mode and the COE */ 3375 stmmac_dma_operation_mode(priv); 3376 3377 stmmac_mmc_setup(priv); 3378 3379 if (ptp_register) { 3380 ret = clk_prepare_enable(priv->plat->clk_ptp_ref); 3381 if (ret < 0) 3382 netdev_warn(priv->dev, 3383 "failed to enable PTP reference clock: %pe\n", 3384 ERR_PTR(ret)); 3385 } 3386 3387 ret = stmmac_init_ptp(priv); 3388 if (ret == -EOPNOTSUPP) 3389 netdev_info(priv->dev, "PTP not supported by HW\n"); 3390 else if (ret) 
3391 netdev_warn(priv->dev, "PTP init failed\n"); 3392 else if (ptp_register) 3393 stmmac_ptp_register(priv); 3394 3395 priv->eee_tw_timer = STMMAC_DEFAULT_TWT_LS; 3396 3397 /* Convert the timer from msec to usec */ 3398 if (!priv->tx_lpi_timer) 3399 priv->tx_lpi_timer = eee_timer * 1000; 3400 3401 if (priv->use_riwt) { 3402 u32 queue; 3403 3404 for (queue = 0; queue < rx_cnt; queue++) { 3405 if (!priv->rx_riwt[queue]) 3406 priv->rx_riwt[queue] = DEF_DMA_RIWT; 3407 3408 stmmac_rx_watchdog(priv, priv->ioaddr, 3409 priv->rx_riwt[queue], queue); 3410 } 3411 } 3412 3413 if (priv->hw->pcs) 3414 stmmac_pcs_ctrl_ane(priv, priv->ioaddr, 1, priv->hw->ps, 0); 3415 3416 /* set TX and RX rings length */ 3417 stmmac_set_rings_length(priv); 3418 3419 /* Enable TSO */ 3420 if (priv->tso) { 3421 for (chan = 0; chan < tx_cnt; chan++) { 3422 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan]; 3423 3424 /* TSO and TBS cannot co-exist */ 3425 if (tx_q->tbs & STMMAC_TBS_AVAIL) 3426 continue; 3427 3428 stmmac_enable_tso(priv, priv->ioaddr, 1, chan); 3429 } 3430 } 3431 3432 /* Enable Split Header */ 3433 sph_en = (priv->hw->rx_csum > 0) && priv->sph; 3434 for (chan = 0; chan < rx_cnt; chan++) 3435 stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan); 3436 3437 3438 /* VLAN Tag Insertion */ 3439 if (priv->dma_cap.vlins) 3440 stmmac_enable_vlan(priv, priv->hw, STMMAC_VLAN_INSERT); 3441 3442 /* TBS */ 3443 for (chan = 0; chan < tx_cnt; chan++) { 3444 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan]; 3445 int enable = tx_q->tbs & STMMAC_TBS_AVAIL; 3446 3447 stmmac_enable_tbs(priv, priv->ioaddr, enable, chan); 3448 } 3449 3450 /* Configure real RX and TX queues */ 3451 netif_set_real_num_rx_queues(dev, priv->plat->rx_queues_to_use); 3452 netif_set_real_num_tx_queues(dev, priv->plat->tx_queues_to_use); 3453 3454 /* Start the ball rolling... 
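 * (all RX DMA channels are started first, then all TX DMA channels)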
*/ 3455 stmmac_start_all_dma(priv); 3456 3457 if (priv->dma_cap.fpesel) { 3458 stmmac_fpe_start_wq(priv); 3459 3460 if (priv->plat->fpe_cfg->enable) 3461 stmmac_fpe_handshake(priv, true); 3462 } 3463 3464 return 0; 3465 } 3466 3467 static void stmmac_hw_teardown(struct net_device *dev) 3468 { 3469 struct stmmac_priv *priv = netdev_priv(dev); 3470 3471 clk_disable_unprepare(priv->plat->clk_ptp_ref); 3472 } 3473 3474 static void stmmac_free_irq(struct net_device *dev, 3475 enum request_irq_err irq_err, int irq_idx) 3476 { 3477 struct stmmac_priv *priv = netdev_priv(dev); 3478 int j; 3479 3480 switch (irq_err) { 3481 case REQ_IRQ_ERR_ALL: 3482 irq_idx = priv->plat->tx_queues_to_use; 3483 fallthrough; 3484 case REQ_IRQ_ERR_TX: 3485 for (j = irq_idx - 1; j >= 0; j--) { 3486 if (priv->tx_irq[j] > 0) { 3487 irq_set_affinity_hint(priv->tx_irq[j], NULL); 3488 free_irq(priv->tx_irq[j], &priv->dma_conf.tx_queue[j]); 3489 } 3490 } 3491 irq_idx = priv->plat->rx_queues_to_use; 3492 fallthrough; 3493 case REQ_IRQ_ERR_RX: 3494 for (j = irq_idx - 1; j >= 0; j--) { 3495 if (priv->rx_irq[j] > 0) { 3496 irq_set_affinity_hint(priv->rx_irq[j], NULL); 3497 free_irq(priv->rx_irq[j], &priv->dma_conf.rx_queue[j]); 3498 } 3499 } 3500 3501 if (priv->sfty_ue_irq > 0 && priv->sfty_ue_irq != dev->irq) 3502 free_irq(priv->sfty_ue_irq, dev); 3503 fallthrough; 3504 case REQ_IRQ_ERR_SFTY_UE: 3505 if (priv->sfty_ce_irq > 0 && priv->sfty_ce_irq != dev->irq) 3506 free_irq(priv->sfty_ce_irq, dev); 3507 fallthrough; 3508 case REQ_IRQ_ERR_SFTY_CE: 3509 if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq) 3510 free_irq(priv->lpi_irq, dev); 3511 fallthrough; 3512 case REQ_IRQ_ERR_LPI: 3513 if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) 3514 free_irq(priv->wol_irq, dev); 3515 fallthrough; 3516 case REQ_IRQ_ERR_WOL: 3517 free_irq(dev->irq, dev); 3518 fallthrough; 3519 case REQ_IRQ_ERR_MAC: 3520 case REQ_IRQ_ERR_NO: 3521 /* If MAC IRQ request error, no more IRQ to free */ 3522 break; 3523 } 3524 } 3525 3526 static int stmmac_request_irq_multi_msi(struct net_device *dev) 3527 { 3528 struct stmmac_priv *priv = netdev_priv(dev); 3529 enum request_irq_err irq_err; 3530 cpumask_t cpu_mask; 3531 int irq_idx = 0; 3532 char *int_name; 3533 int ret; 3534 int i; 3535 3536 /* For common interrupt */ 3537 int_name = priv->int_name_mac; 3538 sprintf(int_name, "%s:%s", dev->name, "mac"); 3539 ret = request_irq(dev->irq, stmmac_mac_interrupt, 3540 0, int_name, dev); 3541 if (unlikely(ret < 0)) { 3542 netdev_err(priv->dev, 3543 "%s: alloc mac MSI %d (error: %d)\n", 3544 __func__, dev->irq, ret); 3545 irq_err = REQ_IRQ_ERR_MAC; 3546 goto irq_error; 3547 } 3548 3549 /* Request the Wake IRQ in case of another line 3550 * is used for WoL 3551 */ 3552 priv->wol_irq_disabled = true; 3553 if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) { 3554 int_name = priv->int_name_wol; 3555 sprintf(int_name, "%s:%s", dev->name, "wol"); 3556 ret = request_irq(priv->wol_irq, 3557 stmmac_mac_interrupt, 3558 0, int_name, dev); 3559 if (unlikely(ret < 0)) { 3560 netdev_err(priv->dev, 3561 "%s: alloc wol MSI %d (error: %d)\n", 3562 __func__, priv->wol_irq, ret); 3563 irq_err = REQ_IRQ_ERR_WOL; 3564 goto irq_error; 3565 } 3566 } 3567 3568 /* Request the LPI IRQ in case of another line 3569 * is used for LPI 3570 */ 3571 if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq) { 3572 int_name = priv->int_name_lpi; 3573 sprintf(int_name, "%s:%s", dev->name, "lpi"); 3574 ret = request_irq(priv->lpi_irq, 3575 stmmac_mac_interrupt, 3576 0, int_name, dev); 3577 if (unlikely(ret < 
0)) { 3578 netdev_err(priv->dev, 3579 "%s: alloc lpi MSI %d (error: %d)\n", 3580 __func__, priv->lpi_irq, ret); 3581 irq_err = REQ_IRQ_ERR_LPI; 3582 goto irq_error; 3583 } 3584 } 3585 3586 /* Request the Safety Feature Correctible Error line in 3587 * case of another line is used 3588 */ 3589 if (priv->sfty_ce_irq > 0 && priv->sfty_ce_irq != dev->irq) { 3590 int_name = priv->int_name_sfty_ce; 3591 sprintf(int_name, "%s:%s", dev->name, "safety-ce"); 3592 ret = request_irq(priv->sfty_ce_irq, 3593 stmmac_safety_interrupt, 3594 0, int_name, dev); 3595 if (unlikely(ret < 0)) { 3596 netdev_err(priv->dev, 3597 "%s: alloc sfty ce MSI %d (error: %d)\n", 3598 __func__, priv->sfty_ce_irq, ret); 3599 irq_err = REQ_IRQ_ERR_SFTY_CE; 3600 goto irq_error; 3601 } 3602 } 3603 3604 /* Request the Safety Feature Uncorrectible Error line in 3605 * case of another line is used 3606 */ 3607 if (priv->sfty_ue_irq > 0 && priv->sfty_ue_irq != dev->irq) { 3608 int_name = priv->int_name_sfty_ue; 3609 sprintf(int_name, "%s:%s", dev->name, "safety-ue"); 3610 ret = request_irq(priv->sfty_ue_irq, 3611 stmmac_safety_interrupt, 3612 0, int_name, dev); 3613 if (unlikely(ret < 0)) { 3614 netdev_err(priv->dev, 3615 "%s: alloc sfty ue MSI %d (error: %d)\n", 3616 __func__, priv->sfty_ue_irq, ret); 3617 irq_err = REQ_IRQ_ERR_SFTY_UE; 3618 goto irq_error; 3619 } 3620 } 3621 3622 /* Request Rx MSI irq */ 3623 for (i = 0; i < priv->plat->rx_queues_to_use; i++) { 3624 if (i >= MTL_MAX_RX_QUEUES) 3625 break; 3626 if (priv->rx_irq[i] == 0) 3627 continue; 3628 3629 int_name = priv->int_name_rx_irq[i]; 3630 sprintf(int_name, "%s:%s-%d", dev->name, "rx", i); 3631 ret = request_irq(priv->rx_irq[i], 3632 stmmac_msi_intr_rx, 3633 0, int_name, &priv->dma_conf.rx_queue[i]); 3634 if (unlikely(ret < 0)) { 3635 netdev_err(priv->dev, 3636 "%s: alloc rx-%d MSI %d (error: %d)\n", 3637 __func__, i, priv->rx_irq[i], ret); 3638 irq_err = REQ_IRQ_ERR_RX; 3639 irq_idx = i; 3640 goto irq_error; 3641 } 3642 cpumask_clear(&cpu_mask); 3643 cpumask_set_cpu(i % num_online_cpus(), &cpu_mask); 3644 irq_set_affinity_hint(priv->rx_irq[i], &cpu_mask); 3645 } 3646 3647 /* Request Tx MSI irq */ 3648 for (i = 0; i < priv->plat->tx_queues_to_use; i++) { 3649 if (i >= MTL_MAX_TX_QUEUES) 3650 break; 3651 if (priv->tx_irq[i] == 0) 3652 continue; 3653 3654 int_name = priv->int_name_tx_irq[i]; 3655 sprintf(int_name, "%s:%s-%d", dev->name, "tx", i); 3656 ret = request_irq(priv->tx_irq[i], 3657 stmmac_msi_intr_tx, 3658 0, int_name, &priv->dma_conf.tx_queue[i]); 3659 if (unlikely(ret < 0)) { 3660 netdev_err(priv->dev, 3661 "%s: alloc tx-%d MSI %d (error: %d)\n", 3662 __func__, i, priv->tx_irq[i], ret); 3663 irq_err = REQ_IRQ_ERR_TX; 3664 irq_idx = i; 3665 goto irq_error; 3666 } 3667 cpumask_clear(&cpu_mask); 3668 cpumask_set_cpu(i % num_online_cpus(), &cpu_mask); 3669 irq_set_affinity_hint(priv->tx_irq[i], &cpu_mask); 3670 } 3671 3672 return 0; 3673 3674 irq_error: 3675 stmmac_free_irq(dev, irq_err, irq_idx); 3676 return ret; 3677 } 3678 3679 static int stmmac_request_irq_single(struct net_device *dev) 3680 { 3681 struct stmmac_priv *priv = netdev_priv(dev); 3682 enum request_irq_err irq_err; 3683 int ret; 3684 3685 ret = request_irq(dev->irq, stmmac_interrupt, 3686 IRQF_SHARED, dev->name, dev); 3687 if (unlikely(ret < 0)) { 3688 netdev_err(priv->dev, 3689 "%s: ERROR: allocating the IRQ %d (error: %d)\n", 3690 __func__, dev->irq, ret); 3691 irq_err = REQ_IRQ_ERR_MAC; 3692 goto irq_error; 3693 } 3694 3695 /* Request the Wake IRQ in case of another line 3696 * is used for WoL 
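 * (when wol_irq equals dev->irq, the shared handler requested above
 * already covers WoL)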
3697 */ 3698 if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) { 3699 ret = request_irq(priv->wol_irq, stmmac_interrupt, 3700 IRQF_SHARED, dev->name, dev); 3701 if (unlikely(ret < 0)) { 3702 netdev_err(priv->dev, 3703 "%s: ERROR: allocating the WoL IRQ %d (%d)\n", 3704 __func__, priv->wol_irq, ret); 3705 irq_err = REQ_IRQ_ERR_WOL; 3706 goto irq_error; 3707 } 3708 } 3709 3710 /* Request the IRQ lines */ 3711 if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq) { 3712 ret = request_irq(priv->lpi_irq, stmmac_interrupt, 3713 IRQF_SHARED, dev->name, dev); 3714 if (unlikely(ret < 0)) { 3715 netdev_err(priv->dev, 3716 "%s: ERROR: allocating the LPI IRQ %d (%d)\n", 3717 __func__, priv->lpi_irq, ret); 3718 irq_err = REQ_IRQ_ERR_LPI; 3719 goto irq_error; 3720 } 3721 } 3722 3723 return 0; 3724 3725 irq_error: 3726 stmmac_free_irq(dev, irq_err, 0); 3727 return ret; 3728 } 3729 3730 static int stmmac_request_irq(struct net_device *dev) 3731 { 3732 struct stmmac_priv *priv = netdev_priv(dev); 3733 int ret; 3734 3735 /* Request the IRQ lines */ 3736 if (priv->plat->flags & STMMAC_FLAG_MULTI_MSI_EN) 3737 ret = stmmac_request_irq_multi_msi(dev); 3738 else 3739 ret = stmmac_request_irq_single(dev); 3740 3741 return ret; 3742 } 3743 3744 /** 3745 * stmmac_setup_dma_desc - Generate a dma_conf and allocate DMA queue 3746 * @priv: driver private structure 3747 * @mtu: MTU to setup the dma queue and buf with 3748 * Description: Allocate and generate a dma_conf based on the provided MTU. 3749 * Allocate the Tx/Rx DMA queue and init them. 3750 * Return value: 3751 * the dma_conf allocated struct on success and an appropriate ERR_PTR on failure. 3752 */ 3753 static struct stmmac_dma_conf * 3754 stmmac_setup_dma_desc(struct stmmac_priv *priv, unsigned int mtu) 3755 { 3756 struct stmmac_dma_conf *dma_conf; 3757 int chan, bfsize, ret; 3758 3759 dma_conf = kzalloc(sizeof(*dma_conf), GFP_KERNEL); 3760 if (!dma_conf) { 3761 netdev_err(priv->dev, "%s: DMA conf allocation failed\n", 3762 __func__); 3763 return ERR_PTR(-ENOMEM); 3764 } 3765 3766 bfsize = stmmac_set_16kib_bfsize(priv, mtu); 3767 if (bfsize < 0) 3768 bfsize = 0; 3769 3770 if (bfsize < BUF_SIZE_16KiB) 3771 bfsize = stmmac_set_bfsize(mtu, 0); 3772 3773 dma_conf->dma_buf_sz = bfsize; 3774 /* Chose the tx/rx size from the already defined one in the 3775 * priv struct. (if defined) 3776 */ 3777 dma_conf->dma_tx_size = priv->dma_conf.dma_tx_size; 3778 dma_conf->dma_rx_size = priv->dma_conf.dma_rx_size; 3779 3780 if (!dma_conf->dma_tx_size) 3781 dma_conf->dma_tx_size = DMA_DEFAULT_TX_SIZE; 3782 if (!dma_conf->dma_rx_size) 3783 dma_conf->dma_rx_size = DMA_DEFAULT_RX_SIZE; 3784 3785 /* Earlier check for TBS */ 3786 for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) { 3787 struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[chan]; 3788 int tbs_en = priv->plat->tx_queues_cfg[chan].tbs_en; 3789 3790 /* Setup per-TXQ tbs flag before TX descriptor alloc */ 3791 tx_q->tbs |= tbs_en ? 
STMMAC_TBS_AVAIL : 0; 3792 } 3793 3794 ret = alloc_dma_desc_resources(priv, dma_conf); 3795 if (ret < 0) { 3796 netdev_err(priv->dev, "%s: DMA descriptors allocation failed\n", 3797 __func__); 3798 goto alloc_error; 3799 } 3800 3801 ret = init_dma_desc_rings(priv->dev, dma_conf, GFP_KERNEL); 3802 if (ret < 0) { 3803 netdev_err(priv->dev, "%s: DMA descriptors initialization failed\n", 3804 __func__); 3805 goto init_error; 3806 } 3807 3808 return dma_conf; 3809 3810 init_error: 3811 free_dma_desc_resources(priv, dma_conf); 3812 alloc_error: 3813 kfree(dma_conf); 3814 return ERR_PTR(ret); 3815 } 3816 3817 /** 3818 * __stmmac_open - open entry point of the driver 3819 * @dev : pointer to the device structure. 3820 * @dma_conf : structure to take the dma data 3821 * Description: 3822 * This function is the open entry point of the driver. 3823 * Return value: 3824 * 0 on success and an appropriate (-)ve integer as defined in errno.h 3825 * file on failure. 3826 */ 3827 static int __stmmac_open(struct net_device *dev, 3828 struct stmmac_dma_conf *dma_conf) 3829 { 3830 struct stmmac_priv *priv = netdev_priv(dev); 3831 int mode = priv->plat->phy_interface; 3832 u32 chan; 3833 int ret; 3834 3835 ret = pm_runtime_resume_and_get(priv->device); 3836 if (ret < 0) 3837 return ret; 3838 3839 if (priv->hw->pcs != STMMAC_PCS_TBI && 3840 priv->hw->pcs != STMMAC_PCS_RTBI && 3841 (!priv->hw->xpcs || 3842 xpcs_get_an_mode(priv->hw->xpcs, mode) != DW_AN_C73) && 3843 !priv->hw->lynx_pcs) { 3844 ret = stmmac_init_phy(dev); 3845 if (ret) { 3846 netdev_err(priv->dev, 3847 "%s: Cannot attach to PHY (error: %d)\n", 3848 __func__, ret); 3849 goto init_phy_error; 3850 } 3851 } 3852 3853 priv->rx_copybreak = STMMAC_RX_COPYBREAK; 3854 3855 buf_sz = dma_conf->dma_buf_sz; 3856 memcpy(&priv->dma_conf, dma_conf, sizeof(*dma_conf)); 3857 3858 stmmac_reset_queues_param(priv); 3859 3860 if (!(priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP) && 3861 priv->plat->serdes_powerup) { 3862 ret = priv->plat->serdes_powerup(dev, priv->plat->bsp_priv); 3863 if (ret < 0) { 3864 netdev_err(priv->dev, "%s: Serdes powerup failed\n", 3865 __func__); 3866 goto init_error; 3867 } 3868 } 3869 3870 ret = stmmac_hw_setup(dev, true); 3871 if (ret < 0) { 3872 netdev_err(priv->dev, "%s: Hw setup failed\n", __func__); 3873 goto init_error; 3874 } 3875 3876 stmmac_init_coalesce(priv); 3877 3878 phylink_start(priv->phylink); 3879 /* We may have called phylink_speed_down before */ 3880 phylink_speed_up(priv->phylink); 3881 3882 ret = stmmac_request_irq(dev); 3883 if (ret) 3884 goto irq_error; 3885 3886 stmmac_enable_all_queues(priv); 3887 netif_tx_start_all_queues(priv->dev); 3888 stmmac_enable_all_dma_irq(priv); 3889 3890 return 0; 3891 3892 irq_error: 3893 phylink_stop(priv->phylink); 3894 3895 for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) 3896 hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer); 3897 3898 stmmac_hw_teardown(dev); 3899 init_error: 3900 phylink_disconnect_phy(priv->phylink); 3901 init_phy_error: 3902 pm_runtime_put(priv->device); 3903 return ret; 3904 } 3905 3906 static int stmmac_open(struct net_device *dev) 3907 { 3908 struct stmmac_priv *priv = netdev_priv(dev); 3909 struct stmmac_dma_conf *dma_conf; 3910 int ret; 3911 3912 dma_conf = stmmac_setup_dma_desc(priv, dev->mtu); 3913 if (IS_ERR(dma_conf)) 3914 return PTR_ERR(dma_conf); 3915 3916 ret = __stmmac_open(dev, dma_conf); 3917 if (ret) 3918 free_dma_desc_resources(priv, dma_conf); 3919 3920 kfree(dma_conf); 3921 return ret; 3922 } 3923 3924 static void 
stmmac_fpe_stop_wq(struct stmmac_priv *priv) 3925 { 3926 set_bit(__FPE_REMOVING, &priv->fpe_task_state); 3927 3928 if (priv->fpe_wq) 3929 destroy_workqueue(priv->fpe_wq); 3930 3931 netdev_info(priv->dev, "FPE workqueue stop"); 3932 } 3933 3934 /** 3935 * stmmac_release - close entry point of the driver 3936 * @dev : device pointer. 3937 * Description: 3938 * This is the stop entry point of the driver. 3939 */ 3940 static int stmmac_release(struct net_device *dev) 3941 { 3942 struct stmmac_priv *priv = netdev_priv(dev); 3943 u32 chan; 3944 3945 if (device_may_wakeup(priv->device)) 3946 phylink_speed_down(priv->phylink, false); 3947 /* Stop and disconnect the PHY */ 3948 phylink_stop(priv->phylink); 3949 phylink_disconnect_phy(priv->phylink); 3950 3951 stmmac_disable_all_queues(priv); 3952 3953 for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) 3954 hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer); 3955 3956 netif_tx_disable(dev); 3957 3958 /* Free the IRQ lines */ 3959 stmmac_free_irq(dev, REQ_IRQ_ERR_ALL, 0); 3960 3961 if (priv->eee_enabled) { 3962 priv->tx_path_in_lpi_mode = false; 3963 del_timer_sync(&priv->eee_ctrl_timer); 3964 } 3965 3966 /* Stop TX/RX DMA and clear the descriptors */ 3967 stmmac_stop_all_dma(priv); 3968 3969 /* Release and free the Rx/Tx resources */ 3970 free_dma_desc_resources(priv, &priv->dma_conf); 3971 3972 /* Disable the MAC Rx/Tx */ 3973 stmmac_mac_set(priv, priv->ioaddr, false); 3974 3975 /* Power down the SerDes if present */ 3976 if (priv->plat->serdes_powerdown) 3977 priv->plat->serdes_powerdown(dev, priv->plat->bsp_priv); 3978 3979 netif_carrier_off(dev); 3980 3981 stmmac_release_ptp(priv); 3982 3983 pm_runtime_put(priv->device); 3984 3985 if (priv->dma_cap.fpesel) 3986 stmmac_fpe_stop_wq(priv); 3987 3988 return 0; 3989 } 3990 3991 static bool stmmac_vlan_insert(struct stmmac_priv *priv, struct sk_buff *skb, 3992 struct stmmac_tx_queue *tx_q) 3993 { 3994 u16 tag = 0x0, inner_tag = 0x0; 3995 u32 inner_type = 0x0; 3996 struct dma_desc *p; 3997 3998 if (!priv->dma_cap.vlins) 3999 return false; 4000 if (!skb_vlan_tag_present(skb)) 4001 return false; 4002 if (skb->vlan_proto == htons(ETH_P_8021AD)) { 4003 inner_tag = skb_vlan_tag_get(skb); 4004 inner_type = STMMAC_VLAN_INSERT; 4005 } 4006 4007 tag = skb_vlan_tag_get(skb); 4008 4009 if (tx_q->tbs & STMMAC_TBS_AVAIL) 4010 p = &tx_q->dma_entx[tx_q->cur_tx].basic; 4011 else 4012 p = &tx_q->dma_tx[tx_q->cur_tx]; 4013 4014 if (stmmac_set_desc_vlan_tag(priv, p, tag, inner_tag, inner_type)) 4015 return false; 4016 4017 stmmac_set_tx_owner(priv, p); 4018 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size); 4019 return true; 4020 } 4021 4022 /** 4023 * stmmac_tso_allocator - Allocate Tx descriptors for the TSO payload 4024 * @priv: driver private structure 4025 * @des: buffer start address 4026 * @total_len: total length to fill in descriptors 4027 * @last_segment: condition for the last descriptor 4028 * @queue: TX queue index 4029 * Description: 4030 * This function fills descriptors and requests new descriptors according to 4031 * the buffer length to fill 4032 */ 4033 static void stmmac_tso_allocator(struct stmmac_priv *priv, dma_addr_t des, 4034 int total_len, bool last_segment, u32 queue) 4035 { 4036 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue]; 4037 struct dma_desc *desc; 4038 u32 buff_size; 4039 int tmp_len; 4040 4041 tmp_len = total_len; 4042 4043 while (tmp_len > 0) { 4044 dma_addr_t curr_addr; 4045 4046 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, 4047
priv->dma_conf.dma_tx_size); 4048 WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]); 4049 4050 if (tx_q->tbs & STMMAC_TBS_AVAIL) 4051 desc = &tx_q->dma_entx[tx_q->cur_tx].basic; 4052 else 4053 desc = &tx_q->dma_tx[tx_q->cur_tx]; 4054 4055 curr_addr = des + (total_len - tmp_len); 4056 if (priv->dma_cap.addr64 <= 32) 4057 desc->des0 = cpu_to_le32(curr_addr); 4058 else 4059 stmmac_set_desc_addr(priv, desc, curr_addr); 4060 4061 buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ? 4062 TSO_MAX_BUFF_SIZE : tmp_len; 4063 4064 stmmac_prepare_tso_tx_desc(priv, desc, 0, buff_size, 4065 0, 1, 4066 (last_segment) && (tmp_len <= TSO_MAX_BUFF_SIZE), 4067 0, 0); 4068 4069 tmp_len -= TSO_MAX_BUFF_SIZE; 4070 } 4071 } 4072 4073 static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue) 4074 { 4075 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue]; 4076 int desc_size; 4077 4078 if (likely(priv->extend_desc)) 4079 desc_size = sizeof(struct dma_extended_desc); 4080 else if (tx_q->tbs & STMMAC_TBS_AVAIL) 4081 desc_size = sizeof(struct dma_edesc); 4082 else 4083 desc_size = sizeof(struct dma_desc); 4084 4085 /* The own bit must be the last setting done when preparing the 4086 * descriptor; a barrier is then needed to make sure that 4087 * everything is coherent before handing the descriptors to the DMA engine. 4088 */ 4089 wmb(); 4090 4091 tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * desc_size); 4092 stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue); 4093 } 4094 4095 /** 4096 * stmmac_tso_xmit - Tx entry point of the driver for oversized frames (TSO) 4097 * @skb : the socket buffer 4098 * @dev : device pointer 4099 * Description: this is the transmit function that is called on TSO frames 4100 * (support available on GMAC4 and newer chips). 4101 * The diagram below shows the ring programming in the case of TSO frames: 4102 * 4103 * First Descriptor 4104 * -------- 4105 * | DES0 |---> buffer1 = L2/L3/L4 header 4106 * | DES1 |---> TCP Payload (can continue on next descr...) 4107 * | DES2 |---> buffer 1 and 2 len 4108 * | DES3 |---> must set TSE, TCP hdr len-> [22:19]. TCP payload len [17:0] 4109 * -------- 4110 * | 4111 * ... 4112 * | 4113 * -------- 4114 * | DES0 | --| Split TCP Payload on Buffers 1 and 2 4115 * | DES1 | --| 4116 * | DES2 | --> buffer 1 and 2 len 4117 * | DES3 | 4118 * -------- 4119 * 4120 * The MSS is fixed while TSO is enabled, so the TDES3 context field does not need to be programmed for every frame.
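* A new MSS context descriptor is queued only when the frame's gso_size differs from the MSS value currently cached for the TX queue.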
4121 */ 4122 static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev) 4123 { 4124 struct dma_desc *desc, *first, *mss_desc = NULL; 4125 struct stmmac_priv *priv = netdev_priv(dev); 4126 int nfrags = skb_shinfo(skb)->nr_frags; 4127 u32 queue = skb_get_queue_mapping(skb); 4128 unsigned int first_entry, tx_packets; 4129 struct stmmac_txq_stats *txq_stats; 4130 int tmp_pay_len = 0, first_tx; 4131 struct stmmac_tx_queue *tx_q; 4132 bool has_vlan, set_ic; 4133 u8 proto_hdr_len, hdr; 4134 unsigned long flags; 4135 u32 pay_len, mss; 4136 dma_addr_t des; 4137 int i; 4138 4139 tx_q = &priv->dma_conf.tx_queue[queue]; 4140 txq_stats = &priv->xstats.txq_stats[queue]; 4141 first_tx = tx_q->cur_tx; 4142 4143 /* Compute header lengths */ 4144 if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) { 4145 proto_hdr_len = skb_transport_offset(skb) + sizeof(struct udphdr); 4146 hdr = sizeof(struct udphdr); 4147 } else { 4148 proto_hdr_len = skb_tcp_all_headers(skb); 4149 hdr = tcp_hdrlen(skb); 4150 } 4151 4152 /* Desc availability based on threshold should be enough safe */ 4153 if (unlikely(stmmac_tx_avail(priv, queue) < 4154 (((skb->len - proto_hdr_len) / TSO_MAX_BUFF_SIZE + 1)))) { 4155 if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) { 4156 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, 4157 queue)); 4158 /* This is a hard error, log it. */ 4159 netdev_err(priv->dev, 4160 "%s: Tx Ring full when queue awake\n", 4161 __func__); 4162 } 4163 return NETDEV_TX_BUSY; 4164 } 4165 4166 pay_len = skb_headlen(skb) - proto_hdr_len; /* no frags */ 4167 4168 mss = skb_shinfo(skb)->gso_size; 4169 4170 /* set new MSS value if needed */ 4171 if (mss != tx_q->mss) { 4172 if (tx_q->tbs & STMMAC_TBS_AVAIL) 4173 mss_desc = &tx_q->dma_entx[tx_q->cur_tx].basic; 4174 else 4175 mss_desc = &tx_q->dma_tx[tx_q->cur_tx]; 4176 4177 stmmac_set_mss(priv, mss_desc, mss); 4178 tx_q->mss = mss; 4179 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, 4180 priv->dma_conf.dma_tx_size); 4181 WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]); 4182 } 4183 4184 if (netif_msg_tx_queued(priv)) { 4185 pr_info("%s: hdrlen %d, hdr_len %d, pay_len %d, mss %d\n", 4186 __func__, hdr, proto_hdr_len, pay_len, mss); 4187 pr_info("\tskb->len %d, skb->data_len %d\n", skb->len, 4188 skb->data_len); 4189 } 4190 4191 /* Check if VLAN can be inserted by HW */ 4192 has_vlan = stmmac_vlan_insert(priv, skb, tx_q); 4193 4194 first_entry = tx_q->cur_tx; 4195 WARN_ON(tx_q->tx_skbuff[first_entry]); 4196 4197 if (tx_q->tbs & STMMAC_TBS_AVAIL) 4198 desc = &tx_q->dma_entx[first_entry].basic; 4199 else 4200 desc = &tx_q->dma_tx[first_entry]; 4201 first = desc; 4202 4203 if (has_vlan) 4204 stmmac_set_desc_vlan(priv, first, STMMAC_VLAN_INSERT); 4205 4206 /* first descriptor: fill Headers on Buf1 */ 4207 des = dma_map_single(priv->device, skb->data, skb_headlen(skb), 4208 DMA_TO_DEVICE); 4209 if (dma_mapping_error(priv->device, des)) 4210 goto dma_map_err; 4211 4212 tx_q->tx_skbuff_dma[first_entry].buf = des; 4213 tx_q->tx_skbuff_dma[first_entry].len = skb_headlen(skb); 4214 tx_q->tx_skbuff_dma[first_entry].map_as_page = false; 4215 tx_q->tx_skbuff_dma[first_entry].buf_type = STMMAC_TXBUF_T_SKB; 4216 4217 if (priv->dma_cap.addr64 <= 32) { 4218 first->des0 = cpu_to_le32(des); 4219 4220 /* Fill start of payload in buff2 of first descriptor */ 4221 if (pay_len) 4222 first->des1 = cpu_to_le32(des + proto_hdr_len); 4223 4224 /* If needed take extra descriptors to fill the remaining payload */ 4225 tmp_pay_len = pay_len - TSO_MAX_BUFF_SIZE; 4226 } else { 4227 
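/* Large (>32-bit) DMA addressing: program the full header address in the first descriptor and let stmmac_tso_allocator() map the whole payload, which starts right after the headers. */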
stmmac_set_desc_addr(priv, first, des); 4228 tmp_pay_len = pay_len; 4229 des += proto_hdr_len; 4230 pay_len = 0; 4231 } 4232 4233 stmmac_tso_allocator(priv, des, tmp_pay_len, (nfrags == 0), queue); 4234 4235 /* Prepare fragments */ 4236 for (i = 0; i < nfrags; i++) { 4237 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 4238 4239 des = skb_frag_dma_map(priv->device, frag, 0, 4240 skb_frag_size(frag), 4241 DMA_TO_DEVICE); 4242 if (dma_mapping_error(priv->device, des)) 4243 goto dma_map_err; 4244 4245 stmmac_tso_allocator(priv, des, skb_frag_size(frag), 4246 (i == nfrags - 1), queue); 4247 4248 tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des; 4249 tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_frag_size(frag); 4250 tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = true; 4251 tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB; 4252 } 4253 4254 tx_q->tx_skbuff_dma[tx_q->cur_tx].last_segment = true; 4255 4256 /* Only the last descriptor gets to point to the skb. */ 4257 tx_q->tx_skbuff[tx_q->cur_tx] = skb; 4258 tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB; 4259 4260 /* Manage tx mitigation */ 4261 tx_packets = (tx_q->cur_tx + 1) - first_tx; 4262 tx_q->tx_count_frames += tx_packets; 4263 4264 if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en) 4265 set_ic = true; 4266 else if (!priv->tx_coal_frames[queue]) 4267 set_ic = false; 4268 else if (tx_packets > priv->tx_coal_frames[queue]) 4269 set_ic = true; 4270 else if ((tx_q->tx_count_frames % 4271 priv->tx_coal_frames[queue]) < tx_packets) 4272 set_ic = true; 4273 else 4274 set_ic = false; 4275 4276 if (set_ic) { 4277 if (tx_q->tbs & STMMAC_TBS_AVAIL) 4278 desc = &tx_q->dma_entx[tx_q->cur_tx].basic; 4279 else 4280 desc = &tx_q->dma_tx[tx_q->cur_tx]; 4281 4282 tx_q->tx_count_frames = 0; 4283 stmmac_set_tx_ic(priv, desc); 4284 } 4285 4286 /* We've used all descriptors we need for this skb, however, 4287 * advance cur_tx so that it references a fresh descriptor. 4288 * ndo_start_xmit will fill this descriptor the next time it's 4289 * called and stmmac_tx_clean may clean up to this descriptor. 4290 */ 4291 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size); 4292 4293 if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) { 4294 netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n", 4295 __func__); 4296 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue)); 4297 } 4298 4299 flags = u64_stats_update_begin_irqsave(&txq_stats->syncp); 4300 txq_stats->tx_bytes += skb->len; 4301 txq_stats->tx_tso_frames++; 4302 txq_stats->tx_tso_nfrags += nfrags; 4303 if (set_ic) 4304 txq_stats->tx_set_ic_bit++; 4305 u64_stats_update_end_irqrestore(&txq_stats->syncp, flags); 4306 4307 if (priv->sarc_type) 4308 stmmac_set_desc_sarc(priv, first, priv->sarc_type); 4309 4310 skb_tx_timestamp(skb); 4311 4312 if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && 4313 priv->hwts_tx_en)) { 4314 /* declare that device is doing timestamping */ 4315 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; 4316 stmmac_enable_tx_timestamp(priv, first); 4317 } 4318 4319 /* Complete the first descriptor before granting the DMA */ 4320 stmmac_prepare_tso_tx_desc(priv, first, 1, 4321 proto_hdr_len, 4322 pay_len, 4323 1, tx_q->tx_skbuff_dma[first_entry].last_segment, 4324 hdr / 4, (skb->len - proto_hdr_len)); 4325 4326 /* If context desc is used to change MSS */ 4327 if (mss_desc) { 4328 /* Make sure that first descriptor has been completely 4329 * written, including its own bit. 
This is because MSS is 4330 * actually before first descriptor, so we need to make 4331 * sure that MSS's own bit is the last thing written. 4332 */ 4333 dma_wmb(); 4334 stmmac_set_tx_owner(priv, mss_desc); 4335 } 4336 4337 if (netif_msg_pktdata(priv)) { 4338 pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n", 4339 __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry, 4340 tx_q->cur_tx, first, nfrags); 4341 pr_info(">>> frame to be transmitted: "); 4342 print_pkt(skb->data, skb_headlen(skb)); 4343 } 4344 4345 netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len); 4346 4347 stmmac_flush_tx_descriptors(priv, queue); 4348 stmmac_tx_timer_arm(priv, queue); 4349 4350 return NETDEV_TX_OK; 4351 4352 dma_map_err: 4353 dev_err(priv->device, "Tx dma map failed\n"); 4354 dev_kfree_skb(skb); 4355 priv->xstats.tx_dropped++; 4356 return NETDEV_TX_OK; 4357 } 4358 4359 /** 4360 * stmmac_has_ip_ethertype() - Check if packet has IP ethertype 4361 * @skb: socket buffer to check 4362 * 4363 * Check if a packet has an ethertype that will trigger the IP header checks 4364 * and IP/TCP checksum engine of the stmmac core. 4365 * 4366 * Return: true if the ethertype can trigger the checksum engine, false 4367 * otherwise 4368 */ 4369 static bool stmmac_has_ip_ethertype(struct sk_buff *skb) 4370 { 4371 int depth = 0; 4372 __be16 proto; 4373 4374 proto = __vlan_get_protocol(skb, eth_header_parse_protocol(skb), 4375 &depth); 4376 4377 return (depth <= ETH_HLEN) && 4378 (proto == htons(ETH_P_IP) || proto == htons(ETH_P_IPV6)); 4379 } 4380 4381 /** 4382 * stmmac_xmit - Tx entry point of the driver 4383 * @skb : the socket buffer 4384 * @dev : device pointer 4385 * Description : this is the tx entry point of the driver. 4386 * It programs the chain or the ring and supports oversized frames 4387 * and SG feature. 4388 */ 4389 static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) 4390 { 4391 unsigned int first_entry, tx_packets, enh_desc; 4392 struct stmmac_priv *priv = netdev_priv(dev); 4393 unsigned int nopaged_len = skb_headlen(skb); 4394 int i, csum_insertion = 0, is_jumbo = 0; 4395 u32 queue = skb_get_queue_mapping(skb); 4396 int nfrags = skb_shinfo(skb)->nr_frags; 4397 int gso = skb_shinfo(skb)->gso_type; 4398 struct stmmac_txq_stats *txq_stats; 4399 struct dma_edesc *tbs_desc = NULL; 4400 struct dma_desc *desc, *first; 4401 struct stmmac_tx_queue *tx_q; 4402 bool has_vlan, set_ic; 4403 int entry, first_tx; 4404 unsigned long flags; 4405 dma_addr_t des; 4406 4407 tx_q = &priv->dma_conf.tx_queue[queue]; 4408 txq_stats = &priv->xstats.txq_stats[queue]; 4409 first_tx = tx_q->cur_tx; 4410 4411 if (priv->tx_path_in_lpi_mode && priv->eee_sw_timer_en) 4412 stmmac_disable_eee_mode(priv); 4413 4414 /* Manage oversized TCP frames for GMAC4 device */ 4415 if (skb_is_gso(skb) && priv->tso) { 4416 if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) 4417 return stmmac_tso_xmit(skb, dev); 4418 if (priv->plat->has_gmac4 && (gso & SKB_GSO_UDP_L4)) 4419 return stmmac_tso_xmit(skb, dev); 4420 } 4421 4422 if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) { 4423 if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) { 4424 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, 4425 queue)); 4426 /* This is a hard error, log it. 
*/ 4427 netdev_err(priv->dev, 4428 "%s: Tx Ring full when queue awake\n", 4429 __func__); 4430 } 4431 return NETDEV_TX_BUSY; 4432 } 4433 4434 /* Check if VLAN can be inserted by HW */ 4435 has_vlan = stmmac_vlan_insert(priv, skb, tx_q); 4436 4437 entry = tx_q->cur_tx; 4438 first_entry = entry; 4439 WARN_ON(tx_q->tx_skbuff[first_entry]); 4440 4441 csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL); 4442 /* DWMAC IPs can be synthesized to support tx coe only for a few tx 4443 * queues. In that case, checksum offloading for those queues that don't 4444 * support tx coe needs to fallback to software checksum calculation. 4445 * 4446 * Packets that won't trigger the COE e.g. most DSA-tagged packets will 4447 * also have to be checksummed in software. 4448 */ 4449 if (csum_insertion && 4450 (priv->plat->tx_queues_cfg[queue].coe_unsupported || 4451 !stmmac_has_ip_ethertype(skb))) { 4452 if (unlikely(skb_checksum_help(skb))) 4453 goto dma_map_err; 4454 csum_insertion = !csum_insertion; 4455 } 4456 4457 if (likely(priv->extend_desc)) 4458 desc = (struct dma_desc *)(tx_q->dma_etx + entry); 4459 else if (tx_q->tbs & STMMAC_TBS_AVAIL) 4460 desc = &tx_q->dma_entx[entry].basic; 4461 else 4462 desc = tx_q->dma_tx + entry; 4463 4464 first = desc; 4465 4466 if (has_vlan) 4467 stmmac_set_desc_vlan(priv, first, STMMAC_VLAN_INSERT); 4468 4469 enh_desc = priv->plat->enh_desc; 4470 /* To program the descriptors according to the size of the frame */ 4471 if (enh_desc) 4472 is_jumbo = stmmac_is_jumbo_frm(priv, skb->len, enh_desc); 4473 4474 if (unlikely(is_jumbo)) { 4475 entry = stmmac_jumbo_frm(priv, tx_q, skb, csum_insertion); 4476 if (unlikely(entry < 0) && (entry != -EINVAL)) 4477 goto dma_map_err; 4478 } 4479 4480 for (i = 0; i < nfrags; i++) { 4481 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 4482 int len = skb_frag_size(frag); 4483 bool last_segment = (i == (nfrags - 1)); 4484 4485 entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size); 4486 WARN_ON(tx_q->tx_skbuff[entry]); 4487 4488 if (likely(priv->extend_desc)) 4489 desc = (struct dma_desc *)(tx_q->dma_etx + entry); 4490 else if (tx_q->tbs & STMMAC_TBS_AVAIL) 4491 desc = &tx_q->dma_entx[entry].basic; 4492 else 4493 desc = tx_q->dma_tx + entry; 4494 4495 des = skb_frag_dma_map(priv->device, frag, 0, len, 4496 DMA_TO_DEVICE); 4497 if (dma_mapping_error(priv->device, des)) 4498 goto dma_map_err; /* should reuse desc w/o issues */ 4499 4500 tx_q->tx_skbuff_dma[entry].buf = des; 4501 4502 stmmac_set_desc_addr(priv, desc, des); 4503 4504 tx_q->tx_skbuff_dma[entry].map_as_page = true; 4505 tx_q->tx_skbuff_dma[entry].len = len; 4506 tx_q->tx_skbuff_dma[entry].last_segment = last_segment; 4507 tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_SKB; 4508 4509 /* Prepare the descriptor and set the own bit too */ 4510 stmmac_prepare_tx_desc(priv, desc, 0, len, csum_insertion, 4511 priv->mode, 1, last_segment, skb->len); 4512 } 4513 4514 /* Only the last descriptor gets to point to the skb. */ 4515 tx_q->tx_skbuff[entry] = skb; 4516 tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_SKB; 4517 4518 /* According to the coalesce parameter the IC bit for the latest 4519 * segment is reset and the timer re-started to clean the tx status. 4520 * This approach takes care about the fragments: desc is the first 4521 * element in case of no SG. 
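* The IC bit is also forced for frames that request HW timestamping (when HW Tx timestamping is enabled); otherwise it is raised roughly once every tx_coal_frames[queue] packets.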
4522 */ 4523 tx_packets = (entry + 1) - first_tx; 4524 tx_q->tx_count_frames += tx_packets; 4525 4526 if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en) 4527 set_ic = true; 4528 else if (!priv->tx_coal_frames[queue]) 4529 set_ic = false; 4530 else if (tx_packets > priv->tx_coal_frames[queue]) 4531 set_ic = true; 4532 else if ((tx_q->tx_count_frames % 4533 priv->tx_coal_frames[queue]) < tx_packets) 4534 set_ic = true; 4535 else 4536 set_ic = false; 4537 4538 if (set_ic) { 4539 if (likely(priv->extend_desc)) 4540 desc = &tx_q->dma_etx[entry].basic; 4541 else if (tx_q->tbs & STMMAC_TBS_AVAIL) 4542 desc = &tx_q->dma_entx[entry].basic; 4543 else 4544 desc = &tx_q->dma_tx[entry]; 4545 4546 tx_q->tx_count_frames = 0; 4547 stmmac_set_tx_ic(priv, desc); 4548 } 4549 4550 /* We've used all descriptors we need for this skb, however, 4551 * advance cur_tx so that it references a fresh descriptor. 4552 * ndo_start_xmit will fill this descriptor the next time it's 4553 * called and stmmac_tx_clean may clean up to this descriptor. 4554 */ 4555 entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size); 4556 tx_q->cur_tx = entry; 4557 4558 if (netif_msg_pktdata(priv)) { 4559 netdev_dbg(priv->dev, 4560 "%s: curr=%d dirty=%d f=%d, e=%d, first=%p, nfrags=%d", 4561 __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry, 4562 entry, first, nfrags); 4563 4564 netdev_dbg(priv->dev, ">>> frame to be transmitted: "); 4565 print_pkt(skb->data, skb->len); 4566 } 4567 4568 if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) { 4569 netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n", 4570 __func__); 4571 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue)); 4572 } 4573 4574 flags = u64_stats_update_begin_irqsave(&txq_stats->syncp); 4575 txq_stats->tx_bytes += skb->len; 4576 if (set_ic) 4577 txq_stats->tx_set_ic_bit++; 4578 u64_stats_update_end_irqrestore(&txq_stats->syncp, flags); 4579 4580 if (priv->sarc_type) 4581 stmmac_set_desc_sarc(priv, first, priv->sarc_type); 4582 4583 skb_tx_timestamp(skb); 4584 4585 /* Ready to fill the first descriptor and set the OWN bit w/o any 4586 * problems because all the descriptors are actually ready to be 4587 * passed to the DMA engine. 
4588 */ 4589 if (likely(!is_jumbo)) { 4590 bool last_segment = (nfrags == 0); 4591 4592 des = dma_map_single(priv->device, skb->data, 4593 nopaged_len, DMA_TO_DEVICE); 4594 if (dma_mapping_error(priv->device, des)) 4595 goto dma_map_err; 4596 4597 tx_q->tx_skbuff_dma[first_entry].buf = des; 4598 tx_q->tx_skbuff_dma[first_entry].buf_type = STMMAC_TXBUF_T_SKB; 4599 tx_q->tx_skbuff_dma[first_entry].map_as_page = false; 4600 4601 stmmac_set_desc_addr(priv, first, des); 4602 4603 tx_q->tx_skbuff_dma[first_entry].len = nopaged_len; 4604 tx_q->tx_skbuff_dma[first_entry].last_segment = last_segment; 4605 4606 if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && 4607 priv->hwts_tx_en)) { 4608 /* declare that device is doing timestamping */ 4609 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; 4610 stmmac_enable_tx_timestamp(priv, first); 4611 } 4612 4613 /* Prepare the first descriptor setting the OWN bit too */ 4614 stmmac_prepare_tx_desc(priv, first, 1, nopaged_len, 4615 csum_insertion, priv->mode, 0, last_segment, 4616 skb->len); 4617 } 4618 4619 if (tx_q->tbs & STMMAC_TBS_EN) { 4620 struct timespec64 ts = ns_to_timespec64(skb->tstamp); 4621 4622 tbs_desc = &tx_q->dma_entx[first_entry]; 4623 stmmac_set_desc_tbs(priv, tbs_desc, ts.tv_sec, ts.tv_nsec); 4624 } 4625 4626 stmmac_set_tx_owner(priv, first); 4627 4628 netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len); 4629 4630 stmmac_enable_dma_transmission(priv, priv->ioaddr); 4631 4632 stmmac_flush_tx_descriptors(priv, queue); 4633 stmmac_tx_timer_arm(priv, queue); 4634 4635 return NETDEV_TX_OK; 4636 4637 dma_map_err: 4638 netdev_err(priv->dev, "Tx DMA map failed\n"); 4639 dev_kfree_skb(skb); 4640 priv->xstats.tx_dropped++; 4641 return NETDEV_TX_OK; 4642 } 4643 4644 static void stmmac_rx_vlan(struct net_device *dev, struct sk_buff *skb) 4645 { 4646 struct vlan_ethhdr *veth = skb_vlan_eth_hdr(skb); 4647 __be16 vlan_proto = veth->h_vlan_proto; 4648 u16 vlanid; 4649 4650 if ((vlan_proto == htons(ETH_P_8021Q) && 4651 dev->features & NETIF_F_HW_VLAN_CTAG_RX) || 4652 (vlan_proto == htons(ETH_P_8021AD) && 4653 dev->features & NETIF_F_HW_VLAN_STAG_RX)) { 4654 /* pop the vlan tag */ 4655 vlanid = ntohs(veth->h_vlan_TCI); 4656 memmove(skb->data + VLAN_HLEN, veth, ETH_ALEN * 2); 4657 skb_pull(skb, VLAN_HLEN); 4658 __vlan_hwaccel_put_tag(skb, vlan_proto, vlanid); 4659 } 4660 } 4661 4662 /** 4663 * stmmac_rx_refill - refill used skb preallocated buffers 4664 * @priv: driver private structure 4665 * @queue: RX queue index 4666 * Description : this is to reallocate the skb for the reception process 4667 * that is based on zero-copy. 
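* The refill loop stops early if the page_pool cannot provide a new page (or a split-header page when SPH is in use).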
4668 */ 4669 static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue) 4670 { 4671 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue]; 4672 int dirty = stmmac_rx_dirty(priv, queue); 4673 unsigned int entry = rx_q->dirty_rx; 4674 gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN); 4675 4676 if (priv->dma_cap.host_dma_width <= 32) 4677 gfp |= GFP_DMA32; 4678 4679 while (dirty-- > 0) { 4680 struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry]; 4681 struct dma_desc *p; 4682 bool use_rx_wd; 4683 4684 if (priv->extend_desc) 4685 p = (struct dma_desc *)(rx_q->dma_erx + entry); 4686 else 4687 p = rx_q->dma_rx + entry; 4688 4689 if (!buf->page) { 4690 buf->page = page_pool_alloc_pages(rx_q->page_pool, gfp); 4691 if (!buf->page) 4692 break; 4693 } 4694 4695 if (priv->sph && !buf->sec_page) { 4696 buf->sec_page = page_pool_alloc_pages(rx_q->page_pool, gfp); 4697 if (!buf->sec_page) 4698 break; 4699 4700 buf->sec_addr = page_pool_get_dma_addr(buf->sec_page); 4701 } 4702 4703 buf->addr = page_pool_get_dma_addr(buf->page) + buf->page_offset; 4704 4705 stmmac_set_desc_addr(priv, p, buf->addr); 4706 if (priv->sph) 4707 stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true); 4708 else 4709 stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false); 4710 stmmac_refill_desc3(priv, rx_q, p); 4711 4712 rx_q->rx_count_frames++; 4713 rx_q->rx_count_frames += priv->rx_coal_frames[queue]; 4714 if (rx_q->rx_count_frames > priv->rx_coal_frames[queue]) 4715 rx_q->rx_count_frames = 0; 4716 4717 use_rx_wd = !priv->rx_coal_frames[queue]; 4718 use_rx_wd |= rx_q->rx_count_frames > 0; 4719 if (!priv->use_riwt) 4720 use_rx_wd = false; 4721 4722 dma_wmb(); 4723 stmmac_set_rx_owner(priv, p, use_rx_wd); 4724 4725 entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_rx_size); 4726 } 4727 rx_q->dirty_rx = entry; 4728 rx_q->rx_tail_addr = rx_q->dma_rx_phy + 4729 (rx_q->dirty_rx * sizeof(struct dma_desc)); 4730 stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue); 4731 } 4732 4733 static unsigned int stmmac_rx_buf1_len(struct stmmac_priv *priv, 4734 struct dma_desc *p, 4735 int status, unsigned int len) 4736 { 4737 unsigned int plen = 0, hlen = 0; 4738 int coe = priv->hw->rx_csum; 4739 4740 /* Not first descriptor, buffer is always zero */ 4741 if (priv->sph && len) 4742 return 0; 4743 4744 /* First descriptor, get split header length */ 4745 stmmac_get_rx_header_len(priv, p, &hlen); 4746 if (priv->sph && hlen) { 4747 priv->xstats.rx_split_hdr_pkt_n++; 4748 return hlen; 4749 } 4750 4751 /* First descriptor, not last descriptor and not split header */ 4752 if (status & rx_not_ls) 4753 return priv->dma_conf.dma_buf_sz; 4754 4755 plen = stmmac_get_rx_frame_len(priv, p, coe); 4756 4757 /* First descriptor and last descriptor and not split header */ 4758 return min_t(unsigned int, priv->dma_conf.dma_buf_sz, plen); 4759 } 4760 4761 static unsigned int stmmac_rx_buf2_len(struct stmmac_priv *priv, 4762 struct dma_desc *p, 4763 int status, unsigned int len) 4764 { 4765 int coe = priv->hw->rx_csum; 4766 unsigned int plen = 0; 4767 4768 /* Not split header, buffer is not available */ 4769 if (!priv->sph) 4770 return 0; 4771 4772 /* Not last descriptor */ 4773 if (status & rx_not_ls) 4774 return priv->dma_conf.dma_buf_sz; 4775 4776 plen = stmmac_get_rx_frame_len(priv, p, coe); 4777 4778 /* Last descriptor */ 4779 return plen - len; 4780 } 4781 4782 static int stmmac_xdp_xmit_xdpf(struct stmmac_priv *priv, int queue, 4783 struct xdp_frame *xdpf, bool dma_map) 4784 { 4785 struct stmmac_txq_stats *txq_stats = 
&priv->xstats.txq_stats[queue]; 4786 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue]; 4787 unsigned int entry = tx_q->cur_tx; 4788 struct dma_desc *tx_desc; 4789 dma_addr_t dma_addr; 4790 bool set_ic; 4791 4792 if (stmmac_tx_avail(priv, queue) < STMMAC_TX_THRESH(priv)) 4793 return STMMAC_XDP_CONSUMED; 4794 4795 if (likely(priv->extend_desc)) 4796 tx_desc = (struct dma_desc *)(tx_q->dma_etx + entry); 4797 else if (tx_q->tbs & STMMAC_TBS_AVAIL) 4798 tx_desc = &tx_q->dma_entx[entry].basic; 4799 else 4800 tx_desc = tx_q->dma_tx + entry; 4801 4802 if (dma_map) { 4803 dma_addr = dma_map_single(priv->device, xdpf->data, 4804 xdpf->len, DMA_TO_DEVICE); 4805 if (dma_mapping_error(priv->device, dma_addr)) 4806 return STMMAC_XDP_CONSUMED; 4807 4808 tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XDP_NDO; 4809 } else { 4810 struct page *page = virt_to_page(xdpf->data); 4811 4812 dma_addr = page_pool_get_dma_addr(page) + sizeof(*xdpf) + 4813 xdpf->headroom; 4814 dma_sync_single_for_device(priv->device, dma_addr, 4815 xdpf->len, DMA_BIDIRECTIONAL); 4816 4817 tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XDP_TX; 4818 } 4819 4820 tx_q->tx_skbuff_dma[entry].buf = dma_addr; 4821 tx_q->tx_skbuff_dma[entry].map_as_page = false; 4822 tx_q->tx_skbuff_dma[entry].len = xdpf->len; 4823 tx_q->tx_skbuff_dma[entry].last_segment = true; 4824 tx_q->tx_skbuff_dma[entry].is_jumbo = false; 4825 4826 tx_q->xdpf[entry] = xdpf; 4827 4828 stmmac_set_desc_addr(priv, tx_desc, dma_addr); 4829 4830 stmmac_prepare_tx_desc(priv, tx_desc, 1, xdpf->len, 4831 true, priv->mode, true, true, 4832 xdpf->len); 4833 4834 tx_q->tx_count_frames++; 4835 4836 if (tx_q->tx_count_frames % priv->tx_coal_frames[queue] == 0) 4837 set_ic = true; 4838 else 4839 set_ic = false; 4840 4841 if (set_ic) { 4842 unsigned long flags; 4843 tx_q->tx_count_frames = 0; 4844 stmmac_set_tx_ic(priv, tx_desc); 4845 flags = u64_stats_update_begin_irqsave(&txq_stats->syncp); 4846 txq_stats->tx_set_ic_bit++; 4847 u64_stats_update_end_irqrestore(&txq_stats->syncp, flags); 4848 } 4849 4850 stmmac_enable_dma_transmission(priv, priv->ioaddr); 4851 4852 entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size); 4853 tx_q->cur_tx = entry; 4854 4855 return STMMAC_XDP_TX; 4856 } 4857 4858 static int stmmac_xdp_get_tx_queue(struct stmmac_priv *priv, 4859 int cpu) 4860 { 4861 int index = cpu; 4862 4863 if (unlikely(index < 0)) 4864 index = 0; 4865 4866 while (index >= priv->plat->tx_queues_to_use) 4867 index -= priv->plat->tx_queues_to_use; 4868 4869 return index; 4870 } 4871 4872 static int stmmac_xdp_xmit_back(struct stmmac_priv *priv, 4873 struct xdp_buff *xdp) 4874 { 4875 struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp); 4876 int cpu = smp_processor_id(); 4877 struct netdev_queue *nq; 4878 int queue; 4879 int res; 4880 4881 if (unlikely(!xdpf)) 4882 return STMMAC_XDP_CONSUMED; 4883 4884 queue = stmmac_xdp_get_tx_queue(priv, cpu); 4885 nq = netdev_get_tx_queue(priv->dev, queue); 4886 4887 __netif_tx_lock(nq, cpu); 4888 /* Avoids TX time-out as we are sharing with slow path */ 4889 txq_trans_cond_update(nq); 4890 4891 res = stmmac_xdp_xmit_xdpf(priv, queue, xdpf, false); 4892 if (res == STMMAC_XDP_TX) 4893 stmmac_flush_tx_descriptors(priv, queue); 4894 4895 __netif_tx_unlock(nq); 4896 4897 return res; 4898 } 4899 4900 static int __stmmac_xdp_run_prog(struct stmmac_priv *priv, 4901 struct bpf_prog *prog, 4902 struct xdp_buff *xdp) 4903 { 4904 u32 act; 4905 int res; 4906 4907 act = bpf_prog_run_xdp(prog, xdp); 4908 switch (act) { 4909 case 
XDP_PASS: 4910 res = STMMAC_XDP_PASS; 4911 break; 4912 case XDP_TX: 4913 res = stmmac_xdp_xmit_back(priv, xdp); 4914 break; 4915 case XDP_REDIRECT: 4916 if (xdp_do_redirect(priv->dev, xdp, prog) < 0) 4917 res = STMMAC_XDP_CONSUMED; 4918 else 4919 res = STMMAC_XDP_REDIRECT; 4920 break; 4921 default: 4922 bpf_warn_invalid_xdp_action(priv->dev, prog, act); 4923 fallthrough; 4924 case XDP_ABORTED: 4925 trace_xdp_exception(priv->dev, prog, act); 4926 fallthrough; 4927 case XDP_DROP: 4928 res = STMMAC_XDP_CONSUMED; 4929 break; 4930 } 4931 4932 return res; 4933 } 4934 4935 static struct sk_buff *stmmac_xdp_run_prog(struct stmmac_priv *priv, 4936 struct xdp_buff *xdp) 4937 { 4938 struct bpf_prog *prog; 4939 int res; 4940 4941 prog = READ_ONCE(priv->xdp_prog); 4942 if (!prog) { 4943 res = STMMAC_XDP_PASS; 4944 goto out; 4945 } 4946 4947 res = __stmmac_xdp_run_prog(priv, prog, xdp); 4948 out: 4949 return ERR_PTR(-res); 4950 } 4951 4952 static void stmmac_finalize_xdp_rx(struct stmmac_priv *priv, 4953 int xdp_status) 4954 { 4955 int cpu = smp_processor_id(); 4956 int queue; 4957 4958 queue = stmmac_xdp_get_tx_queue(priv, cpu); 4959 4960 if (xdp_status & STMMAC_XDP_TX) 4961 stmmac_tx_timer_arm(priv, queue); 4962 4963 if (xdp_status & STMMAC_XDP_REDIRECT) 4964 xdp_do_flush(); 4965 } 4966 4967 static struct sk_buff *stmmac_construct_skb_zc(struct stmmac_channel *ch, 4968 struct xdp_buff *xdp) 4969 { 4970 unsigned int metasize = xdp->data - xdp->data_meta; 4971 unsigned int datasize = xdp->data_end - xdp->data; 4972 struct sk_buff *skb; 4973 4974 skb = __napi_alloc_skb(&ch->rxtx_napi, 4975 xdp->data_end - xdp->data_hard_start, 4976 GFP_ATOMIC | __GFP_NOWARN); 4977 if (unlikely(!skb)) 4978 return NULL; 4979 4980 skb_reserve(skb, xdp->data - xdp->data_hard_start); 4981 memcpy(__skb_put(skb, datasize), xdp->data, datasize); 4982 if (metasize) 4983 skb_metadata_set(skb, metasize); 4984 4985 return skb; 4986 } 4987 4988 static void stmmac_dispatch_skb_zc(struct stmmac_priv *priv, u32 queue, 4989 struct dma_desc *p, struct dma_desc *np, 4990 struct xdp_buff *xdp) 4991 { 4992 struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[queue]; 4993 struct stmmac_channel *ch = &priv->channel[queue]; 4994 unsigned int len = xdp->data_end - xdp->data; 4995 enum pkt_hash_types hash_type; 4996 int coe = priv->hw->rx_csum; 4997 unsigned long flags; 4998 struct sk_buff *skb; 4999 u32 hash; 5000 5001 skb = stmmac_construct_skb_zc(ch, xdp); 5002 if (!skb) { 5003 priv->xstats.rx_dropped++; 5004 return; 5005 } 5006 5007 stmmac_get_rx_hwtstamp(priv, p, np, skb); 5008 stmmac_rx_vlan(priv->dev, skb); 5009 skb->protocol = eth_type_trans(skb, priv->dev); 5010 5011 if (unlikely(!coe) || !stmmac_has_ip_ethertype(skb)) 5012 skb_checksum_none_assert(skb); 5013 else 5014 skb->ip_summed = CHECKSUM_UNNECESSARY; 5015 5016 if (!stmmac_get_rx_hash(priv, p, &hash, &hash_type)) 5017 skb_set_hash(skb, hash, hash_type); 5018 5019 skb_record_rx_queue(skb, queue); 5020 napi_gro_receive(&ch->rxtx_napi, skb); 5021 5022 flags = u64_stats_update_begin_irqsave(&rxq_stats->syncp); 5023 rxq_stats->rx_pkt_n++; 5024 rxq_stats->rx_bytes += len; 5025 u64_stats_update_end_irqrestore(&rxq_stats->syncp, flags); 5026 } 5027 5028 static bool stmmac_rx_refill_zc(struct stmmac_priv *priv, u32 queue, u32 budget) 5029 { 5030 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue]; 5031 unsigned int entry = rx_q->dirty_rx; 5032 struct dma_desc *rx_desc = NULL; 5033 bool ret = true; 5034 5035 budget = min(budget, stmmac_rx_dirty(priv, queue)); 5036 5037 
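/* Re-arm clean RX descriptors with fresh buffers from the XSK pool and hand them back to the DMA by setting the OWN bit; stop early and report failure if the pool runs out of buffers. */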
while (budget-- > 0 && entry != rx_q->cur_rx) { 5038 struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry]; 5039 dma_addr_t dma_addr; 5040 bool use_rx_wd; 5041 5042 if (!buf->xdp) { 5043 buf->xdp = xsk_buff_alloc(rx_q->xsk_pool); 5044 if (!buf->xdp) { 5045 ret = false; 5046 break; 5047 } 5048 } 5049 5050 if (priv->extend_desc) 5051 rx_desc = (struct dma_desc *)(rx_q->dma_erx + entry); 5052 else 5053 rx_desc = rx_q->dma_rx + entry; 5054 5055 dma_addr = xsk_buff_xdp_get_dma(buf->xdp); 5056 stmmac_set_desc_addr(priv, rx_desc, dma_addr); 5057 stmmac_set_desc_sec_addr(priv, rx_desc, 0, false); 5058 stmmac_refill_desc3(priv, rx_q, rx_desc); 5059 5060 rx_q->rx_count_frames++; 5061 rx_q->rx_count_frames += priv->rx_coal_frames[queue]; 5062 if (rx_q->rx_count_frames > priv->rx_coal_frames[queue]) 5063 rx_q->rx_count_frames = 0; 5064 5065 use_rx_wd = !priv->rx_coal_frames[queue]; 5066 use_rx_wd |= rx_q->rx_count_frames > 0; 5067 if (!priv->use_riwt) 5068 use_rx_wd = false; 5069 5070 dma_wmb(); 5071 stmmac_set_rx_owner(priv, rx_desc, use_rx_wd); 5072 5073 entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_rx_size); 5074 } 5075 5076 if (rx_desc) { 5077 rx_q->dirty_rx = entry; 5078 rx_q->rx_tail_addr = rx_q->dma_rx_phy + 5079 (rx_q->dirty_rx * sizeof(struct dma_desc)); 5080 stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue); 5081 } 5082 5083 return ret; 5084 } 5085 5086 static struct stmmac_xdp_buff *xsk_buff_to_stmmac_ctx(struct xdp_buff *xdp) 5087 { 5088 /* In XDP zero copy data path, xdp field in struct xdp_buff_xsk is used 5089 * to represent incoming packet, whereas cb field in the same structure 5090 * is used to store driver specific info. Thus, struct stmmac_xdp_buff 5091 * is laid on top of xdp and cb fields of struct xdp_buff_xsk. 5092 */ 5093 return (struct stmmac_xdp_buff *)xdp; 5094 } 5095 5096 static int stmmac_rx_zc(struct stmmac_priv *priv, int limit, u32 queue) 5097 { 5098 struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[queue]; 5099 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue]; 5100 unsigned int count = 0, error = 0, len = 0; 5101 int dirty = stmmac_rx_dirty(priv, queue); 5102 unsigned int next_entry = rx_q->cur_rx; 5103 u32 rx_errors = 0, rx_dropped = 0; 5104 unsigned int desc_size; 5105 struct bpf_prog *prog; 5106 bool failure = false; 5107 unsigned long flags; 5108 int xdp_status = 0; 5109 int status = 0; 5110 5111 if (netif_msg_rx_status(priv)) { 5112 void *rx_head; 5113 5114 netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__); 5115 if (priv->extend_desc) { 5116 rx_head = (void *)rx_q->dma_erx; 5117 desc_size = sizeof(struct dma_extended_desc); 5118 } else { 5119 rx_head = (void *)rx_q->dma_rx; 5120 desc_size = sizeof(struct dma_desc); 5121 } 5122 5123 stmmac_display_ring(priv, rx_head, priv->dma_conf.dma_rx_size, true, 5124 rx_q->dma_rx_phy, desc_size); 5125 } 5126 while (count < limit) { 5127 struct stmmac_rx_buffer *buf; 5128 struct stmmac_xdp_buff *ctx; 5129 unsigned int buf1_len = 0; 5130 struct dma_desc *np, *p; 5131 int entry; 5132 int res; 5133 5134 if (!count && rx_q->state_saved) { 5135 error = rx_q->state.error; 5136 len = rx_q->state.len; 5137 } else { 5138 rx_q->state_saved = false; 5139 error = 0; 5140 len = 0; 5141 } 5142 5143 if (count >= limit) 5144 break; 5145 5146 read_again: 5147 buf1_len = 0; 5148 entry = next_entry; 5149 buf = &rx_q->buf_pool[entry]; 5150 5151 if (dirty >= STMMAC_RX_FILL_BATCH) { 5152 failure = failure || 5153 !stmmac_rx_refill_zc(priv, queue, dirty); 5154 dirty = 0; 5155 } 5156 5157 if 
(priv->extend_desc) 5158 p = (struct dma_desc *)(rx_q->dma_erx + entry); 5159 else 5160 p = rx_q->dma_rx + entry; 5161 5162 /* read the status of the incoming frame */ 5163 status = stmmac_rx_status(priv, &priv->xstats, p); 5164 /* check if managed by the DMA otherwise go ahead */ 5165 if (unlikely(status & dma_own)) 5166 break; 5167 5168 /* Prefetch the next RX descriptor */ 5169 rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx, 5170 priv->dma_conf.dma_rx_size); 5171 next_entry = rx_q->cur_rx; 5172 5173 if (priv->extend_desc) 5174 np = (struct dma_desc *)(rx_q->dma_erx + next_entry); 5175 else 5176 np = rx_q->dma_rx + next_entry; 5177 5178 prefetch(np); 5179 5180 /* Ensure a valid XSK buffer before proceeding */ 5181 if (!buf->xdp) 5182 break; 5183 5184 if (priv->extend_desc) 5185 stmmac_rx_extended_status(priv, &priv->xstats, 5186 rx_q->dma_erx + entry); 5187 if (unlikely(status == discard_frame)) { 5188 xsk_buff_free(buf->xdp); 5189 buf->xdp = NULL; 5190 dirty++; 5191 error = 1; 5192 if (!priv->hwts_rx_en) 5193 rx_errors++; 5194 } 5195 5196 if (unlikely(error && (status & rx_not_ls))) 5197 goto read_again; 5198 if (unlikely(error)) { 5199 count++; 5200 continue; 5201 } 5202 5203 /* XSK pool expects RX frame 1:1 mapped to XSK buffer */ 5204 if (likely(status & rx_not_ls)) { 5205 xsk_buff_free(buf->xdp); 5206 buf->xdp = NULL; 5207 dirty++; 5208 count++; 5209 goto read_again; 5210 } 5211 5212 ctx = xsk_buff_to_stmmac_ctx(buf->xdp); 5213 ctx->priv = priv; 5214 ctx->desc = p; 5215 ctx->ndesc = np; 5216 5217 /* XDP ZC frames only support primary buffers for now */ 5218 buf1_len = stmmac_rx_buf1_len(priv, p, status, len); 5219 len += buf1_len; 5220 5221 /* ACS is disabled; strip manually. */ 5222 if (likely(!(status & rx_not_ls))) { 5223 buf1_len -= ETH_FCS_LEN; 5224 len -= ETH_FCS_LEN; 5225 } 5226 5227 /* RX buffer is good and fits into an XSK pool buffer */ 5228 buf->xdp->data_end = buf->xdp->data + buf1_len; 5229 xsk_buff_dma_sync_for_cpu(buf->xdp, rx_q->xsk_pool); 5230 5231 prog = READ_ONCE(priv->xdp_prog); 5232 res = __stmmac_xdp_run_prog(priv, prog, buf->xdp); 5233 5234 switch (res) { 5235 case STMMAC_XDP_PASS: 5236 stmmac_dispatch_skb_zc(priv, queue, p, np, buf->xdp); 5237 xsk_buff_free(buf->xdp); 5238 break; 5239 case STMMAC_XDP_CONSUMED: 5240 xsk_buff_free(buf->xdp); 5241 rx_dropped++; 5242 break; 5243 case STMMAC_XDP_TX: 5244 case STMMAC_XDP_REDIRECT: 5245 xdp_status |= res; 5246 break; 5247 } 5248 5249 buf->xdp = NULL; 5250 dirty++; 5251 count++; 5252 } 5253 5254 if (status & rx_not_ls) { 5255 rx_q->state_saved = true; 5256 rx_q->state.error = error; 5257 rx_q->state.len = len; 5258 } 5259 5260 stmmac_finalize_xdp_rx(priv, xdp_status); 5261 5262 flags = u64_stats_update_begin_irqsave(&rxq_stats->syncp); 5263 rxq_stats->rx_pkt_n += count; 5264 u64_stats_update_end_irqrestore(&rxq_stats->syncp, flags); 5265 5266 priv->xstats.rx_dropped += rx_dropped; 5267 priv->xstats.rx_errors += rx_errors; 5268 5269 if (xsk_uses_need_wakeup(rx_q->xsk_pool)) { 5270 if (failure || stmmac_rx_dirty(priv, queue) > 0) 5271 xsk_set_rx_need_wakeup(rx_q->xsk_pool); 5272 else 5273 xsk_clear_rx_need_wakeup(rx_q->xsk_pool); 5274 5275 return (int)count; 5276 } 5277 5278 return failure ? limit : (int)count; 5279 } 5280 5281 /** 5282 * stmmac_rx - manage the receive process 5283 * @priv: driver private structure 5284 * @limit: napi budget 5285 * @queue: RX queue index. 5286 * Description : this is the function called by the napi poll method. 5287 * It gets all the frames inside the ring.
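* Return: the number of frames processed from the RX ring (at most @limit).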
5288 */ 5289 static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue) 5290 { 5291 u32 rx_errors = 0, rx_dropped = 0, rx_bytes = 0, rx_packets = 0; 5292 struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[queue]; 5293 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue]; 5294 struct stmmac_channel *ch = &priv->channel[queue]; 5295 unsigned int count = 0, error = 0, len = 0; 5296 int status = 0, coe = priv->hw->rx_csum; 5297 unsigned int next_entry = rx_q->cur_rx; 5298 enum dma_data_direction dma_dir; 5299 unsigned int desc_size; 5300 struct sk_buff *skb = NULL; 5301 struct stmmac_xdp_buff ctx; 5302 unsigned long flags; 5303 int xdp_status = 0; 5304 int buf_sz; 5305 5306 dma_dir = page_pool_get_dma_dir(rx_q->page_pool); 5307 buf_sz = DIV_ROUND_UP(priv->dma_conf.dma_buf_sz, PAGE_SIZE) * PAGE_SIZE; 5308 limit = min(priv->dma_conf.dma_rx_size - 1, (unsigned int)limit); 5309 5310 if (netif_msg_rx_status(priv)) { 5311 void *rx_head; 5312 5313 netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__); 5314 if (priv->extend_desc) { 5315 rx_head = (void *)rx_q->dma_erx; 5316 desc_size = sizeof(struct dma_extended_desc); 5317 } else { 5318 rx_head = (void *)rx_q->dma_rx; 5319 desc_size = sizeof(struct dma_desc); 5320 } 5321 5322 stmmac_display_ring(priv, rx_head, priv->dma_conf.dma_rx_size, true, 5323 rx_q->dma_rx_phy, desc_size); 5324 } 5325 while (count < limit) { 5326 unsigned int buf1_len = 0, buf2_len = 0; 5327 enum pkt_hash_types hash_type; 5328 struct stmmac_rx_buffer *buf; 5329 struct dma_desc *np, *p; 5330 int entry; 5331 u32 hash; 5332 5333 if (!count && rx_q->state_saved) { 5334 skb = rx_q->state.skb; 5335 error = rx_q->state.error; 5336 len = rx_q->state.len; 5337 } else { 5338 rx_q->state_saved = false; 5339 skb = NULL; 5340 error = 0; 5341 len = 0; 5342 } 5343 5344 read_again: 5345 if (count >= limit) 5346 break; 5347 5348 buf1_len = 0; 5349 buf2_len = 0; 5350 entry = next_entry; 5351 buf = &rx_q->buf_pool[entry]; 5352 5353 if (priv->extend_desc) 5354 p = (struct dma_desc *)(rx_q->dma_erx + entry); 5355 else 5356 p = rx_q->dma_rx + entry; 5357 5358 /* read the status of the incoming frame */ 5359 status = stmmac_rx_status(priv, &priv->xstats, p); 5360 /* check if managed by the DMA otherwise go ahead */ 5361 if (unlikely(status & dma_own)) 5362 break; 5363 5364 rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx, 5365 priv->dma_conf.dma_rx_size); 5366 next_entry = rx_q->cur_rx; 5367 5368 if (priv->extend_desc) 5369 np = (struct dma_desc *)(rx_q->dma_erx + next_entry); 5370 else 5371 np = rx_q->dma_rx + next_entry; 5372 5373 prefetch(np); 5374 5375 if (priv->extend_desc) 5376 stmmac_rx_extended_status(priv, &priv->xstats, rx_q->dma_erx + entry); 5377 if (unlikely(status == discard_frame)) { 5378 page_pool_recycle_direct(rx_q->page_pool, buf->page); 5379 buf->page = NULL; 5380 error = 1; 5381 if (!priv->hwts_rx_en) 5382 rx_errors++; 5383 } 5384 5385 if (unlikely(error && (status & rx_not_ls))) 5386 goto read_again; 5387 if (unlikely(error)) { 5388 dev_kfree_skb(skb); 5389 skb = NULL; 5390 count++; 5391 continue; 5392 } 5393 5394 /* Buffer is good. Go on. */ 5395 5396 prefetch(page_address(buf->page) + buf->page_offset); 5397 if (buf->sec_page) 5398 prefetch(page_address(buf->sec_page)); 5399 5400 buf1_len = stmmac_rx_buf1_len(priv, p, status, len); 5401 len += buf1_len; 5402 buf2_len = stmmac_rx_buf2_len(priv, p, status, len); 5403 len += buf2_len; 5404 5405 /* ACS is disabled; strip manually. 
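* The 4-byte FCS is still attached to the last buffer of the frame, so ETH_FCS_LEN is dropped from whichever buffer holds the tail.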
*/ 5406 if (likely(!(status & rx_not_ls))) { 5407 if (buf2_len) { 5408 buf2_len -= ETH_FCS_LEN; 5409 len -= ETH_FCS_LEN; 5410 } else if (buf1_len) { 5411 buf1_len -= ETH_FCS_LEN; 5412 len -= ETH_FCS_LEN; 5413 } 5414 } 5415 5416 if (!skb) { 5417 unsigned int pre_len, sync_len; 5418 5419 dma_sync_single_for_cpu(priv->device, buf->addr, 5420 buf1_len, dma_dir); 5421 5422 xdp_init_buff(&ctx.xdp, buf_sz, &rx_q->xdp_rxq); 5423 xdp_prepare_buff(&ctx.xdp, page_address(buf->page), 5424 buf->page_offset, buf1_len, true); 5425 5426 pre_len = ctx.xdp.data_end - ctx.xdp.data_hard_start - 5427 buf->page_offset; 5428 5429 ctx.priv = priv; 5430 ctx.desc = p; 5431 ctx.ndesc = np; 5432 5433 skb = stmmac_xdp_run_prog(priv, &ctx.xdp); 5434 /* Due xdp_adjust_tail: DMA sync for_device 5435 * cover max len CPU touch 5436 */ 5437 sync_len = ctx.xdp.data_end - ctx.xdp.data_hard_start - 5438 buf->page_offset; 5439 sync_len = max(sync_len, pre_len); 5440 5441 /* For Not XDP_PASS verdict */ 5442 if (IS_ERR(skb)) { 5443 unsigned int xdp_res = -PTR_ERR(skb); 5444 5445 if (xdp_res & STMMAC_XDP_CONSUMED) { 5446 page_pool_put_page(rx_q->page_pool, 5447 virt_to_head_page(ctx.xdp.data), 5448 sync_len, true); 5449 buf->page = NULL; 5450 rx_dropped++; 5451 5452 /* Clear skb as it was set as 5453 * status by XDP program. 5454 */ 5455 skb = NULL; 5456 5457 if (unlikely((status & rx_not_ls))) 5458 goto read_again; 5459 5460 count++; 5461 continue; 5462 } else if (xdp_res & (STMMAC_XDP_TX | 5463 STMMAC_XDP_REDIRECT)) { 5464 xdp_status |= xdp_res; 5465 buf->page = NULL; 5466 skb = NULL; 5467 count++; 5468 continue; 5469 } 5470 } 5471 } 5472 5473 if (!skb) { 5474 /* XDP program may expand or reduce tail */ 5475 buf1_len = ctx.xdp.data_end - ctx.xdp.data; 5476 5477 skb = napi_alloc_skb(&ch->rx_napi, buf1_len); 5478 if (!skb) { 5479 rx_dropped++; 5480 count++; 5481 goto drain_data; 5482 } 5483 5484 /* XDP program may adjust header */ 5485 skb_copy_to_linear_data(skb, ctx.xdp.data, buf1_len); 5486 skb_put(skb, buf1_len); 5487 5488 /* Data payload copied into SKB, page ready for recycle */ 5489 page_pool_recycle_direct(rx_q->page_pool, buf->page); 5490 buf->page = NULL; 5491 } else if (buf1_len) { 5492 dma_sync_single_for_cpu(priv->device, buf->addr, 5493 buf1_len, dma_dir); 5494 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, 5495 buf->page, buf->page_offset, buf1_len, 5496 priv->dma_conf.dma_buf_sz); 5497 5498 /* Data payload appended into SKB */ 5499 skb_mark_for_recycle(skb); 5500 buf->page = NULL; 5501 } 5502 5503 if (buf2_len) { 5504 dma_sync_single_for_cpu(priv->device, buf->sec_addr, 5505 buf2_len, dma_dir); 5506 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, 5507 buf->sec_page, 0, buf2_len, 5508 priv->dma_conf.dma_buf_sz); 5509 5510 /* Data payload appended into SKB */ 5511 skb_mark_for_recycle(skb); 5512 buf->sec_page = NULL; 5513 } 5514 5515 drain_data: 5516 if (likely(status & rx_not_ls)) 5517 goto read_again; 5518 if (!skb) 5519 continue; 5520 5521 /* Got entire packet into SKB. Finish it. 
*/ 5522 5523 stmmac_get_rx_hwtstamp(priv, p, np, skb); 5524 stmmac_rx_vlan(priv->dev, skb); 5525 skb->protocol = eth_type_trans(skb, priv->dev); 5526 5527 if (unlikely(!coe) || !stmmac_has_ip_ethertype(skb)) 5528 skb_checksum_none_assert(skb); 5529 else 5530 skb->ip_summed = CHECKSUM_UNNECESSARY; 5531 5532 if (!stmmac_get_rx_hash(priv, p, &hash, &hash_type)) 5533 skb_set_hash(skb, hash, hash_type); 5534 5535 skb_record_rx_queue(skb, queue); 5536 napi_gro_receive(&ch->rx_napi, skb); 5537 skb = NULL; 5538 5539 rx_packets++; 5540 rx_bytes += len; 5541 count++; 5542 } 5543 5544 if (status & rx_not_ls || skb) { 5545 rx_q->state_saved = true; 5546 rx_q->state.skb = skb; 5547 rx_q->state.error = error; 5548 rx_q->state.len = len; 5549 } 5550 5551 stmmac_finalize_xdp_rx(priv, xdp_status); 5552 5553 stmmac_rx_refill(priv, queue); 5554 5555 flags = u64_stats_update_begin_irqsave(&rxq_stats->syncp); 5556 rxq_stats->rx_packets += rx_packets; 5557 rxq_stats->rx_bytes += rx_bytes; 5558 rxq_stats->rx_pkt_n += count; 5559 u64_stats_update_end_irqrestore(&rxq_stats->syncp, flags); 5560 5561 priv->xstats.rx_dropped += rx_dropped; 5562 priv->xstats.rx_errors += rx_errors; 5563 5564 return count; 5565 } 5566 5567 static int stmmac_napi_poll_rx(struct napi_struct *napi, int budget) 5568 { 5569 struct stmmac_channel *ch = 5570 container_of(napi, struct stmmac_channel, rx_napi); 5571 struct stmmac_priv *priv = ch->priv_data; 5572 struct stmmac_rxq_stats *rxq_stats; 5573 u32 chan = ch->index; 5574 unsigned long flags; 5575 int work_done; 5576 5577 rxq_stats = &priv->xstats.rxq_stats[chan]; 5578 flags = u64_stats_update_begin_irqsave(&rxq_stats->syncp); 5579 rxq_stats->napi_poll++; 5580 u64_stats_update_end_irqrestore(&rxq_stats->syncp, flags); 5581 5582 work_done = stmmac_rx(priv, budget, chan); 5583 if (work_done < budget && napi_complete_done(napi, work_done)) { 5584 unsigned long flags; 5585 5586 spin_lock_irqsave(&ch->lock, flags); 5587 stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 0); 5588 spin_unlock_irqrestore(&ch->lock, flags); 5589 } 5590 5591 return work_done; 5592 } 5593 5594 static int stmmac_napi_poll_tx(struct napi_struct *napi, int budget) 5595 { 5596 struct stmmac_channel *ch = 5597 container_of(napi, struct stmmac_channel, tx_napi); 5598 struct stmmac_priv *priv = ch->priv_data; 5599 struct stmmac_txq_stats *txq_stats; 5600 u32 chan = ch->index; 5601 unsigned long flags; 5602 int work_done; 5603 5604 txq_stats = &priv->xstats.txq_stats[chan]; 5605 flags = u64_stats_update_begin_irqsave(&txq_stats->syncp); 5606 txq_stats->napi_poll++; 5607 u64_stats_update_end_irqrestore(&txq_stats->syncp, flags); 5608 5609 work_done = stmmac_tx_clean(priv, budget, chan); 5610 work_done = min(work_done, budget); 5611 5612 if (work_done < budget && napi_complete_done(napi, work_done)) { 5613 unsigned long flags; 5614 5615 spin_lock_irqsave(&ch->lock, flags); 5616 stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 0, 1); 5617 spin_unlock_irqrestore(&ch->lock, flags); 5618 } 5619 5620 return work_done; 5621 } 5622 5623 static int stmmac_napi_poll_rxtx(struct napi_struct *napi, int budget) 5624 { 5625 struct stmmac_channel *ch = 5626 container_of(napi, struct stmmac_channel, rxtx_napi); 5627 struct stmmac_priv *priv = ch->priv_data; 5628 int rx_done, tx_done, rxtx_done; 5629 struct stmmac_rxq_stats *rxq_stats; 5630 struct stmmac_txq_stats *txq_stats; 5631 u32 chan = ch->index; 5632 unsigned long flags; 5633 5634 rxq_stats = &priv->xstats.rxq_stats[chan]; 5635 flags = 
u64_stats_update_begin_irqsave(&rxq_stats->syncp);
5636 rxq_stats->napi_poll++;
5637 u64_stats_update_end_irqrestore(&rxq_stats->syncp, flags);
5638
5639 txq_stats = &priv->xstats.txq_stats[chan];
5640 flags = u64_stats_update_begin_irqsave(&txq_stats->syncp);
5641 txq_stats->napi_poll++;
5642 u64_stats_update_end_irqrestore(&txq_stats->syncp, flags);
5643
5644 tx_done = stmmac_tx_clean(priv, budget, chan);
5645 tx_done = min(tx_done, budget);
5646
5647 rx_done = stmmac_rx_zc(priv, budget, chan);
5648
5649 rxtx_done = max(tx_done, rx_done);
5650
5651 /* If either TX or RX work is not complete, return budget
5652 * and keep polling
5653 */
5654 if (rxtx_done >= budget)
5655 return budget;
5656
5657 /* all work done, exit the polling mode */
5658 if (napi_complete_done(napi, rxtx_done)) {
5659 unsigned long flags;
5660
5661 spin_lock_irqsave(&ch->lock, flags);
5662 /* Both RX and TX work are complete,
5663 * so enable both RX & TX IRQs.
5664 */
5665 stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
5666 spin_unlock_irqrestore(&ch->lock, flags);
5667 }
5668
5669 return min(rxtx_done, budget - 1);
5670 }
5671
5672 /**
5673 * stmmac_tx_timeout
5674 * @dev : Pointer to net device structure
5675 * @txqueue: the index of the hanging transmit queue
5676 * Description: this function is called when a packet transmission fails to
5677 * complete within a reasonable time. The driver will mark the error in the
5678 * netdev structure and arrange for the device to be reset to a sane state
5679 * in order to transmit a new packet.
5680 */
5681 static void stmmac_tx_timeout(struct net_device *dev, unsigned int txqueue)
5682 {
5683 struct stmmac_priv *priv = netdev_priv(dev);
5684
5685 stmmac_global_err(priv);
5686 }
5687
5688 /**
5689 * stmmac_set_rx_mode - entry point for multicast addressing
5690 * @dev : pointer to the device structure
5691 * Description:
5692 * This function is a driver entry point which gets called by the kernel
5693 * whenever multicast addresses must be enabled/disabled.
5694 * Return value:
5695 * void.
5696 */
5697 static void stmmac_set_rx_mode(struct net_device *dev)
5698 {
5699 struct stmmac_priv *priv = netdev_priv(dev);
5700
5701 stmmac_set_filter(priv, priv->hw, dev);
5702 }
5703
5704 /**
5705 * stmmac_change_mtu - entry point to change MTU size for the device.
5706 * @dev : device pointer.
5707 * @new_mtu : the new MTU size for the device.
5708 * Description: the Maximum Transfer Unit (MTU) is used by the network layer
5709 * to drive packet transmission. Ethernet has an MTU of 1500 octets
5710 * (ETH_DATA_LEN). This value can be changed with ifconfig.
5711 * Return value:
5712 * 0 on success and a negative integer as defined in errno.h
5713 * file on failure.
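 *
 * When the interface is running, the new MTU is applied by building a
 * fresh DMA descriptor configuration sized for it and then reopening
 * the interface with that configuration (see the function body below).
 * A typical way to exercise this path from user space (the interface
 * name here is only an example):
 *   ip link set dev eth0 mtu 1500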
5714 */ 5715 static int stmmac_change_mtu(struct net_device *dev, int new_mtu) 5716 { 5717 struct stmmac_priv *priv = netdev_priv(dev); 5718 int txfifosz = priv->plat->tx_fifo_size; 5719 struct stmmac_dma_conf *dma_conf; 5720 const int mtu = new_mtu; 5721 int ret; 5722 5723 if (txfifosz == 0) 5724 txfifosz = priv->dma_cap.tx_fifo_size; 5725 5726 txfifosz /= priv->plat->tx_queues_to_use; 5727 5728 if (stmmac_xdp_is_enabled(priv) && new_mtu > ETH_DATA_LEN) { 5729 netdev_dbg(priv->dev, "Jumbo frames not supported for XDP\n"); 5730 return -EINVAL; 5731 } 5732 5733 new_mtu = STMMAC_ALIGN(new_mtu); 5734 5735 /* If condition true, FIFO is too small or MTU too large */ 5736 if ((txfifosz < new_mtu) || (new_mtu > BUF_SIZE_16KiB)) 5737 return -EINVAL; 5738 5739 if (netif_running(dev)) { 5740 netdev_dbg(priv->dev, "restarting interface to change its MTU\n"); 5741 /* Try to allocate the new DMA conf with the new mtu */ 5742 dma_conf = stmmac_setup_dma_desc(priv, mtu); 5743 if (IS_ERR(dma_conf)) { 5744 netdev_err(priv->dev, "failed allocating new dma conf for new MTU %d\n", 5745 mtu); 5746 return PTR_ERR(dma_conf); 5747 } 5748 5749 stmmac_release(dev); 5750 5751 ret = __stmmac_open(dev, dma_conf); 5752 if (ret) { 5753 free_dma_desc_resources(priv, dma_conf); 5754 kfree(dma_conf); 5755 netdev_err(priv->dev, "failed reopening the interface after MTU change\n"); 5756 return ret; 5757 } 5758 5759 kfree(dma_conf); 5760 5761 stmmac_set_rx_mode(dev); 5762 } 5763 5764 dev->mtu = mtu; 5765 netdev_update_features(dev); 5766 5767 return 0; 5768 } 5769 5770 static netdev_features_t stmmac_fix_features(struct net_device *dev, 5771 netdev_features_t features) 5772 { 5773 struct stmmac_priv *priv = netdev_priv(dev); 5774 5775 if (priv->plat->rx_coe == STMMAC_RX_COE_NONE) 5776 features &= ~NETIF_F_RXCSUM; 5777 5778 if (!priv->plat->tx_coe) 5779 features &= ~NETIF_F_CSUM_MASK; 5780 5781 /* Some GMAC devices have a bugged Jumbo frame support that 5782 * needs to have the Tx COE disabled for oversized frames 5783 * (due to limited buffer sizes). In this case we disable 5784 * the TX csum insertion in the TDES and not use SF. 5785 */ 5786 if (priv->plat->bugged_jumbo && (dev->mtu > ETH_DATA_LEN)) 5787 features &= ~NETIF_F_CSUM_MASK; 5788 5789 /* Disable tso if asked by ethtool */ 5790 if ((priv->plat->flags & STMMAC_FLAG_TSO_EN) && (priv->dma_cap.tsoen)) { 5791 if (features & NETIF_F_TSO) 5792 priv->tso = true; 5793 else 5794 priv->tso = false; 5795 } 5796 5797 return features; 5798 } 5799 5800 static int stmmac_set_features(struct net_device *netdev, 5801 netdev_features_t features) 5802 { 5803 struct stmmac_priv *priv = netdev_priv(netdev); 5804 5805 /* Keep the COE Type in case of csum is supporting */ 5806 if (features & NETIF_F_RXCSUM) 5807 priv->hw->rx_csum = priv->plat->rx_coe; 5808 else 5809 priv->hw->rx_csum = 0; 5810 /* No check needed because rx_coe has been set before and it will be 5811 * fixed in case of issue. 
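 * stmmac_rx_ipc() below re-programs the RX checksum engine with the
 * rx_csum value chosen above; Split Header is then re-evaluated per RX
 * channel, since it is only enabled while RX checksum offload is on.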
5812 */ 5813 stmmac_rx_ipc(priv, priv->hw); 5814 5815 if (priv->sph_cap) { 5816 bool sph_en = (priv->hw->rx_csum > 0) && priv->sph; 5817 u32 chan; 5818 5819 for (chan = 0; chan < priv->plat->rx_queues_to_use; chan++) 5820 stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan); 5821 } 5822 5823 return 0; 5824 } 5825 5826 static void stmmac_fpe_event_status(struct stmmac_priv *priv, int status) 5827 { 5828 struct stmmac_fpe_cfg *fpe_cfg = priv->plat->fpe_cfg; 5829 enum stmmac_fpe_state *lo_state = &fpe_cfg->lo_fpe_state; 5830 enum stmmac_fpe_state *lp_state = &fpe_cfg->lp_fpe_state; 5831 bool *hs_enable = &fpe_cfg->hs_enable; 5832 5833 if (status == FPE_EVENT_UNKNOWN || !*hs_enable) 5834 return; 5835 5836 /* If LP has sent verify mPacket, LP is FPE capable */ 5837 if ((status & FPE_EVENT_RVER) == FPE_EVENT_RVER) { 5838 if (*lp_state < FPE_STATE_CAPABLE) 5839 *lp_state = FPE_STATE_CAPABLE; 5840 5841 /* If user has requested FPE enable, quickly response */ 5842 if (*hs_enable) 5843 stmmac_fpe_send_mpacket(priv, priv->ioaddr, 5844 fpe_cfg, 5845 MPACKET_RESPONSE); 5846 } 5847 5848 /* If Local has sent verify mPacket, Local is FPE capable */ 5849 if ((status & FPE_EVENT_TVER) == FPE_EVENT_TVER) { 5850 if (*lo_state < FPE_STATE_CAPABLE) 5851 *lo_state = FPE_STATE_CAPABLE; 5852 } 5853 5854 /* If LP has sent response mPacket, LP is entering FPE ON */ 5855 if ((status & FPE_EVENT_RRSP) == FPE_EVENT_RRSP) 5856 *lp_state = FPE_STATE_ENTERING_ON; 5857 5858 /* If Local has sent response mPacket, Local is entering FPE ON */ 5859 if ((status & FPE_EVENT_TRSP) == FPE_EVENT_TRSP) 5860 *lo_state = FPE_STATE_ENTERING_ON; 5861 5862 if (!test_bit(__FPE_REMOVING, &priv->fpe_task_state) && 5863 !test_and_set_bit(__FPE_TASK_SCHED, &priv->fpe_task_state) && 5864 priv->fpe_wq) { 5865 queue_work(priv->fpe_wq, &priv->fpe_task); 5866 } 5867 } 5868 5869 static void stmmac_common_interrupt(struct stmmac_priv *priv) 5870 { 5871 u32 rx_cnt = priv->plat->rx_queues_to_use; 5872 u32 tx_cnt = priv->plat->tx_queues_to_use; 5873 u32 queues_count; 5874 u32 queue; 5875 bool xmac; 5876 5877 xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac; 5878 queues_count = (rx_cnt > tx_cnt) ? rx_cnt : tx_cnt; 5879 5880 if (priv->irq_wake) 5881 pm_wakeup_event(priv->device, 0); 5882 5883 if (priv->dma_cap.estsel) 5884 stmmac_est_irq_status(priv, priv->ioaddr, priv->dev, 5885 &priv->xstats, tx_cnt); 5886 5887 if (priv->dma_cap.fpesel) { 5888 int status = stmmac_fpe_irq_status(priv, priv->ioaddr, 5889 priv->dev); 5890 5891 stmmac_fpe_event_status(priv, status); 5892 } 5893 5894 /* To handle GMAC own interrupts */ 5895 if ((priv->plat->has_gmac) || xmac) { 5896 int status = stmmac_host_irq_status(priv, priv->hw, &priv->xstats); 5897 5898 if (unlikely(status)) { 5899 /* For LPI we need to save the tx status */ 5900 if (status & CORE_IRQ_TX_PATH_IN_LPI_MODE) 5901 priv->tx_path_in_lpi_mode = true; 5902 if (status & CORE_IRQ_TX_PATH_EXIT_LPI_MODE) 5903 priv->tx_path_in_lpi_mode = false; 5904 } 5905 5906 for (queue = 0; queue < queues_count; queue++) { 5907 status = stmmac_host_mtl_irq_status(priv, priv->hw, 5908 queue); 5909 } 5910 5911 /* PCS link status */ 5912 if (priv->hw->pcs && 5913 !(priv->plat->flags & STMMAC_FLAG_HAS_INTEGRATED_PCS)) { 5914 if (priv->xstats.pcs_link) 5915 netif_carrier_on(priv->dev); 5916 else 5917 netif_carrier_off(priv->dev); 5918 } 5919 5920 stmmac_timestamp_interrupt(priv, priv); 5921 } 5922 } 5923 5924 /** 5925 * stmmac_interrupt - main ISR 5926 * @irq: interrupt number. 5927 * @dev_id: to pass the net device pointer. 
5928 * Description: this is the main driver interrupt service routine. 5929 * It can call: 5930 * o DMA service routine (to manage incoming frame reception and transmission 5931 * status) 5932 * o Core interrupts to manage: remote wake-up, management counter, LPI 5933 * interrupts. 5934 */ 5935 static irqreturn_t stmmac_interrupt(int irq, void *dev_id) 5936 { 5937 struct net_device *dev = (struct net_device *)dev_id; 5938 struct stmmac_priv *priv = netdev_priv(dev); 5939 5940 /* Check if adapter is up */ 5941 if (test_bit(STMMAC_DOWN, &priv->state)) 5942 return IRQ_HANDLED; 5943 5944 /* Check if a fatal error happened */ 5945 if (stmmac_safety_feat_interrupt(priv)) 5946 return IRQ_HANDLED; 5947 5948 /* To handle Common interrupts */ 5949 stmmac_common_interrupt(priv); 5950 5951 /* To handle DMA interrupts */ 5952 stmmac_dma_interrupt(priv); 5953 5954 return IRQ_HANDLED; 5955 } 5956 5957 static irqreturn_t stmmac_mac_interrupt(int irq, void *dev_id) 5958 { 5959 struct net_device *dev = (struct net_device *)dev_id; 5960 struct stmmac_priv *priv = netdev_priv(dev); 5961 5962 if (unlikely(!dev)) { 5963 netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__); 5964 return IRQ_NONE; 5965 } 5966 5967 /* Check if adapter is up */ 5968 if (test_bit(STMMAC_DOWN, &priv->state)) 5969 return IRQ_HANDLED; 5970 5971 /* To handle Common interrupts */ 5972 stmmac_common_interrupt(priv); 5973 5974 return IRQ_HANDLED; 5975 } 5976 5977 static irqreturn_t stmmac_safety_interrupt(int irq, void *dev_id) 5978 { 5979 struct net_device *dev = (struct net_device *)dev_id; 5980 struct stmmac_priv *priv = netdev_priv(dev); 5981 5982 if (unlikely(!dev)) { 5983 netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__); 5984 return IRQ_NONE; 5985 } 5986 5987 /* Check if adapter is up */ 5988 if (test_bit(STMMAC_DOWN, &priv->state)) 5989 return IRQ_HANDLED; 5990 5991 /* Check if a fatal error happened */ 5992 stmmac_safety_feat_interrupt(priv); 5993 5994 return IRQ_HANDLED; 5995 } 5996 5997 static irqreturn_t stmmac_msi_intr_tx(int irq, void *data) 5998 { 5999 struct stmmac_tx_queue *tx_q = (struct stmmac_tx_queue *)data; 6000 struct stmmac_dma_conf *dma_conf; 6001 int chan = tx_q->queue_index; 6002 struct stmmac_priv *priv; 6003 int status; 6004 6005 dma_conf = container_of(tx_q, struct stmmac_dma_conf, tx_queue[chan]); 6006 priv = container_of(dma_conf, struct stmmac_priv, dma_conf); 6007 6008 if (unlikely(!data)) { 6009 netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__); 6010 return IRQ_NONE; 6011 } 6012 6013 /* Check if adapter is up */ 6014 if (test_bit(STMMAC_DOWN, &priv->state)) 6015 return IRQ_HANDLED; 6016 6017 status = stmmac_napi_check(priv, chan, DMA_DIR_TX); 6018 6019 if (unlikely(status & tx_hard_error_bump_tc)) { 6020 /* Try to bump up the dma threshold on this failure */ 6021 stmmac_bump_dma_threshold(priv, chan); 6022 } else if (unlikely(status == tx_hard_error)) { 6023 stmmac_tx_err(priv, chan); 6024 } 6025 6026 return IRQ_HANDLED; 6027 } 6028 6029 static irqreturn_t stmmac_msi_intr_rx(int irq, void *data) 6030 { 6031 struct stmmac_rx_queue *rx_q = (struct stmmac_rx_queue *)data; 6032 struct stmmac_dma_conf *dma_conf; 6033 int chan = rx_q->queue_index; 6034 struct stmmac_priv *priv; 6035 6036 dma_conf = container_of(rx_q, struct stmmac_dma_conf, rx_queue[chan]); 6037 priv = container_of(dma_conf, struct stmmac_priv, dma_conf); 6038 6039 if (unlikely(!data)) { 6040 netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__); 6041 return IRQ_NONE; 6042 } 6043 6044 /* Check if adapter is 
up */ 6045 if (test_bit(STMMAC_DOWN, &priv->state)) 6046 return IRQ_HANDLED; 6047 6048 stmmac_napi_check(priv, chan, DMA_DIR_RX); 6049 6050 return IRQ_HANDLED; 6051 } 6052 6053 /** 6054 * stmmac_ioctl - Entry point for the Ioctl 6055 * @dev: Device pointer. 6056 * @rq: An IOCTL specefic structure, that can contain a pointer to 6057 * a proprietary structure used to pass information to the driver. 6058 * @cmd: IOCTL command 6059 * Description: 6060 * Currently it supports the phy_mii_ioctl(...) and HW time stamping. 6061 */ 6062 static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) 6063 { 6064 struct stmmac_priv *priv = netdev_priv (dev); 6065 int ret = -EOPNOTSUPP; 6066 6067 if (!netif_running(dev)) 6068 return -EINVAL; 6069 6070 switch (cmd) { 6071 case SIOCGMIIPHY: 6072 case SIOCGMIIREG: 6073 case SIOCSMIIREG: 6074 ret = phylink_mii_ioctl(priv->phylink, rq, cmd); 6075 break; 6076 case SIOCSHWTSTAMP: 6077 ret = stmmac_hwtstamp_set(dev, rq); 6078 break; 6079 case SIOCGHWTSTAMP: 6080 ret = stmmac_hwtstamp_get(dev, rq); 6081 break; 6082 default: 6083 break; 6084 } 6085 6086 return ret; 6087 } 6088 6089 static int stmmac_setup_tc_block_cb(enum tc_setup_type type, void *type_data, 6090 void *cb_priv) 6091 { 6092 struct stmmac_priv *priv = cb_priv; 6093 int ret = -EOPNOTSUPP; 6094 6095 if (!tc_cls_can_offload_and_chain0(priv->dev, type_data)) 6096 return ret; 6097 6098 __stmmac_disable_all_queues(priv); 6099 6100 switch (type) { 6101 case TC_SETUP_CLSU32: 6102 ret = stmmac_tc_setup_cls_u32(priv, priv, type_data); 6103 break; 6104 case TC_SETUP_CLSFLOWER: 6105 ret = stmmac_tc_setup_cls(priv, priv, type_data); 6106 break; 6107 default: 6108 break; 6109 } 6110 6111 stmmac_enable_all_queues(priv); 6112 return ret; 6113 } 6114 6115 static LIST_HEAD(stmmac_block_cb_list); 6116 6117 static int stmmac_setup_tc(struct net_device *ndev, enum tc_setup_type type, 6118 void *type_data) 6119 { 6120 struct stmmac_priv *priv = netdev_priv(ndev); 6121 6122 switch (type) { 6123 case TC_QUERY_CAPS: 6124 return stmmac_tc_query_caps(priv, priv, type_data); 6125 case TC_SETUP_BLOCK: 6126 return flow_block_cb_setup_simple(type_data, 6127 &stmmac_block_cb_list, 6128 stmmac_setup_tc_block_cb, 6129 priv, priv, true); 6130 case TC_SETUP_QDISC_CBS: 6131 return stmmac_tc_setup_cbs(priv, priv, type_data); 6132 case TC_SETUP_QDISC_TAPRIO: 6133 return stmmac_tc_setup_taprio(priv, priv, type_data); 6134 case TC_SETUP_QDISC_ETF: 6135 return stmmac_tc_setup_etf(priv, priv, type_data); 6136 default: 6137 return -EOPNOTSUPP; 6138 } 6139 } 6140 6141 static u16 stmmac_select_queue(struct net_device *dev, struct sk_buff *skb, 6142 struct net_device *sb_dev) 6143 { 6144 int gso = skb_shinfo(skb)->gso_type; 6145 6146 if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6 | SKB_GSO_UDP_L4)) { 6147 /* 6148 * There is no way to determine the number of TSO/USO 6149 * capable Queues. Let's use always the Queue 0 6150 * because if TSO/USO is supported then at least this 6151 * one will be capable. 
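 * (USO above refers to UDP segmentation offload, i.e. SKB_GSO_UDP_L4.)
 * All other traffic falls through to netdev_pick_tx() below, bounded
 * by the real number of TX queues.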
6152 */ 6153 return 0; 6154 } 6155 6156 return netdev_pick_tx(dev, skb, NULL) % dev->real_num_tx_queues; 6157 } 6158 6159 static int stmmac_set_mac_address(struct net_device *ndev, void *addr) 6160 { 6161 struct stmmac_priv *priv = netdev_priv(ndev); 6162 int ret = 0; 6163 6164 ret = pm_runtime_resume_and_get(priv->device); 6165 if (ret < 0) 6166 return ret; 6167 6168 ret = eth_mac_addr(ndev, addr); 6169 if (ret) 6170 goto set_mac_error; 6171 6172 stmmac_set_umac_addr(priv, priv->hw, ndev->dev_addr, 0); 6173 6174 set_mac_error: 6175 pm_runtime_put(priv->device); 6176 6177 return ret; 6178 } 6179 6180 #ifdef CONFIG_DEBUG_FS 6181 static struct dentry *stmmac_fs_dir; 6182 6183 static void sysfs_display_ring(void *head, int size, int extend_desc, 6184 struct seq_file *seq, dma_addr_t dma_phy_addr) 6185 { 6186 int i; 6187 struct dma_extended_desc *ep = (struct dma_extended_desc *)head; 6188 struct dma_desc *p = (struct dma_desc *)head; 6189 dma_addr_t dma_addr; 6190 6191 for (i = 0; i < size; i++) { 6192 if (extend_desc) { 6193 dma_addr = dma_phy_addr + i * sizeof(*ep); 6194 seq_printf(seq, "%d [%pad]: 0x%x 0x%x 0x%x 0x%x\n", 6195 i, &dma_addr, 6196 le32_to_cpu(ep->basic.des0), 6197 le32_to_cpu(ep->basic.des1), 6198 le32_to_cpu(ep->basic.des2), 6199 le32_to_cpu(ep->basic.des3)); 6200 ep++; 6201 } else { 6202 dma_addr = dma_phy_addr + i * sizeof(*p); 6203 seq_printf(seq, "%d [%pad]: 0x%x 0x%x 0x%x 0x%x\n", 6204 i, &dma_addr, 6205 le32_to_cpu(p->des0), le32_to_cpu(p->des1), 6206 le32_to_cpu(p->des2), le32_to_cpu(p->des3)); 6207 p++; 6208 } 6209 seq_printf(seq, "\n"); 6210 } 6211 } 6212 6213 static int stmmac_rings_status_show(struct seq_file *seq, void *v) 6214 { 6215 struct net_device *dev = seq->private; 6216 struct stmmac_priv *priv = netdev_priv(dev); 6217 u32 rx_count = priv->plat->rx_queues_to_use; 6218 u32 tx_count = priv->plat->tx_queues_to_use; 6219 u32 queue; 6220 6221 if ((dev->flags & IFF_UP) == 0) 6222 return 0; 6223 6224 for (queue = 0; queue < rx_count; queue++) { 6225 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue]; 6226 6227 seq_printf(seq, "RX Queue %d:\n", queue); 6228 6229 if (priv->extend_desc) { 6230 seq_printf(seq, "Extended descriptor ring:\n"); 6231 sysfs_display_ring((void *)rx_q->dma_erx, 6232 priv->dma_conf.dma_rx_size, 1, seq, rx_q->dma_rx_phy); 6233 } else { 6234 seq_printf(seq, "Descriptor ring:\n"); 6235 sysfs_display_ring((void *)rx_q->dma_rx, 6236 priv->dma_conf.dma_rx_size, 0, seq, rx_q->dma_rx_phy); 6237 } 6238 } 6239 6240 for (queue = 0; queue < tx_count; queue++) { 6241 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue]; 6242 6243 seq_printf(seq, "TX Queue %d:\n", queue); 6244 6245 if (priv->extend_desc) { 6246 seq_printf(seq, "Extended descriptor ring:\n"); 6247 sysfs_display_ring((void *)tx_q->dma_etx, 6248 priv->dma_conf.dma_tx_size, 1, seq, tx_q->dma_tx_phy); 6249 } else if (!(tx_q->tbs & STMMAC_TBS_AVAIL)) { 6250 seq_printf(seq, "Descriptor ring:\n"); 6251 sysfs_display_ring((void *)tx_q->dma_tx, 6252 priv->dma_conf.dma_tx_size, 0, seq, tx_q->dma_tx_phy); 6253 } 6254 } 6255 6256 return 0; 6257 } 6258 DEFINE_SHOW_ATTRIBUTE(stmmac_rings_status); 6259 6260 static int stmmac_dma_cap_show(struct seq_file *seq, void *v) 6261 { 6262 static const char * const dwxgmac_timestamp_source[] = { 6263 "None", 6264 "Internal", 6265 "External", 6266 "Both", 6267 }; 6268 static const char * const dwxgmac_safety_feature_desc[] = { 6269 "No", 6270 "All Safety Features with ECC and Parity", 6271 "All Safety Features without ECC or Parity", 6272 
"All Safety Features with Parity Only", 6273 "ECC Only", 6274 "UNDEFINED", 6275 "UNDEFINED", 6276 "UNDEFINED", 6277 }; 6278 struct net_device *dev = seq->private; 6279 struct stmmac_priv *priv = netdev_priv(dev); 6280 6281 if (!priv->hw_cap_support) { 6282 seq_printf(seq, "DMA HW features not supported\n"); 6283 return 0; 6284 } 6285 6286 seq_printf(seq, "==============================\n"); 6287 seq_printf(seq, "\tDMA HW features\n"); 6288 seq_printf(seq, "==============================\n"); 6289 6290 seq_printf(seq, "\t10/100 Mbps: %s\n", 6291 (priv->dma_cap.mbps_10_100) ? "Y" : "N"); 6292 seq_printf(seq, "\t1000 Mbps: %s\n", 6293 (priv->dma_cap.mbps_1000) ? "Y" : "N"); 6294 seq_printf(seq, "\tHalf duplex: %s\n", 6295 (priv->dma_cap.half_duplex) ? "Y" : "N"); 6296 if (priv->plat->has_xgmac) { 6297 seq_printf(seq, 6298 "\tNumber of Additional MAC address registers: %d\n", 6299 priv->dma_cap.multi_addr); 6300 } else { 6301 seq_printf(seq, "\tHash Filter: %s\n", 6302 (priv->dma_cap.hash_filter) ? "Y" : "N"); 6303 seq_printf(seq, "\tMultiple MAC address registers: %s\n", 6304 (priv->dma_cap.multi_addr) ? "Y" : "N"); 6305 } 6306 seq_printf(seq, "\tPCS (TBI/SGMII/RTBI PHY interfaces): %s\n", 6307 (priv->dma_cap.pcs) ? "Y" : "N"); 6308 seq_printf(seq, "\tSMA (MDIO) Interface: %s\n", 6309 (priv->dma_cap.sma_mdio) ? "Y" : "N"); 6310 seq_printf(seq, "\tPMT Remote wake up: %s\n", 6311 (priv->dma_cap.pmt_remote_wake_up) ? "Y" : "N"); 6312 seq_printf(seq, "\tPMT Magic Frame: %s\n", 6313 (priv->dma_cap.pmt_magic_frame) ? "Y" : "N"); 6314 seq_printf(seq, "\tRMON module: %s\n", 6315 (priv->dma_cap.rmon) ? "Y" : "N"); 6316 seq_printf(seq, "\tIEEE 1588-2002 Time Stamp: %s\n", 6317 (priv->dma_cap.time_stamp) ? "Y" : "N"); 6318 seq_printf(seq, "\tIEEE 1588-2008 Advanced Time Stamp: %s\n", 6319 (priv->dma_cap.atime_stamp) ? "Y" : "N"); 6320 if (priv->plat->has_xgmac) 6321 seq_printf(seq, "\tTimestamp System Time Source: %s\n", 6322 dwxgmac_timestamp_source[priv->dma_cap.tssrc]); 6323 seq_printf(seq, "\t802.3az - Energy-Efficient Ethernet (EEE): %s\n", 6324 (priv->dma_cap.eee) ? "Y" : "N"); 6325 seq_printf(seq, "\tAV features: %s\n", (priv->dma_cap.av) ? "Y" : "N"); 6326 seq_printf(seq, "\tChecksum Offload in TX: %s\n", 6327 (priv->dma_cap.tx_coe) ? "Y" : "N"); 6328 if (priv->synopsys_id >= DWMAC_CORE_4_00 || 6329 priv->plat->has_xgmac) { 6330 seq_printf(seq, "\tIP Checksum Offload in RX: %s\n", 6331 (priv->dma_cap.rx_coe) ? "Y" : "N"); 6332 } else { 6333 seq_printf(seq, "\tIP Checksum Offload (type1) in RX: %s\n", 6334 (priv->dma_cap.rx_coe_type1) ? "Y" : "N"); 6335 seq_printf(seq, "\tIP Checksum Offload (type2) in RX: %s\n", 6336 (priv->dma_cap.rx_coe_type2) ? "Y" : "N"); 6337 seq_printf(seq, "\tRXFIFO > 2048bytes: %s\n", 6338 (priv->dma_cap.rxfifo_over_2048) ? "Y" : "N"); 6339 } 6340 seq_printf(seq, "\tNumber of Additional RX channel: %d\n", 6341 priv->dma_cap.number_rx_channel); 6342 seq_printf(seq, "\tNumber of Additional TX channel: %d\n", 6343 priv->dma_cap.number_tx_channel); 6344 seq_printf(seq, "\tNumber of Additional RX queues: %d\n", 6345 priv->dma_cap.number_rx_queues); 6346 seq_printf(seq, "\tNumber of Additional TX queues: %d\n", 6347 priv->dma_cap.number_tx_queues); 6348 seq_printf(seq, "\tEnhanced descriptors: %s\n", 6349 (priv->dma_cap.enh_desc) ? "Y" : "N"); 6350 seq_printf(seq, "\tTX Fifo Size: %d\n", priv->dma_cap.tx_fifo_size); 6351 seq_printf(seq, "\tRX Fifo Size: %d\n", priv->dma_cap.rx_fifo_size); 6352 seq_printf(seq, "\tHash Table Size: %lu\n", priv->dma_cap.hash_tb_sz ? 
6353 (BIT(priv->dma_cap.hash_tb_sz) << 5) : 0); 6354 seq_printf(seq, "\tTSO: %s\n", priv->dma_cap.tsoen ? "Y" : "N"); 6355 seq_printf(seq, "\tNumber of PPS Outputs: %d\n", 6356 priv->dma_cap.pps_out_num); 6357 seq_printf(seq, "\tSafety Features: %s\n", 6358 dwxgmac_safety_feature_desc[priv->dma_cap.asp]); 6359 seq_printf(seq, "\tFlexible RX Parser: %s\n", 6360 priv->dma_cap.frpsel ? "Y" : "N"); 6361 seq_printf(seq, "\tEnhanced Addressing: %d\n", 6362 priv->dma_cap.host_dma_width); 6363 seq_printf(seq, "\tReceive Side Scaling: %s\n", 6364 priv->dma_cap.rssen ? "Y" : "N"); 6365 seq_printf(seq, "\tVLAN Hash Filtering: %s\n", 6366 priv->dma_cap.vlhash ? "Y" : "N"); 6367 seq_printf(seq, "\tSplit Header: %s\n", 6368 priv->dma_cap.sphen ? "Y" : "N"); 6369 seq_printf(seq, "\tVLAN TX Insertion: %s\n", 6370 priv->dma_cap.vlins ? "Y" : "N"); 6371 seq_printf(seq, "\tDouble VLAN: %s\n", 6372 priv->dma_cap.dvlan ? "Y" : "N"); 6373 seq_printf(seq, "\tNumber of L3/L4 Filters: %d\n", 6374 priv->dma_cap.l3l4fnum); 6375 seq_printf(seq, "\tARP Offloading: %s\n", 6376 priv->dma_cap.arpoffsel ? "Y" : "N"); 6377 seq_printf(seq, "\tEnhancements to Scheduled Traffic (EST): %s\n", 6378 priv->dma_cap.estsel ? "Y" : "N"); 6379 seq_printf(seq, "\tFrame Preemption (FPE): %s\n", 6380 priv->dma_cap.fpesel ? "Y" : "N"); 6381 seq_printf(seq, "\tTime-Based Scheduling (TBS): %s\n", 6382 priv->dma_cap.tbssel ? "Y" : "N"); 6383 seq_printf(seq, "\tNumber of DMA Channels Enabled for TBS: %d\n", 6384 priv->dma_cap.tbs_ch_num); 6385 seq_printf(seq, "\tPer-Stream Filtering: %s\n", 6386 priv->dma_cap.sgfsel ? "Y" : "N"); 6387 seq_printf(seq, "\tTX Timestamp FIFO Depth: %lu\n", 6388 BIT(priv->dma_cap.ttsfd) >> 1); 6389 seq_printf(seq, "\tNumber of Traffic Classes: %d\n", 6390 priv->dma_cap.numtc); 6391 seq_printf(seq, "\tDCB Feature: %s\n", 6392 priv->dma_cap.dcben ? "Y" : "N"); 6393 seq_printf(seq, "\tIEEE 1588 High Word Register: %s\n", 6394 priv->dma_cap.advthword ? "Y" : "N"); 6395 seq_printf(seq, "\tPTP Offload: %s\n", 6396 priv->dma_cap.ptoen ? "Y" : "N"); 6397 seq_printf(seq, "\tOne-Step Timestamping: %s\n", 6398 priv->dma_cap.osten ? "Y" : "N"); 6399 seq_printf(seq, "\tPriority-Based Flow Control: %s\n", 6400 priv->dma_cap.pfcen ? "Y" : "N"); 6401 seq_printf(seq, "\tNumber of Flexible RX Parser Instructions: %lu\n", 6402 BIT(priv->dma_cap.frpes) << 6); 6403 seq_printf(seq, "\tNumber of Flexible RX Parser Parsable Bytes: %lu\n", 6404 BIT(priv->dma_cap.frpbs) << 6); 6405 seq_printf(seq, "\tParallel Instruction Processor Engines: %d\n", 6406 priv->dma_cap.frppipe_num); 6407 seq_printf(seq, "\tNumber of Extended VLAN Tag Filters: %lu\n", 6408 priv->dma_cap.nrvf_num ? 6409 (BIT(priv->dma_cap.nrvf_num) << 1) : 0); 6410 seq_printf(seq, "\tWidth of the Time Interval Field in GCL: %d\n", 6411 priv->dma_cap.estwid ? 4 * priv->dma_cap.estwid + 12 : 0); 6412 seq_printf(seq, "\tDepth of GCL: %lu\n", 6413 priv->dma_cap.estdep ? (BIT(priv->dma_cap.estdep) << 5) : 0); 6414 seq_printf(seq, "\tQueue/Channel-Based VLAN Tag Insertion on TX: %s\n", 6415 priv->dma_cap.cbtisel ? "Y" : "N"); 6416 seq_printf(seq, "\tNumber of Auxiliary Snapshot Inputs: %d\n", 6417 priv->dma_cap.aux_snapshot_n); 6418 seq_printf(seq, "\tOne-Step Timestamping for PTP over UDP/IP: %s\n", 6419 priv->dma_cap.pou_ost_en ? "Y" : "N"); 6420 seq_printf(seq, "\tEnhanced DMA: %s\n", 6421 priv->dma_cap.edma ? "Y" : "N"); 6422 seq_printf(seq, "\tDifferent Descriptor Cache: %s\n", 6423 priv->dma_cap.ediffc ? 
"Y" : "N"); 6424 seq_printf(seq, "\tVxLAN/NVGRE: %s\n", 6425 priv->dma_cap.vxn ? "Y" : "N"); 6426 seq_printf(seq, "\tDebug Memory Interface: %s\n", 6427 priv->dma_cap.dbgmem ? "Y" : "N"); 6428 seq_printf(seq, "\tNumber of Policing Counters: %lu\n", 6429 priv->dma_cap.pcsel ? BIT(priv->dma_cap.pcsel + 3) : 0); 6430 return 0; 6431 } 6432 DEFINE_SHOW_ATTRIBUTE(stmmac_dma_cap); 6433 6434 /* Use network device events to rename debugfs file entries. 6435 */ 6436 static int stmmac_device_event(struct notifier_block *unused, 6437 unsigned long event, void *ptr) 6438 { 6439 struct net_device *dev = netdev_notifier_info_to_dev(ptr); 6440 struct stmmac_priv *priv = netdev_priv(dev); 6441 6442 if (dev->netdev_ops != &stmmac_netdev_ops) 6443 goto done; 6444 6445 switch (event) { 6446 case NETDEV_CHANGENAME: 6447 if (priv->dbgfs_dir) 6448 priv->dbgfs_dir = debugfs_rename(stmmac_fs_dir, 6449 priv->dbgfs_dir, 6450 stmmac_fs_dir, 6451 dev->name); 6452 break; 6453 } 6454 done: 6455 return NOTIFY_DONE; 6456 } 6457 6458 static struct notifier_block stmmac_notifier = { 6459 .notifier_call = stmmac_device_event, 6460 }; 6461 6462 static void stmmac_init_fs(struct net_device *dev) 6463 { 6464 struct stmmac_priv *priv = netdev_priv(dev); 6465 6466 rtnl_lock(); 6467 6468 /* Create per netdev entries */ 6469 priv->dbgfs_dir = debugfs_create_dir(dev->name, stmmac_fs_dir); 6470 6471 /* Entry to report DMA RX/TX rings */ 6472 debugfs_create_file("descriptors_status", 0444, priv->dbgfs_dir, dev, 6473 &stmmac_rings_status_fops); 6474 6475 /* Entry to report the DMA HW features */ 6476 debugfs_create_file("dma_cap", 0444, priv->dbgfs_dir, dev, 6477 &stmmac_dma_cap_fops); 6478 6479 rtnl_unlock(); 6480 } 6481 6482 static void stmmac_exit_fs(struct net_device *dev) 6483 { 6484 struct stmmac_priv *priv = netdev_priv(dev); 6485 6486 debugfs_remove_recursive(priv->dbgfs_dir); 6487 } 6488 #endif /* CONFIG_DEBUG_FS */ 6489 6490 static u32 stmmac_vid_crc32_le(__le16 vid_le) 6491 { 6492 unsigned char *data = (unsigned char *)&vid_le; 6493 unsigned char data_byte = 0; 6494 u32 crc = ~0x0; 6495 u32 temp = 0; 6496 int i, bits; 6497 6498 bits = get_bitmask_order(VLAN_VID_MASK); 6499 for (i = 0; i < bits; i++) { 6500 if ((i % 8) == 0) 6501 data_byte = data[i / 8]; 6502 6503 temp = ((crc & 1) ^ data_byte) & 1; 6504 crc >>= 1; 6505 data_byte >>= 1; 6506 6507 if (temp) 6508 crc ^= 0xedb88320; 6509 } 6510 6511 return crc; 6512 } 6513 6514 static int stmmac_vlan_update(struct stmmac_priv *priv, bool is_double) 6515 { 6516 u32 crc, hash = 0; 6517 __le16 pmatch = 0; 6518 int count = 0; 6519 u16 vid = 0; 6520 6521 for_each_set_bit(vid, priv->active_vlans, VLAN_N_VID) { 6522 __le16 vid_le = cpu_to_le16(vid); 6523 crc = bitrev32(~stmmac_vid_crc32_le(vid_le)) >> 28; 6524 hash |= (1 << crc); 6525 count++; 6526 } 6527 6528 if (!priv->dma_cap.vlhash) { 6529 if (count > 2) /* VID = 0 always passes filter */ 6530 return -EOPNOTSUPP; 6531 6532 pmatch = cpu_to_le16(vid); 6533 hash = 0; 6534 } 6535 6536 return stmmac_update_vlan_hash(priv, priv->hw, hash, pmatch, is_double); 6537 } 6538 6539 static int stmmac_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid) 6540 { 6541 struct stmmac_priv *priv = netdev_priv(ndev); 6542 bool is_double = false; 6543 int ret; 6544 6545 ret = pm_runtime_resume_and_get(priv->device); 6546 if (ret < 0) 6547 return ret; 6548 6549 if (be16_to_cpu(proto) == ETH_P_8021AD) 6550 is_double = true; 6551 6552 set_bit(vid, priv->active_vlans); 6553 ret = stmmac_vlan_update(priv, is_double); 6554 if (ret) { 6555 
clear_bit(vid, priv->active_vlans); 6556 goto err_pm_put; 6557 } 6558 6559 if (priv->hw->num_vlan) { 6560 ret = stmmac_add_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid); 6561 if (ret) 6562 goto err_pm_put; 6563 } 6564 err_pm_put: 6565 pm_runtime_put(priv->device); 6566 6567 return ret; 6568 } 6569 6570 static int stmmac_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid) 6571 { 6572 struct stmmac_priv *priv = netdev_priv(ndev); 6573 bool is_double = false; 6574 int ret; 6575 6576 ret = pm_runtime_resume_and_get(priv->device); 6577 if (ret < 0) 6578 return ret; 6579 6580 if (be16_to_cpu(proto) == ETH_P_8021AD) 6581 is_double = true; 6582 6583 clear_bit(vid, priv->active_vlans); 6584 6585 if (priv->hw->num_vlan) { 6586 ret = stmmac_del_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid); 6587 if (ret) 6588 goto del_vlan_error; 6589 } 6590 6591 ret = stmmac_vlan_update(priv, is_double); 6592 6593 del_vlan_error: 6594 pm_runtime_put(priv->device); 6595 6596 return ret; 6597 } 6598 6599 static int stmmac_bpf(struct net_device *dev, struct netdev_bpf *bpf) 6600 { 6601 struct stmmac_priv *priv = netdev_priv(dev); 6602 6603 switch (bpf->command) { 6604 case XDP_SETUP_PROG: 6605 return stmmac_xdp_set_prog(priv, bpf->prog, bpf->extack); 6606 case XDP_SETUP_XSK_POOL: 6607 return stmmac_xdp_setup_pool(priv, bpf->xsk.pool, 6608 bpf->xsk.queue_id); 6609 default: 6610 return -EOPNOTSUPP; 6611 } 6612 } 6613 6614 static int stmmac_xdp_xmit(struct net_device *dev, int num_frames, 6615 struct xdp_frame **frames, u32 flags) 6616 { 6617 struct stmmac_priv *priv = netdev_priv(dev); 6618 int cpu = smp_processor_id(); 6619 struct netdev_queue *nq; 6620 int i, nxmit = 0; 6621 int queue; 6622 6623 if (unlikely(test_bit(STMMAC_DOWN, &priv->state))) 6624 return -ENETDOWN; 6625 6626 if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) 6627 return -EINVAL; 6628 6629 queue = stmmac_xdp_get_tx_queue(priv, cpu); 6630 nq = netdev_get_tx_queue(priv->dev, queue); 6631 6632 __netif_tx_lock(nq, cpu); 6633 /* Avoids TX time-out as we are sharing with slow path */ 6634 txq_trans_cond_update(nq); 6635 6636 for (i = 0; i < num_frames; i++) { 6637 int res; 6638 6639 res = stmmac_xdp_xmit_xdpf(priv, queue, frames[i], true); 6640 if (res == STMMAC_XDP_CONSUMED) 6641 break; 6642 6643 nxmit++; 6644 } 6645 6646 if (flags & XDP_XMIT_FLUSH) { 6647 stmmac_flush_tx_descriptors(priv, queue); 6648 stmmac_tx_timer_arm(priv, queue); 6649 } 6650 6651 __netif_tx_unlock(nq); 6652 6653 return nxmit; 6654 } 6655 6656 void stmmac_disable_rx_queue(struct stmmac_priv *priv, u32 queue) 6657 { 6658 struct stmmac_channel *ch = &priv->channel[queue]; 6659 unsigned long flags; 6660 6661 spin_lock_irqsave(&ch->lock, flags); 6662 stmmac_disable_dma_irq(priv, priv->ioaddr, queue, 1, 0); 6663 spin_unlock_irqrestore(&ch->lock, flags); 6664 6665 stmmac_stop_rx_dma(priv, queue); 6666 __free_dma_rx_desc_resources(priv, &priv->dma_conf, queue); 6667 } 6668 6669 void stmmac_enable_rx_queue(struct stmmac_priv *priv, u32 queue) 6670 { 6671 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue]; 6672 struct stmmac_channel *ch = &priv->channel[queue]; 6673 unsigned long flags; 6674 u32 buf_size; 6675 int ret; 6676 6677 ret = __alloc_dma_rx_desc_resources(priv, &priv->dma_conf, queue); 6678 if (ret) { 6679 netdev_err(priv->dev, "Failed to alloc RX desc.\n"); 6680 return; 6681 } 6682 6683 ret = __init_dma_rx_desc_rings(priv, &priv->dma_conf, queue, GFP_KERNEL); 6684 if (ret) { 6685 __free_dma_rx_desc_resources(priv, &priv->dma_conf, queue); 6686 
netdev_err(priv->dev, "Failed to init RX desc.\n"); 6687 return; 6688 } 6689 6690 stmmac_reset_rx_queue(priv, queue); 6691 stmmac_clear_rx_descriptors(priv, &priv->dma_conf, queue); 6692 6693 stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg, 6694 rx_q->dma_rx_phy, rx_q->queue_index); 6695 6696 rx_q->rx_tail_addr = rx_q->dma_rx_phy + (rx_q->buf_alloc_num * 6697 sizeof(struct dma_desc)); 6698 stmmac_set_rx_tail_ptr(priv, priv->ioaddr, 6699 rx_q->rx_tail_addr, rx_q->queue_index); 6700 6701 if (rx_q->xsk_pool && rx_q->buf_alloc_num) { 6702 buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool); 6703 stmmac_set_dma_bfsize(priv, priv->ioaddr, 6704 buf_size, 6705 rx_q->queue_index); 6706 } else { 6707 stmmac_set_dma_bfsize(priv, priv->ioaddr, 6708 priv->dma_conf.dma_buf_sz, 6709 rx_q->queue_index); 6710 } 6711 6712 stmmac_start_rx_dma(priv, queue); 6713 6714 spin_lock_irqsave(&ch->lock, flags); 6715 stmmac_enable_dma_irq(priv, priv->ioaddr, queue, 1, 0); 6716 spin_unlock_irqrestore(&ch->lock, flags); 6717 } 6718 6719 void stmmac_disable_tx_queue(struct stmmac_priv *priv, u32 queue) 6720 { 6721 struct stmmac_channel *ch = &priv->channel[queue]; 6722 unsigned long flags; 6723 6724 spin_lock_irqsave(&ch->lock, flags); 6725 stmmac_disable_dma_irq(priv, priv->ioaddr, queue, 0, 1); 6726 spin_unlock_irqrestore(&ch->lock, flags); 6727 6728 stmmac_stop_tx_dma(priv, queue); 6729 __free_dma_tx_desc_resources(priv, &priv->dma_conf, queue); 6730 } 6731 6732 void stmmac_enable_tx_queue(struct stmmac_priv *priv, u32 queue) 6733 { 6734 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue]; 6735 struct stmmac_channel *ch = &priv->channel[queue]; 6736 unsigned long flags; 6737 int ret; 6738 6739 ret = __alloc_dma_tx_desc_resources(priv, &priv->dma_conf, queue); 6740 if (ret) { 6741 netdev_err(priv->dev, "Failed to alloc TX desc.\n"); 6742 return; 6743 } 6744 6745 ret = __init_dma_tx_desc_rings(priv, &priv->dma_conf, queue); 6746 if (ret) { 6747 __free_dma_tx_desc_resources(priv, &priv->dma_conf, queue); 6748 netdev_err(priv->dev, "Failed to init TX desc.\n"); 6749 return; 6750 } 6751 6752 stmmac_reset_tx_queue(priv, queue); 6753 stmmac_clear_tx_descriptors(priv, &priv->dma_conf, queue); 6754 6755 stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg, 6756 tx_q->dma_tx_phy, tx_q->queue_index); 6757 6758 if (tx_q->tbs & STMMAC_TBS_AVAIL) 6759 stmmac_enable_tbs(priv, priv->ioaddr, 1, tx_q->queue_index); 6760 6761 tx_q->tx_tail_addr = tx_q->dma_tx_phy; 6762 stmmac_set_tx_tail_ptr(priv, priv->ioaddr, 6763 tx_q->tx_tail_addr, tx_q->queue_index); 6764 6765 stmmac_start_tx_dma(priv, queue); 6766 6767 spin_lock_irqsave(&ch->lock, flags); 6768 stmmac_enable_dma_irq(priv, priv->ioaddr, queue, 0, 1); 6769 spin_unlock_irqrestore(&ch->lock, flags); 6770 } 6771 6772 void stmmac_xdp_release(struct net_device *dev) 6773 { 6774 struct stmmac_priv *priv = netdev_priv(dev); 6775 u32 chan; 6776 6777 /* Ensure tx function is not running */ 6778 netif_tx_disable(dev); 6779 6780 /* Disable NAPI process */ 6781 stmmac_disable_all_queues(priv); 6782 6783 for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) 6784 hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer); 6785 6786 /* Free the IRQ lines */ 6787 stmmac_free_irq(dev, REQ_IRQ_ERR_ALL, 0); 6788 6789 /* Stop TX/RX DMA channels */ 6790 stmmac_stop_all_dma(priv); 6791 6792 /* Release and free the Rx/Tx resources */ 6793 free_dma_desc_resources(priv, &priv->dma_conf); 6794 6795 /* Disable the MAC Rx/Tx */ 6796 stmmac_mac_set(priv, priv->ioaddr, false); 
6797 6798 /* set trans_start so we don't get spurious 6799 * watchdogs during reset 6800 */ 6801 netif_trans_update(dev); 6802 netif_carrier_off(dev); 6803 } 6804 6805 int stmmac_xdp_open(struct net_device *dev) 6806 { 6807 struct stmmac_priv *priv = netdev_priv(dev); 6808 u32 rx_cnt = priv->plat->rx_queues_to_use; 6809 u32 tx_cnt = priv->plat->tx_queues_to_use; 6810 u32 dma_csr_ch = max(rx_cnt, tx_cnt); 6811 struct stmmac_rx_queue *rx_q; 6812 struct stmmac_tx_queue *tx_q; 6813 u32 buf_size; 6814 bool sph_en; 6815 u32 chan; 6816 int ret; 6817 6818 ret = alloc_dma_desc_resources(priv, &priv->dma_conf); 6819 if (ret < 0) { 6820 netdev_err(dev, "%s: DMA descriptors allocation failed\n", 6821 __func__); 6822 goto dma_desc_error; 6823 } 6824 6825 ret = init_dma_desc_rings(dev, &priv->dma_conf, GFP_KERNEL); 6826 if (ret < 0) { 6827 netdev_err(dev, "%s: DMA descriptors initialization failed\n", 6828 __func__); 6829 goto init_error; 6830 } 6831 6832 stmmac_reset_queues_param(priv); 6833 6834 /* DMA CSR Channel configuration */ 6835 for (chan = 0; chan < dma_csr_ch; chan++) { 6836 stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan); 6837 stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 1); 6838 } 6839 6840 /* Adjust Split header */ 6841 sph_en = (priv->hw->rx_csum > 0) && priv->sph; 6842 6843 /* DMA RX Channel Configuration */ 6844 for (chan = 0; chan < rx_cnt; chan++) { 6845 rx_q = &priv->dma_conf.rx_queue[chan]; 6846 6847 stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg, 6848 rx_q->dma_rx_phy, chan); 6849 6850 rx_q->rx_tail_addr = rx_q->dma_rx_phy + 6851 (rx_q->buf_alloc_num * 6852 sizeof(struct dma_desc)); 6853 stmmac_set_rx_tail_ptr(priv, priv->ioaddr, 6854 rx_q->rx_tail_addr, chan); 6855 6856 if (rx_q->xsk_pool && rx_q->buf_alloc_num) { 6857 buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool); 6858 stmmac_set_dma_bfsize(priv, priv->ioaddr, 6859 buf_size, 6860 rx_q->queue_index); 6861 } else { 6862 stmmac_set_dma_bfsize(priv, priv->ioaddr, 6863 priv->dma_conf.dma_buf_sz, 6864 rx_q->queue_index); 6865 } 6866 6867 stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan); 6868 } 6869 6870 /* DMA TX Channel Configuration */ 6871 for (chan = 0; chan < tx_cnt; chan++) { 6872 tx_q = &priv->dma_conf.tx_queue[chan]; 6873 6874 stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg, 6875 tx_q->dma_tx_phy, chan); 6876 6877 tx_q->tx_tail_addr = tx_q->dma_tx_phy; 6878 stmmac_set_tx_tail_ptr(priv, priv->ioaddr, 6879 tx_q->tx_tail_addr, chan); 6880 6881 hrtimer_init(&tx_q->txtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); 6882 tx_q->txtimer.function = stmmac_tx_timer; 6883 } 6884 6885 /* Enable the MAC Rx/Tx */ 6886 stmmac_mac_set(priv, priv->ioaddr, true); 6887 6888 /* Start Rx & Tx DMA Channels */ 6889 stmmac_start_all_dma(priv); 6890 6891 ret = stmmac_request_irq(dev); 6892 if (ret) 6893 goto irq_error; 6894 6895 /* Enable NAPI process*/ 6896 stmmac_enable_all_queues(priv); 6897 netif_carrier_on(dev); 6898 netif_tx_start_all_queues(dev); 6899 stmmac_enable_all_dma_irq(priv); 6900 6901 return 0; 6902 6903 irq_error: 6904 for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) 6905 hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer); 6906 6907 stmmac_hw_teardown(dev); 6908 init_error: 6909 free_dma_desc_resources(priv, &priv->dma_conf); 6910 dma_desc_error: 6911 return ret; 6912 } 6913 6914 int stmmac_xsk_wakeup(struct net_device *dev, u32 queue, u32 flags) 6915 { 6916 struct stmmac_priv *priv = netdev_priv(dev); 6917 struct stmmac_rx_queue *rx_q; 6918 struct stmmac_tx_queue 
*tx_q; 6919 struct stmmac_channel *ch; 6920 6921 if (test_bit(STMMAC_DOWN, &priv->state) || 6922 !netif_carrier_ok(priv->dev)) 6923 return -ENETDOWN; 6924 6925 if (!stmmac_xdp_is_enabled(priv)) 6926 return -EINVAL; 6927 6928 if (queue >= priv->plat->rx_queues_to_use || 6929 queue >= priv->plat->tx_queues_to_use) 6930 return -EINVAL; 6931 6932 rx_q = &priv->dma_conf.rx_queue[queue]; 6933 tx_q = &priv->dma_conf.tx_queue[queue]; 6934 ch = &priv->channel[queue]; 6935 6936 if (!rx_q->xsk_pool && !tx_q->xsk_pool) 6937 return -EINVAL; 6938 6939 if (!napi_if_scheduled_mark_missed(&ch->rxtx_napi)) { 6940 /* EQoS does not have per-DMA channel SW interrupt, 6941 * so we schedule RX Napi straight-away. 6942 */ 6943 if (likely(napi_schedule_prep(&ch->rxtx_napi))) 6944 __napi_schedule(&ch->rxtx_napi); 6945 } 6946 6947 return 0; 6948 } 6949 6950 static void stmmac_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) 6951 { 6952 struct stmmac_priv *priv = netdev_priv(dev); 6953 u32 tx_cnt = priv->plat->tx_queues_to_use; 6954 u32 rx_cnt = priv->plat->rx_queues_to_use; 6955 unsigned int start; 6956 int q; 6957 6958 for (q = 0; q < tx_cnt; q++) { 6959 struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[q]; 6960 u64 tx_packets; 6961 u64 tx_bytes; 6962 6963 do { 6964 start = u64_stats_fetch_begin(&txq_stats->syncp); 6965 tx_packets = txq_stats->tx_packets; 6966 tx_bytes = txq_stats->tx_bytes; 6967 } while (u64_stats_fetch_retry(&txq_stats->syncp, start)); 6968 6969 stats->tx_packets += tx_packets; 6970 stats->tx_bytes += tx_bytes; 6971 } 6972 6973 for (q = 0; q < rx_cnt; q++) { 6974 struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[q]; 6975 u64 rx_packets; 6976 u64 rx_bytes; 6977 6978 do { 6979 start = u64_stats_fetch_begin(&rxq_stats->syncp); 6980 rx_packets = rxq_stats->rx_packets; 6981 rx_bytes = rxq_stats->rx_bytes; 6982 } while (u64_stats_fetch_retry(&rxq_stats->syncp, start)); 6983 6984 stats->rx_packets += rx_packets; 6985 stats->rx_bytes += rx_bytes; 6986 } 6987 6988 stats->rx_dropped = priv->xstats.rx_dropped; 6989 stats->rx_errors = priv->xstats.rx_errors; 6990 stats->tx_dropped = priv->xstats.tx_dropped; 6991 stats->tx_errors = priv->xstats.tx_errors; 6992 stats->tx_carrier_errors = priv->xstats.tx_losscarrier + priv->xstats.tx_carrier; 6993 stats->collisions = priv->xstats.tx_collision + priv->xstats.rx_collision; 6994 stats->rx_length_errors = priv->xstats.rx_length; 6995 stats->rx_crc_errors = priv->xstats.rx_crc_errors; 6996 stats->rx_over_errors = priv->xstats.rx_overflow_cntr; 6997 stats->rx_missed_errors = priv->xstats.rx_missed_cntr; 6998 } 6999 7000 static const struct net_device_ops stmmac_netdev_ops = { 7001 .ndo_open = stmmac_open, 7002 .ndo_start_xmit = stmmac_xmit, 7003 .ndo_stop = stmmac_release, 7004 .ndo_change_mtu = stmmac_change_mtu, 7005 .ndo_fix_features = stmmac_fix_features, 7006 .ndo_set_features = stmmac_set_features, 7007 .ndo_set_rx_mode = stmmac_set_rx_mode, 7008 .ndo_tx_timeout = stmmac_tx_timeout, 7009 .ndo_eth_ioctl = stmmac_ioctl, 7010 .ndo_get_stats64 = stmmac_get_stats64, 7011 .ndo_setup_tc = stmmac_setup_tc, 7012 .ndo_select_queue = stmmac_select_queue, 7013 .ndo_set_mac_address = stmmac_set_mac_address, 7014 .ndo_vlan_rx_add_vid = stmmac_vlan_rx_add_vid, 7015 .ndo_vlan_rx_kill_vid = stmmac_vlan_rx_kill_vid, 7016 .ndo_bpf = stmmac_bpf, 7017 .ndo_xdp_xmit = stmmac_xdp_xmit, 7018 .ndo_xsk_wakeup = stmmac_xsk_wakeup, 7019 }; 7020 7021 static void stmmac_reset_subtask(struct stmmac_priv *priv) 7022 { 7023 if 
(!test_and_clear_bit(STMMAC_RESET_REQUESTED, &priv->state)) 7024 return; 7025 if (test_bit(STMMAC_DOWN, &priv->state)) 7026 return; 7027 7028 netdev_err(priv->dev, "Reset adapter.\n"); 7029 7030 rtnl_lock(); 7031 netif_trans_update(priv->dev); 7032 while (test_and_set_bit(STMMAC_RESETING, &priv->state)) 7033 usleep_range(1000, 2000); 7034 7035 set_bit(STMMAC_DOWN, &priv->state); 7036 dev_close(priv->dev); 7037 dev_open(priv->dev, NULL); 7038 clear_bit(STMMAC_DOWN, &priv->state); 7039 clear_bit(STMMAC_RESETING, &priv->state); 7040 rtnl_unlock(); 7041 } 7042 7043 static void stmmac_service_task(struct work_struct *work) 7044 { 7045 struct stmmac_priv *priv = container_of(work, struct stmmac_priv, 7046 service_task); 7047 7048 stmmac_reset_subtask(priv); 7049 clear_bit(STMMAC_SERVICE_SCHED, &priv->state); 7050 } 7051 7052 /** 7053 * stmmac_hw_init - Init the MAC device 7054 * @priv: driver private structure 7055 * Description: this function is to configure the MAC device according to 7056 * some platform parameters or the HW capability register. It prepares the 7057 * driver to use either ring or chain modes and to setup either enhanced or 7058 * normal descriptors. 7059 */ 7060 static int stmmac_hw_init(struct stmmac_priv *priv) 7061 { 7062 int ret; 7063 7064 /* dwmac-sun8i only work in chain mode */ 7065 if (priv->plat->flags & STMMAC_FLAG_HAS_SUN8I) 7066 chain_mode = 1; 7067 priv->chain_mode = chain_mode; 7068 7069 /* Initialize HW Interface */ 7070 ret = stmmac_hwif_init(priv); 7071 if (ret) 7072 return ret; 7073 7074 /* Get the HW capability (new GMAC newer than 3.50a) */ 7075 priv->hw_cap_support = stmmac_get_hw_features(priv); 7076 if (priv->hw_cap_support) { 7077 dev_info(priv->device, "DMA HW capability register supported\n"); 7078 7079 /* We can override some gmac/dma configuration fields: e.g. 7080 * enh_desc, tx_coe (e.g. that are passed through the 7081 * platform) with the values from the HW capability 7082 * register (if supported). 7083 */ 7084 priv->plat->enh_desc = priv->dma_cap.enh_desc; 7085 priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up && 7086 !(priv->plat->flags & STMMAC_FLAG_USE_PHY_WOL); 7087 priv->hw->pmt = priv->plat->pmt; 7088 if (priv->dma_cap.hash_tb_sz) { 7089 priv->hw->multicast_filter_bins = 7090 (BIT(priv->dma_cap.hash_tb_sz) << 5); 7091 priv->hw->mcast_bits_log2 = 7092 ilog2(priv->hw->multicast_filter_bins); 7093 } 7094 7095 /* TXCOE doesn't work in thresh DMA mode */ 7096 if (priv->plat->force_thresh_dma_mode) 7097 priv->plat->tx_coe = 0; 7098 else 7099 priv->plat->tx_coe = priv->dma_cap.tx_coe; 7100 7101 /* In case of GMAC4 rx_coe is from HW cap register. 
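 * The code below prefers COE Type 2 over Type 1 when the capability
 * register advertises both.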
*/ 7102 priv->plat->rx_coe = priv->dma_cap.rx_coe; 7103 7104 if (priv->dma_cap.rx_coe_type2) 7105 priv->plat->rx_coe = STMMAC_RX_COE_TYPE2; 7106 else if (priv->dma_cap.rx_coe_type1) 7107 priv->plat->rx_coe = STMMAC_RX_COE_TYPE1; 7108 7109 } else { 7110 dev_info(priv->device, "No HW DMA feature register supported\n"); 7111 } 7112 7113 if (priv->plat->rx_coe) { 7114 priv->hw->rx_csum = priv->plat->rx_coe; 7115 dev_info(priv->device, "RX Checksum Offload Engine supported\n"); 7116 if (priv->synopsys_id < DWMAC_CORE_4_00) 7117 dev_info(priv->device, "COE Type %d\n", priv->hw->rx_csum); 7118 } 7119 if (priv->plat->tx_coe) 7120 dev_info(priv->device, "TX Checksum insertion supported\n"); 7121 7122 if (priv->plat->pmt) { 7123 dev_info(priv->device, "Wake-Up On Lan supported\n"); 7124 device_set_wakeup_capable(priv->device, 1); 7125 } 7126 7127 if (priv->dma_cap.tsoen) 7128 dev_info(priv->device, "TSO supported\n"); 7129 7130 priv->hw->vlan_fail_q_en = 7131 (priv->plat->flags & STMMAC_FLAG_VLAN_FAIL_Q_EN); 7132 priv->hw->vlan_fail_q = priv->plat->vlan_fail_q; 7133 7134 /* Run HW quirks, if any */ 7135 if (priv->hwif_quirks) { 7136 ret = priv->hwif_quirks(priv); 7137 if (ret) 7138 return ret; 7139 } 7140 7141 /* Rx Watchdog is available in the COREs newer than the 3.40. 7142 * In some case, for example on bugged HW this feature 7143 * has to be disable and this can be done by passing the 7144 * riwt_off field from the platform. 7145 */ 7146 if (((priv->synopsys_id >= DWMAC_CORE_3_50) || 7147 (priv->plat->has_xgmac)) && (!priv->plat->riwt_off)) { 7148 priv->use_riwt = 1; 7149 dev_info(priv->device, 7150 "Enable RX Mitigation via HW Watchdog Timer\n"); 7151 } 7152 7153 return 0; 7154 } 7155 7156 static void stmmac_napi_add(struct net_device *dev) 7157 { 7158 struct stmmac_priv *priv = netdev_priv(dev); 7159 u32 queue, maxq; 7160 7161 maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use); 7162 7163 for (queue = 0; queue < maxq; queue++) { 7164 struct stmmac_channel *ch = &priv->channel[queue]; 7165 7166 ch->priv_data = priv; 7167 ch->index = queue; 7168 spin_lock_init(&ch->lock); 7169 7170 if (queue < priv->plat->rx_queues_to_use) { 7171 netif_napi_add(dev, &ch->rx_napi, stmmac_napi_poll_rx); 7172 } 7173 if (queue < priv->plat->tx_queues_to_use) { 7174 netif_napi_add_tx(dev, &ch->tx_napi, 7175 stmmac_napi_poll_tx); 7176 } 7177 if (queue < priv->plat->rx_queues_to_use && 7178 queue < priv->plat->tx_queues_to_use) { 7179 netif_napi_add(dev, &ch->rxtx_napi, 7180 stmmac_napi_poll_rxtx); 7181 } 7182 } 7183 } 7184 7185 static void stmmac_napi_del(struct net_device *dev) 7186 { 7187 struct stmmac_priv *priv = netdev_priv(dev); 7188 u32 queue, maxq; 7189 7190 maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use); 7191 7192 for (queue = 0; queue < maxq; queue++) { 7193 struct stmmac_channel *ch = &priv->channel[queue]; 7194 7195 if (queue < priv->plat->rx_queues_to_use) 7196 netif_napi_del(&ch->rx_napi); 7197 if (queue < priv->plat->tx_queues_to_use) 7198 netif_napi_del(&ch->tx_napi); 7199 if (queue < priv->plat->rx_queues_to_use && 7200 queue < priv->plat->tx_queues_to_use) { 7201 netif_napi_del(&ch->rxtx_napi); 7202 } 7203 } 7204 } 7205 7206 int stmmac_reinit_queues(struct net_device *dev, u32 rx_cnt, u32 tx_cnt) 7207 { 7208 struct stmmac_priv *priv = netdev_priv(dev); 7209 int ret = 0, i; 7210 7211 if (netif_running(dev)) 7212 stmmac_release(dev); 7213 7214 stmmac_napi_del(dev); 7215 7216 priv->plat->rx_queues_to_use = rx_cnt; 7217 priv->plat->tx_queues_to_use = tx_cnt; 
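/* Re-spread the default RSS indirection table across the new RX queue
 * count below, unless user space has configured the table explicitly.
 * This path is typically reached via the ethtool channels interface,
 * e.g. (hypothetical interface name and counts):
 *   ethtool -L eth0 rx 4 tx 4
 */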
7218 if (!netif_is_rxfh_configured(dev)) 7219 for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++) 7220 priv->rss.table[i] = ethtool_rxfh_indir_default(i, 7221 rx_cnt); 7222 7223 stmmac_set_half_duplex(priv); 7224 stmmac_napi_add(dev); 7225 7226 if (netif_running(dev)) 7227 ret = stmmac_open(dev); 7228 7229 return ret; 7230 } 7231 7232 int stmmac_reinit_ringparam(struct net_device *dev, u32 rx_size, u32 tx_size) 7233 { 7234 struct stmmac_priv *priv = netdev_priv(dev); 7235 int ret = 0; 7236 7237 if (netif_running(dev)) 7238 stmmac_release(dev); 7239 7240 priv->dma_conf.dma_rx_size = rx_size; 7241 priv->dma_conf.dma_tx_size = tx_size; 7242 7243 if (netif_running(dev)) 7244 ret = stmmac_open(dev); 7245 7246 return ret; 7247 } 7248 7249 #define SEND_VERIFY_MPAKCET_FMT "Send Verify mPacket lo_state=%d lp_state=%d\n" 7250 static void stmmac_fpe_lp_task(struct work_struct *work) 7251 { 7252 struct stmmac_priv *priv = container_of(work, struct stmmac_priv, 7253 fpe_task); 7254 struct stmmac_fpe_cfg *fpe_cfg = priv->plat->fpe_cfg; 7255 enum stmmac_fpe_state *lo_state = &fpe_cfg->lo_fpe_state; 7256 enum stmmac_fpe_state *lp_state = &fpe_cfg->lp_fpe_state; 7257 bool *hs_enable = &fpe_cfg->hs_enable; 7258 bool *enable = &fpe_cfg->enable; 7259 int retries = 20; 7260 7261 while (retries-- > 0) { 7262 /* Bail out immediately if FPE handshake is OFF */ 7263 if (*lo_state == FPE_STATE_OFF || !*hs_enable) 7264 break; 7265 7266 if (*lo_state == FPE_STATE_ENTERING_ON && 7267 *lp_state == FPE_STATE_ENTERING_ON) { 7268 stmmac_fpe_configure(priv, priv->ioaddr, 7269 fpe_cfg, 7270 priv->plat->tx_queues_to_use, 7271 priv->plat->rx_queues_to_use, 7272 *enable); 7273 7274 netdev_info(priv->dev, "configured FPE\n"); 7275 7276 *lo_state = FPE_STATE_ON; 7277 *lp_state = FPE_STATE_ON; 7278 netdev_info(priv->dev, "!!! BOTH FPE stations ON\n"); 7279 break; 7280 } 7281 7282 if ((*lo_state == FPE_STATE_CAPABLE || 7283 *lo_state == FPE_STATE_ENTERING_ON) && 7284 *lp_state != FPE_STATE_ON) { 7285 netdev_info(priv->dev, SEND_VERIFY_MPAKCET_FMT, 7286 *lo_state, *lp_state); 7287 stmmac_fpe_send_mpacket(priv, priv->ioaddr, 7288 fpe_cfg, 7289 MPACKET_VERIFY); 7290 } 7291 /* Sleep then retry */ 7292 msleep(500); 7293 } 7294 7295 clear_bit(__FPE_TASK_SCHED, &priv->fpe_task_state); 7296 } 7297 7298 void stmmac_fpe_handshake(struct stmmac_priv *priv, bool enable) 7299 { 7300 if (priv->plat->fpe_cfg->hs_enable != enable) { 7301 if (enable) { 7302 stmmac_fpe_send_mpacket(priv, priv->ioaddr, 7303 priv->plat->fpe_cfg, 7304 MPACKET_VERIFY); 7305 } else { 7306 priv->plat->fpe_cfg->lo_fpe_state = FPE_STATE_OFF; 7307 priv->plat->fpe_cfg->lp_fpe_state = FPE_STATE_OFF; 7308 } 7309 7310 priv->plat->fpe_cfg->hs_enable = enable; 7311 } 7312 } 7313 7314 static int stmmac_xdp_rx_timestamp(const struct xdp_md *_ctx, u64 *timestamp) 7315 { 7316 const struct stmmac_xdp_buff *ctx = (void *)_ctx; 7317 struct dma_desc *desc_contains_ts = ctx->desc; 7318 struct stmmac_priv *priv = ctx->priv; 7319 struct dma_desc *ndesc = ctx->ndesc; 7320 struct dma_desc *desc = ctx->desc; 7321 u64 ns = 0; 7322 7323 if (!priv->hwts_rx_en) 7324 return -ENODATA; 7325 7326 /* For GMAC4, the valid timestamp is from CTX next desc. 
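 * That is, the timestamp lives in the context descriptor that follows
 * the packet descriptor, so desc_contains_ts is redirected to ndesc
 * below for GMAC4/XGMAC.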
*/ 7327 if (priv->plat->has_gmac4 || priv->plat->has_xgmac) 7328 desc_contains_ts = ndesc; 7329 7330 /* Check if timestamp is available */ 7331 if (stmmac_get_rx_timestamp_status(priv, desc, ndesc, priv->adv_ts)) { 7332 stmmac_get_timestamp(priv, desc_contains_ts, priv->adv_ts, &ns); 7333 ns -= priv->plat->cdc_error_adj; 7334 *timestamp = ns_to_ktime(ns); 7335 return 0; 7336 } 7337 7338 return -ENODATA; 7339 } 7340 7341 static const struct xdp_metadata_ops stmmac_xdp_metadata_ops = { 7342 .xmo_rx_timestamp = stmmac_xdp_rx_timestamp, 7343 }; 7344 7345 /** 7346 * stmmac_dvr_probe 7347 * @device: device pointer 7348 * @plat_dat: platform data pointer 7349 * @res: stmmac resource pointer 7350 * Description: this is the main probe function used to 7351 * call the alloc_etherdev, allocate the priv structure. 7352 * Return: 7353 * returns 0 on success, otherwise errno. 7354 */ 7355 int stmmac_dvr_probe(struct device *device, 7356 struct plat_stmmacenet_data *plat_dat, 7357 struct stmmac_resources *res) 7358 { 7359 struct net_device *ndev = NULL; 7360 struct stmmac_priv *priv; 7361 u32 rxq; 7362 int i, ret = 0; 7363 7364 ndev = devm_alloc_etherdev_mqs(device, sizeof(struct stmmac_priv), 7365 MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES); 7366 if (!ndev) 7367 return -ENOMEM; 7368 7369 SET_NETDEV_DEV(ndev, device); 7370 7371 priv = netdev_priv(ndev); 7372 priv->device = device; 7373 priv->dev = ndev; 7374 7375 for (i = 0; i < MTL_MAX_RX_QUEUES; i++) 7376 u64_stats_init(&priv->xstats.rxq_stats[i].syncp); 7377 for (i = 0; i < MTL_MAX_TX_QUEUES; i++) 7378 u64_stats_init(&priv->xstats.txq_stats[i].syncp); 7379 7380 stmmac_set_ethtool_ops(ndev); 7381 priv->pause = pause; 7382 priv->plat = plat_dat; 7383 priv->ioaddr = res->addr; 7384 priv->dev->base_addr = (unsigned long)res->addr; 7385 priv->plat->dma_cfg->multi_msi_en = 7386 (priv->plat->flags & STMMAC_FLAG_MULTI_MSI_EN); 7387 7388 priv->dev->irq = res->irq; 7389 priv->wol_irq = res->wol_irq; 7390 priv->lpi_irq = res->lpi_irq; 7391 priv->sfty_ce_irq = res->sfty_ce_irq; 7392 priv->sfty_ue_irq = res->sfty_ue_irq; 7393 for (i = 0; i < MTL_MAX_RX_QUEUES; i++) 7394 priv->rx_irq[i] = res->rx_irq[i]; 7395 for (i = 0; i < MTL_MAX_TX_QUEUES; i++) 7396 priv->tx_irq[i] = res->tx_irq[i]; 7397 7398 if (!is_zero_ether_addr(res->mac)) 7399 eth_hw_addr_set(priv->dev, res->mac); 7400 7401 dev_set_drvdata(device, priv->dev); 7402 7403 /* Verify driver arguments */ 7404 stmmac_verify_args(); 7405 7406 priv->af_xdp_zc_qps = bitmap_zalloc(MTL_MAX_TX_QUEUES, GFP_KERNEL); 7407 if (!priv->af_xdp_zc_qps) 7408 return -ENOMEM; 7409 7410 /* Allocate workqueue */ 7411 priv->wq = create_singlethread_workqueue("stmmac_wq"); 7412 if (!priv->wq) { 7413 dev_err(priv->device, "failed to create workqueue\n"); 7414 ret = -ENOMEM; 7415 goto error_wq_init; 7416 } 7417 7418 INIT_WORK(&priv->service_task, stmmac_service_task); 7419 7420 /* Initialize Link Partner FPE workqueue */ 7421 INIT_WORK(&priv->fpe_task, stmmac_fpe_lp_task); 7422 7423 /* Override with kernel parameters if supplied XXX CRS XXX 7424 * this needs to have multiple instances 7425 */ 7426 if ((phyaddr >= 0) && (phyaddr <= 31)) 7427 priv->plat->phy_addr = phyaddr; 7428 7429 if (priv->plat->stmmac_rst) { 7430 ret = reset_control_assert(priv->plat->stmmac_rst); 7431 reset_control_deassert(priv->plat->stmmac_rst); 7432 /* Some reset controllers have only reset callback instead of 7433 * assert + deassert callbacks pair. 
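 * In that case reset_control_assert() returns -ENOTSUPP and we fall
 * back to a combined reset_control_reset() below.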
7434 */ 7435 if (ret == -ENOTSUPP) 7436 reset_control_reset(priv->plat->stmmac_rst); 7437 } 7438 7439 ret = reset_control_deassert(priv->plat->stmmac_ahb_rst); 7440 if (ret == -ENOTSUPP) 7441 dev_err(priv->device, "unable to bring out of ahb reset: %pe\n", 7442 ERR_PTR(ret)); 7443 7444 /* Wait a bit for the reset to take effect */ 7445 udelay(10); 7446 7447 /* Init MAC and get the capabilities */ 7448 ret = stmmac_hw_init(priv); 7449 if (ret) 7450 goto error_hw_init; 7451 7452 /* Only DWMAC core version 5.20 onwards supports HW descriptor prefetch. 7453 */ 7454 if (priv->synopsys_id < DWMAC_CORE_5_20) 7455 priv->plat->dma_cfg->dche = false; 7456 7457 stmmac_check_ether_addr(priv); 7458 7459 ndev->netdev_ops = &stmmac_netdev_ops; 7460 7461 ndev->xdp_metadata_ops = &stmmac_xdp_metadata_ops; 7462 7463 ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | 7464 NETIF_F_RXCSUM; 7465 ndev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT | 7466 NETDEV_XDP_ACT_XSK_ZEROCOPY; 7467 7468 ret = stmmac_tc_init(priv, priv); 7469 if (!ret) { 7470 ndev->hw_features |= NETIF_F_HW_TC; 7471 } 7472 7473 if ((priv->plat->flags & STMMAC_FLAG_TSO_EN) && (priv->dma_cap.tsoen)) { 7474 ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6; 7475 if (priv->plat->has_gmac4) 7476 ndev->hw_features |= NETIF_F_GSO_UDP_L4; 7477 priv->tso = true; 7478 dev_info(priv->device, "TSO feature enabled\n"); 7479 } 7480 7481 if (priv->dma_cap.sphen && 7482 !(priv->plat->flags & STMMAC_FLAG_SPH_DISABLE)) { 7483 ndev->hw_features |= NETIF_F_GRO; 7484 priv->sph_cap = true; 7485 priv->sph = priv->sph_cap; 7486 dev_info(priv->device, "SPH feature enabled\n"); 7487 } 7488 7489 /* Ideally our host DMA address width is the same as for the 7490 * device. However, it may differ and then we have to use our 7491 * host DMA width for allocation and the device DMA width for 7492 * register handling. 7493 */ 7494 if (priv->plat->host_dma_width) 7495 priv->dma_cap.host_dma_width = priv->plat->host_dma_width; 7496 else 7497 priv->dma_cap.host_dma_width = priv->dma_cap.addr64; 7498 7499 if (priv->dma_cap.host_dma_width) { 7500 ret = dma_set_mask_and_coherent(device, 7501 DMA_BIT_MASK(priv->dma_cap.host_dma_width)); 7502 if (!ret) { 7503 dev_info(priv->device, "Using %d/%d bits DMA host/device width\n", 7504 priv->dma_cap.host_dma_width, priv->dma_cap.addr64); 7505 7506 /* 7507 * If more than 32 bits can be addressed, make sure to 7508 * enable enhanced addressing mode. 
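 * Without enhanced addressing the DMA engine only emits 32-bit bus
 * addresses, so buffers mapped above the 4 GiB boundary would be
 * unreachable.  The eame flag only makes sense when dma_addr_t itself
 * is 64 bits wide, hence the CONFIG_ARCH_DMA_ADDR_T_64BIT check below;
 * it is later consumed by the core-specific DMA setup code.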
7509 */ 7510 if (IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT)) 7511 priv->plat->dma_cfg->eame = true; 7512 } else { 7513 ret = dma_set_mask_and_coherent(device, DMA_BIT_MASK(32)); 7514 if (ret) { 7515 dev_err(priv->device, "Failed to set DMA Mask\n"); 7516 goto error_hw_init; 7517 } 7518 7519 priv->dma_cap.host_dma_width = 32; 7520 } 7521 } 7522 7523 ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA; 7524 ndev->watchdog_timeo = msecs_to_jiffies(watchdog); 7525 #ifdef STMMAC_VLAN_TAG_USED 7526 /* Both mac100 and gmac support receive VLAN tag detection */ 7527 ndev->features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX; 7528 if (priv->dma_cap.vlhash) { 7529 ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER; 7530 ndev->features |= NETIF_F_HW_VLAN_STAG_FILTER; 7531 } 7532 if (priv->dma_cap.vlins) { 7533 ndev->features |= NETIF_F_HW_VLAN_CTAG_TX; 7534 if (priv->dma_cap.dvlan) 7535 ndev->features |= NETIF_F_HW_VLAN_STAG_TX; 7536 } 7537 #endif 7538 priv->msg_enable = netif_msg_init(debug, default_msg_level); 7539 7540 priv->xstats.threshold = tc; 7541 7542 /* Initialize RSS */ 7543 rxq = priv->plat->rx_queues_to_use; 7544 netdev_rss_key_fill(priv->rss.key, sizeof(priv->rss.key)); 7545 for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++) 7546 priv->rss.table[i] = ethtool_rxfh_indir_default(i, rxq); 7547 7548 if (priv->dma_cap.rssen && priv->plat->rss_en) 7549 ndev->features |= NETIF_F_RXHASH; 7550 7551 ndev->vlan_features |= ndev->features; 7552 /* TSO doesn't work on VLANs yet */ 7553 ndev->vlan_features &= ~NETIF_F_TSO; 7554 7555 /* MTU range: 46 - hw-specific max */ 7556 ndev->min_mtu = ETH_ZLEN - ETH_HLEN; 7557 if (priv->plat->has_xgmac) 7558 ndev->max_mtu = XGMAC_JUMBO_LEN; 7559 else if ((priv->plat->enh_desc) || (priv->synopsys_id >= DWMAC_CORE_4_00)) 7560 ndev->max_mtu = JUMBO_LEN; 7561 else 7562 ndev->max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN); 7563 /* Will not overwrite ndev->max_mtu if plat->maxmtu > ndev->max_mtu 7564 * as well as plat->maxmtu < ndev->min_mtu which is a invalid range. 7565 */ 7566 if ((priv->plat->maxmtu < ndev->max_mtu) && 7567 (priv->plat->maxmtu >= ndev->min_mtu)) 7568 ndev->max_mtu = priv->plat->maxmtu; 7569 else if (priv->plat->maxmtu < ndev->min_mtu) 7570 dev_warn(priv->device, 7571 "%s: warning: maxmtu having invalid value (%d)\n", 7572 __func__, priv->plat->maxmtu); 7573 7574 if (flow_ctrl) 7575 priv->flow_ctrl = FLOW_AUTO; /* RX/TX pause on */ 7576 7577 ndev->priv_flags |= IFF_LIVE_ADDR_CHANGE; 7578 7579 /* Setup channels NAPI */ 7580 stmmac_napi_add(ndev); 7581 7582 mutex_init(&priv->lock); 7583 7584 /* If a specific clk_csr value is passed from the platform 7585 * this means that the CSR Clock Range selection cannot be 7586 * changed at run-time and it is fixed. Viceversa the driver'll try to 7587 * set the MDC clock dynamically according to the csr actual 7588 * clock input. 
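 * In the dynamic case stmmac_clk_csr_set() reads the rate of the CSR
 * clock and picks the matching divider range so that the resulting MDC
 * frequency stays at or below the 2.5 MHz maximum that IEEE 802.3
 * permits on the MDIO bus.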
7589 */ 7590 if (priv->plat->clk_csr >= 0) 7591 priv->clk_csr = priv->plat->clk_csr; 7592 else 7593 stmmac_clk_csr_set(priv); 7594 7595 stmmac_check_pcs_mode(priv); 7596 7597 pm_runtime_get_noresume(device); 7598 pm_runtime_set_active(device); 7599 if (!pm_runtime_enabled(device)) 7600 pm_runtime_enable(device); 7601 7602 if (priv->hw->pcs != STMMAC_PCS_TBI && 7603 priv->hw->pcs != STMMAC_PCS_RTBI) { 7604 /* MDIO bus Registration */ 7605 ret = stmmac_mdio_register(ndev); 7606 if (ret < 0) { 7607 dev_err_probe(priv->device, ret, 7608 "%s: MDIO bus (id: %d) registration failed\n", 7609 __func__, priv->plat->bus_id); 7610 goto error_mdio_register; 7611 } 7612 } 7613 7614 if (priv->plat->speed_mode_2500) 7615 priv->plat->speed_mode_2500(ndev, priv->plat->bsp_priv); 7616 7617 if (priv->plat->mdio_bus_data && priv->plat->mdio_bus_data->has_xpcs) { 7618 ret = stmmac_xpcs_setup(priv->mii); 7619 if (ret) 7620 goto error_xpcs_setup; 7621 } 7622 7623 ret = stmmac_phy_setup(priv); 7624 if (ret) { 7625 netdev_err(ndev, "failed to setup phy (%d)\n", ret); 7626 goto error_phy_setup; 7627 } 7628 7629 ret = register_netdev(ndev); 7630 if (ret) { 7631 dev_err(priv->device, "%s: ERROR %i registering the device\n", 7632 __func__, ret); 7633 goto error_netdev_register; 7634 } 7635 7636 #ifdef CONFIG_DEBUG_FS 7637 stmmac_init_fs(ndev); 7638 #endif 7639 7640 if (priv->plat->dump_debug_regs) 7641 priv->plat->dump_debug_regs(priv->plat->bsp_priv); 7642 7643 /* Let pm_runtime_put() disable the clocks. 7644 * If CONFIG_PM is not enabled, the clocks will stay powered. 7645 */ 7646 pm_runtime_put(device); 7647 7648 return ret; 7649 7650 error_netdev_register: 7651 phylink_destroy(priv->phylink); 7652 error_xpcs_setup: 7653 error_phy_setup: 7654 if (priv->hw->pcs != STMMAC_PCS_TBI && 7655 priv->hw->pcs != STMMAC_PCS_RTBI) 7656 stmmac_mdio_unregister(ndev); 7657 error_mdio_register: 7658 stmmac_napi_del(ndev); 7659 error_hw_init: 7660 destroy_workqueue(priv->wq); 7661 error_wq_init: 7662 bitmap_free(priv->af_xdp_zc_qps); 7663 7664 return ret; 7665 } 7666 EXPORT_SYMBOL_GPL(stmmac_dvr_probe); 7667 7668 /** 7669 * stmmac_dvr_remove 7670 * @dev: device pointer 7671 * Description: this function resets the TX/RX processes, disables the MAC RX/TX 7672 * changes the link status, releases the DMA descriptor rings. 
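 * Teardown mirrors stmmac_dvr_probe(): the DMA engines are stopped and
 * the netdev unregistered first, then phylink, the reset lines, the
 * MDIO bus, the workqueue and the AF_XDP queue bitmap are released.
 * A runtime-PM reference is held across the sequence so that the bus
 * clocks stay enabled while registers are still being accessed.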
7673 */ 7674 void stmmac_dvr_remove(struct device *dev) 7675 { 7676 struct net_device *ndev = dev_get_drvdata(dev); 7677 struct stmmac_priv *priv = netdev_priv(ndev); 7678 7679 netdev_info(priv->dev, "%s: removing driver", __func__); 7680 7681 pm_runtime_get_sync(dev); 7682 7683 stmmac_stop_all_dma(priv); 7684 stmmac_mac_set(priv, priv->ioaddr, false); 7685 netif_carrier_off(ndev); 7686 unregister_netdev(ndev); 7687 7688 #ifdef CONFIG_DEBUG_FS 7689 stmmac_exit_fs(ndev); 7690 #endif 7691 phylink_destroy(priv->phylink); 7692 if (priv->plat->stmmac_rst) 7693 reset_control_assert(priv->plat->stmmac_rst); 7694 reset_control_assert(priv->plat->stmmac_ahb_rst); 7695 if (priv->hw->pcs != STMMAC_PCS_TBI && 7696 priv->hw->pcs != STMMAC_PCS_RTBI) 7697 stmmac_mdio_unregister(ndev); 7698 destroy_workqueue(priv->wq); 7699 mutex_destroy(&priv->lock); 7700 bitmap_free(priv->af_xdp_zc_qps); 7701 7702 pm_runtime_disable(dev); 7703 pm_runtime_put_noidle(dev); 7704 } 7705 EXPORT_SYMBOL_GPL(stmmac_dvr_remove); 7706 7707 /** 7708 * stmmac_suspend - suspend callback 7709 * @dev: device pointer 7710 * Description: this is the function to suspend the device and it is called 7711 * by the platform driver to stop the network queue, release the resources, 7712 * program the PMT register (for WoL), clean and release driver resources. 7713 */ 7714 int stmmac_suspend(struct device *dev) 7715 { 7716 struct net_device *ndev = dev_get_drvdata(dev); 7717 struct stmmac_priv *priv = netdev_priv(ndev); 7718 u32 chan; 7719 7720 if (!ndev || !netif_running(ndev)) 7721 return 0; 7722 7723 mutex_lock(&priv->lock); 7724 7725 netif_device_detach(ndev); 7726 7727 stmmac_disable_all_queues(priv); 7728 7729 for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) 7730 hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer); 7731 7732 if (priv->eee_enabled) { 7733 priv->tx_path_in_lpi_mode = false; 7734 del_timer_sync(&priv->eee_ctrl_timer); 7735 } 7736 7737 /* Stop TX/RX DMA */ 7738 stmmac_stop_all_dma(priv); 7739 7740 if (priv->plat->serdes_powerdown) 7741 priv->plat->serdes_powerdown(ndev, priv->plat->bsp_priv); 7742 7743 /* Enable Power down mode by programming the PMT regs */ 7744 if (device_may_wakeup(priv->device) && priv->plat->pmt) { 7745 stmmac_pmt(priv, priv->hw, priv->wolopts); 7746 priv->irq_wake = 1; 7747 } else { 7748 stmmac_mac_set(priv, priv->ioaddr, false); 7749 pinctrl_pm_select_sleep_state(priv->device); 7750 } 7751 7752 mutex_unlock(&priv->lock); 7753 7754 rtnl_lock(); 7755 if (device_may_wakeup(priv->device) && priv->plat->pmt) { 7756 phylink_suspend(priv->phylink, true); 7757 } else { 7758 if (device_may_wakeup(priv->device)) 7759 phylink_speed_down(priv->phylink, false); 7760 phylink_suspend(priv->phylink, false); 7761 } 7762 rtnl_unlock(); 7763 7764 if (priv->dma_cap.fpesel) { 7765 /* Disable FPE */ 7766 stmmac_fpe_configure(priv, priv->ioaddr, 7767 priv->plat->fpe_cfg, 7768 priv->plat->tx_queues_to_use, 7769 priv->plat->rx_queues_to_use, false); 7770 7771 stmmac_fpe_handshake(priv, false); 7772 stmmac_fpe_stop_wq(priv); 7773 } 7774 7775 priv->speed = SPEED_UNKNOWN; 7776 return 0; 7777 } 7778 EXPORT_SYMBOL_GPL(stmmac_suspend); 7779 7780 static void stmmac_reset_rx_queue(struct stmmac_priv *priv, u32 queue) 7781 { 7782 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue]; 7783 7784 rx_q->cur_rx = 0; 7785 rx_q->dirty_rx = 0; 7786 } 7787 7788 static void stmmac_reset_tx_queue(struct stmmac_priv *priv, u32 queue) 7789 { 7790 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue]; 7791 7792 
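	/* Restart both ring cursors, drop any cached TSO MSS value and
	 * clear the queue's BQL byte counters so that accounting starts
	 * from a clean state the next time the ring is brought up
	 * (e.g. on resume).
	 */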
tx_q->cur_tx = 0; 7793 tx_q->dirty_tx = 0; 7794 tx_q->mss = 0; 7795 7796 netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue)); 7797 } 7798 7799 /** 7800 * stmmac_reset_queues_param - reset queue parameters 7801 * @priv: device pointer 7802 */ 7803 static void stmmac_reset_queues_param(struct stmmac_priv *priv) 7804 { 7805 u32 rx_cnt = priv->plat->rx_queues_to_use; 7806 u32 tx_cnt = priv->plat->tx_queues_to_use; 7807 u32 queue; 7808 7809 for (queue = 0; queue < rx_cnt; queue++) 7810 stmmac_reset_rx_queue(priv, queue); 7811 7812 for (queue = 0; queue < tx_cnt; queue++) 7813 stmmac_reset_tx_queue(priv, queue); 7814 } 7815 7816 /** 7817 * stmmac_resume - resume callback 7818 * @dev: device pointer 7819 * Description: when resume this function is invoked to setup the DMA and CORE 7820 * in a usable state. 7821 */ 7822 int stmmac_resume(struct device *dev) 7823 { 7824 struct net_device *ndev = dev_get_drvdata(dev); 7825 struct stmmac_priv *priv = netdev_priv(ndev); 7826 int ret; 7827 7828 if (!netif_running(ndev)) 7829 return 0; 7830 7831 /* Power Down bit, into the PM register, is cleared 7832 * automatically as soon as a magic packet or a Wake-up frame 7833 * is received. Anyway, it's better to manually clear 7834 * this bit because it can generate problems while resuming 7835 * from another devices (e.g. serial console). 7836 */ 7837 if (device_may_wakeup(priv->device) && priv->plat->pmt) { 7838 mutex_lock(&priv->lock); 7839 stmmac_pmt(priv, priv->hw, 0); 7840 mutex_unlock(&priv->lock); 7841 priv->irq_wake = 0; 7842 } else { 7843 pinctrl_pm_select_default_state(priv->device); 7844 /* reset the phy so that it's ready */ 7845 if (priv->mii) 7846 stmmac_mdio_reset(priv->mii); 7847 } 7848 7849 if (!(priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP) && 7850 priv->plat->serdes_powerup) { 7851 ret = priv->plat->serdes_powerup(ndev, 7852 priv->plat->bsp_priv); 7853 7854 if (ret < 0) 7855 return ret; 7856 } 7857 7858 rtnl_lock(); 7859 if (device_may_wakeup(priv->device) && priv->plat->pmt) { 7860 phylink_resume(priv->phylink); 7861 } else { 7862 phylink_resume(priv->phylink); 7863 if (device_may_wakeup(priv->device)) 7864 phylink_speed_up(priv->phylink); 7865 } 7866 rtnl_unlock(); 7867 7868 rtnl_lock(); 7869 mutex_lock(&priv->lock); 7870 7871 stmmac_reset_queues_param(priv); 7872 7873 stmmac_free_tx_skbufs(priv); 7874 stmmac_clear_descriptors(priv, &priv->dma_conf); 7875 7876 stmmac_hw_setup(ndev, false); 7877 stmmac_init_coalesce(priv); 7878 stmmac_set_rx_mode(ndev); 7879 7880 stmmac_restore_hw_vlan_rx_fltr(priv, ndev, priv->hw); 7881 7882 stmmac_enable_all_queues(priv); 7883 stmmac_enable_all_dma_irq(priv); 7884 7885 mutex_unlock(&priv->lock); 7886 rtnl_unlock(); 7887 7888 netif_device_attach(ndev); 7889 7890 return 0; 7891 } 7892 EXPORT_SYMBOL_GPL(stmmac_resume); 7893 7894 #ifndef MODULE 7895 static int __init stmmac_cmdline_opt(char *str) 7896 { 7897 char *opt; 7898 7899 if (!str || !*str) 7900 return 1; 7901 while ((opt = strsep(&str, ",")) != NULL) { 7902 if (!strncmp(opt, "debug:", 6)) { 7903 if (kstrtoint(opt + 6, 0, &debug)) 7904 goto err; 7905 } else if (!strncmp(opt, "phyaddr:", 8)) { 7906 if (kstrtoint(opt + 8, 0, &phyaddr)) 7907 goto err; 7908 } else if (!strncmp(opt, "buf_sz:", 7)) { 7909 if (kstrtoint(opt + 7, 0, &buf_sz)) 7910 goto err; 7911 } else if (!strncmp(opt, "tc:", 3)) { 7912 if (kstrtoint(opt + 3, 0, &tc)) 7913 goto err; 7914 } else if (!strncmp(opt, "watchdog:", 9)) { 7915 if (kstrtoint(opt + 9, 0, &watchdog)) 7916 goto err; 7917 } else if (!strncmp(opt, 
"flow_ctrl:", 10)) { 7918 if (kstrtoint(opt + 10, 0, &flow_ctrl)) 7919 goto err; 7920 } else if (!strncmp(opt, "pause:", 6)) { 7921 if (kstrtoint(opt + 6, 0, &pause)) 7922 goto err; 7923 } else if (!strncmp(opt, "eee_timer:", 10)) { 7924 if (kstrtoint(opt + 10, 0, &eee_timer)) 7925 goto err; 7926 } else if (!strncmp(opt, "chain_mode:", 11)) { 7927 if (kstrtoint(opt + 11, 0, &chain_mode)) 7928 goto err; 7929 } 7930 } 7931 return 1; 7932 7933 err: 7934 pr_err("%s: ERROR broken module parameter conversion", __func__); 7935 return 1; 7936 } 7937 7938 __setup("stmmaceth=", stmmac_cmdline_opt); 7939 #endif /* MODULE */ 7940 7941 static int __init stmmac_init(void) 7942 { 7943 #ifdef CONFIG_DEBUG_FS 7944 /* Create debugfs main directory if it doesn't exist yet */ 7945 if (!stmmac_fs_dir) 7946 stmmac_fs_dir = debugfs_create_dir(STMMAC_RESOURCE_NAME, NULL); 7947 register_netdevice_notifier(&stmmac_notifier); 7948 #endif 7949 7950 return 0; 7951 } 7952 7953 static void __exit stmmac_exit(void) 7954 { 7955 #ifdef CONFIG_DEBUG_FS 7956 unregister_netdevice_notifier(&stmmac_notifier); 7957 debugfs_remove_recursive(stmmac_fs_dir); 7958 #endif 7959 } 7960 7961 module_init(stmmac_init) 7962 module_exit(stmmac_exit) 7963 7964 MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet device driver"); 7965 MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>"); 7966 MODULE_LICENSE("GPL"); 7967