// SPDX-License-Identifier: GPL-2.0-only
/*******************************************************************************
  This is the driver for the ST MAC 10/100/1000 on-chip Ethernet controllers.
  ST Ethernet IPs are built around a Synopsys IP Core.

	Copyright(C) 2007-2011 STMicroelectronics Ltd


  Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>

  Documentation available at:
	http://www.stlinux.com
  Support available at:
	https://bugzilla.stlinux.com/
*******************************************************************************/

#include <linux/clk.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/if_ether.h>
#include <linux/crc32.h>
#include <linux/mii.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/prefetch.h>
#include <linux/pinctrl/consumer.h>
#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#endif /* CONFIG_DEBUG_FS */
#include <linux/net_tstamp.h>
#include <net/pkt_cls.h>
#include "stmmac_ptp.h"
#include "stmmac.h"
#include <linux/reset.h>
#include <linux/of_mdio.h>
#include "dwmac1000.h"
#include "dwxgmac2.h"
#include "hwif.h"

#define	STMMAC_ALIGN(x)		__ALIGN_KERNEL(x, SMP_CACHE_BYTES)
#define	TSO_MAX_BUFF_SIZE	(SZ_16K - 1)

/* Module parameters */
#define TX_TIMEO	5000
static int watchdog = TX_TIMEO;
module_param(watchdog, int, 0644);
MODULE_PARM_DESC(watchdog, "Transmit timeout in milliseconds (default 5s)");

static int debug = -1;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)");

static int phyaddr = -1;
module_param(phyaddr, int, 0444);
MODULE_PARM_DESC(phyaddr, "Physical device address");

#define STMMAC_TX_THRESH	(DMA_TX_SIZE / 4)
#define STMMAC_RX_THRESH	(DMA_RX_SIZE / 4)

static int flow_ctrl = FLOW_AUTO;
module_param(flow_ctrl, int, 0644);
MODULE_PARM_DESC(flow_ctrl, "Flow control ability [on/off]");

static int pause = PAUSE_TIME;
module_param(pause, int, 0644);
MODULE_PARM_DESC(pause, "Flow Control Pause Time");

#define TC_DEFAULT 64
static int tc = TC_DEFAULT;
module_param(tc, int, 0644);
MODULE_PARM_DESC(tc, "DMA threshold control value");

#define	DEFAULT_BUFSIZE	1536
static int buf_sz = DEFAULT_BUFSIZE;
module_param(buf_sz, int, 0644);
MODULE_PARM_DESC(buf_sz, "DMA buffer size");

#define	STMMAC_RX_COPYBREAK	256

static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
				      NETIF_MSG_LINK | NETIF_MSG_IFUP |
				      NETIF_MSG_IFDOWN | NETIF_MSG_TIMER);

#define STMMAC_DEFAULT_LPI_TIMER	1000
static int eee_timer = STMMAC_DEFAULT_LPI_TIMER;
module_param(eee_timer, int, 0644);
MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec");
#define STMMAC_LPI_T(x) (jiffies + msecs_to_jiffies(x))

/* By default the driver will use the ring mode to manage tx and rx descriptors,
 * but allow the user to force the chain mode instead of the ring.
 */
static unsigned int chain_mode;
module_param(chain_mode, int, 0444);
MODULE_PARM_DESC(chain_mode, "To use chain instead of ring mode");

static irqreturn_t stmmac_interrupt(int irq, void *dev_id);

#ifdef CONFIG_DEBUG_FS
static int stmmac_init_fs(struct net_device *dev);
static void stmmac_exit_fs(struct net_device *dev);
#endif

#define STMMAC_COAL_TIMER(x) (jiffies + usecs_to_jiffies(x))

/**
 * stmmac_verify_args - verify the driver parameters.
 * Description: it checks the driver parameters and sets a default in case of
 * errors.
 */
static void stmmac_verify_args(void)
{
	if (unlikely(watchdog < 0))
		watchdog = TX_TIMEO;
	if (unlikely((buf_sz < DEFAULT_BUFSIZE) || (buf_sz > BUF_SIZE_16KiB)))
		buf_sz = DEFAULT_BUFSIZE;
	if (unlikely(flow_ctrl > 1))
		flow_ctrl = FLOW_AUTO;
	else if (likely(flow_ctrl < 0))
		flow_ctrl = FLOW_OFF;
	if (unlikely((pause < 0) || (pause > 0xffff)))
		pause = PAUSE_TIME;
	if (eee_timer < 0)
		eee_timer = STMMAC_DEFAULT_LPI_TIMER;
}

/**
 * stmmac_disable_all_queues - Disable all queues
 * @priv: driver private structure
 */
static void stmmac_disable_all_queues(struct stmmac_priv *priv)
{
	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
	u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
	u32 queue;

	for (queue = 0; queue < maxq; queue++) {
		struct stmmac_channel *ch = &priv->channel[queue];

		if (queue < rx_queues_cnt)
			napi_disable(&ch->rx_napi);
		if (queue < tx_queues_cnt)
			napi_disable(&ch->tx_napi);
	}
}

/**
 * stmmac_enable_all_queues - Enable all queues
 * @priv: driver private structure
 */
static void stmmac_enable_all_queues(struct stmmac_priv *priv)
{
	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
	u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
	u32 queue;

	for (queue = 0; queue < maxq; queue++) {
		struct stmmac_channel *ch = &priv->channel[queue];

		if (queue < rx_queues_cnt)
			napi_enable(&ch->rx_napi);
		if (queue < tx_queues_cnt)
			napi_enable(&ch->tx_napi);
	}
}

/**
 * stmmac_stop_all_queues - Stop all queues
 * @priv: driver private structure
 */
static void stmmac_stop_all_queues(struct stmmac_priv *priv)
{
	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
	u32 queue;

	for (queue = 0; queue < tx_queues_cnt; queue++)
		netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
}

/**
 * stmmac_start_all_queues - Start all queues
 * @priv: driver private structure
 */
static void stmmac_start_all_queues(struct stmmac_priv *priv)
{
	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
	u32 queue;

	for (queue = 0; queue < tx_queues_cnt; queue++)
		netif_tx_start_queue(netdev_get_tx_queue(priv->dev, queue));
}

static void stmmac_service_event_schedule(struct stmmac_priv *priv)
{
	if (!test_bit(STMMAC_DOWN, &priv->state) &&
	    !test_and_set_bit(STMMAC_SERVICE_SCHED, &priv->state))
		queue_work(priv->wq, &priv->service_task);
}

static void stmmac_global_err(struct stmmac_priv *priv)
{
	netif_carrier_off(priv->dev);
	set_bit(STMMAC_RESET_REQUESTED, &priv->state);
	stmmac_service_event_schedule(priv);
}

/**
 * stmmac_clk_csr_set - dynamically set the MDC clock
 * @priv: driver private structure
 * Description: this is to dynamically set the MDC clock according to the csr
 * clock input.
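 * For example, a 50 MHz csr clock falls in the 35-60 MHz range below, so
 * STMMAC_CSR_35_60M is programmed.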
 * Note:
 *	If a specific clk_csr value is passed from the platform
 *	this means that the CSR Clock Range selection cannot be
 *	changed at run-time and it is fixed (as reported in the driver
 *	documentation). Otherwise the driver will try to set the MDC
 *	clock dynamically according to the actual clock input.
 */
static void stmmac_clk_csr_set(struct stmmac_priv *priv)
{
	u32 clk_rate;

	clk_rate = clk_get_rate(priv->plat->stmmac_clk);

	/* Platform provided default clk_csr would be assumed valid
	 * for all other cases except for the below mentioned ones.
	 * For values higher than the IEEE 802.3 specified frequency
	 * we cannot estimate the proper divider as the frequency of
	 * clk_csr_i is not known. So we do not change the default
	 * divider.
	 */
	if (!(priv->clk_csr & MAC_CSR_H_FRQ_MASK)) {
		if (clk_rate < CSR_F_35M)
			priv->clk_csr = STMMAC_CSR_20_35M;
		else if ((clk_rate >= CSR_F_35M) && (clk_rate < CSR_F_60M))
			priv->clk_csr = STMMAC_CSR_35_60M;
		else if ((clk_rate >= CSR_F_60M) && (clk_rate < CSR_F_100M))
			priv->clk_csr = STMMAC_CSR_60_100M;
		else if ((clk_rate >= CSR_F_100M) && (clk_rate < CSR_F_150M))
			priv->clk_csr = STMMAC_CSR_100_150M;
		else if ((clk_rate >= CSR_F_150M) && (clk_rate < CSR_F_250M))
			priv->clk_csr = STMMAC_CSR_150_250M;
		else if ((clk_rate >= CSR_F_250M) && (clk_rate < CSR_F_300M))
			priv->clk_csr = STMMAC_CSR_250_300M;
	}

	if (priv->plat->has_sun8i) {
		if (clk_rate > 160000000)
			priv->clk_csr = 0x03;
		else if (clk_rate > 80000000)
			priv->clk_csr = 0x02;
		else if (clk_rate > 40000000)
			priv->clk_csr = 0x01;
		else
			priv->clk_csr = 0;
	}

	if (priv->plat->has_xgmac) {
		if (clk_rate > 400000000)
			priv->clk_csr = 0x5;
		else if (clk_rate > 350000000)
			priv->clk_csr = 0x4;
		else if (clk_rate > 300000000)
			priv->clk_csr = 0x3;
		else if (clk_rate > 250000000)
			priv->clk_csr = 0x2;
		else if (clk_rate > 150000000)
			priv->clk_csr = 0x1;
		else
			priv->clk_csr = 0x0;
	}
}

static void print_pkt(unsigned char *buf, int len)
{
	pr_debug("len = %d byte, buf addr: 0x%p\n", len, buf);
	print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, buf, len);
}

static inline u32 stmmac_tx_avail(struct stmmac_priv *priv, u32 queue)
{
	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
	u32 avail;

	if (tx_q->dirty_tx > tx_q->cur_tx)
		avail = tx_q->dirty_tx - tx_q->cur_tx - 1;
	else
		avail = DMA_TX_SIZE - tx_q->cur_tx + tx_q->dirty_tx - 1;

	return avail;
}

/**
 * stmmac_rx_dirty - Get RX queue dirty
 * @priv: driver private structure
 * @queue: RX queue index
 */
static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv, u32 queue)
{
	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
	u32 dirty;

	if (rx_q->dirty_rx <= rx_q->cur_rx)
		dirty = rx_q->cur_rx - rx_q->dirty_rx;
	else
		dirty = DMA_RX_SIZE - rx_q->dirty_rx + rx_q->cur_rx;

	return dirty;
}

/**
 * stmmac_hw_fix_mac_speed - callback for speed selection
 * @priv: driver private structure
 * Description: on some platforms (e.g. ST), some HW system configuration
 * registers have to be set according to the link speed negotiated.
 */
static inline void stmmac_hw_fix_mac_speed(struct stmmac_priv *priv)
{
	struct net_device *ndev = priv->dev;
	struct phy_device *phydev = ndev->phydev;

	if (likely(priv->plat->fix_mac_speed))
		priv->plat->fix_mac_speed(priv->plat->bsp_priv, phydev->speed);
}

/**
 * stmmac_enable_eee_mode - check and enter in LPI mode
 * @priv: driver private structure
 * Description: this function verifies and enters the LPI mode in case of
 * EEE.
 */
static void stmmac_enable_eee_mode(struct stmmac_priv *priv)
{
	u32 tx_cnt = priv->plat->tx_queues_to_use;
	u32 queue;

	/* check if all TX queues have the work finished */
	for (queue = 0; queue < tx_cnt; queue++) {
		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];

		if (tx_q->dirty_tx != tx_q->cur_tx)
			return; /* still unfinished work */
	}

	/* Check and enter in LPI mode */
	if (!priv->tx_path_in_lpi_mode)
		stmmac_set_eee_mode(priv, priv->hw,
				priv->plat->en_tx_lpi_clockgating);
}

/**
 * stmmac_disable_eee_mode - disable and exit from LPI mode
 * @priv: driver private structure
 * Description: this function disables EEE and exits the LPI state when the
 * LPI state is true. This is called by the xmit.
 */
void stmmac_disable_eee_mode(struct stmmac_priv *priv)
{
	stmmac_reset_eee_mode(priv, priv->hw);
	del_timer_sync(&priv->eee_ctrl_timer);
	priv->tx_path_in_lpi_mode = false;
}

/**
 * stmmac_eee_ctrl_timer - EEE TX SW timer.
 * @arg : data hook
 * Description:
 * if there is no data transfer and if we are not in LPI state,
 * then MAC Transmitter can be moved to LPI state.
 */
static void stmmac_eee_ctrl_timer(struct timer_list *t)
{
	struct stmmac_priv *priv = from_timer(priv, t, eee_ctrl_timer);

	stmmac_enable_eee_mode(priv);
	mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer));
}

/**
 * stmmac_eee_init - init EEE
 * @priv: driver private structure
 * Description:
 * if the GMAC supports the EEE (from the HW cap reg) and the phy device
 * can also manage EEE, this function enables the LPI state and starts the
 * related timer.
 */
bool stmmac_eee_init(struct stmmac_priv *priv)
{
	struct net_device *ndev = priv->dev;
	int interface = priv->plat->interface;
	bool ret = false;

	if ((interface != PHY_INTERFACE_MODE_MII) &&
	    (interface != PHY_INTERFACE_MODE_GMII) &&
	    !phy_interface_mode_is_rgmii(interface))
		goto out;

	/* Using PCS we cannot deal with the PHY registers at this stage
	 * so we do not support extra features like EEE.
	 */
	if ((priv->hw->pcs == STMMAC_PCS_RGMII) ||
	    (priv->hw->pcs == STMMAC_PCS_TBI) ||
	    (priv->hw->pcs == STMMAC_PCS_RTBI))
		goto out;

	/* MAC core supports the EEE feature. */
	if (priv->dma_cap.eee) {
		int tx_lpi_timer = priv->tx_lpi_timer;

		/* Check if the PHY supports EEE */
		if (phy_init_eee(ndev->phydev, 1)) {
			/* To manage at run-time if the EEE cannot be supported
			 * anymore (for example because the lp caps have been
			 * changed).
			 * In that case the driver disables its own timers.
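			 * (phy_init_eee() returns non-zero when EEE cannot be
			 * negotiated with the link partner.)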
			 */
			mutex_lock(&priv->lock);
			if (priv->eee_active) {
				netdev_dbg(priv->dev, "disable EEE\n");
				del_timer_sync(&priv->eee_ctrl_timer);
				stmmac_set_eee_timer(priv, priv->hw, 0,
						tx_lpi_timer);
			}
			priv->eee_active = 0;
			mutex_unlock(&priv->lock);
			goto out;
		}
		/* Activate the EEE and start timers */
		mutex_lock(&priv->lock);
		if (!priv->eee_active) {
			priv->eee_active = 1;
			timer_setup(&priv->eee_ctrl_timer,
				    stmmac_eee_ctrl_timer, 0);
			mod_timer(&priv->eee_ctrl_timer,
				  STMMAC_LPI_T(eee_timer));

			stmmac_set_eee_timer(priv, priv->hw,
					STMMAC_DEFAULT_LIT_LS, tx_lpi_timer);
		}
		/* Set HW EEE according to the speed */
		stmmac_set_eee_pls(priv, priv->hw, ndev->phydev->link);

		ret = true;
		mutex_unlock(&priv->lock);

		netdev_dbg(priv->dev, "Energy-Efficient Ethernet initialized\n");
	}
out:
	return ret;
}

/* stmmac_get_tx_hwtstamp - get HW TX timestamps
 * @priv: driver private structure
 * @p : descriptor pointer
 * @skb : the socket buffer
 * Description :
 * This function reads the timestamp from the descriptor, performs some
 * sanity checks and passes it to the stack.
 */
static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv,
				   struct dma_desc *p, struct sk_buff *skb)
{
	struct skb_shared_hwtstamps shhwtstamp;
	u64 ns = 0;

	if (!priv->hwts_tx_en)
		return;

	/* exit if skb doesn't support hw tstamp */
	if (likely(!skb || !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)))
		return;

	/* check tx tstamp status */
	if (stmmac_get_tx_timestamp_status(priv, p)) {
		/* get the valid tstamp */
		stmmac_get_timestamp(priv, p, priv->adv_ts, &ns);

		memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
		shhwtstamp.hwtstamp = ns_to_ktime(ns);

		netdev_dbg(priv->dev, "get valid TX hw timestamp %llu\n", ns);
		/* pass tstamp to stack */
		skb_tstamp_tx(skb, &shhwtstamp);
	}

	return;
}

/* stmmac_get_rx_hwtstamp - get HW RX timestamps
 * @priv: driver private structure
 * @p : descriptor pointer
 * @np : next descriptor pointer
 * @skb : the socket buffer
 * Description :
 * This function reads the received packet's timestamp from the descriptor
 * and passes it to the stack. It also performs some sanity checks.
 */
static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p,
				   struct dma_desc *np, struct sk_buff *skb)
{
	struct skb_shared_hwtstamps *shhwtstamp = NULL;
	struct dma_desc *desc = p;
	u64 ns = 0;

	if (!priv->hwts_rx_en)
		return;
	/* For GMAC4, the valid timestamp is from CTX next desc. */
	if (priv->plat->has_gmac4 || priv->plat->has_xgmac)
		desc = np;

	/* Check if timestamp is available */
	if (stmmac_get_rx_timestamp_status(priv, p, np, priv->adv_ts)) {
		stmmac_get_timestamp(priv, desc, priv->adv_ts, &ns);
		netdev_dbg(priv->dev, "get valid RX hw timestamp %llu\n", ns);
		shhwtstamp = skb_hwtstamps(skb);
		memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
		shhwtstamp->hwtstamp = ns_to_ktime(ns);
	} else {
		netdev_dbg(priv->dev, "cannot get RX hw timestamp\n");
	}
}

/**
 * stmmac_hwtstamp_set - control hardware timestamping.
 * @dev: device pointer.
 * @ifr: An IOCTL specific structure, that can contain a pointer to
 * a proprietary structure used to pass information to the driver.
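 * (Reached via the SIOCSHWTSTAMP ioctl; ifr->ifr_data points to a
 * struct hwtstamp_config.)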
 * Description:
 * This function configures the MAC to enable/disable both outgoing(TX)
 * and incoming(RX) packets time stamping based on user input.
 * Return Value:
 * 0 on success and an appropriate -ve integer on failure.
 */
static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	struct hwtstamp_config config;
	struct timespec64 now;
	u64 temp = 0;
	u32 ptp_v2 = 0;
	u32 tstamp_all = 0;
	u32 ptp_over_ipv4_udp = 0;
	u32 ptp_over_ipv6_udp = 0;
	u32 ptp_over_ethernet = 0;
	u32 snap_type_sel = 0;
	u32 ts_master_en = 0;
	u32 ts_event_en = 0;
	u32 sec_inc = 0;
	u32 value = 0;
	bool xmac;

	xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;

	if (!(priv->dma_cap.time_stamp || priv->adv_ts)) {
		netdev_alert(priv->dev, "No support for HW time stamping\n");
		priv->hwts_tx_en = 0;
		priv->hwts_rx_en = 0;

		return -EOPNOTSUPP;
	}

	if (copy_from_user(&config, ifr->ifr_data,
			   sizeof(config)))
		return -EFAULT;

	netdev_dbg(priv->dev, "%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n",
		   __func__, config.flags, config.tx_type, config.rx_filter);

	/* reserved for future extensions */
	if (config.flags)
		return -EINVAL;

	if (config.tx_type != HWTSTAMP_TX_OFF &&
	    config.tx_type != HWTSTAMP_TX_ON)
		return -ERANGE;

	if (priv->adv_ts) {
		switch (config.rx_filter) {
		case HWTSTAMP_FILTER_NONE:
			/* time stamp no incoming packet at all */
			config.rx_filter = HWTSTAMP_FILTER_NONE;
			break;

		case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
			/* PTP v1, UDP, any kind of event packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
			/* 'xmac' hardware can support Sync, Pdelay_Req and
			 * Pdelay_resp by setting bit14 and bits17/16 to 01
			 * This leaves Delay_Req timestamps out.
			 * Enable all events *and* general purpose message
			 * timestamping
			 */
			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
			/* PTP v1, UDP, Sync packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
			/* take time stamp for SYNC messages only */
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
			/* PTP v1, UDP, Delay_req packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
			/* take time stamp for Delay_Req messages only */
			ts_master_en = PTP_TCR_TSMSTRENA;
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
			/* PTP v2, UDP, any kind of event packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for all event messages */
			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
			/* PTP v2, UDP, Sync packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for SYNC messages only */
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
			/* PTP v2, UDP, Delay_req packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for Delay_Req messages only */
			ts_master_en = PTP_TCR_TSMSTRENA;
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_EVENT:
			/* PTP v2/802.AS1 any layer, any kind of event packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			ptp_over_ethernet = PTP_TCR_TSIPENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_SYNC:
			/* PTP v2/802.AS1, any layer, Sync packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for SYNC messages only */
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			ptp_over_ethernet = PTP_TCR_TSIPENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
			/* PTP v2/802.AS1, any layer, Delay_req packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for Delay_Req messages only */
			ts_master_en = PTP_TCR_TSMSTRENA;
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			ptp_over_ethernet = PTP_TCR_TSIPENA;
			break;

		case HWTSTAMP_FILTER_NTP_ALL:
		case HWTSTAMP_FILTER_ALL:
			/* time stamp any incoming packet */
			config.rx_filter = HWTSTAMP_FILTER_ALL;
			tstamp_all = PTP_TCR_TSENALL;
			break;

		default:
			return -ERANGE;
		}
	} else {
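		/* Without the advanced timestamping unit only two choices
		 * exist: timestamp nothing, or timestamp every PTP v1 over
		 * UDP event packet.
		 */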
		switch (config.rx_filter) {
		case HWTSTAMP_FILTER_NONE:
			config.rx_filter = HWTSTAMP_FILTER_NONE;
			break;
		default:
			/* PTP v1, UDP, any kind of event packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
			break;
		}
	}
	priv->hwts_rx_en = ((config.rx_filter == HWTSTAMP_FILTER_NONE) ? 0 : 1);
	priv->hwts_tx_en = config.tx_type == HWTSTAMP_TX_ON;

	if (!priv->hwts_tx_en && !priv->hwts_rx_en)
		stmmac_config_hw_tstamping(priv, priv->ptpaddr, 0);
	else {
		value = (PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | PTP_TCR_TSCTRLSSR |
			 tstamp_all | ptp_v2 | ptp_over_ethernet |
			 ptp_over_ipv6_udp | ptp_over_ipv4_udp | ts_event_en |
			 ts_master_en | snap_type_sel);
		stmmac_config_hw_tstamping(priv, priv->ptpaddr, value);

		/* program Sub Second Increment reg */
		stmmac_config_sub_second_increment(priv,
				priv->ptpaddr, priv->plat->clk_ptp_rate,
				xmac, &sec_inc);
		temp = div_u64(1000000000ULL, sec_inc);

		/* Store sub second increment and flags for later use */
		priv->sub_second_inc = sec_inc;
		priv->systime_flags = value;

		/* calculate default added value:
		 * formula is :
		 * addend = (2^32)/freq_div_ratio;
		 * where, freq_div_ratio = 1e9ns/sec_inc
		 */
		temp = (u64)(temp << 32);
		priv->default_addend = div_u64(temp, priv->plat->clk_ptp_rate);
		stmmac_config_addend(priv, priv->ptpaddr, priv->default_addend);

		/* initialize system time */
		ktime_get_real_ts64(&now);

		/* lower 32 bits of tv_sec are safe until y2106 */
		stmmac_init_systime(priv, priv->ptpaddr,
				    (u32)now.tv_sec, now.tv_nsec);
	}

	memcpy(&priv->tstamp_config, &config, sizeof(config));

	return copy_to_user(ifr->ifr_data, &config,
			    sizeof(config)) ? -EFAULT : 0;
}

/**
 * stmmac_hwtstamp_get - read hardware timestamping.
 * @dev: device pointer.
 * @ifr: An IOCTL specific structure, that can contain a pointer to
 * a proprietary structure used to pass information to the driver.
 * Description:
 * This function obtains the current hardware timestamping settings
 * as requested.
 */
static int stmmac_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	struct hwtstamp_config *config = &priv->tstamp_config;

	if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
		return -EOPNOTSUPP;

	return copy_to_user(ifr->ifr_data, config,
			    sizeof(*config)) ? -EFAULT : 0;
}

/**
 * stmmac_init_ptp - init PTP
 * @priv: driver private structure
 * Description: this is to verify if the HW supports the PTPv1 or PTPv2.
 * This is done by looking at the HW cap. register.
 * This function also registers the ptp driver.
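 * (adv_ts is set when the advanced/v2 timestamping unit is available, see
 * the checks below.)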
 */
static int stmmac_init_ptp(struct stmmac_priv *priv)
{
	bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;

	if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
		return -EOPNOTSUPP;

	priv->adv_ts = 0;
	/* Check if adv_ts can be enabled for dwmac 4.x / xgmac core */
	if (xmac && priv->dma_cap.atime_stamp)
		priv->adv_ts = 1;
	/* Dwmac 3.x core with extend_desc can support adv_ts */
	else if (priv->extend_desc && priv->dma_cap.atime_stamp)
		priv->adv_ts = 1;

	if (priv->dma_cap.time_stamp)
		netdev_info(priv->dev, "IEEE 1588-2002 Timestamp supported\n");

	if (priv->adv_ts)
		netdev_info(priv->dev,
			    "IEEE 1588-2008 Advanced Timestamp supported\n");

	priv->hwts_tx_en = 0;
	priv->hwts_rx_en = 0;

	stmmac_ptp_register(priv);

	return 0;
}

static void stmmac_release_ptp(struct stmmac_priv *priv)
{
	if (priv->plat->clk_ptp_ref)
		clk_disable_unprepare(priv->plat->clk_ptp_ref);
	stmmac_ptp_unregister(priv);
}

/**
 * stmmac_mac_flow_ctrl - Configure flow control in all queues
 * @priv: driver private structure
 * Description: It is used for configuring the flow control in all queues
 */
static void stmmac_mac_flow_ctrl(struct stmmac_priv *priv, u32 duplex)
{
	u32 tx_cnt = priv->plat->tx_queues_to_use;

	stmmac_flow_ctrl(priv, priv->hw, duplex, priv->flow_ctrl,
			priv->pause, tx_cnt);
}

/**
 * stmmac_adjust_link - adjusts the link parameters
 * @dev: net device structure
 * Description: this is the helper called by the physical abstraction layer
 * drivers to communicate the phy link status. According to the speed and
 * duplex this driver can invoke registered glue-logic as well.
 * It also invokes the EEE initialization because it could happen when
 * switching between networks that are EEE capable.
 */
static void stmmac_adjust_link(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	struct phy_device *phydev = dev->phydev;
	bool new_state = false;

	if (!phydev)
		return;

	mutex_lock(&priv->lock);

	if (phydev->link) {
		u32 ctrl = readl(priv->ioaddr + MAC_CTRL_REG);

		/* Now we make sure that we can be in full duplex mode.
		 * If not, we operate in half-duplex mode. */
		if (phydev->duplex != priv->oldduplex) {
			new_state = true;
			if (!phydev->duplex)
				ctrl &= ~priv->hw->link.duplex;
			else
				ctrl |= priv->hw->link.duplex;
			priv->oldduplex = phydev->duplex;
		}
		/* Flow Control operation */
		if (phydev->pause)
			stmmac_mac_flow_ctrl(priv, phydev->duplex);

		if (phydev->speed != priv->speed) {
			new_state = true;
			ctrl &= ~priv->hw->link.speed_mask;
			switch (phydev->speed) {
			case SPEED_1000:
				ctrl |= priv->hw->link.speed1000;
				break;
			case SPEED_100:
				ctrl |= priv->hw->link.speed100;
				break;
			case SPEED_10:
				ctrl |= priv->hw->link.speed10;
				break;
			default:
				netif_warn(priv, link, priv->dev,
					   "broken speed: %d\n", phydev->speed);
				phydev->speed = SPEED_UNKNOWN;
				break;
			}
			if (phydev->speed != SPEED_UNKNOWN)
				stmmac_hw_fix_mac_speed(priv);
			priv->speed = phydev->speed;
		}

		writel(ctrl, priv->ioaddr + MAC_CTRL_REG);

		if (!priv->oldlink) {
			new_state = true;
			priv->oldlink = true;
		}
	} else if (priv->oldlink) {
		new_state = true;
		priv->oldlink = false;
		priv->speed = SPEED_UNKNOWN;
		priv->oldduplex = DUPLEX_UNKNOWN;
	}

	if (new_state && netif_msg_link(priv))
		phy_print_status(phydev);

	mutex_unlock(&priv->lock);

	if (phydev->is_pseudo_fixed_link)
		/* Stop the PHY layer from calling the adjust-link hook in
		 * case a switch is attached to the stmmac driver.
		 */
		phydev->irq = PHY_IGNORE_INTERRUPT;
	else
		/* At this stage, init the EEE if supported.
		 * Never called in case of fixed_link.
		 */
		priv->eee_enabled = stmmac_eee_init(priv);
}

/**
 * stmmac_check_pcs_mode - verify if RGMII/SGMII is supported
 * @priv: driver private structure
 * Description: this is to verify if the HW supports the PCS.
 * Physical Coding Sublayer (PCS) interface that can be used when the MAC is
 * configured for the TBI, RTBI, or SGMII PHY interface.
 */
static void stmmac_check_pcs_mode(struct stmmac_priv *priv)
{
	int interface = priv->plat->interface;

	if (priv->dma_cap.pcs) {
		if ((interface == PHY_INTERFACE_MODE_RGMII) ||
		    (interface == PHY_INTERFACE_MODE_RGMII_ID) ||
		    (interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
		    (interface == PHY_INTERFACE_MODE_RGMII_TXID)) {
			netdev_dbg(priv->dev, "PCS RGMII support enabled\n");
			priv->hw->pcs = STMMAC_PCS_RGMII;
		} else if (interface == PHY_INTERFACE_MODE_SGMII) {
			netdev_dbg(priv->dev, "PCS SGMII support enabled\n");
			priv->hw->pcs = STMMAC_PCS_SGMII;
		}
	}
}

/**
 * stmmac_init_phy - PHY initialization
 * @dev: net device structure
 * Description: it initializes the driver's PHY state, and attaches the PHY
 * to the mac driver.
 * Return value:
 * 0 on success
 */
static int stmmac_init_phy(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	u32 tx_cnt = priv->plat->tx_queues_to_use;
	struct phy_device *phydev;
	char phy_id_fmt[MII_BUS_ID_SIZE + 3];
	char bus_id[MII_BUS_ID_SIZE];
	int interface = priv->plat->interface;
	int max_speed = priv->plat->max_speed;

	priv->oldlink = false;
	priv->speed = SPEED_UNKNOWN;
	priv->oldduplex = DUPLEX_UNKNOWN;

	if (priv->plat->phy_node) {
		phydev = of_phy_connect(dev, priv->plat->phy_node,
					&stmmac_adjust_link, 0, interface);
	} else {
		snprintf(bus_id, MII_BUS_ID_SIZE, "stmmac-%x",
			 priv->plat->bus_id);

		snprintf(phy_id_fmt, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, bus_id,
			 priv->plat->phy_addr);
		netdev_dbg(priv->dev, "%s: trying to attach to %s\n", __func__,
			   phy_id_fmt);

		phydev = phy_connect(dev, phy_id_fmt, &stmmac_adjust_link,
				     interface);
	}

	if (IS_ERR_OR_NULL(phydev)) {
		netdev_err(priv->dev, "Could not attach to PHY\n");
		if (!phydev)
			return -ENODEV;

		return PTR_ERR(phydev);
	}

	/* Stop Advertising 1000BASE Capability if interface is not GMII */
	if ((interface == PHY_INTERFACE_MODE_MII) ||
	    (interface == PHY_INTERFACE_MODE_RMII) ||
	    (max_speed < 1000 && max_speed > 0))
		phy_set_max_speed(phydev, SPEED_100);

	/*
	 * Half-duplex mode is not supported with multiqueue:
	 * half-duplex can only work with a single queue.
	 */
	if (tx_cnt > 1) {
		phy_remove_link_mode(phydev,
				     ETHTOOL_LINK_MODE_10baseT_Half_BIT);
		phy_remove_link_mode(phydev,
				     ETHTOOL_LINK_MODE_100baseT_Half_BIT);
		phy_remove_link_mode(phydev,
				     ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
	}

	/*
	 * Broken HW is sometimes missing the pull-up resistor on the
	 * MDIO line, which results in reads to non-existent devices returning
	 * 0 rather than 0xffff. Catch this here and treat 0 as a non-existent
	 * device as well.
	 * Note: phydev->phy_id is the result of reading the UID PHY registers.
	 */
	if (!priv->plat->phy_node && phydev->phy_id == 0) {
		phy_disconnect(phydev);
		return -ENODEV;
	}

	/* stmmac_adjust_link will change this to PHY_IGNORE_INTERRUPT to avoid
	 * subsequent PHY polling, make sure we force a link transition if
	 * we have a UP/DOWN/UP transition
	 */
	if (phydev->is_pseudo_fixed_link)
		phydev->irq = PHY_POLL;

	phy_attached_info(phydev);
	return 0;
}

static void stmmac_display_rx_rings(struct stmmac_priv *priv)
{
	u32 rx_cnt = priv->plat->rx_queues_to_use;
	void *head_rx;
	u32 queue;

	/* Display RX rings */
	for (queue = 0; queue < rx_cnt; queue++) {
		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];

		pr_info("\tRX Queue %u rings\n", queue);

		if (priv->extend_desc)
			head_rx = (void *)rx_q->dma_erx;
		else
			head_rx = (void *)rx_q->dma_rx;

		/* Display RX ring */
		stmmac_display_ring(priv, head_rx, DMA_RX_SIZE, true);
	}
}

static void stmmac_display_tx_rings(struct stmmac_priv *priv)
{
	u32 tx_cnt = priv->plat->tx_queues_to_use;
	void *head_tx;
	u32 queue;

	/* Display TX rings */
	for (queue = 0; queue < tx_cnt; queue++) {
		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];

		pr_info("\tTX Queue %d rings\n", queue);

		if (priv->extend_desc)
			head_tx = (void *)tx_q->dma_etx;
		else
			head_tx = (void *)tx_q->dma_tx;

		stmmac_display_ring(priv, head_tx, DMA_TX_SIZE, false);
	}
}

static void stmmac_display_rings(struct stmmac_priv *priv)
{
	/* Display RX ring */
	stmmac_display_rx_rings(priv);

	/* Display TX ring */
	stmmac_display_tx_rings(priv);
}

static int stmmac_set_bfsize(int mtu, int bufsize)
{
	int ret = bufsize;

	if (mtu >= BUF_SIZE_4KiB)
		ret = BUF_SIZE_8KiB;
	else if (mtu >= BUF_SIZE_2KiB)
		ret = BUF_SIZE_4KiB;
	else if (mtu > DEFAULT_BUFSIZE)
		ret = BUF_SIZE_2KiB;
	else
		ret = DEFAULT_BUFSIZE;

	return ret;
}

/**
 * stmmac_clear_rx_descriptors - clear RX descriptors
 * @priv: driver private structure
 * @queue: RX queue index
 * Description: this function is called to clear the RX descriptors
 * whether basic or extended descriptors are used.
 */
static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv, u32 queue)
{
	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
	int i;

	/* Clear the RX descriptors */
	for (i = 0; i < DMA_RX_SIZE; i++)
		if (priv->extend_desc)
			stmmac_init_rx_desc(priv, &rx_q->dma_erx[i].basic,
					priv->use_riwt, priv->mode,
					(i == DMA_RX_SIZE - 1),
					priv->dma_buf_sz);
		else
			stmmac_init_rx_desc(priv, &rx_q->dma_rx[i],
					priv->use_riwt, priv->mode,
					(i == DMA_RX_SIZE - 1),
					priv->dma_buf_sz);
}

/**
 * stmmac_clear_tx_descriptors - clear tx descriptors
 * @priv: driver private structure
 * @queue: TX queue index.
 * Description: this function is called to clear the TX descriptors
 * whether basic or extended descriptors are used.
 */
static void stmmac_clear_tx_descriptors(struct stmmac_priv *priv, u32 queue)
{
	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
	int i;

	/* Clear the TX descriptors */
	for (i = 0; i < DMA_TX_SIZE; i++)
		if (priv->extend_desc)
			stmmac_init_tx_desc(priv, &tx_q->dma_etx[i].basic,
					priv->mode, (i == DMA_TX_SIZE - 1));
		else
			stmmac_init_tx_desc(priv, &tx_q->dma_tx[i],
					priv->mode, (i == DMA_TX_SIZE - 1));
}

/**
 * stmmac_clear_descriptors - clear descriptors
 * @priv: driver private structure
 * Description: this function is called to clear the TX and RX descriptors
 * whether basic or extended descriptors are used.
 */
static void stmmac_clear_descriptors(struct stmmac_priv *priv)
{
	u32 rx_queue_cnt = priv->plat->rx_queues_to_use;
	u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
	u32 queue;

	/* Clear the RX descriptors */
	for (queue = 0; queue < rx_queue_cnt; queue++)
		stmmac_clear_rx_descriptors(priv, queue);

	/* Clear the TX descriptors */
	for (queue = 0; queue < tx_queue_cnt; queue++)
		stmmac_clear_tx_descriptors(priv, queue);
}

/**
 * stmmac_init_rx_buffers - init the RX descriptor buffer.
 * @priv: driver private structure
 * @p: descriptor pointer
 * @i: descriptor index
 * @flags: gfp flag
 * @queue: RX queue index
 * Description: this function is called to allocate a receive buffer, perform
 * the DMA mapping and init the descriptor.
 */
static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
				  int i, gfp_t flags, u32 queue)
{
	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
	struct sk_buff *skb;

	skb = __netdev_alloc_skb_ip_align(priv->dev, priv->dma_buf_sz, flags);
	if (!skb) {
		netdev_err(priv->dev,
			   "%s: Rx init fails; skb is NULL\n", __func__);
		return -ENOMEM;
	}
	rx_q->rx_skbuff[i] = skb;
	rx_q->rx_skbuff_dma[i] = dma_map_single(priv->device, skb->data,
						priv->dma_buf_sz,
						DMA_FROM_DEVICE);
	if (dma_mapping_error(priv->device, rx_q->rx_skbuff_dma[i])) {
		netdev_err(priv->dev, "%s: DMA mapping error\n", __func__);
		dev_kfree_skb_any(skb);
		return -EINVAL;
	}

	stmmac_set_desc_addr(priv, p, rx_q->rx_skbuff_dma[i]);

	if (priv->dma_buf_sz == BUF_SIZE_16KiB)
		stmmac_init_desc3(priv, p);

	return 0;
}

/**
 * stmmac_free_rx_buffer - free RX dma buffers
 * @priv: private structure
 * @queue: RX queue index
 * @i: buffer index.
 */
static void stmmac_free_rx_buffer(struct stmmac_priv *priv, u32 queue, int i)
{
	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];

	if (rx_q->rx_skbuff[i]) {
		dma_unmap_single(priv->device, rx_q->rx_skbuff_dma[i],
				 priv->dma_buf_sz, DMA_FROM_DEVICE);
		dev_kfree_skb_any(rx_q->rx_skbuff[i]);
	}
	rx_q->rx_skbuff[i] = NULL;
}

/**
 * stmmac_free_tx_buffer - free TX dma buffers
 * @priv: private structure
 * @queue: TX queue index
 * @i: buffer index.
 */
static void stmmac_free_tx_buffer(struct stmmac_priv *priv, u32 queue, int i)
{
	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];

	if (tx_q->tx_skbuff_dma[i].buf) {
		if (tx_q->tx_skbuff_dma[i].map_as_page)
			dma_unmap_page(priv->device,
				       tx_q->tx_skbuff_dma[i].buf,
				       tx_q->tx_skbuff_dma[i].len,
				       DMA_TO_DEVICE);
		else
			dma_unmap_single(priv->device,
					 tx_q->tx_skbuff_dma[i].buf,
					 tx_q->tx_skbuff_dma[i].len,
					 DMA_TO_DEVICE);
	}

	if (tx_q->tx_skbuff[i]) {
		dev_kfree_skb_any(tx_q->tx_skbuff[i]);
		tx_q->tx_skbuff[i] = NULL;
		tx_q->tx_skbuff_dma[i].buf = 0;
		tx_q->tx_skbuff_dma[i].map_as_page = false;
	}
}

/**
 * init_dma_rx_desc_rings - init the RX descriptor rings
 * @dev: net device structure
 * @flags: gfp flag.
 * Description: this function initializes the DMA RX descriptors
 * and allocates the socket buffers. It supports the chained and ring
 * modes.
 */
static int init_dma_rx_desc_rings(struct net_device *dev, gfp_t flags)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	u32 rx_count = priv->plat->rx_queues_to_use;
	int ret = -ENOMEM;
	int bfsize = 0;
	int queue;
	int i;

	bfsize = stmmac_set_16kib_bfsize(priv, dev->mtu);
	if (bfsize < 0)
		bfsize = 0;

	if (bfsize < BUF_SIZE_16KiB)
		bfsize = stmmac_set_bfsize(dev->mtu, priv->dma_buf_sz);

	priv->dma_buf_sz = bfsize;

	/* RX INITIALIZATION */
	netif_dbg(priv, probe, priv->dev,
		  "SKB addresses:\nskb\t\tskb data\tdma data\n");

	for (queue = 0; queue < rx_count; queue++) {
		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];

		netif_dbg(priv, probe, priv->dev,
			  "(%s) dma_rx_phy=0x%08x\n", __func__,
			  (u32)rx_q->dma_rx_phy);

		for (i = 0; i < DMA_RX_SIZE; i++) {
			struct dma_desc *p;

			if (priv->extend_desc)
				p = &((rx_q->dma_erx + i)->basic);
			else
				p = rx_q->dma_rx + i;

			ret = stmmac_init_rx_buffers(priv, p, i, flags,
						     queue);
			if (ret)
				goto err_init_rx_buffers;

			netif_dbg(priv, probe, priv->dev, "[%p]\t[%p]\t[%x]\n",
				  rx_q->rx_skbuff[i], rx_q->rx_skbuff[i]->data,
				  (unsigned int)rx_q->rx_skbuff_dma[i]);
		}

		rx_q->cur_rx = 0;
		rx_q->dirty_rx = (unsigned int)(i - DMA_RX_SIZE);

		stmmac_clear_rx_descriptors(priv, queue);

		/* Setup the chained descriptor addresses */
		if (priv->mode == STMMAC_CHAIN_MODE) {
			if (priv->extend_desc)
				stmmac_mode_init(priv, rx_q->dma_erx,
						rx_q->dma_rx_phy, DMA_RX_SIZE, 1);
			else
				stmmac_mode_init(priv, rx_q->dma_rx,
						rx_q->dma_rx_phy, DMA_RX_SIZE, 0);
		}
	}

	buf_sz = bfsize;

	return 0;

err_init_rx_buffers:
	while (queue >= 0) {
		while (--i >= 0)
			stmmac_free_rx_buffer(priv, queue, i);

		if (queue == 0)
			break;

		i = DMA_RX_SIZE;
		queue--;
	}

	return ret;
}

/**
 * init_dma_tx_desc_rings - init the TX descriptor rings
 * @dev: net device structure.
 * Description: this function initializes the DMA TX descriptors
 * and allocates the socket buffers. It supports the chained and ring
 * modes.
 */
static int init_dma_tx_desc_rings(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
	u32 queue;
	int i;

	for (queue = 0; queue < tx_queue_cnt; queue++) {
		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];

		netif_dbg(priv, probe, priv->dev,
			  "(%s) dma_tx_phy=0x%08x\n", __func__,
			  (u32)tx_q->dma_tx_phy);

		/* Setup the chained descriptor addresses */
		if (priv->mode == STMMAC_CHAIN_MODE) {
			if (priv->extend_desc)
				stmmac_mode_init(priv, tx_q->dma_etx,
						tx_q->dma_tx_phy, DMA_TX_SIZE, 1);
			else
				stmmac_mode_init(priv, tx_q->dma_tx,
						tx_q->dma_tx_phy, DMA_TX_SIZE, 0);
		}

		for (i = 0; i < DMA_TX_SIZE; i++) {
			struct dma_desc *p;

			if (priv->extend_desc)
				p = &((tx_q->dma_etx + i)->basic);
			else
				p = tx_q->dma_tx + i;

			stmmac_clear_desc(priv, p);

			tx_q->tx_skbuff_dma[i].buf = 0;
			tx_q->tx_skbuff_dma[i].map_as_page = false;
			tx_q->tx_skbuff_dma[i].len = 0;
			tx_q->tx_skbuff_dma[i].last_segment = false;
			tx_q->tx_skbuff[i] = NULL;
		}

		tx_q->dirty_tx = 0;
		tx_q->cur_tx = 0;
		tx_q->mss = 0;

		netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue));
	}

	return 0;
}

/**
 * init_dma_desc_rings - init the RX/TX descriptor rings
 * @dev: net device structure
 * @flags: gfp flag.
 * Description: this function initializes the DMA RX/TX descriptors
 * and allocates the socket buffers. It supports the chained and ring
 * modes.
 */
static int init_dma_desc_rings(struct net_device *dev, gfp_t flags)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	int ret;

	ret = init_dma_rx_desc_rings(dev, flags);
	if (ret)
		return ret;

	ret = init_dma_tx_desc_rings(dev);

	stmmac_clear_descriptors(priv);

	if (netif_msg_hw(priv))
		stmmac_display_rings(priv);

	return ret;
}

/**
 * dma_free_rx_skbufs - free RX dma buffers
 * @priv: private structure
 * @queue: RX queue index
 */
static void dma_free_rx_skbufs(struct stmmac_priv *priv, u32 queue)
{
	int i;

	for (i = 0; i < DMA_RX_SIZE; i++)
		stmmac_free_rx_buffer(priv, queue, i);
}

/**
 * dma_free_tx_skbufs - free TX dma buffers
 * @priv: private structure
 * @queue: TX queue index
 */
static void dma_free_tx_skbufs(struct stmmac_priv *priv, u32 queue)
{
	int i;

	for (i = 0; i < DMA_TX_SIZE; i++)
		stmmac_free_tx_buffer(priv, queue, i);
}

/**
 * free_dma_rx_desc_resources - free RX dma desc resources
 * @priv: private structure
 */
static void free_dma_rx_desc_resources(struct stmmac_priv *priv)
{
	u32 rx_count = priv->plat->rx_queues_to_use;
	u32 queue;

	/* Free RX queue resources */
	for (queue = 0; queue < rx_count; queue++) {
		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];

		/* Release the DMA RX socket buffers */
		dma_free_rx_skbufs(priv, queue);

		/* Free DMA regions of consistent memory previously allocated */
		if (!priv->extend_desc)
			dma_free_coherent(priv->device,
					  DMA_RX_SIZE * sizeof(struct dma_desc),
					  rx_q->dma_rx, rx_q->dma_rx_phy);
		else
			dma_free_coherent(priv->device, DMA_RX_SIZE *
					  sizeof(struct dma_extended_desc),
					  rx_q->dma_erx, rx_q->dma_rx_phy);
		kfree(rx_q->rx_skbuff_dma);
		kfree(rx_q->rx_skbuff);
	}
}

/**
 * free_dma_tx_desc_resources - free TX dma desc resources
 * @priv: private structure
 */
static void free_dma_tx_desc_resources(struct stmmac_priv *priv)
{
	u32 tx_count = priv->plat->tx_queues_to_use;
	u32 queue;

	/* Free TX queue resources */
	for (queue = 0; queue < tx_count; queue++) {
		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];

		/* Release the DMA TX socket buffers */
		dma_free_tx_skbufs(priv, queue);

		/* Free DMA regions of consistent memory previously allocated */
		if (!priv->extend_desc)
			dma_free_coherent(priv->device,
					  DMA_TX_SIZE * sizeof(struct dma_desc),
					  tx_q->dma_tx, tx_q->dma_tx_phy);
		else
			dma_free_coherent(priv->device, DMA_TX_SIZE *
					  sizeof(struct dma_extended_desc),
					  tx_q->dma_etx, tx_q->dma_tx_phy);

		kfree(tx_q->tx_skbuff_dma);
		kfree(tx_q->tx_skbuff);
	}
}

/**
 * alloc_dma_rx_desc_resources - alloc RX resources.
 * @priv: private structure
 * Description: according to which descriptor can be used (extend or basic)
 * this function allocates the resources for TX and RX paths. In case of
 * reception, for example, it pre-allocates the RX socket buffers in order to
 * allow the zero-copy mechanism.
 */
static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv)
{
	u32 rx_count = priv->plat->rx_queues_to_use;
	int ret = -ENOMEM;
	u32 queue;

	/* RX queues buffers and DMA */
	for (queue = 0; queue < rx_count; queue++) {
		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];

		rx_q->queue_index = queue;
		rx_q->priv_data = priv;

		rx_q->rx_skbuff_dma = kmalloc_array(DMA_RX_SIZE,
						    sizeof(dma_addr_t),
						    GFP_KERNEL);
		if (!rx_q->rx_skbuff_dma)
			goto err_dma;

		rx_q->rx_skbuff = kmalloc_array(DMA_RX_SIZE,
						sizeof(struct sk_buff *),
						GFP_KERNEL);
		if (!rx_q->rx_skbuff)
			goto err_dma;

		if (priv->extend_desc) {
			rx_q->dma_erx = dma_alloc_coherent(priv->device,
							   DMA_RX_SIZE * sizeof(struct dma_extended_desc),
							   &rx_q->dma_rx_phy,
							   GFP_KERNEL);
			if (!rx_q->dma_erx)
				goto err_dma;

		} else {
			rx_q->dma_rx = dma_alloc_coherent(priv->device,
							  DMA_RX_SIZE * sizeof(struct dma_desc),
							  &rx_q->dma_rx_phy,
							  GFP_KERNEL);
			if (!rx_q->dma_rx)
				goto err_dma;
		}
	}

	return 0;

err_dma:
	free_dma_rx_desc_resources(priv);

	return ret;
}

/**
 * alloc_dma_tx_desc_resources - alloc TX resources.
 * @priv: private structure
 * Description: according to which descriptor can be used (extend or basic)
 * this function allocates the resources for TX and RX paths. In case of
 * reception, for example, it pre-allocates the RX socket buffers in order to
 * allow the zero-copy mechanism.
 */
static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv)
{
	u32 tx_count = priv->plat->tx_queues_to_use;
	int ret = -ENOMEM;
	u32 queue;

	/* TX queues buffers and DMA */
	for (queue = 0; queue < tx_count; queue++) {
		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];

		tx_q->queue_index = queue;
		tx_q->priv_data = priv;

		tx_q->tx_skbuff_dma = kmalloc_array(DMA_TX_SIZE,
						    sizeof(*tx_q->tx_skbuff_dma),
						    GFP_KERNEL);
		if (!tx_q->tx_skbuff_dma)
			goto err_dma;

		tx_q->tx_skbuff = kmalloc_array(DMA_TX_SIZE,
						sizeof(struct sk_buff *),
						GFP_KERNEL);
		if (!tx_q->tx_skbuff)
			goto err_dma;

		if (priv->extend_desc) {
			tx_q->dma_etx = dma_alloc_coherent(priv->device,
							   DMA_TX_SIZE * sizeof(struct dma_extended_desc),
							   &tx_q->dma_tx_phy,
							   GFP_KERNEL);
			if (!tx_q->dma_etx)
				goto err_dma;
		} else {
			tx_q->dma_tx = dma_alloc_coherent(priv->device,
							  DMA_TX_SIZE * sizeof(struct dma_desc),
							  &tx_q->dma_tx_phy,
							  GFP_KERNEL);
			if (!tx_q->dma_tx)
				goto err_dma;
		}
	}

	return 0;

err_dma:
	free_dma_tx_desc_resources(priv);

	return ret;
}

/**
 * alloc_dma_desc_resources - alloc TX/RX resources.
 * @priv: private structure
 * Description: according to which descriptor can be used (extend or basic)
 * this function allocates the resources for TX and RX paths. In case of
 * reception, for example, it pre-allocates the RX socket buffers in order to
 * allow the zero-copy mechanism.
 */
static int alloc_dma_desc_resources(struct stmmac_priv *priv)
{
	/* RX Allocation */
	int ret = alloc_dma_rx_desc_resources(priv);

	if (ret)
		return ret;

	ret = alloc_dma_tx_desc_resources(priv);

	return ret;
}

/**
 * free_dma_desc_resources - free dma desc resources
 * @priv: private structure
 */
static void free_dma_desc_resources(struct stmmac_priv *priv)
{
	/* Release the DMA RX socket buffers */
	free_dma_rx_desc_resources(priv);

	/* Release the DMA TX socket buffers */
	free_dma_tx_desc_resources(priv);
}

/**
 * stmmac_mac_enable_rx_queues - Enable MAC rx queues
 * @priv: driver private structure
 * Description: It is used for enabling the rx queues in the MAC
 */
static void stmmac_mac_enable_rx_queues(struct stmmac_priv *priv)
{
	u32 rx_queues_count = priv->plat->rx_queues_to_use;
	int queue;
	u8 mode;

	for (queue = 0; queue < rx_queues_count; queue++) {
		mode = priv->plat->rx_queues_cfg[queue].mode_to_use;
		stmmac_rx_queue_enable(priv, priv->hw, mode, queue);
	}
}

/**
 * stmmac_start_rx_dma - start RX DMA channel
 * @priv: driver private structure
 * @chan: RX channel index
 * Description:
 * This starts a RX DMA channel
 */
static void stmmac_start_rx_dma(struct stmmac_priv *priv, u32 chan)
{
	netdev_dbg(priv->dev, "DMA RX processes started in channel %d\n", chan);
	stmmac_start_rx(priv, priv->ioaddr, chan);
}

/**
 * stmmac_start_tx_dma - start TX DMA channel
 * @priv: driver private structure
 * @chan: TX channel index
 * Description:
 * This starts a TX DMA channel
 */
static void stmmac_start_tx_dma(struct stmmac_priv *priv, u32 chan)
{
	netdev_dbg(priv->dev, "DMA TX processes started in channel %d\n", chan);
	stmmac_start_tx(priv, priv->ioaddr, chan);
}

/**
 * stmmac_stop_rx_dma - stop RX DMA channel
 * @priv: driver private structure
 * @chan: RX channel index
 * Description:
 * This stops a RX DMA channel
 */
static void stmmac_stop_rx_dma(struct stmmac_priv *priv, u32 chan)
{
	netdev_dbg(priv->dev, "DMA RX processes stopped in channel %d\n", chan);
	stmmac_stop_rx(priv, priv->ioaddr, chan);
}

/**
 * stmmac_stop_tx_dma - stop TX DMA channel
 * @priv: driver private structure
 * @chan: TX channel index
 * Description:
 * This stops a TX DMA channel
 */
static void stmmac_stop_tx_dma(struct stmmac_priv *priv, u32 chan)
{
	netdev_dbg(priv->dev, "DMA TX processes stopped in channel %d\n", chan);
	stmmac_stop_tx(priv, priv->ioaddr, chan);
}

/**
 * stmmac_start_all_dma - start all RX and TX DMA channels
 * @priv: driver private structure
 * Description:
 * This starts all the RX and TX DMA channels
 */
static void stmmac_start_all_dma(struct stmmac_priv *priv)
{
	u32 rx_channels_count = priv->plat->rx_queues_to_use;
	u32 tx_channels_count = priv->plat->tx_queues_to_use;
	u32 chan = 0;

	for (chan = 0; chan < rx_channels_count; chan++)
		stmmac_start_rx_dma(priv, chan);

	for (chan = 0; chan < tx_channels_count; chan++)
		stmmac_start_tx_dma(priv, chan);
}

/**
 * stmmac_stop_all_dma - stop all RX and TX DMA channels
 * @priv: driver private structure
 * Description:
 * This stops the RX and TX DMA channels
 */
static void stmmac_stop_all_dma(struct stmmac_priv *priv)
{
	u32 rx_channels_count = priv->plat->rx_queues_to_use;
	u32 tx_channels_count = priv->plat->tx_queues_to_use;
	u32 chan = 0;

	for (chan = 0; chan < rx_channels_count; chan++)
		stmmac_stop_rx_dma(priv, chan);

	for (chan = 0; chan < tx_channels_count; chan++)
		stmmac_stop_tx_dma(priv, chan);
}

/**
 * stmmac_dma_operation_mode - HW DMA operation mode
 * @priv: driver private structure
 * Description: it is used for configuring the DMA operation mode register in
 * order to program the tx/rx DMA thresholds or Store-And-Forward mode.
 */
static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
{
	u32 rx_channels_count = priv->plat->rx_queues_to_use;
	u32 tx_channels_count = priv->plat->tx_queues_to_use;
	int rxfifosz = priv->plat->rx_fifo_size;
	int txfifosz = priv->plat->tx_fifo_size;
	u32 txmode = 0;
	u32 rxmode = 0;
	u32 chan = 0;
	u8 qmode = 0;

	if (rxfifosz == 0)
		rxfifosz = priv->dma_cap.rx_fifo_size;
	if (txfifosz == 0)
		txfifosz = priv->dma_cap.tx_fifo_size;

	/* Adjust for real per queue fifo size */
	rxfifosz /= rx_channels_count;
	txfifosz /= tx_channels_count;

	if (priv->plat->force_thresh_dma_mode) {
		txmode = tc;
		rxmode = tc;
	} else if (priv->plat->force_sf_dma_mode || priv->plat->tx_coe) {
		/*
		 * In case of GMAC, SF mode can be enabled
		 * to perform the TX COE in HW. This depends on:
		 * 1) TX COE being actually supported
		 * 2) There is no bugged Jumbo frame support
		 *    that needs to not insert csum in the TDES.
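		 * If SF cannot be used, the driver falls back to threshold
		 * mode using the 'tc' module parameter (see below).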
		 */
		txmode = SF_DMA_MODE;
		rxmode = SF_DMA_MODE;
		priv->xstats.threshold = SF_DMA_MODE;
	} else {
		txmode = tc;
		rxmode = SF_DMA_MODE;
	}

	/* configure all channels */
	for (chan = 0; chan < rx_channels_count; chan++) {
		qmode = priv->plat->rx_queues_cfg[chan].mode_to_use;

		stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan,
				rxfifosz, qmode);
		stmmac_set_dma_bfsize(priv, priv->ioaddr, priv->dma_buf_sz,
				chan);
	}

	for (chan = 0; chan < tx_channels_count; chan++) {
		qmode = priv->plat->tx_queues_cfg[chan].mode_to_use;

		stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan,
				txfifosz, qmode);
	}
}

/**
 * stmmac_tx_clean - to manage the transmission completion
 * @priv: driver private structure
 * @queue: TX queue index
 * Description: it reclaims the transmit resources after transmission completes.
 */
static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue)
{
	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
	unsigned int bytes_compl = 0, pkts_compl = 0;
	unsigned int entry, count = 0;

	__netif_tx_lock_bh(netdev_get_tx_queue(priv->dev, queue));

	priv->xstats.tx_clean++;

	entry = tx_q->dirty_tx;
	while ((entry != tx_q->cur_tx) && (count < budget)) {
		struct sk_buff *skb = tx_q->tx_skbuff[entry];
		struct dma_desc *p;
		int status;

		if (priv->extend_desc)
			p = (struct dma_desc *)(tx_q->dma_etx + entry);
		else
			p = tx_q->dma_tx + entry;

		status = stmmac_tx_status(priv, &priv->dev->stats,
				&priv->xstats, p, priv->ioaddr);
		/* Check if the descriptor is owned by the DMA */
		if (unlikely(status & tx_dma_own))
			break;

		count++;

		/* Make sure descriptor fields are read after reading
		 * the own bit.
		 */
		dma_rmb();

		/* Just consider the last segment and ...*/
		if (likely(!(status & tx_not_ls))) {
			/* ... verify the status error condition */
verify the status error condition */ 1896 if (unlikely(status & tx_err)) { 1897 priv->dev->stats.tx_errors++; 1898 } else { 1899 priv->dev->stats.tx_packets++; 1900 priv->xstats.tx_pkt_n++; 1901 } 1902 stmmac_get_tx_hwtstamp(priv, p, skb); 1903 } 1904 1905 if (likely(tx_q->tx_skbuff_dma[entry].buf)) { 1906 if (tx_q->tx_skbuff_dma[entry].map_as_page) 1907 dma_unmap_page(priv->device, 1908 tx_q->tx_skbuff_dma[entry].buf, 1909 tx_q->tx_skbuff_dma[entry].len, 1910 DMA_TO_DEVICE); 1911 else 1912 dma_unmap_single(priv->device, 1913 tx_q->tx_skbuff_dma[entry].buf, 1914 tx_q->tx_skbuff_dma[entry].len, 1915 DMA_TO_DEVICE); 1916 tx_q->tx_skbuff_dma[entry].buf = 0; 1917 tx_q->tx_skbuff_dma[entry].len = 0; 1918 tx_q->tx_skbuff_dma[entry].map_as_page = false; 1919 } 1920 1921 stmmac_clean_desc3(priv, tx_q, p); 1922 1923 tx_q->tx_skbuff_dma[entry].last_segment = false; 1924 tx_q->tx_skbuff_dma[entry].is_jumbo = false; 1925 1926 if (likely(skb != NULL)) { 1927 pkts_compl++; 1928 bytes_compl += skb->len; 1929 dev_consume_skb_any(skb); 1930 tx_q->tx_skbuff[entry] = NULL; 1931 } 1932 1933 stmmac_release_tx_desc(priv, p, priv->mode); 1934 1935 entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE); 1936 } 1937 tx_q->dirty_tx = entry; 1938 1939 netdev_tx_completed_queue(netdev_get_tx_queue(priv->dev, queue), 1940 pkts_compl, bytes_compl); 1941 1942 if (unlikely(netif_tx_queue_stopped(netdev_get_tx_queue(priv->dev, 1943 queue))) && 1944 stmmac_tx_avail(priv, queue) > STMMAC_TX_THRESH) { 1945 1946 netif_dbg(priv, tx_done, priv->dev, 1947 "%s: restart transmit\n", __func__); 1948 netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, queue)); 1949 } 1950 1951 if ((priv->eee_enabled) && (!priv->tx_path_in_lpi_mode)) { 1952 stmmac_enable_eee_mode(priv); 1953 mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer)); 1954 } 1955 1956 /* We still have pending packets, let's call for a new scheduling */ 1957 if (tx_q->dirty_tx != tx_q->cur_tx) 1958 mod_timer(&tx_q->txtimer, STMMAC_COAL_TIMER(10)); 1959 1960 __netif_tx_unlock_bh(netdev_get_tx_queue(priv->dev, queue)); 1961 1962 return count; 1963 } 1964 1965 /** 1966 * stmmac_tx_err - to manage the tx error 1967 * @priv: driver private structure 1968 * @chan: channel index 1969 * Description: it cleans the descriptors and restarts the transmission 1970 * in case of transmission errors. 
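 * Roughly, the sequence below is: stop the netdev TX queue and the TX
 * DMA channel, free the queued skbs, re-initialise every TX descriptor,
 * reset cur_tx/dirty_tx/mss and the BQL counters, then restart the DMA
 * channel and wake the queue again.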
1971 */ 1972 static void stmmac_tx_err(struct stmmac_priv *priv, u32 chan) 1973 { 1974 struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan]; 1975 int i; 1976 1977 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, chan)); 1978 1979 stmmac_stop_tx_dma(priv, chan); 1980 dma_free_tx_skbufs(priv, chan); 1981 for (i = 0; i < DMA_TX_SIZE; i++) 1982 if (priv->extend_desc) 1983 stmmac_init_tx_desc(priv, &tx_q->dma_etx[i].basic, 1984 priv->mode, (i == DMA_TX_SIZE - 1)); 1985 else 1986 stmmac_init_tx_desc(priv, &tx_q->dma_tx[i], 1987 priv->mode, (i == DMA_TX_SIZE - 1)); 1988 tx_q->dirty_tx = 0; 1989 tx_q->cur_tx = 0; 1990 tx_q->mss = 0; 1991 netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, chan)); 1992 stmmac_start_tx_dma(priv, chan); 1993 1994 priv->dev->stats.tx_errors++; 1995 netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, chan)); 1996 } 1997 1998 /** 1999 * stmmac_set_dma_operation_mode - Set DMA operation mode by channel 2000 * @priv: driver private structure 2001 * @txmode: TX operating mode 2002 * @rxmode: RX operating mode 2003 * @chan: channel index 2004 * Description: it is used for configuring of the DMA operation mode in 2005 * runtime in order to program the tx/rx DMA thresholds or Store-And-Forward 2006 * mode. 2007 */ 2008 static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode, 2009 u32 rxmode, u32 chan) 2010 { 2011 u8 rxqmode = priv->plat->rx_queues_cfg[chan].mode_to_use; 2012 u8 txqmode = priv->plat->tx_queues_cfg[chan].mode_to_use; 2013 u32 rx_channels_count = priv->plat->rx_queues_to_use; 2014 u32 tx_channels_count = priv->plat->tx_queues_to_use; 2015 int rxfifosz = priv->plat->rx_fifo_size; 2016 int txfifosz = priv->plat->tx_fifo_size; 2017 2018 if (rxfifosz == 0) 2019 rxfifosz = priv->dma_cap.rx_fifo_size; 2020 if (txfifosz == 0) 2021 txfifosz = priv->dma_cap.tx_fifo_size; 2022 2023 /* Adjust for real per queue fifo size */ 2024 rxfifosz /= rx_channels_count; 2025 txfifosz /= tx_channels_count; 2026 2027 stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan, rxfifosz, rxqmode); 2028 stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan, txfifosz, txqmode); 2029 } 2030 2031 static bool stmmac_safety_feat_interrupt(struct stmmac_priv *priv) 2032 { 2033 int ret; 2034 2035 ret = stmmac_safety_feat_irq_status(priv, priv->dev, 2036 priv->ioaddr, priv->dma_cap.asp, &priv->sstats); 2037 if (ret && (ret != -EINVAL)) { 2038 stmmac_global_err(priv); 2039 return true; 2040 } 2041 2042 return false; 2043 } 2044 2045 static int stmmac_napi_check(struct stmmac_priv *priv, u32 chan) 2046 { 2047 int status = stmmac_dma_interrupt_status(priv, priv->ioaddr, 2048 &priv->xstats, chan); 2049 struct stmmac_channel *ch = &priv->channel[chan]; 2050 2051 if ((status & handle_rx) && (chan < priv->plat->rx_queues_to_use)) { 2052 stmmac_disable_dma_irq(priv, priv->ioaddr, chan); 2053 napi_schedule_irqoff(&ch->rx_napi); 2054 } 2055 2056 if ((status & handle_tx) && (chan < priv->plat->tx_queues_to_use)) { 2057 stmmac_disable_dma_irq(priv, priv->ioaddr, chan); 2058 napi_schedule_irqoff(&ch->tx_napi); 2059 } 2060 2061 return status; 2062 } 2063 2064 /** 2065 * stmmac_dma_interrupt - DMA ISR 2066 * @priv: driver private structure 2067 * Description: this is the DMA ISR. It is called by the main ISR. 2068 * It calls the dwmac dma routine and schedule poll method in case of some 2069 * work can be done. 
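 * For example, if a channel reports tx_hard_error_bump_tc while running
 * in threshold mode, the threshold "tc" is bumped by 64 (while it is
 * still at or below 256) and reprogrammed through
 * stmmac_set_dma_operation_mode(); a plain tx_hard_error instead
 * triggers a channel restart via stmmac_tx_err().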
2070 */ 2071 static void stmmac_dma_interrupt(struct stmmac_priv *priv) 2072 { 2073 u32 tx_channel_count = priv->plat->tx_queues_to_use; 2074 u32 rx_channel_count = priv->plat->rx_queues_to_use; 2075 u32 channels_to_check = tx_channel_count > rx_channel_count ? 2076 tx_channel_count : rx_channel_count; 2077 u32 chan; 2078 int status[max_t(u32, MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES)]; 2079 2080 /* Make sure we never check beyond our status buffer. */ 2081 if (WARN_ON_ONCE(channels_to_check > ARRAY_SIZE(status))) 2082 channels_to_check = ARRAY_SIZE(status); 2083 2084 for (chan = 0; chan < channels_to_check; chan++) 2085 status[chan] = stmmac_napi_check(priv, chan); 2086 2087 for (chan = 0; chan < tx_channel_count; chan++) { 2088 if (unlikely(status[chan] & tx_hard_error_bump_tc)) { 2089 /* Try to bump up the dma threshold on this failure */ 2090 if (unlikely(priv->xstats.threshold != SF_DMA_MODE) && 2091 (tc <= 256)) { 2092 tc += 64; 2093 if (priv->plat->force_thresh_dma_mode) 2094 stmmac_set_dma_operation_mode(priv, 2095 tc, 2096 tc, 2097 chan); 2098 else 2099 stmmac_set_dma_operation_mode(priv, 2100 tc, 2101 SF_DMA_MODE, 2102 chan); 2103 priv->xstats.threshold = tc; 2104 } 2105 } else if (unlikely(status[chan] == tx_hard_error)) { 2106 stmmac_tx_err(priv, chan); 2107 } 2108 } 2109 } 2110 2111 /** 2112 * stmmac_mmc_setup: setup the Mac Management Counters (MMC) 2113 * @priv: driver private structure 2114 * Description: this masks the MMC irq, in fact, the counters are managed in SW. 2115 */ 2116 static void stmmac_mmc_setup(struct stmmac_priv *priv) 2117 { 2118 unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET | 2119 MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET; 2120 2121 dwmac_mmc_intr_all_mask(priv->mmcaddr); 2122 2123 if (priv->dma_cap.rmon) { 2124 dwmac_mmc_ctrl(priv->mmcaddr, mode); 2125 memset(&priv->mmc, 0, sizeof(struct stmmac_counters)); 2126 } else 2127 netdev_info(priv->dev, "No MAC Management Counters available\n"); 2128 } 2129 2130 /** 2131 * stmmac_get_hw_features - get MAC capabilities from the HW cap. register. 2132 * @priv: driver private structure 2133 * Description: 2134 * new GMAC chip generations have a new register to indicate the 2135 * presence of the optional feature/functions. 2136 * This can be also used to override the value passed through the 2137 * platform and necessary for old MAC10/100 and GMAC chips. 2138 */ 2139 static int stmmac_get_hw_features(struct stmmac_priv *priv) 2140 { 2141 return stmmac_get_hw_feature(priv, priv->ioaddr, &priv->dma_cap) == 0; 2142 } 2143 2144 /** 2145 * stmmac_check_ether_addr - check if the MAC addr is valid 2146 * @priv: driver private structure 2147 * Description: 2148 * it is to verify if the MAC address is valid, in case of failures it 2149 * generates a random MAC address 2150 */ 2151 static void stmmac_check_ether_addr(struct stmmac_priv *priv) 2152 { 2153 if (!is_valid_ether_addr(priv->dev->dev_addr)) { 2154 stmmac_get_umac_addr(priv, priv->hw, priv->dev->dev_addr, 0); 2155 if (!is_valid_ether_addr(priv->dev->dev_addr)) 2156 eth_hw_addr_random(priv->dev); 2157 netdev_info(priv->dev, "device MAC address %pM\n", 2158 priv->dev->dev_addr); 2159 } 2160 } 2161 2162 /** 2163 * stmmac_init_dma_engine - DMA init. 2164 * @priv: driver private structure 2165 * Description: 2166 * It inits the DMA invoking the specific MAC/GMAC callback. 2167 * Some DMA parameters can be passed from the platform; 2168 * in case of these are not passed a default is kept for the MAC or GMAC. 
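 * A minimal sketch of the platform data consumed here, shown only as an
 * example (the field names are assumed to match struct stmmac_dma_cfg
 * and the values are illustrative, not taken from a real board file):
 *
 *	static struct stmmac_dma_cfg example_dma_cfg = {
 *		.pbl = 8,
 *		.fixed_burst = 1,
 *		.aal = 1,
 *	};
 *	plat_dat->dma_cfg = &example_dma_cfg;
 *
 * where .pbl is the programmable burst length and .aal requests
 * address-aligned beats. Without a dma_cfg and a non-zero pbl this
 * function returns -EINVAL.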
2169 */ 2170 static int stmmac_init_dma_engine(struct stmmac_priv *priv) 2171 { 2172 u32 rx_channels_count = priv->plat->rx_queues_to_use; 2173 u32 tx_channels_count = priv->plat->tx_queues_to_use; 2174 u32 dma_csr_ch = max(rx_channels_count, tx_channels_count); 2175 struct stmmac_rx_queue *rx_q; 2176 struct stmmac_tx_queue *tx_q; 2177 u32 chan = 0; 2178 int atds = 0; 2179 int ret = 0; 2180 2181 if (!priv->plat->dma_cfg || !priv->plat->dma_cfg->pbl) { 2182 dev_err(priv->device, "Invalid DMA configuration\n"); 2183 return -EINVAL; 2184 } 2185 2186 if (priv->extend_desc && (priv->mode == STMMAC_RING_MODE)) 2187 atds = 1; 2188 2189 ret = stmmac_reset(priv, priv->ioaddr); 2190 if (ret) { 2191 dev_err(priv->device, "Failed to reset the dma\n"); 2192 return ret; 2193 } 2194 2195 /* DMA Configuration */ 2196 stmmac_dma_init(priv, priv->ioaddr, priv->plat->dma_cfg, atds); 2197 2198 if (priv->plat->axi) 2199 stmmac_axi(priv, priv->ioaddr, priv->plat->axi); 2200 2201 /* DMA CSR Channel configuration */ 2202 for (chan = 0; chan < dma_csr_ch; chan++) 2203 stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan); 2204 2205 /* DMA RX Channel Configuration */ 2206 for (chan = 0; chan < rx_channels_count; chan++) { 2207 rx_q = &priv->rx_queue[chan]; 2208 2209 stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg, 2210 rx_q->dma_rx_phy, chan); 2211 2212 rx_q->rx_tail_addr = rx_q->dma_rx_phy + 2213 (DMA_RX_SIZE * sizeof(struct dma_desc)); 2214 stmmac_set_rx_tail_ptr(priv, priv->ioaddr, 2215 rx_q->rx_tail_addr, chan); 2216 } 2217 2218 /* DMA TX Channel Configuration */ 2219 for (chan = 0; chan < tx_channels_count; chan++) { 2220 tx_q = &priv->tx_queue[chan]; 2221 2222 stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg, 2223 tx_q->dma_tx_phy, chan); 2224 2225 tx_q->tx_tail_addr = tx_q->dma_tx_phy; 2226 stmmac_set_tx_tail_ptr(priv, priv->ioaddr, 2227 tx_q->tx_tail_addr, chan); 2228 } 2229 2230 return ret; 2231 } 2232 2233 static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue) 2234 { 2235 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue]; 2236 2237 mod_timer(&tx_q->txtimer, STMMAC_COAL_TIMER(priv->tx_coal_timer)); 2238 } 2239 2240 /** 2241 * stmmac_tx_timer - mitigation sw timer for tx. 2242 * @data: data pointer 2243 * Description: 2244 * This is the timer handler to directly invoke the stmmac_tx_clean. 2245 */ 2246 static void stmmac_tx_timer(struct timer_list *t) 2247 { 2248 struct stmmac_tx_queue *tx_q = from_timer(tx_q, t, txtimer); 2249 struct stmmac_priv *priv = tx_q->priv_data; 2250 struct stmmac_channel *ch; 2251 2252 ch = &priv->channel[tx_q->queue_index]; 2253 2254 /* 2255 * If NAPI is already running we can miss some events. Let's rearm 2256 * the timer and try again. 2257 */ 2258 if (likely(napi_schedule_prep(&ch->tx_napi))) 2259 __napi_schedule(&ch->tx_napi); 2260 else 2261 mod_timer(&tx_q->txtimer, STMMAC_COAL_TIMER(10)); 2262 } 2263 2264 /** 2265 * stmmac_init_tx_coalesce - init tx mitigation options. 2266 * @priv: driver private structure 2267 * Description: 2268 * This inits the transmit coalesce parameters: i.e. timer rate, 2269 * timer handler and default threshold used for enabling the 2270 * interrupt on completion bit. 
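 * The defaults are STMMAC_TX_FRAMES frames and a STMMAC_COAL_TX_TIMER
 * period, with one txtimer set up per TX queue. The transmit path then
 * applies the policy roughly as follows (see stmmac_xmit()):
 *
 *	tx_q->tx_count_frames += nfrags + 1;
 *	if (priv->tx_coal_frames <= tx_q->tx_count_frames) {
 *		stmmac_set_tx_ic(priv, desc);
 *		tx_q->tx_count_frames = 0;
 *	} else {
 *		stmmac_tx_timer_arm(priv, queue);
 *	}
 *
 * so a completion interrupt is requested at most once every
 * tx_coal_frames frames and the timer covers the rest.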
2271 */ 2272 static void stmmac_init_tx_coalesce(struct stmmac_priv *priv) 2273 { 2274 u32 tx_channel_count = priv->plat->tx_queues_to_use; 2275 u32 chan; 2276 2277 priv->tx_coal_frames = STMMAC_TX_FRAMES; 2278 priv->tx_coal_timer = STMMAC_COAL_TX_TIMER; 2279 2280 for (chan = 0; chan < tx_channel_count; chan++) { 2281 struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan]; 2282 2283 timer_setup(&tx_q->txtimer, stmmac_tx_timer, 0); 2284 } 2285 } 2286 2287 static void stmmac_set_rings_length(struct stmmac_priv *priv) 2288 { 2289 u32 rx_channels_count = priv->plat->rx_queues_to_use; 2290 u32 tx_channels_count = priv->plat->tx_queues_to_use; 2291 u32 chan; 2292 2293 /* set TX ring length */ 2294 for (chan = 0; chan < tx_channels_count; chan++) 2295 stmmac_set_tx_ring_len(priv, priv->ioaddr, 2296 (DMA_TX_SIZE - 1), chan); 2297 2298 /* set RX ring length */ 2299 for (chan = 0; chan < rx_channels_count; chan++) 2300 stmmac_set_rx_ring_len(priv, priv->ioaddr, 2301 (DMA_RX_SIZE - 1), chan); 2302 } 2303 2304 /** 2305 * stmmac_set_tx_queue_weight - Set TX queue weight 2306 * @priv: driver private structure 2307 * Description: It is used for setting TX queues weight 2308 */ 2309 static void stmmac_set_tx_queue_weight(struct stmmac_priv *priv) 2310 { 2311 u32 tx_queues_count = priv->plat->tx_queues_to_use; 2312 u32 weight; 2313 u32 queue; 2314 2315 for (queue = 0; queue < tx_queues_count; queue++) { 2316 weight = priv->plat->tx_queues_cfg[queue].weight; 2317 stmmac_set_mtl_tx_queue_weight(priv, priv->hw, weight, queue); 2318 } 2319 } 2320 2321 /** 2322 * stmmac_configure_cbs - Configure CBS in TX queue 2323 * @priv: driver private structure 2324 * Description: It is used for configuring CBS in AVB TX queues 2325 */ 2326 static void stmmac_configure_cbs(struct stmmac_priv *priv) 2327 { 2328 u32 tx_queues_count = priv->plat->tx_queues_to_use; 2329 u32 mode_to_use; 2330 u32 queue; 2331 2332 /* queue 0 is reserved for legacy traffic */ 2333 for (queue = 1; queue < tx_queues_count; queue++) { 2334 mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use; 2335 if (mode_to_use == MTL_QUEUE_DCB) 2336 continue; 2337 2338 stmmac_config_cbs(priv, priv->hw, 2339 priv->plat->tx_queues_cfg[queue].send_slope, 2340 priv->plat->tx_queues_cfg[queue].idle_slope, 2341 priv->plat->tx_queues_cfg[queue].high_credit, 2342 priv->plat->tx_queues_cfg[queue].low_credit, 2343 queue); 2344 } 2345 } 2346 2347 /** 2348 * stmmac_rx_queue_dma_chan_map - Map RX queue to RX dma channel 2349 * @priv: driver private structure 2350 * Description: It is used for mapping RX queues to RX dma channels 2351 */ 2352 static void stmmac_rx_queue_dma_chan_map(struct stmmac_priv *priv) 2353 { 2354 u32 rx_queues_count = priv->plat->rx_queues_to_use; 2355 u32 queue; 2356 u32 chan; 2357 2358 for (queue = 0; queue < rx_queues_count; queue++) { 2359 chan = priv->plat->rx_queues_cfg[queue].chan; 2360 stmmac_map_mtl_to_dma(priv, priv->hw, queue, chan); 2361 } 2362 } 2363 2364 /** 2365 * stmmac_mac_config_rx_queues_prio - Configure RX Queue priority 2366 * @priv: driver private structure 2367 * Description: It is used for configuring the RX Queue Priority 2368 */ 2369 static void stmmac_mac_config_rx_queues_prio(struct stmmac_priv *priv) 2370 { 2371 u32 rx_queues_count = priv->plat->rx_queues_to_use; 2372 u32 queue; 2373 u32 prio; 2374 2375 for (queue = 0; queue < rx_queues_count; queue++) { 2376 if (!priv->plat->rx_queues_cfg[queue].use_prio) 2377 continue; 2378 2379 prio = priv->plat->rx_queues_cfg[queue].prio; 2380 stmmac_rx_queue_prio(priv, priv->hw, 
prio, queue); 2381 } 2382 } 2383 2384 /** 2385 * stmmac_mac_config_tx_queues_prio - Configure TX Queue priority 2386 * @priv: driver private structure 2387 * Description: It is used for configuring the TX Queue Priority 2388 */ 2389 static void stmmac_mac_config_tx_queues_prio(struct stmmac_priv *priv) 2390 { 2391 u32 tx_queues_count = priv->plat->tx_queues_to_use; 2392 u32 queue; 2393 u32 prio; 2394 2395 for (queue = 0; queue < tx_queues_count; queue++) { 2396 if (!priv->plat->tx_queues_cfg[queue].use_prio) 2397 continue; 2398 2399 prio = priv->plat->tx_queues_cfg[queue].prio; 2400 stmmac_tx_queue_prio(priv, priv->hw, prio, queue); 2401 } 2402 } 2403 2404 /** 2405 * stmmac_mac_config_rx_queues_routing - Configure RX Queue Routing 2406 * @priv: driver private structure 2407 * Description: It is used for configuring the RX queue routing 2408 */ 2409 static void stmmac_mac_config_rx_queues_routing(struct stmmac_priv *priv) 2410 { 2411 u32 rx_queues_count = priv->plat->rx_queues_to_use; 2412 u32 queue; 2413 u8 packet; 2414 2415 for (queue = 0; queue < rx_queues_count; queue++) { 2416 /* no specific packet type routing specified for the queue */ 2417 if (priv->plat->rx_queues_cfg[queue].pkt_route == 0x0) 2418 continue; 2419 2420 packet = priv->plat->rx_queues_cfg[queue].pkt_route; 2421 stmmac_rx_queue_routing(priv, priv->hw, packet, queue); 2422 } 2423 } 2424 2425 /** 2426 * stmmac_mtl_configuration - Configure MTL 2427 * @priv: driver private structure 2428 * Description: It is used for configurring MTL 2429 */ 2430 static void stmmac_mtl_configuration(struct stmmac_priv *priv) 2431 { 2432 u32 rx_queues_count = priv->plat->rx_queues_to_use; 2433 u32 tx_queues_count = priv->plat->tx_queues_to_use; 2434 2435 if (tx_queues_count > 1) 2436 stmmac_set_tx_queue_weight(priv); 2437 2438 /* Configure MTL RX algorithms */ 2439 if (rx_queues_count > 1) 2440 stmmac_prog_mtl_rx_algorithms(priv, priv->hw, 2441 priv->plat->rx_sched_algorithm); 2442 2443 /* Configure MTL TX algorithms */ 2444 if (tx_queues_count > 1) 2445 stmmac_prog_mtl_tx_algorithms(priv, priv->hw, 2446 priv->plat->tx_sched_algorithm); 2447 2448 /* Configure CBS in AVB TX queues */ 2449 if (tx_queues_count > 1) 2450 stmmac_configure_cbs(priv); 2451 2452 /* Map RX MTL to DMA channels */ 2453 stmmac_rx_queue_dma_chan_map(priv); 2454 2455 /* Enable MAC RX Queues */ 2456 stmmac_mac_enable_rx_queues(priv); 2457 2458 /* Set RX priorities */ 2459 if (rx_queues_count > 1) 2460 stmmac_mac_config_rx_queues_prio(priv); 2461 2462 /* Set TX priorities */ 2463 if (tx_queues_count > 1) 2464 stmmac_mac_config_tx_queues_prio(priv); 2465 2466 /* Set RX routing */ 2467 if (rx_queues_count > 1) 2468 stmmac_mac_config_rx_queues_routing(priv); 2469 } 2470 2471 static void stmmac_safety_feat_configuration(struct stmmac_priv *priv) 2472 { 2473 if (priv->dma_cap.asp) { 2474 netdev_info(priv->dev, "Enabling Safety Features\n"); 2475 stmmac_safety_feat_config(priv, priv->ioaddr, priv->dma_cap.asp); 2476 } else { 2477 netdev_info(priv->dev, "No Safety Features support found\n"); 2478 } 2479 } 2480 2481 /** 2482 * stmmac_hw_setup - setup mac in a usable state. 2483 * @dev : pointer to the device structure. 2484 * Description: 2485 * this is the main function to setup the HW in a usable state because the 2486 * dma engine is reset, the core registers are configured (e.g. AXI, 2487 * Checksum features, timers). The DMA is ready to start receiving and 2488 * transmitting. 
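 * Roughly, the order below is: DMA engine init (including a SW reset),
 * programming of the MAC address, core/MTL/safety-feature configuration,
 * enabling the MAC, setting the DMA operation mode, MMC setup, optional
 * PTP and RX watchdog init, ring length programming, TSO enable and
 * finally starting all DMA channels.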
2489 * Return value: 2490 * 0 on success and an appropriate (-)ve integer as defined in errno.h 2491 * file on failure. 2492 */ 2493 static int stmmac_hw_setup(struct net_device *dev, bool init_ptp) 2494 { 2495 struct stmmac_priv *priv = netdev_priv(dev); 2496 u32 rx_cnt = priv->plat->rx_queues_to_use; 2497 u32 tx_cnt = priv->plat->tx_queues_to_use; 2498 u32 chan; 2499 int ret; 2500 2501 /* DMA initialization and SW reset */ 2502 ret = stmmac_init_dma_engine(priv); 2503 if (ret < 0) { 2504 netdev_err(priv->dev, "%s: DMA engine initialization failed\n", 2505 __func__); 2506 return ret; 2507 } 2508 2509 /* Copy the MAC addr into the HW */ 2510 stmmac_set_umac_addr(priv, priv->hw, dev->dev_addr, 0); 2511 2512 /* PS and related bits will be programmed according to the speed */ 2513 if (priv->hw->pcs) { 2514 int speed = priv->plat->mac_port_sel_speed; 2515 2516 if ((speed == SPEED_10) || (speed == SPEED_100) || 2517 (speed == SPEED_1000)) { 2518 priv->hw->ps = speed; 2519 } else { 2520 dev_warn(priv->device, "invalid port speed\n"); 2521 priv->hw->ps = 0; 2522 } 2523 } 2524 2525 /* Initialize the MAC Core */ 2526 stmmac_core_init(priv, priv->hw, dev); 2527 2528 /* Initialize MTL*/ 2529 stmmac_mtl_configuration(priv); 2530 2531 /* Initialize Safety Features */ 2532 stmmac_safety_feat_configuration(priv); 2533 2534 ret = stmmac_rx_ipc(priv, priv->hw); 2535 if (!ret) { 2536 netdev_warn(priv->dev, "RX IPC Checksum Offload disabled\n"); 2537 priv->plat->rx_coe = STMMAC_RX_COE_NONE; 2538 priv->hw->rx_csum = 0; 2539 } 2540 2541 /* Enable the MAC Rx/Tx */ 2542 stmmac_mac_set(priv, priv->ioaddr, true); 2543 2544 /* Set the HW DMA mode and the COE */ 2545 stmmac_dma_operation_mode(priv); 2546 2547 stmmac_mmc_setup(priv); 2548 2549 if (init_ptp) { 2550 ret = clk_prepare_enable(priv->plat->clk_ptp_ref); 2551 if (ret < 0) 2552 netdev_warn(priv->dev, "failed to enable PTP reference clock: %d\n", ret); 2553 2554 ret = stmmac_init_ptp(priv); 2555 if (ret == -EOPNOTSUPP) 2556 netdev_warn(priv->dev, "PTP not supported by HW\n"); 2557 else if (ret) 2558 netdev_warn(priv->dev, "PTP init failed\n"); 2559 } 2560 2561 priv->tx_lpi_timer = STMMAC_DEFAULT_TWT_LS; 2562 2563 if (priv->use_riwt) { 2564 ret = stmmac_rx_watchdog(priv, priv->ioaddr, MAX_DMA_RIWT, rx_cnt); 2565 if (!ret) 2566 priv->rx_riwt = MAX_DMA_RIWT; 2567 } 2568 2569 if (priv->hw->pcs) 2570 stmmac_pcs_ctrl_ane(priv, priv->hw, 1, priv->hw->ps, 0); 2571 2572 /* set TX and RX rings length */ 2573 stmmac_set_rings_length(priv); 2574 2575 /* Enable TSO */ 2576 if (priv->tso) { 2577 for (chan = 0; chan < tx_cnt; chan++) 2578 stmmac_enable_tso(priv, priv->ioaddr, 1, chan); 2579 } 2580 2581 /* Start the ball rolling... */ 2582 stmmac_start_all_dma(priv); 2583 2584 return 0; 2585 } 2586 2587 static void stmmac_hw_teardown(struct net_device *dev) 2588 { 2589 struct stmmac_priv *priv = netdev_priv(dev); 2590 2591 clk_disable_unprepare(priv->plat->clk_ptp_ref); 2592 } 2593 2594 /** 2595 * stmmac_open - open entry point of the driver 2596 * @dev : pointer to the device structure. 2597 * Description: 2598 * This function is the open entry point of the driver. 2599 * Return value: 2600 * 0 on success and an appropriate (-)ve integer as defined in errno.h 2601 * file on failure. 
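 * Note: the open path attaches the PHY (unless an RGMII/TBI/RTBI PCS is
 * in use), allocates and initialises the DMA descriptor rings, runs
 * stmmac_hw_setup(), arms the TX coalescing timers and requests the
 * main, WoL and LPI IRQ lines before enabling and starting all queues;
 * each error label unwinds those steps in reverse order.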
2602 */ 2603 static int stmmac_open(struct net_device *dev) 2604 { 2605 struct stmmac_priv *priv = netdev_priv(dev); 2606 u32 chan; 2607 int ret; 2608 2609 if (priv->hw->pcs != STMMAC_PCS_RGMII && 2610 priv->hw->pcs != STMMAC_PCS_TBI && 2611 priv->hw->pcs != STMMAC_PCS_RTBI) { 2612 ret = stmmac_init_phy(dev); 2613 if (ret) { 2614 netdev_err(priv->dev, 2615 "%s: Cannot attach to PHY (error: %d)\n", 2616 __func__, ret); 2617 return ret; 2618 } 2619 } 2620 2621 /* Extra statistics */ 2622 memset(&priv->xstats, 0, sizeof(struct stmmac_extra_stats)); 2623 priv->xstats.threshold = tc; 2624 2625 priv->dma_buf_sz = STMMAC_ALIGN(buf_sz); 2626 priv->rx_copybreak = STMMAC_RX_COPYBREAK; 2627 2628 ret = alloc_dma_desc_resources(priv); 2629 if (ret < 0) { 2630 netdev_err(priv->dev, "%s: DMA descriptors allocation failed\n", 2631 __func__); 2632 goto dma_desc_error; 2633 } 2634 2635 ret = init_dma_desc_rings(dev, GFP_KERNEL); 2636 if (ret < 0) { 2637 netdev_err(priv->dev, "%s: DMA descriptors initialization failed\n", 2638 __func__); 2639 goto init_error; 2640 } 2641 2642 ret = stmmac_hw_setup(dev, true); 2643 if (ret < 0) { 2644 netdev_err(priv->dev, "%s: Hw setup failed\n", __func__); 2645 goto init_error; 2646 } 2647 2648 stmmac_init_tx_coalesce(priv); 2649 2650 if (dev->phydev) 2651 phy_start(dev->phydev); 2652 2653 /* Request the IRQ lines */ 2654 ret = request_irq(dev->irq, stmmac_interrupt, 2655 IRQF_SHARED, dev->name, dev); 2656 if (unlikely(ret < 0)) { 2657 netdev_err(priv->dev, 2658 "%s: ERROR: allocating the IRQ %d (error: %d)\n", 2659 __func__, dev->irq, ret); 2660 goto irq_error; 2661 } 2662 2663 /* Request the Wake IRQ in case of another line is used for WoL */ 2664 if (priv->wol_irq != dev->irq) { 2665 ret = request_irq(priv->wol_irq, stmmac_interrupt, 2666 IRQF_SHARED, dev->name, dev); 2667 if (unlikely(ret < 0)) { 2668 netdev_err(priv->dev, 2669 "%s: ERROR: allocating the WoL IRQ %d (%d)\n", 2670 __func__, priv->wol_irq, ret); 2671 goto wolirq_error; 2672 } 2673 } 2674 2675 /* Request the IRQ lines */ 2676 if (priv->lpi_irq > 0) { 2677 ret = request_irq(priv->lpi_irq, stmmac_interrupt, IRQF_SHARED, 2678 dev->name, dev); 2679 if (unlikely(ret < 0)) { 2680 netdev_err(priv->dev, 2681 "%s: ERROR: allocating the LPI IRQ %d (%d)\n", 2682 __func__, priv->lpi_irq, ret); 2683 goto lpiirq_error; 2684 } 2685 } 2686 2687 stmmac_enable_all_queues(priv); 2688 stmmac_start_all_queues(priv); 2689 2690 return 0; 2691 2692 lpiirq_error: 2693 if (priv->wol_irq != dev->irq) 2694 free_irq(priv->wol_irq, dev); 2695 wolirq_error: 2696 free_irq(dev->irq, dev); 2697 irq_error: 2698 if (dev->phydev) 2699 phy_stop(dev->phydev); 2700 2701 for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) 2702 del_timer_sync(&priv->tx_queue[chan].txtimer); 2703 2704 stmmac_hw_teardown(dev); 2705 init_error: 2706 free_dma_desc_resources(priv); 2707 dma_desc_error: 2708 if (dev->phydev) 2709 phy_disconnect(dev->phydev); 2710 2711 return ret; 2712 } 2713 2714 /** 2715 * stmmac_release - close entry point of the driver 2716 * @dev : device pointer. 2717 * Description: 2718 * This is the stop entry point of the driver. 
2719 */
2720 static int stmmac_release(struct net_device *dev)
2721 {
2722 struct stmmac_priv *priv = netdev_priv(dev);
2723 u32 chan;
2724
2725 if (priv->eee_enabled)
2726 del_timer_sync(&priv->eee_ctrl_timer);
2727
2728 /* Stop and disconnect the PHY */
2729 if (dev->phydev) {
2730 phy_stop(dev->phydev);
2731 phy_disconnect(dev->phydev);
2732 }
2733
2734 stmmac_stop_all_queues(priv);
2735
2736 stmmac_disable_all_queues(priv);
2737
2738 for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
2739 del_timer_sync(&priv->tx_queue[chan].txtimer);
2740
2741 /* Free the IRQ lines */
2742 free_irq(dev->irq, dev);
2743 if (priv->wol_irq != dev->irq)
2744 free_irq(priv->wol_irq, dev);
2745 if (priv->lpi_irq > 0)
2746 free_irq(priv->lpi_irq, dev);
2747
2748 /* Stop TX/RX DMA and clear the descriptors */
2749 stmmac_stop_all_dma(priv);
2750
2751 /* Release and free the Rx/Tx resources */
2752 free_dma_desc_resources(priv);
2753
2754 /* Disable the MAC Rx/Tx */
2755 stmmac_mac_set(priv, priv->ioaddr, false);
2756
2757 netif_carrier_off(dev);
2758
2759 stmmac_release_ptp(priv);
2760
2761 return 0;
2762 }
2763
2764 /**
2765 * stmmac_tso_allocator - fill the TSO payload descriptors
2766 * @priv: driver private structure
2767 * @des: buffer start address
2768 * @total_len: total length to fill in descriptors
2769 * @last_segment: condition for the last descriptor
2770 * @queue: TX queue index
2771 * Description:
2772 * This function fills the descriptors with the payload buffer, taking new
2773 * descriptors as needed until the whole length is covered.
2774 */
2775 static void stmmac_tso_allocator(struct stmmac_priv *priv, unsigned int des,
2776 int total_len, bool last_segment, u32 queue)
2777 {
2778 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
2779 struct dma_desc *desc;
2780 u32 buff_size;
2781 int tmp_len;
2782
2783 tmp_len = total_len;
2784
2785 while (tmp_len > 0) {
2786 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
2787 WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
2788 desc = tx_q->dma_tx + tx_q->cur_tx;
2789
2790 desc->des0 = cpu_to_le32(des + (total_len - tmp_len));
2791 buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ?
2792 TSO_MAX_BUFF_SIZE : tmp_len;
2793
2794 stmmac_prepare_tso_tx_desc(priv, desc, 0, buff_size,
2795 0, 1,
2796 (last_segment) && (tmp_len <= TSO_MAX_BUFF_SIZE),
2797 0, 0);
2798
2799 tmp_len -= TSO_MAX_BUFF_SIZE;
2800 }
2801 }
2802
2803 /**
2804 * stmmac_tso_xmit - Tx entry point of the driver for oversized frames (TSO)
2805 * @skb : the socket buffer
2806 * @dev : device pointer
2807 * Description: this is the transmit function that is called on TSO frames
2808 * (support available on GMAC4 and newer chips).
2809 * The diagram below shows the ring programming in case of TSO frames:
2810 *
2811 * First Descriptor
2812 * --------
2813 * | DES0 |---> buffer1 = L2/L3/L4 header
2814 * | DES1 |---> TCP Payload (can continue on next descr...)
2815 * | DES2 |---> buffer 1 and 2 len
2816 * | DES3 |---> must set TSE, TCP hdr len-> [22:19]. TCP payload len [17:0]
2817 * --------
2818 * |
2819 * ...
2820 * |
2821 * --------
2822 * | DES0 | --| Split TCP Payload on Buffers 1 and 2
2823 * | DES1 | --|
2824 * | DES2 | --> buffer 1 and 2 len
2825 * | DES3 |
2826 * --------
2827 *
2828 * The MSS is fixed while TSO is enabled, so the TDES3 context descriptor is only written when the MSS value changes.
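 * For illustration: the availability check below requires at least
 * (skb->len - proto_hdr_len) / TSO_MAX_BUFF_SIZE + 1 free descriptors,
 * so a 65536-byte GSO skb with a 66-byte header needs four payload
 * descriptors at TSO_MAX_BUFF_SIZE = 16383 (SZ_16K - 1).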
2829 */ 2830 static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev) 2831 { 2832 struct dma_desc *desc, *first, *mss_desc = NULL; 2833 struct stmmac_priv *priv = netdev_priv(dev); 2834 int nfrags = skb_shinfo(skb)->nr_frags; 2835 u32 queue = skb_get_queue_mapping(skb); 2836 unsigned int first_entry, des; 2837 struct stmmac_tx_queue *tx_q; 2838 int tmp_pay_len = 0; 2839 u32 pay_len, mss; 2840 u8 proto_hdr_len; 2841 int i; 2842 2843 tx_q = &priv->tx_queue[queue]; 2844 2845 /* Compute header lengths */ 2846 proto_hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); 2847 2848 /* Desc availability based on threshold should be enough safe */ 2849 if (unlikely(stmmac_tx_avail(priv, queue) < 2850 (((skb->len - proto_hdr_len) / TSO_MAX_BUFF_SIZE + 1)))) { 2851 if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) { 2852 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, 2853 queue)); 2854 /* This is a hard error, log it. */ 2855 netdev_err(priv->dev, 2856 "%s: Tx Ring full when queue awake\n", 2857 __func__); 2858 } 2859 return NETDEV_TX_BUSY; 2860 } 2861 2862 pay_len = skb_headlen(skb) - proto_hdr_len; /* no frags */ 2863 2864 mss = skb_shinfo(skb)->gso_size; 2865 2866 /* set new MSS value if needed */ 2867 if (mss != tx_q->mss) { 2868 mss_desc = tx_q->dma_tx + tx_q->cur_tx; 2869 stmmac_set_mss(priv, mss_desc, mss); 2870 tx_q->mss = mss; 2871 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE); 2872 WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]); 2873 } 2874 2875 if (netif_msg_tx_queued(priv)) { 2876 pr_info("%s: tcphdrlen %d, hdr_len %d, pay_len %d, mss %d\n", 2877 __func__, tcp_hdrlen(skb), proto_hdr_len, pay_len, mss); 2878 pr_info("\tskb->len %d, skb->data_len %d\n", skb->len, 2879 skb->data_len); 2880 } 2881 2882 first_entry = tx_q->cur_tx; 2883 WARN_ON(tx_q->tx_skbuff[first_entry]); 2884 2885 desc = tx_q->dma_tx + first_entry; 2886 first = desc; 2887 2888 /* first descriptor: fill Headers on Buf1 */ 2889 des = dma_map_single(priv->device, skb->data, skb_headlen(skb), 2890 DMA_TO_DEVICE); 2891 if (dma_mapping_error(priv->device, des)) 2892 goto dma_map_err; 2893 2894 tx_q->tx_skbuff_dma[first_entry].buf = des; 2895 tx_q->tx_skbuff_dma[first_entry].len = skb_headlen(skb); 2896 2897 first->des0 = cpu_to_le32(des); 2898 2899 /* Fill start of payload in buff2 of first descriptor */ 2900 if (pay_len) 2901 first->des1 = cpu_to_le32(des + proto_hdr_len); 2902 2903 /* If needed take extra descriptors to fill the remaining payload */ 2904 tmp_pay_len = pay_len - TSO_MAX_BUFF_SIZE; 2905 2906 stmmac_tso_allocator(priv, des, tmp_pay_len, (nfrags == 0), queue); 2907 2908 /* Prepare fragments */ 2909 for (i = 0; i < nfrags; i++) { 2910 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 2911 2912 des = skb_frag_dma_map(priv->device, frag, 0, 2913 skb_frag_size(frag), 2914 DMA_TO_DEVICE); 2915 if (dma_mapping_error(priv->device, des)) 2916 goto dma_map_err; 2917 2918 stmmac_tso_allocator(priv, des, skb_frag_size(frag), 2919 (i == nfrags - 1), queue); 2920 2921 tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des; 2922 tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_frag_size(frag); 2923 tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = true; 2924 } 2925 2926 tx_q->tx_skbuff_dma[tx_q->cur_tx].last_segment = true; 2927 2928 /* Only the last descriptor gets to point to the skb. */ 2929 tx_q->tx_skbuff[tx_q->cur_tx] = skb; 2930 2931 /* We've used all descriptors we need for this skb, however, 2932 * advance cur_tx so that it references a fresh descriptor. 
2933 * ndo_start_xmit will fill this descriptor the next time it's 2934 * called and stmmac_tx_clean may clean up to this descriptor. 2935 */ 2936 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE); 2937 2938 if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) { 2939 netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n", 2940 __func__); 2941 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue)); 2942 } 2943 2944 dev->stats.tx_bytes += skb->len; 2945 priv->xstats.tx_tso_frames++; 2946 priv->xstats.tx_tso_nfrags += nfrags; 2947 2948 /* Manage tx mitigation */ 2949 tx_q->tx_count_frames += nfrags + 1; 2950 if (priv->tx_coal_frames <= tx_q->tx_count_frames) { 2951 stmmac_set_tx_ic(priv, desc); 2952 priv->xstats.tx_set_ic_bit++; 2953 tx_q->tx_count_frames = 0; 2954 } else { 2955 stmmac_tx_timer_arm(priv, queue); 2956 } 2957 2958 skb_tx_timestamp(skb); 2959 2960 if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && 2961 priv->hwts_tx_en)) { 2962 /* declare that device is doing timestamping */ 2963 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; 2964 stmmac_enable_tx_timestamp(priv, first); 2965 } 2966 2967 /* Complete the first descriptor before granting the DMA */ 2968 stmmac_prepare_tso_tx_desc(priv, first, 1, 2969 proto_hdr_len, 2970 pay_len, 2971 1, tx_q->tx_skbuff_dma[first_entry].last_segment, 2972 tcp_hdrlen(skb) / 4, (skb->len - proto_hdr_len)); 2973 2974 /* If context desc is used to change MSS */ 2975 if (mss_desc) { 2976 /* Make sure that first descriptor has been completely 2977 * written, including its own bit. This is because MSS is 2978 * actually before first descriptor, so we need to make 2979 * sure that MSS's own bit is the last thing written. 2980 */ 2981 dma_wmb(); 2982 stmmac_set_tx_owner(priv, mss_desc); 2983 } 2984 2985 /* The own bit must be the latest setting done when prepare the 2986 * descriptor and then barrier is needed to make sure that 2987 * all is coherent before granting the DMA engine. 2988 */ 2989 wmb(); 2990 2991 if (netif_msg_pktdata(priv)) { 2992 pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n", 2993 __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry, 2994 tx_q->cur_tx, first, nfrags); 2995 2996 stmmac_display_ring(priv, (void *)tx_q->dma_tx, DMA_TX_SIZE, 0); 2997 2998 pr_info(">>> frame to be transmitted: "); 2999 print_pkt(skb->data, skb_headlen(skb)); 3000 } 3001 3002 netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len); 3003 3004 tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * sizeof(*desc)); 3005 stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue); 3006 3007 return NETDEV_TX_OK; 3008 3009 dma_map_err: 3010 dev_err(priv->device, "Tx dma map failed\n"); 3011 dev_kfree_skb(skb); 3012 priv->dev->stats.tx_dropped++; 3013 return NETDEV_TX_OK; 3014 } 3015 3016 /** 3017 * stmmac_xmit - Tx entry point of the driver 3018 * @skb : the socket buffer 3019 * @dev : device pointer 3020 * Description : this is the tx entry point of the driver. 3021 * It programs the chain or the ring and supports oversized frames 3022 * and SG feature. 
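 * Note that GSO TCP frames are redirected to queue 0 and handed over to
 * stmmac_tso_xmit() when TSO is enabled; for everything else the frame
 * is mapped fragment by fragment and the OWN bit of the first
 * descriptor is set last, once all other descriptors are ready.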
3023 */ 3024 static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) 3025 { 3026 struct stmmac_priv *priv = netdev_priv(dev); 3027 unsigned int nopaged_len = skb_headlen(skb); 3028 int i, csum_insertion = 0, is_jumbo = 0; 3029 u32 queue = skb_get_queue_mapping(skb); 3030 int nfrags = skb_shinfo(skb)->nr_frags; 3031 int entry; 3032 unsigned int first_entry; 3033 struct dma_desc *desc, *first; 3034 struct stmmac_tx_queue *tx_q; 3035 unsigned int enh_desc; 3036 unsigned int des; 3037 3038 tx_q = &priv->tx_queue[queue]; 3039 3040 if (priv->tx_path_in_lpi_mode) 3041 stmmac_disable_eee_mode(priv); 3042 3043 /* Manage oversized TCP frames for GMAC4 device */ 3044 if (skb_is_gso(skb) && priv->tso) { 3045 if (skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) { 3046 /* 3047 * There is no way to determine the number of TSO 3048 * capable Queues. Let's use always the Queue 0 3049 * because if TSO is supported then at least this 3050 * one will be capable. 3051 */ 3052 skb_set_queue_mapping(skb, 0); 3053 3054 return stmmac_tso_xmit(skb, dev); 3055 } 3056 } 3057 3058 if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) { 3059 if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) { 3060 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, 3061 queue)); 3062 /* This is a hard error, log it. */ 3063 netdev_err(priv->dev, 3064 "%s: Tx Ring full when queue awake\n", 3065 __func__); 3066 } 3067 return NETDEV_TX_BUSY; 3068 } 3069 3070 entry = tx_q->cur_tx; 3071 first_entry = entry; 3072 WARN_ON(tx_q->tx_skbuff[first_entry]); 3073 3074 csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL); 3075 3076 if (likely(priv->extend_desc)) 3077 desc = (struct dma_desc *)(tx_q->dma_etx + entry); 3078 else 3079 desc = tx_q->dma_tx + entry; 3080 3081 first = desc; 3082 3083 enh_desc = priv->plat->enh_desc; 3084 /* To program the descriptors according to the size of the frame */ 3085 if (enh_desc) 3086 is_jumbo = stmmac_is_jumbo_frm(priv, skb->len, enh_desc); 3087 3088 if (unlikely(is_jumbo)) { 3089 entry = stmmac_jumbo_frm(priv, tx_q, skb, csum_insertion); 3090 if (unlikely(entry < 0) && (entry != -EINVAL)) 3091 goto dma_map_err; 3092 } 3093 3094 for (i = 0; i < nfrags; i++) { 3095 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 3096 int len = skb_frag_size(frag); 3097 bool last_segment = (i == (nfrags - 1)); 3098 3099 entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE); 3100 WARN_ON(tx_q->tx_skbuff[entry]); 3101 3102 if (likely(priv->extend_desc)) 3103 desc = (struct dma_desc *)(tx_q->dma_etx + entry); 3104 else 3105 desc = tx_q->dma_tx + entry; 3106 3107 des = skb_frag_dma_map(priv->device, frag, 0, len, 3108 DMA_TO_DEVICE); 3109 if (dma_mapping_error(priv->device, des)) 3110 goto dma_map_err; /* should reuse desc w/o issues */ 3111 3112 tx_q->tx_skbuff_dma[entry].buf = des; 3113 3114 stmmac_set_desc_addr(priv, desc, des); 3115 3116 tx_q->tx_skbuff_dma[entry].map_as_page = true; 3117 tx_q->tx_skbuff_dma[entry].len = len; 3118 tx_q->tx_skbuff_dma[entry].last_segment = last_segment; 3119 3120 /* Prepare the descriptor and set the own bit too */ 3121 stmmac_prepare_tx_desc(priv, desc, 0, len, csum_insertion, 3122 priv->mode, 1, last_segment, skb->len); 3123 } 3124 3125 /* Only the last descriptor gets to point to the skb. */ 3126 tx_q->tx_skbuff[entry] = skb; 3127 3128 /* We've used all descriptors we need for this skb, however, 3129 * advance cur_tx so that it references a fresh descriptor. 
3130 * ndo_start_xmit will fill this descriptor the next time it's 3131 * called and stmmac_tx_clean may clean up to this descriptor. 3132 */ 3133 entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE); 3134 tx_q->cur_tx = entry; 3135 3136 if (netif_msg_pktdata(priv)) { 3137 void *tx_head; 3138 3139 netdev_dbg(priv->dev, 3140 "%s: curr=%d dirty=%d f=%d, e=%d, first=%p, nfrags=%d", 3141 __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry, 3142 entry, first, nfrags); 3143 3144 if (priv->extend_desc) 3145 tx_head = (void *)tx_q->dma_etx; 3146 else 3147 tx_head = (void *)tx_q->dma_tx; 3148 3149 stmmac_display_ring(priv, tx_head, DMA_TX_SIZE, false); 3150 3151 netdev_dbg(priv->dev, ">>> frame to be transmitted: "); 3152 print_pkt(skb->data, skb->len); 3153 } 3154 3155 if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) { 3156 netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n", 3157 __func__); 3158 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue)); 3159 } 3160 3161 dev->stats.tx_bytes += skb->len; 3162 3163 /* According to the coalesce parameter the IC bit for the latest 3164 * segment is reset and the timer re-started to clean the tx status. 3165 * This approach takes care about the fragments: desc is the first 3166 * element in case of no SG. 3167 */ 3168 tx_q->tx_count_frames += nfrags + 1; 3169 if (priv->tx_coal_frames <= tx_q->tx_count_frames) { 3170 stmmac_set_tx_ic(priv, desc); 3171 priv->xstats.tx_set_ic_bit++; 3172 tx_q->tx_count_frames = 0; 3173 } else { 3174 stmmac_tx_timer_arm(priv, queue); 3175 } 3176 3177 skb_tx_timestamp(skb); 3178 3179 /* Ready to fill the first descriptor and set the OWN bit w/o any 3180 * problems because all the descriptors are actually ready to be 3181 * passed to the DMA engine. 3182 */ 3183 if (likely(!is_jumbo)) { 3184 bool last_segment = (nfrags == 0); 3185 3186 des = dma_map_single(priv->device, skb->data, 3187 nopaged_len, DMA_TO_DEVICE); 3188 if (dma_mapping_error(priv->device, des)) 3189 goto dma_map_err; 3190 3191 tx_q->tx_skbuff_dma[first_entry].buf = des; 3192 3193 stmmac_set_desc_addr(priv, first, des); 3194 3195 tx_q->tx_skbuff_dma[first_entry].len = nopaged_len; 3196 tx_q->tx_skbuff_dma[first_entry].last_segment = last_segment; 3197 3198 if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && 3199 priv->hwts_tx_en)) { 3200 /* declare that device is doing timestamping */ 3201 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; 3202 stmmac_enable_tx_timestamp(priv, first); 3203 } 3204 3205 /* Prepare the first descriptor setting the OWN bit too */ 3206 stmmac_prepare_tx_desc(priv, first, 1, nopaged_len, 3207 csum_insertion, priv->mode, 1, last_segment, 3208 skb->len); 3209 } else { 3210 stmmac_set_tx_owner(priv, first); 3211 } 3212 3213 /* The own bit must be the latest setting done when prepare the 3214 * descriptor and then barrier is needed to make sure that 3215 * all is coherent before granting the DMA engine. 
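 * In other words, all descriptor fields are written first, then the OWN
 * bit of the first descriptor, and the wmb() below orders those stores
 * before the transmission kick and tail pointer write that hand the
 * ring over to the DMA.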
3216 */ 3217 wmb(); 3218 3219 netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len); 3220 3221 stmmac_enable_dma_transmission(priv, priv->ioaddr); 3222 3223 tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * sizeof(*desc)); 3224 stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue); 3225 3226 return NETDEV_TX_OK; 3227 3228 dma_map_err: 3229 netdev_err(priv->dev, "Tx DMA map failed\n"); 3230 dev_kfree_skb(skb); 3231 priv->dev->stats.tx_dropped++; 3232 return NETDEV_TX_OK; 3233 } 3234 3235 static void stmmac_rx_vlan(struct net_device *dev, struct sk_buff *skb) 3236 { 3237 struct vlan_ethhdr *veth; 3238 __be16 vlan_proto; 3239 u16 vlanid; 3240 3241 veth = (struct vlan_ethhdr *)skb->data; 3242 vlan_proto = veth->h_vlan_proto; 3243 3244 if ((vlan_proto == htons(ETH_P_8021Q) && 3245 dev->features & NETIF_F_HW_VLAN_CTAG_RX) || 3246 (vlan_proto == htons(ETH_P_8021AD) && 3247 dev->features & NETIF_F_HW_VLAN_STAG_RX)) { 3248 /* pop the vlan tag */ 3249 vlanid = ntohs(veth->h_vlan_TCI); 3250 memmove(skb->data + VLAN_HLEN, veth, ETH_ALEN * 2); 3251 skb_pull(skb, VLAN_HLEN); 3252 __vlan_hwaccel_put_tag(skb, vlan_proto, vlanid); 3253 } 3254 } 3255 3256 3257 static inline int stmmac_rx_threshold_count(struct stmmac_rx_queue *rx_q) 3258 { 3259 if (rx_q->rx_zeroc_thresh < STMMAC_RX_THRESH) 3260 return 0; 3261 3262 return 1; 3263 } 3264 3265 /** 3266 * stmmac_rx_refill - refill used skb preallocated buffers 3267 * @priv: driver private structure 3268 * @queue: RX queue index 3269 * Description : this is to reallocate the skb for the reception process 3270 * that is based on zero-copy. 3271 */ 3272 static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue) 3273 { 3274 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; 3275 int dirty = stmmac_rx_dirty(priv, queue); 3276 unsigned int entry = rx_q->dirty_rx; 3277 3278 int bfsize = priv->dma_buf_sz; 3279 3280 while (dirty-- > 0) { 3281 struct dma_desc *p; 3282 3283 if (priv->extend_desc) 3284 p = (struct dma_desc *)(rx_q->dma_erx + entry); 3285 else 3286 p = rx_q->dma_rx + entry; 3287 3288 if (likely(!rx_q->rx_skbuff[entry])) { 3289 struct sk_buff *skb; 3290 3291 skb = netdev_alloc_skb_ip_align(priv->dev, bfsize); 3292 if (unlikely(!skb)) { 3293 /* so for a while no zero-copy! */ 3294 rx_q->rx_zeroc_thresh = STMMAC_RX_THRESH; 3295 if (unlikely(net_ratelimit())) 3296 dev_err(priv->device, 3297 "fail to alloc skb entry %d\n", 3298 entry); 3299 break; 3300 } 3301 3302 rx_q->rx_skbuff[entry] = skb; 3303 rx_q->rx_skbuff_dma[entry] = 3304 dma_map_single(priv->device, skb->data, bfsize, 3305 DMA_FROM_DEVICE); 3306 if (dma_mapping_error(priv->device, 3307 rx_q->rx_skbuff_dma[entry])) { 3308 netdev_err(priv->dev, "Rx DMA map failed\n"); 3309 dev_kfree_skb(skb); 3310 break; 3311 } 3312 3313 stmmac_set_desc_addr(priv, p, rx_q->rx_skbuff_dma[entry]); 3314 stmmac_refill_desc3(priv, rx_q, p); 3315 3316 if (rx_q->rx_zeroc_thresh > 0) 3317 rx_q->rx_zeroc_thresh--; 3318 3319 netif_dbg(priv, rx_status, priv->dev, 3320 "refill entry #%d\n", entry); 3321 } 3322 dma_wmb(); 3323 3324 stmmac_set_rx_owner(priv, p, priv->use_riwt); 3325 3326 dma_wmb(); 3327 3328 entry = STMMAC_GET_ENTRY(entry, DMA_RX_SIZE); 3329 } 3330 rx_q->dirty_rx = entry; 3331 stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue); 3332 } 3333 3334 /** 3335 * stmmac_rx - manage the receive process 3336 * @priv: driver private structure 3337 * @limit: napi bugget 3338 * @queue: RX queue index. 
3339 * Description : this the function called by the napi poll method. 3340 * It gets all the frames inside the ring. 3341 */ 3342 static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue) 3343 { 3344 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; 3345 struct stmmac_channel *ch = &priv->channel[queue]; 3346 unsigned int next_entry = rx_q->cur_rx; 3347 int coe = priv->hw->rx_csum; 3348 unsigned int count = 0; 3349 bool xmac; 3350 3351 xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac; 3352 3353 if (netif_msg_rx_status(priv)) { 3354 void *rx_head; 3355 3356 netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__); 3357 if (priv->extend_desc) 3358 rx_head = (void *)rx_q->dma_erx; 3359 else 3360 rx_head = (void *)rx_q->dma_rx; 3361 3362 stmmac_display_ring(priv, rx_head, DMA_RX_SIZE, true); 3363 } 3364 while (count < limit) { 3365 int entry, status; 3366 struct dma_desc *p; 3367 struct dma_desc *np; 3368 3369 entry = next_entry; 3370 3371 if (priv->extend_desc) 3372 p = (struct dma_desc *)(rx_q->dma_erx + entry); 3373 else 3374 p = rx_q->dma_rx + entry; 3375 3376 /* read the status of the incoming frame */ 3377 status = stmmac_rx_status(priv, &priv->dev->stats, 3378 &priv->xstats, p); 3379 /* check if managed by the DMA otherwise go ahead */ 3380 if (unlikely(status & dma_own)) 3381 break; 3382 3383 count++; 3384 3385 rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx, DMA_RX_SIZE); 3386 next_entry = rx_q->cur_rx; 3387 3388 if (priv->extend_desc) 3389 np = (struct dma_desc *)(rx_q->dma_erx + next_entry); 3390 else 3391 np = rx_q->dma_rx + next_entry; 3392 3393 prefetch(np); 3394 3395 if (priv->extend_desc) 3396 stmmac_rx_extended_status(priv, &priv->dev->stats, 3397 &priv->xstats, rx_q->dma_erx + entry); 3398 if (unlikely(status == discard_frame)) { 3399 priv->dev->stats.rx_errors++; 3400 if (priv->hwts_rx_en && !priv->extend_desc) { 3401 /* DESC2 & DESC3 will be overwritten by device 3402 * with timestamp value, hence reinitialize 3403 * them in stmmac_rx_refill() function so that 3404 * device can reuse it. 3405 */ 3406 dev_kfree_skb_any(rx_q->rx_skbuff[entry]); 3407 rx_q->rx_skbuff[entry] = NULL; 3408 dma_unmap_single(priv->device, 3409 rx_q->rx_skbuff_dma[entry], 3410 priv->dma_buf_sz, 3411 DMA_FROM_DEVICE); 3412 } 3413 } else { 3414 struct sk_buff *skb; 3415 int frame_len; 3416 unsigned int des; 3417 3418 stmmac_get_desc_addr(priv, p, &des); 3419 frame_len = stmmac_get_rx_frame_len(priv, p, coe); 3420 3421 /* If frame length is greater than skb buffer size 3422 * (preallocated during init) then the packet is 3423 * ignored 3424 */ 3425 if (frame_len > priv->dma_buf_sz) { 3426 if (net_ratelimit()) 3427 netdev_err(priv->dev, 3428 "len %d larger than size (%d)\n", 3429 frame_len, priv->dma_buf_sz); 3430 priv->dev->stats.rx_length_errors++; 3431 continue; 3432 } 3433 3434 /* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3 3435 * Type frames (LLC/LLC-SNAP) 3436 * 3437 * llc_snap is never checked in GMAC >= 4, so this ACS 3438 * feature is always disabled and packets need to be 3439 * stripped manually. 
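 * For example, when the FCS is stripped here a frame whose length is
 * reported as 64 bytes by the descriptor is handed up the stack as
 * 60 bytes (ETH_FCS_LEN is 4).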
3440 */ 3441 if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00) || 3442 unlikely(status != llc_snap)) 3443 frame_len -= ETH_FCS_LEN; 3444 3445 if (netif_msg_rx_status(priv)) { 3446 netdev_dbg(priv->dev, "\tdesc: %p [entry %d] buff=0x%x\n", 3447 p, entry, des); 3448 netdev_dbg(priv->dev, "frame size %d, COE: %d\n", 3449 frame_len, status); 3450 } 3451 3452 /* The zero-copy is always used for all the sizes 3453 * in case of GMAC4 because it needs 3454 * to refill the used descriptors, always. 3455 */ 3456 if (unlikely(!xmac && 3457 ((frame_len < priv->rx_copybreak) || 3458 stmmac_rx_threshold_count(rx_q)))) { 3459 skb = netdev_alloc_skb_ip_align(priv->dev, 3460 frame_len); 3461 if (unlikely(!skb)) { 3462 if (net_ratelimit()) 3463 dev_warn(priv->device, 3464 "packet dropped\n"); 3465 priv->dev->stats.rx_dropped++; 3466 continue; 3467 } 3468 3469 dma_sync_single_for_cpu(priv->device, 3470 rx_q->rx_skbuff_dma 3471 [entry], frame_len, 3472 DMA_FROM_DEVICE); 3473 skb_copy_to_linear_data(skb, 3474 rx_q-> 3475 rx_skbuff[entry]->data, 3476 frame_len); 3477 3478 skb_put(skb, frame_len); 3479 dma_sync_single_for_device(priv->device, 3480 rx_q->rx_skbuff_dma 3481 [entry], frame_len, 3482 DMA_FROM_DEVICE); 3483 } else { 3484 skb = rx_q->rx_skbuff[entry]; 3485 if (unlikely(!skb)) { 3486 if (net_ratelimit()) 3487 netdev_err(priv->dev, 3488 "%s: Inconsistent Rx chain\n", 3489 priv->dev->name); 3490 priv->dev->stats.rx_dropped++; 3491 continue; 3492 } 3493 prefetch(skb->data - NET_IP_ALIGN); 3494 rx_q->rx_skbuff[entry] = NULL; 3495 rx_q->rx_zeroc_thresh++; 3496 3497 skb_put(skb, frame_len); 3498 dma_unmap_single(priv->device, 3499 rx_q->rx_skbuff_dma[entry], 3500 priv->dma_buf_sz, 3501 DMA_FROM_DEVICE); 3502 } 3503 3504 if (netif_msg_pktdata(priv)) { 3505 netdev_dbg(priv->dev, "frame received (%dbytes)", 3506 frame_len); 3507 print_pkt(skb->data, frame_len); 3508 } 3509 3510 stmmac_get_rx_hwtstamp(priv, p, np, skb); 3511 3512 stmmac_rx_vlan(priv->dev, skb); 3513 3514 skb->protocol = eth_type_trans(skb, priv->dev); 3515 3516 if (unlikely(!coe)) 3517 skb_checksum_none_assert(skb); 3518 else 3519 skb->ip_summed = CHECKSUM_UNNECESSARY; 3520 3521 napi_gro_receive(&ch->rx_napi, skb); 3522 3523 priv->dev->stats.rx_packets++; 3524 priv->dev->stats.rx_bytes += frame_len; 3525 } 3526 } 3527 3528 stmmac_rx_refill(priv, queue); 3529 3530 priv->xstats.rx_pkt_n += count; 3531 3532 return count; 3533 } 3534 3535 static int stmmac_napi_poll_rx(struct napi_struct *napi, int budget) 3536 { 3537 struct stmmac_channel *ch = 3538 container_of(napi, struct stmmac_channel, rx_napi); 3539 struct stmmac_priv *priv = ch->priv_data; 3540 u32 chan = ch->index; 3541 int work_done; 3542 3543 priv->xstats.napi_poll++; 3544 3545 work_done = stmmac_rx(priv, budget, chan); 3546 if (work_done < budget && napi_complete_done(napi, work_done)) 3547 stmmac_enable_dma_irq(priv, priv->ioaddr, chan); 3548 return work_done; 3549 } 3550 3551 static int stmmac_napi_poll_tx(struct napi_struct *napi, int budget) 3552 { 3553 struct stmmac_channel *ch = 3554 container_of(napi, struct stmmac_channel, tx_napi); 3555 struct stmmac_priv *priv = ch->priv_data; 3556 struct stmmac_tx_queue *tx_q; 3557 u32 chan = ch->index; 3558 int work_done; 3559 3560 priv->xstats.napi_poll++; 3561 3562 work_done = stmmac_tx_clean(priv, DMA_TX_SIZE, chan); 3563 work_done = min(work_done, budget); 3564 3565 if (work_done < budget && napi_complete_done(napi, work_done)) 3566 stmmac_enable_dma_irq(priv, priv->ioaddr, chan); 3567 3568 /* Force transmission restart */ 3569 tx_q = 
&priv->tx_queue[chan]; 3570 if (tx_q->cur_tx != tx_q->dirty_tx) { 3571 stmmac_enable_dma_transmission(priv, priv->ioaddr); 3572 stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, 3573 chan); 3574 } 3575 3576 return work_done; 3577 } 3578 3579 /** 3580 * stmmac_tx_timeout 3581 * @dev : Pointer to net device structure 3582 * Description: this function is called when a packet transmission fails to 3583 * complete within a reasonable time. The driver will mark the error in the 3584 * netdev structure and arrange for the device to be reset to a sane state 3585 * in order to transmit a new packet. 3586 */ 3587 static void stmmac_tx_timeout(struct net_device *dev) 3588 { 3589 struct stmmac_priv *priv = netdev_priv(dev); 3590 3591 stmmac_global_err(priv); 3592 } 3593 3594 /** 3595 * stmmac_set_rx_mode - entry point for multicast addressing 3596 * @dev : pointer to the device structure 3597 * Description: 3598 * This function is a driver entry point which gets called by the kernel 3599 * whenever multicast addresses must be enabled/disabled. 3600 * Return value: 3601 * void. 3602 */ 3603 static void stmmac_set_rx_mode(struct net_device *dev) 3604 { 3605 struct stmmac_priv *priv = netdev_priv(dev); 3606 3607 stmmac_set_filter(priv, priv->hw, dev); 3608 } 3609 3610 /** 3611 * stmmac_change_mtu - entry point to change MTU size for the device. 3612 * @dev : device pointer. 3613 * @new_mtu : the new MTU size for the device. 3614 * Description: the Maximum Transfer Unit (MTU) is used by the network layer 3615 * to drive packet transmission. Ethernet has an MTU of 1500 octets 3616 * (ETH_DATA_LEN). This value can be changed with ifconfig. 3617 * Return value: 3618 * 0 on success and an appropriate (-)ve integer as defined in errno.h 3619 * file on failure. 3620 */ 3621 static int stmmac_change_mtu(struct net_device *dev, int new_mtu) 3622 { 3623 struct stmmac_priv *priv = netdev_priv(dev); 3624 3625 if (netif_running(dev)) { 3626 netdev_err(priv->dev, "must be stopped to change its MTU\n"); 3627 return -EBUSY; 3628 } 3629 3630 dev->mtu = new_mtu; 3631 3632 netdev_update_features(dev); 3633 3634 return 0; 3635 } 3636 3637 static netdev_features_t stmmac_fix_features(struct net_device *dev, 3638 netdev_features_t features) 3639 { 3640 struct stmmac_priv *priv = netdev_priv(dev); 3641 3642 if (priv->plat->rx_coe == STMMAC_RX_COE_NONE) 3643 features &= ~NETIF_F_RXCSUM; 3644 3645 if (!priv->plat->tx_coe) 3646 features &= ~NETIF_F_CSUM_MASK; 3647 3648 /* Some GMAC devices have a bugged Jumbo frame support that 3649 * needs to have the Tx COE disabled for oversized frames 3650 * (due to limited buffer sizes). In this case we disable 3651 * the TX csum insertion in the TDES and not use SF. 3652 */ 3653 if (priv->plat->bugged_jumbo && (dev->mtu > ETH_DATA_LEN)) 3654 features &= ~NETIF_F_CSUM_MASK; 3655 3656 /* Disable tso if asked by ethtool */ 3657 if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) { 3658 if (features & NETIF_F_TSO) 3659 priv->tso = true; 3660 else 3661 priv->tso = false; 3662 } 3663 3664 return features; 3665 } 3666 3667 static int stmmac_set_features(struct net_device *netdev, 3668 netdev_features_t features) 3669 { 3670 struct stmmac_priv *priv = netdev_priv(netdev); 3671 3672 /* Keep the COE Type in case of csum is supporting */ 3673 if (features & NETIF_F_RXCSUM) 3674 priv->hw->rx_csum = priv->plat->rx_coe; 3675 else 3676 priv->hw->rx_csum = 0; 3677 /* No check needed because rx_coe has been set before and it will be 3678 * fixed in case of issue. 
3679 */ 3680 stmmac_rx_ipc(priv, priv->hw); 3681 3682 return 0; 3683 } 3684 3685 /** 3686 * stmmac_interrupt - main ISR 3687 * @irq: interrupt number. 3688 * @dev_id: to pass the net device pointer. 3689 * Description: this is the main driver interrupt service routine. 3690 * It can call: 3691 * o DMA service routine (to manage incoming frame reception and transmission 3692 * status) 3693 * o Core interrupts to manage: remote wake-up, management counter, LPI 3694 * interrupts. 3695 */ 3696 static irqreturn_t stmmac_interrupt(int irq, void *dev_id) 3697 { 3698 struct net_device *dev = (struct net_device *)dev_id; 3699 struct stmmac_priv *priv = netdev_priv(dev); 3700 u32 rx_cnt = priv->plat->rx_queues_to_use; 3701 u32 tx_cnt = priv->plat->tx_queues_to_use; 3702 u32 queues_count; 3703 u32 queue; 3704 bool xmac; 3705 3706 xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac; 3707 queues_count = (rx_cnt > tx_cnt) ? rx_cnt : tx_cnt; 3708 3709 if (priv->irq_wake) 3710 pm_wakeup_event(priv->device, 0); 3711 3712 if (unlikely(!dev)) { 3713 netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__); 3714 return IRQ_NONE; 3715 } 3716 3717 /* Check if adapter is up */ 3718 if (test_bit(STMMAC_DOWN, &priv->state)) 3719 return IRQ_HANDLED; 3720 /* Check if a fatal error happened */ 3721 if (stmmac_safety_feat_interrupt(priv)) 3722 return IRQ_HANDLED; 3723 3724 /* To handle GMAC own interrupts */ 3725 if ((priv->plat->has_gmac) || xmac) { 3726 int status = stmmac_host_irq_status(priv, priv->hw, &priv->xstats); 3727 int mtl_status; 3728 3729 if (unlikely(status)) { 3730 /* For LPI we need to save the tx status */ 3731 if (status & CORE_IRQ_TX_PATH_IN_LPI_MODE) 3732 priv->tx_path_in_lpi_mode = true; 3733 if (status & CORE_IRQ_TX_PATH_EXIT_LPI_MODE) 3734 priv->tx_path_in_lpi_mode = false; 3735 } 3736 3737 for (queue = 0; queue < queues_count; queue++) { 3738 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; 3739 3740 mtl_status = stmmac_host_mtl_irq_status(priv, priv->hw, 3741 queue); 3742 if (mtl_status != -EINVAL) 3743 status |= mtl_status; 3744 3745 if (status & CORE_IRQ_MTL_RX_OVERFLOW) 3746 stmmac_set_rx_tail_ptr(priv, priv->ioaddr, 3747 rx_q->rx_tail_addr, 3748 queue); 3749 } 3750 3751 /* PCS link status */ 3752 if (priv->hw->pcs) { 3753 if (priv->xstats.pcs_link) 3754 netif_carrier_on(dev); 3755 else 3756 netif_carrier_off(dev); 3757 } 3758 } 3759 3760 /* To handle DMA interrupts */ 3761 stmmac_dma_interrupt(priv); 3762 3763 return IRQ_HANDLED; 3764 } 3765 3766 #ifdef CONFIG_NET_POLL_CONTROLLER 3767 /* Polling receive - used by NETCONSOLE and other diagnostic tools 3768 * to allow network I/O with interrupts disabled. 3769 */ 3770 static void stmmac_poll_controller(struct net_device *dev) 3771 { 3772 disable_irq(dev->irq); 3773 stmmac_interrupt(dev->irq, dev); 3774 enable_irq(dev->irq); 3775 } 3776 #endif 3777 3778 /** 3779 * stmmac_ioctl - Entry point for the Ioctl 3780 * @dev: Device pointer. 3781 * @rq: An IOCTL specefic structure, that can contain a pointer to 3782 * a proprietary structure used to pass information to the driver. 3783 * @cmd: IOCTL command 3784 * Description: 3785 * Currently it supports the phy_mii_ioctl(...) and HW time stamping. 
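 * Specifically, SIOCGMIIPHY/SIOCGMIIREG/SIOCSMIIREG are forwarded to
 * phy_mii_ioctl(), SIOCSHWTSTAMP and SIOCGHWTSTAMP are handled by
 * stmmac_hwtstamp_set() and stmmac_hwtstamp_get(), and any other
 * command returns -EOPNOTSUPP.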
3786 */ 3787 static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) 3788 { 3789 int ret = -EOPNOTSUPP; 3790 3791 if (!netif_running(dev)) 3792 return -EINVAL; 3793 3794 switch (cmd) { 3795 case SIOCGMIIPHY: 3796 case SIOCGMIIREG: 3797 case SIOCSMIIREG: 3798 if (!dev->phydev) 3799 return -EINVAL; 3800 ret = phy_mii_ioctl(dev->phydev, rq, cmd); 3801 break; 3802 case SIOCSHWTSTAMP: 3803 ret = stmmac_hwtstamp_set(dev, rq); 3804 break; 3805 case SIOCGHWTSTAMP: 3806 ret = stmmac_hwtstamp_get(dev, rq); 3807 break; 3808 default: 3809 break; 3810 } 3811 3812 return ret; 3813 } 3814 3815 static int stmmac_setup_tc_block_cb(enum tc_setup_type type, void *type_data, 3816 void *cb_priv) 3817 { 3818 struct stmmac_priv *priv = cb_priv; 3819 int ret = -EOPNOTSUPP; 3820 3821 stmmac_disable_all_queues(priv); 3822 3823 switch (type) { 3824 case TC_SETUP_CLSU32: 3825 if (tc_cls_can_offload_and_chain0(priv->dev, type_data)) 3826 ret = stmmac_tc_setup_cls_u32(priv, priv, type_data); 3827 break; 3828 default: 3829 break; 3830 } 3831 3832 stmmac_enable_all_queues(priv); 3833 return ret; 3834 } 3835 3836 static int stmmac_setup_tc_block(struct stmmac_priv *priv, 3837 struct tc_block_offload *f) 3838 { 3839 if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS) 3840 return -EOPNOTSUPP; 3841 3842 switch (f->command) { 3843 case TC_BLOCK_BIND: 3844 return tcf_block_cb_register(f->block, stmmac_setup_tc_block_cb, 3845 priv, priv, f->extack); 3846 case TC_BLOCK_UNBIND: 3847 tcf_block_cb_unregister(f->block, stmmac_setup_tc_block_cb, priv); 3848 return 0; 3849 default: 3850 return -EOPNOTSUPP; 3851 } 3852 } 3853 3854 static int stmmac_setup_tc(struct net_device *ndev, enum tc_setup_type type, 3855 void *type_data) 3856 { 3857 struct stmmac_priv *priv = netdev_priv(ndev); 3858 3859 switch (type) { 3860 case TC_SETUP_BLOCK: 3861 return stmmac_setup_tc_block(priv, type_data); 3862 case TC_SETUP_QDISC_CBS: 3863 return stmmac_tc_setup_cbs(priv, priv, type_data); 3864 default: 3865 return -EOPNOTSUPP; 3866 } 3867 } 3868 3869 static int stmmac_set_mac_address(struct net_device *ndev, void *addr) 3870 { 3871 struct stmmac_priv *priv = netdev_priv(ndev); 3872 int ret = 0; 3873 3874 ret = eth_mac_addr(ndev, addr); 3875 if (ret) 3876 return ret; 3877 3878 stmmac_set_umac_addr(priv, priv->hw, ndev->dev_addr, 0); 3879 3880 return ret; 3881 } 3882 3883 #ifdef CONFIG_DEBUG_FS 3884 static struct dentry *stmmac_fs_dir; 3885 3886 static void sysfs_display_ring(void *head, int size, int extend_desc, 3887 struct seq_file *seq) 3888 { 3889 int i; 3890 struct dma_extended_desc *ep = (struct dma_extended_desc *)head; 3891 struct dma_desc *p = (struct dma_desc *)head; 3892 3893 for (i = 0; i < size; i++) { 3894 if (extend_desc) { 3895 seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n", 3896 i, (unsigned int)virt_to_phys(ep), 3897 le32_to_cpu(ep->basic.des0), 3898 le32_to_cpu(ep->basic.des1), 3899 le32_to_cpu(ep->basic.des2), 3900 le32_to_cpu(ep->basic.des3)); 3901 ep++; 3902 } else { 3903 seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n", 3904 i, (unsigned int)virt_to_phys(p), 3905 le32_to_cpu(p->des0), le32_to_cpu(p->des1), 3906 le32_to_cpu(p->des2), le32_to_cpu(p->des3)); 3907 p++; 3908 } 3909 seq_printf(seq, "\n"); 3910 } 3911 } 3912 3913 static int stmmac_rings_status_show(struct seq_file *seq, void *v) 3914 { 3915 struct net_device *dev = seq->private; 3916 struct stmmac_priv *priv = netdev_priv(dev); 3917 u32 rx_count = priv->plat->rx_queues_to_use; 3918 u32 tx_count = priv->plat->tx_queues_to_use; 3919 u32 
queue; 3920 3921 if ((dev->flags & IFF_UP) == 0) 3922 return 0; 3923 3924 for (queue = 0; queue < rx_count; queue++) { 3925 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; 3926 3927 seq_printf(seq, "RX Queue %d:\n", queue); 3928 3929 if (priv->extend_desc) { 3930 seq_printf(seq, "Extended descriptor ring:\n"); 3931 sysfs_display_ring((void *)rx_q->dma_erx, 3932 DMA_RX_SIZE, 1, seq); 3933 } else { 3934 seq_printf(seq, "Descriptor ring:\n"); 3935 sysfs_display_ring((void *)rx_q->dma_rx, 3936 DMA_RX_SIZE, 0, seq); 3937 } 3938 } 3939 3940 for (queue = 0; queue < tx_count; queue++) { 3941 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue]; 3942 3943 seq_printf(seq, "TX Queue %d:\n", queue); 3944 3945 if (priv->extend_desc) { 3946 seq_printf(seq, "Extended descriptor ring:\n"); 3947 sysfs_display_ring((void *)tx_q->dma_etx, 3948 DMA_TX_SIZE, 1, seq); 3949 } else { 3950 seq_printf(seq, "Descriptor ring:\n"); 3951 sysfs_display_ring((void *)tx_q->dma_tx, 3952 DMA_TX_SIZE, 0, seq); 3953 } 3954 } 3955 3956 return 0; 3957 } 3958 DEFINE_SHOW_ATTRIBUTE(stmmac_rings_status); 3959 3960 static int stmmac_dma_cap_show(struct seq_file *seq, void *v) 3961 { 3962 struct net_device *dev = seq->private; 3963 struct stmmac_priv *priv = netdev_priv(dev); 3964 3965 if (!priv->hw_cap_support) { 3966 seq_printf(seq, "DMA HW features not supported\n"); 3967 return 0; 3968 } 3969 3970 seq_printf(seq, "==============================\n"); 3971 seq_printf(seq, "\tDMA HW features\n"); 3972 seq_printf(seq, "==============================\n"); 3973 3974 seq_printf(seq, "\t10/100 Mbps: %s\n", 3975 (priv->dma_cap.mbps_10_100) ? "Y" : "N"); 3976 seq_printf(seq, "\t1000 Mbps: %s\n", 3977 (priv->dma_cap.mbps_1000) ? "Y" : "N"); 3978 seq_printf(seq, "\tHalf duplex: %s\n", 3979 (priv->dma_cap.half_duplex) ? "Y" : "N"); 3980 seq_printf(seq, "\tHash Filter: %s\n", 3981 (priv->dma_cap.hash_filter) ? "Y" : "N"); 3982 seq_printf(seq, "\tMultiple MAC address registers: %s\n", 3983 (priv->dma_cap.multi_addr) ? "Y" : "N"); 3984 seq_printf(seq, "\tPCS (TBI/SGMII/RTBI PHY interfaces): %s\n", 3985 (priv->dma_cap.pcs) ? "Y" : "N"); 3986 seq_printf(seq, "\tSMA (MDIO) Interface: %s\n", 3987 (priv->dma_cap.sma_mdio) ? "Y" : "N"); 3988 seq_printf(seq, "\tPMT Remote wake up: %s\n", 3989 (priv->dma_cap.pmt_remote_wake_up) ? "Y" : "N"); 3990 seq_printf(seq, "\tPMT Magic Frame: %s\n", 3991 (priv->dma_cap.pmt_magic_frame) ? "Y" : "N"); 3992 seq_printf(seq, "\tRMON module: %s\n", 3993 (priv->dma_cap.rmon) ? "Y" : "N"); 3994 seq_printf(seq, "\tIEEE 1588-2002 Time Stamp: %s\n", 3995 (priv->dma_cap.time_stamp) ? "Y" : "N"); 3996 seq_printf(seq, "\tIEEE 1588-2008 Advanced Time Stamp: %s\n", 3997 (priv->dma_cap.atime_stamp) ? "Y" : "N"); 3998 seq_printf(seq, "\t802.3az - Energy-Efficient Ethernet (EEE): %s\n", 3999 (priv->dma_cap.eee) ? "Y" : "N"); 4000 seq_printf(seq, "\tAV features: %s\n", (priv->dma_cap.av) ? "Y" : "N"); 4001 seq_printf(seq, "\tChecksum Offload in TX: %s\n", 4002 (priv->dma_cap.tx_coe) ? "Y" : "N"); 4003 if (priv->synopsys_id >= DWMAC_CORE_4_00) { 4004 seq_printf(seq, "\tIP Checksum Offload in RX: %s\n", 4005 (priv->dma_cap.rx_coe) ? "Y" : "N"); 4006 } else { 4007 seq_printf(seq, "\tIP Checksum Offload (type1) in RX: %s\n", 4008 (priv->dma_cap.rx_coe_type1) ? "Y" : "N"); 4009 seq_printf(seq, "\tIP Checksum Offload (type2) in RX: %s\n", 4010 (priv->dma_cap.rx_coe_type2) ? "Y" : "N"); 4011 } 4012 seq_printf(seq, "\tRXFIFO > 2048bytes: %s\n", 4013 (priv->dma_cap.rxfifo_over_2048) ? 
"Y" : "N"); 4014 seq_printf(seq, "\tNumber of Additional RX channel: %d\n", 4015 priv->dma_cap.number_rx_channel); 4016 seq_printf(seq, "\tNumber of Additional TX channel: %d\n", 4017 priv->dma_cap.number_tx_channel); 4018 seq_printf(seq, "\tEnhanced descriptors: %s\n", 4019 (priv->dma_cap.enh_desc) ? "Y" : "N"); 4020 4021 return 0; 4022 } 4023 DEFINE_SHOW_ATTRIBUTE(stmmac_dma_cap); 4024 4025 static int stmmac_init_fs(struct net_device *dev) 4026 { 4027 struct stmmac_priv *priv = netdev_priv(dev); 4028 4029 /* Create per netdev entries */ 4030 priv->dbgfs_dir = debugfs_create_dir(dev->name, stmmac_fs_dir); 4031 4032 if (!priv->dbgfs_dir || IS_ERR(priv->dbgfs_dir)) { 4033 netdev_err(priv->dev, "ERROR failed to create debugfs directory\n"); 4034 4035 return -ENOMEM; 4036 } 4037 4038 /* Entry to report DMA RX/TX rings */ 4039 priv->dbgfs_rings_status = 4040 debugfs_create_file("descriptors_status", 0444, 4041 priv->dbgfs_dir, dev, 4042 &stmmac_rings_status_fops); 4043 4044 if (!priv->dbgfs_rings_status || IS_ERR(priv->dbgfs_rings_status)) { 4045 netdev_err(priv->dev, "ERROR creating stmmac ring debugfs file\n"); 4046 debugfs_remove_recursive(priv->dbgfs_dir); 4047 4048 return -ENOMEM; 4049 } 4050 4051 /* Entry to report the DMA HW features */ 4052 priv->dbgfs_dma_cap = debugfs_create_file("dma_cap", 0444, 4053 priv->dbgfs_dir, 4054 dev, &stmmac_dma_cap_fops); 4055 4056 if (!priv->dbgfs_dma_cap || IS_ERR(priv->dbgfs_dma_cap)) { 4057 netdev_err(priv->dev, "ERROR creating stmmac MMC debugfs file\n"); 4058 debugfs_remove_recursive(priv->dbgfs_dir); 4059 4060 return -ENOMEM; 4061 } 4062 4063 return 0; 4064 } 4065 4066 static void stmmac_exit_fs(struct net_device *dev) 4067 { 4068 struct stmmac_priv *priv = netdev_priv(dev); 4069 4070 debugfs_remove_recursive(priv->dbgfs_dir); 4071 } 4072 #endif /* CONFIG_DEBUG_FS */ 4073 4074 static const struct net_device_ops stmmac_netdev_ops = { 4075 .ndo_open = stmmac_open, 4076 .ndo_start_xmit = stmmac_xmit, 4077 .ndo_stop = stmmac_release, 4078 .ndo_change_mtu = stmmac_change_mtu, 4079 .ndo_fix_features = stmmac_fix_features, 4080 .ndo_set_features = stmmac_set_features, 4081 .ndo_set_rx_mode = stmmac_set_rx_mode, 4082 .ndo_tx_timeout = stmmac_tx_timeout, 4083 .ndo_do_ioctl = stmmac_ioctl, 4084 .ndo_setup_tc = stmmac_setup_tc, 4085 #ifdef CONFIG_NET_POLL_CONTROLLER 4086 .ndo_poll_controller = stmmac_poll_controller, 4087 #endif 4088 .ndo_set_mac_address = stmmac_set_mac_address, 4089 }; 4090 4091 static void stmmac_reset_subtask(struct stmmac_priv *priv) 4092 { 4093 if (!test_and_clear_bit(STMMAC_RESET_REQUESTED, &priv->state)) 4094 return; 4095 if (test_bit(STMMAC_DOWN, &priv->state)) 4096 return; 4097 4098 netdev_err(priv->dev, "Reset adapter.\n"); 4099 4100 rtnl_lock(); 4101 netif_trans_update(priv->dev); 4102 while (test_and_set_bit(STMMAC_RESETING, &priv->state)) 4103 usleep_range(1000, 2000); 4104 4105 set_bit(STMMAC_DOWN, &priv->state); 4106 dev_close(priv->dev); 4107 dev_open(priv->dev, NULL); 4108 clear_bit(STMMAC_DOWN, &priv->state); 4109 clear_bit(STMMAC_RESETING, &priv->state); 4110 rtnl_unlock(); 4111 } 4112 4113 static void stmmac_service_task(struct work_struct *work) 4114 { 4115 struct stmmac_priv *priv = container_of(work, struct stmmac_priv, 4116 service_task); 4117 4118 stmmac_reset_subtask(priv); 4119 clear_bit(STMMAC_SERVICE_SCHED, &priv->state); 4120 } 4121 4122 /** 4123 * stmmac_hw_init - Init the MAC device 4124 * @priv: driver private structure 4125 * Description: this function is to configure the MAC device according to 4126 * some 
platform parameters or the HW capability register. It prepares the
4127  * driver to use either ring or chain modes and to set up either enhanced or
4128  * normal descriptors.
4129  */
4130 static int stmmac_hw_init(struct stmmac_priv *priv)
4131 {
4132 	int ret;
4133 
4134 	/* dwmac-sun8i only works in chain mode */
4135 	if (priv->plat->has_sun8i)
4136 		chain_mode = 1;
4137 	priv->chain_mode = chain_mode;
4138 
4139 	/* Initialize HW Interface */
4140 	ret = stmmac_hwif_init(priv);
4141 	if (ret)
4142 		return ret;
4143 
4144 	/* Get the HW capability (available on GMAC cores newer than 3.50a) */
4145 	priv->hw_cap_support = stmmac_get_hw_features(priv);
4146 	if (priv->hw_cap_support) {
4147 		dev_info(priv->device, "DMA HW capability register supported\n");
4148 
4149 		/* We can override some gmac/dma configuration fields
4150 		 * (e.g. enh_desc, tx_coe) that are passed through the
4151 		 * platform with the values from the HW capability
4152 		 * register (if supported).
4153 		 */
4154 		priv->plat->enh_desc = priv->dma_cap.enh_desc;
4155 		priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up;
4156 		priv->hw->pmt = priv->plat->pmt;
4157 
4158 		/* TXCOE doesn't work in thresh DMA mode */
4159 		if (priv->plat->force_thresh_dma_mode)
4160 			priv->plat->tx_coe = 0;
4161 		else
4162 			priv->plat->tx_coe = priv->dma_cap.tx_coe;
4163 
4164 		/* In case of GMAC4 rx_coe is from HW cap register. */
4165 		priv->plat->rx_coe = priv->dma_cap.rx_coe;
4166 
4167 		if (priv->dma_cap.rx_coe_type2)
4168 			priv->plat->rx_coe = STMMAC_RX_COE_TYPE2;
4169 		else if (priv->dma_cap.rx_coe_type1)
4170 			priv->plat->rx_coe = STMMAC_RX_COE_TYPE1;
4171 
4172 	} else {
4173 		dev_info(priv->device, "No HW DMA feature register supported\n");
4174 	}
4175 
4176 	if (priv->plat->rx_coe) {
4177 		priv->hw->rx_csum = priv->plat->rx_coe;
4178 		dev_info(priv->device, "RX Checksum Offload Engine supported\n");
4179 		if (priv->synopsys_id < DWMAC_CORE_4_00)
4180 			dev_info(priv->device, "COE Type %d\n", priv->hw->rx_csum);
4181 	}
4182 	if (priv->plat->tx_coe)
4183 		dev_info(priv->device, "TX Checksum insertion supported\n");
4184 
4185 	if (priv->plat->pmt) {
4186 		dev_info(priv->device, "Wake-Up On LAN supported\n");
4187 		device_set_wakeup_capable(priv->device, 1);
4188 	}
4189 
4190 	if (priv->dma_cap.tsoen)
4191 		dev_info(priv->device, "TSO supported\n");
4192 
4193 	/* Run HW quirks, if any */
4194 	if (priv->hwif_quirks) {
4195 		ret = priv->hwif_quirks(priv);
4196 		if (ret)
4197 			return ret;
4198 	}
4199 
4200 	/* Rx Watchdog is available in cores newer than 3.40.
4201 	 * In some cases, for example on buggy HW, this feature
4202 	 * has to be disabled; this can be done by setting the
4203 	 * riwt_off field in the platform data.
4204 	 */
4205 	if (((priv->synopsys_id >= DWMAC_CORE_3_50) ||
4206 	    (priv->plat->has_xgmac)) && (!priv->plat->riwt_off)) {
4207 		priv->use_riwt = 1;
4208 		dev_info(priv->device,
4209 			 "Enable RX Mitigation via HW Watchdog Timer\n");
4210 	}
4211 
4212 	return 0;
4213 }
4214 
4215 /**
4216  * stmmac_dvr_probe
4217  * @device: device pointer
4218  * @plat_dat: platform data pointer
4219  * @res: stmmac resource pointer
4220  * Description: this is the main probe function: it allocates the net device
4221  * (via alloc_etherdev) and initialises the private structure.
4222  * Return:
4223  * returns 0 on success, otherwise errno.
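 * The register resources in @res and the configuration in @plat_dat are
 * filled in by the bus glue (e.g. the dwmac-* platform drivers or
 * stmmac_pci) before this function is called.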
4224 */ 4225 int stmmac_dvr_probe(struct device *device, 4226 struct plat_stmmacenet_data *plat_dat, 4227 struct stmmac_resources *res) 4228 { 4229 struct net_device *ndev = NULL; 4230 struct stmmac_priv *priv; 4231 u32 queue, maxq; 4232 int ret = 0; 4233 4234 ndev = alloc_etherdev_mqs(sizeof(struct stmmac_priv), 4235 MTL_MAX_TX_QUEUES, 4236 MTL_MAX_RX_QUEUES); 4237 if (!ndev) 4238 return -ENOMEM; 4239 4240 SET_NETDEV_DEV(ndev, device); 4241 4242 priv = netdev_priv(ndev); 4243 priv->device = device; 4244 priv->dev = ndev; 4245 4246 stmmac_set_ethtool_ops(ndev); 4247 priv->pause = pause; 4248 priv->plat = plat_dat; 4249 priv->ioaddr = res->addr; 4250 priv->dev->base_addr = (unsigned long)res->addr; 4251 4252 priv->dev->irq = res->irq; 4253 priv->wol_irq = res->wol_irq; 4254 priv->lpi_irq = res->lpi_irq; 4255 4256 if (!IS_ERR_OR_NULL(res->mac)) 4257 memcpy(priv->dev->dev_addr, res->mac, ETH_ALEN); 4258 4259 dev_set_drvdata(device, priv->dev); 4260 4261 /* Verify driver arguments */ 4262 stmmac_verify_args(); 4263 4264 /* Allocate workqueue */ 4265 priv->wq = create_singlethread_workqueue("stmmac_wq"); 4266 if (!priv->wq) { 4267 dev_err(priv->device, "failed to create workqueue\n"); 4268 ret = -ENOMEM; 4269 goto error_wq; 4270 } 4271 4272 INIT_WORK(&priv->service_task, stmmac_service_task); 4273 4274 /* Override with kernel parameters if supplied XXX CRS XXX 4275 * this needs to have multiple instances 4276 */ 4277 if ((phyaddr >= 0) && (phyaddr <= 31)) 4278 priv->plat->phy_addr = phyaddr; 4279 4280 if (priv->plat->stmmac_rst) { 4281 ret = reset_control_assert(priv->plat->stmmac_rst); 4282 reset_control_deassert(priv->plat->stmmac_rst); 4283 /* Some reset controllers have only reset callback instead of 4284 * assert + deassert callbacks pair. 4285 */ 4286 if (ret == -ENOTSUPP) 4287 reset_control_reset(priv->plat->stmmac_rst); 4288 } 4289 4290 /* Init MAC and get the capabilities */ 4291 ret = stmmac_hw_init(priv); 4292 if (ret) 4293 goto error_hw_init; 4294 4295 stmmac_check_ether_addr(priv); 4296 4297 /* Configure real RX and TX queues */ 4298 netif_set_real_num_rx_queues(ndev, priv->plat->rx_queues_to_use); 4299 netif_set_real_num_tx_queues(ndev, priv->plat->tx_queues_to_use); 4300 4301 ndev->netdev_ops = &stmmac_netdev_ops; 4302 4303 ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | 4304 NETIF_F_RXCSUM; 4305 4306 ret = stmmac_tc_init(priv, priv); 4307 if (!ret) { 4308 ndev->hw_features |= NETIF_F_HW_TC; 4309 } 4310 4311 if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) { 4312 ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6; 4313 priv->tso = true; 4314 dev_info(priv->device, "TSO feature enabled\n"); 4315 } 4316 ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA; 4317 ndev->watchdog_timeo = msecs_to_jiffies(watchdog); 4318 #ifdef STMMAC_VLAN_TAG_USED 4319 /* Both mac100 and gmac support receive VLAN tag detection */ 4320 ndev->features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX; 4321 #endif 4322 priv->msg_enable = netif_msg_init(debug, default_msg_level); 4323 4324 /* MTU range: 46 - hw-specific max */ 4325 ndev->min_mtu = ETH_ZLEN - ETH_HLEN; 4326 if ((priv->plat->enh_desc) || (priv->synopsys_id >= DWMAC_CORE_4_00)) 4327 ndev->max_mtu = JUMBO_LEN; 4328 else if (priv->plat->has_xgmac) 4329 ndev->max_mtu = XGMAC_JUMBO_LEN; 4330 else 4331 ndev->max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN); 4332 /* Will not overwrite ndev->max_mtu if plat->maxmtu > ndev->max_mtu 4333 * as well as plat->maxmtu < ndev->min_mtu which is a invalid range. 
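 * (For example, a platform that sets plat->maxmtu = 1500 clamps max_mtu
 * to 1500 even though the core itself may support jumbo frames.)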
4334 */ 4335 if ((priv->plat->maxmtu < ndev->max_mtu) && 4336 (priv->plat->maxmtu >= ndev->min_mtu)) 4337 ndev->max_mtu = priv->plat->maxmtu; 4338 else if (priv->plat->maxmtu < ndev->min_mtu) 4339 dev_warn(priv->device, 4340 "%s: warning: maxmtu having invalid value (%d)\n", 4341 __func__, priv->plat->maxmtu); 4342 4343 if (flow_ctrl) 4344 priv->flow_ctrl = FLOW_AUTO; /* RX/TX pause on */ 4345 4346 /* Setup channels NAPI */ 4347 maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use); 4348 4349 for (queue = 0; queue < maxq; queue++) { 4350 struct stmmac_channel *ch = &priv->channel[queue]; 4351 4352 ch->priv_data = priv; 4353 ch->index = queue; 4354 4355 if (queue < priv->plat->rx_queues_to_use) { 4356 netif_napi_add(ndev, &ch->rx_napi, stmmac_napi_poll_rx, 4357 NAPI_POLL_WEIGHT); 4358 } 4359 if (queue < priv->plat->tx_queues_to_use) { 4360 netif_napi_add(ndev, &ch->tx_napi, stmmac_napi_poll_tx, 4361 NAPI_POLL_WEIGHT); 4362 } 4363 } 4364 4365 mutex_init(&priv->lock); 4366 4367 /* If a specific clk_csr value is passed from the platform 4368 * this means that the CSR Clock Range selection cannot be 4369 * changed at run-time and it is fixed. Viceversa the driver'll try to 4370 * set the MDC clock dynamically according to the csr actual 4371 * clock input. 4372 */ 4373 if (priv->plat->clk_csr >= 0) 4374 priv->clk_csr = priv->plat->clk_csr; 4375 else 4376 stmmac_clk_csr_set(priv); 4377 4378 stmmac_check_pcs_mode(priv); 4379 4380 if (priv->hw->pcs != STMMAC_PCS_RGMII && 4381 priv->hw->pcs != STMMAC_PCS_TBI && 4382 priv->hw->pcs != STMMAC_PCS_RTBI) { 4383 /* MDIO bus Registration */ 4384 ret = stmmac_mdio_register(ndev); 4385 if (ret < 0) { 4386 dev_err(priv->device, 4387 "%s: MDIO bus (id: %d) registration failed", 4388 __func__, priv->plat->bus_id); 4389 goto error_mdio_register; 4390 } 4391 } 4392 4393 ret = register_netdev(ndev); 4394 if (ret) { 4395 dev_err(priv->device, "%s: ERROR %i registering the device\n", 4396 __func__, ret); 4397 goto error_netdev_register; 4398 } 4399 4400 #ifdef CONFIG_DEBUG_FS 4401 ret = stmmac_init_fs(ndev); 4402 if (ret < 0) 4403 netdev_warn(priv->dev, "%s: failed debugFS registration\n", 4404 __func__); 4405 #endif 4406 4407 return ret; 4408 4409 error_netdev_register: 4410 if (priv->hw->pcs != STMMAC_PCS_RGMII && 4411 priv->hw->pcs != STMMAC_PCS_TBI && 4412 priv->hw->pcs != STMMAC_PCS_RTBI) 4413 stmmac_mdio_unregister(ndev); 4414 error_mdio_register: 4415 for (queue = 0; queue < maxq; queue++) { 4416 struct stmmac_channel *ch = &priv->channel[queue]; 4417 4418 if (queue < priv->plat->rx_queues_to_use) 4419 netif_napi_del(&ch->rx_napi); 4420 if (queue < priv->plat->tx_queues_to_use) 4421 netif_napi_del(&ch->tx_napi); 4422 } 4423 error_hw_init: 4424 destroy_workqueue(priv->wq); 4425 error_wq: 4426 free_netdev(ndev); 4427 4428 return ret; 4429 } 4430 EXPORT_SYMBOL_GPL(stmmac_dvr_probe); 4431 4432 /** 4433 * stmmac_dvr_remove 4434 * @dev: device pointer 4435 * Description: this function resets the TX/RX processes, disables the MAC RX/TX 4436 * changes the link status, releases the DMA descriptor rings. 
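 * It is the counterpart of stmmac_dvr_probe() and is normally invoked from
 * the remove hook of the bus glue that registered the device.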
4437 */ 4438 int stmmac_dvr_remove(struct device *dev) 4439 { 4440 struct net_device *ndev = dev_get_drvdata(dev); 4441 struct stmmac_priv *priv = netdev_priv(ndev); 4442 4443 netdev_info(priv->dev, "%s: removing driver", __func__); 4444 4445 #ifdef CONFIG_DEBUG_FS 4446 stmmac_exit_fs(ndev); 4447 #endif 4448 stmmac_stop_all_dma(priv); 4449 4450 stmmac_mac_set(priv, priv->ioaddr, false); 4451 netif_carrier_off(ndev); 4452 unregister_netdev(ndev); 4453 if (priv->plat->stmmac_rst) 4454 reset_control_assert(priv->plat->stmmac_rst); 4455 clk_disable_unprepare(priv->plat->pclk); 4456 clk_disable_unprepare(priv->plat->stmmac_clk); 4457 if (priv->hw->pcs != STMMAC_PCS_RGMII && 4458 priv->hw->pcs != STMMAC_PCS_TBI && 4459 priv->hw->pcs != STMMAC_PCS_RTBI) 4460 stmmac_mdio_unregister(ndev); 4461 destroy_workqueue(priv->wq); 4462 mutex_destroy(&priv->lock); 4463 free_netdev(ndev); 4464 4465 return 0; 4466 } 4467 EXPORT_SYMBOL_GPL(stmmac_dvr_remove); 4468 4469 /** 4470 * stmmac_suspend - suspend callback 4471 * @dev: device pointer 4472 * Description: this is the function to suspend the device and it is called 4473 * by the platform driver to stop the network queue, release the resources, 4474 * program the PMT register (for WoL), clean and release driver resources. 4475 */ 4476 int stmmac_suspend(struct device *dev) 4477 { 4478 struct net_device *ndev = dev_get_drvdata(dev); 4479 struct stmmac_priv *priv = netdev_priv(ndev); 4480 4481 if (!ndev || !netif_running(ndev)) 4482 return 0; 4483 4484 if (ndev->phydev) 4485 phy_stop(ndev->phydev); 4486 4487 mutex_lock(&priv->lock); 4488 4489 netif_device_detach(ndev); 4490 stmmac_stop_all_queues(priv); 4491 4492 stmmac_disable_all_queues(priv); 4493 4494 /* Stop TX/RX DMA */ 4495 stmmac_stop_all_dma(priv); 4496 4497 /* Enable Power down mode by programming the PMT regs */ 4498 if (device_may_wakeup(priv->device)) { 4499 stmmac_pmt(priv, priv->hw, priv->wolopts); 4500 priv->irq_wake = 1; 4501 } else { 4502 stmmac_mac_set(priv, priv->ioaddr, false); 4503 pinctrl_pm_select_sleep_state(priv->device); 4504 /* Disable clock in case of PWM is off */ 4505 clk_disable(priv->plat->pclk); 4506 clk_disable(priv->plat->stmmac_clk); 4507 } 4508 mutex_unlock(&priv->lock); 4509 4510 priv->oldlink = false; 4511 priv->speed = SPEED_UNKNOWN; 4512 priv->oldduplex = DUPLEX_UNKNOWN; 4513 return 0; 4514 } 4515 EXPORT_SYMBOL_GPL(stmmac_suspend); 4516 4517 /** 4518 * stmmac_reset_queues_param - reset queue parameters 4519 * @dev: device pointer 4520 */ 4521 static void stmmac_reset_queues_param(struct stmmac_priv *priv) 4522 { 4523 u32 rx_cnt = priv->plat->rx_queues_to_use; 4524 u32 tx_cnt = priv->plat->tx_queues_to_use; 4525 u32 queue; 4526 4527 for (queue = 0; queue < rx_cnt; queue++) { 4528 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; 4529 4530 rx_q->cur_rx = 0; 4531 rx_q->dirty_rx = 0; 4532 } 4533 4534 for (queue = 0; queue < tx_cnt; queue++) { 4535 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue]; 4536 4537 tx_q->cur_tx = 0; 4538 tx_q->dirty_tx = 0; 4539 tx_q->mss = 0; 4540 } 4541 } 4542 4543 /** 4544 * stmmac_resume - resume callback 4545 * @dev: device pointer 4546 * Description: when resume this function is invoked to setup the DMA and CORE 4547 * in a usable state. 
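 * It reverses stmmac_suspend(): the PMT is cleared (or the clocks and
 * pinctrl state restored), then the queue state and descriptors are reset
 * and the MAC/DMA reprogrammed via stmmac_hw_setup() before the queues
 * are restarted.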
4548 */ 4549 int stmmac_resume(struct device *dev) 4550 { 4551 struct net_device *ndev = dev_get_drvdata(dev); 4552 struct stmmac_priv *priv = netdev_priv(ndev); 4553 4554 if (!netif_running(ndev)) 4555 return 0; 4556 4557 /* Power Down bit, into the PM register, is cleared 4558 * automatically as soon as a magic packet or a Wake-up frame 4559 * is received. Anyway, it's better to manually clear 4560 * this bit because it can generate problems while resuming 4561 * from another devices (e.g. serial console). 4562 */ 4563 if (device_may_wakeup(priv->device)) { 4564 mutex_lock(&priv->lock); 4565 stmmac_pmt(priv, priv->hw, 0); 4566 mutex_unlock(&priv->lock); 4567 priv->irq_wake = 0; 4568 } else { 4569 pinctrl_pm_select_default_state(priv->device); 4570 /* enable the clk previously disabled */ 4571 clk_enable(priv->plat->stmmac_clk); 4572 clk_enable(priv->plat->pclk); 4573 /* reset the phy so that it's ready */ 4574 if (priv->mii) 4575 stmmac_mdio_reset(priv->mii); 4576 } 4577 4578 netif_device_attach(ndev); 4579 4580 mutex_lock(&priv->lock); 4581 4582 stmmac_reset_queues_param(priv); 4583 4584 stmmac_clear_descriptors(priv); 4585 4586 stmmac_hw_setup(ndev, false); 4587 stmmac_init_tx_coalesce(priv); 4588 stmmac_set_rx_mode(ndev); 4589 4590 stmmac_enable_all_queues(priv); 4591 4592 stmmac_start_all_queues(priv); 4593 4594 mutex_unlock(&priv->lock); 4595 4596 if (ndev->phydev) 4597 phy_start(ndev->phydev); 4598 4599 return 0; 4600 } 4601 EXPORT_SYMBOL_GPL(stmmac_resume); 4602 4603 #ifndef MODULE 4604 static int __init stmmac_cmdline_opt(char *str) 4605 { 4606 char *opt; 4607 4608 if (!str || !*str) 4609 return -EINVAL; 4610 while ((opt = strsep(&str, ",")) != NULL) { 4611 if (!strncmp(opt, "debug:", 6)) { 4612 if (kstrtoint(opt + 6, 0, &debug)) 4613 goto err; 4614 } else if (!strncmp(opt, "phyaddr:", 8)) { 4615 if (kstrtoint(opt + 8, 0, &phyaddr)) 4616 goto err; 4617 } else if (!strncmp(opt, "buf_sz:", 7)) { 4618 if (kstrtoint(opt + 7, 0, &buf_sz)) 4619 goto err; 4620 } else if (!strncmp(opt, "tc:", 3)) { 4621 if (kstrtoint(opt + 3, 0, &tc)) 4622 goto err; 4623 } else if (!strncmp(opt, "watchdog:", 9)) { 4624 if (kstrtoint(opt + 9, 0, &watchdog)) 4625 goto err; 4626 } else if (!strncmp(opt, "flow_ctrl:", 10)) { 4627 if (kstrtoint(opt + 10, 0, &flow_ctrl)) 4628 goto err; 4629 } else if (!strncmp(opt, "pause:", 6)) { 4630 if (kstrtoint(opt + 6, 0, &pause)) 4631 goto err; 4632 } else if (!strncmp(opt, "eee_timer:", 10)) { 4633 if (kstrtoint(opt + 10, 0, &eee_timer)) 4634 goto err; 4635 } else if (!strncmp(opt, "chain_mode:", 11)) { 4636 if (kstrtoint(opt + 11, 0, &chain_mode)) 4637 goto err; 4638 } 4639 } 4640 return 0; 4641 4642 err: 4643 pr_err("%s: ERROR broken module parameter conversion", __func__); 4644 return -EINVAL; 4645 } 4646 4647 __setup("stmmaceth=", stmmac_cmdline_opt); 4648 #endif /* MODULE */ 4649 4650 static int __init stmmac_init(void) 4651 { 4652 #ifdef CONFIG_DEBUG_FS 4653 /* Create debugfs main directory if it doesn't exist yet */ 4654 if (!stmmac_fs_dir) { 4655 stmmac_fs_dir = debugfs_create_dir(STMMAC_RESOURCE_NAME, NULL); 4656 4657 if (!stmmac_fs_dir || IS_ERR(stmmac_fs_dir)) { 4658 pr_err("ERROR %s, debugfs create directory failed\n", 4659 STMMAC_RESOURCE_NAME); 4660 4661 return -ENOMEM; 4662 } 4663 } 4664 #endif 4665 4666 return 0; 4667 } 4668 4669 static void __exit stmmac_exit(void) 4670 { 4671 #ifdef CONFIG_DEBUG_FS 4672 debugfs_remove_recursive(stmmac_fs_dir); 4673 #endif 4674 } 4675 4676 module_init(stmmac_init) 4677 module_exit(stmmac_exit) 4678 4679 
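/*
 * Illustrative example: because stmmac_cmdline_opt() above is only compiled
 * when the driver is built in (#ifndef MODULE), the same knobs exposed as
 * module parameters can then be set on the kernel command line, e.g.:
 *
 *	stmmaceth=debug:16,phyaddr:1,eee_timer:2000,chain_mode:1
 *
 * Each "name:value" token is converted with kstrtoint(); unrecognised
 * tokens are silently ignored.
 */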
MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet device driver");
4680 MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
4681 MODULE_LICENSE("GPL");
4682