/*******************************************************************************
  This is the driver for the ST MAC 10/100/1000 on-chip Ethernet controllers.
  ST Ethernet IPs are built around a Synopsys IP Core.

	Copyright(C) 2007-2011 STMicroelectronics Ltd

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>

  Documentation available at:
	http://www.stlinux.com
  Support available at:
	https://bugzilla.stlinux.com/
*******************************************************************************/

#include <linux/clk.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/if_ether.h>
#include <linux/crc32.h>
#include <linux/mii.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/prefetch.h>
#include <linux/pinctrl/consumer.h>
#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#endif /* CONFIG_DEBUG_FS */
#include <linux/net_tstamp.h>
#include "stmmac_ptp.h"
#include "stmmac.h"
#include <linux/reset.h>
#include <linux/of_mdio.h>
#include "dwmac1000.h"

#define STMMAC_ALIGN(x)	L1_CACHE_ALIGN(x)

/* Module parameters */
#define TX_TIMEO	5000
static int watchdog = TX_TIMEO;
module_param(watchdog, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(watchdog, "Transmit timeout in milliseconds (default 5s)");

static int debug = -1;
module_param(debug, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)");

static int phyaddr = -1;
module_param(phyaddr, int, S_IRUGO);
MODULE_PARM_DESC(phyaddr, "Physical device address");

#define STMMAC_TX_THRESH	(DMA_TX_SIZE / 4)
#define STMMAC_RX_THRESH	(DMA_RX_SIZE / 4)

static int flow_ctrl = FLOW_OFF;
module_param(flow_ctrl, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(flow_ctrl, "Flow control ability [on/off]");

static int pause = PAUSE_TIME;
module_param(pause, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(pause, "Flow Control Pause Time");

#define TC_DEFAULT 64
static int tc = TC_DEFAULT;
module_param(tc, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(tc, "DMA threshold control value");

#define	DEFAULT_BUFSIZE	1536
static int buf_sz = DEFAULT_BUFSIZE;
module_param(buf_sz, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(buf_sz, "DMA buffer size");

#define	STMMAC_RX_COPYBREAK	256

static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
				      NETIF_MSG_LINK | NETIF_MSG_IFUP |
				      NETIF_MSG_IFDOWN | NETIF_MSG_TIMER);
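/* Note: 'debug' above is consumed by netif_msg_init() at probe time:
 * -1 keeps default_msg_level, 0 silences the driver, and a positive
 * value N enables the lowest N NETIF_MSG_* bits (so 16 turns on all
 * standard message types).
 */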
#define STMMAC_DEFAULT_LPI_TIMER	1000
static int eee_timer = STMMAC_DEFAULT_LPI_TIMER;
module_param(eee_timer, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec");
#define STMMAC_LPI_T(x) (jiffies + msecs_to_jiffies(x))

/* By default the driver uses the ring mode to manage tx and rx descriptors,
 * but this parameter lets the user force the chain mode instead.
 */
static unsigned int chain_mode;
module_param(chain_mode, int, S_IRUGO);
MODULE_PARM_DESC(chain_mode, "To use chain instead of ring mode");

static irqreturn_t stmmac_interrupt(int irq, void *dev_id);

#ifdef CONFIG_DEBUG_FS
static int stmmac_init_fs(struct net_device *dev);
static void stmmac_exit_fs(struct net_device *dev);
#endif

#define STMMAC_COAL_TIMER(x) (jiffies + usecs_to_jiffies(x))

/**
 * stmmac_verify_args - verify the driver parameters.
 * Description: it checks the driver parameters and sets a default in case of
 * errors.
 */
static void stmmac_verify_args(void)
{
	if (unlikely(watchdog < 0))
		watchdog = TX_TIMEO;
	if (unlikely((buf_sz < DEFAULT_BUFSIZE) || (buf_sz > BUF_SIZE_16KiB)))
		buf_sz = DEFAULT_BUFSIZE;
	if (unlikely(flow_ctrl > 1))
		flow_ctrl = FLOW_AUTO;
	else if (likely(flow_ctrl < 0))
		flow_ctrl = FLOW_OFF;
	if (unlikely((pause < 0) || (pause > 0xffff)))
		pause = PAUSE_TIME;
	if (eee_timer < 0)
		eee_timer = STMMAC_DEFAULT_LPI_TIMER;
}

/**
 * stmmac_clk_csr_set - dynamically set the MDC clock
 * @priv: driver private structure
 * Description: this is to dynamically set the MDC clock according to the csr
 * clock input.
 * Note:
 *	If a specific clk_csr value is passed from the platform
 *	this means that the CSR Clock Range selection cannot be
 *	changed at run-time and it is fixed (as reported in the driver
 *	documentation). Otherwise the driver will try to set the MDC
 *	clock dynamically according to the actual clock input.
 */
static void stmmac_clk_csr_set(struct stmmac_priv *priv)
{
	u32 clk_rate;

	clk_rate = clk_get_rate(priv->stmmac_clk);

	/* The platform-provided default clk_csr is assumed valid for all
	 * cases except the ones handled below. For values higher than the
	 * IEEE 802.3 specified frequency we cannot estimate the proper
	 * divider, as the frequency of clk_csr_i is not known. So we do
	 * not change the default divider.
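	 *
	 * For example, a 50 MHz clk_csr_i falls into the 35-60 MHz range
	 * below, so STMMAC_CSR_35_60M is selected and the core derives an
	 * MDC clock under the 2.5 MHz limit mandated by IEEE 802.3.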
	 */
	if (!(priv->clk_csr & MAC_CSR_H_FRQ_MASK)) {
		if (clk_rate < CSR_F_35M)
			priv->clk_csr = STMMAC_CSR_20_35M;
		else if ((clk_rate >= CSR_F_35M) && (clk_rate < CSR_F_60M))
			priv->clk_csr = STMMAC_CSR_35_60M;
		else if ((clk_rate >= CSR_F_60M) && (clk_rate < CSR_F_100M))
			priv->clk_csr = STMMAC_CSR_60_100M;
		else if ((clk_rate >= CSR_F_100M) && (clk_rate < CSR_F_150M))
			priv->clk_csr = STMMAC_CSR_100_150M;
		else if ((clk_rate >= CSR_F_150M) && (clk_rate < CSR_F_250M))
			priv->clk_csr = STMMAC_CSR_150_250M;
		else if ((clk_rate >= CSR_F_250M) && (clk_rate < CSR_F_300M))
			priv->clk_csr = STMMAC_CSR_250_300M;
	}
}

static void print_pkt(unsigned char *buf, int len)
{
	pr_debug("len = %d byte, buf addr: 0x%p\n", len, buf);
	print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, buf, len);
}

static inline u32 stmmac_tx_avail(struct stmmac_priv *priv)
{
	unsigned int avail;

	if (priv->dirty_tx > priv->cur_tx)
		avail = priv->dirty_tx - priv->cur_tx - 1;
	else
		avail = DMA_TX_SIZE - priv->cur_tx + priv->dirty_tx - 1;

	return avail;
}

static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv)
{
	unsigned int dirty;

	if (priv->dirty_rx <= priv->cur_rx)
		dirty = priv->cur_rx - priv->dirty_rx;
	else
		dirty = DMA_RX_SIZE - priv->dirty_rx + priv->cur_rx;

	return dirty;
}

/**
 * stmmac_hw_fix_mac_speed - callback for speed selection
 * @priv: driver private structure
 * Description: on some platforms (e.g. ST), some HW system configuration
 * registers have to be set according to the link speed negotiated.
 */
static inline void stmmac_hw_fix_mac_speed(struct stmmac_priv *priv)
{
	struct phy_device *phydev = priv->phydev;

	if (likely(priv->plat->fix_mac_speed))
		priv->plat->fix_mac_speed(priv->plat->bsp_priv, phydev->speed);
}

/**
 * stmmac_enable_eee_mode - check and enter in LPI mode
 * @priv: driver private structure
 * Description: this function is to verify and enter in LPI mode in case of
 * EEE.
 */
static void stmmac_enable_eee_mode(struct stmmac_priv *priv)
{
	/* Check and enter in LPI mode */
	if ((priv->dirty_tx == priv->cur_tx) &&
	    (priv->tx_path_in_lpi_mode == false))
		priv->hw->mac->set_eee_mode(priv->hw);
}

/**
 * stmmac_disable_eee_mode - disable and exit from LPI mode
 * @priv: driver private structure
 * Description: this function is to exit and disable EEE in case the
 * LPI state is true. This is called by the xmit.
 */
void stmmac_disable_eee_mode(struct stmmac_priv *priv)
{
	priv->hw->mac->reset_eee_mode(priv->hw);
	del_timer_sync(&priv->eee_ctrl_timer);
	priv->tx_path_in_lpi_mode = false;
}

/**
 * stmmac_eee_ctrl_timer - EEE TX SW timer.
 * @arg : data hook
 * Description:
 *  if there is no data transfer and if we are not in LPI state,
 *  then the MAC Transmitter can be moved to LPI state.
 */
static void stmmac_eee_ctrl_timer(unsigned long arg)
{
	struct stmmac_priv *priv = (struct stmmac_priv *)arg;

	stmmac_enable_eee_mode(priv);
	mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer));
}

/**
 * stmmac_eee_init - init EEE
 * @priv: driver private structure
 * Description:
 *  if the GMAC supports the EEE (from the HW cap reg) and the phy device
 *  can also manage EEE, this function enables the LPI state and starts
 *  the related timer.
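 * Return: true if EEE was actually activated, false otherwise.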
 */
bool stmmac_eee_init(struct stmmac_priv *priv)
{
	char *phy_bus_name = priv->plat->phy_bus_name;
	unsigned long flags;
	bool ret = false;

	/* Using PCS we cannot deal with the phy registers at this stage
	 * so we do not support extra features like EEE.
	 */
	if ((priv->pcs == STMMAC_PCS_RGMII) || (priv->pcs == STMMAC_PCS_TBI) ||
	    (priv->pcs == STMMAC_PCS_RTBI))
		goto out;

	/* Never init EEE if a switch is attached */
	if (phy_bus_name && (!strcmp(phy_bus_name, "fixed")))
		goto out;

	/* MAC core supports the EEE feature. */
	if (priv->dma_cap.eee) {
		int tx_lpi_timer = priv->tx_lpi_timer;

		/* Check if the PHY supports EEE */
		if (phy_init_eee(priv->phydev, 1)) {
			/* To manage at run-time if the EEE cannot be supported
			 * anymore (for example because the lp caps have been
			 * changed).
			 * In that case the driver disables its own timers.
			 */
			spin_lock_irqsave(&priv->lock, flags);
			if (priv->eee_active) {
				pr_debug("stmmac: disable EEE\n");
				del_timer_sync(&priv->eee_ctrl_timer);
				priv->hw->mac->set_eee_timer(priv->hw, 0,
							     tx_lpi_timer);
			}
			priv->eee_active = 0;
			spin_unlock_irqrestore(&priv->lock, flags);
			goto out;
		}
		/* Activate the EEE and start timers */
		spin_lock_irqsave(&priv->lock, flags);
		if (!priv->eee_active) {
			priv->eee_active = 1;
			setup_timer(&priv->eee_ctrl_timer,
				    stmmac_eee_ctrl_timer,
				    (unsigned long)priv);
			mod_timer(&priv->eee_ctrl_timer,
				  STMMAC_LPI_T(eee_timer));

			priv->hw->mac->set_eee_timer(priv->hw,
						     STMMAC_DEFAULT_LIT_LS,
						     tx_lpi_timer);
		}
		/* Set HW EEE according to the speed */
		priv->hw->mac->set_eee_pls(priv->hw, priv->phydev->link);

		ret = true;
		spin_unlock_irqrestore(&priv->lock, flags);

		pr_debug("stmmac: Energy-Efficient Ethernet initialized\n");
	}
out:
	return ret;
}

/**
 * stmmac_get_tx_hwtstamp - get HW TX timestamps
 * @priv: driver private structure
 * @entry: descriptor index to be used.
 * @skb: the socket buffer
 * Description:
 * This function reads the timestamp from the descriptor, performs some
 * sanity checks and passes it to the stack.
 */
static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv,
				   unsigned int entry, struct sk_buff *skb)
{
	struct skb_shared_hwtstamps shhwtstamp;
	u64 ns;
	void *desc = NULL;

	if (!priv->hwts_tx_en)
		return;

	/* exit if skb doesn't support hw tstamp */
	if (likely(!skb || !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)))
		return;

	if (priv->adv_ts)
		desc = (priv->dma_etx + entry);
	else
		desc = (priv->dma_tx + entry);

	/* check tx tstamp status */
	if (!priv->hw->desc->get_tx_timestamp_status((struct dma_desc *)desc))
		return;

	/* get the valid tstamp */
	ns = priv->hw->desc->get_timestamp(desc, priv->adv_ts);

	memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
	shhwtstamp.hwtstamp = ns_to_ktime(ns);
	/* pass tstamp to stack */
	skb_tstamp_tx(skb, &shhwtstamp);

	return;
}

/**
 * stmmac_get_rx_hwtstamp - get HW RX timestamps
 * @priv: driver private structure
 * @entry: descriptor index to be used.
 * @skb: the socket buffer
 * Description:
 * This function reads the received packet's timestamp from the descriptor
 * and passes it to the stack. It also performs some sanity checks.
 */
static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv,
				   unsigned int entry, struct sk_buff *skb)
{
	struct skb_shared_hwtstamps *shhwtstamp = NULL;
	u64 ns;
	void *desc = NULL;

	if (!priv->hwts_rx_en)
		return;

	if (priv->adv_ts)
		desc = (priv->dma_erx + entry);
	else
		desc = (priv->dma_rx + entry);

	/* exit if rx tstamp is not valid */
	if (!priv->hw->desc->get_rx_timestamp_status(desc, priv->adv_ts))
		return;

	/* get valid tstamp */
	ns = priv->hw->desc->get_timestamp(desc, priv->adv_ts);
	shhwtstamp = skb_hwtstamps(skb);
	memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
	shhwtstamp->hwtstamp = ns_to_ktime(ns);
}

/**
 * stmmac_hwtstamp_ioctl - control hardware timestamping.
 * @dev: device pointer.
 * @ifr: An IOCTL specific structure, that can contain a pointer to
 * a proprietary structure used to pass information to the driver.
 * Description:
 * This function configures the MAC to enable/disable both outgoing(TX)
 * and incoming(RX) packets time stamping based on user input.
 * Return Value:
 * 0 on success and an appropriate -ve integer on failure.
 */
static int stmmac_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	struct hwtstamp_config config;
	struct timespec64 now;
	u64 temp = 0;
	u32 ptp_v2 = 0;
	u32 tstamp_all = 0;
	u32 ptp_over_ipv4_udp = 0;
	u32 ptp_over_ipv6_udp = 0;
	u32 ptp_over_ethernet = 0;
	u32 snap_type_sel = 0;
	u32 ts_master_en = 0;
	u32 ts_event_en = 0;
	u32 value = 0;
	u32 sec_inc;

	if (!(priv->dma_cap.time_stamp || priv->adv_ts)) {
		netdev_alert(priv->dev, "No support for HW time stamping\n");
		priv->hwts_tx_en = 0;
		priv->hwts_rx_en = 0;

		return -EOPNOTSUPP;
	}

	if (copy_from_user(&config, ifr->ifr_data,
			   sizeof(struct hwtstamp_config)))
		return -EFAULT;

	pr_debug("%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n",
		 __func__, config.flags, config.tx_type, config.rx_filter);

	/* reserved for future extensions */
	if (config.flags)
		return -EINVAL;

	if (config.tx_type != HWTSTAMP_TX_OFF &&
	    config.tx_type != HWTSTAMP_TX_ON)
		return -ERANGE;

	if (priv->adv_ts) {
		switch (config.rx_filter) {
		case HWTSTAMP_FILTER_NONE:
			/* time stamp no incoming packet at all */
			config.rx_filter = HWTSTAMP_FILTER_NONE;
			break;

		case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
			/* PTP v1, UDP, any kind of event packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
			/* take time stamp for all event messages */
			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
			/* PTP v1, UDP, Sync packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
			/* take time stamp for SYNC messages only */
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
			/* PTP v1, UDP, Delay_req packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
			/* take time stamp for Delay_Req messages only */
			ts_master_en = PTP_TCR_TSMSTRENA;
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
			/* PTP v2, UDP, any kind of event packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for all event messages */
			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
			/* PTP v2, UDP, Sync packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for SYNC messages only */
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
			/* PTP v2, UDP, Delay_req packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for Delay_Req messages only */
			ts_master_en = PTP_TCR_TSMSTRENA;
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_EVENT:
			/* PTP v2/802.AS1 any layer, any kind of event packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for all event messages */
			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			ptp_over_ethernet = PTP_TCR_TSIPENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_SYNC:
			/* PTP v2/802.AS1, any layer, Sync packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for SYNC messages only */
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			ptp_over_ethernet = PTP_TCR_TSIPENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
			/* PTP v2/802.AS1, any layer, Delay_req packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for Delay_Req messages only */
			ts_master_en = PTP_TCR_TSMSTRENA;
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			ptp_over_ethernet = PTP_TCR_TSIPENA;
			break;

		case HWTSTAMP_FILTER_ALL:
			/* time stamp any incoming packet */
			config.rx_filter = HWTSTAMP_FILTER_ALL;
			tstamp_all = PTP_TCR_TSENALL;
			break;

		default:
			return -ERANGE;
		}
	} else {
		switch (config.rx_filter) {
		case HWTSTAMP_FILTER_NONE:
			config.rx_filter = HWTSTAMP_FILTER_NONE;
			break;
		default:
			/* PTP v1, UDP, any kind of event packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
			break;
		}
	}
	priv->hwts_rx_en = ((config.rx_filter == HWTSTAMP_FILTER_NONE) ? 0 : 1);
	priv->hwts_tx_en = config.tx_type == HWTSTAMP_TX_ON;

	if (!priv->hwts_tx_en && !priv->hwts_rx_en)
		priv->hw->ptp->config_hw_tstamping(priv->ioaddr, 0);
	else {
		value = (PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | PTP_TCR_TSCTRLSSR |
			 tstamp_all | ptp_v2 | ptp_over_ethernet |
			 ptp_over_ipv6_udp | ptp_over_ipv4_udp | ts_event_en |
			 ts_master_en | snap_type_sel);
		priv->hw->ptp->config_hw_tstamping(priv->ioaddr, value);

		/* program Sub Second Increment reg */
		sec_inc = priv->hw->ptp->config_sub_second_increment(
			priv->ioaddr, priv->clk_ptp_rate);
		temp = div_u64(1000000000ULL, sec_inc);

		/* Calculate the default addend:
		 * addend = 2^32 * (1e9 / sec_inc) / clk_ptp_rate,
		 * i.e. the ratio between the rate the counter must tick
		 * at and the actual PTP reference clock rate.
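		 *
		 * Illustrative numbers (not from this driver): with
		 * sec_inc = 20 ns and a 62.5 MHz clk_ptp_rate, addend =
		 * 2^32 * 5e7 / 6.25e7, i.e. roughly 0.8 * 2^32.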
		 */
		temp = (u64)(temp << 32);
		priv->default_addend = div_u64(temp, priv->clk_ptp_rate);
		priv->hw->ptp->config_addend(priv->ioaddr,
					     priv->default_addend);

		/* initialize system time */
		ktime_get_real_ts64(&now);

		/* lower 32 bits of tv_sec are safe until y2106 */
		priv->hw->ptp->init_systime(priv->ioaddr, (u32)now.tv_sec,
					    now.tv_nsec);
	}

	return copy_to_user(ifr->ifr_data, &config,
			    sizeof(struct hwtstamp_config)) ? -EFAULT : 0;
}

/**
 * stmmac_init_ptp - init PTP
 * @priv: driver private structure
 * Description: this is to verify if the HW supports the PTPv1 or PTPv2.
 * This is done by looking at the HW cap. register.
 * This function also registers the ptp driver.
 */
static int stmmac_init_ptp(struct stmmac_priv *priv)
{
	if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
		return -EOPNOTSUPP;

	/* Fall back to the main clock if no PTP ref clock is passed */
	priv->clk_ptp_ref = devm_clk_get(priv->device, "clk_ptp_ref");
	if (IS_ERR(priv->clk_ptp_ref)) {
		priv->clk_ptp_rate = clk_get_rate(priv->stmmac_clk);
		priv->clk_ptp_ref = NULL;
	} else {
		clk_prepare_enable(priv->clk_ptp_ref);
		priv->clk_ptp_rate = clk_get_rate(priv->clk_ptp_ref);
	}

	priv->adv_ts = 0;
	if (priv->dma_cap.atime_stamp && priv->extend_desc)
		priv->adv_ts = 1;

	if (netif_msg_hw(priv) && priv->dma_cap.time_stamp)
		pr_debug("IEEE 1588-2002 Time Stamp supported\n");

	if (netif_msg_hw(priv) && priv->adv_ts)
		pr_debug("IEEE 1588-2008 Advanced Time Stamp supported\n");

	priv->hw->ptp = &stmmac_ptp;
	priv->hwts_tx_en = 0;
	priv->hwts_rx_en = 0;

	return stmmac_ptp_register(priv);
}

static void stmmac_release_ptp(struct stmmac_priv *priv)
{
	if (priv->clk_ptp_ref)
		clk_disable_unprepare(priv->clk_ptp_ref);
	stmmac_ptp_unregister(priv);
}

/**
 * stmmac_adjust_link - adjusts the link parameters
 * @dev: net device structure
 * Description: this is the helper called by the physical abstraction layer
 * drivers to communicate the phy link status. According to the speed and
 * duplex this driver can invoke registered glue-logic as well.
 * It also invokes the EEE initialization because it could happen when
 * switching to a different network (that is EEE capable).
 */
static void stmmac_adjust_link(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	struct phy_device *phydev = priv->phydev;
	unsigned long flags;
	int new_state = 0;
	unsigned int fc = priv->flow_ctrl, pause_time = priv->pause;

	if (phydev == NULL)
		return;

	spin_lock_irqsave(&priv->lock, flags);

	if (phydev->link) {
		u32 ctrl = readl(priv->ioaddr + MAC_CTRL_REG);

		/* Now we make sure that we can be in full duplex mode.
		 * If not, we operate in half-duplex mode.
		 */
		if (phydev->duplex != priv->oldduplex) {
			new_state = 1;
			if (!(phydev->duplex))
				ctrl &= ~priv->hw->link.duplex;
			else
				ctrl |= priv->hw->link.duplex;
			priv->oldduplex = phydev->duplex;
		}
		/* Flow Control operation */
		if (phydev->pause)
			priv->hw->mac->flow_ctrl(priv->hw, phydev->duplex,
						 fc, pause_time);

		if (phydev->speed != priv->speed) {
			new_state = 1;
			switch (phydev->speed) {
			case 1000:
				if (likely(priv->plat->has_gmac))
					ctrl &= ~priv->hw->link.port;
				stmmac_hw_fix_mac_speed(priv);
				break;
			case 100:
			case 10:
				if (priv->plat->has_gmac) {
					ctrl |= priv->hw->link.port;
					if (phydev->speed == SPEED_100)
						ctrl |= priv->hw->link.speed;
					else
						ctrl &= ~(priv->hw->link.speed);
				} else {
					ctrl &= ~priv->hw->link.port;
				}
				stmmac_hw_fix_mac_speed(priv);
				break;
			default:
				if (netif_msg_link(priv))
					pr_warn("%s: Speed (%d) not 10/100\n",
						dev->name, phydev->speed);
				break;
			}

			priv->speed = phydev->speed;
		}

		writel(ctrl, priv->ioaddr + MAC_CTRL_REG);

		if (!priv->oldlink) {
			new_state = 1;
			priv->oldlink = 1;
		}
	} else if (priv->oldlink) {
		new_state = 1;
		priv->oldlink = 0;
		priv->speed = 0;
		priv->oldduplex = -1;
	}

	if (new_state && netif_msg_link(priv))
		phy_print_status(phydev);

	spin_unlock_irqrestore(&priv->lock, flags);

	/* At this stage, it could be needed to setup the EEE or adjust some
	 * MAC related HW registers.
	 */
	priv->eee_enabled = stmmac_eee_init(priv);
}

/**
 * stmmac_check_pcs_mode - verify if RGMII/SGMII is supported
 * @priv: driver private structure
 * Description: this is to verify if the HW supports the PCS.
 * Physical Coding Sublayer (PCS) interface that can be used when the MAC is
 * configured for the TBI, RTBI, or SGMII PHY interface.
 */
static void stmmac_check_pcs_mode(struct stmmac_priv *priv)
{
	int interface = priv->plat->interface;

	if (priv->dma_cap.pcs) {
		if ((interface == PHY_INTERFACE_MODE_RGMII) ||
		    (interface == PHY_INTERFACE_MODE_RGMII_ID) ||
		    (interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
		    (interface == PHY_INTERFACE_MODE_RGMII_TXID)) {
			pr_debug("STMMAC: PCS RGMII support enabled\n");
			priv->pcs = STMMAC_PCS_RGMII;
		} else if (interface == PHY_INTERFACE_MODE_SGMII) {
			pr_debug("STMMAC: PCS SGMII support enabled\n");
			priv->pcs = STMMAC_PCS_SGMII;
		}
	}
}

/**
 * stmmac_init_phy - PHY initialization
 * @dev: net device structure
 * Description: it initializes the driver's PHY state, and attaches the PHY
 * to the mac driver.
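 * For non-DT platforms the PHY is matched by a bus string built with
 * PHY_ID_FMT, e.g. "stmmac-1:05" for bus id 1 and PHY address 5.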
 * Return value:
 * 0 on success
 */
static int stmmac_init_phy(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	struct phy_device *phydev;
	char phy_id_fmt[MII_BUS_ID_SIZE + 3];
	char bus_id[MII_BUS_ID_SIZE];
	int interface = priv->plat->interface;
	int max_speed = priv->plat->max_speed;

	priv->oldlink = 0;
	priv->speed = 0;
	priv->oldduplex = -1;

	if (priv->plat->phy_node) {
		phydev = of_phy_connect(dev, priv->plat->phy_node,
					&stmmac_adjust_link, 0, interface);
	} else {
		if (priv->plat->phy_bus_name)
			snprintf(bus_id, MII_BUS_ID_SIZE, "%s-%x",
				 priv->plat->phy_bus_name, priv->plat->bus_id);
		else
			snprintf(bus_id, MII_BUS_ID_SIZE, "stmmac-%x",
				 priv->plat->bus_id);

		snprintf(phy_id_fmt, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, bus_id,
			 priv->plat->phy_addr);
		pr_debug("stmmac_init_phy: trying to attach to %s\n",
			 phy_id_fmt);

		phydev = phy_connect(dev, phy_id_fmt, &stmmac_adjust_link,
				     interface);
	}

	if (IS_ERR_OR_NULL(phydev)) {
		pr_err("%s: Could not attach to PHY\n", dev->name);
		if (!phydev)
			return -ENODEV;

		return PTR_ERR(phydev);
	}

	/* Stop advertising 1000BASE capability if interface is not GMII */
	if ((interface == PHY_INTERFACE_MODE_MII) ||
	    (interface == PHY_INTERFACE_MODE_RMII) ||
	    (max_speed < 1000 && max_speed > 0))
		phydev->advertising &= ~(SUPPORTED_1000baseT_Half |
					 SUPPORTED_1000baseT_Full);

	/*
	 * Broken HW is sometimes missing the pull-up resistor on the
	 * MDIO line, which results in reads to non-existent devices returning
	 * 0 rather than 0xffff. Catch this here and treat 0 as a non-existent
	 * device as well.
	 * Note: phydev->phy_id is the result of reading the UID PHY registers.
	 */
	if (!priv->plat->phy_node && phydev->phy_id == 0) {
		phy_disconnect(phydev);
		return -ENODEV;
	}

	/* If attached to a switch, there is no reason to poll the phy handler */
	if (priv->plat->phy_bus_name)
		if (!strcmp(priv->plat->phy_bus_name, "fixed"))
			phydev->irq = PHY_IGNORE_INTERRUPT;

	pr_debug("stmmac_init_phy: %s: attached to PHY (UID 0x%x) Link = %d\n",
		 dev->name, phydev->phy_id, phydev->link);

	priv->phydev = phydev;

	return 0;
}

/**
 * stmmac_display_ring - display ring
 * @head: pointer to the head of the ring passed.
 * @size: size of the ring.
 * @extend_desc: to verify if extended descriptors are used.
 * Description: display the control/status and buffer descriptors.
 */
static void stmmac_display_ring(void *head, int size, int extend_desc)
{
	int i;
	struct dma_extended_desc *ep = (struct dma_extended_desc *)head;
	struct dma_desc *p = (struct dma_desc *)head;

	for (i = 0; i < size; i++) {
		u64 x;

		if (extend_desc) {
			x = *(u64 *)ep;
			pr_info("%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
				i, (unsigned int)virt_to_phys(ep),
				(unsigned int)x, (unsigned int)(x >> 32),
				ep->basic.des2, ep->basic.des3);
			ep++;
		} else {
			x = *(u64 *)p;
			pr_info("%d [0x%x]: 0x%x 0x%x 0x%x 0x%x",
				i, (unsigned int)virt_to_phys(p),
				(unsigned int)x, (unsigned int)(x >> 32),
				p->des2, p->des3);
			p++;
		}
		pr_info("\n");
	}
}

static void stmmac_display_rings(struct stmmac_priv *priv)
{
	if (priv->extend_desc) {
		pr_info("Extended RX descriptor ring:\n");
		stmmac_display_ring((void *)priv->dma_erx, DMA_RX_SIZE, 1);
		pr_info("Extended TX descriptor ring:\n");
		stmmac_display_ring((void *)priv->dma_etx, DMA_TX_SIZE, 1);
	} else {
		pr_info("RX descriptor ring:\n");
		stmmac_display_ring((void *)priv->dma_rx, DMA_RX_SIZE, 0);
		pr_info("TX descriptor ring:\n");
		stmmac_display_ring((void *)priv->dma_tx, DMA_TX_SIZE, 0);
	}
}
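/**
 * stmmac_set_bfsize - select the DMA buffer size for a given MTU
 * @mtu: requested MTU
 * @bufsize: current buffer size
 * Description: returns the smallest supported buffer size that fits the
 * MTU; e.g. a standard 1500 byte MTU keeps DEFAULT_BUFSIZE, while a
 * 3000 byte MTU selects 4 KiB buffers.
 */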
static int stmmac_set_bfsize(int mtu, int bufsize)
{
	int ret = bufsize;

	if (mtu >= BUF_SIZE_4KiB)
		ret = BUF_SIZE_8KiB;
	else if (mtu >= BUF_SIZE_2KiB)
		ret = BUF_SIZE_4KiB;
	else if (mtu > DEFAULT_BUFSIZE)
		ret = BUF_SIZE_2KiB;
	else
		ret = DEFAULT_BUFSIZE;

	return ret;
}

/**
 * stmmac_clear_descriptors - clear descriptors
 * @priv: driver private structure
 * Description: this function is called to clear the tx and rx descriptors,
 * whether basic or extended descriptors are used.
 */
static void stmmac_clear_descriptors(struct stmmac_priv *priv)
{
	int i;

	/* Clear the Rx/Tx descriptors */
	for (i = 0; i < DMA_RX_SIZE; i++)
		if (priv->extend_desc)
			priv->hw->desc->init_rx_desc(&priv->dma_erx[i].basic,
						     priv->use_riwt, priv->mode,
						     (i == DMA_RX_SIZE - 1));
		else
			priv->hw->desc->init_rx_desc(&priv->dma_rx[i],
						     priv->use_riwt, priv->mode,
						     (i == DMA_RX_SIZE - 1));
	for (i = 0; i < DMA_TX_SIZE; i++)
		if (priv->extend_desc)
			priv->hw->desc->init_tx_desc(&priv->dma_etx[i].basic,
						     priv->mode,
						     (i == DMA_TX_SIZE - 1));
		else
			priv->hw->desc->init_tx_desc(&priv->dma_tx[i],
						     priv->mode,
						     (i == DMA_TX_SIZE - 1));
}

/**
 * stmmac_init_rx_buffers - init the RX descriptor buffer.
 * @priv: driver private structure
 * @p: descriptor pointer
 * @i: descriptor index
 * @flags: gfp flag.
 * Description: this function is called to allocate a receive buffer, perform
 * the DMA mapping and init the descriptor.
 */
static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
				  int i, gfp_t flags)
{
	struct sk_buff *skb;

	skb = __netdev_alloc_skb_ip_align(priv->dev, priv->dma_buf_sz, flags);
	if (!skb) {
		pr_err("%s: Rx init fails; skb is NULL\n", __func__);
		return -ENOMEM;
	}
	priv->rx_skbuff[i] = skb;
	priv->rx_skbuff_dma[i] = dma_map_single(priv->device, skb->data,
						priv->dma_buf_sz,
						DMA_FROM_DEVICE);
	if (dma_mapping_error(priv->device, priv->rx_skbuff_dma[i])) {
		pr_err("%s: DMA mapping error\n", __func__);
		dev_kfree_skb_any(skb);
		return -EINVAL;
	}

	p->des2 = priv->rx_skbuff_dma[i];

	if ((priv->hw->mode->init_desc3) &&
	    (priv->dma_buf_sz == BUF_SIZE_16KiB))
		priv->hw->mode->init_desc3(p);

	return 0;
}

static void stmmac_free_rx_buffers(struct stmmac_priv *priv, int i)
{
	if (priv->rx_skbuff[i]) {
		dma_unmap_single(priv->device, priv->rx_skbuff_dma[i],
				 priv->dma_buf_sz, DMA_FROM_DEVICE);
		dev_kfree_skb_any(priv->rx_skbuff[i]);
	}
	priv->rx_skbuff[i] = NULL;
}

/**
 * init_dma_desc_rings - init the RX/TX descriptor rings
 * @dev: net device structure
 * @flags: gfp flag.
 * Description: this function initializes the DMA RX/TX descriptors
 * and allocates the socket buffers. It supports the chained and ring
 * modes.
 */
static int init_dma_desc_rings(struct net_device *dev, gfp_t flags)
{
	int i;
	struct stmmac_priv *priv = netdev_priv(dev);
	unsigned int bfsize = 0;
	int ret = -ENOMEM;

	if (priv->hw->mode->set_16kib_bfsize)
		bfsize = priv->hw->mode->set_16kib_bfsize(dev->mtu);

	if (bfsize < BUF_SIZE_16KiB)
		bfsize = stmmac_set_bfsize(dev->mtu, priv->dma_buf_sz);

	priv->dma_buf_sz = bfsize;

	if (netif_msg_probe(priv)) {
		pr_debug("(%s) dma_rx_phy=0x%08x dma_tx_phy=0x%08x\n", __func__,
			 (u32)priv->dma_rx_phy, (u32)priv->dma_tx_phy);

		/* RX INITIALIZATION */
		pr_debug("\tSKB addresses:\nskb\t\tskb data\tdma data\n");
	}
	for (i = 0; i < DMA_RX_SIZE; i++) {
		struct dma_desc *p;

		if (priv->extend_desc)
			p = &((priv->dma_erx + i)->basic);
		else
			p = priv->dma_rx + i;

		ret = stmmac_init_rx_buffers(priv, p, i, flags);
		if (ret)
			goto err_init_rx_buffers;

		if (netif_msg_probe(priv))
			pr_debug("[%p]\t[%p]\t[%x]\n", priv->rx_skbuff[i],
				 priv->rx_skbuff[i]->data,
				 (unsigned int)priv->rx_skbuff_dma[i]);
	}
	priv->cur_rx = 0;
	priv->dirty_rx = (unsigned int)(i - DMA_RX_SIZE);
	buf_sz = bfsize;

	/* Setup the chained descriptor addresses */
	if (priv->mode == STMMAC_CHAIN_MODE) {
		if (priv->extend_desc) {
			priv->hw->mode->init(priv->dma_erx, priv->dma_rx_phy,
					     DMA_RX_SIZE, 1);
			priv->hw->mode->init(priv->dma_etx, priv->dma_tx_phy,
					     DMA_TX_SIZE, 1);
		} else {
			priv->hw->mode->init(priv->dma_rx, priv->dma_rx_phy,
					     DMA_RX_SIZE, 0);
			priv->hw->mode->init(priv->dma_tx, priv->dma_tx_phy,
					     DMA_TX_SIZE, 0);
		}
	}

	/* TX INITIALIZATION */
	for (i = 0; i < DMA_TX_SIZE; i++) {
		struct dma_desc *p;

		if (priv->extend_desc)
			p = &((priv->dma_etx + i)->basic);
		else
			p = priv->dma_tx + i;
		p->des2 = 0;
		priv->tx_skbuff_dma[i].buf = 0;
		priv->tx_skbuff_dma[i].map_as_page = false;
		priv->tx_skbuff_dma[i].len = 0;
		priv->tx_skbuff_dma[i].last_segment = false;
		priv->tx_skbuff[i] = NULL;
	}

	priv->dirty_tx = 0;
	priv->cur_tx = 0;
	netdev_reset_queue(priv->dev);

	stmmac_clear_descriptors(priv);

	if (netif_msg_hw(priv))
		stmmac_display_rings(priv);

	return 0;
err_init_rx_buffers:
	while (--i >= 0)
		stmmac_free_rx_buffers(priv, i);
	return ret;
}

static void dma_free_rx_skbufs(struct stmmac_priv *priv)
{
	int i;

	for (i = 0; i < DMA_RX_SIZE; i++)
		stmmac_free_rx_buffers(priv, i);
}

static void dma_free_tx_skbufs(struct stmmac_priv *priv)
{
	int i;

	for (i = 0; i < DMA_TX_SIZE; i++) {
		struct dma_desc *p;

		if (priv->extend_desc)
			p = &((priv->dma_etx + i)->basic);
		else
			p = priv->dma_tx + i;

		if (priv->tx_skbuff_dma[i].buf) {
			if (priv->tx_skbuff_dma[i].map_as_page)
				dma_unmap_page(priv->device,
					       priv->tx_skbuff_dma[i].buf,
					       priv->tx_skbuff_dma[i].len,
					       DMA_TO_DEVICE);
			else
				dma_unmap_single(priv->device,
						 priv->tx_skbuff_dma[i].buf,
						 priv->tx_skbuff_dma[i].len,
						 DMA_TO_DEVICE);
		}

		if (priv->tx_skbuff[i] != NULL) {
			dev_kfree_skb_any(priv->tx_skbuff[i]);
			priv->tx_skbuff[i] = NULL;
			priv->tx_skbuff_dma[i].buf = 0;
			priv->tx_skbuff_dma[i].map_as_page = false;
		}
	}
}

/**
 * alloc_dma_desc_resources - alloc TX/RX resources.
 * @priv: private structure
 * Description: according to which descriptor can be used (extend or basic)
 * this function allocates the resources for TX and RX paths. In case of
 * reception, for example, it pre-allocates the RX socket buffers in order to
 * allow zero-copy mechanism.
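 * Return: 0 on success, -ENOMEM on failure.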
 */
static int alloc_dma_desc_resources(struct stmmac_priv *priv)
{
	int ret = -ENOMEM;

	priv->rx_skbuff_dma = kmalloc_array(DMA_RX_SIZE, sizeof(dma_addr_t),
					    GFP_KERNEL);
	if (!priv->rx_skbuff_dma)
		return -ENOMEM;

	priv->rx_skbuff = kmalloc_array(DMA_RX_SIZE, sizeof(struct sk_buff *),
					GFP_KERNEL);
	if (!priv->rx_skbuff)
		goto err_rx_skbuff;

	priv->tx_skbuff_dma = kmalloc_array(DMA_TX_SIZE,
					    sizeof(*priv->tx_skbuff_dma),
					    GFP_KERNEL);
	if (!priv->tx_skbuff_dma)
		goto err_tx_skbuff_dma;

	priv->tx_skbuff = kmalloc_array(DMA_TX_SIZE, sizeof(struct sk_buff *),
					GFP_KERNEL);
	if (!priv->tx_skbuff)
		goto err_tx_skbuff;

	if (priv->extend_desc) {
		priv->dma_erx = dma_zalloc_coherent(priv->device, DMA_RX_SIZE *
						    sizeof(struct dma_extended_desc),
						    &priv->dma_rx_phy,
						    GFP_KERNEL);
		if (!priv->dma_erx)
			goto err_dma;

		priv->dma_etx = dma_zalloc_coherent(priv->device, DMA_TX_SIZE *
						    sizeof(struct dma_extended_desc),
						    &priv->dma_tx_phy,
						    GFP_KERNEL);
		if (!priv->dma_etx) {
			dma_free_coherent(priv->device, DMA_RX_SIZE *
					  sizeof(struct dma_extended_desc),
					  priv->dma_erx, priv->dma_rx_phy);
			goto err_dma;
		}
	} else {
		priv->dma_rx = dma_zalloc_coherent(priv->device, DMA_RX_SIZE *
						   sizeof(struct dma_desc),
						   &priv->dma_rx_phy,
						   GFP_KERNEL);
		if (!priv->dma_rx)
			goto err_dma;

		priv->dma_tx = dma_zalloc_coherent(priv->device, DMA_TX_SIZE *
						   sizeof(struct dma_desc),
						   &priv->dma_tx_phy,
						   GFP_KERNEL);
		if (!priv->dma_tx) {
			dma_free_coherent(priv->device, DMA_RX_SIZE *
					  sizeof(struct dma_desc),
					  priv->dma_rx, priv->dma_rx_phy);
			goto err_dma;
		}
	}

	return 0;

err_dma:
	kfree(priv->tx_skbuff);
err_tx_skbuff:
	kfree(priv->tx_skbuff_dma);
err_tx_skbuff_dma:
	kfree(priv->rx_skbuff);
err_rx_skbuff:
	kfree(priv->rx_skbuff_dma);
	return ret;
}

static void free_dma_desc_resources(struct stmmac_priv *priv)
{
	/* Release the DMA TX/RX socket buffers */
	dma_free_rx_skbufs(priv);
	dma_free_tx_skbufs(priv);

	/* Free DMA regions of consistent memory previously allocated */
	if (!priv->extend_desc) {
		dma_free_coherent(priv->device,
				  DMA_TX_SIZE * sizeof(struct dma_desc),
				  priv->dma_tx, priv->dma_tx_phy);
		dma_free_coherent(priv->device,
				  DMA_RX_SIZE * sizeof(struct dma_desc),
				  priv->dma_rx, priv->dma_rx_phy);
	} else {
		dma_free_coherent(priv->device, DMA_TX_SIZE *
				  sizeof(struct dma_extended_desc),
				  priv->dma_etx, priv->dma_tx_phy);
		dma_free_coherent(priv->device, DMA_RX_SIZE *
				  sizeof(struct dma_extended_desc),
				  priv->dma_erx, priv->dma_rx_phy);
	}
	kfree(priv->rx_skbuff_dma);
	kfree(priv->rx_skbuff);
	kfree(priv->tx_skbuff_dma);
	kfree(priv->tx_skbuff);
}

/**
 * stmmac_dma_operation_mode - HW DMA operation mode
 * @priv: driver private structure
 * Description: it is used for configuring the DMA operation mode register in
 * order to program the tx/rx DMA thresholds or Store-And-Forward mode.
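 * Note: Store-And-Forward is also what allows the core to insert the TX
 * checksum in HW, since the whole frame must be buffered before the
 * checksum can be computed.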
 */
static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
{
	int rxfifosz = priv->plat->rx_fifo_size;

	if (priv->plat->force_thresh_dma_mode)
		priv->hw->dma->dma_mode(priv->ioaddr, tc, tc, rxfifosz);
	else if (priv->plat->force_sf_dma_mode || priv->plat->tx_coe) {
		/*
		 * In case of GMAC, SF mode can be enabled
		 * to perform the TX COE in HW. This depends on:
		 * 1) TX COE if actually supported
		 * 2) There is no bugged Jumbo frame support
		 *    that needs to not insert csum in the TDES.
		 */
		priv->hw->dma->dma_mode(priv->ioaddr, SF_DMA_MODE, SF_DMA_MODE,
					rxfifosz);
		priv->xstats.threshold = SF_DMA_MODE;
	} else
		priv->hw->dma->dma_mode(priv->ioaddr, tc, SF_DMA_MODE,
					rxfifosz);
}

/**
 * stmmac_tx_clean - to manage the transmission completion
 * @priv: driver private structure
 * Description: it reclaims the transmit resources after transmission
 * completes.
 */
static void stmmac_tx_clean(struct stmmac_priv *priv)
{
	unsigned int bytes_compl = 0, pkts_compl = 0;
	unsigned int entry = priv->dirty_tx;

	spin_lock(&priv->tx_lock);

	priv->xstats.tx_clean++;

	while (entry != priv->cur_tx) {
		struct sk_buff *skb = priv->tx_skbuff[entry];
		struct dma_desc *p;
		int status;

		if (priv->extend_desc)
			p = (struct dma_desc *)(priv->dma_etx + entry);
		else
			p = priv->dma_tx + entry;

		status = priv->hw->desc->tx_status(&priv->dev->stats,
						   &priv->xstats, p,
						   priv->ioaddr);
		/* Check if the descriptor is owned by the DMA */
		if (unlikely(status & tx_dma_own))
			break;

		/* Just consider the last segment and ... */
		if (likely(!(status & tx_not_ls))) {
			/* ... verify the status error condition */
			if (unlikely(status & tx_err)) {
				priv->dev->stats.tx_errors++;
			} else {
				priv->dev->stats.tx_packets++;
				priv->xstats.tx_pkt_n++;
			}
			stmmac_get_tx_hwtstamp(priv, entry, skb);
		}

		if (likely(priv->tx_skbuff_dma[entry].buf)) {
			if (priv->tx_skbuff_dma[entry].map_as_page)
				dma_unmap_page(priv->device,
					       priv->tx_skbuff_dma[entry].buf,
					       priv->tx_skbuff_dma[entry].len,
					       DMA_TO_DEVICE);
			else
				dma_unmap_single(priv->device,
						 priv->tx_skbuff_dma[entry].buf,
						 priv->tx_skbuff_dma[entry].len,
						 DMA_TO_DEVICE);
			priv->tx_skbuff_dma[entry].buf = 0;
			priv->tx_skbuff_dma[entry].map_as_page = false;
		}
		priv->hw->mode->clean_desc3(priv, p);
		priv->tx_skbuff_dma[entry].last_segment = false;
		priv->tx_skbuff_dma[entry].is_jumbo = false;

		if (likely(skb != NULL)) {
			pkts_compl++;
			bytes_compl += skb->len;
			dev_consume_skb_any(skb);
			priv->tx_skbuff[entry] = NULL;
		}

		priv->hw->desc->release_tx_desc(p, priv->mode);

		entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
	}
	priv->dirty_tx = entry;

	netdev_completed_queue(priv->dev, pkts_compl, bytes_compl);

	if (unlikely(netif_queue_stopped(priv->dev) &&
		     stmmac_tx_avail(priv) > STMMAC_TX_THRESH)) {
		netif_tx_lock(priv->dev);
		if (netif_queue_stopped(priv->dev) &&
		    stmmac_tx_avail(priv) > STMMAC_TX_THRESH) {
			if (netif_msg_tx_done(priv))
				pr_debug("%s: restart transmit\n", __func__);
			netif_wake_queue(priv->dev);
		}
		netif_tx_unlock(priv->dev);
	}

	if ((priv->eee_enabled) && (!priv->tx_path_in_lpi_mode)) {
		stmmac_enable_eee_mode(priv);
		mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer));
	}
	spin_unlock(&priv->tx_lock);
}

static inline void stmmac_enable_dma_irq(struct stmmac_priv *priv)
{
	priv->hw->dma->enable_dma_irq(priv->ioaddr);
}

static inline void stmmac_disable_dma_irq(struct stmmac_priv *priv)
{
	priv->hw->dma->disable_dma_irq(priv->ioaddr);
}

/**
 * stmmac_tx_err - to manage the tx error
 * @priv: driver private structure
 * Description: it cleans the descriptors and restarts the transmission
 * in case of transmission errors.
 */
static void stmmac_tx_err(struct stmmac_priv *priv)
{
	int i;

	netif_stop_queue(priv->dev);

	priv->hw->dma->stop_tx(priv->ioaddr);
	dma_free_tx_skbufs(priv);
	for (i = 0; i < DMA_TX_SIZE; i++)
		if (priv->extend_desc)
			priv->hw->desc->init_tx_desc(&priv->dma_etx[i].basic,
						     priv->mode,
						     (i == DMA_TX_SIZE - 1));
		else
			priv->hw->desc->init_tx_desc(&priv->dma_tx[i],
						     priv->mode,
						     (i == DMA_TX_SIZE - 1));
	priv->dirty_tx = 0;
	priv->cur_tx = 0;
	netdev_reset_queue(priv->dev);
	priv->hw->dma->start_tx(priv->ioaddr);

	priv->dev->stats.tx_errors++;
	netif_wake_queue(priv->dev);
}

/**
 * stmmac_dma_interrupt - DMA ISR
 * @priv: driver private structure
 * Description: this is the DMA ISR. It is called by the main ISR.
 * It calls the dwmac dma routine and schedules the poll method when
 * there is work to do.
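 * On RX/TX completion the DMA interrupt is masked and the rest of the
 * processing is deferred to the NAPI poll handler.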
 */
static void stmmac_dma_interrupt(struct stmmac_priv *priv)
{
	int status;
	int rxfifosz = priv->plat->rx_fifo_size;

	status = priv->hw->dma->dma_interrupt(priv->ioaddr, &priv->xstats);
	if (likely((status & handle_rx)) || (status & handle_tx)) {
		if (likely(napi_schedule_prep(&priv->napi))) {
			stmmac_disable_dma_irq(priv);
			__napi_schedule(&priv->napi);
		}
	}
	if (unlikely(status & tx_hard_error_bump_tc)) {
		/* Try to bump up the dma threshold on this failure */
		if (unlikely(priv->xstats.threshold != SF_DMA_MODE) &&
		    (tc <= 256)) {
			tc += 64;
			if (priv->plat->force_thresh_dma_mode)
				priv->hw->dma->dma_mode(priv->ioaddr, tc, tc,
							rxfifosz);
			else
				priv->hw->dma->dma_mode(priv->ioaddr, tc,
							SF_DMA_MODE, rxfifosz);
			priv->xstats.threshold = tc;
		}
	} else if (unlikely(status == tx_hard_error))
		stmmac_tx_err(priv);
}

/**
 * stmmac_mmc_setup - setup the Mac Management Counters (MMC)
 * @priv: driver private structure
 * Description: this masks the MMC irq; in fact, the counters are managed
 * in SW.
 */
static void stmmac_mmc_setup(struct stmmac_priv *priv)
{
	unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET |
			    MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET;

	dwmac_mmc_intr_all_mask(priv->ioaddr);

	if (priv->dma_cap.rmon) {
		dwmac_mmc_ctrl(priv->ioaddr, mode);
		memset(&priv->mmc, 0, sizeof(struct stmmac_counters));
	} else
		pr_info(" No MAC Management Counters available\n");
}

/**
 * stmmac_get_synopsys_id - return the Synopsys ID.
 * @priv: driver private structure
 * Description: this simple function decodes and returns the Synopsys ID
 * read from the HW core register.
 */
static u32 stmmac_get_synopsys_id(struct stmmac_priv *priv)
{
	u32 hwid = priv->hw->synopsys_uid;

	/* Check Synopsys Id (not available on old chips) */
	if (likely(hwid)) {
		u32 uid = ((hwid & 0x0000ff00) >> 8);
		u32 synid = (hwid & 0x000000ff);

		pr_info("stmmac - user ID: 0x%x, Synopsys ID: 0x%x\n",
			uid, synid);

		return synid;
	}
	return 0;
}

/**
 * stmmac_selec_desc_mode - to select among normal/alternate/extend descriptors
 * @priv: driver private structure
 * Description: select the Enhanced/Alternate or Normal descriptors.
 * In case of Enhanced/Alternate, it checks if the extended descriptors are
 * supported by the HW capability register.
 */
static void stmmac_selec_desc_mode(struct stmmac_priv *priv)
{
	if (priv->plat->enh_desc) {
		pr_info(" Enhanced/Alternate descriptors\n");

		/* GMAC older than 3.50 has no extended descriptors */
		if (priv->synopsys_id >= DWMAC_CORE_3_50) {
			pr_info("\tEnabled extended descriptors\n");
			priv->extend_desc = 1;
		} else
			pr_warn("Extended descriptors not supported\n");

		priv->hw->desc = &enh_desc_ops;
	} else {
		pr_info(" Normal descriptors\n");
		priv->hw->desc = &ndesc_ops;
	}
}

/**
 * stmmac_get_hw_features - get MAC capabilities from the HW cap. register.
 * @priv: driver private structure
 * Description:
 *  new GMAC chip generations have a new register to indicate the
 *  presence of the optional feature/functions.
 *  This can also be used to override the value passed through the
 *  platform and is necessary for old MAC10/100 and GMAC chips.
 */
static int stmmac_get_hw_features(struct stmmac_priv *priv)
{
	u32 hw_cap = 0;

	if (priv->hw->dma->get_hw_feature) {
		hw_cap = priv->hw->dma->get_hw_feature(priv->ioaddr);

		priv->dma_cap.mbps_10_100 = (hw_cap & DMA_HW_FEAT_MIISEL);
		priv->dma_cap.mbps_1000 = (hw_cap & DMA_HW_FEAT_GMIISEL) >> 1;
		priv->dma_cap.half_duplex = (hw_cap & DMA_HW_FEAT_HDSEL) >> 2;
		priv->dma_cap.hash_filter = (hw_cap & DMA_HW_FEAT_HASHSEL) >> 4;
		priv->dma_cap.multi_addr = (hw_cap & DMA_HW_FEAT_ADDMAC) >> 5;
		priv->dma_cap.pcs = (hw_cap & DMA_HW_FEAT_PCSSEL) >> 6;
		priv->dma_cap.sma_mdio = (hw_cap & DMA_HW_FEAT_SMASEL) >> 8;
		priv->dma_cap.pmt_remote_wake_up =
		    (hw_cap & DMA_HW_FEAT_RWKSEL) >> 9;
		priv->dma_cap.pmt_magic_frame =
		    (hw_cap & DMA_HW_FEAT_MGKSEL) >> 10;
		/* MMC */
		priv->dma_cap.rmon = (hw_cap & DMA_HW_FEAT_MMCSEL) >> 11;
		/* IEEE 1588-2002 */
		priv->dma_cap.time_stamp =
		    (hw_cap & DMA_HW_FEAT_TSVER1SEL) >> 12;
		/* IEEE 1588-2008 */
		priv->dma_cap.atime_stamp =
		    (hw_cap & DMA_HW_FEAT_TSVER2SEL) >> 13;
		/* 802.3az - Energy-Efficient Ethernet (EEE) */
		priv->dma_cap.eee = (hw_cap & DMA_HW_FEAT_EEESEL) >> 14;
		priv->dma_cap.av = (hw_cap & DMA_HW_FEAT_AVSEL) >> 15;
		/* TX and RX csum */
		priv->dma_cap.tx_coe = (hw_cap & DMA_HW_FEAT_TXCOESEL) >> 16;
		priv->dma_cap.rx_coe_type1 =
		    (hw_cap & DMA_HW_FEAT_RXTYP1COE) >> 17;
		priv->dma_cap.rx_coe_type2 =
		    (hw_cap & DMA_HW_FEAT_RXTYP2COE) >> 18;
		priv->dma_cap.rxfifo_over_2048 =
		    (hw_cap & DMA_HW_FEAT_RXFIFOSIZE) >> 19;
		/* TX and RX number of channels */
		priv->dma_cap.number_rx_channel =
		    (hw_cap & DMA_HW_FEAT_RXCHCNT) >> 20;
		priv->dma_cap.number_tx_channel =
		    (hw_cap & DMA_HW_FEAT_TXCHCNT) >> 22;
		/* Alternate (enhanced) DESC mode */
		priv->dma_cap.enh_desc = (hw_cap & DMA_HW_FEAT_ENHDESSEL) >> 24;
	}

	return hw_cap;
}

/**
 * stmmac_check_ether_addr - check if the MAC addr is valid
 * @priv: driver private structure
 * Description:
 * it is to verify if the MAC address is valid; in case of failure it
 * generates a random MAC address
 */
static void stmmac_check_ether_addr(struct stmmac_priv *priv)
{
	if (!is_valid_ether_addr(priv->dev->dev_addr)) {
		priv->hw->mac->get_umac_addr(priv->hw,
					     priv->dev->dev_addr, 0);
		if (!is_valid_ether_addr(priv->dev->dev_addr))
			eth_hw_addr_random(priv->dev);
		pr_info("%s: device MAC address %pM\n", priv->dev->name,
			priv->dev->dev_addr);
	}
}

/**
 * stmmac_init_dma_engine - DMA init.
 * @priv: driver private structure
 * Description:
 * It inits the DMA invoking the specific MAC/GMAC callback.
 * Some DMA parameters can be passed from the platform;
 * if they are not passed, a default is kept for the MAC or GMAC.
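 * When the platform provides an AXI configuration and the core is a
 * dwmac >= 3.50, the AXI bus mode register is programmed as well.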
 */
static int stmmac_init_dma_engine(struct stmmac_priv *priv)
{
	int pbl = DEFAULT_DMA_PBL, fixed_burst = 0, aal = 0;
	int mixed_burst = 0;
	int atds = 0;
	int ret = 0;

	if (priv->plat->dma_cfg) {
		pbl = priv->plat->dma_cfg->pbl;
		fixed_burst = priv->plat->dma_cfg->fixed_burst;
		mixed_burst = priv->plat->dma_cfg->mixed_burst;
		aal = priv->plat->dma_cfg->aal;
	}

	if (priv->extend_desc && (priv->mode == STMMAC_RING_MODE))
		atds = 1;

	ret = priv->hw->dma->reset(priv->ioaddr);
	if (ret) {
		dev_err(priv->device, "Failed to reset the dma\n");
		return ret;
	}

	priv->hw->dma->init(priv->ioaddr, pbl, fixed_burst, mixed_burst,
			    aal, priv->dma_tx_phy, priv->dma_rx_phy, atds);

	if ((priv->synopsys_id >= DWMAC_CORE_3_50) &&
	    (priv->plat->axi && priv->hw->dma->axi))
		priv->hw->dma->axi(priv->ioaddr, priv->plat->axi);

	return ret;
}

/**
 * stmmac_tx_timer - mitigation sw timer for tx.
 * @data: data pointer
 * Description:
 * This is the timer handler to directly invoke the stmmac_tx_clean.
 */
static void stmmac_tx_timer(unsigned long data)
{
	struct stmmac_priv *priv = (struct stmmac_priv *)data;

	stmmac_tx_clean(priv);
}

/**
 * stmmac_init_tx_coalesce - init tx mitigation options.
 * @priv: driver private structure
 * Description:
 * This inits the transmit coalesce parameters: i.e. timer rate,
 * timer handler and default threshold used for enabling the
 * interrupt on completion bit.
 */
static void stmmac_init_tx_coalesce(struct stmmac_priv *priv)
{
	priv->tx_coal_frames = STMMAC_TX_FRAMES;
	priv->tx_coal_timer = STMMAC_COAL_TX_TIMER;
	init_timer(&priv->txtimer);
	priv->txtimer.expires = STMMAC_COAL_TIMER(priv->tx_coal_timer);
	priv->txtimer.data = (unsigned long)priv;
	priv->txtimer.function = stmmac_tx_timer;
	add_timer(&priv->txtimer);
}

/**
 * stmmac_hw_setup - setup mac in a usable state.
 * @dev : pointer to the device structure.
 * Description:
 * this is the main function to setup the HW in a usable state because the
 * dma engine is reset, the core registers are configured (e.g. AXI,
 * Checksum features, timers). The DMA is ready to start receiving and
 * transmitting.
 * Return value:
 * 0 on success and an appropriate (-)ve integer as defined in errno.h
 * file on failure.
 */
static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	int ret;

	/* DMA initialization and SW reset */
	ret = stmmac_init_dma_engine(priv);
	if (ret < 0) {
		pr_err("%s: DMA engine initialization failed\n", __func__);
		return ret;
	}

	/* Copy the MAC addr into the HW */
	priv->hw->mac->set_umac_addr(priv->hw, dev->dev_addr, 0);

	/* If required, perform hw setup of the bus.
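	 * This is a platform hook (bus_setup) that glue layers can use
	 * to program, e.g., bridge or bus-mode registers before the MAC
	 * core is initialized.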
	 */
	if (priv->plat->bus_setup)
		priv->plat->bus_setup(priv->ioaddr);

	/* Initialize the MAC Core */
	priv->hw->mac->core_init(priv->hw, dev->mtu);

	ret = priv->hw->mac->rx_ipc(priv->hw);
	if (!ret) {
		pr_warn(" RX IPC Checksum Offload disabled\n");
		priv->plat->rx_coe = STMMAC_RX_COE_NONE;
		priv->hw->rx_csum = 0;
	}

	/* Enable the MAC Rx/Tx */
	stmmac_set_mac(priv->ioaddr, true);

	/* Set the HW DMA mode and the COE */
	stmmac_dma_operation_mode(priv);

	stmmac_mmc_setup(priv);

	if (init_ptp) {
		ret = stmmac_init_ptp(priv);
		if (ret && ret != -EOPNOTSUPP)
			pr_warn("%s: failed PTP initialisation\n", __func__);
	}

#ifdef CONFIG_DEBUG_FS
	ret = stmmac_init_fs(dev);
	if (ret < 0)
		pr_warn("%s: failed debugFS registration\n", __func__);
#endif
	/* Start the ball rolling... */
	pr_debug("%s: DMA RX/TX processes started...\n", dev->name);
	priv->hw->dma->start_tx(priv->ioaddr);
	priv->hw->dma->start_rx(priv->ioaddr);

	/* Dump DMA/MAC registers */
	if (netif_msg_hw(priv)) {
		priv->hw->mac->dump_regs(priv->hw);
		priv->hw->dma->dump_regs(priv->ioaddr);
	}
	priv->tx_lpi_timer = STMMAC_DEFAULT_TWT_LS;

	if ((priv->use_riwt) && (priv->hw->dma->rx_watchdog)) {
		priv->rx_riwt = MAX_DMA_RIWT;
		priv->hw->dma->rx_watchdog(priv->ioaddr, MAX_DMA_RIWT);
	}

	if (priv->pcs && priv->hw->mac->ctrl_ane)
		priv->hw->mac->ctrl_ane(priv->hw, 0);

	return 0;
}

/**
 * stmmac_open - open entry point of the driver
 * @dev : pointer to the device structure.
 * Description:
 * This function is the open entry point of the driver.
 * Return value:
 * 0 on success and an appropriate (-)ve integer as defined in errno.h
 * file on failure.
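 * It attaches the PHY, allocates and initializes the DMA rings, brings
 * up the HW, requests the IRQ lines and finally enables NAPI and the
 * TX queue.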
 */
static int stmmac_open(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	int ret;

	stmmac_check_ether_addr(priv);

	if (priv->pcs != STMMAC_PCS_RGMII && priv->pcs != STMMAC_PCS_TBI &&
	    priv->pcs != STMMAC_PCS_RTBI) {
		ret = stmmac_init_phy(dev);
		if (ret) {
			pr_err("%s: Cannot attach to PHY (error: %d)\n",
			       __func__, ret);
			return ret;
		}
	}

	/* Extra statistics */
	memset(&priv->xstats, 0, sizeof(struct stmmac_extra_stats));
	priv->xstats.threshold = tc;

	priv->dma_buf_sz = STMMAC_ALIGN(buf_sz);
	priv->rx_copybreak = STMMAC_RX_COPYBREAK;

	ret = alloc_dma_desc_resources(priv);
	if (ret < 0) {
		pr_err("%s: DMA descriptors allocation failed\n", __func__);
		goto dma_desc_error;
	}

	ret = init_dma_desc_rings(dev, GFP_KERNEL);
	if (ret < 0) {
		pr_err("%s: DMA descriptors initialization failed\n", __func__);
		goto init_error;
	}

	ret = stmmac_hw_setup(dev, true);
	if (ret < 0) {
		pr_err("%s: Hw setup failed\n", __func__);
		goto init_error;
	}

	stmmac_init_tx_coalesce(priv);

	if (priv->phydev)
		phy_start(priv->phydev);

	/* Request the IRQ lines */
	ret = request_irq(dev->irq, stmmac_interrupt,
			  IRQF_SHARED, dev->name, dev);
	if (unlikely(ret < 0)) {
		pr_err("%s: ERROR: allocating the IRQ %d (error: %d)\n",
		       __func__, dev->irq, ret);
		goto init_error;
	}

	/* Request the Wake IRQ in case a separate line is used for WoL */
	if (priv->wol_irq != dev->irq) {
		ret = request_irq(priv->wol_irq, stmmac_interrupt,
				  IRQF_SHARED, dev->name, dev);
		if (unlikely(ret < 0)) {
			pr_err("%s: ERROR: allocating the WoL IRQ %d (%d)\n",
			       __func__, priv->wol_irq, ret);
			goto wolirq_error;
		}
	}

	/* Request the LPI IRQ in case a separate line is used for LPI */
	if (priv->lpi_irq > 0) {
		ret = request_irq(priv->lpi_irq, stmmac_interrupt, IRQF_SHARED,
				  dev->name, dev);
		if (unlikely(ret < 0)) {
			pr_err("%s: ERROR: allocating the LPI IRQ %d (%d)\n",
			       __func__, priv->lpi_irq, ret);
			goto lpiirq_error;
		}
	}

	napi_enable(&priv->napi);
	netif_start_queue(dev);

	return 0;

lpiirq_error:
	if (priv->wol_irq != dev->irq)
		free_irq(priv->wol_irq, dev);
wolirq_error:
	free_irq(dev->irq, dev);

init_error:
	free_dma_desc_resources(priv);
dma_desc_error:
	if (priv->phydev)
		phy_disconnect(priv->phydev);

	return ret;
}

/**
 * stmmac_release - close entry point of the driver
 * @dev : device pointer.
 * Description:
 * This is the stop entry point of the driver.
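 *
 * Teardown is essentially stmmac_open() in reverse: stop and disconnect
 * the PHY, quiesce the TX queue and NAPI, kill the coalescing timer,
 * free the IRQ lines, stop the DMA, release the descriptor resources
 * and finally turn the MAC off.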
1895 */ 1896 static int stmmac_release(struct net_device *dev) 1897 { 1898 struct stmmac_priv *priv = netdev_priv(dev); 1899 1900 if (priv->eee_enabled) 1901 del_timer_sync(&priv->eee_ctrl_timer); 1902 1903 /* Stop and disconnect the PHY */ 1904 if (priv->phydev) { 1905 phy_stop(priv->phydev); 1906 phy_disconnect(priv->phydev); 1907 priv->phydev = NULL; 1908 } 1909 1910 netif_stop_queue(dev); 1911 1912 napi_disable(&priv->napi); 1913 1914 del_timer_sync(&priv->txtimer); 1915 1916 /* Free the IRQ lines */ 1917 free_irq(dev->irq, dev); 1918 if (priv->wol_irq != dev->irq) 1919 free_irq(priv->wol_irq, dev); 1920 if (priv->lpi_irq > 0) 1921 free_irq(priv->lpi_irq, dev); 1922 1923 /* Stop TX/RX DMA and clear the descriptors */ 1924 priv->hw->dma->stop_tx(priv->ioaddr); 1925 priv->hw->dma->stop_rx(priv->ioaddr); 1926 1927 /* Release and free the Rx/Tx resources */ 1928 free_dma_desc_resources(priv); 1929 1930 /* Disable the MAC Rx/Tx */ 1931 stmmac_set_mac(priv->ioaddr, false); 1932 1933 netif_carrier_off(dev); 1934 1935 #ifdef CONFIG_DEBUG_FS 1936 stmmac_exit_fs(dev); 1937 #endif 1938 1939 stmmac_release_ptp(priv); 1940 1941 return 0; 1942 } 1943 1944 /** 1945 * stmmac_xmit - Tx entry point of the driver 1946 * @skb : the socket buffer 1947 * @dev : device pointer 1948 * Description : this is the tx entry point of the driver. 1949 * It programs the chain or the ring and supports oversized frames 1950 * and SG feature. 1951 */ 1952 static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) 1953 { 1954 struct stmmac_priv *priv = netdev_priv(dev); 1955 unsigned int nopaged_len = skb_headlen(skb); 1956 int i, csum_insertion = 0, is_jumbo = 0; 1957 int nfrags = skb_shinfo(skb)->nr_frags; 1958 unsigned int entry, first_entry; 1959 struct dma_desc *desc, *first; 1960 unsigned int enh_desc; 1961 1962 spin_lock(&priv->tx_lock); 1963 1964 if (unlikely(stmmac_tx_avail(priv) < nfrags + 1)) { 1965 spin_unlock(&priv->tx_lock); 1966 if (!netif_queue_stopped(dev)) { 1967 netif_stop_queue(dev); 1968 /* This is a hard error, log it. 
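			 *
			 * Returning NETDEV_TX_BUSY after stopping the queue
			 * makes the core requeue this skb instead of dropping
			 * it; the queue is woken up again from the TX clean
			 * path once descriptors are released.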
			 */
			pr_err("%s: Tx Ring full when queue awake\n", __func__);
		}
		return NETDEV_TX_BUSY;
	}

	if (priv->tx_path_in_lpi_mode)
		stmmac_disable_eee_mode(priv);

	entry = priv->cur_tx;
	first_entry = entry;

	csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL);

	if (likely(priv->extend_desc))
		desc = (struct dma_desc *)(priv->dma_etx + entry);
	else
		desc = priv->dma_tx + entry;

	first = desc;

	priv->tx_skbuff[first_entry] = skb;

	enh_desc = priv->plat->enh_desc;
	/* To program the descriptors according to the size of the frame */
	if (enh_desc)
		is_jumbo = priv->hw->mode->is_jumbo_frm(skb->len, enh_desc);

	if (unlikely(is_jumbo)) {
		/* use a signed local so the error return is detectable */
		int jentry = priv->hw->mode->jumbo_frm(priv, skb,
						       csum_insertion);
		if (unlikely(jentry < 0))
			goto dma_map_err;
		entry = jentry;
	}

	for (i = 0; i < nfrags; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		int len = skb_frag_size(frag);
		bool last_segment = (i == (nfrags - 1));

		entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);

		if (likely(priv->extend_desc))
			desc = (struct dma_desc *)(priv->dma_etx + entry);
		else
			desc = priv->dma_tx + entry;

		desc->des2 = skb_frag_dma_map(priv->device, frag, 0, len,
					      DMA_TO_DEVICE);
		if (dma_mapping_error(priv->device, desc->des2))
			goto dma_map_err; /* should reuse desc w/o issues */

		priv->tx_skbuff[entry] = NULL;
		priv->tx_skbuff_dma[entry].buf = desc->des2;
		priv->tx_skbuff_dma[entry].map_as_page = true;
		priv->tx_skbuff_dma[entry].len = len;
		priv->tx_skbuff_dma[entry].last_segment = last_segment;

		/* Prepare the descriptor and set the own bit too */
		priv->hw->desc->prepare_tx_desc(desc, 0, len, csum_insertion,
						priv->mode, 1, last_segment);
	}

	entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);

	priv->cur_tx = entry;

	if (netif_msg_pktdata(priv)) {
		pr_debug("%s: curr=%d dirty=%d f=%d, e=%d, first=%p, nfrags=%d",
			 __func__, priv->cur_tx, priv->dirty_tx, first_entry,
			 entry, first, nfrags);

		if (priv->extend_desc)
			stmmac_display_ring((void *)priv->dma_etx,
					    DMA_TX_SIZE, 1);
		else
			stmmac_display_ring((void *)priv->dma_tx,
					    DMA_TX_SIZE, 0);

		pr_debug(">>> frame to be transmitted: ");
		print_pkt(skb->data, skb->len);
	}

	if (unlikely(stmmac_tx_avail(priv) <= (MAX_SKB_FRAGS + 1))) {
		if (netif_msg_hw(priv))
			pr_debug("%s: stopping the TX queue\n", __func__);
		netif_stop_queue(dev);
	}

	dev->stats.tx_bytes += skb->len;

	/* According to the coalesce parameter, the IC bit for the latest
	 * segment is reset and the timer re-started to clean the tx status.
	 * This approach takes care of the fragments: in the non-SG case,
	 * desc points to the first (and only) element.
	 */
	priv->tx_count_frames += nfrags + 1;
	if (likely(priv->tx_coal_frames > priv->tx_count_frames)) {
		mod_timer(&priv->txtimer,
			  STMMAC_COAL_TIMER(priv->tx_coal_timer));
	} else {
		priv->tx_count_frames = 0;
		priv->hw->desc->set_tx_ic(desc);
		priv->xstats.tx_set_ic_bit++;
	}

	if (!priv->hwts_tx_en)
		skb_tx_timestamp(skb);

	/* Ready to fill the first descriptor and set the OWN bit w/o any
	 * problems because all the descriptors are actually ready to be
	 * passed to the DMA engine.
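	 *
	 * A sketch of the ordering enforced below (descriptive only, not
	 * extra driver API):
	 *
	 *	prepare frag descriptors  -> OWN set per fragment
	 *	prepare first descriptor  -> OWN set last
	 *	smp_wmb()                 -> publish all writes
	 *	enable_dma_transmission() -> DMA may now fetch the chain
	 *
	 * If OWN were visible on the first descriptor before the fragment
	 * descriptors were written, the DMA could transmit stale data.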
	 */
	if (likely(!is_jumbo)) {
		bool last_segment = (nfrags == 0);

		first->des2 = dma_map_single(priv->device, skb->data,
					     nopaged_len, DMA_TO_DEVICE);
		if (dma_mapping_error(priv->device, first->des2))
			goto dma_map_err;

		priv->tx_skbuff_dma[first_entry].buf = first->des2;
		priv->tx_skbuff_dma[first_entry].len = nopaged_len;
		priv->tx_skbuff_dma[first_entry].last_segment = last_segment;

		if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
			     priv->hwts_tx_en)) {
			/* declare that device is doing timestamping */
			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
			priv->hw->desc->enable_tx_timestamp(first);
		}

		/* Prepare the first descriptor setting the OWN bit too */
		priv->hw->desc->prepare_tx_desc(first, 1, nopaged_len,
						csum_insertion, priv->mode, 1,
						last_segment);

		/* The own bit must be the latest setting done when preparing
		 * the descriptor, and a barrier is then needed to make sure
		 * that all is coherent before handing ownership to the DMA
		 * engine.
		 */
		smp_wmb();
	}

	netdev_sent_queue(dev, skb->len);
	priv->hw->dma->enable_dma_transmission(priv->ioaddr);

	spin_unlock(&priv->tx_lock);
	return NETDEV_TX_OK;

dma_map_err:
	spin_unlock(&priv->tx_lock);
	dev_err(priv->device, "Tx dma map failed\n");
	dev_kfree_skb(skb);
	priv->dev->stats.tx_dropped++;
	return NETDEV_TX_OK;
}

static void stmmac_rx_vlan(struct net_device *dev, struct sk_buff *skb)
{
	struct ethhdr *ehdr;
	u16 vlanid;

	if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) ==
	    NETIF_F_HW_VLAN_CTAG_RX &&
	    !__vlan_get_tag(skb, &vlanid)) {
		/* pop the vlan tag */
		ehdr = (struct ethhdr *)skb->data;
		memmove(skb->data + VLAN_HLEN, ehdr, ETH_ALEN * 2);
		skb_pull(skb, VLAN_HLEN);
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlanid);
	}
}

static inline int stmmac_rx_threshold_count(struct stmmac_priv *priv)
{
	if (priv->rx_zeroc_thresh < STMMAC_RX_THRESH)
		return 0;

	return 1;
}

/**
 * stmmac_rx_refill - refill used skb preallocated buffers
 * @priv: driver private structure
 * Description : this is to reallocate the skb for the reception process
 * that is based on zero-copy.
 */
static inline void stmmac_rx_refill(struct stmmac_priv *priv)
{
	int bfsize = priv->dma_buf_sz;
	unsigned int entry = priv->dirty_rx;
	int dirty = stmmac_rx_dirty(priv);

	while (dirty-- > 0) {
		struct dma_desc *p;

		if (priv->extend_desc)
			p = (struct dma_desc *)(priv->dma_erx + entry);
		else
			p = priv->dma_rx + entry;

		if (likely(priv->rx_skbuff[entry] == NULL)) {
			struct sk_buff *skb;

			skb = netdev_alloc_skb_ip_align(priv->dev, bfsize);
			if (unlikely(!skb)) {
				/* so for a while no zero-copy!
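				 * (rx_zeroc_thresh is raised to
				 * STMMAC_RX_THRESH here, so stmmac_rx()
				 * falls back to copying into freshly
				 * allocated skbs until enough refills
				 * succeed and the counter drains again)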
				 */
				priv->rx_zeroc_thresh = STMMAC_RX_THRESH;
				if (unlikely(net_ratelimit()))
					dev_err(priv->device,
						"fail to alloc skb entry %d\n",
						entry);
				break;
			}

			priv->rx_skbuff[entry] = skb;
			priv->rx_skbuff_dma[entry] =
			    dma_map_single(priv->device, skb->data, bfsize,
					   DMA_FROM_DEVICE);
			if (dma_mapping_error(priv->device,
					      priv->rx_skbuff_dma[entry])) {
				dev_err(priv->device, "Rx dma map failed\n");
				dev_kfree_skb(skb);
				break;
			}
			p->des2 = priv->rx_skbuff_dma[entry];

			priv->hw->mode->refill_desc3(priv, p);

			if (priv->rx_zeroc_thresh > 0)
				priv->rx_zeroc_thresh--;

			if (netif_msg_rx_status(priv))
				pr_debug("\trefill entry #%d\n", entry);
		}

		wmb();
		priv->hw->desc->set_rx_owner(p);
		wmb();

		entry = STMMAC_GET_ENTRY(entry, DMA_RX_SIZE);
	}
	priv->dirty_rx = entry;
}

/**
 * stmmac_rx - manage the receive process
 * @priv: driver private structure
 * @limit: napi budget.
 * Description : this is the function called by the napi poll method.
 * It gets all the frames inside the ring.
 */
static int stmmac_rx(struct stmmac_priv *priv, int limit)
{
	unsigned int entry = priv->cur_rx;
	unsigned int next_entry;
	unsigned int count = 0;
	int coe = priv->hw->rx_csum;

	if (netif_msg_rx_status(priv)) {
		pr_debug("%s: descriptor ring:\n", __func__);
		if (priv->extend_desc)
			stmmac_display_ring((void *)priv->dma_erx,
					    DMA_RX_SIZE, 1);
		else
			stmmac_display_ring((void *)priv->dma_rx,
					    DMA_RX_SIZE, 0);
	}
	while (count < limit) {
		int status;
		struct dma_desc *p;

		if (priv->extend_desc)
			p = (struct dma_desc *)(priv->dma_erx + entry);
		else
			p = priv->dma_rx + entry;

		/* read the status of the incoming frame */
		status = priv->hw->desc->rx_status(&priv->dev->stats,
						   &priv->xstats, p);
		/* check if managed by the DMA otherwise go ahead */
		if (unlikely(status & dma_own))
			break;

		count++;

		priv->cur_rx = STMMAC_GET_ENTRY(priv->cur_rx, DMA_RX_SIZE);
		next_entry = priv->cur_rx;

		if (priv->extend_desc)
			prefetch(priv->dma_erx + next_entry);
		else
			prefetch(priv->dma_rx + next_entry);

		if ((priv->extend_desc) && (priv->hw->desc->rx_extended_status))
			priv->hw->desc->rx_extended_status(&priv->dev->stats,
							   &priv->xstats,
							   priv->dma_erx +
							   entry);
		if (unlikely(status == discard_frame)) {
			priv->dev->stats.rx_errors++;
			if (priv->hwts_rx_en && !priv->extend_desc) {
				/* DESC2 & DESC3 will be overwritten by device
				 * with timestamp value, hence reinitialize
				 * them in stmmac_rx_refill() function so that
				 * device can reuse it.
2275 */ 2276 priv->rx_skbuff[entry] = NULL; 2277 dma_unmap_single(priv->device, 2278 priv->rx_skbuff_dma[entry], 2279 priv->dma_buf_sz, 2280 DMA_FROM_DEVICE); 2281 } 2282 } else { 2283 struct sk_buff *skb; 2284 int frame_len; 2285 2286 frame_len = priv->hw->desc->get_rx_frame_len(p, coe); 2287 2288 /* check if frame_len fits the preallocated memory */ 2289 if (frame_len > priv->dma_buf_sz) { 2290 priv->dev->stats.rx_length_errors++; 2291 break; 2292 } 2293 2294 /* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3 2295 * Type frames (LLC/LLC-SNAP) 2296 */ 2297 if (unlikely(status != llc_snap)) 2298 frame_len -= ETH_FCS_LEN; 2299 2300 if (netif_msg_rx_status(priv)) { 2301 pr_debug("\tdesc: %p [entry %d] buff=0x%x\n", 2302 p, entry, p->des2); 2303 if (frame_len > ETH_FRAME_LEN) 2304 pr_debug("\tframe size %d, COE: %d\n", 2305 frame_len, status); 2306 } 2307 2308 if (unlikely((frame_len < priv->rx_copybreak) || 2309 stmmac_rx_threshold_count(priv))) { 2310 skb = netdev_alloc_skb_ip_align(priv->dev, 2311 frame_len); 2312 if (unlikely(!skb)) { 2313 if (net_ratelimit()) 2314 dev_warn(priv->device, 2315 "packet dropped\n"); 2316 priv->dev->stats.rx_dropped++; 2317 break; 2318 } 2319 2320 dma_sync_single_for_cpu(priv->device, 2321 priv->rx_skbuff_dma 2322 [entry], frame_len, 2323 DMA_FROM_DEVICE); 2324 skb_copy_to_linear_data(skb, 2325 priv-> 2326 rx_skbuff[entry]->data, 2327 frame_len); 2328 2329 skb_put(skb, frame_len); 2330 dma_sync_single_for_device(priv->device, 2331 priv->rx_skbuff_dma 2332 [entry], frame_len, 2333 DMA_FROM_DEVICE); 2334 } else { 2335 skb = priv->rx_skbuff[entry]; 2336 if (unlikely(!skb)) { 2337 pr_err("%s: Inconsistent Rx chain\n", 2338 priv->dev->name); 2339 priv->dev->stats.rx_dropped++; 2340 break; 2341 } 2342 prefetch(skb->data - NET_IP_ALIGN); 2343 priv->rx_skbuff[entry] = NULL; 2344 priv->rx_zeroc_thresh++; 2345 2346 skb_put(skb, frame_len); 2347 dma_unmap_single(priv->device, 2348 priv->rx_skbuff_dma[entry], 2349 priv->dma_buf_sz, 2350 DMA_FROM_DEVICE); 2351 } 2352 2353 stmmac_get_rx_hwtstamp(priv, entry, skb); 2354 2355 if (netif_msg_pktdata(priv)) { 2356 pr_debug("frame received (%dbytes)", frame_len); 2357 print_pkt(skb->data, frame_len); 2358 } 2359 2360 stmmac_rx_vlan(priv->dev, skb); 2361 2362 skb->protocol = eth_type_trans(skb, priv->dev); 2363 2364 if (unlikely(!coe)) 2365 skb_checksum_none_assert(skb); 2366 else 2367 skb->ip_summed = CHECKSUM_UNNECESSARY; 2368 2369 napi_gro_receive(&priv->napi, skb); 2370 2371 priv->dev->stats.rx_packets++; 2372 priv->dev->stats.rx_bytes += frame_len; 2373 } 2374 entry = next_entry; 2375 } 2376 2377 stmmac_rx_refill(priv); 2378 2379 priv->xstats.rx_pkt_n += count; 2380 2381 return count; 2382 } 2383 2384 /** 2385 * stmmac_poll - stmmac poll method (NAPI) 2386 * @napi : pointer to the napi structure. 2387 * @budget : maximum number of packets that the current CPU can receive from 2388 * all interfaces. 2389 * Description : 2390 * To look at the incoming frames and clear the tx resources. 
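 *
 * It follows the standard NAPI contract: TX is always reclaimed, RX is
 * bounded by the budget, and only when less than the budget was consumed
 * is the poll completed and the DMA interrupt re-enabled. A minimal
 * sketch of that pattern (generic names, not extra driver API):
 *
 *	work_done = process_rx(budget);
 *	if (work_done < budget) {
 *		napi_complete(napi);
 *		enable_device_irqs();
 *	}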
 */
static int stmmac_poll(struct napi_struct *napi, int budget)
{
	struct stmmac_priv *priv = container_of(napi, struct stmmac_priv, napi);
	int work_done = 0;

	priv->xstats.napi_poll++;
	stmmac_tx_clean(priv);

	work_done = stmmac_rx(priv, budget);
	if (work_done < budget) {
		napi_complete(napi);
		stmmac_enable_dma_irq(priv);
	}
	return work_done;
}

/**
 * stmmac_tx_timeout
 * @dev : Pointer to net device structure
 * Description: this function is called when a packet transmission fails to
 * complete within a reasonable time. The driver will mark the error in the
 * netdev structure and arrange for the device to be reset to a sane state
 * in order to transmit a new packet.
 */
static void stmmac_tx_timeout(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);

	/* Clear Tx resources and restart transmitting again */
	stmmac_tx_err(priv);
}

/**
 * stmmac_set_rx_mode - entry point for multicast addressing
 * @dev : pointer to the device structure
 * Description:
 * This function is a driver entry point which gets called by the kernel
 * whenever multicast addresses must be enabled/disabled.
 * Return value:
 * void.
 */
static void stmmac_set_rx_mode(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);

	priv->hw->mac->set_filter(priv->hw, dev);
}

/**
 * stmmac_change_mtu - entry point to change MTU size for the device.
 * @dev : device pointer.
 * @new_mtu : the new MTU size for the device.
 * Description: the Maximum Transfer Unit (MTU) is used by the network layer
 * to drive packet transmission. Ethernet has an MTU of 1500 octets
 * (ETH_DATA_LEN). This value can be changed with ifconfig.
 * Return value:
 * 0 on success and an appropriate (-)ve integer as defined in errno.h
 * file on failure.
 */
static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	int max_mtu;

	if (netif_running(dev)) {
		pr_err("%s: must be stopped to change its MTU\n", dev->name);
		return -EBUSY;
	}

	if (priv->plat->enh_desc)
		max_mtu = JUMBO_LEN;
	else
		max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN);

	if (priv->plat->maxmtu < max_mtu)
		max_mtu = priv->plat->maxmtu;

	if ((new_mtu < 46) || (new_mtu > max_mtu)) {
		pr_err("%s: invalid MTU, max MTU is: %d\n", dev->name, max_mtu);
		return -EINVAL;
	}

	dev->mtu = new_mtu;
	netdev_update_features(dev);

	return 0;
}

static netdev_features_t stmmac_fix_features(struct net_device *dev,
					     netdev_features_t features)
{
	struct stmmac_priv *priv = netdev_priv(dev);

	if (priv->plat->rx_coe == STMMAC_RX_COE_NONE)
		features &= ~NETIF_F_RXCSUM;

	if (!priv->plat->tx_coe)
		features &= ~NETIF_F_CSUM_MASK;

	/* Some GMAC devices have a buggy Jumbo frame support that
	 * needs to have the Tx COE disabled for oversized frames
	 * (due to limited buffer sizes). In this case we disable
	 * the TX csum insertion in the TDES and do not use SF.
	 */
	if (priv->plat->bugged_jumbo && (dev->mtu > ETH_DATA_LEN))
		features &= ~NETIF_F_CSUM_MASK;

	return features;
}

static int stmmac_set_features(struct net_device *netdev,
			       netdev_features_t features)
{
	struct stmmac_priv *priv = netdev_priv(netdev);

	/* Keep the COE Type in case csum is supported */
	if (features & NETIF_F_RXCSUM)
		priv->hw->rx_csum = priv->plat->rx_coe;
	else
		priv->hw->rx_csum = 0;
	/* No check needed because rx_coe has been set before and it will be
	 * fixed in case of issue.
	 */
	priv->hw->mac->rx_ipc(priv->hw);

	return 0;
}

/**
 * stmmac_interrupt - main ISR
 * @irq: interrupt number.
 * @dev_id: to pass the net device pointer.
 * Description: this is the main driver interrupt service routine.
 * It can call:
 * o DMA service routine (to manage incoming frame reception and transmission
 *   status)
 * o Core interrupts to manage: remote wake-up, management counter, LPI
 *   interrupts.
 */
static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = (struct net_device *)dev_id;
	struct stmmac_priv *priv = netdev_priv(dev);

	/* check the dev pointer before touching priv */
	if (unlikely(!dev)) {
		pr_err("%s: invalid dev pointer\n", __func__);
		return IRQ_NONE;
	}

	if (priv->irq_wake)
		pm_wakeup_event(priv->device, 0);

	/* To handle GMAC own interrupts */
	if (priv->plat->has_gmac) {
		int status = priv->hw->mac->host_irq_status(priv->hw,
							    &priv->xstats);
		if (unlikely(status)) {
			/* For LPI we need to save the tx status */
			if (status & CORE_IRQ_TX_PATH_IN_LPI_MODE)
				priv->tx_path_in_lpi_mode = true;
			if (status & CORE_IRQ_TX_PATH_EXIT_LPI_MODE)
				priv->tx_path_in_lpi_mode = false;
		}
	}

	/* To handle DMA interrupts */
	stmmac_dma_interrupt(priv);

	return IRQ_HANDLED;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/* Polling receive - used by NETCONSOLE and other diagnostic tools
 * to allow network I/O with interrupts disabled.
 */
static void stmmac_poll_controller(struct net_device *dev)
{
	disable_irq(dev->irq);
	stmmac_interrupt(dev->irq, dev);
	enable_irq(dev->irq);
}
#endif

/**
 * stmmac_ioctl - Entry point for the Ioctl
 * @dev: Device pointer.
 * @rq: An IOCTL-specific structure, that can contain a pointer to
 *      a proprietary structure used to pass information to the driver.
 * @cmd: IOCTL command
 * Description:
 * Currently it supports the phy_mii_ioctl(...) and HW time stamping.
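 *
 * For reference, user space requests HW time stamping through the
 * standard SIOCSHWTSTAMP ABI (hedged userspace sketch; "eth0" and
 * sock_fd are placeholders):
 *
 *	struct hwtstamp_config cfg = {
 *		.tx_type   = HWTSTAMP_TX_ON,
 *		.rx_filter = HWTSTAMP_FILTER_ALL,
 *	};
 *	struct ifreq ifr;
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ);
 *	ifr.ifr_data = (void *)&cfg;
 *	ioctl(sock_fd, SIOCSHWTSTAMP, &ifr);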
 */
static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	int ret = -EOPNOTSUPP;

	if (!netif_running(dev))
		return -EINVAL;

	switch (cmd) {
	case SIOCGMIIPHY:
	case SIOCGMIIREG:
	case SIOCSMIIREG:
		if (!priv->phydev)
			return -EINVAL;
		ret = phy_mii_ioctl(priv->phydev, rq, cmd);
		break;
	case SIOCSHWTSTAMP:
		ret = stmmac_hwtstamp_ioctl(dev, rq);
		break;
	default:
		break;
	}

	return ret;
}

#ifdef CONFIG_DEBUG_FS
static struct dentry *stmmac_fs_dir;

static void sysfs_display_ring(void *head, int size, int extend_desc,
			       struct seq_file *seq)
{
	int i;
	struct dma_extended_desc *ep = (struct dma_extended_desc *)head;
	struct dma_desc *p = (struct dma_desc *)head;

	for (i = 0; i < size; i++) {
		u64 x;
		if (extend_desc) {
			x = *(u64 *) ep;
			seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
				   i, (unsigned int)virt_to_phys(ep),
				   (unsigned int)x, (unsigned int)(x >> 32),
				   ep->basic.des2, ep->basic.des3);
			ep++;
		} else {
			x = *(u64 *) p;
			/* use the basic descriptor pointer here, not ep */
			seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
				   i, (unsigned int)virt_to_phys(p),
				   (unsigned int)x, (unsigned int)(x >> 32),
				   p->des2, p->des3);
			p++;
		}
		seq_printf(seq, "\n");
	}
}

static int stmmac_sysfs_ring_read(struct seq_file *seq, void *v)
{
	struct net_device *dev = seq->private;
	struct stmmac_priv *priv = netdev_priv(dev);

	if (priv->extend_desc) {
		seq_printf(seq, "Extended RX descriptor ring:\n");
		sysfs_display_ring((void *)priv->dma_erx, DMA_RX_SIZE, 1, seq);
		seq_printf(seq, "Extended TX descriptor ring:\n");
		sysfs_display_ring((void *)priv->dma_etx, DMA_TX_SIZE, 1, seq);
	} else {
		seq_printf(seq, "RX descriptor ring:\n");
		sysfs_display_ring((void *)priv->dma_rx, DMA_RX_SIZE, 0, seq);
		seq_printf(seq, "TX descriptor ring:\n");
		sysfs_display_ring((void *)priv->dma_tx, DMA_TX_SIZE, 0, seq);
	}

	return 0;
}

static int stmmac_sysfs_ring_open(struct inode *inode, struct file *file)
{
	return single_open(file, stmmac_sysfs_ring_read, inode->i_private);
}

static const struct file_operations stmmac_rings_status_fops = {
	.owner = THIS_MODULE,
	.open = stmmac_sysfs_ring_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int stmmac_sysfs_dma_cap_read(struct seq_file *seq, void *v)
{
	struct net_device *dev = seq->private;
	struct stmmac_priv *priv = netdev_priv(dev);

	if (!priv->hw_cap_support) {
		seq_printf(seq, "DMA HW features not supported\n");
		return 0;
	}

	seq_printf(seq, "==============================\n");
	seq_printf(seq, "\tDMA HW features\n");
	seq_printf(seq, "==============================\n");

	seq_printf(seq, "\t10/100 Mbps %s\n",
		   (priv->dma_cap.mbps_10_100) ? "Y" : "N");
	seq_printf(seq, "\t1000 Mbps %s\n",
		   (priv->dma_cap.mbps_1000) ? "Y" : "N");
	seq_printf(seq, "\tHalf duplex %s\n",
		   (priv->dma_cap.half_duplex) ? "Y" : "N");
	seq_printf(seq, "\tHash Filter: %s\n",
		   (priv->dma_cap.hash_filter) ? "Y" : "N");
	seq_printf(seq, "\tMultiple MAC address registers: %s\n",
		   (priv->dma_cap.multi_addr) ? "Y" : "N");
	seq_printf(seq, "\tPCS (TBI/SGMII/RTBI PHY interfaces): %s\n",
		   (priv->dma_cap.pcs) ? "Y" : "N");
	seq_printf(seq, "\tSMA (MDIO) Interface: %s\n",
		   (priv->dma_cap.sma_mdio) ? "Y" : "N");
	seq_printf(seq, "\tPMT Remote wake up: %s\n",
		   (priv->dma_cap.pmt_remote_wake_up) ? "Y" : "N");
	seq_printf(seq, "\tPMT Magic Frame: %s\n",
		   (priv->dma_cap.pmt_magic_frame) ? "Y" : "N");
	seq_printf(seq, "\tRMON module: %s\n",
		   (priv->dma_cap.rmon) ? "Y" : "N");
	seq_printf(seq, "\tIEEE 1588-2002 Time Stamp: %s\n",
		   (priv->dma_cap.time_stamp) ? "Y" : "N");
	seq_printf(seq, "\tIEEE 1588-2008 Advanced Time Stamp: %s\n",
		   (priv->dma_cap.atime_stamp) ? "Y" : "N");
	seq_printf(seq, "\t802.3az - Energy-Efficient Ethernet (EEE): %s\n",
		   (priv->dma_cap.eee) ? "Y" : "N");
	seq_printf(seq, "\tAV features: %s\n", (priv->dma_cap.av) ? "Y" : "N");
	seq_printf(seq, "\tChecksum Offload in TX: %s\n",
		   (priv->dma_cap.tx_coe) ? "Y" : "N");
	seq_printf(seq, "\tIP Checksum Offload (type1) in RX: %s\n",
		   (priv->dma_cap.rx_coe_type1) ? "Y" : "N");
	seq_printf(seq, "\tIP Checksum Offload (type2) in RX: %s\n",
		   (priv->dma_cap.rx_coe_type2) ? "Y" : "N");
	seq_printf(seq, "\tRXFIFO > 2048bytes: %s\n",
		   (priv->dma_cap.rxfifo_over_2048) ? "Y" : "N");
	seq_printf(seq, "\tNumber of Additional RX channels: %d\n",
		   priv->dma_cap.number_rx_channel);
	seq_printf(seq, "\tNumber of Additional TX channels: %d\n",
		   priv->dma_cap.number_tx_channel);
	seq_printf(seq, "\tEnhanced descriptors: %s\n",
		   (priv->dma_cap.enh_desc) ? "Y" : "N");

	return 0;
}

static int stmmac_sysfs_dma_cap_open(struct inode *inode, struct file *file)
{
	return single_open(file, stmmac_sysfs_dma_cap_read, inode->i_private);
}

static const struct file_operations stmmac_dma_cap_fops = {
	.owner = THIS_MODULE,
	.open = stmmac_sysfs_dma_cap_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int stmmac_init_fs(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);

	/* Create per netdev entries */
	priv->dbgfs_dir = debugfs_create_dir(dev->name, stmmac_fs_dir);

	if (!priv->dbgfs_dir || IS_ERR(priv->dbgfs_dir)) {
		pr_err("ERROR %s/%s, debugfs create directory failed\n",
		       STMMAC_RESOURCE_NAME, dev->name);

		return -ENOMEM;
	}

	/* Entry to report DMA RX/TX rings */
	priv->dbgfs_rings_status =
	    debugfs_create_file("descriptors_status", S_IRUGO,
				priv->dbgfs_dir, dev,
				&stmmac_rings_status_fops);

	if (!priv->dbgfs_rings_status || IS_ERR(priv->dbgfs_rings_status)) {
		pr_info("ERROR creating stmmac ring debugfs file\n");
		debugfs_remove_recursive(priv->dbgfs_dir);

		return -ENOMEM;
	}

	/* Entry to report the DMA HW features */
	priv->dbgfs_dma_cap = debugfs_create_file("dma_cap", S_IRUGO,
						  priv->dbgfs_dir,
						  dev, &stmmac_dma_cap_fops);

	if (!priv->dbgfs_dma_cap || IS_ERR(priv->dbgfs_dma_cap)) {
		pr_info("ERROR creating stmmac dma_cap debugfs file\n");
		debugfs_remove_recursive(priv->dbgfs_dir);

		return -ENOMEM;
	}

	return 0;
}

static void stmmac_exit_fs(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);

	debugfs_remove_recursive(priv->dbgfs_dir);
}
#endif /* CONFIG_DEBUG_FS */

static const
struct net_device_ops stmmac_netdev_ops = { 2797 .ndo_open = stmmac_open, 2798 .ndo_start_xmit = stmmac_xmit, 2799 .ndo_stop = stmmac_release, 2800 .ndo_change_mtu = stmmac_change_mtu, 2801 .ndo_fix_features = stmmac_fix_features, 2802 .ndo_set_features = stmmac_set_features, 2803 .ndo_set_rx_mode = stmmac_set_rx_mode, 2804 .ndo_tx_timeout = stmmac_tx_timeout, 2805 .ndo_do_ioctl = stmmac_ioctl, 2806 #ifdef CONFIG_NET_POLL_CONTROLLER 2807 .ndo_poll_controller = stmmac_poll_controller, 2808 #endif 2809 .ndo_set_mac_address = eth_mac_addr, 2810 }; 2811 2812 /** 2813 * stmmac_hw_init - Init the MAC device 2814 * @priv: driver private structure 2815 * Description: this function is to configure the MAC device according to 2816 * some platform parameters or the HW capability register. It prepares the 2817 * driver to use either ring or chain modes and to setup either enhanced or 2818 * normal descriptors. 2819 */ 2820 static int stmmac_hw_init(struct stmmac_priv *priv) 2821 { 2822 struct mac_device_info *mac; 2823 2824 /* Identify the MAC HW device */ 2825 if (priv->plat->has_gmac) { 2826 priv->dev->priv_flags |= IFF_UNICAST_FLT; 2827 mac = dwmac1000_setup(priv->ioaddr, 2828 priv->plat->multicast_filter_bins, 2829 priv->plat->unicast_filter_entries); 2830 } else { 2831 mac = dwmac100_setup(priv->ioaddr); 2832 } 2833 if (!mac) 2834 return -ENOMEM; 2835 2836 priv->hw = mac; 2837 2838 /* Get and dump the chip ID */ 2839 priv->synopsys_id = stmmac_get_synopsys_id(priv); 2840 2841 /* To use the chained or ring mode */ 2842 if (chain_mode) { 2843 priv->hw->mode = &chain_mode_ops; 2844 pr_info(" Chain mode enabled\n"); 2845 priv->mode = STMMAC_CHAIN_MODE; 2846 } else { 2847 priv->hw->mode = &ring_mode_ops; 2848 pr_info(" Ring mode enabled\n"); 2849 priv->mode = STMMAC_RING_MODE; 2850 } 2851 2852 /* Get the HW capability (new GMAC newer than 3.50a) */ 2853 priv->hw_cap_support = stmmac_get_hw_features(priv); 2854 if (priv->hw_cap_support) { 2855 pr_info(" DMA HW capability register supported"); 2856 2857 /* We can override some gmac/dma configuration fields: e.g. 2858 * enh_desc, tx_coe (e.g. that are passed through the 2859 * platform) with the values from the HW capability 2860 * register (if supported). 
 */
		priv->plat->enh_desc = priv->dma_cap.enh_desc;
		priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up;

		/* TXCOE doesn't work in thresh DMA mode */
		if (priv->plat->force_thresh_dma_mode)
			priv->plat->tx_coe = 0;
		else
			priv->plat->tx_coe = priv->dma_cap.tx_coe;

		if (priv->dma_cap.rx_coe_type2)
			priv->plat->rx_coe = STMMAC_RX_COE_TYPE2;
		else if (priv->dma_cap.rx_coe_type1)
			priv->plat->rx_coe = STMMAC_RX_COE_TYPE1;

	} else
		pr_info(" No HW DMA feature register supported\n");

	/* To use alternate (extended) or normal descriptor structures */
	stmmac_selec_desc_mode(priv);

	if (priv->plat->rx_coe) {
		priv->hw->rx_csum = priv->plat->rx_coe;
		pr_info(" RX Checksum Offload Engine supported (type %d)\n",
			priv->plat->rx_coe);
	}
	if (priv->plat->tx_coe)
		pr_info(" TX Checksum insertion supported\n");

	if (priv->plat->pmt) {
		pr_info(" Wake-Up On LAN supported\n");
		device_set_wakeup_capable(priv->device, 1);
	}

	return 0;
}

/**
 * stmmac_dvr_probe
 * @device: device pointer
 * @plat_dat: platform data pointer
 * @res: stmmac resource pointer
 * Description: this is the main probe function used to
 * call the alloc_etherdev, allocate the priv structure.
 * Return:
 * returns 0 on success, otherwise errno.
 */
int stmmac_dvr_probe(struct device *device,
		     struct plat_stmmacenet_data *plat_dat,
		     struct stmmac_resources *res)
{
	int ret = 0;
	struct net_device *ndev = NULL;
	struct stmmac_priv *priv;

	ndev = alloc_etherdev(sizeof(struct stmmac_priv));
	if (!ndev)
		return -ENOMEM;

	SET_NETDEV_DEV(ndev, device);

	priv = netdev_priv(ndev);
	priv->device = device;
	priv->dev = ndev;

	stmmac_set_ethtool_ops(ndev);
	priv->pause = pause;
	priv->plat = plat_dat;
	priv->ioaddr = res->addr;
	priv->dev->base_addr = (unsigned long)res->addr;

	priv->dev->irq = res->irq;
	priv->wol_irq = res->wol_irq;
	priv->lpi_irq = res->lpi_irq;

	if (res->mac)
		memcpy(priv->dev->dev_addr, res->mac, ETH_ALEN);

	dev_set_drvdata(device, priv->dev);

	/* Verify driver arguments */
	stmmac_verify_args();

	/* Override with kernel parameters if supplied XXX CRS XXX
	 * this needs to have multiple instances
	 */
	if ((phyaddr >= 0) && (phyaddr <= 31))
		priv->plat->phy_addr = phyaddr;

	priv->stmmac_clk = devm_clk_get(priv->device, STMMAC_RESOURCE_NAME);
	if (IS_ERR(priv->stmmac_clk)) {
		dev_warn(priv->device, "%s: warning: cannot get CSR clock\n",
			 __func__);
		/* If we failed to obtain stmmac_clk and a specific clk_csr
		 * value is NOT passed from the platform, the probe fails.
		 */
		if (!priv->plat->clk_csr) {
			ret = PTR_ERR(priv->stmmac_clk);
			goto error_clk_get;
		} else {
			priv->stmmac_clk = NULL;
		}
	}
	clk_prepare_enable(priv->stmmac_clk);

	priv->pclk = devm_clk_get(priv->device, "pclk");
	if (IS_ERR(priv->pclk)) {
		if (PTR_ERR(priv->pclk) == -EPROBE_DEFER) {
			ret = -EPROBE_DEFER;
			goto error_pclk_get;
		}
		priv->pclk = NULL;
	}
	clk_prepare_enable(priv->pclk);

	priv->stmmac_rst = devm_reset_control_get(priv->device,
						  STMMAC_RESOURCE_NAME);
	if (IS_ERR(priv->stmmac_rst)) {
		if (PTR_ERR(priv->stmmac_rst) == -EPROBE_DEFER) {
			ret = -EPROBE_DEFER;
			goto error_hw_init;
		}
		dev_info(priv->device, "no reset control found\n");
		priv->stmmac_rst = NULL;
	}
	if (priv->stmmac_rst)
		reset_control_deassert(priv->stmmac_rst);

	/* Init MAC and get the capabilities */
	ret = stmmac_hw_init(priv);
	if (ret)
		goto error_hw_init;

	ndev->netdev_ops = &stmmac_netdev_ops;

	ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
			    NETIF_F_RXCSUM;
	ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;
	ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
#ifdef STMMAC_VLAN_TAG_USED
	/* Both mac100 and gmac support receive VLAN tag detection */
	ndev->features |= NETIF_F_HW_VLAN_CTAG_RX;
#endif
	priv->msg_enable = netif_msg_init(debug, default_msg_level);

	if (flow_ctrl)
		priv->flow_ctrl = FLOW_AUTO;	/* RX/TX pause on */

	/* Rx Watchdog is available in the COREs newer than 3.40.
	 * In some cases, for example on buggy HW, this feature
	 * has to be disabled; this can be done by passing the
	 * riwt_off field from the platform.
	 */
	if ((priv->synopsys_id >= DWMAC_CORE_3_50) && (!priv->plat->riwt_off)) {
		priv->use_riwt = 1;
		pr_info(" Enable RX Mitigation via HW Watchdog Timer\n");
	}

	netif_napi_add(ndev, &priv->napi, stmmac_poll, 64);

	spin_lock_init(&priv->lock);
	spin_lock_init(&priv->tx_lock);

	ret = register_netdev(ndev);
	if (ret) {
		pr_err("%s: ERROR %i registering the device\n", __func__, ret);
		goto error_netdev_register;
	}

	/* If a specific clk_csr value is passed from the platform
	 * this means that the CSR Clock Range selection cannot be
	 * changed at run-time and it is fixed. Otherwise the driver
	 * will try to set the MDC clock dynamically according to the
	 * actual csr clock input.
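	 *
	 * As a hedged example (assuming the STMMAC_CSR_* range macros
	 * from the driver headers), a platform with a fixed 250-300 MHz
	 * CSR clock could pin the MDC divider with:
	 *
	 *	plat_dat->clk_csr = STMMAC_CSR_250_300M;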
	 */
	if (!priv->plat->clk_csr)
		stmmac_clk_csr_set(priv);
	else
		priv->clk_csr = priv->plat->clk_csr;

	stmmac_check_pcs_mode(priv);

	if (priv->pcs != STMMAC_PCS_RGMII && priv->pcs != STMMAC_PCS_TBI &&
	    priv->pcs != STMMAC_PCS_RTBI) {
		/* MDIO bus Registration */
		ret = stmmac_mdio_register(ndev);
		if (ret < 0) {
			pr_debug("%s: MDIO bus (id: %d) registration failed",
				 __func__, priv->plat->bus_id);
			goto error_mdio_register;
		}
	}

	return 0;

error_mdio_register:
	unregister_netdev(ndev);
error_netdev_register:
	netif_napi_del(&priv->napi);
error_hw_init:
	clk_disable_unprepare(priv->pclk);
error_pclk_get:
	clk_disable_unprepare(priv->stmmac_clk);
error_clk_get:
	free_netdev(ndev);

	return ret;
}
EXPORT_SYMBOL_GPL(stmmac_dvr_probe);

/**
 * stmmac_dvr_remove
 * @ndev: net device pointer
 * Description: this function resets the TX/RX processes, disables the MAC
 * RX/TX, changes the link status and releases the DMA descriptor rings.
 */
int stmmac_dvr_remove(struct net_device *ndev)
{
	struct stmmac_priv *priv = netdev_priv(ndev);

	pr_info("%s:\n\tremoving driver", __func__);

	priv->hw->dma->stop_rx(priv->ioaddr);
	priv->hw->dma->stop_tx(priv->ioaddr);

	stmmac_set_mac(priv->ioaddr, false);
	netif_carrier_off(ndev);
	unregister_netdev(ndev);
	if (priv->stmmac_rst)
		reset_control_assert(priv->stmmac_rst);
	clk_disable_unprepare(priv->pclk);
	clk_disable_unprepare(priv->stmmac_clk);
	if (priv->pcs != STMMAC_PCS_RGMII && priv->pcs != STMMAC_PCS_TBI &&
	    priv->pcs != STMMAC_PCS_RTBI)
		stmmac_mdio_unregister(ndev);
	free_netdev(ndev);

	return 0;
}
EXPORT_SYMBOL_GPL(stmmac_dvr_remove);

/**
 * stmmac_suspend - suspend callback
 * @ndev: net device pointer
 * Description: this is the function to suspend the device; it is called by
 * the platform driver to stop the network queue, release the resources,
 * program the PMT register (for WoL) and clean up driver resources.
 */
int stmmac_suspend(struct net_device *ndev)
{
	struct stmmac_priv *priv = netdev_priv(ndev);
	unsigned long flags;

	if (!ndev || !netif_running(ndev))
		return 0;

	if (priv->phydev)
		phy_stop(priv->phydev);

	spin_lock_irqsave(&priv->lock, flags);

	netif_device_detach(ndev);
	netif_stop_queue(ndev);

	napi_disable(&priv->napi);

	/* Stop TX/RX DMA */
	priv->hw->dma->stop_tx(priv->ioaddr);
	priv->hw->dma->stop_rx(priv->ioaddr);

	/* Enable Power down mode by programming the PMT regs */
	if (device_may_wakeup(priv->device)) {
		priv->hw->mac->pmt(priv->hw, priv->wolopts);
		priv->irq_wake = 1;
	} else {
		stmmac_set_mac(priv->ioaddr, false);
		pinctrl_pm_select_sleep_state(priv->device);
		/* Disable clocks in case PMT is off */
		clk_disable(priv->pclk);
		clk_disable(priv->stmmac_clk);
	}
	spin_unlock_irqrestore(&priv->lock, flags);

	priv->oldlink = 0;
	priv->speed = 0;
	priv->oldduplex = -1;
	return 0;
}
EXPORT_SYMBOL_GPL(stmmac_suspend);

/**
 * stmmac_resume - resume callback
 * @ndev: net device pointer
 * Description: on resume this function is invoked to setup the DMA and CORE
 * in a usable state.
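 *
 * Note that the ring indexes are reset and the descriptors cleared before
 * stmmac_hw_setup() is called with init_ptp == false: the DMA is
 * reprogrammed from scratch while the PTP state set up at open time is
 * preserved across the suspend/resume cycle.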
 */
int stmmac_resume(struct net_device *ndev)
{
	struct stmmac_priv *priv = netdev_priv(ndev);
	unsigned long flags;

	if (!netif_running(ndev))
		return 0;

	spin_lock_irqsave(&priv->lock, flags);

	/* Power Down bit, into the PM register, is cleared
	 * automatically as soon as a magic packet or a Wake-up frame
	 * is received. Anyway, it's better to manually clear
	 * this bit because it can generate problems while resuming
	 * from other devices (e.g. serial console).
	 */
	if (device_may_wakeup(priv->device)) {
		priv->hw->mac->pmt(priv->hw, 0);
		priv->irq_wake = 0;
	} else {
		pinctrl_pm_select_default_state(priv->device);
		/* enable the clocks previously disabled */
		clk_enable(priv->stmmac_clk);
		clk_enable(priv->pclk);
		/* reset the phy so that it's ready */
		if (priv->mii)
			stmmac_mdio_reset(priv->mii);
	}

	netif_device_attach(ndev);

	priv->cur_rx = 0;
	priv->dirty_rx = 0;
	priv->dirty_tx = 0;
	priv->cur_tx = 0;
	stmmac_clear_descriptors(priv);

	stmmac_hw_setup(ndev, false);
	stmmac_init_tx_coalesce(priv);
	stmmac_set_rx_mode(ndev);

	napi_enable(&priv->napi);

	netif_start_queue(ndev);

	spin_unlock_irqrestore(&priv->lock, flags);

	if (priv->phydev)
		phy_start(priv->phydev);

	return 0;
}
EXPORT_SYMBOL_GPL(stmmac_resume);

#ifndef MODULE
static int __init stmmac_cmdline_opt(char *str)
{
	char *opt;

	if (!str || !*str)
		return -EINVAL;
	while ((opt = strsep(&str, ",")) != NULL) {
		if (!strncmp(opt, "debug:", 6)) {
			if (kstrtoint(opt + 6, 0, &debug))
				goto err;
		} else if (!strncmp(opt, "phyaddr:", 8)) {
			if (kstrtoint(opt + 8, 0, &phyaddr))
				goto err;
		} else if (!strncmp(opt, "buf_sz:", 7)) {
			if (kstrtoint(opt + 7, 0, &buf_sz))
				goto err;
		} else if (!strncmp(opt, "tc:", 3)) {
			if (kstrtoint(opt + 3, 0, &tc))
				goto err;
		} else if (!strncmp(opt, "watchdog:", 9)) {
			if (kstrtoint(opt + 9, 0, &watchdog))
				goto err;
		} else if (!strncmp(opt, "flow_ctrl:", 10)) {
			if (kstrtoint(opt + 10, 0, &flow_ctrl))
				goto err;
		} else if (!strncmp(opt, "pause:", 6)) {
			if (kstrtoint(opt + 6, 0, &pause))
				goto err;
		} else if (!strncmp(opt, "eee_timer:", 10)) {
			if (kstrtoint(opt + 10, 0, &eee_timer))
				goto err;
		} else if (!strncmp(opt, "chain_mode:", 11)) {
			if (kstrtoint(opt + 11, 0, &chain_mode))
				goto err;
		}
	}
	return 0;

err:
	pr_err("%s: ERROR broken module parameter conversion", __func__);
	return -EINVAL;
}

__setup("stmmaceth=", stmmac_cmdline_opt);
#endif /* MODULE */

static int __init stmmac_init(void)
{
#ifdef CONFIG_DEBUG_FS
	/* Create debugfs main directory if it doesn't exist yet */
	if (!stmmac_fs_dir) {
		stmmac_fs_dir = debugfs_create_dir(STMMAC_RESOURCE_NAME, NULL);

		if (!stmmac_fs_dir || IS_ERR(stmmac_fs_dir)) {
			pr_err("ERROR %s, debugfs create directory failed\n",
			       STMMAC_RESOURCE_NAME);

			return -ENOMEM;
		}
	}
#endif

	return 0;
}

static void __exit stmmac_exit(void)
{
#ifdef CONFIG_DEBUG_FS
	debugfs_remove_recursive(stmmac_fs_dir);
#endif
}

module_init(stmmac_init)
module_exit(stmmac_exit)

MODULE_DESCRIPTION("STMMAC 10/100/1000
Ethernet device driver"); 3288 MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>"); 3289 MODULE_LICENSE("GPL"); 3290