/*******************************************************************************
  This is the driver for the ST MAC 10/100/1000 on-chip Ethernet controllers.
  ST Ethernet IPs are built around a Synopsys IP Core.

	Copyright(C) 2007-2011 STMicroelectronics Ltd

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>

  Documentation available at:
	http://www.stlinux.com
  Support available at:
	https://bugzilla.stlinux.com/
*******************************************************************************/

#include <linux/clk.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/if_ether.h>
#include <linux/crc32.h>
#include <linux/mii.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/prefetch.h>
#ifdef CONFIG_STMMAC_DEBUG_FS
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#endif /* CONFIG_STMMAC_DEBUG_FS */
#include <linux/net_tstamp.h>
#include "stmmac_ptp.h"
#include "stmmac.h"

#define STMMAC_ALIGN(x)	L1_CACHE_ALIGN(x)
#define JUMBO_LEN	9000

/* Module parameters */
#define TX_TIMEO	5000
static int watchdog = TX_TIMEO;
module_param(watchdog, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(watchdog, "Transmit timeout in milliseconds (default 5s)");

static int debug = -1;
module_param(debug, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)");

int phyaddr = -1;
module_param(phyaddr, int, S_IRUGO);
MODULE_PARM_DESC(phyaddr, "Physical device address");

#define DMA_TX_SIZE	256
static int dma_txsize = DMA_TX_SIZE;
module_param(dma_txsize, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(dma_txsize, "Number of descriptors in the TX list");

#define DMA_RX_SIZE	256
static int dma_rxsize = DMA_RX_SIZE;
module_param(dma_rxsize, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(dma_rxsize, "Number of descriptors in the RX list");

static int flow_ctrl = FLOW_OFF;
module_param(flow_ctrl, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(flow_ctrl, "Flow control ability [on/off]");

static int pause = PAUSE_TIME;
module_param(pause, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(pause, "Flow Control Pause Time");

#define TC_DEFAULT 64
static int tc = TC_DEFAULT;
module_param(tc, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(tc, "DMA threshold control value");

#define DMA_BUFFER_SIZE	BUF_SIZE_2KiB
static int buf_sz = DMA_BUFFER_SIZE;
module_param(buf_sz, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(buf_sz, "DMA buffer size");
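
/* Example (illustrative only, not part of the original sources): most of
 * these knobs can be set at module load time, e.g.:
 *
 *	modprobe stmmac dma_txsize=512 dma_rxsize=512 buf_sz=4096 tc=128
 *
 * Out-of-range values are sanitized back to the defaults above by
 * stmmac_verify_args().
 */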

static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
				      NETIF_MSG_LINK | NETIF_MSG_IFUP |
				      NETIF_MSG_IFDOWN | NETIF_MSG_TIMER);

#define STMMAC_DEFAULT_LPI_TIMER	1000
static int eee_timer = STMMAC_DEFAULT_LPI_TIMER;
module_param(eee_timer, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec");
#define STMMAC_LPI_T(x) (jiffies + msecs_to_jiffies(x))

/* By default the driver uses the ring mode to manage the TX and RX
 * descriptors, but the user can force the chain mode instead by setting
 * this parameter.
 */
static int chain_mode;
module_param(chain_mode, int, S_IRUGO);
MODULE_PARM_DESC(chain_mode, "To use chain instead of ring mode");

static irqreturn_t stmmac_interrupt(int irq, void *dev_id);

#ifdef CONFIG_STMMAC_DEBUG_FS
static int stmmac_init_fs(struct net_device *dev);
static void stmmac_exit_fs(void);
#endif

#define STMMAC_COAL_TIMER(x) (jiffies + usecs_to_jiffies(x))

/**
 * stmmac_verify_args - verify the driver parameters.
 * Description: it verifies whether any wrong parameter was passed to the
 * driver. Note that wrong parameters are replaced with the default values.
 */
static void stmmac_verify_args(void)
{
	if (unlikely(watchdog < 0))
		watchdog = TX_TIMEO;
	if (unlikely(dma_rxsize < 0))
		dma_rxsize = DMA_RX_SIZE;
	if (unlikely(dma_txsize < 0))
		dma_txsize = DMA_TX_SIZE;
	if (unlikely((buf_sz < DMA_BUFFER_SIZE) || (buf_sz > BUF_SIZE_16KiB)))
		buf_sz = DMA_BUFFER_SIZE;
	if (unlikely(flow_ctrl > 1))
		flow_ctrl = FLOW_AUTO;
	else if (likely(flow_ctrl < 0))
		flow_ctrl = FLOW_OFF;
	if (unlikely((pause < 0) || (pause > 0xffff)))
		pause = PAUSE_TIME;
	if (eee_timer < 0)
		eee_timer = STMMAC_DEFAULT_LPI_TIMER;
}

/**
 * stmmac_clk_csr_set - dynamically set the MDC clock
 * @priv: driver private structure
 * Description: this is to dynamically set the MDC clock according to the csr
 * clock input.
 * Note:
 *	If a specific clk_csr value is passed from the platform
 *	this means that the CSR Clock Range selection cannot be
 *	changed at run-time and it is fixed (as reported in the driver
 *	documentation). Otherwise the driver will try to set the MDC
 *	clock dynamically according to the actual clock input.
 */
static void stmmac_clk_csr_set(struct stmmac_priv *priv)
{
	u32 clk_rate;

	clk_rate = clk_get_rate(priv->stmmac_clk);

	/* Platform provided default clk_csr would be assumed valid
	 * for all other cases except for the below mentioned ones.
	 * For values higher than the IEEE 802.3 specified frequency
	 * we cannot estimate the proper divider because the frequency
	 * of clk_csr_i is not known. So we do not change the default
	 * divider.
	 */
	if (!(priv->clk_csr & MAC_CSR_H_FRQ_MASK)) {
		if (clk_rate < CSR_F_35M)
			priv->clk_csr = STMMAC_CSR_20_35M;
		else if ((clk_rate >= CSR_F_35M) && (clk_rate < CSR_F_60M))
			priv->clk_csr = STMMAC_CSR_35_60M;
		else if ((clk_rate >= CSR_F_60M) && (clk_rate < CSR_F_100M))
			priv->clk_csr = STMMAC_CSR_60_100M;
		else if ((clk_rate >= CSR_F_100M) && (clk_rate < CSR_F_150M))
			priv->clk_csr = STMMAC_CSR_100_150M;
		else if ((clk_rate >= CSR_F_150M) && (clk_rate < CSR_F_250M))
			priv->clk_csr = STMMAC_CSR_150_250M;
		else if ((clk_rate >= CSR_F_250M) && (clk_rate < CSR_F_300M))
			priv->clk_csr = STMMAC_CSR_250_300M;
	}
}
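
/* Worked example for the divider selection above (illustrative): with a CSR
 * input clock of 75 MHz, clk_rate falls in the [CSR_F_60M, CSR_F_100M) range,
 * so the MDC clock is derived using the STMMAC_CSR_60_100M divider; a 25 MHz
 * clock would instead select STMMAC_CSR_20_35M.
 */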

static void print_pkt(unsigned char *buf, int len)
{
	int j;
	pr_debug("len = %d byte, buf addr: 0x%p", len, buf);
	for (j = 0; j < len; j++) {
		if ((j % 16) == 0)
			pr_debug("\n %03x:", j);
		pr_debug(" %02x", buf[j]);
	}
	pr_debug("\n");
}

/* minimum number of free TX descriptors required to wake up TX process */
#define STMMAC_TX_THRESH(x)	(x->dma_tx_size/4)

static inline u32 stmmac_tx_avail(struct stmmac_priv *priv)
{
	return priv->dirty_tx + priv->dma_tx_size - priv->cur_tx - 1;
}
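
/* Worked example (illustrative): cur_tx and dirty_tx are free-running
 * counters, so with dma_tx_size = 256, cur_tx = 300 and dirty_tx = 290,
 * stmmac_tx_avail() returns 290 + 256 - 300 - 1 = 245 free descriptors,
 * and STMMAC_TX_THRESH() wakes the queue once more than 64 are free.
 */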

/**
 * stmmac_hw_fix_mac_speed: callback for speed selection
 * @priv: driver private structure
 * Description: on some platforms (e.g. ST), some HW system configuration
 * registers have to be set according to the link speed negotiated.
 */
static inline void stmmac_hw_fix_mac_speed(struct stmmac_priv *priv)
{
	struct phy_device *phydev = priv->phydev;

	if (likely(priv->plat->fix_mac_speed))
		priv->plat->fix_mac_speed(priv->plat->bsp_priv, phydev->speed);
}

/**
 * stmmac_enable_eee_mode: Check and enter in LPI mode
 * @priv: driver private structure
 * Description: this function is to verify and enter in LPI mode for EEE.
 */
static void stmmac_enable_eee_mode(struct stmmac_priv *priv)
{
	/* Check and enter in LPI mode */
	if ((priv->dirty_tx == priv->cur_tx) &&
	    (priv->tx_path_in_lpi_mode == false))
		priv->hw->mac->set_eee_mode(priv->ioaddr);
}

/**
 * stmmac_disable_eee_mode: disable/exit from EEE
 * @priv: driver private structure
 * Description: this function is to exit and disable EEE in case the LPI
 * state is true. This is called by the xmit.
 */
void stmmac_disable_eee_mode(struct stmmac_priv *priv)
{
	priv->hw->mac->reset_eee_mode(priv->ioaddr);
	del_timer_sync(&priv->eee_ctrl_timer);
	priv->tx_path_in_lpi_mode = false;
}

/**
 * stmmac_eee_ctrl_timer: EEE TX SW timer.
 * @arg : data hook
 * Description:
 *  if there is no data transfer and if we are not in LPI state,
 *  then MAC Transmitter can be moved to LPI state.
 */
static void stmmac_eee_ctrl_timer(unsigned long arg)
{
	struct stmmac_priv *priv = (struct stmmac_priv *)arg;

	stmmac_enable_eee_mode(priv);
	mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer));
}

/**
 * stmmac_eee_init: init EEE
 * @priv: driver private structure
 * Description:
 *  If the EEE support has been enabled while configuring the driver,
 *  and if the GMAC actually supports the EEE (from the HW cap reg) and
 *  the phy can also manage EEE, then enable the LPI state and start the
 *  timer to verify if the tx path can enter the LPI state.
 */
bool stmmac_eee_init(struct stmmac_priv *priv)
{
	bool ret = false;

	/* When using PCS we cannot deal with the PHY registers at this
	 * stage, so we do not support extra features like EEE.
	 */
	if ((priv->pcs == STMMAC_PCS_RGMII) || (priv->pcs == STMMAC_PCS_TBI) ||
	    (priv->pcs == STMMAC_PCS_RTBI))
		goto out;

	/* MAC core supports the EEE feature. */
	if (priv->dma_cap.eee) {
		/* Check if the PHY supports EEE */
		if (phy_init_eee(priv->phydev, 1))
			goto out;

		if (!priv->eee_active) {
			priv->eee_active = 1;
			init_timer(&priv->eee_ctrl_timer);
			priv->eee_ctrl_timer.function = stmmac_eee_ctrl_timer;
			priv->eee_ctrl_timer.data = (unsigned long)priv;
			priv->eee_ctrl_timer.expires = STMMAC_LPI_T(eee_timer);
			add_timer(&priv->eee_ctrl_timer);

			priv->hw->mac->set_eee_timer(priv->ioaddr,
						     STMMAC_DEFAULT_LIT_LS,
						     priv->tx_lpi_timer);
		} else
			/* Set HW EEE according to the speed */
			priv->hw->mac->set_eee_pls(priv->ioaddr,
						   priv->phydev->link);

		pr_info("stmmac: Energy-Efficient Ethernet initialized\n");

		ret = true;
	}
out:
	return ret;
}

/* stmmac_get_tx_hwtstamp: get HW TX timestamps
 * @priv: driver private structure
 * @entry : descriptor index to be used.
 * @skb : the socket buffer
 * Description :
 * This function reads the timestamp from the descriptor, performs some
 * sanity checks and then passes it to the stack.
 */
static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv,
				   unsigned int entry, struct sk_buff *skb)
{
	struct skb_shared_hwtstamps shhwtstamp;
	u64 ns;
	void *desc = NULL;

	if (!priv->hwts_tx_en)
		return;

	/* exit if skb doesn't support hw tstamp */
	if (likely(!(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)))
		return;

	if (priv->adv_ts)
		desc = (priv->dma_etx + entry);
	else
		desc = (priv->dma_tx + entry);

	/* check tx tstamp status */
	if (!priv->hw->desc->get_tx_timestamp_status((struct dma_desc *)desc))
		return;

	/* get the valid tstamp */
	ns = priv->hw->desc->get_timestamp(desc, priv->adv_ts);

	memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
	shhwtstamp.hwtstamp = ns_to_ktime(ns);
	/* pass tstamp to stack */
	skb_tstamp_tx(skb, &shhwtstamp);
}

/* stmmac_get_rx_hwtstamp: get HW RX timestamps
 * @priv: driver private structure
 * @entry : descriptor index to be used.
 * @skb : the socket buffer
 * Description :
 * This function reads the received packet's timestamp from the descriptor
 * and passes it to the stack. It also performs some sanity checks.
 */
static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv,
				   unsigned int entry, struct sk_buff *skb)
{
	struct skb_shared_hwtstamps *shhwtstamp = NULL;
	u64 ns;
	void *desc = NULL;

	if (!priv->hwts_rx_en)
		return;

	if (priv->adv_ts)
		desc = (priv->dma_erx + entry);
	else
		desc = (priv->dma_rx + entry);

	/* exit if rx tstamp is not valid */
	if (!priv->hw->desc->get_rx_timestamp_status(desc, priv->adv_ts))
		return;

	/* get valid tstamp */
	ns = priv->hw->desc->get_timestamp(desc, priv->adv_ts);
	shhwtstamp = skb_hwtstamps(skb);
	memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
	shhwtstamp->hwtstamp = ns_to_ktime(ns);
}

/**
 * stmmac_hwtstamp_ioctl - control hardware timestamping.
 * @dev: device pointer.
 * @ifr: An IOCTL specific structure, that can contain a pointer to
 * a proprietary structure used to pass information to the driver.
 * Description:
 * This function configures the MAC to enable/disable both outgoing(TX)
 * and incoming(RX) packets time stamping based on user input.
 * Return Value:
 * 0 on success and an appropriate -ve integer on failure.
 */
static int stmmac_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	struct hwtstamp_config config;
	struct timespec now;
	u64 temp = 0;
	u32 ptp_v2 = 0;
	u32 tstamp_all = 0;
	u32 ptp_over_ipv4_udp = 0;
	u32 ptp_over_ipv6_udp = 0;
	u32 ptp_over_ethernet = 0;
	u32 snap_type_sel = 0;
	u32 ts_master_en = 0;
	u32 ts_event_en = 0;
	u32 value = 0;

	if (!(priv->dma_cap.time_stamp || priv->adv_ts)) {
		netdev_alert(priv->dev, "No support for HW time stamping\n");
		priv->hwts_tx_en = 0;
		priv->hwts_rx_en = 0;

		return -EOPNOTSUPP;
	}

	if (copy_from_user(&config, ifr->ifr_data,
			   sizeof(struct hwtstamp_config)))
		return -EFAULT;

	pr_debug("%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n",
		 __func__, config.flags, config.tx_type, config.rx_filter);

	/* reserved for future extensions */
	if (config.flags)
		return -EINVAL;

	if (config.tx_type != HWTSTAMP_TX_OFF &&
	    config.tx_type != HWTSTAMP_TX_ON)
		return -ERANGE;

	if (priv->adv_ts) {
		switch (config.rx_filter) {
		case HWTSTAMP_FILTER_NONE:
			/* time stamp no incoming packet at all */
			config.rx_filter = HWTSTAMP_FILTER_NONE;
			break;

		case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
			/* PTP v1, UDP, any kind of event packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
			/* take time stamp for all event messages */
			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
			/* PTP v1, UDP, Sync packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
			/* take time stamp for SYNC messages only */
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
			/* PTP v1, UDP, Delay_req packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
			/* take time stamp for Delay_Req messages only */
			ts_master_en = PTP_TCR_TSMSTRENA;
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
			/* PTP v2, UDP, any kind of event packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for all event messages */
			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
			/* PTP v2, UDP, Sync packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for SYNC messages only */
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
			/* PTP v2, UDP, Delay_req packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for Delay_Req messages only */
			ts_master_en = PTP_TCR_TSMSTRENA;
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_EVENT:
			/* PTP v2/802.1AS, any layer, any kind of event packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for all event messages */
			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			ptp_over_ethernet = PTP_TCR_TSIPENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_SYNC:
			/* PTP v2/802.1AS, any layer, Sync packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for SYNC messages only */
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			ptp_over_ethernet = PTP_TCR_TSIPENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
			/* PTP v2/802.1AS, any layer, Delay_req packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for Delay_Req messages only */
			ts_master_en = PTP_TCR_TSMSTRENA;
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			ptp_over_ethernet = PTP_TCR_TSIPENA;
			break;

		case HWTSTAMP_FILTER_ALL:
			/* time stamp any incoming packet */
			config.rx_filter = HWTSTAMP_FILTER_ALL;
			tstamp_all = PTP_TCR_TSENALL;
			break;

		default:
			return -ERANGE;
		}
	} else {
		switch (config.rx_filter) {
		case HWTSTAMP_FILTER_NONE:
			config.rx_filter = HWTSTAMP_FILTER_NONE;
			break;
		default:
			/* PTP v1, UDP, any kind of event packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
			break;
		}
	}
	priv->hwts_rx_en = ((config.rx_filter == HWTSTAMP_FILTER_NONE) ? 0 : 1);
	priv->hwts_tx_en = config.tx_type == HWTSTAMP_TX_ON;

	if (!priv->hwts_tx_en && !priv->hwts_rx_en)
		priv->hw->ptp->config_hw_tstamping(priv->ioaddr, 0);
	else {
		value = (PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | PTP_TCR_TSCTRLSSR |
			 tstamp_all | ptp_v2 | ptp_over_ethernet |
			 ptp_over_ipv6_udp | ptp_over_ipv4_udp | ts_event_en |
			 ts_master_en | snap_type_sel);

		priv->hw->ptp->config_hw_tstamping(priv->ioaddr, value);

		/* program Sub Second Increment reg */
		priv->hw->ptp->config_sub_second_increment(priv->ioaddr);

		/* calculate default added value:
		 * formula is :
		 * addend = (2^32)/freq_div_ratio;
		 * where, freq_div_ratio = STMMAC_SYSCLOCK/50MHz
		 * hence, addend = ((2^32) * 50MHz)/STMMAC_SYSCLOCK;
		 * NOTE: STMMAC_SYSCLOCK should be >= 50MHz to
		 * achieve 20ns accuracy.
		 *
		 * 2^x * y == (y << x), hence
		 * 2^32 * 50000000 ==> (50000000 << 32)
		 */
		temp = (u64) (50000000ULL << 32);
		priv->default_addend = div_u64(temp, STMMAC_SYSCLOCK);
		priv->hw->ptp->config_addend(priv->ioaddr,
					     priv->default_addend);

		/* initialize system time */
		getnstimeofday(&now);
		priv->hw->ptp->init_systime(priv->ioaddr, now.tv_sec,
					    now.tv_nsec);
	}

	return copy_to_user(ifr->ifr_data, &config,
			    sizeof(struct hwtstamp_config)) ? -EFAULT : 0;
}
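
/* Illustrative userspace counterpart of the ioctl above (a sketch, not part
 * of the driver): timestamping is requested through the standard
 * SIOCSHWTSTAMP call, e.g.:
 *
 *	struct hwtstamp_config cfg = {
 *		.tx_type = HWTSTAMP_TX_ON,
 *		.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT,
 *	};
 *	struct ifreq ifr;
 *
 *	memset(&ifr, 0, sizeof(ifr));
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *	ifr.ifr_data = (void *)&cfg;
 *	ioctl(sockfd, SIOCSHWTSTAMP, &ifr);
 *
 * Worked example for the addend computation above, assuming STMMAC_SYSCLOCK
 * is 62.5 MHz: addend = (2^32 * 50000000) / 62500000 = 0.8 * 2^32, i.e.
 * 0xCCCCCCCC.
 */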

/**
 * stmmac_init_ptp: init PTP
 * @priv: driver private structure
 * Description: this is to verify if the HW supports the PTPv1 or v2.
 * This is done by looking at the HW cap. register.
 * Also it registers the ptp driver.
 */
static int stmmac_init_ptp(struct stmmac_priv *priv)
{
	if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
		return -EOPNOTSUPP;

	if (netif_msg_hw(priv)) {
		if (priv->dma_cap.time_stamp) {
			pr_debug("IEEE 1588-2002 Time Stamp supported\n");
			priv->adv_ts = 0;
		}
		if (priv->dma_cap.atime_stamp && priv->extend_desc) {
			pr_debug
			    ("IEEE 1588-2008 Advanced Time Stamp supported\n");
			priv->adv_ts = 1;
		}
	}

	priv->hw->ptp = &stmmac_ptp;
	priv->hwts_tx_en = 0;
	priv->hwts_rx_en = 0;

	return stmmac_ptp_register(priv);
}

static void stmmac_release_ptp(struct stmmac_priv *priv)
{
	stmmac_ptp_unregister(priv);
}

/**
 * stmmac_adjust_link
 * @dev: net device structure
 * Description: it adjusts the link parameters.
 */
static void stmmac_adjust_link(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	struct phy_device *phydev = priv->phydev;
	unsigned long flags;
	int new_state = 0;
	unsigned int fc = priv->flow_ctrl, pause_time = priv->pause;

	if (phydev == NULL)
		return;

	spin_lock_irqsave(&priv->lock, flags);

	if (phydev->link) {
		u32 ctrl = readl(priv->ioaddr + MAC_CTRL_REG);

		/* Now we make sure that we can be in full duplex mode.
		 * If not, we operate in half-duplex mode.
		 */
		if (phydev->duplex != priv->oldduplex) {
			new_state = 1;
			if (!(phydev->duplex))
				ctrl &= ~priv->hw->link.duplex;
			else
				ctrl |= priv->hw->link.duplex;
			priv->oldduplex = phydev->duplex;
		}
		/* Flow Control operation */
		if (phydev->pause)
			priv->hw->mac->flow_ctrl(priv->ioaddr, phydev->duplex,
						 fc, pause_time);

		if (phydev->speed != priv->speed) {
			new_state = 1;
			switch (phydev->speed) {
			case 1000:
				if (likely(priv->plat->has_gmac))
					ctrl &= ~priv->hw->link.port;
				stmmac_hw_fix_mac_speed(priv);
				break;
			case 100:
			case 10:
				if (priv->plat->has_gmac) {
					ctrl |= priv->hw->link.port;
					if (phydev->speed == SPEED_100) {
						ctrl |= priv->hw->link.speed;
					} else {
						ctrl &= ~(priv->hw->link.speed);
					}
				} else {
					ctrl &= ~priv->hw->link.port;
				}
				stmmac_hw_fix_mac_speed(priv);
				break;
			default:
				if (netif_msg_link(priv))
					pr_warn("%s: Speed (%d) not 10/100\n",
						dev->name, phydev->speed);
				break;
			}

			priv->speed = phydev->speed;
		}

		writel(ctrl, priv->ioaddr + MAC_CTRL_REG);

		if (!priv->oldlink) {
			new_state = 1;
			priv->oldlink = 1;
		}
	} else if (priv->oldlink) {
		new_state = 1;
		priv->oldlink = 0;
		priv->speed = 0;
		priv->oldduplex = -1;
	}

	if (new_state && netif_msg_link(priv))
		phy_print_status(phydev);

	/* At this stage, it could be needed to setup the EEE or adjust some
	 * MAC related HW registers.
	 */
	priv->eee_enabled = stmmac_eee_init(priv);

	spin_unlock_irqrestore(&priv->lock, flags);
}

/**
 * stmmac_check_pcs_mode: verify if RGMII/SGMII is supported
 * @priv: driver private structure
 * Description: this is to verify if the HW supports the Physical Coding
 * Sublayer (PCS) interface, which can be used when the MAC is configured
 * for the TBI, RTBI, or SGMII PHY interface.
 */
static void stmmac_check_pcs_mode(struct stmmac_priv *priv)
{
	int interface = priv->plat->interface;

	if (priv->dma_cap.pcs) {
		if ((interface == PHY_INTERFACE_MODE_RGMII) ||
		    (interface == PHY_INTERFACE_MODE_RGMII_ID) ||
		    (interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
		    (interface == PHY_INTERFACE_MODE_RGMII_TXID)) {
			pr_debug("STMMAC: PCS RGMII support enable\n");
			priv->pcs = STMMAC_PCS_RGMII;
		} else if (interface == PHY_INTERFACE_MODE_SGMII) {
			pr_debug("STMMAC: PCS SGMII support enable\n");
			priv->pcs = STMMAC_PCS_SGMII;
		}
	}
}

/**
 * stmmac_init_phy - PHY initialization
 * @dev: net device structure
 * Description: it initializes the driver's PHY state, and attaches the PHY
 * to the mac driver.
 * Return value:
 * 0 on success
 */
static int stmmac_init_phy(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	struct phy_device *phydev;
	char phy_id_fmt[MII_BUS_ID_SIZE + 3];
	char bus_id[MII_BUS_ID_SIZE];
	int interface = priv->plat->interface;

	priv->oldlink = 0;
	priv->speed = 0;
	priv->oldduplex = -1;

	if (priv->plat->phy_bus_name)
		snprintf(bus_id, MII_BUS_ID_SIZE, "%s-%x",
			 priv->plat->phy_bus_name, priv->plat->bus_id);
	else
		snprintf(bus_id, MII_BUS_ID_SIZE, "stmmac-%x",
			 priv->plat->bus_id);

	snprintf(phy_id_fmt, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, bus_id,
		 priv->plat->phy_addr);
	pr_debug("stmmac_init_phy: trying to attach to %s\n", phy_id_fmt);

	phydev = phy_connect(dev, phy_id_fmt, &stmmac_adjust_link, interface);

	if (IS_ERR(phydev)) {
		pr_err("%s: Could not attach to PHY\n", dev->name);
		return PTR_ERR(phydev);
	}

	/* Stop Advertising 1000BASE Capability if interface is not GMII */
	if ((interface == PHY_INTERFACE_MODE_MII) ||
	    (interface == PHY_INTERFACE_MODE_RMII))
		phydev->advertising &= ~(SUPPORTED_1000baseT_Half |
					 SUPPORTED_1000baseT_Full);

	/*
	 * Broken HW is sometimes missing the pull-up resistor on the
	 * MDIO line, which results in reads to non-existent devices returning
	 * 0 rather than 0xffff. Catch this here and treat 0 as a non-existent
	 * device as well.
	 * Note: phydev->phy_id is the result of reading the UID PHY registers.
	 */
	if (phydev->phy_id == 0) {
		phy_disconnect(phydev);
		return -ENODEV;
	}
	pr_debug("stmmac_init_phy: %s: attached to PHY (UID 0x%x)"
		 " Link = %d\n", dev->name, phydev->phy_id, phydev->link);

	priv->phydev = phydev;

	return 0;
}
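
/* Illustrative example of the PHY id built above: with phy_bus_name unset,
 * bus_id 1 and phy_addr 4, PHY_ID_FMT ("%s:%02x") yields "stmmac-1:04",
 * which is the string phy_connect() uses to look up the PHY on the MDIO bus.
 */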

/**
 * stmmac_display_ring: display ring
 * @head: pointer to the head of the ring passed.
 * @size: size of the ring.
 * @extend_desc: to verify if extended descriptors are used.
 * Description: display the control/status and buffer descriptors.
 */
static void stmmac_display_ring(void *head, int size, int extend_desc)
{
	int i;
	struct dma_extended_desc *ep = (struct dma_extended_desc *)head;
	struct dma_desc *p = (struct dma_desc *)head;

	for (i = 0; i < size; i++) {
		u64 x;
		if (extend_desc) {
			x = *(u64 *) ep;
			pr_info("%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
				i, (unsigned int)virt_to_phys(ep),
				(unsigned int)x, (unsigned int)(x >> 32),
				ep->basic.des2, ep->basic.des3);
			ep++;
		} else {
			x = *(u64 *) p;
			pr_info("%d [0x%x]: 0x%x 0x%x 0x%x 0x%x",
				i, (unsigned int)virt_to_phys(p),
				(unsigned int)x, (unsigned int)(x >> 32),
				p->des2, p->des3);
			p++;
		}
		pr_info("\n");
	}
}

static void stmmac_display_rings(struct stmmac_priv *priv)
{
	unsigned int txsize = priv->dma_tx_size;
	unsigned int rxsize = priv->dma_rx_size;

	if (priv->extend_desc) {
		pr_info("Extended RX descriptor ring:\n");
		stmmac_display_ring((void *)priv->dma_erx, rxsize, 1);
		pr_info("Extended TX descriptor ring:\n");
		stmmac_display_ring((void *)priv->dma_etx, txsize, 1);
	} else {
		pr_info("RX descriptor ring:\n");
		stmmac_display_ring((void *)priv->dma_rx, rxsize, 0);
		pr_info("TX descriptor ring:\n");
		stmmac_display_ring((void *)priv->dma_tx, txsize, 0);
	}
}

static int stmmac_set_bfsize(int mtu, int bufsize)
{
	int ret = bufsize;

	if (mtu >= BUF_SIZE_4KiB)
		ret = BUF_SIZE_8KiB;
	else if (mtu >= BUF_SIZE_2KiB)
		ret = BUF_SIZE_4KiB;
	else if (mtu >= DMA_BUFFER_SIZE)
		ret = BUF_SIZE_2KiB;
	else
		ret = DMA_BUFFER_SIZE;

	return ret;
}
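
/* Worked examples for stmmac_set_bfsize() (illustrative): a standard 1500
 * byte MTU falls below all thresholds and keeps the 2 KiB default; an MTU
 * of 4000 selects BUF_SIZE_4KiB; a 9000 byte jumbo MTU selects BUF_SIZE_8KiB.
 */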

/**
 * stmmac_clear_descriptors: clear descriptors
 * @priv: driver private structure
 * Description: this function is called to clear the TX and RX descriptors,
 * whether basic or extended descriptors are in use.
 */
static void stmmac_clear_descriptors(struct stmmac_priv *priv)
{
	int i;
	unsigned int txsize = priv->dma_tx_size;
	unsigned int rxsize = priv->dma_rx_size;

	/* Clear the Rx/Tx descriptors */
	for (i = 0; i < rxsize; i++)
		if (priv->extend_desc)
			priv->hw->desc->init_rx_desc(&priv->dma_erx[i].basic,
						     priv->use_riwt, priv->mode,
						     (i == rxsize - 1));
		else
			priv->hw->desc->init_rx_desc(&priv->dma_rx[i],
						     priv->use_riwt, priv->mode,
						     (i == rxsize - 1));
	for (i = 0; i < txsize; i++)
		if (priv->extend_desc)
			priv->hw->desc->init_tx_desc(&priv->dma_etx[i].basic,
						     priv->mode,
						     (i == txsize - 1));
		else
			priv->hw->desc->init_tx_desc(&priv->dma_tx[i],
						     priv->mode,
						     (i == txsize - 1));
}

static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
				  int i)
{
	struct sk_buff *skb;

	skb = __netdev_alloc_skb(priv->dev, priv->dma_buf_sz + NET_IP_ALIGN,
				 GFP_KERNEL);
	if (!skb) {
		pr_err("%s: Rx init fails; skb is NULL\n", __func__);
		return -ENOMEM;
	}
	skb_reserve(skb, NET_IP_ALIGN);
	priv->rx_skbuff[i] = skb;
	priv->rx_skbuff_dma[i] = dma_map_single(priv->device, skb->data,
						priv->dma_buf_sz,
						DMA_FROM_DEVICE);
	if (dma_mapping_error(priv->device, priv->rx_skbuff_dma[i])) {
		pr_err("%s: DMA mapping error\n", __func__);
		dev_kfree_skb_any(skb);
		return -EINVAL;
	}

	p->des2 = priv->rx_skbuff_dma[i];

	if ((priv->mode == STMMAC_RING_MODE) &&
	    (priv->dma_buf_sz == BUF_SIZE_16KiB))
		priv->hw->ring->init_desc3(p);

	return 0;
}

static void stmmac_free_rx_buffers(struct stmmac_priv *priv, int i)
{
	if (priv->rx_skbuff[i]) {
		dma_unmap_single(priv->device, priv->rx_skbuff_dma[i],
				 priv->dma_buf_sz, DMA_FROM_DEVICE);
		dev_kfree_skb_any(priv->rx_skbuff[i]);
	}
	priv->rx_skbuff[i] = NULL;
}

/**
 * init_dma_desc_rings - init the RX/TX descriptor rings
 * @dev: net device structure
 * Description: this function initializes the DMA RX/TX descriptors
 * and allocates the socket buffers. It supports the chained and ring
 * modes.
 */
static int init_dma_desc_rings(struct net_device *dev)
{
	int i;
	struct stmmac_priv *priv = netdev_priv(dev);
	unsigned int txsize = priv->dma_tx_size;
	unsigned int rxsize = priv->dma_rx_size;
	unsigned int bfsize = 0;
	int ret = -ENOMEM;

	/* Set the max buffer size according to the DESC mode
	 * and the MTU. Note that RING mode allows 16KiB bsize.
	 */
	if (priv->mode == STMMAC_RING_MODE)
		bfsize = priv->hw->ring->set_16kib_bfsize(dev->mtu);

	if (bfsize < BUF_SIZE_16KiB)
		bfsize = stmmac_set_bfsize(dev->mtu, priv->dma_buf_sz);

	if (netif_msg_probe(priv))
		pr_debug("%s: txsize %d, rxsize %d, bfsize %d\n", __func__,
			 txsize, rxsize, bfsize);

	if (priv->extend_desc) {
		priv->dma_erx = dma_alloc_coherent(priv->device, rxsize *
						   sizeof(struct
							  dma_extended_desc),
						   &priv->dma_rx_phy,
						   GFP_KERNEL);
		if (!priv->dma_erx)
			goto err_dma;

		priv->dma_etx = dma_alloc_coherent(priv->device, txsize *
						   sizeof(struct
							  dma_extended_desc),
						   &priv->dma_tx_phy,
						   GFP_KERNEL);
		if (!priv->dma_etx) {
			dma_free_coherent(priv->device, priv->dma_rx_size *
					  sizeof(struct dma_extended_desc),
					  priv->dma_erx, priv->dma_rx_phy);
			goto err_dma;
		}
	} else {
		priv->dma_rx = dma_alloc_coherent(priv->device, rxsize *
						  sizeof(struct dma_desc),
						  &priv->dma_rx_phy,
						  GFP_KERNEL);
		if (!priv->dma_rx)
			goto err_dma;

		priv->dma_tx = dma_alloc_coherent(priv->device, txsize *
						  sizeof(struct dma_desc),
						  &priv->dma_tx_phy,
						  GFP_KERNEL);
		if (!priv->dma_tx) {
			dma_free_coherent(priv->device, priv->dma_rx_size *
					  sizeof(struct dma_desc),
					  priv->dma_rx, priv->dma_rx_phy);
			goto err_dma;
		}
	}

	priv->rx_skbuff_dma = kmalloc_array(rxsize, sizeof(dma_addr_t),
					    GFP_KERNEL);
	if (!priv->rx_skbuff_dma)
		goto err_rx_skbuff_dma;

	priv->rx_skbuff = kmalloc_array(rxsize, sizeof(struct sk_buff *),
					GFP_KERNEL);
	if (!priv->rx_skbuff)
		goto err_rx_skbuff;

	priv->tx_skbuff_dma = kmalloc_array(txsize, sizeof(dma_addr_t),
					    GFP_KERNEL);
	if (!priv->tx_skbuff_dma)
		goto err_tx_skbuff_dma;

	priv->tx_skbuff = kmalloc_array(txsize, sizeof(struct sk_buff *),
					GFP_KERNEL);
	if (!priv->tx_skbuff)
		goto err_tx_skbuff;

	if (netif_msg_probe(priv)) {
		pr_debug("(%s) dma_rx_phy=0x%08x dma_tx_phy=0x%08x\n", __func__,
			 (u32) priv->dma_rx_phy, (u32) priv->dma_tx_phy);

		/* RX INITIALIZATION */
		pr_debug("\tSKB addresses:\nskb\t\tskb data\tdma data\n");
	}
	for (i = 0; i < rxsize; i++) {
		struct dma_desc *p;
		if (priv->extend_desc)
			p = &((priv->dma_erx + i)->basic);
		else
			p = priv->dma_rx + i;

		ret = stmmac_init_rx_buffers(priv, p, i);
		if (ret)
			goto err_init_rx_buffers;

		if (netif_msg_probe(priv))
			pr_debug("[%p]\t[%p]\t[%x]\n", priv->rx_skbuff[i],
				 priv->rx_skbuff[i]->data,
				 (unsigned int)priv->rx_skbuff_dma[i]);
	}
	priv->cur_rx = 0;
	priv->dirty_rx = (unsigned int)(i - rxsize);
	priv->dma_buf_sz = bfsize;
	buf_sz = bfsize;

	/* Setup the chained descriptor addresses */
	if (priv->mode == STMMAC_CHAIN_MODE) {
		if (priv->extend_desc) {
			priv->hw->chain->init(priv->dma_erx, priv->dma_rx_phy,
					      rxsize, 1);
			priv->hw->chain->init(priv->dma_etx, priv->dma_tx_phy,
					      txsize, 1);
		} else {
			priv->hw->chain->init(priv->dma_rx, priv->dma_rx_phy,
					      rxsize, 0);
			priv->hw->chain->init(priv->dma_tx, priv->dma_tx_phy,
					      txsize, 0);
		}
	}

	/* TX INITIALIZATION */
	for (i = 0; i < txsize; i++) {
		struct dma_desc *p;
		if (priv->extend_desc)
			p = &((priv->dma_etx + i)->basic);
		else
			p = priv->dma_tx + i;
		p->des2 = 0;
		priv->tx_skbuff_dma[i] = 0;
		priv->tx_skbuff[i] = NULL;
	}

	priv->dirty_tx = 0;
	priv->cur_tx = 0;

	stmmac_clear_descriptors(priv);

	if (netif_msg_hw(priv))
		stmmac_display_rings(priv);

	return 0;
err_init_rx_buffers:
	while (--i >= 0)
		stmmac_free_rx_buffers(priv, i);
	kfree(priv->tx_skbuff);
err_tx_skbuff:
	kfree(priv->tx_skbuff_dma);
err_tx_skbuff_dma:
	kfree(priv->rx_skbuff);
err_rx_skbuff:
	kfree(priv->rx_skbuff_dma);
err_rx_skbuff_dma:
	if (priv->extend_desc) {
		dma_free_coherent(priv->device, priv->dma_tx_size *
				  sizeof(struct dma_extended_desc),
				  priv->dma_etx, priv->dma_tx_phy);
		dma_free_coherent(priv->device, priv->dma_rx_size *
				  sizeof(struct dma_extended_desc),
				  priv->dma_erx, priv->dma_rx_phy);
	} else {
		dma_free_coherent(priv->device,
				  priv->dma_tx_size * sizeof(struct dma_desc),
				  priv->dma_tx, priv->dma_tx_phy);
		dma_free_coherent(priv->device,
				  priv->dma_rx_size * sizeof(struct dma_desc),
				  priv->dma_rx, priv->dma_rx_phy);
	}
err_dma:
	return ret;
}
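
/* Sizing note (illustrative, assuming a 16 byte struct dma_desc): with the
 * default 256 descriptors per ring, each basic ring takes 4 KiB of coherent
 * memory; extended descriptors roughly double that, since struct
 * dma_extended_desc carries four extra words per entry.
 */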

static void dma_free_rx_skbufs(struct stmmac_priv *priv)
{
	int i;

	for (i = 0; i < priv->dma_rx_size; i++)
		stmmac_free_rx_buffers(priv, i);
}

static void dma_free_tx_skbufs(struct stmmac_priv *priv)
{
	int i;

	for (i = 0; i < priv->dma_tx_size; i++) {
		if (priv->tx_skbuff[i] != NULL) {
			struct dma_desc *p;
			if (priv->extend_desc)
				p = &((priv->dma_etx + i)->basic);
			else
				p = priv->dma_tx + i;

			if (priv->tx_skbuff_dma[i])
				dma_unmap_single(priv->device,
						 priv->tx_skbuff_dma[i],
						 priv->hw->desc->get_tx_len(p),
						 DMA_TO_DEVICE);
			dev_kfree_skb_any(priv->tx_skbuff[i]);
			priv->tx_skbuff[i] = NULL;
			priv->tx_skbuff_dma[i] = 0;
		}
	}
}

static void free_dma_desc_resources(struct stmmac_priv *priv)
{
	/* Release the DMA TX/RX socket buffers */
	dma_free_rx_skbufs(priv);
	dma_free_tx_skbufs(priv);

	/* Free DMA regions of consistent memory previously allocated */
	if (!priv->extend_desc) {
		dma_free_coherent(priv->device,
				  priv->dma_tx_size * sizeof(struct dma_desc),
				  priv->dma_tx, priv->dma_tx_phy);
		dma_free_coherent(priv->device,
				  priv->dma_rx_size * sizeof(struct dma_desc),
				  priv->dma_rx, priv->dma_rx_phy);
	} else {
		dma_free_coherent(priv->device, priv->dma_tx_size *
				  sizeof(struct dma_extended_desc),
				  priv->dma_etx, priv->dma_tx_phy);
		dma_free_coherent(priv->device, priv->dma_rx_size *
				  sizeof(struct dma_extended_desc),
				  priv->dma_erx, priv->dma_rx_phy);
	}
	kfree(priv->rx_skbuff_dma);
	kfree(priv->rx_skbuff);
	kfree(priv->tx_skbuff_dma);
	kfree(priv->tx_skbuff);
}

/**
 * stmmac_dma_operation_mode - HW DMA operation mode
 * @priv: driver private structure
 * Description: it sets the DMA operation mode: tx/rx DMA thresholds
 * or Store-And-Forward capability.
 */
static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
{
	if (priv->plat->force_thresh_dma_mode)
		priv->hw->dma->dma_mode(priv->ioaddr, tc, tc);
	else if (priv->plat->force_sf_dma_mode || priv->plat->tx_coe) {
		/*
		 * In case of GMAC, SF mode can be enabled
		 * to perform the TX COE in HW. This depends on:
		 * 1) TX COE being actually supported;
		 * 2) there being no buggy Jumbo frame support
		 *    that requires not inserting the csum in the TDES.
		 */
		priv->hw->dma->dma_mode(priv->ioaddr, SF_DMA_MODE, SF_DMA_MODE);
		tc = SF_DMA_MODE;
	} else
		priv->hw->dma->dma_mode(priv->ioaddr, tc, SF_DMA_MODE);
}
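
/* Illustrative effect of the two modes above: with tc = 64 the TX DMA starts
 * forwarding a frame to the MAC once 64 bytes are queued in the FIFO, while
 * SF_DMA_MODE waits for a complete frame; store-and-forward is what allows
 * the core to compute and insert the checksum (TX COE) in hardware. On TX
 * underflow, stmmac_dma_interrupt() below bumps tc by 64 up to 256.
 */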

/**
 * stmmac_tx_clean:
 * @priv: driver private structure
 * Description: it reclaims resources after transmission completes.
 */
static void stmmac_tx_clean(struct stmmac_priv *priv)
{
	unsigned int txsize = priv->dma_tx_size;

	spin_lock(&priv->tx_lock);

	priv->xstats.tx_clean++;

	while (priv->dirty_tx != priv->cur_tx) {
		int last;
		unsigned int entry = priv->dirty_tx % txsize;
		struct sk_buff *skb = priv->tx_skbuff[entry];
		struct dma_desc *p;

		if (priv->extend_desc)
			p = (struct dma_desc *)(priv->dma_etx + entry);
		else
			p = priv->dma_tx + entry;

		/* Check if the descriptor is owned by the DMA. */
		if (priv->hw->desc->get_tx_owner(p))
			break;

		/* Verify tx error by looking at the last segment. */
		last = priv->hw->desc->get_tx_ls(p);
		if (likely(last)) {
			int tx_error =
			    priv->hw->desc->tx_status(&priv->dev->stats,
						      &priv->xstats, p,
						      priv->ioaddr);
			if (likely(tx_error == 0)) {
				priv->dev->stats.tx_packets++;
				priv->xstats.tx_pkt_n++;
			} else
				priv->dev->stats.tx_errors++;

			stmmac_get_tx_hwtstamp(priv, entry, skb);
		}
		if (netif_msg_tx_done(priv))
			pr_debug("%s: curr %d, dirty %d\n", __func__,
				 priv->cur_tx, priv->dirty_tx);

		if (likely(priv->tx_skbuff_dma[entry])) {
			dma_unmap_single(priv->device,
					 priv->tx_skbuff_dma[entry],
					 priv->hw->desc->get_tx_len(p),
					 DMA_TO_DEVICE);
			priv->tx_skbuff_dma[entry] = 0;
		}
		priv->hw->ring->clean_desc3(priv, p);

		if (likely(skb != NULL)) {
			dev_kfree_skb(skb);
			priv->tx_skbuff[entry] = NULL;
		}

		priv->hw->desc->release_tx_desc(p, priv->mode);

		priv->dirty_tx++;
	}
	if (unlikely(netif_queue_stopped(priv->dev) &&
		     stmmac_tx_avail(priv) > STMMAC_TX_THRESH(priv))) {
		netif_tx_lock(priv->dev);
		if (netif_queue_stopped(priv->dev) &&
		    stmmac_tx_avail(priv) > STMMAC_TX_THRESH(priv)) {
			if (netif_msg_tx_done(priv))
				pr_debug("%s: restart transmit\n", __func__);
			netif_wake_queue(priv->dev);
		}
		netif_tx_unlock(priv->dev);
	}

	if ((priv->eee_enabled) && (!priv->tx_path_in_lpi_mode)) {
		stmmac_enable_eee_mode(priv);
		mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer));
	}
	spin_unlock(&priv->tx_lock);
}

static inline void stmmac_enable_dma_irq(struct stmmac_priv *priv)
{
	priv->hw->dma->enable_dma_irq(priv->ioaddr);
}

static inline void stmmac_disable_dma_irq(struct stmmac_priv *priv)
{
	priv->hw->dma->disable_dma_irq(priv->ioaddr);
}

/**
 * stmmac_tx_err: irq tx error mng function
 * @priv: driver private structure
 * Description: it cleans the descriptors and restarts the transmission
 * in case of errors.
 */
static void stmmac_tx_err(struct stmmac_priv *priv)
{
	int i;
	int txsize = priv->dma_tx_size;

	netif_stop_queue(priv->dev);

	priv->hw->dma->stop_tx(priv->ioaddr);
	dma_free_tx_skbufs(priv);
	for (i = 0; i < txsize; i++)
		if (priv->extend_desc)
			priv->hw->desc->init_tx_desc(&priv->dma_etx[i].basic,
						     priv->mode,
						     (i == txsize - 1));
		else
			priv->hw->desc->init_tx_desc(&priv->dma_tx[i],
						     priv->mode,
						     (i == txsize - 1));
	priv->dirty_tx = 0;
	priv->cur_tx = 0;
	priv->hw->dma->start_tx(priv->ioaddr);

	priv->dev->stats.tx_errors++;
	netif_wake_queue(priv->dev);
}

/**
 * stmmac_dma_interrupt: DMA ISR
 * @priv: driver private structure
 * Description: this is the DMA ISR. It is called by the main ISR.
 * It calls the dwmac dma routine to understand which type of interrupt
 * happened. If a Normal interrupt with either a TX or RX event is
 * reported, then NAPI is scheduled.
 */
static void stmmac_dma_interrupt(struct stmmac_priv *priv)
{
	int status;

	status = priv->hw->dma->dma_interrupt(priv->ioaddr, &priv->xstats);
	if (likely((status & handle_rx)) || (status & handle_tx)) {
		if (likely(napi_schedule_prep(&priv->napi))) {
			stmmac_disable_dma_irq(priv);
			__napi_schedule(&priv->napi);
		}
	}
	if (unlikely(status & tx_hard_error_bump_tc)) {
		/* Try to bump up the dma threshold on this failure */
		if (unlikely(tc != SF_DMA_MODE) && (tc <= 256)) {
			tc += 64;
			priv->hw->dma->dma_mode(priv->ioaddr, tc, SF_DMA_MODE);
			priv->xstats.threshold = tc;
		}
	} else if (unlikely(status == tx_hard_error))
		stmmac_tx_err(priv);
}

/**
 * stmmac_mmc_setup: setup the Mac Management Counters (MMC)
 * @priv: driver private structure
 * Description: this masks the MMC irq, in fact, the counters are managed in SW.
 */
static void stmmac_mmc_setup(struct stmmac_priv *priv)
{
	unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET |
	    MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET;

	dwmac_mmc_intr_all_mask(priv->ioaddr);

	if (priv->dma_cap.rmon) {
		dwmac_mmc_ctrl(priv->ioaddr, mode);
		memset(&priv->mmc, 0, sizeof(struct stmmac_counters));
	} else
		pr_info(" No MAC Management Counters available\n");
}

static u32 stmmac_get_synopsys_id(struct stmmac_priv *priv)
{
	u32 hwid = priv->hw->synopsys_uid;

	/* Check Synopsys Id (not available on old chips) */
	if (likely(hwid)) {
		u32 uid = ((hwid & 0x0000ff00) >> 8);
		u32 synid = (hwid & 0x000000ff);

		pr_info("stmmac - user ID: 0x%x, Synopsys ID: 0x%x\n",
			uid, synid);

		return synid;
	}
	return 0;
}
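
/* Worked example (illustrative): a hwid of 0x1037 decodes as user ID 0x10
 * and Synopsys ID 0x37, i.e. a GMAC core version 3.70; the returned Synopsys
 * ID is later compared against DWMAC_CORE_3_50 when deciding about extended
 * descriptors.
 */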

/**
 * stmmac_selec_desc_mode: to select among: normal/alternate/extend descriptors
 * @priv: driver private structure
 * Description: select the Enhanced/Alternate or Normal descriptors.
 * In case of Enhanced/Alternate, it checks whether the extended descriptors
 * are supported by looking at the HW cap. register.
 */
static void stmmac_selec_desc_mode(struct stmmac_priv *priv)
{
	if (priv->plat->enh_desc) {
		pr_info(" Enhanced/Alternate descriptors\n");

		/* GMAC older than 3.50 has no extended descriptors */
		if (priv->synopsys_id >= DWMAC_CORE_3_50) {
			pr_info("\tEnabled extended descriptors\n");
			priv->extend_desc = 1;
		} else
			pr_warn("Extended descriptors not supported\n");

		priv->hw->desc = &enh_desc_ops;
	} else {
		pr_info(" Normal descriptors\n");
		priv->hw->desc = &ndesc_ops;
	}
}

/**
 * stmmac_get_hw_features: get MAC capabilities from the HW cap. register.
 * @priv: driver private structure
 * Description:
 * new GMAC chip generations have a new register to indicate the
 * presence of the optional feature/functions.
 * This can also be used to override the values passed through the
 * platform, which are necessary for the old MAC10/100 and GMAC chips.
 */
static int stmmac_get_hw_features(struct stmmac_priv *priv)
{
	u32 hw_cap = 0;

	if (priv->hw->dma->get_hw_feature) {
		hw_cap = priv->hw->dma->get_hw_feature(priv->ioaddr);

		priv->dma_cap.mbps_10_100 = (hw_cap & DMA_HW_FEAT_MIISEL);
		priv->dma_cap.mbps_1000 = (hw_cap & DMA_HW_FEAT_GMIISEL) >> 1;
		priv->dma_cap.half_duplex = (hw_cap & DMA_HW_FEAT_HDSEL) >> 2;
		priv->dma_cap.hash_filter = (hw_cap & DMA_HW_FEAT_HASHSEL) >> 4;
		priv->dma_cap.multi_addr = (hw_cap & DMA_HW_FEAT_ADDMAC) >> 5;
		priv->dma_cap.pcs = (hw_cap & DMA_HW_FEAT_PCSSEL) >> 6;
		priv->dma_cap.sma_mdio = (hw_cap & DMA_HW_FEAT_SMASEL) >> 8;
		priv->dma_cap.pmt_remote_wake_up =
		    (hw_cap & DMA_HW_FEAT_RWKSEL) >> 9;
		priv->dma_cap.pmt_magic_frame =
		    (hw_cap & DMA_HW_FEAT_MGKSEL) >> 10;
		/* MMC */
		priv->dma_cap.rmon = (hw_cap & DMA_HW_FEAT_MMCSEL) >> 11;
		/* IEEE 1588-2002 */
		priv->dma_cap.time_stamp =
		    (hw_cap & DMA_HW_FEAT_TSVER1SEL) >> 12;
		/* IEEE 1588-2008 */
		priv->dma_cap.atime_stamp =
		    (hw_cap & DMA_HW_FEAT_TSVER2SEL) >> 13;
		/* 802.3az - Energy-Efficient Ethernet (EEE) */
		priv->dma_cap.eee = (hw_cap & DMA_HW_FEAT_EEESEL) >> 14;
		priv->dma_cap.av = (hw_cap & DMA_HW_FEAT_AVSEL) >> 15;
		/* TX and RX csum */
		priv->dma_cap.tx_coe = (hw_cap & DMA_HW_FEAT_TXCOESEL) >> 16;
		priv->dma_cap.rx_coe_type1 =
		    (hw_cap & DMA_HW_FEAT_RXTYP1COE) >> 17;
		priv->dma_cap.rx_coe_type2 =
		    (hw_cap & DMA_HW_FEAT_RXTYP2COE) >> 18;
		priv->dma_cap.rxfifo_over_2048 =
		    (hw_cap & DMA_HW_FEAT_RXFIFOSIZE) >> 19;
		/* TX and RX number of channels */
		priv->dma_cap.number_rx_channel =
		    (hw_cap & DMA_HW_FEAT_RXCHCNT) >> 20;
		priv->dma_cap.number_tx_channel =
		    (hw_cap & DMA_HW_FEAT_TXCHCNT) >> 22;
		/* Alternate (enhanced) DESC mode */
		priv->dma_cap.enh_desc = (hw_cap & DMA_HW_FEAT_ENHDESSEL) >> 24;
	}

	return hw_cap;
}
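
/* Illustrative decode of one field above: if bit 14 (DMA_HW_FEAT_EEESEL) is
 * set in hw_cap, then (hw_cap & DMA_HW_FEAT_EEESEL) >> 14 stores 1 in
 * priv->dma_cap.eee, which stmmac_eee_init() checks before enabling the
 * EEE/LPI machinery.
 */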

/**
 * stmmac_check_ether_addr: check if the MAC addr is valid
 * @priv: driver private structure
 * Description:
 * it is to verify if the MAC address is valid; in case of failure it
 * generates a random MAC address.
 */
static void stmmac_check_ether_addr(struct stmmac_priv *priv)
{
	if (!is_valid_ether_addr(priv->dev->dev_addr)) {
		priv->hw->mac->get_umac_addr((void __iomem *)
					     priv->dev->base_addr,
					     priv->dev->dev_addr, 0);
		if (!is_valid_ether_addr(priv->dev->dev_addr))
			eth_hw_addr_random(priv->dev);
	}
	pr_warn("%s: device MAC address %pM\n", priv->dev->name,
		priv->dev->dev_addr);
}

/**
 * stmmac_init_dma_engine: DMA init.
 * @priv: driver private structure
 * Description:
 * It inits the DMA invoking the specific MAC/GMAC callback.
 * Some DMA parameters can be passed from the platform;
 * if they are not passed, a default is kept for the MAC or GMAC.
 */
static int stmmac_init_dma_engine(struct stmmac_priv *priv)
{
	int pbl = DEFAULT_DMA_PBL, fixed_burst = 0, burst_len = 0;
	int mixed_burst = 0;
	int atds = 0;

	if (priv->plat->dma_cfg) {
		pbl = priv->plat->dma_cfg->pbl;
		fixed_burst = priv->plat->dma_cfg->fixed_burst;
		mixed_burst = priv->plat->dma_cfg->mixed_burst;
		burst_len = priv->plat->dma_cfg->burst_len;
	}

	if (priv->extend_desc && (priv->mode == STMMAC_RING_MODE))
		atds = 1;

	return priv->hw->dma->init(priv->ioaddr, pbl, fixed_burst, mixed_burst,
				   burst_len, priv->dma_tx_phy,
				   priv->dma_rx_phy, atds);
}

/**
 * stmmac_tx_timer: mitigation sw timer for tx.
 * @data: data pointer
 * Description:
 * This is the timer handler to directly invoke the stmmac_tx_clean.
 */
static void stmmac_tx_timer(unsigned long data)
{
	struct stmmac_priv *priv = (struct stmmac_priv *)data;

	stmmac_tx_clean(priv);
}

/**
 * stmmac_init_tx_coalesce: init tx mitigation options.
 * @priv: driver private structure
 * Description:
 * This inits the transmit coalesce parameters: i.e. timer rate,
 * timer handler and default threshold used for enabling the
 * interrupt on completion bit.
 */
static void stmmac_init_tx_coalesce(struct stmmac_priv *priv)
{
	priv->tx_coal_frames = STMMAC_TX_FRAMES;
	priv->tx_coal_timer = STMMAC_COAL_TX_TIMER;
	init_timer(&priv->txtimer);
	priv->txtimer.expires = STMMAC_COAL_TIMER(priv->tx_coal_timer);
	priv->txtimer.data = (unsigned long)priv;
	priv->txtimer.function = stmmac_tx_timer;
	add_timer(&priv->txtimer);
}
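
/* Behaviour sketch of the TX mitigation set up above (illustrative): the
 * IC (interrupt-on-completion) bit is kept only on every tx_coal_frames-th
 * frame; for quieter traffic the txtimer fires after tx_coal_timer usecs
 * and invokes stmmac_tx_clean() directly, so completions are never delayed
 * indefinitely.
 */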

/**
 * stmmac_open - open entry point of the driver
 * @dev : pointer to the device structure.
 * Description:
 * This function is the open entry point of the driver.
 * Return value:
 * 0 on success and an appropriate (-)ve integer as defined in errno.h
 * file on failure.
 */
static int stmmac_open(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	int ret;

	clk_prepare_enable(priv->stmmac_clk);

	stmmac_check_ether_addr(priv);

	if (priv->pcs != STMMAC_PCS_RGMII && priv->pcs != STMMAC_PCS_TBI &&
	    priv->pcs != STMMAC_PCS_RTBI) {
		ret = stmmac_init_phy(dev);
		if (ret) {
			pr_err("%s: Cannot attach to PHY (error: %d)\n",
			       __func__, ret);
			goto phy_error;
		}
	}

	/* Create and initialize the TX/RX descriptors chains. */
	priv->dma_tx_size = STMMAC_ALIGN(dma_txsize);
	priv->dma_rx_size = STMMAC_ALIGN(dma_rxsize);
	priv->dma_buf_sz = STMMAC_ALIGN(buf_sz);

	ret = init_dma_desc_rings(dev);
	if (ret < 0) {
		pr_err("%s: DMA descriptors initialization failed\n", __func__);
		goto dma_desc_error;
	}

	/* DMA initialization and SW reset */
	ret = stmmac_init_dma_engine(priv);
	if (ret < 0) {
		pr_err("%s: DMA engine initialization failed\n", __func__);
		goto init_error;
	}

	/* Copy the MAC addr into the HW */
	priv->hw->mac->set_umac_addr(priv->ioaddr, dev->dev_addr, 0);

	/* If required, perform hw setup of the bus. */
	if (priv->plat->bus_setup)
		priv->plat->bus_setup(priv->ioaddr);

	/* Initialize the MAC Core */
	priv->hw->mac->core_init(priv->ioaddr);

	/* Request the IRQ lines */
	ret = request_irq(dev->irq, stmmac_interrupt,
			  IRQF_SHARED, dev->name, dev);
	if (unlikely(ret < 0)) {
		pr_err("%s: ERROR: allocating the IRQ %d (error: %d)\n",
		       __func__, dev->irq, ret);
		goto init_error;
	}

	/* Request the Wake IRQ in case another line is used for WoL */
	if (priv->wol_irq != dev->irq) {
		ret = request_irq(priv->wol_irq, stmmac_interrupt,
				  IRQF_SHARED, dev->name, dev);
		if (unlikely(ret < 0)) {
			pr_err("%s: ERROR: allocating the WoL IRQ %d (%d)\n",
			       __func__, priv->wol_irq, ret);
			goto wolirq_error;
		}
	}

	/* Request the LPI IRQ (if available) */
	if (priv->lpi_irq != -ENXIO) {
		ret = request_irq(priv->lpi_irq, stmmac_interrupt, IRQF_SHARED,
				  dev->name, dev);
		if (unlikely(ret < 0)) {
			pr_err("%s: ERROR: allocating the LPI IRQ %d (%d)\n",
			       __func__, priv->lpi_irq, ret);
			goto lpiirq_error;
		}
	}

	/* Enable the MAC Rx/Tx */
	stmmac_set_mac(priv->ioaddr, true);

	/* Set the HW DMA mode and the COE */
	stmmac_dma_operation_mode(priv);

	/* Extra statistics */
	memset(&priv->xstats, 0, sizeof(struct stmmac_extra_stats));
	priv->xstats.threshold = tc;

	stmmac_mmc_setup(priv);

	ret = stmmac_init_ptp(priv);
	if (ret)
		pr_warn("%s: failed PTP initialisation\n", __func__);

#ifdef CONFIG_STMMAC_DEBUG_FS
	ret = stmmac_init_fs(dev);
	if (ret < 0)
		pr_warn("%s: failed debugFS registration\n", __func__);
#endif
	/* Start the ball rolling... */
	pr_debug("%s: DMA RX/TX processes started...\n", dev->name);
	priv->hw->dma->start_tx(priv->ioaddr);
	priv->hw->dma->start_rx(priv->ioaddr);

	/* Dump DMA/MAC registers */
	if (netif_msg_hw(priv)) {
		priv->hw->mac->dump_regs(priv->ioaddr);
		priv->hw->dma->dump_regs(priv->ioaddr);
	}

	if (priv->phydev)
		phy_start(priv->phydev);

	priv->tx_lpi_timer = STMMAC_DEFAULT_TWT_LS;

	priv->eee_enabled = stmmac_eee_init(priv);

	stmmac_init_tx_coalesce(priv);

	if ((priv->use_riwt) && (priv->hw->dma->rx_watchdog)) {
		priv->rx_riwt = MAX_DMA_RIWT;
		priv->hw->dma->rx_watchdog(priv->ioaddr, MAX_DMA_RIWT);
	}

	if (priv->pcs && priv->hw->mac->ctrl_ane)
		priv->hw->mac->ctrl_ane(priv->ioaddr, 0);

	napi_enable(&priv->napi);
	netif_start_queue(dev);

	return 0;

lpiirq_error:
	if (priv->wol_irq != dev->irq)
		free_irq(priv->wol_irq, dev);
wolirq_error:
	free_irq(dev->irq, dev);

init_error:
	free_dma_desc_resources(priv);
dma_desc_error:
	if (priv->phydev)
		phy_disconnect(priv->phydev);
phy_error:
	clk_disable_unprepare(priv->stmmac_clk);

	return ret;
}

/**
 * stmmac_release - close entry point of the driver
 * @dev : device pointer.
 * Description:
 * This is the stop entry point of the driver.
 */
static int stmmac_release(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);

	if (priv->eee_enabled)
		del_timer_sync(&priv->eee_ctrl_timer);

	/* Stop and disconnect the PHY */
	if (priv->phydev) {
		phy_stop(priv->phydev);
		phy_disconnect(priv->phydev);
		priv->phydev = NULL;
	}

	netif_stop_queue(dev);

	napi_disable(&priv->napi);

	del_timer_sync(&priv->txtimer);

	/* Free the IRQ lines */
	free_irq(dev->irq, dev);
	if (priv->wol_irq != dev->irq)
		free_irq(priv->wol_irq, dev);
	if (priv->lpi_irq != -ENXIO)
		free_irq(priv->lpi_irq, dev);

	/* Stop TX/RX DMA and clear the descriptors */
	priv->hw->dma->stop_tx(priv->ioaddr);
	priv->hw->dma->stop_rx(priv->ioaddr);

	/* Release and free the Rx/Tx resources */
	free_dma_desc_resources(priv);

	/* Disable the MAC Rx/Tx */
	stmmac_set_mac(priv->ioaddr, false);

	netif_carrier_off(dev);

#ifdef CONFIG_STMMAC_DEBUG_FS
	stmmac_exit_fs();
#endif
	clk_disable_unprepare(priv->stmmac_clk);

	stmmac_release_ptp(priv);

	return 0;
}
/**
 * stmmac_xmit: Tx entry point of the driver
 * @skb : the socket buffer
 * @dev : device pointer
 * Description : this is the tx entry point of the driver.
 * It programs the chain or the ring and supports oversized frames
 * and the SG feature.
 */
static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	unsigned int txsize = priv->dma_tx_size;
	unsigned int entry;
	int i, csum_insertion = 0, is_jumbo = 0;
	int nfrags = skb_shinfo(skb)->nr_frags;
	struct dma_desc *desc, *first;
	unsigned int nopaged_len = skb_headlen(skb);

	if (unlikely(stmmac_tx_avail(priv) < nfrags + 1)) {
		if (!netif_queue_stopped(dev)) {
			netif_stop_queue(dev);
			/* This is a hard error, log it. */
			pr_err("%s: Tx Ring full when queue awake\n", __func__);
		}
		return NETDEV_TX_BUSY;
	}

	spin_lock(&priv->tx_lock);

	if (priv->tx_path_in_lpi_mode)
		stmmac_disable_eee_mode(priv);

	entry = priv->cur_tx % txsize;

	csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL);

	if (priv->extend_desc)
		desc = (struct dma_desc *)(priv->dma_etx + entry);
	else
		desc = priv->dma_tx + entry;

	first = desc;

	priv->tx_skbuff[entry] = skb;

	/* To program the descriptors according to the size of the frame */
	if (priv->mode == STMMAC_RING_MODE) {
		is_jumbo = priv->hw->ring->is_jumbo_frm(skb->len,
							priv->plat->enh_desc);
		if (unlikely(is_jumbo))
			entry = priv->hw->ring->jumbo_frm(priv, skb,
							  csum_insertion);
	} else {
		is_jumbo = priv->hw->chain->is_jumbo_frm(skb->len,
							 priv->plat->enh_desc);
		if (unlikely(is_jumbo))
			entry = priv->hw->chain->jumbo_frm(priv, skb,
							   csum_insertion);
	}
	if (likely(!is_jumbo)) {
		desc->des2 = dma_map_single(priv->device, skb->data,
					    nopaged_len, DMA_TO_DEVICE);
		priv->tx_skbuff_dma[entry] = desc->des2;
		priv->hw->desc->prepare_tx_desc(desc, 1, nopaged_len,
						csum_insertion, priv->mode);
	} else
		desc = first;

	for (i = 0; i < nfrags; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		int len = skb_frag_size(frag);

		entry = (++priv->cur_tx) % txsize;
		if (priv->extend_desc)
			desc = (struct dma_desc *)(priv->dma_etx + entry);
		else
			desc = priv->dma_tx + entry;

		desc->des2 = skb_frag_dma_map(priv->device, frag, 0, len,
					      DMA_TO_DEVICE);
		priv->tx_skbuff_dma[entry] = desc->des2;
		priv->tx_skbuff[entry] = NULL;
		priv->hw->desc->prepare_tx_desc(desc, 0, len, csum_insertion,
						priv->mode);
		wmb();
		priv->hw->desc->set_tx_owner(desc);
		wmb();
	}

	/* Finalize the latest segment. */
	priv->hw->desc->close_tx_desc(desc);

	wmb();
	/* According to the coalesce parameter the IC bit for the latest
	 * segment could be reset and the timer re-started to invoke the
	 * stmmac_tx function. This approach takes care of the fragments.
	 */
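	/* Worked example (illustrative, default values may differ): if
	 * tx_coal_frames is 64, the IC bit is cleared on most frames and
	 * left set roughly once per 64 accumulated segments, so Tx
	 * completion interrupts arrive in batches; the SW timer armed
	 * below with STMMAC_COAL_TIMER() covers the quiet periods.
	 */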
	priv->tx_count_frames += nfrags + 1;
	if (priv->tx_coal_frames > priv->tx_count_frames) {
		priv->hw->desc->clear_tx_ic(desc);
		priv->xstats.tx_reset_ic_bit++;
		mod_timer(&priv->txtimer,
			  STMMAC_COAL_TIMER(priv->tx_coal_timer));
	} else
		priv->tx_count_frames = 0;

	/* To avoid a race condition */
	priv->hw->desc->set_tx_owner(first);
	wmb();

	priv->cur_tx++;

	if (netif_msg_pktdata(priv)) {
		pr_debug("%s: curr %d dirty=%d entry=%d, first=%p, nfrags=%d",
			 __func__, (priv->cur_tx % txsize),
			 (priv->dirty_tx % txsize), entry, first, nfrags);

		if (priv->extend_desc)
			stmmac_display_ring((void *)priv->dma_etx, txsize, 1);
		else
			stmmac_display_ring((void *)priv->dma_tx, txsize, 0);

		pr_debug(">>> frame to be transmitted: ");
		print_pkt(skb->data, skb->len);
	}
	if (unlikely(stmmac_tx_avail(priv) <= (MAX_SKB_FRAGS + 1))) {
		if (netif_msg_hw(priv))
			pr_debug("%s: stop transmitting packets\n", __func__);
		netif_stop_queue(dev);
	}

	dev->stats.tx_bytes += skb->len;

	if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
		     priv->hwts_tx_en)) {
		/* declare that device is doing timestamping */
		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
		priv->hw->desc->enable_tx_timestamp(first);
	}

	if (!priv->hwts_tx_en)
		skb_tx_timestamp(skb);

	priv->hw->dma->enable_dma_transmission(priv->ioaddr);

	spin_unlock(&priv->tx_lock);

	return NETDEV_TX_OK;
}

/**
 * stmmac_rx_refill: refill used skb preallocated buffers
 * @priv: driver private structure
 * Description : this is to reallocate the skb for the reception process
 * that is based on zero-copy.
 */
static inline void stmmac_rx_refill(struct stmmac_priv *priv)
{
	unsigned int rxsize = priv->dma_rx_size;
	int bfsize = priv->dma_buf_sz;

	for (; priv->cur_rx - priv->dirty_rx > 0; priv->dirty_rx++) {
		unsigned int entry = priv->dirty_rx % rxsize;
		struct dma_desc *p;

		if (priv->extend_desc)
			p = (struct dma_desc *)(priv->dma_erx + entry);
		else
			p = priv->dma_rx + entry;

		if (likely(priv->rx_skbuff[entry] == NULL)) {
			struct sk_buff *skb;

			skb = netdev_alloc_skb_ip_align(priv->dev, bfsize);

			if (unlikely(skb == NULL))
				break;

			priv->rx_skbuff[entry] = skb;
			priv->rx_skbuff_dma[entry] =
			    dma_map_single(priv->device, skb->data, bfsize,
					   DMA_FROM_DEVICE);

			p->des2 = priv->rx_skbuff_dma[entry];

			priv->hw->ring->refill_desc3(priv, p);

			if (netif_msg_rx_status(priv))
				pr_debug("\trefill entry #%d\n", entry);
		}
		wmb();
		priv->hw->desc->set_rx_owner(p);
		wmb();
	}
}
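/* Example of the ring accounting used above (illustrative numbers):
 * with rxsize = 256, cur_rx = 260 and dirty_rx = 258, the refill loop
 * touches entries 258 % 256 = 2 and 259 % 256 = 3 and hands them back
 * to the DMA via set_rx_owner(). The indices only ever grow; the
 * modulo keeps them inside the ring.
 */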
/**
 * stmmac_rx: receive the frames from the DMA ring
 * @priv: driver private structure
 * @limit: napi budget.
 * Description : this is the function called by the napi poll method.
 * It gets all the frames inside the ring.
 */
static int stmmac_rx(struct stmmac_priv *priv, int limit)
{
	unsigned int rxsize = priv->dma_rx_size;
	unsigned int entry = priv->cur_rx % rxsize;
	unsigned int next_entry;
	unsigned int count = 0;
	int coe = priv->plat->rx_coe;

	if (netif_msg_rx_status(priv)) {
		pr_debug("%s: descriptor ring:\n", __func__);
		if (priv->extend_desc)
			stmmac_display_ring((void *)priv->dma_erx, rxsize, 1);
		else
			stmmac_display_ring((void *)priv->dma_rx, rxsize, 0);
	}
	while (count < limit) {
		int status;
		struct dma_desc *p;

		if (priv->extend_desc)
			p = (struct dma_desc *)(priv->dma_erx + entry);
		else
			p = priv->dma_rx + entry;

		if (priv->hw->desc->get_rx_owner(p))
			break;

		count++;

		next_entry = (++priv->cur_rx) % rxsize;
		if (priv->extend_desc)
			prefetch(priv->dma_erx + next_entry);
		else
			prefetch(priv->dma_rx + next_entry);

		/* read the status of the incoming frame */
		status = priv->hw->desc->rx_status(&priv->dev->stats,
						   &priv->xstats, p);
		if ((priv->extend_desc) && (priv->hw->desc->rx_extended_status))
			priv->hw->desc->rx_extended_status(&priv->dev->stats,
							   &priv->xstats,
							   priv->dma_erx +
							   entry);
		if (unlikely(status == discard_frame)) {
			priv->dev->stats.rx_errors++;
			if (priv->hwts_rx_en && !priv->extend_desc) {
				/* DESC2 & DESC3 will be overwritten by device
				 * with timestamp value, hence reinitialize
				 * them in stmmac_rx_refill() function so that
				 * device can reuse it.
				 */
				priv->rx_skbuff[entry] = NULL;
				dma_unmap_single(priv->device,
						 priv->rx_skbuff_dma[entry],
						 priv->dma_buf_sz,
						 DMA_FROM_DEVICE);
			}
		} else {
			struct sk_buff *skb;
			int frame_len;

			frame_len = priv->hw->desc->get_rx_frame_len(p, coe);

			/* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3
			 * Type frames (LLC/LLC-SNAP)
			 */
			if (unlikely(status != llc_snap))
				frame_len -= ETH_FCS_LEN;

			if (netif_msg_rx_status(priv)) {
				pr_debug("\tdesc: %p [entry %d] buff=0x%x\n",
					 p, entry, p->des2);
				if (frame_len > ETH_FRAME_LEN)
					pr_debug("\tframe size %d, COE: %d\n",
						 frame_len, status);
			}
			skb = priv->rx_skbuff[entry];
			if (unlikely(!skb)) {
				pr_err("%s: Inconsistent Rx descriptor chain\n",
				       priv->dev->name);
				priv->dev->stats.rx_dropped++;
				break;
			}
			prefetch(skb->data - NET_IP_ALIGN);
			priv->rx_skbuff[entry] = NULL;

			stmmac_get_rx_hwtstamp(priv, entry, skb);

			skb_put(skb, frame_len);
			dma_unmap_single(priv->device,
					 priv->rx_skbuff_dma[entry],
					 priv->dma_buf_sz, DMA_FROM_DEVICE);

			if (netif_msg_pktdata(priv)) {
				pr_debug("frame received (%dbytes)", frame_len);
				print_pkt(skb->data, frame_len);
			}

			skb->protocol = eth_type_trans(skb, priv->dev);

			if (unlikely(!coe))
				skb_checksum_none_assert(skb);
			else
				skb->ip_summed = CHECKSUM_UNNECESSARY;

			napi_gro_receive(&priv->napi, skb);

			priv->dev->stats.rx_packets++;
			priv->dev->stats.rx_bytes += frame_len;
		}
		entry = next_entry;
	}

	stmmac_rx_refill(priv);

	priv->xstats.rx_pkt_n += count;

	return count;
}
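/* NAPI sketch: the ISR masks the DMA interrupts and schedules the poll;
 * stmmac_poll() below then cleans the Tx ring and processes up to
 * @budget Rx frames. Only when fewer than @budget frames were handled
 * does it call napi_complete() and re-enable the DMA interrupts, which
 * is the standard NAPI contract.
 */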
/**
 * stmmac_poll - stmmac poll method (NAPI)
 * @napi : pointer to the napi structure.
 * @budget : maximum number of packets that the current CPU can receive from
 *	     all interfaces.
 * Description :
 * To look at the incoming frames and clear the tx resources.
 */
static int stmmac_poll(struct napi_struct *napi, int budget)
{
	struct stmmac_priv *priv = container_of(napi, struct stmmac_priv, napi);
	int work_done = 0;

	priv->xstats.napi_poll++;
	stmmac_tx_clean(priv);

	work_done = stmmac_rx(priv, budget);
	if (work_done < budget) {
		napi_complete(napi);
		stmmac_enable_dma_irq(priv);
	}
	return work_done;
}

/**
 * stmmac_tx_timeout
 * @dev : Pointer to net device structure
 * Description: this function is called when a packet transmission fails to
 * complete within a reasonable time. The driver will mark the error in the
 * netdev structure and arrange for the device to be reset to a sane state
 * in order to transmit a new packet.
 */
static void stmmac_tx_timeout(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);

	/* Clear Tx resources and restart transmitting again */
	stmmac_tx_err(priv);
}
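/* The timeout above is armed by the networking core using
 * dev->watchdog_timeo, which this driver initialises from the
 * "watchdog" module parameter in stmmac_dvr_probe() (5000 ms by
 * default).
 */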
2218 */ 2219 static int stmmac_change_mtu(struct net_device *dev, int new_mtu) 2220 { 2221 struct stmmac_priv *priv = netdev_priv(dev); 2222 int max_mtu; 2223 2224 if (netif_running(dev)) { 2225 pr_err("%s: must be stopped to change its MTU\n", dev->name); 2226 return -EBUSY; 2227 } 2228 2229 if (priv->plat->enh_desc) 2230 max_mtu = JUMBO_LEN; 2231 else 2232 max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN); 2233 2234 if ((new_mtu < 46) || (new_mtu > max_mtu)) { 2235 pr_err("%s: invalid MTU, max MTU is: %d\n", dev->name, max_mtu); 2236 return -EINVAL; 2237 } 2238 2239 dev->mtu = new_mtu; 2240 netdev_update_features(dev); 2241 2242 return 0; 2243 } 2244 2245 static netdev_features_t stmmac_fix_features(struct net_device *dev, 2246 netdev_features_t features) 2247 { 2248 struct stmmac_priv *priv = netdev_priv(dev); 2249 2250 if (priv->plat->rx_coe == STMMAC_RX_COE_NONE) 2251 features &= ~NETIF_F_RXCSUM; 2252 else if (priv->plat->rx_coe == STMMAC_RX_COE_TYPE1) 2253 features &= ~NETIF_F_IPV6_CSUM; 2254 if (!priv->plat->tx_coe) 2255 features &= ~NETIF_F_ALL_CSUM; 2256 2257 /* Some GMAC devices have a bugged Jumbo frame support that 2258 * needs to have the Tx COE disabled for oversized frames 2259 * (due to limited buffer sizes). In this case we disable 2260 * the TX csum insertionin the TDES and not use SF. 2261 */ 2262 if (priv->plat->bugged_jumbo && (dev->mtu > ETH_DATA_LEN)) 2263 features &= ~NETIF_F_ALL_CSUM; 2264 2265 return features; 2266 } 2267 2268 /** 2269 * stmmac_interrupt - main ISR 2270 * @irq: interrupt number. 2271 * @dev_id: to pass the net device pointer. 2272 * Description: this is the main driver interrupt service routine. 2273 * It calls the DMA ISR and also the core ISR to manage PMT, MMC, LPI 2274 * interrupts. 2275 */ 2276 static irqreturn_t stmmac_interrupt(int irq, void *dev_id) 2277 { 2278 struct net_device *dev = (struct net_device *)dev_id; 2279 struct stmmac_priv *priv = netdev_priv(dev); 2280 2281 if (unlikely(!dev)) { 2282 pr_err("%s: invalid dev pointer\n", __func__); 2283 return IRQ_NONE; 2284 } 2285 2286 /* To handle GMAC own interrupts */ 2287 if (priv->plat->has_gmac) { 2288 int status = priv->hw->mac->host_irq_status((void __iomem *) 2289 dev->base_addr, 2290 &priv->xstats); 2291 if (unlikely(status)) { 2292 /* For LPI we need to save the tx status */ 2293 if (status & CORE_IRQ_TX_PATH_IN_LPI_MODE) 2294 priv->tx_path_in_lpi_mode = true; 2295 if (status & CORE_IRQ_TX_PATH_EXIT_LPI_MODE) 2296 priv->tx_path_in_lpi_mode = false; 2297 } 2298 } 2299 2300 /* To handle DMA interrupts */ 2301 stmmac_dma_interrupt(priv); 2302 2303 return IRQ_HANDLED; 2304 } 2305 2306 #ifdef CONFIG_NET_POLL_CONTROLLER 2307 /* Polling receive - used by NETCONSOLE and other diagnostic tools 2308 * to allow network I/O with interrupts disabled. 2309 */ 2310 static void stmmac_poll_controller(struct net_device *dev) 2311 { 2312 disable_irq(dev->irq); 2313 stmmac_interrupt(dev->irq, dev); 2314 enable_irq(dev->irq); 2315 } 2316 #endif 2317 2318 /** 2319 * stmmac_ioctl - Entry point for the Ioctl 2320 * @dev: Device pointer. 2321 * @rq: An IOCTL specefic structure, that can contain a pointer to 2322 * a proprietary structure used to pass information to the driver. 2323 * @cmd: IOCTL command 2324 * Description: 2325 * Currently it supports the phy_mii_ioctl(...) and HW time stamping. 
2326 */ 2327 static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) 2328 { 2329 struct stmmac_priv *priv = netdev_priv(dev); 2330 int ret = -EOPNOTSUPP; 2331 2332 if (!netif_running(dev)) 2333 return -EINVAL; 2334 2335 switch (cmd) { 2336 case SIOCGMIIPHY: 2337 case SIOCGMIIREG: 2338 case SIOCSMIIREG: 2339 if (!priv->phydev) 2340 return -EINVAL; 2341 ret = phy_mii_ioctl(priv->phydev, rq, cmd); 2342 break; 2343 case SIOCSHWTSTAMP: 2344 ret = stmmac_hwtstamp_ioctl(dev, rq); 2345 break; 2346 default: 2347 break; 2348 } 2349 2350 return ret; 2351 } 2352 2353 #ifdef CONFIG_STMMAC_DEBUG_FS 2354 static struct dentry *stmmac_fs_dir; 2355 static struct dentry *stmmac_rings_status; 2356 static struct dentry *stmmac_dma_cap; 2357 2358 static void sysfs_display_ring(void *head, int size, int extend_desc, 2359 struct seq_file *seq) 2360 { 2361 int i; 2362 struct dma_extended_desc *ep = (struct dma_extended_desc *)head; 2363 struct dma_desc *p = (struct dma_desc *)head; 2364 2365 for (i = 0; i < size; i++) { 2366 u64 x; 2367 if (extend_desc) { 2368 x = *(u64 *) ep; 2369 seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n", 2370 i, (unsigned int)virt_to_phys(ep), 2371 (unsigned int)x, (unsigned int)(x >> 32), 2372 ep->basic.des2, ep->basic.des3); 2373 ep++; 2374 } else { 2375 x = *(u64 *) p; 2376 seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n", 2377 i, (unsigned int)virt_to_phys(ep), 2378 (unsigned int)x, (unsigned int)(x >> 32), 2379 p->des2, p->des3); 2380 p++; 2381 } 2382 seq_printf(seq, "\n"); 2383 } 2384 } 2385 2386 static int stmmac_sysfs_ring_read(struct seq_file *seq, void *v) 2387 { 2388 struct net_device *dev = seq->private; 2389 struct stmmac_priv *priv = netdev_priv(dev); 2390 unsigned int txsize = priv->dma_tx_size; 2391 unsigned int rxsize = priv->dma_rx_size; 2392 2393 if (priv->extend_desc) { 2394 seq_printf(seq, "Extended RX descriptor ring:\n"); 2395 sysfs_display_ring((void *)priv->dma_erx, rxsize, 1, seq); 2396 seq_printf(seq, "Extended TX descriptor ring:\n"); 2397 sysfs_display_ring((void *)priv->dma_etx, txsize, 1, seq); 2398 } else { 2399 seq_printf(seq, "RX descriptor ring:\n"); 2400 sysfs_display_ring((void *)priv->dma_rx, rxsize, 0, seq); 2401 seq_printf(seq, "TX descriptor ring:\n"); 2402 sysfs_display_ring((void *)priv->dma_tx, txsize, 0, seq); 2403 } 2404 2405 return 0; 2406 } 2407 2408 static int stmmac_sysfs_ring_open(struct inode *inode, struct file *file) 2409 { 2410 return single_open(file, stmmac_sysfs_ring_read, inode->i_private); 2411 } 2412 2413 static const struct file_operations stmmac_rings_status_fops = { 2414 .owner = THIS_MODULE, 2415 .open = stmmac_sysfs_ring_open, 2416 .read = seq_read, 2417 .llseek = seq_lseek, 2418 .release = single_release, 2419 }; 2420 2421 static int stmmac_sysfs_dma_cap_read(struct seq_file *seq, void *v) 2422 { 2423 struct net_device *dev = seq->private; 2424 struct stmmac_priv *priv = netdev_priv(dev); 2425 2426 if (!priv->hw_cap_support) { 2427 seq_printf(seq, "DMA HW features not supported\n"); 2428 return 0; 2429 } 2430 2431 seq_printf(seq, "==============================\n"); 2432 seq_printf(seq, "\tDMA HW features\n"); 2433 seq_printf(seq, "==============================\n"); 2434 2435 seq_printf(seq, "\t10/100 Mbps %s\n", 2436 (priv->dma_cap.mbps_10_100) ? "Y" : "N"); 2437 seq_printf(seq, "\t1000 Mbps %s\n", 2438 (priv->dma_cap.mbps_1000) ? "Y" : "N"); 2439 seq_printf(seq, "\tHalf duple %s\n", 2440 (priv->dma_cap.half_duplex) ? 
"Y" : "N"); 2441 seq_printf(seq, "\tHash Filter: %s\n", 2442 (priv->dma_cap.hash_filter) ? "Y" : "N"); 2443 seq_printf(seq, "\tMultiple MAC address registers: %s\n", 2444 (priv->dma_cap.multi_addr) ? "Y" : "N"); 2445 seq_printf(seq, "\tPCS (TBI/SGMII/RTBI PHY interfatces): %s\n", 2446 (priv->dma_cap.pcs) ? "Y" : "N"); 2447 seq_printf(seq, "\tSMA (MDIO) Interface: %s\n", 2448 (priv->dma_cap.sma_mdio) ? "Y" : "N"); 2449 seq_printf(seq, "\tPMT Remote wake up: %s\n", 2450 (priv->dma_cap.pmt_remote_wake_up) ? "Y" : "N"); 2451 seq_printf(seq, "\tPMT Magic Frame: %s\n", 2452 (priv->dma_cap.pmt_magic_frame) ? "Y" : "N"); 2453 seq_printf(seq, "\tRMON module: %s\n", 2454 (priv->dma_cap.rmon) ? "Y" : "N"); 2455 seq_printf(seq, "\tIEEE 1588-2002 Time Stamp: %s\n", 2456 (priv->dma_cap.time_stamp) ? "Y" : "N"); 2457 seq_printf(seq, "\tIEEE 1588-2008 Advanced Time Stamp:%s\n", 2458 (priv->dma_cap.atime_stamp) ? "Y" : "N"); 2459 seq_printf(seq, "\t802.3az - Energy-Efficient Ethernet (EEE) %s\n", 2460 (priv->dma_cap.eee) ? "Y" : "N"); 2461 seq_printf(seq, "\tAV features: %s\n", (priv->dma_cap.av) ? "Y" : "N"); 2462 seq_printf(seq, "\tChecksum Offload in TX: %s\n", 2463 (priv->dma_cap.tx_coe) ? "Y" : "N"); 2464 seq_printf(seq, "\tIP Checksum Offload (type1) in RX: %s\n", 2465 (priv->dma_cap.rx_coe_type1) ? "Y" : "N"); 2466 seq_printf(seq, "\tIP Checksum Offload (type2) in RX: %s\n", 2467 (priv->dma_cap.rx_coe_type2) ? "Y" : "N"); 2468 seq_printf(seq, "\tRXFIFO > 2048bytes: %s\n", 2469 (priv->dma_cap.rxfifo_over_2048) ? "Y" : "N"); 2470 seq_printf(seq, "\tNumber of Additional RX channel: %d\n", 2471 priv->dma_cap.number_rx_channel); 2472 seq_printf(seq, "\tNumber of Additional TX channel: %d\n", 2473 priv->dma_cap.number_tx_channel); 2474 seq_printf(seq, "\tEnhanced descriptors: %s\n", 2475 (priv->dma_cap.enh_desc) ? 
"Y" : "N"); 2476 2477 return 0; 2478 } 2479 2480 static int stmmac_sysfs_dma_cap_open(struct inode *inode, struct file *file) 2481 { 2482 return single_open(file, stmmac_sysfs_dma_cap_read, inode->i_private); 2483 } 2484 2485 static const struct file_operations stmmac_dma_cap_fops = { 2486 .owner = THIS_MODULE, 2487 .open = stmmac_sysfs_dma_cap_open, 2488 .read = seq_read, 2489 .llseek = seq_lseek, 2490 .release = single_release, 2491 }; 2492 2493 static int stmmac_init_fs(struct net_device *dev) 2494 { 2495 /* Create debugfs entries */ 2496 stmmac_fs_dir = debugfs_create_dir(STMMAC_RESOURCE_NAME, NULL); 2497 2498 if (!stmmac_fs_dir || IS_ERR(stmmac_fs_dir)) { 2499 pr_err("ERROR %s, debugfs create directory failed\n", 2500 STMMAC_RESOURCE_NAME); 2501 2502 return -ENOMEM; 2503 } 2504 2505 /* Entry to report DMA RX/TX rings */ 2506 stmmac_rings_status = debugfs_create_file("descriptors_status", 2507 S_IRUGO, stmmac_fs_dir, dev, 2508 &stmmac_rings_status_fops); 2509 2510 if (!stmmac_rings_status || IS_ERR(stmmac_rings_status)) { 2511 pr_info("ERROR creating stmmac ring debugfs file\n"); 2512 debugfs_remove(stmmac_fs_dir); 2513 2514 return -ENOMEM; 2515 } 2516 2517 /* Entry to report the DMA HW features */ 2518 stmmac_dma_cap = debugfs_create_file("dma_cap", S_IRUGO, stmmac_fs_dir, 2519 dev, &stmmac_dma_cap_fops); 2520 2521 if (!stmmac_dma_cap || IS_ERR(stmmac_dma_cap)) { 2522 pr_info("ERROR creating stmmac MMC debugfs file\n"); 2523 debugfs_remove(stmmac_rings_status); 2524 debugfs_remove(stmmac_fs_dir); 2525 2526 return -ENOMEM; 2527 } 2528 2529 return 0; 2530 } 2531 2532 static void stmmac_exit_fs(void) 2533 { 2534 debugfs_remove(stmmac_rings_status); 2535 debugfs_remove(stmmac_dma_cap); 2536 debugfs_remove(stmmac_fs_dir); 2537 } 2538 #endif /* CONFIG_STMMAC_DEBUG_FS */ 2539 2540 static const struct net_device_ops stmmac_netdev_ops = { 2541 .ndo_open = stmmac_open, 2542 .ndo_start_xmit = stmmac_xmit, 2543 .ndo_stop = stmmac_release, 2544 .ndo_change_mtu = stmmac_change_mtu, 2545 .ndo_fix_features = stmmac_fix_features, 2546 .ndo_set_rx_mode = stmmac_set_rx_mode, 2547 .ndo_tx_timeout = stmmac_tx_timeout, 2548 .ndo_do_ioctl = stmmac_ioctl, 2549 .ndo_set_config = stmmac_config, 2550 #ifdef CONFIG_NET_POLL_CONTROLLER 2551 .ndo_poll_controller = stmmac_poll_controller, 2552 #endif 2553 .ndo_set_mac_address = eth_mac_addr, 2554 }; 2555 2556 /** 2557 * stmmac_hw_init - Init the MAC device 2558 * @priv: driver private structure 2559 * Description: this function detects which MAC device 2560 * (GMAC/MAC10-100) has to attached, checks the HW capability 2561 * (if supported) and sets the driver's features (for example 2562 * to use the ring or chaine mode or support the normal/enh 2563 * descriptor structure). 
2564 */ 2565 static int stmmac_hw_init(struct stmmac_priv *priv) 2566 { 2567 int ret; 2568 struct mac_device_info *mac; 2569 2570 /* Identify the MAC HW device */ 2571 if (priv->plat->has_gmac) { 2572 priv->dev->priv_flags |= IFF_UNICAST_FLT; 2573 mac = dwmac1000_setup(priv->ioaddr); 2574 } else { 2575 mac = dwmac100_setup(priv->ioaddr); 2576 } 2577 if (!mac) 2578 return -ENOMEM; 2579 2580 priv->hw = mac; 2581 2582 /* Get and dump the chip ID */ 2583 priv->synopsys_id = stmmac_get_synopsys_id(priv); 2584 2585 /* To use the chained or ring mode */ 2586 if (chain_mode) { 2587 priv->hw->chain = &chain_mode_ops; 2588 pr_info(" Chain mode enabled\n"); 2589 priv->mode = STMMAC_CHAIN_MODE; 2590 } else { 2591 priv->hw->ring = &ring_mode_ops; 2592 pr_info(" Ring mode enabled\n"); 2593 priv->mode = STMMAC_RING_MODE; 2594 } 2595 2596 /* Get the HW capability (new GMAC newer than 3.50a) */ 2597 priv->hw_cap_support = stmmac_get_hw_features(priv); 2598 if (priv->hw_cap_support) { 2599 pr_info(" DMA HW capability register supported"); 2600 2601 /* We can override some gmac/dma configuration fields: e.g. 2602 * enh_desc, tx_coe (e.g. that are passed through the 2603 * platform) with the values from the HW capability 2604 * register (if supported). 2605 */ 2606 priv->plat->enh_desc = priv->dma_cap.enh_desc; 2607 priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up; 2608 2609 priv->plat->tx_coe = priv->dma_cap.tx_coe; 2610 2611 if (priv->dma_cap.rx_coe_type2) 2612 priv->plat->rx_coe = STMMAC_RX_COE_TYPE2; 2613 else if (priv->dma_cap.rx_coe_type1) 2614 priv->plat->rx_coe = STMMAC_RX_COE_TYPE1; 2615 2616 } else 2617 pr_info(" No HW DMA feature register supported"); 2618 2619 /* To use alternate (extended) or normal descriptor structures */ 2620 stmmac_selec_desc_mode(priv); 2621 2622 ret = priv->hw->mac->rx_ipc(priv->ioaddr); 2623 if (!ret) { 2624 pr_warn(" RX IPC Checksum Offload not configured.\n"); 2625 priv->plat->rx_coe = STMMAC_RX_COE_NONE; 2626 } 2627 2628 if (priv->plat->rx_coe) 2629 pr_info(" RX Checksum Offload Engine supported (type %d)\n", 2630 priv->plat->rx_coe); 2631 if (priv->plat->tx_coe) 2632 pr_info(" TX Checksum insertion supported\n"); 2633 2634 if (priv->plat->pmt) { 2635 pr_info(" Wake-Up On Lan supported\n"); 2636 device_set_wakeup_capable(priv->device, 1); 2637 } 2638 2639 return 0; 2640 } 2641 2642 /** 2643 * stmmac_dvr_probe 2644 * @device: device pointer 2645 * @plat_dat: platform data pointer 2646 * @addr: iobase memory address 2647 * Description: this is the main probe function used to 2648 * call the alloc_etherdev, allocate the priv structure. 
2649 */ 2650 struct stmmac_priv *stmmac_dvr_probe(struct device *device, 2651 struct plat_stmmacenet_data *plat_dat, 2652 void __iomem *addr) 2653 { 2654 int ret = 0; 2655 struct net_device *ndev = NULL; 2656 struct stmmac_priv *priv; 2657 2658 ndev = alloc_etherdev(sizeof(struct stmmac_priv)); 2659 if (!ndev) 2660 return NULL; 2661 2662 SET_NETDEV_DEV(ndev, device); 2663 2664 priv = netdev_priv(ndev); 2665 priv->device = device; 2666 priv->dev = ndev; 2667 2668 ether_setup(ndev); 2669 2670 stmmac_set_ethtool_ops(ndev); 2671 priv->pause = pause; 2672 priv->plat = plat_dat; 2673 priv->ioaddr = addr; 2674 priv->dev->base_addr = (unsigned long)addr; 2675 2676 /* Verify driver arguments */ 2677 stmmac_verify_args(); 2678 2679 /* Override with kernel parameters if supplied XXX CRS XXX 2680 * this needs to have multiple instances 2681 */ 2682 if ((phyaddr >= 0) && (phyaddr <= 31)) 2683 priv->plat->phy_addr = phyaddr; 2684 2685 /* Init MAC and get the capabilities */ 2686 ret = stmmac_hw_init(priv); 2687 if (ret) 2688 goto error_free_netdev; 2689 2690 ndev->netdev_ops = &stmmac_netdev_ops; 2691 2692 ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | 2693 NETIF_F_RXCSUM; 2694 ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA; 2695 ndev->watchdog_timeo = msecs_to_jiffies(watchdog); 2696 #ifdef STMMAC_VLAN_TAG_USED 2697 /* Both mac100 and gmac support receive VLAN tag detection */ 2698 ndev->features |= NETIF_F_HW_VLAN_CTAG_RX; 2699 #endif 2700 priv->msg_enable = netif_msg_init(debug, default_msg_level); 2701 2702 if (flow_ctrl) 2703 priv->flow_ctrl = FLOW_AUTO; /* RX/TX pause on */ 2704 2705 /* Rx Watchdog is available in the COREs newer than the 3.40. 2706 * In some case, for example on bugged HW this feature 2707 * has to be disable and this can be done by passing the 2708 * riwt_off field from the platform. 2709 */ 2710 if ((priv->synopsys_id >= DWMAC_CORE_3_50) && (!priv->plat->riwt_off)) { 2711 priv->use_riwt = 1; 2712 pr_info(" Enable RX Mitigation via HW Watchdog Timer\n"); 2713 } 2714 2715 netif_napi_add(ndev, &priv->napi, stmmac_poll, 64); 2716 2717 spin_lock_init(&priv->lock); 2718 spin_lock_init(&priv->tx_lock); 2719 2720 ret = register_netdev(ndev); 2721 if (ret) { 2722 pr_err("%s: ERROR %i registering the device\n", __func__, ret); 2723 goto error_netdev_register; 2724 } 2725 2726 priv->stmmac_clk = clk_get(priv->device, STMMAC_RESOURCE_NAME); 2727 if (IS_ERR(priv->stmmac_clk)) { 2728 pr_warn("%s: warning: cannot get CSR clock\n", __func__); 2729 goto error_clk_get; 2730 } 2731 2732 /* If a specific clk_csr value is passed from the platform 2733 * this means that the CSR Clock Range selection cannot be 2734 * changed at run-time and it is fixed. Viceversa the driver'll try to 2735 * set the MDC clock dynamically according to the csr actual 2736 * clock input. 
2737 */ 2738 if (!priv->plat->clk_csr) 2739 stmmac_clk_csr_set(priv); 2740 else 2741 priv->clk_csr = priv->plat->clk_csr; 2742 2743 stmmac_check_pcs_mode(priv); 2744 2745 if (priv->pcs != STMMAC_PCS_RGMII && priv->pcs != STMMAC_PCS_TBI && 2746 priv->pcs != STMMAC_PCS_RTBI) { 2747 /* MDIO bus Registration */ 2748 ret = stmmac_mdio_register(ndev); 2749 if (ret < 0) { 2750 pr_debug("%s: MDIO bus (id: %d) registration failed", 2751 __func__, priv->plat->bus_id); 2752 goto error_mdio_register; 2753 } 2754 } 2755 2756 return priv; 2757 2758 error_mdio_register: 2759 clk_put(priv->stmmac_clk); 2760 error_clk_get: 2761 unregister_netdev(ndev); 2762 error_netdev_register: 2763 netif_napi_del(&priv->napi); 2764 error_free_netdev: 2765 free_netdev(ndev); 2766 2767 return NULL; 2768 } 2769 2770 /** 2771 * stmmac_dvr_remove 2772 * @ndev: net device pointer 2773 * Description: this function resets the TX/RX processes, disables the MAC RX/TX 2774 * changes the link status, releases the DMA descriptor rings. 2775 */ 2776 int stmmac_dvr_remove(struct net_device *ndev) 2777 { 2778 struct stmmac_priv *priv = netdev_priv(ndev); 2779 2780 pr_info("%s:\n\tremoving driver", __func__); 2781 2782 priv->hw->dma->stop_rx(priv->ioaddr); 2783 priv->hw->dma->stop_tx(priv->ioaddr); 2784 2785 stmmac_set_mac(priv->ioaddr, false); 2786 if (priv->pcs != STMMAC_PCS_RGMII && priv->pcs != STMMAC_PCS_TBI && 2787 priv->pcs != STMMAC_PCS_RTBI) 2788 stmmac_mdio_unregister(ndev); 2789 netif_carrier_off(ndev); 2790 unregister_netdev(ndev); 2791 free_netdev(ndev); 2792 2793 return 0; 2794 } 2795 2796 #ifdef CONFIG_PM 2797 int stmmac_suspend(struct net_device *ndev) 2798 { 2799 struct stmmac_priv *priv = netdev_priv(ndev); 2800 unsigned long flags; 2801 2802 if (!ndev || !netif_running(ndev)) 2803 return 0; 2804 2805 if (priv->phydev) 2806 phy_stop(priv->phydev); 2807 2808 spin_lock_irqsave(&priv->lock, flags); 2809 2810 netif_device_detach(ndev); 2811 netif_stop_queue(ndev); 2812 2813 napi_disable(&priv->napi); 2814 2815 /* Stop TX/RX DMA */ 2816 priv->hw->dma->stop_tx(priv->ioaddr); 2817 priv->hw->dma->stop_rx(priv->ioaddr); 2818 2819 stmmac_clear_descriptors(priv); 2820 2821 /* Enable Power down mode by programming the PMT regs */ 2822 if (device_may_wakeup(priv->device)) 2823 priv->hw->mac->pmt(priv->ioaddr, priv->wolopts); 2824 else { 2825 stmmac_set_mac(priv->ioaddr, false); 2826 /* Disable clock in case of PWM is off */ 2827 clk_disable_unprepare(priv->stmmac_clk); 2828 } 2829 spin_unlock_irqrestore(&priv->lock, flags); 2830 return 0; 2831 } 2832 2833 int stmmac_resume(struct net_device *ndev) 2834 { 2835 struct stmmac_priv *priv = netdev_priv(ndev); 2836 unsigned long flags; 2837 2838 if (!netif_running(ndev)) 2839 return 0; 2840 2841 spin_lock_irqsave(&priv->lock, flags); 2842 2843 /* Power Down bit, into the PM register, is cleared 2844 * automatically as soon as a magic packet or a Wake-up frame 2845 * is received. Anyway, it's better to manually clear 2846 * this bit because it can generate problems while resuming 2847 * from another devices (e.g. serial console). 
2848 */ 2849 if (device_may_wakeup(priv->device)) 2850 priv->hw->mac->pmt(priv->ioaddr, 0); 2851 else 2852 /* enable the clk prevously disabled */ 2853 clk_prepare_enable(priv->stmmac_clk); 2854 2855 netif_device_attach(ndev); 2856 2857 /* Enable the MAC and DMA */ 2858 stmmac_set_mac(priv->ioaddr, true); 2859 priv->hw->dma->start_tx(priv->ioaddr); 2860 priv->hw->dma->start_rx(priv->ioaddr); 2861 2862 napi_enable(&priv->napi); 2863 2864 netif_start_queue(ndev); 2865 2866 spin_unlock_irqrestore(&priv->lock, flags); 2867 2868 if (priv->phydev) 2869 phy_start(priv->phydev); 2870 2871 return 0; 2872 } 2873 2874 int stmmac_freeze(struct net_device *ndev) 2875 { 2876 if (!ndev || !netif_running(ndev)) 2877 return 0; 2878 2879 return stmmac_release(ndev); 2880 } 2881 2882 int stmmac_restore(struct net_device *ndev) 2883 { 2884 if (!ndev || !netif_running(ndev)) 2885 return 0; 2886 2887 return stmmac_open(ndev); 2888 } 2889 #endif /* CONFIG_PM */ 2890 2891 /* Driver can be configured w/ and w/ both PCI and Platf drivers 2892 * depending on the configuration selected. 2893 */ 2894 static int __init stmmac_init(void) 2895 { 2896 int ret; 2897 2898 ret = stmmac_register_platform(); 2899 if (ret) 2900 goto err; 2901 ret = stmmac_register_pci(); 2902 if (ret) 2903 goto err_pci; 2904 return 0; 2905 err_pci: 2906 stmmac_unregister_platform(); 2907 err: 2908 pr_err("stmmac: driver registration failed\n"); 2909 return ret; 2910 } 2911 2912 static void __exit stmmac_exit(void) 2913 { 2914 stmmac_unregister_platform(); 2915 stmmac_unregister_pci(); 2916 } 2917 2918 module_init(stmmac_init); 2919 module_exit(stmmac_exit); 2920 2921 #ifndef MODULE 2922 static int __init stmmac_cmdline_opt(char *str) 2923 { 2924 char *opt; 2925 2926 if (!str || !*str) 2927 return -EINVAL; 2928 while ((opt = strsep(&str, ",")) != NULL) { 2929 if (!strncmp(opt, "debug:", 6)) { 2930 if (kstrtoint(opt + 6, 0, &debug)) 2931 goto err; 2932 } else if (!strncmp(opt, "phyaddr:", 8)) { 2933 if (kstrtoint(opt + 8, 0, &phyaddr)) 2934 goto err; 2935 } else if (!strncmp(opt, "dma_txsize:", 11)) { 2936 if (kstrtoint(opt + 11, 0, &dma_txsize)) 2937 goto err; 2938 } else if (!strncmp(opt, "dma_rxsize:", 11)) { 2939 if (kstrtoint(opt + 11, 0, &dma_rxsize)) 2940 goto err; 2941 } else if (!strncmp(opt, "buf_sz:", 7)) { 2942 if (kstrtoint(opt + 7, 0, &buf_sz)) 2943 goto err; 2944 } else if (!strncmp(opt, "tc:", 3)) { 2945 if (kstrtoint(opt + 3, 0, &tc)) 2946 goto err; 2947 } else if (!strncmp(opt, "watchdog:", 9)) { 2948 if (kstrtoint(opt + 9, 0, &watchdog)) 2949 goto err; 2950 } else if (!strncmp(opt, "flow_ctrl:", 10)) { 2951 if (kstrtoint(opt + 10, 0, &flow_ctrl)) 2952 goto err; 2953 } else if (!strncmp(opt, "pause:", 6)) { 2954 if (kstrtoint(opt + 6, 0, &pause)) 2955 goto err; 2956 } else if (!strncmp(opt, "eee_timer:", 10)) { 2957 if (kstrtoint(opt + 10, 0, &eee_timer)) 2958 goto err; 2959 } else if (!strncmp(opt, "chain_mode:", 11)) { 2960 if (kstrtoint(opt + 11, 0, &chain_mode)) 2961 goto err; 2962 } 2963 } 2964 return 0; 2965 2966 err: 2967 pr_err("%s: ERROR broken module parameter conversion", __func__); 2968 return -EINVAL; 2969 } 2970 2971 __setup("stmmaceth=", stmmac_cmdline_opt); 2972 #endif /* MODULE */ 2973 2974 MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet device driver"); 2975 MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>"); 2976 MODULE_LICENSE("GPL"); 2977