/*
 * AMD 10Gb Ethernet driver
 *
 * This file is available to you under your choice of the following two
 * licenses:
 *
 * License 1: GPLv2
 *
 * Copyright (c) 2014-2016 Advanced Micro Devices, Inc.
 *
 * This file is free software; you may copy, redistribute and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 2 of the License, or (at
 * your option) any later version.
 *
 * This file is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 * This file incorporates work covered by the following copyright and
 * permission notice:
 *     The Synopsys DWC ETHER XGMAC Software Driver and documentation
 *     (hereinafter "Software") is an unsupported proprietary work of Synopsys,
 *     Inc. unless otherwise expressly agreed to in writing between Synopsys
 *     and you.
 *
 *     The Software IS NOT an item of Licensed Software or Licensed Product
 *     under any End User Software License Agreement or Agreement for Licensed
 *     Product with Synopsys or any supplement thereto.  Permission is hereby
 *     granted, free of charge, to any person obtaining a copy of this software
 *     annotated with this license and the Software, to deal in the Software
 *     without restriction, including without limitation the rights to use,
 *     copy, modify, merge, publish, distribute, sublicense, and/or sell copies
 *     of the Software, and to permit persons to whom the Software is furnished
 *     to do so, subject to the following conditions:
 *
 *     The above copyright notice and this permission notice shall be included
 *     in all copies or substantial portions of the Software.
 *
 *     THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
 *     BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 *     TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
 *     PARTICULAR PURPOSE ARE HEREBY DISCLAIMED.  IN NO EVENT SHALL SYNOPSYS
 *     BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *     CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *     SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 *     INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 *     CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 *     ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 *     THE POSSIBILITY OF SUCH DAMAGE.
 *
 *
 * License 2: Modified BSD
 *
 * Copyright (c) 2014-2016 Advanced Micro Devices, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in the
 *       documentation and/or other materials provided with the distribution.
 *     * Neither the name of Advanced Micro Devices, Inc. nor the
 *       names of its contributors may be used to endorse or promote products
 *       derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * This file incorporates work covered by the following copyright and
 * permission notice:
 *     The Synopsys DWC ETHER XGMAC Software Driver and documentation
 *     (hereinafter "Software") is an unsupported proprietary work of Synopsys,
 *     Inc. unless otherwise expressly agreed to in writing between Synopsys
 *     and you.
 *
 *     The Software IS NOT an item of Licensed Software or Licensed Product
 *     under any End User Software License Agreement or Agreement for Licensed
 *     Product with Synopsys or any supplement thereto.  Permission is hereby
 *     granted, free of charge, to any person obtaining a copy of this software
 *     annotated with this license and the Software, to deal in the Software
 *     without restriction, including without limitation the rights to use,
 *     copy, modify, merge, publish, distribute, sublicense, and/or sell copies
 *     of the Software, and to permit persons to whom the Software is furnished
 *     to do so, subject to the following conditions:
 *
 *     The above copyright notice and this permission notice shall be included
 *     in all copies or substantial portions of the Software.
 *
 *     THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
 *     BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 *     TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
 *     PARTICULAR PURPOSE ARE HEREBY DISCLAIMED.  IN NO EVENT SHALL SYNOPSYS
 *     BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *     CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *     SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 *     INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 *     CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 *     ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 *     THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/phy.h>
#include <linux/mdio.h>
#include <linux/clk.h>
#include <linux/bitrev.h>
#include <linux/crc32.h>

#include "xgbe.h"
#include "xgbe-common.h"

static inline unsigned int xgbe_get_max_frame(struct xgbe_prv_data *pdata)
{
	return pdata->netdev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
}

static unsigned int xgbe_usec_to_riwt(struct xgbe_prv_data *pdata,
				      unsigned int usec)
{
	unsigned long rate;
	unsigned int ret;

	DBGPR("-->xgbe_usec_to_riwt\n");

	rate = pdata->sysclk_rate;

	/*
	 * Convert the input usec value to the watchdog timer value. Each
	 * watchdog timer value is equivalent to 256 clock cycles.
	 * Calculate the required value as:
	 *   ( usec * ( system_clock_hz / 10^6 ) ) / 256
	 */
	ret = (usec * (rate / 1000000)) / 256;

	DBGPR("<--xgbe_usec_to_riwt\n");

	return ret;
}

static unsigned int xgbe_riwt_to_usec(struct xgbe_prv_data *pdata,
				      unsigned int riwt)
{
	unsigned long rate;
	unsigned int ret;

	DBGPR("-->xgbe_riwt_to_usec\n");

	rate = pdata->sysclk_rate;

	/*
	 * Convert the input watchdog timer value to the usec value. Each
	 * watchdog timer value is equivalent to 256 clock cycles.
	 * Calculate the required value as:
	 *   ( riwt * 256 ) / ( system_clock_hz / 10^6 )
	 */
	ret = (riwt * 256) / (rate / 1000000);

	DBGPR("<--xgbe_riwt_to_usec\n");

	return ret;
}

static int xgbe_config_pbl_val(struct xgbe_prv_data *pdata)
{
	unsigned int pblx8, pbl;
	unsigned int i;

	pblx8 = DMA_PBL_X8_DISABLE;
	pbl = pdata->pbl;

	if (pdata->pbl > 32) {
		pblx8 = DMA_PBL_X8_ENABLE;
		pbl >>= 3;
	}

	for (i = 0; i < pdata->channel_count; i++) {
		XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_CR, PBLX8,
				       pblx8);

		if (pdata->channel[i]->tx_ring)
			XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_TCR,
					       PBL, pbl);

		if (pdata->channel[i]->rx_ring)
			XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_RCR,
					       PBL, pbl);
	}

	return 0;
}

static int xgbe_config_osp_mode(struct xgbe_prv_data *pdata)
{
	unsigned int i;

	for (i = 0; i < pdata->channel_count; i++) {
		if (!pdata->channel[i]->tx_ring)
			break;

		XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_TCR, OSP,
				       pdata->tx_osp_mode);
	}

	return 0;
}

static int xgbe_config_rsf_mode(struct xgbe_prv_data *pdata, unsigned int val)
{
	unsigned int i;

	for (i = 0; i < pdata->rx_q_count; i++)
		XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RSF, val);

	return 0;
}

static int xgbe_config_tsf_mode(struct xgbe_prv_data *pdata, unsigned int val)
{
	unsigned int i;

	for (i = 0; i < pdata->tx_q_count; i++)
		XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TSF, val);

	return 0;
}

static int xgbe_config_rx_threshold(struct xgbe_prv_data *pdata,
				    unsigned int val)
{
	unsigned int i;

	for (i = 0; i < pdata->rx_q_count; i++)
		XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RTC, val);

	return 0;
}

static int xgbe_config_tx_threshold(struct xgbe_prv_data *pdata,
				    unsigned int val)
{
	unsigned int i;

	for (i = 0; i < pdata->tx_q_count; i++)
		XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TTC, val);

	return 0;
}
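/* Illustrative note, not taken from the hardware documentation: with a
 * hypothetical sysclk_rate of 125 MHz, xgbe_usec_to_riwt(pdata, 30) above
 * evaluates to (30 * 125) / 256 = 14 (integer division), and that watchdog
 * value is what xgbe_config_rx_coalesce() below programs into the RWT field.
 */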
static int xgbe_config_rx_coalesce(struct xgbe_prv_data *pdata)
{
	unsigned int i;

	for (i = 0; i < pdata->channel_count; i++) {
		if (!pdata->channel[i]->rx_ring)
			break;

		XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_RIWT, RWT,
				       pdata->rx_riwt);
	}

	return 0;
}

static int xgbe_config_tx_coalesce(struct xgbe_prv_data *pdata)
{
	return 0;
}

static void xgbe_config_rx_buffer_size(struct xgbe_prv_data *pdata)
{
	unsigned int i;

	for (i = 0; i < pdata->channel_count; i++) {
		if (!pdata->channel[i]->rx_ring)
			break;

		XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_RCR, RBSZ,
				       pdata->rx_buf_size);
	}
}

static void xgbe_config_tso_mode(struct xgbe_prv_data *pdata)
{
	unsigned int i;

	for (i = 0; i < pdata->channel_count; i++) {
		if (!pdata->channel[i]->tx_ring)
			break;

		XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_TCR, TSE, 1);
	}
}

static void xgbe_config_sph_mode(struct xgbe_prv_data *pdata)
{
	unsigned int i;

	for (i = 0; i < pdata->channel_count; i++) {
		if (!pdata->channel[i]->rx_ring)
			break;

		XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_CR, SPH, 1);
	}

	XGMAC_IOWRITE_BITS(pdata, MAC_RCR, HDSMS, XGBE_SPH_HDSMS_SIZE);
}

static int xgbe_write_rss_reg(struct xgbe_prv_data *pdata, unsigned int type,
			      unsigned int index, unsigned int val)
{
	unsigned int wait;
	int ret = 0;

	mutex_lock(&pdata->rss_mutex);

	if (XGMAC_IOREAD_BITS(pdata, MAC_RSSAR, OB)) {
		ret = -EBUSY;
		goto unlock;
	}

	XGMAC_IOWRITE(pdata, MAC_RSSDR, val);

	XGMAC_IOWRITE_BITS(pdata, MAC_RSSAR, RSSIA, index);
	XGMAC_IOWRITE_BITS(pdata, MAC_RSSAR, ADDRT, type);
	XGMAC_IOWRITE_BITS(pdata, MAC_RSSAR, CT, 0);
	XGMAC_IOWRITE_BITS(pdata, MAC_RSSAR, OB, 1);

	wait = 1000;
	while (wait--) {
		if (!XGMAC_IOREAD_BITS(pdata, MAC_RSSAR, OB))
			goto unlock;

		usleep_range(1000, 1500);
	}

	ret = -EBUSY;

unlock:
	mutex_unlock(&pdata->rss_mutex);

	return ret;
}

static int xgbe_write_rss_hash_key(struct xgbe_prv_data *pdata)
{
	unsigned int key_regs = sizeof(pdata->rss_key) / sizeof(u32);
	unsigned int *key = (unsigned int *)&pdata->rss_key;
	int ret;

	while (key_regs--) {
		ret = xgbe_write_rss_reg(pdata, XGBE_RSS_HASH_KEY_TYPE,
					 key_regs, *key++);
		if (ret)
			return ret;
	}

	return 0;
}

static int xgbe_write_rss_lookup_table(struct xgbe_prv_data *pdata)
{
	unsigned int i;
	int ret;

	for (i = 0; i < ARRAY_SIZE(pdata->rss_table); i++) {
		ret = xgbe_write_rss_reg(pdata,
					 XGBE_RSS_LOOKUP_TABLE_TYPE, i,
					 pdata->rss_table[i]);
		if (ret)
			return ret;
	}

	return 0;
}

static int xgbe_set_rss_hash_key(struct xgbe_prv_data *pdata, const u8 *key)
{
	memcpy(pdata->rss_key, key, sizeof(pdata->rss_key));

	return xgbe_write_rss_hash_key(pdata);
}

static int xgbe_set_rss_lookup_table(struct xgbe_prv_data *pdata,
				     const u32 *table)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(pdata->rss_table); i++)
		XGMAC_SET_BITS(pdata->rss_table[i], MAC_RSSDR, DMCH, table[i]);

	return xgbe_write_rss_lookup_table(pdata);
}
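/* Illustrative note: each lookup table entry written above carries an Rx DMA
 * channel number in the DMCH field of MAC_RSSDR, so a hypothetical 4-channel
 * configuration whose table repeats 0, 1, 2, 3, ... spreads hashed flows
 * evenly across the four Rx channels.
 */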
static int xgbe_enable_rss(struct xgbe_prv_data *pdata)
{
	int ret;

	if (!pdata->hw_feat.rss)
		return -EOPNOTSUPP;

	/* Program the hash key */
	ret = xgbe_write_rss_hash_key(pdata);
	if (ret)
		return ret;

	/* Program the lookup table */
	ret = xgbe_write_rss_lookup_table(pdata);
	if (ret)
		return ret;

	/* Set the RSS options */
	XGMAC_IOWRITE(pdata, MAC_RSSCR, pdata->rss_options);

	/* Enable RSS */
	XGMAC_IOWRITE_BITS(pdata, MAC_RSSCR, RSSE, 1);

	return 0;
}

static int xgbe_disable_rss(struct xgbe_prv_data *pdata)
{
	if (!pdata->hw_feat.rss)
		return -EOPNOTSUPP;

	XGMAC_IOWRITE_BITS(pdata, MAC_RSSCR, RSSE, 0);

	return 0;
}

static void xgbe_config_rss(struct xgbe_prv_data *pdata)
{
	int ret;

	if (!pdata->hw_feat.rss)
		return;

	if (pdata->netdev->features & NETIF_F_RXHASH)
		ret = xgbe_enable_rss(pdata);
	else
		ret = xgbe_disable_rss(pdata);

	if (ret)
		netdev_err(pdata->netdev,
			   "error configuring RSS, RSS disabled\n");
}

static bool xgbe_is_pfc_queue(struct xgbe_prv_data *pdata,
			      unsigned int queue)
{
	unsigned int prio, tc;

	for (prio = 0; prio < IEEE_8021QAZ_MAX_TCS; prio++) {
		/* Does this queue handle the priority? */
		if (pdata->prio2q_map[prio] != queue)
			continue;

		/* Get the Traffic Class for this priority */
		tc = pdata->ets->prio_tc[prio];

		/* Check if PFC is enabled for this traffic class */
		if (pdata->pfc->pfc_en & (1 << tc))
			return true;
	}

	return false;
}

static int xgbe_disable_tx_flow_control(struct xgbe_prv_data *pdata)
{
	unsigned int max_q_count, q_count;
	unsigned int reg, reg_val;
	unsigned int i;

	/* Clear MTL flow control */
	for (i = 0; i < pdata->rx_q_count; i++)
		XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, EHFC, 0);

	/* Clear MAC flow control */
	max_q_count = XGMAC_MAX_FLOW_CONTROL_QUEUES;
	q_count = min_t(unsigned int, pdata->tx_q_count, max_q_count);
	reg = MAC_Q0TFCR;
	for (i = 0; i < q_count; i++) {
		reg_val = XGMAC_IOREAD(pdata, reg);
		XGMAC_SET_BITS(reg_val, MAC_Q0TFCR, TFE, 0);
		XGMAC_IOWRITE(pdata, reg, reg_val);

		reg += MAC_QTFCR_INC;
	}

	return 0;
}

static int xgbe_enable_tx_flow_control(struct xgbe_prv_data *pdata)
{
	struct ieee_pfc *pfc = pdata->pfc;
	struct ieee_ets *ets = pdata->ets;
	unsigned int max_q_count, q_count;
	unsigned int reg, reg_val;
	unsigned int i;

	/* Set MTL flow control */
	for (i = 0; i < pdata->rx_q_count; i++) {
		unsigned int ehfc = 0;

		if (pdata->rx_rfd[i]) {
			/* Flow control thresholds are established */
			if (pfc && ets) {
				if (xgbe_is_pfc_queue(pdata, i))
					ehfc = 1;
			} else {
				ehfc = 1;
			}
		}

		XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, EHFC, ehfc);

		netif_dbg(pdata, drv, pdata->netdev,
			  "flow control %s for RXq%u\n",
			  ehfc ? "enabled" : "disabled", i);
	}

	/* Set MAC flow control */
	max_q_count = XGMAC_MAX_FLOW_CONTROL_QUEUES;
	q_count = min_t(unsigned int, pdata->tx_q_count, max_q_count);
	reg = MAC_Q0TFCR;
	for (i = 0; i < q_count; i++) {
		reg_val = XGMAC_IOREAD(pdata, reg);

		/* Enable transmit flow control */
		XGMAC_SET_BITS(reg_val, MAC_Q0TFCR, TFE, 1);
		/* Set pause time */
		XGMAC_SET_BITS(reg_val, MAC_Q0TFCR, PT, 0xffff);

		XGMAC_IOWRITE(pdata, reg, reg_val);

		reg += MAC_QTFCR_INC;
	}

	return 0;
}

static int xgbe_disable_rx_flow_control(struct xgbe_prv_data *pdata)
{
	XGMAC_IOWRITE_BITS(pdata, MAC_RFCR, RFE, 0);

	return 0;
}

static int xgbe_enable_rx_flow_control(struct xgbe_prv_data *pdata)
{
	XGMAC_IOWRITE_BITS(pdata, MAC_RFCR, RFE, 1);

	return 0;
}

static int xgbe_config_tx_flow_control(struct xgbe_prv_data *pdata)
{
	struct ieee_pfc *pfc = pdata->pfc;

	if (pdata->tx_pause || (pfc && pfc->pfc_en))
		xgbe_enable_tx_flow_control(pdata);
	else
		xgbe_disable_tx_flow_control(pdata);

	return 0;
}

static int xgbe_config_rx_flow_control(struct xgbe_prv_data *pdata)
{
	struct ieee_pfc *pfc = pdata->pfc;

	if (pdata->rx_pause || (pfc && pfc->pfc_en))
		xgbe_enable_rx_flow_control(pdata);
	else
		xgbe_disable_rx_flow_control(pdata);

	return 0;
}

static void xgbe_config_flow_control(struct xgbe_prv_data *pdata)
{
	struct ieee_pfc *pfc = pdata->pfc;

	xgbe_config_tx_flow_control(pdata);
	xgbe_config_rx_flow_control(pdata);

	XGMAC_IOWRITE_BITS(pdata, MAC_RFCR, PFCE,
			   (pfc && pfc->pfc_en) ? 1 : 0);
}

static void xgbe_enable_dma_interrupts(struct xgbe_prv_data *pdata)
{
	struct xgbe_channel *channel;
	unsigned int dma_ch_isr, dma_ch_ier;
	unsigned int i;

	/* Set the interrupt mode if supported */
	if (pdata->channel_irq_mode)
		XGMAC_IOWRITE_BITS(pdata, DMA_MR, INTM,
				   pdata->channel_irq_mode);

	for (i = 0; i < pdata->channel_count; i++) {
		channel = pdata->channel[i];

		/* Clear all the interrupts which are set */
		dma_ch_isr = XGMAC_DMA_IOREAD(channel, DMA_CH_SR);
		XGMAC_DMA_IOWRITE(channel, DMA_CH_SR, dma_ch_isr);

		/* Clear all interrupt enable bits */
		dma_ch_ier = 0;

		/* Enable following interrupts
		 *   NIE  - Normal Interrupt Summary Enable
		 *   AIE  - Abnormal Interrupt Summary Enable
		 *   FBEE - Fatal Bus Error Enable
		 */
		XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, NIE, 1);
		XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, AIE, 1);
		XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, FBEE, 1);

		if (channel->tx_ring) {
			/* Enable the following Tx interrupts
			 *   TIE - Transmit Interrupt Enable (unless using
			 *         per channel interrupts in edge triggered
			 *         mode)
			 */
			if (!pdata->per_channel_irq || pdata->channel_irq_mode)
				XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, TIE, 1);
		}
		if (channel->rx_ring) {
			/* Enable following Rx interrupts
			 *   RBUE - Receive Buffer Unavailable Enable
			 *   RIE  - Receive Interrupt Enable (unless using
			 *          per channel interrupts in edge triggered
			 *          mode)
			 */
			XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RBUE, 1);
			if (!pdata->per_channel_irq || pdata->channel_irq_mode)
				XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RIE, 1);
		}

		XGMAC_DMA_IOWRITE(channel, DMA_CH_IER, dma_ch_ier);
	}
}
static void xgbe_enable_mtl_interrupts(struct xgbe_prv_data *pdata)
{
	unsigned int mtl_q_isr;
	unsigned int q_count, i;

	q_count = max(pdata->hw_feat.tx_q_cnt, pdata->hw_feat.rx_q_cnt);
	for (i = 0; i < q_count; i++) {
		/* Clear all the interrupts which are set */
		mtl_q_isr = XGMAC_MTL_IOREAD(pdata, i, MTL_Q_ISR);
		XGMAC_MTL_IOWRITE(pdata, i, MTL_Q_ISR, mtl_q_isr);

		/* No MTL interrupts to be enabled */
		XGMAC_MTL_IOWRITE(pdata, i, MTL_Q_IER, 0);
	}
}

static void xgbe_enable_mac_interrupts(struct xgbe_prv_data *pdata)
{
	unsigned int mac_ier = 0;

	/* Enable Timestamp interrupt */
	XGMAC_SET_BITS(mac_ier, MAC_IER, TSIE, 1);

	XGMAC_IOWRITE(pdata, MAC_IER, mac_ier);

	/* Enable all counter interrupts */
	XGMAC_IOWRITE_BITS(pdata, MMC_RIER, ALL_INTERRUPTS, 0xffffffff);
	XGMAC_IOWRITE_BITS(pdata, MMC_TIER, ALL_INTERRUPTS, 0xffffffff);

	/* Enable MDIO single command completion interrupt */
	XGMAC_IOWRITE_BITS(pdata, MAC_MDIOIER, SNGLCOMPIE, 1);
}

static void xgbe_enable_ecc_interrupts(struct xgbe_prv_data *pdata)
{
	unsigned int ecc_isr, ecc_ier = 0;

	if (!pdata->vdata->ecc_support)
		return;

	/* Clear all the interrupts which are set */
	ecc_isr = XP_IOREAD(pdata, XP_ECC_ISR);
	XP_IOWRITE(pdata, XP_ECC_ISR, ecc_isr);

	/* Enable ECC interrupts */
	XP_SET_BITS(ecc_ier, XP_ECC_IER, TX_DED, 1);
	XP_SET_BITS(ecc_ier, XP_ECC_IER, TX_SEC, 1);
	XP_SET_BITS(ecc_ier, XP_ECC_IER, RX_DED, 1);
	XP_SET_BITS(ecc_ier, XP_ECC_IER, RX_SEC, 1);
	XP_SET_BITS(ecc_ier, XP_ECC_IER, DESC_DED, 1);
	XP_SET_BITS(ecc_ier, XP_ECC_IER, DESC_SEC, 1);

	XP_IOWRITE(pdata, XP_ECC_IER, ecc_ier);
}

static void xgbe_disable_ecc_ded(struct xgbe_prv_data *pdata)
{
	unsigned int ecc_ier;

	ecc_ier = XP_IOREAD(pdata, XP_ECC_IER);

	/* Disable ECC DED interrupts */
	XP_SET_BITS(ecc_ier, XP_ECC_IER, TX_DED, 0);
	XP_SET_BITS(ecc_ier, XP_ECC_IER, RX_DED, 0);
	XP_SET_BITS(ecc_ier, XP_ECC_IER, DESC_DED, 0);

	XP_IOWRITE(pdata, XP_ECC_IER, ecc_ier);
}

static void xgbe_disable_ecc_sec(struct xgbe_prv_data *pdata,
				 enum xgbe_ecc_sec sec)
{
	unsigned int ecc_ier;

	ecc_ier = XP_IOREAD(pdata, XP_ECC_IER);

	/* Disable ECC SEC interrupt */
	switch (sec) {
	case XGBE_ECC_SEC_TX:
		XP_SET_BITS(ecc_ier, XP_ECC_IER, TX_SEC, 0);
		break;
	case XGBE_ECC_SEC_RX:
		XP_SET_BITS(ecc_ier, XP_ECC_IER, RX_SEC, 0);
		break;
	case XGBE_ECC_SEC_DESC:
		XP_SET_BITS(ecc_ier, XP_ECC_IER, DESC_SEC, 0);
		break;
	}

	XP_IOWRITE(pdata, XP_ECC_IER, ecc_ier);
}

static int xgbe_set_speed(struct xgbe_prv_data *pdata, int speed)
{
	unsigned int ss;

	switch (speed) {
	case SPEED_1000:
		ss = 0x03;
		break;
	case SPEED_2500:
		ss = 0x02;
		break;
	case SPEED_10000:
		ss = 0x00;
		break;
	default:
		return -EINVAL;
	}

	if (XGMAC_IOREAD_BITS(pdata, MAC_TCR, SS) != ss)
		XGMAC_IOWRITE_BITS(pdata, MAC_TCR, SS, ss);

	return 0;
}

static int xgbe_enable_rx_vlan_stripping(struct xgbe_prv_data *pdata)
{
	/* Put the VLAN tag in the Rx descriptor */
	XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, EVLRXS, 1);

	/* Don't check the VLAN type */
	XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, DOVLTC, 1);

	/* Check only C-TAG (0x8100) packets */
	XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, ERSVLM, 0);

	/* Don't consider an S-TAG (0x88A8) packet as a VLAN packet
	 */
	XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, ESVL, 0);

	/* Enable VLAN tag stripping */
	XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, EVLS, 0x3);

	return 0;
}

static int xgbe_disable_rx_vlan_stripping(struct xgbe_prv_data *pdata)
{
	XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, EVLS, 0);

	return 0;
}

static int xgbe_enable_rx_vlan_filtering(struct xgbe_prv_data *pdata)
{
	/* Enable VLAN filtering */
	XGMAC_IOWRITE_BITS(pdata, MAC_PFR, VTFE, 1);

	/* Enable VLAN Hash Table filtering */
	XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, VTHM, 1);

	/* Disable VLAN tag inverse matching */
	XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, VTIM, 0);

	/* Only filter on the lower 12-bits of the VLAN tag */
	XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, ETV, 1);

	/* In order for the VLAN Hash Table filtering to be effective,
	 * the VLAN tag identifier in the VLAN Tag Register must not
	 * be zero.  Set the VLAN tag identifier to "1" to enable the
	 * VLAN Hash Table filtering.  This implies that a VLAN tag of
	 * 1 will always pass filtering.
	 */
	XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, VL, 1);

	return 0;
}

static int xgbe_disable_rx_vlan_filtering(struct xgbe_prv_data *pdata)
{
	/* Disable VLAN filtering */
	XGMAC_IOWRITE_BITS(pdata, MAC_PFR, VTFE, 0);

	return 0;
}

static u32 xgbe_vid_crc32_le(__le16 vid_le)
{
	u32 poly = 0xedb88320;	/* CRCPOLY_LE */
	u32 crc = ~0;
	u32 temp = 0;
	unsigned char *data = (unsigned char *)&vid_le;
	unsigned char data_byte = 0;
	int i, bits;

	bits = get_bitmask_order(VLAN_VID_MASK);
	for (i = 0; i < bits; i++) {
		if ((i % 8) == 0)
			data_byte = data[i / 8];

		temp = ((crc & 1) ^ data_byte) & 1;
		crc >>= 1;
		data_byte >>= 1;

		if (temp)
			crc ^= poly;
	}

	return crc;
}

static int xgbe_update_vlan_hash_table(struct xgbe_prv_data *pdata)
{
	u32 crc;
	u16 vid;
	__le16 vid_le;
	u16 vlan_hash_table = 0;

	/* Generate the VLAN Hash Table value */
	for_each_set_bit(vid, pdata->active_vlans, VLAN_N_VID) {
		/* Get the CRC32 value of the VLAN ID */
		vid_le = cpu_to_le16(vid);
		crc = bitrev32(~xgbe_vid_crc32_le(vid_le)) >> 28;

		vlan_hash_table |= (1 << crc);
	}

	/* Set the VLAN Hash Table filtering register */
	XGMAC_IOWRITE_BITS(pdata, MAC_VLANHTR, VLHT, vlan_hash_table);

	return 0;
}

static int xgbe_set_promiscuous_mode(struct xgbe_prv_data *pdata,
				     unsigned int enable)
{
	unsigned int val = enable ? 1 : 0;

	if (XGMAC_IOREAD_BITS(pdata, MAC_PFR, PR) == val)
		return 0;

	netif_dbg(pdata, drv, pdata->netdev, "%s promiscuous mode\n",
		  enable ? "entering" : "leaving");
	XGMAC_IOWRITE_BITS(pdata, MAC_PFR, PR, val);

	/* Hardware will still perform VLAN filtering in promiscuous mode */
	if (enable) {
		xgbe_disable_rx_vlan_filtering(pdata);
	} else {
		if (pdata->netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER)
			xgbe_enable_rx_vlan_filtering(pdata);
	}

	return 0;
}

static int xgbe_set_all_multicast_mode(struct xgbe_prv_data *pdata,
				       unsigned int enable)
{
	unsigned int val = enable ? 1 : 0;

	if (XGMAC_IOREAD_BITS(pdata, MAC_PFR, PM) == val)
		return 0;

	netif_dbg(pdata, drv, pdata->netdev, "%s allmulti mode\n",
		  enable ? "entering" : "leaving");
	XGMAC_IOWRITE_BITS(pdata, MAC_PFR, PM, val);

	return 0;
}

static void xgbe_set_mac_reg(struct xgbe_prv_data *pdata,
			     struct netdev_hw_addr *ha, unsigned int *mac_reg)
{
	unsigned int mac_addr_hi, mac_addr_lo;
	u8 *mac_addr;

	mac_addr_lo = 0;
	mac_addr_hi = 0;

	if (ha) {
		mac_addr = (u8 *)&mac_addr_lo;
		mac_addr[0] = ha->addr[0];
		mac_addr[1] = ha->addr[1];
		mac_addr[2] = ha->addr[2];
		mac_addr[3] = ha->addr[3];
		mac_addr = (u8 *)&mac_addr_hi;
		mac_addr[0] = ha->addr[4];
		mac_addr[1] = ha->addr[5];

		netif_dbg(pdata, drv, pdata->netdev,
			  "adding mac address %pM at %#x\n",
			  ha->addr, *mac_reg);

		XGMAC_SET_BITS(mac_addr_hi, MAC_MACA1HR, AE, 1);
	}

	XGMAC_IOWRITE(pdata, *mac_reg, mac_addr_hi);
	*mac_reg += MAC_MACA_INC;
	XGMAC_IOWRITE(pdata, *mac_reg, mac_addr_lo);
	*mac_reg += MAC_MACA_INC;
}

static void xgbe_set_mac_addn_addrs(struct xgbe_prv_data *pdata)
{
	struct net_device *netdev = pdata->netdev;
	struct netdev_hw_addr *ha;
	unsigned int mac_reg;
	unsigned int addn_macs;

	mac_reg = MAC_MACA1HR;
	addn_macs = pdata->hw_feat.addn_mac;

	if (netdev_uc_count(netdev) > addn_macs) {
		xgbe_set_promiscuous_mode(pdata, 1);
	} else {
		netdev_for_each_uc_addr(ha, netdev) {
			xgbe_set_mac_reg(pdata, ha, &mac_reg);
			addn_macs--;
		}

		if (netdev_mc_count(netdev) > addn_macs) {
			xgbe_set_all_multicast_mode(pdata, 1);
		} else {
			netdev_for_each_mc_addr(ha, netdev) {
				xgbe_set_mac_reg(pdata, ha, &mac_reg);
				addn_macs--;
			}
		}
	}

	/* Clear remaining additional MAC address entries */
	while (addn_macs--)
		xgbe_set_mac_reg(pdata, NULL, &mac_reg);
}

static void xgbe_set_mac_hash_table(struct xgbe_prv_data *pdata)
{
	struct net_device *netdev = pdata->netdev;
	struct netdev_hw_addr *ha;
	unsigned int hash_reg;
	unsigned int hash_table_shift, hash_table_count;
	u32 hash_table[XGBE_MAC_HASH_TABLE_SIZE];
	u32 crc;
	unsigned int i;

	hash_table_shift = 26 - (pdata->hw_feat.hash_table_size >> 7);
	hash_table_count = pdata->hw_feat.hash_table_size / 32;
	memset(hash_table, 0, sizeof(hash_table));

	/* Build the MAC Hash Table register values */
	netdev_for_each_uc_addr(ha, netdev) {
		crc = bitrev32(~crc32_le(~0, ha->addr, ETH_ALEN));
		crc >>= hash_table_shift;
		hash_table[crc >> 5] |= (1 << (crc & 0x1f));
	}

	netdev_for_each_mc_addr(ha, netdev) {
		crc = bitrev32(~crc32_le(~0, ha->addr, ETH_ALEN));
		crc >>= hash_table_shift;
		hash_table[crc >> 5] |= (1 << (crc & 0x1f));
	}

	/* Set the MAC Hash Table registers */
	hash_reg = MAC_HTR0;
	for (i = 0; i < hash_table_count; i++) {
		XGMAC_IOWRITE(pdata, hash_reg, hash_table[i]);
		hash_reg += MAC_HTR_INC;
	}
}

static int xgbe_add_mac_addresses(struct xgbe_prv_data *pdata)
{
	if (pdata->hw_feat.hash_table_size)
		xgbe_set_mac_hash_table(pdata);
	else
		xgbe_set_mac_addn_addrs(pdata);

	return 0;
}

static int xgbe_set_mac_address(struct xgbe_prv_data *pdata, u8 *addr)
{
	unsigned int mac_addr_hi, mac_addr_lo;

	mac_addr_hi = (addr[5] <<  8) | (addr[4] <<  0);
	mac_addr_lo = (addr[3] << 24) | (addr[2] << 16) |
		      (addr[1] <<  8) | (addr[0] <<  0);

	XGMAC_IOWRITE(pdata, MAC_MACA0HR, mac_addr_hi);
	XGMAC_IOWRITE(pdata, MAC_MACA0LR, mac_addr_lo);

	return 0;
}

static int xgbe_config_rx_mode(struct xgbe_prv_data *pdata)
{
	struct net_device *netdev = pdata->netdev;
	unsigned int pr_mode, am_mode;

	pr_mode = ((netdev->flags & IFF_PROMISC) != 0);
	am_mode = ((netdev->flags & IFF_ALLMULTI) != 0);

	xgbe_set_promiscuous_mode(pdata, pr_mode);
	xgbe_set_all_multicast_mode(pdata, am_mode);

	xgbe_add_mac_addresses(pdata);

	return 0;
}

static int xgbe_clr_gpio(struct xgbe_prv_data *pdata, unsigned int gpio)
{
	unsigned int reg;

	if (gpio > 15)
		return -EINVAL;

	reg = XGMAC_IOREAD(pdata, MAC_GPIOSR);

	reg &= ~(1 << (gpio + 16));
	XGMAC_IOWRITE(pdata, MAC_GPIOSR, reg);

	return 0;
}

static int xgbe_set_gpio(struct xgbe_prv_data *pdata, unsigned int gpio)
{
	unsigned int reg;

	if (gpio > 15)
		return -EINVAL;

	reg = XGMAC_IOREAD(pdata, MAC_GPIOSR);

	reg |= (1 << (gpio + 16));
	XGMAC_IOWRITE(pdata, MAC_GPIOSR, reg);

	return 0;
}

static int xgbe_read_mmd_regs_v2(struct xgbe_prv_data *pdata, int prtad,
				 int mmd_reg)
{
	unsigned long flags;
	unsigned int mmd_address, index, offset;
	int mmd_data;

	if (mmd_reg & MII_ADDR_C45)
		mmd_address = mmd_reg & ~MII_ADDR_C45;
	else
		mmd_address = (pdata->mdio_mmd << 16) | (mmd_reg & 0xffff);

	/* The PCS registers are accessed using mmio. The underlying
	 * management interface uses indirect addressing to access the MMD
	 * register sets. This requires accessing of the PCS register in two
	 * phases, an address phase and a data phase.
	 *
	 * The mmio interface is based on 16-bit offsets and values. All
	 * register offsets must therefore be adjusted by left shifting the
	 * offset 1 bit and reading 16 bits of data.
	 */
	mmd_address <<= 1;
	index = mmd_address & ~pdata->xpcs_window_mask;
	offset = pdata->xpcs_window + (mmd_address & pdata->xpcs_window_mask);

	spin_lock_irqsave(&pdata->xpcs_lock, flags);
	XPCS32_IOWRITE(pdata, pdata->xpcs_window_sel_reg, index);
	mmd_data = XPCS16_IOREAD(pdata, offset);
	spin_unlock_irqrestore(&pdata->xpcs_lock, flags);

	return mmd_data;
}
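/* Worked example with hypothetical values (the real xpcs_window and
 * xpcs_window_mask are device specific): for MMD 1, register 0x0000,
 * mmd_address is 0x10000 and becomes 0x20000 after the shift; with an
 * xpcs_window_mask of 0xffff, 0x20000 is written to the window select
 * register and the 16-bit data access lands at xpcs_window + 0x0000.
 */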
static void xgbe_write_mmd_regs_v2(struct xgbe_prv_data *pdata, int prtad,
				   int mmd_reg, int mmd_data)
{
	unsigned long flags;
	unsigned int mmd_address, index, offset;

	if (mmd_reg & MII_ADDR_C45)
		mmd_address = mmd_reg & ~MII_ADDR_C45;
	else
		mmd_address = (pdata->mdio_mmd << 16) | (mmd_reg & 0xffff);

	/* The PCS registers are accessed using mmio. The underlying
	 * management interface uses indirect addressing to access the MMD
	 * register sets. This requires accessing of the PCS register in two
	 * phases, an address phase and a data phase.
	 *
	 * The mmio interface is based on 16-bit offsets and values. All
	 * register offsets must therefore be adjusted by left shifting the
	 * offset 1 bit and writing 16 bits of data.
	 */
	mmd_address <<= 1;
	index = mmd_address & ~pdata->xpcs_window_mask;
	offset = pdata->xpcs_window + (mmd_address & pdata->xpcs_window_mask);

	spin_lock_irqsave(&pdata->xpcs_lock, flags);
	XPCS32_IOWRITE(pdata, pdata->xpcs_window_sel_reg, index);
	XPCS16_IOWRITE(pdata, offset, mmd_data);
	spin_unlock_irqrestore(&pdata->xpcs_lock, flags);
}

static int xgbe_read_mmd_regs_v1(struct xgbe_prv_data *pdata, int prtad,
				 int mmd_reg)
{
	unsigned long flags;
	unsigned int mmd_address;
	int mmd_data;

	if (mmd_reg & MII_ADDR_C45)
		mmd_address = mmd_reg & ~MII_ADDR_C45;
	else
		mmd_address = (pdata->mdio_mmd << 16) | (mmd_reg & 0xffff);

	/* The PCS registers are accessed using mmio. The underlying APB3
	 * management interface uses indirect addressing to access the MMD
	 * register sets. This requires accessing of the PCS register in two
	 * phases, an address phase and a data phase.
	 *
	 * The mmio interface is based on 32-bit offsets and values. All
	 * register offsets must therefore be adjusted by left shifting the
	 * offset 2 bits and reading 32 bits of data.
	 */
	spin_lock_irqsave(&pdata->xpcs_lock, flags);
	XPCS32_IOWRITE(pdata, PCS_V1_WINDOW_SELECT, mmd_address >> 8);
	mmd_data = XPCS32_IOREAD(pdata, (mmd_address & 0xff) << 2);
	spin_unlock_irqrestore(&pdata->xpcs_lock, flags);

	return mmd_data;
}
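/* Illustrative example for the v1 access above: MMD 1, register 0x0010 gives
 * mmd_address = 0x10010, so 0x100 is written to the window select register
 * and the 32-bit data access uses mmio offset (0x10 << 2) = 0x40.
 */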
static void xgbe_write_mmd_regs_v1(struct xgbe_prv_data *pdata, int prtad,
				   int mmd_reg, int mmd_data)
{
	unsigned int mmd_address;
	unsigned long flags;

	if (mmd_reg & MII_ADDR_C45)
		mmd_address = mmd_reg & ~MII_ADDR_C45;
	else
		mmd_address = (pdata->mdio_mmd << 16) | (mmd_reg & 0xffff);

	/* The PCS registers are accessed using mmio. The underlying APB3
	 * management interface uses indirect addressing to access the MMD
	 * register sets. This requires accessing of the PCS register in two
	 * phases, an address phase and a data phase.
	 *
	 * The mmio interface is based on 32-bit offsets and values. All
	 * register offsets must therefore be adjusted by left shifting the
	 * offset 2 bits and writing 32 bits of data.
	 */
	spin_lock_irqsave(&pdata->xpcs_lock, flags);
	XPCS32_IOWRITE(pdata, PCS_V1_WINDOW_SELECT, mmd_address >> 8);
	XPCS32_IOWRITE(pdata, (mmd_address & 0xff) << 2, mmd_data);
	spin_unlock_irqrestore(&pdata->xpcs_lock, flags);
}

static int xgbe_read_mmd_regs(struct xgbe_prv_data *pdata, int prtad,
			      int mmd_reg)
{
	switch (pdata->vdata->xpcs_access) {
	case XGBE_XPCS_ACCESS_V1:
		return xgbe_read_mmd_regs_v1(pdata, prtad, mmd_reg);

	case XGBE_XPCS_ACCESS_V2:
	default:
		return xgbe_read_mmd_regs_v2(pdata, prtad, mmd_reg);
	}
}

static void xgbe_write_mmd_regs(struct xgbe_prv_data *pdata, int prtad,
				int mmd_reg, int mmd_data)
{
	switch (pdata->vdata->xpcs_access) {
	case XGBE_XPCS_ACCESS_V1:
		return xgbe_write_mmd_regs_v1(pdata, prtad, mmd_reg, mmd_data);

	case XGBE_XPCS_ACCESS_V2:
	default:
		return xgbe_write_mmd_regs_v2(pdata, prtad, mmd_reg, mmd_data);
	}
}

static int xgbe_write_ext_mii_regs(struct xgbe_prv_data *pdata, int addr,
				   int reg, u16 val)
{
	unsigned int mdio_sca, mdio_sccd;

	reinit_completion(&pdata->mdio_complete);

	mdio_sca = 0;
	XGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, REG, reg);
	XGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, DA, addr);
	XGMAC_IOWRITE(pdata, MAC_MDIOSCAR, mdio_sca);

	mdio_sccd = 0;
	XGMAC_SET_BITS(mdio_sccd, MAC_MDIOSCCDR, DATA, val);
	XGMAC_SET_BITS(mdio_sccd, MAC_MDIOSCCDR, CMD, 1);
	XGMAC_SET_BITS(mdio_sccd, MAC_MDIOSCCDR, BUSY, 1);
	XGMAC_IOWRITE(pdata, MAC_MDIOSCCDR, mdio_sccd);

	if (!wait_for_completion_timeout(&pdata->mdio_complete, HZ)) {
		netdev_err(pdata->netdev, "mdio write operation timed out\n");
		return -ETIMEDOUT;
	}

	return 0;
}

static int xgbe_read_ext_mii_regs(struct xgbe_prv_data *pdata, int addr,
				  int reg)
{
	unsigned int mdio_sca, mdio_sccd;

	reinit_completion(&pdata->mdio_complete);

	mdio_sca = 0;
	XGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, REG, reg);
	XGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, DA, addr);
	XGMAC_IOWRITE(pdata, MAC_MDIOSCAR, mdio_sca);

	mdio_sccd = 0;
	XGMAC_SET_BITS(mdio_sccd, MAC_MDIOSCCDR, CMD, 3);
	XGMAC_SET_BITS(mdio_sccd, MAC_MDIOSCCDR, BUSY, 1);
	XGMAC_IOWRITE(pdata, MAC_MDIOSCCDR, mdio_sccd);

	if (!wait_for_completion_timeout(&pdata->mdio_complete, HZ)) {
		netdev_err(pdata->netdev, "mdio read operation timed out\n");
		return -ETIMEDOUT;
	}

	return XGMAC_IOREAD_BITS(pdata, MAC_MDIOSCCDR, DATA);
}

static int xgbe_set_ext_mii_mode(struct xgbe_prv_data *pdata, unsigned int port,
				 enum xgbe_mdio_mode mode)
{
	unsigned int reg_val = XGMAC_IOREAD(pdata, MAC_MDIOCL22R);

	switch (mode) {
	case XGBE_MDIO_MODE_CL22:
		if (port > XGMAC_MAX_C22_PORT)
			return -EINVAL;
		reg_val |= (1 << port);
		break;
	case XGBE_MDIO_MODE_CL45:
		break;
	default:
		return -EINVAL;
	}

	XGMAC_IOWRITE(pdata, MAC_MDIOCL22R, reg_val);

	return 0;
}

static int xgbe_tx_complete(struct xgbe_ring_desc *rdesc)
{
	return !XGMAC_GET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, OWN);
}

static int xgbe_disable_rx_csum(struct xgbe_prv_data *pdata)
{
	XGMAC_IOWRITE_BITS(pdata, MAC_RCR, IPC, 0);

	return 0;
}

static int xgbe_enable_rx_csum(struct xgbe_prv_data *pdata)
{
	XGMAC_IOWRITE_BITS(pdata, MAC_RCR, IPC, 1);

	return 0;
}

static void xgbe_tx_desc_reset(struct xgbe_ring_data *rdata)
{
	struct xgbe_ring_desc *rdesc = rdata->rdesc;

	/* Reset the Tx descriptor
	 *   Set buffer 1 (lo) address to zero
	 *   Set buffer 1 (hi) address to zero
	 *   Reset all other control bits (IC, TTSE, B2L & B1L)
	 *   Reset all other control bits (OWN, CTXT, FD, LD, CPC, CIC, etc)
	 */
	rdesc->desc0 = 0;
	rdesc->desc1 = 0;
	rdesc->desc2 = 0;
	rdesc->desc3 = 0;

	/* Make sure ownership is written to the descriptor */
	dma_wmb();
}

static void xgbe_tx_desc_init(struct xgbe_channel *channel)
{
	struct xgbe_ring *ring = channel->tx_ring;
	struct xgbe_ring_data *rdata;
	int i;
	int start_index = ring->cur;

	DBGPR("-->tx_desc_init\n");

	/* Initialize all descriptors */
	for (i = 0; i < ring->rdesc_count; i++) {
		rdata = XGBE_GET_DESC_DATA(ring, i);

		/* Initialize Tx descriptor */
		xgbe_tx_desc_reset(rdata);
	}

	/* Update the total number of Tx descriptors */
	XGMAC_DMA_IOWRITE(channel, DMA_CH_TDRLR, ring->rdesc_count - 1);

	/* Update the starting address of descriptor ring */
	rdata = XGBE_GET_DESC_DATA(ring, start_index);
	XGMAC_DMA_IOWRITE(channel, DMA_CH_TDLR_HI,
			  upper_32_bits(rdata->rdesc_dma));
	XGMAC_DMA_IOWRITE(channel, DMA_CH_TDLR_LO,
			  lower_32_bits(rdata->rdesc_dma));

	DBGPR("<--tx_desc_init\n");
}

static void xgbe_rx_desc_reset(struct xgbe_prv_data *pdata,
			       struct xgbe_ring_data *rdata, unsigned int index)
{
	struct xgbe_ring_desc *rdesc = rdata->rdesc;
	unsigned int rx_usecs = pdata->rx_usecs;
	unsigned int rx_frames = pdata->rx_frames;
	unsigned int inte;
	dma_addr_t hdr_dma, buf_dma;

	if (!rx_usecs && !rx_frames) {
		/* No coalescing, interrupt for every descriptor */
		inte = 1;
	} else {
		/* Set interrupt based on Rx frame coalescing setting */
		if (rx_frames && !((index + 1) % rx_frames))
			inte = 1;
		else
			inte = 0;
	}

	/* Reset the Rx descriptor
	 *   Set buffer 1 (lo) address to header dma address (lo)
	 *   Set buffer 1 (hi) address to header dma address (hi)
	 *   Set buffer 2 (lo) address to buffer dma address (lo)
	 *   Set buffer 2 (hi) address to buffer dma address (hi) and
	 *     set control bits OWN and INTE
	 */
	hdr_dma = rdata->rx.hdr.dma_base + rdata->rx.hdr.dma_off;
	buf_dma = rdata->rx.buf.dma_base + rdata->rx.buf.dma_off;
	rdesc->desc0 = cpu_to_le32(lower_32_bits(hdr_dma));
	rdesc->desc1 = cpu_to_le32(upper_32_bits(hdr_dma));
	rdesc->desc2 = cpu_to_le32(lower_32_bits(buf_dma));
	rdesc->desc3 = cpu_to_le32(upper_32_bits(buf_dma));

	XGMAC_SET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, INTE, inte);

	/* Since the Rx DMA engine is likely running, make sure everything
	 * is written to the descriptor(s) before setting the OWN bit
	 * for the descriptor
	 */
	dma_wmb();

	XGMAC_SET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, OWN, 1);

	/* Make sure ownership is written to the descriptor */
	dma_wmb();
}
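/* Illustrative note: with a hypothetical rx-frames setting of 16 and rx-usecs
 * of 0, xgbe_rx_desc_reset() above requests an interrupt (INTE) only on
 * descriptors 15, 31, 47, ..., i.e. roughly once per 16 received descriptors.
 */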
static void xgbe_rx_desc_init(struct xgbe_channel *channel)
{
	struct xgbe_prv_data *pdata = channel->pdata;
	struct xgbe_ring *ring = channel->rx_ring;
	struct xgbe_ring_data *rdata;
	unsigned int start_index = ring->cur;
	unsigned int i;

	DBGPR("-->rx_desc_init\n");

	/* Initialize all descriptors */
	for (i = 0; i < ring->rdesc_count; i++) {
		rdata = XGBE_GET_DESC_DATA(ring, i);

		/* Initialize Rx descriptor */
		xgbe_rx_desc_reset(pdata, rdata, i);
	}

	/* Update the total number of Rx descriptors */
	XGMAC_DMA_IOWRITE(channel, DMA_CH_RDRLR, ring->rdesc_count - 1);

	/* Update the starting address of descriptor ring */
	rdata = XGBE_GET_DESC_DATA(ring, start_index);
	XGMAC_DMA_IOWRITE(channel, DMA_CH_RDLR_HI,
			  upper_32_bits(rdata->rdesc_dma));
	XGMAC_DMA_IOWRITE(channel, DMA_CH_RDLR_LO,
			  lower_32_bits(rdata->rdesc_dma));

	/* Update the Rx Descriptor Tail Pointer */
	rdata = XGBE_GET_DESC_DATA(ring, start_index + ring->rdesc_count - 1);
	XGMAC_DMA_IOWRITE(channel, DMA_CH_RDTR_LO,
			  lower_32_bits(rdata->rdesc_dma));

	DBGPR("<--rx_desc_init\n");
}

static void xgbe_update_tstamp_addend(struct xgbe_prv_data *pdata,
				      unsigned int addend)
{
	unsigned int count = 10000;

	/* Set the addend register value and tell the device */
	XGMAC_IOWRITE(pdata, MAC_TSAR, addend);
	XGMAC_IOWRITE_BITS(pdata, MAC_TSCR, TSADDREG, 1);

	/* Wait for addend update to complete */
	while (--count && XGMAC_IOREAD_BITS(pdata, MAC_TSCR, TSADDREG))
		udelay(5);

	if (!count)
		netdev_err(pdata->netdev,
			   "timed out updating timestamp addend register\n");
}

static void xgbe_set_tstamp_time(struct xgbe_prv_data *pdata, unsigned int sec,
				 unsigned int nsec)
{
	unsigned int count = 10000;

	/* Set the time values and tell the device */
	XGMAC_IOWRITE(pdata, MAC_STSUR, sec);
	XGMAC_IOWRITE(pdata, MAC_STNUR, nsec);
	XGMAC_IOWRITE_BITS(pdata, MAC_TSCR, TSINIT, 1);

	/* Wait for time update to complete */
	while (--count && XGMAC_IOREAD_BITS(pdata, MAC_TSCR, TSINIT))
		udelay(5);

	if (!count)
		netdev_err(pdata->netdev, "timed out initializing timestamp\n");
}

static u64 xgbe_get_tstamp_time(struct xgbe_prv_data *pdata)
{
	u64 nsec;

	nsec = XGMAC_IOREAD(pdata, MAC_STSR);
	nsec *= NSEC_PER_SEC;
	nsec += XGMAC_IOREAD(pdata, MAC_STNR);

	return nsec;
}

static u64 xgbe_get_tx_tstamp(struct xgbe_prv_data *pdata)
{
	unsigned int tx_snr, tx_ssr;
	u64 nsec;

	if (pdata->vdata->tx_tstamp_workaround) {
		tx_snr = XGMAC_IOREAD(pdata, MAC_TXSNR);
		tx_ssr = XGMAC_IOREAD(pdata, MAC_TXSSR);
	} else {
		tx_ssr = XGMAC_IOREAD(pdata, MAC_TXSSR);
		tx_snr = XGMAC_IOREAD(pdata, MAC_TXSNR);
	}

	if (XGMAC_GET_BITS(tx_snr, MAC_TXSNR, TXTSSTSMIS))
		return 0;

	nsec = tx_ssr;
	nsec *= NSEC_PER_SEC;
	nsec += tx_snr;

	return nsec;
}

static void xgbe_get_rx_tstamp(struct xgbe_packet_data *packet,
			       struct xgbe_ring_desc *rdesc)
{
	u64 nsec;

	if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_CONTEXT_DESC3, TSA) &&
	    !XGMAC_GET_BITS_LE(rdesc->desc3, RX_CONTEXT_DESC3, TSD)) {
		nsec = le32_to_cpu(rdesc->desc1);
		nsec <<= 32;
		nsec |= le32_to_cpu(rdesc->desc0);
		if (nsec != 0xffffffffffffffffULL) {
			packet->rx_tstamp = nsec;
			XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
				       RX_TSTAMP, 1);
		}
	}
}

static int xgbe_config_tstamp(struct xgbe_prv_data *pdata,
			      unsigned int mac_tscr)
{
	/* Set one nano-second accuracy */
	XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSCTRLSSR, 1);

	/* Set fine timestamp update */
	XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSCFUPDT, 1);

	/* Overwrite earlier timestamps */
	XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TXTSSTSM, 1);

	XGMAC_IOWRITE(pdata, MAC_TSCR, mac_tscr);

	/* Exit if timestamping is not enabled */
	if (!XGMAC_GET_BITS(mac_tscr, MAC_TSCR, TSENA))
		return 0;

	/* Initialize time registers */
	XGMAC_IOWRITE_BITS(pdata, MAC_SSIR, SSINC, XGBE_TSTAMP_SSINC);
	XGMAC_IOWRITE_BITS(pdata, MAC_SSIR, SNSINC, XGBE_TSTAMP_SNSINC);
	xgbe_update_tstamp_addend(pdata, pdata->tstamp_addend);
	xgbe_set_tstamp_time(pdata, 0, 0);

	/* Initialize the timecounter */
	timecounter_init(&pdata->tstamp_tc, &pdata->tstamp_cc,
			 ktime_to_ns(ktime_get_real()));

	return 0;
}

static void xgbe_tx_start_xmit(struct xgbe_channel *channel,
			       struct xgbe_ring *ring)
{
	struct xgbe_prv_data *pdata = channel->pdata;
	struct xgbe_ring_data *rdata;

	/* Make sure everything is written before the register write */
	wmb();

	/* Issue a poll command to Tx DMA by writing address
	 * of next immediate free descriptor */
	rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
	XGMAC_DMA_IOWRITE(channel, DMA_CH_TDTR_LO,
			  lower_32_bits(rdata->rdesc_dma));

	/* Start the Tx timer */
	if (pdata->tx_usecs && !channel->tx_timer_active) {
		channel->tx_timer_active = 1;
		mod_timer(&channel->tx_timer,
			  jiffies + usecs_to_jiffies(pdata->tx_usecs));
	}

	ring->tx.xmit_more = 0;
}

static void xgbe_dev_xmit(struct xgbe_channel *channel)
{
	struct xgbe_prv_data *pdata = channel->pdata;
	struct xgbe_ring *ring = channel->tx_ring;
	struct xgbe_ring_data *rdata;
	struct xgbe_ring_desc *rdesc;
	struct xgbe_packet_data *packet = &ring->packet_data;
	unsigned int csum, tso, vlan;
	unsigned int tso_context, vlan_context;
	unsigned int tx_set_ic;
	int start_index = ring->cur;
	int cur_index = ring->cur;
	int i;

	DBGPR("-->xgbe_dev_xmit\n");

	csum = XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
			      CSUM_ENABLE);
	tso = XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
			     TSO_ENABLE);
	vlan = XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
			      VLAN_CTAG);

	if (tso && (packet->mss != ring->tx.cur_mss))
		tso_context = 1;
	else
		tso_context = 0;

	if (vlan && (packet->vlan_ctag != ring->tx.cur_vlan_ctag))
		vlan_context = 1;
	else
		vlan_context = 0;

	/* Determine if an interrupt should be generated for this Tx:
	 *   Interrupt:
	 *     - Tx frame count exceeds the frame count setting
	 *     - Addition of Tx frame count to the frame count since the
	 *       last interrupt was set exceeds the frame count setting
	 *   No interrupt:
	 *     - No frame count setting specified (ethtool -C ethX tx-frames 0)
	 *     - Addition of Tx frame count to the frame count since the
	 *       last interrupt was set does not exceed the frame count setting
	 */
	ring->coalesce_count += packet->tx_packets;
	if (!pdata->tx_frames)
		tx_set_ic = 0;
	else if (packet->tx_packets > pdata->tx_frames)
		tx_set_ic = 1;
	else if ((ring->coalesce_count % pdata->tx_frames) <
		 packet->tx_packets)
		tx_set_ic = 1;
	else
		tx_set_ic = 0;
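	/* Illustrative example: with a hypothetical "ethtool -C ethX
	 * tx-frames 32" setting and single-packet submissions, the check
	 * above requests the IC bit only when coalesce_count crosses a
	 * multiple of 32, i.e. roughly one Tx completion interrupt per
	 * 32 transmitted packets.
	 */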

	rdata = XGBE_GET_DESC_DATA(ring, cur_index);
	rdesc = rdata->rdesc;

	/* Create a context descriptor if this is a TSO or VLAN packet */
	if (tso_context || vlan_context) {
		if (tso_context) {
			netif_dbg(pdata, tx_queued, pdata->netdev,
				  "TSO context descriptor, mss=%u\n",
				  packet->mss);

			/* Set the MSS size */
			XGMAC_SET_BITS_LE(rdesc->desc2, TX_CONTEXT_DESC2,
					  MSS, packet->mss);

			/* Mark it as a CONTEXT descriptor */
			XGMAC_SET_BITS_LE(rdesc->desc3, TX_CONTEXT_DESC3,
					  CTXT, 1);

			/* Indicate this descriptor contains the MSS */
			XGMAC_SET_BITS_LE(rdesc->desc3, TX_CONTEXT_DESC3,
					  TCMSSV, 1);

			ring->tx.cur_mss = packet->mss;
		}

		if (vlan_context) {
			netif_dbg(pdata, tx_queued, pdata->netdev,
				  "VLAN context descriptor, ctag=%u\n",
				  packet->vlan_ctag);

			/* Mark it as a CONTEXT descriptor */
			XGMAC_SET_BITS_LE(rdesc->desc3, TX_CONTEXT_DESC3,
					  CTXT, 1);

			/* Set the VLAN tag */
			XGMAC_SET_BITS_LE(rdesc->desc3, TX_CONTEXT_DESC3,
					  VT, packet->vlan_ctag);

			/* Indicate this descriptor contains the VLAN tag */
			XGMAC_SET_BITS_LE(rdesc->desc3, TX_CONTEXT_DESC3,
					  VLTV, 1);

			ring->tx.cur_vlan_ctag = packet->vlan_ctag;
		}

		cur_index++;
		rdata = XGBE_GET_DESC_DATA(ring, cur_index);
		rdesc = rdata->rdesc;
	}

	/* Update buffer address (for TSO this is the header) */
	rdesc->desc0 = cpu_to_le32(lower_32_bits(rdata->skb_dma));
	rdesc->desc1 = cpu_to_le32(upper_32_bits(rdata->skb_dma));

	/* Update the buffer length */
	XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, HL_B1L,
			  rdata->skb_dma_len);

	/* VLAN tag insertion check */
	if (vlan)
		XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, VTIR,
				  TX_NORMAL_DESC2_VLAN_INSERT);

	/* Timestamp enablement check */
	if (XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES, PTP))
		XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, TTSE, 1);

	/* Mark it as First Descriptor */
	XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, FD, 1);

	/* Mark it as a NORMAL descriptor */
	XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, CTXT, 0);

	/* Set OWN bit if not the first descriptor */
	if (cur_index != start_index)
		XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, OWN, 1);

	if (tso) {
		/* Enable TSO */
		XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, TSE, 1);
		XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, TCPPL,
				  packet->tcp_payload_len);
		XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, TCPHDRLEN,
				  packet->tcp_header_len / 4);

		pdata->ext_stats.tx_tso_packets++;
	} else {
		/* Enable CRC and Pad Insertion */
		XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, CPC, 0);

		/* Enable HW CSUM */
		if (csum)
			XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3,
					  CIC, 0x3);

		/* Set the total length to be transmitted */
		XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, FL,
				  packet->length);
	}

	for (i = cur_index - start_index + 1; i < packet->rdesc_count; i++) {
		cur_index++;
		rdata = XGBE_GET_DESC_DATA(ring, cur_index);
		rdesc = rdata->rdesc;

		/* Update buffer address */
		rdesc->desc0 = cpu_to_le32(lower_32_bits(rdata->skb_dma));
		rdesc->desc1 = cpu_to_le32(upper_32_bits(rdata->skb_dma));

		/* Update the buffer length */
		XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, HL_B1L,
				  rdata->skb_dma_len);

		/* Set OWN bit */
		XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, OWN, 1);

		/* Mark it as NORMAL descriptor */
		XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, CTXT, 0);

		/* Enable HW CSUM */
		if (csum)
			XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3,
					  CIC, 0x3);
	}

	/* Set LAST bit for the last descriptor */
	XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, LD, 1);

	/* Set IC bit based on Tx coalescing settings */
	if (tx_set_ic)
		XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, IC, 1);

	/* Save the Tx info to report back during cleanup */
	rdata->tx.packets = packet->tx_packets;
	rdata->tx.bytes = packet->tx_bytes;

	/* In case the Tx DMA engine is running, make sure everything
	 * is written to the descriptor(s) before setting the OWN bit
	 * for the first descriptor
	 */
	dma_wmb();

	/* Set OWN bit for the first descriptor */
	rdata = XGBE_GET_DESC_DATA(ring, start_index);
	rdesc = rdata->rdesc;
	XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, OWN, 1);

	if (netif_msg_tx_queued(pdata))
		xgbe_dump_tx_desc(pdata, ring, start_index,
				  packet->rdesc_count, 1);

	/* Make sure ownership is written to the descriptor */
	smp_wmb();

	ring->cur = cur_index + 1;
	if (!packet->skb->xmit_more ||
	    netif_xmit_stopped(netdev_get_tx_queue(pdata->netdev,
						   channel->queue_index)))
		xgbe_tx_start_xmit(channel, ring);
	else
		ring->tx.xmit_more = 1;

	DBGPR("  %s: descriptors %u to %u written\n",
	      channel->name, start_index & (ring->rdesc_count - 1),
	      (ring->cur - 1) & (ring->rdesc_count - 1));

	DBGPR("<--xgbe_dev_xmit\n");
}

static int xgbe_dev_read(struct xgbe_channel *channel)
{
	struct xgbe_prv_data *pdata = channel->pdata;
	struct xgbe_ring *ring = channel->rx_ring;
	struct xgbe_ring_data *rdata;
	struct xgbe_ring_desc *rdesc;
	struct xgbe_packet_data *packet = &ring->packet_data;
	struct net_device *netdev = pdata->netdev;
	unsigned int err, etlt, l34t;

	DBGPR("-->xgbe_dev_read: cur = %d\n", ring->cur);

	rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
	rdesc = rdata->rdesc;

	/* Check for data availability */
	if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, OWN))
		return 1;

	/* Make sure descriptor fields are read after reading the OWN bit */
	dma_rmb();

	if (netif_msg_rx_status(pdata))
		xgbe_dump_rx_desc(pdata, ring, ring->cur);

	if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, CTXT)) {
		/* Timestamp Context Descriptor */
		xgbe_get_rx_tstamp(packet, rdesc);

		XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
			       CONTEXT, 1);
		XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
			       CONTEXT_NEXT, 0);
		return 0;
	}

	/* Normal Descriptor, be sure Context Descriptor bit is off */
	XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, CONTEXT, 0);

	/* Indicate if a Context Descriptor is next */
	if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, CDA))
		XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
			       CONTEXT_NEXT, 1);

	/* Get the header length */
	if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, FD)) {
		XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
FIRST, 1); 1875 rdata->rx.hdr_len = XGMAC_GET_BITS_LE(rdesc->desc2, 1876 RX_NORMAL_DESC2, HL); 1877 if (rdata->rx.hdr_len) 1878 pdata->ext_stats.rx_split_header_packets++; 1879 } else { 1880 XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, 1881 FIRST, 0); 1882 } 1883 1884 /* Get the RSS hash */ 1885 if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, RSV)) { 1886 XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, 1887 RSS_HASH, 1); 1888 1889 packet->rss_hash = le32_to_cpu(rdesc->desc1); 1890 1891 l34t = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, L34T); 1892 switch (l34t) { 1893 case RX_DESC3_L34T_IPV4_TCP: 1894 case RX_DESC3_L34T_IPV4_UDP: 1895 case RX_DESC3_L34T_IPV6_TCP: 1896 case RX_DESC3_L34T_IPV6_UDP: 1897 packet->rss_hash_type = PKT_HASH_TYPE_L4; 1898 break; 1899 default: 1900 packet->rss_hash_type = PKT_HASH_TYPE_L3; 1901 } 1902 } 1903 1904 /* Not all the data has been transferred for this packet */ 1905 if (!XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, LD)) 1906 return 0; 1907 1908 /* This is the last of the data for this packet */ 1909 XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, 1910 LAST, 1); 1911 1912 /* Get the packet length */ 1913 rdata->rx.len = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, PL); 1914 1915 /* Set checksum done indicator as appropriate */ 1916 if (netdev->features & NETIF_F_RXCSUM) 1917 XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, 1918 CSUM_DONE, 1); 1919 1920 /* Check for errors (only valid in last descriptor) */ 1921 err = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, ES); 1922 etlt = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, ETLT); 1923 netif_dbg(pdata, rx_status, netdev, "err=%u, etlt=%#x\n", err, etlt); 1924 1925 if (!err || !etlt) { 1926 /* No error if err is 0 or etlt is 0 */ 1927 if ((etlt == 0x09) && 1928 (netdev->features & NETIF_F_HW_VLAN_CTAG_RX)) { 1929 XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, 1930 VLAN_CTAG, 1); 1931 packet->vlan_ctag = XGMAC_GET_BITS_LE(rdesc->desc0, 1932 RX_NORMAL_DESC0, 1933 OVT); 1934 netif_dbg(pdata, rx_status, netdev, "vlan-ctag=%#06x\n", 1935 packet->vlan_ctag); 1936 } 1937 } else { 1938 if ((etlt == 0x05) || (etlt == 0x06)) 1939 XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, 1940 CSUM_DONE, 0); 1941 else 1942 XGMAC_SET_BITS(packet->errors, RX_PACKET_ERRORS, 1943 FRAME, 1); 1944 } 1945 1946 DBGPR("<--xgbe_dev_read: %s - descriptor=%u (cur=%d)\n", channel->name, 1947 ring->cur & (ring->rdesc_count - 1), ring->cur); 1948 1949 return 0; 1950 } 1951 1952 static int xgbe_is_context_desc(struct xgbe_ring_desc *rdesc) 1953 { 1954 /* Rx and Tx share CTXT bit, so check TDES3.CTXT bit */ 1955 return XGMAC_GET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, CTXT); 1956 } 1957 1958 static int xgbe_is_last_desc(struct xgbe_ring_desc *rdesc) 1959 { 1960 /* Rx and Tx share LD bit, so check TDES3.LD bit */ 1961 return XGMAC_GET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, LD); 1962 } 1963 1964 static int xgbe_enable_int(struct xgbe_channel *channel, 1965 enum xgbe_int int_id) 1966 { 1967 unsigned int dma_ch_ier; 1968 1969 dma_ch_ier = XGMAC_DMA_IOREAD(channel, DMA_CH_IER); 1970 1971 switch (int_id) { 1972 case XGMAC_INT_DMA_CH_SR_TI: 1973 XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, TIE, 1); 1974 break; 1975 case XGMAC_INT_DMA_CH_SR_TPS: 1976 XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, TXSE, 1); 1977 break; 1978 case XGMAC_INT_DMA_CH_SR_TBU: 1979 XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, TBUE, 1); 1980 break; 1981 case XGMAC_INT_DMA_CH_SR_RI: 1982 XGMAC_SET_BITS(dma_ch_ier, 
DMA_CH_IER, RIE, 1); 1983 break; 1984 case XGMAC_INT_DMA_CH_SR_RBU: 1985 XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RBUE, 1); 1986 break; 1987 case XGMAC_INT_DMA_CH_SR_RPS: 1988 XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RSE, 1); 1989 break; 1990 case XGMAC_INT_DMA_CH_SR_TI_RI: 1991 XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, TIE, 1); 1992 XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RIE, 1); 1993 break; 1994 case XGMAC_INT_DMA_CH_SR_FBE: 1995 XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, FBEE, 1); 1996 break; 1997 case XGMAC_INT_DMA_ALL: 1998 dma_ch_ier |= channel->saved_ier; 1999 break; 2000 default: 2001 return -1; 2002 } 2003 2004 XGMAC_DMA_IOWRITE(channel, DMA_CH_IER, dma_ch_ier); 2005 2006 return 0; 2007 } 2008 2009 static int xgbe_disable_int(struct xgbe_channel *channel, 2010 enum xgbe_int int_id) 2011 { 2012 unsigned int dma_ch_ier; 2013 2014 dma_ch_ier = XGMAC_DMA_IOREAD(channel, DMA_CH_IER); 2015 2016 switch (int_id) { 2017 case XGMAC_INT_DMA_CH_SR_TI: 2018 XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, TIE, 0); 2019 break; 2020 case XGMAC_INT_DMA_CH_SR_TPS: 2021 XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, TXSE, 0); 2022 break; 2023 case XGMAC_INT_DMA_CH_SR_TBU: 2024 XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, TBUE, 0); 2025 break; 2026 case XGMAC_INT_DMA_CH_SR_RI: 2027 XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RIE, 0); 2028 break; 2029 case XGMAC_INT_DMA_CH_SR_RBU: 2030 XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RBUE, 0); 2031 break; 2032 case XGMAC_INT_DMA_CH_SR_RPS: 2033 XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RSE, 0); 2034 break; 2035 case XGMAC_INT_DMA_CH_SR_TI_RI: 2036 XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, TIE, 0); 2037 XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RIE, 0); 2038 break; 2039 case XGMAC_INT_DMA_CH_SR_FBE: 2040 XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, FBEE, 0); 2041 break; 2042 case XGMAC_INT_DMA_ALL: 2043 channel->saved_ier = dma_ch_ier & XGBE_DMA_INTERRUPT_MASK; 2044 dma_ch_ier &= ~XGBE_DMA_INTERRUPT_MASK; 2045 break; 2046 default: 2047 return -1; 2048 } 2049 2050 XGMAC_DMA_IOWRITE(channel, DMA_CH_IER, dma_ch_ier); 2051 2052 return 0; 2053 } 2054 2055 static int __xgbe_exit(struct xgbe_prv_data *pdata) 2056 { 2057 unsigned int count = 2000; 2058 2059 DBGPR("-->xgbe_exit\n"); 2060 2061 /* Issue a software reset */ 2062 XGMAC_IOWRITE_BITS(pdata, DMA_MR, SWR, 1); 2063 usleep_range(10, 15); 2064 2065 /* Poll Until Poll Condition */ 2066 while (--count && XGMAC_IOREAD_BITS(pdata, DMA_MR, SWR)) 2067 usleep_range(500, 600); 2068 2069 if (!count) 2070 return -EBUSY; 2071 2072 DBGPR("<--xgbe_exit\n"); 2073 2074 return 0; 2075 } 2076 2077 static int xgbe_exit(struct xgbe_prv_data *pdata) 2078 { 2079 int ret; 2080 2081 /* To guard against possible incorrectly generated interrupts, 2082 * issue the software reset twice. 
	 */
	ret = __xgbe_exit(pdata);
	if (ret)
		return ret;

	return __xgbe_exit(pdata);
}

static int xgbe_flush_tx_queues(struct xgbe_prv_data *pdata)
{
	unsigned int i, count;

	if (XGMAC_GET_BITS(pdata->hw_feat.version, MAC_VR, SNPSVER) < 0x21)
		return 0;

	for (i = 0; i < pdata->tx_q_count; i++)
		XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, FTQ, 1);

	/* Poll Until Poll Condition */
	for (i = 0; i < pdata->tx_q_count; i++) {
		count = 2000;
		while (--count && XGMAC_MTL_IOREAD_BITS(pdata, i,
							MTL_Q_TQOMR, FTQ))
			usleep_range(500, 600);

		if (!count)
			return -EBUSY;
	}

	return 0;
}

static void xgbe_config_dma_bus(struct xgbe_prv_data *pdata)
{
	unsigned int sbmr;

	sbmr = XGMAC_IOREAD(pdata, DMA_SBMR);

	/* Set enhanced addressing mode */
	XGMAC_SET_BITS(sbmr, DMA_SBMR, EAME, 1);

	/* Set the System Bus mode */
	XGMAC_SET_BITS(sbmr, DMA_SBMR, UNDEF, 1);
	XGMAC_SET_BITS(sbmr, DMA_SBMR, BLEN, pdata->blen >> 2);
	XGMAC_SET_BITS(sbmr, DMA_SBMR, AAL, pdata->aal);
	XGMAC_SET_BITS(sbmr, DMA_SBMR, RD_OSR_LMT, pdata->rd_osr_limit - 1);
	XGMAC_SET_BITS(sbmr, DMA_SBMR, WR_OSR_LMT, pdata->wr_osr_limit - 1);

	XGMAC_IOWRITE(pdata, DMA_SBMR, sbmr);

	/* Set descriptor fetching threshold */
	if (pdata->vdata->tx_desc_prefetch)
		XGMAC_IOWRITE_BITS(pdata, DMA_TXEDMACR, TDPS,
				   pdata->vdata->tx_desc_prefetch);

	if (pdata->vdata->rx_desc_prefetch)
		XGMAC_IOWRITE_BITS(pdata, DMA_RXEDMACR, RDPS,
				   pdata->vdata->rx_desc_prefetch);
}

static void xgbe_config_dma_cache(struct xgbe_prv_data *pdata)
{
	XGMAC_IOWRITE(pdata, DMA_AXIARCR, pdata->arcr);
	XGMAC_IOWRITE(pdata, DMA_AXIAWCR, pdata->awcr);
	if (pdata->awarcr)
		XGMAC_IOWRITE(pdata, DMA_AXIAWARCR, pdata->awarcr);
}

static void xgbe_config_mtl_mode(struct xgbe_prv_data *pdata)
{
	unsigned int i;

	/* Set Tx to weighted round robin scheduling algorithm */
	XGMAC_IOWRITE_BITS(pdata, MTL_OMR, ETSALG, MTL_ETSALG_WRR);

	/* Set Tx traffic classes to use WRR algorithm with equal weights */
	for (i = 0; i < pdata->hw_feat.tc_cnt; i++) {
		XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_ETSCR, TSA,
				       MTL_TSA_ETS);
		XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_QWR, QW, 1);
	}

	/* Set Rx to strict priority algorithm */
	XGMAC_IOWRITE_BITS(pdata, MTL_OMR, RAA, MTL_RAA_SP);
}

static void xgbe_queue_flow_control_threshold(struct xgbe_prv_data *pdata,
					      unsigned int queue,
					      unsigned int q_fifo_size)
{
	unsigned int frame_fifo_size;
	unsigned int rfa, rfd;

	frame_fifo_size = XGMAC_FLOW_CONTROL_ALIGN(xgbe_get_max_frame(pdata));

	if (pdata->pfcq[queue] && (q_fifo_size > pdata->pfc_rfa)) {
		/* PFC is active for this queue */
		rfa = pdata->pfc_rfa;
		rfd = rfa + frame_fifo_size;
		if (rfd > XGMAC_FLOW_CONTROL_MAX)
			rfd = XGMAC_FLOW_CONTROL_MAX;
		if (rfa >= XGMAC_FLOW_CONTROL_MAX)
			rfa = XGMAC_FLOW_CONTROL_MAX - XGMAC_FLOW_CONTROL_UNIT;
	} else {
		/* This path deals with just maximum frame sizes which are
		 * limited to a jumbo frame of 9,000 (plus headers, etc.)
		 * so we can never exceed the maximum allowable RFA/RFD
		 * values.
		 */
		if (q_fifo_size <= 2048) {
			/* Set rx_rfa and rx_rfd to zero to signal no flow control */
			pdata->rx_rfa[queue] = 0;
			pdata->rx_rfd[queue] = 0;
			return;
		}

		if (q_fifo_size <= 4096) {
			/* Between 2048 and 4096 */
			pdata->rx_rfa[queue] = 0;	/* Full - 1024 bytes */
			pdata->rx_rfd[queue] = 1;	/* Full - 1536 bytes */
			return;
		}

		if (q_fifo_size <= frame_fifo_size) {
			/* Between 4096 and max-frame */
			pdata->rx_rfa[queue] = 2;	/* Full - 2048 bytes */
			pdata->rx_rfd[queue] = 5;	/* Full - 3584 bytes */
			return;
		}

		if (q_fifo_size <= (frame_fifo_size * 3)) {
			/* Between max-frame and 3 max-frames,
			 * trigger if we get just over a frame of data and
			 * resume when we have just under half a frame left.
			 */
			rfa = q_fifo_size - frame_fifo_size;
			rfd = rfa + (frame_fifo_size / 2);
		} else {
			/* Above 3 max-frames - trigger when just over
			 * 2 frames of space available
			 */
			rfa = frame_fifo_size * 2;
			rfa += XGMAC_FLOW_CONTROL_UNIT;
			rfd = rfa + frame_fifo_size;
		}
	}

	/* Convert the byte thresholds into the RFA/RFD register encoding */
	pdata->rx_rfa[queue] = XGMAC_FLOW_CONTROL_VALUE(rfa);
	pdata->rx_rfd[queue] = XGMAC_FLOW_CONTROL_VALUE(rfd);
}

static void xgbe_calculate_flow_control_threshold(struct xgbe_prv_data *pdata,
						  unsigned int *fifo)
{
	unsigned int q_fifo_size;
	unsigned int i;

	for (i = 0; i < pdata->rx_q_count; i++) {
		q_fifo_size = (fifo[i] + 1) * XGMAC_FIFO_UNIT;

		xgbe_queue_flow_control_threshold(pdata, i, q_fifo_size);
	}
}

static void xgbe_config_flow_control_threshold(struct xgbe_prv_data *pdata)
{
	unsigned int i;

	for (i = 0; i < pdata->rx_q_count; i++) {
		XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQFCR, RFA,
				       pdata->rx_rfa[i]);
		XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQFCR, RFD,
				       pdata->rx_rfd[i]);
	}
}

static unsigned int xgbe_get_tx_fifo_size(struct xgbe_prv_data *pdata)
{
	/* The configured value may not be the actual amount of fifo RAM */
	return min_t(unsigned int, pdata->tx_max_fifo_size,
		     pdata->hw_feat.tx_fifo_size);
}

static unsigned int xgbe_get_rx_fifo_size(struct xgbe_prv_data *pdata)
{
	/* The configured value may not be the actual amount of fifo RAM */
	return min_t(unsigned int, pdata->rx_max_fifo_size,
		     pdata->hw_feat.rx_fifo_size);
}

static void xgbe_calculate_equal_fifo(unsigned int fifo_size,
				      unsigned int queue_count,
				      unsigned int *fifo)
{
	unsigned int q_fifo_size;
	unsigned int p_fifo;
	unsigned int i;

	q_fifo_size = fifo_size / queue_count;

	/* Calculate the fifo setting by dividing the queue's fifo size
	 * by the fifo allocation increment (with 0 representing the
	 * base allocation increment so decrement the result by 1).
	 */
	p_fifo = q_fifo_size / XGMAC_FIFO_UNIT;
	if (p_fifo)
		p_fifo--;

	/* Distribute the fifo equally amongst the queues */
	for (i = 0; i < queue_count; i++)
		fifo[i] = p_fifo;
}

static unsigned int xgbe_set_nonprio_fifos(unsigned int fifo_size,
					   unsigned int queue_count,
					   unsigned int *fifo)
{
	unsigned int i;

	BUILD_BUG_ON_NOT_POWER_OF_2(XGMAC_FIFO_MIN_ALLOC);

	if (queue_count <= IEEE_8021QAZ_MAX_TCS)
		return fifo_size;

	/* Rx queues 9 and up are for specialized packets,
	 * such as PTP or DCB control packets, etc.
	 * and don't require a large fifo
	 */
	for (i = IEEE_8021QAZ_MAX_TCS; i < queue_count; i++) {
		fifo[i] = (XGMAC_FIFO_MIN_ALLOC / XGMAC_FIFO_UNIT) - 1;
		fifo_size -= XGMAC_FIFO_MIN_ALLOC;
	}

	return fifo_size;
}

static unsigned int xgbe_get_pfc_delay(struct xgbe_prv_data *pdata)
{
	unsigned int delay;

	/* If a delay has been provided, use that */
	if (pdata->pfc->delay)
		return pdata->pfc->delay / 8;

	/* Allow for two maximum size frames */
	delay = xgbe_get_max_frame(pdata);
	delay += XGMAC_ETH_PREAMBLE;
	delay *= 2;

	/* Allow for PFC frame */
	delay += XGMAC_PFC_DATA_LEN;
	delay += ETH_HLEN + ETH_FCS_LEN;
	delay += XGMAC_ETH_PREAMBLE;

	/* Allow for miscellaneous delays (LPI exit, cable, etc.) */
	delay += XGMAC_PFC_DELAYS;

	return delay;
}

static unsigned int xgbe_get_pfc_queues(struct xgbe_prv_data *pdata)
{
	unsigned int count, prio_queues;
	unsigned int i;

	if (!pdata->pfc->pfc_en)
		return 0;

	count = 0;
	prio_queues = XGMAC_PRIO_QUEUES(pdata->rx_q_count);
	for (i = 0; i < prio_queues; i++) {
		if (!xgbe_is_pfc_queue(pdata, i))
			continue;

		pdata->pfcq[i] = 1;
		count++;
	}

	return count;
}

static void xgbe_calculate_dcb_fifo(struct xgbe_prv_data *pdata,
				    unsigned int fifo_size,
				    unsigned int *fifo)
{
	unsigned int q_fifo_size, rem_fifo, addn_fifo;
	unsigned int prio_queues;
	unsigned int pfc_count;
	unsigned int i;

	q_fifo_size = XGMAC_FIFO_ALIGN(xgbe_get_max_frame(pdata));
	prio_queues = XGMAC_PRIO_QUEUES(pdata->rx_q_count);
	pfc_count = xgbe_get_pfc_queues(pdata);

	if (!pfc_count || ((q_fifo_size * prio_queues) > fifo_size)) {
		/* No traffic classes with PFC enabled or can't do lossless */
		xgbe_calculate_equal_fifo(fifo_size, prio_queues, fifo);
		return;
	}

	/* Calculate how much fifo we have to play with */
	rem_fifo = fifo_size - (q_fifo_size * prio_queues);

	/* Calculate how much more than base fifo PFC needs, which also
	 * becomes the threshold activation point (RFA)
	 */
	pdata->pfc_rfa = xgbe_get_pfc_delay(pdata);
	pdata->pfc_rfa = XGMAC_FLOW_CONTROL_ALIGN(pdata->pfc_rfa);

	if (pdata->pfc_rfa > q_fifo_size) {
		addn_fifo = pdata->pfc_rfa - q_fifo_size;
		addn_fifo = XGMAC_FIFO_ALIGN(addn_fifo);
	} else {
		addn_fifo = 0;
	}

	/* Calculate DCB fifo settings:
	 *   - distribute remaining fifo between the VLAN priority
	 *     queues based on traffic class PFC enablement and overall
	 *     priority (0 is lowest priority, so start at highest)
	 */
	i = prio_queues;
	while (i > 0) {
		i--;

		fifo[i] = (q_fifo_size / XGMAC_FIFO_UNIT) - 1;

		if (!pdata->pfcq[i] || !addn_fifo)
			continue;

		if (addn_fifo > rem_fifo) {
			netdev_warn(pdata->netdev,
				    "RXq%u cannot set needed fifo size\n", i);
			if (!rem_fifo)
				continue;

			addn_fifo = rem_fifo;
		}

		fifo[i] += (addn_fifo / XGMAC_FIFO_UNIT);
		rem_fifo -= addn_fifo;
	}

	if (rem_fifo) {
		unsigned int inc_fifo = rem_fifo / prio_queues;

		/* Distribute remaining fifo across queues */
		for (i = 0; i < prio_queues; i++)
			fifo[i] += (inc_fifo / XGMAC_FIFO_UNIT);
	}
}

static void xgbe_config_tx_fifo_size(struct xgbe_prv_data *pdata)
{
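	/* Split the usable Tx fifo RAM equally across the hardware Tx
	 * queues; the per-queue value programmed into TQS below is
	 * expressed in XGMAC_FIFO_UNIT increments, less one, as returned
	 * by xgbe_calculate_equal_fifo().
	 */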
	unsigned int fifo_size;
	unsigned int fifo[XGBE_MAX_QUEUES];
	unsigned int i;

	fifo_size = xgbe_get_tx_fifo_size(pdata);

	xgbe_calculate_equal_fifo(fifo_size, pdata->tx_q_count, fifo);

	for (i = 0; i < pdata->tx_q_count; i++)
		XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TQS, fifo[i]);

	netif_info(pdata, drv, pdata->netdev,
		   "%d Tx hardware queues, %d byte fifo per queue\n",
		   pdata->tx_q_count, ((fifo[0] + 1) * XGMAC_FIFO_UNIT));
}

static void xgbe_config_rx_fifo_size(struct xgbe_prv_data *pdata)
{
	unsigned int fifo_size;
	unsigned int fifo[XGBE_MAX_QUEUES];
	unsigned int prio_queues;
	unsigned int i;

	/* Clear any DCB related fifo/queue information */
	memset(pdata->pfcq, 0, sizeof(pdata->pfcq));
	pdata->pfc_rfa = 0;

	fifo_size = xgbe_get_rx_fifo_size(pdata);
	prio_queues = XGMAC_PRIO_QUEUES(pdata->rx_q_count);

	/* Assign a minimum fifo to the non-VLAN priority queues */
	fifo_size = xgbe_set_nonprio_fifos(fifo_size, pdata->rx_q_count, fifo);

	if (pdata->pfc && pdata->ets)
		xgbe_calculate_dcb_fifo(pdata, fifo_size, fifo);
	else
		xgbe_calculate_equal_fifo(fifo_size, prio_queues, fifo);

	for (i = 0; i < pdata->rx_q_count; i++)
		XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RQS, fifo[i]);

	xgbe_calculate_flow_control_threshold(pdata, fifo);
	xgbe_config_flow_control_threshold(pdata);

	if (pdata->pfc && pdata->ets && pdata->pfc->pfc_en) {
		netif_info(pdata, drv, pdata->netdev,
			   "%u Rx hardware queues\n", pdata->rx_q_count);
		for (i = 0; i < pdata->rx_q_count; i++)
			netif_info(pdata, drv, pdata->netdev,
				   "RxQ%u, %u byte fifo queue\n", i,
				   ((fifo[i] + 1) * XGMAC_FIFO_UNIT));
	} else {
		netif_info(pdata, drv, pdata->netdev,
			   "%u Rx hardware queues, %u byte fifo per queue\n",
			   pdata->rx_q_count,
			   ((fifo[0] + 1) * XGMAC_FIFO_UNIT));
	}
}

static void xgbe_config_queue_mapping(struct xgbe_prv_data *pdata)
{
	unsigned int qptc, qptc_extra, queue;
	unsigned int prio_queues;
	unsigned int ppq, ppq_extra, prio;
	unsigned int mask;
	unsigned int i, j, reg, reg_val;

	/* Map the MTL Tx Queues to Traffic Classes
	 * Note: Tx Queues >= Traffic Classes
	 */
	qptc = pdata->tx_q_count / pdata->hw_feat.tc_cnt;
	qptc_extra = pdata->tx_q_count % pdata->hw_feat.tc_cnt;

	for (i = 0, queue = 0; i < pdata->hw_feat.tc_cnt; i++) {
		for (j = 0; j < qptc; j++) {
			netif_dbg(pdata, drv, pdata->netdev,
				  "TXq%u mapped to TC%u\n", queue, i);
			XGMAC_MTL_IOWRITE_BITS(pdata, queue, MTL_Q_TQOMR,
					       Q2TCMAP, i);
			pdata->q2tc_map[queue++] = i;
		}

		if (i < qptc_extra) {
			netif_dbg(pdata, drv, pdata->netdev,
				  "TXq%u mapped to TC%u\n", queue, i);
			XGMAC_MTL_IOWRITE_BITS(pdata, queue, MTL_Q_TQOMR,
					       Q2TCMAP, i);
			pdata->q2tc_map[queue++] = i;
		}
	}

	/* Map the 8 VLAN priority values to available MTL Rx queues */
	prio_queues = XGMAC_PRIO_QUEUES(pdata->rx_q_count);
	ppq = IEEE_8021QAZ_MAX_TCS / prio_queues;
	ppq_extra = IEEE_8021QAZ_MAX_TCS % prio_queues;

	reg = MAC_RQC2R;
	reg_val = 0;
	for (i = 0, prio = 0; i < prio_queues;) {
		mask = 0;
		for (j = 0; j < ppq; j++) {
			netif_dbg(pdata, drv, pdata->netdev,
				  "PRIO%u mapped to RXq%u\n", prio, i);
			mask |= (1 << prio);
			pdata->prio2q_map[prio++] = i;
		}
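
		/* If the 8 priority values do not divide evenly across the
		 * priority Rx queues (ppq_extra != 0), the lower numbered
		 * queues each pick up one extra priority below.
		 */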
2543 2544 if (i < ppq_extra) { 2545 netif_dbg(pdata, drv, pdata->netdev, 2546 "PRIO%u mapped to RXq%u\n", prio, i); 2547 mask |= (1 << prio); 2548 pdata->prio2q_map[prio++] = i; 2549 } 2550 2551 reg_val |= (mask << ((i++ % MAC_RQC2_Q_PER_REG) << 3)); 2552 2553 if ((i % MAC_RQC2_Q_PER_REG) && (i != prio_queues)) 2554 continue; 2555 2556 XGMAC_IOWRITE(pdata, reg, reg_val); 2557 reg += MAC_RQC2_INC; 2558 reg_val = 0; 2559 } 2560 2561 /* Select dynamic mapping of MTL Rx queue to DMA Rx channel */ 2562 reg = MTL_RQDCM0R; 2563 reg_val = 0; 2564 for (i = 0; i < pdata->rx_q_count;) { 2565 reg_val |= (0x80 << ((i++ % MTL_RQDCM_Q_PER_REG) << 3)); 2566 2567 if ((i % MTL_RQDCM_Q_PER_REG) && (i != pdata->rx_q_count)) 2568 continue; 2569 2570 XGMAC_IOWRITE(pdata, reg, reg_val); 2571 2572 reg += MTL_RQDCM_INC; 2573 reg_val = 0; 2574 } 2575 } 2576 2577 static void xgbe_config_tc(struct xgbe_prv_data *pdata) 2578 { 2579 unsigned int offset, queue, prio; 2580 u8 i; 2581 2582 netdev_reset_tc(pdata->netdev); 2583 if (!pdata->num_tcs) 2584 return; 2585 2586 netdev_set_num_tc(pdata->netdev, pdata->num_tcs); 2587 2588 for (i = 0, queue = 0, offset = 0; i < pdata->num_tcs; i++) { 2589 while ((queue < pdata->tx_q_count) && 2590 (pdata->q2tc_map[queue] == i)) 2591 queue++; 2592 2593 netif_dbg(pdata, drv, pdata->netdev, "TC%u using TXq%u-%u\n", 2594 i, offset, queue - 1); 2595 netdev_set_tc_queue(pdata->netdev, i, queue - offset, offset); 2596 offset = queue; 2597 } 2598 2599 if (!pdata->ets) 2600 return; 2601 2602 for (prio = 0; prio < IEEE_8021QAZ_MAX_TCS; prio++) 2603 netdev_set_prio_tc_map(pdata->netdev, prio, 2604 pdata->ets->prio_tc[prio]); 2605 } 2606 2607 static void xgbe_config_dcb_tc(struct xgbe_prv_data *pdata) 2608 { 2609 struct ieee_ets *ets = pdata->ets; 2610 unsigned int total_weight, min_weight, weight; 2611 unsigned int mask, reg, reg_val; 2612 unsigned int i, prio; 2613 2614 if (!ets) 2615 return; 2616 2617 /* Set Tx to deficit weighted round robin scheduling algorithm (when 2618 * traffic class is using ETS algorithm) 2619 */ 2620 XGMAC_IOWRITE_BITS(pdata, MTL_OMR, ETSALG, MTL_ETSALG_DWRR); 2621 2622 /* Set Traffic Class algorithms */ 2623 total_weight = pdata->netdev->mtu * pdata->hw_feat.tc_cnt; 2624 min_weight = total_weight / 100; 2625 if (!min_weight) 2626 min_weight = 1; 2627 2628 for (i = 0; i < pdata->hw_feat.tc_cnt; i++) { 2629 /* Map the priorities to the traffic class */ 2630 mask = 0; 2631 for (prio = 0; prio < IEEE_8021QAZ_MAX_TCS; prio++) { 2632 if (ets->prio_tc[prio] == i) 2633 mask |= (1 << prio); 2634 } 2635 mask &= 0xff; 2636 2637 netif_dbg(pdata, drv, pdata->netdev, "TC%u PRIO mask=%#x\n", 2638 i, mask); 2639 reg = MTL_TCPM0R + (MTL_TCPM_INC * (i / MTL_TCPM_TC_PER_REG)); 2640 reg_val = XGMAC_IOREAD(pdata, reg); 2641 2642 reg_val &= ~(0xff << ((i % MTL_TCPM_TC_PER_REG) << 3)); 2643 reg_val |= (mask << ((i % MTL_TCPM_TC_PER_REG) << 3)); 2644 2645 XGMAC_IOWRITE(pdata, reg, reg_val); 2646 2647 /* Set the traffic class algorithm */ 2648 switch (ets->tc_tsa[i]) { 2649 case IEEE_8021QAZ_TSA_STRICT: 2650 netif_dbg(pdata, drv, pdata->netdev, 2651 "TC%u using SP\n", i); 2652 XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_ETSCR, TSA, 2653 MTL_TSA_SP); 2654 break; 2655 case IEEE_8021QAZ_TSA_ETS: 2656 weight = total_weight * ets->tc_tx_bw[i] / 100; 2657 weight = clamp(weight, min_weight, total_weight); 2658 2659 netif_dbg(pdata, drv, pdata->netdev, 2660 "TC%u using DWRR (weight %u)\n", i, weight); 2661 XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_ETSCR, TSA, 2662 MTL_TSA_ETS); 2663 
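			/* Program the DWRR weight computed above (clamped to
			 * the [min_weight, total_weight] range) into the
			 * queue weight register for this traffic class.
			 */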
XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_QWR, QW, 2664 weight); 2665 break; 2666 } 2667 } 2668 2669 xgbe_config_tc(pdata); 2670 } 2671 2672 static void xgbe_config_dcb_pfc(struct xgbe_prv_data *pdata) 2673 { 2674 if (!test_bit(XGBE_DOWN, &pdata->dev_state)) { 2675 /* Just stop the Tx queues while Rx fifo is changed */ 2676 netif_tx_stop_all_queues(pdata->netdev); 2677 2678 /* Suspend Rx so that fifo's can be adjusted */ 2679 pdata->hw_if.disable_rx(pdata); 2680 } 2681 2682 xgbe_config_rx_fifo_size(pdata); 2683 xgbe_config_flow_control(pdata); 2684 2685 if (!test_bit(XGBE_DOWN, &pdata->dev_state)) { 2686 /* Resume Rx */ 2687 pdata->hw_if.enable_rx(pdata); 2688 2689 /* Resume Tx queues */ 2690 netif_tx_start_all_queues(pdata->netdev); 2691 } 2692 } 2693 2694 static void xgbe_config_mac_address(struct xgbe_prv_data *pdata) 2695 { 2696 xgbe_set_mac_address(pdata, pdata->netdev->dev_addr); 2697 2698 /* Filtering is done using perfect filtering and hash filtering */ 2699 if (pdata->hw_feat.hash_table_size) { 2700 XGMAC_IOWRITE_BITS(pdata, MAC_PFR, HPF, 1); 2701 XGMAC_IOWRITE_BITS(pdata, MAC_PFR, HUC, 1); 2702 XGMAC_IOWRITE_BITS(pdata, MAC_PFR, HMC, 1); 2703 } 2704 } 2705 2706 static void xgbe_config_jumbo_enable(struct xgbe_prv_data *pdata) 2707 { 2708 unsigned int val; 2709 2710 val = (pdata->netdev->mtu > XGMAC_STD_PACKET_MTU) ? 1 : 0; 2711 2712 XGMAC_IOWRITE_BITS(pdata, MAC_RCR, JE, val); 2713 } 2714 2715 static void xgbe_config_mac_speed(struct xgbe_prv_data *pdata) 2716 { 2717 xgbe_set_speed(pdata, pdata->phy_speed); 2718 } 2719 2720 static void xgbe_config_checksum_offload(struct xgbe_prv_data *pdata) 2721 { 2722 if (pdata->netdev->features & NETIF_F_RXCSUM) 2723 xgbe_enable_rx_csum(pdata); 2724 else 2725 xgbe_disable_rx_csum(pdata); 2726 } 2727 2728 static void xgbe_config_vlan_support(struct xgbe_prv_data *pdata) 2729 { 2730 /* Indicate that VLAN Tx CTAGs come from context descriptors */ 2731 XGMAC_IOWRITE_BITS(pdata, MAC_VLANIR, CSVL, 0); 2732 XGMAC_IOWRITE_BITS(pdata, MAC_VLANIR, VLTI, 1); 2733 2734 /* Set the current VLAN Hash Table register value */ 2735 xgbe_update_vlan_hash_table(pdata); 2736 2737 if (pdata->netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER) 2738 xgbe_enable_rx_vlan_filtering(pdata); 2739 else 2740 xgbe_disable_rx_vlan_filtering(pdata); 2741 2742 if (pdata->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) 2743 xgbe_enable_rx_vlan_stripping(pdata); 2744 else 2745 xgbe_disable_rx_vlan_stripping(pdata); 2746 } 2747 2748 static u64 xgbe_mmc_read(struct xgbe_prv_data *pdata, unsigned int reg_lo) 2749 { 2750 bool read_hi; 2751 u64 val; 2752 2753 if (pdata->vdata->mmc_64bit) { 2754 switch (reg_lo) { 2755 /* These registers are always 32 bit */ 2756 case MMC_RXRUNTERROR: 2757 case MMC_RXJABBERERROR: 2758 case MMC_RXUNDERSIZE_G: 2759 case MMC_RXOVERSIZE_G: 2760 case MMC_RXWATCHDOGERROR: 2761 read_hi = false; 2762 break; 2763 2764 default: 2765 read_hi = true; 2766 } 2767 } else { 2768 switch (reg_lo) { 2769 /* These registers are always 64 bit */ 2770 case MMC_TXOCTETCOUNT_GB_LO: 2771 case MMC_TXOCTETCOUNT_G_LO: 2772 case MMC_RXOCTETCOUNT_GB_LO: 2773 case MMC_RXOCTETCOUNT_G_LO: 2774 read_hi = true; 2775 break; 2776 2777 default: 2778 read_hi = false; 2779 } 2780 } 2781 2782 val = XGMAC_IOREAD(pdata, reg_lo); 2783 2784 if (read_hi) 2785 val |= ((u64)XGMAC_IOREAD(pdata, reg_lo + 4) << 32); 2786 2787 return val; 2788 } 2789 2790 static void xgbe_tx_mmc_int(struct xgbe_prv_data *pdata) 2791 { 2792 struct xgbe_mmc_stats *stats = &pdata->mmc_stats; 2793 unsigned int mmc_isr = 
XGMAC_IOREAD(pdata, MMC_TISR); 2794 2795 if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXOCTETCOUNT_GB)) 2796 stats->txoctetcount_gb += 2797 xgbe_mmc_read(pdata, MMC_TXOCTETCOUNT_GB_LO); 2798 2799 if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXFRAMECOUNT_GB)) 2800 stats->txframecount_gb += 2801 xgbe_mmc_read(pdata, MMC_TXFRAMECOUNT_GB_LO); 2802 2803 if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXBROADCASTFRAMES_G)) 2804 stats->txbroadcastframes_g += 2805 xgbe_mmc_read(pdata, MMC_TXBROADCASTFRAMES_G_LO); 2806 2807 if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXMULTICASTFRAMES_G)) 2808 stats->txmulticastframes_g += 2809 xgbe_mmc_read(pdata, MMC_TXMULTICASTFRAMES_G_LO); 2810 2811 if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX64OCTETS_GB)) 2812 stats->tx64octets_gb += 2813 xgbe_mmc_read(pdata, MMC_TX64OCTETS_GB_LO); 2814 2815 if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX65TO127OCTETS_GB)) 2816 stats->tx65to127octets_gb += 2817 xgbe_mmc_read(pdata, MMC_TX65TO127OCTETS_GB_LO); 2818 2819 if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX128TO255OCTETS_GB)) 2820 stats->tx128to255octets_gb += 2821 xgbe_mmc_read(pdata, MMC_TX128TO255OCTETS_GB_LO); 2822 2823 if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX256TO511OCTETS_GB)) 2824 stats->tx256to511octets_gb += 2825 xgbe_mmc_read(pdata, MMC_TX256TO511OCTETS_GB_LO); 2826 2827 if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX512TO1023OCTETS_GB)) 2828 stats->tx512to1023octets_gb += 2829 xgbe_mmc_read(pdata, MMC_TX512TO1023OCTETS_GB_LO); 2830 2831 if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX1024TOMAXOCTETS_GB)) 2832 stats->tx1024tomaxoctets_gb += 2833 xgbe_mmc_read(pdata, MMC_TX1024TOMAXOCTETS_GB_LO); 2834 2835 if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXUNICASTFRAMES_GB)) 2836 stats->txunicastframes_gb += 2837 xgbe_mmc_read(pdata, MMC_TXUNICASTFRAMES_GB_LO); 2838 2839 if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXMULTICASTFRAMES_GB)) 2840 stats->txmulticastframes_gb += 2841 xgbe_mmc_read(pdata, MMC_TXMULTICASTFRAMES_GB_LO); 2842 2843 if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXBROADCASTFRAMES_GB)) 2844 stats->txbroadcastframes_g += 2845 xgbe_mmc_read(pdata, MMC_TXBROADCASTFRAMES_GB_LO); 2846 2847 if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXUNDERFLOWERROR)) 2848 stats->txunderflowerror += 2849 xgbe_mmc_read(pdata, MMC_TXUNDERFLOWERROR_LO); 2850 2851 if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXOCTETCOUNT_G)) 2852 stats->txoctetcount_g += 2853 xgbe_mmc_read(pdata, MMC_TXOCTETCOUNT_G_LO); 2854 2855 if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXFRAMECOUNT_G)) 2856 stats->txframecount_g += 2857 xgbe_mmc_read(pdata, MMC_TXFRAMECOUNT_G_LO); 2858 2859 if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXPAUSEFRAMES)) 2860 stats->txpauseframes += 2861 xgbe_mmc_read(pdata, MMC_TXPAUSEFRAMES_LO); 2862 2863 if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXVLANFRAMES_G)) 2864 stats->txvlanframes_g += 2865 xgbe_mmc_read(pdata, MMC_TXVLANFRAMES_G_LO); 2866 } 2867 2868 static void xgbe_rx_mmc_int(struct xgbe_prv_data *pdata) 2869 { 2870 struct xgbe_mmc_stats *stats = &pdata->mmc_stats; 2871 unsigned int mmc_isr = XGMAC_IOREAD(pdata, MMC_RISR); 2872 2873 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXFRAMECOUNT_GB)) 2874 stats->rxframecount_gb += 2875 xgbe_mmc_read(pdata, MMC_RXFRAMECOUNT_GB_LO); 2876 2877 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXOCTETCOUNT_GB)) 2878 stats->rxoctetcount_gb += 2879 xgbe_mmc_read(pdata, MMC_RXOCTETCOUNT_GB_LO); 2880 2881 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXOCTETCOUNT_G)) 2882 stats->rxoctetcount_g += 2883 xgbe_mmc_read(pdata, MMC_RXOCTETCOUNT_G_LO); 2884 2885 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXBROADCASTFRAMES_G)) 2886 stats->rxbroadcastframes_g += 2887 
xgbe_mmc_read(pdata, MMC_RXBROADCASTFRAMES_G_LO); 2888 2889 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXMULTICASTFRAMES_G)) 2890 stats->rxmulticastframes_g += 2891 xgbe_mmc_read(pdata, MMC_RXMULTICASTFRAMES_G_LO); 2892 2893 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXCRCERROR)) 2894 stats->rxcrcerror += 2895 xgbe_mmc_read(pdata, MMC_RXCRCERROR_LO); 2896 2897 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXRUNTERROR)) 2898 stats->rxrunterror += 2899 xgbe_mmc_read(pdata, MMC_RXRUNTERROR); 2900 2901 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXJABBERERROR)) 2902 stats->rxjabbererror += 2903 xgbe_mmc_read(pdata, MMC_RXJABBERERROR); 2904 2905 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXUNDERSIZE_G)) 2906 stats->rxundersize_g += 2907 xgbe_mmc_read(pdata, MMC_RXUNDERSIZE_G); 2908 2909 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXOVERSIZE_G)) 2910 stats->rxoversize_g += 2911 xgbe_mmc_read(pdata, MMC_RXOVERSIZE_G); 2912 2913 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX64OCTETS_GB)) 2914 stats->rx64octets_gb += 2915 xgbe_mmc_read(pdata, MMC_RX64OCTETS_GB_LO); 2916 2917 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX65TO127OCTETS_GB)) 2918 stats->rx65to127octets_gb += 2919 xgbe_mmc_read(pdata, MMC_RX65TO127OCTETS_GB_LO); 2920 2921 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX128TO255OCTETS_GB)) 2922 stats->rx128to255octets_gb += 2923 xgbe_mmc_read(pdata, MMC_RX128TO255OCTETS_GB_LO); 2924 2925 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX256TO511OCTETS_GB)) 2926 stats->rx256to511octets_gb += 2927 xgbe_mmc_read(pdata, MMC_RX256TO511OCTETS_GB_LO); 2928 2929 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX512TO1023OCTETS_GB)) 2930 stats->rx512to1023octets_gb += 2931 xgbe_mmc_read(pdata, MMC_RX512TO1023OCTETS_GB_LO); 2932 2933 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX1024TOMAXOCTETS_GB)) 2934 stats->rx1024tomaxoctets_gb += 2935 xgbe_mmc_read(pdata, MMC_RX1024TOMAXOCTETS_GB_LO); 2936 2937 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXUNICASTFRAMES_G)) 2938 stats->rxunicastframes_g += 2939 xgbe_mmc_read(pdata, MMC_RXUNICASTFRAMES_G_LO); 2940 2941 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXLENGTHERROR)) 2942 stats->rxlengtherror += 2943 xgbe_mmc_read(pdata, MMC_RXLENGTHERROR_LO); 2944 2945 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXOUTOFRANGETYPE)) 2946 stats->rxoutofrangetype += 2947 xgbe_mmc_read(pdata, MMC_RXOUTOFRANGETYPE_LO); 2948 2949 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXPAUSEFRAMES)) 2950 stats->rxpauseframes += 2951 xgbe_mmc_read(pdata, MMC_RXPAUSEFRAMES_LO); 2952 2953 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXFIFOOVERFLOW)) 2954 stats->rxfifooverflow += 2955 xgbe_mmc_read(pdata, MMC_RXFIFOOVERFLOW_LO); 2956 2957 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXVLANFRAMES_GB)) 2958 stats->rxvlanframes_gb += 2959 xgbe_mmc_read(pdata, MMC_RXVLANFRAMES_GB_LO); 2960 2961 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXWATCHDOGERROR)) 2962 stats->rxwatchdogerror += 2963 xgbe_mmc_read(pdata, MMC_RXWATCHDOGERROR); 2964 } 2965 2966 static void xgbe_read_mmc_stats(struct xgbe_prv_data *pdata) 2967 { 2968 struct xgbe_mmc_stats *stats = &pdata->mmc_stats; 2969 2970 /* Freeze counters */ 2971 XGMAC_IOWRITE_BITS(pdata, MMC_CR, MCF, 1); 2972 2973 stats->txoctetcount_gb += 2974 xgbe_mmc_read(pdata, MMC_TXOCTETCOUNT_GB_LO); 2975 2976 stats->txframecount_gb += 2977 xgbe_mmc_read(pdata, MMC_TXFRAMECOUNT_GB_LO); 2978 2979 stats->txbroadcastframes_g += 2980 xgbe_mmc_read(pdata, MMC_TXBROADCASTFRAMES_G_LO); 2981 2982 stats->txmulticastframes_g += 2983 xgbe_mmc_read(pdata, MMC_TXMULTICASTFRAMES_G_LO); 2984 2985 stats->tx64octets_gb += 2986 xgbe_mmc_read(pdata, MMC_TX64OCTETS_GB_LO); 2987 2988 
stats->tx65to127octets_gb += 2989 xgbe_mmc_read(pdata, MMC_TX65TO127OCTETS_GB_LO); 2990 2991 stats->tx128to255octets_gb += 2992 xgbe_mmc_read(pdata, MMC_TX128TO255OCTETS_GB_LO); 2993 2994 stats->tx256to511octets_gb += 2995 xgbe_mmc_read(pdata, MMC_TX256TO511OCTETS_GB_LO); 2996 2997 stats->tx512to1023octets_gb += 2998 xgbe_mmc_read(pdata, MMC_TX512TO1023OCTETS_GB_LO); 2999 3000 stats->tx1024tomaxoctets_gb += 3001 xgbe_mmc_read(pdata, MMC_TX1024TOMAXOCTETS_GB_LO); 3002 3003 stats->txunicastframes_gb += 3004 xgbe_mmc_read(pdata, MMC_TXUNICASTFRAMES_GB_LO); 3005 3006 stats->txmulticastframes_gb += 3007 xgbe_mmc_read(pdata, MMC_TXMULTICASTFRAMES_GB_LO); 3008 3009 stats->txbroadcastframes_g += 3010 xgbe_mmc_read(pdata, MMC_TXBROADCASTFRAMES_GB_LO); 3011 3012 stats->txunderflowerror += 3013 xgbe_mmc_read(pdata, MMC_TXUNDERFLOWERROR_LO); 3014 3015 stats->txoctetcount_g += 3016 xgbe_mmc_read(pdata, MMC_TXOCTETCOUNT_G_LO); 3017 3018 stats->txframecount_g += 3019 xgbe_mmc_read(pdata, MMC_TXFRAMECOUNT_G_LO); 3020 3021 stats->txpauseframes += 3022 xgbe_mmc_read(pdata, MMC_TXPAUSEFRAMES_LO); 3023 3024 stats->txvlanframes_g += 3025 xgbe_mmc_read(pdata, MMC_TXVLANFRAMES_G_LO); 3026 3027 stats->rxframecount_gb += 3028 xgbe_mmc_read(pdata, MMC_RXFRAMECOUNT_GB_LO); 3029 3030 stats->rxoctetcount_gb += 3031 xgbe_mmc_read(pdata, MMC_RXOCTETCOUNT_GB_LO); 3032 3033 stats->rxoctetcount_g += 3034 xgbe_mmc_read(pdata, MMC_RXOCTETCOUNT_G_LO); 3035 3036 stats->rxbroadcastframes_g += 3037 xgbe_mmc_read(pdata, MMC_RXBROADCASTFRAMES_G_LO); 3038 3039 stats->rxmulticastframes_g += 3040 xgbe_mmc_read(pdata, MMC_RXMULTICASTFRAMES_G_LO); 3041 3042 stats->rxcrcerror += 3043 xgbe_mmc_read(pdata, MMC_RXCRCERROR_LO); 3044 3045 stats->rxrunterror += 3046 xgbe_mmc_read(pdata, MMC_RXRUNTERROR); 3047 3048 stats->rxjabbererror += 3049 xgbe_mmc_read(pdata, MMC_RXJABBERERROR); 3050 3051 stats->rxundersize_g += 3052 xgbe_mmc_read(pdata, MMC_RXUNDERSIZE_G); 3053 3054 stats->rxoversize_g += 3055 xgbe_mmc_read(pdata, MMC_RXOVERSIZE_G); 3056 3057 stats->rx64octets_gb += 3058 xgbe_mmc_read(pdata, MMC_RX64OCTETS_GB_LO); 3059 3060 stats->rx65to127octets_gb += 3061 xgbe_mmc_read(pdata, MMC_RX65TO127OCTETS_GB_LO); 3062 3063 stats->rx128to255octets_gb += 3064 xgbe_mmc_read(pdata, MMC_RX128TO255OCTETS_GB_LO); 3065 3066 stats->rx256to511octets_gb += 3067 xgbe_mmc_read(pdata, MMC_RX256TO511OCTETS_GB_LO); 3068 3069 stats->rx512to1023octets_gb += 3070 xgbe_mmc_read(pdata, MMC_RX512TO1023OCTETS_GB_LO); 3071 3072 stats->rx1024tomaxoctets_gb += 3073 xgbe_mmc_read(pdata, MMC_RX1024TOMAXOCTETS_GB_LO); 3074 3075 stats->rxunicastframes_g += 3076 xgbe_mmc_read(pdata, MMC_RXUNICASTFRAMES_G_LO); 3077 3078 stats->rxlengtherror += 3079 xgbe_mmc_read(pdata, MMC_RXLENGTHERROR_LO); 3080 3081 stats->rxoutofrangetype += 3082 xgbe_mmc_read(pdata, MMC_RXOUTOFRANGETYPE_LO); 3083 3084 stats->rxpauseframes += 3085 xgbe_mmc_read(pdata, MMC_RXPAUSEFRAMES_LO); 3086 3087 stats->rxfifooverflow += 3088 xgbe_mmc_read(pdata, MMC_RXFIFOOVERFLOW_LO); 3089 3090 stats->rxvlanframes_gb += 3091 xgbe_mmc_read(pdata, MMC_RXVLANFRAMES_GB_LO); 3092 3093 stats->rxwatchdogerror += 3094 xgbe_mmc_read(pdata, MMC_RXWATCHDOGERROR); 3095 3096 /* Un-freeze counters */ 3097 XGMAC_IOWRITE_BITS(pdata, MMC_CR, MCF, 0); 3098 } 3099 3100 static void xgbe_config_mmc(struct xgbe_prv_data *pdata) 3101 { 3102 /* Set counters to reset on read */ 3103 XGMAC_IOWRITE_BITS(pdata, MMC_CR, ROR, 1); 3104 3105 /* Reset the counters */ 3106 XGMAC_IOWRITE_BITS(pdata, MMC_CR, CR, 1); 3107 } 3108 3109 static void 
xgbe_txq_prepare_tx_stop(struct xgbe_prv_data *pdata, 3110 unsigned int queue) 3111 { 3112 unsigned int tx_status; 3113 unsigned long tx_timeout; 3114 3115 /* The Tx engine cannot be stopped if it is actively processing 3116 * packets. Wait for the Tx queue to empty the Tx fifo. Don't 3117 * wait forever though... 3118 */ 3119 tx_timeout = jiffies + (XGBE_DMA_STOP_TIMEOUT * HZ); 3120 while (time_before(jiffies, tx_timeout)) { 3121 tx_status = XGMAC_MTL_IOREAD(pdata, queue, MTL_Q_TQDR); 3122 if ((XGMAC_GET_BITS(tx_status, MTL_Q_TQDR, TRCSTS) != 1) && 3123 (XGMAC_GET_BITS(tx_status, MTL_Q_TQDR, TXQSTS) == 0)) 3124 break; 3125 3126 usleep_range(500, 1000); 3127 } 3128 3129 if (!time_before(jiffies, tx_timeout)) 3130 netdev_info(pdata->netdev, 3131 "timed out waiting for Tx queue %u to empty\n", 3132 queue); 3133 } 3134 3135 static void xgbe_prepare_tx_stop(struct xgbe_prv_data *pdata, 3136 unsigned int queue) 3137 { 3138 unsigned int tx_dsr, tx_pos, tx_qidx; 3139 unsigned int tx_status; 3140 unsigned long tx_timeout; 3141 3142 if (XGMAC_GET_BITS(pdata->hw_feat.version, MAC_VR, SNPSVER) > 0x20) 3143 return xgbe_txq_prepare_tx_stop(pdata, queue); 3144 3145 /* Calculate the status register to read and the position within */ 3146 if (queue < DMA_DSRX_FIRST_QUEUE) { 3147 tx_dsr = DMA_DSR0; 3148 tx_pos = (queue * DMA_DSR_Q_WIDTH) + DMA_DSR0_TPS_START; 3149 } else { 3150 tx_qidx = queue - DMA_DSRX_FIRST_QUEUE; 3151 3152 tx_dsr = DMA_DSR1 + ((tx_qidx / DMA_DSRX_QPR) * DMA_DSRX_INC); 3153 tx_pos = ((tx_qidx % DMA_DSRX_QPR) * DMA_DSR_Q_WIDTH) + 3154 DMA_DSRX_TPS_START; 3155 } 3156 3157 /* The Tx engine cannot be stopped if it is actively processing 3158 * descriptors. Wait for the Tx engine to enter the stopped or 3159 * suspended state. Don't wait forever though... 
3160 */ 3161 tx_timeout = jiffies + (XGBE_DMA_STOP_TIMEOUT * HZ); 3162 while (time_before(jiffies, tx_timeout)) { 3163 tx_status = XGMAC_IOREAD(pdata, tx_dsr); 3164 tx_status = GET_BITS(tx_status, tx_pos, DMA_DSR_TPS_WIDTH); 3165 if ((tx_status == DMA_TPS_STOPPED) || 3166 (tx_status == DMA_TPS_SUSPENDED)) 3167 break; 3168 3169 usleep_range(500, 1000); 3170 } 3171 3172 if (!time_before(jiffies, tx_timeout)) 3173 netdev_info(pdata->netdev, 3174 "timed out waiting for Tx DMA channel %u to stop\n", 3175 queue); 3176 } 3177 3178 static void xgbe_enable_tx(struct xgbe_prv_data *pdata) 3179 { 3180 unsigned int i; 3181 3182 /* Enable each Tx DMA channel */ 3183 for (i = 0; i < pdata->channel_count; i++) { 3184 if (!pdata->channel[i]->tx_ring) 3185 break; 3186 3187 XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_TCR, ST, 1); 3188 } 3189 3190 /* Enable each Tx queue */ 3191 for (i = 0; i < pdata->tx_q_count; i++) 3192 XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TXQEN, 3193 MTL_Q_ENABLED); 3194 3195 /* Enable MAC Tx */ 3196 XGMAC_IOWRITE_BITS(pdata, MAC_TCR, TE, 1); 3197 } 3198 3199 static void xgbe_disable_tx(struct xgbe_prv_data *pdata) 3200 { 3201 unsigned int i; 3202 3203 /* Prepare for Tx DMA channel stop */ 3204 for (i = 0; i < pdata->tx_q_count; i++) 3205 xgbe_prepare_tx_stop(pdata, i); 3206 3207 /* Disable MAC Tx */ 3208 XGMAC_IOWRITE_BITS(pdata, MAC_TCR, TE, 0); 3209 3210 /* Disable each Tx queue */ 3211 for (i = 0; i < pdata->tx_q_count; i++) 3212 XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TXQEN, 0); 3213 3214 /* Disable each Tx DMA channel */ 3215 for (i = 0; i < pdata->channel_count; i++) { 3216 if (!pdata->channel[i]->tx_ring) 3217 break; 3218 3219 XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_TCR, ST, 0); 3220 } 3221 } 3222 3223 static void xgbe_prepare_rx_stop(struct xgbe_prv_data *pdata, 3224 unsigned int queue) 3225 { 3226 unsigned int rx_status; 3227 unsigned long rx_timeout; 3228 3229 /* The Rx engine cannot be stopped if it is actively processing 3230 * packets. Wait for the Rx queue to empty the Rx fifo. Don't 3231 * wait forever though... 
3232 */ 3233 rx_timeout = jiffies + (XGBE_DMA_STOP_TIMEOUT * HZ); 3234 while (time_before(jiffies, rx_timeout)) { 3235 rx_status = XGMAC_MTL_IOREAD(pdata, queue, MTL_Q_RQDR); 3236 if ((XGMAC_GET_BITS(rx_status, MTL_Q_RQDR, PRXQ) == 0) && 3237 (XGMAC_GET_BITS(rx_status, MTL_Q_RQDR, RXQSTS) == 0)) 3238 break; 3239 3240 usleep_range(500, 1000); 3241 } 3242 3243 if (!time_before(jiffies, rx_timeout)) 3244 netdev_info(pdata->netdev, 3245 "timed out waiting for Rx queue %u to empty\n", 3246 queue); 3247 } 3248 3249 static void xgbe_enable_rx(struct xgbe_prv_data *pdata) 3250 { 3251 unsigned int reg_val, i; 3252 3253 /* Enable each Rx DMA channel */ 3254 for (i = 0; i < pdata->channel_count; i++) { 3255 if (!pdata->channel[i]->rx_ring) 3256 break; 3257 3258 XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_RCR, SR, 1); 3259 } 3260 3261 /* Enable each Rx queue */ 3262 reg_val = 0; 3263 for (i = 0; i < pdata->rx_q_count; i++) 3264 reg_val |= (0x02 << (i << 1)); 3265 XGMAC_IOWRITE(pdata, MAC_RQC0R, reg_val); 3266 3267 /* Enable MAC Rx */ 3268 XGMAC_IOWRITE_BITS(pdata, MAC_RCR, DCRCC, 1); 3269 XGMAC_IOWRITE_BITS(pdata, MAC_RCR, CST, 1); 3270 XGMAC_IOWRITE_BITS(pdata, MAC_RCR, ACS, 1); 3271 XGMAC_IOWRITE_BITS(pdata, MAC_RCR, RE, 1); 3272 } 3273 3274 static void xgbe_disable_rx(struct xgbe_prv_data *pdata) 3275 { 3276 unsigned int i; 3277 3278 /* Disable MAC Rx */ 3279 XGMAC_IOWRITE_BITS(pdata, MAC_RCR, DCRCC, 0); 3280 XGMAC_IOWRITE_BITS(pdata, MAC_RCR, CST, 0); 3281 XGMAC_IOWRITE_BITS(pdata, MAC_RCR, ACS, 0); 3282 XGMAC_IOWRITE_BITS(pdata, MAC_RCR, RE, 0); 3283 3284 /* Prepare for Rx DMA channel stop */ 3285 for (i = 0; i < pdata->rx_q_count; i++) 3286 xgbe_prepare_rx_stop(pdata, i); 3287 3288 /* Disable each Rx queue */ 3289 XGMAC_IOWRITE(pdata, MAC_RQC0R, 0); 3290 3291 /* Disable each Rx DMA channel */ 3292 for (i = 0; i < pdata->channel_count; i++) { 3293 if (!pdata->channel[i]->rx_ring) 3294 break; 3295 3296 XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_RCR, SR, 0); 3297 } 3298 } 3299 3300 static void xgbe_powerup_tx(struct xgbe_prv_data *pdata) 3301 { 3302 unsigned int i; 3303 3304 /* Enable each Tx DMA channel */ 3305 for (i = 0; i < pdata->channel_count; i++) { 3306 if (!pdata->channel[i]->tx_ring) 3307 break; 3308 3309 XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_TCR, ST, 1); 3310 } 3311 3312 /* Enable MAC Tx */ 3313 XGMAC_IOWRITE_BITS(pdata, MAC_TCR, TE, 1); 3314 } 3315 3316 static void xgbe_powerdown_tx(struct xgbe_prv_data *pdata) 3317 { 3318 unsigned int i; 3319 3320 /* Prepare for Tx DMA channel stop */ 3321 for (i = 0; i < pdata->tx_q_count; i++) 3322 xgbe_prepare_tx_stop(pdata, i); 3323 3324 /* Disable MAC Tx */ 3325 XGMAC_IOWRITE_BITS(pdata, MAC_TCR, TE, 0); 3326 3327 /* Disable each Tx DMA channel */ 3328 for (i = 0; i < pdata->channel_count; i++) { 3329 if (!pdata->channel[i]->tx_ring) 3330 break; 3331 3332 XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_TCR, ST, 0); 3333 } 3334 } 3335 3336 static void xgbe_powerup_rx(struct xgbe_prv_data *pdata) 3337 { 3338 unsigned int i; 3339 3340 /* Enable each Rx DMA channel */ 3341 for (i = 0; i < pdata->channel_count; i++) { 3342 if (!pdata->channel[i]->rx_ring) 3343 break; 3344 3345 XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_RCR, SR, 1); 3346 } 3347 } 3348 3349 static void xgbe_powerdown_rx(struct xgbe_prv_data *pdata) 3350 { 3351 unsigned int i; 3352 3353 /* Disable each Rx DMA channel */ 3354 for (i = 0; i < pdata->channel_count; i++) { 3355 if (!pdata->channel[i]->rx_ring) 3356 break; 3357 3358 
XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_RCR, SR, 0); 3359 } 3360 } 3361 3362 static int xgbe_init(struct xgbe_prv_data *pdata) 3363 { 3364 struct xgbe_desc_if *desc_if = &pdata->desc_if; 3365 int ret; 3366 3367 DBGPR("-->xgbe_init\n"); 3368 3369 /* Flush Tx queues */ 3370 ret = xgbe_flush_tx_queues(pdata); 3371 if (ret) { 3372 netdev_err(pdata->netdev, "error flushing TX queues\n"); 3373 return ret; 3374 } 3375 3376 /* 3377 * Initialize DMA related features 3378 */ 3379 xgbe_config_dma_bus(pdata); 3380 xgbe_config_dma_cache(pdata); 3381 xgbe_config_osp_mode(pdata); 3382 xgbe_config_pbl_val(pdata); 3383 xgbe_config_rx_coalesce(pdata); 3384 xgbe_config_tx_coalesce(pdata); 3385 xgbe_config_rx_buffer_size(pdata); 3386 xgbe_config_tso_mode(pdata); 3387 xgbe_config_sph_mode(pdata); 3388 xgbe_config_rss(pdata); 3389 desc_if->wrapper_tx_desc_init(pdata); 3390 desc_if->wrapper_rx_desc_init(pdata); 3391 xgbe_enable_dma_interrupts(pdata); 3392 3393 /* 3394 * Initialize MTL related features 3395 */ 3396 xgbe_config_mtl_mode(pdata); 3397 xgbe_config_queue_mapping(pdata); 3398 xgbe_config_tsf_mode(pdata, pdata->tx_sf_mode); 3399 xgbe_config_rsf_mode(pdata, pdata->rx_sf_mode); 3400 xgbe_config_tx_threshold(pdata, pdata->tx_threshold); 3401 xgbe_config_rx_threshold(pdata, pdata->rx_threshold); 3402 xgbe_config_tx_fifo_size(pdata); 3403 xgbe_config_rx_fifo_size(pdata); 3404 /*TODO: Error Packet and undersized good Packet forwarding enable 3405 (FEP and FUP) 3406 */ 3407 xgbe_config_dcb_tc(pdata); 3408 xgbe_enable_mtl_interrupts(pdata); 3409 3410 /* 3411 * Initialize MAC related features 3412 */ 3413 xgbe_config_mac_address(pdata); 3414 xgbe_config_rx_mode(pdata); 3415 xgbe_config_jumbo_enable(pdata); 3416 xgbe_config_flow_control(pdata); 3417 xgbe_config_mac_speed(pdata); 3418 xgbe_config_checksum_offload(pdata); 3419 xgbe_config_vlan_support(pdata); 3420 xgbe_config_mmc(pdata); 3421 xgbe_enable_mac_interrupts(pdata); 3422 3423 /* 3424 * Initialize ECC related features 3425 */ 3426 xgbe_enable_ecc_interrupts(pdata); 3427 3428 DBGPR("<--xgbe_init\n"); 3429 3430 return 0; 3431 } 3432 3433 void xgbe_init_function_ptrs_dev(struct xgbe_hw_if *hw_if) 3434 { 3435 DBGPR("-->xgbe_init_function_ptrs\n"); 3436 3437 hw_if->tx_complete = xgbe_tx_complete; 3438 3439 hw_if->set_mac_address = xgbe_set_mac_address; 3440 hw_if->config_rx_mode = xgbe_config_rx_mode; 3441 3442 hw_if->enable_rx_csum = xgbe_enable_rx_csum; 3443 hw_if->disable_rx_csum = xgbe_disable_rx_csum; 3444 3445 hw_if->enable_rx_vlan_stripping = xgbe_enable_rx_vlan_stripping; 3446 hw_if->disable_rx_vlan_stripping = xgbe_disable_rx_vlan_stripping; 3447 hw_if->enable_rx_vlan_filtering = xgbe_enable_rx_vlan_filtering; 3448 hw_if->disable_rx_vlan_filtering = xgbe_disable_rx_vlan_filtering; 3449 hw_if->update_vlan_hash_table = xgbe_update_vlan_hash_table; 3450 3451 hw_if->read_mmd_regs = xgbe_read_mmd_regs; 3452 hw_if->write_mmd_regs = xgbe_write_mmd_regs; 3453 3454 hw_if->set_speed = xgbe_set_speed; 3455 3456 hw_if->set_ext_mii_mode = xgbe_set_ext_mii_mode; 3457 hw_if->read_ext_mii_regs = xgbe_read_ext_mii_regs; 3458 hw_if->write_ext_mii_regs = xgbe_write_ext_mii_regs; 3459 3460 hw_if->set_gpio = xgbe_set_gpio; 3461 hw_if->clr_gpio = xgbe_clr_gpio; 3462 3463 hw_if->enable_tx = xgbe_enable_tx; 3464 hw_if->disable_tx = xgbe_disable_tx; 3465 hw_if->enable_rx = xgbe_enable_rx; 3466 hw_if->disable_rx = xgbe_disable_rx; 3467 3468 hw_if->powerup_tx = xgbe_powerup_tx; 3469 hw_if->powerdown_tx = xgbe_powerdown_tx; 3470 hw_if->powerup_rx = xgbe_powerup_rx; 3471 
	hw_if->powerdown_rx = xgbe_powerdown_rx;

	hw_if->dev_xmit = xgbe_dev_xmit;
	hw_if->dev_read = xgbe_dev_read;
	hw_if->enable_int = xgbe_enable_int;
	hw_if->disable_int = xgbe_disable_int;
	hw_if->init = xgbe_init;
	hw_if->exit = xgbe_exit;

	/* Descriptor related sequences have to be initialized here */
	hw_if->tx_desc_init = xgbe_tx_desc_init;
	hw_if->rx_desc_init = xgbe_rx_desc_init;
	hw_if->tx_desc_reset = xgbe_tx_desc_reset;
	hw_if->rx_desc_reset = xgbe_rx_desc_reset;
	hw_if->is_last_desc = xgbe_is_last_desc;
	hw_if->is_context_desc = xgbe_is_context_desc;
	hw_if->tx_start_xmit = xgbe_tx_start_xmit;

	/* For flow control */
	hw_if->config_tx_flow_control = xgbe_config_tx_flow_control;
	hw_if->config_rx_flow_control = xgbe_config_rx_flow_control;

	/* For Rx coalescing */
	hw_if->config_rx_coalesce = xgbe_config_rx_coalesce;
	hw_if->config_tx_coalesce = xgbe_config_tx_coalesce;
	hw_if->usec_to_riwt = xgbe_usec_to_riwt;
	hw_if->riwt_to_usec = xgbe_riwt_to_usec;

	/* For Rx and Tx threshold config */
	hw_if->config_rx_threshold = xgbe_config_rx_threshold;
	hw_if->config_tx_threshold = xgbe_config_tx_threshold;

	/* For Rx and Tx Store and Forward Mode config */
	hw_if->config_rsf_mode = xgbe_config_rsf_mode;
	hw_if->config_tsf_mode = xgbe_config_tsf_mode;

	/* For Tx DMA Operating on Second Frame config */
	hw_if->config_osp_mode = xgbe_config_osp_mode;

	/* For MMC statistics support */
	hw_if->tx_mmc_int = xgbe_tx_mmc_int;
	hw_if->rx_mmc_int = xgbe_rx_mmc_int;
	hw_if->read_mmc_stats = xgbe_read_mmc_stats;

	/* For PTP config */
	hw_if->config_tstamp = xgbe_config_tstamp;
	hw_if->update_tstamp_addend = xgbe_update_tstamp_addend;
	hw_if->set_tstamp_time = xgbe_set_tstamp_time;
	hw_if->get_tstamp_time = xgbe_get_tstamp_time;
	hw_if->get_tx_tstamp = xgbe_get_tx_tstamp;

	/* For Data Center Bridging config */
	hw_if->config_tc = xgbe_config_tc;
	hw_if->config_dcb_tc = xgbe_config_dcb_tc;
	hw_if->config_dcb_pfc = xgbe_config_dcb_pfc;

	/* For Receive Side Scaling */
	hw_if->enable_rss = xgbe_enable_rss;
	hw_if->disable_rss = xgbe_disable_rss;
	hw_if->set_rss_hash_key = xgbe_set_rss_hash_key;
	hw_if->set_rss_lookup_table = xgbe_set_rss_lookup_table;

	/* For ECC */
	hw_if->disable_ecc_ded = xgbe_disable_ecc_ded;
	hw_if->disable_ecc_sec = xgbe_disable_ecc_sec;

	DBGPR("<--xgbe_init_function_ptrs\n");
}
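
/* Illustrative sketch only (not part of the original driver): the ops
 * installed above are consumed through pdata->hw_if by the rest of the
 * driver, roughly along these lines during device bring-up:
 *
 *	struct xgbe_hw_if *hw_if = &pdata->hw_if;
 *
 *	hw_if->exit(pdata);		// software reset of the device
 *	hw_if->init(pdata);		// program the DMA, MTL and MAC blocks
 *	hw_if->enable_tx(pdata);
 *	hw_if->enable_rx(pdata);
 */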