/*
 * AMD 10Gb Ethernet driver
 *
 * This file is available to you under your choice of the following two
 * licenses:
 *
 * License 1: GPLv2
 *
 * Copyright (c) 2014 Advanced Micro Devices, Inc.
 *
 * This file is free software; you may copy, redistribute and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 2 of the License, or (at
 * your option) any later version.
 *
 * This file is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 *
 * This file incorporates work covered by the following copyright and
 * permission notice:
 *     The Synopsys DWC ETHER XGMAC Software Driver and documentation
 *     (hereinafter "Software") is an unsupported proprietary work of Synopsys,
 *     Inc. unless otherwise expressly agreed to in writing between Synopsys
 *     and you.
 *
 *     The Software IS NOT an item of Licensed Software or Licensed Product
 *     under any End User Software License Agreement or Agreement for Licensed
 *     Product with Synopsys or any supplement thereto. Permission is hereby
 *     granted, free of charge, to any person obtaining a copy of this software
 *     annotated with this license and the Software, to deal in the Software
 *     without restriction, including without limitation the rights to use,
 *     copy, modify, merge, publish, distribute, sublicense, and/or sell copies
 *     of the Software, and to permit persons to whom the Software is furnished
 *     to do so, subject to the following conditions:
 *
 *     The above copyright notice and this permission notice shall be included
 *     in all copies or substantial portions of the Software.
 *
 *     THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
 *     BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 *     TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
 *     PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
 *     BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *     CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *     SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 *     INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 *     CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 *     ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 *     THE POSSIBILITY OF SUCH DAMAGE.
 *
 *
 * License 2: Modified BSD
 *
 * Copyright (c) 2014 Advanced Micro Devices, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in the
 *       documentation and/or other materials provided with the distribution.
 *     * Neither the name of Advanced Micro Devices, Inc. nor the
 *       names of its contributors may be used to endorse or promote products
 *       derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * This file incorporates work covered by the following copyright and
 * permission notice:
 *     The Synopsys DWC ETHER XGMAC Software Driver and documentation
 *     (hereinafter "Software") is an unsupported proprietary work of Synopsys,
 *     Inc. unless otherwise expressly agreed to in writing between Synopsys
 *     and you.
 *
 *     The Software IS NOT an item of Licensed Software or Licensed Product
 *     under any End User Software License Agreement or Agreement for Licensed
 *     Product with Synopsys or any supplement thereto. Permission is hereby
 *     granted, free of charge, to any person obtaining a copy of this software
 *     annotated with this license and the Software, to deal in the Software
 *     without restriction, including without limitation the rights to use,
 *     copy, modify, merge, publish, distribute, sublicense, and/or sell copies
 *     of the Software, and to permit persons to whom the Software is furnished
 *     to do so, subject to the following conditions:
 *
 *     The above copyright notice and this permission notice shall be included
 *     in all copies or substantial portions of the Software.
 *
 *     THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
 *     BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 *     TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
 *     PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
 *     BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *     CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *     SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 *     INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 *     CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 *     ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 *     THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/phy.h>
#include <linux/mdio.h>
#include <linux/clk.h>
#include <linux/bitrev.h>
#include <linux/crc32.h>

#include "xgbe.h"
#include "xgbe-common.h"

static unsigned int xgbe_usec_to_riwt(struct xgbe_prv_data *pdata,
				      unsigned int usec)
{
	unsigned long rate;
	unsigned int ret;

	DBGPR("-->xgbe_usec_to_riwt\n");

	rate = pdata->sysclk_rate;

	/*
	 * Convert the input usec value to the watchdog timer value. Each
	 * watchdog timer value is equivalent to 256 clock cycles.
	 * Calculate the required value as:
	 *   ( usec * ( system_clock_hz / 10^6 ) ) / 256
	 */
	ret = (usec * (rate / 1000000)) / 256;

	DBGPR("<--xgbe_usec_to_riwt\n");

	return ret;
}

static unsigned int xgbe_riwt_to_usec(struct xgbe_prv_data *pdata,
				      unsigned int riwt)
{
	unsigned long rate;
	unsigned int ret;

	DBGPR("-->xgbe_riwt_to_usec\n");

	rate = pdata->sysclk_rate;

	/*
	 * Convert the input watchdog timer value to the usec value. Each
	 * watchdog timer value is equivalent to 256 clock cycles.
	 * Calculate the required value as:
	 *   ( riwt * 256 ) / ( system_clock_hz / 10^6 )
	 */
	ret = (riwt * 256) / (rate / 1000000);

	DBGPR("<--xgbe_riwt_to_usec\n");

	return ret;
}

static int xgbe_config_pblx8(struct xgbe_prv_data *pdata)
{
	struct xgbe_channel *channel;
	unsigned int i;

	channel = pdata->channel;
	for (i = 0; i < pdata->channel_count; i++, channel++)
		XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_CR, PBLX8,
				       pdata->pblx8);

	return 0;
}

static int xgbe_get_tx_pbl_val(struct xgbe_prv_data *pdata)
{
	return XGMAC_DMA_IOREAD_BITS(pdata->channel, DMA_CH_TCR, PBL);
}

static int xgbe_config_tx_pbl_val(struct xgbe_prv_data *pdata)
{
	struct xgbe_channel *channel;
	unsigned int i;

	channel = pdata->channel;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		if (!channel->tx_ring)
			break;

		XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_TCR, PBL,
				       pdata->tx_pbl);
	}

	return 0;
}

static int xgbe_get_rx_pbl_val(struct xgbe_prv_data *pdata)
{
	return XGMAC_DMA_IOREAD_BITS(pdata->channel, DMA_CH_RCR, PBL);
}

static int xgbe_config_rx_pbl_val(struct xgbe_prv_data *pdata)
{
	struct xgbe_channel *channel;
	unsigned int i;

	channel = pdata->channel;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		if (!channel->rx_ring)
			break;

		XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_RCR, PBL,
				       pdata->rx_pbl);
	}

	return 0;
}

static int xgbe_config_osp_mode(struct xgbe_prv_data *pdata)
{
	struct xgbe_channel *channel;
	unsigned int i;

	channel = pdata->channel;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		if (!channel->tx_ring)
			break;

		XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_TCR, OSP,
				       pdata->tx_osp_mode);
	}

	return 0;
}

static int xgbe_config_rsf_mode(struct xgbe_prv_data *pdata, unsigned int val)
{
	unsigned int i;

	for (i = 0; i < pdata->rx_q_count; i++)
		XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RSF, val);

	return 0;
}

static int xgbe_config_tsf_mode(struct xgbe_prv_data *pdata, unsigned int val)
{
	unsigned int i;

	for (i = 0; i < pdata->tx_q_count; i++)
		XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TSF, val);

	return 0;
}

static int xgbe_config_rx_threshold(struct xgbe_prv_data *pdata,
				    unsigned int val)
{
	unsigned int i;

	for (i = 0; i < pdata->rx_q_count; i++)
		XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RTC, val);

	return 0;
}

static int xgbe_config_tx_threshold(struct xgbe_prv_data *pdata,
				    unsigned int val)
{
	unsigned int i;

	for (i = 0; i < pdata->tx_q_count; i++)
		XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TTC, val);

	return 0;
}

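/* Illustrative note (not part of the original sources): the per-channel Rx
 * interrupt watchdog value programmed below is presumably produced by
 * xgbe_usec_to_riwt() above. For example, assuming a 250 MHz system clock,
 * a 30 usec coalesce setting maps to (30 * 250) / 256 = ~29 watchdog units;
 * xgbe_riwt_to_usec() performs the inverse conversion.
 */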
static int xgbe_config_rx_coalesce(struct xgbe_prv_data *pdata)
{
	struct xgbe_channel *channel;
	unsigned int i;

	channel = pdata->channel;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		if (!channel->rx_ring)
			break;

		XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_RIWT, RWT,
				       pdata->rx_riwt);
	}

	return 0;
}

static int xgbe_config_tx_coalesce(struct xgbe_prv_data *pdata)
{
	return 0;
}

static void xgbe_config_rx_buffer_size(struct xgbe_prv_data *pdata)
{
	struct xgbe_channel *channel;
	unsigned int i;

	channel = pdata->channel;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		if (!channel->rx_ring)
			break;

		XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_RCR, RBSZ,
				       pdata->rx_buf_size);
	}
}

static void xgbe_config_tso_mode(struct xgbe_prv_data *pdata)
{
	struct xgbe_channel *channel;
	unsigned int i;

	channel = pdata->channel;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		if (!channel->tx_ring)
			break;

		XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_TCR, TSE, 1);
	}
}

static void xgbe_config_sph_mode(struct xgbe_prv_data *pdata)
{
	struct xgbe_channel *channel;
	unsigned int i;

	channel = pdata->channel;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		if (!channel->rx_ring)
			break;

		XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_CR, SPH, 1);
	}

	XGMAC_IOWRITE_BITS(pdata, MAC_RCR, HDSMS, XGBE_SPH_HDSMS_SIZE);
}

static int xgbe_write_rss_reg(struct xgbe_prv_data *pdata, unsigned int type,
			      unsigned int index, unsigned int val)
{
	unsigned int wait;
	int ret = 0;

	mutex_lock(&pdata->rss_mutex);

	if (XGMAC_IOREAD_BITS(pdata, MAC_RSSAR, OB)) {
		ret = -EBUSY;
		goto unlock;
	}

	XGMAC_IOWRITE(pdata, MAC_RSSDR, val);

	XGMAC_IOWRITE_BITS(pdata, MAC_RSSAR, RSSIA, index);
	XGMAC_IOWRITE_BITS(pdata, MAC_RSSAR, ADDRT, type);
	XGMAC_IOWRITE_BITS(pdata, MAC_RSSAR, CT, 0);
	XGMAC_IOWRITE_BITS(pdata, MAC_RSSAR, OB, 1);

	wait = 1000;
	while (wait--) {
		if (!XGMAC_IOREAD_BITS(pdata, MAC_RSSAR, OB))
			goto unlock;

		usleep_range(1000, 1500);
	}

	ret = -EBUSY;

unlock:
	mutex_unlock(&pdata->rss_mutex);

	return ret;
}

static int xgbe_write_rss_hash_key(struct xgbe_prv_data *pdata)
{
	unsigned int key_regs = sizeof(pdata->rss_key) / sizeof(u32);
	unsigned int *key = (unsigned int *)&pdata->rss_key;
	int ret;

	while (key_regs--) {
		ret = xgbe_write_rss_reg(pdata, XGBE_RSS_HASH_KEY_TYPE,
					 key_regs, *key++);
		if (ret)
			return ret;
	}

	return 0;
}

static int xgbe_write_rss_lookup_table(struct xgbe_prv_data *pdata)
{
	unsigned int i;
	int ret;

	for (i = 0; i < ARRAY_SIZE(pdata->rss_table); i++) {
		ret = xgbe_write_rss_reg(pdata,
					 XGBE_RSS_LOOKUP_TABLE_TYPE, i,
					 pdata->rss_table[i]);
		if (ret)
			return ret;
	}

	return 0;
}

static int xgbe_set_rss_hash_key(struct xgbe_prv_data *pdata, const u8 *key)
{
	memcpy(pdata->rss_key, key, sizeof(pdata->rss_key));

	return xgbe_write_rss_hash_key(pdata);
}

static int xgbe_set_rss_lookup_table(struct xgbe_prv_data *pdata,
				     const u32 *table)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(pdata->rss_table); i++)
		XGMAC_SET_BITS(pdata->rss_table[i], MAC_RSSDR, DMCH, table[i]);

	return xgbe_write_rss_lookup_table(pdata);
}

static int xgbe_enable_rss(struct xgbe_prv_data *pdata)
{
	int ret;

	if (!pdata->hw_feat.rss)
		return -EOPNOTSUPP;

	/* Program the hash key */
	ret = xgbe_write_rss_hash_key(pdata);
	if (ret)
		return ret;

	/* Program the lookup table */
	ret = xgbe_write_rss_lookup_table(pdata);
	if (ret)
		return ret;

	/* Set the RSS options */
	XGMAC_IOWRITE(pdata, MAC_RSSCR, pdata->rss_options);

	/* Enable RSS */
	XGMAC_IOWRITE_BITS(pdata, MAC_RSSCR, RSSE, 1);

	return 0;
}

static int xgbe_disable_rss(struct xgbe_prv_data *pdata)
{
	if (!pdata->hw_feat.rss)
		return -EOPNOTSUPP;

	XGMAC_IOWRITE_BITS(pdata, MAC_RSSCR, RSSE, 0);

	return 0;
}

static void xgbe_config_rss(struct xgbe_prv_data *pdata)
{
	int ret;

	if (!pdata->hw_feat.rss)
		return;

	if (pdata->netdev->features & NETIF_F_RXHASH)
		ret = xgbe_enable_rss(pdata);
	else
		ret = xgbe_disable_rss(pdata);

	if (ret)
		netdev_err(pdata->netdev,
			   "error configuring RSS, RSS disabled\n");
}

static int xgbe_disable_tx_flow_control(struct xgbe_prv_data *pdata)
{
	unsigned int max_q_count, q_count;
	unsigned int reg, reg_val;
	unsigned int i;

	/* Clear MTL flow control */
	for (i = 0; i < pdata->rx_q_count; i++)
		XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, EHFC, 0);

	/* Clear MAC flow control */
	max_q_count = XGMAC_MAX_FLOW_CONTROL_QUEUES;
	q_count = min_t(unsigned int, pdata->tx_q_count, max_q_count);
	reg = MAC_Q0TFCR;
	for (i = 0; i < q_count; i++) {
		reg_val = XGMAC_IOREAD(pdata, reg);
		XGMAC_SET_BITS(reg_val, MAC_Q0TFCR, TFE, 0);
		XGMAC_IOWRITE(pdata, reg, reg_val);

		reg += MAC_QTFCR_INC;
	}

	return 0;
}

static int xgbe_enable_tx_flow_control(struct xgbe_prv_data *pdata)
{
	unsigned int max_q_count, q_count;
	unsigned int reg, reg_val;
	unsigned int i;

	/* Set MTL flow control */
	for (i = 0; i < pdata->rx_q_count; i++)
		XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, EHFC, 1);

	/* Set MAC flow control */
	max_q_count = XGMAC_MAX_FLOW_CONTROL_QUEUES;
	q_count = min_t(unsigned int, pdata->tx_q_count, max_q_count);
	reg = MAC_Q0TFCR;
	for (i = 0; i < q_count; i++) {
		reg_val = XGMAC_IOREAD(pdata, reg);

		/* Enable transmit flow control */
		XGMAC_SET_BITS(reg_val, MAC_Q0TFCR, TFE, 1);
		/* Set pause time */
		XGMAC_SET_BITS(reg_val, MAC_Q0TFCR, PT, 0xffff);

		XGMAC_IOWRITE(pdata, reg, reg_val);

		reg += MAC_QTFCR_INC;
	}

	return 0;
}

static int xgbe_disable_rx_flow_control(struct xgbe_prv_data *pdata)
{
	XGMAC_IOWRITE_BITS(pdata, MAC_RFCR, RFE, 0);

	return 0;
}

static int xgbe_enable_rx_flow_control(struct xgbe_prv_data *pdata)
{
	XGMAC_IOWRITE_BITS(pdata, MAC_RFCR, RFE, 1);

	return 0;
}

static int xgbe_config_tx_flow_control(struct xgbe_prv_data *pdata)
{
	struct ieee_pfc *pfc = pdata->pfc;

	if (pdata->tx_pause || (pfc && pfc->pfc_en))
		xgbe_enable_tx_flow_control(pdata);
	else
		xgbe_disable_tx_flow_control(pdata);

	return 0;
}

static int xgbe_config_rx_flow_control(struct xgbe_prv_data *pdata)
{
	struct ieee_pfc *pfc = pdata->pfc;

	if (pdata->rx_pause || (pfc && pfc->pfc_en))
		xgbe_enable_rx_flow_control(pdata);
	else
		xgbe_disable_rx_flow_control(pdata);

	return 0;
}

static void xgbe_config_flow_control(struct xgbe_prv_data *pdata)
{
	struct ieee_pfc *pfc = pdata->pfc;

	xgbe_config_tx_flow_control(pdata);
	xgbe_config_rx_flow_control(pdata);

	XGMAC_IOWRITE_BITS(pdata, MAC_RFCR, PFCE,
			   (pfc && pfc->pfc_en) ? 1 : 0);
}

static void xgbe_enable_dma_interrupts(struct xgbe_prv_data *pdata)
{
	struct xgbe_channel *channel;
	unsigned int dma_ch_isr, dma_ch_ier;
	unsigned int i;

	channel = pdata->channel;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		/* Clear all the interrupts which are set */
		dma_ch_isr = XGMAC_DMA_IOREAD(channel, DMA_CH_SR);
		XGMAC_DMA_IOWRITE(channel, DMA_CH_SR, dma_ch_isr);

		/* Clear all interrupt enable bits */
		dma_ch_ier = 0;

		/* Enable the following interrupts
		 *   NIE  - Normal Interrupt Summary Enable
		 *   AIE  - Abnormal Interrupt Summary Enable
		 *   FBEE - Fatal Bus Error Enable
		 */
		XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, NIE, 1);
		XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, AIE, 1);
		XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, FBEE, 1);

		if (channel->tx_ring) {
			/* Enable the following Tx interrupts
			 *   TIE - Transmit Interrupt Enable (unless using
			 *         per channel interrupts)
			 */
			if (!pdata->per_channel_irq)
				XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, TIE, 1);
		}
		if (channel->rx_ring) {
			/* Enable the following Rx interrupts
			 *   RBUE - Receive Buffer Unavailable Enable
			 *   RIE  - Receive Interrupt Enable (unless using
			 *          per channel interrupts)
			 */
			XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RBUE, 1);
			if (!pdata->per_channel_irq)
				XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RIE, 1);
		}

		XGMAC_DMA_IOWRITE(channel, DMA_CH_IER, dma_ch_ier);
	}
}

static void xgbe_enable_mtl_interrupts(struct xgbe_prv_data *pdata)
{
	unsigned int mtl_q_isr;
	unsigned int q_count, i;

	q_count = max(pdata->hw_feat.tx_q_cnt, pdata->hw_feat.rx_q_cnt);
	for (i = 0; i < q_count; i++) {
		/* Clear all the interrupts which are set */
		mtl_q_isr = XGMAC_MTL_IOREAD(pdata, i, MTL_Q_ISR);
		XGMAC_MTL_IOWRITE(pdata, i, MTL_Q_ISR, mtl_q_isr);

		/* No MTL interrupts to be enabled */
		XGMAC_MTL_IOWRITE(pdata, i, MTL_Q_IER, 0);
	}
}

static void xgbe_enable_mac_interrupts(struct xgbe_prv_data *pdata)
{
	unsigned int mac_ier = 0;

	/* Enable Timestamp interrupt */
	XGMAC_SET_BITS(mac_ier, MAC_IER, TSIE, 1);

	XGMAC_IOWRITE(pdata, MAC_IER, mac_ier);

	/* Enable all counter interrupts */
	XGMAC_IOWRITE_BITS(pdata, MMC_RIER, ALL_INTERRUPTS, 0xffffffff);
	XGMAC_IOWRITE_BITS(pdata, MMC_TIER, ALL_INTERRUPTS, 0xffffffff);
}

static int xgbe_set_gmii_speed(struct xgbe_prv_data *pdata)
{
	if (XGMAC_IOREAD_BITS(pdata, MAC_TCR, SS) == 0x3)
		return 0;

	XGMAC_IOWRITE_BITS(pdata, MAC_TCR, SS, 0x3);

	return 0;
}

static int xgbe_set_gmii_2500_speed(struct xgbe_prv_data *pdata)
{
	if (XGMAC_IOREAD_BITS(pdata, MAC_TCR, SS) == 0x2)
		return 0;

	XGMAC_IOWRITE_BITS(pdata, MAC_TCR, SS, 0x2);

	return 0;
}

static int xgbe_set_xgmii_speed(struct xgbe_prv_data *pdata)
{
	if (XGMAC_IOREAD_BITS(pdata, MAC_TCR, SS) == 0)
		return 0;

	XGMAC_IOWRITE_BITS(pdata, MAC_TCR, SS, 0);

	return 0;
}

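/* Illustrative note (not from the original sources): the three helpers above
 * program the MAC_TCR speed-select field; per their usage here, SS = 0
 * selects XGMII (10 Gbps) operation, SS = 0x2 selects 2.5 Gbps and SS = 0x3
 * selects 1 Gbps.
 */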
static int xgbe_set_promiscuous_mode(struct xgbe_prv_data *pdata,
				     unsigned int enable)
{
	unsigned int val = enable ? 1 : 0;

	if (XGMAC_IOREAD_BITS(pdata, MAC_PFR, PR) == val)
		return 0;

	DBGPR(" %s promiscuous mode\n", enable ? "entering" : "leaving");
	XGMAC_IOWRITE_BITS(pdata, MAC_PFR, PR, val);

	return 0;
}

static int xgbe_set_all_multicast_mode(struct xgbe_prv_data *pdata,
				       unsigned int enable)
{
	unsigned int val = enable ? 1 : 0;

	if (XGMAC_IOREAD_BITS(pdata, MAC_PFR, PM) == val)
		return 0;

	DBGPR(" %s allmulti mode\n", enable ? "entering" : "leaving");
	XGMAC_IOWRITE_BITS(pdata, MAC_PFR, PM, val);

	return 0;
}

static void xgbe_set_mac_reg(struct xgbe_prv_data *pdata,
			     struct netdev_hw_addr *ha, unsigned int *mac_reg)
{
	unsigned int mac_addr_hi, mac_addr_lo;
	u8 *mac_addr;

	mac_addr_lo = 0;
	mac_addr_hi = 0;

	if (ha) {
		mac_addr = (u8 *)&mac_addr_lo;
		mac_addr[0] = ha->addr[0];
		mac_addr[1] = ha->addr[1];
		mac_addr[2] = ha->addr[2];
		mac_addr[3] = ha->addr[3];
		mac_addr = (u8 *)&mac_addr_hi;
		mac_addr[0] = ha->addr[4];
		mac_addr[1] = ha->addr[5];

		DBGPR(" adding mac address %pM at 0x%04x\n", ha->addr,
		      *mac_reg);

		XGMAC_SET_BITS(mac_addr_hi, MAC_MACA1HR, AE, 1);
	}

	XGMAC_IOWRITE(pdata, *mac_reg, mac_addr_hi);
	*mac_reg += MAC_MACA_INC;
	XGMAC_IOWRITE(pdata, *mac_reg, mac_addr_lo);
	*mac_reg += MAC_MACA_INC;
}

static void xgbe_set_mac_addn_addrs(struct xgbe_prv_data *pdata)
{
	struct net_device *netdev = pdata->netdev;
	struct netdev_hw_addr *ha;
	unsigned int mac_reg;
	unsigned int addn_macs;

	mac_reg = MAC_MACA1HR;
	addn_macs = pdata->hw_feat.addn_mac;

	if (netdev_uc_count(netdev) > addn_macs) {
		xgbe_set_promiscuous_mode(pdata, 1);
	} else {
		netdev_for_each_uc_addr(ha, netdev) {
			xgbe_set_mac_reg(pdata, ha, &mac_reg);
			addn_macs--;
		}

		if (netdev_mc_count(netdev) > addn_macs) {
			xgbe_set_all_multicast_mode(pdata, 1);
		} else {
			netdev_for_each_mc_addr(ha, netdev) {
				xgbe_set_mac_reg(pdata, ha, &mac_reg);
				addn_macs--;
			}
		}
	}

	/* Clear remaining additional MAC address entries */
	while (addn_macs--)
		xgbe_set_mac_reg(pdata, NULL, &mac_reg);
}

static void xgbe_set_mac_hash_table(struct xgbe_prv_data *pdata)
{
	struct net_device *netdev = pdata->netdev;
	struct netdev_hw_addr *ha;
	unsigned int hash_reg;
	unsigned int hash_table_shift, hash_table_count;
	u32 hash_table[XGBE_MAC_HASH_TABLE_SIZE];
	u32 crc;
	unsigned int i;

	hash_table_shift = 26 - (pdata->hw_feat.hash_table_size >> 7);
	hash_table_count = pdata->hw_feat.hash_table_size / 32;
	memset(hash_table, 0, sizeof(hash_table));

	/* Build the MAC Hash Table register values */
	netdev_for_each_uc_addr(ha, netdev) {
		crc = bitrev32(~crc32_le(~0, ha->addr, ETH_ALEN));
		crc >>= hash_table_shift;
		hash_table[crc >> 5] |= (1 << (crc & 0x1f));
	}

	netdev_for_each_mc_addr(ha, netdev) {
		crc = bitrev32(~crc32_le(~0, ha->addr, ETH_ALEN));
		crc >>= hash_table_shift;
		hash_table[crc >> 5] |= (1 << (crc & 0x1f));
	}

	/* Set the MAC Hash Table registers */
	hash_reg = MAC_HTR0;
	for (i = 0; i < hash_table_count; i++) {
		XGMAC_IOWRITE(pdata, hash_reg, hash_table[i]);
		hash_reg += MAC_HTR_INC;
	}
}

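/* Illustrative note (not from the original sources): in
 * xgbe_set_mac_hash_table() above, a hardware hash table of 256 entries
 * gives hash_table_shift = 26 - (256 >> 7) = 24, so the top 8 bits of the
 * reversed CRC select one of 256 bins spread across eight MAC_HTRx
 * registers (hash_table_count = 256 / 32 = 8).
 */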
static int xgbe_add_mac_addresses(struct xgbe_prv_data *pdata)
{
	if (pdata->hw_feat.hash_table_size)
		xgbe_set_mac_hash_table(pdata);
	else
		xgbe_set_mac_addn_addrs(pdata);

	return 0;
}

static int xgbe_set_mac_address(struct xgbe_prv_data *pdata, u8 *addr)
{
	unsigned int mac_addr_hi, mac_addr_lo;

	mac_addr_hi = (addr[5] << 8) | (addr[4] << 0);
	mac_addr_lo = (addr[3] << 24) | (addr[2] << 16) |
		      (addr[1] << 8) | (addr[0] << 0);

	XGMAC_IOWRITE(pdata, MAC_MACA0HR, mac_addr_hi);
	XGMAC_IOWRITE(pdata, MAC_MACA0LR, mac_addr_lo);

	return 0;
}

static int xgbe_read_mmd_regs(struct xgbe_prv_data *pdata, int prtad,
			      int mmd_reg)
{
	unsigned int mmd_address;
	int mmd_data;

	if (mmd_reg & MII_ADDR_C45)
		mmd_address = mmd_reg & ~MII_ADDR_C45;
	else
		mmd_address = (pdata->mdio_mmd << 16) | (mmd_reg & 0xffff);

	/* The PCS registers are accessed using mmio. The underlying APB3
	 * management interface uses indirect addressing to access the MMD
	 * register sets. This requires accessing of the PCS register in two
	 * phases, an address phase and a data phase.
	 *
	 * The mmio interface is based on 32-bit offsets and values. All
	 * register offsets must therefore be adjusted by left shifting the
	 * offset 2 bits and reading 32 bits of data.
	 */
	mutex_lock(&pdata->xpcs_mutex);
	XPCS_IOWRITE(pdata, PCS_MMD_SELECT << 2, mmd_address >> 8);
	mmd_data = XPCS_IOREAD(pdata, (mmd_address & 0xff) << 2);
	mutex_unlock(&pdata->xpcs_mutex);

	return mmd_data;
}

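/* Illustrative note (not from the original sources): for a clause-45 access
 * to MDIO_MMD_PCS (device 3), register 0x0010, mmd_address becomes 0x30010;
 * the address phase writes 0x300 at the PCS_MMD_SELECT offset and the data
 * phase then accesses 32 bits at offset (0x10 << 2).
 */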
static void xgbe_write_mmd_regs(struct xgbe_prv_data *pdata, int prtad,
				int mmd_reg, int mmd_data)
{
	unsigned int mmd_address;

	if (mmd_reg & MII_ADDR_C45)
		mmd_address = mmd_reg & ~MII_ADDR_C45;
	else
		mmd_address = (pdata->mdio_mmd << 16) | (mmd_reg & 0xffff);

	/* If the PCS is changing modes, match the MAC speed to it */
	if (((mmd_address >> 16) == MDIO_MMD_PCS) &&
	    ((mmd_address & 0xffff) == MDIO_CTRL2)) {
		struct phy_device *phydev = pdata->phydev;

		if (mmd_data & MDIO_PCS_CTRL2_TYPE) {
			/* KX mode */
			if (phydev->supported & SUPPORTED_1000baseKX_Full)
				xgbe_set_gmii_speed(pdata);
			else
				xgbe_set_gmii_2500_speed(pdata);
		} else {
			/* KR mode */
			xgbe_set_xgmii_speed(pdata);
		}
	}

	/* The PCS registers are accessed using mmio. The underlying APB3
	 * management interface uses indirect addressing to access the MMD
	 * register sets. This requires accessing of the PCS register in two
	 * phases, an address phase and a data phase.
	 *
	 * The mmio interface is based on 32-bit offsets and values. All
	 * register offsets must therefore be adjusted by left shifting the
	 * offset 2 bits and writing 32 bits of data.
	 */
	mutex_lock(&pdata->xpcs_mutex);
	XPCS_IOWRITE(pdata, PCS_MMD_SELECT << 2, mmd_address >> 8);
	XPCS_IOWRITE(pdata, (mmd_address & 0xff) << 2, mmd_data);
	mutex_unlock(&pdata->xpcs_mutex);
}

static int xgbe_tx_complete(struct xgbe_ring_desc *rdesc)
{
	return !XGMAC_GET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, OWN);
}

static int xgbe_disable_rx_csum(struct xgbe_prv_data *pdata)
{
	XGMAC_IOWRITE_BITS(pdata, MAC_RCR, IPC, 0);

	return 0;
}

static int xgbe_enable_rx_csum(struct xgbe_prv_data *pdata)
{
	XGMAC_IOWRITE_BITS(pdata, MAC_RCR, IPC, 1);

	return 0;
}

static int xgbe_enable_rx_vlan_stripping(struct xgbe_prv_data *pdata)
{
	/* Put the VLAN tag in the Rx descriptor */
	XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, EVLRXS, 1);

	/* Don't check the VLAN type */
	XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, DOVLTC, 1);

	/* Check only C-TAG (0x8100) packets */
	XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, ERSVLM, 0);

	/* Don't consider an S-TAG (0x88A8) packet as a VLAN packet */
	XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, ESVL, 0);

	/* Enable VLAN tag stripping */
	XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, EVLS, 0x3);

	return 0;
}

static int xgbe_disable_rx_vlan_stripping(struct xgbe_prv_data *pdata)
{
	XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, EVLS, 0);

	return 0;
}

static int xgbe_enable_rx_vlan_filtering(struct xgbe_prv_data *pdata)
{
	/* Enable VLAN filtering */
	XGMAC_IOWRITE_BITS(pdata, MAC_PFR, VTFE, 1);

	/* Enable VLAN Hash Table filtering */
	XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, VTHM, 1);

	/* Disable VLAN tag inverse matching */
	XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, VTIM, 0);

	/* Only filter on the lower 12-bits of the VLAN tag */
	XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, ETV, 1);

	/* In order for the VLAN Hash Table filtering to be effective,
	 * the VLAN tag identifier in the VLAN Tag Register must not
	 * be zero. Set the VLAN tag identifier to "1" to enable the
	 * VLAN Hash Table filtering. This implies that a VLAN tag of
	 * 1 will always pass filtering.
	 */
	XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, VL, 1);

	return 0;
}

static int xgbe_disable_rx_vlan_filtering(struct xgbe_prv_data *pdata)
{
	/* Disable VLAN filtering */
	XGMAC_IOWRITE_BITS(pdata, MAC_PFR, VTFE, 0);

	return 0;
}

#ifndef CRCPOLY_LE
#define CRCPOLY_LE 0xedb88320
#endif
static u32 xgbe_vid_crc32_le(__le16 vid_le)
{
	u32 poly = CRCPOLY_LE;
	u32 crc = ~0;
	u32 temp = 0;
	unsigned char *data = (unsigned char *)&vid_le;
	unsigned char data_byte = 0;
	int i, bits;

	bits = get_bitmask_order(VLAN_VID_MASK);
	for (i = 0; i < bits; i++) {
		if ((i % 8) == 0)
			data_byte = data[i / 8];

		temp = ((crc & 1) ^ data_byte) & 1;
		crc >>= 1;
		data_byte >>= 1;

		if (temp)
			crc ^= poly;
	}

	return crc;
}

static int xgbe_update_vlan_hash_table(struct xgbe_prv_data *pdata)
{
	u32 crc;
	u16 vid;
	__le16 vid_le;
	u16 vlan_hash_table = 0;

	/* Generate the VLAN Hash Table value */
	for_each_set_bit(vid, pdata->active_vlans, VLAN_N_VID) {
		/* Get the CRC32 value of the VLAN ID */
		vid_le = cpu_to_le16(vid);
		crc = bitrev32(~xgbe_vid_crc32_le(vid_le)) >> 28;

		vlan_hash_table |= (1 << crc);
	}

	/* Set the VLAN Hash Table filtering register */
	XGMAC_IOWRITE_BITS(pdata, MAC_VLANHTR, VLHT, vlan_hash_table);

	return 0;
}

static void xgbe_tx_desc_reset(struct xgbe_ring_data *rdata)
{
	struct xgbe_ring_desc *rdesc = rdata->rdesc;

	/* Reset the Tx descriptor
	 *   Set buffer 1 (lo) address to zero
	 *   Set buffer 1 (hi) address to zero
	 *   Reset all other control bits (IC, TTSE, B2L & B1L)
	 *   Reset all other control bits (OWN, CTXT, FD, LD, CPC, CIC, etc)
	 */
	rdesc->desc0 = 0;
	rdesc->desc1 = 0;
	rdesc->desc2 = 0;
	rdesc->desc3 = 0;

	/* Make sure ownership is written to the descriptor */
	wmb();
}

static void xgbe_tx_desc_init(struct xgbe_channel *channel)
{
	struct xgbe_ring *ring = channel->tx_ring;
	struct xgbe_ring_data *rdata;
	int i;
	int start_index = ring->cur;

	DBGPR("-->tx_desc_init\n");

	/* Initialize all descriptors */
	for (i = 0; i < ring->rdesc_count; i++) {
		rdata = XGBE_GET_DESC_DATA(ring, i);

		/* Initialize Tx descriptor */
		xgbe_tx_desc_reset(rdata);
	}

	/* Update the total number of Tx descriptors */
	XGMAC_DMA_IOWRITE(channel, DMA_CH_TDRLR, ring->rdesc_count - 1);

	/* Update the starting address of descriptor ring */
	rdata = XGBE_GET_DESC_DATA(ring, start_index);
	XGMAC_DMA_IOWRITE(channel, DMA_CH_TDLR_HI,
			  upper_32_bits(rdata->rdesc_dma));
	XGMAC_DMA_IOWRITE(channel, DMA_CH_TDLR_LO,
			  lower_32_bits(rdata->rdesc_dma));

	DBGPR("<--tx_desc_init\n");
}

static void xgbe_rx_desc_reset(struct xgbe_ring_data *rdata)
{
	struct xgbe_ring_desc *rdesc = rdata->rdesc;

	/* Reset the Rx descriptor
	 *   Set buffer 1 (lo) address to header dma address (lo)
	 *   Set buffer 1 (hi) address to header dma address (hi)
	 *   Set buffer 2 (lo) address to buffer dma address (lo)
	 *   Set buffer 2 (hi) address to buffer dma address (hi) and
	 *     set control bits OWN and INTE
	 */
	rdesc->desc0 = cpu_to_le32(lower_32_bits(rdata->rx.hdr.dma));
	rdesc->desc1 = cpu_to_le32(upper_32_bits(rdata->rx.hdr.dma));
	rdesc->desc2 = cpu_to_le32(lower_32_bits(rdata->rx.buf.dma));
	rdesc->desc3 = cpu_to_le32(upper_32_bits(rdata->rx.buf.dma));

	XGMAC_SET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, INTE,
			  rdata->interrupt ? 1 : 0);

	/* Since the Rx DMA engine is likely running, make sure everything
	 * is written to the descriptor(s) before setting the OWN bit
	 * for the descriptor
	 */
	wmb();

	XGMAC_SET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, OWN, 1);

	/* Make sure ownership is written to the descriptor */
	wmb();
}

static void xgbe_rx_desc_init(struct xgbe_channel *channel)
{
	struct xgbe_prv_data *pdata = channel->pdata;
	struct xgbe_ring *ring = channel->rx_ring;
	struct xgbe_ring_data *rdata;
	unsigned int start_index = ring->cur;
	unsigned int rx_coalesce, rx_frames;
	unsigned int i;

	DBGPR("-->rx_desc_init\n");

	rx_coalesce = (pdata->rx_riwt || pdata->rx_frames) ? 1 : 0;
	rx_frames = pdata->rx_frames;

	/* Initialize all descriptors */
	for (i = 0; i < ring->rdesc_count; i++) {
		rdata = XGBE_GET_DESC_DATA(ring, i);

		/* Set interrupt on completion bit as appropriate */
		if (rx_coalesce && (!rx_frames || ((i + 1) % rx_frames)))
			rdata->interrupt = 0;
		else
			rdata->interrupt = 1;

		/* Initialize Rx descriptor */
		xgbe_rx_desc_reset(rdata);
	}

	/* Update the total number of Rx descriptors */
	XGMAC_DMA_IOWRITE(channel, DMA_CH_RDRLR, ring->rdesc_count - 1);

	/* Update the starting address of descriptor ring */
	rdata = XGBE_GET_DESC_DATA(ring, start_index);
	XGMAC_DMA_IOWRITE(channel, DMA_CH_RDLR_HI,
			  upper_32_bits(rdata->rdesc_dma));
	XGMAC_DMA_IOWRITE(channel, DMA_CH_RDLR_LO,
			  lower_32_bits(rdata->rdesc_dma));

	/* Update the Rx Descriptor Tail Pointer */
	rdata = XGBE_GET_DESC_DATA(ring, start_index + ring->rdesc_count - 1);
	XGMAC_DMA_IOWRITE(channel, DMA_CH_RDTR_LO,
			  lower_32_bits(rdata->rdesc_dma));

	DBGPR("<--rx_desc_init\n");
}

static void xgbe_update_tstamp_addend(struct xgbe_prv_data *pdata,
				      unsigned int addend)
{
	/* Set the addend register value and tell the device */
	XGMAC_IOWRITE(pdata, MAC_TSAR, addend);
	XGMAC_IOWRITE_BITS(pdata, MAC_TSCR, TSADDREG, 1);

	/* Wait for addend update to complete */
	while (XGMAC_IOREAD_BITS(pdata, MAC_TSCR, TSADDREG))
		udelay(5);
}

static void xgbe_set_tstamp_time(struct xgbe_prv_data *pdata, unsigned int sec,
				 unsigned int nsec)
{
	/* Set the time values and tell the device */
	XGMAC_IOWRITE(pdata, MAC_STSUR, sec);
	XGMAC_IOWRITE(pdata, MAC_STNUR, nsec);
	XGMAC_IOWRITE_BITS(pdata, MAC_TSCR, TSINIT, 1);

	/* Wait for time update to complete */
	while (XGMAC_IOREAD_BITS(pdata, MAC_TSCR, TSINIT))
		udelay(5);
}

static u64 xgbe_get_tstamp_time(struct xgbe_prv_data *pdata)
{
	u64 nsec;

	nsec = XGMAC_IOREAD(pdata, MAC_STSR);
	nsec *= NSEC_PER_SEC;
	nsec += XGMAC_IOREAD(pdata, MAC_STNR);

	return nsec;
}

static u64 xgbe_get_tx_tstamp(struct xgbe_prv_data *pdata)
{
	unsigned int tx_snr;
	u64 nsec;

	tx_snr = XGMAC_IOREAD(pdata, MAC_TXSNR);
	if (XGMAC_GET_BITS(tx_snr, MAC_TXSNR, TXTSSTSMIS))
		return 0;

	nsec = XGMAC_IOREAD(pdata, MAC_TXSSR);
	nsec *= NSEC_PER_SEC;
	nsec += tx_snr;

	return nsec;
}

static void xgbe_get_rx_tstamp(struct xgbe_packet_data *packet,
			       struct xgbe_ring_desc *rdesc)
{
	u64 nsec;

	if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_CONTEXT_DESC3, TSA) &&
	    !XGMAC_GET_BITS_LE(rdesc->desc3, RX_CONTEXT_DESC3, TSD)) {
		nsec = le32_to_cpu(rdesc->desc1);
		nsec <<= 32;
		nsec |= le32_to_cpu(rdesc->desc0);
		if (nsec != 0xffffffffffffffffULL) {
			packet->rx_tstamp = nsec;
			XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
				       RX_TSTAMP, 1);
		}
	}
}

static int xgbe_config_tstamp(struct xgbe_prv_data *pdata,
			      unsigned int mac_tscr)
{
	/* Set one nano-second accuracy */
	XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSCTRLSSR, 1);

	/* Set fine timestamp update */
	XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSCFUPDT, 1);

	/* Overwrite earlier timestamps */
	XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TXTSSTSM, 1);

	XGMAC_IOWRITE(pdata, MAC_TSCR, mac_tscr);

	/* Exit if timestamping is not enabled */
	if (!XGMAC_GET_BITS(mac_tscr, MAC_TSCR, TSENA))
		return 0;

	/* Initialize time registers */
	XGMAC_IOWRITE_BITS(pdata, MAC_SSIR, SSINC, XGBE_TSTAMP_SSINC);
	XGMAC_IOWRITE_BITS(pdata, MAC_SSIR, SNSINC, XGBE_TSTAMP_SNSINC);
	xgbe_update_tstamp_addend(pdata, pdata->tstamp_addend);
	xgbe_set_tstamp_time(pdata, 0, 0);

	/* Initialize the timecounter */
	timecounter_init(&pdata->tstamp_tc, &pdata->tstamp_cc,
			 ktime_to_ns(ktime_get_real()));

	return 0;
}

static void xgbe_config_dcb_tc(struct xgbe_prv_data *pdata)
{
	struct ieee_ets *ets = pdata->ets;
	unsigned int total_weight, min_weight, weight;
	unsigned int i;

	if (!ets)
		return;

	/* Set Tx to deficit weighted round robin scheduling algorithm (when
	 * traffic class is using ETS algorithm)
	 */
	XGMAC_IOWRITE_BITS(pdata, MTL_OMR, ETSALG, MTL_ETSALG_DWRR);

	/* Set Traffic Class algorithms */
	total_weight = pdata->netdev->mtu * pdata->hw_feat.tc_cnt;
	min_weight = total_weight / 100;
	if (!min_weight)
		min_weight = 1;

	for (i = 0; i < pdata->hw_feat.tc_cnt; i++) {
		switch (ets->tc_tsa[i]) {
		case IEEE_8021QAZ_TSA_STRICT:
			DBGPR(" TC%u using SP\n", i);
			XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_ETSCR, TSA,
					       MTL_TSA_SP);
			break;
		case IEEE_8021QAZ_TSA_ETS:
			weight = total_weight * ets->tc_tx_bw[i] / 100;
			weight = clamp(weight, min_weight, total_weight);

			DBGPR(" TC%u using DWRR (weight %u)\n", i, weight);
			XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_ETSCR, TSA,
					       MTL_TSA_ETS);
			XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_QWR, QW,
					       weight);
			break;
		}
	}
}

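/* Illustrative note (not from the original sources): in xgbe_config_dcb_tc()
 * above, total_weight scales with the MTU; e.g. an MTU of 1500 with 4 traffic
 * classes gives total_weight = 6000 and min_weight = 60, so a class assigned
 * 25% ETS bandwidth is programmed with a quantum weight of
 * clamp(6000 * 25 / 100, 60, 6000) = 1500.
 */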
static void xgbe_config_dcb_pfc(struct xgbe_prv_data *pdata)
{
	struct ieee_pfc *pfc = pdata->pfc;
	struct ieee_ets *ets = pdata->ets;
	unsigned int mask, reg, reg_val;
	unsigned int tc, prio;

	if (!pfc || !ets)
		return;

	for (tc = 0; tc < pdata->hw_feat.tc_cnt; tc++) {
		mask = 0;
		for (prio = 0; prio < IEEE_8021QAZ_MAX_TCS; prio++) {
			if ((pfc->pfc_en & (1 << prio)) &&
			    (ets->prio_tc[prio] == tc))
				mask |= (1 << prio);
		}
		mask &= 0xff;

		DBGPR(" TC%u PFC mask=%#x\n", tc, mask);
		reg = MTL_TCPM0R + (MTL_TCPM_INC * (tc / MTL_TCPM_TC_PER_REG));
		reg_val = XGMAC_IOREAD(pdata, reg);

		reg_val &= ~(0xff << ((tc % MTL_TCPM_TC_PER_REG) << 3));
		reg_val |= (mask << ((tc % MTL_TCPM_TC_PER_REG) << 3));

		XGMAC_IOWRITE(pdata, reg, reg_val);
	}

	xgbe_config_flow_control(pdata);
}

static void xgbe_tx_start_xmit(struct xgbe_channel *channel,
			       struct xgbe_ring *ring)
{
	struct xgbe_prv_data *pdata = channel->pdata;
	struct xgbe_ring_data *rdata;

	/* Issue a poll command to Tx DMA by writing address
	 * of next immediate free descriptor */
	rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
	XGMAC_DMA_IOWRITE(channel, DMA_CH_TDTR_LO,
			  lower_32_bits(rdata->rdesc_dma));

	/* Start the Tx coalescing timer */
	if (pdata->tx_usecs && !channel->tx_timer_active) {
		channel->tx_timer_active = 1;
		hrtimer_start(&channel->tx_timer,
			      ktime_set(0, pdata->tx_usecs * NSEC_PER_USEC),
			      HRTIMER_MODE_REL);
	}

	ring->tx.xmit_more = 0;
}

static void xgbe_dev_xmit(struct xgbe_channel *channel)
{
	struct xgbe_prv_data *pdata = channel->pdata;
	struct xgbe_ring *ring = channel->tx_ring;
	struct xgbe_ring_data *rdata;
	struct xgbe_ring_desc *rdesc;
	struct xgbe_packet_data *packet = &ring->packet_data;
	unsigned int csum, tso, vlan;
	unsigned int tso_context, vlan_context;
	unsigned int tx_set_ic;
	int start_index = ring->cur;
	int cur_index = ring->cur;
	int i;

	DBGPR("-->xgbe_dev_xmit\n");

	csum = XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
			      CSUM_ENABLE);
	tso = XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
			     TSO_ENABLE);
	vlan = XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
			      VLAN_CTAG);

	if (tso && (packet->mss != ring->tx.cur_mss))
		tso_context = 1;
	else
		tso_context = 0;

	if (vlan && (packet->vlan_ctag != ring->tx.cur_vlan_ctag))
		vlan_context = 1;
	else
		vlan_context = 0;

	/* Determine if an interrupt should be generated for this Tx:
	 *   Interrupt:
	 *     - Tx frame count exceeds the frame count setting
	 *     - Addition of Tx frame count to the frame count since the
	 *       last interrupt was set exceeds the frame count setting
	 *   No interrupt:
	 *     - No frame count setting specified (ethtool -C ethX tx-frames 0)
	 *     - Addition of Tx frame count to the frame count since the
	 *       last interrupt was set does not exceed the frame count setting
	 */
	ring->coalesce_count += packet->tx_packets;
	if (!pdata->tx_frames)
		tx_set_ic = 0;
	else if (packet->tx_packets > pdata->tx_frames)
		tx_set_ic = 1;
	else if ((ring->coalesce_count % pdata->tx_frames) <
		 packet->tx_packets)
		tx_set_ic = 1;
	else
		tx_set_ic = 0;

	rdata = XGBE_GET_DESC_DATA(ring, cur_index);
	rdesc = rdata->rdesc;

	/* Create a context descriptor if this is a TSO packet */
	if (tso_context || vlan_context) {
		if (tso_context) {
			DBGPR(" TSO context descriptor, mss=%u\n",
			      packet->mss);

			/* Set the MSS size */
			XGMAC_SET_BITS_LE(rdesc->desc2, TX_CONTEXT_DESC2,
					  MSS, packet->mss);

			/* Mark it as a CONTEXT descriptor */
			XGMAC_SET_BITS_LE(rdesc->desc3, TX_CONTEXT_DESC3,
					  CTXT, 1);

			/* Indicate this descriptor contains the MSS */
			XGMAC_SET_BITS_LE(rdesc->desc3, TX_CONTEXT_DESC3,
					  TCMSSV, 1);

			ring->tx.cur_mss = packet->mss;
		}

		if (vlan_context) {
			DBGPR(" VLAN context descriptor, ctag=%u\n",
			      packet->vlan_ctag);

			/* Mark it as a CONTEXT descriptor */
			XGMAC_SET_BITS_LE(rdesc->desc3, TX_CONTEXT_DESC3,
					  CTXT, 1);

			/* Set the VLAN tag */
			XGMAC_SET_BITS_LE(rdesc->desc3, TX_CONTEXT_DESC3,
					  VT, packet->vlan_ctag);

			/* Indicate this descriptor contains the VLAN tag */
			XGMAC_SET_BITS_LE(rdesc->desc3, TX_CONTEXT_DESC3,
					  VLTV, 1);

			ring->tx.cur_vlan_ctag = packet->vlan_ctag;
		}

		cur_index++;
		rdata = XGBE_GET_DESC_DATA(ring, cur_index);
		rdesc = rdata->rdesc;
	}

	/* Update buffer address (for TSO this is the header) */
	rdesc->desc0 = cpu_to_le32(lower_32_bits(rdata->skb_dma));
	rdesc->desc1 = cpu_to_le32(upper_32_bits(rdata->skb_dma));

	/* Update the buffer length */
	XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, HL_B1L,
			  rdata->skb_dma_len);

	/* VLAN tag insertion check */
	if (vlan)
		XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, VTIR,
				  TX_NORMAL_DESC2_VLAN_INSERT);

	/* Timestamp enablement check */
	if (XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES, PTP))
		XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, TTSE, 1);

	/* Mark it as First Descriptor */
	XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, FD, 1);

	/* Mark it as a NORMAL descriptor */
	XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, CTXT, 0);

	/* Set OWN bit if not the first descriptor */
	if (cur_index != start_index)
		XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, OWN, 1);

	if (tso) {
		/* Enable TSO */
		XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, TSE, 1);
		XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, TCPPL,
				  packet->tcp_payload_len);
		XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, TCPHDRLEN,
				  packet->tcp_header_len / 4);
	} else {
		/* Enable CRC and Pad Insertion */
		XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, CPC, 0);

		/* Enable HW CSUM */
		if (csum)
			XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3,
					  CIC, 0x3);

		/* Set the total length to be transmitted */
		XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, FL,
				  packet->length);
	}

	for (i = cur_index - start_index + 1; i < packet->rdesc_count; i++) {
		cur_index++;
		rdata = XGBE_GET_DESC_DATA(ring, cur_index);
		rdesc = rdata->rdesc;

		/* Update buffer address */
		rdesc->desc0 = cpu_to_le32(lower_32_bits(rdata->skb_dma));
		rdesc->desc1 = cpu_to_le32(upper_32_bits(rdata->skb_dma));

		/* Update the buffer length */
		XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, HL_B1L,
				  rdata->skb_dma_len);

		/* Set OWN bit */
		XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, OWN, 1);

		/* Mark it as NORMAL descriptor */
		XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, CTXT, 0);

		/* Enable HW CSUM */
		if (csum)
			XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3,
					  CIC, 0x3);
	}

	/* Set LAST bit for the last descriptor */
	XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, LD, 1);

	/* Set IC bit based on Tx coalescing settings */
	if (tx_set_ic)
		XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, IC, 1);

	/* Save the Tx info to report back during cleanup */
	rdata->tx.packets = packet->tx_packets;
	rdata->tx.bytes = packet->tx_bytes;

	/* In case the Tx DMA engine is running, make sure everything
	 * is written to the descriptor(s) before setting the OWN bit
	 * for the first descriptor
	 */
	wmb();

	/* Set OWN bit for the first descriptor */
	rdata = XGBE_GET_DESC_DATA(ring, start_index);
	rdesc = rdata->rdesc;
	XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, OWN, 1);

#ifdef XGMAC_ENABLE_TX_DESC_DUMP
	xgbe_dump_tx_desc(ring, start_index, packet->rdesc_count, 1);
#endif

	/* Make sure ownership is written to the descriptor */
	wmb();

	ring->cur = cur_index + 1;
	if (!packet->skb->xmit_more ||
	    netif_xmit_stopped(netdev_get_tx_queue(pdata->netdev,
						   channel->queue_index)))
		xgbe_tx_start_xmit(channel, ring);
	else
		ring->tx.xmit_more = 1;

	DBGPR(" %s: descriptors %u to %u written\n",
	      channel->name, start_index & (ring->rdesc_count - 1),
	      (ring->cur - 1) & (ring->rdesc_count - 1));

	DBGPR("<--xgbe_dev_xmit\n");
}

static int xgbe_dev_read(struct xgbe_channel *channel)
{
	struct xgbe_ring *ring = channel->rx_ring;
	struct xgbe_ring_data *rdata;
	struct xgbe_ring_desc *rdesc;
	struct xgbe_packet_data *packet = &ring->packet_data;
	struct net_device *netdev = channel->pdata->netdev;
	unsigned int err, etlt, l34t;

	DBGPR("-->xgbe_dev_read: cur = %d\n", ring->cur);

	rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
	rdesc = rdata->rdesc;

	/* Check for data availability */
	if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, OWN))
		return 1;

	/* Make sure descriptor fields are read after reading the OWN bit */
	rmb();

#ifdef XGMAC_ENABLE_RX_DESC_DUMP
	xgbe_dump_rx_desc(ring, rdesc, ring->cur);
#endif

	if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, CTXT)) {
		/* Timestamp Context Descriptor */
		xgbe_get_rx_tstamp(packet, rdesc);

		XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
			       CONTEXT, 1);
		XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
			       CONTEXT_NEXT, 0);
		return 0;
	}

	/* Normal Descriptor, be sure Context Descriptor bit is off */
	XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, CONTEXT, 0);

	/* Indicate if a Context Descriptor is next */
	if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, CDA))
		XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
			       CONTEXT_NEXT, 1);

	/* Get the header length */
	if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, FD))
		rdata->rx.hdr_len = XGMAC_GET_BITS_LE(rdesc->desc2,
						      RX_NORMAL_DESC2, HL);

	/* Get the RSS hash */
	if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, RSV)) {
		XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
			       RSS_HASH, 1);

		packet->rss_hash = le32_to_cpu(rdesc->desc1);

		l34t = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, L34T);
		switch (l34t) {
		case RX_DESC3_L34T_IPV4_TCP:
		case RX_DESC3_L34T_IPV4_UDP:
		case RX_DESC3_L34T_IPV6_TCP:
		case RX_DESC3_L34T_IPV6_UDP:
			packet->rss_hash_type = PKT_HASH_TYPE_L4;
			break;
		default:
			packet->rss_hash_type = PKT_HASH_TYPE_L3;
		}
	}

	/* Get the packet length */
	rdata->rx.len = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, PL);

	if (!XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, LD)) {
		/* Not all the data has been transferred for this packet */
		XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
			       INCOMPLETE, 1);
		return 0;
	}

	/* This is the last of the data for this packet */
	XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
		       INCOMPLETE, 0);

	/* Set checksum done indicator as appropriate */
	if (channel->pdata->netdev->features & NETIF_F_RXCSUM)
		XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
			       CSUM_DONE, 1);

	/* Check for errors (only valid in last descriptor) */
	err = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, ES);
	etlt = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, ETLT);
	DBGPR(" err=%u, etlt=%#x\n", err, etlt);

	if (!err || !etlt) {
		/* No error if err is 0 or etlt is 0 */
		if ((etlt == 0x09) &&
		    (netdev->features & NETIF_F_HW_VLAN_CTAG_RX)) {
			XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
				       VLAN_CTAG, 1);
			packet->vlan_ctag = XGMAC_GET_BITS_LE(rdesc->desc0,
							      RX_NORMAL_DESC0,
							      OVT);
			DBGPR(" vlan-ctag=0x%04x\n", packet->vlan_ctag);
		}
	} else {
		if ((etlt == 0x05) || (etlt == 0x06))
			XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
				       CSUM_DONE, 0);
		else
			XGMAC_SET_BITS(packet->errors, RX_PACKET_ERRORS,
				       FRAME, 1);
	}

	DBGPR("<--xgbe_dev_read: %s - descriptor=%u (cur=%d)\n", channel->name,
	      ring->cur & (ring->rdesc_count - 1), ring->cur);

	return 0;
}

static int xgbe_is_context_desc(struct xgbe_ring_desc *rdesc)
{
	/* Rx and Tx share CTXT bit, so check TDES3.CTXT bit */
	return XGMAC_GET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, CTXT);
}

static int xgbe_is_last_desc(struct xgbe_ring_desc *rdesc)
{
	/* Rx and Tx share LD bit, so check TDES3.LD bit */
	return XGMAC_GET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, LD);
}

static int xgbe_enable_int(struct xgbe_channel *channel,
			   enum xgbe_int int_id)
{
	unsigned int dma_ch_ier;

	dma_ch_ier = XGMAC_DMA_IOREAD(channel, DMA_CH_IER);

	switch (int_id) {
	case XGMAC_INT_DMA_CH_SR_TI:
		XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, TIE, 1);
		break;
	case XGMAC_INT_DMA_CH_SR_TPS:
		XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, TXSE, 1);
		break;
	case XGMAC_INT_DMA_CH_SR_TBU:
		XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, TBUE, 1);
		break;
	case XGMAC_INT_DMA_CH_SR_RI:
		XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RIE, 1);
		break;
	case XGMAC_INT_DMA_CH_SR_RBU:
		XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RBUE, 1);
		break;
	case XGMAC_INT_DMA_CH_SR_RPS:
		XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RSE, 1);
		break;
	case XGMAC_INT_DMA_CH_SR_TI_RI:
		XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, TIE, 1);
		XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RIE, 1);
		break;
	case XGMAC_INT_DMA_CH_SR_FBE:
		XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, FBEE, 1);
		break;
	case XGMAC_INT_DMA_ALL:
		dma_ch_ier |= channel->saved_ier;
		break;
	default:
		return -1;
	}

	XGMAC_DMA_IOWRITE(channel, DMA_CH_IER, dma_ch_ier);

	return 0;
}

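/* Illustrative note (not from the original sources): XGMAC_INT_DMA_ALL is
 * intended to be used as a pair with xgbe_disable_int() below, which saves
 * the currently enabled bits in channel->saved_ier before masking them;
 * xgbe_enable_int() above then restores that saved set.
 */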
static int xgbe_disable_int(struct xgbe_channel *channel,
			    enum xgbe_int int_id)
{
	unsigned int dma_ch_ier;

	dma_ch_ier = XGMAC_DMA_IOREAD(channel, DMA_CH_IER);

	switch (int_id) {
	case XGMAC_INT_DMA_CH_SR_TI:
		XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, TIE, 0);
		break;
	case XGMAC_INT_DMA_CH_SR_TPS:
		XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, TXSE, 0);
		break;
	case XGMAC_INT_DMA_CH_SR_TBU:
		XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, TBUE, 0);
		break;
	case XGMAC_INT_DMA_CH_SR_RI:
		XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RIE, 0);
		break;
	case XGMAC_INT_DMA_CH_SR_RBU:
		XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RBUE, 0);
		break;
	case XGMAC_INT_DMA_CH_SR_RPS:
		XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RSE, 0);
		break;
	case XGMAC_INT_DMA_CH_SR_TI_RI:
		XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, TIE, 0);
		XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RIE, 0);
		break;
	case XGMAC_INT_DMA_CH_SR_FBE:
		XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, FBEE, 0);
		break;
	case XGMAC_INT_DMA_ALL:
		channel->saved_ier = dma_ch_ier & XGBE_DMA_INTERRUPT_MASK;
		dma_ch_ier &= ~XGBE_DMA_INTERRUPT_MASK;
		break;
	default:
		return -1;
	}

	XGMAC_DMA_IOWRITE(channel, DMA_CH_IER, dma_ch_ier);

	return 0;
}

static int xgbe_exit(struct xgbe_prv_data *pdata)
{
	unsigned int count = 2000;

	DBGPR("-->xgbe_exit\n");

	/* Issue a software reset */
	XGMAC_IOWRITE_BITS(pdata, DMA_MR, SWR, 1);
	usleep_range(10, 15);

	/* Poll Until Poll Condition */
	while (count-- && XGMAC_IOREAD_BITS(pdata, DMA_MR, SWR))
		usleep_range(500, 600);

	if (!count)
		return -EBUSY;

	DBGPR("<--xgbe_exit\n");

	return 0;
}

static int xgbe_flush_tx_queues(struct xgbe_prv_data *pdata)
{
	unsigned int i, count;

	if (XGMAC_GET_BITS(pdata->hw_feat.version, MAC_VR, SNPSVER) < 0x21)
		return 0;

	for (i = 0; i < pdata->tx_q_count; i++)
		XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, FTQ, 1);

	/* Poll Until Poll Condition */
	for (i = 0; i < pdata->tx_q_count; i++) {
		count = 2000;
		while (count-- && XGMAC_MTL_IOREAD_BITS(pdata, i,
							MTL_Q_TQOMR, FTQ))
			usleep_range(500, 600);

		if (!count)
			return -EBUSY;
	}

	return 0;
}

static void xgbe_config_dma_bus(struct xgbe_prv_data *pdata)
{
	/* Set enhanced addressing mode */
	XGMAC_IOWRITE_BITS(pdata, DMA_SBMR, EAME, 1);

	/* Set the System Bus mode */
	XGMAC_IOWRITE_BITS(pdata, DMA_SBMR, UNDEF, 1);
	XGMAC_IOWRITE_BITS(pdata, DMA_SBMR, BLEN_256, 1);
}

static void xgbe_config_dma_cache(struct xgbe_prv_data *pdata)
{
	unsigned int arcache, awcache;

	arcache = 0;
	XGMAC_SET_BITS(arcache, DMA_AXIARCR, DRC, pdata->arcache);
	XGMAC_SET_BITS(arcache, DMA_AXIARCR, DRD, pdata->axdomain);
	XGMAC_SET_BITS(arcache, DMA_AXIARCR, TEC, pdata->arcache);
	XGMAC_SET_BITS(arcache, DMA_AXIARCR, TED, pdata->axdomain);
	XGMAC_SET_BITS(arcache, DMA_AXIARCR, THC, pdata->arcache);
	XGMAC_SET_BITS(arcache, DMA_AXIARCR, THD, pdata->axdomain);
	XGMAC_IOWRITE(pdata, DMA_AXIARCR, arcache);

	awcache = 0;
	XGMAC_SET_BITS(awcache, DMA_AXIAWCR, DWC, pdata->awcache);
	XGMAC_SET_BITS(awcache, DMA_AXIAWCR, DWD, pdata->axdomain);
	XGMAC_SET_BITS(awcache, DMA_AXIAWCR, RPC, pdata->awcache);
	XGMAC_SET_BITS(awcache, DMA_AXIAWCR, RPD, pdata->axdomain);
	XGMAC_SET_BITS(awcache, DMA_AXIAWCR, RHC, pdata->awcache);
	XGMAC_SET_BITS(awcache, DMA_AXIAWCR, RHD, pdata->axdomain);
	XGMAC_SET_BITS(awcache, DMA_AXIAWCR, TDC, pdata->awcache);
	XGMAC_SET_BITS(awcache, DMA_AXIAWCR, TDD, pdata->axdomain);
	XGMAC_IOWRITE(pdata, DMA_AXIAWCR, awcache);
}

static void xgbe_config_mtl_mode(struct xgbe_prv_data *pdata)
{
	unsigned int i;

	/* Set Tx to weighted round robin scheduling algorithm */
	 * to weighted round robin scheduling algorithm
	 */
	XGMAC_IOWRITE_BITS(pdata, MTL_OMR, ETSALG, MTL_ETSALG_WRR);

	/* Set Tx traffic classes to use WRR algorithm with equal weights */
	for (i = 0; i < pdata->hw_feat.tc_cnt; i++) {
		XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_ETSCR, TSA,
				       MTL_TSA_ETS);
		XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_QWR, QW, 1);
	}

	/* Set Rx to strict priority algorithm */
	XGMAC_IOWRITE_BITS(pdata, MTL_OMR, RAA, MTL_RAA_SP);
}

static unsigned int xgbe_calculate_per_queue_fifo(unsigned int fifo_size,
						  unsigned int queue_count)
{
	unsigned int q_fifo_size = 0;
	enum xgbe_mtl_fifo_size p_fifo = XGMAC_MTL_FIFO_SIZE_256;

	/* Calculate Tx/Rx fifo share per queue */
	switch (fifo_size) {
	case 0:
		q_fifo_size = XGBE_FIFO_SIZE_B(128);
		break;
	case 1:
		q_fifo_size = XGBE_FIFO_SIZE_B(256);
		break;
	case 2:
		q_fifo_size = XGBE_FIFO_SIZE_B(512);
		break;
	case 3:
		q_fifo_size = XGBE_FIFO_SIZE_KB(1);
		break;
	case 4:
		q_fifo_size = XGBE_FIFO_SIZE_KB(2);
		break;
	case 5:
		q_fifo_size = XGBE_FIFO_SIZE_KB(4);
		break;
	case 6:
		q_fifo_size = XGBE_FIFO_SIZE_KB(8);
		break;
	case 7:
		q_fifo_size = XGBE_FIFO_SIZE_KB(16);
		break;
	case 8:
		q_fifo_size = XGBE_FIFO_SIZE_KB(32);
		break;
	case 9:
		q_fifo_size = XGBE_FIFO_SIZE_KB(64);
		break;
	case 10:
		q_fifo_size = XGBE_FIFO_SIZE_KB(128);
		break;
	case 11:
		q_fifo_size = XGBE_FIFO_SIZE_KB(256);
		break;
	}

	/* The configured value is not the actual amount of fifo RAM */
	q_fifo_size = min_t(unsigned int, XGBE_FIFO_MAX, q_fifo_size);

	q_fifo_size = q_fifo_size / queue_count;

	/* Set the queue fifo size programmable value */
	if (q_fifo_size >= XGBE_FIFO_SIZE_KB(256))
		p_fifo = XGMAC_MTL_FIFO_SIZE_256K;
	else if (q_fifo_size >= XGBE_FIFO_SIZE_KB(128))
		p_fifo = XGMAC_MTL_FIFO_SIZE_128K;
	else if (q_fifo_size >= XGBE_FIFO_SIZE_KB(64))
		p_fifo = XGMAC_MTL_FIFO_SIZE_64K;
	else if (q_fifo_size >= XGBE_FIFO_SIZE_KB(32))
		p_fifo = XGMAC_MTL_FIFO_SIZE_32K;
	else if (q_fifo_size >= XGBE_FIFO_SIZE_KB(16))
		p_fifo = XGMAC_MTL_FIFO_SIZE_16K;
	else if (q_fifo_size >= XGBE_FIFO_SIZE_KB(8))
		p_fifo = XGMAC_MTL_FIFO_SIZE_8K;
	else if (q_fifo_size >= XGBE_FIFO_SIZE_KB(4))
		p_fifo = XGMAC_MTL_FIFO_SIZE_4K;
	else if (q_fifo_size >= XGBE_FIFO_SIZE_KB(2))
		p_fifo = XGMAC_MTL_FIFO_SIZE_2K;
	else if (q_fifo_size >= XGBE_FIFO_SIZE_KB(1))
		p_fifo = XGMAC_MTL_FIFO_SIZE_1K;
	else if (q_fifo_size >= XGBE_FIFO_SIZE_B(512))
		p_fifo = XGMAC_MTL_FIFO_SIZE_512;
	else if (q_fifo_size >= XGBE_FIFO_SIZE_B(256))
		p_fifo = XGMAC_MTL_FIFO_SIZE_256;

	return p_fifo;
}

static void xgbe_config_tx_fifo_size(struct xgbe_prv_data *pdata)
{
	enum xgbe_mtl_fifo_size fifo_size;
	unsigned int i;

	fifo_size = xgbe_calculate_per_queue_fifo(pdata->hw_feat.tx_fifo_size,
						  pdata->tx_q_count);

	for (i = 0; i < pdata->tx_q_count; i++)
		XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TQS, fifo_size);

	netdev_notice(pdata->netdev, "%d Tx queues, %d byte fifo per queue\n",
		      pdata->tx_q_count, ((fifo_size + 1) * 256));
}

static void xgbe_config_rx_fifo_size(struct xgbe_prv_data *pdata)
{
	enum xgbe_mtl_fifo_size fifo_size;
	unsigned int i;

	fifo_size = xgbe_calculate_per_queue_fifo(pdata->hw_feat.rx_fifo_size,
						  pdata->rx_q_count);

	for (i = 0; i < pdata->rx_q_count; i++)
		XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RQS, fifo_size);

	netdev_notice(pdata->netdev, "%d Rx queues, %d byte fifo per queue\n",
		      pdata->rx_q_count, ((fifo_size + 1) * 256));
}

static void xgbe_config_queue_mapping(struct xgbe_prv_data *pdata)
{
	unsigned int qptc, qptc_extra, queue;
	unsigned int prio_queues;
	unsigned int ppq, ppq_extra, prio;
	unsigned int mask;
	unsigned int i, j, reg, reg_val;

	/* Map the MTL Tx Queues to Traffic Classes
	 * Note: Tx Queues >= Traffic Classes
	 */
	qptc = pdata->tx_q_count / pdata->hw_feat.tc_cnt;
	qptc_extra = pdata->tx_q_count % pdata->hw_feat.tc_cnt;

	for (i = 0, queue = 0; i < pdata->hw_feat.tc_cnt; i++) {
		for (j = 0; j < qptc; j++) {
			DBGPR(" TXq%u mapped to TC%u\n", queue, i);
			XGMAC_MTL_IOWRITE_BITS(pdata, queue, MTL_Q_TQOMR,
					       Q2TCMAP, i);
			pdata->q2tc_map[queue++] = i;
		}

		if (i < qptc_extra) {
			DBGPR(" TXq%u mapped to TC%u\n", queue, i);
			XGMAC_MTL_IOWRITE_BITS(pdata, queue, MTL_Q_TQOMR,
					       Q2TCMAP, i);
			pdata->q2tc_map[queue++] = i;
		}
	}

	/* Map the 8 VLAN priority values to available MTL Rx queues */
	prio_queues = min_t(unsigned int, IEEE_8021QAZ_MAX_TCS,
			    pdata->rx_q_count);
	ppq = IEEE_8021QAZ_MAX_TCS / prio_queues;
	ppq_extra = IEEE_8021QAZ_MAX_TCS % prio_queues;

	reg = MAC_RQC2R;
	reg_val = 0;
	for (i = 0, prio = 0; i < prio_queues;) {
		mask = 0;
		for (j = 0; j < ppq; j++) {
			DBGPR(" PRIO%u mapped to RXq%u\n", prio, i);
			mask |= (1 << prio);
			pdata->prio2q_map[prio++] = i;
		}

		if (i < ppq_extra) {
			DBGPR(" PRIO%u mapped to RXq%u\n", prio, i);
			mask |= (1 << prio);
			pdata->prio2q_map[prio++] = i;
		}

		reg_val |= (mask << ((i++ % MAC_RQC2_Q_PER_REG) << 3));

		if ((i % MAC_RQC2_Q_PER_REG) && (i != prio_queues))
			continue;

		XGMAC_IOWRITE(pdata, reg, reg_val);
		reg += MAC_RQC2_INC;
		reg_val = 0;
	}

	/* Select dynamic mapping of MTL Rx queue to DMA Rx channel */
	reg = MTL_RQDCM0R;
	reg_val = 0;
	for (i = 0; i < pdata->rx_q_count;) {
		reg_val |= (0x80 << ((i++ % MTL_RQDCM_Q_PER_REG) << 3));

		if ((i % MTL_RQDCM_Q_PER_REG) && (i != pdata->rx_q_count))
			continue;

		XGMAC_IOWRITE(pdata, reg, reg_val);

		reg += MTL_RQDCM_INC;
		reg_val = 0;
	}
}

static void xgbe_config_flow_control_threshold(struct xgbe_prv_data *pdata)
{
	unsigned int i;

	for (i = 0; i < pdata->rx_q_count; i++) {
		/* Activate flow control when less than 4k left in fifo */
		XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQFCR, RFA, 2);

		/* De-activate flow control when more than 6k left in fifo */
		XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQFCR, RFD, 4);
	}
}

static void xgbe_config_mac_address(struct xgbe_prv_data *pdata)
{
	xgbe_set_mac_address(pdata, pdata->netdev->dev_addr);

	/* Filtering is done using perfect filtering and hash filtering */
	if (pdata->hw_feat.hash_table_size) {
		XGMAC_IOWRITE_BITS(pdata, MAC_PFR, HPF, 1);
		XGMAC_IOWRITE_BITS(pdata, MAC_PFR, HUC, 1);
		XGMAC_IOWRITE_BITS(pdata, MAC_PFR, HMC, 1);
	}
}

static void xgbe_config_jumbo_enable(struct xgbe_prv_data
				     *pdata)
{
	unsigned int val;

	val = (pdata->netdev->mtu > XGMAC_STD_PACKET_MTU) ? 1 : 0;

	XGMAC_IOWRITE_BITS(pdata, MAC_RCR, JE, val);
}

static void xgbe_config_mac_speed(struct xgbe_prv_data *pdata)
{
	switch (pdata->phy_speed) {
	case SPEED_10000:
		xgbe_set_xgmii_speed(pdata);
		break;

	case SPEED_2500:
		xgbe_set_gmii_2500_speed(pdata);
		break;

	case SPEED_1000:
		xgbe_set_gmii_speed(pdata);
		break;
	}
}

static void xgbe_config_checksum_offload(struct xgbe_prv_data *pdata)
{
	if (pdata->netdev->features & NETIF_F_RXCSUM)
		xgbe_enable_rx_csum(pdata);
	else
		xgbe_disable_rx_csum(pdata);
}

static void xgbe_config_vlan_support(struct xgbe_prv_data *pdata)
{
	/* Indicate that VLAN Tx CTAGs come from context descriptors */
	XGMAC_IOWRITE_BITS(pdata, MAC_VLANIR, CSVL, 0);
	XGMAC_IOWRITE_BITS(pdata, MAC_VLANIR, VLTI, 1);

	/* Set the current VLAN Hash Table register value */
	xgbe_update_vlan_hash_table(pdata);

	if (pdata->netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER)
		xgbe_enable_rx_vlan_filtering(pdata);
	else
		xgbe_disable_rx_vlan_filtering(pdata);

	if (pdata->netdev->features & NETIF_F_HW_VLAN_CTAG_RX)
		xgbe_enable_rx_vlan_stripping(pdata);
	else
		xgbe_disable_rx_vlan_stripping(pdata);
}

static u64 xgbe_mmc_read(struct xgbe_prv_data *pdata, unsigned int reg_lo)
{
	bool read_hi;
	u64 val;

	switch (reg_lo) {
	/* These registers are always 64 bit */
	case MMC_TXOCTETCOUNT_GB_LO:
	case MMC_TXOCTETCOUNT_G_LO:
	case MMC_RXOCTETCOUNT_GB_LO:
	case MMC_RXOCTETCOUNT_G_LO:
		read_hi = true;
		break;

	default:
		read_hi = false;
	}

	val = XGMAC_IOREAD(pdata, reg_lo);

	if (read_hi)
		val |= ((u64)XGMAC_IOREAD(pdata, reg_lo + 4) << 32);

	return val;
}

static void xgbe_tx_mmc_int(struct xgbe_prv_data *pdata)
{
	struct xgbe_mmc_stats *stats = &pdata->mmc_stats;
	unsigned int mmc_isr = XGMAC_IOREAD(pdata, MMC_TISR);

	if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXOCTETCOUNT_GB))
		stats->txoctetcount_gb +=
			xgbe_mmc_read(pdata, MMC_TXOCTETCOUNT_GB_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXFRAMECOUNT_GB))
		stats->txframecount_gb +=
			xgbe_mmc_read(pdata, MMC_TXFRAMECOUNT_GB_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXBROADCASTFRAMES_G))
		stats->txbroadcastframes_g +=
			xgbe_mmc_read(pdata, MMC_TXBROADCASTFRAMES_G_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXMULTICASTFRAMES_G))
		stats->txmulticastframes_g +=
			xgbe_mmc_read(pdata, MMC_TXMULTICASTFRAMES_G_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX64OCTETS_GB))
		stats->tx64octets_gb +=
			xgbe_mmc_read(pdata, MMC_TX64OCTETS_GB_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX65TO127OCTETS_GB))
		stats->tx65to127octets_gb +=
			xgbe_mmc_read(pdata, MMC_TX65TO127OCTETS_GB_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX128TO255OCTETS_GB))
		stats->tx128to255octets_gb +=
			xgbe_mmc_read(pdata, MMC_TX128TO255OCTETS_GB_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX256TO511OCTETS_GB))
		stats->tx256to511octets_gb +=
			xgbe_mmc_read(pdata, MMC_TX256TO511OCTETS_GB_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX512TO1023OCTETS_GB))
		stats->tx512to1023octets_gb +=
			xgbe_mmc_read(pdata, MMC_TX512TO1023OCTETS_GB_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX1024TOMAXOCTETS_GB))
		stats->tx1024tomaxoctets_gb +=
			xgbe_mmc_read(pdata, MMC_TX1024TOMAXOCTETS_GB_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXUNICASTFRAMES_GB))
		stats->txunicastframes_gb +=
			xgbe_mmc_read(pdata, MMC_TXUNICASTFRAMES_GB_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXMULTICASTFRAMES_GB))
		stats->txmulticastframes_gb +=
			xgbe_mmc_read(pdata, MMC_TXMULTICASTFRAMES_GB_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXBROADCASTFRAMES_GB))
		stats->txbroadcastframes_g +=
			xgbe_mmc_read(pdata, MMC_TXBROADCASTFRAMES_GB_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXUNDERFLOWERROR))
		stats->txunderflowerror +=
			xgbe_mmc_read(pdata, MMC_TXUNDERFLOWERROR_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXOCTETCOUNT_G))
		stats->txoctetcount_g +=
			xgbe_mmc_read(pdata, MMC_TXOCTETCOUNT_G_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXFRAMECOUNT_G))
		stats->txframecount_g +=
			xgbe_mmc_read(pdata, MMC_TXFRAMECOUNT_G_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXPAUSEFRAMES))
		stats->txpauseframes +=
			xgbe_mmc_read(pdata, MMC_TXPAUSEFRAMES_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXVLANFRAMES_G))
		stats->txvlanframes_g +=
			xgbe_mmc_read(pdata, MMC_TXVLANFRAMES_G_LO);
}

static void xgbe_rx_mmc_int(struct xgbe_prv_data *pdata)
{
	struct xgbe_mmc_stats *stats = &pdata->mmc_stats;
	unsigned int mmc_isr = XGMAC_IOREAD(pdata, MMC_RISR);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXFRAMECOUNT_GB))
		stats->rxframecount_gb +=
			xgbe_mmc_read(pdata, MMC_RXFRAMECOUNT_GB_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXOCTETCOUNT_GB))
		stats->rxoctetcount_gb +=
			xgbe_mmc_read(pdata, MMC_RXOCTETCOUNT_GB_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXOCTETCOUNT_G))
		stats->rxoctetcount_g +=
			xgbe_mmc_read(pdata, MMC_RXOCTETCOUNT_G_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXBROADCASTFRAMES_G))
		stats->rxbroadcastframes_g +=
			xgbe_mmc_read(pdata, MMC_RXBROADCASTFRAMES_G_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXMULTICASTFRAMES_G))
		stats->rxmulticastframes_g +=
			xgbe_mmc_read(pdata, MMC_RXMULTICASTFRAMES_G_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXCRCERROR))
		stats->rxcrcerror +=
			xgbe_mmc_read(pdata, MMC_RXCRCERROR_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXRUNTERROR))
		stats->rxrunterror +=
			xgbe_mmc_read(pdata, MMC_RXRUNTERROR);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXJABBERERROR))
		stats->rxjabbererror +=
			xgbe_mmc_read(pdata, MMC_RXJABBERERROR);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXUNDERSIZE_G))
		stats->rxundersize_g +=
			xgbe_mmc_read(pdata, MMC_RXUNDERSIZE_G);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXOVERSIZE_G))
		stats->rxoversize_g +=
			xgbe_mmc_read(pdata, MMC_RXOVERSIZE_G);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX64OCTETS_GB))
		stats->rx64octets_gb +=
			xgbe_mmc_read(pdata, MMC_RX64OCTETS_GB_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX65TO127OCTETS_GB))
		stats->rx65to127octets_gb +=
			xgbe_mmc_read(pdata, MMC_RX65TO127OCTETS_GB_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX128TO255OCTETS_GB))
		stats->rx128to255octets_gb +=
			xgbe_mmc_read(pdata, MMC_RX128TO255OCTETS_GB_LO);

	if
	    (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX256TO511OCTETS_GB))
		stats->rx256to511octets_gb +=
			xgbe_mmc_read(pdata, MMC_RX256TO511OCTETS_GB_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX512TO1023OCTETS_GB))
		stats->rx512to1023octets_gb +=
			xgbe_mmc_read(pdata, MMC_RX512TO1023OCTETS_GB_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX1024TOMAXOCTETS_GB))
		stats->rx1024tomaxoctets_gb +=
			xgbe_mmc_read(pdata, MMC_RX1024TOMAXOCTETS_GB_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXUNICASTFRAMES_G))
		stats->rxunicastframes_g +=
			xgbe_mmc_read(pdata, MMC_RXUNICASTFRAMES_G_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXLENGTHERROR))
		stats->rxlengtherror +=
			xgbe_mmc_read(pdata, MMC_RXLENGTHERROR_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXOUTOFRANGETYPE))
		stats->rxoutofrangetype +=
			xgbe_mmc_read(pdata, MMC_RXOUTOFRANGETYPE_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXPAUSEFRAMES))
		stats->rxpauseframes +=
			xgbe_mmc_read(pdata, MMC_RXPAUSEFRAMES_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXFIFOOVERFLOW))
		stats->rxfifooverflow +=
			xgbe_mmc_read(pdata, MMC_RXFIFOOVERFLOW_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXVLANFRAMES_GB))
		stats->rxvlanframes_gb +=
			xgbe_mmc_read(pdata, MMC_RXVLANFRAMES_GB_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXWATCHDOGERROR))
		stats->rxwatchdogerror +=
			xgbe_mmc_read(pdata, MMC_RXWATCHDOGERROR);
}

static void xgbe_read_mmc_stats(struct xgbe_prv_data *pdata)
{
	struct xgbe_mmc_stats *stats = &pdata->mmc_stats;

	/* Freeze counters */
	XGMAC_IOWRITE_BITS(pdata, MMC_CR, MCF, 1);

	stats->txoctetcount_gb +=
		xgbe_mmc_read(pdata, MMC_TXOCTETCOUNT_GB_LO);

	stats->txframecount_gb +=
		xgbe_mmc_read(pdata, MMC_TXFRAMECOUNT_GB_LO);

	stats->txbroadcastframes_g +=
		xgbe_mmc_read(pdata, MMC_TXBROADCASTFRAMES_G_LO);

	stats->txmulticastframes_g +=
		xgbe_mmc_read(pdata, MMC_TXMULTICASTFRAMES_G_LO);

	stats->tx64octets_gb +=
		xgbe_mmc_read(pdata, MMC_TX64OCTETS_GB_LO);

	stats->tx65to127octets_gb +=
		xgbe_mmc_read(pdata, MMC_TX65TO127OCTETS_GB_LO);

	stats->tx128to255octets_gb +=
		xgbe_mmc_read(pdata, MMC_TX128TO255OCTETS_GB_LO);

	stats->tx256to511octets_gb +=
		xgbe_mmc_read(pdata, MMC_TX256TO511OCTETS_GB_LO);

	stats->tx512to1023octets_gb +=
		xgbe_mmc_read(pdata, MMC_TX512TO1023OCTETS_GB_LO);

	stats->tx1024tomaxoctets_gb +=
		xgbe_mmc_read(pdata, MMC_TX1024TOMAXOCTETS_GB_LO);

	stats->txunicastframes_gb +=
		xgbe_mmc_read(pdata, MMC_TXUNICASTFRAMES_GB_LO);

	stats->txmulticastframes_gb +=
		xgbe_mmc_read(pdata, MMC_TXMULTICASTFRAMES_GB_LO);

	stats->txbroadcastframes_g +=
		xgbe_mmc_read(pdata, MMC_TXBROADCASTFRAMES_GB_LO);

	stats->txunderflowerror +=
		xgbe_mmc_read(pdata, MMC_TXUNDERFLOWERROR_LO);

	stats->txoctetcount_g +=
		xgbe_mmc_read(pdata, MMC_TXOCTETCOUNT_G_LO);

	stats->txframecount_g +=
		xgbe_mmc_read(pdata, MMC_TXFRAMECOUNT_G_LO);

	stats->txpauseframes +=
		xgbe_mmc_read(pdata, MMC_TXPAUSEFRAMES_LO);

	stats->txvlanframes_g +=
		xgbe_mmc_read(pdata, MMC_TXVLANFRAMES_G_LO);

	stats->rxframecount_gb +=
		xgbe_mmc_read(pdata, MMC_RXFRAMECOUNT_GB_LO);

	stats->rxoctetcount_gb +=
		xgbe_mmc_read(pdata, MMC_RXOCTETCOUNT_GB_LO);

	stats->rxoctetcount_g +=
		xgbe_mmc_read(pdata, MMC_RXOCTETCOUNT_G_LO);

	stats->rxbroadcastframes_g +=
		xgbe_mmc_read(pdata, MMC_RXBROADCASTFRAMES_G_LO);

	stats->rxmulticastframes_g +=
		xgbe_mmc_read(pdata, MMC_RXMULTICASTFRAMES_G_LO);

	stats->rxcrcerror +=
		xgbe_mmc_read(pdata, MMC_RXCRCERROR_LO);

	stats->rxrunterror +=
		xgbe_mmc_read(pdata, MMC_RXRUNTERROR);

	stats->rxjabbererror +=
		xgbe_mmc_read(pdata, MMC_RXJABBERERROR);

	stats->rxundersize_g +=
		xgbe_mmc_read(pdata, MMC_RXUNDERSIZE_G);

	stats->rxoversize_g +=
		xgbe_mmc_read(pdata, MMC_RXOVERSIZE_G);

	stats->rx64octets_gb +=
		xgbe_mmc_read(pdata, MMC_RX64OCTETS_GB_LO);

	stats->rx65to127octets_gb +=
		xgbe_mmc_read(pdata, MMC_RX65TO127OCTETS_GB_LO);

	stats->rx128to255octets_gb +=
		xgbe_mmc_read(pdata, MMC_RX128TO255OCTETS_GB_LO);

	stats->rx256to511octets_gb +=
		xgbe_mmc_read(pdata, MMC_RX256TO511OCTETS_GB_LO);

	stats->rx512to1023octets_gb +=
		xgbe_mmc_read(pdata, MMC_RX512TO1023OCTETS_GB_LO);

	stats->rx1024tomaxoctets_gb +=
		xgbe_mmc_read(pdata, MMC_RX1024TOMAXOCTETS_GB_LO);

	stats->rxunicastframes_g +=
		xgbe_mmc_read(pdata, MMC_RXUNICASTFRAMES_G_LO);

	stats->rxlengtherror +=
		xgbe_mmc_read(pdata, MMC_RXLENGTHERROR_LO);

	stats->rxoutofrangetype +=
		xgbe_mmc_read(pdata, MMC_RXOUTOFRANGETYPE_LO);

	stats->rxpauseframes +=
		xgbe_mmc_read(pdata, MMC_RXPAUSEFRAMES_LO);

	stats->rxfifooverflow +=
		xgbe_mmc_read(pdata, MMC_RXFIFOOVERFLOW_LO);

	stats->rxvlanframes_gb +=
		xgbe_mmc_read(pdata, MMC_RXVLANFRAMES_GB_LO);

	stats->rxwatchdogerror +=
		xgbe_mmc_read(pdata, MMC_RXWATCHDOGERROR);

	/* Un-freeze counters */
	XGMAC_IOWRITE_BITS(pdata, MMC_CR, MCF, 0);
}

static void xgbe_config_mmc(struct xgbe_prv_data *pdata)
{
	/* Set counters to reset on read */
	XGMAC_IOWRITE_BITS(pdata, MMC_CR, ROR, 1);

	/* Reset the counters */
	XGMAC_IOWRITE_BITS(pdata, MMC_CR, CR, 1);
}

static void xgbe_prepare_tx_stop(struct xgbe_prv_data *pdata,
				 struct xgbe_channel *channel)
{
	unsigned int tx_dsr, tx_pos, tx_qidx;
	unsigned int tx_status;
	unsigned long tx_timeout;

	/* Calculate the status register to read and the position within */
	if (channel->queue_index < DMA_DSRX_FIRST_QUEUE) {
		tx_dsr = DMA_DSR0;
		tx_pos = (channel->queue_index * DMA_DSR_Q_WIDTH) +
			 DMA_DSR0_TPS_START;
	} else {
		tx_qidx = channel->queue_index - DMA_DSRX_FIRST_QUEUE;

		tx_dsr = DMA_DSR1 + ((tx_qidx / DMA_DSRX_QPR) * DMA_DSRX_INC);
		tx_pos = ((tx_qidx % DMA_DSRX_QPR) * DMA_DSR_Q_WIDTH) +
			 DMA_DSRX_TPS_START;
	}

	/* The Tx engine cannot be stopped if it is actively processing
	 * descriptors. Wait for the Tx engine to enter the stopped or
	 * suspended state. Don't wait forever though...
	 */
	tx_timeout = jiffies + (XGBE_DMA_STOP_TIMEOUT * HZ);
	while (time_before(jiffies, tx_timeout)) {
		tx_status = XGMAC_IOREAD(pdata, tx_dsr);
		tx_status = GET_BITS(tx_status, tx_pos, DMA_DSR_TPS_WIDTH);
		if ((tx_status == DMA_TPS_STOPPED) ||
		    (tx_status == DMA_TPS_SUSPENDED))
			break;

		usleep_range(500, 1000);
	}

	if (!time_before(jiffies, tx_timeout))
		netdev_info(pdata->netdev,
			    "timed out waiting for Tx DMA channel %u to stop\n",
			    channel->queue_index);
}

static void xgbe_enable_tx(struct xgbe_prv_data *pdata)
{
	struct xgbe_channel *channel;
	unsigned int i;

	/* Enable each Tx DMA channel */
	channel = pdata->channel;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		if (!channel->tx_ring)
			break;

		XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_TCR, ST, 1);
	}

	/* Enable each Tx queue */
	for (i = 0; i < pdata->tx_q_count; i++)
		XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TXQEN,
				       MTL_Q_ENABLED);

	/* Enable MAC Tx */
	XGMAC_IOWRITE_BITS(pdata, MAC_TCR, TE, 1);
}

static void xgbe_disable_tx(struct xgbe_prv_data *pdata)
{
	struct xgbe_channel *channel;
	unsigned int i;

	/* Prepare for Tx DMA channel stop */
	channel = pdata->channel;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		if (!channel->tx_ring)
			break;

		xgbe_prepare_tx_stop(pdata, channel);
	}

	/* Disable MAC Tx */
	XGMAC_IOWRITE_BITS(pdata, MAC_TCR, TE, 0);

	/* Disable each Tx queue */
	for (i = 0; i < pdata->tx_q_count; i++)
		XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TXQEN, 0);

	/* Disable each Tx DMA channel */
	channel = pdata->channel;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		if (!channel->tx_ring)
			break;

		XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_TCR, ST, 0);
	}
}

static void xgbe_enable_rx(struct xgbe_prv_data *pdata)
{
	struct xgbe_channel *channel;
	unsigned int reg_val, i;

	/* Enable each Rx DMA channel */
	channel = pdata->channel;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		if (!channel->rx_ring)
			break;

		XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_RCR, SR, 1);
	}

	/* Enable each Rx queue */
	reg_val = 0;
	for (i = 0; i < pdata->rx_q_count; i++)
		reg_val |= (0x02 << (i << 1));
	XGMAC_IOWRITE(pdata, MAC_RQC0R, reg_val);

	/* Enable MAC Rx */
	XGMAC_IOWRITE_BITS(pdata, MAC_RCR, DCRCC, 1);
	XGMAC_IOWRITE_BITS(pdata, MAC_RCR, CST, 1);
	XGMAC_IOWRITE_BITS(pdata, MAC_RCR, ACS, 1);
	XGMAC_IOWRITE_BITS(pdata, MAC_RCR, RE, 1);
}

static void xgbe_disable_rx(struct xgbe_prv_data *pdata)
{
	struct xgbe_channel *channel;
	unsigned int i;

	/* Disable MAC Rx */
	XGMAC_IOWRITE_BITS(pdata, MAC_RCR, DCRCC, 0);
	XGMAC_IOWRITE_BITS(pdata, MAC_RCR, CST, 0);
	XGMAC_IOWRITE_BITS(pdata, MAC_RCR, ACS, 0);
	XGMAC_IOWRITE_BITS(pdata, MAC_RCR, RE, 0);

	/* Disable each Rx queue */
	XGMAC_IOWRITE(pdata, MAC_RQC0R, 0);

	/* Disable each Rx DMA channel */
	channel = pdata->channel;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		if (!channel->rx_ring)
			break;

		XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_RCR, SR, 0);
	}
}

static void xgbe_powerup_tx(struct xgbe_prv_data *pdata)
{
	struct xgbe_channel *channel;
	unsigned int i;

	/* Enable each Tx DMA channel */
	channel = pdata->channel;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		if (!channel->tx_ring)
			break;

		XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_TCR, ST, 1);
	}

	/* Enable MAC Tx */
	XGMAC_IOWRITE_BITS(pdata, MAC_TCR, TE, 1);
}

static void xgbe_powerdown_tx(struct xgbe_prv_data *pdata)
{
	struct xgbe_channel *channel;
	unsigned int i;

	/* Prepare for Tx DMA channel stop */
	channel = pdata->channel;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		if (!channel->tx_ring)
			break;

		xgbe_prepare_tx_stop(pdata, channel);
	}

	/* Disable MAC Tx */
	XGMAC_IOWRITE_BITS(pdata, MAC_TCR, TE, 0);

	/* Disable each Tx DMA channel */
	channel = pdata->channel;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		if (!channel->tx_ring)
			break;

		XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_TCR, ST, 0);
	}
}

static void xgbe_powerup_rx(struct xgbe_prv_data *pdata)
{
	struct xgbe_channel *channel;
	unsigned int i;

	/* Enable each Rx DMA channel */
	channel = pdata->channel;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		if (!channel->rx_ring)
			break;

		XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_RCR, SR, 1);
	}
}

static void xgbe_powerdown_rx(struct xgbe_prv_data *pdata)
{
	struct xgbe_channel *channel;
	unsigned int i;

	/* Disable each Rx DMA channel */
	channel = pdata->channel;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		if (!channel->rx_ring)
			break;

		XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_RCR, SR, 0);
	}
}

static int xgbe_init(struct xgbe_prv_data *pdata)
{
	struct xgbe_desc_if *desc_if = &pdata->desc_if;
	int ret;

	DBGPR("-->xgbe_init\n");

	/* Flush Tx queues */
	ret = xgbe_flush_tx_queues(pdata);
	if (ret)
		return ret;

	/*
	 * Initialize DMA related features
	 */
	xgbe_config_dma_bus(pdata);
	xgbe_config_dma_cache(pdata);
	xgbe_config_osp_mode(pdata);
	xgbe_config_pblx8(pdata);
	xgbe_config_tx_pbl_val(pdata);
	xgbe_config_rx_pbl_val(pdata);
	xgbe_config_rx_coalesce(pdata);
	xgbe_config_tx_coalesce(pdata);
	xgbe_config_rx_buffer_size(pdata);
	xgbe_config_tso_mode(pdata);
	xgbe_config_sph_mode(pdata);
	xgbe_config_rss(pdata);
	desc_if->wrapper_tx_desc_init(pdata);
	desc_if->wrapper_rx_desc_init(pdata);
	xgbe_enable_dma_interrupts(pdata);

	/*
	 * Initialize MTL related features
	 */
	xgbe_config_mtl_mode(pdata);
	xgbe_config_queue_mapping(pdata);
	xgbe_config_tsf_mode(pdata, pdata->tx_sf_mode);
	xgbe_config_rsf_mode(pdata, pdata->rx_sf_mode);
	xgbe_config_tx_threshold(pdata, pdata->tx_threshold);
	xgbe_config_rx_threshold(pdata, pdata->rx_threshold);
	xgbe_config_tx_fifo_size(pdata);
	xgbe_config_rx_fifo_size(pdata);
	xgbe_config_flow_control_threshold(pdata);
	/*TODO: Error Packet and undersized good Packet forwarding enable
		(FEP and FUP)
	 */
	xgbe_config_dcb_tc(pdata);
	xgbe_config_dcb_pfc(pdata);
	xgbe_enable_mtl_interrupts(pdata);

	/*
	 * Initialize MAC related features
	 */
	xgbe_config_mac_address(pdata);
	xgbe_config_jumbo_enable(pdata);
	xgbe_config_flow_control(pdata);
	xgbe_config_mac_speed(pdata);
	xgbe_config_checksum_offload(pdata);
	xgbe_config_vlan_support(pdata);
	xgbe_config_mmc(pdata);
	xgbe_enable_mac_interrupts(pdata);

	DBGPR("<--xgbe_init\n");

	return 0;
}

void xgbe_init_function_ptrs_dev(struct xgbe_hw_if *hw_if)
{
	DBGPR("-->xgbe_init_function_ptrs\n");

	hw_if->tx_complete = xgbe_tx_complete;

	hw_if->set_promiscuous_mode = xgbe_set_promiscuous_mode;
	hw_if->set_all_multicast_mode = xgbe_set_all_multicast_mode;
	hw_if->add_mac_addresses = xgbe_add_mac_addresses;
	hw_if->set_mac_address = xgbe_set_mac_address;

	hw_if->enable_rx_csum = xgbe_enable_rx_csum;
	hw_if->disable_rx_csum = xgbe_disable_rx_csum;

	hw_if->enable_rx_vlan_stripping = xgbe_enable_rx_vlan_stripping;
	hw_if->disable_rx_vlan_stripping = xgbe_disable_rx_vlan_stripping;
	hw_if->enable_rx_vlan_filtering = xgbe_enable_rx_vlan_filtering;
	hw_if->disable_rx_vlan_filtering = xgbe_disable_rx_vlan_filtering;
	hw_if->update_vlan_hash_table = xgbe_update_vlan_hash_table;

	hw_if->read_mmd_regs = xgbe_read_mmd_regs;
	hw_if->write_mmd_regs = xgbe_write_mmd_regs;

	hw_if->set_gmii_speed = xgbe_set_gmii_speed;
	hw_if->set_gmii_2500_speed = xgbe_set_gmii_2500_speed;
	hw_if->set_xgmii_speed = xgbe_set_xgmii_speed;

	hw_if->enable_tx = xgbe_enable_tx;
	hw_if->disable_tx = xgbe_disable_tx;
	hw_if->enable_rx = xgbe_enable_rx;
	hw_if->disable_rx = xgbe_disable_rx;

	hw_if->powerup_tx = xgbe_powerup_tx;
	hw_if->powerdown_tx = xgbe_powerdown_tx;
	hw_if->powerup_rx = xgbe_powerup_rx;
	hw_if->powerdown_rx = xgbe_powerdown_rx;

	hw_if->dev_xmit = xgbe_dev_xmit;
	hw_if->dev_read = xgbe_dev_read;
	hw_if->enable_int = xgbe_enable_int;
	hw_if->disable_int = xgbe_disable_int;
	hw_if->init = xgbe_init;
	hw_if->exit = xgbe_exit;

	/* Descriptor related Sequences have to be initialized here */
	hw_if->tx_desc_init = xgbe_tx_desc_init;
	hw_if->rx_desc_init = xgbe_rx_desc_init;
	hw_if->tx_desc_reset = xgbe_tx_desc_reset;
	hw_if->rx_desc_reset = xgbe_rx_desc_reset;
	hw_if->is_last_desc = xgbe_is_last_desc;
	hw_if->is_context_desc = xgbe_is_context_desc;
	hw_if->tx_start_xmit = xgbe_tx_start_xmit;

	/* For FLOW ctrl */
	hw_if->config_tx_flow_control = xgbe_config_tx_flow_control;
	hw_if->config_rx_flow_control = xgbe_config_rx_flow_control;

	/* For RX coalescing */
	hw_if->config_rx_coalesce = xgbe_config_rx_coalesce;
	hw_if->config_tx_coalesce = xgbe_config_tx_coalesce;
	hw_if->usec_to_riwt = xgbe_usec_to_riwt;
	hw_if->riwt_to_usec = xgbe_riwt_to_usec;

	/* For RX and TX threshold config */
	hw_if->config_rx_threshold = xgbe_config_rx_threshold;
	hw_if->config_tx_threshold = xgbe_config_tx_threshold;

	/* For RX and TX Store and Forward Mode config */
	hw_if->config_rsf_mode = xgbe_config_rsf_mode;
	hw_if->config_tsf_mode = xgbe_config_tsf_mode;

	/* For TX DMA Operating on Second Frame config */
	hw_if->config_osp_mode = xgbe_config_osp_mode;

	/* For RX and TX PBL config */
	hw_if->config_rx_pbl_val = xgbe_config_rx_pbl_val;
	hw_if->get_rx_pbl_val = xgbe_get_rx_pbl_val;
	hw_if->config_tx_pbl_val = xgbe_config_tx_pbl_val;
	hw_if->get_tx_pbl_val = xgbe_get_tx_pbl_val;
	hw_if->config_pblx8 = xgbe_config_pblx8;

	/* For MMC statistics support */
	hw_if->tx_mmc_int = xgbe_tx_mmc_int;
	hw_if->rx_mmc_int = xgbe_rx_mmc_int;
	hw_if->read_mmc_stats = xgbe_read_mmc_stats;

	/* For PTP config */
	hw_if->config_tstamp = xgbe_config_tstamp;
	hw_if->update_tstamp_addend = xgbe_update_tstamp_addend;
	hw_if->set_tstamp_time = xgbe_set_tstamp_time;
	hw_if->get_tstamp_time = xgbe_get_tstamp_time;
	hw_if->get_tx_tstamp = xgbe_get_tx_tstamp;

	/* For Data Center Bridging config */
	hw_if->config_dcb_tc = xgbe_config_dcb_tc;
	hw_if->config_dcb_pfc = xgbe_config_dcb_pfc;

	/* For Receive Side Scaling */
	hw_if->enable_rss = xgbe_enable_rss;
	hw_if->disable_rss = xgbe_disable_rss;
	hw_if->set_rss_hash_key = xgbe_set_rss_hash_key;
	hw_if->set_rss_lookup_table = xgbe_set_rss_lookup_table;

	DBGPR("<--xgbe_init_function_ptrs\n");
}