1 /* 2 * AMD 10Gb Ethernet driver 3 * 4 * This file is available to you under your choice of the following two 5 * licenses: 6 * 7 * License 1: GPLv2 8 * 9 * Copyright (c) 2014 Advanced Micro Devices, Inc. 10 * 11 * This file is free software; you may copy, redistribute and/or modify 12 * it under the terms of the GNU General Public License as published by 13 * the Free Software Foundation, either version 2 of the License, or (at 14 * your option) any later version. 15 * 16 * This file is distributed in the hope that it will be useful, but 17 * WITHOUT ANY WARRANTY; without even the implied warranty of 18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 19 * General Public License for more details. 20 * 21 * You should have received a copy of the GNU General Public License 22 * along with this program. If not, see <http://www.gnu.org/licenses/>. 23 * 24 * This file incorporates work covered by the following copyright and 25 * permission notice: 26 * The Synopsys DWC ETHER XGMAC Software Driver and documentation 27 * (hereinafter "Software") is an unsupported proprietary work of Synopsys, 28 * Inc. unless otherwise expressly agreed to in writing between Synopsys 29 * and you. 30 * 31 * The Software IS NOT an item of Licensed Software or Licensed Product 32 * under any End User Software License Agreement or Agreement for Licensed 33 * Product with Synopsys or any supplement thereto. Permission is hereby 34 * granted, free of charge, to any person obtaining a copy of this software 35 * annotated with this license and the Software, to deal in the Software 36 * without restriction, including without limitation the rights to use, 37 * copy, modify, merge, publish, distribute, sublicense, and/or sell copies 38 * of the Software, and to permit persons to whom the Software is furnished 39 * to do so, subject to the following conditions: 40 * 41 * The above copyright notice and this permission notice shall be included 42 * in all copies or substantial portions of the Software. 43 * 44 * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" 45 * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 46 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A 47 * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS 48 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 49 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 50 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 51 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 52 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 53 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF 54 * THE POSSIBILITY OF SUCH DAMAGE. 55 * 56 * 57 * License 2: Modified BSD 58 * 59 * Copyright (c) 2014 Advanced Micro Devices, Inc. 60 * All rights reserved. 61 * 62 * Redistribution and use in source and binary forms, with or without 63 * modification, are permitted provided that the following conditions are met: 64 * * Redistributions of source code must retain the above copyright 65 * notice, this list of conditions and the following disclaimer. 66 * * Redistributions in binary form must reproduce the above copyright 67 * notice, this list of conditions and the following disclaimer in the 68 * documentation and/or other materials provided with the distribution. 69 * * Neither the name of Advanced Micro Devices, Inc. nor the 70 * names of its contributors may be used to endorse or promote products 71 * derived from this software without specific prior written permission. 72 * 73 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 74 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 75 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 76 * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY 77 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 78 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 79 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND 80 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 81 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 82 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 83 * 84 * This file incorporates work covered by the following copyright and 85 * permission notice: 86 * The Synopsys DWC ETHER XGMAC Software Driver and documentation 87 * (hereinafter "Software") is an unsupported proprietary work of Synopsys, 88 * Inc. unless otherwise expressly agreed to in writing between Synopsys 89 * and you. 90 * 91 * The Software IS NOT an item of Licensed Software or Licensed Product 92 * under any End User Software License Agreement or Agreement for Licensed 93 * Product with Synopsys or any supplement thereto. Permission is hereby 94 * granted, free of charge, to any person obtaining a copy of this software 95 * annotated with this license and the Software, to deal in the Software 96 * without restriction, including without limitation the rights to use, 97 * copy, modify, merge, publish, distribute, sublicense, and/or sell copies 98 * of the Software, and to permit persons to whom the Software is furnished 99 * to do so, subject to the following conditions: 100 * 101 * The above copyright notice and this permission notice shall be included 102 * in all copies or substantial portions of the Software. 103 * 104 * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" 105 * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 106 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A 107 * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS 108 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 109 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 110 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 111 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 112 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 113 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF 114 * THE POSSIBILITY OF SUCH DAMAGE. 115 */ 116 117 #include <linux/phy.h> 118 #include <linux/clk.h> 119 120 #include "xgbe.h" 121 #include "xgbe-common.h" 122 123 124 static unsigned int xgbe_usec_to_riwt(struct xgbe_prv_data *pdata, 125 unsigned int usec) 126 { 127 unsigned long rate; 128 unsigned int ret; 129 130 DBGPR("-->xgbe_usec_to_riwt\n"); 131 132 rate = clk_get_rate(pdata->sysclock); 133 134 /* 135 * Convert the input usec value to the watchdog timer value. Each 136 * watchdog timer value is equivalent to 256 clock cycles. 137 * Calculate the required value as: 138 * ( usec * ( system_clock_mhz / 10^6 ) / 256 139 */ 140 ret = (usec * (rate / 1000000)) / 256; 141 142 DBGPR("<--xgbe_usec_to_riwt\n"); 143 144 return ret; 145 } 146 147 static unsigned int xgbe_riwt_to_usec(struct xgbe_prv_data *pdata, 148 unsigned int riwt) 149 { 150 unsigned long rate; 151 unsigned int ret; 152 153 DBGPR("-->xgbe_riwt_to_usec\n"); 154 155 rate = clk_get_rate(pdata->sysclock); 156 157 /* 158 * Convert the input watchdog timer value to the usec value. Each 159 * watchdog timer value is equivalent to 256 clock cycles. 160 * Calculate the required value as: 161 * ( riwt * 256 ) / ( system_clock_mhz / 10^6 ) 162 */ 163 ret = (riwt * 256) / (rate / 1000000); 164 165 DBGPR("<--xgbe_riwt_to_usec\n"); 166 167 return ret; 168 } 169 170 static int xgbe_config_pblx8(struct xgbe_prv_data *pdata) 171 { 172 struct xgbe_channel *channel; 173 unsigned int i; 174 175 channel = pdata->channel; 176 for (i = 0; i < pdata->channel_count; i++, channel++) 177 XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_CR, PBLX8, 178 pdata->pblx8); 179 180 return 0; 181 } 182 183 static int xgbe_get_tx_pbl_val(struct xgbe_prv_data *pdata) 184 { 185 return XGMAC_DMA_IOREAD_BITS(pdata->channel, DMA_CH_TCR, PBL); 186 } 187 188 static int xgbe_config_tx_pbl_val(struct xgbe_prv_data *pdata) 189 { 190 struct xgbe_channel *channel; 191 unsigned int i; 192 193 channel = pdata->channel; 194 for (i = 0; i < pdata->channel_count; i++, channel++) { 195 if (!channel->tx_ring) 196 break; 197 198 XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_TCR, PBL, 199 pdata->tx_pbl); 200 } 201 202 return 0; 203 } 204 205 static int xgbe_get_rx_pbl_val(struct xgbe_prv_data *pdata) 206 { 207 return XGMAC_DMA_IOREAD_BITS(pdata->channel, DMA_CH_RCR, PBL); 208 } 209 210 static int xgbe_config_rx_pbl_val(struct xgbe_prv_data *pdata) 211 { 212 struct xgbe_channel *channel; 213 unsigned int i; 214 215 channel = pdata->channel; 216 for (i = 0; i < pdata->channel_count; i++, channel++) { 217 if (!channel->rx_ring) 218 break; 219 220 XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_RCR, PBL, 221 pdata->rx_pbl); 222 } 223 224 return 0; 225 } 226 227 static int xgbe_config_osp_mode(struct xgbe_prv_data *pdata) 228 { 229 struct xgbe_channel *channel; 230 unsigned int i; 231 232 channel = pdata->channel; 233 for (i = 0; i < pdata->channel_count; i++, channel++) { 234 if (!channel->tx_ring) 235 break; 236 237 XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_TCR, OSP, 238 pdata->tx_osp_mode); 239 } 240 241 return 0; 242 } 243 244 static int xgbe_config_rsf_mode(struct xgbe_prv_data *pdata, unsigned int val) 245 { 246 unsigned int i; 247 248 for (i = 0; i < pdata->hw_feat.rx_q_cnt; i++) 249 XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RSF, val); 250 251 return 0; 252 } 253 254 static int xgbe_config_tsf_mode(struct xgbe_prv_data *pdata, unsigned int val) 255 { 256 unsigned int i; 257 258 for (i = 0; i < pdata->hw_feat.tx_q_cnt; i++) 259 XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TSF, val); 260 261 return 0; 262 } 263 264 static int xgbe_config_rx_threshold(struct xgbe_prv_data *pdata, 265 unsigned int val) 266 { 267 unsigned int i; 268 269 for (i = 0; i < pdata->hw_feat.rx_q_cnt; i++) 270 XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RTC, val); 271 272 return 0; 273 } 274 275 static int xgbe_config_tx_threshold(struct xgbe_prv_data *pdata, 276 unsigned int val) 277 { 278 unsigned int i; 279 280 for (i = 0; i < pdata->hw_feat.tx_q_cnt; i++) 281 XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TTC, val); 282 283 return 0; 284 } 285 286 static int xgbe_config_rx_coalesce(struct xgbe_prv_data *pdata) 287 { 288 struct xgbe_channel *channel; 289 unsigned int i; 290 291 channel = pdata->channel; 292 for (i = 0; i < pdata->channel_count; i++, channel++) { 293 if (!channel->rx_ring) 294 break; 295 296 XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_RIWT, RWT, 297 pdata->rx_riwt); 298 } 299 300 return 0; 301 } 302 303 static int xgbe_config_tx_coalesce(struct xgbe_prv_data *pdata) 304 { 305 return 0; 306 } 307 308 static void xgbe_config_rx_buffer_size(struct xgbe_prv_data *pdata) 309 { 310 struct xgbe_channel *channel; 311 unsigned int i; 312 313 channel = pdata->channel; 314 for (i = 0; i < pdata->channel_count; i++, channel++) { 315 if (!channel->rx_ring) 316 break; 317 318 XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_RCR, RBSZ, 319 pdata->rx_buf_size); 320 } 321 } 322 323 static void xgbe_config_tso_mode(struct xgbe_prv_data *pdata) 324 { 325 struct xgbe_channel *channel; 326 unsigned int i; 327 328 channel = pdata->channel; 329 for (i = 0; i < pdata->channel_count; i++, channel++) { 330 if (!channel->tx_ring) 331 break; 332 333 XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_TCR, TSE, 1); 334 } 335 } 336 337 static int xgbe_disable_tx_flow_control(struct xgbe_prv_data *pdata) 338 { 339 unsigned int max_q_count, q_count; 340 unsigned int reg, reg_val; 341 unsigned int i; 342 343 /* Clear MTL flow control */ 344 for (i = 0; i < pdata->hw_feat.rx_q_cnt; i++) 345 XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, EHFC, 0); 346 347 /* Clear MAC flow control */ 348 max_q_count = XGMAC_MAX_FLOW_CONTROL_QUEUES; 349 q_count = min_t(unsigned int, pdata->hw_feat.rx_q_cnt, max_q_count); 350 reg = MAC_Q0TFCR; 351 for (i = 0; i < q_count; i++) { 352 reg_val = XGMAC_IOREAD(pdata, reg); 353 XGMAC_SET_BITS(reg_val, MAC_Q0TFCR, TFE, 0); 354 XGMAC_IOWRITE(pdata, reg, reg_val); 355 356 reg += MAC_QTFCR_INC; 357 } 358 359 return 0; 360 } 361 362 static int xgbe_enable_tx_flow_control(struct xgbe_prv_data *pdata) 363 { 364 unsigned int max_q_count, q_count; 365 unsigned int reg, reg_val; 366 unsigned int i; 367 368 /* Set MTL flow control */ 369 for (i = 0; i < pdata->hw_feat.rx_q_cnt; i++) 370 XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, EHFC, 1); 371 372 /* Set MAC flow control */ 373 max_q_count = XGMAC_MAX_FLOW_CONTROL_QUEUES; 374 q_count = min_t(unsigned int, pdata->hw_feat.rx_q_cnt, max_q_count); 375 reg = MAC_Q0TFCR; 376 for (i = 0; i < q_count; i++) { 377 reg_val = XGMAC_IOREAD(pdata, reg); 378 379 /* Enable transmit flow control */ 380 XGMAC_SET_BITS(reg_val, MAC_Q0TFCR, TFE, 1); 381 /* Set pause time */ 382 XGMAC_SET_BITS(reg_val, MAC_Q0TFCR, PT, 0xffff); 383 384 XGMAC_IOWRITE(pdata, reg, reg_val); 385 386 reg += MAC_QTFCR_INC; 387 } 388 389 return 0; 390 } 391 392 static int xgbe_disable_rx_flow_control(struct xgbe_prv_data *pdata) 393 { 394 XGMAC_IOWRITE_BITS(pdata, MAC_RFCR, RFE, 0); 395 396 return 0; 397 } 398 399 static int xgbe_enable_rx_flow_control(struct xgbe_prv_data *pdata) 400 { 401 XGMAC_IOWRITE_BITS(pdata, MAC_RFCR, RFE, 1); 402 403 return 0; 404 } 405 406 static int xgbe_config_tx_flow_control(struct xgbe_prv_data *pdata) 407 { 408 if (pdata->tx_pause) 409 xgbe_enable_tx_flow_control(pdata); 410 else 411 xgbe_disable_tx_flow_control(pdata); 412 413 return 0; 414 } 415 416 static int xgbe_config_rx_flow_control(struct xgbe_prv_data *pdata) 417 { 418 if (pdata->rx_pause) 419 xgbe_enable_rx_flow_control(pdata); 420 else 421 xgbe_disable_rx_flow_control(pdata); 422 423 return 0; 424 } 425 426 static void xgbe_config_flow_control(struct xgbe_prv_data *pdata) 427 { 428 xgbe_config_tx_flow_control(pdata); 429 xgbe_config_rx_flow_control(pdata); 430 } 431 432 static void xgbe_enable_dma_interrupts(struct xgbe_prv_data *pdata) 433 { 434 struct xgbe_channel *channel; 435 unsigned int dma_ch_isr, dma_ch_ier; 436 unsigned int i; 437 438 channel = pdata->channel; 439 for (i = 0; i < pdata->channel_count; i++, channel++) { 440 /* Clear all the interrupts which are set */ 441 dma_ch_isr = XGMAC_DMA_IOREAD(channel, DMA_CH_SR); 442 XGMAC_DMA_IOWRITE(channel, DMA_CH_SR, dma_ch_isr); 443 444 /* Clear all interrupt enable bits */ 445 dma_ch_ier = 0; 446 447 /* Enable following interrupts 448 * NIE - Normal Interrupt Summary Enable 449 * AIE - Abnormal Interrupt Summary Enable 450 * FBEE - Fatal Bus Error Enable 451 */ 452 XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, NIE, 1); 453 XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, AIE, 1); 454 XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, FBEE, 1); 455 456 if (channel->tx_ring) { 457 /* Enable the following Tx interrupts 458 * TIE - Transmit Interrupt Enable (unless polling) 459 */ 460 XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, TIE, 1); 461 } 462 if (channel->rx_ring) { 463 /* Enable following Rx interrupts 464 * RBUE - Receive Buffer Unavailable Enable 465 * RIE - Receive Interrupt Enable 466 */ 467 XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RBUE, 1); 468 XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RIE, 1); 469 } 470 471 XGMAC_DMA_IOWRITE(channel, DMA_CH_IER, dma_ch_ier); 472 } 473 } 474 475 static void xgbe_enable_mtl_interrupts(struct xgbe_prv_data *pdata) 476 { 477 unsigned int mtl_q_isr; 478 unsigned int q_count, i; 479 480 q_count = max(pdata->hw_feat.tx_q_cnt, pdata->hw_feat.rx_q_cnt); 481 for (i = 0; i < q_count; i++) { 482 /* Clear all the interrupts which are set */ 483 mtl_q_isr = XGMAC_MTL_IOREAD(pdata, i, MTL_Q_ISR); 484 XGMAC_MTL_IOWRITE(pdata, i, MTL_Q_ISR, mtl_q_isr); 485 486 /* No MTL interrupts to be enabled */ 487 XGMAC_MTL_IOWRITE(pdata, i, MTL_Q_ISR, 0); 488 } 489 } 490 491 static void xgbe_enable_mac_interrupts(struct xgbe_prv_data *pdata) 492 { 493 /* No MAC interrupts to be enabled */ 494 XGMAC_IOWRITE(pdata, MAC_IER, 0); 495 496 /* Enable all counter interrupts */ 497 XGMAC_IOWRITE_BITS(pdata, MMC_RIER, ALL_INTERRUPTS, 0xff); 498 XGMAC_IOWRITE_BITS(pdata, MMC_TIER, ALL_INTERRUPTS, 0xff); 499 } 500 501 static int xgbe_set_gmii_speed(struct xgbe_prv_data *pdata) 502 { 503 XGMAC_IOWRITE_BITS(pdata, MAC_TCR, SS, 0x3); 504 505 return 0; 506 } 507 508 static int xgbe_set_gmii_2500_speed(struct xgbe_prv_data *pdata) 509 { 510 XGMAC_IOWRITE_BITS(pdata, MAC_TCR, SS, 0x2); 511 512 return 0; 513 } 514 515 static int xgbe_set_xgmii_speed(struct xgbe_prv_data *pdata) 516 { 517 XGMAC_IOWRITE_BITS(pdata, MAC_TCR, SS, 0); 518 519 return 0; 520 } 521 522 static int xgbe_set_promiscuous_mode(struct xgbe_prv_data *pdata, 523 unsigned int enable) 524 { 525 unsigned int val = enable ? 1 : 0; 526 527 if (XGMAC_IOREAD_BITS(pdata, MAC_PFR, PR) == val) 528 return 0; 529 530 DBGPR(" %s promiscuous mode\n", enable ? "entering" : "leaving"); 531 XGMAC_IOWRITE_BITS(pdata, MAC_PFR, PR, val); 532 533 return 0; 534 } 535 536 static int xgbe_set_all_multicast_mode(struct xgbe_prv_data *pdata, 537 unsigned int enable) 538 { 539 unsigned int val = enable ? 1 : 0; 540 541 if (XGMAC_IOREAD_BITS(pdata, MAC_PFR, PM) == val) 542 return 0; 543 544 DBGPR(" %s allmulti mode\n", enable ? "entering" : "leaving"); 545 XGMAC_IOWRITE_BITS(pdata, MAC_PFR, PM, val); 546 547 return 0; 548 } 549 550 static int xgbe_set_addn_mac_addrs(struct xgbe_prv_data *pdata, 551 unsigned int am_mode) 552 { 553 struct netdev_hw_addr *ha; 554 unsigned int mac_reg; 555 unsigned int mac_addr_hi, mac_addr_lo; 556 u8 *mac_addr; 557 unsigned int i; 558 559 XGMAC_IOWRITE_BITS(pdata, MAC_PFR, HUC, 0); 560 XGMAC_IOWRITE_BITS(pdata, MAC_PFR, HMC, 0); 561 562 i = 0; 563 mac_reg = MAC_MACA1HR; 564 565 netdev_for_each_uc_addr(ha, pdata->netdev) { 566 mac_addr_lo = 0; 567 mac_addr_hi = 0; 568 mac_addr = (u8 *)&mac_addr_lo; 569 mac_addr[0] = ha->addr[0]; 570 mac_addr[1] = ha->addr[1]; 571 mac_addr[2] = ha->addr[2]; 572 mac_addr[3] = ha->addr[3]; 573 mac_addr = (u8 *)&mac_addr_hi; 574 mac_addr[0] = ha->addr[4]; 575 mac_addr[1] = ha->addr[5]; 576 577 DBGPR(" adding unicast address %pM at 0x%04x\n", 578 ha->addr, mac_reg); 579 580 XGMAC_SET_BITS(mac_addr_hi, MAC_MACA1HR, AE, 1); 581 582 XGMAC_IOWRITE(pdata, mac_reg, mac_addr_hi); 583 mac_reg += MAC_MACA_INC; 584 XGMAC_IOWRITE(pdata, mac_reg, mac_addr_lo); 585 mac_reg += MAC_MACA_INC; 586 587 i++; 588 } 589 590 if (!am_mode) { 591 netdev_for_each_mc_addr(ha, pdata->netdev) { 592 mac_addr_lo = 0; 593 mac_addr_hi = 0; 594 mac_addr = (u8 *)&mac_addr_lo; 595 mac_addr[0] = ha->addr[0]; 596 mac_addr[1] = ha->addr[1]; 597 mac_addr[2] = ha->addr[2]; 598 mac_addr[3] = ha->addr[3]; 599 mac_addr = (u8 *)&mac_addr_hi; 600 mac_addr[0] = ha->addr[4]; 601 mac_addr[1] = ha->addr[5]; 602 603 DBGPR(" adding multicast address %pM at 0x%04x\n", 604 ha->addr, mac_reg); 605 606 XGMAC_SET_BITS(mac_addr_hi, MAC_MACA1HR, AE, 1); 607 608 XGMAC_IOWRITE(pdata, mac_reg, mac_addr_hi); 609 mac_reg += MAC_MACA_INC; 610 XGMAC_IOWRITE(pdata, mac_reg, mac_addr_lo); 611 mac_reg += MAC_MACA_INC; 612 613 i++; 614 } 615 } 616 617 /* Clear remaining additional MAC address entries */ 618 for (; i < pdata->hw_feat.addn_mac; i++) { 619 XGMAC_IOWRITE(pdata, mac_reg, 0); 620 mac_reg += MAC_MACA_INC; 621 XGMAC_IOWRITE(pdata, mac_reg, 0); 622 mac_reg += MAC_MACA_INC; 623 } 624 625 return 0; 626 } 627 628 static int xgbe_set_mac_address(struct xgbe_prv_data *pdata, u8 *addr) 629 { 630 unsigned int mac_addr_hi, mac_addr_lo; 631 632 mac_addr_hi = (addr[5] << 8) | (addr[4] << 0); 633 mac_addr_lo = (addr[3] << 24) | (addr[2] << 16) | 634 (addr[1] << 8) | (addr[0] << 0); 635 636 XGMAC_IOWRITE(pdata, MAC_MACA0HR, mac_addr_hi); 637 XGMAC_IOWRITE(pdata, MAC_MACA0LR, mac_addr_lo); 638 639 return 0; 640 } 641 642 static int xgbe_read_mmd_regs(struct xgbe_prv_data *pdata, int prtad, 643 int mmd_reg) 644 { 645 unsigned int mmd_address; 646 int mmd_data; 647 648 if (mmd_reg & MII_ADDR_C45) 649 mmd_address = mmd_reg & ~MII_ADDR_C45; 650 else 651 mmd_address = (pdata->mdio_mmd << 16) | (mmd_reg & 0xffff); 652 653 /* The PCS registers are accessed using mmio. The underlying APB3 654 * management interface uses indirect addressing to access the MMD 655 * register sets. This requires accessing of the PCS register in two 656 * phases, an address phase and a data phase. 657 * 658 * The mmio interface is based on 32-bit offsets and values. All 659 * register offsets must therefore be adjusted by left shifting the 660 * offset 2 bits and reading 32 bits of data. 661 */ 662 mutex_lock(&pdata->xpcs_mutex); 663 XPCS_IOWRITE(pdata, PCS_MMD_SELECT << 2, mmd_address >> 8); 664 mmd_data = XPCS_IOREAD(pdata, (mmd_address & 0xff) << 2); 665 mutex_unlock(&pdata->xpcs_mutex); 666 667 return mmd_data; 668 } 669 670 static void xgbe_write_mmd_regs(struct xgbe_prv_data *pdata, int prtad, 671 int mmd_reg, int mmd_data) 672 { 673 unsigned int mmd_address; 674 675 if (mmd_reg & MII_ADDR_C45) 676 mmd_address = mmd_reg & ~MII_ADDR_C45; 677 else 678 mmd_address = (pdata->mdio_mmd << 16) | (mmd_reg & 0xffff); 679 680 /* The PCS registers are accessed using mmio. The underlying APB3 681 * management interface uses indirect addressing to access the MMD 682 * register sets. This requires accessing of the PCS register in two 683 * phases, an address phase and a data phase. 684 * 685 * The mmio interface is based on 32-bit offsets and values. All 686 * register offsets must therefore be adjusted by left shifting the 687 * offset 2 bits and reading 32 bits of data. 688 */ 689 mutex_lock(&pdata->xpcs_mutex); 690 XPCS_IOWRITE(pdata, PCS_MMD_SELECT << 2, mmd_address >> 8); 691 XPCS_IOWRITE(pdata, (mmd_address & 0xff) << 2, mmd_data); 692 mutex_unlock(&pdata->xpcs_mutex); 693 } 694 695 static int xgbe_tx_complete(struct xgbe_ring_desc *rdesc) 696 { 697 return !XGMAC_GET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, OWN); 698 } 699 700 static int xgbe_disable_rx_csum(struct xgbe_prv_data *pdata) 701 { 702 XGMAC_IOWRITE_BITS(pdata, MAC_RCR, IPC, 0); 703 704 return 0; 705 } 706 707 static int xgbe_enable_rx_csum(struct xgbe_prv_data *pdata) 708 { 709 XGMAC_IOWRITE_BITS(pdata, MAC_RCR, IPC, 1); 710 711 return 0; 712 } 713 714 static int xgbe_enable_rx_vlan_stripping(struct xgbe_prv_data *pdata) 715 { 716 /* Put the VLAN tag in the Rx descriptor */ 717 XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, EVLRXS, 1); 718 719 /* Don't check the VLAN type */ 720 XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, DOVLTC, 1); 721 722 /* Check only C-TAG (0x8100) packets */ 723 XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, ERSVLM, 0); 724 725 /* Don't consider an S-TAG (0x88A8) packet as a VLAN packet */ 726 XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, ESVL, 0); 727 728 /* Enable VLAN tag stripping */ 729 XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, EVLS, 0x3); 730 731 return 0; 732 } 733 734 static int xgbe_disable_rx_vlan_stripping(struct xgbe_prv_data *pdata) 735 { 736 XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, EVLS, 0); 737 738 return 0; 739 } 740 741 static void xgbe_tx_desc_reset(struct xgbe_ring_data *rdata) 742 { 743 struct xgbe_ring_desc *rdesc = rdata->rdesc; 744 745 /* Reset the Tx descriptor 746 * Set buffer 1 (lo) address to zero 747 * Set buffer 1 (hi) address to zero 748 * Reset all other control bits (IC, TTSE, B2L & B1L) 749 * Reset all other control bits (OWN, CTXT, FD, LD, CPC, CIC, etc) 750 */ 751 rdesc->desc0 = 0; 752 rdesc->desc1 = 0; 753 rdesc->desc2 = 0; 754 rdesc->desc3 = 0; 755 } 756 757 static void xgbe_tx_desc_init(struct xgbe_channel *channel) 758 { 759 struct xgbe_ring *ring = channel->tx_ring; 760 struct xgbe_ring_data *rdata; 761 struct xgbe_ring_desc *rdesc; 762 int i; 763 int start_index = ring->cur; 764 765 DBGPR("-->tx_desc_init\n"); 766 767 /* Initialze all descriptors */ 768 for (i = 0; i < ring->rdesc_count; i++) { 769 rdata = GET_DESC_DATA(ring, i); 770 rdesc = rdata->rdesc; 771 772 /* Initialize Tx descriptor 773 * Set buffer 1 (lo) address to zero 774 * Set buffer 1 (hi) address to zero 775 * Reset all other control bits (IC, TTSE, B2L & B1L) 776 * Reset all other control bits (OWN, CTXT, FD, LD, CPC, CIC, 777 * etc) 778 */ 779 rdesc->desc0 = 0; 780 rdesc->desc1 = 0; 781 rdesc->desc2 = 0; 782 rdesc->desc3 = 0; 783 } 784 785 /* Make sure everything is written to the descriptor(s) before 786 * telling the device about them 787 */ 788 wmb(); 789 790 /* Update the total number of Tx descriptors */ 791 XGMAC_DMA_IOWRITE(channel, DMA_CH_TDRLR, ring->rdesc_count - 1); 792 793 /* Update the starting address of descriptor ring */ 794 rdata = GET_DESC_DATA(ring, start_index); 795 XGMAC_DMA_IOWRITE(channel, DMA_CH_TDLR_HI, 796 upper_32_bits(rdata->rdesc_dma)); 797 XGMAC_DMA_IOWRITE(channel, DMA_CH_TDLR_LO, 798 lower_32_bits(rdata->rdesc_dma)); 799 800 DBGPR("<--tx_desc_init\n"); 801 } 802 803 static void xgbe_rx_desc_reset(struct xgbe_ring_data *rdata) 804 { 805 struct xgbe_ring_desc *rdesc = rdata->rdesc; 806 807 /* Reset the Rx descriptor 808 * Set buffer 1 (lo) address to dma address (lo) 809 * Set buffer 1 (hi) address to dma address (hi) 810 * Set buffer 2 (lo) address to zero 811 * Set buffer 2 (hi) address to zero and set control bits 812 * OWN and INTE 813 */ 814 rdesc->desc0 = cpu_to_le32(lower_32_bits(rdata->skb_dma)); 815 rdesc->desc1 = cpu_to_le32(upper_32_bits(rdata->skb_dma)); 816 rdesc->desc2 = 0; 817 818 rdesc->desc3 = 0; 819 if (rdata->interrupt) 820 XGMAC_SET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, INTE, 1); 821 822 /* Since the Rx DMA engine is likely running, make sure everything 823 * is written to the descriptor(s) before setting the OWN bit 824 * for the descriptor 825 */ 826 wmb(); 827 828 XGMAC_SET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, OWN, 1); 829 830 /* Make sure ownership is written to the descriptor */ 831 wmb(); 832 } 833 834 static void xgbe_rx_desc_init(struct xgbe_channel *channel) 835 { 836 struct xgbe_prv_data *pdata = channel->pdata; 837 struct xgbe_ring *ring = channel->rx_ring; 838 struct xgbe_ring_data *rdata; 839 struct xgbe_ring_desc *rdesc; 840 unsigned int start_index = ring->cur; 841 unsigned int rx_coalesce, rx_frames; 842 unsigned int i; 843 844 DBGPR("-->rx_desc_init\n"); 845 846 rx_coalesce = (pdata->rx_riwt || pdata->rx_frames) ? 1 : 0; 847 rx_frames = pdata->rx_frames; 848 849 /* Initialize all descriptors */ 850 for (i = 0; i < ring->rdesc_count; i++) { 851 rdata = GET_DESC_DATA(ring, i); 852 rdesc = rdata->rdesc; 853 854 /* Initialize Rx descriptor 855 * Set buffer 1 (lo) address to dma address (lo) 856 * Set buffer 1 (hi) address to dma address (hi) 857 * Set buffer 2 (lo) address to zero 858 * Set buffer 2 (hi) address to zero and set control 859 * bits OWN and INTE appropriateley 860 */ 861 rdesc->desc0 = cpu_to_le32(lower_32_bits(rdata->skb_dma)); 862 rdesc->desc1 = cpu_to_le32(upper_32_bits(rdata->skb_dma)); 863 rdesc->desc2 = 0; 864 rdesc->desc3 = 0; 865 XGMAC_SET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, OWN, 1); 866 XGMAC_SET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, INTE, 1); 867 rdata->interrupt = 1; 868 if (rx_coalesce && (!rx_frames || ((i + 1) % rx_frames))) { 869 /* Clear interrupt on completion bit */ 870 XGMAC_SET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, INTE, 871 0); 872 rdata->interrupt = 0; 873 } 874 } 875 876 /* Make sure everything is written to the descriptors before 877 * telling the device about them 878 */ 879 wmb(); 880 881 /* Update the total number of Rx descriptors */ 882 XGMAC_DMA_IOWRITE(channel, DMA_CH_RDRLR, ring->rdesc_count - 1); 883 884 /* Update the starting address of descriptor ring */ 885 rdata = GET_DESC_DATA(ring, start_index); 886 XGMAC_DMA_IOWRITE(channel, DMA_CH_RDLR_HI, 887 upper_32_bits(rdata->rdesc_dma)); 888 XGMAC_DMA_IOWRITE(channel, DMA_CH_RDLR_LO, 889 lower_32_bits(rdata->rdesc_dma)); 890 891 /* Update the Rx Descriptor Tail Pointer */ 892 rdata = GET_DESC_DATA(ring, start_index + ring->rdesc_count - 1); 893 XGMAC_DMA_IOWRITE(channel, DMA_CH_RDTR_LO, 894 lower_32_bits(rdata->rdesc_dma)); 895 896 DBGPR("<--rx_desc_init\n"); 897 } 898 899 static void xgbe_pre_xmit(struct xgbe_channel *channel) 900 { 901 struct xgbe_prv_data *pdata = channel->pdata; 902 struct xgbe_ring *ring = channel->tx_ring; 903 struct xgbe_ring_data *rdata; 904 struct xgbe_ring_desc *rdesc; 905 struct xgbe_packet_data *packet = &ring->packet_data; 906 unsigned int csum, tso, vlan; 907 unsigned int tso_context, vlan_context; 908 unsigned int tx_coalesce, tx_frames; 909 int start_index = ring->cur; 910 int i; 911 912 DBGPR("-->xgbe_pre_xmit\n"); 913 914 csum = XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES, 915 CSUM_ENABLE); 916 tso = XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES, 917 TSO_ENABLE); 918 vlan = XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES, 919 VLAN_CTAG); 920 921 if (tso && (packet->mss != ring->tx.cur_mss)) 922 tso_context = 1; 923 else 924 tso_context = 0; 925 926 if (vlan && (packet->vlan_ctag != ring->tx.cur_vlan_ctag)) 927 vlan_context = 1; 928 else 929 vlan_context = 0; 930 931 tx_coalesce = (pdata->tx_usecs || pdata->tx_frames) ? 1 : 0; 932 tx_frames = pdata->tx_frames; 933 if (tx_coalesce && !channel->tx_timer_active) 934 ring->coalesce_count = 0; 935 936 rdata = GET_DESC_DATA(ring, ring->cur); 937 rdesc = rdata->rdesc; 938 939 /* Create a context descriptor if this is a TSO packet */ 940 if (tso_context || vlan_context) { 941 if (tso_context) { 942 DBGPR(" TSO context descriptor, mss=%u\n", 943 packet->mss); 944 945 /* Set the MSS size */ 946 XGMAC_SET_BITS_LE(rdesc->desc2, TX_CONTEXT_DESC2, 947 MSS, packet->mss); 948 949 /* Mark it as a CONTEXT descriptor */ 950 XGMAC_SET_BITS_LE(rdesc->desc3, TX_CONTEXT_DESC3, 951 CTXT, 1); 952 953 /* Indicate this descriptor contains the MSS */ 954 XGMAC_SET_BITS_LE(rdesc->desc3, TX_CONTEXT_DESC3, 955 TCMSSV, 1); 956 957 ring->tx.cur_mss = packet->mss; 958 } 959 960 if (vlan_context) { 961 DBGPR(" VLAN context descriptor, ctag=%u\n", 962 packet->vlan_ctag); 963 964 /* Mark it as a CONTEXT descriptor */ 965 XGMAC_SET_BITS_LE(rdesc->desc3, TX_CONTEXT_DESC3, 966 CTXT, 1); 967 968 /* Set the VLAN tag */ 969 XGMAC_SET_BITS_LE(rdesc->desc3, TX_CONTEXT_DESC3, 970 VT, packet->vlan_ctag); 971 972 /* Indicate this descriptor contains the VLAN tag */ 973 XGMAC_SET_BITS_LE(rdesc->desc3, TX_CONTEXT_DESC3, 974 VLTV, 1); 975 976 ring->tx.cur_vlan_ctag = packet->vlan_ctag; 977 } 978 979 ring->cur++; 980 rdata = GET_DESC_DATA(ring, ring->cur); 981 rdesc = rdata->rdesc; 982 } 983 984 /* Update buffer address (for TSO this is the header) */ 985 rdesc->desc0 = cpu_to_le32(lower_32_bits(rdata->skb_dma)); 986 rdesc->desc1 = cpu_to_le32(upper_32_bits(rdata->skb_dma)); 987 988 /* Update the buffer length */ 989 XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, HL_B1L, 990 rdata->skb_dma_len); 991 992 /* VLAN tag insertion check */ 993 if (vlan) 994 XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, VTIR, 995 TX_NORMAL_DESC2_VLAN_INSERT); 996 997 /* Set IC bit based on Tx coalescing settings */ 998 XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, IC, 1); 999 if (tx_coalesce && (!tx_frames || 1000 (++ring->coalesce_count % tx_frames))) 1001 /* Clear IC bit */ 1002 XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, IC, 0); 1003 1004 /* Mark it as First Descriptor */ 1005 XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, FD, 1); 1006 1007 /* Mark it as a NORMAL descriptor */ 1008 XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, CTXT, 0); 1009 1010 /* Set OWN bit if not the first descriptor */ 1011 if (ring->cur != start_index) 1012 XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, OWN, 1); 1013 1014 if (tso) { 1015 /* Enable TSO */ 1016 XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, TSE, 1); 1017 XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, TCPPL, 1018 packet->tcp_payload_len); 1019 XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, TCPHDRLEN, 1020 packet->tcp_header_len / 4); 1021 } else { 1022 /* Enable CRC and Pad Insertion */ 1023 XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, CPC, 0); 1024 1025 /* Enable HW CSUM */ 1026 if (csum) 1027 XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, 1028 CIC, 0x3); 1029 1030 /* Set the total length to be transmitted */ 1031 XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, FL, 1032 packet->length); 1033 } 1034 1035 for (i = ring->cur - start_index + 1; i < packet->rdesc_count; i++) { 1036 ring->cur++; 1037 rdata = GET_DESC_DATA(ring, ring->cur); 1038 rdesc = rdata->rdesc; 1039 1040 /* Update buffer address */ 1041 rdesc->desc0 = cpu_to_le32(lower_32_bits(rdata->skb_dma)); 1042 rdesc->desc1 = cpu_to_le32(upper_32_bits(rdata->skb_dma)); 1043 1044 /* Update the buffer length */ 1045 XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, HL_B1L, 1046 rdata->skb_dma_len); 1047 1048 /* Set IC bit based on Tx coalescing settings */ 1049 XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, IC, 1); 1050 if (tx_coalesce && (!tx_frames || 1051 (++ring->coalesce_count % tx_frames))) 1052 /* Clear IC bit */ 1053 XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, IC, 0); 1054 1055 /* Set OWN bit */ 1056 XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, OWN, 1); 1057 1058 /* Mark it as NORMAL descriptor */ 1059 XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, CTXT, 0); 1060 1061 /* Enable HW CSUM */ 1062 if (csum) 1063 XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, 1064 CIC, 0x3); 1065 } 1066 1067 /* Set LAST bit for the last descriptor */ 1068 XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, LD, 1); 1069 1070 /* In case the Tx DMA engine is running, make sure everything 1071 * is written to the descriptor(s) before setting the OWN bit 1072 * for the first descriptor 1073 */ 1074 wmb(); 1075 1076 /* Set OWN bit for the first descriptor */ 1077 rdata = GET_DESC_DATA(ring, start_index); 1078 rdesc = rdata->rdesc; 1079 XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, OWN, 1); 1080 1081 #ifdef XGMAC_ENABLE_TX_DESC_DUMP 1082 xgbe_dump_tx_desc(ring, start_index, packet->rdesc_count, 1); 1083 #endif 1084 1085 /* Make sure ownership is written to the descriptor */ 1086 wmb(); 1087 1088 /* Issue a poll command to Tx DMA by writing address 1089 * of next immediate free descriptor */ 1090 ring->cur++; 1091 rdata = GET_DESC_DATA(ring, ring->cur); 1092 XGMAC_DMA_IOWRITE(channel, DMA_CH_TDTR_LO, 1093 lower_32_bits(rdata->rdesc_dma)); 1094 1095 /* Start the Tx coalescing timer */ 1096 if (tx_coalesce && !channel->tx_timer_active) { 1097 channel->tx_timer_active = 1; 1098 hrtimer_start(&channel->tx_timer, 1099 ktime_set(0, pdata->tx_usecs * NSEC_PER_USEC), 1100 HRTIMER_MODE_REL); 1101 } 1102 1103 DBGPR(" %s: descriptors %u to %u written\n", 1104 channel->name, start_index & (ring->rdesc_count - 1), 1105 (ring->cur - 1) & (ring->rdesc_count - 1)); 1106 1107 DBGPR("<--xgbe_pre_xmit\n"); 1108 } 1109 1110 static int xgbe_dev_read(struct xgbe_channel *channel) 1111 { 1112 struct xgbe_ring *ring = channel->rx_ring; 1113 struct xgbe_ring_data *rdata; 1114 struct xgbe_ring_desc *rdesc; 1115 struct xgbe_packet_data *packet = &ring->packet_data; 1116 unsigned int err, etlt; 1117 1118 DBGPR("-->xgbe_dev_read: cur = %d\n", ring->cur); 1119 1120 rdata = GET_DESC_DATA(ring, ring->cur); 1121 rdesc = rdata->rdesc; 1122 1123 /* Check for data availability */ 1124 if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, OWN)) 1125 return 1; 1126 1127 #ifdef XGMAC_ENABLE_RX_DESC_DUMP 1128 xgbe_dump_rx_desc(ring, rdesc, ring->cur); 1129 #endif 1130 1131 /* Get the packet length */ 1132 rdata->len = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, PL); 1133 1134 if (!XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, LD)) { 1135 /* Not all the data has been transferred for this packet */ 1136 XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, 1137 INCOMPLETE, 1); 1138 return 0; 1139 } 1140 1141 /* This is the last of the data for this packet */ 1142 XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, 1143 INCOMPLETE, 0); 1144 1145 /* Set checksum done indicator as appropriate */ 1146 if (channel->pdata->netdev->features & NETIF_F_RXCSUM) 1147 XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, 1148 CSUM_DONE, 1); 1149 1150 /* Check for errors (only valid in last descriptor) */ 1151 err = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, ES); 1152 etlt = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, ETLT); 1153 DBGPR(" err=%u, etlt=%#x\n", err, etlt); 1154 1155 if (!err || (err && !etlt)) { 1156 if (etlt == 0x09) { 1157 XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, 1158 VLAN_CTAG, 1); 1159 packet->vlan_ctag = XGMAC_GET_BITS_LE(rdesc->desc0, 1160 RX_NORMAL_DESC0, 1161 OVT); 1162 DBGPR(" vlan-ctag=0x%04x\n", packet->vlan_ctag); 1163 } 1164 } else { 1165 if ((etlt == 0x05) || (etlt == 0x06)) 1166 XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, 1167 CSUM_DONE, 0); 1168 else 1169 XGMAC_SET_BITS(packet->errors, RX_PACKET_ERRORS, 1170 FRAME, 1); 1171 } 1172 1173 DBGPR("<--xgbe_dev_read: %s - descriptor=%u (cur=%d)\n", channel->name, 1174 ring->cur & (ring->rdesc_count - 1), ring->cur); 1175 1176 return 0; 1177 } 1178 1179 static int xgbe_is_context_desc(struct xgbe_ring_desc *rdesc) 1180 { 1181 /* Rx and Tx share CTXT bit, so check TDES3.CTXT bit */ 1182 return XGMAC_GET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, CTXT); 1183 } 1184 1185 static int xgbe_is_last_desc(struct xgbe_ring_desc *rdesc) 1186 { 1187 /* Rx and Tx share LD bit, so check TDES3.LD bit */ 1188 return XGMAC_GET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, LD); 1189 } 1190 1191 static void xgbe_save_interrupt_status(struct xgbe_channel *channel, 1192 enum xgbe_int_state int_state) 1193 { 1194 unsigned int dma_ch_ier; 1195 1196 if (int_state == XGMAC_INT_STATE_SAVE) { 1197 channel->saved_ier = XGMAC_DMA_IOREAD(channel, DMA_CH_IER); 1198 channel->saved_ier &= DMA_INTERRUPT_MASK; 1199 } else { 1200 dma_ch_ier = XGMAC_DMA_IOREAD(channel, DMA_CH_IER); 1201 dma_ch_ier |= channel->saved_ier; 1202 XGMAC_DMA_IOWRITE(channel, DMA_CH_IER, dma_ch_ier); 1203 } 1204 } 1205 1206 static int xgbe_enable_int(struct xgbe_channel *channel, 1207 enum xgbe_int int_id) 1208 { 1209 switch (int_id) { 1210 case XGMAC_INT_DMA_ISR_DC0IS: 1211 XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_IER, TIE, 1); 1212 break; 1213 case XGMAC_INT_DMA_CH_SR_TI: 1214 XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_IER, TIE, 1); 1215 break; 1216 case XGMAC_INT_DMA_CH_SR_TPS: 1217 XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_IER, TXSE, 1); 1218 break; 1219 case XGMAC_INT_DMA_CH_SR_TBU: 1220 XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_IER, TBUE, 1); 1221 break; 1222 case XGMAC_INT_DMA_CH_SR_RI: 1223 XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_IER, RIE, 1); 1224 break; 1225 case XGMAC_INT_DMA_CH_SR_RBU: 1226 XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_IER, RBUE, 1); 1227 break; 1228 case XGMAC_INT_DMA_CH_SR_RPS: 1229 XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_IER, RSE, 1); 1230 break; 1231 case XGMAC_INT_DMA_CH_SR_FBE: 1232 XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_IER, FBEE, 1); 1233 break; 1234 case XGMAC_INT_DMA_ALL: 1235 xgbe_save_interrupt_status(channel, XGMAC_INT_STATE_RESTORE); 1236 break; 1237 default: 1238 return -1; 1239 } 1240 1241 return 0; 1242 } 1243 1244 static int xgbe_disable_int(struct xgbe_channel *channel, 1245 enum xgbe_int int_id) 1246 { 1247 unsigned int dma_ch_ier; 1248 1249 switch (int_id) { 1250 case XGMAC_INT_DMA_ISR_DC0IS: 1251 XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_IER, TIE, 0); 1252 break; 1253 case XGMAC_INT_DMA_CH_SR_TI: 1254 XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_IER, TIE, 0); 1255 break; 1256 case XGMAC_INT_DMA_CH_SR_TPS: 1257 XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_IER, TXSE, 0); 1258 break; 1259 case XGMAC_INT_DMA_CH_SR_TBU: 1260 XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_IER, TBUE, 0); 1261 break; 1262 case XGMAC_INT_DMA_CH_SR_RI: 1263 XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_IER, RIE, 0); 1264 break; 1265 case XGMAC_INT_DMA_CH_SR_RBU: 1266 XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_IER, RBUE, 0); 1267 break; 1268 case XGMAC_INT_DMA_CH_SR_RPS: 1269 XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_IER, RSE, 0); 1270 break; 1271 case XGMAC_INT_DMA_CH_SR_FBE: 1272 XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_IER, FBEE, 0); 1273 break; 1274 case XGMAC_INT_DMA_ALL: 1275 xgbe_save_interrupt_status(channel, XGMAC_INT_STATE_SAVE); 1276 1277 dma_ch_ier = XGMAC_DMA_IOREAD(channel, DMA_CH_IER); 1278 dma_ch_ier &= ~DMA_INTERRUPT_MASK; 1279 XGMAC_DMA_IOWRITE(channel, DMA_CH_IER, dma_ch_ier); 1280 break; 1281 default: 1282 return -1; 1283 } 1284 1285 return 0; 1286 } 1287 1288 static int xgbe_exit(struct xgbe_prv_data *pdata) 1289 { 1290 unsigned int count = 2000; 1291 1292 DBGPR("-->xgbe_exit\n"); 1293 1294 /* Issue a software reset */ 1295 XGMAC_IOWRITE_BITS(pdata, DMA_MR, SWR, 1); 1296 usleep_range(10, 15); 1297 1298 /* Poll Until Poll Condition */ 1299 while (count-- && XGMAC_IOREAD_BITS(pdata, DMA_MR, SWR)) 1300 usleep_range(500, 600); 1301 1302 if (!count) 1303 return -EBUSY; 1304 1305 DBGPR("<--xgbe_exit\n"); 1306 1307 return 0; 1308 } 1309 1310 static int xgbe_flush_tx_queues(struct xgbe_prv_data *pdata) 1311 { 1312 unsigned int i, count; 1313 1314 for (i = 0; i < pdata->hw_feat.tx_q_cnt; i++) 1315 XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, FTQ, 1); 1316 1317 /* Poll Until Poll Condition */ 1318 for (i = 0; i < pdata->hw_feat.tx_q_cnt; i++) { 1319 count = 2000; 1320 while (count-- && XGMAC_MTL_IOREAD_BITS(pdata, i, 1321 MTL_Q_TQOMR, FTQ)) 1322 usleep_range(500, 600); 1323 1324 if (!count) 1325 return -EBUSY; 1326 } 1327 1328 return 0; 1329 } 1330 1331 static void xgbe_config_dma_bus(struct xgbe_prv_data *pdata) 1332 { 1333 /* Set enhanced addressing mode */ 1334 XGMAC_IOWRITE_BITS(pdata, DMA_SBMR, EAME, 1); 1335 1336 /* Set the System Bus mode */ 1337 XGMAC_IOWRITE_BITS(pdata, DMA_SBMR, UNDEF, 1); 1338 } 1339 1340 static void xgbe_config_dma_cache(struct xgbe_prv_data *pdata) 1341 { 1342 unsigned int arcache, awcache; 1343 1344 arcache = 0; 1345 XGMAC_SET_BITS(arcache, DMA_AXIARCR, DRC, DMA_ARCACHE_SETTING); 1346 XGMAC_SET_BITS(arcache, DMA_AXIARCR, DRD, DMA_ARDOMAIN_SETTING); 1347 XGMAC_SET_BITS(arcache, DMA_AXIARCR, TEC, DMA_ARCACHE_SETTING); 1348 XGMAC_SET_BITS(arcache, DMA_AXIARCR, TED, DMA_ARDOMAIN_SETTING); 1349 XGMAC_SET_BITS(arcache, DMA_AXIARCR, THC, DMA_ARCACHE_SETTING); 1350 XGMAC_SET_BITS(arcache, DMA_AXIARCR, THD, DMA_ARDOMAIN_SETTING); 1351 XGMAC_IOWRITE(pdata, DMA_AXIARCR, arcache); 1352 1353 awcache = 0; 1354 XGMAC_SET_BITS(awcache, DMA_AXIAWCR, DWC, DMA_AWCACHE_SETTING); 1355 XGMAC_SET_BITS(awcache, DMA_AXIAWCR, DWD, DMA_AWDOMAIN_SETTING); 1356 XGMAC_SET_BITS(awcache, DMA_AXIAWCR, RPC, DMA_AWCACHE_SETTING); 1357 XGMAC_SET_BITS(awcache, DMA_AXIAWCR, RPD, DMA_AWDOMAIN_SETTING); 1358 XGMAC_SET_BITS(awcache, DMA_AXIAWCR, RHC, DMA_AWCACHE_SETTING); 1359 XGMAC_SET_BITS(awcache, DMA_AXIAWCR, RHD, DMA_AWDOMAIN_SETTING); 1360 XGMAC_SET_BITS(awcache, DMA_AXIAWCR, TDC, DMA_AWCACHE_SETTING); 1361 XGMAC_SET_BITS(awcache, DMA_AXIAWCR, TDD, DMA_AWDOMAIN_SETTING); 1362 XGMAC_IOWRITE(pdata, DMA_AXIAWCR, awcache); 1363 } 1364 1365 static void xgbe_config_mtl_mode(struct xgbe_prv_data *pdata) 1366 { 1367 unsigned int i; 1368 1369 /* Set Tx to weighted round robin scheduling algorithm (when 1370 * traffic class is using ETS algorithm) 1371 */ 1372 XGMAC_IOWRITE_BITS(pdata, MTL_OMR, ETSALG, MTL_ETSALG_WRR); 1373 1374 /* Set Tx traffic classes to strict priority algorithm */ 1375 for (i = 0; i < XGBE_TC_CNT; i++) 1376 XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_ETSCR, TSA, MTL_TSA_SP); 1377 1378 /* Set Rx to strict priority algorithm */ 1379 XGMAC_IOWRITE_BITS(pdata, MTL_OMR, RAA, MTL_RAA_SP); 1380 } 1381 1382 static unsigned int xgbe_calculate_per_queue_fifo(unsigned long fifo_size, 1383 unsigned char queue_count) 1384 { 1385 unsigned int q_fifo_size = 0; 1386 enum xgbe_mtl_fifo_size p_fifo = XGMAC_MTL_FIFO_SIZE_256; 1387 1388 /* Calculate Tx/Rx fifo share per queue */ 1389 switch (fifo_size) { 1390 case 0: 1391 q_fifo_size = FIFO_SIZE_B(128); 1392 break; 1393 case 1: 1394 q_fifo_size = FIFO_SIZE_B(256); 1395 break; 1396 case 2: 1397 q_fifo_size = FIFO_SIZE_B(512); 1398 break; 1399 case 3: 1400 q_fifo_size = FIFO_SIZE_KB(1); 1401 break; 1402 case 4: 1403 q_fifo_size = FIFO_SIZE_KB(2); 1404 break; 1405 case 5: 1406 q_fifo_size = FIFO_SIZE_KB(4); 1407 break; 1408 case 6: 1409 q_fifo_size = FIFO_SIZE_KB(8); 1410 break; 1411 case 7: 1412 q_fifo_size = FIFO_SIZE_KB(16); 1413 break; 1414 case 8: 1415 q_fifo_size = FIFO_SIZE_KB(32); 1416 break; 1417 case 9: 1418 q_fifo_size = FIFO_SIZE_KB(64); 1419 break; 1420 case 10: 1421 q_fifo_size = FIFO_SIZE_KB(128); 1422 break; 1423 case 11: 1424 q_fifo_size = FIFO_SIZE_KB(256); 1425 break; 1426 } 1427 q_fifo_size = q_fifo_size / queue_count; 1428 1429 /* Set the queue fifo size programmable value */ 1430 if (q_fifo_size >= FIFO_SIZE_KB(256)) 1431 p_fifo = XGMAC_MTL_FIFO_SIZE_256K; 1432 else if (q_fifo_size >= FIFO_SIZE_KB(128)) 1433 p_fifo = XGMAC_MTL_FIFO_SIZE_128K; 1434 else if (q_fifo_size >= FIFO_SIZE_KB(64)) 1435 p_fifo = XGMAC_MTL_FIFO_SIZE_64K; 1436 else if (q_fifo_size >= FIFO_SIZE_KB(32)) 1437 p_fifo = XGMAC_MTL_FIFO_SIZE_32K; 1438 else if (q_fifo_size >= FIFO_SIZE_KB(16)) 1439 p_fifo = XGMAC_MTL_FIFO_SIZE_16K; 1440 else if (q_fifo_size >= FIFO_SIZE_KB(8)) 1441 p_fifo = XGMAC_MTL_FIFO_SIZE_8K; 1442 else if (q_fifo_size >= FIFO_SIZE_KB(4)) 1443 p_fifo = XGMAC_MTL_FIFO_SIZE_4K; 1444 else if (q_fifo_size >= FIFO_SIZE_KB(2)) 1445 p_fifo = XGMAC_MTL_FIFO_SIZE_2K; 1446 else if (q_fifo_size >= FIFO_SIZE_KB(1)) 1447 p_fifo = XGMAC_MTL_FIFO_SIZE_1K; 1448 else if (q_fifo_size >= FIFO_SIZE_B(512)) 1449 p_fifo = XGMAC_MTL_FIFO_SIZE_512; 1450 else if (q_fifo_size >= FIFO_SIZE_B(256)) 1451 p_fifo = XGMAC_MTL_FIFO_SIZE_256; 1452 1453 return p_fifo; 1454 } 1455 1456 static void xgbe_config_tx_fifo_size(struct xgbe_prv_data *pdata) 1457 { 1458 enum xgbe_mtl_fifo_size fifo_size; 1459 unsigned int i; 1460 1461 fifo_size = xgbe_calculate_per_queue_fifo(pdata->hw_feat.tx_fifo_size, 1462 pdata->hw_feat.tx_q_cnt); 1463 1464 for (i = 0; i < pdata->hw_feat.tx_q_cnt; i++) 1465 XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TQS, fifo_size); 1466 1467 netdev_notice(pdata->netdev, "%d Tx queues, %d byte fifo per queue\n", 1468 pdata->hw_feat.tx_q_cnt, ((fifo_size + 1) * 256)); 1469 } 1470 1471 static void xgbe_config_rx_fifo_size(struct xgbe_prv_data *pdata) 1472 { 1473 enum xgbe_mtl_fifo_size fifo_size; 1474 unsigned int i; 1475 1476 fifo_size = xgbe_calculate_per_queue_fifo(pdata->hw_feat.rx_fifo_size, 1477 pdata->hw_feat.rx_q_cnt); 1478 1479 for (i = 0; i < pdata->hw_feat.rx_q_cnt; i++) 1480 XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RQS, fifo_size); 1481 1482 netdev_notice(pdata->netdev, "%d Rx queues, %d byte fifo per queue\n", 1483 pdata->hw_feat.rx_q_cnt, ((fifo_size + 1) * 256)); 1484 } 1485 1486 static void xgbe_config_rx_queue_mapping(struct xgbe_prv_data *pdata) 1487 { 1488 unsigned int i, reg, reg_val; 1489 unsigned int q_count = pdata->hw_feat.rx_q_cnt; 1490 1491 /* Select dynamic mapping of MTL Rx queue to DMA Rx channel */ 1492 reg = MTL_RQDCM0R; 1493 reg_val = 0; 1494 for (i = 0; i < q_count;) { 1495 reg_val |= (0x80 << ((i++ % MTL_RQDCM_Q_PER_REG) << 3)); 1496 1497 if ((i % MTL_RQDCM_Q_PER_REG) && (i != q_count)) 1498 continue; 1499 1500 XGMAC_IOWRITE(pdata, reg, reg_val); 1501 1502 reg += MTL_RQDCM_INC; 1503 reg_val = 0; 1504 } 1505 } 1506 1507 static void xgbe_config_flow_control_threshold(struct xgbe_prv_data *pdata) 1508 { 1509 unsigned int i; 1510 1511 for (i = 0; i < pdata->hw_feat.rx_q_cnt; i++) { 1512 /* Activate flow control when less than 4k left in fifo */ 1513 XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RFA, 2); 1514 1515 /* De-activate flow control when more than 6k left in fifo */ 1516 XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RFD, 4); 1517 } 1518 } 1519 1520 static void xgbe_config_mac_address(struct xgbe_prv_data *pdata) 1521 { 1522 xgbe_set_mac_address(pdata, pdata->netdev->dev_addr); 1523 } 1524 1525 static void xgbe_config_jumbo_enable(struct xgbe_prv_data *pdata) 1526 { 1527 unsigned int val; 1528 1529 val = (pdata->netdev->mtu > XGMAC_STD_PACKET_MTU) ? 1 : 0; 1530 1531 XGMAC_IOWRITE_BITS(pdata, MAC_RCR, JE, val); 1532 } 1533 1534 static void xgbe_config_checksum_offload(struct xgbe_prv_data *pdata) 1535 { 1536 if (pdata->netdev->features & NETIF_F_RXCSUM) 1537 xgbe_enable_rx_csum(pdata); 1538 else 1539 xgbe_disable_rx_csum(pdata); 1540 } 1541 1542 static void xgbe_config_vlan_support(struct xgbe_prv_data *pdata) 1543 { 1544 if (pdata->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) 1545 xgbe_enable_rx_vlan_stripping(pdata); 1546 else 1547 xgbe_disable_rx_vlan_stripping(pdata); 1548 } 1549 1550 static void xgbe_tx_mmc_int(struct xgbe_prv_data *pdata) 1551 { 1552 struct xgbe_mmc_stats *stats = &pdata->mmc_stats; 1553 unsigned int mmc_isr = XGMAC_IOREAD(pdata, MMC_TISR); 1554 1555 if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXOCTETCOUNT_GB)) 1556 stats->txoctetcount_gb += 1557 XGMAC_IOREAD(pdata, MMC_TXOCTETCOUNT_GB_LO); 1558 1559 if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXFRAMECOUNT_GB)) 1560 stats->txframecount_gb += 1561 XGMAC_IOREAD(pdata, MMC_TXFRAMECOUNT_GB_LO); 1562 1563 if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXBROADCASTFRAMES_G)) 1564 stats->txbroadcastframes_g += 1565 XGMAC_IOREAD(pdata, MMC_TXBROADCASTFRAMES_G_LO); 1566 1567 if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXMULTICASTFRAMES_G)) 1568 stats->txmulticastframes_g += 1569 XGMAC_IOREAD(pdata, MMC_TXMULTICASTFRAMES_G_LO); 1570 1571 if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX64OCTETS_GB)) 1572 stats->tx64octets_gb += 1573 XGMAC_IOREAD(pdata, MMC_TX64OCTETS_GB_LO); 1574 1575 if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX65TO127OCTETS_GB)) 1576 stats->tx65to127octets_gb += 1577 XGMAC_IOREAD(pdata, MMC_TX65TO127OCTETS_GB_LO); 1578 1579 if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX128TO255OCTETS_GB)) 1580 stats->tx128to255octets_gb += 1581 XGMAC_IOREAD(pdata, MMC_TX128TO255OCTETS_GB_LO); 1582 1583 if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX256TO511OCTETS_GB)) 1584 stats->tx256to511octets_gb += 1585 XGMAC_IOREAD(pdata, MMC_TX256TO511OCTETS_GB_LO); 1586 1587 if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX512TO1023OCTETS_GB)) 1588 stats->tx512to1023octets_gb += 1589 XGMAC_IOREAD(pdata, MMC_TX512TO1023OCTETS_GB_LO); 1590 1591 if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX1024TOMAXOCTETS_GB)) 1592 stats->tx1024tomaxoctets_gb += 1593 XGMAC_IOREAD(pdata, MMC_TX1024TOMAXOCTETS_GB_LO); 1594 1595 if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXUNICASTFRAMES_GB)) 1596 stats->txunicastframes_gb += 1597 XGMAC_IOREAD(pdata, MMC_TXUNICASTFRAMES_GB_LO); 1598 1599 if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXMULTICASTFRAMES_GB)) 1600 stats->txmulticastframes_gb += 1601 XGMAC_IOREAD(pdata, MMC_TXMULTICASTFRAMES_GB_LO); 1602 1603 if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXBROADCASTFRAMES_GB)) 1604 stats->txbroadcastframes_g += 1605 XGMAC_IOREAD(pdata, MMC_TXBROADCASTFRAMES_GB_LO); 1606 1607 if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXUNDERFLOWERROR)) 1608 stats->txunderflowerror += 1609 XGMAC_IOREAD(pdata, MMC_TXUNDERFLOWERROR_LO); 1610 1611 if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXOCTETCOUNT_G)) 1612 stats->txoctetcount_g += 1613 XGMAC_IOREAD(pdata, MMC_TXOCTETCOUNT_G_LO); 1614 1615 if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXFRAMECOUNT_G)) 1616 stats->txframecount_g += 1617 XGMAC_IOREAD(pdata, MMC_TXFRAMECOUNT_G_LO); 1618 1619 if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXPAUSEFRAMES)) 1620 stats->txpauseframes += 1621 XGMAC_IOREAD(pdata, MMC_TXPAUSEFRAMES_LO); 1622 1623 if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXVLANFRAMES_G)) 1624 stats->txvlanframes_g += 1625 XGMAC_IOREAD(pdata, MMC_TXVLANFRAMES_G_LO); 1626 } 1627 1628 static void xgbe_rx_mmc_int(struct xgbe_prv_data *pdata) 1629 { 1630 struct xgbe_mmc_stats *stats = &pdata->mmc_stats; 1631 unsigned int mmc_isr = XGMAC_IOREAD(pdata, MMC_RISR); 1632 1633 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXFRAMECOUNT_GB)) 1634 stats->rxframecount_gb += 1635 XGMAC_IOREAD(pdata, MMC_RXFRAMECOUNT_GB_LO); 1636 1637 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXOCTETCOUNT_GB)) 1638 stats->rxoctetcount_gb += 1639 XGMAC_IOREAD(pdata, MMC_RXOCTETCOUNT_GB_LO); 1640 1641 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXOCTETCOUNT_G)) 1642 stats->rxoctetcount_g += 1643 XGMAC_IOREAD(pdata, MMC_RXOCTETCOUNT_G_LO); 1644 1645 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXBROADCASTFRAMES_G)) 1646 stats->rxbroadcastframes_g += 1647 XGMAC_IOREAD(pdata, MMC_RXBROADCASTFRAMES_G_LO); 1648 1649 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXMULTICASTFRAMES_G)) 1650 stats->rxmulticastframes_g += 1651 XGMAC_IOREAD(pdata, MMC_RXMULTICASTFRAMES_G_LO); 1652 1653 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXCRCERROR)) 1654 stats->rxcrcerror += 1655 XGMAC_IOREAD(pdata, MMC_RXCRCERROR_LO); 1656 1657 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXRUNTERROR)) 1658 stats->rxrunterror += 1659 XGMAC_IOREAD(pdata, MMC_RXRUNTERROR); 1660 1661 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXJABBERERROR)) 1662 stats->rxjabbererror += 1663 XGMAC_IOREAD(pdata, MMC_RXJABBERERROR); 1664 1665 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXUNDERSIZE_G)) 1666 stats->rxundersize_g += 1667 XGMAC_IOREAD(pdata, MMC_RXUNDERSIZE_G); 1668 1669 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXOVERSIZE_G)) 1670 stats->rxoversize_g += 1671 XGMAC_IOREAD(pdata, MMC_RXOVERSIZE_G); 1672 1673 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX64OCTETS_GB)) 1674 stats->rx64octets_gb += 1675 XGMAC_IOREAD(pdata, MMC_RX64OCTETS_GB_LO); 1676 1677 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX65TO127OCTETS_GB)) 1678 stats->rx65to127octets_gb += 1679 XGMAC_IOREAD(pdata, MMC_RX65TO127OCTETS_GB_LO); 1680 1681 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX128TO255OCTETS_GB)) 1682 stats->rx128to255octets_gb += 1683 XGMAC_IOREAD(pdata, MMC_RX128TO255OCTETS_GB_LO); 1684 1685 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX256TO511OCTETS_GB)) 1686 stats->rx256to511octets_gb += 1687 XGMAC_IOREAD(pdata, MMC_RX256TO511OCTETS_GB_LO); 1688 1689 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX512TO1023OCTETS_GB)) 1690 stats->rx512to1023octets_gb += 1691 XGMAC_IOREAD(pdata, MMC_RX512TO1023OCTETS_GB_LO); 1692 1693 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX1024TOMAXOCTETS_GB)) 1694 stats->rx1024tomaxoctets_gb += 1695 XGMAC_IOREAD(pdata, MMC_RX1024TOMAXOCTETS_GB_LO); 1696 1697 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXUNICASTFRAMES_G)) 1698 stats->rxunicastframes_g += 1699 XGMAC_IOREAD(pdata, MMC_RXUNICASTFRAMES_G_LO); 1700 1701 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXLENGTHERROR)) 1702 stats->rxlengtherror += 1703 XGMAC_IOREAD(pdata, MMC_RXLENGTHERROR_LO); 1704 1705 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXOUTOFRANGETYPE)) 1706 stats->rxoutofrangetype += 1707 XGMAC_IOREAD(pdata, MMC_RXOUTOFRANGETYPE_LO); 1708 1709 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXPAUSEFRAMES)) 1710 stats->rxpauseframes += 1711 XGMAC_IOREAD(pdata, MMC_RXPAUSEFRAMES_LO); 1712 1713 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXFIFOOVERFLOW)) 1714 stats->rxfifooverflow += 1715 XGMAC_IOREAD(pdata, MMC_RXFIFOOVERFLOW_LO); 1716 1717 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXVLANFRAMES_GB)) 1718 stats->rxvlanframes_gb += 1719 XGMAC_IOREAD(pdata, MMC_RXVLANFRAMES_GB_LO); 1720 1721 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXWATCHDOGERROR)) 1722 stats->rxwatchdogerror += 1723 XGMAC_IOREAD(pdata, MMC_RXWATCHDOGERROR); 1724 } 1725 1726 static void xgbe_read_mmc_stats(struct xgbe_prv_data *pdata) 1727 { 1728 struct xgbe_mmc_stats *stats = &pdata->mmc_stats; 1729 1730 /* Freeze counters */ 1731 XGMAC_IOWRITE_BITS(pdata, MMC_CR, MCF, 1); 1732 1733 stats->txoctetcount_gb += 1734 XGMAC_IOREAD(pdata, MMC_TXOCTETCOUNT_GB_LO); 1735 1736 stats->txframecount_gb += 1737 XGMAC_IOREAD(pdata, MMC_TXFRAMECOUNT_GB_LO); 1738 1739 stats->txbroadcastframes_g += 1740 XGMAC_IOREAD(pdata, MMC_TXBROADCASTFRAMES_G_LO); 1741 1742 stats->txmulticastframes_g += 1743 XGMAC_IOREAD(pdata, MMC_TXMULTICASTFRAMES_G_LO); 1744 1745 stats->tx64octets_gb += 1746 XGMAC_IOREAD(pdata, MMC_TX64OCTETS_GB_LO); 1747 1748 stats->tx65to127octets_gb += 1749 XGMAC_IOREAD(pdata, MMC_TX65TO127OCTETS_GB_LO); 1750 1751 stats->tx128to255octets_gb += 1752 XGMAC_IOREAD(pdata, MMC_TX128TO255OCTETS_GB_LO); 1753 1754 stats->tx256to511octets_gb += 1755 XGMAC_IOREAD(pdata, MMC_TX256TO511OCTETS_GB_LO); 1756 1757 stats->tx512to1023octets_gb += 1758 XGMAC_IOREAD(pdata, MMC_TX512TO1023OCTETS_GB_LO); 1759 1760 stats->tx1024tomaxoctets_gb += 1761 XGMAC_IOREAD(pdata, MMC_TX1024TOMAXOCTETS_GB_LO); 1762 1763 stats->txunicastframes_gb += 1764 XGMAC_IOREAD(pdata, MMC_TXUNICASTFRAMES_GB_LO); 1765 1766 stats->txmulticastframes_gb += 1767 XGMAC_IOREAD(pdata, MMC_TXMULTICASTFRAMES_GB_LO); 1768 1769 stats->txbroadcastframes_g += 1770 XGMAC_IOREAD(pdata, MMC_TXBROADCASTFRAMES_GB_LO); 1771 1772 stats->txunderflowerror += 1773 XGMAC_IOREAD(pdata, MMC_TXUNDERFLOWERROR_LO); 1774 1775 stats->txoctetcount_g += 1776 XGMAC_IOREAD(pdata, MMC_TXOCTETCOUNT_G_LO); 1777 1778 stats->txframecount_g += 1779 XGMAC_IOREAD(pdata, MMC_TXFRAMECOUNT_G_LO); 1780 1781 stats->txpauseframes += 1782 XGMAC_IOREAD(pdata, MMC_TXPAUSEFRAMES_LO); 1783 1784 stats->txvlanframes_g += 1785 XGMAC_IOREAD(pdata, MMC_TXVLANFRAMES_G_LO); 1786 1787 stats->rxframecount_gb += 1788 XGMAC_IOREAD(pdata, MMC_RXFRAMECOUNT_GB_LO); 1789 1790 stats->rxoctetcount_gb += 1791 XGMAC_IOREAD(pdata, MMC_RXOCTETCOUNT_GB_LO); 1792 1793 stats->rxoctetcount_g += 1794 XGMAC_IOREAD(pdata, MMC_RXOCTETCOUNT_G_LO); 1795 1796 stats->rxbroadcastframes_g += 1797 XGMAC_IOREAD(pdata, MMC_RXBROADCASTFRAMES_G_LO); 1798 1799 stats->rxmulticastframes_g += 1800 XGMAC_IOREAD(pdata, MMC_RXMULTICASTFRAMES_G_LO); 1801 1802 stats->rxcrcerror += 1803 XGMAC_IOREAD(pdata, MMC_RXCRCERROR_LO); 1804 1805 stats->rxrunterror += 1806 XGMAC_IOREAD(pdata, MMC_RXRUNTERROR); 1807 1808 stats->rxjabbererror += 1809 XGMAC_IOREAD(pdata, MMC_RXJABBERERROR); 1810 1811 stats->rxundersize_g += 1812 XGMAC_IOREAD(pdata, MMC_RXUNDERSIZE_G); 1813 1814 stats->rxoversize_g += 1815 XGMAC_IOREAD(pdata, MMC_RXOVERSIZE_G); 1816 1817 stats->rx64octets_gb += 1818 XGMAC_IOREAD(pdata, MMC_RX64OCTETS_GB_LO); 1819 1820 stats->rx65to127octets_gb += 1821 XGMAC_IOREAD(pdata, MMC_RX65TO127OCTETS_GB_LO); 1822 1823 stats->rx128to255octets_gb += 1824 XGMAC_IOREAD(pdata, MMC_RX128TO255OCTETS_GB_LO); 1825 1826 stats->rx256to511octets_gb += 1827 XGMAC_IOREAD(pdata, MMC_RX256TO511OCTETS_GB_LO); 1828 1829 stats->rx512to1023octets_gb += 1830 XGMAC_IOREAD(pdata, MMC_RX512TO1023OCTETS_GB_LO); 1831 1832 stats->rx1024tomaxoctets_gb += 1833 XGMAC_IOREAD(pdata, MMC_RX1024TOMAXOCTETS_GB_LO); 1834 1835 stats->rxunicastframes_g += 1836 XGMAC_IOREAD(pdata, MMC_RXUNICASTFRAMES_G_LO); 1837 1838 stats->rxlengtherror += 1839 XGMAC_IOREAD(pdata, MMC_RXLENGTHERROR_LO); 1840 1841 stats->rxoutofrangetype += 1842 XGMAC_IOREAD(pdata, MMC_RXOUTOFRANGETYPE_LO); 1843 1844 stats->rxpauseframes += 1845 XGMAC_IOREAD(pdata, MMC_RXPAUSEFRAMES_LO); 1846 1847 stats->rxfifooverflow += 1848 XGMAC_IOREAD(pdata, MMC_RXFIFOOVERFLOW_LO); 1849 1850 stats->rxvlanframes_gb += 1851 XGMAC_IOREAD(pdata, MMC_RXVLANFRAMES_GB_LO); 1852 1853 stats->rxwatchdogerror += 1854 XGMAC_IOREAD(pdata, MMC_RXWATCHDOGERROR); 1855 1856 /* Un-freeze counters */ 1857 XGMAC_IOWRITE_BITS(pdata, MMC_CR, MCF, 0); 1858 } 1859 1860 static void xgbe_config_mmc(struct xgbe_prv_data *pdata) 1861 { 1862 /* Set counters to reset on read */ 1863 XGMAC_IOWRITE_BITS(pdata, MMC_CR, ROR, 1); 1864 1865 /* Reset the counters */ 1866 XGMAC_IOWRITE_BITS(pdata, MMC_CR, CR, 1); 1867 } 1868 1869 static void xgbe_enable_tx(struct xgbe_prv_data *pdata) 1870 { 1871 struct xgbe_channel *channel; 1872 unsigned int i; 1873 1874 /* Enable each Tx DMA channel */ 1875 channel = pdata->channel; 1876 for (i = 0; i < pdata->channel_count; i++, channel++) { 1877 if (!channel->tx_ring) 1878 break; 1879 1880 XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_TCR, ST, 1); 1881 } 1882 1883 /* Enable each Tx queue */ 1884 for (i = 0; i < pdata->hw_feat.tx_q_cnt; i++) 1885 XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TXQEN, 1886 MTL_Q_ENABLED); 1887 1888 /* Enable MAC Tx */ 1889 XGMAC_IOWRITE_BITS(pdata, MAC_TCR, TE, 1); 1890 } 1891 1892 static void xgbe_disable_tx(struct xgbe_prv_data *pdata) 1893 { 1894 struct xgbe_channel *channel; 1895 unsigned int i; 1896 1897 /* Disable MAC Tx */ 1898 XGMAC_IOWRITE_BITS(pdata, MAC_TCR, TE, 0); 1899 1900 /* Disable each Tx queue */ 1901 for (i = 0; i < pdata->hw_feat.tx_q_cnt; i++) 1902 XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TXQEN, 0); 1903 1904 /* Disable each Tx DMA channel */ 1905 channel = pdata->channel; 1906 for (i = 0; i < pdata->channel_count; i++, channel++) { 1907 if (!channel->tx_ring) 1908 break; 1909 1910 XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_TCR, ST, 0); 1911 } 1912 } 1913 1914 static void xgbe_enable_rx(struct xgbe_prv_data *pdata) 1915 { 1916 struct xgbe_channel *channel; 1917 unsigned int reg_val, i; 1918 1919 /* Enable each Rx DMA channel */ 1920 channel = pdata->channel; 1921 for (i = 0; i < pdata->channel_count; i++, channel++) { 1922 if (!channel->rx_ring) 1923 break; 1924 1925 XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_RCR, SR, 1); 1926 } 1927 1928 /* Enable each Rx queue */ 1929 reg_val = 0; 1930 for (i = 0; i < pdata->hw_feat.rx_q_cnt; i++) 1931 reg_val |= (0x02 << (i << 1)); 1932 XGMAC_IOWRITE(pdata, MAC_RQC0R, reg_val); 1933 1934 /* Enable MAC Rx */ 1935 XGMAC_IOWRITE_BITS(pdata, MAC_RCR, DCRCC, 1); 1936 XGMAC_IOWRITE_BITS(pdata, MAC_RCR, CST, 1); 1937 XGMAC_IOWRITE_BITS(pdata, MAC_RCR, ACS, 1); 1938 XGMAC_IOWRITE_BITS(pdata, MAC_RCR, RE, 1); 1939 } 1940 1941 static void xgbe_disable_rx(struct xgbe_prv_data *pdata) 1942 { 1943 struct xgbe_channel *channel; 1944 unsigned int i; 1945 1946 /* Disable MAC Rx */ 1947 XGMAC_IOWRITE_BITS(pdata, MAC_RCR, DCRCC, 0); 1948 XGMAC_IOWRITE_BITS(pdata, MAC_RCR, CST, 0); 1949 XGMAC_IOWRITE_BITS(pdata, MAC_RCR, ACS, 0); 1950 XGMAC_IOWRITE_BITS(pdata, MAC_RCR, RE, 0); 1951 1952 /* Disable each Rx queue */ 1953 XGMAC_IOWRITE(pdata, MAC_RQC0R, 0); 1954 1955 /* Disable each Rx DMA channel */ 1956 channel = pdata->channel; 1957 for (i = 0; i < pdata->channel_count; i++, channel++) { 1958 if (!channel->rx_ring) 1959 break; 1960 1961 XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_RCR, SR, 0); 1962 } 1963 } 1964 1965 static void xgbe_powerup_tx(struct xgbe_prv_data *pdata) 1966 { 1967 struct xgbe_channel *channel; 1968 unsigned int i; 1969 1970 /* Enable each Tx DMA channel */ 1971 channel = pdata->channel; 1972 for (i = 0; i < pdata->channel_count; i++, channel++) { 1973 if (!channel->tx_ring) 1974 break; 1975 1976 XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_TCR, ST, 1); 1977 } 1978 1979 /* Enable MAC Tx */ 1980 XGMAC_IOWRITE_BITS(pdata, MAC_TCR, TE, 1); 1981 } 1982 1983 static void xgbe_powerdown_tx(struct xgbe_prv_data *pdata) 1984 { 1985 struct xgbe_channel *channel; 1986 unsigned int i; 1987 1988 /* Disable MAC Tx */ 1989 XGMAC_IOWRITE_BITS(pdata, MAC_TCR, TE, 0); 1990 1991 /* Disable each Tx DMA channel */ 1992 channel = pdata->channel; 1993 for (i = 0; i < pdata->channel_count; i++, channel++) { 1994 if (!channel->tx_ring) 1995 break; 1996 1997 XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_TCR, ST, 0); 1998 } 1999 } 2000 2001 static void xgbe_powerup_rx(struct xgbe_prv_data *pdata) 2002 { 2003 struct xgbe_channel *channel; 2004 unsigned int i; 2005 2006 /* Enable each Rx DMA channel */ 2007 channel = pdata->channel; 2008 for (i = 0; i < pdata->channel_count; i++, channel++) { 2009 if (!channel->rx_ring) 2010 break; 2011 2012 XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_RCR, SR, 1); 2013 } 2014 } 2015 2016 static void xgbe_powerdown_rx(struct xgbe_prv_data *pdata) 2017 { 2018 struct xgbe_channel *channel; 2019 unsigned int i; 2020 2021 /* Disable each Rx DMA channel */ 2022 channel = pdata->channel; 2023 for (i = 0; i < pdata->channel_count; i++, channel++) { 2024 if (!channel->rx_ring) 2025 break; 2026 2027 XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_RCR, SR, 0); 2028 } 2029 } 2030 2031 static int xgbe_init(struct xgbe_prv_data *pdata) 2032 { 2033 struct xgbe_desc_if *desc_if = &pdata->desc_if; 2034 int ret; 2035 2036 DBGPR("-->xgbe_init\n"); 2037 2038 /* Flush Tx queues */ 2039 ret = xgbe_flush_tx_queues(pdata); 2040 if (ret) 2041 return ret; 2042 2043 /* 2044 * Initialize DMA related features 2045 */ 2046 xgbe_config_dma_bus(pdata); 2047 xgbe_config_dma_cache(pdata); 2048 xgbe_config_osp_mode(pdata); 2049 xgbe_config_pblx8(pdata); 2050 xgbe_config_tx_pbl_val(pdata); 2051 xgbe_config_rx_pbl_val(pdata); 2052 xgbe_config_rx_coalesce(pdata); 2053 xgbe_config_tx_coalesce(pdata); 2054 xgbe_config_rx_buffer_size(pdata); 2055 xgbe_config_tso_mode(pdata); 2056 desc_if->wrapper_tx_desc_init(pdata); 2057 desc_if->wrapper_rx_desc_init(pdata); 2058 xgbe_enable_dma_interrupts(pdata); 2059 2060 /* 2061 * Initialize MTL related features 2062 */ 2063 xgbe_config_mtl_mode(pdata); 2064 xgbe_config_rx_queue_mapping(pdata); 2065 /*TODO: Program the priorities mapped to the Selected Traffic Classes 2066 in MTL_TC_Prty_Map0-3 registers */ 2067 xgbe_config_tsf_mode(pdata, pdata->tx_sf_mode); 2068 xgbe_config_rsf_mode(pdata, pdata->rx_sf_mode); 2069 xgbe_config_tx_threshold(pdata, pdata->tx_threshold); 2070 xgbe_config_rx_threshold(pdata, pdata->rx_threshold); 2071 xgbe_config_tx_fifo_size(pdata); 2072 xgbe_config_rx_fifo_size(pdata); 2073 xgbe_config_flow_control_threshold(pdata); 2074 /*TODO: Queue to Traffic Class Mapping (Q2TCMAP) */ 2075 /*TODO: Error Packet and undersized good Packet forwarding enable 2076 (FEP and FUP) 2077 */ 2078 xgbe_enable_mtl_interrupts(pdata); 2079 2080 /* Transmit Class Weight */ 2081 XGMAC_IOWRITE_BITS(pdata, MTL_Q_TCQWR, QW, 0x10); 2082 2083 /* 2084 * Initialize MAC related features 2085 */ 2086 xgbe_config_mac_address(pdata); 2087 xgbe_config_jumbo_enable(pdata); 2088 xgbe_config_flow_control(pdata); 2089 xgbe_config_checksum_offload(pdata); 2090 xgbe_config_vlan_support(pdata); 2091 xgbe_config_mmc(pdata); 2092 xgbe_enable_mac_interrupts(pdata); 2093 2094 DBGPR("<--xgbe_init\n"); 2095 2096 return 0; 2097 } 2098 2099 void xgbe_init_function_ptrs_dev(struct xgbe_hw_if *hw_if) 2100 { 2101 DBGPR("-->xgbe_init_function_ptrs\n"); 2102 2103 hw_if->tx_complete = xgbe_tx_complete; 2104 2105 hw_if->set_promiscuous_mode = xgbe_set_promiscuous_mode; 2106 hw_if->set_all_multicast_mode = xgbe_set_all_multicast_mode; 2107 hw_if->set_addn_mac_addrs = xgbe_set_addn_mac_addrs; 2108 hw_if->set_mac_address = xgbe_set_mac_address; 2109 2110 hw_if->enable_rx_csum = xgbe_enable_rx_csum; 2111 hw_if->disable_rx_csum = xgbe_disable_rx_csum; 2112 2113 hw_if->enable_rx_vlan_stripping = xgbe_enable_rx_vlan_stripping; 2114 hw_if->disable_rx_vlan_stripping = xgbe_disable_rx_vlan_stripping; 2115 2116 hw_if->read_mmd_regs = xgbe_read_mmd_regs; 2117 hw_if->write_mmd_regs = xgbe_write_mmd_regs; 2118 2119 hw_if->set_gmii_speed = xgbe_set_gmii_speed; 2120 hw_if->set_gmii_2500_speed = xgbe_set_gmii_2500_speed; 2121 hw_if->set_xgmii_speed = xgbe_set_xgmii_speed; 2122 2123 hw_if->enable_tx = xgbe_enable_tx; 2124 hw_if->disable_tx = xgbe_disable_tx; 2125 hw_if->enable_rx = xgbe_enable_rx; 2126 hw_if->disable_rx = xgbe_disable_rx; 2127 2128 hw_if->powerup_tx = xgbe_powerup_tx; 2129 hw_if->powerdown_tx = xgbe_powerdown_tx; 2130 hw_if->powerup_rx = xgbe_powerup_rx; 2131 hw_if->powerdown_rx = xgbe_powerdown_rx; 2132 2133 hw_if->pre_xmit = xgbe_pre_xmit; 2134 hw_if->dev_read = xgbe_dev_read; 2135 hw_if->enable_int = xgbe_enable_int; 2136 hw_if->disable_int = xgbe_disable_int; 2137 hw_if->init = xgbe_init; 2138 hw_if->exit = xgbe_exit; 2139 2140 /* Descriptor related Sequences have to be initialized here */ 2141 hw_if->tx_desc_init = xgbe_tx_desc_init; 2142 hw_if->rx_desc_init = xgbe_rx_desc_init; 2143 hw_if->tx_desc_reset = xgbe_tx_desc_reset; 2144 hw_if->rx_desc_reset = xgbe_rx_desc_reset; 2145 hw_if->is_last_desc = xgbe_is_last_desc; 2146 hw_if->is_context_desc = xgbe_is_context_desc; 2147 2148 /* For FLOW ctrl */ 2149 hw_if->config_tx_flow_control = xgbe_config_tx_flow_control; 2150 hw_if->config_rx_flow_control = xgbe_config_rx_flow_control; 2151 2152 /* For RX coalescing */ 2153 hw_if->config_rx_coalesce = xgbe_config_rx_coalesce; 2154 hw_if->config_tx_coalesce = xgbe_config_tx_coalesce; 2155 hw_if->usec_to_riwt = xgbe_usec_to_riwt; 2156 hw_if->riwt_to_usec = xgbe_riwt_to_usec; 2157 2158 /* For RX and TX threshold config */ 2159 hw_if->config_rx_threshold = xgbe_config_rx_threshold; 2160 hw_if->config_tx_threshold = xgbe_config_tx_threshold; 2161 2162 /* For RX and TX Store and Forward Mode config */ 2163 hw_if->config_rsf_mode = xgbe_config_rsf_mode; 2164 hw_if->config_tsf_mode = xgbe_config_tsf_mode; 2165 2166 /* For TX DMA Operating on Second Frame config */ 2167 hw_if->config_osp_mode = xgbe_config_osp_mode; 2168 2169 /* For RX and TX PBL config */ 2170 hw_if->config_rx_pbl_val = xgbe_config_rx_pbl_val; 2171 hw_if->get_rx_pbl_val = xgbe_get_rx_pbl_val; 2172 hw_if->config_tx_pbl_val = xgbe_config_tx_pbl_val; 2173 hw_if->get_tx_pbl_val = xgbe_get_tx_pbl_val; 2174 hw_if->config_pblx8 = xgbe_config_pblx8; 2175 2176 /* For MMC statistics support */ 2177 hw_if->tx_mmc_int = xgbe_tx_mmc_int; 2178 hw_if->rx_mmc_int = xgbe_rx_mmc_int; 2179 hw_if->read_mmc_stats = xgbe_read_mmc_stats; 2180 2181 DBGPR("<--xgbe_init_function_ptrs\n"); 2182 } 2183