1 /* 2 * AMD 10Gb Ethernet driver 3 * 4 * This file is available to you under your choice of the following two 5 * licenses: 6 * 7 * License 1: GPLv2 8 * 9 * Copyright (c) 2014 Advanced Micro Devices, Inc. 10 * 11 * This file is free software; you may copy, redistribute and/or modify 12 * it under the terms of the GNU General Public License as published by 13 * the Free Software Foundation, either version 2 of the License, or (at 14 * your option) any later version. 15 * 16 * This file is distributed in the hope that it will be useful, but 17 * WITHOUT ANY WARRANTY; without even the implied warranty of 18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 19 * General Public License for more details. 20 * 21 * You should have received a copy of the GNU General Public License 22 * along with this program. If not, see <http://www.gnu.org/licenses/>. 23 * 24 * This file incorporates work covered by the following copyright and 25 * permission notice: 26 * The Synopsys DWC ETHER XGMAC Software Driver and documentation 27 * (hereinafter "Software") is an unsupported proprietary work of Synopsys, 28 * Inc. unless otherwise expressly agreed to in writing between Synopsys 29 * and you. 30 * 31 * The Software IS NOT an item of Licensed Software or Licensed Product 32 * under any End User Software License Agreement or Agreement for Licensed 33 * Product with Synopsys or any supplement thereto. Permission is hereby 34 * granted, free of charge, to any person obtaining a copy of this software 35 * annotated with this license and the Software, to deal in the Software 36 * without restriction, including without limitation the rights to use, 37 * copy, modify, merge, publish, distribute, sublicense, and/or sell copies 38 * of the Software, and to permit persons to whom the Software is furnished 39 * to do so, subject to the following conditions: 40 * 41 * The above copyright notice and this permission notice shall be included 42 * in all copies or substantial portions of the Software. 43 * 44 * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" 45 * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 46 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A 47 * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS 48 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 49 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 50 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 51 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 52 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 53 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF 54 * THE POSSIBILITY OF SUCH DAMAGE. 55 * 56 * 57 * License 2: Modified BSD 58 * 59 * Copyright (c) 2014 Advanced Micro Devices, Inc. 60 * All rights reserved. 61 * 62 * Redistribution and use in source and binary forms, with or without 63 * modification, are permitted provided that the following conditions are met: 64 * * Redistributions of source code must retain the above copyright 65 * notice, this list of conditions and the following disclaimer. 66 * * Redistributions in binary form must reproduce the above copyright 67 * notice, this list of conditions and the following disclaimer in the 68 * documentation and/or other materials provided with the distribution. 69 * * Neither the name of Advanced Micro Devices, Inc. nor the 70 * names of its contributors may be used to endorse or promote products 71 * derived from this software without specific prior written permission. 72 * 73 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 74 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 75 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 76 * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY 77 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 78 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 79 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND 80 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 81 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 82 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 83 * 84 * This file incorporates work covered by the following copyright and 85 * permission notice: 86 * The Synopsys DWC ETHER XGMAC Software Driver and documentation 87 * (hereinafter "Software") is an unsupported proprietary work of Synopsys, 88 * Inc. unless otherwise expressly agreed to in writing between Synopsys 89 * and you. 90 * 91 * The Software IS NOT an item of Licensed Software or Licensed Product 92 * under any End User Software License Agreement or Agreement for Licensed 93 * Product with Synopsys or any supplement thereto. Permission is hereby 94 * granted, free of charge, to any person obtaining a copy of this software 95 * annotated with this license and the Software, to deal in the Software 96 * without restriction, including without limitation the rights to use, 97 * copy, modify, merge, publish, distribute, sublicense, and/or sell copies 98 * of the Software, and to permit persons to whom the Software is furnished 99 * to do so, subject to the following conditions: 100 * 101 * The above copyright notice and this permission notice shall be included 102 * in all copies or substantial portions of the Software. 103 * 104 * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" 105 * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 106 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A 107 * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS 108 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 109 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 110 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 111 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 112 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 113 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF 114 * THE POSSIBILITY OF SUCH DAMAGE. 115 */ 116 117 #include <linux/spinlock.h> 118 #include <linux/tcp.h> 119 #include <linux/if_vlan.h> 120 #include <linux/phy.h> 121 #include <net/busy_poll.h> 122 #include <linux/clk.h> 123 #include <linux/if_ether.h> 124 125 #include "xgbe.h" 126 #include "xgbe-common.h" 127 128 129 static int xgbe_poll(struct napi_struct *, int); 130 static void xgbe_set_rx_mode(struct net_device *); 131 132 static inline unsigned int xgbe_tx_avail_desc(struct xgbe_ring *ring) 133 { 134 return (ring->rdesc_count - (ring->cur - ring->dirty)); 135 } 136 137 static int xgbe_calc_rx_buf_size(struct net_device *netdev, unsigned int mtu) 138 { 139 unsigned int rx_buf_size; 140 141 if (mtu > XGMAC_JUMBO_PACKET_MTU) { 142 netdev_alert(netdev, "MTU exceeds maximum supported value\n"); 143 return -EINVAL; 144 } 145 146 rx_buf_size = mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN; 147 if (rx_buf_size < RX_MIN_BUF_SIZE) 148 rx_buf_size = RX_MIN_BUF_SIZE; 149 rx_buf_size = (rx_buf_size + RX_BUF_ALIGN - 1) & ~(RX_BUF_ALIGN - 1); 150 151 return rx_buf_size; 152 } 153 154 static void xgbe_enable_rx_tx_ints(struct xgbe_prv_data *pdata) 155 { 156 struct xgbe_hw_if *hw_if = &pdata->hw_if; 157 struct xgbe_channel *channel; 158 unsigned int i; 159 160 channel = pdata->channel; 161 for (i = 0; i < pdata->channel_count; i++, channel++) { 162 if (channel->tx_ring) 163 hw_if->enable_int(channel, 164 XGMAC_INT_DMA_CH_SR_TI); 165 if (channel->rx_ring) 166 hw_if->enable_int(channel, 167 XGMAC_INT_DMA_CH_SR_RI); 168 } 169 } 170 171 static void xgbe_disable_rx_tx_ints(struct xgbe_prv_data *pdata) 172 { 173 struct xgbe_hw_if *hw_if = &pdata->hw_if; 174 struct xgbe_channel *channel; 175 unsigned int i; 176 177 channel = pdata->channel; 178 for (i = 0; i < pdata->channel_count; i++, channel++) { 179 if (channel->tx_ring) 180 hw_if->disable_int(channel, 181 XGMAC_INT_DMA_CH_SR_TI); 182 if (channel->rx_ring) 183 hw_if->disable_int(channel, 184 XGMAC_INT_DMA_CH_SR_RI); 185 } 186 } 187 188 static irqreturn_t xgbe_isr(int irq, void *data) 189 { 190 struct xgbe_prv_data *pdata = data; 191 struct xgbe_hw_if *hw_if = &pdata->hw_if; 192 struct xgbe_channel *channel; 193 unsigned int dma_isr, dma_ch_isr; 194 unsigned int mac_isr; 195 unsigned int i; 196 197 /* The DMA interrupt status register also reports MAC and MTL 198 * interrupts. So for polling mode, we just need to check for 199 * this register to be non-zero 200 */ 201 dma_isr = XGMAC_IOREAD(pdata, DMA_ISR); 202 if (!dma_isr) 203 goto isr_done; 204 205 DBGPR("-->xgbe_isr\n"); 206 207 DBGPR(" DMA_ISR = %08x\n", dma_isr); 208 DBGPR(" DMA_DS0 = %08x\n", XGMAC_IOREAD(pdata, DMA_DSR0)); 209 DBGPR(" DMA_DS1 = %08x\n", XGMAC_IOREAD(pdata, DMA_DSR1)); 210 211 for (i = 0; i < pdata->channel_count; i++) { 212 if (!(dma_isr & (1 << i))) 213 continue; 214 215 channel = pdata->channel + i; 216 217 dma_ch_isr = XGMAC_DMA_IOREAD(channel, DMA_CH_SR); 218 DBGPR(" DMA_CH%u_ISR = %08x\n", i, dma_ch_isr); 219 220 if (XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, TI) || 221 XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, RI)) { 222 if (napi_schedule_prep(&pdata->napi)) { 223 /* Disable Tx and Rx interrupts */ 224 xgbe_disable_rx_tx_ints(pdata); 225 226 /* Turn on polling */ 227 __napi_schedule(&pdata->napi); 228 } 229 } 230 231 /* Restart the device on a Fatal Bus Error */ 232 if (XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, FBE)) 233 schedule_work(&pdata->restart_work); 234 235 /* Clear all interrupt signals */ 236 XGMAC_DMA_IOWRITE(channel, DMA_CH_SR, dma_ch_isr); 237 } 238 239 if (XGMAC_GET_BITS(dma_isr, DMA_ISR, MACIS)) { 240 mac_isr = XGMAC_IOREAD(pdata, MAC_ISR); 241 242 if (XGMAC_GET_BITS(mac_isr, MAC_ISR, MMCTXIS)) 243 hw_if->tx_mmc_int(pdata); 244 245 if (XGMAC_GET_BITS(mac_isr, MAC_ISR, MMCRXIS)) 246 hw_if->rx_mmc_int(pdata); 247 } 248 249 DBGPR(" DMA_ISR = %08x\n", XGMAC_IOREAD(pdata, DMA_ISR)); 250 251 DBGPR("<--xgbe_isr\n"); 252 253 isr_done: 254 return IRQ_HANDLED; 255 } 256 257 static enum hrtimer_restart xgbe_tx_timer(struct hrtimer *timer) 258 { 259 struct xgbe_channel *channel = container_of(timer, 260 struct xgbe_channel, 261 tx_timer); 262 struct xgbe_ring *ring = channel->tx_ring; 263 struct xgbe_prv_data *pdata = channel->pdata; 264 unsigned long flags; 265 266 DBGPR("-->xgbe_tx_timer\n"); 267 268 spin_lock_irqsave(&ring->lock, flags); 269 270 if (napi_schedule_prep(&pdata->napi)) { 271 /* Disable Tx and Rx interrupts */ 272 xgbe_disable_rx_tx_ints(pdata); 273 274 /* Turn on polling */ 275 __napi_schedule(&pdata->napi); 276 } 277 278 channel->tx_timer_active = 0; 279 280 spin_unlock_irqrestore(&ring->lock, flags); 281 282 DBGPR("<--xgbe_tx_timer\n"); 283 284 return HRTIMER_NORESTART; 285 } 286 287 static void xgbe_init_tx_timers(struct xgbe_prv_data *pdata) 288 { 289 struct xgbe_channel *channel; 290 unsigned int i; 291 292 DBGPR("-->xgbe_init_tx_timers\n"); 293 294 channel = pdata->channel; 295 for (i = 0; i < pdata->channel_count; i++, channel++) { 296 if (!channel->tx_ring) 297 break; 298 299 DBGPR(" %s adding tx timer\n", channel->name); 300 hrtimer_init(&channel->tx_timer, CLOCK_MONOTONIC, 301 HRTIMER_MODE_REL); 302 channel->tx_timer.function = xgbe_tx_timer; 303 } 304 305 DBGPR("<--xgbe_init_tx_timers\n"); 306 } 307 308 static void xgbe_stop_tx_timers(struct xgbe_prv_data *pdata) 309 { 310 struct xgbe_channel *channel; 311 unsigned int i; 312 313 DBGPR("-->xgbe_stop_tx_timers\n"); 314 315 channel = pdata->channel; 316 for (i = 0; i < pdata->channel_count; i++, channel++) { 317 if (!channel->tx_ring) 318 break; 319 320 DBGPR(" %s deleting tx timer\n", channel->name); 321 channel->tx_timer_active = 0; 322 hrtimer_cancel(&channel->tx_timer); 323 } 324 325 DBGPR("<--xgbe_stop_tx_timers\n"); 326 } 327 328 void xgbe_get_all_hw_features(struct xgbe_prv_data *pdata) 329 { 330 unsigned int mac_hfr0, mac_hfr1, mac_hfr2; 331 struct xgbe_hw_features *hw_feat = &pdata->hw_feat; 332 333 DBGPR("-->xgbe_get_all_hw_features\n"); 334 335 mac_hfr0 = XGMAC_IOREAD(pdata, MAC_HWF0R); 336 mac_hfr1 = XGMAC_IOREAD(pdata, MAC_HWF1R); 337 mac_hfr2 = XGMAC_IOREAD(pdata, MAC_HWF2R); 338 339 memset(hw_feat, 0, sizeof(*hw_feat)); 340 341 /* Hardware feature register 0 */ 342 hw_feat->gmii = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, GMIISEL); 343 hw_feat->vlhash = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, VLHASH); 344 hw_feat->sma = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, SMASEL); 345 hw_feat->rwk = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, RWKSEL); 346 hw_feat->mgk = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, MGKSEL); 347 hw_feat->mmc = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, MMCSEL); 348 hw_feat->aoe = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, ARPOFFSEL); 349 hw_feat->ts = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, TSSEL); 350 hw_feat->eee = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, EEESEL); 351 hw_feat->tx_coe = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, TXCOESEL); 352 hw_feat->rx_coe = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, RXCOESEL); 353 hw_feat->addn_mac = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, 354 ADDMACADRSEL); 355 hw_feat->ts_src = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, TSSTSSEL); 356 hw_feat->sa_vlan_ins = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, SAVLANINS); 357 358 /* Hardware feature register 1 */ 359 hw_feat->rx_fifo_size = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, 360 RXFIFOSIZE); 361 hw_feat->tx_fifo_size = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, 362 TXFIFOSIZE); 363 hw_feat->dcb = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, DCBEN); 364 hw_feat->sph = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, SPHEN); 365 hw_feat->tso = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, TSOEN); 366 hw_feat->dma_debug = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, DBGMEMA); 367 hw_feat->hash_table_size = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, 368 HASHTBLSZ); 369 hw_feat->l3l4_filter_num = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, 370 L3L4FNUM); 371 372 /* Hardware feature register 2 */ 373 hw_feat->rx_q_cnt = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, RXQCNT); 374 hw_feat->tx_q_cnt = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, TXQCNT); 375 hw_feat->rx_ch_cnt = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, RXCHCNT); 376 hw_feat->tx_ch_cnt = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, TXCHCNT); 377 hw_feat->pps_out_num = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, PPSOUTNUM); 378 hw_feat->aux_snap_num = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, AUXSNAPNUM); 379 380 /* The Queue and Channel counts are zero based so increment them 381 * to get the actual number 382 */ 383 hw_feat->rx_q_cnt++; 384 hw_feat->tx_q_cnt++; 385 hw_feat->rx_ch_cnt++; 386 hw_feat->tx_ch_cnt++; 387 388 DBGPR("<--xgbe_get_all_hw_features\n"); 389 } 390 391 static void xgbe_napi_enable(struct xgbe_prv_data *pdata, unsigned int add) 392 { 393 if (add) 394 netif_napi_add(pdata->netdev, &pdata->napi, xgbe_poll, 395 NAPI_POLL_WEIGHT); 396 napi_enable(&pdata->napi); 397 } 398 399 static void xgbe_napi_disable(struct xgbe_prv_data *pdata) 400 { 401 napi_disable(&pdata->napi); 402 } 403 404 void xgbe_init_tx_coalesce(struct xgbe_prv_data *pdata) 405 { 406 struct xgbe_hw_if *hw_if = &pdata->hw_if; 407 408 DBGPR("-->xgbe_init_tx_coalesce\n"); 409 410 pdata->tx_usecs = XGMAC_INIT_DMA_TX_USECS; 411 pdata->tx_frames = XGMAC_INIT_DMA_TX_FRAMES; 412 413 hw_if->config_tx_coalesce(pdata); 414 415 DBGPR("<--xgbe_init_tx_coalesce\n"); 416 } 417 418 void xgbe_init_rx_coalesce(struct xgbe_prv_data *pdata) 419 { 420 struct xgbe_hw_if *hw_if = &pdata->hw_if; 421 422 DBGPR("-->xgbe_init_rx_coalesce\n"); 423 424 pdata->rx_riwt = hw_if->usec_to_riwt(pdata, XGMAC_INIT_DMA_RX_USECS); 425 pdata->rx_frames = XGMAC_INIT_DMA_RX_FRAMES; 426 427 hw_if->config_rx_coalesce(pdata); 428 429 DBGPR("<--xgbe_init_rx_coalesce\n"); 430 } 431 432 static void xgbe_free_tx_skbuff(struct xgbe_prv_data *pdata) 433 { 434 struct xgbe_desc_if *desc_if = &pdata->desc_if; 435 struct xgbe_channel *channel; 436 struct xgbe_ring *ring; 437 struct xgbe_ring_data *rdata; 438 unsigned int i, j; 439 440 DBGPR("-->xgbe_free_tx_skbuff\n"); 441 442 channel = pdata->channel; 443 for (i = 0; i < pdata->channel_count; i++, channel++) { 444 ring = channel->tx_ring; 445 if (!ring) 446 break; 447 448 for (j = 0; j < ring->rdesc_count; j++) { 449 rdata = GET_DESC_DATA(ring, j); 450 desc_if->unmap_skb(pdata, rdata); 451 } 452 } 453 454 DBGPR("<--xgbe_free_tx_skbuff\n"); 455 } 456 457 static void xgbe_free_rx_skbuff(struct xgbe_prv_data *pdata) 458 { 459 struct xgbe_desc_if *desc_if = &pdata->desc_if; 460 struct xgbe_channel *channel; 461 struct xgbe_ring *ring; 462 struct xgbe_ring_data *rdata; 463 unsigned int i, j; 464 465 DBGPR("-->xgbe_free_rx_skbuff\n"); 466 467 channel = pdata->channel; 468 for (i = 0; i < pdata->channel_count; i++, channel++) { 469 ring = channel->rx_ring; 470 if (!ring) 471 break; 472 473 for (j = 0; j < ring->rdesc_count; j++) { 474 rdata = GET_DESC_DATA(ring, j); 475 desc_if->unmap_skb(pdata, rdata); 476 } 477 } 478 479 DBGPR("<--xgbe_free_rx_skbuff\n"); 480 } 481 482 int xgbe_powerdown(struct net_device *netdev, unsigned int caller) 483 { 484 struct xgbe_prv_data *pdata = netdev_priv(netdev); 485 struct xgbe_hw_if *hw_if = &pdata->hw_if; 486 unsigned long flags; 487 488 DBGPR("-->xgbe_powerdown\n"); 489 490 if (!netif_running(netdev) || 491 (caller == XGMAC_IOCTL_CONTEXT && pdata->power_down)) { 492 netdev_alert(netdev, "Device is already powered down\n"); 493 DBGPR("<--xgbe_powerdown\n"); 494 return -EINVAL; 495 } 496 497 phy_stop(pdata->phydev); 498 499 spin_lock_irqsave(&pdata->lock, flags); 500 501 if (caller == XGMAC_DRIVER_CONTEXT) 502 netif_device_detach(netdev); 503 504 netif_tx_stop_all_queues(netdev); 505 xgbe_napi_disable(pdata); 506 507 /* Powerdown Tx/Rx */ 508 hw_if->powerdown_tx(pdata); 509 hw_if->powerdown_rx(pdata); 510 511 pdata->power_down = 1; 512 513 spin_unlock_irqrestore(&pdata->lock, flags); 514 515 DBGPR("<--xgbe_powerdown\n"); 516 517 return 0; 518 } 519 520 int xgbe_powerup(struct net_device *netdev, unsigned int caller) 521 { 522 struct xgbe_prv_data *pdata = netdev_priv(netdev); 523 struct xgbe_hw_if *hw_if = &pdata->hw_if; 524 unsigned long flags; 525 526 DBGPR("-->xgbe_powerup\n"); 527 528 if (!netif_running(netdev) || 529 (caller == XGMAC_IOCTL_CONTEXT && !pdata->power_down)) { 530 netdev_alert(netdev, "Device is already powered up\n"); 531 DBGPR("<--xgbe_powerup\n"); 532 return -EINVAL; 533 } 534 535 spin_lock_irqsave(&pdata->lock, flags); 536 537 pdata->power_down = 0; 538 539 phy_start(pdata->phydev); 540 541 /* Enable Tx/Rx */ 542 hw_if->powerup_tx(pdata); 543 hw_if->powerup_rx(pdata); 544 545 if (caller == XGMAC_DRIVER_CONTEXT) 546 netif_device_attach(netdev); 547 548 xgbe_napi_enable(pdata, 0); 549 netif_tx_start_all_queues(netdev); 550 551 spin_unlock_irqrestore(&pdata->lock, flags); 552 553 DBGPR("<--xgbe_powerup\n"); 554 555 return 0; 556 } 557 558 static int xgbe_start(struct xgbe_prv_data *pdata) 559 { 560 struct xgbe_hw_if *hw_if = &pdata->hw_if; 561 struct net_device *netdev = pdata->netdev; 562 563 DBGPR("-->xgbe_start\n"); 564 565 xgbe_set_rx_mode(netdev); 566 567 hw_if->init(pdata); 568 569 phy_start(pdata->phydev); 570 571 hw_if->enable_tx(pdata); 572 hw_if->enable_rx(pdata); 573 574 xgbe_init_tx_timers(pdata); 575 576 xgbe_napi_enable(pdata, 1); 577 netif_tx_start_all_queues(netdev); 578 579 DBGPR("<--xgbe_start\n"); 580 581 return 0; 582 } 583 584 static void xgbe_stop(struct xgbe_prv_data *pdata) 585 { 586 struct xgbe_hw_if *hw_if = &pdata->hw_if; 587 struct net_device *netdev = pdata->netdev; 588 589 DBGPR("-->xgbe_stop\n"); 590 591 phy_stop(pdata->phydev); 592 593 netif_tx_stop_all_queues(netdev); 594 xgbe_napi_disable(pdata); 595 596 xgbe_stop_tx_timers(pdata); 597 598 hw_if->disable_tx(pdata); 599 hw_if->disable_rx(pdata); 600 601 DBGPR("<--xgbe_stop\n"); 602 } 603 604 static void xgbe_restart_dev(struct xgbe_prv_data *pdata, unsigned int reset) 605 { 606 struct xgbe_hw_if *hw_if = &pdata->hw_if; 607 608 DBGPR("-->xgbe_restart_dev\n"); 609 610 /* If not running, "restart" will happen on open */ 611 if (!netif_running(pdata->netdev)) 612 return; 613 614 xgbe_stop(pdata); 615 synchronize_irq(pdata->irq_number); 616 617 xgbe_free_tx_skbuff(pdata); 618 xgbe_free_rx_skbuff(pdata); 619 620 /* Issue software reset to device if requested */ 621 if (reset) 622 hw_if->exit(pdata); 623 624 xgbe_start(pdata); 625 626 DBGPR("<--xgbe_restart_dev\n"); 627 } 628 629 static void xgbe_restart(struct work_struct *work) 630 { 631 struct xgbe_prv_data *pdata = container_of(work, 632 struct xgbe_prv_data, 633 restart_work); 634 635 rtnl_lock(); 636 637 xgbe_restart_dev(pdata, 1); 638 639 rtnl_unlock(); 640 } 641 642 static void xgbe_prep_vlan(struct sk_buff *skb, struct xgbe_packet_data *packet) 643 { 644 if (vlan_tx_tag_present(skb)) 645 packet->vlan_ctag = vlan_tx_tag_get(skb); 646 } 647 648 static int xgbe_prep_tso(struct sk_buff *skb, struct xgbe_packet_data *packet) 649 { 650 int ret; 651 652 if (!XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES, 653 TSO_ENABLE)) 654 return 0; 655 656 ret = skb_cow_head(skb, 0); 657 if (ret) 658 return ret; 659 660 packet->header_len = skb_transport_offset(skb) + tcp_hdrlen(skb); 661 packet->tcp_header_len = tcp_hdrlen(skb); 662 packet->tcp_payload_len = skb->len - packet->header_len; 663 packet->mss = skb_shinfo(skb)->gso_size; 664 DBGPR(" packet->header_len=%u\n", packet->header_len); 665 DBGPR(" packet->tcp_header_len=%u, packet->tcp_payload_len=%u\n", 666 packet->tcp_header_len, packet->tcp_payload_len); 667 DBGPR(" packet->mss=%u\n", packet->mss); 668 669 return 0; 670 } 671 672 static int xgbe_is_tso(struct sk_buff *skb) 673 { 674 if (skb->ip_summed != CHECKSUM_PARTIAL) 675 return 0; 676 677 if (!skb_is_gso(skb)) 678 return 0; 679 680 DBGPR(" TSO packet to be processed\n"); 681 682 return 1; 683 } 684 685 static void xgbe_packet_info(struct xgbe_ring *ring, struct sk_buff *skb, 686 struct xgbe_packet_data *packet) 687 { 688 struct skb_frag_struct *frag; 689 unsigned int context_desc; 690 unsigned int len; 691 unsigned int i; 692 693 context_desc = 0; 694 packet->rdesc_count = 0; 695 696 if (xgbe_is_tso(skb)) { 697 /* TSO requires an extra desriptor if mss is different */ 698 if (skb_shinfo(skb)->gso_size != ring->tx.cur_mss) { 699 context_desc = 1; 700 packet->rdesc_count++; 701 } 702 703 /* TSO requires an extra desriptor for TSO header */ 704 packet->rdesc_count++; 705 706 XGMAC_SET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES, 707 TSO_ENABLE, 1); 708 XGMAC_SET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES, 709 CSUM_ENABLE, 1); 710 } else if (skb->ip_summed == CHECKSUM_PARTIAL) 711 XGMAC_SET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES, 712 CSUM_ENABLE, 1); 713 714 if (vlan_tx_tag_present(skb)) { 715 /* VLAN requires an extra descriptor if tag is different */ 716 if (vlan_tx_tag_get(skb) != ring->tx.cur_vlan_ctag) 717 /* We can share with the TSO context descriptor */ 718 if (!context_desc) { 719 context_desc = 1; 720 packet->rdesc_count++; 721 } 722 723 XGMAC_SET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES, 724 VLAN_CTAG, 1); 725 } 726 727 for (len = skb_headlen(skb); len;) { 728 packet->rdesc_count++; 729 len -= min_t(unsigned int, len, TX_MAX_BUF_SIZE); 730 } 731 732 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 733 frag = &skb_shinfo(skb)->frags[i]; 734 for (len = skb_frag_size(frag); len; ) { 735 packet->rdesc_count++; 736 len -= min_t(unsigned int, len, TX_MAX_BUF_SIZE); 737 } 738 } 739 } 740 741 static int xgbe_open(struct net_device *netdev) 742 { 743 struct xgbe_prv_data *pdata = netdev_priv(netdev); 744 struct xgbe_hw_if *hw_if = &pdata->hw_if; 745 struct xgbe_desc_if *desc_if = &pdata->desc_if; 746 int ret; 747 748 DBGPR("-->xgbe_open\n"); 749 750 /* Enable the clock */ 751 ret = clk_prepare_enable(pdata->sysclock); 752 if (ret) { 753 netdev_alert(netdev, "clk_prepare_enable failed\n"); 754 return ret; 755 } 756 757 /* Calculate the Rx buffer size before allocating rings */ 758 ret = xgbe_calc_rx_buf_size(netdev, netdev->mtu); 759 if (ret < 0) 760 goto err_clk; 761 pdata->rx_buf_size = ret; 762 763 /* Allocate the ring descriptors and buffers */ 764 ret = desc_if->alloc_ring_resources(pdata); 765 if (ret) 766 goto err_clk; 767 768 /* Initialize the device restart work struct */ 769 INIT_WORK(&pdata->restart_work, xgbe_restart); 770 771 /* Request interrupts */ 772 ret = devm_request_irq(pdata->dev, netdev->irq, xgbe_isr, 0, 773 netdev->name, pdata); 774 if (ret) { 775 netdev_alert(netdev, "error requesting irq %d\n", 776 pdata->irq_number); 777 goto err_irq; 778 } 779 pdata->irq_number = netdev->irq; 780 781 ret = xgbe_start(pdata); 782 if (ret) 783 goto err_start; 784 785 DBGPR("<--xgbe_open\n"); 786 787 return 0; 788 789 err_start: 790 hw_if->exit(pdata); 791 792 devm_free_irq(pdata->dev, pdata->irq_number, pdata); 793 pdata->irq_number = 0; 794 795 err_irq: 796 desc_if->free_ring_resources(pdata); 797 798 err_clk: 799 clk_disable_unprepare(pdata->sysclock); 800 801 return ret; 802 } 803 804 static int xgbe_close(struct net_device *netdev) 805 { 806 struct xgbe_prv_data *pdata = netdev_priv(netdev); 807 struct xgbe_hw_if *hw_if = &pdata->hw_if; 808 struct xgbe_desc_if *desc_if = &pdata->desc_if; 809 810 DBGPR("-->xgbe_close\n"); 811 812 /* Stop the device */ 813 xgbe_stop(pdata); 814 815 /* Issue software reset to device */ 816 hw_if->exit(pdata); 817 818 /* Free all the ring data */ 819 desc_if->free_ring_resources(pdata); 820 821 /* Release the interrupt */ 822 if (pdata->irq_number != 0) { 823 devm_free_irq(pdata->dev, pdata->irq_number, pdata); 824 pdata->irq_number = 0; 825 } 826 827 /* Disable the clock */ 828 clk_disable_unprepare(pdata->sysclock); 829 830 DBGPR("<--xgbe_close\n"); 831 832 return 0; 833 } 834 835 static int xgbe_xmit(struct sk_buff *skb, struct net_device *netdev) 836 { 837 struct xgbe_prv_data *pdata = netdev_priv(netdev); 838 struct xgbe_hw_if *hw_if = &pdata->hw_if; 839 struct xgbe_desc_if *desc_if = &pdata->desc_if; 840 struct xgbe_channel *channel; 841 struct xgbe_ring *ring; 842 struct xgbe_packet_data *packet; 843 unsigned long flags; 844 int ret; 845 846 DBGPR("-->xgbe_xmit: skb->len = %d\n", skb->len); 847 848 channel = pdata->channel + skb->queue_mapping; 849 ring = channel->tx_ring; 850 packet = &ring->packet_data; 851 852 ret = NETDEV_TX_OK; 853 854 spin_lock_irqsave(&ring->lock, flags); 855 856 if (skb->len == 0) { 857 netdev_err(netdev, "empty skb received from stack\n"); 858 dev_kfree_skb_any(skb); 859 goto tx_netdev_return; 860 } 861 862 /* Calculate preliminary packet info */ 863 memset(packet, 0, sizeof(*packet)); 864 xgbe_packet_info(ring, skb, packet); 865 866 /* Check that there are enough descriptors available */ 867 if (packet->rdesc_count > xgbe_tx_avail_desc(ring)) { 868 DBGPR(" Tx queue stopped, not enough descriptors available\n"); 869 netif_stop_subqueue(netdev, channel->queue_index); 870 ring->tx.queue_stopped = 1; 871 ret = NETDEV_TX_BUSY; 872 goto tx_netdev_return; 873 } 874 875 ret = xgbe_prep_tso(skb, packet); 876 if (ret) { 877 netdev_err(netdev, "error processing TSO packet\n"); 878 dev_kfree_skb_any(skb); 879 goto tx_netdev_return; 880 } 881 xgbe_prep_vlan(skb, packet); 882 883 if (!desc_if->map_tx_skb(channel, skb)) { 884 dev_kfree_skb_any(skb); 885 goto tx_netdev_return; 886 } 887 888 /* Configure required descriptor fields for transmission */ 889 hw_if->pre_xmit(channel); 890 891 #ifdef XGMAC_ENABLE_TX_PKT_DUMP 892 xgbe_print_pkt(netdev, skb, true); 893 #endif 894 895 tx_netdev_return: 896 spin_unlock_irqrestore(&ring->lock, flags); 897 898 DBGPR("<--xgbe_xmit\n"); 899 900 return ret; 901 } 902 903 static void xgbe_set_rx_mode(struct net_device *netdev) 904 { 905 struct xgbe_prv_data *pdata = netdev_priv(netdev); 906 struct xgbe_hw_if *hw_if = &pdata->hw_if; 907 unsigned int pr_mode, am_mode; 908 909 DBGPR("-->xgbe_set_rx_mode\n"); 910 911 pr_mode = ((netdev->flags & IFF_PROMISC) != 0); 912 am_mode = ((netdev->flags & IFF_ALLMULTI) != 0); 913 914 if (netdev_uc_count(netdev) > pdata->hw_feat.addn_mac) 915 pr_mode = 1; 916 if (netdev_mc_count(netdev) > pdata->hw_feat.addn_mac) 917 am_mode = 1; 918 if ((netdev_uc_count(netdev) + netdev_mc_count(netdev)) > 919 pdata->hw_feat.addn_mac) 920 pr_mode = 1; 921 922 hw_if->set_promiscuous_mode(pdata, pr_mode); 923 hw_if->set_all_multicast_mode(pdata, am_mode); 924 if (!pr_mode) 925 hw_if->set_addn_mac_addrs(pdata, am_mode); 926 927 DBGPR("<--xgbe_set_rx_mode\n"); 928 } 929 930 static int xgbe_set_mac_address(struct net_device *netdev, void *addr) 931 { 932 struct xgbe_prv_data *pdata = netdev_priv(netdev); 933 struct xgbe_hw_if *hw_if = &pdata->hw_if; 934 struct sockaddr *saddr = addr; 935 936 DBGPR("-->xgbe_set_mac_address\n"); 937 938 if (!is_valid_ether_addr(saddr->sa_data)) 939 return -EADDRNOTAVAIL; 940 941 memcpy(netdev->dev_addr, saddr->sa_data, netdev->addr_len); 942 943 hw_if->set_mac_address(pdata, netdev->dev_addr); 944 945 DBGPR("<--xgbe_set_mac_address\n"); 946 947 return 0; 948 } 949 950 static int xgbe_change_mtu(struct net_device *netdev, int mtu) 951 { 952 struct xgbe_prv_data *pdata = netdev_priv(netdev); 953 int ret; 954 955 DBGPR("-->xgbe_change_mtu\n"); 956 957 ret = xgbe_calc_rx_buf_size(netdev, mtu); 958 if (ret < 0) 959 return ret; 960 961 pdata->rx_buf_size = ret; 962 netdev->mtu = mtu; 963 964 xgbe_restart_dev(pdata, 0); 965 966 DBGPR("<--xgbe_change_mtu\n"); 967 968 return 0; 969 } 970 971 static struct rtnl_link_stats64 *xgbe_get_stats64(struct net_device *netdev, 972 struct rtnl_link_stats64 *s) 973 { 974 struct xgbe_prv_data *pdata = netdev_priv(netdev); 975 struct xgbe_mmc_stats *pstats = &pdata->mmc_stats; 976 977 DBGPR("-->%s\n", __func__); 978 979 pdata->hw_if.read_mmc_stats(pdata); 980 981 s->rx_packets = pstats->rxframecount_gb; 982 s->rx_bytes = pstats->rxoctetcount_gb; 983 s->rx_errors = pstats->rxframecount_gb - 984 pstats->rxbroadcastframes_g - 985 pstats->rxmulticastframes_g - 986 pstats->rxunicastframes_g; 987 s->multicast = pstats->rxmulticastframes_g; 988 s->rx_length_errors = pstats->rxlengtherror; 989 s->rx_crc_errors = pstats->rxcrcerror; 990 s->rx_fifo_errors = pstats->rxfifooverflow; 991 992 s->tx_packets = pstats->txframecount_gb; 993 s->tx_bytes = pstats->txoctetcount_gb; 994 s->tx_errors = pstats->txframecount_gb - pstats->txframecount_g; 995 s->tx_dropped = netdev->stats.tx_dropped; 996 997 DBGPR("<--%s\n", __func__); 998 999 return s; 1000 } 1001 1002 #ifdef CONFIG_NET_POLL_CONTROLLER 1003 static void xgbe_poll_controller(struct net_device *netdev) 1004 { 1005 struct xgbe_prv_data *pdata = netdev_priv(netdev); 1006 1007 DBGPR("-->xgbe_poll_controller\n"); 1008 1009 disable_irq(pdata->irq_number); 1010 1011 xgbe_isr(pdata->irq_number, pdata); 1012 1013 enable_irq(pdata->irq_number); 1014 1015 DBGPR("<--xgbe_poll_controller\n"); 1016 } 1017 #endif /* End CONFIG_NET_POLL_CONTROLLER */ 1018 1019 static int xgbe_set_features(struct net_device *netdev, 1020 netdev_features_t features) 1021 { 1022 struct xgbe_prv_data *pdata = netdev_priv(netdev); 1023 struct xgbe_hw_if *hw_if = &pdata->hw_if; 1024 unsigned int rxcsum_enabled, rxvlan_enabled; 1025 1026 rxcsum_enabled = !!(pdata->netdev_features & NETIF_F_RXCSUM); 1027 rxvlan_enabled = !!(pdata->netdev_features & NETIF_F_HW_VLAN_CTAG_RX); 1028 1029 if ((features & NETIF_F_RXCSUM) && !rxcsum_enabled) { 1030 hw_if->enable_rx_csum(pdata); 1031 netdev_alert(netdev, "state change - rxcsum enabled\n"); 1032 } else if (!(features & NETIF_F_RXCSUM) && rxcsum_enabled) { 1033 hw_if->disable_rx_csum(pdata); 1034 netdev_alert(netdev, "state change - rxcsum disabled\n"); 1035 } 1036 1037 if ((features & NETIF_F_HW_VLAN_CTAG_RX) && !rxvlan_enabled) { 1038 hw_if->enable_rx_vlan_stripping(pdata); 1039 netdev_alert(netdev, "state change - rxvlan enabled\n"); 1040 } else if (!(features & NETIF_F_HW_VLAN_CTAG_RX) && rxvlan_enabled) { 1041 hw_if->disable_rx_vlan_stripping(pdata); 1042 netdev_alert(netdev, "state change - rxvlan disabled\n"); 1043 } 1044 1045 pdata->netdev_features = features; 1046 1047 DBGPR("<--xgbe_set_features\n"); 1048 1049 return 0; 1050 } 1051 1052 static const struct net_device_ops xgbe_netdev_ops = { 1053 .ndo_open = xgbe_open, 1054 .ndo_stop = xgbe_close, 1055 .ndo_start_xmit = xgbe_xmit, 1056 .ndo_set_rx_mode = xgbe_set_rx_mode, 1057 .ndo_set_mac_address = xgbe_set_mac_address, 1058 .ndo_validate_addr = eth_validate_addr, 1059 .ndo_change_mtu = xgbe_change_mtu, 1060 .ndo_get_stats64 = xgbe_get_stats64, 1061 #ifdef CONFIG_NET_POLL_CONTROLLER 1062 .ndo_poll_controller = xgbe_poll_controller, 1063 #endif 1064 .ndo_set_features = xgbe_set_features, 1065 }; 1066 1067 struct net_device_ops *xgbe_get_netdev_ops(void) 1068 { 1069 return (struct net_device_ops *)&xgbe_netdev_ops; 1070 } 1071 1072 static int xgbe_tx_poll(struct xgbe_channel *channel) 1073 { 1074 struct xgbe_prv_data *pdata = channel->pdata; 1075 struct xgbe_hw_if *hw_if = &pdata->hw_if; 1076 struct xgbe_desc_if *desc_if = &pdata->desc_if; 1077 struct xgbe_ring *ring = channel->tx_ring; 1078 struct xgbe_ring_data *rdata; 1079 struct xgbe_ring_desc *rdesc; 1080 struct net_device *netdev = pdata->netdev; 1081 unsigned long flags; 1082 int processed = 0; 1083 1084 DBGPR("-->xgbe_tx_poll\n"); 1085 1086 /* Nothing to do if there isn't a Tx ring for this channel */ 1087 if (!ring) 1088 return 0; 1089 1090 spin_lock_irqsave(&ring->lock, flags); 1091 1092 while ((processed < TX_DESC_MAX_PROC) && (ring->dirty < ring->cur)) { 1093 rdata = GET_DESC_DATA(ring, ring->dirty); 1094 rdesc = rdata->rdesc; 1095 1096 if (!hw_if->tx_complete(rdesc)) 1097 break; 1098 1099 #ifdef XGMAC_ENABLE_TX_DESC_DUMP 1100 xgbe_dump_tx_desc(ring, ring->dirty, 1, 0); 1101 #endif 1102 1103 /* Free the SKB and reset the descriptor for re-use */ 1104 desc_if->unmap_skb(pdata, rdata); 1105 hw_if->tx_desc_reset(rdata); 1106 1107 processed++; 1108 ring->dirty++; 1109 } 1110 1111 if ((ring->tx.queue_stopped == 1) && 1112 (xgbe_tx_avail_desc(ring) > TX_DESC_MIN_FREE)) { 1113 ring->tx.queue_stopped = 0; 1114 netif_wake_subqueue(netdev, channel->queue_index); 1115 } 1116 1117 DBGPR("<--xgbe_tx_poll: processed=%d\n", processed); 1118 1119 spin_unlock_irqrestore(&ring->lock, flags); 1120 1121 return processed; 1122 } 1123 1124 static int xgbe_rx_poll(struct xgbe_channel *channel, int budget) 1125 { 1126 struct xgbe_prv_data *pdata = channel->pdata; 1127 struct xgbe_hw_if *hw_if = &pdata->hw_if; 1128 struct xgbe_desc_if *desc_if = &pdata->desc_if; 1129 struct xgbe_ring *ring = channel->rx_ring; 1130 struct xgbe_ring_data *rdata; 1131 struct xgbe_packet_data *packet; 1132 struct net_device *netdev = pdata->netdev; 1133 struct sk_buff *skb; 1134 unsigned int incomplete, error; 1135 unsigned int cur_len, put_len, max_len; 1136 int received = 0; 1137 1138 DBGPR("-->xgbe_rx_poll: budget=%d\n", budget); 1139 1140 /* Nothing to do if there isn't a Rx ring for this channel */ 1141 if (!ring) 1142 return 0; 1143 1144 packet = &ring->packet_data; 1145 while (received < budget) { 1146 DBGPR(" cur = %d\n", ring->cur); 1147 1148 /* Clear the packet data information */ 1149 memset(packet, 0, sizeof(*packet)); 1150 skb = NULL; 1151 error = 0; 1152 cur_len = 0; 1153 1154 read_again: 1155 rdata = GET_DESC_DATA(ring, ring->cur); 1156 1157 if (hw_if->dev_read(channel)) 1158 break; 1159 1160 received++; 1161 ring->cur++; 1162 ring->dirty++; 1163 1164 dma_unmap_single(pdata->dev, rdata->skb_dma, 1165 rdata->skb_dma_len, DMA_FROM_DEVICE); 1166 rdata->skb_dma = 0; 1167 1168 incomplete = XGMAC_GET_BITS(packet->attributes, 1169 RX_PACKET_ATTRIBUTES, 1170 INCOMPLETE); 1171 1172 /* Earlier error, just drain the remaining data */ 1173 if (incomplete && error) 1174 goto read_again; 1175 1176 if (error || packet->errors) { 1177 if (packet->errors) 1178 DBGPR("Error in received packet\n"); 1179 dev_kfree_skb(skb); 1180 continue; 1181 } 1182 1183 put_len = rdata->len - cur_len; 1184 if (skb) { 1185 if (pskb_expand_head(skb, 0, put_len, GFP_ATOMIC)) { 1186 DBGPR("pskb_expand_head error\n"); 1187 if (incomplete) { 1188 error = 1; 1189 goto read_again; 1190 } 1191 1192 dev_kfree_skb(skb); 1193 continue; 1194 } 1195 memcpy(skb_tail_pointer(skb), rdata->skb->data, 1196 put_len); 1197 } else { 1198 skb = rdata->skb; 1199 rdata->skb = NULL; 1200 } 1201 skb_put(skb, put_len); 1202 cur_len += put_len; 1203 1204 if (incomplete) 1205 goto read_again; 1206 1207 /* Be sure we don't exceed the configured MTU */ 1208 max_len = netdev->mtu + ETH_HLEN; 1209 if (!(netdev->features & NETIF_F_HW_VLAN_CTAG_RX) && 1210 (skb->protocol == htons(ETH_P_8021Q))) 1211 max_len += VLAN_HLEN; 1212 1213 if (skb->len > max_len) { 1214 DBGPR("packet length exceeds configured MTU\n"); 1215 dev_kfree_skb(skb); 1216 continue; 1217 } 1218 1219 #ifdef XGMAC_ENABLE_RX_PKT_DUMP 1220 xgbe_print_pkt(netdev, skb, false); 1221 #endif 1222 1223 skb_checksum_none_assert(skb); 1224 if (XGMAC_GET_BITS(packet->attributes, 1225 RX_PACKET_ATTRIBUTES, CSUM_DONE)) 1226 skb->ip_summed = CHECKSUM_UNNECESSARY; 1227 1228 if (XGMAC_GET_BITS(packet->attributes, 1229 RX_PACKET_ATTRIBUTES, VLAN_CTAG)) 1230 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), 1231 packet->vlan_ctag); 1232 1233 skb->dev = netdev; 1234 skb->protocol = eth_type_trans(skb, netdev); 1235 skb_record_rx_queue(skb, channel->queue_index); 1236 skb_mark_napi_id(skb, &pdata->napi); 1237 1238 netdev->last_rx = jiffies; 1239 napi_gro_receive(&pdata->napi, skb); 1240 } 1241 1242 if (received) { 1243 desc_if->realloc_skb(channel); 1244 1245 /* Update the Rx Tail Pointer Register with address of 1246 * the last cleaned entry */ 1247 rdata = GET_DESC_DATA(ring, ring->rx.realloc_index - 1); 1248 XGMAC_DMA_IOWRITE(channel, DMA_CH_RDTR_LO, 1249 lower_32_bits(rdata->rdesc_dma)); 1250 } 1251 1252 DBGPR("<--xgbe_rx_poll: received = %d\n", received); 1253 1254 return received; 1255 } 1256 1257 static int xgbe_poll(struct napi_struct *napi, int budget) 1258 { 1259 struct xgbe_prv_data *pdata = container_of(napi, struct xgbe_prv_data, 1260 napi); 1261 struct xgbe_channel *channel; 1262 int processed; 1263 unsigned int i; 1264 1265 DBGPR("-->xgbe_poll: budget=%d\n", budget); 1266 1267 /* Cleanup Tx ring first */ 1268 channel = pdata->channel; 1269 for (i = 0; i < pdata->channel_count; i++, channel++) 1270 xgbe_tx_poll(channel); 1271 1272 /* Process Rx ring next */ 1273 processed = 0; 1274 channel = pdata->channel; 1275 for (i = 0; i < pdata->channel_count; i++, channel++) 1276 processed += xgbe_rx_poll(channel, budget - processed); 1277 1278 /* If we processed everything, we are done */ 1279 if (processed < budget) { 1280 /* Turn off polling */ 1281 napi_complete(napi); 1282 1283 /* Enable Tx and Rx interrupts */ 1284 xgbe_enable_rx_tx_ints(pdata); 1285 } 1286 1287 DBGPR("<--xgbe_poll: received = %d\n", processed); 1288 1289 return processed; 1290 } 1291 1292 void xgbe_dump_tx_desc(struct xgbe_ring *ring, unsigned int idx, 1293 unsigned int count, unsigned int flag) 1294 { 1295 struct xgbe_ring_data *rdata; 1296 struct xgbe_ring_desc *rdesc; 1297 1298 while (count--) { 1299 rdata = GET_DESC_DATA(ring, idx); 1300 rdesc = rdata->rdesc; 1301 DBGPR("TX_NORMAL_DESC[%d %s] = %08x:%08x:%08x:%08x\n", idx, 1302 (flag == 1) ? "QUEUED FOR TX" : "TX BY DEVICE", 1303 le32_to_cpu(rdesc->desc0), le32_to_cpu(rdesc->desc1), 1304 le32_to_cpu(rdesc->desc2), le32_to_cpu(rdesc->desc3)); 1305 idx++; 1306 } 1307 } 1308 1309 void xgbe_dump_rx_desc(struct xgbe_ring *ring, struct xgbe_ring_desc *desc, 1310 unsigned int idx) 1311 { 1312 DBGPR("RX_NORMAL_DESC[%d RX BY DEVICE] = %08x:%08x:%08x:%08x\n", idx, 1313 le32_to_cpu(desc->desc0), le32_to_cpu(desc->desc1), 1314 le32_to_cpu(desc->desc2), le32_to_cpu(desc->desc3)); 1315 } 1316 1317 void xgbe_print_pkt(struct net_device *netdev, struct sk_buff *skb, bool tx_rx) 1318 { 1319 struct ethhdr *eth = (struct ethhdr *)skb->data; 1320 unsigned char *buf = skb->data; 1321 unsigned char buffer[128]; 1322 unsigned int i, j; 1323 1324 netdev_alert(netdev, "\n************** SKB dump ****************\n"); 1325 1326 netdev_alert(netdev, "%s packet of %d bytes\n", 1327 (tx_rx ? "TX" : "RX"), skb->len); 1328 1329 netdev_alert(netdev, "Dst MAC addr: %pM\n", eth->h_dest); 1330 netdev_alert(netdev, "Src MAC addr: %pM\n", eth->h_source); 1331 netdev_alert(netdev, "Protocol: 0x%04hx\n", ntohs(eth->h_proto)); 1332 1333 for (i = 0, j = 0; i < skb->len;) { 1334 j += snprintf(buffer + j, sizeof(buffer) - j, "%02hhx", 1335 buf[i++]); 1336 1337 if ((i % 32) == 0) { 1338 netdev_alert(netdev, " 0x%04x: %s\n", i - 32, buffer); 1339 j = 0; 1340 } else if ((i % 16) == 0) { 1341 buffer[j++] = ' '; 1342 buffer[j++] = ' '; 1343 } else if ((i % 4) == 0) { 1344 buffer[j++] = ' '; 1345 } 1346 } 1347 if (i % 32) 1348 netdev_alert(netdev, " 0x%04x: %s\n", i - (i % 32), buffer); 1349 1350 netdev_alert(netdev, "\n************** SKB dump ****************\n"); 1351 } 1352