1 /* 2 * AMD 10Gb Ethernet driver 3 * 4 * This file is available to you under your choice of the following two 5 * licenses: 6 * 7 * License 1: GPLv2 8 * 9 * Copyright (c) 2014 Advanced Micro Devices, Inc. 10 * 11 * This file is free software; you may copy, redistribute and/or modify 12 * it under the terms of the GNU General Public License as published by 13 * the Free Software Foundation, either version 2 of the License, or (at 14 * your option) any later version. 15 * 16 * This file is distributed in the hope that it will be useful, but 17 * WITHOUT ANY WARRANTY; without even the implied warranty of 18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 19 * General Public License for more details. 20 * 21 * You should have received a copy of the GNU General Public License 22 * along with this program. If not, see <http://www.gnu.org/licenses/>. 23 * 24 * This file incorporates work covered by the following copyright and 25 * permission notice: 26 * The Synopsys DWC ETHER XGMAC Software Driver and documentation 27 * (hereinafter "Software") is an unsupported proprietary work of Synopsys, 28 * Inc. unless otherwise expressly agreed to in writing between Synopsys 29 * and you. 30 * 31 * The Software IS NOT an item of Licensed Software or Licensed Product 32 * under any End User Software License Agreement or Agreement for Licensed 33 * Product with Synopsys or any supplement thereto. Permission is hereby 34 * granted, free of charge, to any person obtaining a copy of this software 35 * annotated with this license and the Software, to deal in the Software 36 * without restriction, including without limitation the rights to use, 37 * copy, modify, merge, publish, distribute, sublicense, and/or sell copies 38 * of the Software, and to permit persons to whom the Software is furnished 39 * to do so, subject to the following conditions: 40 * 41 * The above copyright notice and this permission notice shall be included 42 * in all copies or substantial portions of the Software. 43 * 44 * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" 45 * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 46 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A 47 * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS 48 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 49 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 50 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 51 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 52 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 53 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF 54 * THE POSSIBILITY OF SUCH DAMAGE. 55 * 56 * 57 * License 2: Modified BSD 58 * 59 * Copyright (c) 2014 Advanced Micro Devices, Inc. 60 * All rights reserved. 61 * 62 * Redistribution and use in source and binary forms, with or without 63 * modification, are permitted provided that the following conditions are met: 64 * * Redistributions of source code must retain the above copyright 65 * notice, this list of conditions and the following disclaimer. 66 * * Redistributions in binary form must reproduce the above copyright 67 * notice, this list of conditions and the following disclaimer in the 68 * documentation and/or other materials provided with the distribution. 69 * * Neither the name of Advanced Micro Devices, Inc. nor the 70 * names of its contributors may be used to endorse or promote products 71 * derived from this software without specific prior written permission. 72 * 73 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 74 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 75 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 76 * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY 77 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 78 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 79 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND 80 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 81 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 82 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 83 * 84 * This file incorporates work covered by the following copyright and 85 * permission notice: 86 * The Synopsys DWC ETHER XGMAC Software Driver and documentation 87 * (hereinafter "Software") is an unsupported proprietary work of Synopsys, 88 * Inc. unless otherwise expressly agreed to in writing between Synopsys 89 * and you. 90 * 91 * The Software IS NOT an item of Licensed Software or Licensed Product 92 * under any End User Software License Agreement or Agreement for Licensed 93 * Product with Synopsys or any supplement thereto. Permission is hereby 94 * granted, free of charge, to any person obtaining a copy of this software 95 * annotated with this license and the Software, to deal in the Software 96 * without restriction, including without limitation the rights to use, 97 * copy, modify, merge, publish, distribute, sublicense, and/or sell copies 98 * of the Software, and to permit persons to whom the Software is furnished 99 * to do so, subject to the following conditions: 100 * 101 * The above copyright notice and this permission notice shall be included 102 * in all copies or substantial portions of the Software. 103 * 104 * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" 105 * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 106 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A 107 * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS 108 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 109 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 110 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 111 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 112 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 113 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF 114 * THE POSSIBILITY OF SUCH DAMAGE. 115 */ 116 117 #include <linux/platform_device.h> 118 #include <linux/spinlock.h> 119 #include <linux/tcp.h> 120 #include <linux/if_vlan.h> 121 #include <net/busy_poll.h> 122 #include <linux/clk.h> 123 #include <linux/if_ether.h> 124 #include <linux/net_tstamp.h> 125 #include <linux/phy.h> 126 127 #include "xgbe.h" 128 #include "xgbe-common.h" 129 130 static int xgbe_one_poll(struct napi_struct *, int); 131 static int xgbe_all_poll(struct napi_struct *, int); 132 static void xgbe_set_rx_mode(struct net_device *); 133 134 static int xgbe_alloc_channels(struct xgbe_prv_data *pdata) 135 { 136 struct xgbe_channel *channel_mem, *channel; 137 struct xgbe_ring *tx_ring, *rx_ring; 138 unsigned int count, i; 139 int ret = -ENOMEM; 140 141 count = max_t(unsigned int, pdata->tx_ring_count, pdata->rx_ring_count); 142 143 channel_mem = kcalloc(count, sizeof(struct xgbe_channel), GFP_KERNEL); 144 if (!channel_mem) 145 goto err_channel; 146 147 tx_ring = kcalloc(pdata->tx_ring_count, sizeof(struct xgbe_ring), 148 GFP_KERNEL); 149 if (!tx_ring) 150 goto err_tx_ring; 151 152 rx_ring = kcalloc(pdata->rx_ring_count, sizeof(struct xgbe_ring), 153 GFP_KERNEL); 154 if (!rx_ring) 155 goto err_rx_ring; 156 157 for (i = 0, channel = channel_mem; i < count; i++, channel++) { 158 snprintf(channel->name, sizeof(channel->name), "channel-%d", i); 159 channel->pdata = pdata; 160 channel->queue_index = i; 161 channel->dma_regs = pdata->xgmac_regs + DMA_CH_BASE + 162 (DMA_CH_INC * i); 163 164 if (pdata->per_channel_irq) { 165 /* Get the DMA interrupt (offset 1) */ 166 ret = platform_get_irq(pdata->pdev, i + 1); 167 if (ret < 0) { 168 netdev_err(pdata->netdev, 169 "platform_get_irq %u failed\n", 170 i + 1); 171 goto err_irq; 172 } 173 174 channel->dma_irq = ret; 175 } 176 177 if (i < pdata->tx_ring_count) { 178 spin_lock_init(&tx_ring->lock); 179 channel->tx_ring = tx_ring++; 180 } 181 182 if (i < pdata->rx_ring_count) { 183 spin_lock_init(&rx_ring->lock); 184 channel->rx_ring = rx_ring++; 185 } 186 187 DBGPR(" %s: queue=%u, dma_regs=%p, dma_irq=%d, tx=%p, rx=%p\n", 188 channel->name, channel->queue_index, channel->dma_regs, 189 channel->dma_irq, channel->tx_ring, channel->rx_ring); 190 } 191 192 pdata->channel = channel_mem; 193 pdata->channel_count = count; 194 195 return 0; 196 197 err_irq: 198 kfree(rx_ring); 199 200 err_rx_ring: 201 kfree(tx_ring); 202 203 err_tx_ring: 204 kfree(channel_mem); 205 206 err_channel: 207 return ret; 208 } 209 210 static void xgbe_free_channels(struct xgbe_prv_data *pdata) 211 { 212 if (!pdata->channel) 213 return; 214 215 kfree(pdata->channel->rx_ring); 216 kfree(pdata->channel->tx_ring); 217 kfree(pdata->channel); 218 219 pdata->channel = NULL; 220 pdata->channel_count = 0; 221 } 222 223 static inline unsigned int xgbe_tx_avail_desc(struct xgbe_ring *ring) 224 { 225 return (ring->rdesc_count - (ring->cur - ring->dirty)); 226 } 227 228 static inline unsigned int xgbe_rx_dirty_desc(struct xgbe_ring *ring) 229 { 230 return (ring->cur - ring->dirty); 231 } 232 233 static int xgbe_maybe_stop_tx_queue(struct xgbe_channel *channel, 234 struct xgbe_ring *ring, unsigned int count) 235 { 236 struct xgbe_prv_data *pdata = channel->pdata; 237 238 if (count > xgbe_tx_avail_desc(ring)) { 239 DBGPR(" Tx queue stopped, not enough descriptors available\n"); 240 netif_stop_subqueue(pdata->netdev, channel->queue_index); 241 ring->tx.queue_stopped = 1; 242 243 /* If we haven't notified the hardware because of xmit_more 244 * support, tell it now 245 */ 246 if (ring->tx.xmit_more) 247 pdata->hw_if.tx_start_xmit(channel, ring); 248 249 return NETDEV_TX_BUSY; 250 } 251 252 return 0; 253 } 254 255 static int xgbe_calc_rx_buf_size(struct net_device *netdev, unsigned int mtu) 256 { 257 unsigned int rx_buf_size; 258 259 if (mtu > XGMAC_JUMBO_PACKET_MTU) { 260 netdev_alert(netdev, "MTU exceeds maximum supported value\n"); 261 return -EINVAL; 262 } 263 264 rx_buf_size = mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN; 265 rx_buf_size = clamp_val(rx_buf_size, XGBE_RX_MIN_BUF_SIZE, PAGE_SIZE); 266 267 rx_buf_size = (rx_buf_size + XGBE_RX_BUF_ALIGN - 1) & 268 ~(XGBE_RX_BUF_ALIGN - 1); 269 270 return rx_buf_size; 271 } 272 273 static void xgbe_enable_rx_tx_ints(struct xgbe_prv_data *pdata) 274 { 275 struct xgbe_hw_if *hw_if = &pdata->hw_if; 276 struct xgbe_channel *channel; 277 enum xgbe_int int_id; 278 unsigned int i; 279 280 channel = pdata->channel; 281 for (i = 0; i < pdata->channel_count; i++, channel++) { 282 if (channel->tx_ring && channel->rx_ring) 283 int_id = XGMAC_INT_DMA_CH_SR_TI_RI; 284 else if (channel->tx_ring) 285 int_id = XGMAC_INT_DMA_CH_SR_TI; 286 else if (channel->rx_ring) 287 int_id = XGMAC_INT_DMA_CH_SR_RI; 288 else 289 continue; 290 291 hw_if->enable_int(channel, int_id); 292 } 293 } 294 295 static void xgbe_disable_rx_tx_ints(struct xgbe_prv_data *pdata) 296 { 297 struct xgbe_hw_if *hw_if = &pdata->hw_if; 298 struct xgbe_channel *channel; 299 enum xgbe_int int_id; 300 unsigned int i; 301 302 channel = pdata->channel; 303 for (i = 0; i < pdata->channel_count; i++, channel++) { 304 if (channel->tx_ring && channel->rx_ring) 305 int_id = XGMAC_INT_DMA_CH_SR_TI_RI; 306 else if (channel->tx_ring) 307 int_id = XGMAC_INT_DMA_CH_SR_TI; 308 else if (channel->rx_ring) 309 int_id = XGMAC_INT_DMA_CH_SR_RI; 310 else 311 continue; 312 313 hw_if->disable_int(channel, int_id); 314 } 315 } 316 317 static irqreturn_t xgbe_isr(int irq, void *data) 318 { 319 struct xgbe_prv_data *pdata = data; 320 struct xgbe_hw_if *hw_if = &pdata->hw_if; 321 struct xgbe_channel *channel; 322 unsigned int dma_isr, dma_ch_isr; 323 unsigned int mac_isr, mac_tssr; 324 unsigned int i; 325 326 /* The DMA interrupt status register also reports MAC and MTL 327 * interrupts. So for polling mode, we just need to check for 328 * this register to be non-zero 329 */ 330 dma_isr = XGMAC_IOREAD(pdata, DMA_ISR); 331 if (!dma_isr) 332 goto isr_done; 333 334 DBGPR(" DMA_ISR = %08x\n", dma_isr); 335 336 for (i = 0; i < pdata->channel_count; i++) { 337 if (!(dma_isr & (1 << i))) 338 continue; 339 340 channel = pdata->channel + i; 341 342 dma_ch_isr = XGMAC_DMA_IOREAD(channel, DMA_CH_SR); 343 DBGPR(" DMA_CH%u_ISR = %08x\n", i, dma_ch_isr); 344 345 /* The TI or RI interrupt bits may still be set even if using 346 * per channel DMA interrupts. Check to be sure those are not 347 * enabled before using the private data napi structure. 348 */ 349 if (!pdata->per_channel_irq && 350 (XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, TI) || 351 XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, RI))) { 352 if (napi_schedule_prep(&pdata->napi)) { 353 /* Disable Tx and Rx interrupts */ 354 xgbe_disable_rx_tx_ints(pdata); 355 356 /* Turn on polling */ 357 __napi_schedule(&pdata->napi); 358 } 359 } 360 361 /* Restart the device on a Fatal Bus Error */ 362 if (XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, FBE)) 363 schedule_work(&pdata->restart_work); 364 365 /* Clear all interrupt signals */ 366 XGMAC_DMA_IOWRITE(channel, DMA_CH_SR, dma_ch_isr); 367 } 368 369 if (XGMAC_GET_BITS(dma_isr, DMA_ISR, MACIS)) { 370 mac_isr = XGMAC_IOREAD(pdata, MAC_ISR); 371 372 if (XGMAC_GET_BITS(mac_isr, MAC_ISR, MMCTXIS)) 373 hw_if->tx_mmc_int(pdata); 374 375 if (XGMAC_GET_BITS(mac_isr, MAC_ISR, MMCRXIS)) 376 hw_if->rx_mmc_int(pdata); 377 378 if (XGMAC_GET_BITS(mac_isr, MAC_ISR, TSIS)) { 379 mac_tssr = XGMAC_IOREAD(pdata, MAC_TSSR); 380 381 if (XGMAC_GET_BITS(mac_tssr, MAC_TSSR, TXTSC)) { 382 /* Read Tx Timestamp to clear interrupt */ 383 pdata->tx_tstamp = 384 hw_if->get_tx_tstamp(pdata); 385 schedule_work(&pdata->tx_tstamp_work); 386 } 387 } 388 } 389 390 DBGPR(" DMA_ISR = %08x\n", XGMAC_IOREAD(pdata, DMA_ISR)); 391 392 isr_done: 393 return IRQ_HANDLED; 394 } 395 396 static irqreturn_t xgbe_dma_isr(int irq, void *data) 397 { 398 struct xgbe_channel *channel = data; 399 400 /* Per channel DMA interrupts are enabled, so we use the per 401 * channel napi structure and not the private data napi structure 402 */ 403 if (napi_schedule_prep(&channel->napi)) { 404 /* Disable Tx and Rx interrupts */ 405 disable_irq_nosync(channel->dma_irq); 406 407 /* Turn on polling */ 408 __napi_schedule(&channel->napi); 409 } 410 411 return IRQ_HANDLED; 412 } 413 414 static enum hrtimer_restart xgbe_tx_timer(struct hrtimer *timer) 415 { 416 struct xgbe_channel *channel = container_of(timer, 417 struct xgbe_channel, 418 tx_timer); 419 struct xgbe_prv_data *pdata = channel->pdata; 420 struct napi_struct *napi; 421 422 DBGPR("-->xgbe_tx_timer\n"); 423 424 napi = (pdata->per_channel_irq) ? &channel->napi : &pdata->napi; 425 426 if (napi_schedule_prep(napi)) { 427 /* Disable Tx and Rx interrupts */ 428 if (pdata->per_channel_irq) 429 disable_irq(channel->dma_irq); 430 else 431 xgbe_disable_rx_tx_ints(pdata); 432 433 /* Turn on polling */ 434 __napi_schedule(napi); 435 } 436 437 channel->tx_timer_active = 0; 438 439 DBGPR("<--xgbe_tx_timer\n"); 440 441 return HRTIMER_NORESTART; 442 } 443 444 static void xgbe_init_tx_timers(struct xgbe_prv_data *pdata) 445 { 446 struct xgbe_channel *channel; 447 unsigned int i; 448 449 DBGPR("-->xgbe_init_tx_timers\n"); 450 451 channel = pdata->channel; 452 for (i = 0; i < pdata->channel_count; i++, channel++) { 453 if (!channel->tx_ring) 454 break; 455 456 DBGPR(" %s adding tx timer\n", channel->name); 457 hrtimer_init(&channel->tx_timer, CLOCK_MONOTONIC, 458 HRTIMER_MODE_REL); 459 channel->tx_timer.function = xgbe_tx_timer; 460 } 461 462 DBGPR("<--xgbe_init_tx_timers\n"); 463 } 464 465 static void xgbe_stop_tx_timers(struct xgbe_prv_data *pdata) 466 { 467 struct xgbe_channel *channel; 468 unsigned int i; 469 470 DBGPR("-->xgbe_stop_tx_timers\n"); 471 472 channel = pdata->channel; 473 for (i = 0; i < pdata->channel_count; i++, channel++) { 474 if (!channel->tx_ring) 475 break; 476 477 DBGPR(" %s deleting tx timer\n", channel->name); 478 channel->tx_timer_active = 0; 479 hrtimer_cancel(&channel->tx_timer); 480 } 481 482 DBGPR("<--xgbe_stop_tx_timers\n"); 483 } 484 485 void xgbe_get_all_hw_features(struct xgbe_prv_data *pdata) 486 { 487 unsigned int mac_hfr0, mac_hfr1, mac_hfr2; 488 struct xgbe_hw_features *hw_feat = &pdata->hw_feat; 489 490 DBGPR("-->xgbe_get_all_hw_features\n"); 491 492 mac_hfr0 = XGMAC_IOREAD(pdata, MAC_HWF0R); 493 mac_hfr1 = XGMAC_IOREAD(pdata, MAC_HWF1R); 494 mac_hfr2 = XGMAC_IOREAD(pdata, MAC_HWF2R); 495 496 memset(hw_feat, 0, sizeof(*hw_feat)); 497 498 hw_feat->version = XGMAC_IOREAD(pdata, MAC_VR); 499 500 /* Hardware feature register 0 */ 501 hw_feat->gmii = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, GMIISEL); 502 hw_feat->vlhash = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, VLHASH); 503 hw_feat->sma = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, SMASEL); 504 hw_feat->rwk = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, RWKSEL); 505 hw_feat->mgk = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, MGKSEL); 506 hw_feat->mmc = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, MMCSEL); 507 hw_feat->aoe = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, ARPOFFSEL); 508 hw_feat->ts = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, TSSEL); 509 hw_feat->eee = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, EEESEL); 510 hw_feat->tx_coe = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, TXCOESEL); 511 hw_feat->rx_coe = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, RXCOESEL); 512 hw_feat->addn_mac = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, 513 ADDMACADRSEL); 514 hw_feat->ts_src = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, TSSTSSEL); 515 hw_feat->sa_vlan_ins = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, SAVLANINS); 516 517 /* Hardware feature register 1 */ 518 hw_feat->rx_fifo_size = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, 519 RXFIFOSIZE); 520 hw_feat->tx_fifo_size = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, 521 TXFIFOSIZE); 522 hw_feat->dcb = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, DCBEN); 523 hw_feat->sph = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, SPHEN); 524 hw_feat->tso = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, TSOEN); 525 hw_feat->dma_debug = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, DBGMEMA); 526 hw_feat->rss = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, RSSEN); 527 hw_feat->tc_cnt = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, NUMTC); 528 hw_feat->hash_table_size = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, 529 HASHTBLSZ); 530 hw_feat->l3l4_filter_num = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, 531 L3L4FNUM); 532 533 /* Hardware feature register 2 */ 534 hw_feat->rx_q_cnt = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, RXQCNT); 535 hw_feat->tx_q_cnt = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, TXQCNT); 536 hw_feat->rx_ch_cnt = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, RXCHCNT); 537 hw_feat->tx_ch_cnt = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, TXCHCNT); 538 hw_feat->pps_out_num = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, PPSOUTNUM); 539 hw_feat->aux_snap_num = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, AUXSNAPNUM); 540 541 /* Translate the Hash Table size into actual number */ 542 switch (hw_feat->hash_table_size) { 543 case 0: 544 break; 545 case 1: 546 hw_feat->hash_table_size = 64; 547 break; 548 case 2: 549 hw_feat->hash_table_size = 128; 550 break; 551 case 3: 552 hw_feat->hash_table_size = 256; 553 break; 554 } 555 556 /* The Queue, Channel and TC counts are zero based so increment them 557 * to get the actual number 558 */ 559 hw_feat->rx_q_cnt++; 560 hw_feat->tx_q_cnt++; 561 hw_feat->rx_ch_cnt++; 562 hw_feat->tx_ch_cnt++; 563 hw_feat->tc_cnt++; 564 565 DBGPR("<--xgbe_get_all_hw_features\n"); 566 } 567 568 static void xgbe_napi_enable(struct xgbe_prv_data *pdata, unsigned int add) 569 { 570 struct xgbe_channel *channel; 571 unsigned int i; 572 573 if (pdata->per_channel_irq) { 574 channel = pdata->channel; 575 for (i = 0; i < pdata->channel_count; i++, channel++) { 576 if (add) 577 netif_napi_add(pdata->netdev, &channel->napi, 578 xgbe_one_poll, NAPI_POLL_WEIGHT); 579 580 napi_enable(&channel->napi); 581 } 582 } else { 583 if (add) 584 netif_napi_add(pdata->netdev, &pdata->napi, 585 xgbe_all_poll, NAPI_POLL_WEIGHT); 586 587 napi_enable(&pdata->napi); 588 } 589 } 590 591 static void xgbe_napi_disable(struct xgbe_prv_data *pdata, unsigned int del) 592 { 593 struct xgbe_channel *channel; 594 unsigned int i; 595 596 if (pdata->per_channel_irq) { 597 channel = pdata->channel; 598 for (i = 0; i < pdata->channel_count; i++, channel++) { 599 napi_disable(&channel->napi); 600 601 if (del) 602 netif_napi_del(&channel->napi); 603 } 604 } else { 605 napi_disable(&pdata->napi); 606 607 if (del) 608 netif_napi_del(&pdata->napi); 609 } 610 } 611 612 void xgbe_init_tx_coalesce(struct xgbe_prv_data *pdata) 613 { 614 struct xgbe_hw_if *hw_if = &pdata->hw_if; 615 616 DBGPR("-->xgbe_init_tx_coalesce\n"); 617 618 pdata->tx_usecs = XGMAC_INIT_DMA_TX_USECS; 619 pdata->tx_frames = XGMAC_INIT_DMA_TX_FRAMES; 620 621 hw_if->config_tx_coalesce(pdata); 622 623 DBGPR("<--xgbe_init_tx_coalesce\n"); 624 } 625 626 void xgbe_init_rx_coalesce(struct xgbe_prv_data *pdata) 627 { 628 struct xgbe_hw_if *hw_if = &pdata->hw_if; 629 630 DBGPR("-->xgbe_init_rx_coalesce\n"); 631 632 pdata->rx_riwt = hw_if->usec_to_riwt(pdata, XGMAC_INIT_DMA_RX_USECS); 633 pdata->rx_frames = XGMAC_INIT_DMA_RX_FRAMES; 634 635 hw_if->config_rx_coalesce(pdata); 636 637 DBGPR("<--xgbe_init_rx_coalesce\n"); 638 } 639 640 static void xgbe_free_tx_data(struct xgbe_prv_data *pdata) 641 { 642 struct xgbe_desc_if *desc_if = &pdata->desc_if; 643 struct xgbe_channel *channel; 644 struct xgbe_ring *ring; 645 struct xgbe_ring_data *rdata; 646 unsigned int i, j; 647 648 DBGPR("-->xgbe_free_tx_data\n"); 649 650 channel = pdata->channel; 651 for (i = 0; i < pdata->channel_count; i++, channel++) { 652 ring = channel->tx_ring; 653 if (!ring) 654 break; 655 656 for (j = 0; j < ring->rdesc_count; j++) { 657 rdata = XGBE_GET_DESC_DATA(ring, j); 658 desc_if->unmap_rdata(pdata, rdata); 659 } 660 } 661 662 DBGPR("<--xgbe_free_tx_data\n"); 663 } 664 665 static void xgbe_free_rx_data(struct xgbe_prv_data *pdata) 666 { 667 struct xgbe_desc_if *desc_if = &pdata->desc_if; 668 struct xgbe_channel *channel; 669 struct xgbe_ring *ring; 670 struct xgbe_ring_data *rdata; 671 unsigned int i, j; 672 673 DBGPR("-->xgbe_free_rx_data\n"); 674 675 channel = pdata->channel; 676 for (i = 0; i < pdata->channel_count; i++, channel++) { 677 ring = channel->rx_ring; 678 if (!ring) 679 break; 680 681 for (j = 0; j < ring->rdesc_count; j++) { 682 rdata = XGBE_GET_DESC_DATA(ring, j); 683 desc_if->unmap_rdata(pdata, rdata); 684 } 685 } 686 687 DBGPR("<--xgbe_free_rx_data\n"); 688 } 689 690 static void xgbe_adjust_link(struct net_device *netdev) 691 { 692 struct xgbe_prv_data *pdata = netdev_priv(netdev); 693 struct xgbe_hw_if *hw_if = &pdata->hw_if; 694 struct phy_device *phydev = pdata->phydev; 695 int new_state = 0; 696 697 if (!phydev) 698 return; 699 700 if (phydev->link) { 701 /* Flow control support */ 702 if (pdata->pause_autoneg) { 703 if (phydev->pause || phydev->asym_pause) { 704 pdata->tx_pause = 1; 705 pdata->rx_pause = 1; 706 } else { 707 pdata->tx_pause = 0; 708 pdata->rx_pause = 0; 709 } 710 } 711 712 if (pdata->tx_pause != pdata->phy_tx_pause) { 713 hw_if->config_tx_flow_control(pdata); 714 pdata->phy_tx_pause = pdata->tx_pause; 715 } 716 717 if (pdata->rx_pause != pdata->phy_rx_pause) { 718 hw_if->config_rx_flow_control(pdata); 719 pdata->phy_rx_pause = pdata->rx_pause; 720 } 721 722 /* Speed support */ 723 if (phydev->speed != pdata->phy_speed) { 724 new_state = 1; 725 726 switch (phydev->speed) { 727 case SPEED_10000: 728 hw_if->set_xgmii_speed(pdata); 729 break; 730 731 case SPEED_2500: 732 hw_if->set_gmii_2500_speed(pdata); 733 break; 734 735 case SPEED_1000: 736 hw_if->set_gmii_speed(pdata); 737 break; 738 } 739 pdata->phy_speed = phydev->speed; 740 } 741 742 if (phydev->link != pdata->phy_link) { 743 new_state = 1; 744 pdata->phy_link = 1; 745 } 746 } else if (pdata->phy_link) { 747 new_state = 1; 748 pdata->phy_link = 0; 749 pdata->phy_speed = SPEED_UNKNOWN; 750 } 751 752 if (new_state) 753 phy_print_status(phydev); 754 } 755 756 static int xgbe_phy_init(struct xgbe_prv_data *pdata) 757 { 758 struct net_device *netdev = pdata->netdev; 759 struct phy_device *phydev = pdata->phydev; 760 int ret; 761 762 pdata->phy_link = -1; 763 pdata->phy_speed = SPEED_UNKNOWN; 764 pdata->phy_tx_pause = pdata->tx_pause; 765 pdata->phy_rx_pause = pdata->rx_pause; 766 767 ret = phy_connect_direct(netdev, phydev, &xgbe_adjust_link, 768 pdata->phy_mode); 769 if (ret) { 770 netdev_err(netdev, "phy_connect_direct failed\n"); 771 return ret; 772 } 773 774 if (!phydev->drv || (phydev->drv->phy_id == 0)) { 775 netdev_err(netdev, "phy_id not valid\n"); 776 ret = -ENODEV; 777 goto err_phy_connect; 778 } 779 DBGPR(" phy_connect_direct succeeded for PHY %s, link=%d\n", 780 dev_name(&phydev->dev), phydev->link); 781 782 return 0; 783 784 err_phy_connect: 785 phy_disconnect(phydev); 786 787 return ret; 788 } 789 790 static void xgbe_phy_exit(struct xgbe_prv_data *pdata) 791 { 792 if (!pdata->phydev) 793 return; 794 795 phy_disconnect(pdata->phydev); 796 } 797 798 int xgbe_powerdown(struct net_device *netdev, unsigned int caller) 799 { 800 struct xgbe_prv_data *pdata = netdev_priv(netdev); 801 struct xgbe_hw_if *hw_if = &pdata->hw_if; 802 unsigned long flags; 803 804 DBGPR("-->xgbe_powerdown\n"); 805 806 if (!netif_running(netdev) || 807 (caller == XGMAC_IOCTL_CONTEXT && pdata->power_down)) { 808 netdev_alert(netdev, "Device is already powered down\n"); 809 DBGPR("<--xgbe_powerdown\n"); 810 return -EINVAL; 811 } 812 813 phy_stop(pdata->phydev); 814 815 spin_lock_irqsave(&pdata->lock, flags); 816 817 if (caller == XGMAC_DRIVER_CONTEXT) 818 netif_device_detach(netdev); 819 820 netif_tx_stop_all_queues(netdev); 821 xgbe_napi_disable(pdata, 0); 822 823 /* Powerdown Tx/Rx */ 824 hw_if->powerdown_tx(pdata); 825 hw_if->powerdown_rx(pdata); 826 827 pdata->power_down = 1; 828 829 spin_unlock_irqrestore(&pdata->lock, flags); 830 831 DBGPR("<--xgbe_powerdown\n"); 832 833 return 0; 834 } 835 836 int xgbe_powerup(struct net_device *netdev, unsigned int caller) 837 { 838 struct xgbe_prv_data *pdata = netdev_priv(netdev); 839 struct xgbe_hw_if *hw_if = &pdata->hw_if; 840 unsigned long flags; 841 842 DBGPR("-->xgbe_powerup\n"); 843 844 if (!netif_running(netdev) || 845 (caller == XGMAC_IOCTL_CONTEXT && !pdata->power_down)) { 846 netdev_alert(netdev, "Device is already powered up\n"); 847 DBGPR("<--xgbe_powerup\n"); 848 return -EINVAL; 849 } 850 851 spin_lock_irqsave(&pdata->lock, flags); 852 853 pdata->power_down = 0; 854 855 phy_start(pdata->phydev); 856 857 /* Enable Tx/Rx */ 858 hw_if->powerup_tx(pdata); 859 hw_if->powerup_rx(pdata); 860 861 if (caller == XGMAC_DRIVER_CONTEXT) 862 netif_device_attach(netdev); 863 864 xgbe_napi_enable(pdata, 0); 865 netif_tx_start_all_queues(netdev); 866 867 spin_unlock_irqrestore(&pdata->lock, flags); 868 869 DBGPR("<--xgbe_powerup\n"); 870 871 return 0; 872 } 873 874 static int xgbe_start(struct xgbe_prv_data *pdata) 875 { 876 struct xgbe_hw_if *hw_if = &pdata->hw_if; 877 struct net_device *netdev = pdata->netdev; 878 879 DBGPR("-->xgbe_start\n"); 880 881 xgbe_set_rx_mode(netdev); 882 883 hw_if->init(pdata); 884 885 phy_start(pdata->phydev); 886 887 hw_if->enable_tx(pdata); 888 hw_if->enable_rx(pdata); 889 890 xgbe_init_tx_timers(pdata); 891 892 xgbe_napi_enable(pdata, 1); 893 netif_tx_start_all_queues(netdev); 894 895 DBGPR("<--xgbe_start\n"); 896 897 return 0; 898 } 899 900 static void xgbe_stop(struct xgbe_prv_data *pdata) 901 { 902 struct xgbe_hw_if *hw_if = &pdata->hw_if; 903 struct xgbe_channel *channel; 904 struct net_device *netdev = pdata->netdev; 905 struct netdev_queue *txq; 906 unsigned int i; 907 908 DBGPR("-->xgbe_stop\n"); 909 910 phy_stop(pdata->phydev); 911 912 netif_tx_stop_all_queues(netdev); 913 xgbe_napi_disable(pdata, 1); 914 915 xgbe_stop_tx_timers(pdata); 916 917 hw_if->disable_tx(pdata); 918 hw_if->disable_rx(pdata); 919 920 channel = pdata->channel; 921 for (i = 0; i < pdata->channel_count; i++, channel++) { 922 if (!channel->tx_ring) 923 continue; 924 925 txq = netdev_get_tx_queue(netdev, channel->queue_index); 926 netdev_tx_reset_queue(txq); 927 } 928 929 DBGPR("<--xgbe_stop\n"); 930 } 931 932 static void xgbe_restart_dev(struct xgbe_prv_data *pdata) 933 { 934 struct xgbe_channel *channel; 935 struct xgbe_hw_if *hw_if = &pdata->hw_if; 936 unsigned int i; 937 938 DBGPR("-->xgbe_restart_dev\n"); 939 940 /* If not running, "restart" will happen on open */ 941 if (!netif_running(pdata->netdev)) 942 return; 943 944 xgbe_stop(pdata); 945 synchronize_irq(pdata->dev_irq); 946 if (pdata->per_channel_irq) { 947 channel = pdata->channel; 948 for (i = 0; i < pdata->channel_count; i++, channel++) 949 synchronize_irq(channel->dma_irq); 950 } 951 952 xgbe_free_tx_data(pdata); 953 xgbe_free_rx_data(pdata); 954 955 /* Issue software reset to device */ 956 hw_if->exit(pdata); 957 958 xgbe_start(pdata); 959 960 DBGPR("<--xgbe_restart_dev\n"); 961 } 962 963 static void xgbe_restart(struct work_struct *work) 964 { 965 struct xgbe_prv_data *pdata = container_of(work, 966 struct xgbe_prv_data, 967 restart_work); 968 969 rtnl_lock(); 970 971 xgbe_restart_dev(pdata); 972 973 rtnl_unlock(); 974 } 975 976 static void xgbe_tx_tstamp(struct work_struct *work) 977 { 978 struct xgbe_prv_data *pdata = container_of(work, 979 struct xgbe_prv_data, 980 tx_tstamp_work); 981 struct skb_shared_hwtstamps hwtstamps; 982 u64 nsec; 983 unsigned long flags; 984 985 if (pdata->tx_tstamp) { 986 nsec = timecounter_cyc2time(&pdata->tstamp_tc, 987 pdata->tx_tstamp); 988 989 memset(&hwtstamps, 0, sizeof(hwtstamps)); 990 hwtstamps.hwtstamp = ns_to_ktime(nsec); 991 skb_tstamp_tx(pdata->tx_tstamp_skb, &hwtstamps); 992 } 993 994 dev_kfree_skb_any(pdata->tx_tstamp_skb); 995 996 spin_lock_irqsave(&pdata->tstamp_lock, flags); 997 pdata->tx_tstamp_skb = NULL; 998 spin_unlock_irqrestore(&pdata->tstamp_lock, flags); 999 } 1000 1001 static int xgbe_get_hwtstamp_settings(struct xgbe_prv_data *pdata, 1002 struct ifreq *ifreq) 1003 { 1004 if (copy_to_user(ifreq->ifr_data, &pdata->tstamp_config, 1005 sizeof(pdata->tstamp_config))) 1006 return -EFAULT; 1007 1008 return 0; 1009 } 1010 1011 static int xgbe_set_hwtstamp_settings(struct xgbe_prv_data *pdata, 1012 struct ifreq *ifreq) 1013 { 1014 struct hwtstamp_config config; 1015 unsigned int mac_tscr; 1016 1017 if (copy_from_user(&config, ifreq->ifr_data, sizeof(config))) 1018 return -EFAULT; 1019 1020 if (config.flags) 1021 return -EINVAL; 1022 1023 mac_tscr = 0; 1024 1025 switch (config.tx_type) { 1026 case HWTSTAMP_TX_OFF: 1027 break; 1028 1029 case HWTSTAMP_TX_ON: 1030 XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1); 1031 break; 1032 1033 default: 1034 return -ERANGE; 1035 } 1036 1037 switch (config.rx_filter) { 1038 case HWTSTAMP_FILTER_NONE: 1039 break; 1040 1041 case HWTSTAMP_FILTER_ALL: 1042 XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENALL, 1); 1043 XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1); 1044 break; 1045 1046 /* PTP v2, UDP, any kind of event packet */ 1047 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: 1048 XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1); 1049 /* PTP v1, UDP, any kind of event packet */ 1050 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: 1051 XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1); 1052 XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1); 1053 XGMAC_SET_BITS(mac_tscr, MAC_TSCR, SNAPTYPSEL, 1); 1054 XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1); 1055 break; 1056 1057 /* PTP v2, UDP, Sync packet */ 1058 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: 1059 XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1); 1060 /* PTP v1, UDP, Sync packet */ 1061 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: 1062 XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1); 1063 XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1); 1064 XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1); 1065 XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1); 1066 break; 1067 1068 /* PTP v2, UDP, Delay_req packet */ 1069 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: 1070 XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1); 1071 /* PTP v1, UDP, Delay_req packet */ 1072 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: 1073 XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1); 1074 XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1); 1075 XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1); 1076 XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSMSTRENA, 1); 1077 XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1); 1078 break; 1079 1080 /* 802.AS1, Ethernet, any kind of event packet */ 1081 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: 1082 XGMAC_SET_BITS(mac_tscr, MAC_TSCR, AV8021ASMEN, 1); 1083 XGMAC_SET_BITS(mac_tscr, MAC_TSCR, SNAPTYPSEL, 1); 1084 XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1); 1085 break; 1086 1087 /* 802.AS1, Ethernet, Sync packet */ 1088 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC: 1089 XGMAC_SET_BITS(mac_tscr, MAC_TSCR, AV8021ASMEN, 1); 1090 XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1); 1091 XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1); 1092 break; 1093 1094 /* 802.AS1, Ethernet, Delay_req packet */ 1095 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ: 1096 XGMAC_SET_BITS(mac_tscr, MAC_TSCR, AV8021ASMEN, 1); 1097 XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSMSTRENA, 1); 1098 XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1); 1099 XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1); 1100 break; 1101 1102 /* PTP v2/802.AS1, any layer, any kind of event packet */ 1103 case HWTSTAMP_FILTER_PTP_V2_EVENT: 1104 XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1); 1105 XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPENA, 1); 1106 XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1); 1107 XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1); 1108 XGMAC_SET_BITS(mac_tscr, MAC_TSCR, SNAPTYPSEL, 1); 1109 XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1); 1110 break; 1111 1112 /* PTP v2/802.AS1, any layer, Sync packet */ 1113 case HWTSTAMP_FILTER_PTP_V2_SYNC: 1114 XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1); 1115 XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPENA, 1); 1116 XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1); 1117 XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1); 1118 XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1); 1119 XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1); 1120 break; 1121 1122 /* PTP v2/802.AS1, any layer, Delay_req packet */ 1123 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: 1124 XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1); 1125 XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPENA, 1); 1126 XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1); 1127 XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1); 1128 XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSMSTRENA, 1); 1129 XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1); 1130 XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1); 1131 break; 1132 1133 default: 1134 return -ERANGE; 1135 } 1136 1137 pdata->hw_if.config_tstamp(pdata, mac_tscr); 1138 1139 memcpy(&pdata->tstamp_config, &config, sizeof(config)); 1140 1141 return 0; 1142 } 1143 1144 static void xgbe_prep_tx_tstamp(struct xgbe_prv_data *pdata, 1145 struct sk_buff *skb, 1146 struct xgbe_packet_data *packet) 1147 { 1148 unsigned long flags; 1149 1150 if (XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES, PTP)) { 1151 spin_lock_irqsave(&pdata->tstamp_lock, flags); 1152 if (pdata->tx_tstamp_skb) { 1153 /* Another timestamp in progress, ignore this one */ 1154 XGMAC_SET_BITS(packet->attributes, 1155 TX_PACKET_ATTRIBUTES, PTP, 0); 1156 } else { 1157 pdata->tx_tstamp_skb = skb_get(skb); 1158 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; 1159 } 1160 spin_unlock_irqrestore(&pdata->tstamp_lock, flags); 1161 } 1162 1163 if (!XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES, PTP)) 1164 skb_tx_timestamp(skb); 1165 } 1166 1167 static void xgbe_prep_vlan(struct sk_buff *skb, struct xgbe_packet_data *packet) 1168 { 1169 if (skb_vlan_tag_present(skb)) 1170 packet->vlan_ctag = skb_vlan_tag_get(skb); 1171 } 1172 1173 static int xgbe_prep_tso(struct sk_buff *skb, struct xgbe_packet_data *packet) 1174 { 1175 int ret; 1176 1177 if (!XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES, 1178 TSO_ENABLE)) 1179 return 0; 1180 1181 ret = skb_cow_head(skb, 0); 1182 if (ret) 1183 return ret; 1184 1185 packet->header_len = skb_transport_offset(skb) + tcp_hdrlen(skb); 1186 packet->tcp_header_len = tcp_hdrlen(skb); 1187 packet->tcp_payload_len = skb->len - packet->header_len; 1188 packet->mss = skb_shinfo(skb)->gso_size; 1189 DBGPR(" packet->header_len=%u\n", packet->header_len); 1190 DBGPR(" packet->tcp_header_len=%u, packet->tcp_payload_len=%u\n", 1191 packet->tcp_header_len, packet->tcp_payload_len); 1192 DBGPR(" packet->mss=%u\n", packet->mss); 1193 1194 /* Update the number of packets that will ultimately be transmitted 1195 * along with the extra bytes for each extra packet 1196 */ 1197 packet->tx_packets = skb_shinfo(skb)->gso_segs; 1198 packet->tx_bytes += (packet->tx_packets - 1) * packet->header_len; 1199 1200 return 0; 1201 } 1202 1203 static int xgbe_is_tso(struct sk_buff *skb) 1204 { 1205 if (skb->ip_summed != CHECKSUM_PARTIAL) 1206 return 0; 1207 1208 if (!skb_is_gso(skb)) 1209 return 0; 1210 1211 DBGPR(" TSO packet to be processed\n"); 1212 1213 return 1; 1214 } 1215 1216 static void xgbe_packet_info(struct xgbe_prv_data *pdata, 1217 struct xgbe_ring *ring, struct sk_buff *skb, 1218 struct xgbe_packet_data *packet) 1219 { 1220 struct skb_frag_struct *frag; 1221 unsigned int context_desc; 1222 unsigned int len; 1223 unsigned int i; 1224 1225 packet->skb = skb; 1226 1227 context_desc = 0; 1228 packet->rdesc_count = 0; 1229 1230 packet->tx_packets = 1; 1231 packet->tx_bytes = skb->len; 1232 1233 if (xgbe_is_tso(skb)) { 1234 /* TSO requires an extra descriptor if mss is different */ 1235 if (skb_shinfo(skb)->gso_size != ring->tx.cur_mss) { 1236 context_desc = 1; 1237 packet->rdesc_count++; 1238 } 1239 1240 /* TSO requires an extra descriptor for TSO header */ 1241 packet->rdesc_count++; 1242 1243 XGMAC_SET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES, 1244 TSO_ENABLE, 1); 1245 XGMAC_SET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES, 1246 CSUM_ENABLE, 1); 1247 } else if (skb->ip_summed == CHECKSUM_PARTIAL) 1248 XGMAC_SET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES, 1249 CSUM_ENABLE, 1); 1250 1251 if (skb_vlan_tag_present(skb)) { 1252 /* VLAN requires an extra descriptor if tag is different */ 1253 if (skb_vlan_tag_get(skb) != ring->tx.cur_vlan_ctag) 1254 /* We can share with the TSO context descriptor */ 1255 if (!context_desc) { 1256 context_desc = 1; 1257 packet->rdesc_count++; 1258 } 1259 1260 XGMAC_SET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES, 1261 VLAN_CTAG, 1); 1262 } 1263 1264 if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && 1265 (pdata->tstamp_config.tx_type == HWTSTAMP_TX_ON)) 1266 XGMAC_SET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES, 1267 PTP, 1); 1268 1269 for (len = skb_headlen(skb); len;) { 1270 packet->rdesc_count++; 1271 len -= min_t(unsigned int, len, XGBE_TX_MAX_BUF_SIZE); 1272 } 1273 1274 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 1275 frag = &skb_shinfo(skb)->frags[i]; 1276 for (len = skb_frag_size(frag); len; ) { 1277 packet->rdesc_count++; 1278 len -= min_t(unsigned int, len, XGBE_TX_MAX_BUF_SIZE); 1279 } 1280 } 1281 } 1282 1283 static int xgbe_open(struct net_device *netdev) 1284 { 1285 struct xgbe_prv_data *pdata = netdev_priv(netdev); 1286 struct xgbe_hw_if *hw_if = &pdata->hw_if; 1287 struct xgbe_desc_if *desc_if = &pdata->desc_if; 1288 struct xgbe_channel *channel = NULL; 1289 unsigned int i = 0; 1290 int ret; 1291 1292 DBGPR("-->xgbe_open\n"); 1293 1294 /* Initialize the phy */ 1295 ret = xgbe_phy_init(pdata); 1296 if (ret) 1297 return ret; 1298 1299 /* Enable the clocks */ 1300 ret = clk_prepare_enable(pdata->sysclk); 1301 if (ret) { 1302 netdev_alert(netdev, "dma clk_prepare_enable failed\n"); 1303 goto err_phy_init; 1304 } 1305 1306 ret = clk_prepare_enable(pdata->ptpclk); 1307 if (ret) { 1308 netdev_alert(netdev, "ptp clk_prepare_enable failed\n"); 1309 goto err_sysclk; 1310 } 1311 1312 /* Calculate the Rx buffer size before allocating rings */ 1313 ret = xgbe_calc_rx_buf_size(netdev, netdev->mtu); 1314 if (ret < 0) 1315 goto err_ptpclk; 1316 pdata->rx_buf_size = ret; 1317 1318 /* Allocate the channel and ring structures */ 1319 ret = xgbe_alloc_channels(pdata); 1320 if (ret) 1321 goto err_ptpclk; 1322 1323 /* Allocate the ring descriptors and buffers */ 1324 ret = desc_if->alloc_ring_resources(pdata); 1325 if (ret) 1326 goto err_channels; 1327 1328 /* Initialize the device restart and Tx timestamp work struct */ 1329 INIT_WORK(&pdata->restart_work, xgbe_restart); 1330 INIT_WORK(&pdata->tx_tstamp_work, xgbe_tx_tstamp); 1331 1332 /* Request interrupts */ 1333 ret = devm_request_irq(pdata->dev, pdata->dev_irq, xgbe_isr, 0, 1334 netdev->name, pdata); 1335 if (ret) { 1336 netdev_alert(netdev, "error requesting irq %d\n", 1337 pdata->dev_irq); 1338 goto err_rings; 1339 } 1340 1341 if (pdata->per_channel_irq) { 1342 channel = pdata->channel; 1343 for (i = 0; i < pdata->channel_count; i++, channel++) { 1344 snprintf(channel->dma_irq_name, 1345 sizeof(channel->dma_irq_name) - 1, 1346 "%s-TxRx-%u", netdev_name(netdev), 1347 channel->queue_index); 1348 1349 ret = devm_request_irq(pdata->dev, channel->dma_irq, 1350 xgbe_dma_isr, 0, 1351 channel->dma_irq_name, channel); 1352 if (ret) { 1353 netdev_alert(netdev, 1354 "error requesting irq %d\n", 1355 channel->dma_irq); 1356 goto err_irq; 1357 } 1358 } 1359 } 1360 1361 ret = xgbe_start(pdata); 1362 if (ret) 1363 goto err_start; 1364 1365 DBGPR("<--xgbe_open\n"); 1366 1367 return 0; 1368 1369 err_start: 1370 hw_if->exit(pdata); 1371 1372 err_irq: 1373 if (pdata->per_channel_irq) { 1374 /* Using an unsigned int, 'i' will go to UINT_MAX and exit */ 1375 for (i--, channel--; i < pdata->channel_count; i--, channel--) 1376 devm_free_irq(pdata->dev, channel->dma_irq, channel); 1377 } 1378 1379 devm_free_irq(pdata->dev, pdata->dev_irq, pdata); 1380 1381 err_rings: 1382 desc_if->free_ring_resources(pdata); 1383 1384 err_channels: 1385 xgbe_free_channels(pdata); 1386 1387 err_ptpclk: 1388 clk_disable_unprepare(pdata->ptpclk); 1389 1390 err_sysclk: 1391 clk_disable_unprepare(pdata->sysclk); 1392 1393 err_phy_init: 1394 xgbe_phy_exit(pdata); 1395 1396 return ret; 1397 } 1398 1399 static int xgbe_close(struct net_device *netdev) 1400 { 1401 struct xgbe_prv_data *pdata = netdev_priv(netdev); 1402 struct xgbe_hw_if *hw_if = &pdata->hw_if; 1403 struct xgbe_desc_if *desc_if = &pdata->desc_if; 1404 struct xgbe_channel *channel; 1405 unsigned int i; 1406 1407 DBGPR("-->xgbe_close\n"); 1408 1409 /* Stop the device */ 1410 xgbe_stop(pdata); 1411 1412 /* Issue software reset to device */ 1413 hw_if->exit(pdata); 1414 1415 /* Free the ring descriptors and buffers */ 1416 desc_if->free_ring_resources(pdata); 1417 1418 /* Release the interrupts */ 1419 devm_free_irq(pdata->dev, pdata->dev_irq, pdata); 1420 if (pdata->per_channel_irq) { 1421 channel = pdata->channel; 1422 for (i = 0; i < pdata->channel_count; i++, channel++) 1423 devm_free_irq(pdata->dev, channel->dma_irq, channel); 1424 } 1425 1426 /* Free the channel and ring structures */ 1427 xgbe_free_channels(pdata); 1428 1429 /* Disable the clocks */ 1430 clk_disable_unprepare(pdata->ptpclk); 1431 clk_disable_unprepare(pdata->sysclk); 1432 1433 /* Release the phy */ 1434 xgbe_phy_exit(pdata); 1435 1436 DBGPR("<--xgbe_close\n"); 1437 1438 return 0; 1439 } 1440 1441 static int xgbe_xmit(struct sk_buff *skb, struct net_device *netdev) 1442 { 1443 struct xgbe_prv_data *pdata = netdev_priv(netdev); 1444 struct xgbe_hw_if *hw_if = &pdata->hw_if; 1445 struct xgbe_desc_if *desc_if = &pdata->desc_if; 1446 struct xgbe_channel *channel; 1447 struct xgbe_ring *ring; 1448 struct xgbe_packet_data *packet; 1449 struct netdev_queue *txq; 1450 int ret; 1451 1452 DBGPR("-->xgbe_xmit: skb->len = %d\n", skb->len); 1453 1454 channel = pdata->channel + skb->queue_mapping; 1455 txq = netdev_get_tx_queue(netdev, channel->queue_index); 1456 ring = channel->tx_ring; 1457 packet = &ring->packet_data; 1458 1459 ret = NETDEV_TX_OK; 1460 1461 if (skb->len == 0) { 1462 netdev_err(netdev, "empty skb received from stack\n"); 1463 dev_kfree_skb_any(skb); 1464 goto tx_netdev_return; 1465 } 1466 1467 /* Calculate preliminary packet info */ 1468 memset(packet, 0, sizeof(*packet)); 1469 xgbe_packet_info(pdata, ring, skb, packet); 1470 1471 /* Check that there are enough descriptors available */ 1472 ret = xgbe_maybe_stop_tx_queue(channel, ring, packet->rdesc_count); 1473 if (ret) 1474 goto tx_netdev_return; 1475 1476 ret = xgbe_prep_tso(skb, packet); 1477 if (ret) { 1478 netdev_err(netdev, "error processing TSO packet\n"); 1479 dev_kfree_skb_any(skb); 1480 goto tx_netdev_return; 1481 } 1482 xgbe_prep_vlan(skb, packet); 1483 1484 if (!desc_if->map_tx_skb(channel, skb)) { 1485 dev_kfree_skb_any(skb); 1486 goto tx_netdev_return; 1487 } 1488 1489 xgbe_prep_tx_tstamp(pdata, skb, packet); 1490 1491 /* Report on the actual number of bytes (to be) sent */ 1492 netdev_tx_sent_queue(txq, packet->tx_bytes); 1493 1494 /* Configure required descriptor fields for transmission */ 1495 hw_if->dev_xmit(channel); 1496 1497 #ifdef XGMAC_ENABLE_TX_PKT_DUMP 1498 xgbe_print_pkt(netdev, skb, true); 1499 #endif 1500 1501 /* Stop the queue in advance if there may not be enough descriptors */ 1502 xgbe_maybe_stop_tx_queue(channel, ring, XGBE_TX_MAX_DESCS); 1503 1504 ret = NETDEV_TX_OK; 1505 1506 tx_netdev_return: 1507 return ret; 1508 } 1509 1510 static void xgbe_set_rx_mode(struct net_device *netdev) 1511 { 1512 struct xgbe_prv_data *pdata = netdev_priv(netdev); 1513 struct xgbe_hw_if *hw_if = &pdata->hw_if; 1514 unsigned int pr_mode, am_mode; 1515 1516 DBGPR("-->xgbe_set_rx_mode\n"); 1517 1518 pr_mode = ((netdev->flags & IFF_PROMISC) != 0); 1519 am_mode = ((netdev->flags & IFF_ALLMULTI) != 0); 1520 1521 hw_if->set_promiscuous_mode(pdata, pr_mode); 1522 hw_if->set_all_multicast_mode(pdata, am_mode); 1523 1524 hw_if->add_mac_addresses(pdata); 1525 1526 DBGPR("<--xgbe_set_rx_mode\n"); 1527 } 1528 1529 static int xgbe_set_mac_address(struct net_device *netdev, void *addr) 1530 { 1531 struct xgbe_prv_data *pdata = netdev_priv(netdev); 1532 struct xgbe_hw_if *hw_if = &pdata->hw_if; 1533 struct sockaddr *saddr = addr; 1534 1535 DBGPR("-->xgbe_set_mac_address\n"); 1536 1537 if (!is_valid_ether_addr(saddr->sa_data)) 1538 return -EADDRNOTAVAIL; 1539 1540 memcpy(netdev->dev_addr, saddr->sa_data, netdev->addr_len); 1541 1542 hw_if->set_mac_address(pdata, netdev->dev_addr); 1543 1544 DBGPR("<--xgbe_set_mac_address\n"); 1545 1546 return 0; 1547 } 1548 1549 static int xgbe_ioctl(struct net_device *netdev, struct ifreq *ifreq, int cmd) 1550 { 1551 struct xgbe_prv_data *pdata = netdev_priv(netdev); 1552 int ret; 1553 1554 switch (cmd) { 1555 case SIOCGHWTSTAMP: 1556 ret = xgbe_get_hwtstamp_settings(pdata, ifreq); 1557 break; 1558 1559 case SIOCSHWTSTAMP: 1560 ret = xgbe_set_hwtstamp_settings(pdata, ifreq); 1561 break; 1562 1563 default: 1564 ret = -EOPNOTSUPP; 1565 } 1566 1567 return ret; 1568 } 1569 1570 static int xgbe_change_mtu(struct net_device *netdev, int mtu) 1571 { 1572 struct xgbe_prv_data *pdata = netdev_priv(netdev); 1573 int ret; 1574 1575 DBGPR("-->xgbe_change_mtu\n"); 1576 1577 ret = xgbe_calc_rx_buf_size(netdev, mtu); 1578 if (ret < 0) 1579 return ret; 1580 1581 pdata->rx_buf_size = ret; 1582 netdev->mtu = mtu; 1583 1584 xgbe_restart_dev(pdata); 1585 1586 DBGPR("<--xgbe_change_mtu\n"); 1587 1588 return 0; 1589 } 1590 1591 static struct rtnl_link_stats64 *xgbe_get_stats64(struct net_device *netdev, 1592 struct rtnl_link_stats64 *s) 1593 { 1594 struct xgbe_prv_data *pdata = netdev_priv(netdev); 1595 struct xgbe_mmc_stats *pstats = &pdata->mmc_stats; 1596 1597 DBGPR("-->%s\n", __func__); 1598 1599 pdata->hw_if.read_mmc_stats(pdata); 1600 1601 s->rx_packets = pstats->rxframecount_gb; 1602 s->rx_bytes = pstats->rxoctetcount_gb; 1603 s->rx_errors = pstats->rxframecount_gb - 1604 pstats->rxbroadcastframes_g - 1605 pstats->rxmulticastframes_g - 1606 pstats->rxunicastframes_g; 1607 s->multicast = pstats->rxmulticastframes_g; 1608 s->rx_length_errors = pstats->rxlengtherror; 1609 s->rx_crc_errors = pstats->rxcrcerror; 1610 s->rx_fifo_errors = pstats->rxfifooverflow; 1611 1612 s->tx_packets = pstats->txframecount_gb; 1613 s->tx_bytes = pstats->txoctetcount_gb; 1614 s->tx_errors = pstats->txframecount_gb - pstats->txframecount_g; 1615 s->tx_dropped = netdev->stats.tx_dropped; 1616 1617 DBGPR("<--%s\n", __func__); 1618 1619 return s; 1620 } 1621 1622 static int xgbe_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, 1623 u16 vid) 1624 { 1625 struct xgbe_prv_data *pdata = netdev_priv(netdev); 1626 struct xgbe_hw_if *hw_if = &pdata->hw_if; 1627 1628 DBGPR("-->%s\n", __func__); 1629 1630 set_bit(vid, pdata->active_vlans); 1631 hw_if->update_vlan_hash_table(pdata); 1632 1633 DBGPR("<--%s\n", __func__); 1634 1635 return 0; 1636 } 1637 1638 static int xgbe_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, 1639 u16 vid) 1640 { 1641 struct xgbe_prv_data *pdata = netdev_priv(netdev); 1642 struct xgbe_hw_if *hw_if = &pdata->hw_if; 1643 1644 DBGPR("-->%s\n", __func__); 1645 1646 clear_bit(vid, pdata->active_vlans); 1647 hw_if->update_vlan_hash_table(pdata); 1648 1649 DBGPR("<--%s\n", __func__); 1650 1651 return 0; 1652 } 1653 1654 #ifdef CONFIG_NET_POLL_CONTROLLER 1655 static void xgbe_poll_controller(struct net_device *netdev) 1656 { 1657 struct xgbe_prv_data *pdata = netdev_priv(netdev); 1658 struct xgbe_channel *channel; 1659 unsigned int i; 1660 1661 DBGPR("-->xgbe_poll_controller\n"); 1662 1663 if (pdata->per_channel_irq) { 1664 channel = pdata->channel; 1665 for (i = 0; i < pdata->channel_count; i++, channel++) 1666 xgbe_dma_isr(channel->dma_irq, channel); 1667 } else { 1668 disable_irq(pdata->dev_irq); 1669 xgbe_isr(pdata->dev_irq, pdata); 1670 enable_irq(pdata->dev_irq); 1671 } 1672 1673 DBGPR("<--xgbe_poll_controller\n"); 1674 } 1675 #endif /* End CONFIG_NET_POLL_CONTROLLER */ 1676 1677 static int xgbe_setup_tc(struct net_device *netdev, u8 tc) 1678 { 1679 struct xgbe_prv_data *pdata = netdev_priv(netdev); 1680 unsigned int offset, queue; 1681 u8 i; 1682 1683 if (tc && (tc != pdata->hw_feat.tc_cnt)) 1684 return -EINVAL; 1685 1686 if (tc) { 1687 netdev_set_num_tc(netdev, tc); 1688 for (i = 0, queue = 0, offset = 0; i < tc; i++) { 1689 while ((queue < pdata->tx_q_count) && 1690 (pdata->q2tc_map[queue] == i)) 1691 queue++; 1692 1693 DBGPR(" TC%u using TXq%u-%u\n", i, offset, queue - 1); 1694 netdev_set_tc_queue(netdev, i, queue - offset, offset); 1695 offset = queue; 1696 } 1697 } else { 1698 netdev_reset_tc(netdev); 1699 } 1700 1701 return 0; 1702 } 1703 1704 static int xgbe_set_features(struct net_device *netdev, 1705 netdev_features_t features) 1706 { 1707 struct xgbe_prv_data *pdata = netdev_priv(netdev); 1708 struct xgbe_hw_if *hw_if = &pdata->hw_if; 1709 netdev_features_t rxhash, rxcsum, rxvlan, rxvlan_filter; 1710 int ret = 0; 1711 1712 rxhash = pdata->netdev_features & NETIF_F_RXHASH; 1713 rxcsum = pdata->netdev_features & NETIF_F_RXCSUM; 1714 rxvlan = pdata->netdev_features & NETIF_F_HW_VLAN_CTAG_RX; 1715 rxvlan_filter = pdata->netdev_features & NETIF_F_HW_VLAN_CTAG_FILTER; 1716 1717 if ((features & NETIF_F_RXHASH) && !rxhash) 1718 ret = hw_if->enable_rss(pdata); 1719 else if (!(features & NETIF_F_RXHASH) && rxhash) 1720 ret = hw_if->disable_rss(pdata); 1721 if (ret) 1722 return ret; 1723 1724 if ((features & NETIF_F_RXCSUM) && !rxcsum) 1725 hw_if->enable_rx_csum(pdata); 1726 else if (!(features & NETIF_F_RXCSUM) && rxcsum) 1727 hw_if->disable_rx_csum(pdata); 1728 1729 if ((features & NETIF_F_HW_VLAN_CTAG_RX) && !rxvlan) 1730 hw_if->enable_rx_vlan_stripping(pdata); 1731 else if (!(features & NETIF_F_HW_VLAN_CTAG_RX) && rxvlan) 1732 hw_if->disable_rx_vlan_stripping(pdata); 1733 1734 if ((features & NETIF_F_HW_VLAN_CTAG_FILTER) && !rxvlan_filter) 1735 hw_if->enable_rx_vlan_filtering(pdata); 1736 else if (!(features & NETIF_F_HW_VLAN_CTAG_FILTER) && rxvlan_filter) 1737 hw_if->disable_rx_vlan_filtering(pdata); 1738 1739 pdata->netdev_features = features; 1740 1741 DBGPR("<--xgbe_set_features\n"); 1742 1743 return 0; 1744 } 1745 1746 static const struct net_device_ops xgbe_netdev_ops = { 1747 .ndo_open = xgbe_open, 1748 .ndo_stop = xgbe_close, 1749 .ndo_start_xmit = xgbe_xmit, 1750 .ndo_set_rx_mode = xgbe_set_rx_mode, 1751 .ndo_set_mac_address = xgbe_set_mac_address, 1752 .ndo_validate_addr = eth_validate_addr, 1753 .ndo_do_ioctl = xgbe_ioctl, 1754 .ndo_change_mtu = xgbe_change_mtu, 1755 .ndo_get_stats64 = xgbe_get_stats64, 1756 .ndo_vlan_rx_add_vid = xgbe_vlan_rx_add_vid, 1757 .ndo_vlan_rx_kill_vid = xgbe_vlan_rx_kill_vid, 1758 #ifdef CONFIG_NET_POLL_CONTROLLER 1759 .ndo_poll_controller = xgbe_poll_controller, 1760 #endif 1761 .ndo_setup_tc = xgbe_setup_tc, 1762 .ndo_set_features = xgbe_set_features, 1763 }; 1764 1765 struct net_device_ops *xgbe_get_netdev_ops(void) 1766 { 1767 return (struct net_device_ops *)&xgbe_netdev_ops; 1768 } 1769 1770 static void xgbe_rx_refresh(struct xgbe_channel *channel) 1771 { 1772 struct xgbe_prv_data *pdata = channel->pdata; 1773 struct xgbe_hw_if *hw_if = &pdata->hw_if; 1774 struct xgbe_desc_if *desc_if = &pdata->desc_if; 1775 struct xgbe_ring *ring = channel->rx_ring; 1776 struct xgbe_ring_data *rdata; 1777 1778 while (ring->dirty != ring->cur) { 1779 rdata = XGBE_GET_DESC_DATA(ring, ring->dirty); 1780 1781 /* Reset rdata values */ 1782 desc_if->unmap_rdata(pdata, rdata); 1783 1784 if (desc_if->map_rx_buffer(pdata, ring, rdata)) 1785 break; 1786 1787 hw_if->rx_desc_reset(rdata); 1788 1789 ring->dirty++; 1790 } 1791 1792 /* Update the Rx Tail Pointer Register with address of 1793 * the last cleaned entry */ 1794 rdata = XGBE_GET_DESC_DATA(ring, ring->dirty - 1); 1795 XGMAC_DMA_IOWRITE(channel, DMA_CH_RDTR_LO, 1796 lower_32_bits(rdata->rdesc_dma)); 1797 } 1798 1799 static struct sk_buff *xgbe_create_skb(struct xgbe_prv_data *pdata, 1800 struct xgbe_ring_data *rdata, 1801 unsigned int *len) 1802 { 1803 struct net_device *netdev = pdata->netdev; 1804 struct sk_buff *skb; 1805 u8 *packet; 1806 unsigned int copy_len; 1807 1808 skb = netdev_alloc_skb_ip_align(netdev, rdata->rx.hdr.dma_len); 1809 if (!skb) 1810 return NULL; 1811 1812 packet = page_address(rdata->rx.hdr.pa.pages) + 1813 rdata->rx.hdr.pa.pages_offset; 1814 copy_len = (rdata->rx.hdr_len) ? rdata->rx.hdr_len : *len; 1815 copy_len = min(rdata->rx.hdr.dma_len, copy_len); 1816 skb_copy_to_linear_data(skb, packet, copy_len); 1817 skb_put(skb, copy_len); 1818 1819 *len -= copy_len; 1820 1821 return skb; 1822 } 1823 1824 static int xgbe_tx_poll(struct xgbe_channel *channel) 1825 { 1826 struct xgbe_prv_data *pdata = channel->pdata; 1827 struct xgbe_hw_if *hw_if = &pdata->hw_if; 1828 struct xgbe_desc_if *desc_if = &pdata->desc_if; 1829 struct xgbe_ring *ring = channel->tx_ring; 1830 struct xgbe_ring_data *rdata; 1831 struct xgbe_ring_desc *rdesc; 1832 struct net_device *netdev = pdata->netdev; 1833 struct netdev_queue *txq; 1834 int processed = 0; 1835 unsigned int tx_packets = 0, tx_bytes = 0; 1836 1837 DBGPR("-->xgbe_tx_poll\n"); 1838 1839 /* Nothing to do if there isn't a Tx ring for this channel */ 1840 if (!ring) 1841 return 0; 1842 1843 txq = netdev_get_tx_queue(netdev, channel->queue_index); 1844 1845 while ((processed < XGBE_TX_DESC_MAX_PROC) && 1846 (ring->dirty != ring->cur)) { 1847 rdata = XGBE_GET_DESC_DATA(ring, ring->dirty); 1848 rdesc = rdata->rdesc; 1849 1850 if (!hw_if->tx_complete(rdesc)) 1851 break; 1852 1853 /* Make sure descriptor fields are read after reading the OWN 1854 * bit */ 1855 rmb(); 1856 1857 #ifdef XGMAC_ENABLE_TX_DESC_DUMP 1858 xgbe_dump_tx_desc(ring, ring->dirty, 1, 0); 1859 #endif 1860 1861 if (hw_if->is_last_desc(rdesc)) { 1862 tx_packets += rdata->tx.packets; 1863 tx_bytes += rdata->tx.bytes; 1864 } 1865 1866 /* Free the SKB and reset the descriptor for re-use */ 1867 desc_if->unmap_rdata(pdata, rdata); 1868 hw_if->tx_desc_reset(rdata); 1869 1870 processed++; 1871 ring->dirty++; 1872 } 1873 1874 if (!processed) 1875 return 0; 1876 1877 netdev_tx_completed_queue(txq, tx_packets, tx_bytes); 1878 1879 if ((ring->tx.queue_stopped == 1) && 1880 (xgbe_tx_avail_desc(ring) > XGBE_TX_DESC_MIN_FREE)) { 1881 ring->tx.queue_stopped = 0; 1882 netif_tx_wake_queue(txq); 1883 } 1884 1885 DBGPR("<--xgbe_tx_poll: processed=%d\n", processed); 1886 1887 return processed; 1888 } 1889 1890 static int xgbe_rx_poll(struct xgbe_channel *channel, int budget) 1891 { 1892 struct xgbe_prv_data *pdata = channel->pdata; 1893 struct xgbe_hw_if *hw_if = &pdata->hw_if; 1894 struct xgbe_ring *ring = channel->rx_ring; 1895 struct xgbe_ring_data *rdata; 1896 struct xgbe_packet_data *packet; 1897 struct net_device *netdev = pdata->netdev; 1898 struct napi_struct *napi; 1899 struct sk_buff *skb; 1900 struct skb_shared_hwtstamps *hwtstamps; 1901 unsigned int incomplete, error, context_next, context; 1902 unsigned int len, put_len, max_len; 1903 unsigned int received = 0; 1904 int packet_count = 0; 1905 1906 DBGPR("-->xgbe_rx_poll: budget=%d\n", budget); 1907 1908 /* Nothing to do if there isn't a Rx ring for this channel */ 1909 if (!ring) 1910 return 0; 1911 1912 napi = (pdata->per_channel_irq) ? &channel->napi : &pdata->napi; 1913 1914 rdata = XGBE_GET_DESC_DATA(ring, ring->cur); 1915 packet = &ring->packet_data; 1916 while (packet_count < budget) { 1917 DBGPR(" cur = %d\n", ring->cur); 1918 1919 /* First time in loop see if we need to restore state */ 1920 if (!received && rdata->state_saved) { 1921 incomplete = rdata->state.incomplete; 1922 context_next = rdata->state.context_next; 1923 skb = rdata->state.skb; 1924 error = rdata->state.error; 1925 len = rdata->state.len; 1926 } else { 1927 memset(packet, 0, sizeof(*packet)); 1928 incomplete = 0; 1929 context_next = 0; 1930 skb = NULL; 1931 error = 0; 1932 len = 0; 1933 } 1934 1935 read_again: 1936 rdata = XGBE_GET_DESC_DATA(ring, ring->cur); 1937 1938 if (xgbe_rx_dirty_desc(ring) > (XGBE_RX_DESC_CNT >> 3)) 1939 xgbe_rx_refresh(channel); 1940 1941 if (hw_if->dev_read(channel)) 1942 break; 1943 1944 received++; 1945 ring->cur++; 1946 1947 incomplete = XGMAC_GET_BITS(packet->attributes, 1948 RX_PACKET_ATTRIBUTES, 1949 INCOMPLETE); 1950 context_next = XGMAC_GET_BITS(packet->attributes, 1951 RX_PACKET_ATTRIBUTES, 1952 CONTEXT_NEXT); 1953 context = XGMAC_GET_BITS(packet->attributes, 1954 RX_PACKET_ATTRIBUTES, 1955 CONTEXT); 1956 1957 /* Earlier error, just drain the remaining data */ 1958 if ((incomplete || context_next) && error) 1959 goto read_again; 1960 1961 if (error || packet->errors) { 1962 if (packet->errors) 1963 DBGPR("Error in received packet\n"); 1964 dev_kfree_skb(skb); 1965 goto next_packet; 1966 } 1967 1968 if (!context) { 1969 put_len = rdata->rx.len - len; 1970 len += put_len; 1971 1972 if (!skb) { 1973 dma_sync_single_for_cpu(pdata->dev, 1974 rdata->rx.hdr.dma, 1975 rdata->rx.hdr.dma_len, 1976 DMA_FROM_DEVICE); 1977 1978 skb = xgbe_create_skb(pdata, rdata, &put_len); 1979 if (!skb) { 1980 error = 1; 1981 goto skip_data; 1982 } 1983 } 1984 1985 if (put_len) { 1986 dma_sync_single_for_cpu(pdata->dev, 1987 rdata->rx.buf.dma, 1988 rdata->rx.buf.dma_len, 1989 DMA_FROM_DEVICE); 1990 1991 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, 1992 rdata->rx.buf.pa.pages, 1993 rdata->rx.buf.pa.pages_offset, 1994 put_len, rdata->rx.buf.dma_len); 1995 rdata->rx.buf.pa.pages = NULL; 1996 } 1997 } 1998 1999 skip_data: 2000 if (incomplete || context_next) 2001 goto read_again; 2002 2003 if (!skb) 2004 goto next_packet; 2005 2006 /* Be sure we don't exceed the configured MTU */ 2007 max_len = netdev->mtu + ETH_HLEN; 2008 if (!(netdev->features & NETIF_F_HW_VLAN_CTAG_RX) && 2009 (skb->protocol == htons(ETH_P_8021Q))) 2010 max_len += VLAN_HLEN; 2011 2012 if (skb->len > max_len) { 2013 DBGPR("packet length exceeds configured MTU\n"); 2014 dev_kfree_skb(skb); 2015 goto next_packet; 2016 } 2017 2018 #ifdef XGMAC_ENABLE_RX_PKT_DUMP 2019 xgbe_print_pkt(netdev, skb, false); 2020 #endif 2021 2022 skb_checksum_none_assert(skb); 2023 if (XGMAC_GET_BITS(packet->attributes, 2024 RX_PACKET_ATTRIBUTES, CSUM_DONE)) 2025 skb->ip_summed = CHECKSUM_UNNECESSARY; 2026 2027 if (XGMAC_GET_BITS(packet->attributes, 2028 RX_PACKET_ATTRIBUTES, VLAN_CTAG)) 2029 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), 2030 packet->vlan_ctag); 2031 2032 if (XGMAC_GET_BITS(packet->attributes, 2033 RX_PACKET_ATTRIBUTES, RX_TSTAMP)) { 2034 u64 nsec; 2035 2036 nsec = timecounter_cyc2time(&pdata->tstamp_tc, 2037 packet->rx_tstamp); 2038 hwtstamps = skb_hwtstamps(skb); 2039 hwtstamps->hwtstamp = ns_to_ktime(nsec); 2040 } 2041 2042 if (XGMAC_GET_BITS(packet->attributes, 2043 RX_PACKET_ATTRIBUTES, RSS_HASH)) 2044 skb_set_hash(skb, packet->rss_hash, 2045 packet->rss_hash_type); 2046 2047 skb->dev = netdev; 2048 skb->protocol = eth_type_trans(skb, netdev); 2049 skb_record_rx_queue(skb, channel->queue_index); 2050 skb_mark_napi_id(skb, napi); 2051 2052 netdev->last_rx = jiffies; 2053 napi_gro_receive(napi, skb); 2054 2055 next_packet: 2056 packet_count++; 2057 } 2058 2059 /* Check if we need to save state before leaving */ 2060 if (received && (incomplete || context_next)) { 2061 rdata = XGBE_GET_DESC_DATA(ring, ring->cur); 2062 rdata->state_saved = 1; 2063 rdata->state.incomplete = incomplete; 2064 rdata->state.context_next = context_next; 2065 rdata->state.skb = skb; 2066 rdata->state.len = len; 2067 rdata->state.error = error; 2068 } 2069 2070 DBGPR("<--xgbe_rx_poll: packet_count = %d\n", packet_count); 2071 2072 return packet_count; 2073 } 2074 2075 static int xgbe_one_poll(struct napi_struct *napi, int budget) 2076 { 2077 struct xgbe_channel *channel = container_of(napi, struct xgbe_channel, 2078 napi); 2079 int processed = 0; 2080 2081 DBGPR("-->xgbe_one_poll: budget=%d\n", budget); 2082 2083 /* Cleanup Tx ring first */ 2084 xgbe_tx_poll(channel); 2085 2086 /* Process Rx ring next */ 2087 processed = xgbe_rx_poll(channel, budget); 2088 2089 /* If we processed everything, we are done */ 2090 if (processed < budget) { 2091 /* Turn off polling */ 2092 napi_complete(napi); 2093 2094 /* Enable Tx and Rx interrupts */ 2095 enable_irq(channel->dma_irq); 2096 } 2097 2098 DBGPR("<--xgbe_one_poll: received = %d\n", processed); 2099 2100 return processed; 2101 } 2102 2103 static int xgbe_all_poll(struct napi_struct *napi, int budget) 2104 { 2105 struct xgbe_prv_data *pdata = container_of(napi, struct xgbe_prv_data, 2106 napi); 2107 struct xgbe_channel *channel; 2108 int ring_budget; 2109 int processed, last_processed; 2110 unsigned int i; 2111 2112 DBGPR("-->xgbe_all_poll: budget=%d\n", budget); 2113 2114 processed = 0; 2115 ring_budget = budget / pdata->rx_ring_count; 2116 do { 2117 last_processed = processed; 2118 2119 channel = pdata->channel; 2120 for (i = 0; i < pdata->channel_count; i++, channel++) { 2121 /* Cleanup Tx ring first */ 2122 xgbe_tx_poll(channel); 2123 2124 /* Process Rx ring next */ 2125 if (ring_budget > (budget - processed)) 2126 ring_budget = budget - processed; 2127 processed += xgbe_rx_poll(channel, ring_budget); 2128 } 2129 } while ((processed < budget) && (processed != last_processed)); 2130 2131 /* If we processed everything, we are done */ 2132 if (processed < budget) { 2133 /* Turn off polling */ 2134 napi_complete(napi); 2135 2136 /* Enable Tx and Rx interrupts */ 2137 xgbe_enable_rx_tx_ints(pdata); 2138 } 2139 2140 DBGPR("<--xgbe_all_poll: received = %d\n", processed); 2141 2142 return processed; 2143 } 2144 2145 void xgbe_dump_tx_desc(struct xgbe_ring *ring, unsigned int idx, 2146 unsigned int count, unsigned int flag) 2147 { 2148 struct xgbe_ring_data *rdata; 2149 struct xgbe_ring_desc *rdesc; 2150 2151 while (count--) { 2152 rdata = XGBE_GET_DESC_DATA(ring, idx); 2153 rdesc = rdata->rdesc; 2154 pr_alert("TX_NORMAL_DESC[%d %s] = %08x:%08x:%08x:%08x\n", idx, 2155 (flag == 1) ? "QUEUED FOR TX" : "TX BY DEVICE", 2156 le32_to_cpu(rdesc->desc0), le32_to_cpu(rdesc->desc1), 2157 le32_to_cpu(rdesc->desc2), le32_to_cpu(rdesc->desc3)); 2158 idx++; 2159 } 2160 } 2161 2162 void xgbe_dump_rx_desc(struct xgbe_ring *ring, struct xgbe_ring_desc *desc, 2163 unsigned int idx) 2164 { 2165 pr_alert("RX_NORMAL_DESC[%d RX BY DEVICE] = %08x:%08x:%08x:%08x\n", idx, 2166 le32_to_cpu(desc->desc0), le32_to_cpu(desc->desc1), 2167 le32_to_cpu(desc->desc2), le32_to_cpu(desc->desc3)); 2168 } 2169 2170 void xgbe_print_pkt(struct net_device *netdev, struct sk_buff *skb, bool tx_rx) 2171 { 2172 struct ethhdr *eth = (struct ethhdr *)skb->data; 2173 unsigned char *buf = skb->data; 2174 unsigned char buffer[128]; 2175 unsigned int i, j; 2176 2177 netdev_alert(netdev, "\n************** SKB dump ****************\n"); 2178 2179 netdev_alert(netdev, "%s packet of %d bytes\n", 2180 (tx_rx ? "TX" : "RX"), skb->len); 2181 2182 netdev_alert(netdev, "Dst MAC addr: %pM\n", eth->h_dest); 2183 netdev_alert(netdev, "Src MAC addr: %pM\n", eth->h_source); 2184 netdev_alert(netdev, "Protocol: 0x%04hx\n", ntohs(eth->h_proto)); 2185 2186 for (i = 0, j = 0; i < skb->len;) { 2187 j += snprintf(buffer + j, sizeof(buffer) - j, "%02hhx", 2188 buf[i++]); 2189 2190 if ((i % 32) == 0) { 2191 netdev_alert(netdev, " 0x%04x: %s\n", i - 32, buffer); 2192 j = 0; 2193 } else if ((i % 16) == 0) { 2194 buffer[j++] = ' '; 2195 buffer[j++] = ' '; 2196 } else if ((i % 4) == 0) { 2197 buffer[j++] = ' '; 2198 } 2199 } 2200 if (i % 32) 2201 netdev_alert(netdev, " 0x%04x: %s\n", i - (i % 32), buffer); 2202 2203 netdev_alert(netdev, "\n************** SKB dump ****************\n"); 2204 } 2205