/*
 * AMD 10Gb Ethernet driver
 *
 * This file is available to you under your choice of the following two
 * licenses:
 *
 * License 1: GPLv2
 *
 * Copyright (c) 2014 Advanced Micro Devices, Inc.
 *
 * This file is free software; you may copy, redistribute and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 2 of the License, or (at
 * your option) any later version.
 *
 * This file is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 * This file incorporates work covered by the following copyright and
 * permission notice:
 *     The Synopsys DWC ETHER XGMAC Software Driver and documentation
 *     (hereinafter "Software") is an unsupported proprietary work of Synopsys,
 *     Inc. unless otherwise expressly agreed to in writing between Synopsys
 *     and you.
 *
 *     The Software IS NOT an item of Licensed Software or Licensed Product
 *     under any End User Software License Agreement or Agreement for Licensed
 *     Product with Synopsys or any supplement thereto.  Permission is hereby
 *     granted, free of charge, to any person obtaining a copy of this software
 *     annotated with this license and the Software, to deal in the Software
 *     without restriction, including without limitation the rights to use,
 *     copy, modify, merge, publish, distribute, sublicense, and/or sell copies
 *     of the Software, and to permit persons to whom the Software is furnished
 *     to do so, subject to the following conditions:
 *
 *     The above copyright notice and this permission notice shall be included
 *     in all copies or substantial portions of the Software.
 *
 *     THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
 *     BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 *     TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
 *     PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
 *     BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *     CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *     SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 *     INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 *     CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 *     ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 *     THE POSSIBILITY OF SUCH DAMAGE.
 *
 *
 * License 2: Modified BSD
 *
 * Copyright (c) 2014 Advanced Micro Devices, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in the
 *       documentation and/or other materials provided with the distribution.
 *     * Neither the name of Advanced Micro Devices, Inc. nor the
 *       names of its contributors may be used to endorse or promote products
 *       derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * This file incorporates work covered by the following copyright and
 * permission notice:
 *     The Synopsys DWC ETHER XGMAC Software Driver and documentation
 *     (hereinafter "Software") is an unsupported proprietary work of Synopsys,
 *     Inc. unless otherwise expressly agreed to in writing between Synopsys
 *     and you.
 *
 *     The Software IS NOT an item of Licensed Software or Licensed Product
 *     under any End User Software License Agreement or Agreement for Licensed
 *     Product with Synopsys or any supplement thereto.  Permission is hereby
 *     granted, free of charge, to any person obtaining a copy of this software
 *     annotated with this license and the Software, to deal in the Software
 *     without restriction, including without limitation the rights to use,
 *     copy, modify, merge, publish, distribute, sublicense, and/or sell copies
 *     of the Software, and to permit persons to whom the Software is furnished
 *     to do so, subject to the following conditions:
 *
 *     The above copyright notice and this permission notice shall be included
 *     in all copies or substantial portions of the Software.
 *
 *     THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
 *     BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 *     TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
 *     PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
 *     BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *     CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *     SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 *     INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 *     CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 *     ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 *     THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/platform_device.h>
#include <linux/spinlock.h>
#include <linux/tcp.h>
#include <linux/if_vlan.h>
#include <net/busy_poll.h>
#include <linux/clk.h>
#include <linux/if_ether.h>
#include <linux/net_tstamp.h>
#include <linux/phy.h>

#include "xgbe.h"
#include "xgbe-common.h"

static int xgbe_one_poll(struct napi_struct *, int);
static int xgbe_all_poll(struct napi_struct *, int);
static void xgbe_set_rx_mode(struct net_device *);

static int xgbe_alloc_channels(struct xgbe_prv_data *pdata)
{
	struct xgbe_channel *channel_mem, *channel;
	struct xgbe_ring *tx_ring, *rx_ring;
	unsigned int count, i;
	int ret = -ENOMEM;

	count = max_t(unsigned int, pdata->tx_ring_count, pdata->rx_ring_count);

	channel_mem = kcalloc(count, sizeof(struct xgbe_channel), GFP_KERNEL);
	if (!channel_mem)
		goto err_channel;

	tx_ring = kcalloc(pdata->tx_ring_count, sizeof(struct xgbe_ring),
			  GFP_KERNEL);
	if (!tx_ring)
		goto err_tx_ring;

	rx_ring = kcalloc(pdata->rx_ring_count, sizeof(struct xgbe_ring),
			  GFP_KERNEL);
	if (!rx_ring)
		goto err_rx_ring;

	for (i = 0, channel = channel_mem; i < count; i++, channel++) {
		snprintf(channel->name, sizeof(channel->name), "channel-%u", i);
		channel->pdata = pdata;
		channel->queue_index = i;
		channel->dma_regs = pdata->xgmac_regs + DMA_CH_BASE +
				    (DMA_CH_INC * i);

		if (pdata->per_channel_irq) {
			/* Get the DMA interrupt (offset 1) */
			ret = platform_get_irq(pdata->pdev, i + 1);
			if (ret < 0) {
				netdev_err(pdata->netdev,
					   "platform_get_irq %u failed\n",
					   i + 1);
				goto err_irq;
			}

			channel->dma_irq = ret;
		}

		if (i < pdata->tx_ring_count) {
			spin_lock_init(&tx_ring->lock);
			channel->tx_ring = tx_ring++;
		}

		if (i < pdata->rx_ring_count) {
			spin_lock_init(&rx_ring->lock);
			channel->rx_ring = rx_ring++;
		}

		DBGPR("  %s: queue=%u, dma_regs=%p, dma_irq=%d, tx=%p, rx=%p\n",
		      channel->name, channel->queue_index, channel->dma_regs,
		      channel->dma_irq, channel->tx_ring, channel->rx_ring);
	}

	pdata->channel = channel_mem;
	pdata->channel_count = count;

	return 0;

err_irq:
	kfree(rx_ring);

err_rx_ring:
	kfree(tx_ring);

err_tx_ring:
	kfree(channel_mem);

err_channel:
	return ret;
}

static void xgbe_free_channels(struct xgbe_prv_data *pdata)
{
	if (!pdata->channel)
		return;

	kfree(pdata->channel->rx_ring);
	kfree(pdata->channel->tx_ring);
	kfree(pdata->channel);

	pdata->channel = NULL;
	pdata->channel_count = 0;
}

static inline unsigned int xgbe_tx_avail_desc(struct xgbe_ring *ring)
{
	return (ring->rdesc_count - (ring->cur - ring->dirty));
}

static inline unsigned int xgbe_rx_dirty_desc(struct xgbe_ring *ring)
{
	return (ring->cur - ring->dirty);
}

static int xgbe_maybe_stop_tx_queue(struct xgbe_channel *channel,
				    struct xgbe_ring *ring, unsigned int count)
{
	struct xgbe_prv_data *pdata = channel->pdata;

	if (count > xgbe_tx_avail_desc(ring)) {
		DBGPR("  Tx queue stopped, not enough descriptors available\n");
		netif_stop_subqueue(pdata->netdev, channel->queue_index);
		ring->tx.queue_stopped = 1;

		/* If we haven't notified the hardware because of xmit_more
		 * support, tell it now
		 */
		if (ring->tx.xmit_more)
			pdata->hw_if.tx_start_xmit(channel, ring);

		return NETDEV_TX_BUSY;
	}

	return 0;
}

static int xgbe_calc_rx_buf_size(struct net_device *netdev, unsigned int mtu)
{
	unsigned int rx_buf_size;

	if (mtu > XGMAC_JUMBO_PACKET_MTU) {
		netdev_alert(netdev, "MTU exceeds maximum supported value\n");
		return -EINVAL;
	}

	rx_buf_size = mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
	rx_buf_size = clamp_val(rx_buf_size, XGBE_RX_MIN_BUF_SIZE, PAGE_SIZE);

	rx_buf_size = (rx_buf_size + XGBE_RX_BUF_ALIGN - 1) &
		      ~(XGBE_RX_BUF_ALIGN - 1);

	return rx_buf_size;
}

static void xgbe_enable_rx_tx_ints(struct xgbe_prv_data *pdata)
{
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	struct xgbe_channel *channel;
	enum xgbe_int int_id;
	unsigned int i;

	channel = pdata->channel;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		if (channel->tx_ring && channel->rx_ring)
			int_id = XGMAC_INT_DMA_CH_SR_TI_RI;
		else if (channel->tx_ring)
			int_id = XGMAC_INT_DMA_CH_SR_TI;
		else if (channel->rx_ring)
			int_id = XGMAC_INT_DMA_CH_SR_RI;
		else
			continue;

		hw_if->enable_int(channel, int_id);
	}
}

static void xgbe_disable_rx_tx_ints(struct xgbe_prv_data *pdata)
{
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	struct xgbe_channel *channel;
	enum xgbe_int int_id;
	unsigned int i;

	channel = pdata->channel;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		if (channel->tx_ring && channel->rx_ring)
			int_id = XGMAC_INT_DMA_CH_SR_TI_RI;
		else if (channel->tx_ring)
			int_id = XGMAC_INT_DMA_CH_SR_TI;
		else if (channel->rx_ring)
			int_id = XGMAC_INT_DMA_CH_SR_RI;
		else
			continue;

		hw_if->disable_int(channel, int_id);
	}
}

static irqreturn_t xgbe_isr(int irq, void *data)
{
	struct xgbe_prv_data *pdata = data;
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	struct xgbe_channel *channel;
	unsigned int dma_isr, dma_ch_isr;
	unsigned int mac_isr, mac_tssr;
	unsigned int i;

	/* The DMA interrupt status register also reports MAC and MTL
	 * interrupts. So for polling mode, we just need to check for
	 * this register to be non-zero
	 */
	dma_isr = XGMAC_IOREAD(pdata, DMA_ISR);
	if (!dma_isr)
		goto isr_done;

	DBGPR("  DMA_ISR = %08x\n", dma_isr);

	for (i = 0; i < pdata->channel_count; i++) {
		if (!(dma_isr & (1 << i)))
			continue;

		channel = pdata->channel + i;

		dma_ch_isr = XGMAC_DMA_IOREAD(channel, DMA_CH_SR);
		DBGPR("  DMA_CH%u_ISR = %08x\n", i, dma_ch_isr);

		/* The TI or RI interrupt bits may still be set even if using
		 * per channel DMA interrupts. Check to be sure those are not
		 * enabled before using the private data napi structure.
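		 * When per-channel DMA interrupts are in use, Tx/Rx work is
		 * instead scheduled from xgbe_dma_isr() on the per-channel
		 * napi.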
		 */
		if (!pdata->per_channel_irq &&
		    (XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, TI) ||
		     XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, RI))) {
			if (napi_schedule_prep(&pdata->napi)) {
				/* Disable Tx and Rx interrupts */
				xgbe_disable_rx_tx_ints(pdata);

				/* Turn on polling */
				__napi_schedule(&pdata->napi);
			}
		}

		/* Restart the device on a Fatal Bus Error */
		if (XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, FBE))
			schedule_work(&pdata->restart_work);

		/* Clear all interrupt signals */
		XGMAC_DMA_IOWRITE(channel, DMA_CH_SR, dma_ch_isr);
	}

	if (XGMAC_GET_BITS(dma_isr, DMA_ISR, MACIS)) {
		mac_isr = XGMAC_IOREAD(pdata, MAC_ISR);

		if (XGMAC_GET_BITS(mac_isr, MAC_ISR, MMCTXIS))
			hw_if->tx_mmc_int(pdata);

		if (XGMAC_GET_BITS(mac_isr, MAC_ISR, MMCRXIS))
			hw_if->rx_mmc_int(pdata);

		if (XGMAC_GET_BITS(mac_isr, MAC_ISR, TSIS)) {
			mac_tssr = XGMAC_IOREAD(pdata, MAC_TSSR);

			if (XGMAC_GET_BITS(mac_tssr, MAC_TSSR, TXTSC)) {
				/* Read Tx Timestamp to clear interrupt */
				pdata->tx_tstamp =
					hw_if->get_tx_tstamp(pdata);
				schedule_work(&pdata->tx_tstamp_work);
			}
		}
	}

	DBGPR("  DMA_ISR = %08x\n", XGMAC_IOREAD(pdata, DMA_ISR));

isr_done:
	return IRQ_HANDLED;
}

static irqreturn_t xgbe_dma_isr(int irq, void *data)
{
	struct xgbe_channel *channel = data;

	/* Per channel DMA interrupts are enabled, so we use the per
	 * channel napi structure and not the private data napi structure
	 */
	if (napi_schedule_prep(&channel->napi)) {
		/* Disable Tx and Rx interrupts */
		disable_irq_nosync(channel->dma_irq);

		/* Turn on polling */
		__napi_schedule(&channel->napi);
	}

	return IRQ_HANDLED;
}

static enum hrtimer_restart xgbe_tx_timer(struct hrtimer *timer)
{
	struct xgbe_channel *channel = container_of(timer,
						    struct xgbe_channel,
						    tx_timer);
	struct xgbe_prv_data *pdata = channel->pdata;
	struct napi_struct *napi;

	DBGPR("-->xgbe_tx_timer\n");

	napi = (pdata->per_channel_irq) ? &channel->napi : &pdata->napi;
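
	/* The coalescing timer fired: schedule the same napi the interrupt
	 * paths use so any pending Tx completions get processed.
	 */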
	if (napi_schedule_prep(napi)) {
		/* Disable Tx and Rx interrupts */
		if (pdata->per_channel_irq)
			disable_irq(channel->dma_irq);
		else
			xgbe_disable_rx_tx_ints(pdata);

		/* Turn on polling */
		__napi_schedule(napi);
	}

	channel->tx_timer_active = 0;

	DBGPR("<--xgbe_tx_timer\n");

	return HRTIMER_NORESTART;
}

static void xgbe_init_tx_timers(struct xgbe_prv_data *pdata)
{
	struct xgbe_channel *channel;
	unsigned int i;

	DBGPR("-->xgbe_init_tx_timers\n");

	channel = pdata->channel;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		if (!channel->tx_ring)
			break;

		DBGPR("  %s adding tx timer\n", channel->name);
		hrtimer_init(&channel->tx_timer, CLOCK_MONOTONIC,
			     HRTIMER_MODE_REL);
		channel->tx_timer.function = xgbe_tx_timer;
	}

	DBGPR("<--xgbe_init_tx_timers\n");
}

static void xgbe_stop_tx_timers(struct xgbe_prv_data *pdata)
{
	struct xgbe_channel *channel;
	unsigned int i;

	DBGPR("-->xgbe_stop_tx_timers\n");

	channel = pdata->channel;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		if (!channel->tx_ring)
			break;

		DBGPR("  %s deleting tx timer\n", channel->name);
		channel->tx_timer_active = 0;
		hrtimer_cancel(&channel->tx_timer);
	}

	DBGPR("<--xgbe_stop_tx_timers\n");
}

void xgbe_get_all_hw_features(struct xgbe_prv_data *pdata)
{
	unsigned int mac_hfr0, mac_hfr1, mac_hfr2;
	struct xgbe_hw_features *hw_feat = &pdata->hw_feat;

	DBGPR("-->xgbe_get_all_hw_features\n");

	mac_hfr0 = XGMAC_IOREAD(pdata, MAC_HWF0R);
	mac_hfr1 = XGMAC_IOREAD(pdata, MAC_HWF1R);
	mac_hfr2 = XGMAC_IOREAD(pdata, MAC_HWF2R);

	memset(hw_feat, 0, sizeof(*hw_feat));

	hw_feat->version = XGMAC_IOREAD(pdata, MAC_VR);

	/* Hardware feature register 0 */
	hw_feat->gmii        = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, GMIISEL);
	hw_feat->vlhash      = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, VLHASH);
	hw_feat->sma         = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, SMASEL);
	hw_feat->rwk         = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, RWKSEL);
	hw_feat->mgk         = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, MGKSEL);
	hw_feat->mmc         = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, MMCSEL);
	hw_feat->aoe         = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, ARPOFFSEL);
	hw_feat->ts          = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, TSSEL);
	hw_feat->eee         = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, EEESEL);
	hw_feat->tx_coe      = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, TXCOESEL);
	hw_feat->rx_coe      = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, RXCOESEL);
	hw_feat->addn_mac    = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R,
					      ADDMACADRSEL);
	hw_feat->ts_src      = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, TSSTSSEL);
	hw_feat->sa_vlan_ins = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, SAVLANINS);

	/* Hardware feature register 1 */
	hw_feat->rx_fifo_size    = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
						  RXFIFOSIZE);
	hw_feat->tx_fifo_size    = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
						  TXFIFOSIZE);
	hw_feat->dcb             = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, DCBEN);
	hw_feat->sph             = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, SPHEN);
	hw_feat->tso             = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, TSOEN);
	hw_feat->dma_debug       = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, DBGMEMA);
	hw_feat->rss             = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, RSSEN);
	hw_feat->tc_cnt          = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, NUMTC);
	hw_feat->hash_table_size = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
						  HASHTBLSZ);
	hw_feat->l3l4_filter_num = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
						  L3L4FNUM);

	/* Hardware feature register 2 */
	hw_feat->rx_q_cnt     = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, RXQCNT);
	hw_feat->tx_q_cnt     = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, TXQCNT);
	hw_feat->rx_ch_cnt    = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, RXCHCNT);
	hw_feat->tx_ch_cnt    = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, TXCHCNT);
	hw_feat->pps_out_num  = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, PPSOUTNUM);
	hw_feat->aux_snap_num = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, AUXSNAPNUM);

	/* Translate the Hash Table size into actual number */
	switch (hw_feat->hash_table_size) {
	case 0:
		break;
	case 1:
		hw_feat->hash_table_size = 64;
		break;
	case 2:
		hw_feat->hash_table_size = 128;
		break;
	case 3:
		hw_feat->hash_table_size = 256;
		break;
	}

	/* The Queue, Channel and TC counts are zero based so increment them
	 * to get the actual number
	 */
	hw_feat->rx_q_cnt++;
	hw_feat->tx_q_cnt++;
	hw_feat->rx_ch_cnt++;
	hw_feat->tx_ch_cnt++;
	hw_feat->tc_cnt++;

	DBGPR("<--xgbe_get_all_hw_features\n");
}

static void xgbe_napi_enable(struct xgbe_prv_data *pdata, unsigned int add)
{
	struct xgbe_channel *channel;
	unsigned int i;

	if (pdata->per_channel_irq) {
		channel = pdata->channel;
		for (i = 0; i < pdata->channel_count; i++, channel++) {
			if (add)
				netif_napi_add(pdata->netdev, &channel->napi,
					       xgbe_one_poll, NAPI_POLL_WEIGHT);

			napi_enable(&channel->napi);
		}
	} else {
		if (add)
			netif_napi_add(pdata->netdev, &pdata->napi,
				       xgbe_all_poll, NAPI_POLL_WEIGHT);

		napi_enable(&pdata->napi);
	}
}

static void xgbe_napi_disable(struct xgbe_prv_data *pdata, unsigned int del)
{
	struct xgbe_channel *channel;
	unsigned int i;

	if (pdata->per_channel_irq) {
		channel = pdata->channel;
		for (i = 0; i < pdata->channel_count; i++, channel++) {
			napi_disable(&channel->napi);

			if (del)
				netif_napi_del(&channel->napi);
		}
	} else {
		napi_disable(&pdata->napi);

		if (del)
			netif_napi_del(&pdata->napi);
	}
}

static int xgbe_request_irqs(struct xgbe_prv_data *pdata)
{
	struct xgbe_channel *channel;
	struct net_device *netdev = pdata->netdev;
	unsigned int i;
	int ret;

	ret = devm_request_irq(pdata->dev, pdata->dev_irq, xgbe_isr, 0,
			       netdev->name, pdata);
	if (ret) {
		netdev_alert(netdev, "error requesting irq %d\n",
			     pdata->dev_irq);
		return ret;
	}

	if (!pdata->per_channel_irq)
		return 0;

	channel = pdata->channel;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		snprintf(channel->dma_irq_name,
			 sizeof(channel->dma_irq_name) - 1,
			 "%s-TxRx-%u", netdev_name(netdev),
			 channel->queue_index);

		ret = devm_request_irq(pdata->dev, channel->dma_irq,
				       xgbe_dma_isr, 0,
				       channel->dma_irq_name, channel);
		if (ret) {
			netdev_alert(netdev, "error requesting irq %d\n",
				     channel->dma_irq);
			goto err_irq;
		}
	}

	return 0;

err_irq:
	/* Using an unsigned int, 'i' will go to UINT_MAX and exit */
	for (i--, channel--; i < pdata->channel_count; i--, channel--)
		devm_free_irq(pdata->dev, channel->dma_irq, channel);

	devm_free_irq(pdata->dev, pdata->dev_irq, pdata);

	return ret;
}

static void xgbe_free_irqs(struct xgbe_prv_data *pdata)
{
	struct xgbe_channel *channel;
	unsigned int i;

	devm_free_irq(pdata->dev, pdata->dev_irq, pdata);

	if (!pdata->per_channel_irq)
		return;

	channel = pdata->channel;
	for (i = 0; i < pdata->channel_count; i++, channel++)
		devm_free_irq(pdata->dev, channel->dma_irq, channel);
}

void xgbe_init_tx_coalesce(struct xgbe_prv_data *pdata)
{
	struct xgbe_hw_if *hw_if = &pdata->hw_if;

	DBGPR("-->xgbe_init_tx_coalesce\n");

	pdata->tx_usecs = XGMAC_INIT_DMA_TX_USECS;
	pdata->tx_frames = XGMAC_INIT_DMA_TX_FRAMES;

	hw_if->config_tx_coalesce(pdata);

	DBGPR("<--xgbe_init_tx_coalesce\n");
}

void xgbe_init_rx_coalesce(struct xgbe_prv_data *pdata)
{
	struct xgbe_hw_if *hw_if = &pdata->hw_if;

	DBGPR("-->xgbe_init_rx_coalesce\n");

	pdata->rx_riwt = hw_if->usec_to_riwt(pdata, XGMAC_INIT_DMA_RX_USECS);
	pdata->rx_frames = XGMAC_INIT_DMA_RX_FRAMES;

	hw_if->config_rx_coalesce(pdata);

	DBGPR("<--xgbe_init_rx_coalesce\n");
}

static void xgbe_free_tx_data(struct xgbe_prv_data *pdata)
{
	struct xgbe_desc_if *desc_if = &pdata->desc_if;
	struct xgbe_channel *channel;
	struct xgbe_ring *ring;
	struct xgbe_ring_data *rdata;
	unsigned int i, j;

	DBGPR("-->xgbe_free_tx_data\n");

	channel = pdata->channel;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		ring = channel->tx_ring;
		if (!ring)
			break;

		for (j = 0; j < ring->rdesc_count; j++) {
			rdata = XGBE_GET_DESC_DATA(ring, j);
			desc_if->unmap_rdata(pdata, rdata);
		}
	}

	DBGPR("<--xgbe_free_tx_data\n");
}

static void xgbe_free_rx_data(struct xgbe_prv_data *pdata)
{
	struct xgbe_desc_if *desc_if = &pdata->desc_if;
	struct xgbe_channel *channel;
	struct xgbe_ring *ring;
	struct xgbe_ring_data *rdata;
	unsigned int i, j;

	DBGPR("-->xgbe_free_rx_data\n");

	channel = pdata->channel;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		ring = channel->rx_ring;
		if (!ring)
			break;

		for (j = 0; j < ring->rdesc_count; j++) {
			rdata = XGBE_GET_DESC_DATA(ring, j);
			desc_if->unmap_rdata(pdata, rdata);
		}
	}

	DBGPR("<--xgbe_free_rx_data\n");
}

static void xgbe_adjust_link(struct net_device *netdev)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	struct phy_device *phydev = pdata->phydev;
	int new_state = 0;

	if (!phydev)
		return;

	if (phydev->link) {
		/* Flow control support */
		if (pdata->pause_autoneg) {
			if (phydev->pause || phydev->asym_pause) {
				pdata->tx_pause = 1;
				pdata->rx_pause = 1;
			} else {
				pdata->tx_pause = 0;
				pdata->rx_pause = 0;
			}
		}

		if (pdata->tx_pause != pdata->phy_tx_pause) {
			hw_if->config_tx_flow_control(pdata);
			pdata->phy_tx_pause = pdata->tx_pause;
		}

		if (pdata->rx_pause != pdata->phy_rx_pause) {
			hw_if->config_rx_flow_control(pdata);
			pdata->phy_rx_pause = pdata->rx_pause;
		}

		/* Speed support */
		if (phydev->speed != pdata->phy_speed) {
			new_state = 1;

			switch (phydev->speed) {
			case SPEED_10000:
				hw_if->set_xgmii_speed(pdata);
				break;

			case SPEED_2500:
				hw_if->set_gmii_2500_speed(pdata);
				break;

			case SPEED_1000:
				hw_if->set_gmii_speed(pdata);
				break;
			}
			pdata->phy_speed = phydev->speed;
		}

		if (phydev->link != pdata->phy_link) {
			new_state = 1;
			pdata->phy_link = 1;
		}
	} else if (pdata->phy_link) {
		new_state = 1;
		pdata->phy_link = 0;
		pdata->phy_speed = SPEED_UNKNOWN;
	}

	if (new_state)
		phy_print_status(phydev);
}

static int xgbe_phy_init(struct xgbe_prv_data *pdata)
{
	struct net_device *netdev = pdata->netdev;
	struct phy_device *phydev = pdata->phydev;
	int ret;

	pdata->phy_link = -1;
	pdata->phy_speed = SPEED_UNKNOWN;
	pdata->phy_tx_pause = pdata->tx_pause;
	pdata->phy_rx_pause = pdata->rx_pause;

	ret = phy_connect_direct(netdev, phydev, &xgbe_adjust_link,
				 pdata->phy_mode);
	if (ret) {
		netdev_err(netdev, "phy_connect_direct failed\n");
		return ret;
	}

	if (!phydev->drv || (phydev->drv->phy_id == 0)) {
		netdev_err(netdev, "phy_id not valid\n");
		ret = -ENODEV;
		goto err_phy_connect;
	}
	DBGPR("  phy_connect_direct succeeded for PHY %s, link=%d\n",
	      dev_name(&phydev->dev), phydev->link);

	return 0;

err_phy_connect:
	phy_disconnect(phydev);

	return ret;
}

static void xgbe_phy_exit(struct xgbe_prv_data *pdata)
{
	if (!pdata->phydev)
		return;

	phy_disconnect(pdata->phydev);
}

int xgbe_powerdown(struct net_device *netdev, unsigned int caller)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	unsigned long flags;

	DBGPR("-->xgbe_powerdown\n");

	if (!netif_running(netdev) ||
	    (caller == XGMAC_IOCTL_CONTEXT && pdata->power_down)) {
		netdev_alert(netdev, "Device is already powered down\n");
		DBGPR("<--xgbe_powerdown\n");
		return -EINVAL;
	}

	spin_lock_irqsave(&pdata->lock, flags);

	if (caller == XGMAC_DRIVER_CONTEXT)
		netif_device_detach(netdev);

	netif_tx_stop_all_queues(netdev);

	hw_if->powerdown_tx(pdata);
	hw_if->powerdown_rx(pdata);

	xgbe_napi_disable(pdata, 0);

	phy_stop(pdata->phydev);

	pdata->power_down = 1;

	spin_unlock_irqrestore(&pdata->lock, flags);

	DBGPR("<--xgbe_powerdown\n");

	return 0;
}

int xgbe_powerup(struct net_device *netdev, unsigned int caller)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	unsigned long flags;

	DBGPR("-->xgbe_powerup\n");

	if (!netif_running(netdev) ||
	    (caller == XGMAC_IOCTL_CONTEXT && !pdata->power_down)) {
		netdev_alert(netdev, "Device is already powered up\n");
		DBGPR("<--xgbe_powerup\n");
		return -EINVAL;
	}

	spin_lock_irqsave(&pdata->lock, flags);

	pdata->power_down = 0;

	phy_start(pdata->phydev);

	xgbe_napi_enable(pdata, 0);

	hw_if->powerup_tx(pdata);
	hw_if->powerup_rx(pdata);

	if (caller == XGMAC_DRIVER_CONTEXT)
		netif_device_attach(netdev);

	netif_tx_start_all_queues(netdev);

	spin_unlock_irqrestore(&pdata->lock, flags);

	DBGPR("<--xgbe_powerup\n");

	return 0;
}

static int xgbe_start(struct xgbe_prv_data *pdata)
{
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	struct net_device *netdev = pdata->netdev;
	int ret;

	DBGPR("-->xgbe_start\n");

	xgbe_set_rx_mode(netdev);

	hw_if->init(pdata);

	phy_start(pdata->phydev);

	xgbe_napi_enable(pdata, 1);

	ret = xgbe_request_irqs(pdata);
	if (ret)
		goto err_napi;

	hw_if->enable_tx(pdata);
	hw_if->enable_rx(pdata);
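
	/* The Tx coalescing hrtimers are only initialized here; they are
	 * armed later, from the transmit path, when Tx coalescing is in
	 * use.
	 */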
	xgbe_init_tx_timers(pdata);

	netif_tx_start_all_queues(netdev);

	DBGPR("<--xgbe_start\n");

	return 0;

err_napi:
	xgbe_napi_disable(pdata, 1);

	phy_stop(pdata->phydev);

	hw_if->exit(pdata);

	return ret;
}

static void xgbe_stop(struct xgbe_prv_data *pdata)
{
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	struct xgbe_channel *channel;
	struct net_device *netdev = pdata->netdev;
	struct netdev_queue *txq;
	unsigned int i;

	DBGPR("-->xgbe_stop\n");

	netif_tx_stop_all_queues(netdev);

	xgbe_stop_tx_timers(pdata);

	hw_if->disable_tx(pdata);
	hw_if->disable_rx(pdata);

	xgbe_free_irqs(pdata);

	xgbe_napi_disable(pdata, 1);

	phy_stop(pdata->phydev);

	hw_if->exit(pdata);

	channel = pdata->channel;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		if (!channel->tx_ring)
			continue;

		txq = netdev_get_tx_queue(netdev, channel->queue_index);
		netdev_tx_reset_queue(txq);
	}

	DBGPR("<--xgbe_stop\n");
}

static void xgbe_restart_dev(struct xgbe_prv_data *pdata)
{
	DBGPR("-->xgbe_restart_dev\n");

	/* If not running, "restart" will happen on open */
	if (!netif_running(pdata->netdev))
		return;

	xgbe_stop(pdata);

	xgbe_free_tx_data(pdata);
	xgbe_free_rx_data(pdata);

	xgbe_start(pdata);

	DBGPR("<--xgbe_restart_dev\n");
}

static void xgbe_restart(struct work_struct *work)
{
	struct xgbe_prv_data *pdata = container_of(work,
						   struct xgbe_prv_data,
						   restart_work);

	rtnl_lock();

	xgbe_restart_dev(pdata);

	rtnl_unlock();
}

static void xgbe_tx_tstamp(struct work_struct *work)
{
	struct xgbe_prv_data *pdata = container_of(work,
						   struct xgbe_prv_data,
						   tx_tstamp_work);
	struct skb_shared_hwtstamps hwtstamps;
	u64 nsec;
	unsigned long flags;

	if (pdata->tx_tstamp) {
		nsec = timecounter_cyc2time(&pdata->tstamp_tc,
					    pdata->tx_tstamp);

		memset(&hwtstamps, 0, sizeof(hwtstamps));
		hwtstamps.hwtstamp = ns_to_ktime(nsec);
		skb_tstamp_tx(pdata->tx_tstamp_skb, &hwtstamps);
	}

	dev_kfree_skb_any(pdata->tx_tstamp_skb);

	spin_lock_irqsave(&pdata->tstamp_lock, flags);
	pdata->tx_tstamp_skb = NULL;
	spin_unlock_irqrestore(&pdata->tstamp_lock, flags);
}

static int xgbe_get_hwtstamp_settings(struct xgbe_prv_data *pdata,
				      struct ifreq *ifreq)
{
	if (copy_to_user(ifreq->ifr_data, &pdata->tstamp_config,
			 sizeof(pdata->tstamp_config)))
		return -EFAULT;

	return 0;
}

static int xgbe_set_hwtstamp_settings(struct xgbe_prv_data *pdata,
				      struct ifreq *ifreq)
{
	struct hwtstamp_config config;
	unsigned int mac_tscr;

	if (copy_from_user(&config, ifreq->ifr_data, sizeof(config)))
		return -EFAULT;

	if (config.flags)
		return -EINVAL;

	mac_tscr = 0;

	switch (config.tx_type) {
	case HWTSTAMP_TX_OFF:
		break;

	case HWTSTAMP_TX_ON:
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
		break;

	default:
		return -ERANGE;
	}

	switch (config.rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		break;

	case HWTSTAMP_FILTER_ALL:
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENALL, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
		break;
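
	/* Note: the PTP v2 L4 cases below intentionally fall through to the
	 * matching v1 cases, which set the bits common to both versions.
	 */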

	/* PTP v2, UDP, any kind of event packet */
	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1);
	/* PTP v1, UDP, any kind of event packet */
	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, SNAPTYPSEL, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
		break;

	/* PTP v2, UDP, Sync packet */
	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1);
	/* PTP v1, UDP, Sync packet */
	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
		break;

	/* PTP v2, UDP, Delay_req packet */
	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1);
	/* PTP v1, UDP, Delay_req packet */
	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSMSTRENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
		break;

	/* 802.1AS, Ethernet, any kind of event packet */
	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, AV8021ASMEN, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, SNAPTYPSEL, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
		break;

	/* 802.1AS, Ethernet, Sync packet */
	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, AV8021ASMEN, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
		break;

	/* 802.1AS, Ethernet, Delay_req packet */
	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, AV8021ASMEN, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSMSTRENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
		break;

	/* PTP v2/802.1AS, any layer, any kind of event packet */
	case HWTSTAMP_FILTER_PTP_V2_EVENT:
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, SNAPTYPSEL, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
		break;

	/* PTP v2/802.1AS, any layer, Sync packet */
	case HWTSTAMP_FILTER_PTP_V2_SYNC:
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
		break;

	/* PTP v2/802.1AS, any layer, Delay_req packet */
	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSMSTRENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
		break;

	default:
		return -ERANGE;
	}

	pdata->hw_if.config_tstamp(pdata, mac_tscr);

	memcpy(&pdata->tstamp_config, &config, sizeof(config));

	return 0;
}

static void xgbe_prep_tx_tstamp(struct xgbe_prv_data *pdata,
				struct sk_buff *skb,
				struct xgbe_packet_data *packet)
{
	unsigned long flags;

	if (XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES, PTP)) {
		spin_lock_irqsave(&pdata->tstamp_lock, flags);
		if (pdata->tx_tstamp_skb) {
			/* Another timestamp in progress, ignore this one */
			XGMAC_SET_BITS(packet->attributes,
				       TX_PACKET_ATTRIBUTES, PTP, 0);
		} else {
			pdata->tx_tstamp_skb = skb_get(skb);
			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
		}
		spin_unlock_irqrestore(&pdata->tstamp_lock, flags);
	}

	if (!XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES, PTP))
		skb_tx_timestamp(skb);
}

static void xgbe_prep_vlan(struct sk_buff *skb, struct xgbe_packet_data *packet)
{
	if (skb_vlan_tag_present(skb))
		packet->vlan_ctag = skb_vlan_tag_get(skb);
}

static int xgbe_prep_tso(struct sk_buff *skb, struct xgbe_packet_data *packet)
{
	int ret;

	if (!XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
			    TSO_ENABLE))
		return 0;

	ret = skb_cow_head(skb, 0);
	if (ret)
		return ret;

	packet->header_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
	packet->tcp_header_len = tcp_hdrlen(skb);
	packet->tcp_payload_len = skb->len - packet->header_len;
	packet->mss = skb_shinfo(skb)->gso_size;
	DBGPR("  packet->header_len=%u\n", packet->header_len);
	DBGPR("  packet->tcp_header_len=%u, packet->tcp_payload_len=%u\n",
	      packet->tcp_header_len, packet->tcp_payload_len);
	DBGPR("  packet->mss=%u\n", packet->mss);

	/* Update the number of packets that will ultimately be transmitted
	 * along with the extra bytes for each extra packet
	 */
	packet->tx_packets = skb_shinfo(skb)->gso_segs;
	packet->tx_bytes += (packet->tx_packets - 1) * packet->header_len;

	return 0;
}

static int xgbe_is_tso(struct sk_buff *skb)
{
	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return 0;

	if (!skb_is_gso(skb))
		return 0;

	DBGPR("  TSO packet to be processed\n");

	return 1;
}

static void xgbe_packet_info(struct xgbe_prv_data *pdata,
			     struct xgbe_ring *ring, struct sk_buff *skb,
			     struct xgbe_packet_data *packet)
{
	struct skb_frag_struct *frag;
	unsigned int context_desc;
	unsigned int len;
	unsigned int i;

	packet->skb = skb;

	context_desc = 0;
	packet->rdesc_count = 0;

	packet->tx_packets = 1;
	packet->tx_bytes = skb->len;

	if (xgbe_is_tso(skb)) {
		/* TSO requires an extra descriptor if mss is different */
		if (skb_shinfo(skb)->gso_size != ring->tx.cur_mss) {
			context_desc = 1;
			packet->rdesc_count++;
		}

		/* TSO requires an extra descriptor for TSO header */
		packet->rdesc_count++;

		XGMAC_SET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
			       TSO_ENABLE, 1);
		XGMAC_SET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
			       CSUM_ENABLE, 1);
	} else if (skb->ip_summed == CHECKSUM_PARTIAL)
		XGMAC_SET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
			       CSUM_ENABLE, 1);

	if (skb_vlan_tag_present(skb)) {
		/* VLAN requires an extra descriptor if tag is different */
		if (skb_vlan_tag_get(skb) != ring->tx.cur_vlan_ctag)
			/* We can share with the TSO context descriptor */
			if (!context_desc) {
				context_desc = 1;
				packet->rdesc_count++;
			}

		XGMAC_SET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
			       VLAN_CTAG, 1);
	}

	if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
	    (pdata->tstamp_config.tx_type == HWTSTAMP_TX_ON))
		XGMAC_SET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
			       PTP, 1);

	for (len = skb_headlen(skb); len;) {
		packet->rdesc_count++;
		len -= min_t(unsigned int, len, XGBE_TX_MAX_BUF_SIZE);
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		frag = &skb_shinfo(skb)->frags[i];
		for (len = skb_frag_size(frag); len; ) {
			packet->rdesc_count++;
			len -= min_t(unsigned int, len, XGBE_TX_MAX_BUF_SIZE);
		}
	}
}

static int xgbe_open(struct net_device *netdev)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	struct xgbe_desc_if *desc_if = &pdata->desc_if;
	int ret;

	DBGPR("-->xgbe_open\n");

	/* Initialize the phy */
	ret = xgbe_phy_init(pdata);
	if (ret)
		return ret;

	/* Enable the clocks */
	ret = clk_prepare_enable(pdata->sysclk);
	if (ret) {
		netdev_alert(netdev, "dma clk_prepare_enable failed\n");
		goto err_phy_init;
	}

	ret = clk_prepare_enable(pdata->ptpclk);
	if (ret) {
		netdev_alert(netdev, "ptp clk_prepare_enable failed\n");
		goto err_sysclk;
	}

	/* Calculate the Rx buffer size before allocating rings */
	ret = xgbe_calc_rx_buf_size(netdev, netdev->mtu);
	if (ret < 0)
		goto err_ptpclk;
	pdata->rx_buf_size = ret;

	/* Allocate the channel and ring structures */
	ret = xgbe_alloc_channels(pdata);
	if (ret)
		goto err_ptpclk;

	/* Allocate the ring descriptors and buffers */
	ret = desc_if->alloc_ring_resources(pdata);
	if (ret)
		goto err_channels;

	/* Initialize the device restart and Tx timestamp work struct */
	INIT_WORK(&pdata->restart_work, xgbe_restart);
	INIT_WORK(&pdata->tx_tstamp_work, xgbe_tx_tstamp);

	ret = xgbe_start(pdata);
	if (ret)
		goto err_rings;

	DBGPR("<--xgbe_open\n");

	return 0;

err_rings:
	desc_if->free_ring_resources(pdata);

err_channels:
	xgbe_free_channels(pdata);

err_ptpclk:
	clk_disable_unprepare(pdata->ptpclk);

err_sysclk:
	clk_disable_unprepare(pdata->sysclk);

err_phy_init:
	xgbe_phy_exit(pdata);

	return ret;
}

static int xgbe_close(struct net_device *netdev)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	struct xgbe_desc_if *desc_if = &pdata->desc_if;

	DBGPR("-->xgbe_close\n");

	/* Stop the device */
	xgbe_stop(pdata);

	/* Free the ring descriptors and buffers */
	desc_if->free_ring_resources(pdata);

	/* Free the channel and ring structures */
	xgbe_free_channels(pdata);

	/* Disable the clocks */
	clk_disable_unprepare(pdata->ptpclk);
	clk_disable_unprepare(pdata->sysclk);

	/* Release the phy */
	xgbe_phy_exit(pdata);

	DBGPR("<--xgbe_close\n");

	return 0;
}
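
/* ndo_start_xmit entry point: builds the preliminary packet info, maps
 * the skb for DMA and hands the descriptors to the hardware layer.
 */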
static int xgbe_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	struct xgbe_desc_if *desc_if = &pdata->desc_if;
	struct xgbe_channel *channel;
	struct xgbe_ring *ring;
	struct xgbe_packet_data *packet;
	struct netdev_queue *txq;
	int ret;

	DBGPR("-->xgbe_xmit: skb->len = %d\n", skb->len);

	channel = pdata->channel + skb->queue_mapping;
	txq = netdev_get_tx_queue(netdev, channel->queue_index);
	ring = channel->tx_ring;
	packet = &ring->packet_data;

	ret = NETDEV_TX_OK;

	if (skb->len == 0) {
		netdev_err(netdev, "empty skb received from stack\n");
		dev_kfree_skb_any(skb);
		goto tx_netdev_return;
	}

	/* Calculate preliminary packet info */
	memset(packet, 0, sizeof(*packet));
	xgbe_packet_info(pdata, ring, skb, packet);

	/* Check that there are enough descriptors available */
	ret = xgbe_maybe_stop_tx_queue(channel, ring, packet->rdesc_count);
	if (ret)
		goto tx_netdev_return;

	ret = xgbe_prep_tso(skb, packet);
	if (ret) {
		netdev_err(netdev, "error processing TSO packet\n");
		dev_kfree_skb_any(skb);
		goto tx_netdev_return;
	}
	xgbe_prep_vlan(skb, packet);

	if (!desc_if->map_tx_skb(channel, skb)) {
		dev_kfree_skb_any(skb);
		goto tx_netdev_return;
	}

	xgbe_prep_tx_tstamp(pdata, skb, packet);

	/* Report on the actual number of bytes (to be) sent */
	netdev_tx_sent_queue(txq, packet->tx_bytes);

	/* Configure required descriptor fields for transmission */
	hw_if->dev_xmit(channel);

#ifdef XGMAC_ENABLE_TX_PKT_DUMP
	xgbe_print_pkt(netdev, skb, true);
#endif

	/* Stop the queue in advance if there may not be enough descriptors */
	xgbe_maybe_stop_tx_queue(channel, ring, XGBE_TX_MAX_DESCS);

	ret = NETDEV_TX_OK;

tx_netdev_return:
	return ret;
}

static void xgbe_set_rx_mode(struct net_device *netdev)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	unsigned int pr_mode, am_mode;

	DBGPR("-->xgbe_set_rx_mode\n");

	pr_mode = ((netdev->flags & IFF_PROMISC) != 0);
	am_mode = ((netdev->flags & IFF_ALLMULTI) != 0);

	hw_if->set_promiscuous_mode(pdata, pr_mode);
	hw_if->set_all_multicast_mode(pdata, am_mode);

	hw_if->add_mac_addresses(pdata);

	DBGPR("<--xgbe_set_rx_mode\n");
}

static int xgbe_set_mac_address(struct net_device *netdev, void *addr)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	struct sockaddr *saddr = addr;

	DBGPR("-->xgbe_set_mac_address\n");

	if (!is_valid_ether_addr(saddr->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(netdev->dev_addr, saddr->sa_data, netdev->addr_len);

	hw_if->set_mac_address(pdata, netdev->dev_addr);

	DBGPR("<--xgbe_set_mac_address\n");

	return 0;
}

static int xgbe_ioctl(struct net_device *netdev, struct ifreq *ifreq, int cmd)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	int ret;

	switch (cmd) {
	case SIOCGHWTSTAMP:
		ret = xgbe_get_hwtstamp_settings(pdata, ifreq);
		break;

	case SIOCSHWTSTAMP:
		ret = xgbe_set_hwtstamp_settings(pdata, ifreq);
		break;

	default:
		ret = -EOPNOTSUPP;
	}

	return ret;
}
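
/* ndo_change_mtu: recompute the Rx buffer size for the new MTU and
 * restart the device so the rings are rebuilt with that size.
 */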
static int xgbe_change_mtu(struct net_device *netdev, int mtu)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	int ret;

	DBGPR("-->xgbe_change_mtu\n");

	ret = xgbe_calc_rx_buf_size(netdev, mtu);
	if (ret < 0)
		return ret;

	pdata->rx_buf_size = ret;
	netdev->mtu = mtu;

	xgbe_restart_dev(pdata);

	DBGPR("<--xgbe_change_mtu\n");

	return 0;
}

static struct rtnl_link_stats64 *xgbe_get_stats64(struct net_device *netdev,
						  struct rtnl_link_stats64 *s)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	struct xgbe_mmc_stats *pstats = &pdata->mmc_stats;

	DBGPR("-->%s\n", __func__);

	pdata->hw_if.read_mmc_stats(pdata);

	s->rx_packets = pstats->rxframecount_gb;
	s->rx_bytes = pstats->rxoctetcount_gb;
	s->rx_errors = pstats->rxframecount_gb -
		       pstats->rxbroadcastframes_g -
		       pstats->rxmulticastframes_g -
		       pstats->rxunicastframes_g;
	s->multicast = pstats->rxmulticastframes_g;
	s->rx_length_errors = pstats->rxlengtherror;
	s->rx_crc_errors = pstats->rxcrcerror;
	s->rx_fifo_errors = pstats->rxfifooverflow;

	s->tx_packets = pstats->txframecount_gb;
	s->tx_bytes = pstats->txoctetcount_gb;
	s->tx_errors = pstats->txframecount_gb - pstats->txframecount_g;
	s->tx_dropped = netdev->stats.tx_dropped;

	DBGPR("<--%s\n", __func__);

	return s;
}

static int xgbe_vlan_rx_add_vid(struct net_device *netdev, __be16 proto,
				u16 vid)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	struct xgbe_hw_if *hw_if = &pdata->hw_if;

	DBGPR("-->%s\n", __func__);

	set_bit(vid, pdata->active_vlans);
	hw_if->update_vlan_hash_table(pdata);

	DBGPR("<--%s\n", __func__);

	return 0;
}

static int xgbe_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto,
				 u16 vid)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	struct xgbe_hw_if *hw_if = &pdata->hw_if;

	DBGPR("-->%s\n", __func__);

	clear_bit(vid, pdata->active_vlans);
	hw_if->update_vlan_hash_table(pdata);

	DBGPR("<--%s\n", __func__);

	return 0;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void xgbe_poll_controller(struct net_device *netdev)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	struct xgbe_channel *channel;
	unsigned int i;

	DBGPR("-->xgbe_poll_controller\n");

	if (pdata->per_channel_irq) {
		channel = pdata->channel;
		for (i = 0; i < pdata->channel_count; i++, channel++)
			xgbe_dma_isr(channel->dma_irq, channel);
	} else {
		disable_irq(pdata->dev_irq);
		xgbe_isr(pdata->dev_irq, pdata);
		enable_irq(pdata->dev_irq);
	}

	DBGPR("<--xgbe_poll_controller\n");
}
#endif /* End CONFIG_NET_POLL_CONTROLLER */

static int xgbe_setup_tc(struct net_device *netdev, u8 tc)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	unsigned int offset, queue;
	u8 i;

	if (tc && (tc != pdata->hw_feat.tc_cnt))
		return -EINVAL;

	if (tc) {
		netdev_set_num_tc(netdev, tc);
		for (i = 0, queue = 0, offset = 0; i < tc; i++) {
			while ((queue < pdata->tx_q_count) &&
			       (pdata->q2tc_map[queue] == i))
				queue++;

			DBGPR("  TC%u using TXq%u-%u\n", i, offset, queue - 1);
			netdev_set_tc_queue(netdev, i, queue - offset, offset);
			offset = queue;
		}
	} else {
		netdev_reset_tc(netdev);
	}

	return 0;
}

static int xgbe_set_features(struct net_device *netdev,
			     netdev_features_t features)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	netdev_features_t rxhash, rxcsum, rxvlan, rxvlan_filter;
	int ret = 0;

	rxhash = pdata->netdev_features & NETIF_F_RXHASH;
	rxcsum = pdata->netdev_features & NETIF_F_RXCSUM;
	rxvlan = pdata->netdev_features & NETIF_F_HW_VLAN_CTAG_RX;
	rxvlan_filter = pdata->netdev_features & NETIF_F_HW_VLAN_CTAG_FILTER;

	if ((features & NETIF_F_RXHASH) && !rxhash)
		ret = hw_if->enable_rss(pdata);
	else if (!(features & NETIF_F_RXHASH) && rxhash)
		ret = hw_if->disable_rss(pdata);
	if (ret)
		return ret;

	if ((features & NETIF_F_RXCSUM) && !rxcsum)
		hw_if->enable_rx_csum(pdata);
	else if (!(features & NETIF_F_RXCSUM) && rxcsum)
		hw_if->disable_rx_csum(pdata);

	if ((features & NETIF_F_HW_VLAN_CTAG_RX) && !rxvlan)
		hw_if->enable_rx_vlan_stripping(pdata);
	else if (!(features & NETIF_F_HW_VLAN_CTAG_RX) && rxvlan)
		hw_if->disable_rx_vlan_stripping(pdata);

	if ((features & NETIF_F_HW_VLAN_CTAG_FILTER) && !rxvlan_filter)
		hw_if->enable_rx_vlan_filtering(pdata);
	else if (!(features & NETIF_F_HW_VLAN_CTAG_FILTER) && rxvlan_filter)
		hw_if->disable_rx_vlan_filtering(pdata);

	pdata->netdev_features = features;

	DBGPR("<--xgbe_set_features\n");

	return 0;
}

static const struct net_device_ops xgbe_netdev_ops = {
	.ndo_open		= xgbe_open,
	.ndo_stop		= xgbe_close,
	.ndo_start_xmit		= xgbe_xmit,
	.ndo_set_rx_mode	= xgbe_set_rx_mode,
	.ndo_set_mac_address	= xgbe_set_mac_address,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_do_ioctl		= xgbe_ioctl,
	.ndo_change_mtu		= xgbe_change_mtu,
	.ndo_get_stats64	= xgbe_get_stats64,
	.ndo_vlan_rx_add_vid	= xgbe_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= xgbe_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= xgbe_poll_controller,
#endif
	.ndo_setup_tc		= xgbe_setup_tc,
	.ndo_set_features	= xgbe_set_features,
};

struct net_device_ops *xgbe_get_netdev_ops(void)
{
	return (struct net_device_ops *)&xgbe_netdev_ops;
}

static void xgbe_rx_refresh(struct xgbe_channel *channel)
{
	struct xgbe_prv_data *pdata = channel->pdata;
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	struct xgbe_desc_if *desc_if = &pdata->desc_if;
	struct xgbe_ring *ring = channel->rx_ring;
	struct xgbe_ring_data *rdata;

	while (ring->dirty != ring->cur) {
		rdata = XGBE_GET_DESC_DATA(ring, ring->dirty);

		/* Reset rdata values */
		desc_if->unmap_rdata(pdata, rdata);

		if (desc_if->map_rx_buffer(pdata, ring, rdata))
			break;

		hw_if->rx_desc_reset(rdata);

		ring->dirty++;
	}

	/* Update the Rx Tail Pointer Register with address of
	 * the last cleaned entry */
	rdata = XGBE_GET_DESC_DATA(ring, ring->dirty - 1);
	XGMAC_DMA_IOWRITE(channel, DMA_CH_RDTR_LO,
			  lower_32_bits(rdata->rdesc_dma));
}
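
/* Allocate an skb for a received packet and copy the header (or the
 * whole packet, if it fit in the header buffer) into its linear area;
 * any remaining data is attached as a page fragment by the caller.
 */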
static struct sk_buff *xgbe_create_skb(struct xgbe_prv_data *pdata,
				       struct xgbe_ring_data *rdata,
				       unsigned int *len)
{
	struct net_device *netdev = pdata->netdev;
	struct sk_buff *skb;
	u8 *packet;
	unsigned int copy_len;

	skb = netdev_alloc_skb_ip_align(netdev, rdata->rx.hdr.dma_len);
	if (!skb)
		return NULL;

	packet = page_address(rdata->rx.hdr.pa.pages) +
		 rdata->rx.hdr.pa.pages_offset;
	copy_len = (rdata->rx.hdr_len) ? rdata->rx.hdr_len : *len;
	copy_len = min(rdata->rx.hdr.dma_len, copy_len);
	skb_copy_to_linear_data(skb, packet, copy_len);
	skb_put(skb, copy_len);

	*len -= copy_len;

	return skb;
}

static int xgbe_tx_poll(struct xgbe_channel *channel)
{
	struct xgbe_prv_data *pdata = channel->pdata;
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	struct xgbe_desc_if *desc_if = &pdata->desc_if;
	struct xgbe_ring *ring = channel->tx_ring;
	struct xgbe_ring_data *rdata;
	struct xgbe_ring_desc *rdesc;
	struct net_device *netdev = pdata->netdev;
	struct netdev_queue *txq;
	int processed = 0;
	unsigned int tx_packets = 0, tx_bytes = 0;

	DBGPR("-->xgbe_tx_poll\n");

	/* Nothing to do if there isn't a Tx ring for this channel */
	if (!ring)
		return 0;

	txq = netdev_get_tx_queue(netdev, channel->queue_index);

	while ((processed < XGBE_TX_DESC_MAX_PROC) &&
	       (ring->dirty != ring->cur)) {
		rdata = XGBE_GET_DESC_DATA(ring, ring->dirty);
		rdesc = rdata->rdesc;

		if (!hw_if->tx_complete(rdesc))
			break;

		/* Make sure descriptor fields are read after reading the OWN
		 * bit */
		rmb();

#ifdef XGMAC_ENABLE_TX_DESC_DUMP
		xgbe_dump_tx_desc(ring, ring->dirty, 1, 0);
#endif

		if (hw_if->is_last_desc(rdesc)) {
			tx_packets += rdata->tx.packets;
			tx_bytes += rdata->tx.bytes;
		}

		/* Free the SKB and reset the descriptor for re-use */
		desc_if->unmap_rdata(pdata, rdata);
		hw_if->tx_desc_reset(rdata);

		processed++;
		ring->dirty++;
	}

	if (!processed)
		return 0;

	netdev_tx_completed_queue(txq, tx_packets, tx_bytes);

	if ((ring->tx.queue_stopped == 1) &&
	    (xgbe_tx_avail_desc(ring) > XGBE_TX_DESC_MIN_FREE)) {
		ring->tx.queue_stopped = 0;
		netif_tx_wake_queue(txq);
	}

	DBGPR("<--xgbe_tx_poll: processed=%d\n", processed);

	return processed;
}

static int xgbe_rx_poll(struct xgbe_channel *channel, int budget)
{
	struct xgbe_prv_data *pdata = channel->pdata;
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	struct xgbe_ring *ring = channel->rx_ring;
	struct xgbe_ring_data *rdata;
	struct xgbe_packet_data *packet;
	struct net_device *netdev = pdata->netdev;
	struct napi_struct *napi;
	struct sk_buff *skb;
	struct skb_shared_hwtstamps *hwtstamps;
	unsigned int incomplete, error, context_next, context;
	unsigned int len, put_len, max_len;
	unsigned int received = 0;
	int packet_count = 0;

	DBGPR("-->xgbe_rx_poll: budget=%d\n", budget);

	/* Nothing to do if there isn't a Rx ring for this channel */
	if (!ring)
		return 0;

	napi = (pdata->per_channel_irq) ? &channel->napi : &pdata->napi;
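
	/* A packet can span multiple descriptors (header/data split,
	 * incomplete and context descriptors), so the loop below builds
	 * one skb across iterations and saves its state when the budget
	 * runs out mid-packet.
	 */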
static int xgbe_rx_poll(struct xgbe_channel *channel, int budget)
{
	struct xgbe_prv_data *pdata = channel->pdata;
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	struct xgbe_ring *ring = channel->rx_ring;
	struct xgbe_ring_data *rdata;
	struct xgbe_packet_data *packet;
	struct net_device *netdev = pdata->netdev;
	struct napi_struct *napi;
	struct sk_buff *skb;
	struct skb_shared_hwtstamps *hwtstamps;
	unsigned int incomplete, error, context_next, context;
	unsigned int len, put_len, max_len;
	unsigned int received = 0;
	int packet_count = 0;

	DBGPR("-->xgbe_rx_poll: budget=%d\n", budget);

	/* Nothing to do if there isn't a Rx ring for this channel */
	if (!ring)
		return 0;

	napi = (pdata->per_channel_irq) ? &channel->napi : &pdata->napi;

	rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
	packet = &ring->packet_data;
	while (packet_count < budget) {
		DBGPR("  cur = %d\n", ring->cur);

		/* First time in the loop, see if we need to restore state */
		if (!received && rdata->state_saved) {
			incomplete = rdata->state.incomplete;
			context_next = rdata->state.context_next;
			skb = rdata->state.skb;
			error = rdata->state.error;
			len = rdata->state.len;
		} else {
			memset(packet, 0, sizeof(*packet));
			incomplete = 0;
			context_next = 0;
			skb = NULL;
			error = 0;
			len = 0;
		}

read_again:
		rdata = XGBE_GET_DESC_DATA(ring, ring->cur);

		/* Refresh Rx buffers once more than an eighth of the ring
		 * is dirty
		 */
		if (xgbe_rx_dirty_desc(ring) > (XGBE_RX_DESC_CNT >> 3))
			xgbe_rx_refresh(channel);

		if (hw_if->dev_read(channel))
			break;

		received++;
		ring->cur++;

		incomplete = XGMAC_GET_BITS(packet->attributes,
					    RX_PACKET_ATTRIBUTES,
					    INCOMPLETE);
		context_next = XGMAC_GET_BITS(packet->attributes,
					      RX_PACKET_ATTRIBUTES,
					      CONTEXT_NEXT);
		context = XGMAC_GET_BITS(packet->attributes,
					 RX_PACKET_ATTRIBUTES,
					 CONTEXT);

		/* Earlier error, just drain the remaining data */
		if ((incomplete || context_next) && error)
			goto read_again;

		if (error || packet->errors) {
			if (packet->errors)
				DBGPR("Error in received packet\n");
			dev_kfree_skb(skb);
			goto next_packet;
		}

		if (!context) {
			put_len = rdata->rx.len - len;
			len += put_len;

			if (!skb) {
				dma_sync_single_for_cpu(pdata->dev,
							rdata->rx.hdr.dma,
							rdata->rx.hdr.dma_len,
							DMA_FROM_DEVICE);

				skb = xgbe_create_skb(pdata, rdata, &put_len);
				if (!skb) {
					error = 1;
					goto skip_data;
				}
			}

			if (put_len) {
				dma_sync_single_for_cpu(pdata->dev,
							rdata->rx.buf.dma,
							rdata->rx.buf.dma_len,
							DMA_FROM_DEVICE);

				skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
						rdata->rx.buf.pa.pages,
						rdata->rx.buf.pa.pages_offset,
						put_len, rdata->rx.buf.dma_len);
				rdata->rx.buf.pa.pages = NULL;
			}
		}

skip_data:
		if (incomplete || context_next)
			goto read_again;

		if (!skb)
			goto next_packet;

		/* Be sure we don't exceed the configured MTU */
		max_len = netdev->mtu + ETH_HLEN;
		if (!(netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
		    (skb->protocol == htons(ETH_P_8021Q)))
			max_len += VLAN_HLEN;

		if (skb->len > max_len) {
			DBGPR("packet length exceeds configured MTU\n");
			dev_kfree_skb(skb);
			goto next_packet;
		}

#ifdef XGMAC_ENABLE_RX_PKT_DUMP
		xgbe_print_pkt(netdev, skb, false);
#endif

		skb_checksum_none_assert(skb);
		if (XGMAC_GET_BITS(packet->attributes,
				   RX_PACKET_ATTRIBUTES, CSUM_DONE))
			skb->ip_summed = CHECKSUM_UNNECESSARY;

		if (XGMAC_GET_BITS(packet->attributes,
				   RX_PACKET_ATTRIBUTES, VLAN_CTAG))
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
					       packet->vlan_ctag);

		if (XGMAC_GET_BITS(packet->attributes,
				   RX_PACKET_ATTRIBUTES, RX_TSTAMP)) {
			u64 nsec;

			nsec = timecounter_cyc2time(&pdata->tstamp_tc,
						    packet->rx_tstamp);
			hwtstamps = skb_hwtstamps(skb);
			hwtstamps->hwtstamp = ns_to_ktime(nsec);
		}

		if (XGMAC_GET_BITS(packet->attributes,
				   RX_PACKET_ATTRIBUTES, RSS_HASH))
			skb_set_hash(skb, packet->rss_hash,
				     packet->rss_hash_type);

		skb->dev = netdev;
		skb->protocol = eth_type_trans(skb, netdev);
		skb_record_rx_queue(skb, channel->queue_index);
		skb_mark_napi_id(skb, napi);

		netdev->last_rx = jiffies;
		napi_gro_receive(napi, skb);

next_packet:
		packet_count++;
	}

	/* Check if we need to save state before leaving */
	if (received && (incomplete || context_next)) {
		rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
		rdata->state_saved = 1;
		rdata->state.incomplete = incomplete;
		rdata->state.context_next = context_next;
		rdata->state.skb = skb;
		rdata->state.len = len;
		rdata->state.error = error;
	}

	DBGPR("<--xgbe_rx_poll: packet_count = %d\n", packet_count);

	return packet_count;
}
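
/* Two NAPI poll handlers follow.  xgbe_one_poll() services a single
 * channel and is used when each DMA channel has its own interrupt
 * (pdata->per_channel_irq); the matching disable_irq() is expected in
 * the channel interrupt handler, with the IRQ re-enabled here once the
 * budget is no longer exhausted.  xgbe_all_poll() services every channel
 * from the shared device interrupt, dividing the budget evenly across
 * the Rx rings.
 */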
static int xgbe_one_poll(struct napi_struct *napi, int budget)
{
	struct xgbe_channel *channel = container_of(napi, struct xgbe_channel,
						    napi);
	int processed = 0;

	DBGPR("-->xgbe_one_poll: budget=%d\n", budget);

	/* Cleanup Tx ring first */
	xgbe_tx_poll(channel);

	/* Process Rx ring next */
	processed = xgbe_rx_poll(channel, budget);

	/* If we processed everything, we are done */
	if (processed < budget) {
		/* Turn off polling */
		napi_complete(napi);

		/* Enable Tx and Rx interrupts */
		enable_irq(channel->dma_irq);
	}

	DBGPR("<--xgbe_one_poll: received = %d\n", processed);

	return processed;
}

static int xgbe_all_poll(struct napi_struct *napi, int budget)
{
	struct xgbe_prv_data *pdata = container_of(napi, struct xgbe_prv_data,
						   napi);
	struct xgbe_channel *channel;
	int ring_budget;
	int processed, last_processed;
	unsigned int i;

	DBGPR("-->xgbe_all_poll: budget=%d\n", budget);

	processed = 0;
	ring_budget = budget / pdata->rx_ring_count;
	do {
		last_processed = processed;

		channel = pdata->channel;
		for (i = 0; i < pdata->channel_count; i++, channel++) {
			/* Cleanup Tx ring first */
			xgbe_tx_poll(channel);

			/* Process Rx ring next */
			if (ring_budget > (budget - processed))
				ring_budget = budget - processed;
			processed += xgbe_rx_poll(channel, ring_budget);
		}
	} while ((processed < budget) && (processed != last_processed));

	/* If we processed everything, we are done */
	if (processed < budget) {
		/* Turn off polling */
		napi_complete(napi);

		/* Enable Tx and Rx interrupts */
		xgbe_enable_rx_tx_ints(pdata);
	}

	DBGPR("<--xgbe_all_poll: received = %d\n", processed);

	return processed;
}
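
/* Debug helpers: xgbe_dump_tx_desc() is called from xgbe_tx_poll() when
 * XGMAC_ENABLE_TX_DESC_DUMP is defined, and xgbe_print_pkt() from
 * xgbe_rx_poll() when XGMAC_ENABLE_RX_PKT_DUMP is defined.  All three
 * helpers are non-static so other parts of the driver can use them.
 */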
"QUEUED FOR TX" : "TX BY DEVICE", 2167 le32_to_cpu(rdesc->desc0), le32_to_cpu(rdesc->desc1), 2168 le32_to_cpu(rdesc->desc2), le32_to_cpu(rdesc->desc3)); 2169 idx++; 2170 } 2171 } 2172 2173 void xgbe_dump_rx_desc(struct xgbe_ring *ring, struct xgbe_ring_desc *desc, 2174 unsigned int idx) 2175 { 2176 pr_alert("RX_NORMAL_DESC[%d RX BY DEVICE] = %08x:%08x:%08x:%08x\n", idx, 2177 le32_to_cpu(desc->desc0), le32_to_cpu(desc->desc1), 2178 le32_to_cpu(desc->desc2), le32_to_cpu(desc->desc3)); 2179 } 2180 2181 void xgbe_print_pkt(struct net_device *netdev, struct sk_buff *skb, bool tx_rx) 2182 { 2183 struct ethhdr *eth = (struct ethhdr *)skb->data; 2184 unsigned char *buf = skb->data; 2185 unsigned char buffer[128]; 2186 unsigned int i, j; 2187 2188 netdev_alert(netdev, "\n************** SKB dump ****************\n"); 2189 2190 netdev_alert(netdev, "%s packet of %d bytes\n", 2191 (tx_rx ? "TX" : "RX"), skb->len); 2192 2193 netdev_alert(netdev, "Dst MAC addr: %pM\n", eth->h_dest); 2194 netdev_alert(netdev, "Src MAC addr: %pM\n", eth->h_source); 2195 netdev_alert(netdev, "Protocol: 0x%04hx\n", ntohs(eth->h_proto)); 2196 2197 for (i = 0, j = 0; i < skb->len;) { 2198 j += snprintf(buffer + j, sizeof(buffer) - j, "%02hhx", 2199 buf[i++]); 2200 2201 if ((i % 32) == 0) { 2202 netdev_alert(netdev, " 0x%04x: %s\n", i - 32, buffer); 2203 j = 0; 2204 } else if ((i % 16) == 0) { 2205 buffer[j++] = ' '; 2206 buffer[j++] = ' '; 2207 } else if ((i % 4) == 0) { 2208 buffer[j++] = ' '; 2209 } 2210 } 2211 if (i % 32) 2212 netdev_alert(netdev, " 0x%04x: %s\n", i - (i % 32), buffer); 2213 2214 netdev_alert(netdev, "\n************** SKB dump ****************\n"); 2215 } 2216