/* drivers/net/ethernet/freescale/gianfar.c
 *
 * Gianfar Ethernet Driver
 * This driver is designed for the non-CPM ethernet controllers
 * on the 85xx and 83xx family of integrated processors
 * Based on 8260_io/fcc_enet.c
 *
 * Author: Andy Fleming
 * Maintainer: Kumar Gala
 * Modifier: Sandeep Gopalpet <sandeep.kumar@freescale.com>
 *
 * Copyright 2002-2009, 2011-2013 Freescale Semiconductor, Inc.
 * Copyright 2007 MontaVista Software, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * Gianfar: AKA Lambda Draconis, "Dragon"
 * RA 11 31 24.2
 * Dec +69 19 52
 * V 3.84
 * B-V +1.62
 *
 * Theory of operation
 *
 * The driver is initialized through of_device. Configuration information
 * is therefore conveyed through an OF-style device tree.
 *
 * The Gianfar Ethernet Controller uses a ring of buffer
 * descriptors. The beginning is indicated by a register
 * pointing to the physical address of the start of the ring.
 * The end is determined by a "wrap" bit being set in the
 * last descriptor of the ring.
 *
 * When a packet is received, the RXF bit in the
 * IEVENT register is set, triggering an interrupt when the
 * corresponding bit in the IMASK register is also set (if
 * interrupt coalescing is active, then the interrupt may not
 * happen immediately, but will wait until either a set number
 * of frames or amount of time have passed). In NAPI, the
 * interrupt handler will signal there is work to be done, and
 * exit. This method will start at the last known empty
 * descriptor, and process every subsequent descriptor until there
 * are none left with data (NAPI will stop after a set number of
 * packets to give time to other tasks, but will eventually
 * process all the packets). The data arrives inside a
 * pre-allocated skb, and so after the skb is passed up to the
 * stack, a new skb must be allocated, and the address field in
 * the buffer descriptor must be updated to indicate this new
 * skb.
 *
 * When the kernel requests that a packet be transmitted, the
 * driver starts where it left off last time, and points the
 * descriptor at the buffer which was passed in. The driver
 * then informs the DMA engine that there are packets ready to
 * be transmitted. Once the controller is finished transmitting
 * the packet, an interrupt may be triggered (under the same
 * conditions as for reception, but depending on the TXF bit).
 * The driver then cleans up the buffer.
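 *
 * As a rough illustration of the ring walk described above, here is a
 * minimal sketch (not the driver's actual receive path, and the helper
 * name is made up for illustration); it only assumes the rxbd8 layout
 * and the BD_LFLAG()/RXBD_EMPTY/RXBD_WRAP definitions from gianfar.h:
 *
 *	static struct rxbd8 *gfar_next_filled_rxbd(struct gfar_priv_rx_q *rxq,
 *						   struct rxbd8 *bdp)
 *	{
 *		u32 lstatus = be32_to_cpu(bdp->lstatus);
 *
 *		// The hardware still owns an EMPTY descriptor, so stop here.
 *		if (lstatus & BD_LFLAG(RXBD_EMPTY))
 *			return NULL;
 *
 *		// (The real code would pass the buffer up the stack and
 *		//  attach a fresh one before marking the BD empty again.)
 *
 *		// The WRAP bit in the last descriptor of the ring sends us
 *		// back to the ring base; otherwise just advance by one.
 *		if (lstatus & BD_LFLAG(RXBD_WRAP))
 *			return rxq->rx_bd_base;
 *		return bdp + 1;
 *	}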
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#define DEBUG

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/unistd.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_mdio.h>
#include <linux/of_platform.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/in.h>
#include <linux/net_tstamp.h>

#include <asm/io.h>
#ifdef CONFIG_PPC
#include <asm/reg.h>
#include <asm/mpc85xx.h>
#endif
#include <asm/irq.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <linux/crc32.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/phy_fixed.h>
#include <linux/of.h>
#include <linux/of_net.h>

#include "gianfar.h"

#define TX_TIMEOUT	(5*HZ)

const char gfar_driver_version[] = "2.0";

static int gfar_enet_open(struct net_device *dev);
static netdev_tx_t gfar_start_xmit(struct sk_buff *skb, struct net_device *dev);
static void gfar_reset_task(struct work_struct *work);
static void gfar_timeout(struct net_device *dev);
static int gfar_close(struct net_device *dev);
static void gfar_alloc_rx_buffs(struct gfar_priv_rx_q *rx_queue,
				int alloc_cnt);
static int gfar_set_mac_address(struct net_device *dev);
static int gfar_change_mtu(struct net_device *dev, int new_mtu);
static irqreturn_t gfar_error(int irq, void *dev_id);
static irqreturn_t gfar_transmit(int irq, void *dev_id);
static irqreturn_t gfar_interrupt(int irq, void *dev_id);
static void adjust_link(struct net_device *dev);
static noinline void gfar_update_link_state(struct gfar_private *priv);
static int init_phy(struct net_device *dev);
static int gfar_probe(struct platform_device *ofdev);
static int gfar_remove(struct platform_device *ofdev);
static void free_skb_resources(struct gfar_private *priv);
static void gfar_set_multi(struct net_device *dev);
static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr);
static void gfar_configure_serdes(struct net_device *dev);
static int gfar_poll_rx(struct napi_struct *napi, int budget);
static int gfar_poll_tx(struct napi_struct *napi, int budget);
static int gfar_poll_rx_sq(struct napi_struct *napi, int budget);
static int gfar_poll_tx_sq(struct napi_struct *napi, int budget);
#ifdef CONFIG_NET_POLL_CONTROLLER
static void gfar_netpoll(struct net_device *dev);
#endif
int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit);
static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue);
static void gfar_process_frame(struct net_device *ndev, struct sk_buff *skb);
static void gfar_halt_nodisable(struct gfar_private *priv);
static void gfar_clear_exact_match(struct net_device *dev);
static void gfar_set_mac_for_addr(struct net_device *dev, int num,
				  const u8 *addr);
static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);

MODULE_AUTHOR("Freescale Semiconductor, Inc");
MODULE_DESCRIPTION("Gianfar Ethernet Driver");
MODULE_LICENSE("GPL");

static void gfar_init_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
			    dma_addr_t buf)
{
	u32 lstatus;

	bdp->bufPtr = cpu_to_be32(buf);

	lstatus = BD_LFLAG(RXBD_EMPTY | RXBD_INTERRUPT);
	if (bdp == rx_queue->rx_bd_base + rx_queue->rx_ring_size - 1)
		lstatus |= BD_LFLAG(RXBD_WRAP);

	gfar_wmb();

	bdp->lstatus = cpu_to_be32(lstatus);
}

static void gfar_init_bds(struct net_device *ndev)
{
	struct gfar_private *priv = netdev_priv(ndev);
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	struct gfar_priv_tx_q *tx_queue = NULL;
	struct gfar_priv_rx_q *rx_queue = NULL;
	struct txbd8 *txbdp;
	u32 __iomem *rfbptr;
	int i, j;

	for (i = 0; i < priv->num_tx_queues; i++) {
		tx_queue = priv->tx_queue[i];
		/* Initialize some variables in our dev structure */
		tx_queue->num_txbdfree = tx_queue->tx_ring_size;
		tx_queue->dirty_tx = tx_queue->tx_bd_base;
		tx_queue->cur_tx = tx_queue->tx_bd_base;
		tx_queue->skb_curtx = 0;
		tx_queue->skb_dirtytx = 0;

		/* Initialize Transmit Descriptor Ring */
		txbdp = tx_queue->tx_bd_base;
		for (j = 0; j < tx_queue->tx_ring_size; j++) {
			txbdp->lstatus = 0;
			txbdp->bufPtr = 0;
			txbdp++;
		}

		/* Set the last descriptor in the ring to indicate wrap */
		txbdp--;
		txbdp->status = cpu_to_be16(be16_to_cpu(txbdp->status) |
					    TXBD_WRAP);
	}

	rfbptr = &regs->rfbptr0;
	for (i = 0; i < priv->num_rx_queues; i++) {
		rx_queue = priv->rx_queue[i];

		rx_queue->next_to_clean = 0;
		rx_queue->next_to_use = 0;
		rx_queue->next_to_alloc = 0;

		/* make sure next_to_clean != next_to_use after this
		 * by leaving at least 1 unused descriptor
		 */
		gfar_alloc_rx_buffs(rx_queue, gfar_rxbd_unused(rx_queue));

		rx_queue->rfbptr = rfbptr;
		rfbptr += 2;
	}
}

static int gfar_alloc_skb_resources(struct net_device *ndev)
{
	void *vaddr;
	dma_addr_t addr;
	int i, j;
	struct gfar_private *priv = netdev_priv(ndev);
	struct device *dev = priv->dev;
	struct gfar_priv_tx_q *tx_queue = NULL;
	struct gfar_priv_rx_q *rx_queue = NULL;

	priv->total_tx_ring_size = 0;
	for (i = 0; i < priv->num_tx_queues; i++)
		priv->total_tx_ring_size += priv->tx_queue[i]->tx_ring_size;

	priv->total_rx_ring_size = 0;
	for (i = 0; i < priv->num_rx_queues; i++)
		priv->total_rx_ring_size += priv->rx_queue[i]->rx_ring_size;

	/* Allocate memory for the buffer descriptors */
	vaddr = dma_alloc_coherent(dev,
				   (priv->total_tx_ring_size *
				    sizeof(struct txbd8)) +
				   (priv->total_rx_ring_size *
				    sizeof(struct rxbd8)),
				   &addr, GFP_KERNEL);
	if (!vaddr)
		return -ENOMEM;

	for (i = 0; i < priv->num_tx_queues; i++) {
		tx_queue = priv->tx_queue[i];
		tx_queue->tx_bd_base = vaddr;
		tx_queue->tx_bd_dma_base = addr;
		tx_queue->dev = ndev;
		/* enet DMA only understands physical addresses */
		addr += sizeof(struct txbd8) * tx_queue->tx_ring_size;
		vaddr += sizeof(struct txbd8) * tx_queue->tx_ring_size;
	}

	/* Start the rx descriptor ring where the tx ring leaves off */
	for (i = 0; i < priv->num_rx_queues; i++) {
		rx_queue = priv->rx_queue[i];
		rx_queue->rx_bd_base = vaddr;
		rx_queue->rx_bd_dma_base = addr;
		rx_queue->ndev = ndev;
		rx_queue->dev = dev;
		addr += sizeof(struct rxbd8) * rx_queue->rx_ring_size;
		vaddr += sizeof(struct rxbd8) * rx_queue->rx_ring_size;
	}
	/* Setup the skbuff rings */
	for (i = 0; i < priv->num_tx_queues; i++) {
		tx_queue = priv->tx_queue[i];
		tx_queue->tx_skbuff =
			kmalloc_array(tx_queue->tx_ring_size,
				      sizeof(*tx_queue->tx_skbuff),
				      GFP_KERNEL);
		if (!tx_queue->tx_skbuff)
			goto cleanup;

		for (j = 0; j < tx_queue->tx_ring_size; j++)
			tx_queue->tx_skbuff[j] = NULL;
	}

	for (i = 0; i < priv->num_rx_queues; i++) {
		rx_queue = priv->rx_queue[i];
		rx_queue->rx_buff = kcalloc(rx_queue->rx_ring_size,
					    sizeof(*rx_queue->rx_buff),
					    GFP_KERNEL);
		if (!rx_queue->rx_buff)
			goto cleanup;
	}

	gfar_init_bds(ndev);

	return 0;

cleanup:
	free_skb_resources(priv);
	return -ENOMEM;
}

static void gfar_init_tx_rx_base(struct gfar_private *priv)
{
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 __iomem *baddr;
	int i;

	baddr = &regs->tbase0;
	for (i = 0; i < priv->num_tx_queues; i++) {
		gfar_write(baddr, priv->tx_queue[i]->tx_bd_dma_base);
		baddr += 2;
	}

	baddr = &regs->rbase0;
	for (i = 0; i < priv->num_rx_queues; i++) {
		gfar_write(baddr, priv->rx_queue[i]->rx_bd_dma_base);
		baddr += 2;
	}
}

static void gfar_init_rqprm(struct gfar_private *priv)
{
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 __iomem *baddr;
	int i;

	baddr = &regs->rqprm0;
	for (i = 0; i < priv->num_rx_queues; i++) {
		gfar_write(baddr, priv->rx_queue[i]->rx_ring_size |
			   (DEFAULT_RX_LFC_THR << FBTHR_SHIFT));
		baddr++;
	}
}

static void gfar_rx_offload_en(struct gfar_private *priv)
{
	/* set this when rx hw offload (TOE) functions are being used */
	priv->uses_rxfcb = 0;

	if (priv->ndev->features & (NETIF_F_RXCSUM | NETIF_F_HW_VLAN_CTAG_RX))
		priv->uses_rxfcb = 1;

	if (priv->hwts_rx_en || priv->rx_filer_enable)
		priv->uses_rxfcb = 1;
}

static void gfar_mac_rx_config(struct gfar_private *priv)
{
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 rctrl = 0;

	if (priv->rx_filer_enable) {
		rctrl |= RCTRL_FILREN | RCTRL_PRSDEP_INIT;
		/* Program the RIR0 reg with the required distribution */
		if (priv->poll_mode == GFAR_SQ_POLLING)
			gfar_write(&regs->rir0, DEFAULT_2RXQ_RIR0);
		else /* GFAR_MQ_POLLING */
			gfar_write(&regs->rir0, DEFAULT_8RXQ_RIR0);
	}

	/* Restore PROMISC mode */
	if (priv->ndev->flags & IFF_PROMISC)
		rctrl |= RCTRL_PROM;

	if (priv->ndev->features & NETIF_F_RXCSUM)
		rctrl |= RCTRL_CHECKSUMMING;

	if (priv->extended_hash)
		rctrl |= RCTRL_EXTHASH | RCTRL_EMEN;

	if (priv->padding) {
		rctrl &= ~RCTRL_PAL_MASK;
		rctrl |= RCTRL_PADDING(priv->padding);
	}

	/* Enable HW time stamping if requested from user space */
	if (priv->hwts_rx_en)
		rctrl |= RCTRL_PRSDEP_INIT | RCTRL_TS_ENABLE;

	if (priv->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)
		rctrl |= RCTRL_VLEX | RCTRL_PRSDEP_INIT;

	/* Clear the LFC bit */
	gfar_write(&regs->rctrl, rctrl);
	/* Init flow control threshold values */
	gfar_init_rqprm(priv);
	gfar_write(&regs->ptv, DEFAULT_LFC_PTVVAL);
	rctrl |= RCTRL_LFC;

	/* Init rctrl based on our settings */
	gfar_write(&regs->rctrl, rctrl);
}

static void gfar_mac_tx_config(struct gfar_private *priv)
{
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 tctrl = 0;

	if (priv->ndev->features & NETIF_F_IP_CSUM)
		tctrl |= TCTRL_INIT_CSUM;

	if (priv->prio_sched_en)
		tctrl |=
TCTRL_TXSCHED_PRIO; 403 else { 404 tctrl |= TCTRL_TXSCHED_WRRS; 405 gfar_write(®s->tr03wt, DEFAULT_WRRS_WEIGHT); 406 gfar_write(®s->tr47wt, DEFAULT_WRRS_WEIGHT); 407 } 408 409 if (priv->ndev->features & NETIF_F_HW_VLAN_CTAG_TX) 410 tctrl |= TCTRL_VLINS; 411 412 gfar_write(®s->tctrl, tctrl); 413 } 414 415 static void gfar_configure_coalescing(struct gfar_private *priv, 416 unsigned long tx_mask, unsigned long rx_mask) 417 { 418 struct gfar __iomem *regs = priv->gfargrp[0].regs; 419 u32 __iomem *baddr; 420 421 if (priv->mode == MQ_MG_MODE) { 422 int i = 0; 423 424 baddr = ®s->txic0; 425 for_each_set_bit(i, &tx_mask, priv->num_tx_queues) { 426 gfar_write(baddr + i, 0); 427 if (likely(priv->tx_queue[i]->txcoalescing)) 428 gfar_write(baddr + i, priv->tx_queue[i]->txic); 429 } 430 431 baddr = ®s->rxic0; 432 for_each_set_bit(i, &rx_mask, priv->num_rx_queues) { 433 gfar_write(baddr + i, 0); 434 if (likely(priv->rx_queue[i]->rxcoalescing)) 435 gfar_write(baddr + i, priv->rx_queue[i]->rxic); 436 } 437 } else { 438 /* Backward compatible case -- even if we enable 439 * multiple queues, there's only single reg to program 440 */ 441 gfar_write(®s->txic, 0); 442 if (likely(priv->tx_queue[0]->txcoalescing)) 443 gfar_write(®s->txic, priv->tx_queue[0]->txic); 444 445 gfar_write(®s->rxic, 0); 446 if (unlikely(priv->rx_queue[0]->rxcoalescing)) 447 gfar_write(®s->rxic, priv->rx_queue[0]->rxic); 448 } 449 } 450 451 void gfar_configure_coalescing_all(struct gfar_private *priv) 452 { 453 gfar_configure_coalescing(priv, 0xFF, 0xFF); 454 } 455 456 static struct net_device_stats *gfar_get_stats(struct net_device *dev) 457 { 458 struct gfar_private *priv = netdev_priv(dev); 459 unsigned long rx_packets = 0, rx_bytes = 0, rx_dropped = 0; 460 unsigned long tx_packets = 0, tx_bytes = 0; 461 int i; 462 463 for (i = 0; i < priv->num_rx_queues; i++) { 464 rx_packets += priv->rx_queue[i]->stats.rx_packets; 465 rx_bytes += priv->rx_queue[i]->stats.rx_bytes; 466 rx_dropped += priv->rx_queue[i]->stats.rx_dropped; 467 } 468 469 dev->stats.rx_packets = rx_packets; 470 dev->stats.rx_bytes = rx_bytes; 471 dev->stats.rx_dropped = rx_dropped; 472 473 for (i = 0; i < priv->num_tx_queues; i++) { 474 tx_bytes += priv->tx_queue[i]->stats.tx_bytes; 475 tx_packets += priv->tx_queue[i]->stats.tx_packets; 476 } 477 478 dev->stats.tx_bytes = tx_bytes; 479 dev->stats.tx_packets = tx_packets; 480 481 return &dev->stats; 482 } 483 484 static int gfar_set_mac_addr(struct net_device *dev, void *p) 485 { 486 eth_mac_addr(dev, p); 487 488 gfar_set_mac_for_addr(dev, 0, dev->dev_addr); 489 490 return 0; 491 } 492 493 static const struct net_device_ops gfar_netdev_ops = { 494 .ndo_open = gfar_enet_open, 495 .ndo_start_xmit = gfar_start_xmit, 496 .ndo_stop = gfar_close, 497 .ndo_change_mtu = gfar_change_mtu, 498 .ndo_set_features = gfar_set_features, 499 .ndo_set_rx_mode = gfar_set_multi, 500 .ndo_tx_timeout = gfar_timeout, 501 .ndo_do_ioctl = gfar_ioctl, 502 .ndo_get_stats = gfar_get_stats, 503 .ndo_change_carrier = fixed_phy_change_carrier, 504 .ndo_set_mac_address = gfar_set_mac_addr, 505 .ndo_validate_addr = eth_validate_addr, 506 #ifdef CONFIG_NET_POLL_CONTROLLER 507 .ndo_poll_controller = gfar_netpoll, 508 #endif 509 }; 510 511 static void gfar_ints_disable(struct gfar_private *priv) 512 { 513 int i; 514 for (i = 0; i < priv->num_grps; i++) { 515 struct gfar __iomem *regs = priv->gfargrp[i].regs; 516 /* Clear IEVENT */ 517 gfar_write(®s->ievent, IEVENT_INIT_CLEAR); 518 519 /* Initialize IMASK */ 520 gfar_write(®s->imask, IMASK_INIT_CLEAR); 521 } 
522 } 523 524 static void gfar_ints_enable(struct gfar_private *priv) 525 { 526 int i; 527 for (i = 0; i < priv->num_grps; i++) { 528 struct gfar __iomem *regs = priv->gfargrp[i].regs; 529 /* Unmask the interrupts we look for */ 530 gfar_write(®s->imask, IMASK_DEFAULT); 531 } 532 } 533 534 static int gfar_alloc_tx_queues(struct gfar_private *priv) 535 { 536 int i; 537 538 for (i = 0; i < priv->num_tx_queues; i++) { 539 priv->tx_queue[i] = kzalloc(sizeof(struct gfar_priv_tx_q), 540 GFP_KERNEL); 541 if (!priv->tx_queue[i]) 542 return -ENOMEM; 543 544 priv->tx_queue[i]->tx_skbuff = NULL; 545 priv->tx_queue[i]->qindex = i; 546 priv->tx_queue[i]->dev = priv->ndev; 547 spin_lock_init(&(priv->tx_queue[i]->txlock)); 548 } 549 return 0; 550 } 551 552 static int gfar_alloc_rx_queues(struct gfar_private *priv) 553 { 554 int i; 555 556 for (i = 0; i < priv->num_rx_queues; i++) { 557 priv->rx_queue[i] = kzalloc(sizeof(struct gfar_priv_rx_q), 558 GFP_KERNEL); 559 if (!priv->rx_queue[i]) 560 return -ENOMEM; 561 562 priv->rx_queue[i]->qindex = i; 563 priv->rx_queue[i]->ndev = priv->ndev; 564 } 565 return 0; 566 } 567 568 static void gfar_free_tx_queues(struct gfar_private *priv) 569 { 570 int i; 571 572 for (i = 0; i < priv->num_tx_queues; i++) 573 kfree(priv->tx_queue[i]); 574 } 575 576 static void gfar_free_rx_queues(struct gfar_private *priv) 577 { 578 int i; 579 580 for (i = 0; i < priv->num_rx_queues; i++) 581 kfree(priv->rx_queue[i]); 582 } 583 584 static void unmap_group_regs(struct gfar_private *priv) 585 { 586 int i; 587 588 for (i = 0; i < MAXGROUPS; i++) 589 if (priv->gfargrp[i].regs) 590 iounmap(priv->gfargrp[i].regs); 591 } 592 593 static void free_gfar_dev(struct gfar_private *priv) 594 { 595 int i, j; 596 597 for (i = 0; i < priv->num_grps; i++) 598 for (j = 0; j < GFAR_NUM_IRQS; j++) { 599 kfree(priv->gfargrp[i].irqinfo[j]); 600 priv->gfargrp[i].irqinfo[j] = NULL; 601 } 602 603 free_netdev(priv->ndev); 604 } 605 606 static void disable_napi(struct gfar_private *priv) 607 { 608 int i; 609 610 for (i = 0; i < priv->num_grps; i++) { 611 napi_disable(&priv->gfargrp[i].napi_rx); 612 napi_disable(&priv->gfargrp[i].napi_tx); 613 } 614 } 615 616 static void enable_napi(struct gfar_private *priv) 617 { 618 int i; 619 620 for (i = 0; i < priv->num_grps; i++) { 621 napi_enable(&priv->gfargrp[i].napi_rx); 622 napi_enable(&priv->gfargrp[i].napi_tx); 623 } 624 } 625 626 static int gfar_parse_group(struct device_node *np, 627 struct gfar_private *priv, const char *model) 628 { 629 struct gfar_priv_grp *grp = &priv->gfargrp[priv->num_grps]; 630 int i; 631 632 for (i = 0; i < GFAR_NUM_IRQS; i++) { 633 grp->irqinfo[i] = kzalloc(sizeof(struct gfar_irqinfo), 634 GFP_KERNEL); 635 if (!grp->irqinfo[i]) 636 return -ENOMEM; 637 } 638 639 grp->regs = of_iomap(np, 0); 640 if (!grp->regs) 641 return -ENOMEM; 642 643 gfar_irq(grp, TX)->irq = irq_of_parse_and_map(np, 0); 644 645 /* If we aren't the FEC we have multiple interrupts */ 646 if (model && strcasecmp(model, "FEC")) { 647 gfar_irq(grp, RX)->irq = irq_of_parse_and_map(np, 1); 648 gfar_irq(grp, ER)->irq = irq_of_parse_and_map(np, 2); 649 if (!gfar_irq(grp, TX)->irq || 650 !gfar_irq(grp, RX)->irq || 651 !gfar_irq(grp, ER)->irq) 652 return -EINVAL; 653 } 654 655 grp->priv = priv; 656 spin_lock_init(&grp->grplock); 657 if (priv->mode == MQ_MG_MODE) { 658 u32 rxq_mask, txq_mask; 659 int ret; 660 661 grp->rx_bit_map = (DEFAULT_MAPPING >> priv->num_grps); 662 grp->tx_bit_map = (DEFAULT_MAPPING >> priv->num_grps); 663 664 ret = of_property_read_u32(np, 
"fsl,rx-bit-map", &rxq_mask); 665 if (!ret) { 666 grp->rx_bit_map = rxq_mask ? 667 rxq_mask : (DEFAULT_MAPPING >> priv->num_grps); 668 } 669 670 ret = of_property_read_u32(np, "fsl,tx-bit-map", &txq_mask); 671 if (!ret) { 672 grp->tx_bit_map = txq_mask ? 673 txq_mask : (DEFAULT_MAPPING >> priv->num_grps); 674 } 675 676 if (priv->poll_mode == GFAR_SQ_POLLING) { 677 /* One Q per interrupt group: Q0 to G0, Q1 to G1 */ 678 grp->rx_bit_map = (DEFAULT_MAPPING >> priv->num_grps); 679 grp->tx_bit_map = (DEFAULT_MAPPING >> priv->num_grps); 680 } 681 } else { 682 grp->rx_bit_map = 0xFF; 683 grp->tx_bit_map = 0xFF; 684 } 685 686 /* bit_map's MSB is q0 (from q0 to q7) but, for_each_set_bit parses 687 * right to left, so we need to revert the 8 bits to get the q index 688 */ 689 grp->rx_bit_map = bitrev8(grp->rx_bit_map); 690 grp->tx_bit_map = bitrev8(grp->tx_bit_map); 691 692 /* Calculate RSTAT, TSTAT, RQUEUE and TQUEUE values, 693 * also assign queues to groups 694 */ 695 for_each_set_bit(i, &grp->rx_bit_map, priv->num_rx_queues) { 696 if (!grp->rx_queue) 697 grp->rx_queue = priv->rx_queue[i]; 698 grp->num_rx_queues++; 699 grp->rstat |= (RSTAT_CLEAR_RHALT >> i); 700 priv->rqueue |= ((RQUEUE_EN0 | RQUEUE_EX0) >> i); 701 priv->rx_queue[i]->grp = grp; 702 } 703 704 for_each_set_bit(i, &grp->tx_bit_map, priv->num_tx_queues) { 705 if (!grp->tx_queue) 706 grp->tx_queue = priv->tx_queue[i]; 707 grp->num_tx_queues++; 708 grp->tstat |= (TSTAT_CLEAR_THALT >> i); 709 priv->tqueue |= (TQUEUE_EN0 >> i); 710 priv->tx_queue[i]->grp = grp; 711 } 712 713 priv->num_grps++; 714 715 return 0; 716 } 717 718 static int gfar_of_group_count(struct device_node *np) 719 { 720 struct device_node *child; 721 int num = 0; 722 723 for_each_available_child_of_node(np, child) 724 if (of_node_name_eq(child, "queue-group")) 725 num++; 726 727 return num; 728 } 729 730 static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev) 731 { 732 const char *model; 733 const char *ctype; 734 const void *mac_addr; 735 int err = 0, i; 736 struct net_device *dev = NULL; 737 struct gfar_private *priv = NULL; 738 struct device_node *np = ofdev->dev.of_node; 739 struct device_node *child = NULL; 740 u32 stash_len = 0; 741 u32 stash_idx = 0; 742 unsigned int num_tx_qs, num_rx_qs; 743 unsigned short mode, poll_mode; 744 745 if (!np) 746 return -ENODEV; 747 748 if (of_device_is_compatible(np, "fsl,etsec2")) { 749 mode = MQ_MG_MODE; 750 poll_mode = GFAR_SQ_POLLING; 751 } else { 752 mode = SQ_SG_MODE; 753 poll_mode = GFAR_SQ_POLLING; 754 } 755 756 if (mode == SQ_SG_MODE) { 757 num_tx_qs = 1; 758 num_rx_qs = 1; 759 } else { /* MQ_MG_MODE */ 760 /* get the actual number of supported groups */ 761 unsigned int num_grps = gfar_of_group_count(np); 762 763 if (num_grps == 0 || num_grps > MAXGROUPS) { 764 dev_err(&ofdev->dev, "Invalid # of int groups(%d)\n", 765 num_grps); 766 pr_err("Cannot do alloc_etherdev, aborting\n"); 767 return -EINVAL; 768 } 769 770 if (poll_mode == GFAR_SQ_POLLING) { 771 num_tx_qs = num_grps; /* one txq per int group */ 772 num_rx_qs = num_grps; /* one rxq per int group */ 773 } else { /* GFAR_MQ_POLLING */ 774 u32 tx_queues, rx_queues; 775 int ret; 776 777 /* parse the num of HW tx and rx queues */ 778 ret = of_property_read_u32(np, "fsl,num_tx_queues", 779 &tx_queues); 780 num_tx_qs = ret ? 1 : tx_queues; 781 782 ret = of_property_read_u32(np, "fsl,num_rx_queues", 783 &rx_queues); 784 num_rx_qs = ret ? 
1 : rx_queues; 785 } 786 } 787 788 if (num_tx_qs > MAX_TX_QS) { 789 pr_err("num_tx_qs(=%d) greater than MAX_TX_QS(=%d)\n", 790 num_tx_qs, MAX_TX_QS); 791 pr_err("Cannot do alloc_etherdev, aborting\n"); 792 return -EINVAL; 793 } 794 795 if (num_rx_qs > MAX_RX_QS) { 796 pr_err("num_rx_qs(=%d) greater than MAX_RX_QS(=%d)\n", 797 num_rx_qs, MAX_RX_QS); 798 pr_err("Cannot do alloc_etherdev, aborting\n"); 799 return -EINVAL; 800 } 801 802 *pdev = alloc_etherdev_mq(sizeof(*priv), num_tx_qs); 803 dev = *pdev; 804 if (NULL == dev) 805 return -ENOMEM; 806 807 priv = netdev_priv(dev); 808 priv->ndev = dev; 809 810 priv->mode = mode; 811 priv->poll_mode = poll_mode; 812 813 priv->num_tx_queues = num_tx_qs; 814 netif_set_real_num_rx_queues(dev, num_rx_qs); 815 priv->num_rx_queues = num_rx_qs; 816 817 err = gfar_alloc_tx_queues(priv); 818 if (err) 819 goto tx_alloc_failed; 820 821 err = gfar_alloc_rx_queues(priv); 822 if (err) 823 goto rx_alloc_failed; 824 825 err = of_property_read_string(np, "model", &model); 826 if (err) { 827 pr_err("Device model property missing, aborting\n"); 828 goto rx_alloc_failed; 829 } 830 831 /* Init Rx queue filer rule set linked list */ 832 INIT_LIST_HEAD(&priv->rx_list.list); 833 priv->rx_list.count = 0; 834 mutex_init(&priv->rx_queue_access); 835 836 for (i = 0; i < MAXGROUPS; i++) 837 priv->gfargrp[i].regs = NULL; 838 839 /* Parse and initialize group specific information */ 840 if (priv->mode == MQ_MG_MODE) { 841 for_each_available_child_of_node(np, child) { 842 if (!of_node_name_eq(child, "queue-group")) 843 continue; 844 845 err = gfar_parse_group(child, priv, model); 846 if (err) 847 goto err_grp_init; 848 } 849 } else { /* SQ_SG_MODE */ 850 err = gfar_parse_group(np, priv, model); 851 if (err) 852 goto err_grp_init; 853 } 854 855 if (of_property_read_bool(np, "bd-stash")) { 856 priv->device_flags |= FSL_GIANFAR_DEV_HAS_BD_STASHING; 857 priv->bd_stash_en = 1; 858 } 859 860 err = of_property_read_u32(np, "rx-stash-len", &stash_len); 861 862 if (err == 0) 863 priv->rx_stash_size = stash_len; 864 865 err = of_property_read_u32(np, "rx-stash-idx", &stash_idx); 866 867 if (err == 0) 868 priv->rx_stash_index = stash_idx; 869 870 if (stash_len || stash_idx) 871 priv->device_flags |= FSL_GIANFAR_DEV_HAS_BUF_STASHING; 872 873 mac_addr = of_get_mac_address(np); 874 875 if (mac_addr) 876 memcpy(dev->dev_addr, mac_addr, ETH_ALEN); 877 878 if (model && !strcasecmp(model, "TSEC")) 879 priv->device_flags |= FSL_GIANFAR_DEV_HAS_GIGABIT | 880 FSL_GIANFAR_DEV_HAS_COALESCE | 881 FSL_GIANFAR_DEV_HAS_RMON | 882 FSL_GIANFAR_DEV_HAS_MULTI_INTR; 883 884 if (model && !strcasecmp(model, "eTSEC")) 885 priv->device_flags |= FSL_GIANFAR_DEV_HAS_GIGABIT | 886 FSL_GIANFAR_DEV_HAS_COALESCE | 887 FSL_GIANFAR_DEV_HAS_RMON | 888 FSL_GIANFAR_DEV_HAS_MULTI_INTR | 889 FSL_GIANFAR_DEV_HAS_CSUM | 890 FSL_GIANFAR_DEV_HAS_VLAN | 891 FSL_GIANFAR_DEV_HAS_MAGIC_PACKET | 892 FSL_GIANFAR_DEV_HAS_EXTENDED_HASH | 893 FSL_GIANFAR_DEV_HAS_TIMER | 894 FSL_GIANFAR_DEV_HAS_RX_FILER; 895 896 err = of_property_read_string(np, "phy-connection-type", &ctype); 897 898 /* We only care about rgmii-id. 
The rest are autodetected */ 899 if (err == 0 && !strcmp(ctype, "rgmii-id")) 900 priv->interface = PHY_INTERFACE_MODE_RGMII_ID; 901 else 902 priv->interface = PHY_INTERFACE_MODE_MII; 903 904 if (of_find_property(np, "fsl,magic-packet", NULL)) 905 priv->device_flags |= FSL_GIANFAR_DEV_HAS_MAGIC_PACKET; 906 907 if (of_get_property(np, "fsl,wake-on-filer", NULL)) 908 priv->device_flags |= FSL_GIANFAR_DEV_HAS_WAKE_ON_FILER; 909 910 priv->phy_node = of_parse_phandle(np, "phy-handle", 0); 911 912 /* In the case of a fixed PHY, the DT node associated 913 * to the PHY is the Ethernet MAC DT node. 914 */ 915 if (!priv->phy_node && of_phy_is_fixed_link(np)) { 916 err = of_phy_register_fixed_link(np); 917 if (err) 918 goto err_grp_init; 919 920 priv->phy_node = of_node_get(np); 921 } 922 923 /* Find the TBI PHY. If it's not there, we don't support SGMII */ 924 priv->tbi_node = of_parse_phandle(np, "tbi-handle", 0); 925 926 return 0; 927 928 err_grp_init: 929 unmap_group_regs(priv); 930 rx_alloc_failed: 931 gfar_free_rx_queues(priv); 932 tx_alloc_failed: 933 gfar_free_tx_queues(priv); 934 free_gfar_dev(priv); 935 return err; 936 } 937 938 static int gfar_hwtstamp_set(struct net_device *netdev, struct ifreq *ifr) 939 { 940 struct hwtstamp_config config; 941 struct gfar_private *priv = netdev_priv(netdev); 942 943 if (copy_from_user(&config, ifr->ifr_data, sizeof(config))) 944 return -EFAULT; 945 946 /* reserved for future extensions */ 947 if (config.flags) 948 return -EINVAL; 949 950 switch (config.tx_type) { 951 case HWTSTAMP_TX_OFF: 952 priv->hwts_tx_en = 0; 953 break; 954 case HWTSTAMP_TX_ON: 955 if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)) 956 return -ERANGE; 957 priv->hwts_tx_en = 1; 958 break; 959 default: 960 return -ERANGE; 961 } 962 963 switch (config.rx_filter) { 964 case HWTSTAMP_FILTER_NONE: 965 if (priv->hwts_rx_en) { 966 priv->hwts_rx_en = 0; 967 reset_gfar(netdev); 968 } 969 break; 970 default: 971 if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)) 972 return -ERANGE; 973 if (!priv->hwts_rx_en) { 974 priv->hwts_rx_en = 1; 975 reset_gfar(netdev); 976 } 977 config.rx_filter = HWTSTAMP_FILTER_ALL; 978 break; 979 } 980 981 return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ? 982 -EFAULT : 0; 983 } 984 985 static int gfar_hwtstamp_get(struct net_device *netdev, struct ifreq *ifr) 986 { 987 struct hwtstamp_config config; 988 struct gfar_private *priv = netdev_priv(netdev); 989 990 config.flags = 0; 991 config.tx_type = priv->hwts_tx_en ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF; 992 config.rx_filter = (priv->hwts_rx_en ? 993 HWTSTAMP_FILTER_ALL : HWTSTAMP_FILTER_NONE); 994 995 return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ? 
996 -EFAULT : 0; 997 } 998 999 static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) 1000 { 1001 struct phy_device *phydev = dev->phydev; 1002 1003 if (!netif_running(dev)) 1004 return -EINVAL; 1005 1006 if (cmd == SIOCSHWTSTAMP) 1007 return gfar_hwtstamp_set(dev, rq); 1008 if (cmd == SIOCGHWTSTAMP) 1009 return gfar_hwtstamp_get(dev, rq); 1010 1011 if (!phydev) 1012 return -ENODEV; 1013 1014 return phy_mii_ioctl(phydev, rq, cmd); 1015 } 1016 1017 static u32 cluster_entry_per_class(struct gfar_private *priv, u32 rqfar, 1018 u32 class) 1019 { 1020 u32 rqfpr = FPR_FILER_MASK; 1021 u32 rqfcr = 0x0; 1022 1023 rqfar--; 1024 rqfcr = RQFCR_CLE | RQFCR_PID_MASK | RQFCR_CMP_EXACT; 1025 priv->ftp_rqfpr[rqfar] = rqfpr; 1026 priv->ftp_rqfcr[rqfar] = rqfcr; 1027 gfar_write_filer(priv, rqfar, rqfcr, rqfpr); 1028 1029 rqfar--; 1030 rqfcr = RQFCR_CMP_NOMATCH; 1031 priv->ftp_rqfpr[rqfar] = rqfpr; 1032 priv->ftp_rqfcr[rqfar] = rqfcr; 1033 gfar_write_filer(priv, rqfar, rqfcr, rqfpr); 1034 1035 rqfar--; 1036 rqfcr = RQFCR_CMP_EXACT | RQFCR_PID_PARSE | RQFCR_CLE | RQFCR_AND; 1037 rqfpr = class; 1038 priv->ftp_rqfcr[rqfar] = rqfcr; 1039 priv->ftp_rqfpr[rqfar] = rqfpr; 1040 gfar_write_filer(priv, rqfar, rqfcr, rqfpr); 1041 1042 rqfar--; 1043 rqfcr = RQFCR_CMP_EXACT | RQFCR_PID_MASK | RQFCR_AND; 1044 rqfpr = class; 1045 priv->ftp_rqfcr[rqfar] = rqfcr; 1046 priv->ftp_rqfpr[rqfar] = rqfpr; 1047 gfar_write_filer(priv, rqfar, rqfcr, rqfpr); 1048 1049 return rqfar; 1050 } 1051 1052 static void gfar_init_filer_table(struct gfar_private *priv) 1053 { 1054 int i = 0x0; 1055 u32 rqfar = MAX_FILER_IDX; 1056 u32 rqfcr = 0x0; 1057 u32 rqfpr = FPR_FILER_MASK; 1058 1059 /* Default rule */ 1060 rqfcr = RQFCR_CMP_MATCH; 1061 priv->ftp_rqfcr[rqfar] = rqfcr; 1062 priv->ftp_rqfpr[rqfar] = rqfpr; 1063 gfar_write_filer(priv, rqfar, rqfcr, rqfpr); 1064 1065 rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6); 1066 rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6 | RQFPR_UDP); 1067 rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6 | RQFPR_TCP); 1068 rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4); 1069 rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4 | RQFPR_UDP); 1070 rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4 | RQFPR_TCP); 1071 1072 /* cur_filer_idx indicated the first non-masked rule */ 1073 priv->cur_filer_idx = rqfar; 1074 1075 /* Rest are masked rules */ 1076 rqfcr = RQFCR_CMP_NOMATCH; 1077 for (i = 0; i < rqfar; i++) { 1078 priv->ftp_rqfcr[i] = rqfcr; 1079 priv->ftp_rqfpr[i] = rqfpr; 1080 gfar_write_filer(priv, i, rqfcr, rqfpr); 1081 } 1082 } 1083 1084 #ifdef CONFIG_PPC 1085 static void __gfar_detect_errata_83xx(struct gfar_private *priv) 1086 { 1087 unsigned int pvr = mfspr(SPRN_PVR); 1088 unsigned int svr = mfspr(SPRN_SVR); 1089 unsigned int mod = (svr >> 16) & 0xfff6; /* w/o E suffix */ 1090 unsigned int rev = svr & 0xffff; 1091 1092 /* MPC8313 Rev 2.0 and higher; All MPC837x */ 1093 if ((pvr == 0x80850010 && mod == 0x80b0 && rev >= 0x0020) || 1094 (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0)) 1095 priv->errata |= GFAR_ERRATA_74; 1096 1097 /* MPC8313 and MPC837x all rev */ 1098 if ((pvr == 0x80850010 && mod == 0x80b0) || 1099 (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0)) 1100 priv->errata |= GFAR_ERRATA_76; 1101 1102 /* MPC8313 Rev < 2.0 */ 1103 if (pvr == 0x80850010 && mod == 0x80b0 && rev < 0x0020) 1104 priv->errata |= GFAR_ERRATA_12; 1105 } 1106 1107 static void __gfar_detect_errata_85xx(struct gfar_private *priv) 1108 { 1109 unsigned int svr = 
mfspr(SPRN_SVR); 1110 1111 if ((SVR_SOC_VER(svr) == SVR_8548) && (SVR_REV(svr) == 0x20)) 1112 priv->errata |= GFAR_ERRATA_12; 1113 /* P2020/P1010 Rev 1; MPC8548 Rev 2 */ 1114 if (((SVR_SOC_VER(svr) == SVR_P2020) && (SVR_REV(svr) < 0x20)) || 1115 ((SVR_SOC_VER(svr) == SVR_P2010) && (SVR_REV(svr) < 0x20)) || 1116 ((SVR_SOC_VER(svr) == SVR_8548) && (SVR_REV(svr) < 0x31))) 1117 priv->errata |= GFAR_ERRATA_76; /* aka eTSEC 20 */ 1118 } 1119 #endif 1120 1121 static void gfar_detect_errata(struct gfar_private *priv) 1122 { 1123 struct device *dev = &priv->ofdev->dev; 1124 1125 /* no plans to fix */ 1126 priv->errata |= GFAR_ERRATA_A002; 1127 1128 #ifdef CONFIG_PPC 1129 if (pvr_version_is(PVR_VER_E500V1) || pvr_version_is(PVR_VER_E500V2)) 1130 __gfar_detect_errata_85xx(priv); 1131 else /* non-mpc85xx parts, i.e. e300 core based */ 1132 __gfar_detect_errata_83xx(priv); 1133 #endif 1134 1135 if (priv->errata) 1136 dev_info(dev, "enabled errata workarounds, flags: 0x%x\n", 1137 priv->errata); 1138 } 1139 1140 void gfar_mac_reset(struct gfar_private *priv) 1141 { 1142 struct gfar __iomem *regs = priv->gfargrp[0].regs; 1143 u32 tempval; 1144 1145 /* Reset MAC layer */ 1146 gfar_write(®s->maccfg1, MACCFG1_SOFT_RESET); 1147 1148 /* We need to delay at least 3 TX clocks */ 1149 udelay(3); 1150 1151 /* the soft reset bit is not self-resetting, so we need to 1152 * clear it before resuming normal operation 1153 */ 1154 gfar_write(®s->maccfg1, 0); 1155 1156 udelay(3); 1157 1158 gfar_rx_offload_en(priv); 1159 1160 /* Initialize the max receive frame/buffer lengths */ 1161 gfar_write(®s->maxfrm, GFAR_JUMBO_FRAME_SIZE); 1162 gfar_write(®s->mrblr, GFAR_RXB_SIZE); 1163 1164 /* Initialize the Minimum Frame Length Register */ 1165 gfar_write(®s->minflr, MINFLR_INIT_SETTINGS); 1166 1167 /* Initialize MACCFG2. */ 1168 tempval = MACCFG2_INIT_SETTINGS; 1169 1170 /* eTSEC74 erratum: Rx frames of length MAXFRM or MAXFRM-1 1171 * are marked as truncated. Avoid this by MACCFG2[Huge Frame]=1, 1172 * and by checking RxBD[LG] and discarding larger than MAXFRM. 1173 */ 1174 if (gfar_has_errata(priv, GFAR_ERRATA_74)) 1175 tempval |= MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK; 1176 1177 gfar_write(®s->maccfg2, tempval); 1178 1179 /* Clear mac addr hash registers */ 1180 gfar_write(®s->igaddr0, 0); 1181 gfar_write(®s->igaddr1, 0); 1182 gfar_write(®s->igaddr2, 0); 1183 gfar_write(®s->igaddr3, 0); 1184 gfar_write(®s->igaddr4, 0); 1185 gfar_write(®s->igaddr5, 0); 1186 gfar_write(®s->igaddr6, 0); 1187 gfar_write(®s->igaddr7, 0); 1188 1189 gfar_write(®s->gaddr0, 0); 1190 gfar_write(®s->gaddr1, 0); 1191 gfar_write(®s->gaddr2, 0); 1192 gfar_write(®s->gaddr3, 0); 1193 gfar_write(®s->gaddr4, 0); 1194 gfar_write(®s->gaddr5, 0); 1195 gfar_write(®s->gaddr6, 0); 1196 gfar_write(®s->gaddr7, 0); 1197 1198 if (priv->extended_hash) 1199 gfar_clear_exact_match(priv->ndev); 1200 1201 gfar_mac_rx_config(priv); 1202 1203 gfar_mac_tx_config(priv); 1204 1205 gfar_set_mac_address(priv->ndev); 1206 1207 gfar_set_multi(priv->ndev); 1208 1209 /* clear ievent and imask before configuring coalescing */ 1210 gfar_ints_disable(priv); 1211 1212 /* Configure the coalescing support */ 1213 gfar_configure_coalescing_all(priv); 1214 } 1215 1216 static void gfar_hw_init(struct gfar_private *priv) 1217 { 1218 struct gfar __iomem *regs = priv->gfargrp[0].regs; 1219 u32 attrs; 1220 1221 /* Stop the DMA engine now, in case it was running before 1222 * (The firmware could have used it, and left it running). 
1223 */ 1224 gfar_halt(priv); 1225 1226 gfar_mac_reset(priv); 1227 1228 /* Zero out the rmon mib registers if it has them */ 1229 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON) { 1230 memset_io(&(regs->rmon), 0, sizeof(struct rmon_mib)); 1231 1232 /* Mask off the CAM interrupts */ 1233 gfar_write(®s->rmon.cam1, 0xffffffff); 1234 gfar_write(®s->rmon.cam2, 0xffffffff); 1235 } 1236 1237 /* Initialize ECNTRL */ 1238 gfar_write(®s->ecntrl, ECNTRL_INIT_SETTINGS); 1239 1240 /* Set the extraction length and index */ 1241 attrs = ATTRELI_EL(priv->rx_stash_size) | 1242 ATTRELI_EI(priv->rx_stash_index); 1243 1244 gfar_write(®s->attreli, attrs); 1245 1246 /* Start with defaults, and add stashing 1247 * depending on driver parameters 1248 */ 1249 attrs = ATTR_INIT_SETTINGS; 1250 1251 if (priv->bd_stash_en) 1252 attrs |= ATTR_BDSTASH; 1253 1254 if (priv->rx_stash_size != 0) 1255 attrs |= ATTR_BUFSTASH; 1256 1257 gfar_write(®s->attr, attrs); 1258 1259 /* FIFO configs */ 1260 gfar_write(®s->fifo_tx_thr, DEFAULT_FIFO_TX_THR); 1261 gfar_write(®s->fifo_tx_starve, DEFAULT_FIFO_TX_STARVE); 1262 gfar_write(®s->fifo_tx_starve_shutoff, DEFAULT_FIFO_TX_STARVE_OFF); 1263 1264 /* Program the interrupt steering regs, only for MG devices */ 1265 if (priv->num_grps > 1) 1266 gfar_write_isrg(priv); 1267 } 1268 1269 static void gfar_init_addr_hash_table(struct gfar_private *priv) 1270 { 1271 struct gfar __iomem *regs = priv->gfargrp[0].regs; 1272 1273 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_EXTENDED_HASH) { 1274 priv->extended_hash = 1; 1275 priv->hash_width = 9; 1276 1277 priv->hash_regs[0] = ®s->igaddr0; 1278 priv->hash_regs[1] = ®s->igaddr1; 1279 priv->hash_regs[2] = ®s->igaddr2; 1280 priv->hash_regs[3] = ®s->igaddr3; 1281 priv->hash_regs[4] = ®s->igaddr4; 1282 priv->hash_regs[5] = ®s->igaddr5; 1283 priv->hash_regs[6] = ®s->igaddr6; 1284 priv->hash_regs[7] = ®s->igaddr7; 1285 priv->hash_regs[8] = ®s->gaddr0; 1286 priv->hash_regs[9] = ®s->gaddr1; 1287 priv->hash_regs[10] = ®s->gaddr2; 1288 priv->hash_regs[11] = ®s->gaddr3; 1289 priv->hash_regs[12] = ®s->gaddr4; 1290 priv->hash_regs[13] = ®s->gaddr5; 1291 priv->hash_regs[14] = ®s->gaddr6; 1292 priv->hash_regs[15] = ®s->gaddr7; 1293 1294 } else { 1295 priv->extended_hash = 0; 1296 priv->hash_width = 8; 1297 1298 priv->hash_regs[0] = ®s->gaddr0; 1299 priv->hash_regs[1] = ®s->gaddr1; 1300 priv->hash_regs[2] = ®s->gaddr2; 1301 priv->hash_regs[3] = ®s->gaddr3; 1302 priv->hash_regs[4] = ®s->gaddr4; 1303 priv->hash_regs[5] = ®s->gaddr5; 1304 priv->hash_regs[6] = ®s->gaddr6; 1305 priv->hash_regs[7] = ®s->gaddr7; 1306 } 1307 } 1308 1309 /* Set up the ethernet device structure, private data, 1310 * and anything else we need before we start 1311 */ 1312 static int gfar_probe(struct platform_device *ofdev) 1313 { 1314 struct device_node *np = ofdev->dev.of_node; 1315 struct net_device *dev = NULL; 1316 struct gfar_private *priv = NULL; 1317 int err = 0, i; 1318 1319 err = gfar_of_init(ofdev, &dev); 1320 1321 if (err) 1322 return err; 1323 1324 priv = netdev_priv(dev); 1325 priv->ndev = dev; 1326 priv->ofdev = ofdev; 1327 priv->dev = &ofdev->dev; 1328 SET_NETDEV_DEV(dev, &ofdev->dev); 1329 1330 INIT_WORK(&priv->reset_task, gfar_reset_task); 1331 1332 platform_set_drvdata(ofdev, priv); 1333 1334 gfar_detect_errata(priv); 1335 1336 /* Set the dev->base_addr to the gfar reg region */ 1337 dev->base_addr = (unsigned long) priv->gfargrp[0].regs; 1338 1339 /* Fill in the dev structure */ 1340 dev->watchdog_timeo = TX_TIMEOUT; 1341 /* MTU range: 50 - 9586 */ 1342 dev->mtu = 1500; 
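	/* Worked example for the range quoted above (a sketch of the
	 * arithmetic, assuming GFAR_JUMBO_FRAME_SIZE is 9600 as defined
	 * in gianfar.h): max_mtu = GFAR_JUMBO_FRAME_SIZE - ETH_HLEN
	 * = 9600 - 14 = 9586, which matches the upper bound in the
	 * "MTU range" comment.
	 */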
1343 dev->min_mtu = 50; 1344 dev->max_mtu = GFAR_JUMBO_FRAME_SIZE - ETH_HLEN; 1345 dev->netdev_ops = &gfar_netdev_ops; 1346 dev->ethtool_ops = &gfar_ethtool_ops; 1347 1348 /* Register for napi ...We are registering NAPI for each grp */ 1349 for (i = 0; i < priv->num_grps; i++) { 1350 if (priv->poll_mode == GFAR_SQ_POLLING) { 1351 netif_napi_add(dev, &priv->gfargrp[i].napi_rx, 1352 gfar_poll_rx_sq, GFAR_DEV_WEIGHT); 1353 netif_tx_napi_add(dev, &priv->gfargrp[i].napi_tx, 1354 gfar_poll_tx_sq, 2); 1355 } else { 1356 netif_napi_add(dev, &priv->gfargrp[i].napi_rx, 1357 gfar_poll_rx, GFAR_DEV_WEIGHT); 1358 netif_tx_napi_add(dev, &priv->gfargrp[i].napi_tx, 1359 gfar_poll_tx, 2); 1360 } 1361 } 1362 1363 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_CSUM) { 1364 dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG | 1365 NETIF_F_RXCSUM; 1366 dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG | 1367 NETIF_F_RXCSUM | NETIF_F_HIGHDMA; 1368 } 1369 1370 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_VLAN) { 1371 dev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX | 1372 NETIF_F_HW_VLAN_CTAG_RX; 1373 dev->features |= NETIF_F_HW_VLAN_CTAG_RX; 1374 } 1375 1376 dev->priv_flags |= IFF_LIVE_ADDR_CHANGE; 1377 1378 gfar_init_addr_hash_table(priv); 1379 1380 /* Insert receive time stamps into padding alignment bytes, and 1381 * plus 2 bytes padding to ensure the cpu alignment. 1382 */ 1383 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER) 1384 priv->padding = 8 + DEFAULT_PADDING; 1385 1386 if (dev->features & NETIF_F_IP_CSUM || 1387 priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER) 1388 dev->needed_headroom = GMAC_FCB_LEN; 1389 1390 /* Initializing some of the rx/tx queue level parameters */ 1391 for (i = 0; i < priv->num_tx_queues; i++) { 1392 priv->tx_queue[i]->tx_ring_size = DEFAULT_TX_RING_SIZE; 1393 priv->tx_queue[i]->num_txbdfree = DEFAULT_TX_RING_SIZE; 1394 priv->tx_queue[i]->txcoalescing = DEFAULT_TX_COALESCE; 1395 priv->tx_queue[i]->txic = DEFAULT_TXIC; 1396 } 1397 1398 for (i = 0; i < priv->num_rx_queues; i++) { 1399 priv->rx_queue[i]->rx_ring_size = DEFAULT_RX_RING_SIZE; 1400 priv->rx_queue[i]->rxcoalescing = DEFAULT_RX_COALESCE; 1401 priv->rx_queue[i]->rxic = DEFAULT_RXIC; 1402 } 1403 1404 /* Always enable rx filer if available */ 1405 priv->rx_filer_enable = 1406 (priv->device_flags & FSL_GIANFAR_DEV_HAS_RX_FILER) ? 
1 : 0; 1407 /* Enable most messages by default */ 1408 priv->msg_enable = (NETIF_MSG_IFUP << 1 ) - 1; 1409 /* use pritority h/w tx queue scheduling for single queue devices */ 1410 if (priv->num_tx_queues == 1) 1411 priv->prio_sched_en = 1; 1412 1413 set_bit(GFAR_DOWN, &priv->state); 1414 1415 gfar_hw_init(priv); 1416 1417 /* Carrier starts down, phylib will bring it up */ 1418 netif_carrier_off(dev); 1419 1420 err = register_netdev(dev); 1421 1422 if (err) { 1423 pr_err("%s: Cannot register net device, aborting\n", dev->name); 1424 goto register_fail; 1425 } 1426 1427 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET) 1428 priv->wol_supported |= GFAR_WOL_MAGIC; 1429 1430 if ((priv->device_flags & FSL_GIANFAR_DEV_HAS_WAKE_ON_FILER) && 1431 priv->rx_filer_enable) 1432 priv->wol_supported |= GFAR_WOL_FILER_UCAST; 1433 1434 device_set_wakeup_capable(&ofdev->dev, priv->wol_supported); 1435 1436 /* fill out IRQ number and name fields */ 1437 for (i = 0; i < priv->num_grps; i++) { 1438 struct gfar_priv_grp *grp = &priv->gfargrp[i]; 1439 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) { 1440 sprintf(gfar_irq(grp, TX)->name, "%s%s%c%s", 1441 dev->name, "_g", '0' + i, "_tx"); 1442 sprintf(gfar_irq(grp, RX)->name, "%s%s%c%s", 1443 dev->name, "_g", '0' + i, "_rx"); 1444 sprintf(gfar_irq(grp, ER)->name, "%s%s%c%s", 1445 dev->name, "_g", '0' + i, "_er"); 1446 } else 1447 strcpy(gfar_irq(grp, TX)->name, dev->name); 1448 } 1449 1450 /* Initialize the filer table */ 1451 gfar_init_filer_table(priv); 1452 1453 /* Print out the device info */ 1454 netdev_info(dev, "mac: %pM\n", dev->dev_addr); 1455 1456 /* Even more device info helps when determining which kernel 1457 * provided which set of benchmarks. 1458 */ 1459 netdev_info(dev, "Running with NAPI enabled\n"); 1460 for (i = 0; i < priv->num_rx_queues; i++) 1461 netdev_info(dev, "RX BD ring size for Q[%d]: %d\n", 1462 i, priv->rx_queue[i]->rx_ring_size); 1463 for (i = 0; i < priv->num_tx_queues; i++) 1464 netdev_info(dev, "TX BD ring size for Q[%d]: %d\n", 1465 i, priv->tx_queue[i]->tx_ring_size); 1466 1467 return 0; 1468 1469 register_fail: 1470 if (of_phy_is_fixed_link(np)) 1471 of_phy_deregister_fixed_link(np); 1472 unmap_group_regs(priv); 1473 gfar_free_rx_queues(priv); 1474 gfar_free_tx_queues(priv); 1475 of_node_put(priv->phy_node); 1476 of_node_put(priv->tbi_node); 1477 free_gfar_dev(priv); 1478 return err; 1479 } 1480 1481 static int gfar_remove(struct platform_device *ofdev) 1482 { 1483 struct gfar_private *priv = platform_get_drvdata(ofdev); 1484 struct device_node *np = ofdev->dev.of_node; 1485 1486 of_node_put(priv->phy_node); 1487 of_node_put(priv->tbi_node); 1488 1489 unregister_netdev(priv->ndev); 1490 1491 if (of_phy_is_fixed_link(np)) 1492 of_phy_deregister_fixed_link(np); 1493 1494 unmap_group_regs(priv); 1495 gfar_free_rx_queues(priv); 1496 gfar_free_tx_queues(priv); 1497 free_gfar_dev(priv); 1498 1499 return 0; 1500 } 1501 1502 #ifdef CONFIG_PM 1503 1504 static void __gfar_filer_disable(struct gfar_private *priv) 1505 { 1506 struct gfar __iomem *regs = priv->gfargrp[0].regs; 1507 u32 temp; 1508 1509 temp = gfar_read(®s->rctrl); 1510 temp &= ~(RCTRL_FILREN | RCTRL_PRSDEP_INIT); 1511 gfar_write(®s->rctrl, temp); 1512 } 1513 1514 static void __gfar_filer_enable(struct gfar_private *priv) 1515 { 1516 struct gfar __iomem *regs = priv->gfargrp[0].regs; 1517 u32 temp; 1518 1519 temp = gfar_read(®s->rctrl); 1520 temp |= RCTRL_FILREN | RCTRL_PRSDEP_INIT; 1521 gfar_write(®s->rctrl, temp); 1522 } 1523 1524 /* Filer rules 
implementing wol capabilities */ 1525 static void gfar_filer_config_wol(struct gfar_private *priv) 1526 { 1527 unsigned int i; 1528 u32 rqfcr; 1529 1530 __gfar_filer_disable(priv); 1531 1532 /* clear the filer table, reject any packet by default */ 1533 rqfcr = RQFCR_RJE | RQFCR_CMP_MATCH; 1534 for (i = 0; i <= MAX_FILER_IDX; i++) 1535 gfar_write_filer(priv, i, rqfcr, 0); 1536 1537 i = 0; 1538 if (priv->wol_opts & GFAR_WOL_FILER_UCAST) { 1539 /* unicast packet, accept it */ 1540 struct net_device *ndev = priv->ndev; 1541 /* get the default rx queue index */ 1542 u8 qindex = (u8)priv->gfargrp[0].rx_queue->qindex; 1543 u32 dest_mac_addr = (ndev->dev_addr[0] << 16) | 1544 (ndev->dev_addr[1] << 8) | 1545 ndev->dev_addr[2]; 1546 1547 rqfcr = (qindex << 10) | RQFCR_AND | 1548 RQFCR_CMP_EXACT | RQFCR_PID_DAH; 1549 1550 gfar_write_filer(priv, i++, rqfcr, dest_mac_addr); 1551 1552 dest_mac_addr = (ndev->dev_addr[3] << 16) | 1553 (ndev->dev_addr[4] << 8) | 1554 ndev->dev_addr[5]; 1555 rqfcr = (qindex << 10) | RQFCR_GPI | 1556 RQFCR_CMP_EXACT | RQFCR_PID_DAL; 1557 gfar_write_filer(priv, i++, rqfcr, dest_mac_addr); 1558 } 1559 1560 __gfar_filer_enable(priv); 1561 } 1562 1563 static void gfar_filer_restore_table(struct gfar_private *priv) 1564 { 1565 u32 rqfcr, rqfpr; 1566 unsigned int i; 1567 1568 __gfar_filer_disable(priv); 1569 1570 for (i = 0; i <= MAX_FILER_IDX; i++) { 1571 rqfcr = priv->ftp_rqfcr[i]; 1572 rqfpr = priv->ftp_rqfpr[i]; 1573 gfar_write_filer(priv, i, rqfcr, rqfpr); 1574 } 1575 1576 __gfar_filer_enable(priv); 1577 } 1578 1579 /* gfar_start() for Rx only and with the FGPI filer interrupt enabled */ 1580 static void gfar_start_wol_filer(struct gfar_private *priv) 1581 { 1582 struct gfar __iomem *regs = priv->gfargrp[0].regs; 1583 u32 tempval; 1584 int i = 0; 1585 1586 /* Enable Rx hw queues */ 1587 gfar_write(®s->rqueue, priv->rqueue); 1588 1589 /* Initialize DMACTRL to have WWR and WOP */ 1590 tempval = gfar_read(®s->dmactrl); 1591 tempval |= DMACTRL_INIT_SETTINGS; 1592 gfar_write(®s->dmactrl, tempval); 1593 1594 /* Make sure we aren't stopped */ 1595 tempval = gfar_read(®s->dmactrl); 1596 tempval &= ~DMACTRL_GRS; 1597 gfar_write(®s->dmactrl, tempval); 1598 1599 for (i = 0; i < priv->num_grps; i++) { 1600 regs = priv->gfargrp[i].regs; 1601 /* Clear RHLT, so that the DMA starts polling now */ 1602 gfar_write(®s->rstat, priv->gfargrp[i].rstat); 1603 /* enable the Filer General Purpose Interrupt */ 1604 gfar_write(®s->imask, IMASK_FGPI); 1605 } 1606 1607 /* Enable Rx DMA */ 1608 tempval = gfar_read(®s->maccfg1); 1609 tempval |= MACCFG1_RX_EN; 1610 gfar_write(®s->maccfg1, tempval); 1611 } 1612 1613 static int gfar_suspend(struct device *dev) 1614 { 1615 struct gfar_private *priv = dev_get_drvdata(dev); 1616 struct net_device *ndev = priv->ndev; 1617 struct gfar __iomem *regs = priv->gfargrp[0].regs; 1618 u32 tempval; 1619 u16 wol = priv->wol_opts; 1620 1621 if (!netif_running(ndev)) 1622 return 0; 1623 1624 disable_napi(priv); 1625 netif_tx_lock(ndev); 1626 netif_device_detach(ndev); 1627 netif_tx_unlock(ndev); 1628 1629 gfar_halt(priv); 1630 1631 if (wol & GFAR_WOL_MAGIC) { 1632 /* Enable interrupt on Magic Packet */ 1633 gfar_write(®s->imask, IMASK_MAG); 1634 1635 /* Enable Magic Packet mode */ 1636 tempval = gfar_read(®s->maccfg2); 1637 tempval |= MACCFG2_MPEN; 1638 gfar_write(®s->maccfg2, tempval); 1639 1640 /* re-enable the Rx block */ 1641 tempval = gfar_read(®s->maccfg1); 1642 tempval |= MACCFG1_RX_EN; 1643 gfar_write(®s->maccfg1, tempval); 1644 1645 } else if (wol & 
GFAR_WOL_FILER_UCAST) { 1646 gfar_filer_config_wol(priv); 1647 gfar_start_wol_filer(priv); 1648 1649 } else { 1650 phy_stop(ndev->phydev); 1651 } 1652 1653 return 0; 1654 } 1655 1656 static int gfar_resume(struct device *dev) 1657 { 1658 struct gfar_private *priv = dev_get_drvdata(dev); 1659 struct net_device *ndev = priv->ndev; 1660 struct gfar __iomem *regs = priv->gfargrp[0].regs; 1661 u32 tempval; 1662 u16 wol = priv->wol_opts; 1663 1664 if (!netif_running(ndev)) 1665 return 0; 1666 1667 if (wol & GFAR_WOL_MAGIC) { 1668 /* Disable Magic Packet mode */ 1669 tempval = gfar_read(®s->maccfg2); 1670 tempval &= ~MACCFG2_MPEN; 1671 gfar_write(®s->maccfg2, tempval); 1672 1673 } else if (wol & GFAR_WOL_FILER_UCAST) { 1674 /* need to stop rx only, tx is already down */ 1675 gfar_halt(priv); 1676 gfar_filer_restore_table(priv); 1677 1678 } else { 1679 phy_start(ndev->phydev); 1680 } 1681 1682 gfar_start(priv); 1683 1684 netif_device_attach(ndev); 1685 enable_napi(priv); 1686 1687 return 0; 1688 } 1689 1690 static int gfar_restore(struct device *dev) 1691 { 1692 struct gfar_private *priv = dev_get_drvdata(dev); 1693 struct net_device *ndev = priv->ndev; 1694 1695 if (!netif_running(ndev)) { 1696 netif_device_attach(ndev); 1697 1698 return 0; 1699 } 1700 1701 gfar_init_bds(ndev); 1702 1703 gfar_mac_reset(priv); 1704 1705 gfar_init_tx_rx_base(priv); 1706 1707 gfar_start(priv); 1708 1709 priv->oldlink = 0; 1710 priv->oldspeed = 0; 1711 priv->oldduplex = -1; 1712 1713 if (ndev->phydev) 1714 phy_start(ndev->phydev); 1715 1716 netif_device_attach(ndev); 1717 enable_napi(priv); 1718 1719 return 0; 1720 } 1721 1722 static const struct dev_pm_ops gfar_pm_ops = { 1723 .suspend = gfar_suspend, 1724 .resume = gfar_resume, 1725 .freeze = gfar_suspend, 1726 .thaw = gfar_resume, 1727 .restore = gfar_restore, 1728 }; 1729 1730 #define GFAR_PM_OPS (&gfar_pm_ops) 1731 1732 #else 1733 1734 #define GFAR_PM_OPS NULL 1735 1736 #endif 1737 1738 /* Reads the controller's registers to determine what interface 1739 * connects it to the PHY. 1740 */ 1741 static phy_interface_t gfar_get_interface(struct net_device *dev) 1742 { 1743 struct gfar_private *priv = netdev_priv(dev); 1744 struct gfar __iomem *regs = priv->gfargrp[0].regs; 1745 u32 ecntrl; 1746 1747 ecntrl = gfar_read(®s->ecntrl); 1748 1749 if (ecntrl & ECNTRL_SGMII_MODE) 1750 return PHY_INTERFACE_MODE_SGMII; 1751 1752 if (ecntrl & ECNTRL_TBI_MODE) { 1753 if (ecntrl & ECNTRL_REDUCED_MODE) 1754 return PHY_INTERFACE_MODE_RTBI; 1755 else 1756 return PHY_INTERFACE_MODE_TBI; 1757 } 1758 1759 if (ecntrl & ECNTRL_REDUCED_MODE) { 1760 if (ecntrl & ECNTRL_REDUCED_MII_MODE) { 1761 return PHY_INTERFACE_MODE_RMII; 1762 } 1763 else { 1764 phy_interface_t interface = priv->interface; 1765 1766 /* This isn't autodetected right now, so it must 1767 * be set by the device tree or platform code. 1768 */ 1769 if (interface == PHY_INTERFACE_MODE_RGMII_ID) 1770 return PHY_INTERFACE_MODE_RGMII_ID; 1771 1772 return PHY_INTERFACE_MODE_RGMII; 1773 } 1774 } 1775 1776 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT) 1777 return PHY_INTERFACE_MODE_GMII; 1778 1779 return PHY_INTERFACE_MODE_MII; 1780 } 1781 1782 1783 /* Initializes driver's PHY state, and attaches to the PHY. 1784 * Returns 0 on success. 
1785 */ 1786 static int init_phy(struct net_device *dev) 1787 { 1788 __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, }; 1789 struct gfar_private *priv = netdev_priv(dev); 1790 phy_interface_t interface; 1791 struct phy_device *phydev; 1792 struct ethtool_eee edata; 1793 1794 linkmode_set_bit_array(phy_10_100_features_array, 1795 ARRAY_SIZE(phy_10_100_features_array), 1796 mask); 1797 linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, mask); 1798 linkmode_set_bit(ETHTOOL_LINK_MODE_MII_BIT, mask); 1799 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT) 1800 linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, mask); 1801 1802 priv->oldlink = 0; 1803 priv->oldspeed = 0; 1804 priv->oldduplex = -1; 1805 1806 interface = gfar_get_interface(dev); 1807 1808 phydev = of_phy_connect(dev, priv->phy_node, &adjust_link, 0, 1809 interface); 1810 if (!phydev) { 1811 dev_err(&dev->dev, "could not attach to PHY\n"); 1812 return -ENODEV; 1813 } 1814 1815 if (interface == PHY_INTERFACE_MODE_SGMII) 1816 gfar_configure_serdes(dev); 1817 1818 /* Remove any features not supported by the controller */ 1819 linkmode_and(phydev->supported, phydev->supported, mask); 1820 linkmode_copy(phydev->advertising, phydev->supported); 1821 1822 /* Add support for flow control */ 1823 phy_support_asym_pause(phydev); 1824 1825 /* disable EEE autoneg, EEE not supported by eTSEC */ 1826 memset(&edata, 0, sizeof(struct ethtool_eee)); 1827 phy_ethtool_set_eee(phydev, &edata); 1828 1829 return 0; 1830 } 1831 1832 /* Initialize TBI PHY interface for communicating with the 1833 * SERDES lynx PHY on the chip. We communicate with this PHY 1834 * through the MDIO bus on each controller, treating it as a 1835 * "normal" PHY at the address found in the TBIPA register. We assume 1836 * that the TBIPA register is valid. Either the MDIO bus code will set 1837 * it to a value that doesn't conflict with other PHYs on the bus, or the 1838 * value doesn't matter, as there are no other PHYs on the bus. 1839 */ 1840 static void gfar_configure_serdes(struct net_device *dev) 1841 { 1842 struct gfar_private *priv = netdev_priv(dev); 1843 struct phy_device *tbiphy; 1844 1845 if (!priv->tbi_node) { 1846 dev_warn(&dev->dev, "error: SGMII mode requires that the " 1847 "device tree specify a tbi-handle\n"); 1848 return; 1849 } 1850 1851 tbiphy = of_phy_find_device(priv->tbi_node); 1852 if (!tbiphy) { 1853 dev_err(&dev->dev, "error: Could not get TBI device\n"); 1854 return; 1855 } 1856 1857 /* If the link is already up, we must already be ok, and don't need to 1858 * configure and reset the TBI<->SerDes link. Maybe U-Boot configured 1859 * everything for us? Resetting it takes the link down and requires 1860 * several seconds for it to come back. 1861 */ 1862 if (phy_read(tbiphy, MII_BMSR) & BMSR_LSTATUS) { 1863 put_device(&tbiphy->mdio.dev); 1864 return; 1865 } 1866 1867 /* Single clk mode, mii mode off(for serdes communication) */ 1868 phy_write(tbiphy, MII_TBICON, TBICON_CLK_SELECT); 1869 1870 phy_write(tbiphy, MII_ADVERTISE, 1871 ADVERTISE_1000XFULL | ADVERTISE_1000XPAUSE | 1872 ADVERTISE_1000XPSE_ASYM); 1873 1874 phy_write(tbiphy, MII_BMCR, 1875 BMCR_ANENABLE | BMCR_ANRESTART | BMCR_FULLDPLX | 1876 BMCR_SPEED1000); 1877 1878 put_device(&tbiphy->mdio.dev); 1879 } 1880 1881 static int __gfar_is_rx_idle(struct gfar_private *priv) 1882 { 1883 u32 res; 1884 1885 /* Normaly TSEC should not hang on GRS commands, so we should 1886 * actually wait for IEVENT_GRSC flag. 
1887 */ 1888 if (!gfar_has_errata(priv, GFAR_ERRATA_A002)) 1889 return 0; 1890 1891 /* Read the eTSEC register at offset 0xD1C. If bits 7-14 are 1892 * the same as bits 23-30, the eTSEC Rx is assumed to be idle 1893 * and the Rx can be safely reset. 1894 */ 1895 res = gfar_read((void __iomem *)priv->gfargrp[0].regs + 0xd1c); 1896 res &= 0x7f807f80; 1897 if ((res & 0xffff) == (res >> 16)) 1898 return 1; 1899 1900 return 0; 1901 } 1902 1903 /* Halt the receive and transmit queues */ 1904 static void gfar_halt_nodisable(struct gfar_private *priv) 1905 { 1906 struct gfar __iomem *regs = priv->gfargrp[0].regs; 1907 u32 tempval; 1908 unsigned int timeout; 1909 int stopped; 1910 1911 gfar_ints_disable(priv); 1912 1913 if (gfar_is_dma_stopped(priv)) 1914 return; 1915 1916 /* Stop the DMA, and wait for it to stop */ 1917 tempval = gfar_read(®s->dmactrl); 1918 tempval |= (DMACTRL_GRS | DMACTRL_GTS); 1919 gfar_write(®s->dmactrl, tempval); 1920 1921 retry: 1922 timeout = 1000; 1923 while (!(stopped = gfar_is_dma_stopped(priv)) && timeout) { 1924 cpu_relax(); 1925 timeout--; 1926 } 1927 1928 if (!timeout) 1929 stopped = gfar_is_dma_stopped(priv); 1930 1931 if (!stopped && !gfar_is_rx_dma_stopped(priv) && 1932 !__gfar_is_rx_idle(priv)) 1933 goto retry; 1934 } 1935 1936 /* Halt the receive and transmit queues */ 1937 void gfar_halt(struct gfar_private *priv) 1938 { 1939 struct gfar __iomem *regs = priv->gfargrp[0].regs; 1940 u32 tempval; 1941 1942 /* Dissable the Rx/Tx hw queues */ 1943 gfar_write(®s->rqueue, 0); 1944 gfar_write(®s->tqueue, 0); 1945 1946 mdelay(10); 1947 1948 gfar_halt_nodisable(priv); 1949 1950 /* Disable Rx/Tx DMA */ 1951 tempval = gfar_read(®s->maccfg1); 1952 tempval &= ~(MACCFG1_RX_EN | MACCFG1_TX_EN); 1953 gfar_write(®s->maccfg1, tempval); 1954 } 1955 1956 void stop_gfar(struct net_device *dev) 1957 { 1958 struct gfar_private *priv = netdev_priv(dev); 1959 1960 netif_tx_stop_all_queues(dev); 1961 1962 smp_mb__before_atomic(); 1963 set_bit(GFAR_DOWN, &priv->state); 1964 smp_mb__after_atomic(); 1965 1966 disable_napi(priv); 1967 1968 /* disable ints and gracefully shut down Rx/Tx DMA */ 1969 gfar_halt(priv); 1970 1971 phy_stop(dev->phydev); 1972 1973 free_skb_resources(priv); 1974 } 1975 1976 static void free_skb_tx_queue(struct gfar_priv_tx_q *tx_queue) 1977 { 1978 struct txbd8 *txbdp; 1979 struct gfar_private *priv = netdev_priv(tx_queue->dev); 1980 int i, j; 1981 1982 txbdp = tx_queue->tx_bd_base; 1983 1984 for (i = 0; i < tx_queue->tx_ring_size; i++) { 1985 if (!tx_queue->tx_skbuff[i]) 1986 continue; 1987 1988 dma_unmap_single(priv->dev, be32_to_cpu(txbdp->bufPtr), 1989 be16_to_cpu(txbdp->length), DMA_TO_DEVICE); 1990 txbdp->lstatus = 0; 1991 for (j = 0; j < skb_shinfo(tx_queue->tx_skbuff[i])->nr_frags; 1992 j++) { 1993 txbdp++; 1994 dma_unmap_page(priv->dev, be32_to_cpu(txbdp->bufPtr), 1995 be16_to_cpu(txbdp->length), 1996 DMA_TO_DEVICE); 1997 } 1998 txbdp++; 1999 dev_kfree_skb_any(tx_queue->tx_skbuff[i]); 2000 tx_queue->tx_skbuff[i] = NULL; 2001 } 2002 kfree(tx_queue->tx_skbuff); 2003 tx_queue->tx_skbuff = NULL; 2004 } 2005 2006 static void free_skb_rx_queue(struct gfar_priv_rx_q *rx_queue) 2007 { 2008 int i; 2009 2010 struct rxbd8 *rxbdp = rx_queue->rx_bd_base; 2011 2012 if (rx_queue->skb) 2013 dev_kfree_skb(rx_queue->skb); 2014 2015 for (i = 0; i < rx_queue->rx_ring_size; i++) { 2016 struct gfar_rx_buff *rxb = &rx_queue->rx_buff[i]; 2017 2018 rxbdp->lstatus = 0; 2019 rxbdp->bufPtr = 0; 2020 rxbdp++; 2021 2022 if (!rxb->page) 2023 continue; 2024 2025 
dma_unmap_page(rx_queue->dev, rxb->dma, 2026 PAGE_SIZE, DMA_FROM_DEVICE); 2027 __free_page(rxb->page); 2028 2029 rxb->page = NULL; 2030 } 2031 2032 kfree(rx_queue->rx_buff); 2033 rx_queue->rx_buff = NULL; 2034 } 2035 2036 /* If there are any tx skbs or rx skbs still around, free them. 2037 * Then free tx_skbuff and rx_skbuff 2038 */ 2039 static void free_skb_resources(struct gfar_private *priv) 2040 { 2041 struct gfar_priv_tx_q *tx_queue = NULL; 2042 struct gfar_priv_rx_q *rx_queue = NULL; 2043 int i; 2044 2045 /* Go through all the buffer descriptors and free their data buffers */ 2046 for (i = 0; i < priv->num_tx_queues; i++) { 2047 struct netdev_queue *txq; 2048 2049 tx_queue = priv->tx_queue[i]; 2050 txq = netdev_get_tx_queue(tx_queue->dev, tx_queue->qindex); 2051 if (tx_queue->tx_skbuff) 2052 free_skb_tx_queue(tx_queue); 2053 netdev_tx_reset_queue(txq); 2054 } 2055 2056 for (i = 0; i < priv->num_rx_queues; i++) { 2057 rx_queue = priv->rx_queue[i]; 2058 if (rx_queue->rx_buff) 2059 free_skb_rx_queue(rx_queue); 2060 } 2061 2062 dma_free_coherent(priv->dev, 2063 sizeof(struct txbd8) * priv->total_tx_ring_size + 2064 sizeof(struct rxbd8) * priv->total_rx_ring_size, 2065 priv->tx_queue[0]->tx_bd_base, 2066 priv->tx_queue[0]->tx_bd_dma_base); 2067 } 2068 2069 void gfar_start(struct gfar_private *priv) 2070 { 2071 struct gfar __iomem *regs = priv->gfargrp[0].regs; 2072 u32 tempval; 2073 int i = 0; 2074 2075 /* Enable Rx/Tx hw queues */ 2076 gfar_write(&regs->rqueue, priv->rqueue); 2077 gfar_write(&regs->tqueue, priv->tqueue); 2078 2079 /* Initialize DMACTRL to have WWR and WOP */ 2080 tempval = gfar_read(&regs->dmactrl); 2081 tempval |= DMACTRL_INIT_SETTINGS; 2082 gfar_write(&regs->dmactrl, tempval); 2083 2084 /* Make sure we aren't stopped */ 2085 tempval = gfar_read(&regs->dmactrl); 2086 tempval &= ~(DMACTRL_GRS | DMACTRL_GTS); 2087 gfar_write(&regs->dmactrl, tempval); 2088 2089 for (i = 0; i < priv->num_grps; i++) { 2090 regs = priv->gfargrp[i].regs; 2091 /* Clear THLT/RHLT, so that the DMA starts polling now */ 2092 gfar_write(&regs->tstat, priv->gfargrp[i].tstat); 2093 gfar_write(&regs->rstat, priv->gfargrp[i].rstat); 2094 } 2095 2096 /* Enable Rx/Tx DMA */ 2097 tempval = gfar_read(&regs->maccfg1); 2098 tempval |= (MACCFG1_RX_EN | MACCFG1_TX_EN); 2099 gfar_write(&regs->maccfg1, tempval); 2100 2101 gfar_ints_enable(priv); 2102 2103 netif_trans_update(priv->ndev); /* prevent tx timeout */ 2104 } 2105 2106 static void free_grp_irqs(struct gfar_priv_grp *grp) 2107 { 2108 free_irq(gfar_irq(grp, TX)->irq, grp); 2109 free_irq(gfar_irq(grp, RX)->irq, grp); 2110 free_irq(gfar_irq(grp, ER)->irq, grp); 2111 } 2112 2113 static int register_grp_irqs(struct gfar_priv_grp *grp) 2114 { 2115 struct gfar_private *priv = grp->priv; 2116 struct net_device *dev = priv->ndev; 2117 int err; 2118 2119 /* If the device has multiple interrupts, register for 2120 * them.
Otherwise, only register for the one 2121 */ 2122 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) { 2123 /* Install our interrupt handlers for Error, 2124 * Transmit, and Receive 2125 */ 2126 err = request_irq(gfar_irq(grp, ER)->irq, gfar_error, 0, 2127 gfar_irq(grp, ER)->name, grp); 2128 if (err < 0) { 2129 netif_err(priv, intr, dev, "Can't get IRQ %d\n", 2130 gfar_irq(grp, ER)->irq); 2131 2132 goto err_irq_fail; 2133 } 2134 enable_irq_wake(gfar_irq(grp, ER)->irq); 2135 2136 err = request_irq(gfar_irq(grp, TX)->irq, gfar_transmit, 0, 2137 gfar_irq(grp, TX)->name, grp); 2138 if (err < 0) { 2139 netif_err(priv, intr, dev, "Can't get IRQ %d\n", 2140 gfar_irq(grp, TX)->irq); 2141 goto tx_irq_fail; 2142 } 2143 err = request_irq(gfar_irq(grp, RX)->irq, gfar_receive, 0, 2144 gfar_irq(grp, RX)->name, grp); 2145 if (err < 0) { 2146 netif_err(priv, intr, dev, "Can't get IRQ %d\n", 2147 gfar_irq(grp, RX)->irq); 2148 goto rx_irq_fail; 2149 } 2150 enable_irq_wake(gfar_irq(grp, RX)->irq); 2151 2152 } else { 2153 err = request_irq(gfar_irq(grp, TX)->irq, gfar_interrupt, 0, 2154 gfar_irq(grp, TX)->name, grp); 2155 if (err < 0) { 2156 netif_err(priv, intr, dev, "Can't get IRQ %d\n", 2157 gfar_irq(grp, TX)->irq); 2158 goto err_irq_fail; 2159 } 2160 enable_irq_wake(gfar_irq(grp, TX)->irq); 2161 } 2162 2163 return 0; 2164 2165 rx_irq_fail: 2166 free_irq(gfar_irq(grp, TX)->irq, grp); 2167 tx_irq_fail: 2168 free_irq(gfar_irq(grp, ER)->irq, grp); 2169 err_irq_fail: 2170 return err; 2171 2172 } 2173 2174 static void gfar_free_irq(struct gfar_private *priv) 2175 { 2176 int i; 2177 2178 /* Free the IRQs */ 2179 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) { 2180 for (i = 0; i < priv->num_grps; i++) 2181 free_grp_irqs(&priv->gfargrp[i]); 2182 } else { 2183 for (i = 0; i < priv->num_grps; i++) 2184 free_irq(gfar_irq(&priv->gfargrp[i], TX)->irq, 2185 &priv->gfargrp[i]); 2186 } 2187 } 2188 2189 static int gfar_request_irq(struct gfar_private *priv) 2190 { 2191 int err, i, j; 2192 2193 for (i = 0; i < priv->num_grps; i++) { 2194 err = register_grp_irqs(&priv->gfargrp[i]); 2195 if (err) { 2196 for (j = 0; j < i; j++) 2197 free_grp_irqs(&priv->gfargrp[j]); 2198 return err; 2199 } 2200 } 2201 2202 return 0; 2203 } 2204 2205 /* Bring the controller up and running */ 2206 int startup_gfar(struct net_device *ndev) 2207 { 2208 struct gfar_private *priv = netdev_priv(ndev); 2209 int err; 2210 2211 gfar_mac_reset(priv); 2212 2213 err = gfar_alloc_skb_resources(ndev); 2214 if (err) 2215 return err; 2216 2217 gfar_init_tx_rx_base(priv); 2218 2219 smp_mb__before_atomic(); 2220 clear_bit(GFAR_DOWN, &priv->state); 2221 smp_mb__after_atomic(); 2222 2223 /* Start Rx/Tx DMA and enable the interrupts */ 2224 gfar_start(priv); 2225 2226 /* force link state update after mac reset */ 2227 priv->oldlink = 0; 2228 priv->oldspeed = 0; 2229 priv->oldduplex = -1; 2230 2231 phy_start(ndev->phydev); 2232 2233 enable_napi(priv); 2234 2235 netif_tx_wake_all_queues(ndev); 2236 2237 return 0; 2238 } 2239 2240 /* Called when something needs to use the ethernet device 2241 * Returns 0 for success. 
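 * The open path below attaches the PHY via init_phy(), requests the
 * per-group interrupt lines, and only then allocates the descriptor
 * rings and starts DMA through startup_gfar().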
2242 */ 2243 static int gfar_enet_open(struct net_device *dev) 2244 { 2245 struct gfar_private *priv = netdev_priv(dev); 2246 int err; 2247 2248 err = init_phy(dev); 2249 if (err) 2250 return err; 2251 2252 err = gfar_request_irq(priv); 2253 if (err) 2254 return err; 2255 2256 err = startup_gfar(dev); 2257 if (err) 2258 return err; 2259 2260 return err; 2261 } 2262 2263 static inline struct txfcb *gfar_add_fcb(struct sk_buff *skb) 2264 { 2265 struct txfcb *fcb = skb_push(skb, GMAC_FCB_LEN); 2266 2267 memset(fcb, 0, GMAC_FCB_LEN); 2268 2269 return fcb; 2270 } 2271 2272 static inline void gfar_tx_checksum(struct sk_buff *skb, struct txfcb *fcb, 2273 int fcb_length) 2274 { 2275 /* If we're here, it's an IP packet with a TCP or UDP 2276 * payload. We set it to checksum, using a pseudo-header 2277 * we provide 2278 */ 2279 u8 flags = TXFCB_DEFAULT; 2280 2281 /* Tell the controller what the protocol is 2282 * and provide the already calculated phcs 2283 */ 2284 if (ip_hdr(skb)->protocol == IPPROTO_UDP) { 2285 flags |= TXFCB_UDP; 2286 fcb->phcs = (__force __be16)(udp_hdr(skb)->check); 2287 } else 2288 fcb->phcs = (__force __be16)(tcp_hdr(skb)->check); 2289 2290 /* l3os is the distance between the start of the 2291 * frame (skb->data) and the start of the IP hdr. 2292 * l4os is the distance between the start of the 2293 * l3 hdr and the l4 hdr 2294 */ 2295 fcb->l3os = (u8)(skb_network_offset(skb) - fcb_length); 2296 fcb->l4os = skb_network_header_len(skb); 2297 2298 fcb->flags = flags; 2299 } 2300 2301 static inline void gfar_tx_vlan(struct sk_buff *skb, struct txfcb *fcb) 2302 { 2303 fcb->flags |= TXFCB_VLN; 2304 fcb->vlctl = cpu_to_be16(skb_vlan_tag_get(skb)); 2305 } 2306 2307 static inline struct txbd8 *skip_txbd(struct txbd8 *bdp, int stride, 2308 struct txbd8 *base, int ring_size) 2309 { 2310 struct txbd8 *new_bd = bdp + stride; 2311 2312 return (new_bd >= (base + ring_size)) ? (new_bd - ring_size) : new_bd; 2313 } 2314 2315 static inline struct txbd8 *next_txbd(struct txbd8 *bdp, struct txbd8 *base, 2316 int ring_size) 2317 { 2318 return skip_txbd(bdp, 1, base, ring_size); 2319 } 2320 2321 /* eTSEC12: csum generation not supported for some fcb offsets */ 2322 static inline bool gfar_csum_errata_12(struct gfar_private *priv, 2323 unsigned long fcb_addr) 2324 { 2325 return (gfar_has_errata(priv, GFAR_ERRATA_12) && 2326 (fcb_addr % 0x20) > 0x18); 2327 } 2328 2329 /* eTSEC76: csum generation for frames larger than 2500 bytes may 2330 * cause excess delays before start of transmission 2331 */ 2332 static inline bool gfar_csum_errata_76(struct gfar_private *priv, 2333 unsigned int len) 2334 { 2335 return (gfar_has_errata(priv, GFAR_ERRATA_76) && 2336 (len > 2500)); 2337 } 2338 2339 /* This is called by the kernel when a frame is ready for transmission.
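 * A single frame may consume several TxBDs: one for the linear part of
 * the skb, one per page fragment, and one extra descriptor when hardware
 * time stamping is requested, so the FCB and the payload can be
 * described separately.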
2340 * It is pointed to by the dev->hard_start_xmit function pointer 2341 */ 2342 static netdev_tx_t gfar_start_xmit(struct sk_buff *skb, struct net_device *dev) 2343 { 2344 struct gfar_private *priv = netdev_priv(dev); 2345 struct gfar_priv_tx_q *tx_queue = NULL; 2346 struct netdev_queue *txq; 2347 struct gfar __iomem *regs = NULL; 2348 struct txfcb *fcb = NULL; 2349 struct txbd8 *txbdp, *txbdp_start, *base, *txbdp_tstamp = NULL; 2350 u32 lstatus; 2351 skb_frag_t *frag; 2352 int i, rq = 0; 2353 int do_tstamp, do_csum, do_vlan; 2354 u32 bufaddr; 2355 unsigned int nr_frags, nr_txbds, bytes_sent, fcb_len = 0; 2356 2357 rq = skb->queue_mapping; 2358 tx_queue = priv->tx_queue[rq]; 2359 txq = netdev_get_tx_queue(dev, rq); 2360 base = tx_queue->tx_bd_base; 2361 regs = tx_queue->grp->regs; 2362 2363 do_csum = (CHECKSUM_PARTIAL == skb->ip_summed); 2364 do_vlan = skb_vlan_tag_present(skb); 2365 do_tstamp = (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && 2366 priv->hwts_tx_en; 2367 2368 if (do_csum || do_vlan) 2369 fcb_len = GMAC_FCB_LEN; 2370 2371 /* check if time stamp should be generated */ 2372 if (unlikely(do_tstamp)) 2373 fcb_len = GMAC_FCB_LEN + GMAC_TXPAL_LEN; 2374 2375 /* make space for additional header when fcb is needed */ 2376 if (fcb_len && unlikely(skb_headroom(skb) < fcb_len)) { 2377 struct sk_buff *skb_new; 2378 2379 skb_new = skb_realloc_headroom(skb, fcb_len); 2380 if (!skb_new) { 2381 dev->stats.tx_errors++; 2382 dev_kfree_skb_any(skb); 2383 return NETDEV_TX_OK; 2384 } 2385 2386 if (skb->sk) 2387 skb_set_owner_w(skb_new, skb->sk); 2388 dev_consume_skb_any(skb); 2389 skb = skb_new; 2390 } 2391 2392 /* total number of fragments in the SKB */ 2393 nr_frags = skb_shinfo(skb)->nr_frags; 2394 2395 /* calculate the required number of TxBDs for this skb */ 2396 if (unlikely(do_tstamp)) 2397 nr_txbds = nr_frags + 2; 2398 else 2399 nr_txbds = nr_frags + 1; 2400 2401 /* check if there is space to queue this packet */ 2402 if (nr_txbds > tx_queue->num_txbdfree) { 2403 /* no space, stop the queue */ 2404 netif_tx_stop_queue(txq); 2405 dev->stats.tx_fifo_errors++; 2406 return NETDEV_TX_BUSY; 2407 } 2408 2409 /* Update transmit stats */ 2410 bytes_sent = skb->len; 2411 tx_queue->stats.tx_bytes += bytes_sent; 2412 /* keep Tx bytes on wire for BQL accounting */ 2413 GFAR_CB(skb)->bytes_sent = bytes_sent; 2414 tx_queue->stats.tx_packets++; 2415 2416 txbdp = txbdp_start = tx_queue->cur_tx; 2417 lstatus = be32_to_cpu(txbdp->lstatus); 2418 2419 /* Add TxPAL between FCB and frame if required */ 2420 if (unlikely(do_tstamp)) { 2421 skb_push(skb, GMAC_TXPAL_LEN); 2422 memset(skb->data, 0, GMAC_TXPAL_LEN); 2423 } 2424 2425 /* Add TxFCB if required */ 2426 if (fcb_len) { 2427 fcb = gfar_add_fcb(skb); 2428 lstatus |= BD_LFLAG(TXBD_TOE); 2429 } 2430 2431 /* Set up checksumming */ 2432 if (do_csum) { 2433 gfar_tx_checksum(skb, fcb, fcb_len); 2434 2435 if (unlikely(gfar_csum_errata_12(priv, (unsigned long)fcb)) || 2436 unlikely(gfar_csum_errata_76(priv, skb->len))) { 2437 __skb_pull(skb, GMAC_FCB_LEN); 2438 skb_checksum_help(skb); 2439 if (do_vlan || do_tstamp) { 2440 /* put back a new fcb for vlan/tstamp TOE */ 2441 fcb = gfar_add_fcb(skb); 2442 } else { 2443 /* Tx TOE not used */ 2444 lstatus &= ~(BD_LFLAG(TXBD_TOE)); 2445 fcb = NULL; 2446 } 2447 } 2448 } 2449 2450 if (do_vlan) 2451 gfar_tx_vlan(skb, fcb); 2452 2453 bufaddr = dma_map_single(priv->dev, skb->data, skb_headlen(skb), 2454 DMA_TO_DEVICE); 2455 if (unlikely(dma_mapping_error(priv->dev, bufaddr))) 2456 goto dma_map_err; 2457 2458 
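	/* Record the DMA address of the linear buffer in the first BD now;
	 * its length and READY bit are written only at the very end, after
	 * the gfar_wmb() barrier, so the controller never sees a
	 * half-initialized descriptor.
	 */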
txbdp_start->bufPtr = cpu_to_be32(bufaddr); 2459 2460 /* Time stamp insertion requires one additional TxBD */ 2461 if (unlikely(do_tstamp)) 2462 txbdp_tstamp = txbdp = next_txbd(txbdp, base, 2463 tx_queue->tx_ring_size); 2464 2465 if (likely(!nr_frags)) { 2466 if (likely(!do_tstamp)) 2467 lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT); 2468 } else { 2469 u32 lstatus_start = lstatus; 2470 2471 /* Place the fragment addresses and lengths into the TxBDs */ 2472 frag = &skb_shinfo(skb)->frags[0]; 2473 for (i = 0; i < nr_frags; i++, frag++) { 2474 unsigned int size; 2475 2476 /* Point at the next BD, wrapping as needed */ 2477 txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size); 2478 2479 size = skb_frag_size(frag); 2480 2481 lstatus = be32_to_cpu(txbdp->lstatus) | size | 2482 BD_LFLAG(TXBD_READY); 2483 2484 /* Handle the last BD specially */ 2485 if (i == nr_frags - 1) 2486 lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT); 2487 2488 bufaddr = skb_frag_dma_map(priv->dev, frag, 0, 2489 size, DMA_TO_DEVICE); 2490 if (unlikely(dma_mapping_error(priv->dev, bufaddr))) 2491 goto dma_map_err; 2492 2493 /* set the TxBD length and buffer pointer */ 2494 txbdp->bufPtr = cpu_to_be32(bufaddr); 2495 txbdp->lstatus = cpu_to_be32(lstatus); 2496 } 2497 2498 lstatus = lstatus_start; 2499 } 2500 2501 /* If time stamping is requested one additional TxBD must be set up. The 2502 * first TxBD points to the FCB and must have a data length of 2503 * GMAC_FCB_LEN. The second TxBD points to the actual frame data with 2504 * the full frame length. 2505 */ 2506 if (unlikely(do_tstamp)) { 2507 u32 lstatus_ts = be32_to_cpu(txbdp_tstamp->lstatus); 2508 2509 bufaddr = be32_to_cpu(txbdp_start->bufPtr); 2510 bufaddr += fcb_len; 2511 2512 lstatus_ts |= BD_LFLAG(TXBD_READY) | 2513 (skb_headlen(skb) - fcb_len); 2514 if (!nr_frags) 2515 lstatus_ts |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT); 2516 2517 txbdp_tstamp->bufPtr = cpu_to_be32(bufaddr); 2518 txbdp_tstamp->lstatus = cpu_to_be32(lstatus_ts); 2519 lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | GMAC_FCB_LEN; 2520 2521 /* Setup tx hardware time stamping */ 2522 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; 2523 fcb->ptp = 1; 2524 } else { 2525 lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | skb_headlen(skb); 2526 } 2527 2528 netdev_tx_sent_queue(txq, bytes_sent); 2529 2530 gfar_wmb(); 2531 2532 txbdp_start->lstatus = cpu_to_be32(lstatus); 2533 2534 gfar_wmb(); /* force lstatus write before tx_skbuff */ 2535 2536 tx_queue->tx_skbuff[tx_queue->skb_curtx] = skb; 2537 2538 /* Update the current skb pointer to the next entry we will use 2539 * (wrapping if necessary) 2540 */ 2541 tx_queue->skb_curtx = (tx_queue->skb_curtx + 1) & 2542 TX_RING_MOD_MASK(tx_queue->tx_ring_size); 2543 2544 tx_queue->cur_tx = next_txbd(txbdp, base, tx_queue->tx_ring_size); 2545 2546 /* We can work in parallel with gfar_clean_tx_ring(), except 2547 * when modifying num_txbdfree. Note that we didn't grab the lock 2548 * when we were reading the num_txbdfree and checking for available 2549 * space, that's because outside of this function it can only grow. 2550 */ 2551 spin_lock_bh(&tx_queue->txlock); 2552 /* reduce TxBD free count */ 2553 tx_queue->num_txbdfree -= (nr_txbds); 2554 spin_unlock_bh(&tx_queue->txlock); 2555 2556 /* If the next BD still needs to be cleaned up, then the bds 2557 * are full. We need to tell the kernel to stop sending us stuff. 
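 * gfar_clean_tx_ring() wakes the queue again once descriptors have been
 * reclaimed.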
2558 */ 2559 if (!tx_queue->num_txbdfree) { 2560 netif_tx_stop_queue(txq); 2561 2562 dev->stats.tx_fifo_errors++; 2563 } 2564 2565 /* Tell the DMA to go go go */ 2566 gfar_write(&regs->tstat, TSTAT_CLEAR_THALT >> tx_queue->qindex); 2567 2568 return NETDEV_TX_OK; 2569 2570 dma_map_err: 2571 txbdp = next_txbd(txbdp_start, base, tx_queue->tx_ring_size); 2572 if (do_tstamp) 2573 txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size); 2574 for (i = 0; i < nr_frags; i++) { 2575 lstatus = be32_to_cpu(txbdp->lstatus); 2576 if (!(lstatus & BD_LFLAG(TXBD_READY))) 2577 break; 2578 2579 lstatus &= ~BD_LFLAG(TXBD_READY); 2580 txbdp->lstatus = cpu_to_be32(lstatus); 2581 bufaddr = be32_to_cpu(txbdp->bufPtr); 2582 dma_unmap_page(priv->dev, bufaddr, be16_to_cpu(txbdp->length), 2583 DMA_TO_DEVICE); 2584 txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size); 2585 } 2586 gfar_wmb(); 2587 dev_kfree_skb_any(skb); 2588 return NETDEV_TX_OK; 2589 } 2590 2591 /* Stops the kernel queue, and halts the controller */ 2592 static int gfar_close(struct net_device *dev) 2593 { 2594 struct gfar_private *priv = netdev_priv(dev); 2595 2596 cancel_work_sync(&priv->reset_task); 2597 stop_gfar(dev); 2598 2599 /* Disconnect from the PHY */ 2600 phy_disconnect(dev->phydev); 2601 2602 gfar_free_irq(priv); 2603 2604 return 0; 2605 } 2606 2607 /* Changes the mac address if the controller is not running. */ 2608 static int gfar_set_mac_address(struct net_device *dev) 2609 { 2610 gfar_set_mac_for_addr(dev, 0, dev->dev_addr); 2611 2612 return 0; 2613 } 2614 2615 static int gfar_change_mtu(struct net_device *dev, int new_mtu) 2616 { 2617 struct gfar_private *priv = netdev_priv(dev); 2618 2619 while (test_and_set_bit_lock(GFAR_RESETTING, &priv->state)) 2620 cpu_relax(); 2621 2622 if (dev->flags & IFF_UP) 2623 stop_gfar(dev); 2624 2625 dev->mtu = new_mtu; 2626 2627 if (dev->flags & IFF_UP) 2628 startup_gfar(dev); 2629 2630 clear_bit_unlock(GFAR_RESETTING, &priv->state); 2631 2632 return 0; 2633 } 2634 2635 void reset_gfar(struct net_device *ndev) 2636 { 2637 struct gfar_private *priv = netdev_priv(ndev); 2638 2639 while (test_and_set_bit_lock(GFAR_RESETTING, &priv->state)) 2640 cpu_relax(); 2641 2642 stop_gfar(ndev); 2643 startup_gfar(ndev); 2644 2645 clear_bit_unlock(GFAR_RESETTING, &priv->state); 2646 } 2647 2648 /* gfar_reset_task gets scheduled when a packet has not been 2649 * transmitted after a set amount of time. 2650 * For now, assume that clearing out all the structures, and 2651 * starting over will fix the problem.
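 * reset_gfar() does exactly that: it takes the GFAR_RESETTING bit, then
 * runs stop_gfar() followed by startup_gfar().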
2652 */ 2653 static void gfar_reset_task(struct work_struct *work) 2654 { 2655 struct gfar_private *priv = container_of(work, struct gfar_private, 2656 reset_task); 2657 reset_gfar(priv->ndev); 2658 } 2659 2660 static void gfar_timeout(struct net_device *dev) 2661 { 2662 struct gfar_private *priv = netdev_priv(dev); 2663 2664 dev->stats.tx_errors++; 2665 schedule_work(&priv->reset_task); 2666 } 2667 2668 /* Interrupt Handler for Transmit complete */ 2669 static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue) 2670 { 2671 struct net_device *dev = tx_queue->dev; 2672 struct netdev_queue *txq; 2673 struct gfar_private *priv = netdev_priv(dev); 2674 struct txbd8 *bdp, *next = NULL; 2675 struct txbd8 *lbdp = NULL; 2676 struct txbd8 *base = tx_queue->tx_bd_base; 2677 struct sk_buff *skb; 2678 int skb_dirtytx; 2679 int tx_ring_size = tx_queue->tx_ring_size; 2680 int frags = 0, nr_txbds = 0; 2681 int i; 2682 int howmany = 0; 2683 int tqi = tx_queue->qindex; 2684 unsigned int bytes_sent = 0; 2685 u32 lstatus; 2686 size_t buflen; 2687 2688 txq = netdev_get_tx_queue(dev, tqi); 2689 bdp = tx_queue->dirty_tx; 2690 skb_dirtytx = tx_queue->skb_dirtytx; 2691 2692 while ((skb = tx_queue->tx_skbuff[skb_dirtytx])) { 2693 2694 frags = skb_shinfo(skb)->nr_frags; 2695 2696 /* When time stamping, one additional TxBD must be freed. 2697 * Also, we need to dma_unmap_single() the TxPAL. 2698 */ 2699 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) 2700 nr_txbds = frags + 2; 2701 else 2702 nr_txbds = frags + 1; 2703 2704 lbdp = skip_txbd(bdp, nr_txbds - 1, base, tx_ring_size); 2705 2706 lstatus = be32_to_cpu(lbdp->lstatus); 2707 2708 /* Only clean completed frames */ 2709 if ((lstatus & BD_LFLAG(TXBD_READY)) && 2710 (lstatus & BD_LENGTH_MASK)) 2711 break; 2712 2713 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) { 2714 next = next_txbd(bdp, base, tx_ring_size); 2715 buflen = be16_to_cpu(next->length) + 2716 GMAC_FCB_LEN + GMAC_TXPAL_LEN; 2717 } else 2718 buflen = be16_to_cpu(bdp->length); 2719 2720 dma_unmap_single(priv->dev, be32_to_cpu(bdp->bufPtr), 2721 buflen, DMA_TO_DEVICE); 2722 2723 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) { 2724 struct skb_shared_hwtstamps shhwtstamps; 2725 u64 *ns = (u64 *)(((uintptr_t)skb->data + 0x10) & 2726 ~0x7UL); 2727 2728 memset(&shhwtstamps, 0, sizeof(shhwtstamps)); 2729 shhwtstamps.hwtstamp = ns_to_ktime(be64_to_cpu(*ns)); 2730 skb_pull(skb, GMAC_FCB_LEN + GMAC_TXPAL_LEN); 2731 skb_tstamp_tx(skb, &shhwtstamps); 2732 gfar_clear_txbd_status(bdp); 2733 bdp = next; 2734 } 2735 2736 gfar_clear_txbd_status(bdp); 2737 bdp = next_txbd(bdp, base, tx_ring_size); 2738 2739 for (i = 0; i < frags; i++) { 2740 dma_unmap_page(priv->dev, be32_to_cpu(bdp->bufPtr), 2741 be16_to_cpu(bdp->length), 2742 DMA_TO_DEVICE); 2743 gfar_clear_txbd_status(bdp); 2744 bdp = next_txbd(bdp, base, tx_ring_size); 2745 } 2746 2747 bytes_sent += GFAR_CB(skb)->bytes_sent; 2748 2749 dev_kfree_skb_any(skb); 2750 2751 tx_queue->tx_skbuff[skb_dirtytx] = NULL; 2752 2753 skb_dirtytx = (skb_dirtytx + 1) & 2754 TX_RING_MOD_MASK(tx_ring_size); 2755 2756 howmany++; 2757 spin_lock(&tx_queue->txlock); 2758 tx_queue->num_txbdfree += nr_txbds; 2759 spin_unlock(&tx_queue->txlock); 2760 } 2761 2762 /* If we freed a buffer, we can restart transmission, if necessary */ 2763 if (tx_queue->num_txbdfree && 2764 netif_tx_queue_stopped(txq) && 2765 !(test_bit(GFAR_DOWN, &priv->state))) 2766 netif_wake_subqueue(priv->ndev, tqi); 2767 2768 /* Update dirty indicators */ 2769 
tx_queue->skb_dirtytx = skb_dirtytx; 2770 tx_queue->dirty_tx = bdp; 2771 2772 netdev_tx_completed_queue(txq, howmany, bytes_sent); 2773 } 2774 2775 static bool gfar_new_page(struct gfar_priv_rx_q *rxq, struct gfar_rx_buff *rxb) 2776 { 2777 struct page *page; 2778 dma_addr_t addr; 2779 2780 page = dev_alloc_page(); 2781 if (unlikely(!page)) 2782 return false; 2783 2784 addr = dma_map_page(rxq->dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE); 2785 if (unlikely(dma_mapping_error(rxq->dev, addr))) { 2786 __free_page(page); 2787 2788 return false; 2789 } 2790 2791 rxb->dma = addr; 2792 rxb->page = page; 2793 rxb->page_offset = 0; 2794 2795 return true; 2796 } 2797 2798 static void gfar_rx_alloc_err(struct gfar_priv_rx_q *rx_queue) 2799 { 2800 struct gfar_private *priv = netdev_priv(rx_queue->ndev); 2801 struct gfar_extra_stats *estats = &priv->extra_stats; 2802 2803 netdev_err(rx_queue->ndev, "Can't alloc RX buffers\n"); 2804 atomic64_inc(&estats->rx_alloc_err); 2805 } 2806 2807 static void gfar_alloc_rx_buffs(struct gfar_priv_rx_q *rx_queue, 2808 int alloc_cnt) 2809 { 2810 struct rxbd8 *bdp; 2811 struct gfar_rx_buff *rxb; 2812 int i; 2813 2814 i = rx_queue->next_to_use; 2815 bdp = &rx_queue->rx_bd_base[i]; 2816 rxb = &rx_queue->rx_buff[i]; 2817 2818 while (alloc_cnt--) { 2819 /* try reuse page */ 2820 if (unlikely(!rxb->page)) { 2821 if (unlikely(!gfar_new_page(rx_queue, rxb))) { 2822 gfar_rx_alloc_err(rx_queue); 2823 break; 2824 } 2825 } 2826 2827 /* Setup the new RxBD */ 2828 gfar_init_rxbdp(rx_queue, bdp, 2829 rxb->dma + rxb->page_offset + RXBUF_ALIGNMENT); 2830 2831 /* Update to the next pointer */ 2832 bdp++; 2833 rxb++; 2834 2835 if (unlikely(++i == rx_queue->rx_ring_size)) { 2836 i = 0; 2837 bdp = rx_queue->rx_bd_base; 2838 rxb = rx_queue->rx_buff; 2839 } 2840 } 2841 2842 rx_queue->next_to_use = i; 2843 rx_queue->next_to_alloc = i; 2844 } 2845 2846 static void count_errors(u32 lstatus, struct net_device *ndev) 2847 { 2848 struct gfar_private *priv = netdev_priv(ndev); 2849 struct net_device_stats *stats = &ndev->stats; 2850 struct gfar_extra_stats *estats = &priv->extra_stats; 2851 2852 /* If the packet was truncated, none of the other errors matter */ 2853 if (lstatus & BD_LFLAG(RXBD_TRUNCATED)) { 2854 stats->rx_length_errors++; 2855 2856 atomic64_inc(&estats->rx_trunc); 2857 2858 return; 2859 } 2860 /* Count the errors, if there were any */ 2861 if (lstatus & BD_LFLAG(RXBD_LARGE | RXBD_SHORT)) { 2862 stats->rx_length_errors++; 2863 2864 if (lstatus & BD_LFLAG(RXBD_LARGE)) 2865 atomic64_inc(&estats->rx_large); 2866 else 2867 atomic64_inc(&estats->rx_short); 2868 } 2869 if (lstatus & BD_LFLAG(RXBD_NONOCTET)) { 2870 stats->rx_frame_errors++; 2871 atomic64_inc(&estats->rx_nonoctet); 2872 } 2873 if (lstatus & BD_LFLAG(RXBD_CRCERR)) { 2874 atomic64_inc(&estats->rx_crcerr); 2875 stats->rx_crc_errors++; 2876 } 2877 if (lstatus & BD_LFLAG(RXBD_OVERRUN)) { 2878 atomic64_inc(&estats->rx_overrun); 2879 stats->rx_over_errors++; 2880 } 2881 } 2882 2883 irqreturn_t gfar_receive(int irq, void *grp_id) 2884 { 2885 struct gfar_priv_grp *grp = (struct gfar_priv_grp *)grp_id; 2886 unsigned long flags; 2887 u32 imask, ievent; 2888 2889 ievent = gfar_read(&grp->regs->ievent); 2890 2891 if (unlikely(ievent & IEVENT_FGPI)) { 2892 gfar_write(&grp->regs->ievent, IEVENT_FGPI); 2893 return IRQ_HANDLED; 2894 } 2895 2896 if (likely(napi_schedule_prep(&grp->napi_rx))) { 2897 spin_lock_irqsave(&grp->grplock, flags); 2898 imask = gfar_read(&grp->regs->imask); 2899 imask &= IMASK_RX_DISABLED; 2900 
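		/* Mask further Rx interrupts; the NAPI poller re-enables them
		 * via IMASK_RX_DEFAULT once the ring has been drained.
		 */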
gfar_write(&grp->regs->imask, imask); 2901 spin_unlock_irqrestore(&grp->grplock, flags); 2902 __napi_schedule(&grp->napi_rx); 2903 } else { 2904 /* Clear IEVENT, so interrupts aren't called again 2905 * because of the packets that have already arrived. 2906 */ 2907 gfar_write(&grp->regs->ievent, IEVENT_RX_MASK); 2908 } 2909 2910 return IRQ_HANDLED; 2911 } 2912 2913 /* Interrupt Handler for Transmit complete */ 2914 static irqreturn_t gfar_transmit(int irq, void *grp_id) 2915 { 2916 struct gfar_priv_grp *grp = (struct gfar_priv_grp *)grp_id; 2917 unsigned long flags; 2918 u32 imask; 2919 2920 if (likely(napi_schedule_prep(&grp->napi_tx))) { 2921 spin_lock_irqsave(&grp->grplock, flags); 2922 imask = gfar_read(&grp->regs->imask); 2923 imask &= IMASK_TX_DISABLED; 2924 gfar_write(&grp->regs->imask, imask); 2925 spin_unlock_irqrestore(&grp->grplock, flags); 2926 __napi_schedule(&grp->napi_tx); 2927 } else { 2928 /* Clear IEVENT, so interrupts aren't called again 2929 * because of the packets that have already arrived. 2930 */ 2931 gfar_write(&grp->regs->ievent, IEVENT_TX_MASK); 2932 } 2933 2934 return IRQ_HANDLED; 2935 } 2936 2937 static bool gfar_add_rx_frag(struct gfar_rx_buff *rxb, u32 lstatus, 2938 struct sk_buff *skb, bool first) 2939 { 2940 int size = lstatus & BD_LENGTH_MASK; 2941 struct page *page = rxb->page; 2942 2943 if (likely(first)) { 2944 skb_put(skb, size); 2945 } else { 2946 /* the last fragments' length contains the full frame length */ 2947 if (lstatus & BD_LFLAG(RXBD_LAST)) 2948 size -= skb->len; 2949 2950 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, 2951 rxb->page_offset + RXBUF_ALIGNMENT, 2952 size, GFAR_RXB_TRUESIZE); 2953 } 2954 2955 /* try reuse page */ 2956 if (unlikely(page_count(page) != 1 || page_is_pfmemalloc(page))) 2957 return false; 2958 2959 /* change offset to the other half */ 2960 rxb->page_offset ^= GFAR_RXB_TRUESIZE; 2961 2962 page_ref_inc(page); 2963 2964 return true; 2965 } 2966 2967 static void gfar_reuse_rx_page(struct gfar_priv_rx_q *rxq, 2968 struct gfar_rx_buff *old_rxb) 2969 { 2970 struct gfar_rx_buff *new_rxb; 2971 u16 nta = rxq->next_to_alloc; 2972 2973 new_rxb = &rxq->rx_buff[nta]; 2974 2975 /* find next buf that can reuse a page */ 2976 nta++; 2977 rxq->next_to_alloc = (nta < rxq->rx_ring_size) ? 
nta : 0; 2978 2979 /* copy page reference */ 2980 *new_rxb = *old_rxb; 2981 2982 /* sync for use by the device */ 2983 dma_sync_single_range_for_device(rxq->dev, old_rxb->dma, 2984 old_rxb->page_offset, 2985 GFAR_RXB_TRUESIZE, DMA_FROM_DEVICE); 2986 } 2987 2988 static struct sk_buff *gfar_get_next_rxbuff(struct gfar_priv_rx_q *rx_queue, 2989 u32 lstatus, struct sk_buff *skb) 2990 { 2991 struct gfar_rx_buff *rxb = &rx_queue->rx_buff[rx_queue->next_to_clean]; 2992 struct page *page = rxb->page; 2993 bool first = false; 2994 2995 if (likely(!skb)) { 2996 void *buff_addr = page_address(page) + rxb->page_offset; 2997 2998 skb = build_skb(buff_addr, GFAR_SKBFRAG_SIZE); 2999 if (unlikely(!skb)) { 3000 gfar_rx_alloc_err(rx_queue); 3001 return NULL; 3002 } 3003 skb_reserve(skb, RXBUF_ALIGNMENT); 3004 first = true; 3005 } 3006 3007 dma_sync_single_range_for_cpu(rx_queue->dev, rxb->dma, rxb->page_offset, 3008 GFAR_RXB_TRUESIZE, DMA_FROM_DEVICE); 3009 3010 if (gfar_add_rx_frag(rxb, lstatus, skb, first)) { 3011 /* reuse the free half of the page */ 3012 gfar_reuse_rx_page(rx_queue, rxb); 3013 } else { 3014 /* page cannot be reused, unmap it */ 3015 dma_unmap_page(rx_queue->dev, rxb->dma, 3016 PAGE_SIZE, DMA_FROM_DEVICE); 3017 } 3018 3019 /* clear rxb content */ 3020 rxb->page = NULL; 3021 3022 return skb; 3023 } 3024 3025 static inline void gfar_rx_checksum(struct sk_buff *skb, struct rxfcb *fcb) 3026 { 3027 /* If valid headers were found, and valid sums 3028 * were verified, then we tell the kernel that no 3029 * checksumming is necessary. Otherwise, the checksum is left as 3030 * CHECKSUM_NONE for the stack to verify. 3031 */ 3031 if ((be16_to_cpu(fcb->flags) & RXFCB_CSUM_MASK) == 3032 (RXFCB_CIP | RXFCB_CTU)) 3033 skb->ip_summed = CHECKSUM_UNNECESSARY; 3034 else 3035 skb_checksum_none_assert(skb); 3036 } 3037 3038 /* gfar_process_frame() -- handle one incoming packet if skb isn't NULL. */ 3039 static void gfar_process_frame(struct net_device *ndev, struct sk_buff *skb) 3040 { 3041 struct gfar_private *priv = netdev_priv(ndev); 3042 struct rxfcb *fcb = NULL; 3043 3044 /* fcb is at the beginning if exists */ 3045 fcb = (struct rxfcb *)skb->data; 3046 3047 /* Remove the FCB from the skb 3048 * Remove the padded bytes, if there are any 3049 */ 3050 if (priv->uses_rxfcb) 3051 skb_pull(skb, GMAC_FCB_LEN); 3052 3053 /* Get receive timestamp from the skb */ 3054 if (priv->hwts_rx_en) { 3055 struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb); 3056 u64 *ns = (u64 *) skb->data; 3057 3058 memset(shhwtstamps, 0, sizeof(*shhwtstamps)); 3059 shhwtstamps->hwtstamp = ns_to_ktime(be64_to_cpu(*ns)); 3060 } 3061 3062 if (priv->padding) 3063 skb_pull(skb, priv->padding); 3064 3065 /* Trim off the FCS */ 3066 pskb_trim(skb, skb->len - ETH_FCS_LEN); 3067 3068 if (ndev->features & NETIF_F_RXCSUM) 3069 gfar_rx_checksum(skb, fcb); 3070 3071 /* There's a need to check for NETIF_F_HW_VLAN_CTAG_RX here. 3072 * Even if vlan rx accel is disabled, on some chips 3073 * RXFCB_VLN is pseudo randomly set. 3074 */ 3075 if (ndev->features & NETIF_F_HW_VLAN_CTAG_RX && 3076 be16_to_cpu(fcb->flags) & RXFCB_VLN) 3077 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), 3078 be16_to_cpu(fcb->vlctl)); 3079 } 3080 3081 /* gfar_clean_rx_ring() -- Processes each frame in the rx ring 3082 * until the budget/quota has been reached.
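 * Rx buffers are replenished in batches of GFAR_RX_BUFF_ALLOC as they
 * are consumed, and a partially received multi-BD frame is parked in
 * rx_queue->skb so the next poll can finish assembling it.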
Returns the number 3083 * of frames handled 3084 */ 3085 int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit) 3086 { 3087 struct net_device *ndev = rx_queue->ndev; 3088 struct gfar_private *priv = netdev_priv(ndev); 3089 struct rxbd8 *bdp; 3090 int i, howmany = 0; 3091 struct sk_buff *skb = rx_queue->skb; 3092 int cleaned_cnt = gfar_rxbd_unused(rx_queue); 3093 unsigned int total_bytes = 0, total_pkts = 0; 3094 3095 /* Get the first full descriptor */ 3096 i = rx_queue->next_to_clean; 3097 3098 while (rx_work_limit--) { 3099 u32 lstatus; 3100 3101 if (cleaned_cnt >= GFAR_RX_BUFF_ALLOC) { 3102 gfar_alloc_rx_buffs(rx_queue, cleaned_cnt); 3103 cleaned_cnt = 0; 3104 } 3105 3106 bdp = &rx_queue->rx_bd_base[i]; 3107 lstatus = be32_to_cpu(bdp->lstatus); 3108 if (lstatus & BD_LFLAG(RXBD_EMPTY)) 3109 break; 3110 3111 /* order rx buffer descriptor reads */ 3112 rmb(); 3113 3114 /* fetch next to clean buffer from the ring */ 3115 skb = gfar_get_next_rxbuff(rx_queue, lstatus, skb); 3116 if (unlikely(!skb)) 3117 break; 3118 3119 cleaned_cnt++; 3120 howmany++; 3121 3122 if (unlikely(++i == rx_queue->rx_ring_size)) 3123 i = 0; 3124 3125 rx_queue->next_to_clean = i; 3126 3127 /* fetch next buffer if not the last in frame */ 3128 if (!(lstatus & BD_LFLAG(RXBD_LAST))) 3129 continue; 3130 3131 if (unlikely(lstatus & BD_LFLAG(RXBD_ERR))) { 3132 count_errors(lstatus, ndev); 3133 3134 /* discard faulty buffer */ 3135 dev_kfree_skb(skb); 3136 skb = NULL; 3137 rx_queue->stats.rx_dropped++; 3138 continue; 3139 } 3140 3141 gfar_process_frame(ndev, skb); 3142 3143 /* Increment the number of packets */ 3144 total_pkts++; 3145 total_bytes += skb->len; 3146 3147 skb_record_rx_queue(skb, rx_queue->qindex); 3148 3149 skb->protocol = eth_type_trans(skb, ndev); 3150 3151 /* Send the packet up the stack */ 3152 napi_gro_receive(&rx_queue->grp->napi_rx, skb); 3153 3154 skb = NULL; 3155 } 3156 3157 /* Store incomplete frames for completion */ 3158 rx_queue->skb = skb; 3159 3160 rx_queue->stats.rx_packets += total_pkts; 3161 rx_queue->stats.rx_bytes += total_bytes; 3162 3163 if (cleaned_cnt) 3164 gfar_alloc_rx_buffs(rx_queue, cleaned_cnt); 3165 3166 /* Update Last Free RxBD pointer for LFC */ 3167 if (unlikely(priv->tx_actual_en)) { 3168 u32 bdp_dma = gfar_rxbd_dma_lastfree(rx_queue); 3169 3170 gfar_write(rx_queue->rfbptr, bdp_dma); 3171 } 3172 3173 return howmany; 3174 } 3175 3176 static int gfar_poll_rx_sq(struct napi_struct *napi, int budget) 3177 { 3178 struct gfar_priv_grp *gfargrp = 3179 container_of(napi, struct gfar_priv_grp, napi_rx); 3180 struct gfar __iomem *regs = gfargrp->regs; 3181 struct gfar_priv_rx_q *rx_queue = gfargrp->rx_queue; 3182 int work_done = 0; 3183 3184 /* Clear IEVENT, so interrupts aren't called again 3185 * because of the packets that have already arrived 3186 */ 3187 gfar_write(&regs->ievent, IEVENT_RX_MASK); 3188 3189 work_done = gfar_clean_rx_ring(rx_queue, budget); 3190 3191 if (work_done < budget) { 3192 u32 imask; 3193 napi_complete_done(napi, work_done); 3194 /* Clear the halt bit in RSTAT */ 3195 gfar_write(&regs->rstat, gfargrp->rstat); 3196 3197 spin_lock_irq(&gfargrp->grplock); 3198 imask = gfar_read(&regs->imask); 3199 imask |= IMASK_RX_DEFAULT; 3200 gfar_write(&regs->imask, imask); 3201 spin_unlock_irq(&gfargrp->grplock); 3202 } 3203 3204 return work_done; 3205 } 3206 3207 static int gfar_poll_tx_sq(struct napi_struct *napi, int budget) 3208 { 3209 struct gfar_priv_grp *gfargrp = 3210 container_of(napi, struct gfar_priv_grp, napi_tx); 3211 struct gfar __iomem *regs =
gfargrp->regs; 3212 struct gfar_priv_tx_q *tx_queue = gfargrp->tx_queue; 3213 u32 imask; 3214 3215 /* Clear IEVENT, so interrupts aren't called again 3216 * because of the packets that have already arrived 3217 */ 3218 gfar_write(&regs->ievent, IEVENT_TX_MASK); 3219 3220 /* run Tx cleanup to completion */ 3221 if (tx_queue->tx_skbuff[tx_queue->skb_dirtytx]) 3222 gfar_clean_tx_ring(tx_queue); 3223 3224 napi_complete(napi); 3225 3226 spin_lock_irq(&gfargrp->grplock); 3227 imask = gfar_read(&regs->imask); 3228 imask |= IMASK_TX_DEFAULT; 3229 gfar_write(&regs->imask, imask); 3230 spin_unlock_irq(&gfargrp->grplock); 3231 3232 return 0; 3233 } 3234 3235 static int gfar_poll_rx(struct napi_struct *napi, int budget) 3236 { 3237 struct gfar_priv_grp *gfargrp = 3238 container_of(napi, struct gfar_priv_grp, napi_rx); 3239 struct gfar_private *priv = gfargrp->priv; 3240 struct gfar __iomem *regs = gfargrp->regs; 3241 struct gfar_priv_rx_q *rx_queue = NULL; 3242 int work_done = 0, work_done_per_q = 0; 3243 int i, budget_per_q = 0; 3244 unsigned long rstat_rxf; 3245 int num_act_queues; 3246 3247 /* Clear IEVENT, so interrupts aren't called again 3248 * because of the packets that have already arrived 3249 */ 3250 gfar_write(&regs->ievent, IEVENT_RX_MASK); 3251 3252 rstat_rxf = gfar_read(&regs->rstat) & RSTAT_RXF_MASK; 3253 3254 num_act_queues = bitmap_weight(&rstat_rxf, MAX_RX_QS); 3255 if (num_act_queues) 3256 budget_per_q = budget/num_act_queues; 3257 3258 for_each_set_bit(i, &gfargrp->rx_bit_map, priv->num_rx_queues) { 3259 /* skip queue if not active */ 3260 if (!(rstat_rxf & (RSTAT_CLEAR_RXF0 >> i))) 3261 continue; 3262 3263 rx_queue = priv->rx_queue[i]; 3264 work_done_per_q = 3265 gfar_clean_rx_ring(rx_queue, budget_per_q); 3266 work_done += work_done_per_q; 3267 3268 /* finished processing this queue */ 3269 if (work_done_per_q < budget_per_q) { 3270 /* clear active queue hw indication */ 3271 gfar_write(&regs->rstat, 3272 RSTAT_CLEAR_RXF0 >> i); 3273 num_act_queues--; 3274 3275 if (!num_act_queues) 3276 break; 3277 } 3278 } 3279 3280 if (!num_act_queues) { 3281 u32 imask; 3282 napi_complete_done(napi, work_done); 3283 3284 /* Clear the halt bit in RSTAT */ 3285 gfar_write(&regs->rstat, gfargrp->rstat); 3286 3287 spin_lock_irq(&gfargrp->grplock); 3288 imask = gfar_read(&regs->imask); 3289 imask |= IMASK_RX_DEFAULT; 3290 gfar_write(&regs->imask, imask); 3291 spin_unlock_irq(&gfargrp->grplock); 3292 } 3293 3294 return work_done; 3295 } 3296 3297 static int gfar_poll_tx(struct napi_struct *napi, int budget) 3298 { 3299 struct gfar_priv_grp *gfargrp = 3300 container_of(napi, struct gfar_priv_grp, napi_tx); 3301 struct gfar_private *priv = gfargrp->priv; 3302 struct gfar __iomem *regs = gfargrp->regs; 3303 struct gfar_priv_tx_q *tx_queue = NULL; 3304 int has_tx_work = 0; 3305 int i; 3306 3307 /* Clear IEVENT, so interrupts aren't called again 3308 * because of the packets that have already arrived 3309 */ 3310 gfar_write(&regs->ievent, IEVENT_TX_MASK); 3311 3312 for_each_set_bit(i, &gfargrp->tx_bit_map, priv->num_tx_queues) { 3313 tx_queue = priv->tx_queue[i]; 3314 /* run Tx cleanup to completion */ 3315 if (tx_queue->tx_skbuff[tx_queue->skb_dirtytx]) { 3316 gfar_clean_tx_ring(tx_queue); 3317 has_tx_work = 1; 3318 } 3319 } 3320 3321 if (!has_tx_work) { 3322 u32 imask; 3323 napi_complete(napi); 3324 3325 spin_lock_irq(&gfargrp->grplock); 3326 imask = gfar_read(&regs->imask); 3327 imask |= IMASK_TX_DEFAULT; 3328 gfar_write(&regs->imask, imask); 3329 spin_unlock_irq(&gfargrp->grplock); 3330 } 3331 3332 return 0; 3333 } 3334 3335 3336
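/* NAPI notes: gfar_poll_rx() above splits its budget evenly over the RX
 * queues whose frame bits are set in RSTAT (e.g. a budget of 64 with
 * three active queues gives each queue 64 / 3 = 21 descriptors per pass);
 * a queue that finishes under its share has its RSTAT frame bit cleared
 * and stops counting as active.  The *_sq variants are the simpler
 * single-queue paths used when each interrupt group services exactly one
 * RX and one TX queue.
 */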
#ifdef CONFIG_NET_POLL_CONTROLLER 3337 /* Polling 'interrupt' - used by things like netconsole to send skbs 3338 * without having to re-enable interrupts. It's not called while 3339 * the interrupt routine is executing. 3340 */ 3341 static void gfar_netpoll(struct net_device *dev) 3342 { 3343 struct gfar_private *priv = netdev_priv(dev); 3344 int i; 3345 3346 /* If the device has multiple interrupts, run tx/rx */ 3347 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) { 3348 for (i = 0; i < priv->num_grps; i++) { 3349 struct gfar_priv_grp *grp = &priv->gfargrp[i]; 3350 3351 disable_irq(gfar_irq(grp, TX)->irq); 3352 disable_irq(gfar_irq(grp, RX)->irq); 3353 disable_irq(gfar_irq(grp, ER)->irq); 3354 gfar_interrupt(gfar_irq(grp, TX)->irq, grp); 3355 enable_irq(gfar_irq(grp, ER)->irq); 3356 enable_irq(gfar_irq(grp, RX)->irq); 3357 enable_irq(gfar_irq(grp, TX)->irq); 3358 } 3359 } else { 3360 for (i = 0; i < priv->num_grps; i++) { 3361 struct gfar_priv_grp *grp = &priv->gfargrp[i]; 3362 3363 disable_irq(gfar_irq(grp, TX)->irq); 3364 gfar_interrupt(gfar_irq(grp, TX)->irq, grp); 3365 enable_irq(gfar_irq(grp, TX)->irq); 3366 } 3367 } 3368 } 3369 #endif 3370 3371 /* The interrupt handler for devices with one interrupt */ 3372 static irqreturn_t gfar_interrupt(int irq, void *grp_id) 3373 { 3374 struct gfar_priv_grp *gfargrp = grp_id; 3375 3376 /* Save ievent for future reference */ 3377 u32 events = gfar_read(&gfargrp->regs->ievent); 3378 3379 /* Check for reception */ 3380 if (events & IEVENT_RX_MASK) 3381 gfar_receive(irq, grp_id); 3382 3383 /* Check for transmit completion */ 3384 if (events & IEVENT_TX_MASK) 3385 gfar_transmit(irq, grp_id); 3386 3387 /* Check for errors */ 3388 if (events & IEVENT_ERR_MASK) 3389 gfar_error(irq, grp_id); 3390 3391 return IRQ_HANDLED; 3392 } 3393 3394 /* Called every time the controller might need to be made 3395 * aware of new link state. The PHY code conveys this 3396 * information through variables in the phydev structure, and this 3397 * function converts those variables into the appropriate 3398 * register values, and can bring down the device if needed. 3399 */ 3400 static void adjust_link(struct net_device *dev) 3401 { 3402 struct gfar_private *priv = netdev_priv(dev); 3403 struct phy_device *phydev = dev->phydev; 3404 3405 if (unlikely(phydev->link != priv->oldlink || 3406 (phydev->link && (phydev->duplex != priv->oldduplex || 3407 phydev->speed != priv->oldspeed)))) 3408 gfar_update_link_state(priv); 3409 } 3410 3411 /* Update the hash table based on the current list of multicast 3412 * addresses we subscribe to. 
Also, change the promiscuity of 3413 * the device based on the flags (this function is called 3414 * whenever dev->flags is changed) 3415 */ 3416 static void gfar_set_multi(struct net_device *dev) 3417 { 3418 struct netdev_hw_addr *ha; 3419 struct gfar_private *priv = netdev_priv(dev); 3420 struct gfar __iomem *regs = priv->gfargrp[0].regs; 3421 u32 tempval; 3422 3423 if (dev->flags & IFF_PROMISC) { 3424 /* Set RCTRL to PROM */ 3425 tempval = gfar_read(&regs->rctrl); 3426 tempval |= RCTRL_PROM; 3427 gfar_write(&regs->rctrl, tempval); 3428 } else { 3429 /* Set RCTRL to not PROM */ 3430 tempval = gfar_read(&regs->rctrl); 3431 tempval &= ~(RCTRL_PROM); 3432 gfar_write(&regs->rctrl, tempval); 3433 } 3434 3435 if (dev->flags & IFF_ALLMULTI) { 3436 /* Set the hash to rx all multicast frames */ 3437 gfar_write(&regs->igaddr0, 0xffffffff); 3438 gfar_write(&regs->igaddr1, 0xffffffff); 3439 gfar_write(&regs->igaddr2, 0xffffffff); 3440 gfar_write(&regs->igaddr3, 0xffffffff); 3441 gfar_write(&regs->igaddr4, 0xffffffff); 3442 gfar_write(&regs->igaddr5, 0xffffffff); 3443 gfar_write(&regs->igaddr6, 0xffffffff); 3444 gfar_write(&regs->igaddr7, 0xffffffff); 3445 gfar_write(&regs->gaddr0, 0xffffffff); 3446 gfar_write(&regs->gaddr1, 0xffffffff); 3447 gfar_write(&regs->gaddr2, 0xffffffff); 3448 gfar_write(&regs->gaddr3, 0xffffffff); 3449 gfar_write(&regs->gaddr4, 0xffffffff); 3450 gfar_write(&regs->gaddr5, 0xffffffff); 3451 gfar_write(&regs->gaddr6, 0xffffffff); 3452 gfar_write(&regs->gaddr7, 0xffffffff); 3453 } else { 3454 int em_num; 3455 int idx; 3456 3457 /* zero out the hash */ 3458 gfar_write(&regs->igaddr0, 0x0); 3459 gfar_write(&regs->igaddr1, 0x0); 3460 gfar_write(&regs->igaddr2, 0x0); 3461 gfar_write(&regs->igaddr3, 0x0); 3462 gfar_write(&regs->igaddr4, 0x0); 3463 gfar_write(&regs->igaddr5, 0x0); 3464 gfar_write(&regs->igaddr6, 0x0); 3465 gfar_write(&regs->igaddr7, 0x0); 3466 gfar_write(&regs->gaddr0, 0x0); 3467 gfar_write(&regs->gaddr1, 0x0); 3468 gfar_write(&regs->gaddr2, 0x0); 3469 gfar_write(&regs->gaddr3, 0x0); 3470 gfar_write(&regs->gaddr4, 0x0); 3471 gfar_write(&regs->gaddr5, 0x0); 3472 gfar_write(&regs->gaddr6, 0x0); 3473 gfar_write(&regs->gaddr7, 0x0); 3474 3475 /* If we have extended hash tables, we need to 3476 * clear the exact match registers to prepare for 3477 * setting them 3478 */ 3479 if (priv->extended_hash) { 3480 em_num = GFAR_EM_NUM + 1; 3481 gfar_clear_exact_match(dev); 3482 idx = 1; 3483 } else { 3484 idx = 0; 3485 em_num = 0; 3486 } 3487 3488 if (netdev_mc_empty(dev)) 3489 return; 3490 3491 /* Parse the list, and set the appropriate bits */ 3492 netdev_for_each_mc_addr(ha, dev) { 3493 if (idx < em_num) { 3494 gfar_set_mac_for_addr(dev, idx, ha->addr); 3495 idx++; 3496 } else 3497 gfar_set_hash_for_addr(dev, ha->addr); 3498 } 3499 } 3500 } 3501 3502 3503 /* Clears each of the exact match registers to zero, so they 3504 * don't interfere with normal reception 3505 */ 3506 static void gfar_clear_exact_match(struct net_device *dev) 3507 { 3508 int idx; 3509 static const u8 zero_arr[ETH_ALEN] = {0, 0, 0, 0, 0, 0}; 3510 3511 for (idx = 1; idx < GFAR_EM_NUM + 1; idx++) 3512 gfar_set_mac_for_addr(dev, idx, zero_arr); 3513 } 3514 3515 /* Set the appropriate hash bit for the given addr */ 3516 /* The algorithm works like so: 3517 * 1) Take the Destination Address (ie the multicast address), and 3518 * do a CRC on it (little endian), and reverse the bits of the 3519 * result. 3520 * 2) Use the 8 most significant bits as a hash into a 256-entry 3521 * table. The table is controlled through 8 32-bit registers: 3522 * gaddr0-7. gaddr0's MSB is entry 0, and gaddr7's LSB is 3523 * entry 255.
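 * (For example, using the 8-bit, gaddr-only layout described here, a
 * hash index of 0x43 -- binary 010 00011 -- selects gaddr2 and bit 3 in
 * IBM numbering, i.e. the value 1 << 28 written by
 * gfar_set_hash_for_addr() below.)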
This means that the 3 most significant bits in the 3524 * hash index determine which gaddr register to use, and the 5 other bits 3525 * indicate which bit (assuming an IBM numbering scheme, which 3526 * for PowerPC (tm) is usually the case) in the register holds 3527 * the entry. 3528 */ 3529 static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr) 3530 { 3531 u32 tempval; 3532 struct gfar_private *priv = netdev_priv(dev); 3533 u32 result = ether_crc(ETH_ALEN, addr); 3534 int width = priv->hash_width; 3535 u8 whichbit = (result >> (32 - width)) & 0x1f; 3536 u8 whichreg = result >> (32 - width + 5); 3537 u32 value = (1 << (31-whichbit)); 3538 3539 tempval = gfar_read(priv->hash_regs[whichreg]); 3540 tempval |= value; 3541 gfar_write(priv->hash_regs[whichreg], tempval); 3542 } 3543 3544 3545 /* There are multiple MAC Address register pairs on some controllers. 3546 * This function sets the numth pair to a given address 3547 */ 3548 static void gfar_set_mac_for_addr(struct net_device *dev, int num, 3549 const u8 *addr) 3550 { 3551 struct gfar_private *priv = netdev_priv(dev); 3552 struct gfar __iomem *regs = priv->gfargrp[0].regs; 3553 u32 tempval; 3554 u32 __iomem *macptr = &regs->macstnaddr1; 3555 3556 macptr += num*2; 3557 3558 /* For a station address of 0x12345678ABCD in transmission 3559 * order (BE), MACnADDR1 is set to 0xCDAB7856 and 3560 * MACnADDR2 is set to 0x34120000. 3561 */ 3562 tempval = (addr[5] << 24) | (addr[4] << 16) | 3563 (addr[3] << 8) | addr[2]; 3564 3565 gfar_write(macptr, tempval); 3566 3567 tempval = (addr[1] << 24) | (addr[0] << 16); 3568 3569 gfar_write(macptr+1, tempval); 3570 } 3571 3572 /* GFAR error interrupt handler */ 3573 static irqreturn_t gfar_error(int irq, void *grp_id) 3574 { 3575 struct gfar_priv_grp *gfargrp = grp_id; 3576 struct gfar __iomem *regs = gfargrp->regs; 3577 struct gfar_private *priv = gfargrp->priv; 3578 struct net_device *dev = priv->ndev; 3579 3580 /* Save ievent for future reference */ 3581 u32 events = gfar_read(&regs->ievent); 3582 3583 /* Clear IEVENT */ 3584 gfar_write(&regs->ievent, events & IEVENT_ERR_MASK); 3585 3586 /* Magic Packet is not an error. */ 3587 if ((priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET) && 3588 (events & IEVENT_MAG)) 3589 events &= ~IEVENT_MAG; 3590 3591 /* Log the error if rx/tx error debugging is enabled
*/ 3592 if (netif_msg_rx_err(priv) || netif_msg_tx_err(priv)) 3593 netdev_dbg(dev, 3594 "error interrupt (ievent=0x%08x imask=0x%08x)\n", 3595 events, gfar_read(&regs->imask)); 3596 3597 /* Update the error counters */ 3598 if (events & IEVENT_TXE) { 3599 dev->stats.tx_errors++; 3600 3601 if (events & IEVENT_LC) 3602 dev->stats.tx_window_errors++; 3603 if (events & IEVENT_CRL) 3604 dev->stats.tx_aborted_errors++; 3605 if (events & IEVENT_XFUN) { 3606 netif_dbg(priv, tx_err, dev, 3607 "TX FIFO underrun, packet dropped\n"); 3608 dev->stats.tx_dropped++; 3609 atomic64_inc(&priv->extra_stats.tx_underrun); 3610 3611 schedule_work(&priv->reset_task); 3612 } 3613 netif_dbg(priv, tx_err, dev, "Transmit Error\n"); 3614 } 3615 if (events & IEVENT_BSY) { 3616 dev->stats.rx_over_errors++; 3617 atomic64_inc(&priv->extra_stats.rx_bsy); 3618 3619 netif_dbg(priv, rx_err, dev, "busy error (rstat: %x)\n", 3620 gfar_read(&regs->rstat)); 3621 } 3622 if (events & IEVENT_BABR) { 3623 dev->stats.rx_errors++; 3624 atomic64_inc(&priv->extra_stats.rx_babr); 3625 3626 netif_dbg(priv, rx_err, dev, "babbling RX error\n"); 3627 } 3628 if (events & IEVENT_EBERR) { 3629 atomic64_inc(&priv->extra_stats.eberr); 3630 netif_dbg(priv, rx_err, dev, "bus error\n"); 3631 } 3632 if (events & IEVENT_RXC) 3633 netif_dbg(priv, rx_status, dev, "control frame\n"); 3634 3635 if (events & IEVENT_BABT) { 3636 atomic64_inc(&priv->extra_stats.tx_babt); 3637 netif_dbg(priv, tx_err, dev, "babbling TX error\n"); 3638 } 3639 return IRQ_HANDLED; 3640 } 3641 3642 static u32 gfar_get_flowctrl_cfg(struct gfar_private *priv) 3643 { 3644 struct net_device *ndev = priv->ndev; 3645 struct phy_device *phydev = ndev->phydev; 3646 u32 val = 0; 3647 3648 if (!phydev->duplex) 3649 return val; 3650 3651 if (!priv->pause_aneg_en) { 3652 if (priv->tx_pause_en) 3653 val |= MACCFG1_TX_FLOW; 3654 if (priv->rx_pause_en) 3655 val |= MACCFG1_RX_FLOW; 3656 } else { 3657 u16 lcl_adv, rmt_adv; 3658 u8 flowctrl; 3659 /* get link partner capabilities */ 3660 rmt_adv = 0; 3661 if (phydev->pause) 3662 rmt_adv = LPA_PAUSE_CAP; 3663 if (phydev->asym_pause) 3664 rmt_adv |= LPA_PAUSE_ASYM; 3665 3666 lcl_adv = linkmode_adv_to_lcl_adv_t(phydev->advertising); 3667 flowctrl = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv); 3668 if (flowctrl & FLOW_CTRL_TX) 3669 val |= MACCFG1_TX_FLOW; 3670 if (flowctrl & FLOW_CTRL_RX) 3671 val |= MACCFG1_RX_FLOW; 3672 } 3673 3674 return val; 3675 } 3676 3677 static noinline void gfar_update_link_state(struct gfar_private *priv) 3678 { 3679 struct gfar __iomem *regs = priv->gfargrp[0].regs; 3680 struct net_device *ndev = priv->ndev; 3681 struct phy_device *phydev = ndev->phydev; 3682 struct gfar_priv_rx_q *rx_queue = NULL; 3683 int i; 3684 3685 if (unlikely(test_bit(GFAR_RESETTING, &priv->state))) 3686 return; 3687 3688 if (phydev->link) { 3689 u32 tempval1 = gfar_read(&regs->maccfg1); 3690 u32 tempval = gfar_read(&regs->maccfg2); 3691 u32 ecntrl = gfar_read(&regs->ecntrl); 3692 u32 tx_flow_oldval = (tempval1 & MACCFG1_TX_FLOW); 3693 3694 if (phydev->duplex != priv->oldduplex) { 3695 if (!(phydev->duplex)) 3696 tempval &= ~(MACCFG2_FULL_DUPLEX); 3697 else 3698 tempval |= MACCFG2_FULL_DUPLEX; 3699 3700 priv->oldduplex = phydev->duplex; 3701 } 3702 3703 if (phydev->speed != priv->oldspeed) { 3704 switch (phydev->speed) { 3705 case 1000: 3706 tempval = 3707 ((tempval & ~(MACCFG2_IF)) | MACCFG2_GMII); 3708 3709 ecntrl &= ~(ECNTRL_R100); 3710 break; 3711 case 100: 3712 case 10: 3713 tempval = 3714 ((tempval & ~(MACCFG2_IF)) | MACCFG2_MII); 3715 3716 /* Reduced mode
distinguishes 3717 * between 10 and 100 3718 */ 3719 if (phydev->speed == SPEED_100) 3720 ecntrl |= ECNTRL_R100; 3721 else 3722 ecntrl &= ~(ECNTRL_R100); 3723 break; 3724 default: 3725 netif_warn(priv, link, priv->ndev, 3726 "Ack! Speed (%d) is not 10/100/1000!\n", 3727 phydev->speed); 3728 break; 3729 } 3730 3731 priv->oldspeed = phydev->speed; 3732 } 3733 3734 tempval1 &= ~(MACCFG1_TX_FLOW | MACCFG1_RX_FLOW); 3735 tempval1 |= gfar_get_flowctrl_cfg(priv); 3736 3737 /* Turn last free buffer recording on */ 3738 if ((tempval1 & MACCFG1_TX_FLOW) && !tx_flow_oldval) { 3739 for (i = 0; i < priv->num_rx_queues; i++) { 3740 u32 bdp_dma; 3741 3742 rx_queue = priv->rx_queue[i]; 3743 bdp_dma = gfar_rxbd_dma_lastfree(rx_queue); 3744 gfar_write(rx_queue->rfbptr, bdp_dma); 3745 } 3746 3747 priv->tx_actual_en = 1; 3748 } 3749 3750 if (unlikely(!(tempval1 & MACCFG1_TX_FLOW) && tx_flow_oldval)) 3751 priv->tx_actual_en = 0; 3752 3753 gfar_write(&regs->maccfg1, tempval1); 3754 gfar_write(&regs->maccfg2, tempval); 3755 gfar_write(&regs->ecntrl, ecntrl); 3756 3757 if (!priv->oldlink) 3758 priv->oldlink = 1; 3759 3760 } else if (priv->oldlink) { 3761 priv->oldlink = 0; 3762 priv->oldspeed = 0; 3763 priv->oldduplex = -1; 3764 } 3765 3766 if (netif_msg_link(priv)) 3767 phy_print_status(phydev); 3768 } 3769 3770 static const struct of_device_id gfar_match[] = 3771 { 3772 { 3773 .type = "network", 3774 .compatible = "gianfar", 3775 }, 3776 { 3777 .compatible = "fsl,etsec2", 3778 }, 3779 {}, 3780 }; 3781 MODULE_DEVICE_TABLE(of, gfar_match); 3782 3783 /* Structure for a device driver */ 3784 static struct platform_driver gfar_driver = { 3785 .driver = { 3786 .name = "fsl-gianfar", 3787 .pm = GFAR_PM_OPS, 3788 .of_match_table = gfar_match, 3789 }, 3790 .probe = gfar_probe, 3791 .remove = gfar_remove, 3792 }; 3793 3794 module_platform_driver(gfar_driver); 3795
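/* Illustrative only: a device tree node of roughly this shape is what the
 * gfar_match table above binds to; the exact set of required properties
 * is defined by the binding documents and the board dts files, not here.
 *
 *	ethernet@24000 {
 *		device_type = "network";
 *		compatible = "gianfar";
 *		reg = <0x24000 0x1000>;
 *		phy-handle = <&phy0>;
 *		phy-connection-type = "rgmii-id";
 *	};
 */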