/* drivers/net/ethernet/freescale/gianfar.c
 *
 * Gianfar Ethernet Driver
 * This driver is designed for the non-CPM ethernet controllers
 * on the 85xx and 83xx family of integrated processors
 * Based on 8260_io/fcc_enet.c
 *
 * Author: Andy Fleming
 * Maintainer: Kumar Gala
 * Modifier: Sandeep Gopalpet <sandeep.kumar@freescale.com>
 *
 * Copyright 2002-2009, 2011 Freescale Semiconductor, Inc.
 * Copyright 2007 MontaVista Software, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * Gianfar: AKA Lambda Draconis, "Dragon"
 * RA 11 31 24.2
 * Dec +69 19 52
 * V 3.84
 * B-V +1.62
 *
 * Theory of operation
 *
 * The driver is initialized through of_device. Configuration information
 * is therefore conveyed through an OF-style device tree.
 *
 * The Gianfar Ethernet Controller uses a ring of buffer
 * descriptors. The beginning is indicated by a register
 * pointing to the physical address of the start of the ring.
 * The end is determined by a "wrap" bit being set in the
 * last descriptor of the ring.
 *
 * When a packet is received, the RXF bit in the
 * IEVENT register is set, triggering an interrupt when the
 * corresponding bit in the IMASK register is also set (if
 * interrupt coalescing is active, then the interrupt may not
 * happen immediately, but will wait until either a set number
 * of frames or amount of time have passed). In NAPI, the
 * interrupt handler will signal there is work to be done, and
 * exit. This method will start at the last known empty
 * descriptor, and process every subsequent descriptor until there
 * are none left with data (NAPI will stop after a set number of
 * packets to give time to other tasks, but will eventually
 * process all the packets). The data arrives inside a
 * pre-allocated skb, and so after the skb is passed up to the
 * stack, a new skb must be allocated, and the address field in
 * the buffer descriptor must be updated to indicate this new
 * skb.
 *
 * When the kernel requests that a packet be transmitted, the
 * driver starts where it left off last time, and points the
 * descriptor at the buffer which was passed in. The driver
 * then informs the DMA engine that there are packets ready to
 * be transmitted. Once the controller is finished transmitting
 * the packet, an interrupt may be triggered (under the same
 * conditions as for reception, but depending on the TXF bit).
 * The driver then cleans up the buffer.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#define DEBUG

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/unistd.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_mdio.h>
#include <linux/of_platform.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/in.h>
#include <linux/net_tstamp.h>

#include <asm/io.h>
#include <asm/reg.h>
#include <asm/mpc85xx.h>
#include <asm/irq.h>
#include <asm/uaccess.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <linux/crc32.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/phy_fixed.h>
#include <linux/of.h>
#include <linux/of_net.h>

#include "gianfar.h"

#define TX_TIMEOUT	(1*HZ)

const char gfar_driver_version[] = "1.3";

static int gfar_enet_open(struct net_device *dev);
static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev);
static void gfar_reset_task(struct work_struct *work);
static void gfar_timeout(struct net_device *dev);
static int gfar_close(struct net_device *dev);
struct sk_buff *gfar_new_skb(struct net_device *dev);
static void gfar_new_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
			   struct sk_buff *skb);
static int gfar_set_mac_address(struct net_device *dev);
static int gfar_change_mtu(struct net_device *dev, int new_mtu);
static irqreturn_t gfar_error(int irq, void *dev_id);
static irqreturn_t gfar_transmit(int irq, void *dev_id);
static irqreturn_t gfar_interrupt(int irq, void *dev_id);
static void adjust_link(struct net_device *dev);
static void init_registers(struct net_device *dev);
static int init_phy(struct net_device *dev);
static int gfar_probe(struct platform_device *ofdev);
static int gfar_remove(struct platform_device *ofdev);
static void free_skb_resources(struct gfar_private *priv);
static void gfar_set_multi(struct net_device *dev);
static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr);
static void gfar_configure_serdes(struct net_device *dev);
static int gfar_poll(struct napi_struct *napi, int budget);
static int gfar_poll_sq(struct napi_struct *napi, int budget);
#ifdef CONFIG_NET_POLL_CONTROLLER
static void gfar_netpoll(struct net_device *dev);
#endif
int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit);
static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue);
static void gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
			       int amount_pull, struct napi_struct *napi);
void gfar_halt(struct net_device *dev);
static void gfar_halt_nodisable(struct net_device *dev);
void gfar_start(struct net_device *dev);
static void gfar_clear_exact_match(struct net_device *dev);
static void gfar_set_mac_for_addr(struct net_device *dev, int num,
				  const u8 *addr);
static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);

MODULE_AUTHOR("Freescale Semiconductor, Inc");
MODULE_DESCRIPTION("Gianfar Ethernet Driver");
MODULE_LICENSE("GPL");

static void gfar_init_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
			    dma_addr_t buf)
{
	u32 lstatus;

	bdp->bufPtr = buf;

	lstatus = BD_LFLAG(RXBD_EMPTY | RXBD_INTERRUPT);
	if (bdp == rx_queue->rx_bd_base + rx_queue->rx_ring_size - 1)
		lstatus |= BD_LFLAG(RXBD_WRAP);

	eieio();

	bdp->lstatus = lstatus;
}

static int gfar_init_bds(struct net_device *ndev)
{
	struct gfar_private *priv = netdev_priv(ndev);
	struct gfar_priv_tx_q *tx_queue = NULL;
	struct gfar_priv_rx_q *rx_queue = NULL;
	struct txbd8 *txbdp;
	struct rxbd8 *rxbdp;
	int i, j;

	for (i = 0; i < priv->num_tx_queues; i++) {
		tx_queue = priv->tx_queue[i];
		/* Initialize some variables in our dev structure */
		tx_queue->num_txbdfree = tx_queue->tx_ring_size;
		tx_queue->dirty_tx = tx_queue->tx_bd_base;
		tx_queue->cur_tx = tx_queue->tx_bd_base;
		tx_queue->skb_curtx = 0;
		tx_queue->skb_dirtytx = 0;

		/* Initialize Transmit Descriptor Ring */
		txbdp = tx_queue->tx_bd_base;
		for (j = 0; j < tx_queue->tx_ring_size; j++) {
			txbdp->lstatus = 0;
			txbdp->bufPtr = 0;
			txbdp++;
		}

		/* Set the last descriptor in the ring to indicate wrap */
		txbdp--;
		txbdp->status |= TXBD_WRAP;
	}

	for (i = 0; i < priv->num_rx_queues; i++) {
		rx_queue = priv->rx_queue[i];
		rx_queue->cur_rx = rx_queue->rx_bd_base;
		rx_queue->skb_currx = 0;
		rxbdp = rx_queue->rx_bd_base;

		for (j = 0; j < rx_queue->rx_ring_size; j++) {
			struct sk_buff *skb = rx_queue->rx_skbuff[j];

			if (skb) {
				gfar_init_rxbdp(rx_queue, rxbdp,
						rxbdp->bufPtr);
			} else {
				skb = gfar_new_skb(ndev);
				if (!skb) {
					netdev_err(ndev, "Can't allocate RX buffers\n");
					return -ENOMEM;
				}
				rx_queue->rx_skbuff[j] = skb;

				gfar_new_rxbdp(rx_queue, rxbdp, skb);
			}

			rxbdp++;
		}
	}

	return 0;
}

static int gfar_alloc_skb_resources(struct net_device *ndev)
{
	void *vaddr;
	dma_addr_t addr;
	int i, j, k;
	struct gfar_private *priv = netdev_priv(ndev);
	struct device *dev = priv->dev;
	struct gfar_priv_tx_q *tx_queue = NULL;
	struct gfar_priv_rx_q *rx_queue = NULL;

	priv->total_tx_ring_size = 0;
	for (i = 0; i < priv->num_tx_queues; i++)
		priv->total_tx_ring_size += priv->tx_queue[i]->tx_ring_size;

	priv->total_rx_ring_size = 0;
	for (i = 0; i < priv->num_rx_queues; i++)
		priv->total_rx_ring_size += priv->rx_queue[i]->rx_ring_size;

	/* Allocate memory for the buffer descriptors */
	vaddr = dma_alloc_coherent(dev,
				   (priv->total_tx_ring_size *
				    sizeof(struct txbd8)) +
				   (priv->total_rx_ring_size *
				    sizeof(struct rxbd8)),
				   &addr, GFP_KERNEL);
	if (!vaddr)
		return -ENOMEM;

	for (i = 0; i < priv->num_tx_queues; i++) {
		tx_queue = priv->tx_queue[i];
		tx_queue->tx_bd_base = vaddr;
		tx_queue->tx_bd_dma_base = addr;
		tx_queue->dev = ndev;
		/* enet DMA only understands physical addresses */
		addr += sizeof(struct txbd8) * tx_queue->tx_ring_size;
		vaddr += sizeof(struct txbd8) * tx_queue->tx_ring_size;
	}

	/* Start the rx descriptor ring where the tx ring leaves off */
	for (i = 0; i < priv->num_rx_queues; i++) {
		rx_queue = priv->rx_queue[i];
		rx_queue->rx_bd_base = vaddr;
		rx_queue->rx_bd_dma_base = addr;
		rx_queue->dev = ndev;
		addr += sizeof(struct rxbd8) * rx_queue->rx_ring_size;
		vaddr += sizeof(struct rxbd8) * rx_queue->rx_ring_size;
	}

	/* Setup the skbuff rings */
	for (i = 0; i < priv->num_tx_queues; i++) {
		tx_queue = priv->tx_queue[i];
		tx_queue->tx_skbuff =
			kmalloc_array(tx_queue->tx_ring_size,
				      sizeof(*tx_queue->tx_skbuff),
				      GFP_KERNEL);
		if (!tx_queue->tx_skbuff)
			goto cleanup;

		for (k = 0; k < tx_queue->tx_ring_size; k++)
			tx_queue->tx_skbuff[k] = NULL;
	}

	for (i = 0; i < priv->num_rx_queues; i++) {
		rx_queue = priv->rx_queue[i];
		rx_queue->rx_skbuff =
			kmalloc_array(rx_queue->rx_ring_size,
				      sizeof(*rx_queue->rx_skbuff),
				      GFP_KERNEL);
		if (!rx_queue->rx_skbuff)
			goto cleanup;

		for (j = 0; j < rx_queue->rx_ring_size; j++)
			rx_queue->rx_skbuff[j] = NULL;
	}

	if (gfar_init_bds(ndev))
		goto cleanup;

	return 0;

cleanup:
	free_skb_resources(priv);
	return -ENOMEM;
}

static void gfar_init_tx_rx_base(struct gfar_private *priv)
{
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 __iomem *baddr;
	int i;

	baddr = &regs->tbase0;
	for (i = 0; i < priv->num_tx_queues; i++) {
		gfar_write(baddr, priv->tx_queue[i]->tx_bd_dma_base);
		baddr += 2;
	}

	baddr = &regs->rbase0;
	for (i = 0; i < priv->num_rx_queues; i++) {
		gfar_write(baddr, priv->rx_queue[i]->rx_bd_dma_base);
		baddr += 2;
	}
}

static void gfar_init_mac(struct net_device *ndev)
{
	struct gfar_private *priv = netdev_priv(ndev);
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 rctrl = 0;
	u32 tctrl = 0;
	u32 attrs = 0;

	/* write the tx/rx base registers */
	gfar_init_tx_rx_base(priv);

	/* Configure the coalescing support */
	gfar_configure_coalescing_all(priv);

	/* set this when rx hw offload (TOE) functions are being used */
	priv->uses_rxfcb = 0;

	if (priv->rx_filer_enable) {
		rctrl |= RCTRL_FILREN;
		/* Program the RIR0 reg with the required distribution */
		gfar_write(&regs->rir0, DEFAULT_RIR0);
	}

	/* Restore PROMISC mode */
	if (ndev->flags & IFF_PROMISC)
		rctrl |= RCTRL_PROM;

	if (ndev->features & NETIF_F_RXCSUM) {
		rctrl |= RCTRL_CHECKSUMMING;
		priv->uses_rxfcb = 1;
	}

	if (priv->extended_hash) {
		rctrl |= RCTRL_EXTHASH;

		gfar_clear_exact_match(ndev);
		rctrl |= RCTRL_EMEN;
	}

	if (priv->padding) {
		rctrl &= ~RCTRL_PAL_MASK;
		rctrl |= RCTRL_PADDING(priv->padding);
	}

	/* Insert receive time stamps into padding alignment bytes */
	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER) {
		rctrl &= ~RCTRL_PAL_MASK;
		rctrl |= RCTRL_PADDING(8);
		priv->padding = 8;
	}

	/* Enable HW time stamping if requested from user space */
	if (priv->hwts_rx_en) {
		rctrl |= RCTRL_PRSDEP_INIT | RCTRL_TS_ENABLE;
		priv->uses_rxfcb = 1;
	}

	if (ndev->features & NETIF_F_HW_VLAN_CTAG_RX) {
		rctrl |= RCTRL_VLEX | RCTRL_PRSDEP_INIT;
		priv->uses_rxfcb = 1;
	}

	/* Init rctrl based on our settings */
	gfar_write(&regs->rctrl, rctrl);

	if (ndev->features & NETIF_F_IP_CSUM)
		tctrl |= TCTRL_INIT_CSUM;

	if (priv->prio_sched_en)
		tctrl |= TCTRL_TXSCHED_PRIO;
	else {
		tctrl |= TCTRL_TXSCHED_WRRS;
		gfar_write(&regs->tr03wt, DEFAULT_WRRS_WEIGHT);
		gfar_write(&regs->tr47wt, DEFAULT_WRRS_WEIGHT);
	}

	gfar_write(&regs->tctrl, tctrl);

	/* Set the extraction length and index */
	attrs = ATTRELI_EL(priv->rx_stash_size) |
		ATTRELI_EI(priv->rx_stash_index);

	gfar_write(&regs->attreli, attrs);

	/* Start with defaults, and add stashing or locking
	 * depending on the appropriate variables
	 */
	attrs = ATTR_INIT_SETTINGS;

	if (priv->bd_stash_en)
		attrs |= ATTR_BDSTASH;

	if (priv->rx_stash_size != 0)
		attrs |= ATTR_BUFSTASH;

	gfar_write(&regs->attr, attrs);

	gfar_write(&regs->fifo_tx_thr, priv->fifo_threshold);
	gfar_write(&regs->fifo_tx_starve, priv->fifo_starve);
	gfar_write(&regs->fifo_tx_starve_shutoff, priv->fifo_starve_off);
}

static struct net_device_stats *gfar_get_stats(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	unsigned long rx_packets = 0, rx_bytes = 0, rx_dropped = 0;
	unsigned long tx_packets = 0, tx_bytes = 0;
	int i;

	for (i = 0; i < priv->num_rx_queues; i++) {
		rx_packets += priv->rx_queue[i]->stats.rx_packets;
		rx_bytes += priv->rx_queue[i]->stats.rx_bytes;
		rx_dropped += priv->rx_queue[i]->stats.rx_dropped;
	}

	dev->stats.rx_packets = rx_packets;
	dev->stats.rx_bytes = rx_bytes;
	dev->stats.rx_dropped = rx_dropped;

	for (i = 0; i < priv->num_tx_queues; i++) {
		tx_bytes += priv->tx_queue[i]->stats.tx_bytes;
		tx_packets += priv->tx_queue[i]->stats.tx_packets;
	}

	dev->stats.tx_bytes = tx_bytes;
	dev->stats.tx_packets = tx_packets;

	return &dev->stats;
}

static const struct net_device_ops gfar_netdev_ops = {
	.ndo_open = gfar_enet_open,
	.ndo_start_xmit = gfar_start_xmit,
	.ndo_stop = gfar_close,
	.ndo_change_mtu = gfar_change_mtu,
	.ndo_set_features = gfar_set_features,
	.ndo_set_rx_mode = gfar_set_multi,
	.ndo_tx_timeout = gfar_timeout,
	.ndo_do_ioctl = gfar_ioctl,
	.ndo_get_stats = gfar_get_stats,
	.ndo_set_mac_address = eth_mac_addr,
	.ndo_validate_addr = eth_validate_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = gfar_netpoll,
#endif
};

void lock_rx_qs(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_rx_queues; i++)
		spin_lock(&priv->rx_queue[i]->rxlock);
}

void lock_tx_qs(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_tx_queues; i++)
		spin_lock(&priv->tx_queue[i]->txlock);
}

void unlock_rx_qs(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_rx_queues; i++)
		spin_unlock(&priv->rx_queue[i]->rxlock);
}

void unlock_tx_qs(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_tx_queues; i++)
		spin_unlock(&priv->tx_queue[i]->txlock);
}

static void free_tx_pointers(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_tx_queues; i++)
		kfree(priv->tx_queue[i]);
}

static void free_rx_pointers(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_rx_queues; i++)
		kfree(priv->rx_queue[i]);
}

static void unmap_group_regs(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < MAXGROUPS; i++)
		if (priv->gfargrp[i].regs)
			iounmap(priv->gfargrp[i].regs);
}

static void free_gfar_dev(struct gfar_private *priv)
{
	int i, j;

	for (i = 0; i < priv->num_grps; i++)
		for (j = 0; j < GFAR_NUM_IRQS; j++) {
			kfree(priv->gfargrp[i].irqinfo[j]);
			priv->gfargrp[i].irqinfo[j] = NULL;
		}

	free_netdev(priv->ndev);
}

static void disable_napi(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_grps; i++)
		napi_disable(&priv->gfargrp[i].napi);
}

static void enable_napi(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_grps; i++)
		napi_enable(&priv->gfargrp[i].napi);
}

static int gfar_parse_group(struct device_node *np,
			    struct gfar_private *priv, const char *model)
{
	struct gfar_priv_grp *grp = &priv->gfargrp[priv->num_grps];
	u32 *queue_mask;
	int i;

	for (i = 0; i < GFAR_NUM_IRQS; i++) {
		grp->irqinfo[i] = kzalloc(sizeof(struct gfar_irqinfo),
					  GFP_KERNEL);
		if (!grp->irqinfo[i])
			return -ENOMEM;
	}

	grp->regs = of_iomap(np, 0);
	if (!grp->regs)
		return -ENOMEM;

	gfar_irq(grp, TX)->irq = irq_of_parse_and_map(np, 0);

	/* If we aren't the FEC we have multiple interrupts */
	if (model && strcasecmp(model, "FEC")) {
		gfar_irq(grp, RX)->irq = irq_of_parse_and_map(np, 1);
		gfar_irq(grp, ER)->irq = irq_of_parse_and_map(np, 2);
		if (gfar_irq(grp, TX)->irq == NO_IRQ ||
		    gfar_irq(grp, RX)->irq == NO_IRQ ||
		    gfar_irq(grp, ER)->irq == NO_IRQ)
			return -EINVAL;
	}

	grp->priv = priv;
	spin_lock_init(&grp->grplock);
	if (priv->mode == MQ_MG_MODE) {
		queue_mask = (u32 *)of_get_property(np, "fsl,rx-bit-map", NULL);
		grp->rx_bit_map = queue_mask ?
			*queue_mask : (DEFAULT_MAPPING >> priv->num_grps);
		queue_mask = (u32 *)of_get_property(np, "fsl,tx-bit-map", NULL);
		grp->tx_bit_map = queue_mask ?
			*queue_mask : (DEFAULT_MAPPING >> priv->num_grps);
	} else {
		grp->rx_bit_map = 0xFF;
		grp->tx_bit_map = 0xFF;
	}
	priv->num_grps++;

	return 0;
}

static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
{
	const char *model;
	const char *ctype;
	const void *mac_addr;
	int err = 0, i;
	struct net_device *dev = NULL;
	struct gfar_private *priv = NULL;
	struct device_node *np = ofdev->dev.of_node;
	struct device_node *child = NULL;
	const u32 *stash;
	const u32 *stash_len;
	const u32 *stash_idx;
	unsigned int num_tx_qs, num_rx_qs;
	u32 *tx_queues, *rx_queues;

	if (!np || !of_device_is_available(np))
		return -ENODEV;

	/* parse the num of tx and rx queues */
	tx_queues = (u32 *)of_get_property(np, "fsl,num_tx_queues", NULL);
	num_tx_qs = tx_queues ? *tx_queues : 1;

	if (num_tx_qs > MAX_TX_QS) {
		pr_err("num_tx_qs(=%d) greater than MAX_TX_QS(=%d)\n",
		       num_tx_qs, MAX_TX_QS);
		pr_err("Cannot do alloc_etherdev, aborting\n");
		return -EINVAL;
	}

	rx_queues = (u32 *)of_get_property(np, "fsl,num_rx_queues", NULL);
	num_rx_qs = rx_queues ? *rx_queues : 1;

	if (num_rx_qs > MAX_RX_QS) {
		pr_err("num_rx_qs(=%d) greater than MAX_RX_QS(=%d)\n",
		       num_rx_qs, MAX_RX_QS);
		pr_err("Cannot do alloc_etherdev, aborting\n");
		return -EINVAL;
	}

	*pdev = alloc_etherdev_mq(sizeof(*priv), num_tx_qs);
	dev = *pdev;
	if (NULL == dev)
		return -ENOMEM;

	priv = netdev_priv(dev);
	priv->ndev = dev;

	priv->num_tx_queues = num_tx_qs;
	netif_set_real_num_rx_queues(dev, num_rx_qs);
	priv->num_rx_queues = num_rx_qs;
	priv->num_grps = 0x0;

	/* Init Rx queue filer rule set linked list */
	INIT_LIST_HEAD(&priv->rx_list.list);
	priv->rx_list.count = 0;
	mutex_init(&priv->rx_queue_access);

	model = of_get_property(np, "model", NULL);

	for (i = 0; i < MAXGROUPS; i++)
		priv->gfargrp[i].regs = NULL;

	/* Parse and initialize group specific information */
	if (of_device_is_compatible(np, "fsl,etsec2")) {
		priv->mode = MQ_MG_MODE;
		for_each_child_of_node(np, child) {
			err = gfar_parse_group(child, priv, model);
			if (err)
				goto err_grp_init;
		}
	} else {
		priv->mode = SQ_SG_MODE;
		err = gfar_parse_group(np, priv, model);
		if (err)
			goto err_grp_init;
	}

	for (i = 0; i < priv->num_tx_queues; i++)
		priv->tx_queue[i] = NULL;
	for (i = 0; i < priv->num_rx_queues; i++)
		priv->rx_queue[i] = NULL;

	for (i = 0; i < priv->num_tx_queues; i++) {
		priv->tx_queue[i] = kzalloc(sizeof(struct gfar_priv_tx_q),
					    GFP_KERNEL);
		if (!priv->tx_queue[i]) {
			err = -ENOMEM;
			goto tx_alloc_failed;
		}
		priv->tx_queue[i]->tx_skbuff = NULL;
		priv->tx_queue[i]->qindex = i;
		priv->tx_queue[i]->dev = dev;
		spin_lock_init(&(priv->tx_queue[i]->txlock));
	}

	for (i = 0; i < priv->num_rx_queues; i++) {
		priv->rx_queue[i] = kzalloc(sizeof(struct gfar_priv_rx_q),
					    GFP_KERNEL);
		if (!priv->rx_queue[i]) {
			err = -ENOMEM;
			goto rx_alloc_failed;
		}
		priv->rx_queue[i]->rx_skbuff = NULL;
		priv->rx_queue[i]->qindex = i;
		priv->rx_queue[i]->dev = dev;
		spin_lock_init(&(priv->rx_queue[i]->rxlock));
	}

	stash = of_get_property(np, "bd-stash", NULL);

	if (stash) {
		priv->device_flags |= FSL_GIANFAR_DEV_HAS_BD_STASHING;
		priv->bd_stash_en = 1;
	}

	stash_len = of_get_property(np, "rx-stash-len", NULL);

	if (stash_len)
		priv->rx_stash_size = *stash_len;

	stash_idx = of_get_property(np, "rx-stash-idx", NULL);

	if (stash_idx)
		priv->rx_stash_index = *stash_idx;

	if (stash_len || stash_idx)
		priv->device_flags |= FSL_GIANFAR_DEV_HAS_BUF_STASHING;

	mac_addr = of_get_mac_address(np);

	if (mac_addr)
		memcpy(dev->dev_addr, mac_addr, ETH_ALEN);

	if (model && !strcasecmp(model, "TSEC"))
		priv->device_flags = FSL_GIANFAR_DEV_HAS_GIGABIT |
				     FSL_GIANFAR_DEV_HAS_COALESCE |
				     FSL_GIANFAR_DEV_HAS_RMON |
				     FSL_GIANFAR_DEV_HAS_MULTI_INTR;

	if (model && !strcasecmp(model, "eTSEC"))
		priv->device_flags = FSL_GIANFAR_DEV_HAS_GIGABIT |
				     FSL_GIANFAR_DEV_HAS_COALESCE |
				     FSL_GIANFAR_DEV_HAS_RMON |
				     FSL_GIANFAR_DEV_HAS_MULTI_INTR |
				     FSL_GIANFAR_DEV_HAS_PADDING |
				     FSL_GIANFAR_DEV_HAS_CSUM |
				     FSL_GIANFAR_DEV_HAS_VLAN |
				     FSL_GIANFAR_DEV_HAS_MAGIC_PACKET |
				     FSL_GIANFAR_DEV_HAS_EXTENDED_HASH |
				     FSL_GIANFAR_DEV_HAS_TIMER;

	ctype = of_get_property(np, "phy-connection-type", NULL);

	/* We only care about rgmii-id. The rest are autodetected */
	if (ctype && !strcmp(ctype, "rgmii-id"))
		priv->interface = PHY_INTERFACE_MODE_RGMII_ID;
	else
		priv->interface = PHY_INTERFACE_MODE_MII;

	if (of_get_property(np, "fsl,magic-packet", NULL))
		priv->device_flags |= FSL_GIANFAR_DEV_HAS_MAGIC_PACKET;

	priv->phy_node = of_parse_phandle(np, "phy-handle", 0);

	/* Find the TBI PHY. If it's not there, we don't support SGMII */
	priv->tbi_node = of_parse_phandle(np, "tbi-handle", 0);

	return 0;

rx_alloc_failed:
	free_rx_pointers(priv);
tx_alloc_failed:
	free_tx_pointers(priv);
err_grp_init:
	unmap_group_regs(priv);
	free_gfar_dev(priv);
	return err;
}

static int gfar_hwtstamp_ioctl(struct net_device *netdev,
			       struct ifreq *ifr, int cmd)
{
	struct hwtstamp_config config;
	struct gfar_private *priv = netdev_priv(netdev);

	if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
		return -EFAULT;

	/* reserved for future extensions */
	if (config.flags)
		return -EINVAL;

	switch (config.tx_type) {
	case HWTSTAMP_TX_OFF:
		priv->hwts_tx_en = 0;
		break;
	case HWTSTAMP_TX_ON:
		if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER))
			return -ERANGE;
		priv->hwts_tx_en = 1;
		break;
	default:
		return -ERANGE;
	}

	switch (config.rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		if (priv->hwts_rx_en) {
			stop_gfar(netdev);
			priv->hwts_rx_en = 0;
			startup_gfar(netdev);
		}
		break;
	default:
		if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER))
			return -ERANGE;
		if (!priv->hwts_rx_en) {
			stop_gfar(netdev);
			priv->hwts_rx_en = 1;
			startup_gfar(netdev);
		}
		config.rx_filter = HWTSTAMP_FILTER_ALL;
		break;
	}

	return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
		-EFAULT : 0;
}

/* Ioctl MII Interface */
static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct gfar_private *priv = netdev_priv(dev);

	if (!netif_running(dev))
		return -EINVAL;

	if (cmd == SIOCSHWTSTAMP)
		return gfar_hwtstamp_ioctl(dev, rq, cmd);

	if (!priv->phydev)
		return -ENODEV;

	return phy_mii_ioctl(priv->phydev, rq, cmd);
}

static unsigned int reverse_bitmap(unsigned int bit_map, unsigned int max_qs)
{
	unsigned int new_bit_map = 0x0;
	int mask = 0x1 << (max_qs - 1), i;

	for (i = 0; i < max_qs; i++) {
		if (bit_map & mask)
			new_bit_map = new_bit_map + (1 << i);
		mask = mask >> 0x1;
	}
	return new_bit_map;
}

static u32 cluster_entry_per_class(struct gfar_private *priv, u32 rqfar,
				   u32 class)
{
	u32 rqfpr = FPR_FILER_MASK;
	u32 rqfcr = 0x0;

	rqfar--;
	rqfcr = RQFCR_CLE | RQFCR_PID_MASK | RQFCR_CMP_EXACT;
	priv->ftp_rqfpr[rqfar] = rqfpr;
	priv->ftp_rqfcr[rqfar] = rqfcr;
	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);

	rqfar--;
	rqfcr = RQFCR_CMP_NOMATCH;
	priv->ftp_rqfpr[rqfar] = rqfpr;
	priv->ftp_rqfcr[rqfar] = rqfcr;
	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);

	rqfar--;
	rqfcr = RQFCR_CMP_EXACT | RQFCR_PID_PARSE | RQFCR_CLE | RQFCR_AND;
	rqfpr = class;
	priv->ftp_rqfcr[rqfar] = rqfcr;
	priv->ftp_rqfpr[rqfar] = rqfpr;
	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);

	rqfar--;
	rqfcr = RQFCR_CMP_EXACT | RQFCR_PID_MASK | RQFCR_AND;
	rqfpr = class;
	priv->ftp_rqfcr[rqfar] = rqfcr;
	priv->ftp_rqfpr[rqfar] = rqfpr;
	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);

	return rqfar;
}

static void gfar_init_filer_table(struct gfar_private *priv)
{
	int i = 0x0;
	u32 rqfar = MAX_FILER_IDX;
	u32 rqfcr = 0x0;
	u32 rqfpr = FPR_FILER_MASK;

	/* Default rule */
	rqfcr = RQFCR_CMP_MATCH;
	priv->ftp_rqfcr[rqfar] = rqfcr;
	priv->ftp_rqfpr[rqfar] = rqfpr;
	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);

	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6);
	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6 | RQFPR_UDP);
	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6 | RQFPR_TCP);
	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4);
	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4 | RQFPR_UDP);
	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4 | RQFPR_TCP);

	/* cur_filer_idx indicates the first non-masked rule */
	priv->cur_filer_idx = rqfar;

	/* Rest are masked rules */
	rqfcr = RQFCR_CMP_NOMATCH;
	for (i = 0; i < rqfar; i++) {
		priv->ftp_rqfcr[i] = rqfcr;
		priv->ftp_rqfpr[i] = rqfpr;
		gfar_write_filer(priv, i, rqfcr, rqfpr);
	}
}

static void __gfar_detect_errata_83xx(struct gfar_private *priv)
{
	unsigned int pvr = mfspr(SPRN_PVR);
	unsigned int svr = mfspr(SPRN_SVR);
	unsigned int mod = (svr >> 16) & 0xfff6; /* w/o E suffix */
	unsigned int rev = svr & 0xffff;

	/* MPC8313 Rev 2.0 and higher; All MPC837x */
	if ((pvr == 0x80850010 && mod == 0x80b0 && rev >= 0x0020) ||
	    (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0))
		priv->errata |= GFAR_ERRATA_74;

	/* MPC8313 and MPC837x all rev */
	if ((pvr == 0x80850010 && mod == 0x80b0) ||
	    (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0))
		priv->errata |= GFAR_ERRATA_76;

	/* MPC8313 Rev < 2.0 */
	if (pvr == 0x80850010 && mod == 0x80b0 && rev < 0x0020)
		priv->errata |= GFAR_ERRATA_12;
}

static void __gfar_detect_errata_85xx(struct gfar_private *priv)
{
	unsigned int svr = mfspr(SPRN_SVR);

	if ((SVR_SOC_VER(svr) == SVR_8548) && (SVR_REV(svr) == 0x20))
		priv->errata |= GFAR_ERRATA_12;
	if (((SVR_SOC_VER(svr) == SVR_P2020) && (SVR_REV(svr) < 0x20)) ||
	    ((SVR_SOC_VER(svr) == SVR_P2010) && (SVR_REV(svr) < 0x20)))
		priv->errata |= GFAR_ERRATA_76; /* aka eTSEC 20 */
}

static void gfar_detect_errata(struct gfar_private *priv)
{
	struct device *dev = &priv->ofdev->dev;

	/* no plans to fix */
	priv->errata |= GFAR_ERRATA_A002;

	if (pvr_version_is(PVR_VER_E500V1) || pvr_version_is(PVR_VER_E500V2))
		__gfar_detect_errata_85xx(priv);
	else /* non-mpc85xx parts, i.e. e300 core based */
		__gfar_detect_errata_83xx(priv);

	if (priv->errata)
		dev_info(dev, "enabled errata workarounds, flags: 0x%x\n",
			 priv->errata);
}

/* Set up the ethernet device structure, private data,
 * and anything else we need before we start
 */
static int gfar_probe(struct platform_device *ofdev)
{
	u32 tempval;
	struct net_device *dev = NULL;
	struct gfar_private *priv = NULL;
	struct gfar __iomem *regs = NULL;
	int err = 0, i, grp_idx = 0;
	u32 rstat = 0, tstat = 0, rqueue = 0, tqueue = 0;
	u32 isrg = 0;
	u32 __iomem *baddr;

	err = gfar_of_init(ofdev, &dev);

	if (err)
		return err;

	priv = netdev_priv(dev);
	priv->ndev = dev;
	priv->ofdev = ofdev;
	priv->dev = &ofdev->dev;
	SET_NETDEV_DEV(dev, &ofdev->dev);

	spin_lock_init(&priv->bflock);
	INIT_WORK(&priv->reset_task, gfar_reset_task);

	platform_set_drvdata(ofdev, priv);
	regs = priv->gfargrp[0].regs;

	gfar_detect_errata(priv);

	/* Stop the DMA engine now, in case it was running before
	 * (The firmware could have used it, and left it running).
	 */
	gfar_halt(dev);

	/* Reset MAC layer */
	gfar_write(&regs->maccfg1, MACCFG1_SOFT_RESET);

	/* We need to delay at least 3 TX clocks */
	udelay(2);

	tempval = 0;
	if (!priv->pause_aneg_en && priv->tx_pause_en)
		tempval |= MACCFG1_TX_FLOW;
	if (!priv->pause_aneg_en && priv->rx_pause_en)
		tempval |= MACCFG1_RX_FLOW;
	/* the soft reset bit is not self-resetting, so we need to
	 * clear it before resuming normal operation
	 */
	gfar_write(&regs->maccfg1, tempval);

	/* Initialize MACCFG2. */
	tempval = MACCFG2_INIT_SETTINGS;
	if (gfar_has_errata(priv, GFAR_ERRATA_74))
		tempval |= MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK;
	gfar_write(&regs->maccfg2, tempval);

	/* Initialize ECNTRL */
	gfar_write(&regs->ecntrl, ECNTRL_INIT_SETTINGS);

	/* Set the dev->base_addr to the gfar reg region */
	dev->base_addr = (unsigned long) regs;

	/* Fill in the dev structure */
	dev->watchdog_timeo = TX_TIMEOUT;
	dev->mtu = 1500;
	dev->netdev_ops = &gfar_netdev_ops;
	dev->ethtool_ops = &gfar_ethtool_ops;

	/* Register for napi ... We are registering NAPI for each grp */
	if (priv->mode == SQ_SG_MODE)
		netif_napi_add(dev, &priv->gfargrp[0].napi, gfar_poll_sq,
			       GFAR_DEV_WEIGHT);
	else
		for (i = 0; i < priv->num_grps; i++)
			netif_napi_add(dev, &priv->gfargrp[i].napi, gfar_poll,
				       GFAR_DEV_WEIGHT);

	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_CSUM) {
		dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG |
				   NETIF_F_RXCSUM;
		dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG |
				 NETIF_F_RXCSUM | NETIF_F_HIGHDMA;
	}

	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_VLAN) {
		dev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX |
				    NETIF_F_HW_VLAN_CTAG_RX;
		dev->features |= NETIF_F_HW_VLAN_CTAG_RX;
	}

	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_EXTENDED_HASH) {
		priv->extended_hash = 1;
		priv->hash_width = 9;

		priv->hash_regs[0] = &regs->igaddr0;
		priv->hash_regs[1] = &regs->igaddr1;
		priv->hash_regs[2] = &regs->igaddr2;
		priv->hash_regs[3] = &regs->igaddr3;
		priv->hash_regs[4] = &regs->igaddr4;
		priv->hash_regs[5] = &regs->igaddr5;
		priv->hash_regs[6] = &regs->igaddr6;
		priv->hash_regs[7] = &regs->igaddr7;
		priv->hash_regs[8] = &regs->gaddr0;
		priv->hash_regs[9] = &regs->gaddr1;
		priv->hash_regs[10] = &regs->gaddr2;
		priv->hash_regs[11] = &regs->gaddr3;
		priv->hash_regs[12] = &regs->gaddr4;
		priv->hash_regs[13] = &regs->gaddr5;
		priv->hash_regs[14] = &regs->gaddr6;
		priv->hash_regs[15] = &regs->gaddr7;

	} else {
		priv->extended_hash = 0;
		priv->hash_width = 8;

		priv->hash_regs[0] = &regs->gaddr0;
		priv->hash_regs[1] = &regs->gaddr1;
		priv->hash_regs[2] = &regs->gaddr2;
		priv->hash_regs[3] = &regs->gaddr3;
		priv->hash_regs[4] = &regs->gaddr4;
		priv->hash_regs[5] = &regs->gaddr5;
		priv->hash_regs[6] = &regs->gaddr6;
		priv->hash_regs[7] = &regs->gaddr7;
	}

	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_PADDING)
		priv->padding = DEFAULT_PADDING;
	else
		priv->padding = 0;

	if (dev->features & NETIF_F_IP_CSUM ||
	    priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)
		dev->needed_headroom = GMAC_FCB_LEN;

	/* Program the isrg regs only if number of grps > 1 */
	if (priv->num_grps > 1) {
		baddr = &regs->isrg0;
		for (i = 0; i < priv->num_grps; i++) {
			isrg |= (priv->gfargrp[i].rx_bit_map << ISRG_SHIFT_RX);
			isrg |= (priv->gfargrp[i].tx_bit_map << ISRG_SHIFT_TX);
			gfar_write(baddr, isrg);
			baddr++;
			isrg = 0x0;
		}
	}

	/* Need to reverse the bit maps as bit_map's MSB is q0
	 * but, for_each_set_bit parses from right to left, which
	 * basically reverses the queue numbers
	 */
	for (i = 0; i < priv->num_grps; i++) {
		priv->gfargrp[i].tx_bit_map =
			reverse_bitmap(priv->gfargrp[i].tx_bit_map, MAX_TX_QS);
		priv->gfargrp[i].rx_bit_map =
			reverse_bitmap(priv->gfargrp[i].rx_bit_map, MAX_RX_QS);
	}

	/* Calculate RSTAT, TSTAT, RQUEUE and TQUEUE values,
	 * also assign queues to groups
	 */
	for (grp_idx = 0; grp_idx < priv->num_grps; grp_idx++) {
		priv->gfargrp[grp_idx].num_rx_queues = 0x0;

		for_each_set_bit(i, &priv->gfargrp[grp_idx].rx_bit_map,
				 priv->num_rx_queues) {
			priv->gfargrp[grp_idx].num_rx_queues++;
			priv->rx_queue[i]->grp = &priv->gfargrp[grp_idx];
			rstat = rstat | (RSTAT_CLEAR_RHALT >> i);
			rqueue = rqueue | ((RQUEUE_EN0 | RQUEUE_EX0) >> i);
		}
		priv->gfargrp[grp_idx].num_tx_queues = 0x0;

		for_each_set_bit(i, &priv->gfargrp[grp_idx].tx_bit_map,
				 priv->num_tx_queues) {
			priv->gfargrp[grp_idx].num_tx_queues++;
			priv->tx_queue[i]->grp = &priv->gfargrp[grp_idx];
			tstat = tstat | (TSTAT_CLEAR_THALT >> i);
			tqueue = tqueue | (TQUEUE_EN0 >> i);
		}
		priv->gfargrp[grp_idx].rstat = rstat;
		priv->gfargrp[grp_idx].tstat = tstat;
		rstat = tstat = 0;
	}

	gfar_write(&regs->rqueue, rqueue);
	gfar_write(&regs->tqueue, tqueue);

	priv->rx_buffer_size = DEFAULT_RX_BUFFER_SIZE;

	/* Initializing some of the rx/tx queue level parameters */
	for (i = 0; i < priv->num_tx_queues; i++) {
		priv->tx_queue[i]->tx_ring_size = DEFAULT_TX_RING_SIZE;
		priv->tx_queue[i]->num_txbdfree = DEFAULT_TX_RING_SIZE;
		priv->tx_queue[i]->txcoalescing = DEFAULT_TX_COALESCE;
		priv->tx_queue[i]->txic = DEFAULT_TXIC;
	}

	for (i = 0; i < priv->num_rx_queues; i++) {
		priv->rx_queue[i]->rx_ring_size = DEFAULT_RX_RING_SIZE;
		priv->rx_queue[i]->rxcoalescing = DEFAULT_RX_COALESCE;
		priv->rx_queue[i]->rxic = DEFAULT_RXIC;
	}

	/* always enable rx filer */
	priv->rx_filer_enable = 1;
	/* Enable most messages by default */
	priv->msg_enable = (NETIF_MSG_IFUP << 1) - 1;
	/* use priority h/w tx queue scheduling for single queue devices */
	if (priv->num_tx_queues == 1)
		priv->prio_sched_en = 1;

	/* Carrier starts down, phylib will bring it up */
	netif_carrier_off(dev);

	err = register_netdev(dev);

	if (err) {
		pr_err("%s: Cannot register net device, aborting\n", dev->name);
		goto register_fail;
	}

	device_init_wakeup(&dev->dev,
			   priv->device_flags &
			   FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);

	/* fill out IRQ number and name fields */
	for (i = 0; i < priv->num_grps; i++) {
		struct gfar_priv_grp *grp = &priv->gfargrp[i];
		if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
			sprintf(gfar_irq(grp, TX)->name, "%s%s%c%s",
				dev->name, "_g", '0' + i, "_tx");
			sprintf(gfar_irq(grp, RX)->name, "%s%s%c%s",
				dev->name, "_g", '0' + i, "_rx");
			sprintf(gfar_irq(grp, ER)->name, "%s%s%c%s",
				dev->name, "_g", '0' + i, "_er");
		} else
			strcpy(gfar_irq(grp, TX)->name, dev->name);
	}

	/* Initialize the filer table */
	gfar_init_filer_table(priv);

	/* Create all the sysfs files */
	gfar_init_sysfs(dev);

	/* Print out the device info */
	netdev_info(dev, "mac: %pM\n", dev->dev_addr);

	/* Even more device info helps when determining which kernel
	 * provided which set of benchmarks.
	 */
	netdev_info(dev, "Running with NAPI enabled\n");
	for (i = 0; i < priv->num_rx_queues; i++)
		netdev_info(dev, "RX BD ring size for Q[%d]: %d\n",
			    i, priv->rx_queue[i]->rx_ring_size);
	for (i = 0; i < priv->num_tx_queues; i++)
		netdev_info(dev, "TX BD ring size for Q[%d]: %d\n",
			    i, priv->tx_queue[i]->tx_ring_size);

	return 0;

register_fail:
	unmap_group_regs(priv);
	free_tx_pointers(priv);
	free_rx_pointers(priv);
	if (priv->phy_node)
		of_node_put(priv->phy_node);
	if (priv->tbi_node)
		of_node_put(priv->tbi_node);
	free_gfar_dev(priv);
	return err;
}

static int gfar_remove(struct platform_device *ofdev)
{
	struct gfar_private *priv = platform_get_drvdata(ofdev);

	if (priv->phy_node)
		of_node_put(priv->phy_node);
	if (priv->tbi_node)
		of_node_put(priv->tbi_node);

	unregister_netdev(priv->ndev);
	unmap_group_regs(priv);
	free_gfar_dev(priv);

	return 0;
}

#ifdef CONFIG_PM

static int gfar_suspend(struct device *dev)
{
	struct gfar_private *priv = dev_get_drvdata(dev);
	struct net_device *ndev = priv->ndev;
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	unsigned long flags;
	u32 tempval;

	int magic_packet = priv->wol_en &&
			   (priv->device_flags &
			    FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);

	netif_device_detach(ndev);

	if (netif_running(ndev)) {

		local_irq_save(flags);
		lock_tx_qs(priv);
		lock_rx_qs(priv);

		gfar_halt_nodisable(ndev);

		/* Disable Tx, and Rx if wake-on-LAN is disabled. */
		tempval = gfar_read(&regs->maccfg1);

		tempval &= ~MACCFG1_TX_EN;

		if (!magic_packet)
			tempval &= ~MACCFG1_RX_EN;

		gfar_write(&regs->maccfg1, tempval);

		unlock_rx_qs(priv);
		unlock_tx_qs(priv);
		local_irq_restore(flags);

		disable_napi(priv);

		if (magic_packet) {
			/* Enable interrupt on Magic Packet */
			gfar_write(&regs->imask, IMASK_MAG);

			/* Enable Magic Packet mode */
			tempval = gfar_read(&regs->maccfg2);
			tempval |= MACCFG2_MPEN;
			gfar_write(&regs->maccfg2, tempval);
		} else {
			phy_stop(priv->phydev);
		}
	}

	return 0;
}

static int gfar_resume(struct device *dev)
{
	struct gfar_private *priv = dev_get_drvdata(dev);
	struct net_device *ndev = priv->ndev;
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	unsigned long flags;
	u32 tempval;
	int magic_packet = priv->wol_en &&
			   (priv->device_flags &
			    FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);

	if (!netif_running(ndev)) {
		netif_device_attach(ndev);
		return 0;
	}

	if (!magic_packet && priv->phydev)
		phy_start(priv->phydev);

	/* Disable Magic Packet mode, in case something
	 * else woke us up.
	 */
	local_irq_save(flags);
	lock_tx_qs(priv);
	lock_rx_qs(priv);

	tempval = gfar_read(&regs->maccfg2);
	tempval &= ~MACCFG2_MPEN;
	gfar_write(&regs->maccfg2, tempval);

	gfar_start(ndev);

	unlock_rx_qs(priv);
	unlock_tx_qs(priv);
	local_irq_restore(flags);

	netif_device_attach(ndev);

	enable_napi(priv);

	return 0;
}

static int gfar_restore(struct device *dev)
{
	struct gfar_private *priv = dev_get_drvdata(dev);
	struct net_device *ndev = priv->ndev;

	if (!netif_running(ndev)) {
		netif_device_attach(ndev);

		return 0;
	}

	if (gfar_init_bds(ndev)) {
		free_skb_resources(priv);
		return -ENOMEM;
	}

	init_registers(ndev);
	gfar_set_mac_address(ndev);
	gfar_init_mac(ndev);
	gfar_start(ndev);

	priv->oldlink = 0;
	priv->oldspeed = 0;
	priv->oldduplex = -1;

	if (priv->phydev)
		phy_start(priv->phydev);

	netif_device_attach(ndev);
	enable_napi(priv);

	return 0;
}

static struct dev_pm_ops gfar_pm_ops = {
	.suspend = gfar_suspend,
	.resume = gfar_resume,
	.freeze = gfar_suspend,
	.thaw = gfar_resume,
	.restore = gfar_restore,
};

#define GFAR_PM_OPS (&gfar_pm_ops)

#else

#define GFAR_PM_OPS NULL

#endif

/* Reads the controller's registers to determine what interface
 * connects it to the PHY.
 */
static phy_interface_t gfar_get_interface(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 ecntrl;

	ecntrl = gfar_read(&regs->ecntrl);

	if (ecntrl & ECNTRL_SGMII_MODE)
		return PHY_INTERFACE_MODE_SGMII;

	if (ecntrl & ECNTRL_TBI_MODE) {
		if (ecntrl & ECNTRL_REDUCED_MODE)
			return PHY_INTERFACE_MODE_RTBI;
		else
			return PHY_INTERFACE_MODE_TBI;
	}

	if (ecntrl & ECNTRL_REDUCED_MODE) {
		if (ecntrl & ECNTRL_REDUCED_MII_MODE) {
			return PHY_INTERFACE_MODE_RMII;
		} else {
			phy_interface_t interface = priv->interface;

			/* This isn't autodetected right now, so it must
			 * be set by the device tree or platform code.
			 */
			if (interface == PHY_INTERFACE_MODE_RGMII_ID)
				return PHY_INTERFACE_MODE_RGMII_ID;

			return PHY_INTERFACE_MODE_RGMII;
		}
	}

	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT)
		return PHY_INTERFACE_MODE_GMII;

	return PHY_INTERFACE_MODE_MII;
}


/* Initializes driver's PHY state, and attaches to the PHY.
 * Returns 0 on success.
 */
static int init_phy(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	uint gigabit_support =
		priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT ?
		GFAR_SUPPORTED_GBIT : 0;
	phy_interface_t interface;

	priv->oldlink = 0;
	priv->oldspeed = 0;
	priv->oldduplex = -1;

	interface = gfar_get_interface(dev);

	priv->phydev = of_phy_connect(dev, priv->phy_node, &adjust_link, 0,
				      interface);
	if (!priv->phydev)
		priv->phydev = of_phy_connect_fixed_link(dev, &adjust_link,
							 interface);
	if (!priv->phydev) {
		dev_err(&dev->dev, "could not attach to PHY\n");
		return -ENODEV;
	}

	if (interface == PHY_INTERFACE_MODE_SGMII)
		gfar_configure_serdes(dev);

	/* Remove any features not supported by the controller */
	priv->phydev->supported &= (GFAR_SUPPORTED | gigabit_support);
	priv->phydev->advertising = priv->phydev->supported;

	return 0;
}

/* Initialize TBI PHY interface for communicating with the
 * SERDES lynx PHY on the chip. We communicate with this PHY
 * through the MDIO bus on each controller, treating it as a
 * "normal" PHY at the address found in the TBIPA register. We assume
 * that the TBIPA register is valid. Either the MDIO bus code will set
 * it to a value that doesn't conflict with other PHYs on the bus, or the
 * value doesn't matter, as there are no other PHYs on the bus.
 */
static void gfar_configure_serdes(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct phy_device *tbiphy;

	if (!priv->tbi_node) {
		dev_warn(&dev->dev, "error: SGMII mode requires that the "
			 "device tree specify a tbi-handle\n");
		return;
	}

	tbiphy = of_phy_find_device(priv->tbi_node);
	if (!tbiphy) {
		dev_err(&dev->dev, "error: Could not get TBI device\n");
		return;
	}

	/* If the link is already up, we must already be ok, and don't need to
	 * configure and reset the TBI<->SerDes link. Maybe U-Boot configured
	 * everything for us? Resetting it takes the link down and requires
	 * several seconds for it to come back.
	 */
	if (phy_read(tbiphy, MII_BMSR) & BMSR_LSTATUS)
		return;

	/* Single clk mode, mii mode off (for serdes communication) */
	phy_write(tbiphy, MII_TBICON, TBICON_CLK_SELECT);

	phy_write(tbiphy, MII_ADVERTISE,
		  ADVERTISE_1000XFULL | ADVERTISE_1000XPAUSE |
		  ADVERTISE_1000XPSE_ASYM);

	phy_write(tbiphy, MII_BMCR,
		  BMCR_ANENABLE | BMCR_ANRESTART | BMCR_FULLDPLX |
		  BMCR_SPEED1000);
}

static void init_registers(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = NULL;
	int i;

	for (i = 0; i < priv->num_grps; i++) {
		regs = priv->gfargrp[i].regs;
		/* Clear IEVENT */
		gfar_write(&regs->ievent, IEVENT_INIT_CLEAR);

		/* Initialize IMASK */
		gfar_write(&regs->imask, IMASK_INIT_CLEAR);
	}

	regs = priv->gfargrp[0].regs;
	/* Init hash registers to zero */
	gfar_write(&regs->igaddr0, 0);
	gfar_write(&regs->igaddr1, 0);
	gfar_write(&regs->igaddr2, 0);
	gfar_write(&regs->igaddr3, 0);
	gfar_write(&regs->igaddr4, 0);
	gfar_write(&regs->igaddr5, 0);
	gfar_write(&regs->igaddr6, 0);
	gfar_write(&regs->igaddr7, 0);

	gfar_write(&regs->gaddr0, 0);
	gfar_write(&regs->gaddr1, 0);
	gfar_write(&regs->gaddr2, 0);
	gfar_write(&regs->gaddr3, 0);
	gfar_write(&regs->gaddr4, 0);
	gfar_write(&regs->gaddr5, 0);
	gfar_write(&regs->gaddr6, 0);
	gfar_write(&regs->gaddr7, 0);

	/* Zero out the rmon mib registers if it has them */
	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON) {
		memset_io(&(regs->rmon), 0, sizeof(struct rmon_mib));

		/* Mask off the CAM interrupts */
		gfar_write(&regs->rmon.cam1, 0xffffffff);
		gfar_write(&regs->rmon.cam2, 0xffffffff);
	}

	/* Initialize the max receive buffer length */
	gfar_write(&regs->mrblr, priv->rx_buffer_size);

	/* Initialize the Minimum Frame Length Register */
	gfar_write(&regs->minflr, MINFLR_INIT_SETTINGS);
}

static int __gfar_is_rx_idle(struct gfar_private *priv)
{
	u32 res;

	/* Normally TSEC should not hang on GRS commands, so we should
	 * actually wait for IEVENT_GRSC flag.
	 */
	if (!gfar_has_errata(priv, GFAR_ERRATA_A002))
		return 0;

	/* Read the eTSEC register at offset 0xD1C. If bits 7-14 are
	 * the same as bits 23-30, the eTSEC Rx is assumed to be idle
	 * and the Rx can be safely reset.
	 */
	res = gfar_read((void __iomem *)priv->gfargrp[0].regs + 0xd1c);
	res &= 0x7f807f80;
	if ((res & 0xffff) == (res >> 16))
		return 1;

	return 0;
}

/* Halt the receive and transmit queues */
static void gfar_halt_nodisable(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = NULL;
	u32 tempval;
	int i;

	for (i = 0; i < priv->num_grps; i++) {
		regs = priv->gfargrp[i].regs;
		/* Mask all interrupts */
		gfar_write(&regs->imask, IMASK_INIT_CLEAR);

		/* Clear all interrupts */
		gfar_write(&regs->ievent, IEVENT_INIT_CLEAR);
	}

	regs = priv->gfargrp[0].regs;
	/* Stop the DMA, and wait for it to stop */
	tempval = gfar_read(&regs->dmactrl);
	if ((tempval & (DMACTRL_GRS | DMACTRL_GTS)) !=
	    (DMACTRL_GRS | DMACTRL_GTS)) {
		int ret;

		tempval |= (DMACTRL_GRS | DMACTRL_GTS);
		gfar_write(&regs->dmactrl, tempval);

		do {
			ret = spin_event_timeout(((gfar_read(&regs->ievent) &
				 (IEVENT_GRSC | IEVENT_GTSC)) ==
				 (IEVENT_GRSC | IEVENT_GTSC)), 1000000, 0);
			if (!ret && !(gfar_read(&regs->ievent) & IEVENT_GRSC))
				ret = __gfar_is_rx_idle(priv);
		} while (!ret);
	}
}

/* Halt the receive and transmit queues */
void gfar_halt(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 tempval;

	gfar_halt_nodisable(dev);

	/* Disable Rx and Tx */
	tempval = gfar_read(&regs->maccfg1);
	tempval &= ~(MACCFG1_RX_EN | MACCFG1_TX_EN);
	gfar_write(&regs->maccfg1, tempval);
}

static void free_grp_irqs(struct gfar_priv_grp *grp)
{
	free_irq(gfar_irq(grp, TX)->irq, grp);
	free_irq(gfar_irq(grp, RX)->irq, grp);
	free_irq(gfar_irq(grp, ER)->irq, grp);
}

void stop_gfar(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	unsigned long flags;
	int i;

	phy_stop(priv->phydev);


	/* Lock it down */
	local_irq_save(flags);
	lock_tx_qs(priv);
	lock_rx_qs(priv);

	gfar_halt(dev);

	unlock_rx_qs(priv);
	unlock_tx_qs(priv);
	local_irq_restore(flags);

	/* Free the IRQs */
	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
		for (i = 0; i < priv->num_grps; i++)
			free_grp_irqs(&priv->gfargrp[i]);
	} else {
		for (i = 0; i < priv->num_grps; i++)
			free_irq(gfar_irq(&priv->gfargrp[i], TX)->irq,
				 &priv->gfargrp[i]);
	}

	free_skb_resources(priv);
}

static void free_skb_tx_queue(struct gfar_priv_tx_q *tx_queue)
{
	struct txbd8 *txbdp;
	struct gfar_private *priv = netdev_priv(tx_queue->dev);
	int i, j;

	txbdp = tx_queue->tx_bd_base;

	for (i = 0; i < tx_queue->tx_ring_size; i++) {
		if (!tx_queue->tx_skbuff[i])
			continue;

		dma_unmap_single(priv->dev, txbdp->bufPtr,
				 txbdp->length, DMA_TO_DEVICE);
		txbdp->lstatus = 0;
		for (j = 0; j < skb_shinfo(tx_queue->tx_skbuff[i])->nr_frags;
		     j++) {
			txbdp++;
			dma_unmap_page(priv->dev, txbdp->bufPtr,
				       txbdp->length, DMA_TO_DEVICE);
		}
		txbdp++;
		dev_kfree_skb_any(tx_queue->tx_skbuff[i]);
		tx_queue->tx_skbuff[i] = NULL;
	}
	kfree(tx_queue->tx_skbuff);
	tx_queue->tx_skbuff = NULL;
}

static void free_skb_rx_queue(struct gfar_priv_rx_q *rx_queue)
{
	struct rxbd8 *rxbdp;
	struct gfar_private *priv = netdev_priv(rx_queue->dev);
	int i;

	rxbdp = rx_queue->rx_bd_base;

	for (i = 0; i < rx_queue->rx_ring_size; i++) {
		if (rx_queue->rx_skbuff[i]) {
			dma_unmap_single(priv->dev, rxbdp->bufPtr,
					 priv->rx_buffer_size,
					 DMA_FROM_DEVICE);
			dev_kfree_skb_any(rx_queue->rx_skbuff[i]);
			rx_queue->rx_skbuff[i] = NULL;
		}
		rxbdp->lstatus = 0;
		rxbdp->bufPtr = 0;
		rxbdp++;
	}
	kfree(rx_queue->rx_skbuff);
	rx_queue->rx_skbuff = NULL;
}

/* If there are any tx skbs or rx skbs still around, free them.
 * Then free tx_skbuff and rx_skbuff
 */
static void free_skb_resources(struct gfar_private *priv)
{
	struct gfar_priv_tx_q *tx_queue = NULL;
	struct gfar_priv_rx_q *rx_queue = NULL;
	int i;

	/* Go through all the buffer descriptors and free their data buffers */
	for (i = 0; i < priv->num_tx_queues; i++) {
		struct netdev_queue *txq;

		tx_queue = priv->tx_queue[i];
		txq = netdev_get_tx_queue(tx_queue->dev, tx_queue->qindex);
		if (tx_queue->tx_skbuff)
			free_skb_tx_queue(tx_queue);
		netdev_tx_reset_queue(txq);
	}

	for (i = 0; i < priv->num_rx_queues; i++) {
		rx_queue = priv->rx_queue[i];
		if (rx_queue->rx_skbuff)
			free_skb_rx_queue(rx_queue);
	}

	dma_free_coherent(priv->dev,
			  sizeof(struct txbd8) * priv->total_tx_ring_size +
			  sizeof(struct rxbd8) * priv->total_rx_ring_size,
			  priv->tx_queue[0]->tx_bd_base,
			  priv->tx_queue[0]->tx_bd_dma_base);
}

void gfar_start(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 tempval;
	int i = 0;

	/* Enable Rx and Tx in MACCFG1 */
	tempval = gfar_read(&regs->maccfg1);
	tempval |= (MACCFG1_RX_EN | MACCFG1_TX_EN);
	gfar_write(&regs->maccfg1, tempval);

	/* Initialize DMACTRL to have WWR and WOP */
	tempval = gfar_read(&regs->dmactrl);
	tempval |= DMACTRL_INIT_SETTINGS;
	gfar_write(&regs->dmactrl, tempval);

	/* Make sure we aren't stopped */
	tempval = gfar_read(&regs->dmactrl);
	tempval &= ~(DMACTRL_GRS | DMACTRL_GTS);
	gfar_write(&regs->dmactrl, tempval);

	for (i = 0; i < priv->num_grps; i++) {
		regs = priv->gfargrp[i].regs;
		/* Clear THLT/RHLT, so that the DMA starts polling now */
		gfar_write(&regs->tstat, priv->gfargrp[i].tstat);
		gfar_write(&regs->rstat, priv->gfargrp[i].rstat);
		/* Unmask the interrupts we look for */
		gfar_write(&regs->imask, IMASK_DEFAULT);
	}

	dev->trans_start = jiffies; /* prevent tx timeout */
}

static void gfar_configure_coalescing(struct gfar_private *priv,
				      unsigned long tx_mask, unsigned long rx_mask)
{
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 __iomem *baddr;

	if (priv->mode == MQ_MG_MODE) {
		int i = 0;

		baddr = &regs->txic0;
		for_each_set_bit(i, &tx_mask, priv->num_tx_queues) {
			gfar_write(baddr + i, 0);
			if (likely(priv->tx_queue[i]->txcoalescing))
				gfar_write(baddr + i, priv->tx_queue[i]->txic);
		}

		baddr = &regs->rxic0;
		for_each_set_bit(i, &rx_mask, priv->num_rx_queues) {
			gfar_write(baddr + i, 0);
			if (likely(priv->rx_queue[i]->rxcoalescing))
				gfar_write(baddr + i, priv->rx_queue[i]->rxic);
		}
	} else {
		/* Backward compatible case -- even if we enable
		 * multiple queues, there's only single reg to program
		 */
		gfar_write(&regs->txic, 0);
		if (likely(priv->tx_queue[0]->txcoalescing))
			gfar_write(&regs->txic, priv->tx_queue[0]->txic);

		gfar_write(&regs->rxic, 0);
		if (unlikely(priv->rx_queue[0]->rxcoalescing))
			gfar_write(&regs->rxic, priv->rx_queue[0]->rxic);
	}
}

void gfar_configure_coalescing_all(struct gfar_private *priv)
{
	gfar_configure_coalescing(priv, 0xFF, 0xFF);
}

static int register_grp_irqs(struct gfar_priv_grp *grp)
{
	struct gfar_private *priv = grp->priv;
	struct net_device *dev = priv->ndev;
	int err;

	/* If the device has multiple interrupts, register for
	 * them. Otherwise, only register for the one
	 */
	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
		/* Install our interrupt handlers for Error,
		 * Transmit, and Receive
		 */
		err = request_irq(gfar_irq(grp, ER)->irq, gfar_error, 0,
				  gfar_irq(grp, ER)->name, grp);
		if (err < 0) {
			netif_err(priv, intr, dev, "Can't get IRQ %d\n",
				  gfar_irq(grp, ER)->irq);

			goto err_irq_fail;
		}
		err = request_irq(gfar_irq(grp, TX)->irq, gfar_transmit, 0,
				  gfar_irq(grp, TX)->name, grp);
		if (err < 0) {
			netif_err(priv, intr, dev, "Can't get IRQ %d\n",
				  gfar_irq(grp, TX)->irq);
			goto tx_irq_fail;
		}
		err = request_irq(gfar_irq(grp, RX)->irq, gfar_receive, 0,
				  gfar_irq(grp, RX)->name, grp);
		if (err < 0) {
			netif_err(priv, intr, dev, "Can't get IRQ %d\n",
				  gfar_irq(grp, RX)->irq);
			goto rx_irq_fail;
		}
	} else {
		err = request_irq(gfar_irq(grp, TX)->irq, gfar_interrupt, 0,
				  gfar_irq(grp, TX)->name, grp);
		if (err < 0) {
			netif_err(priv, intr, dev, "Can't get IRQ %d\n",
				  gfar_irq(grp, TX)->irq);
			goto err_irq_fail;
		}
	}

	return 0;

rx_irq_fail:
	free_irq(gfar_irq(grp, TX)->irq, grp);
tx_irq_fail:
	free_irq(gfar_irq(grp, ER)->irq, grp);
err_irq_fail:
	return err;

}

/* Bring the controller up and running */
int startup_gfar(struct net_device *ndev)
{
	struct gfar_private *priv = netdev_priv(ndev);
	struct gfar __iomem *regs = NULL;
	int err, i, j;

	for (i = 0; i < priv->num_grps; i++) {
		regs = priv->gfargrp[i].regs;
		gfar_write(&regs->imask, IMASK_INIT_CLEAR);
	}

	regs = priv->gfargrp[0].regs;
	err = gfar_alloc_skb_resources(ndev);
	if (err)
		return err;

	gfar_init_mac(ndev);

	for (i = 0; i < priv->num_grps; i++) {
		err = register_grp_irqs(&priv->gfargrp[i]);
		if (err) {
			for (j = 0; j < i; j++)
				free_grp_irqs(&priv->gfargrp[j]);
			goto irq_fail;
		}
	}

	/* Start the controller */
	gfar_start(ndev);

	phy_start(priv->phydev);

	gfar_configure_coalescing_all(priv);

	return 0;

irq_fail:
	free_skb_resources(priv);
	return err;
}

/* Called when something needs to use the ethernet device
 * Returns 0 for success.
 */
 */
static int gfar_enet_open(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	int err;

	enable_napi(priv);

	/* Initialize a bunch of registers */
	init_registers(dev);

	gfar_set_mac_address(dev);

	err = init_phy(dev);

	if (err) {
		disable_napi(priv);
		return err;
	}

	err = startup_gfar(dev);
	if (err) {
		disable_napi(priv);
		return err;
	}

	netif_tx_start_all_queues(dev);

	device_set_wakeup_enable(&dev->dev, priv->wol_en);

	return err;
}

static inline struct txfcb *gfar_add_fcb(struct sk_buff *skb)
{
	struct txfcb *fcb = (struct txfcb *)skb_push(skb, GMAC_FCB_LEN);

	memset(fcb, 0, GMAC_FCB_LEN);

	return fcb;
}

static inline void gfar_tx_checksum(struct sk_buff *skb, struct txfcb *fcb,
				    int fcb_length)
{
	/* If we're here, it's an IP packet with a TCP or UDP
	 * payload.  We set it to checksum, using a pseudo-header
	 * we provide
	 */
	u8 flags = TXFCB_DEFAULT;

	/* Tell the controller what the protocol is
	 * And provide the already calculated phcs
	 */
	if (ip_hdr(skb)->protocol == IPPROTO_UDP) {
		flags |= TXFCB_UDP;
		fcb->phcs = udp_hdr(skb)->check;
	} else
		fcb->phcs = tcp_hdr(skb)->check;

	/* l3os is the distance between the start of the
	 * frame (skb->data) and the start of the IP hdr.
	 * l4os is the distance between the start of the
	 * l3 hdr and the l4 hdr
	 */
	fcb->l3os = (u16)(skb_network_offset(skb) - fcb_length);
	fcb->l4os = skb_network_header_len(skb);

	fcb->flags = flags;
}

static inline void gfar_tx_vlan(struct sk_buff *skb, struct txfcb *fcb)
{
	fcb->flags |= TXFCB_VLN;
	fcb->vlctl = vlan_tx_tag_get(skb);
}

static inline struct txbd8 *skip_txbd(struct txbd8 *bdp, int stride,
				      struct txbd8 *base, int ring_size)
{
	struct txbd8 *new_bd = bdp + stride;

	return (new_bd >= (base + ring_size)) ? (new_bd - ring_size) : new_bd;
}

static inline struct txbd8 *next_txbd(struct txbd8 *bdp, struct txbd8 *base,
				      int ring_size)
{
	return skip_txbd(bdp, 1, base, ring_size);
}

/* eTSEC12: csum generation not supported for some fcb offsets */
static inline bool gfar_csum_errata_12(struct gfar_private *priv,
				       unsigned long fcb_addr)
{
	return (gfar_has_errata(priv, GFAR_ERRATA_12) &&
		(fcb_addr % 0x20) > 0x18);
}

/* eTSEC76: csum generation for frames larger than 2500 may
 * cause excess delays before start of transmission
 */
static inline bool gfar_csum_errata_76(struct gfar_private *priv,
				       unsigned int len)
{
	return (gfar_has_errata(priv, GFAR_ERRATA_76) &&
		(len > 2500));
}

/* This is called by the kernel when a frame is ready for transmission.
2100 * It is pointed to by the dev->hard_start_xmit function pointer 2101 */ 2102 static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev) 2103 { 2104 struct gfar_private *priv = netdev_priv(dev); 2105 struct gfar_priv_tx_q *tx_queue = NULL; 2106 struct netdev_queue *txq; 2107 struct gfar __iomem *regs = NULL; 2108 struct txfcb *fcb = NULL; 2109 struct txbd8 *txbdp, *txbdp_start, *base, *txbdp_tstamp = NULL; 2110 u32 lstatus; 2111 int i, rq = 0; 2112 int do_tstamp, do_csum, do_vlan; 2113 u32 bufaddr; 2114 unsigned long flags; 2115 unsigned int nr_frags, nr_txbds, bytes_sent, fcb_len = 0; 2116 2117 rq = skb->queue_mapping; 2118 tx_queue = priv->tx_queue[rq]; 2119 txq = netdev_get_tx_queue(dev, rq); 2120 base = tx_queue->tx_bd_base; 2121 regs = tx_queue->grp->regs; 2122 2123 do_csum = (CHECKSUM_PARTIAL == skb->ip_summed); 2124 do_vlan = vlan_tx_tag_present(skb); 2125 do_tstamp = (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && 2126 priv->hwts_tx_en; 2127 2128 if (do_csum || do_vlan) 2129 fcb_len = GMAC_FCB_LEN; 2130 2131 /* check if time stamp should be generated */ 2132 if (unlikely(do_tstamp)) 2133 fcb_len = GMAC_FCB_LEN + GMAC_TXPAL_LEN; 2134 2135 /* make space for additional header when fcb is needed */ 2136 if (fcb_len && unlikely(skb_headroom(skb) < fcb_len)) { 2137 struct sk_buff *skb_new; 2138 2139 skb_new = skb_realloc_headroom(skb, fcb_len); 2140 if (!skb_new) { 2141 dev->stats.tx_errors++; 2142 kfree_skb(skb); 2143 return NETDEV_TX_OK; 2144 } 2145 2146 if (skb->sk) 2147 skb_set_owner_w(skb_new, skb->sk); 2148 consume_skb(skb); 2149 skb = skb_new; 2150 } 2151 2152 /* total number of fragments in the SKB */ 2153 nr_frags = skb_shinfo(skb)->nr_frags; 2154 2155 /* calculate the required number of TxBDs for this skb */ 2156 if (unlikely(do_tstamp)) 2157 nr_txbds = nr_frags + 2; 2158 else 2159 nr_txbds = nr_frags + 1; 2160 2161 /* check if there is space to queue this packet */ 2162 if (nr_txbds > tx_queue->num_txbdfree) { 2163 /* no space, stop the queue */ 2164 netif_tx_stop_queue(txq); 2165 dev->stats.tx_fifo_errors++; 2166 return NETDEV_TX_BUSY; 2167 } 2168 2169 /* Update transmit stats */ 2170 bytes_sent = skb->len; 2171 tx_queue->stats.tx_bytes += bytes_sent; 2172 /* keep Tx bytes on wire for BQL accounting */ 2173 GFAR_CB(skb)->bytes_sent = bytes_sent; 2174 tx_queue->stats.tx_packets++; 2175 2176 txbdp = txbdp_start = tx_queue->cur_tx; 2177 lstatus = txbdp->lstatus; 2178 2179 /* Time stamp insertion requires one additional TxBD */ 2180 if (unlikely(do_tstamp)) 2181 txbdp_tstamp = txbdp = next_txbd(txbdp, base, 2182 tx_queue->tx_ring_size); 2183 2184 if (nr_frags == 0) { 2185 if (unlikely(do_tstamp)) 2186 txbdp_tstamp->lstatus |= BD_LFLAG(TXBD_LAST | 2187 TXBD_INTERRUPT); 2188 else 2189 lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT); 2190 } else { 2191 /* Place the fragment addresses and lengths into the TxBDs */ 2192 for (i = 0; i < nr_frags; i++) { 2193 unsigned int frag_len; 2194 /* Point at the next BD, wrapping as needed */ 2195 txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size); 2196 2197 frag_len = skb_shinfo(skb)->frags[i].size; 2198 2199 lstatus = txbdp->lstatus | frag_len | 2200 BD_LFLAG(TXBD_READY); 2201 2202 /* Handle the last BD specially */ 2203 if (i == nr_frags - 1) 2204 lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT); 2205 2206 bufaddr = skb_frag_dma_map(priv->dev, 2207 &skb_shinfo(skb)->frags[i], 2208 0, 2209 frag_len, 2210 DMA_TO_DEVICE); 2211 2212 /* set the TxBD length and buffer pointer */ 2213 txbdp->bufPtr = bufaddr; 2214 txbdp->lstatus 
= lstatus; 2215 } 2216 2217 lstatus = txbdp_start->lstatus; 2218 } 2219 2220 /* Add TxPAL between FCB and frame if required */ 2221 if (unlikely(do_tstamp)) { 2222 skb_push(skb, GMAC_TXPAL_LEN); 2223 memset(skb->data, 0, GMAC_TXPAL_LEN); 2224 } 2225 2226 /* Add TxFCB if required */ 2227 if (fcb_len) { 2228 fcb = gfar_add_fcb(skb); 2229 lstatus |= BD_LFLAG(TXBD_TOE); 2230 } 2231 2232 /* Set up checksumming */ 2233 if (do_csum) { 2234 gfar_tx_checksum(skb, fcb, fcb_len); 2235 2236 if (unlikely(gfar_csum_errata_12(priv, (unsigned long)fcb)) || 2237 unlikely(gfar_csum_errata_76(priv, skb->len))) { 2238 __skb_pull(skb, GMAC_FCB_LEN); 2239 skb_checksum_help(skb); 2240 if (do_vlan || do_tstamp) { 2241 /* put back a new fcb for vlan/tstamp TOE */ 2242 fcb = gfar_add_fcb(skb); 2243 } else { 2244 /* Tx TOE not used */ 2245 lstatus &= ~(BD_LFLAG(TXBD_TOE)); 2246 fcb = NULL; 2247 } 2248 } 2249 } 2250 2251 if (do_vlan) 2252 gfar_tx_vlan(skb, fcb); 2253 2254 /* Setup tx hardware time stamping if requested */ 2255 if (unlikely(do_tstamp)) { 2256 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; 2257 fcb->ptp = 1; 2258 } 2259 2260 txbdp_start->bufPtr = dma_map_single(priv->dev, skb->data, 2261 skb_headlen(skb), DMA_TO_DEVICE); 2262 2263 /* If time stamping is requested one additional TxBD must be set up. The 2264 * first TxBD points to the FCB and must have a data length of 2265 * GMAC_FCB_LEN. The second TxBD points to the actual frame data with 2266 * the full frame length. 2267 */ 2268 if (unlikely(do_tstamp)) { 2269 txbdp_tstamp->bufPtr = txbdp_start->bufPtr + fcb_len; 2270 txbdp_tstamp->lstatus |= BD_LFLAG(TXBD_READY) | 2271 (skb_headlen(skb) - fcb_len); 2272 lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | GMAC_FCB_LEN; 2273 } else { 2274 lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | skb_headlen(skb); 2275 } 2276 2277 netdev_tx_sent_queue(txq, bytes_sent); 2278 2279 /* We can work in parallel with gfar_clean_tx_ring(), except 2280 * when modifying num_txbdfree. Note that we didn't grab the lock 2281 * when we were reading the num_txbdfree and checking for available 2282 * space, that's because outside of this function it can only grow, 2283 * and once we've got needed space, it cannot suddenly disappear. 2284 * 2285 * The lock also protects us from gfar_error(), which can modify 2286 * regs->tstat and thus retrigger the transfers, which is why we 2287 * also must grab the lock before setting ready bit for the first 2288 * to be transmitted BD. 2289 */ 2290 spin_lock_irqsave(&tx_queue->txlock, flags); 2291 2292 /* The powerpc-specific eieio() is used, as wmb() has too strong 2293 * semantics (it requires synchronization between cacheable and 2294 * uncacheable mappings, which eieio doesn't provide and which we 2295 * don't need), thus requiring a more expensive sync instruction. At 2296 * some point, the set of architecture-independent barrier functions 2297 * should be expanded to include weaker barriers. 
2298 */ 2299 eieio(); 2300 2301 txbdp_start->lstatus = lstatus; 2302 2303 eieio(); /* force lstatus write before tx_skbuff */ 2304 2305 tx_queue->tx_skbuff[tx_queue->skb_curtx] = skb; 2306 2307 /* Update the current skb pointer to the next entry we will use 2308 * (wrapping if necessary) 2309 */ 2310 tx_queue->skb_curtx = (tx_queue->skb_curtx + 1) & 2311 TX_RING_MOD_MASK(tx_queue->tx_ring_size); 2312 2313 tx_queue->cur_tx = next_txbd(txbdp, base, tx_queue->tx_ring_size); 2314 2315 /* reduce TxBD free count */ 2316 tx_queue->num_txbdfree -= (nr_txbds); 2317 2318 /* If the next BD still needs to be cleaned up, then the bds 2319 * are full. We need to tell the kernel to stop sending us stuff. 2320 */ 2321 if (!tx_queue->num_txbdfree) { 2322 netif_tx_stop_queue(txq); 2323 2324 dev->stats.tx_fifo_errors++; 2325 } 2326 2327 /* Tell the DMA to go go go */ 2328 gfar_write(®s->tstat, TSTAT_CLEAR_THALT >> tx_queue->qindex); 2329 2330 /* Unlock priv */ 2331 spin_unlock_irqrestore(&tx_queue->txlock, flags); 2332 2333 return NETDEV_TX_OK; 2334 } 2335 2336 /* Stops the kernel queue, and halts the controller */ 2337 static int gfar_close(struct net_device *dev) 2338 { 2339 struct gfar_private *priv = netdev_priv(dev); 2340 2341 disable_napi(priv); 2342 2343 cancel_work_sync(&priv->reset_task); 2344 stop_gfar(dev); 2345 2346 /* Disconnect from the PHY */ 2347 phy_disconnect(priv->phydev); 2348 priv->phydev = NULL; 2349 2350 netif_tx_stop_all_queues(dev); 2351 2352 return 0; 2353 } 2354 2355 /* Changes the mac address if the controller is not running. */ 2356 static int gfar_set_mac_address(struct net_device *dev) 2357 { 2358 gfar_set_mac_for_addr(dev, 0, dev->dev_addr); 2359 2360 return 0; 2361 } 2362 2363 /* Check if rx parser should be activated */ 2364 void gfar_check_rx_parser_mode(struct gfar_private *priv) 2365 { 2366 struct gfar __iomem *regs; 2367 u32 tempval; 2368 2369 regs = priv->gfargrp[0].regs; 2370 2371 tempval = gfar_read(®s->rctrl); 2372 /* If parse is no longer required, then disable parser */ 2373 if (tempval & RCTRL_REQ_PARSER) { 2374 tempval |= RCTRL_PRSDEP_INIT; 2375 priv->uses_rxfcb = 1; 2376 } else { 2377 tempval &= ~RCTRL_PRSDEP_INIT; 2378 priv->uses_rxfcb = 0; 2379 } 2380 gfar_write(®s->rctrl, tempval); 2381 } 2382 2383 /* Enables and disables VLAN insertion/extraction */ 2384 void gfar_vlan_mode(struct net_device *dev, netdev_features_t features) 2385 { 2386 struct gfar_private *priv = netdev_priv(dev); 2387 struct gfar __iomem *regs = NULL; 2388 unsigned long flags; 2389 u32 tempval; 2390 2391 regs = priv->gfargrp[0].regs; 2392 local_irq_save(flags); 2393 lock_rx_qs(priv); 2394 2395 if (features & NETIF_F_HW_VLAN_CTAG_TX) { 2396 /* Enable VLAN tag insertion */ 2397 tempval = gfar_read(®s->tctrl); 2398 tempval |= TCTRL_VLINS; 2399 gfar_write(®s->tctrl, tempval); 2400 } else { 2401 /* Disable VLAN tag insertion */ 2402 tempval = gfar_read(®s->tctrl); 2403 tempval &= ~TCTRL_VLINS; 2404 gfar_write(®s->tctrl, tempval); 2405 } 2406 2407 if (features & NETIF_F_HW_VLAN_CTAG_RX) { 2408 /* Enable VLAN tag extraction */ 2409 tempval = gfar_read(®s->rctrl); 2410 tempval |= (RCTRL_VLEX | RCTRL_PRSDEP_INIT); 2411 gfar_write(®s->rctrl, tempval); 2412 priv->uses_rxfcb = 1; 2413 } else { 2414 /* Disable VLAN tag extraction */ 2415 tempval = gfar_read(®s->rctrl); 2416 tempval &= ~RCTRL_VLEX; 2417 gfar_write(®s->rctrl, tempval); 2418 2419 gfar_check_rx_parser_mode(priv); 2420 } 2421 2422 gfar_change_mtu(dev, dev->mtu); 2423 2424 unlock_rx_qs(priv); 2425 local_irq_restore(flags); 2426 } 2427 2428 
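/* Worked example for the rx buffer sizing in gfar_change_mtu() below
 * (illustrative only, assuming INCREMENTAL_BUFFER_SIZE is 512): a new_mtu
 * of 1500 with no RxFCB and no padding gives frame_size = 1500 + ETH_HLEN
 * = 1514, which rounds up to (1514 & ~511) + 512 = 1536 bytes.
 */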
static int gfar_change_mtu(struct net_device *dev, int new_mtu)
{
	int tempsize, tempval;
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	int oldsize = priv->rx_buffer_size;
	int frame_size = new_mtu + ETH_HLEN;

	if ((frame_size < 64) || (frame_size > JUMBO_FRAME_SIZE)) {
		netif_err(priv, drv, dev, "Invalid MTU setting\n");
		return -EINVAL;
	}

	if (priv->uses_rxfcb)
		frame_size += GMAC_FCB_LEN;

	frame_size += priv->padding;

	tempsize = (frame_size & ~(INCREMENTAL_BUFFER_SIZE - 1)) +
		   INCREMENTAL_BUFFER_SIZE;

	/* Only stop and start the controller if it isn't already
	 * stopped, and we changed something
	 */
	if ((oldsize != tempsize) && (dev->flags & IFF_UP))
		stop_gfar(dev);

	priv->rx_buffer_size = tempsize;

	dev->mtu = new_mtu;

	gfar_write(&regs->mrblr, priv->rx_buffer_size);
	gfar_write(&regs->maxfrm, priv->rx_buffer_size);

	/* If the mtu is larger than the max size for standard
	 * ethernet frames (ie, a jumbo frame), then set maccfg2
	 * to allow huge frames, and to check the length
	 */
	tempval = gfar_read(&regs->maccfg2);

	if (priv->rx_buffer_size > DEFAULT_RX_BUFFER_SIZE ||
	    gfar_has_errata(priv, GFAR_ERRATA_74))
		tempval |= (MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK);
	else
		tempval &= ~(MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK);

	gfar_write(&regs->maccfg2, tempval);

	if ((oldsize != tempsize) && (dev->flags & IFF_UP))
		startup_gfar(dev);

	return 0;
}

/* gfar_reset_task gets scheduled when a packet has not been
 * transmitted after a set amount of time.
 * For now, assume that clearing out all the structures, and
 * starting over will fix the problem.
 */
static void gfar_reset_task(struct work_struct *work)
{
	struct gfar_private *priv = container_of(work, struct gfar_private,
						 reset_task);
	struct net_device *dev = priv->ndev;

	if (dev->flags & IFF_UP) {
		netif_tx_stop_all_queues(dev);
		stop_gfar(dev);
		startup_gfar(dev);
		netif_tx_start_all_queues(dev);
	}

	netif_tx_schedule_all(dev);
}

static void gfar_timeout(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);

	dev->stats.tx_errors++;
	schedule_work(&priv->reset_task);
}

static void gfar_align_skb(struct sk_buff *skb)
{
	/* We need the data buffer to be aligned properly.
We will reserve 2514 * as many bytes as needed to align the data properly 2515 */ 2516 skb_reserve(skb, RXBUF_ALIGNMENT - 2517 (((unsigned long) skb->data) & (RXBUF_ALIGNMENT - 1))); 2518 } 2519 2520 /* Interrupt Handler for Transmit complete */ 2521 static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue) 2522 { 2523 struct net_device *dev = tx_queue->dev; 2524 struct netdev_queue *txq; 2525 struct gfar_private *priv = netdev_priv(dev); 2526 struct txbd8 *bdp, *next = NULL; 2527 struct txbd8 *lbdp = NULL; 2528 struct txbd8 *base = tx_queue->tx_bd_base; 2529 struct sk_buff *skb; 2530 int skb_dirtytx; 2531 int tx_ring_size = tx_queue->tx_ring_size; 2532 int frags = 0, nr_txbds = 0; 2533 int i; 2534 int howmany = 0; 2535 int tqi = tx_queue->qindex; 2536 unsigned int bytes_sent = 0; 2537 u32 lstatus; 2538 size_t buflen; 2539 2540 txq = netdev_get_tx_queue(dev, tqi); 2541 bdp = tx_queue->dirty_tx; 2542 skb_dirtytx = tx_queue->skb_dirtytx; 2543 2544 while ((skb = tx_queue->tx_skbuff[skb_dirtytx])) { 2545 unsigned long flags; 2546 2547 frags = skb_shinfo(skb)->nr_frags; 2548 2549 /* When time stamping, one additional TxBD must be freed. 2550 * Also, we need to dma_unmap_single() the TxPAL. 2551 */ 2552 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) 2553 nr_txbds = frags + 2; 2554 else 2555 nr_txbds = frags + 1; 2556 2557 lbdp = skip_txbd(bdp, nr_txbds - 1, base, tx_ring_size); 2558 2559 lstatus = lbdp->lstatus; 2560 2561 /* Only clean completed frames */ 2562 if ((lstatus & BD_LFLAG(TXBD_READY)) && 2563 (lstatus & BD_LENGTH_MASK)) 2564 break; 2565 2566 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) { 2567 next = next_txbd(bdp, base, tx_ring_size); 2568 buflen = next->length + GMAC_FCB_LEN + GMAC_TXPAL_LEN; 2569 } else 2570 buflen = bdp->length; 2571 2572 dma_unmap_single(priv->dev, bdp->bufPtr, 2573 buflen, DMA_TO_DEVICE); 2574 2575 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) { 2576 struct skb_shared_hwtstamps shhwtstamps; 2577 u64 *ns = (u64*) (((u32)skb->data + 0x10) & ~0x7); 2578 2579 memset(&shhwtstamps, 0, sizeof(shhwtstamps)); 2580 shhwtstamps.hwtstamp = ns_to_ktime(*ns); 2581 skb_pull(skb, GMAC_FCB_LEN + GMAC_TXPAL_LEN); 2582 skb_tstamp_tx(skb, &shhwtstamps); 2583 bdp->lstatus &= BD_LFLAG(TXBD_WRAP); 2584 bdp = next; 2585 } 2586 2587 bdp->lstatus &= BD_LFLAG(TXBD_WRAP); 2588 bdp = next_txbd(bdp, base, tx_ring_size); 2589 2590 for (i = 0; i < frags; i++) { 2591 dma_unmap_page(priv->dev, bdp->bufPtr, 2592 bdp->length, DMA_TO_DEVICE); 2593 bdp->lstatus &= BD_LFLAG(TXBD_WRAP); 2594 bdp = next_txbd(bdp, base, tx_ring_size); 2595 } 2596 2597 bytes_sent += GFAR_CB(skb)->bytes_sent; 2598 2599 dev_kfree_skb_any(skb); 2600 2601 tx_queue->tx_skbuff[skb_dirtytx] = NULL; 2602 2603 skb_dirtytx = (skb_dirtytx + 1) & 2604 TX_RING_MOD_MASK(tx_ring_size); 2605 2606 howmany++; 2607 spin_lock_irqsave(&tx_queue->txlock, flags); 2608 tx_queue->num_txbdfree += nr_txbds; 2609 spin_unlock_irqrestore(&tx_queue->txlock, flags); 2610 } 2611 2612 /* If we freed a buffer, we can restart transmission, if necessary */ 2613 if (netif_tx_queue_stopped(txq) && tx_queue->num_txbdfree) 2614 netif_wake_subqueue(dev, tqi); 2615 2616 /* Update dirty indicators */ 2617 tx_queue->skb_dirtytx = skb_dirtytx; 2618 tx_queue->dirty_tx = bdp; 2619 2620 netdev_tx_completed_queue(txq, howmany, bytes_sent); 2621 } 2622 2623 static void gfar_schedule_cleanup(struct gfar_priv_grp *gfargrp) 2624 { 2625 unsigned long flags; 2626 2627 spin_lock_irqsave(&gfargrp->grplock, flags); 2628 if 
(napi_schedule_prep(&gfargrp->napi)) { 2629 gfar_write(&gfargrp->regs->imask, IMASK_RTX_DISABLED); 2630 __napi_schedule(&gfargrp->napi); 2631 } else { 2632 /* Clear IEVENT, so interrupts aren't called again 2633 * because of the packets that have already arrived. 2634 */ 2635 gfar_write(&gfargrp->regs->ievent, IEVENT_RTX_MASK); 2636 } 2637 spin_unlock_irqrestore(&gfargrp->grplock, flags); 2638 2639 } 2640 2641 /* Interrupt Handler for Transmit complete */ 2642 static irqreturn_t gfar_transmit(int irq, void *grp_id) 2643 { 2644 gfar_schedule_cleanup((struct gfar_priv_grp *)grp_id); 2645 return IRQ_HANDLED; 2646 } 2647 2648 static void gfar_new_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp, 2649 struct sk_buff *skb) 2650 { 2651 struct net_device *dev = rx_queue->dev; 2652 struct gfar_private *priv = netdev_priv(dev); 2653 dma_addr_t buf; 2654 2655 buf = dma_map_single(priv->dev, skb->data, 2656 priv->rx_buffer_size, DMA_FROM_DEVICE); 2657 gfar_init_rxbdp(rx_queue, bdp, buf); 2658 } 2659 2660 static struct sk_buff *gfar_alloc_skb(struct net_device *dev) 2661 { 2662 struct gfar_private *priv = netdev_priv(dev); 2663 struct sk_buff *skb; 2664 2665 skb = netdev_alloc_skb(dev, priv->rx_buffer_size + RXBUF_ALIGNMENT); 2666 if (!skb) 2667 return NULL; 2668 2669 gfar_align_skb(skb); 2670 2671 return skb; 2672 } 2673 2674 struct sk_buff *gfar_new_skb(struct net_device *dev) 2675 { 2676 return gfar_alloc_skb(dev); 2677 } 2678 2679 static inline void count_errors(unsigned short status, struct net_device *dev) 2680 { 2681 struct gfar_private *priv = netdev_priv(dev); 2682 struct net_device_stats *stats = &dev->stats; 2683 struct gfar_extra_stats *estats = &priv->extra_stats; 2684 2685 /* If the packet was truncated, none of the other errors matter */ 2686 if (status & RXBD_TRUNCATED) { 2687 stats->rx_length_errors++; 2688 2689 atomic64_inc(&estats->rx_trunc); 2690 2691 return; 2692 } 2693 /* Count the errors, if there were any */ 2694 if (status & (RXBD_LARGE | RXBD_SHORT)) { 2695 stats->rx_length_errors++; 2696 2697 if (status & RXBD_LARGE) 2698 atomic64_inc(&estats->rx_large); 2699 else 2700 atomic64_inc(&estats->rx_short); 2701 } 2702 if (status & RXBD_NONOCTET) { 2703 stats->rx_frame_errors++; 2704 atomic64_inc(&estats->rx_nonoctet); 2705 } 2706 if (status & RXBD_CRCERR) { 2707 atomic64_inc(&estats->rx_crcerr); 2708 stats->rx_crc_errors++; 2709 } 2710 if (status & RXBD_OVERRUN) { 2711 atomic64_inc(&estats->rx_overrun); 2712 stats->rx_crc_errors++; 2713 } 2714 } 2715 2716 irqreturn_t gfar_receive(int irq, void *grp_id) 2717 { 2718 gfar_schedule_cleanup((struct gfar_priv_grp *)grp_id); 2719 return IRQ_HANDLED; 2720 } 2721 2722 static inline void gfar_rx_checksum(struct sk_buff *skb, struct rxfcb *fcb) 2723 { 2724 /* If valid headers were found, and valid sums 2725 * were verified, then we tell the kernel that no 2726 * checksumming is necessary. Otherwise, it is [FIXME] 2727 */ 2728 if ((fcb->flags & RXFCB_CSUM_MASK) == (RXFCB_CIP | RXFCB_CTU)) 2729 skb->ip_summed = CHECKSUM_UNNECESSARY; 2730 else 2731 skb_checksum_none_assert(skb); 2732 } 2733 2734 2735 /* gfar_process_frame() -- handle one incoming packet if skb isn't NULL. 
*/ 2736 static void gfar_process_frame(struct net_device *dev, struct sk_buff *skb, 2737 int amount_pull, struct napi_struct *napi) 2738 { 2739 struct gfar_private *priv = netdev_priv(dev); 2740 struct rxfcb *fcb = NULL; 2741 2742 /* fcb is at the beginning if exists */ 2743 fcb = (struct rxfcb *)skb->data; 2744 2745 /* Remove the FCB from the skb 2746 * Remove the padded bytes, if there are any 2747 */ 2748 if (amount_pull) { 2749 skb_record_rx_queue(skb, fcb->rq); 2750 skb_pull(skb, amount_pull); 2751 } 2752 2753 /* Get receive timestamp from the skb */ 2754 if (priv->hwts_rx_en) { 2755 struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb); 2756 u64 *ns = (u64 *) skb->data; 2757 2758 memset(shhwtstamps, 0, sizeof(*shhwtstamps)); 2759 shhwtstamps->hwtstamp = ns_to_ktime(*ns); 2760 } 2761 2762 if (priv->padding) 2763 skb_pull(skb, priv->padding); 2764 2765 if (dev->features & NETIF_F_RXCSUM) 2766 gfar_rx_checksum(skb, fcb); 2767 2768 /* Tell the skb what kind of packet this is */ 2769 skb->protocol = eth_type_trans(skb, dev); 2770 2771 /* There's need to check for NETIF_F_HW_VLAN_CTAG_RX here. 2772 * Even if vlan rx accel is disabled, on some chips 2773 * RXFCB_VLN is pseudo randomly set. 2774 */ 2775 if (dev->features & NETIF_F_HW_VLAN_CTAG_RX && 2776 fcb->flags & RXFCB_VLN) 2777 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), fcb->vlctl); 2778 2779 /* Send the packet up the stack */ 2780 napi_gro_receive(napi, skb); 2781 2782 } 2783 2784 /* gfar_clean_rx_ring() -- Processes each frame in the rx ring 2785 * until the budget/quota has been reached. Returns the number 2786 * of frames handled 2787 */ 2788 int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit) 2789 { 2790 struct net_device *dev = rx_queue->dev; 2791 struct rxbd8 *bdp, *base; 2792 struct sk_buff *skb; 2793 int pkt_len; 2794 int amount_pull; 2795 int howmany = 0; 2796 struct gfar_private *priv = netdev_priv(dev); 2797 2798 /* Get the first full descriptor */ 2799 bdp = rx_queue->cur_rx; 2800 base = rx_queue->rx_bd_base; 2801 2802 amount_pull = priv->uses_rxfcb ? 
GMAC_FCB_LEN : 0; 2803 2804 while (!((bdp->status & RXBD_EMPTY) || (--rx_work_limit < 0))) { 2805 struct sk_buff *newskb; 2806 2807 rmb(); 2808 2809 /* Add another skb for the future */ 2810 newskb = gfar_new_skb(dev); 2811 2812 skb = rx_queue->rx_skbuff[rx_queue->skb_currx]; 2813 2814 dma_unmap_single(priv->dev, bdp->bufPtr, 2815 priv->rx_buffer_size, DMA_FROM_DEVICE); 2816 2817 if (unlikely(!(bdp->status & RXBD_ERR) && 2818 bdp->length > priv->rx_buffer_size)) 2819 bdp->status = RXBD_LARGE; 2820 2821 /* We drop the frame if we failed to allocate a new buffer */ 2822 if (unlikely(!newskb || !(bdp->status & RXBD_LAST) || 2823 bdp->status & RXBD_ERR)) { 2824 count_errors(bdp->status, dev); 2825 2826 if (unlikely(!newskb)) 2827 newskb = skb; 2828 else if (skb) 2829 dev_kfree_skb(skb); 2830 } else { 2831 /* Increment the number of packets */ 2832 rx_queue->stats.rx_packets++; 2833 howmany++; 2834 2835 if (likely(skb)) { 2836 pkt_len = bdp->length - ETH_FCS_LEN; 2837 /* Remove the FCS from the packet length */ 2838 skb_put(skb, pkt_len); 2839 rx_queue->stats.rx_bytes += pkt_len; 2840 skb_record_rx_queue(skb, rx_queue->qindex); 2841 gfar_process_frame(dev, skb, amount_pull, 2842 &rx_queue->grp->napi); 2843 2844 } else { 2845 netif_warn(priv, rx_err, dev, "Missing skb!\n"); 2846 rx_queue->stats.rx_dropped++; 2847 atomic64_inc(&priv->extra_stats.rx_skbmissing); 2848 } 2849 2850 } 2851 2852 rx_queue->rx_skbuff[rx_queue->skb_currx] = newskb; 2853 2854 /* Setup the new bdp */ 2855 gfar_new_rxbdp(rx_queue, bdp, newskb); 2856 2857 /* Update to the next pointer */ 2858 bdp = next_bd(bdp, base, rx_queue->rx_ring_size); 2859 2860 /* update to point at the next skb */ 2861 rx_queue->skb_currx = (rx_queue->skb_currx + 1) & 2862 RX_RING_MOD_MASK(rx_queue->rx_ring_size); 2863 } 2864 2865 /* Update the current rxbd pointer to be the next one */ 2866 rx_queue->cur_rx = bdp; 2867 2868 return howmany; 2869 } 2870 2871 static int gfar_poll_sq(struct napi_struct *napi, int budget) 2872 { 2873 struct gfar_priv_grp *gfargrp = 2874 container_of(napi, struct gfar_priv_grp, napi); 2875 struct gfar __iomem *regs = gfargrp->regs; 2876 struct gfar_priv_tx_q *tx_queue = gfargrp->priv->tx_queue[0]; 2877 struct gfar_priv_rx_q *rx_queue = gfargrp->priv->rx_queue[0]; 2878 int work_done = 0; 2879 2880 /* Clear IEVENT, so interrupts aren't called again 2881 * because of the packets that have already arrived 2882 */ 2883 gfar_write(®s->ievent, IEVENT_RTX_MASK); 2884 2885 /* run Tx cleanup to completion */ 2886 if (tx_queue->tx_skbuff[tx_queue->skb_dirtytx]) 2887 gfar_clean_tx_ring(tx_queue); 2888 2889 work_done = gfar_clean_rx_ring(rx_queue, budget); 2890 2891 if (work_done < budget) { 2892 napi_complete(napi); 2893 /* Clear the halt bit in RSTAT */ 2894 gfar_write(®s->rstat, gfargrp->rstat); 2895 2896 gfar_write(®s->imask, IMASK_DEFAULT); 2897 2898 /* If we are coalescing interrupts, update the timer 2899 * Otherwise, clear it 2900 */ 2901 gfar_write(®s->txic, 0); 2902 if (likely(tx_queue->txcoalescing)) 2903 gfar_write(®s->txic, tx_queue->txic); 2904 2905 gfar_write(®s->rxic, 0); 2906 if (unlikely(rx_queue->rxcoalescing)) 2907 gfar_write(®s->rxic, rx_queue->rxic); 2908 } 2909 2910 return work_done; 2911 } 2912 2913 static int gfar_poll(struct napi_struct *napi, int budget) 2914 { 2915 struct gfar_priv_grp *gfargrp = 2916 container_of(napi, struct gfar_priv_grp, napi); 2917 struct gfar_private *priv = gfargrp->priv; 2918 struct gfar __iomem *regs = gfargrp->regs; 2919 struct gfar_priv_tx_q *tx_queue = NULL; 2920 struct 
gfar_priv_rx_q *rx_queue = NULL; 2921 int work_done = 0, work_done_per_q = 0; 2922 int i, budget_per_q = 0; 2923 int has_tx_work = 0; 2924 unsigned long rstat_rxf; 2925 int num_act_queues; 2926 2927 /* Clear IEVENT, so interrupts aren't called again 2928 * because of the packets that have already arrived 2929 */ 2930 gfar_write(®s->ievent, IEVENT_RTX_MASK); 2931 2932 rstat_rxf = gfar_read(®s->rstat) & RSTAT_RXF_MASK; 2933 2934 num_act_queues = bitmap_weight(&rstat_rxf, MAX_RX_QS); 2935 if (num_act_queues) 2936 budget_per_q = budget/num_act_queues; 2937 2938 for_each_set_bit(i, &gfargrp->tx_bit_map, priv->num_tx_queues) { 2939 tx_queue = priv->tx_queue[i]; 2940 /* run Tx cleanup to completion */ 2941 if (tx_queue->tx_skbuff[tx_queue->skb_dirtytx]) { 2942 gfar_clean_tx_ring(tx_queue); 2943 has_tx_work = 1; 2944 } 2945 } 2946 2947 for_each_set_bit(i, &gfargrp->rx_bit_map, priv->num_rx_queues) { 2948 /* skip queue if not active */ 2949 if (!(rstat_rxf & (RSTAT_CLEAR_RXF0 >> i))) 2950 continue; 2951 2952 rx_queue = priv->rx_queue[i]; 2953 work_done_per_q = 2954 gfar_clean_rx_ring(rx_queue, budget_per_q); 2955 work_done += work_done_per_q; 2956 2957 /* finished processing this queue */ 2958 if (work_done_per_q < budget_per_q) { 2959 /* clear active queue hw indication */ 2960 gfar_write(®s->rstat, 2961 RSTAT_CLEAR_RXF0 >> i); 2962 num_act_queues--; 2963 2964 if (!num_act_queues) 2965 break; 2966 } 2967 } 2968 2969 if (!num_act_queues && !has_tx_work) { 2970 2971 napi_complete(napi); 2972 2973 /* Clear the halt bit in RSTAT */ 2974 gfar_write(®s->rstat, gfargrp->rstat); 2975 2976 gfar_write(®s->imask, IMASK_DEFAULT); 2977 2978 /* If we are coalescing interrupts, update the timer 2979 * Otherwise, clear it 2980 */ 2981 gfar_configure_coalescing(priv, gfargrp->rx_bit_map, 2982 gfargrp->tx_bit_map); 2983 } 2984 2985 return work_done; 2986 } 2987 2988 #ifdef CONFIG_NET_POLL_CONTROLLER 2989 /* Polling 'interrupt' - used by things like netconsole to send skbs 2990 * without having to re-enable interrupts. It's not called while 2991 * the interrupt routine is executing. 
2992 */ 2993 static void gfar_netpoll(struct net_device *dev) 2994 { 2995 struct gfar_private *priv = netdev_priv(dev); 2996 int i; 2997 2998 /* If the device has multiple interrupts, run tx/rx */ 2999 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) { 3000 for (i = 0; i < priv->num_grps; i++) { 3001 struct gfar_priv_grp *grp = &priv->gfargrp[i]; 3002 3003 disable_irq(gfar_irq(grp, TX)->irq); 3004 disable_irq(gfar_irq(grp, RX)->irq); 3005 disable_irq(gfar_irq(grp, ER)->irq); 3006 gfar_interrupt(gfar_irq(grp, TX)->irq, grp); 3007 enable_irq(gfar_irq(grp, ER)->irq); 3008 enable_irq(gfar_irq(grp, RX)->irq); 3009 enable_irq(gfar_irq(grp, TX)->irq); 3010 } 3011 } else { 3012 for (i = 0; i < priv->num_grps; i++) { 3013 struct gfar_priv_grp *grp = &priv->gfargrp[i]; 3014 3015 disable_irq(gfar_irq(grp, TX)->irq); 3016 gfar_interrupt(gfar_irq(grp, TX)->irq, grp); 3017 enable_irq(gfar_irq(grp, TX)->irq); 3018 } 3019 } 3020 } 3021 #endif 3022 3023 /* The interrupt handler for devices with one interrupt */ 3024 static irqreturn_t gfar_interrupt(int irq, void *grp_id) 3025 { 3026 struct gfar_priv_grp *gfargrp = grp_id; 3027 3028 /* Save ievent for future reference */ 3029 u32 events = gfar_read(&gfargrp->regs->ievent); 3030 3031 /* Check for reception */ 3032 if (events & IEVENT_RX_MASK) 3033 gfar_receive(irq, grp_id); 3034 3035 /* Check for transmit completion */ 3036 if (events & IEVENT_TX_MASK) 3037 gfar_transmit(irq, grp_id); 3038 3039 /* Check for errors */ 3040 if (events & IEVENT_ERR_MASK) 3041 gfar_error(irq, grp_id); 3042 3043 return IRQ_HANDLED; 3044 } 3045 3046 static u32 gfar_get_flowctrl_cfg(struct gfar_private *priv) 3047 { 3048 struct phy_device *phydev = priv->phydev; 3049 u32 val = 0; 3050 3051 if (!phydev->duplex) 3052 return val; 3053 3054 if (!priv->pause_aneg_en) { 3055 if (priv->tx_pause_en) 3056 val |= MACCFG1_TX_FLOW; 3057 if (priv->rx_pause_en) 3058 val |= MACCFG1_RX_FLOW; 3059 } else { 3060 u16 lcl_adv, rmt_adv; 3061 u8 flowctrl; 3062 /* get link partner capabilities */ 3063 rmt_adv = 0; 3064 if (phydev->pause) 3065 rmt_adv = LPA_PAUSE_CAP; 3066 if (phydev->asym_pause) 3067 rmt_adv |= LPA_PAUSE_ASYM; 3068 3069 lcl_adv = mii_advertise_flowctrl(phydev->advertising); 3070 3071 flowctrl = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv); 3072 if (flowctrl & FLOW_CTRL_TX) 3073 val |= MACCFG1_TX_FLOW; 3074 if (flowctrl & FLOW_CTRL_RX) 3075 val |= MACCFG1_RX_FLOW; 3076 } 3077 3078 return val; 3079 } 3080 3081 /* Called every time the controller might need to be made 3082 * aware of new link state. The PHY code conveys this 3083 * information through variables in the phydev structure, and this 3084 * function converts those variables into the appropriate 3085 * register values, and can bring down the device if needed. 3086 */ 3087 static void adjust_link(struct net_device *dev) 3088 { 3089 struct gfar_private *priv = netdev_priv(dev); 3090 struct gfar __iomem *regs = priv->gfargrp[0].regs; 3091 unsigned long flags; 3092 struct phy_device *phydev = priv->phydev; 3093 int new_state = 0; 3094 3095 local_irq_save(flags); 3096 lock_tx_qs(priv); 3097 3098 if (phydev->link) { 3099 u32 tempval1 = gfar_read(®s->maccfg1); 3100 u32 tempval = gfar_read(®s->maccfg2); 3101 u32 ecntrl = gfar_read(®s->ecntrl); 3102 3103 /* Now we make sure that we can be in full duplex mode. 3104 * If not, we operate in half-duplex mode. 
3105 */ 3106 if (phydev->duplex != priv->oldduplex) { 3107 new_state = 1; 3108 if (!(phydev->duplex)) 3109 tempval &= ~(MACCFG2_FULL_DUPLEX); 3110 else 3111 tempval |= MACCFG2_FULL_DUPLEX; 3112 3113 priv->oldduplex = phydev->duplex; 3114 } 3115 3116 if (phydev->speed != priv->oldspeed) { 3117 new_state = 1; 3118 switch (phydev->speed) { 3119 case 1000: 3120 tempval = 3121 ((tempval & ~(MACCFG2_IF)) | MACCFG2_GMII); 3122 3123 ecntrl &= ~(ECNTRL_R100); 3124 break; 3125 case 100: 3126 case 10: 3127 tempval = 3128 ((tempval & ~(MACCFG2_IF)) | MACCFG2_MII); 3129 3130 /* Reduced mode distinguishes 3131 * between 10 and 100 3132 */ 3133 if (phydev->speed == SPEED_100) 3134 ecntrl |= ECNTRL_R100; 3135 else 3136 ecntrl &= ~(ECNTRL_R100); 3137 break; 3138 default: 3139 netif_warn(priv, link, dev, 3140 "Ack! Speed (%d) is not 10/100/1000!\n", 3141 phydev->speed); 3142 break; 3143 } 3144 3145 priv->oldspeed = phydev->speed; 3146 } 3147 3148 tempval1 &= ~(MACCFG1_TX_FLOW | MACCFG1_RX_FLOW); 3149 tempval1 |= gfar_get_flowctrl_cfg(priv); 3150 3151 gfar_write(®s->maccfg1, tempval1); 3152 gfar_write(®s->maccfg2, tempval); 3153 gfar_write(®s->ecntrl, ecntrl); 3154 3155 if (!priv->oldlink) { 3156 new_state = 1; 3157 priv->oldlink = 1; 3158 } 3159 } else if (priv->oldlink) { 3160 new_state = 1; 3161 priv->oldlink = 0; 3162 priv->oldspeed = 0; 3163 priv->oldduplex = -1; 3164 } 3165 3166 if (new_state && netif_msg_link(priv)) 3167 phy_print_status(phydev); 3168 unlock_tx_qs(priv); 3169 local_irq_restore(flags); 3170 } 3171 3172 /* Update the hash table based on the current list of multicast 3173 * addresses we subscribe to. Also, change the promiscuity of 3174 * the device based on the flags (this function is called 3175 * whenever dev->flags is changed 3176 */ 3177 static void gfar_set_multi(struct net_device *dev) 3178 { 3179 struct netdev_hw_addr *ha; 3180 struct gfar_private *priv = netdev_priv(dev); 3181 struct gfar __iomem *regs = priv->gfargrp[0].regs; 3182 u32 tempval; 3183 3184 if (dev->flags & IFF_PROMISC) { 3185 /* Set RCTRL to PROM */ 3186 tempval = gfar_read(®s->rctrl); 3187 tempval |= RCTRL_PROM; 3188 gfar_write(®s->rctrl, tempval); 3189 } else { 3190 /* Set RCTRL to not PROM */ 3191 tempval = gfar_read(®s->rctrl); 3192 tempval &= ~(RCTRL_PROM); 3193 gfar_write(®s->rctrl, tempval); 3194 } 3195 3196 if (dev->flags & IFF_ALLMULTI) { 3197 /* Set the hash to rx all multicast frames */ 3198 gfar_write(®s->igaddr0, 0xffffffff); 3199 gfar_write(®s->igaddr1, 0xffffffff); 3200 gfar_write(®s->igaddr2, 0xffffffff); 3201 gfar_write(®s->igaddr3, 0xffffffff); 3202 gfar_write(®s->igaddr4, 0xffffffff); 3203 gfar_write(®s->igaddr5, 0xffffffff); 3204 gfar_write(®s->igaddr6, 0xffffffff); 3205 gfar_write(®s->igaddr7, 0xffffffff); 3206 gfar_write(®s->gaddr0, 0xffffffff); 3207 gfar_write(®s->gaddr1, 0xffffffff); 3208 gfar_write(®s->gaddr2, 0xffffffff); 3209 gfar_write(®s->gaddr3, 0xffffffff); 3210 gfar_write(®s->gaddr4, 0xffffffff); 3211 gfar_write(®s->gaddr5, 0xffffffff); 3212 gfar_write(®s->gaddr6, 0xffffffff); 3213 gfar_write(®s->gaddr7, 0xffffffff); 3214 } else { 3215 int em_num; 3216 int idx; 3217 3218 /* zero out the hash */ 3219 gfar_write(®s->igaddr0, 0x0); 3220 gfar_write(®s->igaddr1, 0x0); 3221 gfar_write(®s->igaddr2, 0x0); 3222 gfar_write(®s->igaddr3, 0x0); 3223 gfar_write(®s->igaddr4, 0x0); 3224 gfar_write(®s->igaddr5, 0x0); 3225 gfar_write(®s->igaddr6, 0x0); 3226 gfar_write(®s->igaddr7, 0x0); 3227 gfar_write(®s->gaddr0, 0x0); 3228 gfar_write(®s->gaddr1, 0x0); 3229 gfar_write(®s->gaddr2, 0x0); 3230 
		gfar_write(&regs->gaddr3, 0x0);
		gfar_write(&regs->gaddr4, 0x0);
		gfar_write(&regs->gaddr5, 0x0);
		gfar_write(&regs->gaddr6, 0x0);
		gfar_write(&regs->gaddr7, 0x0);

		/* If we have extended hash tables, we need to
		 * clear the exact match registers to prepare for
		 * setting them
		 */
		if (priv->extended_hash) {
			em_num = GFAR_EM_NUM + 1;
			gfar_clear_exact_match(dev);
			idx = 1;
		} else {
			idx = 0;
			em_num = 0;
		}

		if (netdev_mc_empty(dev))
			return;

		/* Parse the list, and set the appropriate bits */
		netdev_for_each_mc_addr(ha, dev) {
			if (idx < em_num) {
				gfar_set_mac_for_addr(dev, idx, ha->addr);
				idx++;
			} else
				gfar_set_hash_for_addr(dev, ha->addr);
		}
	}
}


/* Clears each of the exact match registers to zero, so they
 * don't interfere with normal reception
 */
static void gfar_clear_exact_match(struct net_device *dev)
{
	int idx;
	static const u8 zero_arr[ETH_ALEN] = {0, 0, 0, 0, 0, 0};

	for (idx = 1; idx < GFAR_EM_NUM + 1; idx++)
		gfar_set_mac_for_addr(dev, idx, zero_arr);
}

/* Set the appropriate hash bit for the given addr */
/* The algorithm works like so:
 * 1) Take the Destination Address (ie the multicast address), and
 *    do a CRC on it (little endian), and reverse the bits of the
 *    result.
 * 2) Use the 8 most significant bits as a hash into a 256-entry
 *    table.  The table is controlled through 8 32-bit registers:
 *    gaddr0-7.  gaddr0's MSB is entry 0, and gaddr7's LSB is
 *    entry 255.  This means that the 3 most significant bits in the
 *    hash index which gaddr register to use, and the 5 other bits
 *    indicate which bit (assuming an IBM numbering scheme, which
 *    for PowerPC (tm) is usually the case) in the register holds
 *    the entry.
 */
static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr)
{
	u32 tempval;
	struct gfar_private *priv = netdev_priv(dev);
	u32 result = ether_crc(ETH_ALEN, addr);
	int width = priv->hash_width;
	u8 whichbit = (result >> (32 - width)) & 0x1f;
	u8 whichreg = result >> (32 - width + 5);
	u32 value = (1 << (31-whichbit));

	tempval = gfar_read(priv->hash_regs[whichreg]);
	tempval |= value;
	gfar_write(priv->hash_regs[whichreg], tempval);
}


/* There are multiple MAC Address register pairs on some controllers
 * This function sets the numth pair to a given address
 */
static void gfar_set_mac_for_addr(struct net_device *dev, int num,
				  const u8 *addr)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	int idx;
	char tmpbuf[ETH_ALEN];
	u32 tempval;
	u32 __iomem *macptr = &regs->macstnaddr1;

	macptr += num*2;

	/* Now copy it into the mac registers backwards, cuz
	 * little endian is silly
	 */
	for (idx = 0; idx < ETH_ALEN; idx++)
		tmpbuf[ETH_ALEN - 1 - idx] = addr[idx];

	gfar_write(macptr, *((u32 *) (tmpbuf)));

	tempval = *((u32 *) (tmpbuf + 4));

	gfar_write(macptr+1, tempval);
}

/* GFAR error interrupt handler */
static irqreturn_t gfar_error(int irq, void *grp_id)
{
	struct gfar_priv_grp *gfargrp = grp_id;
	struct gfar __iomem *regs = gfargrp->regs;
	struct gfar_private *priv = gfargrp->priv;
	struct net_device *dev = priv->ndev;

	/* Save ievent for future reference */
	u32 events = gfar_read(&regs->ievent);

	/* Clear IEVENT */
	gfar_write(&regs->ievent, events & IEVENT_ERR_MASK);

	/* Magic Packet is not an error. */
	if ((priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET) &&
	    (events & IEVENT_MAG))
		events &= ~IEVENT_MAG;

	/* Hmm... */
	if (netif_msg_rx_err(priv) || netif_msg_tx_err(priv))
		netdev_dbg(dev,
			   "error interrupt (ievent=0x%08x imask=0x%08x)\n",
			   events, gfar_read(&regs->imask));

	/* Update the error counters */
	if (events & IEVENT_TXE) {
		dev->stats.tx_errors++;

		if (events & IEVENT_LC)
			dev->stats.tx_window_errors++;
		if (events & IEVENT_CRL)
			dev->stats.tx_aborted_errors++;
		if (events & IEVENT_XFUN) {
			unsigned long flags;

			netif_dbg(priv, tx_err, dev,
				  "TX FIFO underrun, packet dropped\n");
			dev->stats.tx_dropped++;
			atomic64_inc(&priv->extra_stats.tx_underrun);

			local_irq_save(flags);
			lock_tx_qs(priv);

			/* Reactivate the Tx Queues */
			gfar_write(&regs->tstat, gfargrp->tstat);

			unlock_tx_qs(priv);
			local_irq_restore(flags);
		}
		netif_dbg(priv, tx_err, dev, "Transmit Error\n");
	}
	if (events & IEVENT_BSY) {
		dev->stats.rx_errors++;
		atomic64_inc(&priv->extra_stats.rx_bsy);

		gfar_receive(irq, grp_id);

		netif_dbg(priv, rx_err, dev, "busy error (rstat: %x)\n",
			  gfar_read(&regs->rstat));
	}
	if (events & IEVENT_BABR) {
		dev->stats.rx_errors++;
		atomic64_inc(&priv->extra_stats.rx_babr);

		netif_dbg(priv, rx_err, dev, "babbling RX error\n");
	}
	if (events & IEVENT_EBERR) {
		atomic64_inc(&priv->extra_stats.eberr);
		netif_dbg(priv, rx_err, dev, "bus error\n");
	}
	if (events & IEVENT_RXC)
		netif_dbg(priv, rx_status, dev, "control frame\n");

	if (events & IEVENT_BABT) {
		atomic64_inc(&priv->extra_stats.tx_babt);
		netif_dbg(priv, tx_err, dev, "babbling TX error\n");
	}
	return IRQ_HANDLED;
}

static struct of_device_id gfar_match[] =
{
	{
		.type = "network",
		.compatible = "gianfar",
	},
	{
		.compatible = "fsl,etsec2",
	},
	{},
};
MODULE_DEVICE_TABLE(of, gfar_match);

/* Structure for a device driver */
static struct platform_driver gfar_driver = {
	.driver = {
		.name = "fsl-gianfar",
		.owner = THIS_MODULE,
		.pm = GFAR_PM_OPS,
		.of_match_table = gfar_match,
	},
	.probe = gfar_probe,
	.remove = gfar_remove,
};

module_platform_driver(gfar_driver);