/*
 * drivers/net/ethernet/freescale/gianfar.c
 *
 * Gianfar Ethernet Driver
 * This driver is designed for the non-CPM ethernet controllers
 * on the 85xx and 83xx family of integrated processors
 * Based on 8260_io/fcc_enet.c
 *
 * Author: Andy Fleming
 * Maintainer: Kumar Gala
 * Modifier: Sandeep Gopalpet <sandeep.kumar@freescale.com>
 *
 * Copyright 2002-2009, 2011 Freescale Semiconductor, Inc.
 * Copyright 2007 MontaVista Software, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * Gianfar: AKA Lambda Draconis, "Dragon"
 * RA 11 31 24.2
 * Dec +69 19 52
 * V 3.84
 * B-V +1.62
 *
 * Theory of operation
 *
 * The driver is initialized through of_device. Configuration information
 * is therefore conveyed through an OF-style device tree.
 *
 * The Gianfar Ethernet Controller uses a ring of buffer
 * descriptors. The beginning is indicated by a register
 * pointing to the physical address of the start of the ring.
 * The end is determined by a "wrap" bit being set in the
 * last descriptor of the ring.
 *
 * When a packet is received, the RXF bit in the
 * IEVENT register is set, triggering an interrupt when the
 * corresponding bit in the IMASK register is also set (if
 * interrupt coalescing is active, then the interrupt may not
 * happen immediately, but will wait until either a set number
 * of frames or amount of time have passed). In NAPI, the
 * interrupt handler will signal there is work to be done, and
 * exit. This method will start at the last known empty
 * descriptor, and process every subsequent descriptor until there
 * are none left with data (NAPI will stop after a set number of
 * packets to give time to other tasks, but will eventually
 * process all the packets). The data arrives inside a
 * pre-allocated skb, and so after the skb is passed up to the
 * stack, a new skb must be allocated, and the address field in
 * the buffer descriptor must be updated to indicate this new
 * skb.
 *
 * When the kernel requests that a packet be transmitted, the
 * driver starts where it left off last time, and points the
 * descriptor at the buffer which was passed in. The driver
 * then informs the DMA engine that there are packets ready to
 * be transmitted. Once the controller is finished transmitting
 * the packet, an interrupt may be triggered (under the same
 * conditions as for reception, but depending on the TXF bit).
 * The driver then cleans up the buffer.
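 *
 * A rough sketch of the RX descriptor lifecycle described above
 * (illustrative only, not verbatim driver code):
 *
 *   rxbdp->bufPtr  = dma_addr_of(new_skb);  // give the BD a fresh buffer
 *   rxbdp->lstatus = BD_LFLAG(RXBD_EMPTY | RXBD_INTERRUPT);
 *                                           // hand the BD back to hardware
 *   ...controller DMAs a frame in, clears RXBD_EMPTY, raises RXF...
 *   netif_receive_skb(old_skb);             // driver passes the frame up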
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#define DEBUG

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/unistd.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/of_mdio.h>
#include <linux/of_platform.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/in.h>
#include <linux/net_tstamp.h>

#include <asm/io.h>
#include <asm/reg.h>
#include <asm/irq.h>
#include <asm/uaccess.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <linux/crc32.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/phy_fixed.h>
#include <linux/of.h>
#include <linux/of_net.h>

#include "gianfar.h"
#include "fsl_pq_mdio.h"

#define TX_TIMEOUT	(1*HZ)

const char gfar_driver_version[] = "1.3";

static int gfar_enet_open(struct net_device *dev);
static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev);
static void gfar_reset_task(struct work_struct *work);
static void gfar_timeout(struct net_device *dev);
static int gfar_close(struct net_device *dev);
struct sk_buff *gfar_new_skb(struct net_device *dev);
static void gfar_new_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
		struct sk_buff *skb);
static int gfar_set_mac_address(struct net_device *dev);
static int gfar_change_mtu(struct net_device *dev, int new_mtu);
static irqreturn_t gfar_error(int irq, void *dev_id);
static irqreturn_t gfar_transmit(int irq, void *dev_id);
static irqreturn_t gfar_interrupt(int irq, void *dev_id);
static void adjust_link(struct net_device *dev);
static void init_registers(struct net_device *dev);
static int init_phy(struct net_device *dev);
static int gfar_probe(struct platform_device *ofdev);
static int gfar_remove(struct platform_device *ofdev);
static void free_skb_resources(struct gfar_private *priv);
static void gfar_set_multi(struct net_device *dev);
static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr);
static void gfar_configure_serdes(struct net_device *dev);
static int gfar_poll(struct napi_struct *napi, int budget);
#ifdef CONFIG_NET_POLL_CONTROLLER
static void gfar_netpoll(struct net_device *dev);
#endif
int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit);
static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue);
static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
		int amount_pull);
void gfar_halt(struct net_device *dev);
static void gfar_halt_nodisable(struct net_device *dev);
void gfar_start(struct net_device *dev);
static void gfar_clear_exact_match(struct net_device *dev);
static void gfar_set_mac_for_addr(struct net_device *dev, int num,
		const u8 *addr);
static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);

MODULE_AUTHOR("Freescale Semiconductor, Inc");
MODULE_DESCRIPTION("Gianfar Ethernet Driver");
MODULE_LICENSE("GPL");

static void gfar_init_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
			    dma_addr_t buf)
{
	u32 lstatus;

	bdp->bufPtr = buf;

	lstatus = BD_LFLAG(RXBD_EMPTY | RXBD_INTERRUPT);
	if (bdp == rx_queue->rx_bd_base + rx_queue->rx_ring_size - 1)
		lstatus |= BD_LFLAG(RXBD_WRAP);
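
	/* Hedged note: on these PowerPC parts, eieio() orders the bufPtr
	 * and wrap-flag stores above ahead of the lstatus store below, so
	 * the controller never sees an EMPTY descriptor carrying a stale
	 * buffer pointer. */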
	eieio();

	bdp->lstatus = lstatus;
}

static int gfar_init_bds(struct net_device *ndev)
{
	struct gfar_private *priv = netdev_priv(ndev);
	struct gfar_priv_tx_q *tx_queue = NULL;
	struct gfar_priv_rx_q *rx_queue = NULL;
	struct txbd8 *txbdp;
	struct rxbd8 *rxbdp;
	int i, j;

	for (i = 0; i < priv->num_tx_queues; i++) {
		tx_queue = priv->tx_queue[i];
		/* Initialize some variables in our dev structure */
		tx_queue->num_txbdfree = tx_queue->tx_ring_size;
		tx_queue->dirty_tx = tx_queue->tx_bd_base;
		tx_queue->cur_tx = tx_queue->tx_bd_base;
		tx_queue->skb_curtx = 0;
		tx_queue->skb_dirtytx = 0;

		/* Initialize Transmit Descriptor Ring */
		txbdp = tx_queue->tx_bd_base;
		for (j = 0; j < tx_queue->tx_ring_size; j++) {
			txbdp->lstatus = 0;
			txbdp->bufPtr = 0;
			txbdp++;
		}

		/* Set the last descriptor in the ring to indicate wrap */
		txbdp--;
		txbdp->status |= TXBD_WRAP;
	}

	for (i = 0; i < priv->num_rx_queues; i++) {
		rx_queue = priv->rx_queue[i];
		rx_queue->cur_rx = rx_queue->rx_bd_base;
		rx_queue->skb_currx = 0;
		rxbdp = rx_queue->rx_bd_base;

		for (j = 0; j < rx_queue->rx_ring_size; j++) {
			struct sk_buff *skb = rx_queue->rx_skbuff[j];

			if (skb) {
				gfar_init_rxbdp(rx_queue, rxbdp,
						rxbdp->bufPtr);
			} else {
				skb = gfar_new_skb(ndev);
				if (!skb) {
					netdev_err(ndev, "Can't allocate RX buffers\n");
					goto err_rxalloc_fail;
				}
				rx_queue->rx_skbuff[j] = skb;

				gfar_new_rxbdp(rx_queue, rxbdp, skb);
			}

			rxbdp++;
		}
	}

	return 0;

err_rxalloc_fail:
	free_skb_resources(priv);
	return -ENOMEM;
}
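
/* All TX and RX buffer descriptor rings live in one DMA-coherent block;
 * the loops below carve it up so memory ends up laid out roughly as
 * (sketch, assuming the ring sizes configured at probe time):
 *
 *   [txbd ring q0][txbd ring q1]...[rxbd ring q0][rxbd ring q1]...
 *
 * which is why a single dma_free_coherent() in free_skb_resources()
 * can release everything at once. */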
static int gfar_alloc_skb_resources(struct net_device *ndev)
{
	void *vaddr;
	dma_addr_t addr;
	int i, j, k;
	struct gfar_private *priv = netdev_priv(ndev);
	struct device *dev = &priv->ofdev->dev;
	struct gfar_priv_tx_q *tx_queue = NULL;
	struct gfar_priv_rx_q *rx_queue = NULL;

	priv->total_tx_ring_size = 0;
	for (i = 0; i < priv->num_tx_queues; i++)
		priv->total_tx_ring_size += priv->tx_queue[i]->tx_ring_size;

	priv->total_rx_ring_size = 0;
	for (i = 0; i < priv->num_rx_queues; i++)
		priv->total_rx_ring_size += priv->rx_queue[i]->rx_ring_size;

	/* Allocate memory for the buffer descriptors */
	vaddr = dma_alloc_coherent(dev,
			sizeof(struct txbd8) * priv->total_tx_ring_size +
			sizeof(struct rxbd8) * priv->total_rx_ring_size,
			&addr, GFP_KERNEL);
	if (!vaddr) {
		netif_err(priv, ifup, ndev,
			  "Could not allocate buffer descriptors!\n");
		return -ENOMEM;
	}

	for (i = 0; i < priv->num_tx_queues; i++) {
		tx_queue = priv->tx_queue[i];
		tx_queue->tx_bd_base = vaddr;
		tx_queue->tx_bd_dma_base = addr;
		tx_queue->dev = ndev;
		/* enet DMA only understands physical addresses */
		addr += sizeof(struct txbd8) * tx_queue->tx_ring_size;
		vaddr += sizeof(struct txbd8) * tx_queue->tx_ring_size;
	}

	/* Start the rx descriptor ring where the tx ring leaves off */
	for (i = 0; i < priv->num_rx_queues; i++) {
		rx_queue = priv->rx_queue[i];
		rx_queue->rx_bd_base = vaddr;
		rx_queue->rx_bd_dma_base = addr;
		rx_queue->dev = ndev;
		addr += sizeof(struct rxbd8) * rx_queue->rx_ring_size;
		vaddr += sizeof(struct rxbd8) * rx_queue->rx_ring_size;
	}

	/* Setup the skbuff rings */
	for (i = 0; i < priv->num_tx_queues; i++) {
		tx_queue = priv->tx_queue[i];
		tx_queue->tx_skbuff = kmalloc(sizeof(*tx_queue->tx_skbuff) *
				tx_queue->tx_ring_size, GFP_KERNEL);
		if (!tx_queue->tx_skbuff) {
			netif_err(priv, ifup, ndev,
				  "Could not allocate tx_skbuff\n");
			goto cleanup;
		}

		for (k = 0; k < tx_queue->tx_ring_size; k++)
			tx_queue->tx_skbuff[k] = NULL;
	}

	for (i = 0; i < priv->num_rx_queues; i++) {
		rx_queue = priv->rx_queue[i];
		rx_queue->rx_skbuff = kmalloc(sizeof(*rx_queue->rx_skbuff) *
				rx_queue->rx_ring_size, GFP_KERNEL);

		if (!rx_queue->rx_skbuff) {
			netif_err(priv, ifup, ndev,
				  "Could not allocate rx_skbuff\n");
			goto cleanup;
		}

		for (j = 0; j < rx_queue->rx_ring_size; j++)
			rx_queue->rx_skbuff[j] = NULL;
	}

	if (gfar_init_bds(ndev))
		goto cleanup;

	return 0;

cleanup:
	free_skb_resources(priv);
	return -ENOMEM;
}

static void gfar_init_tx_rx_base(struct gfar_private *priv)
{
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 __iomem *baddr;
	int i;

	baddr = &regs->tbase0;
	for (i = 0; i < priv->num_tx_queues; i++) {
		gfar_write(baddr, priv->tx_queue[i]->tx_bd_dma_base);
		baddr += 2;
	}

	baddr = &regs->rbase0;
	for (i = 0; i < priv->num_rx_queues; i++) {
		gfar_write(baddr, priv->rx_queue[i]->rx_bd_dma_base);
		baddr += 2;
	}
}

static void gfar_init_mac(struct net_device *ndev)
{
	struct gfar_private *priv = netdev_priv(ndev);
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 rctrl = 0;
	u32 tctrl = 0;
	u32 attrs = 0;

	/* write the tx/rx base registers */
	gfar_init_tx_rx_base(priv);

	/* Configure the coalescing support */
	gfar_configure_coalescing(priv, 0xFF, 0xFF);

	if (priv->rx_filer_enable) {
		rctrl |= RCTRL_FILREN;
		/* Program the RIR0 reg with the required distribution */
		gfar_write(&regs->rir0, DEFAULT_RIR0);
	}

	if (ndev->features & NETIF_F_RXCSUM)
		rctrl |= RCTRL_CHECKSUMMING;

	if (priv->extended_hash) {
		rctrl |= RCTRL_EXTHASH;

		gfar_clear_exact_match(ndev);
		rctrl |= RCTRL_EMEN;
	}

	if (priv->padding) {
		rctrl &= ~RCTRL_PAL_MASK;
		rctrl |= RCTRL_PADDING(priv->padding);
	}

	/* Insert receive time stamps into padding alignment bytes */
	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER) {
		rctrl &= ~RCTRL_PAL_MASK;
		rctrl |= RCTRL_PADDING(8);
		priv->padding = 8;
	}

	/* Enable HW time stamping if requested from user space */
	if (priv->hwts_rx_en)
		rctrl |= RCTRL_PRSDEP_INIT | RCTRL_TS_ENABLE;

	if (ndev->features & NETIF_F_HW_VLAN_RX)
		rctrl |= RCTRL_VLEX | RCTRL_PRSDEP_INIT;

	/* Init rctrl based on our settings */
	gfar_write(&regs->rctrl, rctrl);

	if (ndev->features & NETIF_F_IP_CSUM)
		tctrl |= TCTRL_INIT_CSUM;

	tctrl |= TCTRL_TXSCHED_PRIO;

	gfar_write(&regs->tctrl, tctrl);

	/* Set the extraction length and index */
	attrs = ATTRELI_EL(priv->rx_stash_size) |
		ATTRELI_EI(priv->rx_stash_index);

	gfar_write(&regs->attreli, attrs);

	/* Start with defaults, and add stashing or locking
	 * depending on the appropriate variables */
	attrs = ATTR_INIT_SETTINGS;

	if (priv->bd_stash_en)
		attrs |= ATTR_BDSTASH;

	if (priv->rx_stash_size != 0)
		attrs |= ATTR_BUFSTASH;

	gfar_write(&regs->attr, attrs);

	gfar_write(&regs->fifo_tx_thr, priv->fifo_threshold);
	gfar_write(&regs->fifo_tx_starve, priv->fifo_starve);
	gfar_write(&regs->fifo_tx_starve_shutoff, priv->fifo_starve_off);
}
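
/* Aggregate the software per-queue counters into the single netdev
 * stats structure; note this sums driver-maintained counters only and
 * does not read the hardware RMON MIB block. */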
static struct net_device_stats *gfar_get_stats(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	unsigned long rx_packets = 0, rx_bytes = 0, rx_dropped = 0;
	unsigned long tx_packets = 0, tx_bytes = 0;
	int i = 0;

	for (i = 0; i < priv->num_rx_queues; i++) {
		rx_packets += priv->rx_queue[i]->stats.rx_packets;
		rx_bytes += priv->rx_queue[i]->stats.rx_bytes;
		rx_dropped += priv->rx_queue[i]->stats.rx_dropped;
	}

	dev->stats.rx_packets = rx_packets;
	dev->stats.rx_bytes = rx_bytes;
	dev->stats.rx_dropped = rx_dropped;

	for (i = 0; i < priv->num_tx_queues; i++) {
		tx_bytes += priv->tx_queue[i]->stats.tx_bytes;
		tx_packets += priv->tx_queue[i]->stats.tx_packets;
	}

	dev->stats.tx_bytes = tx_bytes;
	dev->stats.tx_packets = tx_packets;

	return &dev->stats;
}

static const struct net_device_ops gfar_netdev_ops = {
	.ndo_open = gfar_enet_open,
	.ndo_start_xmit = gfar_start_xmit,
	.ndo_stop = gfar_close,
	.ndo_change_mtu = gfar_change_mtu,
	.ndo_set_features = gfar_set_features,
	.ndo_set_rx_mode = gfar_set_multi,
	.ndo_tx_timeout = gfar_timeout,
	.ndo_do_ioctl = gfar_ioctl,
	.ndo_get_stats = gfar_get_stats,
	.ndo_set_mac_address = eth_mac_addr,
	.ndo_validate_addr = eth_validate_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = gfar_netpoll,
#endif
};

void lock_rx_qs(struct gfar_private *priv)
{
	int i = 0x0;

	for (i = 0; i < priv->num_rx_queues; i++)
		spin_lock(&priv->rx_queue[i]->rxlock);
}

void lock_tx_qs(struct gfar_private *priv)
{
	int i = 0x0;

	for (i = 0; i < priv->num_tx_queues; i++)
		spin_lock(&priv->tx_queue[i]->txlock);
}

void unlock_rx_qs(struct gfar_private *priv)
{
	int i = 0x0;

	for (i = 0; i < priv->num_rx_queues; i++)
		spin_unlock(&priv->rx_queue[i]->rxlock);
}

void unlock_tx_qs(struct gfar_private *priv)
{
	int i = 0x0;

	for (i = 0; i < priv->num_tx_queues; i++)
		spin_unlock(&priv->tx_queue[i]->txlock);
}

static bool gfar_is_vlan_on(struct gfar_private *priv)
{
	return (priv->ndev->features & NETIF_F_HW_VLAN_RX) ||
	       (priv->ndev->features & NETIF_F_HW_VLAN_TX);
}

/* Returns 1 if incoming frames use an FCB */
static inline int gfar_uses_fcb(struct gfar_private *priv)
{
	return gfar_is_vlan_on(priv) ||
	       (priv->ndev->features & NETIF_F_RXCSUM) ||
	       (priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER);
}

static void free_tx_pointers(struct gfar_private *priv)
{
	int i = 0;

	for (i = 0; i < priv->num_tx_queues; i++)
		kfree(priv->tx_queue[i]);
}

static void free_rx_pointers(struct gfar_private *priv)
{
	int i = 0;

	for (i = 0; i < priv->num_rx_queues; i++)
		kfree(priv->rx_queue[i]);
}

static void unmap_group_regs(struct gfar_private *priv)
{
	int i = 0;

	for (i = 0; i < MAXGROUPS; i++)
		if (priv->gfargrp[i].regs)
			iounmap(priv->gfargrp[i].regs);
}

static void disable_napi(struct gfar_private *priv)
{
	int i = 0;

	for (i = 0; i < priv->num_grps; i++)
		napi_disable(&priv->gfargrp[i].napi);
}

static void enable_napi(struct gfar_private *priv)
{
	int i = 0;

	for (i = 0; i < priv->num_grps; i++)
		napi_enable(&priv->gfargrp[i].napi);
}
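
/* Map one register group: its MMIO window, its interrupt(s), and the
 * RX/TX queue bit maps that steer queues to this group. On eTSEC2
 * ("fsl,etsec2") each child node is one group; older parts have a
 * single group covering everything. */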
static int gfar_parse_group(struct device_node *np,
		struct gfar_private *priv, const char *model)
{
	u32 *queue_mask;

	priv->gfargrp[priv->num_grps].regs = of_iomap(np, 0);
	if (!priv->gfargrp[priv->num_grps].regs)
		return -ENOMEM;

	priv->gfargrp[priv->num_grps].interruptTransmit =
			irq_of_parse_and_map(np, 0);

	/* If we aren't the FEC we have multiple interrupts */
	if (model && strcasecmp(model, "FEC")) {
		priv->gfargrp[priv->num_grps].interruptReceive =
			irq_of_parse_and_map(np, 1);
		priv->gfargrp[priv->num_grps].interruptError =
			irq_of_parse_and_map(np, 2);
		if (priv->gfargrp[priv->num_grps].interruptTransmit == NO_IRQ ||
		    priv->gfargrp[priv->num_grps].interruptReceive == NO_IRQ ||
		    priv->gfargrp[priv->num_grps].interruptError == NO_IRQ)
			return -EINVAL;
	}

	priv->gfargrp[priv->num_grps].grp_id = priv->num_grps;
	priv->gfargrp[priv->num_grps].priv = priv;
	spin_lock_init(&priv->gfargrp[priv->num_grps].grplock);
	if (priv->mode == MQ_MG_MODE) {
		queue_mask = (u32 *)of_get_property(np,
					"fsl,rx-bit-map", NULL);
		priv->gfargrp[priv->num_grps].rx_bit_map =
			queue_mask ? *queue_mask : (DEFAULT_MAPPING >> priv->num_grps);
		queue_mask = (u32 *)of_get_property(np,
					"fsl,tx-bit-map", NULL);
		priv->gfargrp[priv->num_grps].tx_bit_map =
			queue_mask ? *queue_mask : (DEFAULT_MAPPING >> priv->num_grps);
	} else {
		priv->gfargrp[priv->num_grps].rx_bit_map = 0xFF;
		priv->gfargrp[priv->num_grps].tx_bit_map = 0xFF;
	}
	priv->num_grps++;

	return 0;
}
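
/* Walk the device tree node for this controller: read queue counts and
 * stashing properties, allocate the net_device and per-queue structures,
 * and set device_flags from the "model" property (TSEC vs. eTSEC). */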
static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
{
	const char *model;
	const char *ctype;
	const void *mac_addr;
	int err = 0, i;
	struct net_device *dev = NULL;
	struct gfar_private *priv = NULL;
	struct device_node *np = ofdev->dev.of_node;
	struct device_node *child = NULL;
	const u32 *stash;
	const u32 *stash_len;
	const u32 *stash_idx;
	unsigned int num_tx_qs, num_rx_qs;
	u32 *tx_queues, *rx_queues;

	if (!np || !of_device_is_available(np))
		return -ENODEV;

	/* parse the num of tx and rx queues */
	tx_queues = (u32 *)of_get_property(np, "fsl,num_tx_queues", NULL);
	num_tx_qs = tx_queues ? *tx_queues : 1;

	if (num_tx_qs > MAX_TX_QS) {
		pr_err("num_tx_qs(=%d) greater than MAX_TX_QS(=%d)\n",
		       num_tx_qs, MAX_TX_QS);
		pr_err("Cannot do alloc_etherdev, aborting\n");
		return -EINVAL;
	}

	rx_queues = (u32 *)of_get_property(np, "fsl,num_rx_queues", NULL);
	num_rx_qs = rx_queues ? *rx_queues : 1;

	if (num_rx_qs > MAX_RX_QS) {
		pr_err("num_rx_qs(=%d) greater than MAX_RX_QS(=%d)\n",
		       num_rx_qs, MAX_RX_QS);
		pr_err("Cannot do alloc_etherdev, aborting\n");
		return -EINVAL;
	}

	*pdev = alloc_etherdev_mq(sizeof(*priv), num_tx_qs);
	dev = *pdev;
	if (NULL == dev)
		return -ENOMEM;

	priv = netdev_priv(dev);
	priv->node = ofdev->dev.of_node;
	priv->ndev = dev;

	priv->num_tx_queues = num_tx_qs;
	netif_set_real_num_rx_queues(dev, num_rx_qs);
	priv->num_rx_queues = num_rx_qs;
	priv->num_grps = 0x0;

	/* Init Rx queue filer rule set linked list */
	INIT_LIST_HEAD(&priv->rx_list.list);
	priv->rx_list.count = 0;
	mutex_init(&priv->rx_queue_access);

	model = of_get_property(np, "model", NULL);

	for (i = 0; i < MAXGROUPS; i++)
		priv->gfargrp[i].regs = NULL;

	/* Parse and initialize group specific information */
	if (of_device_is_compatible(np, "fsl,etsec2")) {
		priv->mode = MQ_MG_MODE;
		for_each_child_of_node(np, child) {
			err = gfar_parse_group(child, priv, model);
			if (err)
				goto err_grp_init;
		}
	} else {
		priv->mode = SQ_SG_MODE;
		err = gfar_parse_group(np, priv, model);
		if (err)
			goto err_grp_init;
	}

	for (i = 0; i < priv->num_tx_queues; i++)
		priv->tx_queue[i] = NULL;
	for (i = 0; i < priv->num_rx_queues; i++)
		priv->rx_queue[i] = NULL;

	for (i = 0; i < priv->num_tx_queues; i++) {
		priv->tx_queue[i] = kzalloc(sizeof(struct gfar_priv_tx_q),
					    GFP_KERNEL);
		if (!priv->tx_queue[i]) {
			err = -ENOMEM;
			goto tx_alloc_failed;
		}
		priv->tx_queue[i]->tx_skbuff = NULL;
		priv->tx_queue[i]->qindex = i;
		priv->tx_queue[i]->dev = dev;
		spin_lock_init(&(priv->tx_queue[i]->txlock));
	}

	for (i = 0; i < priv->num_rx_queues; i++) {
		priv->rx_queue[i] = kzalloc(sizeof(struct gfar_priv_rx_q),
					    GFP_KERNEL);
		if (!priv->rx_queue[i]) {
			err = -ENOMEM;
			goto rx_alloc_failed;
		}
		priv->rx_queue[i]->rx_skbuff = NULL;
		priv->rx_queue[i]->qindex = i;
		priv->rx_queue[i]->dev = dev;
		spin_lock_init(&(priv->rx_queue[i]->rxlock));
	}

	stash = of_get_property(np, "bd-stash", NULL);

	if (stash) {
		priv->device_flags |= FSL_GIANFAR_DEV_HAS_BD_STASHING;
		priv->bd_stash_en = 1;
	}

	stash_len = of_get_property(np, "rx-stash-len", NULL);

	if (stash_len)
		priv->rx_stash_size = *stash_len;

	stash_idx = of_get_property(np, "rx-stash-idx", NULL);

	if (stash_idx)
		priv->rx_stash_index = *stash_idx;

	if (stash_len || stash_idx)
		priv->device_flags |= FSL_GIANFAR_DEV_HAS_BUF_STASHING;

	mac_addr = of_get_mac_address(np);
	if (mac_addr)
		memcpy(dev->dev_addr, mac_addr, ETH_ALEN);

	if (model && !strcasecmp(model, "TSEC"))
		priv->device_flags =
			FSL_GIANFAR_DEV_HAS_GIGABIT |
			FSL_GIANFAR_DEV_HAS_COALESCE |
			FSL_GIANFAR_DEV_HAS_RMON |
			FSL_GIANFAR_DEV_HAS_MULTI_INTR;
	if (model && !strcasecmp(model, "eTSEC"))
		priv->device_flags =
			FSL_GIANFAR_DEV_HAS_GIGABIT |
			FSL_GIANFAR_DEV_HAS_COALESCE |
			FSL_GIANFAR_DEV_HAS_RMON |
			FSL_GIANFAR_DEV_HAS_MULTI_INTR |
			FSL_GIANFAR_DEV_HAS_PADDING |
			FSL_GIANFAR_DEV_HAS_CSUM |
			FSL_GIANFAR_DEV_HAS_VLAN |
			FSL_GIANFAR_DEV_HAS_MAGIC_PACKET |
			FSL_GIANFAR_DEV_HAS_EXTENDED_HASH |
			FSL_GIANFAR_DEV_HAS_TIMER;

	ctype = of_get_property(np, "phy-connection-type", NULL);

	/* We only care about rgmii-id. The rest are autodetected */
	if (ctype && !strcmp(ctype, "rgmii-id"))
		priv->interface = PHY_INTERFACE_MODE_RGMII_ID;
	else
		priv->interface = PHY_INTERFACE_MODE_MII;

	if (of_get_property(np, "fsl,magic-packet", NULL))
		priv->device_flags |= FSL_GIANFAR_DEV_HAS_MAGIC_PACKET;

	priv->phy_node = of_parse_phandle(np, "phy-handle", 0);

	/* Find the TBI PHY. If it's not there, we don't support SGMII */
	priv->tbi_node = of_parse_phandle(np, "tbi-handle", 0);

	return 0;

rx_alloc_failed:
	free_rx_pointers(priv);
tx_alloc_failed:
	free_tx_pointers(priv);
err_grp_init:
	unmap_group_regs(priv);
	free_netdev(dev);
	return err;
}
static int gfar_hwtstamp_ioctl(struct net_device *netdev,
			struct ifreq *ifr, int cmd)
{
	struct hwtstamp_config config;
	struct gfar_private *priv = netdev_priv(netdev);

	if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
		return -EFAULT;

	/* reserved for future extensions */
	if (config.flags)
		return -EINVAL;

	switch (config.tx_type) {
	case HWTSTAMP_TX_OFF:
		priv->hwts_tx_en = 0;
		break;
	case HWTSTAMP_TX_ON:
		if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER))
			return -ERANGE;
		priv->hwts_tx_en = 1;
		break;
	default:
		return -ERANGE;
	}

	switch (config.rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		if (priv->hwts_rx_en) {
			stop_gfar(netdev);
			priv->hwts_rx_en = 0;
			startup_gfar(netdev);
		}
		break;
	default:
		if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER))
			return -ERANGE;
		if (!priv->hwts_rx_en) {
			stop_gfar(netdev);
			priv->hwts_rx_en = 1;
			startup_gfar(netdev);
		}
		config.rx_filter = HWTSTAMP_FILTER_ALL;
		break;
	}

	return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
		-EFAULT : 0;
}

/* Ioctl MII Interface */
static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct gfar_private *priv = netdev_priv(dev);

	if (!netif_running(dev))
		return -EINVAL;

	if (cmd == SIOCSHWTSTAMP)
		return gfar_hwtstamp_ioctl(dev, rq, cmd);

	if (!priv->phydev)
		return -ENODEV;

	return phy_mii_ioctl(priv->phydev, rq, cmd);
}
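
/* Mirror a queue bit map end-for-end. The hardware registers treat the
 * MSB as queue 0, while for_each_set_bit() scans from bit 0 upward, so
 * the map is reversed once at probe time. For example, with max_qs = 8
 * the single-queue map 0x80 (queue 0 at the MSB) becomes 0x01. */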
static unsigned int reverse_bitmap(unsigned int bit_map, unsigned int max_qs)
{
	unsigned int new_bit_map = 0x0;
	int mask = 0x1 << (max_qs - 1), i;

	for (i = 0; i < max_qs; i++) {
		if (bit_map & mask)
			new_bit_map = new_bit_map + (1 << i);
		mask = mask >> 0x1;
	}
	return new_bit_map;
}

static u32 cluster_entry_per_class(struct gfar_private *priv, u32 rqfar,
				   u32 class)
{
	u32 rqfpr = FPR_FILER_MASK;
	u32 rqfcr = 0x0;

	rqfar--;
	rqfcr = RQFCR_CLE | RQFCR_PID_MASK | RQFCR_CMP_EXACT;
	priv->ftp_rqfpr[rqfar] = rqfpr;
	priv->ftp_rqfcr[rqfar] = rqfcr;
	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);

	rqfar--;
	rqfcr = RQFCR_CMP_NOMATCH;
	priv->ftp_rqfpr[rqfar] = rqfpr;
	priv->ftp_rqfcr[rqfar] = rqfcr;
	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);

	rqfar--;
	rqfcr = RQFCR_CMP_EXACT | RQFCR_PID_PARSE | RQFCR_CLE | RQFCR_AND;
	rqfpr = class;
	priv->ftp_rqfcr[rqfar] = rqfcr;
	priv->ftp_rqfpr[rqfar] = rqfpr;
	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);

	rqfar--;
	rqfcr = RQFCR_CMP_EXACT | RQFCR_PID_MASK | RQFCR_AND;
	rqfpr = class;
	priv->ftp_rqfcr[rqfar] = rqfcr;
	priv->ftp_rqfpr[rqfar] = rqfpr;
	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);

	return rqfar;
}
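
/* Build the default RX filer table from the top (MAX_FILER_IDX) down:
 * a catch-all match rule first, then one four-entry cluster per traffic
 * class (IPv6/IPv4, plain/UDP/TCP) via cluster_entry_per_class(), and
 * finally every remaining lower entry is filled with a no-match rule. */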
static void gfar_init_filer_table(struct gfar_private *priv)
{
	int i = 0x0;
	u32 rqfar = MAX_FILER_IDX;
	u32 rqfcr = 0x0;
	u32 rqfpr = FPR_FILER_MASK;

	/* Default rule */
	rqfcr = RQFCR_CMP_MATCH;
	priv->ftp_rqfcr[rqfar] = rqfcr;
	priv->ftp_rqfpr[rqfar] = rqfpr;
	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);

	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6);
	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6 | RQFPR_UDP);
	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6 | RQFPR_TCP);
	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4);
	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4 | RQFPR_UDP);
	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4 | RQFPR_TCP);

	/* cur_filer_idx indicates the first non-masked rule */
	priv->cur_filer_idx = rqfar;

	/* Rest are masked rules */
	rqfcr = RQFCR_CMP_NOMATCH;
	for (i = 0; i < rqfar; i++) {
		priv->ftp_rqfcr[i] = rqfcr;
		priv->ftp_rqfpr[i] = rqfpr;
		gfar_write_filer(priv, i, rqfcr, rqfpr);
	}
}
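
/* Decode the processor version (PVR) and system version (SVR) registers
 * to decide which silicon errata workarounds to enable. The 0xfff6 mask
 * on the SVR SoC field is assumed to fold the "E" (security engine)
 * part variants onto their base part number so one check covers both. */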
static void gfar_detect_errata(struct gfar_private *priv)
{
	struct device *dev = &priv->ofdev->dev;
	unsigned int pvr = mfspr(SPRN_PVR);
	unsigned int svr = mfspr(SPRN_SVR);
	unsigned int mod = (svr >> 16) & 0xfff6; /* w/o E suffix */
	unsigned int rev = svr & 0xffff;

	/* MPC8313 Rev 2.0 and higher; All MPC837x */
	if ((pvr == 0x80850010 && mod == 0x80b0 && rev >= 0x0020) ||
	    (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0))
		priv->errata |= GFAR_ERRATA_74;

	/* MPC8313 and MPC837x all rev */
	if ((pvr == 0x80850010 && mod == 0x80b0) ||
	    (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0))
		priv->errata |= GFAR_ERRATA_76;

	/* MPC8313 and MPC837x all rev */
	if ((pvr == 0x80850010 && mod == 0x80b0) ||
	    (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0))
		priv->errata |= GFAR_ERRATA_A002;

	/* MPC8313 Rev < 2.0, MPC8548 rev 2.0 */
	if ((pvr == 0x80850010 && mod == 0x80b0 && rev < 0x0020) ||
	    (pvr == 0x80210020 && mod == 0x8030 && rev == 0x0020))
		priv->errata |= GFAR_ERRATA_12;

	if (priv->errata)
		dev_info(dev, "enabled errata workarounds, flags: 0x%x\n",
			 priv->errata);
}

/* Set up the ethernet device structure, private data,
 * and anything else we need before we start */
static int gfar_probe(struct platform_device *ofdev)
{
	u32 tempval;
	struct net_device *dev = NULL;
	struct gfar_private *priv = NULL;
	struct gfar __iomem *regs = NULL;
	int err = 0, i, grp_idx = 0;
	int len_devname;
	u32 rstat = 0, tstat = 0, rqueue = 0, tqueue = 0;
	u32 isrg = 0;
	u32 __iomem *baddr;

	err = gfar_of_init(ofdev, &dev);

	if (err)
		return err;

	priv = netdev_priv(dev);
	priv->ndev = dev;
	priv->ofdev = ofdev;
	priv->node = ofdev->dev.of_node;
	SET_NETDEV_DEV(dev, &ofdev->dev);

	spin_lock_init(&priv->bflock);
	INIT_WORK(&priv->reset_task, gfar_reset_task);

	dev_set_drvdata(&ofdev->dev, priv);
	regs = priv->gfargrp[0].regs;

	gfar_detect_errata(priv);

	/* Stop the DMA engine now, in case it was running before
	 * (The firmware could have used it, and left it running). */
	gfar_halt(dev);

	/* Reset MAC layer */
	gfar_write(&regs->maccfg1, MACCFG1_SOFT_RESET);

	/* We need to delay at least 3 TX clocks */
	udelay(2);

	tempval = (MACCFG1_TX_FLOW | MACCFG1_RX_FLOW);
	gfar_write(&regs->maccfg1, tempval);

	/* Initialize MACCFG2. */
	tempval = MACCFG2_INIT_SETTINGS;
	if (gfar_has_errata(priv, GFAR_ERRATA_74))
		tempval |= MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK;
	gfar_write(&regs->maccfg2, tempval);

	/* Initialize ECNTRL */
	gfar_write(&regs->ecntrl, ECNTRL_INIT_SETTINGS);

	/* Set the dev->base_addr to the gfar reg region */
	dev->base_addr = (unsigned long) regs;

	SET_NETDEV_DEV(dev, &ofdev->dev);

	/* Fill in the dev structure */
	dev->watchdog_timeo = TX_TIMEOUT;
	dev->mtu = 1500;
	dev->netdev_ops = &gfar_netdev_ops;
	dev->ethtool_ops = &gfar_ethtool_ops;

	/* Register NAPI for each group */
	for (i = 0; i < priv->num_grps; i++)
		netif_napi_add(dev, &priv->gfargrp[i].napi, gfar_poll,
			       GFAR_DEV_WEIGHT);

	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_CSUM) {
		dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG |
			NETIF_F_RXCSUM;
		dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG |
			NETIF_F_RXCSUM | NETIF_F_HIGHDMA;
	}

	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_VLAN) {
		dev->hw_features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
		dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
	}

	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_EXTENDED_HASH) {
		priv->extended_hash = 1;
		priv->hash_width = 9;

		priv->hash_regs[0] = &regs->igaddr0;
		priv->hash_regs[1] = &regs->igaddr1;
		priv->hash_regs[2] = &regs->igaddr2;
		priv->hash_regs[3] = &regs->igaddr3;
		priv->hash_regs[4] = &regs->igaddr4;
		priv->hash_regs[5] = &regs->igaddr5;
		priv->hash_regs[6] = &regs->igaddr6;
		priv->hash_regs[7] = &regs->igaddr7;
		priv->hash_regs[8] = &regs->gaddr0;
		priv->hash_regs[9] = &regs->gaddr1;
		priv->hash_regs[10] = &regs->gaddr2;
		priv->hash_regs[11] = &regs->gaddr3;
		priv->hash_regs[12] = &regs->gaddr4;
		priv->hash_regs[13] = &regs->gaddr5;
		priv->hash_regs[14] = &regs->gaddr6;
		priv->hash_regs[15] = &regs->gaddr7;

	} else {
		priv->extended_hash = 0;
		priv->hash_width = 8;

		priv->hash_regs[0] = &regs->gaddr0;
		priv->hash_regs[1] = &regs->gaddr1;
		priv->hash_regs[2] = &regs->gaddr2;
		priv->hash_regs[3] = &regs->gaddr3;
		priv->hash_regs[4] = &regs->gaddr4;
		priv->hash_regs[5] = &regs->gaddr5;
		priv->hash_regs[6] = &regs->gaddr6;
		priv->hash_regs[7] = &regs->gaddr7;
	}

	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_PADDING)
		priv->padding = DEFAULT_PADDING;
	else
		priv->padding = 0;

	if (dev->features & NETIF_F_IP_CSUM ||
	    priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)
		dev->hard_header_len += GMAC_FCB_LEN;

	/* Program the isrg regs only if number of grps > 1 */
	if (priv->num_grps > 1) {
		baddr = &regs->isrg0;
		for (i = 0; i < priv->num_grps; i++) {
			isrg |= (priv->gfargrp[i].rx_bit_map << ISRG_SHIFT_RX);
			isrg |= (priv->gfargrp[i].tx_bit_map << ISRG_SHIFT_TX);
			gfar_write(baddr, isrg);
			baddr++;
			isrg = 0x0;
		}
	}

	/* Need to reverse the bit maps as bit_map's MSB is q0
	 * but, for_each_set_bit parses from right to left, which
	 * basically reverses the queue numbers */
	for (i = 0; i < priv->num_grps; i++) {
		priv->gfargrp[i].tx_bit_map = reverse_bitmap(
				priv->gfargrp[i].tx_bit_map, MAX_TX_QS);
		priv->gfargrp[i].rx_bit_map = reverse_bitmap(
				priv->gfargrp[i].rx_bit_map, MAX_RX_QS);
	}

	/* Calculate RSTAT, TSTAT, RQUEUE and TQUEUE values,
	 * also assign queues to groups */
	for (grp_idx = 0; grp_idx < priv->num_grps; grp_idx++) {
		priv->gfargrp[grp_idx].num_rx_queues = 0x0;
		for_each_set_bit(i, &priv->gfargrp[grp_idx].rx_bit_map,
				 priv->num_rx_queues) {
			priv->gfargrp[grp_idx].num_rx_queues++;
			priv->rx_queue[i]->grp = &priv->gfargrp[grp_idx];
			rstat = rstat | (RSTAT_CLEAR_RHALT >> i);
			rqueue = rqueue | ((RQUEUE_EN0 | RQUEUE_EX0) >> i);
		}
		priv->gfargrp[grp_idx].num_tx_queues = 0x0;
		for_each_set_bit(i, &priv->gfargrp[grp_idx].tx_bit_map,
				 priv->num_tx_queues) {
			priv->gfargrp[grp_idx].num_tx_queues++;
			priv->tx_queue[i]->grp = &priv->gfargrp[grp_idx];
			tstat = tstat | (TSTAT_CLEAR_THALT >> i);
			tqueue = tqueue | (TQUEUE_EN0 >> i);
		}
		priv->gfargrp[grp_idx].rstat = rstat;
		priv->gfargrp[grp_idx].tstat = tstat;
		rstat = tstat = 0;
	}

	gfar_write(&regs->rqueue, rqueue);
	gfar_write(&regs->tqueue, tqueue);

	priv->rx_buffer_size = DEFAULT_RX_BUFFER_SIZE;

	/* Initializing some of the rx/tx queue level parameters */
	for (i = 0; i < priv->num_tx_queues; i++) {
		priv->tx_queue[i]->tx_ring_size = DEFAULT_TX_RING_SIZE;
		priv->tx_queue[i]->num_txbdfree = DEFAULT_TX_RING_SIZE;
		priv->tx_queue[i]->txcoalescing = DEFAULT_TX_COALESCE;
		priv->tx_queue[i]->txic = DEFAULT_TXIC;
	}

	for (i = 0; i < priv->num_rx_queues; i++) {
		priv->rx_queue[i]->rx_ring_size = DEFAULT_RX_RING_SIZE;
		priv->rx_queue[i]->rxcoalescing = DEFAULT_RX_COALESCE;
		priv->rx_queue[i]->rxic = DEFAULT_RXIC;
	}

	/* always enable rx filer */
	priv->rx_filer_enable = 1;
	/* Enable most messages by default */
	priv->msg_enable = (NETIF_MSG_IFUP << 1) - 1;

	/* Carrier starts down, phylib will bring it up */
	netif_carrier_off(dev);

	err = register_netdev(dev);

	if (err) {
		pr_err("%s: Cannot register net device, aborting\n", dev->name);
		goto register_fail;
	}

	device_init_wakeup(&dev->dev,
		priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);
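
	/* The block below builds per-group IRQ names of the form
	 * "<ifname>_g<N>_tx" / "_rx" / "_er" (e.g. "eth0_g0_tx"); the
	 * group digit is appended as the ASCII character '0' + i. */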
	/* fill out IRQ number and name fields */
	len_devname = strlen(dev->name);
	for (i = 0; i < priv->num_grps; i++) {
		strncpy(&priv->gfargrp[i].int_name_tx[0], dev->name,
				len_devname);
		if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
			strncpy(&priv->gfargrp[i].int_name_tx[len_devname],
				"_g", sizeof("_g"));
			priv->gfargrp[i].int_name_tx[
				strlen(priv->gfargrp[i].int_name_tx)] = i + 48;
			strncpy(&priv->gfargrp[i].int_name_tx[strlen(
				priv->gfargrp[i].int_name_tx)],
				"_tx", sizeof("_tx") + 1);

			strncpy(&priv->gfargrp[i].int_name_rx[0], dev->name,
					len_devname);
			strncpy(&priv->gfargrp[i].int_name_rx[len_devname],
					"_g", sizeof("_g"));
			priv->gfargrp[i].int_name_rx[
				strlen(priv->gfargrp[i].int_name_rx)] = i + 48;
			strncpy(&priv->gfargrp[i].int_name_rx[strlen(
				priv->gfargrp[i].int_name_rx)],
				"_rx", sizeof("_rx") + 1);

			strncpy(&priv->gfargrp[i].int_name_er[0], dev->name,
					len_devname);
			strncpy(&priv->gfargrp[i].int_name_er[len_devname],
					"_g", sizeof("_g"));
			priv->gfargrp[i].int_name_er[strlen(
				priv->gfargrp[i].int_name_er)] = i + 48;
			strncpy(&priv->gfargrp[i].int_name_er[strlen(
				priv->gfargrp[i].int_name_er)],
				"_er", sizeof("_er") + 1);
		} else
			priv->gfargrp[i].int_name_tx[len_devname] = '\0';
	}

	/* Initialize the filer table */
	gfar_init_filer_table(priv);

	/* Create all the sysfs files */
	gfar_init_sysfs(dev);

	/* Print out the device info */
	netdev_info(dev, "mac: %pM\n", dev->dev_addr);

	/* Even more device info helps when determining which kernel
	 * provided which set of benchmarks. */
	netdev_info(dev, "Running with NAPI enabled\n");
	for (i = 0; i < priv->num_rx_queues; i++)
		netdev_info(dev, "RX BD ring size for Q[%d]: %d\n",
			    i, priv->rx_queue[i]->rx_ring_size);
	for (i = 0; i < priv->num_tx_queues; i++)
		netdev_info(dev, "TX BD ring size for Q[%d]: %d\n",
			    i, priv->tx_queue[i]->tx_ring_size);

	return 0;

register_fail:
	unmap_group_regs(priv);
	free_tx_pointers(priv);
	free_rx_pointers(priv);
	if (priv->phy_node)
		of_node_put(priv->phy_node);
	if (priv->tbi_node)
		of_node_put(priv->tbi_node);
	free_netdev(dev);
	return err;
}

static int gfar_remove(struct platform_device *ofdev)
{
	struct gfar_private *priv = dev_get_drvdata(&ofdev->dev);

	if (priv->phy_node)
		of_node_put(priv->phy_node);
	if (priv->tbi_node)
		of_node_put(priv->tbi_node);

	dev_set_drvdata(&ofdev->dev, NULL);

	unregister_netdev(priv->ndev);
	unmap_group_regs(priv);
	free_netdev(priv->ndev);

	return 0;
}

#ifdef CONFIG_PM
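
/* Suspend: detach and halt the controller. TX is always disabled; RX is
 * left enabled only when wake-on-LAN via magic packet is configured, in
 * which case the magic-packet interrupt (IMASK_MAG) becomes the sole
 * wakeup source. */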
static int gfar_suspend(struct device *dev)
{
	struct gfar_private *priv = dev_get_drvdata(dev);
	struct net_device *ndev = priv->ndev;
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	unsigned long flags;
	u32 tempval;

	int magic_packet = priv->wol_en &&
		(priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);

	netif_device_detach(ndev);

	if (netif_running(ndev)) {

		local_irq_save(flags);
		lock_tx_qs(priv);
		lock_rx_qs(priv);

		gfar_halt_nodisable(ndev);

		/* Disable Tx, and Rx if wake-on-LAN is disabled. */
		tempval = gfar_read(&regs->maccfg1);

		tempval &= ~MACCFG1_TX_EN;

		if (!magic_packet)
			tempval &= ~MACCFG1_RX_EN;

		gfar_write(&regs->maccfg1, tempval);

		unlock_rx_qs(priv);
		unlock_tx_qs(priv);
		local_irq_restore(flags);

		disable_napi(priv);

		if (magic_packet) {
			/* Enable interrupt on Magic Packet */
			gfar_write(&regs->imask, IMASK_MAG);

			/* Enable Magic Packet mode */
			tempval = gfar_read(&regs->maccfg2);
			tempval |= MACCFG2_MPEN;
			gfar_write(&regs->maccfg2, tempval);
		} else {
			phy_stop(priv->phydev);
		}
	}

	return 0;
}

static int gfar_resume(struct device *dev)
{
	struct gfar_private *priv = dev_get_drvdata(dev);
	struct net_device *ndev = priv->ndev;
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	unsigned long flags;
	u32 tempval;
	int magic_packet = priv->wol_en &&
		(priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);

	if (!netif_running(ndev)) {
		netif_device_attach(ndev);
		return 0;
	}

	if (!magic_packet && priv->phydev)
		phy_start(priv->phydev);

	/* Disable Magic Packet mode, in case something
	 * else woke us up.
	 */
	local_irq_save(flags);
	lock_tx_qs(priv);
	lock_rx_qs(priv);

	tempval = gfar_read(&regs->maccfg2);
	tempval &= ~MACCFG2_MPEN;
	gfar_write(&regs->maccfg2, tempval);

	gfar_start(ndev);

	unlock_rx_qs(priv);
	unlock_tx_qs(priv);
	local_irq_restore(flags);

	netif_device_attach(ndev);

	enable_napi(priv);

	return 0;
}

static int gfar_restore(struct device *dev)
{
	struct gfar_private *priv = dev_get_drvdata(dev);
	struct net_device *ndev = priv->ndev;

	if (!netif_running(ndev))
		return 0;

	gfar_init_bds(ndev);
	init_registers(ndev);
	gfar_set_mac_address(ndev);
	gfar_init_mac(ndev);
	gfar_start(ndev);

	priv->oldlink = 0;
	priv->oldspeed = 0;
	priv->oldduplex = -1;

	if (priv->phydev)
		phy_start(priv->phydev);

	netif_device_attach(ndev);
	enable_napi(priv);

	return 0;
}
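
/* PM callback mapping: freeze/thaw (hibernation image creation) reuse
 * the suspend/resume paths, while .restore runs after a hibernation
 * image has been loaded and so reprograms the controller from scratch
 * via gfar_restore() above, since the register state may have been
 * lost or reset in the meantime. */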
static struct dev_pm_ops gfar_pm_ops = {
	.suspend = gfar_suspend,
	.resume = gfar_resume,
	.freeze = gfar_suspend,
	.thaw = gfar_resume,
	.restore = gfar_restore,
};

#define GFAR_PM_OPS (&gfar_pm_ops)

#else

#define GFAR_PM_OPS NULL

#endif

/* Reads the controller's registers to determine what interface
 * connects it to the PHY.
 */
static phy_interface_t gfar_get_interface(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 ecntrl;

	ecntrl = gfar_read(&regs->ecntrl);

	if (ecntrl & ECNTRL_SGMII_MODE)
		return PHY_INTERFACE_MODE_SGMII;

	if (ecntrl & ECNTRL_TBI_MODE) {
		if (ecntrl & ECNTRL_REDUCED_MODE)
			return PHY_INTERFACE_MODE_RTBI;
		else
			return PHY_INTERFACE_MODE_TBI;
	}

	if (ecntrl & ECNTRL_REDUCED_MODE) {
		if (ecntrl & ECNTRL_REDUCED_MII_MODE)
			return PHY_INTERFACE_MODE_RMII;
		else {
			phy_interface_t interface = priv->interface;

			/*
			 * This isn't autodetected right now, so it must
			 * be set by the device tree or platform code.
			 */
			if (interface == PHY_INTERFACE_MODE_RGMII_ID)
				return PHY_INTERFACE_MODE_RGMII_ID;

			return PHY_INTERFACE_MODE_RGMII;
		}
	}

	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT)
		return PHY_INTERFACE_MODE_GMII;

	return PHY_INTERFACE_MODE_MII;
}


/* Initializes driver's PHY state, and attaches to the PHY.
 * Returns 0 on success.
 */
static int init_phy(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	uint gigabit_support =
		priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT ?
		SUPPORTED_1000baseT_Full : 0;
	phy_interface_t interface;

	priv->oldlink = 0;
	priv->oldspeed = 0;
	priv->oldduplex = -1;

	interface = gfar_get_interface(dev);

	priv->phydev = of_phy_connect(dev, priv->phy_node, &adjust_link, 0,
				      interface);
	if (!priv->phydev)
		priv->phydev = of_phy_connect_fixed_link(dev, &adjust_link,
							 interface);
	if (!priv->phydev) {
		dev_err(&dev->dev, "could not attach to PHY\n");
		return -ENODEV;
	}

	if (interface == PHY_INTERFACE_MODE_SGMII)
		gfar_configure_serdes(dev);

	/* Remove any features not supported by the controller */
	priv->phydev->supported &= (GFAR_SUPPORTED | gigabit_support);
	priv->phydev->advertising = priv->phydev->supported;

	return 0;
}

/*
 * Initialize TBI PHY interface for communicating with the
 * SERDES lynx PHY on the chip. We communicate with this PHY
 * through the MDIO bus on each controller, treating it as a
 * "normal" PHY at the address found in the TBIPA register. We assume
 * that the TBIPA register is valid. Either the MDIO bus code will set
 * it to a value that doesn't conflict with other PHYs on the bus, or the
 * value doesn't matter, as there are no other PHYs on the bus.
 */
static void gfar_configure_serdes(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct phy_device *tbiphy;

	if (!priv->tbi_node) {
		dev_warn(&dev->dev, "error: SGMII mode requires that the "
				"device tree specify a tbi-handle\n");
		return;
	}

	tbiphy = of_phy_find_device(priv->tbi_node);
	if (!tbiphy) {
		dev_err(&dev->dev, "error: Could not get TBI device\n");
		return;
	}

	/*
	 * If the link is already up, we must already be ok, and don't need to
	 * configure and reset the TBI<->SerDes link. Maybe U-Boot configured
	 * everything for us? Resetting it takes the link down and requires
	 * several seconds for it to come back.
	 */
	if (phy_read(tbiphy, MII_BMSR) & BMSR_LSTATUS)
		return;

	/* Single clk mode, mii mode off (for serdes communication) */
	phy_write(tbiphy, MII_TBICON, TBICON_CLK_SELECT);

	phy_write(tbiphy, MII_ADVERTISE,
			ADVERTISE_1000XFULL | ADVERTISE_1000XPAUSE |
			ADVERTISE_1000XPSE_ASYM);

	phy_write(tbiphy, MII_BMCR, BMCR_ANENABLE |
			BMCR_ANRESTART | BMCR_FULLDPLX | BMCR_SPEED1000);
}

static void init_registers(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = NULL;
	int i = 0;

	for (i = 0; i < priv->num_grps; i++) {
		regs = priv->gfargrp[i].regs;
		/* Clear IEVENT */
		gfar_write(&regs->ievent, IEVENT_INIT_CLEAR);

		/* Initialize IMASK */
		gfar_write(&regs->imask, IMASK_INIT_CLEAR);
	}

	regs = priv->gfargrp[0].regs;
	/* Init hash registers to zero */
	gfar_write(&regs->igaddr0, 0);
	gfar_write(&regs->igaddr1, 0);
	gfar_write(&regs->igaddr2, 0);
	gfar_write(&regs->igaddr3, 0);
	gfar_write(&regs->igaddr4, 0);
	gfar_write(&regs->igaddr5, 0);
	gfar_write(&regs->igaddr6, 0);
	gfar_write(&regs->igaddr7, 0);

	gfar_write(&regs->gaddr0, 0);
	gfar_write(&regs->gaddr1, 0);
	gfar_write(&regs->gaddr2, 0);
	gfar_write(&regs->gaddr3, 0);
	gfar_write(&regs->gaddr4, 0);
	gfar_write(&regs->gaddr5, 0);
	gfar_write(&regs->gaddr6, 0);
	gfar_write(&regs->gaddr7, 0);

	/* Zero out the rmon mib registers if it has them */
	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON) {
		memset_io(&(regs->rmon), 0, sizeof(struct rmon_mib));

		/* Mask off the CAM interrupts */
		gfar_write(&regs->rmon.cam1, 0xffffffff);
		gfar_write(&regs->rmon.cam2, 0xffffffff);
	}

	/* Initialize the max receive buffer length */
	gfar_write(&regs->mrblr, priv->rx_buffer_size);

	/* Initialize the Minimum Frame Length Register */
	gfar_write(&regs->minflr, MINFLR_INIT_SETTINGS);
}

static int __gfar_is_rx_idle(struct gfar_private *priv)
{
	u32 res;

	/*
	 * Normally TSEC should not hang on GRS commands, so we should
	 * actually wait for IEVENT_GRSC flag.
	 */
	if (likely(!gfar_has_errata(priv, GFAR_ERRATA_A002)))
		return 0;

	/*
	 * Read the eTSEC register at offset 0xD1C. If bits 7-14 are
	 * the same as bits 23-30, the eTSEC Rx is assumed to be idle
	 * and the Rx can be safely reset.
	 */
	res = gfar_read((void __iomem *)priv->gfargrp[0].regs + 0xd1c);
	res &= 0x7f807f80;
	if ((res & 0xffff) == (res >> 16))
		return 1;

	return 0;
}
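
/* Graceful stop sequence: set DMACTRL[GRS,GTS] and then poll IEVENT for
 * the GRSC/GTSC "stop complete" acknowledgements, falling back to the
 * erratum-A002 idle heuristic above when GRSC never asserts. */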
/* Halt the receive and transmit queues */
static void gfar_halt_nodisable(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = NULL;
	u32 tempval;
	int i = 0;

	for (i = 0; i < priv->num_grps; i++) {
		regs = priv->gfargrp[i].regs;
		/* Mask all interrupts */
		gfar_write(&regs->imask, IMASK_INIT_CLEAR);

		/* Clear all interrupts */
		gfar_write(&regs->ievent, IEVENT_INIT_CLEAR);
	}

	regs = priv->gfargrp[0].regs;
	/* Stop the DMA, and wait for it to stop */
	tempval = gfar_read(&regs->dmactrl);
	if ((tempval & (DMACTRL_GRS | DMACTRL_GTS))
	    != (DMACTRL_GRS | DMACTRL_GTS)) {
		int ret;

		tempval |= (DMACTRL_GRS | DMACTRL_GTS);
		gfar_write(&regs->dmactrl, tempval);

		do {
			ret = spin_event_timeout(((gfar_read(&regs->ievent) &
				 (IEVENT_GRSC | IEVENT_GTSC)) ==
				 (IEVENT_GRSC | IEVENT_GTSC)), 1000000, 0);
			if (!ret && !(gfar_read(&regs->ievent) & IEVENT_GRSC))
				ret = __gfar_is_rx_idle(priv);
		} while (!ret);
	}
}

/* Halt the receive and transmit queues */
void gfar_halt(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 tempval;

	gfar_halt_nodisable(dev);

	/* Disable Rx and Tx */
	tempval = gfar_read(&regs->maccfg1);
	tempval &= ~(MACCFG1_RX_EN | MACCFG1_TX_EN);
	gfar_write(&regs->maccfg1, tempval);
}

static void free_grp_irqs(struct gfar_priv_grp *grp)
{
	free_irq(grp->interruptError, grp);
	free_irq(grp->interruptTransmit, grp);
	free_irq(grp->interruptReceive, grp);
}

void stop_gfar(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	unsigned long flags;
	int i;

	phy_stop(priv->phydev);

	/* Lock it down */
	local_irq_save(flags);
	lock_tx_qs(priv);
	lock_rx_qs(priv);

	gfar_halt(dev);

	unlock_rx_qs(priv);
	unlock_tx_qs(priv);
	local_irq_restore(flags);

	/* Free the IRQs */
	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
		for (i = 0; i < priv->num_grps; i++)
			free_grp_irqs(&priv->gfargrp[i]);
	} else {
		for (i = 0; i < priv->num_grps; i++)
			free_irq(priv->gfargrp[i].interruptTransmit,
				 &priv->gfargrp[i]);
	}

	free_skb_resources(priv);
}

static void free_skb_tx_queue(struct gfar_priv_tx_q *tx_queue)
{
	struct txbd8 *txbdp;
	struct gfar_private *priv = netdev_priv(tx_queue->dev);
	int i, j;

	txbdp = tx_queue->tx_bd_base;

	for (i = 0; i < tx_queue->tx_ring_size; i++) {
		if (!tx_queue->tx_skbuff[i])
			continue;

		dma_unmap_single(&priv->ofdev->dev, txbdp->bufPtr,
				 txbdp->length, DMA_TO_DEVICE);
		txbdp->lstatus = 0;
		for (j = 0; j < skb_shinfo(tx_queue->tx_skbuff[i])->nr_frags;
		     j++) {
			txbdp++;
			dma_unmap_page(&priv->ofdev->dev, txbdp->bufPtr,
				       txbdp->length, DMA_TO_DEVICE);
		}
		txbdp++;
		dev_kfree_skb_any(tx_queue->tx_skbuff[i]);
		tx_queue->tx_skbuff[i] = NULL;
	}
	kfree(tx_queue->tx_skbuff);
}

static void free_skb_rx_queue(struct gfar_priv_rx_q *rx_queue)
{
	struct rxbd8 *rxbdp;
	struct gfar_private *priv = netdev_priv(rx_queue->dev);
	int i;

	rxbdp = rx_queue->rx_bd_base;

	for (i = 0; i < rx_queue->rx_ring_size; i++) {
		if (rx_queue->rx_skbuff[i]) {
			dma_unmap_single(&priv->ofdev->dev,
					 rxbdp->bufPtr, priv->rx_buffer_size,
					 DMA_FROM_DEVICE);
			dev_kfree_skb_any(rx_queue->rx_skbuff[i]);
			rx_queue->rx_skbuff[i] = NULL;
		}
		rxbdp->lstatus = 0;
		rxbdp->bufPtr = 0;
		rxbdp++;
	}
	kfree(rx_queue->rx_skbuff);
}

/* If there are any tx skbs or rx skbs still around, free them.
 * Then free tx_skbuff and rx_skbuff */
static void free_skb_resources(struct gfar_private *priv)
{
	struct gfar_priv_tx_q *tx_queue = NULL;
	struct gfar_priv_rx_q *rx_queue = NULL;
	int i;

	/* Go through all the buffer descriptors and free their data buffers */
	for (i = 0; i < priv->num_tx_queues; i++) {
		struct netdev_queue *txq;
		tx_queue = priv->tx_queue[i];
		txq = netdev_get_tx_queue(tx_queue->dev, tx_queue->qindex);
		if (tx_queue->tx_skbuff)
			free_skb_tx_queue(tx_queue);
		netdev_tx_reset_queue(txq);
	}

	for (i = 0; i < priv->num_rx_queues; i++) {
		rx_queue = priv->rx_queue[i];
		if (rx_queue->rx_skbuff)
			free_skb_rx_queue(rx_queue);
	}

	dma_free_coherent(&priv->ofdev->dev,
			sizeof(struct txbd8) * priv->total_tx_ring_size +
			sizeof(struct rxbd8) * priv->total_rx_ring_size,
			priv->tx_queue[0]->tx_bd_base,
			priv->tx_queue[0]->tx_bd_dma_base);
	skb_queue_purge(&priv->rx_recycle);
}
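
/* Start sequence is the inverse of the halt above: re-enable the MAC,
 * clear DMACTRL[GRS,GTS], then clear each group's THLT/RHLT bits so the
 * DMA engine resumes polling the descriptor rings. */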
void gfar_start(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 tempval;
	int i = 0;

	/* Enable Rx and Tx in MACCFG1 */
	tempval = gfar_read(&regs->maccfg1);
	tempval |= (MACCFG1_RX_EN | MACCFG1_TX_EN);
	gfar_write(&regs->maccfg1, tempval);

	/* Initialize DMACTRL to have WWR and WOP */
	tempval = gfar_read(&regs->dmactrl);
	tempval |= DMACTRL_INIT_SETTINGS;
	gfar_write(&regs->dmactrl, tempval);

	/* Make sure we aren't stopped */
	tempval = gfar_read(&regs->dmactrl);
	tempval &= ~(DMACTRL_GRS | DMACTRL_GTS);
	gfar_write(&regs->dmactrl, tempval);

	for (i = 0; i < priv->num_grps; i++) {
		regs = priv->gfargrp[i].regs;
		/* Clear THLT/RHLT, so that the DMA starts polling now */
		gfar_write(&regs->tstat, priv->gfargrp[i].tstat);
		gfar_write(&regs->rstat, priv->gfargrp[i].rstat);
		/* Unmask the interrupts we look for */
		gfar_write(&regs->imask, IMASK_DEFAULT);
	}

	dev->trans_start = jiffies; /* prevent tx timeout */
}

void gfar_configure_coalescing(struct gfar_private *priv,
	unsigned long tx_mask, unsigned long rx_mask)
{
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 __iomem *baddr;
	int i = 0;

	/* Backward compatible case ---- even if we enable
	 * multiple queues, there's only single reg to program
	 */
	gfar_write(&regs->txic, 0);
	if (likely(priv->tx_queue[0]->txcoalescing))
		gfar_write(&regs->txic, priv->tx_queue[0]->txic);

	gfar_write(&regs->rxic, 0);
	if (unlikely(priv->rx_queue[0]->rxcoalescing))
		gfar_write(&regs->rxic, priv->rx_queue[0]->rxic);

	if (priv->mode == MQ_MG_MODE) {
		baddr = &regs->txic0;
		for_each_set_bit(i, &tx_mask, priv->num_tx_queues) {
			if (likely(priv->tx_queue[i]->txcoalescing)) {
				gfar_write(baddr + i, 0);
				gfar_write(baddr + i, priv->tx_queue[i]->txic);
			}
		}

		baddr = &regs->rxic0;
		for_each_set_bit(i, &rx_mask, priv->num_rx_queues) {
			if (likely(priv->rx_queue[i]->rxcoalescing)) {
				gfar_write(baddr + i, 0);
				gfar_write(baddr + i, priv->rx_queue[i]->rxic);
			}
		}
	}
}
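
/* Request this group's IRQ lines. With FSL_GIANFAR_DEV_HAS_MULTI_INTR
 * there are separate error/transmit/receive vectors (named by the
 * "<ifname>_gN_*" strings built in gfar_probe()); otherwise a single
 * combined interrupt is routed through gfar_interrupt(). */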
static int register_grp_irqs(struct gfar_priv_grp *grp)
{
	struct gfar_private *priv = grp->priv;
	struct net_device *dev = priv->ndev;
	int err;

	/* If the device has multiple interrupts, register for
	 * them. Otherwise, only register for the one */
	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
		/* Install our interrupt handlers for Error,
		 * Transmit, and Receive */
		if ((err = request_irq(grp->interruptError, gfar_error, 0,
				grp->int_name_er, grp)) < 0) {
			netif_err(priv, intr, dev, "Can't get IRQ %d\n",
				  grp->interruptError);

			goto err_irq_fail;
		}

		if ((err = request_irq(grp->interruptTransmit, gfar_transmit,
				0, grp->int_name_tx, grp)) < 0) {
			netif_err(priv, intr, dev, "Can't get IRQ %d\n",
				  grp->interruptTransmit);
			goto tx_irq_fail;
		}

		if ((err = request_irq(grp->interruptReceive, gfar_receive, 0,
				grp->int_name_rx, grp)) < 0) {
			netif_err(priv, intr, dev, "Can't get IRQ %d\n",
				  grp->interruptReceive);
			goto rx_irq_fail;
		}
	} else {
		if ((err = request_irq(grp->interruptTransmit, gfar_interrupt, 0,
				grp->int_name_tx, grp)) < 0) {
			netif_err(priv, intr, dev, "Can't get IRQ %d\n",
				  grp->interruptTransmit);
			goto err_irq_fail;
		}
	}

	return 0;

rx_irq_fail:
	free_irq(grp->interruptTransmit, grp);
tx_irq_fail:
	free_irq(grp->interruptError, grp);
err_irq_fail:
	return err;

}

/* Bring the controller up and running */
int startup_gfar(struct net_device *ndev)
{
	struct gfar_private *priv = netdev_priv(ndev);
	struct gfar __iomem *regs = NULL;
	int err, i, j;

	for (i = 0; i < priv->num_grps; i++) {
		regs = priv->gfargrp[i].regs;
		gfar_write(&regs->imask, IMASK_INIT_CLEAR);
	}

	regs = priv->gfargrp[0].regs;
	err = gfar_alloc_skb_resources(ndev);
	if (err)
		return err;

	gfar_init_mac(ndev);

	for (i = 0; i < priv->num_grps; i++) {
		err = register_grp_irqs(&priv->gfargrp[i]);
		if (err) {
			for (j = 0; j < i; j++)
				free_grp_irqs(&priv->gfargrp[j]);
			goto irq_fail;
		}
	}

	/* Start the controller */
	gfar_start(ndev);

	phy_start(priv->phydev);

	gfar_configure_coalescing(priv, 0xFF, 0xFF);

	return 0;

irq_fail:
	free_skb_resources(priv);
	return err;
}

/* Called when something needs to use the ethernet device.
 * Returns 0 for success. */
static int gfar_enet_open(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	int err;

	enable_napi(priv);

	skb_queue_head_init(&priv->rx_recycle);

	/* Initialize a bunch of registers */
	init_registers(dev);

	gfar_set_mac_address(dev);

	err = init_phy(dev);

	if (err) {
		disable_napi(priv);
		return err;
	}

	err = startup_gfar(dev);
	if (err) {
		disable_napi(priv);
		return err;
	}

	netif_tx_start_all_queues(dev);

	device_set_wakeup_enable(&dev->dev, priv->wol_en);

	return err;
}
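
/* TX frame control block (FCB) helpers. When checksum offload, VLAN
 * insertion, or TX time stamping is in use, a GMAC_FCB_LEN-byte FCB is
 * pushed in front of the Ethernet header to carry per-frame offload
 * instructions to the controller. */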
static int register_grp_irqs(struct gfar_priv_grp *grp)
{
	struct gfar_private *priv = grp->priv;
	struct net_device *dev = priv->ndev;
	int err;

	/* If the device has multiple interrupts, register for
	 * them.  Otherwise, only register for the one */
	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
		/* Install our interrupt handlers for Error,
		 * Transmit, and Receive */
		if ((err = request_irq(grp->interruptError, gfar_error, 0,
				       grp->int_name_er, grp)) < 0) {
			netif_err(priv, intr, dev, "Can't get IRQ %d\n",
				  grp->interruptError);

			goto err_irq_fail;
		}

		if ((err = request_irq(grp->interruptTransmit, gfar_transmit,
				       0, grp->int_name_tx, grp)) < 0) {
			netif_err(priv, intr, dev, "Can't get IRQ %d\n",
				  grp->interruptTransmit);
			goto tx_irq_fail;
		}

		if ((err = request_irq(grp->interruptReceive, gfar_receive, 0,
				       grp->int_name_rx, grp)) < 0) {
			netif_err(priv, intr, dev, "Can't get IRQ %d\n",
				  grp->interruptReceive);
			goto rx_irq_fail;
		}
	} else {
		if ((err = request_irq(grp->interruptTransmit, gfar_interrupt,
				       0, grp->int_name_tx, grp)) < 0) {
			netif_err(priv, intr, dev, "Can't get IRQ %d\n",
				  grp->interruptTransmit);
			goto err_irq_fail;
		}
	}

	return 0;

rx_irq_fail:
	free_irq(grp->interruptTransmit, grp);
tx_irq_fail:
	free_irq(grp->interruptError, grp);
err_irq_fail:
	return err;
}

/* Bring the controller up and running */
int startup_gfar(struct net_device *ndev)
{
	struct gfar_private *priv = netdev_priv(ndev);
	struct gfar __iomem *regs = NULL;
	int err, i, j;

	for (i = 0; i < priv->num_grps; i++) {
		regs = priv->gfargrp[i].regs;
		gfar_write(&regs->imask, IMASK_INIT_CLEAR);
	}

	regs = priv->gfargrp[0].regs;
	err = gfar_alloc_skb_resources(ndev);
	if (err)
		return err;

	gfar_init_mac(ndev);

	for (i = 0; i < priv->num_grps; i++) {
		err = register_grp_irqs(&priv->gfargrp[i]);
		if (err) {
			for (j = 0; j < i; j++)
				free_grp_irqs(&priv->gfargrp[j]);
			goto irq_fail;
		}
	}

	/* Start the controller */
	gfar_start(ndev);

	phy_start(priv->phydev);

	gfar_configure_coalescing(priv, 0xFF, 0xFF);

	return 0;

irq_fail:
	free_skb_resources(priv);
	return err;
}

/* Called when something needs to use the ethernet device.
 * Returns 0 for success.
 */
static int gfar_enet_open(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	int err;

	enable_napi(priv);

	skb_queue_head_init(&priv->rx_recycle);

	/* Initialize a bunch of registers */
	init_registers(dev);

	gfar_set_mac_address(dev);

	err = init_phy(dev);
	if (err) {
		disable_napi(priv);
		return err;
	}

	err = startup_gfar(dev);
	if (err) {
		disable_napi(priv);
		return err;
	}

	netif_tx_start_all_queues(dev);

	device_set_wakeup_enable(&dev->dev, priv->wol_en);

	return err;
}

static inline struct txfcb *gfar_add_fcb(struct sk_buff *skb)
{
	struct txfcb *fcb = (struct txfcb *)skb_push(skb, GMAC_FCB_LEN);

	memset(fcb, 0, GMAC_FCB_LEN);

	return fcb;
}

static inline void gfar_tx_checksum(struct sk_buff *skb, struct txfcb *fcb,
				    int fcb_length)
{
	u8 flags = 0;

	/* If we're here, it's an IP packet with a TCP or UDP
	 * payload.  We set it to checksum, using a pseudo-header
	 * we provide
	 */
	flags = TXFCB_DEFAULT;

	/* Tell the controller what the protocol is,
	 * and provide the already-calculated phcs */
	if (ip_hdr(skb)->protocol == IPPROTO_UDP) {
		flags |= TXFCB_UDP;
		fcb->phcs = udp_hdr(skb)->check;
	} else
		fcb->phcs = tcp_hdr(skb)->check;

	/* l3os is the distance between the start of the
	 * frame (skb->data) and the start of the IP hdr.
	 * l4os is the distance between the start of the
	 * l3 hdr and the l4 hdr */
	fcb->l3os = (u16)(skb_network_offset(skb) - fcb_length);
	fcb->l4os = skb_network_header_len(skb);

	fcb->flags = flags;
}
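/* Worked example (illustrative): for an untagged Ethernet II IPv4/TCP
 * frame, the FCB (and TxPAL, when time stamping) has already been
 * pushed by the time gfar_tx_checksum() runs, so skb_network_offset()
 * is ETH_HLEN + fcb_length.  That yields l3os = 14 (the Ethernet
 * header) and l4os = 20 for an IPv4 header without options.
 */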
inline void gfar_tx_vlan(struct sk_buff *skb, struct txfcb *fcb)
{
	fcb->flags |= TXFCB_VLN;
	fcb->vlctl = vlan_tx_tag_get(skb);
}

static inline struct txbd8 *skip_txbd(struct txbd8 *bdp, int stride,
				      struct txbd8 *base, int ring_size)
{
	struct txbd8 *new_bd = bdp + stride;

	return (new_bd >= (base + ring_size)) ? (new_bd - ring_size) : new_bd;
}

static inline struct txbd8 *next_txbd(struct txbd8 *bdp, struct txbd8 *base,
				      int ring_size)
{
	return skip_txbd(bdp, 1, base, ring_size);
}
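/* Worked example (illustrative): with ring_size = 8, base = &ring[0]
 * and bdp = &ring[6], skip_txbd(bdp, 3, base, ring_size) computes
 * new_bd = &ring[9], sees it is past the end, and wraps it back to
 * &ring[1].  This is how the driver walks the descriptor ring without
 * ever stepping past the wrap descriptor.
 */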
/* This is called by the kernel when a frame is ready for transmission.
 * It is pointed to by the dev->hard_start_xmit function pointer
 */
static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar_priv_tx_q *tx_queue = NULL;
	struct netdev_queue *txq;
	struct gfar __iomem *regs = NULL;
	struct txfcb *fcb = NULL;
	struct txbd8 *txbdp, *txbdp_start, *base, *txbdp_tstamp = NULL;
	u32 lstatus;
	int i, rq = 0, do_tstamp = 0;
	u32 bufaddr;
	unsigned long flags;
	unsigned int nr_frags, nr_txbds, length, fcb_length = GMAC_FCB_LEN;

	/* TOE=1 frames larger than 2500 bytes may see excess delays
	 * before start of transmission.
	 */
	if (unlikely(gfar_has_errata(priv, GFAR_ERRATA_76) &&
		     skb->ip_summed == CHECKSUM_PARTIAL &&
		     skb->len > 2500)) {
		int ret;

		ret = skb_checksum_help(skb);
		if (ret)
			return ret;
	}

	rq = skb->queue_mapping;
	tx_queue = priv->tx_queue[rq];
	txq = netdev_get_tx_queue(dev, rq);
	base = tx_queue->tx_bd_base;
	regs = tx_queue->grp->regs;

	/* check if time stamp should be generated */
	if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
		     priv->hwts_tx_en)) {
		do_tstamp = 1;
		fcb_length = GMAC_FCB_LEN + GMAC_TXPAL_LEN;
	}

	/* make space for additional header when fcb is needed */
	if (((skb->ip_summed == CHECKSUM_PARTIAL) ||
	     vlan_tx_tag_present(skb) ||
	     unlikely(do_tstamp)) &&
	    (skb_headroom(skb) < fcb_length)) {
		struct sk_buff *skb_new;

		skb_new = skb_realloc_headroom(skb, fcb_length);
		if (!skb_new) {
			dev->stats.tx_errors++;
			kfree_skb(skb);
			return NETDEV_TX_OK;
		}

		/* Steal sock reference for processing TX time stamps */
		swap(skb_new->sk, skb->sk);
		swap(skb_new->destructor, skb->destructor);
		kfree_skb(skb);
		skb = skb_new;
	}

	/* total number of fragments in the SKB */
	nr_frags = skb_shinfo(skb)->nr_frags;

	/* calculate the required number of TxBDs for this skb */
	if (unlikely(do_tstamp))
		nr_txbds = nr_frags + 2;
	else
		nr_txbds = nr_frags + 1;

	/* check if there is space to queue this packet */
	if (nr_txbds > tx_queue->num_txbdfree) {
		/* no space, stop the queue */
		netif_tx_stop_queue(txq);
		dev->stats.tx_fifo_errors++;
		return NETDEV_TX_BUSY;
	}

	/* Update transmit stats */
	tx_queue->stats.tx_bytes += skb->len;
	tx_queue->stats.tx_packets++;

	txbdp = txbdp_start = tx_queue->cur_tx;
	lstatus = txbdp->lstatus;

	/* Time stamp insertion requires one additional TxBD */
	if (unlikely(do_tstamp))
		txbdp_tstamp = txbdp = next_txbd(txbdp, base,
						 tx_queue->tx_ring_size);

	if (nr_frags == 0) {
		if (unlikely(do_tstamp))
			txbdp_tstamp->lstatus |= BD_LFLAG(TXBD_LAST |
							  TXBD_INTERRUPT);
		else
			lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
	} else {
		/* Place the fragment addresses and lengths into the TxBDs */
		for (i = 0; i < nr_frags; i++) {
			/* Point at the next BD, wrapping as needed */
			txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size);

			length = skb_shinfo(skb)->frags[i].size;

			lstatus = txbdp->lstatus | length |
				  BD_LFLAG(TXBD_READY);

			/* Handle the last BD specially */
			if (i == nr_frags - 1)
				lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);

			bufaddr = skb_frag_dma_map(&priv->ofdev->dev,
						   &skb_shinfo(skb)->frags[i],
						   0,
						   length,
						   DMA_TO_DEVICE);

			/* set the TxBD length and buffer pointer */
			txbdp->bufPtr = bufaddr;
			txbdp->lstatus = lstatus;
		}

		lstatus = txbdp_start->lstatus;
	}

	/* Add TxPAL between FCB and frame if required */
	if (unlikely(do_tstamp)) {
		skb_push(skb, GMAC_TXPAL_LEN);
		memset(skb->data, 0, GMAC_TXPAL_LEN);
	}

	/* Set up checksumming */
	if (CHECKSUM_PARTIAL == skb->ip_summed) {
		fcb = gfar_add_fcb(skb);
		/* as specified by errata */
		if (unlikely(gfar_has_errata(priv, GFAR_ERRATA_12) &&
			     ((unsigned long)fcb % 0x20) > 0x18)) {
			__skb_pull(skb, GMAC_FCB_LEN);
			skb_checksum_help(skb);
		} else {
			lstatus |= BD_LFLAG(TXBD_TOE);
			gfar_tx_checksum(skb, fcb, fcb_length);
		}
	}

	if (vlan_tx_tag_present(skb)) {
		if (unlikely(NULL == fcb)) {
			fcb = gfar_add_fcb(skb);
			lstatus |= BD_LFLAG(TXBD_TOE);
		}

		gfar_tx_vlan(skb, fcb);
	}

	/* Setup tx hardware time stamping if requested */
	if (unlikely(do_tstamp)) {
		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
		if (fcb == NULL)
			fcb = gfar_add_fcb(skb);
		fcb->ptp = 1;
		lstatus |= BD_LFLAG(TXBD_TOE);
	}

	txbdp_start->bufPtr = dma_map_single(&priv->ofdev->dev, skb->data,
					     skb_headlen(skb), DMA_TO_DEVICE);

	/* If time stamping is requested one additional TxBD must be set up.
	 * The first TxBD points to the FCB and must have a data length of
	 * GMAC_FCB_LEN.  The second TxBD points to the actual frame data
	 * with the full frame length.
	 */
	if (unlikely(do_tstamp)) {
		txbdp_tstamp->bufPtr = txbdp_start->bufPtr + fcb_length;
		txbdp_tstamp->lstatus |= BD_LFLAG(TXBD_READY) |
					 (skb_headlen(skb) - fcb_length);
		lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | GMAC_FCB_LEN;
	} else {
		lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | skb_headlen(skb);
	}
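	/* Illustrative sketch, not from the reference manual: for a time
	 * stamped, unfragmented frame the linear buffer now looks like
	 *
	 *	skb->data: [ FCB (8) ][ TxPAL (16) ][ frame data ... ]
	 *
	 * and the two BDs split it as
	 *
	 *	txbdp_start:  bufPtr = skb->data,      length = GMAC_FCB_LEN
	 *	txbdp_tstamp: bufPtr = skb->data + 24, length = headlen - 24
	 *
	 * Neither BD covers the TxPAL region itself; it is the scratch
	 * area from which gfar_clean_tx_ring() later reads back the TX
	 * timestamp.
	 */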
	netdev_tx_sent_queue(txq, skb->len);

	/* We can work in parallel with gfar_clean_tx_ring(), except
	 * when modifying num_txbdfree.  Note that we didn't grab the lock
	 * when we were reading num_txbdfree and checking for available
	 * space; that's because outside of this function it can only grow,
	 * and once we've got the needed space, it cannot suddenly
	 * disappear.
	 *
	 * The lock also protects us from gfar_error(), which can modify
	 * regs->tstat and thus retrigger the transfers, which is why we
	 * also must grab the lock before setting the ready bit for the
	 * first BD to be transmitted.
	 */
	spin_lock_irqsave(&tx_queue->txlock, flags);

	/* The powerpc-specific eieio() is used, as wmb() has too strong
	 * semantics (it requires synchronization between cacheable and
	 * uncacheable mappings, which eieio doesn't provide and which we
	 * don't need), thus requiring a more expensive sync instruction.
	 * At some point, the set of architecture-independent barrier
	 * functions should be expanded to include weaker barriers.
	 */
	eieio();

	txbdp_start->lstatus = lstatus;

	eieio(); /* force lstatus write before tx_skbuff */

	tx_queue->tx_skbuff[tx_queue->skb_curtx] = skb;

	/* Update the current skb pointer to the next entry we will use
	 * (wrapping if necessary) */
	tx_queue->skb_curtx = (tx_queue->skb_curtx + 1) &
			      TX_RING_MOD_MASK(tx_queue->tx_ring_size);

	tx_queue->cur_tx = next_txbd(txbdp, base, tx_queue->tx_ring_size);

	/* reduce TxBD free count */
	tx_queue->num_txbdfree -= (nr_txbds);

	/* If the next BD still needs to be cleaned up, then the BDs
	 * are full.  We need to tell the kernel to stop sending us stuff.
	 */
	if (!tx_queue->num_txbdfree) {
		netif_tx_stop_queue(txq);

		dev->stats.tx_fifo_errors++;
	}

	/* Tell the DMA to go go go */
	gfar_write(&regs->tstat, TSTAT_CLEAR_THALT >> tx_queue->qindex);

	/* Unlock priv */
	spin_unlock_irqrestore(&tx_queue->txlock, flags);

	return NETDEV_TX_OK;
}

/* Stops the kernel queue, and halts the controller */
static int gfar_close(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);

	disable_napi(priv);

	cancel_work_sync(&priv->reset_task);
	stop_gfar(dev);

	/* Disconnect from the PHY */
	phy_disconnect(priv->phydev);
	priv->phydev = NULL;

	netif_tx_stop_all_queues(dev);

	return 0;
}

/* Changes the mac address if the controller is not running. */
static int gfar_set_mac_address(struct net_device *dev)
{
	gfar_set_mac_for_addr(dev, 0, dev->dev_addr);

	return 0;
}

/* Check if rx parser should be activated */
void gfar_check_rx_parser_mode(struct gfar_private *priv)
{
	struct gfar __iomem *regs;
	u32 tempval;

	regs = priv->gfargrp[0].regs;

	tempval = gfar_read(&regs->rctrl);
	/* If parse is no longer required, then disable parser */
	if (tempval & RCTRL_REQ_PARSER)
		tempval |= RCTRL_PRSDEP_INIT;
	else
		tempval &= ~RCTRL_PRSDEP_INIT;
	gfar_write(&regs->rctrl, tempval);
}

/* Enables and disables VLAN insertion/extraction */
void gfar_vlan_mode(struct net_device *dev, netdev_features_t features)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = NULL;
	unsigned long flags;
	u32 tempval;

	regs = priv->gfargrp[0].regs;
	local_irq_save(flags);
	lock_rx_qs(priv);

	if (features & NETIF_F_HW_VLAN_TX) {
		/* Enable VLAN tag insertion */
		tempval = gfar_read(&regs->tctrl);
		tempval |= TCTRL_VLINS;
		gfar_write(&regs->tctrl, tempval);
	} else {
		/* Disable VLAN tag insertion */
		tempval = gfar_read(&regs->tctrl);
		tempval &= ~TCTRL_VLINS;
		gfar_write(&regs->tctrl, tempval);
	}

	if (features & NETIF_F_HW_VLAN_RX) {
		/* Enable VLAN tag extraction */
		tempval = gfar_read(&regs->rctrl);
		tempval |= (RCTRL_VLEX | RCTRL_PRSDEP_INIT);
		gfar_write(&regs->rctrl, tempval);
	} else {
		/* Disable VLAN tag extraction */
		tempval = gfar_read(&regs->rctrl);
		tempval &= ~RCTRL_VLEX;
		gfar_write(&regs->rctrl, tempval);

		gfar_check_rx_parser_mode(priv);
	}

	gfar_change_mtu(dev, dev->mtu);

	unlock_rx_qs(priv);
	local_irq_restore(flags);
}

static int gfar_change_mtu(struct net_device *dev, int new_mtu)
{
	int tempsize, tempval;
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	int oldsize = priv->rx_buffer_size;
	int frame_size = new_mtu + ETH_HLEN;

	if (gfar_is_vlan_on(priv))
		frame_size += VLAN_HLEN;

	if ((frame_size < 64) || (frame_size > JUMBO_FRAME_SIZE)) {
		netif_err(priv, drv, dev, "Invalid MTU setting\n");
		return -EINVAL;
	}

	if (gfar_uses_fcb(priv))
		frame_size += GMAC_FCB_LEN;

	frame_size += priv->padding;

	tempsize = (frame_size & ~(INCREMENTAL_BUFFER_SIZE - 1)) +
		   INCREMENTAL_BUFFER_SIZE;
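	/* Worked example (illustrative), assuming INCREMENTAL_BUFFER_SIZE
	 * is 512: with new_mtu = 1500, VLAN on, the FCB in use and no
	 * extra padding, frame_size = 1500 + 14 + 4 + 8 = 1526, and the
	 * computation above rounds that up to the next 512-byte step:
	 * (1526 & ~511) + 512 = 1024 + 512 = 1536.
	 */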
	/* Only stop and start the controller if it isn't already
	 * stopped, and we changed something */
	if ((oldsize != tempsize) && (dev->flags & IFF_UP))
		stop_gfar(dev);

	priv->rx_buffer_size = tempsize;

	dev->mtu = new_mtu;

	gfar_write(&regs->mrblr, priv->rx_buffer_size);
	gfar_write(&regs->maxfrm, priv->rx_buffer_size);

	/* If the mtu is larger than the max size for standard
	 * ethernet frames (ie, a jumbo frame), then set maccfg2
	 * to allow huge frames, and to check the length */
	tempval = gfar_read(&regs->maccfg2);

	if (priv->rx_buffer_size > DEFAULT_RX_BUFFER_SIZE ||
	    gfar_has_errata(priv, GFAR_ERRATA_74))
		tempval |= (MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK);
	else
		tempval &= ~(MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK);

	gfar_write(&regs->maccfg2, tempval);

	if ((oldsize != tempsize) && (dev->flags & IFF_UP))
		startup_gfar(dev);

	return 0;
}

/* gfar_reset_task gets scheduled when a packet has not been
 * transmitted after a set amount of time.
 * For now, assume that clearing out all the structures, and
 * starting over will fix the problem.
 */
static void gfar_reset_task(struct work_struct *work)
{
	struct gfar_private *priv = container_of(work, struct gfar_private,
						 reset_task);
	struct net_device *dev = priv->ndev;

	if (dev->flags & IFF_UP) {
		netif_tx_stop_all_queues(dev);
		stop_gfar(dev);
		startup_gfar(dev);
		netif_tx_start_all_queues(dev);
	}

	netif_tx_schedule_all(dev);
}

static void gfar_timeout(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);

	dev->stats.tx_errors++;
	schedule_work(&priv->reset_task);
}

static void gfar_align_skb(struct sk_buff *skb)
{
	/* We need the data buffer to be aligned properly.  We will
	 * reserve as many bytes as needed to align the data properly
	 */
	skb_reserve(skb, RXBUF_ALIGNMENT -
		    (((unsigned long)skb->data) & (RXBUF_ALIGNMENT - 1)));
}
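/* Worked example (illustrative), assuming RXBUF_ALIGNMENT is 64: if
 * skb->data ends in 0x28 (40 mod 64), the skb_reserve() in
 * gfar_align_skb() above advances it by 64 - 40 = 24 bytes onto a
 * 64-byte boundary.  Note that an already-aligned buffer is advanced
 * by a full RXBUF_ALIGNMENT, which is why gfar_alloc_skb() allocates
 * RXBUF_ALIGNMENT bytes of slack.
 */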
/* Interrupt Handler for Transmit complete */
static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
{
	struct net_device *dev = tx_queue->dev;
	struct netdev_queue *txq;
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar_priv_rx_q *rx_queue = NULL;
	struct txbd8 *bdp, *next = NULL;
	struct txbd8 *lbdp = NULL;
	struct txbd8 *base = tx_queue->tx_bd_base;
	struct sk_buff *skb;
	int skb_dirtytx;
	int tx_ring_size = tx_queue->tx_ring_size;
	int frags = 0, nr_txbds = 0;
	int i;
	int howmany = 0;
	int tqi = tx_queue->qindex;
	unsigned int bytes_sent = 0;
	u32 lstatus;
	size_t buflen;

	rx_queue = priv->rx_queue[tqi];
	txq = netdev_get_tx_queue(dev, tqi);
	bdp = tx_queue->dirty_tx;
	skb_dirtytx = tx_queue->skb_dirtytx;

	while ((skb = tx_queue->tx_skbuff[skb_dirtytx])) {
		unsigned long flags;

		frags = skb_shinfo(skb)->nr_frags;

		/* When time stamping, one additional TxBD must be freed.
		 * Also, we need to dma_unmap_single() the TxPAL.
		 */
		if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS))
			nr_txbds = frags + 2;
		else
			nr_txbds = frags + 1;

		lbdp = skip_txbd(bdp, nr_txbds - 1, base, tx_ring_size);

		lstatus = lbdp->lstatus;

		/* Only clean completed frames */
		if ((lstatus & BD_LFLAG(TXBD_READY)) &&
		    (lstatus & BD_LENGTH_MASK))
			break;

		if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) {
			next = next_txbd(bdp, base, tx_ring_size);
			buflen = next->length + GMAC_FCB_LEN + GMAC_TXPAL_LEN;
		} else
			buflen = bdp->length;

		dma_unmap_single(&priv->ofdev->dev, bdp->bufPtr,
				 buflen, DMA_TO_DEVICE);

		if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) {
			struct skb_shared_hwtstamps shhwtstamps;
			u64 *ns = (u64 *)(((u32)skb->data + 0x10) & ~0x7);

			memset(&shhwtstamps, 0, sizeof(shhwtstamps));
			shhwtstamps.hwtstamp = ns_to_ktime(*ns);
			skb_pull(skb, GMAC_FCB_LEN + GMAC_TXPAL_LEN);
			skb_tstamp_tx(skb, &shhwtstamps);
			bdp->lstatus &= BD_LFLAG(TXBD_WRAP);
			bdp = next;
		}

		bdp->lstatus &= BD_LFLAG(TXBD_WRAP);
		bdp = next_txbd(bdp, base, tx_ring_size);

		for (i = 0; i < frags; i++) {
			dma_unmap_page(&priv->ofdev->dev,
				       bdp->bufPtr,
				       bdp->length,
				       DMA_TO_DEVICE);
			bdp->lstatus &= BD_LFLAG(TXBD_WRAP);
			bdp = next_txbd(bdp, base, tx_ring_size);
		}

		bytes_sent += skb->len;

		/* If there's room in the recycle queue (limit it to
		 * rx_ring_size entries), we add this skb back into the
		 * pool, if it's the right size
		 */
		if (skb_queue_len(&priv->rx_recycle) < rx_queue->rx_ring_size &&
		    skb_recycle_check(skb, priv->rx_buffer_size +
					   RXBUF_ALIGNMENT)) {
			gfar_align_skb(skb);
			skb_queue_head(&priv->rx_recycle, skb);
		} else
			dev_kfree_skb_any(skb);

		tx_queue->tx_skbuff[skb_dirtytx] = NULL;

		skb_dirtytx = (skb_dirtytx + 1) &
			      TX_RING_MOD_MASK(tx_ring_size);

		howmany++;
		spin_lock_irqsave(&tx_queue->txlock, flags);
		tx_queue->num_txbdfree += nr_txbds;
		spin_unlock_irqrestore(&tx_queue->txlock, flags);
	}

	/* If we freed a buffer, we can restart transmission, if necessary */
	if (netif_tx_queue_stopped(txq) && tx_queue->num_txbdfree)
		netif_wake_subqueue(dev, tqi);

	/* Update dirty indicators */
	tx_queue->skb_dirtytx = skb_dirtytx;
	tx_queue->dirty_tx = bdp;

	netdev_tx_completed_queue(txq, howmany, bytes_sent);

	return howmany;
}
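/* Worked example (illustrative): an skb with nr_frags = 3 occupies
 * frags + 1 = 4 descriptors (one for the linear part, one per frag),
 * or frags + 2 = 5 when time stamping, so gfar_clean_tx_ring() gives
 * num_txbdfree back in exactly the units gfar_start_xmit() took it.
 */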
static void gfar_schedule_cleanup(struct gfar_priv_grp *gfargrp)
{
	unsigned long flags;

	spin_lock_irqsave(&gfargrp->grplock, flags);
	if (napi_schedule_prep(&gfargrp->napi)) {
		gfar_write(&gfargrp->regs->imask, IMASK_RTX_DISABLED);
		__napi_schedule(&gfargrp->napi);
	} else {
		/* Clear IEVENT, so interrupts aren't called again
		 * because of the packets that have already arrived.
		 */
		gfar_write(&gfargrp->regs->ievent, IEVENT_RTX_MASK);
	}
	spin_unlock_irqrestore(&gfargrp->grplock, flags);
}

/* Interrupt Handler for Transmit complete */
static irqreturn_t gfar_transmit(int irq, void *grp_id)
{
	gfar_schedule_cleanup((struct gfar_priv_grp *)grp_id);
	return IRQ_HANDLED;
}

static void gfar_new_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
			   struct sk_buff *skb)
{
	struct net_device *dev = rx_queue->dev;
	struct gfar_private *priv = netdev_priv(dev);
	dma_addr_t buf;

	buf = dma_map_single(&priv->ofdev->dev, skb->data,
			     priv->rx_buffer_size, DMA_FROM_DEVICE);
	gfar_init_rxbdp(rx_queue, bdp, buf);
}

static struct sk_buff *gfar_alloc_skb(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct sk_buff *skb = NULL;

	skb = netdev_alloc_skb(dev, priv->rx_buffer_size + RXBUF_ALIGNMENT);
	if (!skb)
		return NULL;

	gfar_align_skb(skb);

	return skb;
}

struct sk_buff *gfar_new_skb(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct sk_buff *skb = NULL;

	skb = skb_dequeue(&priv->rx_recycle);
	if (!skb)
		skb = gfar_alloc_skb(dev);

	return skb;
}
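/* Design note (as implemented above): RX refills first try the
 * rx_recycle pool that gfar_clean_tx_ring() feeds with right-sized,
 * realigned TX skbs, and fall back to a fresh netdev_alloc_skb() only
 * when the pool is empty, presumably to keep full allocations out of
 * the hot RX path while the device is also transmitting.
 */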
static inline void count_errors(unsigned short status, struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct net_device_stats *stats = &dev->stats;
	struct gfar_extra_stats *estats = &priv->extra_stats;

	/* If the packet was truncated, none of the other errors matter */
	if (status & RXBD_TRUNCATED) {
		stats->rx_length_errors++;

		estats->rx_trunc++;

		return;
	}
	/* Count the errors, if there were any */
	if (status & (RXBD_LARGE | RXBD_SHORT)) {
		stats->rx_length_errors++;

		if (status & RXBD_LARGE)
			estats->rx_large++;
		else
			estats->rx_short++;
	}
	if (status & RXBD_NONOCTET) {
		stats->rx_frame_errors++;
		estats->rx_nonoctet++;
	}
	if (status & RXBD_CRCERR) {
		estats->rx_crcerr++;
		stats->rx_crc_errors++;
	}
	if (status & RXBD_OVERRUN) {
		estats->rx_overrun++;
		stats->rx_crc_errors++;
	}
}

irqreturn_t gfar_receive(int irq, void *grp_id)
{
	gfar_schedule_cleanup((struct gfar_priv_grp *)grp_id);
	return IRQ_HANDLED;
}

static inline void gfar_rx_checksum(struct sk_buff *skb, struct rxfcb *fcb)
{
	/* If valid headers were found, and valid sums
	 * were verified, then we tell the kernel that no
	 * checksumming is necessary.  Otherwise, it is */
	if ((fcb->flags & RXFCB_CSUM_MASK) == (RXFCB_CIP | RXFCB_CTU))
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb_checksum_none_assert(skb);
}

/* gfar_process_frame() -- handle one incoming packet if skb isn't NULL. */
static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
			      int amount_pull)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct rxfcb *fcb = NULL;

	int ret;

	/* The FCB, if present, sits at the beginning of the frame */
	fcb = (struct rxfcb *)skb->data;

	/* Remove the FCB from the skb,
	 * and remove the padded bytes, if there are any */
	if (amount_pull) {
		skb_record_rx_queue(skb, fcb->rq);
		skb_pull(skb, amount_pull);
	}

	/* Get receive timestamp from the skb */
	if (priv->hwts_rx_en) {
		struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
		u64 *ns = (u64 *)skb->data;

		memset(shhwtstamps, 0, sizeof(*shhwtstamps));
		shhwtstamps->hwtstamp = ns_to_ktime(*ns);
	}

	if (priv->padding)
		skb_pull(skb, priv->padding);

	if (dev->features & NETIF_F_RXCSUM)
		gfar_rx_checksum(skb, fcb);

	/* Tell the skb what kind of packet this is */
	skb->protocol = eth_type_trans(skb, dev);

	/* We need to check for NETIF_F_HW_VLAN_RX here even though VLAN
	 * RX acceleration may be disabled: on some chips RXFCB_VLN is
	 * set pseudo-randomly, so it can't be trusted on its own.
	 */
	if (dev->features & NETIF_F_HW_VLAN_RX &&
	    fcb->flags & RXFCB_VLN)
		__vlan_hwaccel_put_tag(skb, fcb->vlctl);

	/* Send the packet up the stack */
	ret = netif_receive_skb(skb);

	if (NET_RX_DROP == ret)
		priv->extra_stats.kernel_dropped++;

	return 0;
}
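/* Worked example (illustrative): with the FCB in use and a
 * hypothetical priv->padding of 2, gfar_process_frame() pulls
 * GMAC_FCB_LEN = 8 bytes of FCB and then the padding bytes (which,
 * judging from the code above, is also where the 8-byte RX timestamp
 * lives when hwts_rx_en is set), so eth_type_trans() sees the frame
 * starting at its Ethernet header.
 */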
/* gfar_clean_rx_ring() -- Processes each frame in the rx ring
 * until the budget/quota has been reached.  Returns the number
 * of frames handled
 */
int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)
{
	struct net_device *dev = rx_queue->dev;
	struct rxbd8 *bdp, *base;
	struct sk_buff *skb;
	int pkt_len;
	int amount_pull;
	int howmany = 0;
	struct gfar_private *priv = netdev_priv(dev);

	/* Get the first full descriptor */
	bdp = rx_queue->cur_rx;
	base = rx_queue->rx_bd_base;

	amount_pull = (gfar_uses_fcb(priv) ? GMAC_FCB_LEN : 0);

	while (!((bdp->status & RXBD_EMPTY) || (--rx_work_limit < 0))) {
		struct sk_buff *newskb;

		rmb();

		/* Add another skb for the future */
		newskb = gfar_new_skb(dev);

		skb = rx_queue->rx_skbuff[rx_queue->skb_currx];

		dma_unmap_single(&priv->ofdev->dev, bdp->bufPtr,
				 priv->rx_buffer_size, DMA_FROM_DEVICE);

		if (unlikely(!(bdp->status & RXBD_ERR) &&
			     bdp->length > priv->rx_buffer_size))
			bdp->status = RXBD_LARGE;

		/* We drop the frame if we failed to allocate a new buffer */
		if (unlikely(!newskb || !(bdp->status & RXBD_LAST) ||
			     bdp->status & RXBD_ERR)) {
			count_errors(bdp->status, dev);

			if (unlikely(!newskb))
				newskb = skb;
			else if (skb)
				skb_queue_head(&priv->rx_recycle, skb);
		} else {
			/* Increment the number of packets */
			rx_queue->stats.rx_packets++;
			howmany++;

			if (likely(skb)) {
				/* Remove the FCS from the packet length */
				pkt_len = bdp->length - ETH_FCS_LEN;
				skb_put(skb, pkt_len);
				rx_queue->stats.rx_bytes += pkt_len;
				skb_record_rx_queue(skb, rx_queue->qindex);
				gfar_process_frame(dev, skb, amount_pull);
			} else {
				netif_warn(priv, rx_err, dev, "Missing skb!\n");
				rx_queue->stats.rx_dropped++;
				priv->extra_stats.rx_skbmissing++;
			}
		}

		rx_queue->rx_skbuff[rx_queue->skb_currx] = newskb;

		/* Setup the new bdp */
		gfar_new_rxbdp(rx_queue, bdp, newskb);

		/* Update to the next pointer */
		bdp = next_bd(bdp, base, rx_queue->rx_ring_size);

		/* update to point at the next skb */
		rx_queue->skb_currx = (rx_queue->skb_currx + 1) &
				      RX_RING_MOD_MASK(rx_queue->rx_ring_size);
	}

	/* Update the current rxbd pointer to be the next one */
	rx_queue->cur_rx = bdp;

	return howmany;
}
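/* Illustrative walkthrough of the budget splitting in gfar_poll()
 * below: with budget = 64 and two RX queues in the group, each queue
 * is first offered 32.  If queue 0 only had 10 frames, its unused 22
 * are pooled and offered to the still-unserviced queues on the next
 * pass, so a busy queue can consume the whole leftover budget.
 */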
static int gfar_poll(struct napi_struct *napi, int budget)
{
	struct gfar_priv_grp *gfargrp = container_of(napi,
						     struct gfar_priv_grp,
						     napi);
	struct gfar_private *priv = gfargrp->priv;
	struct gfar __iomem *regs = gfargrp->regs;
	struct gfar_priv_tx_q *tx_queue = NULL;
	struct gfar_priv_rx_q *rx_queue = NULL;
	int rx_cleaned = 0, budget_per_queue = 0, rx_cleaned_per_queue = 0;
	int tx_cleaned = 0, i, left_over_budget = budget;
	unsigned long serviced_queues = 0;
	int num_queues = 0;

	num_queues = gfargrp->num_rx_queues;
	budget_per_queue = budget / num_queues;

	/* Clear IEVENT, so interrupts aren't called again
	 * because of the packets that have already arrived */
	gfar_write(&regs->ievent, IEVENT_RTX_MASK);

	while (num_queues && left_over_budget) {
		budget_per_queue = left_over_budget / num_queues;
		left_over_budget = 0;

		for_each_set_bit(i, &gfargrp->rx_bit_map, priv->num_rx_queues) {
			if (test_bit(i, &serviced_queues))
				continue;
			rx_queue = priv->rx_queue[i];
			tx_queue = priv->tx_queue[rx_queue->qindex];

			tx_cleaned += gfar_clean_tx_ring(tx_queue);
			rx_cleaned_per_queue =
				gfar_clean_rx_ring(rx_queue, budget_per_queue);
			rx_cleaned += rx_cleaned_per_queue;
			if (rx_cleaned_per_queue < budget_per_queue) {
				left_over_budget = left_over_budget +
						   (budget_per_queue -
						    rx_cleaned_per_queue);
				set_bit(i, &serviced_queues);
				num_queues--;
			}
		}
	}

	if (tx_cleaned)
		return budget;

	if (rx_cleaned < budget) {
		napi_complete(napi);

		/* Clear the halt bit in RSTAT */
		gfar_write(&regs->rstat, gfargrp->rstat);

		gfar_write(&regs->imask, IMASK_DEFAULT);

		/* If we are coalescing interrupts, update the timer;
		 * otherwise, clear it */
		gfar_configure_coalescing(priv,
					  gfargrp->rx_bit_map,
					  gfargrp->tx_bit_map);
	}

	return rx_cleaned;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/* Polling 'interrupt' - used by things like netconsole to send skbs
 * without having to re-enable interrupts.  It's not called while
 * the interrupt routine is executing.
 */
static void gfar_netpoll(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	int i = 0;

	/* If the device has multiple interrupts, run tx/rx */
	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
		for (i = 0; i < priv->num_grps; i++) {
			disable_irq(priv->gfargrp[i].interruptTransmit);
			disable_irq(priv->gfargrp[i].interruptReceive);
			disable_irq(priv->gfargrp[i].interruptError);
			gfar_interrupt(priv->gfargrp[i].interruptTransmit,
				       &priv->gfargrp[i]);
			enable_irq(priv->gfargrp[i].interruptError);
			enable_irq(priv->gfargrp[i].interruptReceive);
			enable_irq(priv->gfargrp[i].interruptTransmit);
		}
	} else {
		for (i = 0; i < priv->num_grps; i++) {
			disable_irq(priv->gfargrp[i].interruptTransmit);
			gfar_interrupt(priv->gfargrp[i].interruptTransmit,
				       &priv->gfargrp[i]);
			enable_irq(priv->gfargrp[i].interruptTransmit);
		}
	}
}
#endif

/* The interrupt handler for devices with one interrupt */
static irqreturn_t gfar_interrupt(int irq, void *grp_id)
{
	struct gfar_priv_grp *gfargrp = grp_id;

	/* Save ievent for future reference */
	u32 events = gfar_read(&gfargrp->regs->ievent);

	/* Check for reception */
	if (events & IEVENT_RX_MASK)
		gfar_receive(irq, grp_id);

	/* Check for transmit completion */
	if (events & IEVENT_TX_MASK)
		gfar_transmit(irq, grp_id);

	/* Check for errors */
	if (events & IEVENT_ERR_MASK)
		gfar_error(irq, grp_id);

	return IRQ_HANDLED;
}
/* Called every time the controller might need to be made
 * aware of new link state.  The PHY code conveys this
 * information through variables in the phydev structure, and this
 * function converts those variables into the appropriate
 * register values, and can bring down the device if needed.
 */
static void adjust_link(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	unsigned long flags;
	struct phy_device *phydev = priv->phydev;
	int new_state = 0;

	local_irq_save(flags);
	lock_tx_qs(priv);

	if (phydev->link) {
		u32 tempval = gfar_read(&regs->maccfg2);
		u32 ecntrl = gfar_read(&regs->ecntrl);

		/* Now we make sure that we can be in full duplex mode.
		 * If not, we operate in half-duplex mode. */
		if (phydev->duplex != priv->oldduplex) {
			new_state = 1;
			if (!(phydev->duplex))
				tempval &= ~(MACCFG2_FULL_DUPLEX);
			else
				tempval |= MACCFG2_FULL_DUPLEX;

			priv->oldduplex = phydev->duplex;
		}

		if (phydev->speed != priv->oldspeed) {
			new_state = 1;
			switch (phydev->speed) {
			case 1000:
				tempval =
				    ((tempval & ~(MACCFG2_IF)) | MACCFG2_GMII);

				ecntrl &= ~(ECNTRL_R100);
				break;
			case 100:
			case 10:
				tempval =
				    ((tempval & ~(MACCFG2_IF)) | MACCFG2_MII);

				/* Reduced mode distinguishes
				 * between 10 and 100 */
				if (phydev->speed == SPEED_100)
					ecntrl |= ECNTRL_R100;
				else
					ecntrl &= ~(ECNTRL_R100);
				break;
			default:
				netif_warn(priv, link, dev,
					   "Ack! Speed (%d) is not 10/100/1000!\n",
					   phydev->speed);
				break;
			}

			priv->oldspeed = phydev->speed;
		}

		gfar_write(&regs->maccfg2, tempval);
		gfar_write(&regs->ecntrl, ecntrl);

		if (!priv->oldlink) {
			new_state = 1;
			priv->oldlink = 1;
		}
	} else if (priv->oldlink) {
		new_state = 1;
		priv->oldlink = 0;
		priv->oldspeed = 0;
		priv->oldduplex = -1;
	}

	if (new_state && netif_msg_link(priv))
		phy_print_status(phydev);
	unlock_tx_qs(priv);
	local_irq_restore(flags);
}
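/* Worked example (illustrative): when the PHY reports 100 Mbit/s full
 * duplex, adjust_link() above leaves MACCFG2 in MII interface mode
 * with MACCFG2_FULL_DUPLEX set, and sets ECNTRL_R100 so a reduced-pin
 * (R[G]MII) interface runs at 100 rather than 10 Mbit/s.
 */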
/* Update the hash table based on the current list of multicast
 * addresses we subscribe to.  Also, change the promiscuity of
 * the device based on the flags (this function is called
 * whenever dev->flags is changed) */
static void gfar_set_multi(struct net_device *dev)
{
	struct netdev_hw_addr *ha;
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 tempval;

	if (dev->flags & IFF_PROMISC) {
		/* Set RCTRL to PROM */
		tempval = gfar_read(&regs->rctrl);
		tempval |= RCTRL_PROM;
		gfar_write(&regs->rctrl, tempval);
	} else {
		/* Set RCTRL to not PROM */
		tempval = gfar_read(&regs->rctrl);
		tempval &= ~(RCTRL_PROM);
		gfar_write(&regs->rctrl, tempval);
	}

	if (dev->flags & IFF_ALLMULTI) {
		/* Set the hash to rx all multicast frames */
		gfar_write(&regs->igaddr0, 0xffffffff);
		gfar_write(&regs->igaddr1, 0xffffffff);
		gfar_write(&regs->igaddr2, 0xffffffff);
		gfar_write(&regs->igaddr3, 0xffffffff);
		gfar_write(&regs->igaddr4, 0xffffffff);
		gfar_write(&regs->igaddr5, 0xffffffff);
		gfar_write(&regs->igaddr6, 0xffffffff);
		gfar_write(&regs->igaddr7, 0xffffffff);
		gfar_write(&regs->gaddr0, 0xffffffff);
		gfar_write(&regs->gaddr1, 0xffffffff);
		gfar_write(&regs->gaddr2, 0xffffffff);
		gfar_write(&regs->gaddr3, 0xffffffff);
		gfar_write(&regs->gaddr4, 0xffffffff);
		gfar_write(&regs->gaddr5, 0xffffffff);
		gfar_write(&regs->gaddr6, 0xffffffff);
		gfar_write(&regs->gaddr7, 0xffffffff);
	} else {
		int em_num;
		int idx;

		/* zero out the hash */
		gfar_write(&regs->igaddr0, 0x0);
		gfar_write(&regs->igaddr1, 0x0);
		gfar_write(&regs->igaddr2, 0x0);
		gfar_write(&regs->igaddr3, 0x0);
		gfar_write(&regs->igaddr4, 0x0);
		gfar_write(&regs->igaddr5, 0x0);
		gfar_write(&regs->igaddr6, 0x0);
		gfar_write(&regs->igaddr7, 0x0);
		gfar_write(&regs->gaddr0, 0x0);
		gfar_write(&regs->gaddr1, 0x0);
		gfar_write(&regs->gaddr2, 0x0);
		gfar_write(&regs->gaddr3, 0x0);
		gfar_write(&regs->gaddr4, 0x0);
		gfar_write(&regs->gaddr5, 0x0);
		gfar_write(&regs->gaddr6, 0x0);
		gfar_write(&regs->gaddr7, 0x0);

		/* If we have extended hash tables, we need to
		 * clear the exact match registers to prepare for
		 * setting them */
		if (priv->extended_hash) {
			em_num = GFAR_EM_NUM + 1;
			gfar_clear_exact_match(dev);
			idx = 1;
		} else {
			idx = 0;
			em_num = 0;
		}

		if (netdev_mc_empty(dev))
			return;

		/* Parse the list, and set the appropriate bits */
		netdev_for_each_mc_addr(ha, dev) {
			if (idx < em_num) {
				gfar_set_mac_for_addr(dev, idx, ha->addr);
				idx++;
			} else
				gfar_set_hash_for_addr(dev, ha->addr);
		}
	}
}

/* Clears each of the exact match registers to zero, so they
 * don't interfere with normal reception */
static void gfar_clear_exact_match(struct net_device *dev)
{
	int idx;
	static const u8 zero_arr[ETH_ALEN] = {0, 0, 0, 0, 0, 0};

	for (idx = 1; idx < GFAR_EM_NUM + 1; idx++)
		gfar_set_mac_for_addr(dev, idx, zero_arr);
}

/* Set the appropriate hash bit for the given addr */
/* The algorithm works like so:
 * 1) Take the Destination Address (ie the multicast address), and
 *    do a CRC on it (little endian), and reverse the bits of the
 *    result.
 * 2) Use the 8 most significant bits as a hash into a 256-entry
 *    table.  The table is controlled through 8 32-bit registers:
 *    gaddr0-7.  gaddr0's MSB is entry 0, and gaddr7's LSB is
 *    entry 255.  This means that the 3 most significant bits in the
 *    hash index indicate which gaddr register to use, and the 5
 *    other bits indicate which bit (assuming an IBM numbering
 *    scheme, which for PowerPC (tm) is usually the case) in the
 *    register holds the entry. */
static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr)
{
	u32 tempval;
	struct gfar_private *priv = netdev_priv(dev);
	u32 result = ether_crc(ETH_ALEN, addr);
	int width = priv->hash_width;
	u8 whichbit = (result >> (32 - width)) & 0x1f;
	u8 whichreg = result >> (32 - width + 5);
	u32 value = (1 << (31 - whichbit));

	tempval = gfar_read(priv->hash_regs[whichreg]);
	tempval |= value;
	gfar_write(priv->hash_regs[whichreg], tempval);
}
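/* Worked example (illustrative), assuming hash_width = 8: if
 * ether_crc() yields a result whose top 8 bits are 0b10101101, then
 * whichreg = 0b101 = 5 and whichbit = 0b01101 = 13, so bit
 * (31 - 13) = 18 of hash_regs[5] is set.  With IBM bit numbering
 * (bit 0 = MSB) that is exactly "bit 13 of gaddr5", i.e. hash-table
 * entry 5 * 32 + 13 = 173.
 */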
/* There are multiple MAC Address register pairs on some controllers.
 * This function sets the numth pair to a given address
 */
static void gfar_set_mac_for_addr(struct net_device *dev, int num,
				  const u8 *addr)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	int idx;
	char tmpbuf[ETH_ALEN];
	u32 tempval;
	u32 __iomem *macptr = &regs->macstnaddr1;

	macptr += num * 2;

	/* Now copy it into the mac registers backwards, cuz
	 * little endian is silly */
	for (idx = 0; idx < ETH_ALEN; idx++)
		tmpbuf[ETH_ALEN - 1 - idx] = addr[idx];

	gfar_write(macptr, *((u32 *)(tmpbuf)));

	tempval = *((u32 *)(tmpbuf + 4));

	gfar_write(macptr + 1, tempval);
}

/* GFAR error interrupt handler */
static irqreturn_t gfar_error(int irq, void *grp_id)
{
	struct gfar_priv_grp *gfargrp = grp_id;
	struct gfar __iomem *regs = gfargrp->regs;
	struct gfar_private *priv = gfargrp->priv;
	struct net_device *dev = priv->ndev;

	/* Save ievent for future reference */
	u32 events = gfar_read(&regs->ievent);

	/* Clear IEVENT */
	gfar_write(&regs->ievent, events & IEVENT_ERR_MASK);

	/* Magic Packet is not an error. */
	if ((priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET) &&
	    (events & IEVENT_MAG))
		events &= ~IEVENT_MAG;

	/* Hmm... */
	if (netif_msg_rx_err(priv) || netif_msg_tx_err(priv))
		netdev_dbg(dev,
			   "error interrupt (ievent=0x%08x imask=0x%08x)\n",
			   events, gfar_read(&regs->imask));

	/* Update the error counters */
	if (events & IEVENT_TXE) {
		dev->stats.tx_errors++;

		if (events & IEVENT_LC)
			dev->stats.tx_window_errors++;
		if (events & IEVENT_CRL)
			dev->stats.tx_aborted_errors++;
		if (events & IEVENT_XFUN) {
			unsigned long flags;

			netif_dbg(priv, tx_err, dev,
				  "TX FIFO underrun, packet dropped\n");
			dev->stats.tx_dropped++;
			priv->extra_stats.tx_underrun++;

			local_irq_save(flags);
			lock_tx_qs(priv);

			/* Reactivate the Tx Queues */
			gfar_write(&regs->tstat, gfargrp->tstat);

			unlock_tx_qs(priv);
			local_irq_restore(flags);
		}
		netif_dbg(priv, tx_err, dev, "Transmit Error\n");
	}
	if (events & IEVENT_BSY) {
		dev->stats.rx_errors++;
		priv->extra_stats.rx_bsy++;

		gfar_receive(irq, grp_id);

		netif_dbg(priv, rx_err, dev, "busy error (rstat: %x)\n",
			  gfar_read(&regs->rstat));
	}
	if (events & IEVENT_BABR) {
		dev->stats.rx_errors++;
		priv->extra_stats.rx_babr++;

		netif_dbg(priv, rx_err, dev, "babbling RX error\n");
	}
	if (events & IEVENT_EBERR) {
		priv->extra_stats.eberr++;
		netif_dbg(priv, rx_err, dev, "bus error\n");
	}
	if (events & IEVENT_RXC)
		netif_dbg(priv, rx_status, dev, "control frame\n");

	if (events & IEVENT_BABT) {
		priv->extra_stats.tx_babt++;
		netif_dbg(priv, tx_err, dev, "babbling TX error\n");
	}
	return IRQ_HANDLED;
}

static struct of_device_id gfar_match[] = {
	{
		.type = "network",
		.compatible = "gianfar",
	},
	{
		.compatible = "fsl,etsec2",
	},
	{},
};
MODULE_DEVICE_TABLE(of, gfar_match);

/* Structure for a device driver */
static struct platform_driver gfar_driver = {
	.driver = {
		.name = "fsl-gianfar",
		.owner = THIS_MODULE,
		.pm = GFAR_PM_OPS,
		.of_match_table = gfar_match,
	},
	.probe = gfar_probe,
	.remove = gfar_remove,
};

module_platform_driver(gfar_driver);