1 /* 2 * drivers/net/ethernet/freescale/gianfar.c 3 * 4 * Gianfar Ethernet Driver 5 * This driver is designed for the non-CPM ethernet controllers 6 * on the 85xx and 83xx family of integrated processors 7 * Based on 8260_io/fcc_enet.c 8 * 9 * Author: Andy Fleming 10 * Maintainer: Kumar Gala 11 * Modifier: Sandeep Gopalpet <sandeep.kumar@freescale.com> 12 * 13 * Copyright 2002-2009, 2011 Freescale Semiconductor, Inc. 14 * Copyright 2007 MontaVista Software, Inc. 15 * 16 * This program is free software; you can redistribute it and/or modify it 17 * under the terms of the GNU General Public License as published by the 18 * Free Software Foundation; either version 2 of the License, or (at your 19 * option) any later version. 20 * 21 * Gianfar: AKA Lambda Draconis, "Dragon" 22 * RA 11 31 24.2 23 * Dec +69 19 52 24 * V 3.84 25 * B-V +1.62 26 * 27 * Theory of operation 28 * 29 * The driver is initialized through of_device. Configuration information 30 * is therefore conveyed through an OF-style device tree. 31 * 32 * The Gianfar Ethernet Controller uses a ring of buffer 33 * descriptors. The beginning is indicated by a register 34 * pointing to the physical address of the start of the ring. 35 * The end is determined by a "wrap" bit being set in the 36 * last descriptor of the ring. 37 * 38 * When a packet is received, the RXF bit in the 39 * IEVENT register is set, triggering an interrupt when the 40 * corresponding bit in the IMASK register is also set (if 41 * interrupt coalescing is active, then the interrupt may not 42 * happen immediately, but will wait until either a set number 43 * of frames or amount of time have passed). In NAPI, the 44 * interrupt handler will signal there is work to be done, and 45 * exit. This method will start at the last known empty 46 * descriptor, and process every subsequent descriptor until there 47 * are none left with data (NAPI will stop after a set number of 48 * packets to give time to other tasks, but will eventually 49 * process all the packets). The data arrives inside a 50 * pre-allocated skb, and so after the skb is passed up to the 51 * stack, a new skb must be allocated, and the address field in 52 * the buffer descriptor must be updated to indicate this new 53 * skb. 54 * 55 * When the kernel requests that a packet be transmitted, the 56 * driver starts where it left off last time, and points the 57 * descriptor at the buffer which was passed in. The driver 58 * then informs the DMA engine that there are packets ready to 59 * be transmitted. Once the controller is finished transmitting 60 * the packet, an interrupt may be triggered (under the same 61 * conditions as for reception, but depending on the TXF bit). 62 * The driver then cleans up the buffer. 63 */ 64 65 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 66 #define DEBUG 67 68 #include <linux/kernel.h> 69 #include <linux/string.h> 70 #include <linux/errno.h> 71 #include <linux/unistd.h> 72 #include <linux/slab.h> 73 #include <linux/interrupt.h> 74 #include <linux/init.h> 75 #include <linux/delay.h> 76 #include <linux/netdevice.h> 77 #include <linux/etherdevice.h> 78 #include <linux/skbuff.h> 79 #include <linux/if_vlan.h> 80 #include <linux/spinlock.h> 81 #include <linux/mm.h> 82 #include <linux/of_mdio.h> 83 #include <linux/of_platform.h> 84 #include <linux/ip.h> 85 #include <linux/tcp.h> 86 #include <linux/udp.h> 87 #include <linux/in.h> 88 #include <linux/net_tstamp.h> 89 90 #include <asm/io.h> 91 #include <asm/reg.h> 92 #include <asm/irq.h> 93 #include <asm/uaccess.h> 94 #include <linux/module.h> 95 #include <linux/dma-mapping.h> 96 #include <linux/crc32.h> 97 #include <linux/mii.h> 98 #include <linux/phy.h> 99 #include <linux/phy_fixed.h> 100 #include <linux/of.h> 101 #include <linux/of_net.h> 102 103 #include "gianfar.h" 104 #include "fsl_pq_mdio.h" 105 106 #define TX_TIMEOUT (1*HZ) 107 108 const char gfar_driver_version[] = "1.3"; 109 110 static int gfar_enet_open(struct net_device *dev); 111 static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev); 112 static void gfar_reset_task(struct work_struct *work); 113 static void gfar_timeout(struct net_device *dev); 114 static int gfar_close(struct net_device *dev); 115 struct sk_buff *gfar_new_skb(struct net_device *dev); 116 static void gfar_new_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp, 117 struct sk_buff *skb); 118 static int gfar_set_mac_address(struct net_device *dev); 119 static int gfar_change_mtu(struct net_device *dev, int new_mtu); 120 static irqreturn_t gfar_error(int irq, void *dev_id); 121 static irqreturn_t gfar_transmit(int irq, void *dev_id); 122 static irqreturn_t gfar_interrupt(int irq, void *dev_id); 123 static void adjust_link(struct net_device *dev); 124 static void init_registers(struct net_device *dev); 125 static int init_phy(struct net_device *dev); 126 static int gfar_probe(struct platform_device *ofdev); 127 static int gfar_remove(struct platform_device *ofdev); 128 static void free_skb_resources(struct gfar_private *priv); 129 static void gfar_set_multi(struct net_device *dev); 130 static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr); 131 static void gfar_configure_serdes(struct net_device *dev); 132 static int gfar_poll(struct napi_struct *napi, int budget); 133 #ifdef CONFIG_NET_POLL_CONTROLLER 134 static void gfar_netpoll(struct net_device *dev); 135 #endif 136 int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit); 137 static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue); 138 static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb, 139 int amount_pull); 140 void gfar_halt(struct net_device *dev); 141 static void gfar_halt_nodisable(struct net_device *dev); 142 void gfar_start(struct net_device *dev); 143 static void gfar_clear_exact_match(struct net_device *dev); 144 static void gfar_set_mac_for_addr(struct net_device *dev, int num, 145 const u8 *addr); 146 static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); 147 148 MODULE_AUTHOR("Freescale Semiconductor, Inc"); 149 MODULE_DESCRIPTION("Gianfar Ethernet Driver"); 150 MODULE_LICENSE("GPL"); 151 152 static void gfar_init_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp, 153 dma_addr_t buf) 154 { 155 u32 lstatus; 156 157 bdp->bufPtr = buf; 158 159 lstatus = BD_LFLAG(RXBD_EMPTY | RXBD_INTERRUPT); 160 if (bdp == rx_queue->rx_bd_base + rx_queue->rx_ring_size - 1) 161 lstatus |= BD_LFLAG(RXBD_WRAP); 162 163 eieio(); 164 165 bdp->lstatus = lstatus; 166 } 167 168 static int gfar_init_bds(struct net_device *ndev) 169 { 170 struct gfar_private *priv = netdev_priv(ndev); 171 struct gfar_priv_tx_q *tx_queue = NULL; 172 struct gfar_priv_rx_q *rx_queue = NULL; 173 struct txbd8 *txbdp; 174 struct rxbd8 *rxbdp; 175 int i, j; 176 177 for (i = 0; i < priv->num_tx_queues; i++) { 178 tx_queue = priv->tx_queue[i]; 179 /* Initialize some variables in our dev structure */ 180 tx_queue->num_txbdfree = tx_queue->tx_ring_size; 181 tx_queue->dirty_tx = tx_queue->tx_bd_base; 182 tx_queue->cur_tx = tx_queue->tx_bd_base; 183 tx_queue->skb_curtx = 0; 184 tx_queue->skb_dirtytx = 0; 185 186 /* Initialize Transmit Descriptor Ring */ 187 txbdp = tx_queue->tx_bd_base; 188 for (j = 0; j < tx_queue->tx_ring_size; j++) { 189 txbdp->lstatus = 0; 190 txbdp->bufPtr = 0; 191 txbdp++; 192 } 193 194 /* Set the last descriptor in the ring to indicate wrap */ 195 txbdp--; 196 txbdp->status |= TXBD_WRAP; 197 } 198 199 for (i = 0; i < priv->num_rx_queues; i++) { 200 rx_queue = priv->rx_queue[i]; 201 rx_queue->cur_rx = rx_queue->rx_bd_base; 202 rx_queue->skb_currx = 0; 203 rxbdp = rx_queue->rx_bd_base; 204 205 for (j = 0; j < rx_queue->rx_ring_size; j++) { 206 struct sk_buff *skb = rx_queue->rx_skbuff[j]; 207 208 if (skb) { 209 gfar_init_rxbdp(rx_queue, rxbdp, 210 rxbdp->bufPtr); 211 } else { 212 skb = gfar_new_skb(ndev); 213 if (!skb) { 214 netdev_err(ndev, "Can't allocate RX buffers\n"); 215 goto err_rxalloc_fail; 216 } 217 rx_queue->rx_skbuff[j] = skb; 218 219 gfar_new_rxbdp(rx_queue, rxbdp, skb); 220 } 221 222 rxbdp++; 223 } 224 225 } 226 227 return 0; 228 229 err_rxalloc_fail: 230 free_skb_resources(priv); 231 return -ENOMEM; 232 } 233 234 static int gfar_alloc_skb_resources(struct net_device *ndev) 235 { 236 void *vaddr; 237 dma_addr_t addr; 238 int i, j, k; 239 struct gfar_private *priv = netdev_priv(ndev); 240 struct device *dev = &priv->ofdev->dev; 241 struct gfar_priv_tx_q *tx_queue = NULL; 242 struct gfar_priv_rx_q *rx_queue = NULL; 243 244 priv->total_tx_ring_size = 0; 245 for (i = 0; i < priv->num_tx_queues; i++) 246 priv->total_tx_ring_size += priv->tx_queue[i]->tx_ring_size; 247 248 priv->total_rx_ring_size = 0; 249 for (i = 0; i < priv->num_rx_queues; i++) 250 priv->total_rx_ring_size += priv->rx_queue[i]->rx_ring_size; 251 252 /* Allocate memory for the buffer descriptors */ 253 vaddr = dma_alloc_coherent(dev, 254 sizeof(struct txbd8) * priv->total_tx_ring_size + 255 sizeof(struct rxbd8) * priv->total_rx_ring_size, 256 &addr, GFP_KERNEL); 257 if (!vaddr) { 258 netif_err(priv, ifup, ndev, 259 "Could not allocate buffer descriptors!\n"); 260 return -ENOMEM; 261 } 262 263 for (i = 0; i < priv->num_tx_queues; i++) { 264 tx_queue = priv->tx_queue[i]; 265 tx_queue->tx_bd_base = vaddr; 266 tx_queue->tx_bd_dma_base = addr; 267 tx_queue->dev = ndev; 268 /* enet DMA only understands physical addresses */ 269 addr += sizeof(struct txbd8) *tx_queue->tx_ring_size; 270 vaddr += sizeof(struct txbd8) *tx_queue->tx_ring_size; 271 } 272 273 /* Start the rx descriptor ring where the tx ring leaves off */ 274 for (i = 0; i < priv->num_rx_queues; i++) { 275 rx_queue = priv->rx_queue[i]; 276 rx_queue->rx_bd_base = vaddr; 277 rx_queue->rx_bd_dma_base = addr; 278 rx_queue->dev = ndev; 279 addr += sizeof (struct rxbd8) * rx_queue->rx_ring_size; 280 vaddr += sizeof (struct rxbd8) * rx_queue->rx_ring_size; 281 } 282 283 /* Setup the skbuff rings */ 284 for (i = 0; i < priv->num_tx_queues; i++) { 285 tx_queue = priv->tx_queue[i]; 286 tx_queue->tx_skbuff = kmalloc(sizeof(*tx_queue->tx_skbuff) * 287 tx_queue->tx_ring_size, GFP_KERNEL); 288 if (!tx_queue->tx_skbuff) { 289 netif_err(priv, ifup, ndev, 290 "Could not allocate tx_skbuff\n"); 291 goto cleanup; 292 } 293 294 for (k = 0; k < tx_queue->tx_ring_size; k++) 295 tx_queue->tx_skbuff[k] = NULL; 296 } 297 298 for (i = 0; i < priv->num_rx_queues; i++) { 299 rx_queue = priv->rx_queue[i]; 300 rx_queue->rx_skbuff = kmalloc(sizeof(*rx_queue->rx_skbuff) * 301 rx_queue->rx_ring_size, GFP_KERNEL); 302 303 if (!rx_queue->rx_skbuff) { 304 netif_err(priv, ifup, ndev, 305 "Could not allocate rx_skbuff\n"); 306 goto cleanup; 307 } 308 309 for (j = 0; j < rx_queue->rx_ring_size; j++) 310 rx_queue->rx_skbuff[j] = NULL; 311 } 312 313 if (gfar_init_bds(ndev)) 314 goto cleanup; 315 316 return 0; 317 318 cleanup: 319 free_skb_resources(priv); 320 return -ENOMEM; 321 } 322 323 static void gfar_init_tx_rx_base(struct gfar_private *priv) 324 { 325 struct gfar __iomem *regs = priv->gfargrp[0].regs; 326 u32 __iomem *baddr; 327 int i; 328 329 baddr = ®s->tbase0; 330 for(i = 0; i < priv->num_tx_queues; i++) { 331 gfar_write(baddr, priv->tx_queue[i]->tx_bd_dma_base); 332 baddr += 2; 333 } 334 335 baddr = ®s->rbase0; 336 for(i = 0; i < priv->num_rx_queues; i++) { 337 gfar_write(baddr, priv->rx_queue[i]->rx_bd_dma_base); 338 baddr += 2; 339 } 340 } 341 342 static void gfar_init_mac(struct net_device *ndev) 343 { 344 struct gfar_private *priv = netdev_priv(ndev); 345 struct gfar __iomem *regs = priv->gfargrp[0].regs; 346 u32 rctrl = 0; 347 u32 tctrl = 0; 348 u32 attrs = 0; 349 350 /* write the tx/rx base registers */ 351 gfar_init_tx_rx_base(priv); 352 353 /* Configure the coalescing support */ 354 gfar_configure_coalescing(priv, 0xFF, 0xFF); 355 356 if (priv->rx_filer_enable) { 357 rctrl |= RCTRL_FILREN; 358 /* Program the RIR0 reg with the required distribution */ 359 gfar_write(®s->rir0, DEFAULT_RIR0); 360 } 361 362 if (ndev->features & NETIF_F_RXCSUM) 363 rctrl |= RCTRL_CHECKSUMMING; 364 365 if (priv->extended_hash) { 366 rctrl |= RCTRL_EXTHASH; 367 368 gfar_clear_exact_match(ndev); 369 rctrl |= RCTRL_EMEN; 370 } 371 372 if (priv->padding) { 373 rctrl &= ~RCTRL_PAL_MASK; 374 rctrl |= RCTRL_PADDING(priv->padding); 375 } 376 377 /* Insert receive time stamps into padding alignment bytes */ 378 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER) { 379 rctrl &= ~RCTRL_PAL_MASK; 380 rctrl |= RCTRL_PADDING(8); 381 priv->padding = 8; 382 } 383 384 /* Enable HW time stamping if requested from user space */ 385 if (priv->hwts_rx_en) 386 rctrl |= RCTRL_PRSDEP_INIT | RCTRL_TS_ENABLE; 387 388 if (ndev->features & NETIF_F_HW_VLAN_RX) 389 rctrl |= RCTRL_VLEX | RCTRL_PRSDEP_INIT; 390 391 /* Init rctrl based on our settings */ 392 gfar_write(®s->rctrl, rctrl); 393 394 if (ndev->features & NETIF_F_IP_CSUM) 395 tctrl |= TCTRL_INIT_CSUM; 396 397 tctrl |= TCTRL_TXSCHED_PRIO; 398 399 gfar_write(®s->tctrl, tctrl); 400 401 /* Set the extraction length and index */ 402 attrs = ATTRELI_EL(priv->rx_stash_size) | 403 ATTRELI_EI(priv->rx_stash_index); 404 405 gfar_write(®s->attreli, attrs); 406 407 /* Start with defaults, and add stashing or locking 408 * depending on the approprate variables */ 409 attrs = ATTR_INIT_SETTINGS; 410 411 if (priv->bd_stash_en) 412 attrs |= ATTR_BDSTASH; 413 414 if (priv->rx_stash_size != 0) 415 attrs |= ATTR_BUFSTASH; 416 417 gfar_write(®s->attr, attrs); 418 419 gfar_write(®s->fifo_tx_thr, priv->fifo_threshold); 420 gfar_write(®s->fifo_tx_starve, priv->fifo_starve); 421 gfar_write(®s->fifo_tx_starve_shutoff, priv->fifo_starve_off); 422 } 423 424 static struct net_device_stats *gfar_get_stats(struct net_device *dev) 425 { 426 struct gfar_private *priv = netdev_priv(dev); 427 unsigned long rx_packets = 0, rx_bytes = 0, rx_dropped = 0; 428 unsigned long tx_packets = 0, tx_bytes = 0; 429 int i = 0; 430 431 for (i = 0; i < priv->num_rx_queues; i++) { 432 rx_packets += priv->rx_queue[i]->stats.rx_packets; 433 rx_bytes += priv->rx_queue[i]->stats.rx_bytes; 434 rx_dropped += priv->rx_queue[i]->stats.rx_dropped; 435 } 436 437 dev->stats.rx_packets = rx_packets; 438 dev->stats.rx_bytes = rx_bytes; 439 dev->stats.rx_dropped = rx_dropped; 440 441 for (i = 0; i < priv->num_tx_queues; i++) { 442 tx_bytes += priv->tx_queue[i]->stats.tx_bytes; 443 tx_packets += priv->tx_queue[i]->stats.tx_packets; 444 } 445 446 dev->stats.tx_bytes = tx_bytes; 447 dev->stats.tx_packets = tx_packets; 448 449 return &dev->stats; 450 } 451 452 static const struct net_device_ops gfar_netdev_ops = { 453 .ndo_open = gfar_enet_open, 454 .ndo_start_xmit = gfar_start_xmit, 455 .ndo_stop = gfar_close, 456 .ndo_change_mtu = gfar_change_mtu, 457 .ndo_set_features = gfar_set_features, 458 .ndo_set_rx_mode = gfar_set_multi, 459 .ndo_tx_timeout = gfar_timeout, 460 .ndo_do_ioctl = gfar_ioctl, 461 .ndo_get_stats = gfar_get_stats, 462 .ndo_set_mac_address = eth_mac_addr, 463 .ndo_validate_addr = eth_validate_addr, 464 #ifdef CONFIG_NET_POLL_CONTROLLER 465 .ndo_poll_controller = gfar_netpoll, 466 #endif 467 }; 468 469 void lock_rx_qs(struct gfar_private *priv) 470 { 471 int i = 0x0; 472 473 for (i = 0; i < priv->num_rx_queues; i++) 474 spin_lock(&priv->rx_queue[i]->rxlock); 475 } 476 477 void lock_tx_qs(struct gfar_private *priv) 478 { 479 int i = 0x0; 480 481 for (i = 0; i < priv->num_tx_queues; i++) 482 spin_lock(&priv->tx_queue[i]->txlock); 483 } 484 485 void unlock_rx_qs(struct gfar_private *priv) 486 { 487 int i = 0x0; 488 489 for (i = 0; i < priv->num_rx_queues; i++) 490 spin_unlock(&priv->rx_queue[i]->rxlock); 491 } 492 493 void unlock_tx_qs(struct gfar_private *priv) 494 { 495 int i = 0x0; 496 497 for (i = 0; i < priv->num_tx_queues; i++) 498 spin_unlock(&priv->tx_queue[i]->txlock); 499 } 500 501 static bool gfar_is_vlan_on(struct gfar_private *priv) 502 { 503 return (priv->ndev->features & NETIF_F_HW_VLAN_RX) || 504 (priv->ndev->features & NETIF_F_HW_VLAN_TX); 505 } 506 507 /* Returns 1 if incoming frames use an FCB */ 508 static inline int gfar_uses_fcb(struct gfar_private *priv) 509 { 510 return gfar_is_vlan_on(priv) || 511 (priv->ndev->features & NETIF_F_RXCSUM) || 512 (priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER); 513 } 514 515 static void free_tx_pointers(struct gfar_private *priv) 516 { 517 int i = 0; 518 519 for (i = 0; i < priv->num_tx_queues; i++) 520 kfree(priv->tx_queue[i]); 521 } 522 523 static void free_rx_pointers(struct gfar_private *priv) 524 { 525 int i = 0; 526 527 for (i = 0; i < priv->num_rx_queues; i++) 528 kfree(priv->rx_queue[i]); 529 } 530 531 static void unmap_group_regs(struct gfar_private *priv) 532 { 533 int i = 0; 534 535 for (i = 0; i < MAXGROUPS; i++) 536 if (priv->gfargrp[i].regs) 537 iounmap(priv->gfargrp[i].regs); 538 } 539 540 static void disable_napi(struct gfar_private *priv) 541 { 542 int i = 0; 543 544 for (i = 0; i < priv->num_grps; i++) 545 napi_disable(&priv->gfargrp[i].napi); 546 } 547 548 static void enable_napi(struct gfar_private *priv) 549 { 550 int i = 0; 551 552 for (i = 0; i < priv->num_grps; i++) 553 napi_enable(&priv->gfargrp[i].napi); 554 } 555 556 static int gfar_parse_group(struct device_node *np, 557 struct gfar_private *priv, const char *model) 558 { 559 u32 *queue_mask; 560 561 priv->gfargrp[priv->num_grps].regs = of_iomap(np, 0); 562 if (!priv->gfargrp[priv->num_grps].regs) 563 return -ENOMEM; 564 565 priv->gfargrp[priv->num_grps].interruptTransmit = 566 irq_of_parse_and_map(np, 0); 567 568 /* If we aren't the FEC we have multiple interrupts */ 569 if (model && strcasecmp(model, "FEC")) { 570 priv->gfargrp[priv->num_grps].interruptReceive = 571 irq_of_parse_and_map(np, 1); 572 priv->gfargrp[priv->num_grps].interruptError = 573 irq_of_parse_and_map(np,2); 574 if (priv->gfargrp[priv->num_grps].interruptTransmit == NO_IRQ || 575 priv->gfargrp[priv->num_grps].interruptReceive == NO_IRQ || 576 priv->gfargrp[priv->num_grps].interruptError == NO_IRQ) 577 return -EINVAL; 578 } 579 580 priv->gfargrp[priv->num_grps].grp_id = priv->num_grps; 581 priv->gfargrp[priv->num_grps].priv = priv; 582 spin_lock_init(&priv->gfargrp[priv->num_grps].grplock); 583 if(priv->mode == MQ_MG_MODE) { 584 queue_mask = (u32 *)of_get_property(np, 585 "fsl,rx-bit-map", NULL); 586 priv->gfargrp[priv->num_grps].rx_bit_map = 587 queue_mask ? *queue_mask :(DEFAULT_MAPPING >> priv->num_grps); 588 queue_mask = (u32 *)of_get_property(np, 589 "fsl,tx-bit-map", NULL); 590 priv->gfargrp[priv->num_grps].tx_bit_map = 591 queue_mask ? *queue_mask : (DEFAULT_MAPPING >> priv->num_grps); 592 } else { 593 priv->gfargrp[priv->num_grps].rx_bit_map = 0xFF; 594 priv->gfargrp[priv->num_grps].tx_bit_map = 0xFF; 595 } 596 priv->num_grps++; 597 598 return 0; 599 } 600 601 static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev) 602 { 603 const char *model; 604 const char *ctype; 605 const void *mac_addr; 606 int err = 0, i; 607 struct net_device *dev = NULL; 608 struct gfar_private *priv = NULL; 609 struct device_node *np = ofdev->dev.of_node; 610 struct device_node *child = NULL; 611 const u32 *stash; 612 const u32 *stash_len; 613 const u32 *stash_idx; 614 unsigned int num_tx_qs, num_rx_qs; 615 u32 *tx_queues, *rx_queues; 616 617 if (!np || !of_device_is_available(np)) 618 return -ENODEV; 619 620 /* parse the num of tx and rx queues */ 621 tx_queues = (u32 *)of_get_property(np, "fsl,num_tx_queues", NULL); 622 num_tx_qs = tx_queues ? *tx_queues : 1; 623 624 if (num_tx_qs > MAX_TX_QS) { 625 pr_err("num_tx_qs(=%d) greater than MAX_TX_QS(=%d)\n", 626 num_tx_qs, MAX_TX_QS); 627 pr_err("Cannot do alloc_etherdev, aborting\n"); 628 return -EINVAL; 629 } 630 631 rx_queues = (u32 *)of_get_property(np, "fsl,num_rx_queues", NULL); 632 num_rx_qs = rx_queues ? *rx_queues : 1; 633 634 if (num_rx_qs > MAX_RX_QS) { 635 pr_err("num_rx_qs(=%d) greater than MAX_RX_QS(=%d)\n", 636 num_rx_qs, MAX_RX_QS); 637 pr_err("Cannot do alloc_etherdev, aborting\n"); 638 return -EINVAL; 639 } 640 641 *pdev = alloc_etherdev_mq(sizeof(*priv), num_tx_qs); 642 dev = *pdev; 643 if (NULL == dev) 644 return -ENOMEM; 645 646 priv = netdev_priv(dev); 647 priv->node = ofdev->dev.of_node; 648 priv->ndev = dev; 649 650 priv->num_tx_queues = num_tx_qs; 651 netif_set_real_num_rx_queues(dev, num_rx_qs); 652 priv->num_rx_queues = num_rx_qs; 653 priv->num_grps = 0x0; 654 655 /* Init Rx queue filer rule set linked list*/ 656 INIT_LIST_HEAD(&priv->rx_list.list); 657 priv->rx_list.count = 0; 658 mutex_init(&priv->rx_queue_access); 659 660 model = of_get_property(np, "model", NULL); 661 662 for (i = 0; i < MAXGROUPS; i++) 663 priv->gfargrp[i].regs = NULL; 664 665 /* Parse and initialize group specific information */ 666 if (of_device_is_compatible(np, "fsl,etsec2")) { 667 priv->mode = MQ_MG_MODE; 668 for_each_child_of_node(np, child) { 669 err = gfar_parse_group(child, priv, model); 670 if (err) 671 goto err_grp_init; 672 } 673 } else { 674 priv->mode = SQ_SG_MODE; 675 err = gfar_parse_group(np, priv, model); 676 if(err) 677 goto err_grp_init; 678 } 679 680 for (i = 0; i < priv->num_tx_queues; i++) 681 priv->tx_queue[i] = NULL; 682 for (i = 0; i < priv->num_rx_queues; i++) 683 priv->rx_queue[i] = NULL; 684 685 for (i = 0; i < priv->num_tx_queues; i++) { 686 priv->tx_queue[i] = kzalloc(sizeof(struct gfar_priv_tx_q), 687 GFP_KERNEL); 688 if (!priv->tx_queue[i]) { 689 err = -ENOMEM; 690 goto tx_alloc_failed; 691 } 692 priv->tx_queue[i]->tx_skbuff = NULL; 693 priv->tx_queue[i]->qindex = i; 694 priv->tx_queue[i]->dev = dev; 695 spin_lock_init(&(priv->tx_queue[i]->txlock)); 696 } 697 698 for (i = 0; i < priv->num_rx_queues; i++) { 699 priv->rx_queue[i] = kzalloc(sizeof(struct gfar_priv_rx_q), 700 GFP_KERNEL); 701 if (!priv->rx_queue[i]) { 702 err = -ENOMEM; 703 goto rx_alloc_failed; 704 } 705 priv->rx_queue[i]->rx_skbuff = NULL; 706 priv->rx_queue[i]->qindex = i; 707 priv->rx_queue[i]->dev = dev; 708 spin_lock_init(&(priv->rx_queue[i]->rxlock)); 709 } 710 711 712 stash = of_get_property(np, "bd-stash", NULL); 713 714 if (stash) { 715 priv->device_flags |= FSL_GIANFAR_DEV_HAS_BD_STASHING; 716 priv->bd_stash_en = 1; 717 } 718 719 stash_len = of_get_property(np, "rx-stash-len", NULL); 720 721 if (stash_len) 722 priv->rx_stash_size = *stash_len; 723 724 stash_idx = of_get_property(np, "rx-stash-idx", NULL); 725 726 if (stash_idx) 727 priv->rx_stash_index = *stash_idx; 728 729 if (stash_len || stash_idx) 730 priv->device_flags |= FSL_GIANFAR_DEV_HAS_BUF_STASHING; 731 732 mac_addr = of_get_mac_address(np); 733 if (mac_addr) 734 memcpy(dev->dev_addr, mac_addr, ETH_ALEN); 735 736 if (model && !strcasecmp(model, "TSEC")) 737 priv->device_flags = 738 FSL_GIANFAR_DEV_HAS_GIGABIT | 739 FSL_GIANFAR_DEV_HAS_COALESCE | 740 FSL_GIANFAR_DEV_HAS_RMON | 741 FSL_GIANFAR_DEV_HAS_MULTI_INTR; 742 if (model && !strcasecmp(model, "eTSEC")) 743 priv->device_flags = 744 FSL_GIANFAR_DEV_HAS_GIGABIT | 745 FSL_GIANFAR_DEV_HAS_COALESCE | 746 FSL_GIANFAR_DEV_HAS_RMON | 747 FSL_GIANFAR_DEV_HAS_MULTI_INTR | 748 FSL_GIANFAR_DEV_HAS_PADDING | 749 FSL_GIANFAR_DEV_HAS_CSUM | 750 FSL_GIANFAR_DEV_HAS_VLAN | 751 FSL_GIANFAR_DEV_HAS_MAGIC_PACKET | 752 FSL_GIANFAR_DEV_HAS_EXTENDED_HASH | 753 FSL_GIANFAR_DEV_HAS_TIMER; 754 755 ctype = of_get_property(np, "phy-connection-type", NULL); 756 757 /* We only care about rgmii-id. The rest are autodetected */ 758 if (ctype && !strcmp(ctype, "rgmii-id")) 759 priv->interface = PHY_INTERFACE_MODE_RGMII_ID; 760 else 761 priv->interface = PHY_INTERFACE_MODE_MII; 762 763 if (of_get_property(np, "fsl,magic-packet", NULL)) 764 priv->device_flags |= FSL_GIANFAR_DEV_HAS_MAGIC_PACKET; 765 766 priv->phy_node = of_parse_phandle(np, "phy-handle", 0); 767 768 /* Find the TBI PHY. If it's not there, we don't support SGMII */ 769 priv->tbi_node = of_parse_phandle(np, "tbi-handle", 0); 770 771 return 0; 772 773 rx_alloc_failed: 774 free_rx_pointers(priv); 775 tx_alloc_failed: 776 free_tx_pointers(priv); 777 err_grp_init: 778 unmap_group_regs(priv); 779 free_netdev(dev); 780 return err; 781 } 782 783 static int gfar_hwtstamp_ioctl(struct net_device *netdev, 784 struct ifreq *ifr, int cmd) 785 { 786 struct hwtstamp_config config; 787 struct gfar_private *priv = netdev_priv(netdev); 788 789 if (copy_from_user(&config, ifr->ifr_data, sizeof(config))) 790 return -EFAULT; 791 792 /* reserved for future extensions */ 793 if (config.flags) 794 return -EINVAL; 795 796 switch (config.tx_type) { 797 case HWTSTAMP_TX_OFF: 798 priv->hwts_tx_en = 0; 799 break; 800 case HWTSTAMP_TX_ON: 801 if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)) 802 return -ERANGE; 803 priv->hwts_tx_en = 1; 804 break; 805 default: 806 return -ERANGE; 807 } 808 809 switch (config.rx_filter) { 810 case HWTSTAMP_FILTER_NONE: 811 if (priv->hwts_rx_en) { 812 stop_gfar(netdev); 813 priv->hwts_rx_en = 0; 814 startup_gfar(netdev); 815 } 816 break; 817 default: 818 if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)) 819 return -ERANGE; 820 if (!priv->hwts_rx_en) { 821 stop_gfar(netdev); 822 priv->hwts_rx_en = 1; 823 startup_gfar(netdev); 824 } 825 config.rx_filter = HWTSTAMP_FILTER_ALL; 826 break; 827 } 828 829 return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ? 830 -EFAULT : 0; 831 } 832 833 /* Ioctl MII Interface */ 834 static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) 835 { 836 struct gfar_private *priv = netdev_priv(dev); 837 838 if (!netif_running(dev)) 839 return -EINVAL; 840 841 if (cmd == SIOCSHWTSTAMP) 842 return gfar_hwtstamp_ioctl(dev, rq, cmd); 843 844 if (!priv->phydev) 845 return -ENODEV; 846 847 return phy_mii_ioctl(priv->phydev, rq, cmd); 848 } 849 850 static unsigned int reverse_bitmap(unsigned int bit_map, unsigned int max_qs) 851 { 852 unsigned int new_bit_map = 0x0; 853 int mask = 0x1 << (max_qs - 1), i; 854 for (i = 0; i < max_qs; i++) { 855 if (bit_map & mask) 856 new_bit_map = new_bit_map + (1 << i); 857 mask = mask >> 0x1; 858 } 859 return new_bit_map; 860 } 861 862 static u32 cluster_entry_per_class(struct gfar_private *priv, u32 rqfar, 863 u32 class) 864 { 865 u32 rqfpr = FPR_FILER_MASK; 866 u32 rqfcr = 0x0; 867 868 rqfar--; 869 rqfcr = RQFCR_CLE | RQFCR_PID_MASK | RQFCR_CMP_EXACT; 870 priv->ftp_rqfpr[rqfar] = rqfpr; 871 priv->ftp_rqfcr[rqfar] = rqfcr; 872 gfar_write_filer(priv, rqfar, rqfcr, rqfpr); 873 874 rqfar--; 875 rqfcr = RQFCR_CMP_NOMATCH; 876 priv->ftp_rqfpr[rqfar] = rqfpr; 877 priv->ftp_rqfcr[rqfar] = rqfcr; 878 gfar_write_filer(priv, rqfar, rqfcr, rqfpr); 879 880 rqfar--; 881 rqfcr = RQFCR_CMP_EXACT | RQFCR_PID_PARSE | RQFCR_CLE | RQFCR_AND; 882 rqfpr = class; 883 priv->ftp_rqfcr[rqfar] = rqfcr; 884 priv->ftp_rqfpr[rqfar] = rqfpr; 885 gfar_write_filer(priv, rqfar, rqfcr, rqfpr); 886 887 rqfar--; 888 rqfcr = RQFCR_CMP_EXACT | RQFCR_PID_MASK | RQFCR_AND; 889 rqfpr = class; 890 priv->ftp_rqfcr[rqfar] = rqfcr; 891 priv->ftp_rqfpr[rqfar] = rqfpr; 892 gfar_write_filer(priv, rqfar, rqfcr, rqfpr); 893 894 return rqfar; 895 } 896 897 static void gfar_init_filer_table(struct gfar_private *priv) 898 { 899 int i = 0x0; 900 u32 rqfar = MAX_FILER_IDX; 901 u32 rqfcr = 0x0; 902 u32 rqfpr = FPR_FILER_MASK; 903 904 /* Default rule */ 905 rqfcr = RQFCR_CMP_MATCH; 906 priv->ftp_rqfcr[rqfar] = rqfcr; 907 priv->ftp_rqfpr[rqfar] = rqfpr; 908 gfar_write_filer(priv, rqfar, rqfcr, rqfpr); 909 910 rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6); 911 rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6 | RQFPR_UDP); 912 rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6 | RQFPR_TCP); 913 rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4); 914 rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4 | RQFPR_UDP); 915 rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4 | RQFPR_TCP); 916 917 /* cur_filer_idx indicated the first non-masked rule */ 918 priv->cur_filer_idx = rqfar; 919 920 /* Rest are masked rules */ 921 rqfcr = RQFCR_CMP_NOMATCH; 922 for (i = 0; i < rqfar; i++) { 923 priv->ftp_rqfcr[i] = rqfcr; 924 priv->ftp_rqfpr[i] = rqfpr; 925 gfar_write_filer(priv, i, rqfcr, rqfpr); 926 } 927 } 928 929 static void gfar_detect_errata(struct gfar_private *priv) 930 { 931 struct device *dev = &priv->ofdev->dev; 932 unsigned int pvr = mfspr(SPRN_PVR); 933 unsigned int svr = mfspr(SPRN_SVR); 934 unsigned int mod = (svr >> 16) & 0xfff6; /* w/o E suffix */ 935 unsigned int rev = svr & 0xffff; 936 937 /* MPC8313 Rev 2.0 and higher; All MPC837x */ 938 if ((pvr == 0x80850010 && mod == 0x80b0 && rev >= 0x0020) || 939 (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0)) 940 priv->errata |= GFAR_ERRATA_74; 941 942 /* MPC8313 and MPC837x all rev */ 943 if ((pvr == 0x80850010 && mod == 0x80b0) || 944 (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0)) 945 priv->errata |= GFAR_ERRATA_76; 946 947 /* MPC8313 and MPC837x all rev */ 948 if ((pvr == 0x80850010 && mod == 0x80b0) || 949 (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0)) 950 priv->errata |= GFAR_ERRATA_A002; 951 952 /* MPC8313 Rev < 2.0, MPC8548 rev 2.0 */ 953 if ((pvr == 0x80850010 && mod == 0x80b0 && rev < 0x0020) || 954 (pvr == 0x80210020 && mod == 0x8030 && rev == 0x0020)) 955 priv->errata |= GFAR_ERRATA_12; 956 957 if (priv->errata) 958 dev_info(dev, "enabled errata workarounds, flags: 0x%x\n", 959 priv->errata); 960 } 961 962 /* Set up the ethernet device structure, private data, 963 * and anything else we need before we start */ 964 static int gfar_probe(struct platform_device *ofdev) 965 { 966 u32 tempval; 967 struct net_device *dev = NULL; 968 struct gfar_private *priv = NULL; 969 struct gfar __iomem *regs = NULL; 970 int err = 0, i, grp_idx = 0; 971 u32 rstat = 0, tstat = 0, rqueue = 0, tqueue = 0; 972 u32 isrg = 0; 973 u32 __iomem *baddr; 974 975 err = gfar_of_init(ofdev, &dev); 976 977 if (err) 978 return err; 979 980 priv = netdev_priv(dev); 981 priv->ndev = dev; 982 priv->ofdev = ofdev; 983 priv->node = ofdev->dev.of_node; 984 SET_NETDEV_DEV(dev, &ofdev->dev); 985 986 spin_lock_init(&priv->bflock); 987 INIT_WORK(&priv->reset_task, gfar_reset_task); 988 989 dev_set_drvdata(&ofdev->dev, priv); 990 regs = priv->gfargrp[0].regs; 991 992 gfar_detect_errata(priv); 993 994 /* Stop the DMA engine now, in case it was running before */ 995 /* (The firmware could have used it, and left it running). */ 996 gfar_halt(dev); 997 998 /* Reset MAC layer */ 999 gfar_write(®s->maccfg1, MACCFG1_SOFT_RESET); 1000 1001 /* We need to delay at least 3 TX clocks */ 1002 udelay(2); 1003 1004 tempval = (MACCFG1_TX_FLOW | MACCFG1_RX_FLOW); 1005 gfar_write(®s->maccfg1, tempval); 1006 1007 /* Initialize MACCFG2. */ 1008 tempval = MACCFG2_INIT_SETTINGS; 1009 if (gfar_has_errata(priv, GFAR_ERRATA_74)) 1010 tempval |= MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK; 1011 gfar_write(®s->maccfg2, tempval); 1012 1013 /* Initialize ECNTRL */ 1014 gfar_write(®s->ecntrl, ECNTRL_INIT_SETTINGS); 1015 1016 /* Set the dev->base_addr to the gfar reg region */ 1017 dev->base_addr = (unsigned long) regs; 1018 1019 SET_NETDEV_DEV(dev, &ofdev->dev); 1020 1021 /* Fill in the dev structure */ 1022 dev->watchdog_timeo = TX_TIMEOUT; 1023 dev->mtu = 1500; 1024 dev->netdev_ops = &gfar_netdev_ops; 1025 dev->ethtool_ops = &gfar_ethtool_ops; 1026 1027 /* Register for napi ...We are registering NAPI for each grp */ 1028 for (i = 0; i < priv->num_grps; i++) 1029 netif_napi_add(dev, &priv->gfargrp[i].napi, gfar_poll, GFAR_DEV_WEIGHT); 1030 1031 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_CSUM) { 1032 dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG | 1033 NETIF_F_RXCSUM; 1034 dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG | 1035 NETIF_F_RXCSUM | NETIF_F_HIGHDMA; 1036 } 1037 1038 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_VLAN) { 1039 dev->hw_features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX; 1040 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX; 1041 } 1042 1043 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_EXTENDED_HASH) { 1044 priv->extended_hash = 1; 1045 priv->hash_width = 9; 1046 1047 priv->hash_regs[0] = ®s->igaddr0; 1048 priv->hash_regs[1] = ®s->igaddr1; 1049 priv->hash_regs[2] = ®s->igaddr2; 1050 priv->hash_regs[3] = ®s->igaddr3; 1051 priv->hash_regs[4] = ®s->igaddr4; 1052 priv->hash_regs[5] = ®s->igaddr5; 1053 priv->hash_regs[6] = ®s->igaddr6; 1054 priv->hash_regs[7] = ®s->igaddr7; 1055 priv->hash_regs[8] = ®s->gaddr0; 1056 priv->hash_regs[9] = ®s->gaddr1; 1057 priv->hash_regs[10] = ®s->gaddr2; 1058 priv->hash_regs[11] = ®s->gaddr3; 1059 priv->hash_regs[12] = ®s->gaddr4; 1060 priv->hash_regs[13] = ®s->gaddr5; 1061 priv->hash_regs[14] = ®s->gaddr6; 1062 priv->hash_regs[15] = ®s->gaddr7; 1063 1064 } else { 1065 priv->extended_hash = 0; 1066 priv->hash_width = 8; 1067 1068 priv->hash_regs[0] = ®s->gaddr0; 1069 priv->hash_regs[1] = ®s->gaddr1; 1070 priv->hash_regs[2] = ®s->gaddr2; 1071 priv->hash_regs[3] = ®s->gaddr3; 1072 priv->hash_regs[4] = ®s->gaddr4; 1073 priv->hash_regs[5] = ®s->gaddr5; 1074 priv->hash_regs[6] = ®s->gaddr6; 1075 priv->hash_regs[7] = ®s->gaddr7; 1076 } 1077 1078 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_PADDING) 1079 priv->padding = DEFAULT_PADDING; 1080 else 1081 priv->padding = 0; 1082 1083 if (dev->features & NETIF_F_IP_CSUM || 1084 priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER) 1085 dev->hard_header_len += GMAC_FCB_LEN; 1086 1087 /* Program the isrg regs only if number of grps > 1 */ 1088 if (priv->num_grps > 1) { 1089 baddr = ®s->isrg0; 1090 for (i = 0; i < priv->num_grps; i++) { 1091 isrg |= (priv->gfargrp[i].rx_bit_map << ISRG_SHIFT_RX); 1092 isrg |= (priv->gfargrp[i].tx_bit_map << ISRG_SHIFT_TX); 1093 gfar_write(baddr, isrg); 1094 baddr++; 1095 isrg = 0x0; 1096 } 1097 } 1098 1099 /* Need to reverse the bit maps as bit_map's MSB is q0 1100 * but, for_each_set_bit parses from right to left, which 1101 * basically reverses the queue numbers */ 1102 for (i = 0; i< priv->num_grps; i++) { 1103 priv->gfargrp[i].tx_bit_map = reverse_bitmap( 1104 priv->gfargrp[i].tx_bit_map, MAX_TX_QS); 1105 priv->gfargrp[i].rx_bit_map = reverse_bitmap( 1106 priv->gfargrp[i].rx_bit_map, MAX_RX_QS); 1107 } 1108 1109 /* Calculate RSTAT, TSTAT, RQUEUE and TQUEUE values, 1110 * also assign queues to groups */ 1111 for (grp_idx = 0; grp_idx < priv->num_grps; grp_idx++) { 1112 priv->gfargrp[grp_idx].num_rx_queues = 0x0; 1113 for_each_set_bit(i, &priv->gfargrp[grp_idx].rx_bit_map, 1114 priv->num_rx_queues) { 1115 priv->gfargrp[grp_idx].num_rx_queues++; 1116 priv->rx_queue[i]->grp = &priv->gfargrp[grp_idx]; 1117 rstat = rstat | (RSTAT_CLEAR_RHALT >> i); 1118 rqueue = rqueue | ((RQUEUE_EN0 | RQUEUE_EX0) >> i); 1119 } 1120 priv->gfargrp[grp_idx].num_tx_queues = 0x0; 1121 for_each_set_bit(i, &priv->gfargrp[grp_idx].tx_bit_map, 1122 priv->num_tx_queues) { 1123 priv->gfargrp[grp_idx].num_tx_queues++; 1124 priv->tx_queue[i]->grp = &priv->gfargrp[grp_idx]; 1125 tstat = tstat | (TSTAT_CLEAR_THALT >> i); 1126 tqueue = tqueue | (TQUEUE_EN0 >> i); 1127 } 1128 priv->gfargrp[grp_idx].rstat = rstat; 1129 priv->gfargrp[grp_idx].tstat = tstat; 1130 rstat = tstat =0; 1131 } 1132 1133 gfar_write(®s->rqueue, rqueue); 1134 gfar_write(®s->tqueue, tqueue); 1135 1136 priv->rx_buffer_size = DEFAULT_RX_BUFFER_SIZE; 1137 1138 /* Initializing some of the rx/tx queue level parameters */ 1139 for (i = 0; i < priv->num_tx_queues; i++) { 1140 priv->tx_queue[i]->tx_ring_size = DEFAULT_TX_RING_SIZE; 1141 priv->tx_queue[i]->num_txbdfree = DEFAULT_TX_RING_SIZE; 1142 priv->tx_queue[i]->txcoalescing = DEFAULT_TX_COALESCE; 1143 priv->tx_queue[i]->txic = DEFAULT_TXIC; 1144 } 1145 1146 for (i = 0; i < priv->num_rx_queues; i++) { 1147 priv->rx_queue[i]->rx_ring_size = DEFAULT_RX_RING_SIZE; 1148 priv->rx_queue[i]->rxcoalescing = DEFAULT_RX_COALESCE; 1149 priv->rx_queue[i]->rxic = DEFAULT_RXIC; 1150 } 1151 1152 /* always enable rx filer*/ 1153 priv->rx_filer_enable = 1; 1154 /* Enable most messages by default */ 1155 priv->msg_enable = (NETIF_MSG_IFUP << 1 ) - 1; 1156 1157 /* Carrier starts down, phylib will bring it up */ 1158 netif_carrier_off(dev); 1159 1160 err = register_netdev(dev); 1161 1162 if (err) { 1163 pr_err("%s: Cannot register net device, aborting\n", dev->name); 1164 goto register_fail; 1165 } 1166 1167 device_init_wakeup(&dev->dev, 1168 priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET); 1169 1170 /* fill out IRQ number and name fields */ 1171 for (i = 0; i < priv->num_grps; i++) { 1172 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) { 1173 sprintf(priv->gfargrp[i].int_name_tx, "%s%s%c%s", 1174 dev->name, "_g", '0' + i, "_tx"); 1175 sprintf(priv->gfargrp[i].int_name_rx, "%s%s%c%s", 1176 dev->name, "_g", '0' + i, "_rx"); 1177 sprintf(priv->gfargrp[i].int_name_er, "%s%s%c%s", 1178 dev->name, "_g", '0' + i, "_er"); 1179 } else 1180 strcpy(priv->gfargrp[i].int_name_tx, dev->name); 1181 } 1182 1183 /* Initialize the filer table */ 1184 gfar_init_filer_table(priv); 1185 1186 /* Create all the sysfs files */ 1187 gfar_init_sysfs(dev); 1188 1189 /* Print out the device info */ 1190 netdev_info(dev, "mac: %pM\n", dev->dev_addr); 1191 1192 /* Even more device info helps when determining which kernel */ 1193 /* provided which set of benchmarks. */ 1194 netdev_info(dev, "Running with NAPI enabled\n"); 1195 for (i = 0; i < priv->num_rx_queues; i++) 1196 netdev_info(dev, "RX BD ring size for Q[%d]: %d\n", 1197 i, priv->rx_queue[i]->rx_ring_size); 1198 for(i = 0; i < priv->num_tx_queues; i++) 1199 netdev_info(dev, "TX BD ring size for Q[%d]: %d\n", 1200 i, priv->tx_queue[i]->tx_ring_size); 1201 1202 return 0; 1203 1204 register_fail: 1205 unmap_group_regs(priv); 1206 free_tx_pointers(priv); 1207 free_rx_pointers(priv); 1208 if (priv->phy_node) 1209 of_node_put(priv->phy_node); 1210 if (priv->tbi_node) 1211 of_node_put(priv->tbi_node); 1212 free_netdev(dev); 1213 return err; 1214 } 1215 1216 static int gfar_remove(struct platform_device *ofdev) 1217 { 1218 struct gfar_private *priv = dev_get_drvdata(&ofdev->dev); 1219 1220 if (priv->phy_node) 1221 of_node_put(priv->phy_node); 1222 if (priv->tbi_node) 1223 of_node_put(priv->tbi_node); 1224 1225 dev_set_drvdata(&ofdev->dev, NULL); 1226 1227 unregister_netdev(priv->ndev); 1228 unmap_group_regs(priv); 1229 free_netdev(priv->ndev); 1230 1231 return 0; 1232 } 1233 1234 #ifdef CONFIG_PM 1235 1236 static int gfar_suspend(struct device *dev) 1237 { 1238 struct gfar_private *priv = dev_get_drvdata(dev); 1239 struct net_device *ndev = priv->ndev; 1240 struct gfar __iomem *regs = priv->gfargrp[0].regs; 1241 unsigned long flags; 1242 u32 tempval; 1243 1244 int magic_packet = priv->wol_en && 1245 (priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET); 1246 1247 netif_device_detach(ndev); 1248 1249 if (netif_running(ndev)) { 1250 1251 local_irq_save(flags); 1252 lock_tx_qs(priv); 1253 lock_rx_qs(priv); 1254 1255 gfar_halt_nodisable(ndev); 1256 1257 /* Disable Tx, and Rx if wake-on-LAN is disabled. */ 1258 tempval = gfar_read(®s->maccfg1); 1259 1260 tempval &= ~MACCFG1_TX_EN; 1261 1262 if (!magic_packet) 1263 tempval &= ~MACCFG1_RX_EN; 1264 1265 gfar_write(®s->maccfg1, tempval); 1266 1267 unlock_rx_qs(priv); 1268 unlock_tx_qs(priv); 1269 local_irq_restore(flags); 1270 1271 disable_napi(priv); 1272 1273 if (magic_packet) { 1274 /* Enable interrupt on Magic Packet */ 1275 gfar_write(®s->imask, IMASK_MAG); 1276 1277 /* Enable Magic Packet mode */ 1278 tempval = gfar_read(®s->maccfg2); 1279 tempval |= MACCFG2_MPEN; 1280 gfar_write(®s->maccfg2, tempval); 1281 } else { 1282 phy_stop(priv->phydev); 1283 } 1284 } 1285 1286 return 0; 1287 } 1288 1289 static int gfar_resume(struct device *dev) 1290 { 1291 struct gfar_private *priv = dev_get_drvdata(dev); 1292 struct net_device *ndev = priv->ndev; 1293 struct gfar __iomem *regs = priv->gfargrp[0].regs; 1294 unsigned long flags; 1295 u32 tempval; 1296 int magic_packet = priv->wol_en && 1297 (priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET); 1298 1299 if (!netif_running(ndev)) { 1300 netif_device_attach(ndev); 1301 return 0; 1302 } 1303 1304 if (!magic_packet && priv->phydev) 1305 phy_start(priv->phydev); 1306 1307 /* Disable Magic Packet mode, in case something 1308 * else woke us up. 1309 */ 1310 local_irq_save(flags); 1311 lock_tx_qs(priv); 1312 lock_rx_qs(priv); 1313 1314 tempval = gfar_read(®s->maccfg2); 1315 tempval &= ~MACCFG2_MPEN; 1316 gfar_write(®s->maccfg2, tempval); 1317 1318 gfar_start(ndev); 1319 1320 unlock_rx_qs(priv); 1321 unlock_tx_qs(priv); 1322 local_irq_restore(flags); 1323 1324 netif_device_attach(ndev); 1325 1326 enable_napi(priv); 1327 1328 return 0; 1329 } 1330 1331 static int gfar_restore(struct device *dev) 1332 { 1333 struct gfar_private *priv = dev_get_drvdata(dev); 1334 struct net_device *ndev = priv->ndev; 1335 1336 if (!netif_running(ndev)) 1337 return 0; 1338 1339 gfar_init_bds(ndev); 1340 init_registers(ndev); 1341 gfar_set_mac_address(ndev); 1342 gfar_init_mac(ndev); 1343 gfar_start(ndev); 1344 1345 priv->oldlink = 0; 1346 priv->oldspeed = 0; 1347 priv->oldduplex = -1; 1348 1349 if (priv->phydev) 1350 phy_start(priv->phydev); 1351 1352 netif_device_attach(ndev); 1353 enable_napi(priv); 1354 1355 return 0; 1356 } 1357 1358 static struct dev_pm_ops gfar_pm_ops = { 1359 .suspend = gfar_suspend, 1360 .resume = gfar_resume, 1361 .freeze = gfar_suspend, 1362 .thaw = gfar_resume, 1363 .restore = gfar_restore, 1364 }; 1365 1366 #define GFAR_PM_OPS (&gfar_pm_ops) 1367 1368 #else 1369 1370 #define GFAR_PM_OPS NULL 1371 1372 #endif 1373 1374 /* Reads the controller's registers to determine what interface 1375 * connects it to the PHY. 1376 */ 1377 static phy_interface_t gfar_get_interface(struct net_device *dev) 1378 { 1379 struct gfar_private *priv = netdev_priv(dev); 1380 struct gfar __iomem *regs = priv->gfargrp[0].regs; 1381 u32 ecntrl; 1382 1383 ecntrl = gfar_read(®s->ecntrl); 1384 1385 if (ecntrl & ECNTRL_SGMII_MODE) 1386 return PHY_INTERFACE_MODE_SGMII; 1387 1388 if (ecntrl & ECNTRL_TBI_MODE) { 1389 if (ecntrl & ECNTRL_REDUCED_MODE) 1390 return PHY_INTERFACE_MODE_RTBI; 1391 else 1392 return PHY_INTERFACE_MODE_TBI; 1393 } 1394 1395 if (ecntrl & ECNTRL_REDUCED_MODE) { 1396 if (ecntrl & ECNTRL_REDUCED_MII_MODE) 1397 return PHY_INTERFACE_MODE_RMII; 1398 else { 1399 phy_interface_t interface = priv->interface; 1400 1401 /* 1402 * This isn't autodetected right now, so it must 1403 * be set by the device tree or platform code. 1404 */ 1405 if (interface == PHY_INTERFACE_MODE_RGMII_ID) 1406 return PHY_INTERFACE_MODE_RGMII_ID; 1407 1408 return PHY_INTERFACE_MODE_RGMII; 1409 } 1410 } 1411 1412 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT) 1413 return PHY_INTERFACE_MODE_GMII; 1414 1415 return PHY_INTERFACE_MODE_MII; 1416 } 1417 1418 1419 /* Initializes driver's PHY state, and attaches to the PHY. 1420 * Returns 0 on success. 1421 */ 1422 static int init_phy(struct net_device *dev) 1423 { 1424 struct gfar_private *priv = netdev_priv(dev); 1425 uint gigabit_support = 1426 priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT ? 1427 SUPPORTED_1000baseT_Full : 0; 1428 phy_interface_t interface; 1429 1430 priv->oldlink = 0; 1431 priv->oldspeed = 0; 1432 priv->oldduplex = -1; 1433 1434 interface = gfar_get_interface(dev); 1435 1436 priv->phydev = of_phy_connect(dev, priv->phy_node, &adjust_link, 0, 1437 interface); 1438 if (!priv->phydev) 1439 priv->phydev = of_phy_connect_fixed_link(dev, &adjust_link, 1440 interface); 1441 if (!priv->phydev) { 1442 dev_err(&dev->dev, "could not attach to PHY\n"); 1443 return -ENODEV; 1444 } 1445 1446 if (interface == PHY_INTERFACE_MODE_SGMII) 1447 gfar_configure_serdes(dev); 1448 1449 /* Remove any features not supported by the controller */ 1450 priv->phydev->supported &= (GFAR_SUPPORTED | gigabit_support); 1451 priv->phydev->advertising = priv->phydev->supported; 1452 1453 return 0; 1454 } 1455 1456 /* 1457 * Initialize TBI PHY interface for communicating with the 1458 * SERDES lynx PHY on the chip. We communicate with this PHY 1459 * through the MDIO bus on each controller, treating it as a 1460 * "normal" PHY at the address found in the TBIPA register. We assume 1461 * that the TBIPA register is valid. Either the MDIO bus code will set 1462 * it to a value that doesn't conflict with other PHYs on the bus, or the 1463 * value doesn't matter, as there are no other PHYs on the bus. 1464 */ 1465 static void gfar_configure_serdes(struct net_device *dev) 1466 { 1467 struct gfar_private *priv = netdev_priv(dev); 1468 struct phy_device *tbiphy; 1469 1470 if (!priv->tbi_node) { 1471 dev_warn(&dev->dev, "error: SGMII mode requires that the " 1472 "device tree specify a tbi-handle\n"); 1473 return; 1474 } 1475 1476 tbiphy = of_phy_find_device(priv->tbi_node); 1477 if (!tbiphy) { 1478 dev_err(&dev->dev, "error: Could not get TBI device\n"); 1479 return; 1480 } 1481 1482 /* 1483 * If the link is already up, we must already be ok, and don't need to 1484 * configure and reset the TBI<->SerDes link. Maybe U-Boot configured 1485 * everything for us? Resetting it takes the link down and requires 1486 * several seconds for it to come back. 1487 */ 1488 if (phy_read(tbiphy, MII_BMSR) & BMSR_LSTATUS) 1489 return; 1490 1491 /* Single clk mode, mii mode off(for serdes communication) */ 1492 phy_write(tbiphy, MII_TBICON, TBICON_CLK_SELECT); 1493 1494 phy_write(tbiphy, MII_ADVERTISE, 1495 ADVERTISE_1000XFULL | ADVERTISE_1000XPAUSE | 1496 ADVERTISE_1000XPSE_ASYM); 1497 1498 phy_write(tbiphy, MII_BMCR, BMCR_ANENABLE | 1499 BMCR_ANRESTART | BMCR_FULLDPLX | BMCR_SPEED1000); 1500 } 1501 1502 static void init_registers(struct net_device *dev) 1503 { 1504 struct gfar_private *priv = netdev_priv(dev); 1505 struct gfar __iomem *regs = NULL; 1506 int i = 0; 1507 1508 for (i = 0; i < priv->num_grps; i++) { 1509 regs = priv->gfargrp[i].regs; 1510 /* Clear IEVENT */ 1511 gfar_write(®s->ievent, IEVENT_INIT_CLEAR); 1512 1513 /* Initialize IMASK */ 1514 gfar_write(®s->imask, IMASK_INIT_CLEAR); 1515 } 1516 1517 regs = priv->gfargrp[0].regs; 1518 /* Init hash registers to zero */ 1519 gfar_write(®s->igaddr0, 0); 1520 gfar_write(®s->igaddr1, 0); 1521 gfar_write(®s->igaddr2, 0); 1522 gfar_write(®s->igaddr3, 0); 1523 gfar_write(®s->igaddr4, 0); 1524 gfar_write(®s->igaddr5, 0); 1525 gfar_write(®s->igaddr6, 0); 1526 gfar_write(®s->igaddr7, 0); 1527 1528 gfar_write(®s->gaddr0, 0); 1529 gfar_write(®s->gaddr1, 0); 1530 gfar_write(®s->gaddr2, 0); 1531 gfar_write(®s->gaddr3, 0); 1532 gfar_write(®s->gaddr4, 0); 1533 gfar_write(®s->gaddr5, 0); 1534 gfar_write(®s->gaddr6, 0); 1535 gfar_write(®s->gaddr7, 0); 1536 1537 /* Zero out the rmon mib registers if it has them */ 1538 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON) { 1539 memset_io(&(regs->rmon), 0, sizeof (struct rmon_mib)); 1540 1541 /* Mask off the CAM interrupts */ 1542 gfar_write(®s->rmon.cam1, 0xffffffff); 1543 gfar_write(®s->rmon.cam2, 0xffffffff); 1544 } 1545 1546 /* Initialize the max receive buffer length */ 1547 gfar_write(®s->mrblr, priv->rx_buffer_size); 1548 1549 /* Initialize the Minimum Frame Length Register */ 1550 gfar_write(®s->minflr, MINFLR_INIT_SETTINGS); 1551 } 1552 1553 static int __gfar_is_rx_idle(struct gfar_private *priv) 1554 { 1555 u32 res; 1556 1557 /* 1558 * Normaly TSEC should not hang on GRS commands, so we should 1559 * actually wait for IEVENT_GRSC flag. 1560 */ 1561 if (likely(!gfar_has_errata(priv, GFAR_ERRATA_A002))) 1562 return 0; 1563 1564 /* 1565 * Read the eTSEC register at offset 0xD1C. If bits 7-14 are 1566 * the same as bits 23-30, the eTSEC Rx is assumed to be idle 1567 * and the Rx can be safely reset. 1568 */ 1569 res = gfar_read((void __iomem *)priv->gfargrp[0].regs + 0xd1c); 1570 res &= 0x7f807f80; 1571 if ((res & 0xffff) == (res >> 16)) 1572 return 1; 1573 1574 return 0; 1575 } 1576 1577 /* Halt the receive and transmit queues */ 1578 static void gfar_halt_nodisable(struct net_device *dev) 1579 { 1580 struct gfar_private *priv = netdev_priv(dev); 1581 struct gfar __iomem *regs = NULL; 1582 u32 tempval; 1583 int i = 0; 1584 1585 for (i = 0; i < priv->num_grps; i++) { 1586 regs = priv->gfargrp[i].regs; 1587 /* Mask all interrupts */ 1588 gfar_write(®s->imask, IMASK_INIT_CLEAR); 1589 1590 /* Clear all interrupts */ 1591 gfar_write(®s->ievent, IEVENT_INIT_CLEAR); 1592 } 1593 1594 regs = priv->gfargrp[0].regs; 1595 /* Stop the DMA, and wait for it to stop */ 1596 tempval = gfar_read(®s->dmactrl); 1597 if ((tempval & (DMACTRL_GRS | DMACTRL_GTS)) 1598 != (DMACTRL_GRS | DMACTRL_GTS)) { 1599 int ret; 1600 1601 tempval |= (DMACTRL_GRS | DMACTRL_GTS); 1602 gfar_write(®s->dmactrl, tempval); 1603 1604 do { 1605 ret = spin_event_timeout(((gfar_read(®s->ievent) & 1606 (IEVENT_GRSC | IEVENT_GTSC)) == 1607 (IEVENT_GRSC | IEVENT_GTSC)), 1000000, 0); 1608 if (!ret && !(gfar_read(®s->ievent) & IEVENT_GRSC)) 1609 ret = __gfar_is_rx_idle(priv); 1610 } while (!ret); 1611 } 1612 } 1613 1614 /* Halt the receive and transmit queues */ 1615 void gfar_halt(struct net_device *dev) 1616 { 1617 struct gfar_private *priv = netdev_priv(dev); 1618 struct gfar __iomem *regs = priv->gfargrp[0].regs; 1619 u32 tempval; 1620 1621 gfar_halt_nodisable(dev); 1622 1623 /* Disable Rx and Tx */ 1624 tempval = gfar_read(®s->maccfg1); 1625 tempval &= ~(MACCFG1_RX_EN | MACCFG1_TX_EN); 1626 gfar_write(®s->maccfg1, tempval); 1627 } 1628 1629 static void free_grp_irqs(struct gfar_priv_grp *grp) 1630 { 1631 free_irq(grp->interruptError, grp); 1632 free_irq(grp->interruptTransmit, grp); 1633 free_irq(grp->interruptReceive, grp); 1634 } 1635 1636 void stop_gfar(struct net_device *dev) 1637 { 1638 struct gfar_private *priv = netdev_priv(dev); 1639 unsigned long flags; 1640 int i; 1641 1642 phy_stop(priv->phydev); 1643 1644 1645 /* Lock it down */ 1646 local_irq_save(flags); 1647 lock_tx_qs(priv); 1648 lock_rx_qs(priv); 1649 1650 gfar_halt(dev); 1651 1652 unlock_rx_qs(priv); 1653 unlock_tx_qs(priv); 1654 local_irq_restore(flags); 1655 1656 /* Free the IRQs */ 1657 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) { 1658 for (i = 0; i < priv->num_grps; i++) 1659 free_grp_irqs(&priv->gfargrp[i]); 1660 } else { 1661 for (i = 0; i < priv->num_grps; i++) 1662 free_irq(priv->gfargrp[i].interruptTransmit, 1663 &priv->gfargrp[i]); 1664 } 1665 1666 free_skb_resources(priv); 1667 } 1668 1669 static void free_skb_tx_queue(struct gfar_priv_tx_q *tx_queue) 1670 { 1671 struct txbd8 *txbdp; 1672 struct gfar_private *priv = netdev_priv(tx_queue->dev); 1673 int i, j; 1674 1675 txbdp = tx_queue->tx_bd_base; 1676 1677 for (i = 0; i < tx_queue->tx_ring_size; i++) { 1678 if (!tx_queue->tx_skbuff[i]) 1679 continue; 1680 1681 dma_unmap_single(&priv->ofdev->dev, txbdp->bufPtr, 1682 txbdp->length, DMA_TO_DEVICE); 1683 txbdp->lstatus = 0; 1684 for (j = 0; j < skb_shinfo(tx_queue->tx_skbuff[i])->nr_frags; 1685 j++) { 1686 txbdp++; 1687 dma_unmap_page(&priv->ofdev->dev, txbdp->bufPtr, 1688 txbdp->length, DMA_TO_DEVICE); 1689 } 1690 txbdp++; 1691 dev_kfree_skb_any(tx_queue->tx_skbuff[i]); 1692 tx_queue->tx_skbuff[i] = NULL; 1693 } 1694 kfree(tx_queue->tx_skbuff); 1695 } 1696 1697 static void free_skb_rx_queue(struct gfar_priv_rx_q *rx_queue) 1698 { 1699 struct rxbd8 *rxbdp; 1700 struct gfar_private *priv = netdev_priv(rx_queue->dev); 1701 int i; 1702 1703 rxbdp = rx_queue->rx_bd_base; 1704 1705 for (i = 0; i < rx_queue->rx_ring_size; i++) { 1706 if (rx_queue->rx_skbuff[i]) { 1707 dma_unmap_single(&priv->ofdev->dev, 1708 rxbdp->bufPtr, priv->rx_buffer_size, 1709 DMA_FROM_DEVICE); 1710 dev_kfree_skb_any(rx_queue->rx_skbuff[i]); 1711 rx_queue->rx_skbuff[i] = NULL; 1712 } 1713 rxbdp->lstatus = 0; 1714 rxbdp->bufPtr = 0; 1715 rxbdp++; 1716 } 1717 kfree(rx_queue->rx_skbuff); 1718 } 1719 1720 /* If there are any tx skbs or rx skbs still around, free them. 1721 * Then free tx_skbuff and rx_skbuff */ 1722 static void free_skb_resources(struct gfar_private *priv) 1723 { 1724 struct gfar_priv_tx_q *tx_queue = NULL; 1725 struct gfar_priv_rx_q *rx_queue = NULL; 1726 int i; 1727 1728 /* Go through all the buffer descriptors and free their data buffers */ 1729 for (i = 0; i < priv->num_tx_queues; i++) { 1730 struct netdev_queue *txq; 1731 tx_queue = priv->tx_queue[i]; 1732 txq = netdev_get_tx_queue(tx_queue->dev, tx_queue->qindex); 1733 if(tx_queue->tx_skbuff) 1734 free_skb_tx_queue(tx_queue); 1735 netdev_tx_reset_queue(txq); 1736 } 1737 1738 for (i = 0; i < priv->num_rx_queues; i++) { 1739 rx_queue = priv->rx_queue[i]; 1740 if(rx_queue->rx_skbuff) 1741 free_skb_rx_queue(rx_queue); 1742 } 1743 1744 dma_free_coherent(&priv->ofdev->dev, 1745 sizeof(struct txbd8) * priv->total_tx_ring_size + 1746 sizeof(struct rxbd8) * priv->total_rx_ring_size, 1747 priv->tx_queue[0]->tx_bd_base, 1748 priv->tx_queue[0]->tx_bd_dma_base); 1749 skb_queue_purge(&priv->rx_recycle); 1750 } 1751 1752 void gfar_start(struct net_device *dev) 1753 { 1754 struct gfar_private *priv = netdev_priv(dev); 1755 struct gfar __iomem *regs = priv->gfargrp[0].regs; 1756 u32 tempval; 1757 int i = 0; 1758 1759 /* Enable Rx and Tx in MACCFG1 */ 1760 tempval = gfar_read(®s->maccfg1); 1761 tempval |= (MACCFG1_RX_EN | MACCFG1_TX_EN); 1762 gfar_write(®s->maccfg1, tempval); 1763 1764 /* Initialize DMACTRL to have WWR and WOP */ 1765 tempval = gfar_read(®s->dmactrl); 1766 tempval |= DMACTRL_INIT_SETTINGS; 1767 gfar_write(®s->dmactrl, tempval); 1768 1769 /* Make sure we aren't stopped */ 1770 tempval = gfar_read(®s->dmactrl); 1771 tempval &= ~(DMACTRL_GRS | DMACTRL_GTS); 1772 gfar_write(®s->dmactrl, tempval); 1773 1774 for (i = 0; i < priv->num_grps; i++) { 1775 regs = priv->gfargrp[i].regs; 1776 /* Clear THLT/RHLT, so that the DMA starts polling now */ 1777 gfar_write(®s->tstat, priv->gfargrp[i].tstat); 1778 gfar_write(®s->rstat, priv->gfargrp[i].rstat); 1779 /* Unmask the interrupts we look for */ 1780 gfar_write(®s->imask, IMASK_DEFAULT); 1781 } 1782 1783 dev->trans_start = jiffies; /* prevent tx timeout */ 1784 } 1785 1786 void gfar_configure_coalescing(struct gfar_private *priv, 1787 unsigned long tx_mask, unsigned long rx_mask) 1788 { 1789 struct gfar __iomem *regs = priv->gfargrp[0].regs; 1790 u32 __iomem *baddr; 1791 int i = 0; 1792 1793 /* Backward compatible case ---- even if we enable 1794 * multiple queues, there's only single reg to program 1795 */ 1796 gfar_write(®s->txic, 0); 1797 if(likely(priv->tx_queue[0]->txcoalescing)) 1798 gfar_write(®s->txic, priv->tx_queue[0]->txic); 1799 1800 gfar_write(®s->rxic, 0); 1801 if(unlikely(priv->rx_queue[0]->rxcoalescing)) 1802 gfar_write(®s->rxic, priv->rx_queue[0]->rxic); 1803 1804 if (priv->mode == MQ_MG_MODE) { 1805 baddr = ®s->txic0; 1806 for_each_set_bit(i, &tx_mask, priv->num_tx_queues) { 1807 if (likely(priv->tx_queue[i]->txcoalescing)) { 1808 gfar_write(baddr + i, 0); 1809 gfar_write(baddr + i, priv->tx_queue[i]->txic); 1810 } 1811 } 1812 1813 baddr = ®s->rxic0; 1814 for_each_set_bit(i, &rx_mask, priv->num_rx_queues) { 1815 if (likely(priv->rx_queue[i]->rxcoalescing)) { 1816 gfar_write(baddr + i, 0); 1817 gfar_write(baddr + i, priv->rx_queue[i]->rxic); 1818 } 1819 } 1820 } 1821 } 1822 1823 static int register_grp_irqs(struct gfar_priv_grp *grp) 1824 { 1825 struct gfar_private *priv = grp->priv; 1826 struct net_device *dev = priv->ndev; 1827 int err; 1828 1829 /* If the device has multiple interrupts, register for 1830 * them. Otherwise, only register for the one */ 1831 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) { 1832 /* Install our interrupt handlers for Error, 1833 * Transmit, and Receive */ 1834 if ((err = request_irq(grp->interruptError, gfar_error, 0, 1835 grp->int_name_er,grp)) < 0) { 1836 netif_err(priv, intr, dev, "Can't get IRQ %d\n", 1837 grp->interruptError); 1838 1839 goto err_irq_fail; 1840 } 1841 1842 if ((err = request_irq(grp->interruptTransmit, gfar_transmit, 1843 0, grp->int_name_tx, grp)) < 0) { 1844 netif_err(priv, intr, dev, "Can't get IRQ %d\n", 1845 grp->interruptTransmit); 1846 goto tx_irq_fail; 1847 } 1848 1849 if ((err = request_irq(grp->interruptReceive, gfar_receive, 0, 1850 grp->int_name_rx, grp)) < 0) { 1851 netif_err(priv, intr, dev, "Can't get IRQ %d\n", 1852 grp->interruptReceive); 1853 goto rx_irq_fail; 1854 } 1855 } else { 1856 if ((err = request_irq(grp->interruptTransmit, gfar_interrupt, 0, 1857 grp->int_name_tx, grp)) < 0) { 1858 netif_err(priv, intr, dev, "Can't get IRQ %d\n", 1859 grp->interruptTransmit); 1860 goto err_irq_fail; 1861 } 1862 } 1863 1864 return 0; 1865 1866 rx_irq_fail: 1867 free_irq(grp->interruptTransmit, grp); 1868 tx_irq_fail: 1869 free_irq(grp->interruptError, grp); 1870 err_irq_fail: 1871 return err; 1872 1873 } 1874 1875 /* Bring the controller up and running */ 1876 int startup_gfar(struct net_device *ndev) 1877 { 1878 struct gfar_private *priv = netdev_priv(ndev); 1879 struct gfar __iomem *regs = NULL; 1880 int err, i, j; 1881 1882 for (i = 0; i < priv->num_grps; i++) { 1883 regs= priv->gfargrp[i].regs; 1884 gfar_write(®s->imask, IMASK_INIT_CLEAR); 1885 } 1886 1887 regs= priv->gfargrp[0].regs; 1888 err = gfar_alloc_skb_resources(ndev); 1889 if (err) 1890 return err; 1891 1892 gfar_init_mac(ndev); 1893 1894 for (i = 0; i < priv->num_grps; i++) { 1895 err = register_grp_irqs(&priv->gfargrp[i]); 1896 if (err) { 1897 for (j = 0; j < i; j++) 1898 free_grp_irqs(&priv->gfargrp[j]); 1899 goto irq_fail; 1900 } 1901 } 1902 1903 /* Start the controller */ 1904 gfar_start(ndev); 1905 1906 phy_start(priv->phydev); 1907 1908 gfar_configure_coalescing(priv, 0xFF, 0xFF); 1909 1910 return 0; 1911 1912 irq_fail: 1913 free_skb_resources(priv); 1914 return err; 1915 } 1916 1917 /* Called when something needs to use the ethernet device */ 1918 /* Returns 0 for success. */ 1919 static int gfar_enet_open(struct net_device *dev) 1920 { 1921 struct gfar_private *priv = netdev_priv(dev); 1922 int err; 1923 1924 enable_napi(priv); 1925 1926 skb_queue_head_init(&priv->rx_recycle); 1927 1928 /* Initialize a bunch of registers */ 1929 init_registers(dev); 1930 1931 gfar_set_mac_address(dev); 1932 1933 err = init_phy(dev); 1934 1935 if (err) { 1936 disable_napi(priv); 1937 return err; 1938 } 1939 1940 err = startup_gfar(dev); 1941 if (err) { 1942 disable_napi(priv); 1943 return err; 1944 } 1945 1946 netif_tx_start_all_queues(dev); 1947 1948 device_set_wakeup_enable(&dev->dev, priv->wol_en); 1949 1950 return err; 1951 } 1952 1953 static inline struct txfcb *gfar_add_fcb(struct sk_buff *skb) 1954 { 1955 struct txfcb *fcb = (struct txfcb *)skb_push(skb, GMAC_FCB_LEN); 1956 1957 memset(fcb, 0, GMAC_FCB_LEN); 1958 1959 return fcb; 1960 } 1961 1962 static inline void gfar_tx_checksum(struct sk_buff *skb, struct txfcb *fcb, 1963 int fcb_length) 1964 { 1965 u8 flags = 0; 1966 1967 /* If we're here, it's a IP packet with a TCP or UDP 1968 * payload. We set it to checksum, using a pseudo-header 1969 * we provide 1970 */ 1971 flags = TXFCB_DEFAULT; 1972 1973 /* Tell the controller what the protocol is */ 1974 /* And provide the already calculated phcs */ 1975 if (ip_hdr(skb)->protocol == IPPROTO_UDP) { 1976 flags |= TXFCB_UDP; 1977 fcb->phcs = udp_hdr(skb)->check; 1978 } else 1979 fcb->phcs = tcp_hdr(skb)->check; 1980 1981 /* l3os is the distance between the start of the 1982 * frame (skb->data) and the start of the IP hdr. 1983 * l4os is the distance between the start of the 1984 * l3 hdr and the l4 hdr */ 1985 fcb->l3os = (u16)(skb_network_offset(skb) - fcb_length); 1986 fcb->l4os = skb_network_header_len(skb); 1987 1988 fcb->flags = flags; 1989 } 1990 1991 void inline gfar_tx_vlan(struct sk_buff *skb, struct txfcb *fcb) 1992 { 1993 fcb->flags |= TXFCB_VLN; 1994 fcb->vlctl = vlan_tx_tag_get(skb); 1995 } 1996 1997 static inline struct txbd8 *skip_txbd(struct txbd8 *bdp, int stride, 1998 struct txbd8 *base, int ring_size) 1999 { 2000 struct txbd8 *new_bd = bdp + stride; 2001 2002 return (new_bd >= (base + ring_size)) ? (new_bd - ring_size) : new_bd; 2003 } 2004 2005 static inline struct txbd8 *next_txbd(struct txbd8 *bdp, struct txbd8 *base, 2006 int ring_size) 2007 { 2008 return skip_txbd(bdp, 1, base, ring_size); 2009 } 2010 2011 /* This is called by the kernel when a frame is ready for transmission. */ 2012 /* It is pointed to by the dev->hard_start_xmit function pointer */ 2013 static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev) 2014 { 2015 struct gfar_private *priv = netdev_priv(dev); 2016 struct gfar_priv_tx_q *tx_queue = NULL; 2017 struct netdev_queue *txq; 2018 struct gfar __iomem *regs = NULL; 2019 struct txfcb *fcb = NULL; 2020 struct txbd8 *txbdp, *txbdp_start, *base, *txbdp_tstamp = NULL; 2021 u32 lstatus; 2022 int i, rq = 0, do_tstamp = 0; 2023 u32 bufaddr; 2024 unsigned long flags; 2025 unsigned int nr_frags, nr_txbds, length, fcb_length = GMAC_FCB_LEN; 2026 2027 /* 2028 * TOE=1 frames larger than 2500 bytes may see excess delays 2029 * before start of transmission. 2030 */ 2031 if (unlikely(gfar_has_errata(priv, GFAR_ERRATA_76) && 2032 skb->ip_summed == CHECKSUM_PARTIAL && 2033 skb->len > 2500)) { 2034 int ret; 2035 2036 ret = skb_checksum_help(skb); 2037 if (ret) 2038 return ret; 2039 } 2040 2041 rq = skb->queue_mapping; 2042 tx_queue = priv->tx_queue[rq]; 2043 txq = netdev_get_tx_queue(dev, rq); 2044 base = tx_queue->tx_bd_base; 2045 regs = tx_queue->grp->regs; 2046 2047 /* check if time stamp should be generated */ 2048 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP && 2049 priv->hwts_tx_en)) { 2050 do_tstamp = 1; 2051 fcb_length = GMAC_FCB_LEN + GMAC_TXPAL_LEN; 2052 } 2053 2054 /* make space for additional header when fcb is needed */ 2055 if (((skb->ip_summed == CHECKSUM_PARTIAL) || 2056 vlan_tx_tag_present(skb) || 2057 unlikely(do_tstamp)) && 2058 (skb_headroom(skb) < fcb_length)) { 2059 struct sk_buff *skb_new; 2060 2061 skb_new = skb_realloc_headroom(skb, fcb_length); 2062 if (!skb_new) { 2063 dev->stats.tx_errors++; 2064 kfree_skb(skb); 2065 return NETDEV_TX_OK; 2066 } 2067 2068 /* Steal sock reference for processing TX time stamps */ 2069 swap(skb_new->sk, skb->sk); 2070 swap(skb_new->destructor, skb->destructor); 2071 kfree_skb(skb); 2072 skb = skb_new; 2073 } 2074 2075 /* total number of fragments in the SKB */ 2076 nr_frags = skb_shinfo(skb)->nr_frags; 2077 2078 /* calculate the required number of TxBDs for this skb */ 2079 if (unlikely(do_tstamp)) 2080 nr_txbds = nr_frags + 2; 2081 else 2082 nr_txbds = nr_frags + 1; 2083 2084 /* check if there is space to queue this packet */ 2085 if (nr_txbds > tx_queue->num_txbdfree) { 2086 /* no space, stop the queue */ 2087 netif_tx_stop_queue(txq); 2088 dev->stats.tx_fifo_errors++; 2089 return NETDEV_TX_BUSY; 2090 } 2091 2092 /* Update transmit stats */ 2093 tx_queue->stats.tx_bytes += skb->len; 2094 tx_queue->stats.tx_packets++; 2095 2096 txbdp = txbdp_start = tx_queue->cur_tx; 2097 lstatus = txbdp->lstatus; 2098 2099 /* Time stamp insertion requires one additional TxBD */ 2100 if (unlikely(do_tstamp)) 2101 txbdp_tstamp = txbdp = next_txbd(txbdp, base, 2102 tx_queue->tx_ring_size); 2103 2104 if (nr_frags == 0) { 2105 if (unlikely(do_tstamp)) 2106 txbdp_tstamp->lstatus |= BD_LFLAG(TXBD_LAST | 2107 TXBD_INTERRUPT); 2108 else 2109 lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT); 2110 } else { 2111 /* Place the fragment addresses and lengths into the TxBDs */ 2112 for (i = 0; i < nr_frags; i++) { 2113 /* Point at the next BD, wrapping as needed */ 2114 txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size); 2115 2116 length = skb_shinfo(skb)->frags[i].size; 2117 2118 lstatus = txbdp->lstatus | length | 2119 BD_LFLAG(TXBD_READY); 2120 2121 /* Handle the last BD specially */ 2122 if (i == nr_frags - 1) 2123 lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT); 2124 2125 bufaddr = skb_frag_dma_map(&priv->ofdev->dev, 2126 &skb_shinfo(skb)->frags[i], 2127 0, 2128 length, 2129 DMA_TO_DEVICE); 2130 2131 /* set the TxBD length and buffer pointer */ 2132 txbdp->bufPtr = bufaddr; 2133 txbdp->lstatus = lstatus; 2134 } 2135 2136 lstatus = txbdp_start->lstatus; 2137 } 2138 2139 /* Add TxPAL between FCB and frame if required */ 2140 if (unlikely(do_tstamp)) { 2141 skb_push(skb, GMAC_TXPAL_LEN); 2142 memset(skb->data, 0, GMAC_TXPAL_LEN); 2143 } 2144 2145 /* Set up checksumming */ 2146 if (CHECKSUM_PARTIAL == skb->ip_summed) { 2147 fcb = gfar_add_fcb(skb); 2148 /* as specified by errata */ 2149 if (unlikely(gfar_has_errata(priv, GFAR_ERRATA_12) 2150 && ((unsigned long)fcb % 0x20) > 0x18)) { 2151 __skb_pull(skb, GMAC_FCB_LEN); 2152 skb_checksum_help(skb); 2153 } else { 2154 lstatus |= BD_LFLAG(TXBD_TOE); 2155 gfar_tx_checksum(skb, fcb, fcb_length); 2156 } 2157 } 2158 2159 if (vlan_tx_tag_present(skb)) { 2160 if (unlikely(NULL == fcb)) { 2161 fcb = gfar_add_fcb(skb); 2162 lstatus |= BD_LFLAG(TXBD_TOE); 2163 } 2164 2165 gfar_tx_vlan(skb, fcb); 2166 } 2167 2168 /* Setup tx hardware time stamping if requested */ 2169 if (unlikely(do_tstamp)) { 2170 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; 2171 if (fcb == NULL) 2172 fcb = gfar_add_fcb(skb); 2173 fcb->ptp = 1; 2174 lstatus |= BD_LFLAG(TXBD_TOE); 2175 } 2176 2177 txbdp_start->bufPtr = dma_map_single(&priv->ofdev->dev, skb->data, 2178 skb_headlen(skb), DMA_TO_DEVICE); 2179 2180 /* 2181 * If time stamping is requested one additional TxBD must be set up. The 2182 * first TxBD points to the FCB and must have a data length of 2183 * GMAC_FCB_LEN. The second TxBD points to the actual frame data with 2184 * the full frame length. 2185 */ 2186 if (unlikely(do_tstamp)) { 2187 txbdp_tstamp->bufPtr = txbdp_start->bufPtr + fcb_length; 2188 txbdp_tstamp->lstatus |= BD_LFLAG(TXBD_READY) | 2189 (skb_headlen(skb) - fcb_length); 2190 lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | GMAC_FCB_LEN; 2191 } else { 2192 lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | skb_headlen(skb); 2193 } 2194 2195 netdev_tx_sent_queue(txq, skb->len); 2196 2197 /* 2198 * We can work in parallel with gfar_clean_tx_ring(), except 2199 * when modifying num_txbdfree. Note that we didn't grab the lock 2200 * when we were reading the num_txbdfree and checking for available 2201 * space, that's because outside of this function it can only grow, 2202 * and once we've got needed space, it cannot suddenly disappear. 2203 * 2204 * The lock also protects us from gfar_error(), which can modify 2205 * regs->tstat and thus retrigger the transfers, which is why we 2206 * also must grab the lock before setting ready bit for the first 2207 * to be transmitted BD. 2208 */ 2209 spin_lock_irqsave(&tx_queue->txlock, flags); 2210 2211 /* 2212 * The powerpc-specific eieio() is used, as wmb() has too strong 2213 * semantics (it requires synchronization between cacheable and 2214 * uncacheable mappings, which eieio doesn't provide and which we 2215 * don't need), thus requiring a more expensive sync instruction. At 2216 * some point, the set of architecture-independent barrier functions 2217 * should be expanded to include weaker barriers. 2218 */ 2219 eieio(); 2220 2221 txbdp_start->lstatus = lstatus; 2222 2223 eieio(); /* force lstatus write before tx_skbuff */ 2224 2225 tx_queue->tx_skbuff[tx_queue->skb_curtx] = skb; 2226 2227 /* Update the current skb pointer to the next entry we will use 2228 * (wrapping if necessary) */ 2229 tx_queue->skb_curtx = (tx_queue->skb_curtx + 1) & 2230 TX_RING_MOD_MASK(tx_queue->tx_ring_size); 2231 2232 tx_queue->cur_tx = next_txbd(txbdp, base, tx_queue->tx_ring_size); 2233 2234 /* reduce TxBD free count */ 2235 tx_queue->num_txbdfree -= (nr_txbds); 2236 2237 /* If the next BD still needs to be cleaned up, then the bds 2238 are full. We need to tell the kernel to stop sending us stuff. */ 2239 if (!tx_queue->num_txbdfree) { 2240 netif_tx_stop_queue(txq); 2241 2242 dev->stats.tx_fifo_errors++; 2243 } 2244 2245 /* Tell the DMA to go go go */ 2246 gfar_write(®s->tstat, TSTAT_CLEAR_THALT >> tx_queue->qindex); 2247 2248 /* Unlock priv */ 2249 spin_unlock_irqrestore(&tx_queue->txlock, flags); 2250 2251 return NETDEV_TX_OK; 2252 } 2253 2254 /* Stops the kernel queue, and halts the controller */ 2255 static int gfar_close(struct net_device *dev) 2256 { 2257 struct gfar_private *priv = netdev_priv(dev); 2258 2259 disable_napi(priv); 2260 2261 cancel_work_sync(&priv->reset_task); 2262 stop_gfar(dev); 2263 2264 /* Disconnect from the PHY */ 2265 phy_disconnect(priv->phydev); 2266 priv->phydev = NULL; 2267 2268 netif_tx_stop_all_queues(dev); 2269 2270 return 0; 2271 } 2272 2273 /* Changes the mac address if the controller is not running. */ 2274 static int gfar_set_mac_address(struct net_device *dev) 2275 { 2276 gfar_set_mac_for_addr(dev, 0, dev->dev_addr); 2277 2278 return 0; 2279 } 2280 2281 /* Check if rx parser should be activated */ 2282 void gfar_check_rx_parser_mode(struct gfar_private *priv) 2283 { 2284 struct gfar __iomem *regs; 2285 u32 tempval; 2286 2287 regs = priv->gfargrp[0].regs; 2288 2289 tempval = gfar_read(®s->rctrl); 2290 /* If parse is no longer required, then disable parser */ 2291 if (tempval & RCTRL_REQ_PARSER) 2292 tempval |= RCTRL_PRSDEP_INIT; 2293 else 2294 tempval &= ~RCTRL_PRSDEP_INIT; 2295 gfar_write(®s->rctrl, tempval); 2296 } 2297 2298 /* Enables and disables VLAN insertion/extraction */ 2299 void gfar_vlan_mode(struct net_device *dev, netdev_features_t features) 2300 { 2301 struct gfar_private *priv = netdev_priv(dev); 2302 struct gfar __iomem *regs = NULL; 2303 unsigned long flags; 2304 u32 tempval; 2305 2306 regs = priv->gfargrp[0].regs; 2307 local_irq_save(flags); 2308 lock_rx_qs(priv); 2309 2310 if (features & NETIF_F_HW_VLAN_TX) { 2311 /* Enable VLAN tag insertion */ 2312 tempval = gfar_read(®s->tctrl); 2313 tempval |= TCTRL_VLINS; 2314 gfar_write(®s->tctrl, tempval); 2315 } else { 2316 /* Disable VLAN tag insertion */ 2317 tempval = gfar_read(®s->tctrl); 2318 tempval &= ~TCTRL_VLINS; 2319 gfar_write(®s->tctrl, tempval); 2320 } 2321 2322 if (features & NETIF_F_HW_VLAN_RX) { 2323 /* Enable VLAN tag extraction */ 2324 tempval = gfar_read(®s->rctrl); 2325 tempval |= (RCTRL_VLEX | RCTRL_PRSDEP_INIT); 2326 gfar_write(®s->rctrl, tempval); 2327 } else { 2328 /* Disable VLAN tag extraction */ 2329 tempval = gfar_read(®s->rctrl); 2330 tempval &= ~RCTRL_VLEX; 2331 gfar_write(®s->rctrl, tempval); 2332 2333 gfar_check_rx_parser_mode(priv); 2334 } 2335 2336 gfar_change_mtu(dev, dev->mtu); 2337 2338 unlock_rx_qs(priv); 2339 local_irq_restore(flags); 2340 } 2341 2342 static int gfar_change_mtu(struct net_device *dev, int new_mtu) 2343 { 2344 int tempsize, tempval; 2345 struct gfar_private *priv = netdev_priv(dev); 2346 struct gfar __iomem *regs = priv->gfargrp[0].regs; 2347 int oldsize = priv->rx_buffer_size; 2348 int frame_size = new_mtu + ETH_HLEN; 2349 2350 if (gfar_is_vlan_on(priv)) 2351 frame_size += VLAN_HLEN; 2352 2353 if ((frame_size < 64) || (frame_size > JUMBO_FRAME_SIZE)) { 2354 netif_err(priv, drv, dev, "Invalid MTU setting\n"); 2355 return -EINVAL; 2356 } 2357 2358 if (gfar_uses_fcb(priv)) 2359 frame_size += GMAC_FCB_LEN; 2360 2361 frame_size += priv->padding; 2362 2363 tempsize = 2364 (frame_size & ~(INCREMENTAL_BUFFER_SIZE - 1)) + 2365 INCREMENTAL_BUFFER_SIZE; 2366 2367 /* Only stop and start the controller if it isn't already 2368 * stopped, and we changed something */ 2369 if ((oldsize != tempsize) && (dev->flags & IFF_UP)) 2370 stop_gfar(dev); 2371 2372 priv->rx_buffer_size = tempsize; 2373 2374 dev->mtu = new_mtu; 2375 2376 gfar_write(®s->mrblr, priv->rx_buffer_size); 2377 gfar_write(®s->maxfrm, priv->rx_buffer_size); 2378 2379 /* If the mtu is larger than the max size for standard 2380 * ethernet frames (ie, a jumbo frame), then set maccfg2 2381 * to allow huge frames, and to check the length */ 2382 tempval = gfar_read(®s->maccfg2); 2383 2384 if (priv->rx_buffer_size > DEFAULT_RX_BUFFER_SIZE || 2385 gfar_has_errata(priv, GFAR_ERRATA_74)) 2386 tempval |= (MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK); 2387 else 2388 tempval &= ~(MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK); 2389 2390 gfar_write(®s->maccfg2, tempval); 2391 2392 if ((oldsize != tempsize) && (dev->flags & IFF_UP)) 2393 startup_gfar(dev); 2394 2395 return 0; 2396 } 2397 2398 /* gfar_reset_task gets scheduled when a packet has not been 2399 * transmitted after a set amount of time. 2400 * For now, assume that clearing out all the structures, and 2401 * starting over will fix the problem. 2402 */ 2403 static void gfar_reset_task(struct work_struct *work) 2404 { 2405 struct gfar_private *priv = container_of(work, struct gfar_private, 2406 reset_task); 2407 struct net_device *dev = priv->ndev; 2408 2409 if (dev->flags & IFF_UP) { 2410 netif_tx_stop_all_queues(dev); 2411 stop_gfar(dev); 2412 startup_gfar(dev); 2413 netif_tx_start_all_queues(dev); 2414 } 2415 2416 netif_tx_schedule_all(dev); 2417 } 2418 2419 static void gfar_timeout(struct net_device *dev) 2420 { 2421 struct gfar_private *priv = netdev_priv(dev); 2422 2423 dev->stats.tx_errors++; 2424 schedule_work(&priv->reset_task); 2425 } 2426 2427 static void gfar_align_skb(struct sk_buff *skb) 2428 { 2429 /* We need the data buffer to be aligned properly. We will reserve 2430 * as many bytes as needed to align the data properly 2431 */ 2432 skb_reserve(skb, RXBUF_ALIGNMENT - 2433 (((unsigned long) skb->data) & (RXBUF_ALIGNMENT - 1))); 2434 } 2435 2436 /* Interrupt Handler for Transmit complete */ 2437 static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue) 2438 { 2439 struct net_device *dev = tx_queue->dev; 2440 struct netdev_queue *txq; 2441 struct gfar_private *priv = netdev_priv(dev); 2442 struct gfar_priv_rx_q *rx_queue = NULL; 2443 struct txbd8 *bdp, *next = NULL; 2444 struct txbd8 *lbdp = NULL; 2445 struct txbd8 *base = tx_queue->tx_bd_base; 2446 struct sk_buff *skb; 2447 int skb_dirtytx; 2448 int tx_ring_size = tx_queue->tx_ring_size; 2449 int frags = 0, nr_txbds = 0; 2450 int i; 2451 int howmany = 0; 2452 int tqi = tx_queue->qindex; 2453 unsigned int bytes_sent = 0; 2454 u32 lstatus; 2455 size_t buflen; 2456 2457 rx_queue = priv->rx_queue[tqi]; 2458 txq = netdev_get_tx_queue(dev, tqi); 2459 bdp = tx_queue->dirty_tx; 2460 skb_dirtytx = tx_queue->skb_dirtytx; 2461 2462 while ((skb = tx_queue->tx_skbuff[skb_dirtytx])) { 2463 unsigned long flags; 2464 2465 frags = skb_shinfo(skb)->nr_frags; 2466 2467 /* 2468 * When time stamping, one additional TxBD must be freed. 2469 * Also, we need to dma_unmap_single() the TxPAL. 2470 */ 2471 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) 2472 nr_txbds = frags + 2; 2473 else 2474 nr_txbds = frags + 1; 2475 2476 lbdp = skip_txbd(bdp, nr_txbds - 1, base, tx_ring_size); 2477 2478 lstatus = lbdp->lstatus; 2479 2480 /* Only clean completed frames */ 2481 if ((lstatus & BD_LFLAG(TXBD_READY)) && 2482 (lstatus & BD_LENGTH_MASK)) 2483 break; 2484 2485 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) { 2486 next = next_txbd(bdp, base, tx_ring_size); 2487 buflen = next->length + GMAC_FCB_LEN + GMAC_TXPAL_LEN; 2488 } else 2489 buflen = bdp->length; 2490 2491 dma_unmap_single(&priv->ofdev->dev, bdp->bufPtr, 2492 buflen, DMA_TO_DEVICE); 2493 2494 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) { 2495 struct skb_shared_hwtstamps shhwtstamps; 2496 u64 *ns = (u64*) (((u32)skb->data + 0x10) & ~0x7); 2497 memset(&shhwtstamps, 0, sizeof(shhwtstamps)); 2498 shhwtstamps.hwtstamp = ns_to_ktime(*ns); 2499 skb_pull(skb, GMAC_FCB_LEN + GMAC_TXPAL_LEN); 2500 skb_tstamp_tx(skb, &shhwtstamps); 2501 bdp->lstatus &= BD_LFLAG(TXBD_WRAP); 2502 bdp = next; 2503 } 2504 2505 bdp->lstatus &= BD_LFLAG(TXBD_WRAP); 2506 bdp = next_txbd(bdp, base, tx_ring_size); 2507 2508 for (i = 0; i < frags; i++) { 2509 dma_unmap_page(&priv->ofdev->dev, 2510 bdp->bufPtr, 2511 bdp->length, 2512 DMA_TO_DEVICE); 2513 bdp->lstatus &= BD_LFLAG(TXBD_WRAP); 2514 bdp = next_txbd(bdp, base, tx_ring_size); 2515 } 2516 2517 bytes_sent += skb->len; 2518 2519 /* 2520 * If there's room in the queue (limit it to rx_buffer_size) 2521 * we add this skb back into the pool, if it's the right size 2522 */ 2523 if (skb_queue_len(&priv->rx_recycle) < rx_queue->rx_ring_size && 2524 skb_recycle_check(skb, priv->rx_buffer_size + 2525 RXBUF_ALIGNMENT)) { 2526 gfar_align_skb(skb); 2527 skb_queue_head(&priv->rx_recycle, skb); 2528 } else 2529 dev_kfree_skb_any(skb); 2530 2531 tx_queue->tx_skbuff[skb_dirtytx] = NULL; 2532 2533 skb_dirtytx = (skb_dirtytx + 1) & 2534 TX_RING_MOD_MASK(tx_ring_size); 2535 2536 howmany++; 2537 spin_lock_irqsave(&tx_queue->txlock, flags); 2538 tx_queue->num_txbdfree += nr_txbds; 2539 spin_unlock_irqrestore(&tx_queue->txlock, flags); 2540 } 2541 2542 /* If we freed a buffer, we can restart transmission, if necessary */ 2543 if (netif_tx_queue_stopped(txq) && tx_queue->num_txbdfree) 2544 netif_wake_subqueue(dev, tqi); 2545 2546 /* Update dirty indicators */ 2547 tx_queue->skb_dirtytx = skb_dirtytx; 2548 tx_queue->dirty_tx = bdp; 2549 2550 netdev_tx_completed_queue(txq, howmany, bytes_sent); 2551 2552 return howmany; 2553 } 2554 2555 static void gfar_schedule_cleanup(struct gfar_priv_grp *gfargrp) 2556 { 2557 unsigned long flags; 2558 2559 spin_lock_irqsave(&gfargrp->grplock, flags); 2560 if (napi_schedule_prep(&gfargrp->napi)) { 2561 gfar_write(&gfargrp->regs->imask, IMASK_RTX_DISABLED); 2562 __napi_schedule(&gfargrp->napi); 2563 } else { 2564 /* 2565 * Clear IEVENT, so interrupts aren't called again 2566 * because of the packets that have already arrived. 2567 */ 2568 gfar_write(&gfargrp->regs->ievent, IEVENT_RTX_MASK); 2569 } 2570 spin_unlock_irqrestore(&gfargrp->grplock, flags); 2571 2572 } 2573 2574 /* Interrupt Handler for Transmit complete */ 2575 static irqreturn_t gfar_transmit(int irq, void *grp_id) 2576 { 2577 gfar_schedule_cleanup((struct gfar_priv_grp *)grp_id); 2578 return IRQ_HANDLED; 2579 } 2580 2581 static void gfar_new_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp, 2582 struct sk_buff *skb) 2583 { 2584 struct net_device *dev = rx_queue->dev; 2585 struct gfar_private *priv = netdev_priv(dev); 2586 dma_addr_t buf; 2587 2588 buf = dma_map_single(&priv->ofdev->dev, skb->data, 2589 priv->rx_buffer_size, DMA_FROM_DEVICE); 2590 gfar_init_rxbdp(rx_queue, bdp, buf); 2591 } 2592 2593 static struct sk_buff * gfar_alloc_skb(struct net_device *dev) 2594 { 2595 struct gfar_private *priv = netdev_priv(dev); 2596 struct sk_buff *skb = NULL; 2597 2598 skb = netdev_alloc_skb(dev, priv->rx_buffer_size + RXBUF_ALIGNMENT); 2599 if (!skb) 2600 return NULL; 2601 2602 gfar_align_skb(skb); 2603 2604 return skb; 2605 } 2606 2607 struct sk_buff * gfar_new_skb(struct net_device *dev) 2608 { 2609 struct gfar_private *priv = netdev_priv(dev); 2610 struct sk_buff *skb = NULL; 2611 2612 skb = skb_dequeue(&priv->rx_recycle); 2613 if (!skb) 2614 skb = gfar_alloc_skb(dev); 2615 2616 return skb; 2617 } 2618 2619 static inline void count_errors(unsigned short status, struct net_device *dev) 2620 { 2621 struct gfar_private *priv = netdev_priv(dev); 2622 struct net_device_stats *stats = &dev->stats; 2623 struct gfar_extra_stats *estats = &priv->extra_stats; 2624 2625 /* If the packet was truncated, none of the other errors 2626 * matter */ 2627 if (status & RXBD_TRUNCATED) { 2628 stats->rx_length_errors++; 2629 2630 estats->rx_trunc++; 2631 2632 return; 2633 } 2634 /* Count the errors, if there were any */ 2635 if (status & (RXBD_LARGE | RXBD_SHORT)) { 2636 stats->rx_length_errors++; 2637 2638 if (status & RXBD_LARGE) 2639 estats->rx_large++; 2640 else 2641 estats->rx_short++; 2642 } 2643 if (status & RXBD_NONOCTET) { 2644 stats->rx_frame_errors++; 2645 estats->rx_nonoctet++; 2646 } 2647 if (status & RXBD_CRCERR) { 2648 estats->rx_crcerr++; 2649 stats->rx_crc_errors++; 2650 } 2651 if (status & RXBD_OVERRUN) { 2652 estats->rx_overrun++; 2653 stats->rx_crc_errors++; 2654 } 2655 } 2656 2657 irqreturn_t gfar_receive(int irq, void *grp_id) 2658 { 2659 gfar_schedule_cleanup((struct gfar_priv_grp *)grp_id); 2660 return IRQ_HANDLED; 2661 } 2662 2663 static inline void gfar_rx_checksum(struct sk_buff *skb, struct rxfcb *fcb) 2664 { 2665 /* If valid headers were found, and valid sums 2666 * were verified, then we tell the kernel that no 2667 * checksumming is necessary. Otherwise, it is */ 2668 if ((fcb->flags & RXFCB_CSUM_MASK) == (RXFCB_CIP | RXFCB_CTU)) 2669 skb->ip_summed = CHECKSUM_UNNECESSARY; 2670 else 2671 skb_checksum_none_assert(skb); 2672 } 2673 2674 2675 /* gfar_process_frame() -- handle one incoming packet if skb 2676 * isn't NULL. */ 2677 static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb, 2678 int amount_pull) 2679 { 2680 struct gfar_private *priv = netdev_priv(dev); 2681 struct rxfcb *fcb = NULL; 2682 2683 int ret; 2684 2685 /* fcb is at the beginning if exists */ 2686 fcb = (struct rxfcb *)skb->data; 2687 2688 /* Remove the FCB from the skb */ 2689 /* Remove the padded bytes, if there are any */ 2690 if (amount_pull) { 2691 skb_record_rx_queue(skb, fcb->rq); 2692 skb_pull(skb, amount_pull); 2693 } 2694 2695 /* Get receive timestamp from the skb */ 2696 if (priv->hwts_rx_en) { 2697 struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb); 2698 u64 *ns = (u64 *) skb->data; 2699 memset(shhwtstamps, 0, sizeof(*shhwtstamps)); 2700 shhwtstamps->hwtstamp = ns_to_ktime(*ns); 2701 } 2702 2703 if (priv->padding) 2704 skb_pull(skb, priv->padding); 2705 2706 if (dev->features & NETIF_F_RXCSUM) 2707 gfar_rx_checksum(skb, fcb); 2708 2709 /* Tell the skb what kind of packet this is */ 2710 skb->protocol = eth_type_trans(skb, dev); 2711 2712 /* 2713 * There's need to check for NETIF_F_HW_VLAN_RX here. 2714 * Even if vlan rx accel is disabled, on some chips 2715 * RXFCB_VLN is pseudo randomly set. 2716 */ 2717 if (dev->features & NETIF_F_HW_VLAN_RX && 2718 fcb->flags & RXFCB_VLN) 2719 __vlan_hwaccel_put_tag(skb, fcb->vlctl); 2720 2721 /* Send the packet up the stack */ 2722 ret = netif_receive_skb(skb); 2723 2724 if (NET_RX_DROP == ret) 2725 priv->extra_stats.kernel_dropped++; 2726 2727 return 0; 2728 } 2729 2730 /* gfar_clean_rx_ring() -- Processes each frame in the rx ring 2731 * until the budget/quota has been reached. Returns the number 2732 * of frames handled 2733 */ 2734 int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit) 2735 { 2736 struct net_device *dev = rx_queue->dev; 2737 struct rxbd8 *bdp, *base; 2738 struct sk_buff *skb; 2739 int pkt_len; 2740 int amount_pull; 2741 int howmany = 0; 2742 struct gfar_private *priv = netdev_priv(dev); 2743 2744 /* Get the first full descriptor */ 2745 bdp = rx_queue->cur_rx; 2746 base = rx_queue->rx_bd_base; 2747 2748 amount_pull = (gfar_uses_fcb(priv) ? GMAC_FCB_LEN : 0); 2749 2750 while (!((bdp->status & RXBD_EMPTY) || (--rx_work_limit < 0))) { 2751 struct sk_buff *newskb; 2752 rmb(); 2753 2754 /* Add another skb for the future */ 2755 newskb = gfar_new_skb(dev); 2756 2757 skb = rx_queue->rx_skbuff[rx_queue->skb_currx]; 2758 2759 dma_unmap_single(&priv->ofdev->dev, bdp->bufPtr, 2760 priv->rx_buffer_size, DMA_FROM_DEVICE); 2761 2762 if (unlikely(!(bdp->status & RXBD_ERR) && 2763 bdp->length > priv->rx_buffer_size)) 2764 bdp->status = RXBD_LARGE; 2765 2766 /* We drop the frame if we failed to allocate a new buffer */ 2767 if (unlikely(!newskb || !(bdp->status & RXBD_LAST) || 2768 bdp->status & RXBD_ERR)) { 2769 count_errors(bdp->status, dev); 2770 2771 if (unlikely(!newskb)) 2772 newskb = skb; 2773 else if (skb) 2774 skb_queue_head(&priv->rx_recycle, skb); 2775 } else { 2776 /* Increment the number of packets */ 2777 rx_queue->stats.rx_packets++; 2778 howmany++; 2779 2780 if (likely(skb)) { 2781 pkt_len = bdp->length - ETH_FCS_LEN; 2782 /* Remove the FCS from the packet length */ 2783 skb_put(skb, pkt_len); 2784 rx_queue->stats.rx_bytes += pkt_len; 2785 skb_record_rx_queue(skb, rx_queue->qindex); 2786 gfar_process_frame(dev, skb, amount_pull); 2787 2788 } else { 2789 netif_warn(priv, rx_err, dev, "Missing skb!\n"); 2790 rx_queue->stats.rx_dropped++; 2791 priv->extra_stats.rx_skbmissing++; 2792 } 2793 2794 } 2795 2796 rx_queue->rx_skbuff[rx_queue->skb_currx] = newskb; 2797 2798 /* Setup the new bdp */ 2799 gfar_new_rxbdp(rx_queue, bdp, newskb); 2800 2801 /* Update to the next pointer */ 2802 bdp = next_bd(bdp, base, rx_queue->rx_ring_size); 2803 2804 /* update to point at the next skb */ 2805 rx_queue->skb_currx = 2806 (rx_queue->skb_currx + 1) & 2807 RX_RING_MOD_MASK(rx_queue->rx_ring_size); 2808 } 2809 2810 /* Update the current rxbd pointer to be the next one */ 2811 rx_queue->cur_rx = bdp; 2812 2813 return howmany; 2814 } 2815 2816 static int gfar_poll(struct napi_struct *napi, int budget) 2817 { 2818 struct gfar_priv_grp *gfargrp = container_of(napi, 2819 struct gfar_priv_grp, napi); 2820 struct gfar_private *priv = gfargrp->priv; 2821 struct gfar __iomem *regs = gfargrp->regs; 2822 struct gfar_priv_tx_q *tx_queue = NULL; 2823 struct gfar_priv_rx_q *rx_queue = NULL; 2824 int rx_cleaned = 0, budget_per_queue = 0, rx_cleaned_per_queue = 0; 2825 int tx_cleaned = 0, i, left_over_budget = budget; 2826 unsigned long serviced_queues = 0; 2827 int num_queues = 0; 2828 2829 num_queues = gfargrp->num_rx_queues; 2830 budget_per_queue = budget/num_queues; 2831 2832 /* Clear IEVENT, so interrupts aren't called again 2833 * because of the packets that have already arrived */ 2834 gfar_write(®s->ievent, IEVENT_RTX_MASK); 2835 2836 while (num_queues && left_over_budget) { 2837 2838 budget_per_queue = left_over_budget/num_queues; 2839 left_over_budget = 0; 2840 2841 for_each_set_bit(i, &gfargrp->rx_bit_map, priv->num_rx_queues) { 2842 if (test_bit(i, &serviced_queues)) 2843 continue; 2844 rx_queue = priv->rx_queue[i]; 2845 tx_queue = priv->tx_queue[rx_queue->qindex]; 2846 2847 tx_cleaned += gfar_clean_tx_ring(tx_queue); 2848 rx_cleaned_per_queue = gfar_clean_rx_ring(rx_queue, 2849 budget_per_queue); 2850 rx_cleaned += rx_cleaned_per_queue; 2851 if(rx_cleaned_per_queue < budget_per_queue) { 2852 left_over_budget = left_over_budget + 2853 (budget_per_queue - rx_cleaned_per_queue); 2854 set_bit(i, &serviced_queues); 2855 num_queues--; 2856 } 2857 } 2858 } 2859 2860 if (tx_cleaned) 2861 return budget; 2862 2863 if (rx_cleaned < budget) { 2864 napi_complete(napi); 2865 2866 /* Clear the halt bit in RSTAT */ 2867 gfar_write(®s->rstat, gfargrp->rstat); 2868 2869 gfar_write(®s->imask, IMASK_DEFAULT); 2870 2871 /* If we are coalescing interrupts, update the timer */ 2872 /* Otherwise, clear it */ 2873 gfar_configure_coalescing(priv, 2874 gfargrp->rx_bit_map, gfargrp->tx_bit_map); 2875 } 2876 2877 return rx_cleaned; 2878 } 2879 2880 #ifdef CONFIG_NET_POLL_CONTROLLER 2881 /* 2882 * Polling 'interrupt' - used by things like netconsole to send skbs 2883 * without having to re-enable interrupts. It's not called while 2884 * the interrupt routine is executing. 2885 */ 2886 static void gfar_netpoll(struct net_device *dev) 2887 { 2888 struct gfar_private *priv = netdev_priv(dev); 2889 int i = 0; 2890 2891 /* If the device has multiple interrupts, run tx/rx */ 2892 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) { 2893 for (i = 0; i < priv->num_grps; i++) { 2894 disable_irq(priv->gfargrp[i].interruptTransmit); 2895 disable_irq(priv->gfargrp[i].interruptReceive); 2896 disable_irq(priv->gfargrp[i].interruptError); 2897 gfar_interrupt(priv->gfargrp[i].interruptTransmit, 2898 &priv->gfargrp[i]); 2899 enable_irq(priv->gfargrp[i].interruptError); 2900 enable_irq(priv->gfargrp[i].interruptReceive); 2901 enable_irq(priv->gfargrp[i].interruptTransmit); 2902 } 2903 } else { 2904 for (i = 0; i < priv->num_grps; i++) { 2905 disable_irq(priv->gfargrp[i].interruptTransmit); 2906 gfar_interrupt(priv->gfargrp[i].interruptTransmit, 2907 &priv->gfargrp[i]); 2908 enable_irq(priv->gfargrp[i].interruptTransmit); 2909 } 2910 } 2911 } 2912 #endif 2913 2914 /* The interrupt handler for devices with one interrupt */ 2915 static irqreturn_t gfar_interrupt(int irq, void *grp_id) 2916 { 2917 struct gfar_priv_grp *gfargrp = grp_id; 2918 2919 /* Save ievent for future reference */ 2920 u32 events = gfar_read(&gfargrp->regs->ievent); 2921 2922 /* Check for reception */ 2923 if (events & IEVENT_RX_MASK) 2924 gfar_receive(irq, grp_id); 2925 2926 /* Check for transmit completion */ 2927 if (events & IEVENT_TX_MASK) 2928 gfar_transmit(irq, grp_id); 2929 2930 /* Check for errors */ 2931 if (events & IEVENT_ERR_MASK) 2932 gfar_error(irq, grp_id); 2933 2934 return IRQ_HANDLED; 2935 } 2936 2937 /* Called every time the controller might need to be made 2938 * aware of new link state. The PHY code conveys this 2939 * information through variables in the phydev structure, and this 2940 * function converts those variables into the appropriate 2941 * register values, and can bring down the device if needed. 2942 */ 2943 static void adjust_link(struct net_device *dev) 2944 { 2945 struct gfar_private *priv = netdev_priv(dev); 2946 struct gfar __iomem *regs = priv->gfargrp[0].regs; 2947 unsigned long flags; 2948 struct phy_device *phydev = priv->phydev; 2949 int new_state = 0; 2950 2951 local_irq_save(flags); 2952 lock_tx_qs(priv); 2953 2954 if (phydev->link) { 2955 u32 tempval = gfar_read(®s->maccfg2); 2956 u32 ecntrl = gfar_read(®s->ecntrl); 2957 2958 /* Now we make sure that we can be in full duplex mode. 2959 * If not, we operate in half-duplex mode. */ 2960 if (phydev->duplex != priv->oldduplex) { 2961 new_state = 1; 2962 if (!(phydev->duplex)) 2963 tempval &= ~(MACCFG2_FULL_DUPLEX); 2964 else 2965 tempval |= MACCFG2_FULL_DUPLEX; 2966 2967 priv->oldduplex = phydev->duplex; 2968 } 2969 2970 if (phydev->speed != priv->oldspeed) { 2971 new_state = 1; 2972 switch (phydev->speed) { 2973 case 1000: 2974 tempval = 2975 ((tempval & ~(MACCFG2_IF)) | MACCFG2_GMII); 2976 2977 ecntrl &= ~(ECNTRL_R100); 2978 break; 2979 case 100: 2980 case 10: 2981 tempval = 2982 ((tempval & ~(MACCFG2_IF)) | MACCFG2_MII); 2983 2984 /* Reduced mode distinguishes 2985 * between 10 and 100 */ 2986 if (phydev->speed == SPEED_100) 2987 ecntrl |= ECNTRL_R100; 2988 else 2989 ecntrl &= ~(ECNTRL_R100); 2990 break; 2991 default: 2992 netif_warn(priv, link, dev, 2993 "Ack! Speed (%d) is not 10/100/1000!\n", 2994 phydev->speed); 2995 break; 2996 } 2997 2998 priv->oldspeed = phydev->speed; 2999 } 3000 3001 gfar_write(®s->maccfg2, tempval); 3002 gfar_write(®s->ecntrl, ecntrl); 3003 3004 if (!priv->oldlink) { 3005 new_state = 1; 3006 priv->oldlink = 1; 3007 } 3008 } else if (priv->oldlink) { 3009 new_state = 1; 3010 priv->oldlink = 0; 3011 priv->oldspeed = 0; 3012 priv->oldduplex = -1; 3013 } 3014 3015 if (new_state && netif_msg_link(priv)) 3016 phy_print_status(phydev); 3017 unlock_tx_qs(priv); 3018 local_irq_restore(flags); 3019 } 3020 3021 /* Update the hash table based on the current list of multicast 3022 * addresses we subscribe to. Also, change the promiscuity of 3023 * the device based on the flags (this function is called 3024 * whenever dev->flags is changed */ 3025 static void gfar_set_multi(struct net_device *dev) 3026 { 3027 struct netdev_hw_addr *ha; 3028 struct gfar_private *priv = netdev_priv(dev); 3029 struct gfar __iomem *regs = priv->gfargrp[0].regs; 3030 u32 tempval; 3031 3032 if (dev->flags & IFF_PROMISC) { 3033 /* Set RCTRL to PROM */ 3034 tempval = gfar_read(®s->rctrl); 3035 tempval |= RCTRL_PROM; 3036 gfar_write(®s->rctrl, tempval); 3037 } else { 3038 /* Set RCTRL to not PROM */ 3039 tempval = gfar_read(®s->rctrl); 3040 tempval &= ~(RCTRL_PROM); 3041 gfar_write(®s->rctrl, tempval); 3042 } 3043 3044 if (dev->flags & IFF_ALLMULTI) { 3045 /* Set the hash to rx all multicast frames */ 3046 gfar_write(®s->igaddr0, 0xffffffff); 3047 gfar_write(®s->igaddr1, 0xffffffff); 3048 gfar_write(®s->igaddr2, 0xffffffff); 3049 gfar_write(®s->igaddr3, 0xffffffff); 3050 gfar_write(®s->igaddr4, 0xffffffff); 3051 gfar_write(®s->igaddr5, 0xffffffff); 3052 gfar_write(®s->igaddr6, 0xffffffff); 3053 gfar_write(®s->igaddr7, 0xffffffff); 3054 gfar_write(®s->gaddr0, 0xffffffff); 3055 gfar_write(®s->gaddr1, 0xffffffff); 3056 gfar_write(®s->gaddr2, 0xffffffff); 3057 gfar_write(®s->gaddr3, 0xffffffff); 3058 gfar_write(®s->gaddr4, 0xffffffff); 3059 gfar_write(®s->gaddr5, 0xffffffff); 3060 gfar_write(®s->gaddr6, 0xffffffff); 3061 gfar_write(®s->gaddr7, 0xffffffff); 3062 } else { 3063 int em_num; 3064 int idx; 3065 3066 /* zero out the hash */ 3067 gfar_write(®s->igaddr0, 0x0); 3068 gfar_write(®s->igaddr1, 0x0); 3069 gfar_write(®s->igaddr2, 0x0); 3070 gfar_write(®s->igaddr3, 0x0); 3071 gfar_write(®s->igaddr4, 0x0); 3072 gfar_write(®s->igaddr5, 0x0); 3073 gfar_write(®s->igaddr6, 0x0); 3074 gfar_write(®s->igaddr7, 0x0); 3075 gfar_write(®s->gaddr0, 0x0); 3076 gfar_write(®s->gaddr1, 0x0); 3077 gfar_write(®s->gaddr2, 0x0); 3078 gfar_write(®s->gaddr3, 0x0); 3079 gfar_write(®s->gaddr4, 0x0); 3080 gfar_write(®s->gaddr5, 0x0); 3081 gfar_write(®s->gaddr6, 0x0); 3082 gfar_write(®s->gaddr7, 0x0); 3083 3084 /* If we have extended hash tables, we need to 3085 * clear the exact match registers to prepare for 3086 * setting them */ 3087 if (priv->extended_hash) { 3088 em_num = GFAR_EM_NUM + 1; 3089 gfar_clear_exact_match(dev); 3090 idx = 1; 3091 } else { 3092 idx = 0; 3093 em_num = 0; 3094 } 3095 3096 if (netdev_mc_empty(dev)) 3097 return; 3098 3099 /* Parse the list, and set the appropriate bits */ 3100 netdev_for_each_mc_addr(ha, dev) { 3101 if (idx < em_num) { 3102 gfar_set_mac_for_addr(dev, idx, ha->addr); 3103 idx++; 3104 } else 3105 gfar_set_hash_for_addr(dev, ha->addr); 3106 } 3107 } 3108 } 3109 3110 3111 /* Clears each of the exact match registers to zero, so they 3112 * don't interfere with normal reception */ 3113 static void gfar_clear_exact_match(struct net_device *dev) 3114 { 3115 int idx; 3116 static const u8 zero_arr[ETH_ALEN] = {0, 0, 0, 0, 0, 0}; 3117 3118 for(idx = 1;idx < GFAR_EM_NUM + 1;idx++) 3119 gfar_set_mac_for_addr(dev, idx, zero_arr); 3120 } 3121 3122 /* Set the appropriate hash bit for the given addr */ 3123 /* The algorithm works like so: 3124 * 1) Take the Destination Address (ie the multicast address), and 3125 * do a CRC on it (little endian), and reverse the bits of the 3126 * result. 3127 * 2) Use the 8 most significant bits as a hash into a 256-entry 3128 * table. The table is controlled through 8 32-bit registers: 3129 * gaddr0-7. gaddr0's MSB is entry 0, and gaddr7's LSB is 3130 * gaddr7. This means that the 3 most significant bits in the 3131 * hash index which gaddr register to use, and the 5 other bits 3132 * indicate which bit (assuming an IBM numbering scheme, which 3133 * for PowerPC (tm) is usually the case) in the register holds 3134 * the entry. */ 3135 static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr) 3136 { 3137 u32 tempval; 3138 struct gfar_private *priv = netdev_priv(dev); 3139 u32 result = ether_crc(ETH_ALEN, addr); 3140 int width = priv->hash_width; 3141 u8 whichbit = (result >> (32 - width)) & 0x1f; 3142 u8 whichreg = result >> (32 - width + 5); 3143 u32 value = (1 << (31-whichbit)); 3144 3145 tempval = gfar_read(priv->hash_regs[whichreg]); 3146 tempval |= value; 3147 gfar_write(priv->hash_regs[whichreg], tempval); 3148 } 3149 3150 3151 /* There are multiple MAC Address register pairs on some controllers 3152 * This function sets the numth pair to a given address 3153 */ 3154 static void gfar_set_mac_for_addr(struct net_device *dev, int num, 3155 const u8 *addr) 3156 { 3157 struct gfar_private *priv = netdev_priv(dev); 3158 struct gfar __iomem *regs = priv->gfargrp[0].regs; 3159 int idx; 3160 char tmpbuf[ETH_ALEN]; 3161 u32 tempval; 3162 u32 __iomem *macptr = ®s->macstnaddr1; 3163 3164 macptr += num*2; 3165 3166 /* Now copy it into the mac registers backwards, cuz */ 3167 /* little endian is silly */ 3168 for (idx = 0; idx < ETH_ALEN; idx++) 3169 tmpbuf[ETH_ALEN - 1 - idx] = addr[idx]; 3170 3171 gfar_write(macptr, *((u32 *) (tmpbuf))); 3172 3173 tempval = *((u32 *) (tmpbuf + 4)); 3174 3175 gfar_write(macptr+1, tempval); 3176 } 3177 3178 /* GFAR error interrupt handler */ 3179 static irqreturn_t gfar_error(int irq, void *grp_id) 3180 { 3181 struct gfar_priv_grp *gfargrp = grp_id; 3182 struct gfar __iomem *regs = gfargrp->regs; 3183 struct gfar_private *priv= gfargrp->priv; 3184 struct net_device *dev = priv->ndev; 3185 3186 /* Save ievent for future reference */ 3187 u32 events = gfar_read(®s->ievent); 3188 3189 /* Clear IEVENT */ 3190 gfar_write(®s->ievent, events & IEVENT_ERR_MASK); 3191 3192 /* Magic Packet is not an error. */ 3193 if ((priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET) && 3194 (events & IEVENT_MAG)) 3195 events &= ~IEVENT_MAG; 3196 3197 /* Hmm... */ 3198 if (netif_msg_rx_err(priv) || netif_msg_tx_err(priv)) 3199 netdev_dbg(dev, "error interrupt (ievent=0x%08x imask=0x%08x)\n", 3200 events, gfar_read(®s->imask)); 3201 3202 /* Update the error counters */ 3203 if (events & IEVENT_TXE) { 3204 dev->stats.tx_errors++; 3205 3206 if (events & IEVENT_LC) 3207 dev->stats.tx_window_errors++; 3208 if (events & IEVENT_CRL) 3209 dev->stats.tx_aborted_errors++; 3210 if (events & IEVENT_XFUN) { 3211 unsigned long flags; 3212 3213 netif_dbg(priv, tx_err, dev, 3214 "TX FIFO underrun, packet dropped\n"); 3215 dev->stats.tx_dropped++; 3216 priv->extra_stats.tx_underrun++; 3217 3218 local_irq_save(flags); 3219 lock_tx_qs(priv); 3220 3221 /* Reactivate the Tx Queues */ 3222 gfar_write(®s->tstat, gfargrp->tstat); 3223 3224 unlock_tx_qs(priv); 3225 local_irq_restore(flags); 3226 } 3227 netif_dbg(priv, tx_err, dev, "Transmit Error\n"); 3228 } 3229 if (events & IEVENT_BSY) { 3230 dev->stats.rx_errors++; 3231 priv->extra_stats.rx_bsy++; 3232 3233 gfar_receive(irq, grp_id); 3234 3235 netif_dbg(priv, rx_err, dev, "busy error (rstat: %x)\n", 3236 gfar_read(®s->rstat)); 3237 } 3238 if (events & IEVENT_BABR) { 3239 dev->stats.rx_errors++; 3240 priv->extra_stats.rx_babr++; 3241 3242 netif_dbg(priv, rx_err, dev, "babbling RX error\n"); 3243 } 3244 if (events & IEVENT_EBERR) { 3245 priv->extra_stats.eberr++; 3246 netif_dbg(priv, rx_err, dev, "bus error\n"); 3247 } 3248 if (events & IEVENT_RXC) 3249 netif_dbg(priv, rx_status, dev, "control frame\n"); 3250 3251 if (events & IEVENT_BABT) { 3252 priv->extra_stats.tx_babt++; 3253 netif_dbg(priv, tx_err, dev, "babbling TX error\n"); 3254 } 3255 return IRQ_HANDLED; 3256 } 3257 3258 static struct of_device_id gfar_match[] = 3259 { 3260 { 3261 .type = "network", 3262 .compatible = "gianfar", 3263 }, 3264 { 3265 .compatible = "fsl,etsec2", 3266 }, 3267 {}, 3268 }; 3269 MODULE_DEVICE_TABLE(of, gfar_match); 3270 3271 /* Structure for a device driver */ 3272 static struct platform_driver gfar_driver = { 3273 .driver = { 3274 .name = "fsl-gianfar", 3275 .owner = THIS_MODULE, 3276 .pm = GFAR_PM_OPS, 3277 .of_match_table = gfar_match, 3278 }, 3279 .probe = gfar_probe, 3280 .remove = gfar_remove, 3281 }; 3282 3283 module_platform_driver(gfar_driver); 3284