gianfar.c (e19b9137142988bec5a76c5f8bdf12a77ea802b0, before) | gianfar.c (c65d7533729a965ab8d93fa0470abc263060c54c, after) |
---|---|
1/* drivers/net/ethernet/freescale/gianfar.c 2 * 3 * Gianfar Ethernet Driver 4 * This driver is designed for the non-CPM ethernet controllers 5 * on the 85xx and 83xx family of integrated processors 6 * Based on 8260_io/fcc_enet.c 7 * 8 * Author: Andy Fleming 9 * Maintainer: Kumar Gala 10 * Modifier: Sandeep Gopalpet <sandeep.kumar@freescale.com> 11 * | 1/* drivers/net/ethernet/freescale/gianfar.c 2 * 3 * Gianfar Ethernet Driver 4 * This driver is designed for the non-CPM ethernet controllers 5 * on the 85xx and 83xx family of integrated processors 6 * Based on 8260_io/fcc_enet.c 7 * 8 * Author: Andy Fleming 9 * Maintainer: Kumar Gala 10 * Modifier: Sandeep Gopalpet <sandeep.kumar@freescale.com> 11 * |
12 * Copyright 2002-2009, 2011 Freescale Semiconductor, Inc. | 12 * Copyright 2002-2009, 2011-2013 Freescale Semiconductor, Inc. |
13 * Copyright 2007 MontaVista Software, Inc. 14 * 15 * This program is free software; you can redistribute it and/or modify it 16 * under the terms of the GNU General Public License as published by the 17 * Free Software Foundation; either version 2 of the License, or (at your 18 * option) any later version. 19 * 20 * Gianfar: AKA Lambda Draconis, "Dragon" --- 95 unchanged lines hidden (view full) --- 116static void gfar_new_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp, 117 struct sk_buff *skb); 118static int gfar_set_mac_address(struct net_device *dev); 119static int gfar_change_mtu(struct net_device *dev, int new_mtu); 120static irqreturn_t gfar_error(int irq, void *dev_id); 121static irqreturn_t gfar_transmit(int irq, void *dev_id); 122static irqreturn_t gfar_interrupt(int irq, void *dev_id); 123static void adjust_link(struct net_device *dev); | 13 * Copyright 2007 MontaVista Software, Inc. 14 * 15 * This program is free software; you can redistribute it and/or modify it 16 * under the terms of the GNU General Public License as published by the 17 * Free Software Foundation; either version 2 of the License, or (at your 18 * option) any later version. 19 * 20 * Gianfar: AKA Lambda Draconis, "Dragon" --- 95 unchanged lines hidden (view full) --- 116static void gfar_new_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp, 117 struct sk_buff *skb); 118static int gfar_set_mac_address(struct net_device *dev); 119static int gfar_change_mtu(struct net_device *dev, int new_mtu); 120static irqreturn_t gfar_error(int irq, void *dev_id); 121static irqreturn_t gfar_transmit(int irq, void *dev_id); 122static irqreturn_t gfar_interrupt(int irq, void *dev_id); 123static void adjust_link(struct net_device *dev); |
124static void init_registers(struct net_device *dev); | |
125static int init_phy(struct net_device *dev); 126static int gfar_probe(struct platform_device *ofdev); 127static int gfar_remove(struct platform_device *ofdev); 128static void free_skb_resources(struct gfar_private *priv); 129static void gfar_set_multi(struct net_device *dev); 130static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr); 131static void gfar_configure_serdes(struct net_device *dev); | 124static int init_phy(struct net_device *dev); 125static int gfar_probe(struct platform_device *ofdev); 126static int gfar_remove(struct platform_device *ofdev); 127static void free_skb_resources(struct gfar_private *priv); 128static void gfar_set_multi(struct net_device *dev); 129static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr); 130static void gfar_configure_serdes(struct net_device *dev); |
132static int gfar_poll(struct napi_struct *napi, int budget); 133static int gfar_poll_sq(struct napi_struct *napi, int budget); | 131static int gfar_poll_rx(struct napi_struct *napi, int budget); 132static int gfar_poll_tx(struct napi_struct *napi, int budget); 133static int gfar_poll_rx_sq(struct napi_struct *napi, int budget); 134static int gfar_poll_tx_sq(struct napi_struct *napi, int budget); |
134#ifdef CONFIG_NET_POLL_CONTROLLER 135static void gfar_netpoll(struct net_device *dev); 136#endif 137int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit); 138static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue); 139static void gfar_process_frame(struct net_device *dev, struct sk_buff *skb, 140 int amount_pull, struct napi_struct *napi); | 135#ifdef CONFIG_NET_POLL_CONTROLLER 136static void gfar_netpoll(struct net_device *dev); 137#endif 138int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit); 139static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue); 140static void gfar_process_frame(struct net_device *dev, struct sk_buff *skb, 141 int amount_pull, struct napi_struct *napi); |
141void gfar_halt(struct net_device *dev); 142static void gfar_halt_nodisable(struct net_device *dev); 143void gfar_start(struct net_device *dev); | 142static void gfar_halt_nodisable(struct gfar_private *priv); |
144static void gfar_clear_exact_match(struct net_device *dev); 145static void gfar_set_mac_for_addr(struct net_device *dev, int num, 146 const u8 *addr); 147static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); 148 149MODULE_AUTHOR("Freescale Semiconductor, Inc"); 150MODULE_DESCRIPTION("Gianfar Ethernet Driver"); 151MODULE_LICENSE("GPL"); --- 175 unchanged lines hidden (view full) --- 327 328 baddr = &regs->rbase0; 329 for (i = 0; i < priv->num_rx_queues; i++) { 330 gfar_write(baddr, priv->rx_queue[i]->rx_bd_dma_base); 331 baddr += 2; 332 } 333} 334 | 143static void gfar_clear_exact_match(struct net_device *dev); 144static void gfar_set_mac_for_addr(struct net_device *dev, int num, 145 const u8 *addr); 146static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); 147 148MODULE_AUTHOR("Freescale Semiconductor, Inc"); 149MODULE_DESCRIPTION("Gianfar Ethernet Driver"); 150MODULE_LICENSE("GPL"); --- 175 unchanged lines hidden (view full) --- 326 327 baddr = &regs->rbase0; 328 for (i = 0; i < priv->num_rx_queues; i++) { 329 gfar_write(baddr, priv->rx_queue[i]->rx_bd_dma_base); 330 baddr += 2; 331 } 332} 333 |
335static void gfar_init_mac(struct net_device *ndev) | 334static void gfar_rx_buff_size_config(struct gfar_private *priv) |
336{ | 335{ |
337 struct gfar_private *priv = netdev_priv(ndev); 338 struct gfar __iomem *regs = priv->gfargrp[0].regs; 339 u32 rctrl = 0; 340 u32 tctrl = 0; 341 u32 attrs = 0; | 336 int frame_size = priv->ndev->mtu + ETH_HLEN; |
342 | 337 |
343 /* write the tx/rx base registers */ 344 gfar_init_tx_rx_base(priv); 345 346 /* Configure the coalescing support */ 347 gfar_configure_coalescing_all(priv); 348 | |
349 /* set this when rx hw offload (TOE) functions are being used */ 350 priv->uses_rxfcb = 0; 351 | 338 /* set this when rx hw offload (TOE) functions are being used */ 339 priv->uses_rxfcb = 0; 340 |
341 if (priv->ndev->features & (NETIF_F_RXCSUM | NETIF_F_HW_VLAN_CTAG_RX)) 342 priv->uses_rxfcb = 1; 343 344 if (priv->hwts_rx_en) 345 priv->uses_rxfcb = 1; 346 347 if (priv->uses_rxfcb) 348 frame_size += GMAC_FCB_LEN; 349 350 frame_size += priv->padding; 351 352 frame_size = (frame_size & ~(INCREMENTAL_BUFFER_SIZE - 1)) + 353 INCREMENTAL_BUFFER_SIZE; 354 355 priv->rx_buffer_size = frame_size; 356} 357 358static void gfar_mac_rx_config(struct gfar_private *priv) 359{ 360 struct gfar __iomem *regs = priv->gfargrp[0].regs; 361 u32 rctrl = 0; 362 |
|
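The sizing rule above deserves a second look: the worst-case frame is the MTU plus the Ethernet header, plus the 8-byte frame control block whenever any RX offload (checksum, VLAN extraction, timestamping) is active, plus the alignment padding, and the result is rounded up to the hardware's 512-byte buffer increment. Note that the expression always adds a full increment, so an already-aligned size still grows by 512. A minimal userspace sketch, assuming the usual constant values (ETH_HLEN 14, GMAC_FCB_LEN 8, INCREMENTAL_BUFFER_SIZE 512):

```c
#include <stdio.h>

#define ETH_HLEN                14   /* assumed values, mirroring the driver */
#define GMAC_FCB_LEN            8
#define INCREMENTAL_BUFFER_SIZE 512

static int rx_buff_size(int mtu, int uses_rxfcb, int padding)
{
	int frame_size = mtu + ETH_HLEN;

	if (uses_rxfcb)
		frame_size += GMAC_FCB_LEN;
	frame_size += padding;

	/* Round down to a 512-byte boundary, then add one increment:
	 * an exact multiple still grows by a full 512 bytes. */
	return (frame_size & ~(INCREMENTAL_BUFFER_SIZE - 1)) +
	       INCREMENTAL_BUFFER_SIZE;
}

int main(void)
{
	/* 1500-byte MTU with FCB and 8 bytes of padding: 1530 -> 1536 */
	printf("%d\n", rx_buff_size(1500, 1, 8));
	return 0;
}
```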
352 if (priv->rx_filer_enable) { 353 rctrl |= RCTRL_FILREN; 354 /* Program the RIR0 reg with the required distribution */ | 363 if (priv->rx_filer_enable) { 364 rctrl |= RCTRL_FILREN; 365 /* Program the RIR0 reg with the required distribution */ |
355 gfar_write(&regs->rir0, DEFAULT_RIR0); | 366 if (priv->poll_mode == GFAR_SQ_POLLING) 367 gfar_write(&regs->rir0, DEFAULT_2RXQ_RIR0); 368 else /* GFAR_MQ_POLLING */ 369 gfar_write(&regs->rir0, DEFAULT_8RXQ_RIR0);
356 } 357 358 /* Restore PROMISC mode */ | 370 } 371 372 /* Restore PROMISC mode */ |
359 if (ndev->flags & IFF_PROMISC) | 373 if (priv->ndev->flags & IFF_PROMISC) |
360 rctrl |= RCTRL_PROM; 361 | 374 rctrl |= RCTRL_PROM; 375 |
362 if (ndev->features & NETIF_F_RXCSUM) { | 376 if (priv->ndev->features & NETIF_F_RXCSUM) |
363 rctrl |= RCTRL_CHECKSUMMING; | 377 rctrl |= RCTRL_CHECKSUMMING; |
364 priv->uses_rxfcb = 1; 365 } | |
366 | 378 |
367 if (priv->extended_hash) { 368 rctrl |= RCTRL_EXTHASH; | 379 if (priv->extended_hash) 380 rctrl |= RCTRL_EXTHASH | RCTRL_EMEN; |
369 | 381 |
370 gfar_clear_exact_match(ndev); 371 rctrl |= RCTRL_EMEN; 372 } 373 | |
374 if (priv->padding) { 375 rctrl &= ~RCTRL_PAL_MASK; 376 rctrl |= RCTRL_PADDING(priv->padding); 377 } 378 | 382 if (priv->padding) { 383 rctrl &= ~RCTRL_PAL_MASK; 384 rctrl |= RCTRL_PADDING(priv->padding); 385 } 386 |
379 /* Insert receive time stamps into padding alignment bytes */ 380 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER) { 381 rctrl &= ~RCTRL_PAL_MASK; 382 rctrl |= RCTRL_PADDING(8); 383 priv->padding = 8; 384 } 385 | |
386 /* Enable HW time stamping if requested from user space */ | 387 /* Enable HW time stamping if requested from user space */ |
387 if (priv->hwts_rx_en) { | 388 if (priv->hwts_rx_en) |
388 rctrl |= RCTRL_PRSDEP_INIT | RCTRL_TS_ENABLE; | 389 rctrl |= RCTRL_PRSDEP_INIT | RCTRL_TS_ENABLE; |
389 priv->uses_rxfcb = 1; 390 } | |
391 | 390 |
392 if (ndev->features & NETIF_F_HW_VLAN_CTAG_RX) { | 391 if (priv->ndev->features & NETIF_F_HW_VLAN_CTAG_RX) |
393 rctrl |= RCTRL_VLEX | RCTRL_PRSDEP_INIT; | 392 rctrl |= RCTRL_VLEX | RCTRL_PRSDEP_INIT; |
394 priv->uses_rxfcb = 1; 395 } | |
396 397 /* Init rctrl based on our settings */ 398 gfar_write(&regs->rctrl, rctrl); | 393 394 /* Init rctrl based on our settings */ 395 gfar_write(&regs->rctrl, rctrl);
396} |
|
399 | 397 |
400 if (ndev->features & NETIF_F_IP_CSUM) | 398static void gfar_mac_tx_config(struct gfar_private *priv) 399{ 400 struct gfar __iomem *regs = priv->gfargrp[0].regs; 401 u32 tctrl = 0; 402 403 if (priv->ndev->features & NETIF_F_IP_CSUM) |
401 tctrl |= TCTRL_INIT_CSUM; 402 403 if (priv->prio_sched_en) 404 tctrl |= TCTRL_TXSCHED_PRIO; 405 else { 406 tctrl |= TCTRL_TXSCHED_WRRS; 407 gfar_write(&regs->tr03wt, DEFAULT_WRRS_WEIGHT); 408 gfar_write(&regs->tr47wt, DEFAULT_WRRS_WEIGHT); 409 } 410 | 404 tctrl |= TCTRL_INIT_CSUM; 405 406 if (priv->prio_sched_en) 407 tctrl |= TCTRL_TXSCHED_PRIO; 408 else { 409 tctrl |= TCTRL_TXSCHED_WRRS; 410 gfar_write(&regs->tr03wt, DEFAULT_WRRS_WEIGHT); 411 gfar_write(&regs->tr47wt, DEFAULT_WRRS_WEIGHT); 412 } 413 |
414 if (priv->ndev->features & NETIF_F_HW_VLAN_CTAG_TX) 415 tctrl |= TCTRL_VLINS; 416 |
|
411 gfar_write(&regs->tctrl, tctrl); | 417 gfar_write(&regs->tctrl, tctrl);
418} |
|
412 | 419 |
413 /* Set the extraction length and index */ 414 attrs = ATTRELI_EL(priv->rx_stash_size) | 415 ATTRELI_EI(priv->rx_stash_index); | 420static void gfar_configure_coalescing(struct gfar_private *priv, 421 unsigned long tx_mask, unsigned long rx_mask) 422{ 423 struct gfar __iomem *regs = priv->gfargrp[0].regs; 424 u32 __iomem *baddr; |
416 | 425 |
417 gfar_write(&regs->attreli, attrs); | 426 if (priv->mode == MQ_MG_MODE) { 427 int i = 0;
418 | 428 |
419 /* Start with defaults, and add stashing or locking 420 * depending on the appropriate variables 421 */ 422 attrs = ATTR_INIT_SETTINGS; | 429 baddr = &regs->txic0; 430 for_each_set_bit(i, &tx_mask, priv->num_tx_queues) { 431 gfar_write(baddr + i, 0); 432 if (likely(priv->tx_queue[i]->txcoalescing)) 433 gfar_write(baddr + i, priv->tx_queue[i]->txic); 434 }
423 | 435 |
424 if (priv->bd_stash_en) 425 attrs |= ATTR_BDSTASH; | 436 baddr = &regs->rxic0; 437 for_each_set_bit(i, &rx_mask, priv->num_rx_queues) { 438 gfar_write(baddr + i, 0); 439 if (likely(priv->rx_queue[i]->rxcoalescing)) 440 gfar_write(baddr + i, priv->rx_queue[i]->rxic); 441 } 442 } else { 443 /* Backward compatible case -- even if we enable 444 * multiple queues, there's only single reg to program 445 */ 446 gfar_write(&regs->txic, 0); 447 if (likely(priv->tx_queue[0]->txcoalescing)) 448 gfar_write(&regs->txic, priv->tx_queue[0]->txic);
426 | 449 |
427 if (priv->rx_stash_size != 0) 428 attrs |= ATTR_BUFSTASH; | 450 gfar_write(&regs->rxic, 0); 451 if (unlikely(priv->rx_queue[0]->rxcoalescing)) 452 gfar_write(&regs->rxic, priv->rx_queue[0]->rxic); 453 } 454}
429 | 455 |
430 gfar_write(&regs->attr, attrs); 431 432 gfar_write(&regs->fifo_tx_thr, priv->fifo_threshold); 433 gfar_write(&regs->fifo_tx_starve, priv->fifo_starve); 434 gfar_write(&regs->fifo_tx_starve_shutoff, priv->fifo_starve_off); | 456void gfar_configure_coalescing_all(struct gfar_private *priv) 457{ 458 gfar_configure_coalescing(priv, 0xFF, 0xFF);
435} 436 437static struct net_device_stats *gfar_get_stats(struct net_device *dev) 438{ 439 struct gfar_private *priv = netdev_priv(dev); 440 unsigned long rx_packets = 0, rx_bytes = 0, rx_dropped = 0; 441 unsigned long tx_packets = 0, tx_bytes = 0; 442 int i; --- 31 unchanged lines hidden (view full) --- 474 .ndo_get_stats = gfar_get_stats, 475 .ndo_set_mac_address = eth_mac_addr, 476 .ndo_validate_addr = eth_validate_addr, 477#ifdef CONFIG_NET_POLL_CONTROLLER 478 .ndo_poll_controller = gfar_netpoll, 479#endif 480}; 481 | 459} 460 461static struct net_device_stats *gfar_get_stats(struct net_device *dev) 462{ 463 struct gfar_private *priv = netdev_priv(dev); 464 unsigned long rx_packets = 0, rx_bytes = 0, rx_dropped = 0; 465 unsigned long tx_packets = 0, tx_bytes = 0; 466 int i; --- 31 unchanged lines hidden (view full) --- 498 .ndo_get_stats = gfar_get_stats, 499 .ndo_set_mac_address = eth_mac_addr, 500 .ndo_validate_addr = eth_validate_addr, 501#ifdef CONFIG_NET_POLL_CONTROLLER 502 .ndo_poll_controller = gfar_netpoll, 503#endif 504}; 505 |
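Worth noting about the coalescing helper just closed above: it takes separate TX and RX queue bitmasks so a caller can reprogram only a subset of queues, and gfar_configure_coalescing_all() simply passes 0xFF for both. The kernel's for_each_set_bit() drives the per-queue loop; a plain-C equivalent of that mask walk, with a hypothetical printout in place of the register writes:

```c
#include <stdio.h>

int main(void)
{
	unsigned long tx_mask = 0x05;	/* assumed: queues 0 and 2 only */
	int i;

	/* Open-coded equivalent of for_each_set_bit(i, &tx_mask, 8) */
	for (i = 0; i < 8; i++) {
		if (!(tx_mask & (1UL << i)))
			continue;
		printf("program txic%d\n", i);	/* stands in for gfar_write() */
	}
	return 0;
}
```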
482void lock_rx_qs(struct gfar_private *priv) | 506static void gfar_ints_disable(struct gfar_private *priv) |
483{ 484 int i; | 507{ 508 int i; |
509 for (i = 0; i < priv->num_grps; i++) { 510 struct gfar __iomem *regs = priv->gfargrp[i].regs; 511 /* Clear IEVENT */ 512 gfar_write(&regs->ievent, IEVENT_INIT_CLEAR);
|
485 | 513 |
486 for (i = 0; i < priv->num_rx_queues; i++) 487 spin_lock(&priv->rx_queue[i]->rxlock); | 514 /* Initialize IMASK */ 515 gfar_write(&regs->imask, IMASK_INIT_CLEAR); 516 }
488} 489 | 517} 518 |
519static void gfar_ints_enable(struct gfar_private *priv) 520{ 521 int i; 522 for (i = 0; i < priv->num_grps; i++) { 523 struct gfar __iomem *regs = priv->gfargrp[i].regs; 524 /* Unmask the interrupts we look for */ 525 gfar_write(&regs->imask, IMASK_DEFAULT); 526 } 527} 528
|
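These two helpers capture a common TSEC idiom: IEVENT is a write-one-to-clear status register, so writing the all-ones clear pattern acknowledges every pending event, while IMASK gates which events may actually raise an interrupt. A mocked sketch of the pairing; the register values here are illustrative assumptions, not the driver's real bit patterns:

```c
#include <stdio.h>

#define IEVENT_INIT_CLEAR 0xffffffffu	/* w1c: ack every event (assumed) */
#define IMASK_INIT_CLEAR  0x00000000u	/* mask all sources (assumed) */
#define IMASK_DEFAULT     0x00f000f0u	/* hypothetical serviced-event set */

static unsigned int ievent = 0x00800000u;	/* stand-ins for MMIO regs */
static unsigned int imask;

static void ints_disable(void)
{
	ievent &= ~IEVENT_INIT_CLEAR;	/* writing ones clears those bits */
	imask = IMASK_INIT_CLEAR;
}

static void ints_enable(void)
{
	imask = IMASK_DEFAULT;
}

int main(void)
{
	ints_disable();
	printf("ievent=%08x imask=%08x\n", ievent, imask);
	ints_enable();
	printf("imask=%08x\n", imask);
	return 0;
}
```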
490void lock_tx_qs(struct gfar_private *priv) 491{ 492 int i; 493 494 for (i = 0; i < priv->num_tx_queues; i++) 495 spin_lock(&priv->tx_queue[i]->txlock); 496} 497 | 529void lock_tx_qs(struct gfar_private *priv) 530{ 531 int i; 532 533 for (i = 0; i < priv->num_tx_queues; i++) 534 spin_lock(&priv->tx_queue[i]->txlock); 535} 536 |
498void unlock_rx_qs(struct gfar_private *priv) | 537void unlock_tx_qs(struct gfar_private *priv) |
499{ 500 int i; 501 | 538{ 539 int i; 540 |
502 for (i = 0; i < priv->num_rx_queues; i++) 503 spin_unlock(&priv->rx_queue[i]->rxlock); | 541 for (i = 0; i < priv->num_tx_queues; i++) 542 spin_unlock(&priv->tx_queue[i]->txlock); |
504} 505 | 543} 544 |
506void unlock_tx_qs(struct gfar_private *priv) | 545static int gfar_alloc_tx_queues(struct gfar_private *priv) |
507{ 508 int i; 509 | 546{ 547 int i; 548 |
510 for (i = 0; i < priv->num_tx_queues; i++) 511 spin_unlock(&priv->tx_queue[i]->txlock); | 549 for (i = 0; i < priv->num_tx_queues; i++) { 550 priv->tx_queue[i] = kzalloc(sizeof(struct gfar_priv_tx_q), 551 GFP_KERNEL); 552 if (!priv->tx_queue[i]) 553 return -ENOMEM; 554 555 priv->tx_queue[i]->tx_skbuff = NULL; 556 priv->tx_queue[i]->qindex = i; 557 priv->tx_queue[i]->dev = priv->ndev; 558 spin_lock_init(&(priv->tx_queue[i]->txlock)); 559 } 560 return 0; |
512} 513 | 561} 562 |
514static void free_tx_pointers(struct gfar_private *priv) | 563static int gfar_alloc_rx_queues(struct gfar_private *priv) |
515{ 516 int i; 517 | 564{ 565 int i; 566 |
567 for (i = 0; i < priv->num_rx_queues; i++) { 568 priv->rx_queue[i] = kzalloc(sizeof(struct gfar_priv_rx_q), 569 GFP_KERNEL); 570 if (!priv->rx_queue[i]) 571 return -ENOMEM; 572 573 priv->rx_queue[i]->rx_skbuff = NULL; 574 priv->rx_queue[i]->qindex = i; 575 priv->rx_queue[i]->dev = priv->ndev; 576 } 577 return 0; 578} 579 580static void gfar_free_tx_queues(struct gfar_private *priv) 581{ 582 int i; 583 |
|
518 for (i = 0; i < priv->num_tx_queues; i++) 519 kfree(priv->tx_queue[i]); 520} 521 | 584 for (i = 0; i < priv->num_tx_queues; i++) 585 kfree(priv->tx_queue[i]); 586} 587 |
522static void free_rx_pointers(struct gfar_private *priv) | 588static void gfar_free_rx_queues(struct gfar_private *priv) |
523{ 524 int i; 525 526 for (i = 0; i < priv->num_rx_queues; i++) 527 kfree(priv->rx_queue[i]); 528} 529 530static void unmap_group_regs(struct gfar_private *priv) --- 17 unchanged lines hidden (view full) --- 548 549 free_netdev(priv->ndev); 550} 551 552static void disable_napi(struct gfar_private *priv) 553{ 554 int i; 555 | 589{ 590 int i; 591 592 for (i = 0; i < priv->num_rx_queues; i++) 593 kfree(priv->rx_queue[i]); 594} 595 596static void unmap_group_regs(struct gfar_private *priv) --- 17 unchanged lines hidden (view full) --- 614 615 free_netdev(priv->ndev); 616} 617 618static void disable_napi(struct gfar_private *priv) 619{ 620 int i; 621 |
556 for (i = 0; i < priv->num_grps; i++) 557 napi_disable(&priv->gfargrp[i].napi); | 622 for (i = 0; i < priv->num_grps; i++) { 623 napi_disable(&priv->gfargrp[i].napi_rx); 624 napi_disable(&priv->gfargrp[i].napi_tx); 625 } |
558} 559 560static void enable_napi(struct gfar_private *priv) 561{ 562 int i; 563 | 626} 627 628static void enable_napi(struct gfar_private *priv) 629{ 630 int i; 631 |
564 for (i = 0; i < priv->num_grps; i++) 565 napi_enable(&priv->gfargrp[i].napi); | 632 for (i = 0; i < priv->num_grps; i++) { 633 napi_enable(&priv->gfargrp[i].napi_rx); 634 napi_enable(&priv->gfargrp[i].napi_tx); 635 } |
566} 567 568static int gfar_parse_group(struct device_node *np, 569 struct gfar_private *priv, const char *model) 570{ 571 struct gfar_priv_grp *grp = &priv->gfargrp[priv->num_grps]; | 636} 637 638static int gfar_parse_group(struct device_node *np, 639 struct gfar_private *priv, const char *model) 640{ 641 struct gfar_priv_grp *grp = &priv->gfargrp[priv->num_grps]; |
572 u32 *queue_mask; | |
573 int i; 574 575 for (i = 0; i < GFAR_NUM_IRQS; i++) { 576 grp->irqinfo[i] = kzalloc(sizeof(struct gfar_irqinfo), 577 GFP_KERNEL); 578 if (!grp->irqinfo[i]) 579 return -ENOMEM; 580 } --- 12 unchanged lines hidden (view full) --- 593 gfar_irq(grp, RX)->irq == NO_IRQ || 594 gfar_irq(grp, ER)->irq == NO_IRQ) 595 return -EINVAL; 596 } 597 598 grp->priv = priv; 599 spin_lock_init(&grp->grplock); 600 if (priv->mode == MQ_MG_MODE) { | 642 int i; 643 644 for (i = 0; i < GFAR_NUM_IRQS; i++) { 645 grp->irqinfo[i] = kzalloc(sizeof(struct gfar_irqinfo), 646 GFP_KERNEL); 647 if (!grp->irqinfo[i]) 648 return -ENOMEM; 649 } --- 12 unchanged lines hidden (view full) --- 662 gfar_irq(grp, RX)->irq == NO_IRQ || 663 gfar_irq(grp, ER)->irq == NO_IRQ) 664 return -EINVAL; 665 } 666 667 grp->priv = priv; 668 spin_lock_init(&grp->grplock); 669 if (priv->mode == MQ_MG_MODE) { |
601 queue_mask = (u32 *)of_get_property(np, "fsl,rx-bit-map", NULL); 602 grp->rx_bit_map = queue_mask ? 603 *queue_mask : (DEFAULT_MAPPING >> priv->num_grps); 604 queue_mask = (u32 *)of_get_property(np, "fsl,tx-bit-map", NULL); 605 grp->tx_bit_map = queue_mask ? 606 *queue_mask : (DEFAULT_MAPPING >> priv->num_grps); | 670 u32 *rxq_mask, *txq_mask; 671 rxq_mask = (u32 *)of_get_property(np, "fsl,rx-bit-map", NULL); 672 txq_mask = (u32 *)of_get_property(np, "fsl,tx-bit-map", NULL); 673 674 if (priv->poll_mode == GFAR_SQ_POLLING) { 675 /* One Q per interrupt group: Q0 to G0, Q1 to G1 */ 676 grp->rx_bit_map = (DEFAULT_MAPPING >> priv->num_grps); 677 grp->tx_bit_map = (DEFAULT_MAPPING >> priv->num_grps); 678 } else { /* GFAR_MQ_POLLING */ 679 grp->rx_bit_map = rxq_mask ? 680 *rxq_mask : (DEFAULT_MAPPING >> priv->num_grps); 681 grp->tx_bit_map = txq_mask ? 682 *txq_mask : (DEFAULT_MAPPING >> priv->num_grps); 683 } |
607 } else { 608 grp->rx_bit_map = 0xFF; 609 grp->tx_bit_map = 0xFF; 610 } | 684 } else { 685 grp->rx_bit_map = 0xFF; 686 grp->tx_bit_map = 0xFF; 687 } |
688 689 /* bit_map's MSB is q0 (from q0 to q7) but, for_each_set_bit parses 690 * right to left, so we need to revert the 8 bits to get the q index 691 */ 692 grp->rx_bit_map = bitrev8(grp->rx_bit_map); 693 grp->tx_bit_map = bitrev8(grp->tx_bit_map); 694 695 /* Calculate RSTAT, TSTAT, RQUEUE and TQUEUE values, 696 * also assign queues to groups 697 */ 698 for_each_set_bit(i, &grp->rx_bit_map, priv->num_rx_queues) { 699 if (!grp->rx_queue) 700 grp->rx_queue = priv->rx_queue[i]; 701 grp->num_rx_queues++; 702 grp->rstat |= (RSTAT_CLEAR_RHALT >> i); 703 priv->rqueue |= ((RQUEUE_EN0 | RQUEUE_EX0) >> i); 704 priv->rx_queue[i]->grp = grp; 705 } 706 707 for_each_set_bit(i, &grp->tx_bit_map, priv->num_tx_queues) { 708 if (!grp->tx_queue) 709 grp->tx_queue = priv->tx_queue[i]; 710 grp->num_tx_queues++; 711 grp->tstat |= (TSTAT_CLEAR_THALT >> i); 712 priv->tqueue |= (TQUEUE_EN0 >> i); 713 priv->tx_queue[i]->grp = grp; 714 } 715 |
|
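The bit-reversal comment above is the key to the new mapping code: in the device-tree masks the most significant bit stands for queue 0, while for_each_set_bit() counts upward from bit 0, so the driver flips each byte once with bitrev8() (replacing the open-coded reverse_bitmap() the old probe path used further down). A quick userspace check of the idea, with a local stand-in for the kernel's bitrev8():

```c
#include <stdio.h>
#include <stdint.h>

/* Local stand-in for the kernel's bitrev8(): reverse the bits of a byte */
static uint8_t bitrev8(uint8_t b)
{
	b = (uint8_t)((b & 0xF0) >> 4 | (b & 0x0F) << 4);
	b = (uint8_t)((b & 0xCC) >> 2 | (b & 0x33) << 2);
	b = (uint8_t)((b & 0xAA) >> 1 | (b & 0x55) << 1);
	return b;
}

int main(void)
{
	/* MSB-first mask 0x80 means "queue 0 only"; reversed it becomes
	 * 0x01, which for_each_set_bit() reports as bit index 0. */
	printf("0x%02x\n", bitrev8(0x80));	/* prints 0x01 */
	return 0;
}
```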
611 priv->num_grps++; 612 613 return 0; 614} 615 616static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev) 617{ 618 const char *model; --- 4 unchanged lines hidden (view full) --- 623 struct gfar_private *priv = NULL; 624 struct device_node *np = ofdev->dev.of_node; 625 struct device_node *child = NULL; 626 const u32 *stash; 627 const u32 *stash_len; 628 const u32 *stash_idx; 629 unsigned int num_tx_qs, num_rx_qs; 630 u32 *tx_queues, *rx_queues; | 716 priv->num_grps++; 717 718 return 0; 719} 720 721static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev) 722{ 723 const char *model; --- 4 unchanged lines hidden (view full) --- 728 struct gfar_private *priv = NULL; 729 struct device_node *np = ofdev->dev.of_node; 730 struct device_node *child = NULL; 731 const u32 *stash; 732 const u32 *stash_len; 733 const u32 *stash_idx; 734 unsigned int num_tx_qs, num_rx_qs; 735 u32 *tx_queues, *rx_queues; |
736 unsigned short mode, poll_mode; |
|
631 632 if (!np || !of_device_is_available(np)) 633 return -ENODEV; 634 | 737 738 if (!np || !of_device_is_available(np)) 739 return -ENODEV; 740 |
635 /* parse the num of tx and rx queues */ | 741 if (of_device_is_compatible(np, "fsl,etsec2")) { 742 mode = MQ_MG_MODE; 743 poll_mode = GFAR_SQ_POLLING; 744 } else { 745 mode = SQ_SG_MODE; 746 poll_mode = GFAR_SQ_POLLING; 747 } 748 749 /* parse the num of HW tx and rx queues */ |
636 tx_queues = (u32 *)of_get_property(np, "fsl,num_tx_queues", NULL); | 750 tx_queues = (u32 *)of_get_property(np, "fsl,num_tx_queues", NULL); |
637 num_tx_qs = tx_queues ? *tx_queues : 1; | 751 rx_queues = (u32 *)of_get_property(np, "fsl,num_rx_queues", NULL); |
638 | 752 |
753 if (mode == SQ_SG_MODE) { 754 num_tx_qs = 1; 755 num_rx_qs = 1; 756 } else { /* MQ_MG_MODE */ 757 /* get the actual number of supported groups */ 758 unsigned int num_grps = of_get_available_child_count(np); 759 760 if (num_grps == 0 || num_grps > MAXGROUPS) { 761 dev_err(&ofdev->dev, "Invalid # of int groups(%d)\n", 762 num_grps); 763 pr_err("Cannot do alloc_etherdev, aborting\n"); 764 return -EINVAL; 765 } 766 767 if (poll_mode == GFAR_SQ_POLLING) { 768 num_tx_qs = num_grps; /* one txq per int group */ 769 num_rx_qs = num_grps; /* one rxq per int group */ 770 } else { /* GFAR_MQ_POLLING */ 771 num_tx_qs = tx_queues ? *tx_queues : 1; 772 num_rx_qs = rx_queues ? *rx_queues : 1; 773 } 774 } 775 |
|
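Read as a decision table, the policy above says: non-etsec2 parts stay single-queue/single-group; etsec2 in the (now default) single-queue polling mode gets exactly one TX and one RX queue per interrupt group; and only the legacy multi-queue polling mode still honors the fsl,num_tx_queues/fsl,num_rx_queues properties. A condensed sketch of that decision, with hypothetical parameter names standing in for the driver's locals:

```c
/* Hypothetical condensation of the queue-count policy in gfar_of_init() */
enum gfar_dev_mode  { SQ_SG_MODE, MQ_MG_MODE };
enum gfar_poll_mode { GFAR_SQ_POLLING, GFAR_MQ_POLLING };

void gfar_queue_counts(enum gfar_dev_mode mode, enum gfar_poll_mode poll,
		       unsigned int num_grps,	/* available child nodes */
		       unsigned int dt_txqs, unsigned int dt_rxqs,
		       unsigned int *num_tx_qs, unsigned int *num_rx_qs)
{
	if (mode == SQ_SG_MODE) {
		*num_tx_qs = *num_rx_qs = 1;
	} else if (poll == GFAR_SQ_POLLING) {
		/* one TX and one RX queue per interrupt group */
		*num_tx_qs = *num_rx_qs = num_grps;
	} else {
		/* GFAR_MQ_POLLING: fall back to the device-tree counts */
		*num_tx_qs = dt_txqs ? dt_txqs : 1;
		*num_rx_qs = dt_rxqs ? dt_rxqs : 1;
	}
}
```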
639 if (num_tx_qs > MAX_TX_QS) { 640 pr_err("num_tx_qs(=%d) greater than MAX_TX_QS(=%d)\n", 641 num_tx_qs, MAX_TX_QS); 642 pr_err("Cannot do alloc_etherdev, aborting\n"); 643 return -EINVAL; 644 } 645 | 776 if (num_tx_qs > MAX_TX_QS) { 777 pr_err("num_tx_qs(=%d) greater than MAX_TX_QS(=%d)\n", 778 num_tx_qs, MAX_TX_QS); 779 pr_err("Cannot do alloc_etherdev, aborting\n"); 780 return -EINVAL; 781 } 782 |
646 rx_queues = (u32 *)of_get_property(np, "fsl,num_rx_queues", NULL); 647 num_rx_qs = rx_queues ? *rx_queues : 1; 648 | |
649 if (num_rx_qs > MAX_RX_QS) { 650 pr_err("num_rx_qs(=%d) greater than MAX_RX_QS(=%d)\n", 651 num_rx_qs, MAX_RX_QS); 652 pr_err("Cannot do alloc_etherdev, aborting\n"); 653 return -EINVAL; 654 } 655 656 *pdev = alloc_etherdev_mq(sizeof(*priv), num_tx_qs); 657 dev = *pdev; 658 if (NULL == dev) 659 return -ENOMEM; 660 661 priv = netdev_priv(dev); 662 priv->ndev = dev; 663 | 783 if (num_rx_qs > MAX_RX_QS) { 784 pr_err("num_rx_qs(=%d) greater than MAX_RX_QS(=%d)\n", 785 num_rx_qs, MAX_RX_QS); 786 pr_err("Cannot do alloc_etherdev, aborting\n"); 787 return -EINVAL; 788 } 789 790 *pdev = alloc_etherdev_mq(sizeof(*priv), num_tx_qs); 791 dev = *pdev; 792 if (NULL == dev) 793 return -ENOMEM; 794 795 priv = netdev_priv(dev); 796 priv->ndev = dev; 797 |
798 priv->mode = mode; 799 priv->poll_mode = poll_mode; 800 |
|
664 priv->num_tx_queues = num_tx_qs; 665 netif_set_real_num_rx_queues(dev, num_rx_qs); 666 priv->num_rx_queues = num_rx_qs; | 801 priv->num_tx_queues = num_tx_qs; 802 netif_set_real_num_rx_queues(dev, num_rx_qs); 803 priv->num_rx_queues = num_rx_qs; |
667 priv->num_grps = 0x0; | |
668 | 804 |
805 err = gfar_alloc_tx_queues(priv); 806 if (err) 807 goto tx_alloc_failed; 808 809 err = gfar_alloc_rx_queues(priv); 810 if (err) 811 goto rx_alloc_failed; 812 |
|
669 /* Init Rx queue filer rule set linked list */ 670 INIT_LIST_HEAD(&priv->rx_list.list); 671 priv->rx_list.count = 0; 672 mutex_init(&priv->rx_queue_access); 673 674 model = of_get_property(np, "model", NULL); 675 676 for (i = 0; i < MAXGROUPS; i++) 677 priv->gfargrp[i].regs = NULL; 678 679 /* Parse and initialize group specific information */ | 813 /* Init Rx queue filer rule set linked list */ 814 INIT_LIST_HEAD(&priv->rx_list.list); 815 priv->rx_list.count = 0; 816 mutex_init(&priv->rx_queue_access); 817 818 model = of_get_property(np, "model", NULL); 819 820 for (i = 0; i < MAXGROUPS; i++) 821 priv->gfargrp[i].regs = NULL; 822 823 /* Parse and initialize group specific information */ |
680 if (of_device_is_compatible(np, "fsl,etsec2")) { 681 priv->mode = MQ_MG_MODE; | 824 if (priv->mode == MQ_MG_MODE) { |
682 for_each_child_of_node(np, child) { 683 err = gfar_parse_group(child, priv, model); 684 if (err) 685 goto err_grp_init; 686 } | 825 for_each_child_of_node(np, child) { 826 err = gfar_parse_group(child, priv, model); 827 if (err) 828 goto err_grp_init; 829 } |
687 } else { 688 priv->mode = SQ_SG_MODE; | 830 } else { /* SQ_SG_MODE */ |
689 err = gfar_parse_group(np, priv, model); 690 if (err) 691 goto err_grp_init; 692 } 693 | 831 err = gfar_parse_group(np, priv, model); 832 if (err) 833 goto err_grp_init; 834 } 835 |
694 for (i = 0; i < priv->num_tx_queues; i++) 695 priv->tx_queue[i] = NULL; 696 for (i = 0; i < priv->num_rx_queues; i++) 697 priv->rx_queue[i] = NULL; 698 699 for (i = 0; i < priv->num_tx_queues; i++) { 700 priv->tx_queue[i] = kzalloc(sizeof(struct gfar_priv_tx_q), 701 GFP_KERNEL); 702 if (!priv->tx_queue[i]) { 703 err = -ENOMEM; 704 goto tx_alloc_failed; 705 } 706 priv->tx_queue[i]->tx_skbuff = NULL; 707 priv->tx_queue[i]->qindex = i; 708 priv->tx_queue[i]->dev = dev; 709 spin_lock_init(&(priv->tx_queue[i]->txlock)); 710 } 711 712 for (i = 0; i < priv->num_rx_queues; i++) { 713 priv->rx_queue[i] = kzalloc(sizeof(struct gfar_priv_rx_q), 714 GFP_KERNEL); 715 if (!priv->rx_queue[i]) { 716 err = -ENOMEM; 717 goto rx_alloc_failed; 718 } 719 priv->rx_queue[i]->rx_skbuff = NULL; 720 priv->rx_queue[i]->qindex = i; 721 priv->rx_queue[i]->dev = dev; 722 spin_lock_init(&(priv->rx_queue[i]->rxlock)); 723 } 724 725 | |
726 stash = of_get_property(np, "bd-stash", NULL); 727 728 if (stash) { 729 priv->device_flags |= FSL_GIANFAR_DEV_HAS_BD_STASHING; 730 priv->bd_stash_en = 1; 731 } 732 733 stash_len = of_get_property(np, "rx-stash-len", NULL); --- 10 unchanged lines hidden (view full) --- 744 priv->device_flags |= FSL_GIANFAR_DEV_HAS_BUF_STASHING; 745 746 mac_addr = of_get_mac_address(np); 747 748 if (mac_addr) 749 memcpy(dev->dev_addr, mac_addr, ETH_ALEN); 750 751 if (model && !strcasecmp(model, "TSEC")) | 836 stash = of_get_property(np, "bd-stash", NULL); 837 838 if (stash) { 839 priv->device_flags |= FSL_GIANFAR_DEV_HAS_BD_STASHING; 840 priv->bd_stash_en = 1; 841 } 842 843 stash_len = of_get_property(np, "rx-stash-len", NULL); --- 10 unchanged lines hidden (view full) --- 854 priv->device_flags |= FSL_GIANFAR_DEV_HAS_BUF_STASHING; 855 856 mac_addr = of_get_mac_address(np); 857 858 if (mac_addr) 859 memcpy(dev->dev_addr, mac_addr, ETH_ALEN); 860 861 if (model && !strcasecmp(model, "TSEC")) |
752 priv->device_flags = FSL_GIANFAR_DEV_HAS_GIGABIT | | 862 priv->device_flags |= FSL_GIANFAR_DEV_HAS_GIGABIT | |
753 FSL_GIANFAR_DEV_HAS_COALESCE | 754 FSL_GIANFAR_DEV_HAS_RMON | 755 FSL_GIANFAR_DEV_HAS_MULTI_INTR; 756 757 if (model && !strcasecmp(model, "eTSEC")) | 863 FSL_GIANFAR_DEV_HAS_COALESCE | 864 FSL_GIANFAR_DEV_HAS_RMON | 865 FSL_GIANFAR_DEV_HAS_MULTI_INTR; 866 867 if (model && !strcasecmp(model, "eTSEC")) |
758 priv->device_flags = FSL_GIANFAR_DEV_HAS_GIGABIT | | 868 priv->device_flags |= FSL_GIANFAR_DEV_HAS_GIGABIT | |
759 FSL_GIANFAR_DEV_HAS_COALESCE | 760 FSL_GIANFAR_DEV_HAS_RMON | 761 FSL_GIANFAR_DEV_HAS_MULTI_INTR | | 869 FSL_GIANFAR_DEV_HAS_COALESCE | 870 FSL_GIANFAR_DEV_HAS_RMON | 871 FSL_GIANFAR_DEV_HAS_MULTI_INTR | |
762 FSL_GIANFAR_DEV_HAS_PADDING | | |
763 FSL_GIANFAR_DEV_HAS_CSUM | 764 FSL_GIANFAR_DEV_HAS_VLAN | 765 FSL_GIANFAR_DEV_HAS_MAGIC_PACKET | 766 FSL_GIANFAR_DEV_HAS_EXTENDED_HASH | 767 FSL_GIANFAR_DEV_HAS_TIMER; 768 769 ctype = of_get_property(np, "phy-connection-type", NULL); 770 --- 8 unchanged lines hidden (view full) --- 779 780 priv->phy_node = of_parse_phandle(np, "phy-handle", 0); 781 782 /* Find the TBI PHY. If it's not there, we don't support SGMII */ 783 priv->tbi_node = of_parse_phandle(np, "tbi-handle", 0); 784 785 return 0; 786 | 872 FSL_GIANFAR_DEV_HAS_CSUM | 873 FSL_GIANFAR_DEV_HAS_VLAN | 874 FSL_GIANFAR_DEV_HAS_MAGIC_PACKET | 875 FSL_GIANFAR_DEV_HAS_EXTENDED_HASH | 876 FSL_GIANFAR_DEV_HAS_TIMER; 877 878 ctype = of_get_property(np, "phy-connection-type", NULL); 879 --- 8 unchanged lines hidden (view full) --- 888 889 priv->phy_node = of_parse_phandle(np, "phy-handle", 0); 890 891 /* Find the TBI PHY. If it's not there, we don't support SGMII */ 892 priv->tbi_node = of_parse_phandle(np, "tbi-handle", 0); 893 894 return 0; 895 |
787rx_alloc_failed: 788 free_rx_pointers(priv); 789tx_alloc_failed: 790 free_tx_pointers(priv); | |
791err_grp_init: 792 unmap_group_regs(priv); | 896err_grp_init: 897 unmap_group_regs(priv); |
898rx_alloc_failed: 899 gfar_free_rx_queues(priv); 900tx_alloc_failed: 901 gfar_free_tx_queues(priv); |
|
793 free_gfar_dev(priv); 794 return err; 795} 796 797static int gfar_hwtstamp_set(struct net_device *netdev, struct ifreq *ifr) 798{ 799 struct hwtstamp_config config; 800 struct gfar_private *priv = netdev_priv(netdev); --- 16 unchanged lines hidden (view full) --- 817 break; 818 default: 819 return -ERANGE; 820 } 821 822 switch (config.rx_filter) { 823 case HWTSTAMP_FILTER_NONE: 824 if (priv->hwts_rx_en) { | 902 free_gfar_dev(priv); 903 return err; 904} 905 906static int gfar_hwtstamp_set(struct net_device *netdev, struct ifreq *ifr) 907{ 908 struct hwtstamp_config config; 909 struct gfar_private *priv = netdev_priv(netdev); --- 16 unchanged lines hidden (view full) --- 926 break; 927 default: 928 return -ERANGE; 929 } 930 931 switch (config.rx_filter) { 932 case HWTSTAMP_FILTER_NONE: 933 if (priv->hwts_rx_en) { |
825 stop_gfar(netdev); | |
826 priv->hwts_rx_en = 0; | 934 priv->hwts_rx_en = 0; |
827 startup_gfar(netdev); | 935 reset_gfar(netdev); |
828 } 829 break; 830 default: 831 if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)) 832 return -ERANGE; 833 if (!priv->hwts_rx_en) { | 936 } 937 break; 938 default: 939 if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)) 940 return -ERANGE; 941 if (!priv->hwts_rx_en) { |
834 stop_gfar(netdev); | |
835 priv->hwts_rx_en = 1; | 942 priv->hwts_rx_en = 1; |
836 startup_gfar(netdev); | 943 reset_gfar(netdev); |
837 } 838 config.rx_filter = HWTSTAMP_FILTER_ALL; 839 break; 840 } 841 842 return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ? 843 -EFAULT : 0; 844} --- 25 unchanged lines hidden (view full) --- 870 return gfar_hwtstamp_get(dev, rq); 871 872 if (!priv->phydev) 873 return -ENODEV; 874 875 return phy_mii_ioctl(priv->phydev, rq, cmd); 876} 877 | 944 } 945 config.rx_filter = HWTSTAMP_FILTER_ALL; 946 break; 947 } 948 949 return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ? 950 -EFAULT : 0; 951} --- 25 unchanged lines hidden (view full) --- 977 return gfar_hwtstamp_get(dev, rq); 978 979 if (!priv->phydev) 980 return -ENODEV; 981 982 return phy_mii_ioctl(priv->phydev, rq, cmd); 983} 984 |
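A side effect worth spelling out: enabling or disabling RX timestamping toggles uses_rxfcb, which changes the receive buffer geometry, so the device genuinely has to go through a full teardown and bring-up (reset_gfar() here) rather than a lightweight register update. For reference, a userspace sketch of the ioctl that lands in gfar_hwtstamp_set() above; the interface name "eth0" and the already-opened socket descriptor are assumptions:

```c
#include <string.h>
#include <sys/ioctl.h>
#include <net/if.h>
#include <linux/sockios.h>
#include <linux/net_tstamp.h>

/* Ask the driver to timestamp all received frames on "eth0" */
int enable_rx_timestamps(int sock_fd)
{
	struct hwtstamp_config cfg;
	struct ifreq ifr;

	memset(&cfg, 0, sizeof(cfg));
	cfg.tx_type = HWTSTAMP_TX_OFF;
	cfg.rx_filter = HWTSTAMP_FILTER_ALL;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
	ifr.ifr_data = (char *)&cfg;

	return ioctl(sock_fd, SIOCSHWTSTAMP, &ifr);
}
```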
878static unsigned int reverse_bitmap(unsigned int bit_map, unsigned int max_qs) 879{ 880 unsigned int new_bit_map = 0x0; 881 int mask = 0x1 << (max_qs - 1), i; 882 883 for (i = 0; i < max_qs; i++) { 884 if (bit_map & mask) 885 new_bit_map = new_bit_map + (1 << i); 886 mask = mask >> 0x1; 887 } 888 return new_bit_map; 889} 890 | |
891static u32 cluster_entry_per_class(struct gfar_private *priv, u32 rqfar, 892 u32 class) 893{ 894 u32 rqfpr = FPR_FILER_MASK; 895 u32 rqfcr = 0x0; 896 897 rqfar--; 898 rqfcr = RQFCR_CLE | RQFCR_PID_MASK | RQFCR_CMP_EXACT; --- 101 unchanged lines hidden (view full) --- 1000 else /* non-mpc85xx parts, i.e. e300 core based */ 1001 __gfar_detect_errata_83xx(priv); 1002 1003 if (priv->errata) 1004 dev_info(dev, "enabled errata workarounds, flags: 0x%x\n", 1005 priv->errata); 1006} 1007 | 985static u32 cluster_entry_per_class(struct gfar_private *priv, u32 rqfar, 986 u32 class) 987{ 988 u32 rqfpr = FPR_FILER_MASK; 989 u32 rqfcr = 0x0; 990 991 rqfar--; 992 rqfcr = RQFCR_CLE | RQFCR_PID_MASK | RQFCR_CMP_EXACT; --- 101 unchanged lines hidden (view full) --- 1094 else /* non-mpc85xx parts, i.e. e300 core based */ 1095 __gfar_detect_errata_83xx(priv); 1096 1097 if (priv->errata) 1098 dev_info(dev, "enabled errata workarounds, flags: 0x%x\n", 1099 priv->errata); 1100} 1101 |
1008/* Set up the ethernet device structure, private data, 1009 * and anything else we need before we start 1010 */ 1011static int gfar_probe(struct platform_device *ofdev) | 1102void gfar_mac_reset(struct gfar_private *priv) |
1012{ | 1103{ |
1104 struct gfar __iomem *regs = priv->gfargrp[0].regs; |
|
1013 u32 tempval; | 1105 u32 tempval; |
1014 struct net_device *dev = NULL; 1015 struct gfar_private *priv = NULL; 1016 struct gfar __iomem *regs = NULL; 1017 int err = 0, i, grp_idx = 0; 1018 u32 rstat = 0, tstat = 0, rqueue = 0, tqueue = 0; 1019 u32 isrg = 0; 1020 u32 __iomem *baddr; | |
1021 | 1106 |
1022 err = gfar_of_init(ofdev, &dev); 1023 1024 if (err) 1025 return err; 1026 1027 priv = netdev_priv(dev); 1028 priv->ndev = dev; 1029 priv->ofdev = ofdev; 1030 priv->dev = &ofdev->dev; 1031 SET_NETDEV_DEV(dev, &ofdev->dev); 1032 1033 spin_lock_init(&priv->bflock); 1034 INIT_WORK(&priv->reset_task, gfar_reset_task); 1035 1036 platform_set_drvdata(ofdev, priv); 1037 regs = priv->gfargrp[0].regs; 1038 1039 gfar_detect_errata(priv); 1040 1041 /* Stop the DMA engine now, in case it was running before 1042 * (The firmware could have used it, and left it running). 1043 */ 1044 gfar_halt(dev); 1045 | |
1046 /* Reset MAC layer */ 1047 gfar_write(&regs->maccfg1, MACCFG1_SOFT_RESET); 1048 1049 /* We need to delay at least 3 TX clocks */ | 1107 /* Reset MAC layer */ 1108 gfar_write(&regs->maccfg1, MACCFG1_SOFT_RESET); 1109 1110 /* We need to delay at least 3 TX clocks */
1050 udelay(2); | 1111 udelay(3); |
1051 | 1112 |
1052 tempval = 0; 1053 if (!priv->pause_aneg_en && priv->tx_pause_en) 1054 tempval |= MACCFG1_TX_FLOW; 1055 if (!priv->pause_aneg_en && priv->rx_pause_en) 1056 tempval |= MACCFG1_RX_FLOW; | |
1057 /* the soft reset bit is not self-resetting, so we need to 1058 * clear it before resuming normal operation 1059 */ | 1113 /* the soft reset bit is not self-resetting, so we need to 1114 * clear it before resuming normal operation 1115 */ |
1060 gfar_write(&regs->maccfg1, tempval); | 1116 gfar_write(&regs->maccfg1, 0);
1061 | 1117 |
1118 udelay(3); 1119 1120 /* Compute rx_buff_size based on config flags */ 1121 gfar_rx_buff_size_config(priv); 1122 1123 /* Initialize the max receive frame/buffer lengths */ 1124 gfar_write(&regs->maxfrm, priv->rx_buffer_size); 1125 gfar_write(&regs->mrblr, priv->rx_buffer_size); 1126 1127 /* Initialize the Minimum Frame Length Register */ 1128 gfar_write(&regs->minflr, MINFLR_INIT_SETTINGS); 1129
|
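The reset just shown is a standard pulse sequence: MACCFG1[SOFT_RESET] is not self-clearing, so the driver asserts it, waits the documented minimum of three TX clocks, clears it, and waits again before programming the MAC; maxfrm and mrblr are then derived from the freshly computed rx_buffer_size rather than fixed defaults. A mocked sketch of the pulse, where the register, the delay, and the bit value are all stand-in assumptions:

```c
#include <stdint.h>

#define MACCFG1_SOFT_RESET 0x80000000u	/* assumed bit position */

static volatile uint32_t maccfg1;	/* stand-in for the MMIO register */

static void udelay(unsigned int usecs)
{
	(void)usecs;	/* the kernel busy-waits here */
}

void mac_soft_reset(void)
{
	maccfg1 = MACCFG1_SOFT_RESET;	/* assert reset */
	udelay(3);			/* >= 3 TX clocks */
	maccfg1 = 0;			/* the bit is not self-clearing */
	udelay(3);
}
```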
1062 /* Initialize MACCFG2. */ 1063 tempval = MACCFG2_INIT_SETTINGS; | 1130 /* Initialize MACCFG2. */ 1131 tempval = MACCFG2_INIT_SETTINGS; |
1064 if (gfar_has_errata(priv, GFAR_ERRATA_74)) | 1132 1133 /* If the mtu is larger than the max size for standard 1134 * ethernet frames (ie, a jumbo frame), then set maccfg2 1135 * to allow huge frames, and to check the length 1136 */ 1137 if (priv->rx_buffer_size > DEFAULT_RX_BUFFER_SIZE || 1138 gfar_has_errata(priv, GFAR_ERRATA_74)) |
1065 tempval |= MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK; | 1139 tempval |= MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK; |
1140 |
|
1066 gfar_write(&regs->maccfg2, tempval); 1067 | 1141 gfar_write(&regs->maccfg2, tempval); 1142
1143 /* Clear mac addr hash registers */ 1144 gfar_write(&regs->igaddr0, 0); 1145 gfar_write(&regs->igaddr1, 0); 1146 gfar_write(&regs->igaddr2, 0); 1147 gfar_write(&regs->igaddr3, 0); 1148 gfar_write(&regs->igaddr4, 0); 1149 gfar_write(&regs->igaddr5, 0); 1150 gfar_write(&regs->igaddr6, 0); 1151 gfar_write(&regs->igaddr7, 0); 1152 1153 gfar_write(&regs->gaddr0, 0); 1154 gfar_write(&regs->gaddr1, 0); 1155 gfar_write(&regs->gaddr2, 0); 1156 gfar_write(&regs->gaddr3, 0); 1157 gfar_write(&regs->gaddr4, 0); 1158 gfar_write(&regs->gaddr5, 0); 1159 gfar_write(&regs->gaddr6, 0); 1160 gfar_write(&regs->gaddr7, 0); 1161 1162 if (priv->extended_hash) 1163 gfar_clear_exact_match(priv->ndev); 1164 1165 gfar_mac_rx_config(priv); 1166 1167 gfar_mac_tx_config(priv); 1168 1169 gfar_set_mac_address(priv->ndev); 1170 1171 gfar_set_multi(priv->ndev); 1172 1173 /* clear ievent and imask before configuring coalescing */ 1174 gfar_ints_disable(priv); 1175 1176 /* Configure the coalescing support */ 1177 gfar_configure_coalescing_all(priv); 1178} 1179 1180static void gfar_hw_init(struct gfar_private *priv) 1181{ 1182 struct gfar __iomem *regs = priv->gfargrp[0].regs; 1183 u32 attrs; 1184 1185 /* Stop the DMA engine now, in case it was running before 1186 * (The firmware could have used it, and left it running). 1187 */ 1188 gfar_halt(priv); 1189 1190 gfar_mac_reset(priv); 1191 1192 /* Zero out the rmon mib registers if it has them */ 1193 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON) { 1194 memset_io(&(regs->rmon), 0, sizeof(struct rmon_mib)); 1195 1196 /* Mask off the CAM interrupts */ 1197 gfar_write(&regs->rmon.cam1, 0xffffffff); 1198 gfar_write(&regs->rmon.cam2, 0xffffffff); 1199 } 1200 |
|
1068 /* Initialize ECNTRL */ 1069 gfar_write(&regs->ecntrl, ECNTRL_INIT_SETTINGS); 1070 | 1201 /* Initialize ECNTRL */ 1202 gfar_write(&regs->ecntrl, ECNTRL_INIT_SETTINGS); 1203
1071 /* Set the dev->base_addr to the gfar reg region */ 1072 dev->base_addr = (unsigned long) regs; | 1204 /* Set the extraction length and index */ 1205 attrs = ATTRELI_EL(priv->rx_stash_size) | 1206 ATTRELI_EI(priv->rx_stash_index); |
1073 | 1207 |
1074 /* Fill in the dev structure */ 1075 dev->watchdog_timeo = TX_TIMEOUT; 1076 dev->mtu = 1500; 1077 dev->netdev_ops = &gfar_netdev_ops; 1078 dev->ethtool_ops = &gfar_ethtool_ops; | 1208 gfar_write(&regs->attreli, attrs);
1079 | 1209 |
1080 /* Register for napi ...We are registering NAPI for each grp */ 1081 if (priv->mode == SQ_SG_MODE) 1082 netif_napi_add(dev, &priv->gfargrp[0].napi, gfar_poll_sq, 1083 GFAR_DEV_WEIGHT); 1084 else 1085 for (i = 0; i < priv->num_grps; i++) 1086 netif_napi_add(dev, &priv->gfargrp[i].napi, gfar_poll, 1087 GFAR_DEV_WEIGHT); | 1210 /* Start with defaults, and add stashing 1211 * depending on driver parameters 1212 */ 1213 attrs = ATTR_INIT_SETTINGS; |
1088 | 1214 |
1089 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_CSUM) { 1090 dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG | 1091 NETIF_F_RXCSUM; 1092 dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG | 1093 NETIF_F_RXCSUM | NETIF_F_HIGHDMA; 1094 } | 1215 if (priv->bd_stash_en) 1216 attrs |= ATTR_BDSTASH; |
1095 | 1217 |
1096 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_VLAN) { 1097 dev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX | 1098 NETIF_F_HW_VLAN_CTAG_RX; 1099 dev->features |= NETIF_F_HW_VLAN_CTAG_RX; 1100 } | 1218 if (priv->rx_stash_size != 0) 1219 attrs |= ATTR_BUFSTASH; |
1101 | 1220 |
1221 gfar_write(&regs->attr, attrs); 1222 1223 /* FIFO configs */ 1224 gfar_write(&regs->fifo_tx_thr, DEFAULT_FIFO_TX_THR); 1225 gfar_write(&regs->fifo_tx_starve, DEFAULT_FIFO_TX_STARVE); 1226 gfar_write(&regs->fifo_tx_starve_shutoff, DEFAULT_FIFO_TX_STARVE_OFF); 1227 1228 /* Program the interrupt steering regs, only for MG devices */ 1229 if (priv->num_grps > 1) 1230 gfar_write_isrg(priv); 1231} 1232 1233static void __init gfar_init_addr_hash_table(struct gfar_private *priv) 1234{ 1235 struct gfar __iomem *regs = priv->gfargrp[0].regs; 1236
|
1102 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_EXTENDED_HASH) { 1103 priv->extended_hash = 1; 1104 priv->hash_width = 9; 1105 1106 priv->hash_regs[0] = &regs->igaddr0; 1107 priv->hash_regs[1] = &regs->igaddr1; 1108 priv->hash_regs[2] = &regs->igaddr2; 1109 priv->hash_regs[3] = &regs->igaddr3; --- 18 unchanged lines hidden (view full) --- 1128 priv->hash_regs[1] = &regs->gaddr1; 1129 priv->hash_regs[2] = &regs->gaddr2; 1130 priv->hash_regs[3] = &regs->gaddr3; 1131 priv->hash_regs[4] = &regs->gaddr4; 1132 priv->hash_regs[5] = &regs->gaddr5; 1133 priv->hash_regs[6] = &regs->gaddr6; 1134 priv->hash_regs[7] = &regs->gaddr7; 1135 } | 1237 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_EXTENDED_HASH) { 1238 priv->extended_hash = 1; 1239 priv->hash_width = 9; 1240 1241 priv->hash_regs[0] = &regs->igaddr0; 1242 priv->hash_regs[1] = &regs->igaddr1; 1243 priv->hash_regs[2] = &regs->igaddr2; 1244 priv->hash_regs[3] = &regs->igaddr3; --- 18 unchanged lines hidden (view full) --- 1263 priv->hash_regs[1] = &regs->gaddr1; 1264 priv->hash_regs[2] = &regs->gaddr2; 1265 priv->hash_regs[3] = &regs->gaddr3; 1266 priv->hash_regs[4] = &regs->gaddr4; 1267 priv->hash_regs[5] = &regs->gaddr5; 1268 priv->hash_regs[6] = &regs->gaddr6; 1269 priv->hash_regs[7] = &regs->gaddr7; 1270 }
1271} |
|
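The hash geometry above follows from the register file: extended hashing uses all sixteen 32-bit registers (8 igaddr plus 8 gaddr), i.e. 512 bins and a 9-bit hash, while the non-extended case (in the hidden lines) uses only the eight gaddr registers, 256 bins and an 8-bit hash. A sketch of how such a hash value selects a register and bit, assumed to mirror the elided gfar_set_hash_for_addr(), which hashes the MAC address with an Ethernet CRC:

```c
#include <stdint.h>

/* Map the top @width bits of an Ethernet CRC to a hash register index and
 * a bit mask within it (register bits are numbered MSB-first). */
void hash_to_bin(uint32_t crc, int width, unsigned int *reg, uint32_t *mask)
{
	unsigned int whichbit = (crc >> (32 - width)) & 0x1f;

	*reg  = crc >> (32 - width + 5);   /* 0..15 when width == 9 */
	*mask = 1u << (31 - whichbit);     /* MSB-first bit in the register */
}
```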
1136 | 1272 |
1137 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_PADDING) 1138 priv->padding = DEFAULT_PADDING; 1139 else 1140 priv->padding = 0; | 1273/* Set up the ethernet device structure, private data, 1274 * and anything else we need before we start 1275 */ 1276static int gfar_probe(struct platform_device *ofdev) 1277{ 1278 struct net_device *dev = NULL; 1279 struct gfar_private *priv = NULL; 1280 int err = 0, i; |
1141 | 1281 |
1142 if (dev->features & NETIF_F_IP_CSUM || 1143 priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER) 1144 dev->needed_headroom = GMAC_FCB_LEN; | 1282 err = gfar_of_init(ofdev, &dev); |
1145 | 1283 |
1146 /* Program the isrg regs only if number of grps > 1 */ 1147 if (priv->num_grps > 1) { 1148 baddr = &regs->isrg0; 1149 for (i = 0; i < priv->num_grps; i++) { 1150 isrg |= (priv->gfargrp[i].rx_bit_map << ISRG_SHIFT_RX); 1151 isrg |= (priv->gfargrp[i].tx_bit_map << ISRG_SHIFT_TX); 1152 gfar_write(baddr, isrg); 1153 baddr++; 1154 isrg = 0x0; | 1284 if (err) 1285 return err; 1286 1287 priv = netdev_priv(dev); 1288 priv->ndev = dev; 1289 priv->ofdev = ofdev; 1290 priv->dev = &ofdev->dev; 1291 SET_NETDEV_DEV(dev, &ofdev->dev); 1292 1293 spin_lock_init(&priv->bflock); 1294 INIT_WORK(&priv->reset_task, gfar_reset_task); 1295 1296 platform_set_drvdata(ofdev, priv); 1297 1298 gfar_detect_errata(priv); 1299 1300 /* Set the dev->base_addr to the gfar reg region */ 1301 dev->base_addr = (unsigned long) priv->gfargrp[0].regs; 1302 1303 /* Fill in the dev structure */ 1304 dev->watchdog_timeo = TX_TIMEOUT; 1305 dev->mtu = 1500; 1306 dev->netdev_ops = &gfar_netdev_ops; 1307 dev->ethtool_ops = &gfar_ethtool_ops; 1308 1309 /* Register for napi ...We are registering NAPI for each grp */ 1310 for (i = 0; i < priv->num_grps; i++) { 1311 if (priv->poll_mode == GFAR_SQ_POLLING) { 1312 netif_napi_add(dev, &priv->gfargrp[i].napi_rx, 1313 gfar_poll_rx_sq, GFAR_DEV_WEIGHT); 1314 netif_napi_add(dev, &priv->gfargrp[i].napi_tx, 1315 gfar_poll_tx_sq, 2); 1316 } else { 1317 netif_napi_add(dev, &priv->gfargrp[i].napi_rx, 1318 gfar_poll_rx, GFAR_DEV_WEIGHT); 1319 netif_napi_add(dev, &priv->gfargrp[i].napi_tx, 1320 gfar_poll_tx, 2);
1155 } 1156 } 1157 | 1321 } 1322 } 1323 |
1158 /* Need to reverse the bit maps as bit_map's MSB is q0 1159 * but, for_each_set_bit parses from right to left, which 1160 * basically reverses the queue numbers 1161 */ 1162 for (i = 0; i< priv->num_grps; i++) { 1163 priv->gfargrp[i].tx_bit_map = 1164 reverse_bitmap(priv->gfargrp[i].tx_bit_map, MAX_TX_QS); 1165 priv->gfargrp[i].rx_bit_map = 1166 reverse_bitmap(priv->gfargrp[i].rx_bit_map, MAX_RX_QS); | 1324 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_CSUM) { 1325 dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG | 1326 NETIF_F_RXCSUM; 1327 dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG | 1328 NETIF_F_RXCSUM | NETIF_F_HIGHDMA; |
1167 } 1168 | 1329 } 1330 |
1169 /* Calculate RSTAT, TSTAT, RQUEUE and TQUEUE values, 1170 * also assign queues to groups 1171 */ 1172 for (grp_idx = 0; grp_idx < priv->num_grps; grp_idx++) { 1173 priv->gfargrp[grp_idx].num_rx_queues = 0x0; | 1331 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_VLAN) { 1332 dev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX | 1333 NETIF_F_HW_VLAN_CTAG_RX; 1334 dev->features |= NETIF_F_HW_VLAN_CTAG_RX; 1335 } |
1174 | 1336 |
1175 for_each_set_bit(i, &priv->gfargrp[grp_idx].rx_bit_map, 1176 priv->num_rx_queues) { 1177 priv->gfargrp[grp_idx].num_rx_queues++; 1178 priv->rx_queue[i]->grp = &priv->gfargrp[grp_idx]; 1179 rstat = rstat | (RSTAT_CLEAR_RHALT >> i); 1180 rqueue = rqueue | ((RQUEUE_EN0 | RQUEUE_EX0) >> i); 1181 } 1182 priv->gfargrp[grp_idx].num_tx_queues = 0x0; | 1337 gfar_init_addr_hash_table(priv); |
1183 | 1338 |
1184 for_each_set_bit(i, &priv->gfargrp[grp_idx].tx_bit_map, 1185 priv->num_tx_queues) { 1186 priv->gfargrp[grp_idx].num_tx_queues++; 1187 priv->tx_queue[i]->grp = &priv->gfargrp[grp_idx]; 1188 tstat = tstat | (TSTAT_CLEAR_THALT >> i); 1189 tqueue = tqueue | (TQUEUE_EN0 >> i); 1190 } 1191 priv->gfargrp[grp_idx].rstat = rstat; 1192 priv->gfargrp[grp_idx].tstat = tstat; 1193 rstat = tstat =0; 1194 } | 1339 /* Insert receive time stamps into padding alignment bytes */ 1340 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER) 1341 priv->padding = 8; |
1195 | 1342 |
1196 gfar_write(&regs->rqueue, rqueue); 1197 gfar_write(&regs->tqueue, tqueue); | 1343 if (dev->features & NETIF_F_IP_CSUM || 1344 priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER) 1345 dev->needed_headroom = GMAC_FCB_LEN;
1198 1199 priv->rx_buffer_size = DEFAULT_RX_BUFFER_SIZE; 1200 1201 /* Initializing some of the rx/tx queue level parameters */ 1202 for (i = 0; i < priv->num_tx_queues; i++) { 1203 priv->tx_queue[i]->tx_ring_size = DEFAULT_TX_RING_SIZE; 1204 priv->tx_queue[i]->num_txbdfree = DEFAULT_TX_RING_SIZE; 1205 priv->tx_queue[i]->txcoalescing = DEFAULT_TX_COALESCE; --- 9 unchanged lines hidden (view full) --- 1215 /* always enable rx filer */ 1216 priv->rx_filer_enable = 1; 1217 /* Enable most messages by default */ 1218 priv->msg_enable = (NETIF_MSG_IFUP << 1 ) - 1; 1219 /* use priority h/w tx queue scheduling for single queue devices */ 1220 if (priv->num_tx_queues == 1) 1221 priv->prio_sched_en = 1; 1222 | 1346 1347 priv->rx_buffer_size = DEFAULT_RX_BUFFER_SIZE; 1348 1349 /* Initializing some of the rx/tx queue level parameters */ 1350 for (i = 0; i < priv->num_tx_queues; i++) { 1351 priv->tx_queue[i]->tx_ring_size = DEFAULT_TX_RING_SIZE; 1352 priv->tx_queue[i]->num_txbdfree = DEFAULT_TX_RING_SIZE; 1353 priv->tx_queue[i]->txcoalescing = DEFAULT_TX_COALESCE; --- 9 unchanged lines hidden (view full) --- 1363 /* always enable rx filer */ 1364 priv->rx_filer_enable = 1; 1365 /* Enable most messages by default */ 1366 priv->msg_enable = (NETIF_MSG_IFUP << 1 ) - 1; 1367 /* use priority h/w tx queue scheduling for single queue devices */ 1368 if (priv->num_tx_queues == 1) 1369 priv->prio_sched_en = 1; 1370
1223 /* Carrier starts down, phylib will bring it up */ 1224 netif_carrier_off(dev); | 1371 set_bit(GFAR_DOWN, &priv->state); |
1225 | 1372 |
1373 gfar_hw_init(priv); 1374 |
|
1226 err = register_netdev(dev); 1227 1228 if (err) { 1229 pr_err("%s: Cannot register net device, aborting\n", dev->name); 1230 goto register_fail; 1231 } 1232 | 1375 err = register_netdev(dev); 1376 1377 if (err) { 1378 pr_err("%s: Cannot register net device, aborting\n", dev->name); 1379 goto register_fail; 1380 } 1381 |
1382 /* Carrier starts down, phylib will bring it up */ 1383 netif_carrier_off(dev); 1384 |
|
1233 device_init_wakeup(&dev->dev, 1234 priv->device_flags & 1235 FSL_GIANFAR_DEV_HAS_MAGIC_PACKET); 1236 1237 /* fill out IRQ number and name fields */ 1238 for (i = 0; i < priv->num_grps; i++) { 1239 struct gfar_priv_grp *grp = &priv->gfargrp[i]; 1240 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) { --- 5 unchanged lines hidden (view full) --- 1246 dev->name, "_g", '0' + i, "_er"); 1247 } else 1248 strcpy(gfar_irq(grp, TX)->name, dev->name); 1249 } 1250 1251 /* Initialize the filer table */ 1252 gfar_init_filer_table(priv); 1253 | 1385 device_init_wakeup(&dev->dev, 1386 priv->device_flags & 1387 FSL_GIANFAR_DEV_HAS_MAGIC_PACKET); 1388 1389 /* fill out IRQ number and name fields */ 1390 for (i = 0; i < priv->num_grps; i++) { 1391 struct gfar_priv_grp *grp = &priv->gfargrp[i]; 1392 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) { --- 5 unchanged lines hidden (view full) --- 1398 dev->name, "_g", '0' + i, "_er"); 1399 } else 1400 strcpy(gfar_irq(grp, TX)->name, dev->name); 1401 } 1402 1403 /* Initialize the filer table */ 1404 gfar_init_filer_table(priv); 1405 |
1254 /* Create all the sysfs files */ 1255 gfar_init_sysfs(dev); 1256 | |
1257 /* Print out the device info */ 1258 netdev_info(dev, "mac: %pM\n", dev->dev_addr); 1259 1260 /* Even more device info helps when determining which kernel 1261 * provided which set of benchmarks. 1262 */ 1263 netdev_info(dev, "Running with NAPI enabled\n"); 1264 for (i = 0; i < priv->num_rx_queues; i++) 1265 netdev_info(dev, "RX BD ring size for Q[%d]: %d\n", 1266 i, priv->rx_queue[i]->rx_ring_size); 1267 for (i = 0; i < priv->num_tx_queues; i++) 1268 netdev_info(dev, "TX BD ring size for Q[%d]: %d\n", 1269 i, priv->tx_queue[i]->tx_ring_size); 1270 1271 return 0; 1272 1273register_fail: 1274 unmap_group_regs(priv); | 1406 /* Print out the device info */ 1407 netdev_info(dev, "mac: %pM\n", dev->dev_addr); 1408 1409 /* Even more device info helps when determining which kernel 1410 * provided which set of benchmarks. 1411 */ 1412 netdev_info(dev, "Running with NAPI enabled\n"); 1413 for (i = 0; i < priv->num_rx_queues; i++) 1414 netdev_info(dev, "RX BD ring size for Q[%d]: %d\n", 1415 i, priv->rx_queue[i]->rx_ring_size); 1416 for (i = 0; i < priv->num_tx_queues; i++) 1417 netdev_info(dev, "TX BD ring size for Q[%d]: %d\n", 1418 i, priv->tx_queue[i]->tx_ring_size); 1419 1420 return 0; 1421 1422register_fail: 1423 unmap_group_regs(priv); |
1275 free_tx_pointers(priv); 1276 free_rx_pointers(priv); | 1424 gfar_free_rx_queues(priv); 1425 gfar_free_tx_queues(priv); |
1277 if (priv->phy_node) 1278 of_node_put(priv->phy_node); 1279 if (priv->tbi_node) 1280 of_node_put(priv->tbi_node); 1281 free_gfar_dev(priv); 1282 return err; 1283} 1284 1285static int gfar_remove(struct platform_device *ofdev) 1286{ 1287 struct gfar_private *priv = platform_get_drvdata(ofdev); 1288 1289 if (priv->phy_node) 1290 of_node_put(priv->phy_node); 1291 if (priv->tbi_node) 1292 of_node_put(priv->tbi_node); 1293 1294 unregister_netdev(priv->ndev); 1295 unmap_group_regs(priv); | 1426 if (priv->phy_node) 1427 of_node_put(priv->phy_node); 1428 if (priv->tbi_node) 1429 of_node_put(priv->tbi_node); 1430 free_gfar_dev(priv); 1431 return err; 1432} 1433 1434static int gfar_remove(struct platform_device *ofdev) 1435{ 1436 struct gfar_private *priv = platform_get_drvdata(ofdev); 1437 1438 if (priv->phy_node) 1439 of_node_put(priv->phy_node); 1440 if (priv->tbi_node) 1441 of_node_put(priv->tbi_node); 1442 1443 unregister_netdev(priv->ndev); 1444 unmap_group_regs(priv); |
1445 gfar_free_rx_queues(priv); 1446 gfar_free_tx_queues(priv); |
|
1296 free_gfar_dev(priv); 1297 1298 return 0; 1299} 1300 1301#ifdef CONFIG_PM 1302 1303static int gfar_suspend(struct device *dev) --- 9 unchanged lines hidden (view full) --- 1313 FSL_GIANFAR_DEV_HAS_MAGIC_PACKET); 1314 1315 netif_device_detach(ndev); 1316 1317 if (netif_running(ndev)) { 1318 1319 local_irq_save(flags); 1320 lock_tx_qs(priv); | 1447 free_gfar_dev(priv); 1448 1449 return 0; 1450} 1451 1452#ifdef CONFIG_PM 1453 1454static int gfar_suspend(struct device *dev) --- 9 unchanged lines hidden (view full) --- 1464 FSL_GIANFAR_DEV_HAS_MAGIC_PACKET); 1465 1466 netif_device_detach(ndev); 1467 1468 if (netif_running(ndev)) { 1469 1470 local_irq_save(flags); 1471 lock_tx_qs(priv); |
1321 lock_rx_qs(priv); | |
1322 | 1472 |
1323 gfar_halt_nodisable(ndev); | 1473 gfar_halt_nodisable(priv); |
1324 1325 /* Disable Tx, and Rx if wake-on-LAN is disabled. */ 1326 tempval = gfar_read(&regs->maccfg1); 1327 1328 tempval &= ~MACCFG1_TX_EN; 1329 1330 if (!magic_packet) 1331 tempval &= ~MACCFG1_RX_EN; 1332 1333 gfar_write(&regs->maccfg1, tempval); 1334 | 1474 1475 /* Disable Tx, and Rx if wake-on-LAN is disabled. */ 1476 tempval = gfar_read(&regs->maccfg1); 1477 1478 tempval &= ~MACCFG1_TX_EN; 1479 1480 if (!magic_packet) 1481 tempval &= ~MACCFG1_RX_EN; 1482 1483 gfar_write(&regs->maccfg1, tempval); 1484
1335 unlock_rx_qs(priv); | |
1336 unlock_tx_qs(priv); 1337 local_irq_restore(flags); 1338 1339 disable_napi(priv); 1340 1341 if (magic_packet) { 1342 /* Enable interrupt on Magic Packet */ 1343 gfar_write(&regs->imask, IMASK_MAG); --- 29 unchanged lines hidden (view full) --- 1373 if (!magic_packet && priv->phydev) 1374 phy_start(priv->phydev); 1375 1376 /* Disable Magic Packet mode, in case something 1377 * else woke us up. 1378 */ 1379 local_irq_save(flags); 1380 lock_tx_qs(priv); | 1485 unlock_tx_qs(priv); 1486 local_irq_restore(flags); 1487 1488 disable_napi(priv); 1489 1490 if (magic_packet) { 1491 /* Enable interrupt on Magic Packet */ 1492 gfar_write(&regs->imask, IMASK_MAG); --- 29 unchanged lines hidden (view full) --- 1522 if (!magic_packet && priv->phydev) 1523 phy_start(priv->phydev); 1524 1525 /* Disable Magic Packet mode, in case something 1526 * else woke us up. 1527 */ 1528 local_irq_save(flags); 1529 lock_tx_qs(priv);
1381 lock_rx_qs(priv); | |
1382 1383 tempval = gfar_read(&regs->maccfg2); 1384 tempval &= ~MACCFG2_MPEN; 1385 gfar_write(&regs->maccfg2, tempval); 1386 | 1530 1531 tempval = gfar_read(&regs->maccfg2); 1532 tempval &= ~MACCFG2_MPEN; 1533 gfar_write(&regs->maccfg2, tempval); 1534
1387 gfar_start(ndev); | 1535 gfar_start(priv); |
1388 | 1536 |
1389 unlock_rx_qs(priv); | |
1390 unlock_tx_qs(priv); 1391 local_irq_restore(flags); 1392 1393 netif_device_attach(ndev); 1394 1395 enable_napi(priv); 1396 1397 return 0; --- 10 unchanged lines hidden (view full) --- 1408 return 0; 1409 } 1410 1411 if (gfar_init_bds(ndev)) { 1412 free_skb_resources(priv); 1413 return -ENOMEM; 1414 } 1415 | 1537 unlock_tx_qs(priv); 1538 local_irq_restore(flags); 1539 1540 netif_device_attach(ndev); 1541 1542 enable_napi(priv); 1543 1544 return 0; --- 10 unchanged lines hidden (view full) --- 1555 return 0; 1556 } 1557 1558 if (gfar_init_bds(ndev)) { 1559 free_skb_resources(priv); 1560 return -ENOMEM; 1561 } 1562 |
1416 init_registers(ndev); 1417 gfar_set_mac_address(ndev); 1418 gfar_init_mac(ndev); 1419 gfar_start(ndev); | 1563 gfar_mac_reset(priv); |
1420 | 1564 |
1565 gfar_init_tx_rx_base(priv); 1566 1567 gfar_start(priv); 1568 |
|
1421 priv->oldlink = 0; 1422 priv->oldspeed = 0; 1423 priv->oldduplex = -1; 1424 1425 if (priv->phydev) 1426 phy_start(priv->phydev); 1427 1428 netif_device_attach(ndev); --- 140 unchanged lines hidden (view full) --- 1569 ADVERTISE_1000XFULL | ADVERTISE_1000XPAUSE | 1570 ADVERTISE_1000XPSE_ASYM); 1571 1572 phy_write(tbiphy, MII_BMCR, 1573 BMCR_ANENABLE | BMCR_ANRESTART | BMCR_FULLDPLX | 1574 BMCR_SPEED1000); 1575} 1576 | 1569 priv->oldlink = 0; 1570 priv->oldspeed = 0; 1571 priv->oldduplex = -1; 1572 1573 if (priv->phydev) 1574 phy_start(priv->phydev); 1575 1576 netif_device_attach(ndev); --- 140 unchanged lines hidden (view full) --- 1717 ADVERTISE_1000XFULL | ADVERTISE_1000XPAUSE | 1718 ADVERTISE_1000XPSE_ASYM); 1719 1720 phy_write(tbiphy, MII_BMCR, 1721 BMCR_ANENABLE | BMCR_ANRESTART | BMCR_FULLDPLX | 1722 BMCR_SPEED1000); 1723} 1724 |
1577static void init_registers(struct net_device *dev) 1578{ 1579 struct gfar_private *priv = netdev_priv(dev); 1580 struct gfar __iomem *regs = NULL; 1581 int i; 1582 1583 for (i = 0; i < priv->num_grps; i++) { 1584 regs = priv->gfargrp[i].regs; 1585 /* Clear IEVENT */ 1586 gfar_write(&regs->ievent, IEVENT_INIT_CLEAR); 1587 1588 /* Initialize IMASK */ 1589 gfar_write(&regs->imask, IMASK_INIT_CLEAR); 1590 } 1591 1592 regs = priv->gfargrp[0].regs; 1593 /* Init hash registers to zero */ 1594 gfar_write(&regs->igaddr0, 0); 1595 gfar_write(&regs->igaddr1, 0); 1596 gfar_write(&regs->igaddr2, 0); 1597 gfar_write(&regs->igaddr3, 0); 1598 gfar_write(&regs->igaddr4, 0); 1599 gfar_write(&regs->igaddr5, 0); 1600 gfar_write(&regs->igaddr6, 0); 1601 gfar_write(&regs->igaddr7, 0); 1602 1603 gfar_write(&regs->gaddr0, 0); 1604 gfar_write(&regs->gaddr1, 0); 1605 gfar_write(&regs->gaddr2, 0); 1606 gfar_write(&regs->gaddr3, 0); 1607 gfar_write(&regs->gaddr4, 0); 1608 gfar_write(&regs->gaddr5, 0); 1609 gfar_write(&regs->gaddr6, 0); 1610 gfar_write(&regs->gaddr7, 0); 1611 1612 /* Zero out the rmon mib registers if it has them */ 1613 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON) { 1614 memset_io(&(regs->rmon), 0, sizeof (struct rmon_mib)); 1615 1616 /* Mask off the CAM interrupts */ 1617 gfar_write(&regs->rmon.cam1, 0xffffffff); 1618 gfar_write(&regs->rmon.cam2, 0xffffffff); 1619 } 1620 1621 /* Initialize the max receive buffer length */ 1622 gfar_write(&regs->mrblr, priv->rx_buffer_size); 1623 1624 /* Initialize the Minimum Frame Length Register */ 1625 gfar_write(&regs->minflr, MINFLR_INIT_SETTINGS); 1626} 1627 |
1628static int __gfar_is_rx_idle(struct gfar_private *priv) 1629{ 1630 u32 res; 1631 1632 /* Normally TSEC should not hang on GRS commands, so we should 1633 * actually wait for IEVENT_GRSC flag. 1634 */ 1635 if (!gfar_has_errata(priv, GFAR_ERRATA_A002)) --- 7 unchanged lines hidden --- 1643 res &= 0x7f807f80; 1644 if ((res & 0xffff) == (res >> 16)) 1645 return 1; 1646 1647 return 0; 1648} 1649 1650/* Halt the receive and transmit queues */ | 1725static int __gfar_is_rx_idle(struct gfar_private *priv) 1726{ 1727 u32 res; 1728 1729 /* Normally TSEC should not hang on GRS commands, so we should 1730 * actually wait for IEVENT_GRSC flag. 1731 */ 1732 if (!gfar_has_errata(priv, GFAR_ERRATA_A002)) --- 7 unchanged lines hidden --- 1740 res &= 0x7f807f80; 1741 if ((res & 0xffff) == (res >> 16)) 1742 return 1; 1743 1744 return 0; 1745} 1746 1747/* Halt the receive and transmit queues */ |
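The A002 workaround above packs two snapshots of the Rx status into one 32-bit word and treats the receiver as idle when the two 16-bit halves agree after masking. A minimal user-space model of that comparison (the mask and shift mirror the driver; the sample inputs are invented for illustration):

    /* Model of the __gfar_is_rx_idle() half-compare. */
    #include <stdint.h>
    #include <stdio.h>

    static int halves_match(uint32_t res)
    {
            res &= 0x7f807f80;      /* keep only the compared bits */
            return (res & 0xffff) == (res >> 16);
    }

    int main(void)
    {
            printf("%d\n", halves_match(0x12801280)); /* 1: idle */
            printf("%d\n", halves_match(0x12800080)); /* 0: still busy */
            return 0;
    }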
1651static void gfar_halt_nodisable(struct net_device *dev) | 1748static void gfar_halt_nodisable(struct gfar_private *priv) |
1652{ | 1749{ |
1653 struct gfar_private *priv = netdev_priv(dev); 1654 struct gfar __iomem *regs = NULL; | 1750 struct gfar __iomem *regs = priv->gfargrp[0].regs; |
1655 u32 tempval; | 1751 u32 tempval; |
1656 int i; | |
1657 | 1752 |
1658 for (i = 0; i < priv->num_grps; i++) { 1659 regs = priv->gfargrp[i].regs; 1660 /* Mask all interrupts */ 1661 gfar_write(&regs->imask, IMASK_INIT_CLEAR); | 1753 gfar_ints_disable(priv); |
1662 | 1754 |
1663 /* Clear all interrupts */ 1664 gfar_write(&regs->ievent, IEVENT_INIT_CLEAR); 1665 } 1666 1667 regs = priv->gfargrp[0].regs; |
1668 /* Stop the DMA, and wait for it to stop */ 1669 tempval = gfar_read(&regs->dmactrl); 1670 if ((tempval & (DMACTRL_GRS | DMACTRL_GTS)) != 1671 (DMACTRL_GRS | DMACTRL_GTS)) { 1672 int ret; 1673 1674 tempval |= (DMACTRL_GRS | DMACTRL_GTS); 1675 gfar_write(&regs->dmactrl, tempval); --- 4 unchanged lines hidden --- 1680 (IEVENT_GRSC | IEVENT_GTSC)), 1000000, 0); 1681 if (!ret && !(gfar_read(&regs->ievent) & IEVENT_GRSC)) 1682 ret = __gfar_is_rx_idle(priv); 1683 } while (!ret); 1684 } 1685} 1686 1687/* Halt the receive and transmit queues */ | 1755 /* Stop the DMA, and wait for it to stop */ 1756 tempval = gfar_read(&regs->dmactrl); 1757 if ((tempval & (DMACTRL_GRS | DMACTRL_GTS)) != 1758 (DMACTRL_GRS | DMACTRL_GTS)) { 1759 int ret; 1760 1761 tempval |= (DMACTRL_GRS | DMACTRL_GTS); 1762 gfar_write(&regs->dmactrl, tempval); --- 4 unchanged lines hidden --- 1767 (IEVENT_GRSC | IEVENT_GTSC)), 1000000, 0); 1768 if (!ret && !(gfar_read(&regs->ievent) & IEVENT_GRSC)) 1769 ret = __gfar_is_rx_idle(priv); 1770 } while (!ret); 1771 } 1772} 1773 1774/* Halt the receive and transmit queues */ |
1688void gfar_halt(struct net_device *dev) | 1775void gfar_halt(struct gfar_private *priv) |
1689{ | 1776{ |
1690 struct gfar_private *priv = netdev_priv(dev); | |
1691 struct gfar __iomem *regs = priv->gfargrp[0].regs; 1692 u32 tempval; 1693 | 1777 struct gfar __iomem *regs = priv->gfargrp[0].regs; 1778 u32 tempval; 1779 |
1694 gfar_halt_nodisable(dev); | 1780 /* Disable the Rx/Tx hw queues */ 1781 gfar_write(&regs->rqueue, 0); 1782 gfar_write(&regs->tqueue, 0); |
1695 | 1783 |
1696 /* Disable Rx and Tx */ | 1784 mdelay(10); 1785 1786 gfar_halt_nodisable(priv); 1787 1788 /* Disable Rx/Tx DMA */ |
1697 tempval = gfar_read(&regs->maccfg1); 1698 tempval &= ~(MACCFG1_RX_EN | MACCFG1_TX_EN); 1699 gfar_write(&regs->maccfg1, tempval); 1700} 1701 | 1789 tempval = gfar_read(&regs->maccfg1); 1790 tempval &= ~(MACCFG1_RX_EN | MACCFG1_TX_EN); 1791 gfar_write(&regs->maccfg1, tempval); 1792} 1793 |
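Both versions stop DMA the same way: set GRS/GTS in DMACTRL, then wait until IEVENT reports both graceful-stop completions, falling back to the Rx-idle probe on parts with the A002 erratum. A rough user-space model of that wait (accessors are stubs; the bit values are illustrative, not the hardware ones):

    #include <stdint.h>
    #include <stdio.h>

    #define EV_GRSC 0x100u          /* illustrative bit values */
    #define EV_GTSC 0x020u

    static uint32_t fake_ievent = EV_GRSC | EV_GTSC;
    static uint32_t read_ievent(void) { return fake_ievent; }
    static int rx_idle_probe(void) { return 1; }  /* errata fallback, stubbed */

    static int wait_for_graceful_stop(void)
    {
            int spins = 1000000;

            while (spins--) {
                    uint32_t ev = read_ievent();
                    if ((ev & (EV_GRSC | EV_GTSC)) == (EV_GRSC | EV_GTSC))
                            return 0;       /* both paths halted cleanly */
            }
            /* timed out waiting for GRSC: trust the Rx-idle probe */
            return rx_idle_probe() ? 0 : -1;
    }

    int main(void)
    {
            printf("%d\n", wait_for_graceful_stop());
            return 0;
    }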
1702static void free_grp_irqs(struct gfar_priv_grp *grp) 1703{ 1704 free_irq(gfar_irq(grp, TX)->irq, grp); 1705 free_irq(gfar_irq(grp, RX)->irq, grp); 1706 free_irq(gfar_irq(grp, ER)->irq, grp); 1707} 1708 | |
1709void stop_gfar(struct net_device *dev) 1710{ 1711 struct gfar_private *priv = netdev_priv(dev); | 1794void stop_gfar(struct net_device *dev) 1795{ 1796 struct gfar_private *priv = netdev_priv(dev); |
1712 unsigned long flags; 1713 int i; | |
1714 | 1797 |
1715 phy_stop(priv->phydev); | 1798 netif_tx_stop_all_queues(dev); |
1716 | 1799 |
1800 smp_mb__before_clear_bit(); 1801 set_bit(GFAR_DOWN, &priv->state); 1802 smp_mb__after_clear_bit(); |
|
1717 | 1803 |
1718 /* Lock it down */ 1719 local_irq_save(flags); 1720 lock_tx_qs(priv); 1721 lock_rx_qs(priv); | 1804 disable_napi(priv); |
1722 | 1805 |
1723 gfar_halt(dev); | 1806 /* disable ints and gracefully shut down Rx/Tx DMA */ 1807 gfar_halt(priv); |
1724 | 1808 |
1725 unlock_rx_qs(priv); 1726 unlock_tx_qs(priv); 1727 local_irq_restore(flags); | 1809 phy_stop(priv->phydev); |
1728 | 1810 |
1729 /* Free the IRQs */ 1730 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) { 1731 for (i = 0; i < priv->num_grps; i++) 1732 free_grp_irqs(&priv->gfargrp[i]); 1733 } else { 1734 for (i = 0; i < priv->num_grps; i++) 1735 free_irq(gfar_irq(&priv->gfargrp[i], TX)->irq, 1736 &priv->gfargrp[i]); 1737 } 1738 | |
1739 free_skb_resources(priv); 1740} 1741 1742static void free_skb_tx_queue(struct gfar_priv_tx_q *tx_queue) 1743{ 1744 struct txbd8 *txbdp; 1745 struct gfar_private *priv = netdev_priv(tx_queue->dev); 1746 int i, j; --- 73 unchanged lines hidden --- 1820 1821 dma_free_coherent(priv->dev, 1822 sizeof(struct txbd8) * priv->total_tx_ring_size + 1823 sizeof(struct rxbd8) * priv->total_rx_ring_size, 1824 priv->tx_queue[0]->tx_bd_base, 1825 priv->tx_queue[0]->tx_bd_dma_base); 1826} 1827 | 1811 free_skb_resources(priv); 1812} 1813 1814static void free_skb_tx_queue(struct gfar_priv_tx_q *tx_queue) 1815{ 1816 struct txbd8 *txbdp; 1817 struct gfar_private *priv = netdev_priv(tx_queue->dev); 1818 int i, j; --- 73 unchanged lines hidden --- 1892 1893 dma_free_coherent(priv->dev, 1894 sizeof(struct txbd8) * priv->total_tx_ring_size + 1895 sizeof(struct rxbd8) * priv->total_rx_ring_size, 1896 priv->tx_queue[0]->tx_bd_base, 1897 priv->tx_queue[0]->tx_bd_dma_base); 1898} 1899 |
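The rewritten stop path drops the per-queue spinlocks in favor of a GFAR_DOWN state bit, published with memory barriers so that the Tx-wake check in gfar_clean_tx_ring() (further below) cannot wake a queue mid-teardown. A loose C11-atomics model of that publish/observe idiom (names and structure ours, not the kernel API):

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    static atomic_bool dev_down;    /* plays the role of GFAR_DOWN */

    static void stop_path(void)
    {
            /* set_bit() fenced by smp_mb(): publish before teardown */
            atomic_store_explicit(&dev_down, true, memory_order_seq_cst);
            /* ... halt DMA, free rings ... */
    }

    static void maybe_wake_queue(bool queue_stopped, int free_bds)
    {
            if (free_bds && queue_stopped &&
                !atomic_load_explicit(&dev_down, memory_order_seq_cst))
                    printf("wake subqueue\n");
            else
                    printf("leave queue stopped\n");
    }

    int main(void)
    {
            maybe_wake_queue(true, 8);      /* device up: wakes */
            stop_path();
            maybe_wake_queue(true, 8);      /* going down: stays stopped */
            return 0;
    }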
1828void gfar_start(struct net_device *dev) | 1900void gfar_start(struct gfar_private *priv) |
1829{ | 1901{ |
1830 struct gfar_private *priv = netdev_priv(dev); | |
1831 struct gfar __iomem *regs = priv->gfargrp[0].regs; 1832 u32 tempval; 1833 int i = 0; 1834 | 1902 struct gfar __iomem *regs = priv->gfargrp[0].regs; 1903 u32 tempval; 1904 int i = 0; 1905 |
1835 /* Enable Rx and Tx in MACCFG1 */ 1836 tempval = gfar_read(&regs->maccfg1); 1837 tempval |= (MACCFG1_RX_EN | MACCFG1_TX_EN); 1838 gfar_write(&regs->maccfg1, tempval); | 1906 /* Enable Rx/Tx hw queues */ 1907 gfar_write(&regs->rqueue, priv->rqueue); 1908 gfar_write(&regs->tqueue, priv->tqueue); |
1839 1840 /* Initialize DMACTRL to have WWR and WOP */ 1841 tempval = gfar_read(&regs->dmactrl); 1842 tempval |= DMACTRL_INIT_SETTINGS; 1843 gfar_write(&regs->dmactrl, tempval); 1844 1845 /* Make sure we aren't stopped */ 1846 tempval = gfar_read(&regs->dmactrl); 1847 tempval &= ~(DMACTRL_GRS | DMACTRL_GTS); 1848 gfar_write(&regs->dmactrl, tempval); 1849 1850 for (i = 0; i < priv->num_grps; i++) { 1851 regs = priv->gfargrp[i].regs; 1852 /* Clear THLT/RHLT, so that the DMA starts polling now */ 1853 gfar_write(&regs->tstat, priv->gfargrp[i].tstat); 1854 gfar_write(&regs->rstat, priv->gfargrp[i].rstat); | 1909 1910 /* Initialize DMACTRL to have WWR and WOP */ 1911 tempval = gfar_read(&regs->dmactrl); 1912 tempval |= DMACTRL_INIT_SETTINGS; 1913 gfar_write(&regs->dmactrl, tempval); 1914 1915 /* Make sure we aren't stopped */ 1916 tempval = gfar_read(&regs->dmactrl); 1917 tempval &= ~(DMACTRL_GRS | DMACTRL_GTS); 1918 gfar_write(&regs->dmactrl, tempval); 1919 1920 for (i = 0; i < priv->num_grps; i++) { 1921 regs = priv->gfargrp[i].regs; 1922 /* Clear THLT/RHLT, so that the DMA starts polling now */ 1923 gfar_write(&regs->tstat, priv->gfargrp[i].tstat); 1924 gfar_write(&regs->rstat, priv->gfargrp[i].rstat); |
1855 /* Unmask the interrupts we look for */ 1856 gfar_write(&regs->imask, IMASK_DEFAULT); |
1857 } 1858 | 1925 } 1926 |
1859 dev->trans_start = jiffies; /* prevent tx timeout */ 1860} | 1927 /* Enable Rx/Tx DMA */ 1928 tempval = gfar_read(&regs->maccfg1); 1929 tempval |= (MACCFG1_RX_EN | MACCFG1_TX_EN); 1930 gfar_write(&regs->maccfg1, tempval); |
1861 | 1931 |
1862static void gfar_configure_coalescing(struct gfar_private *priv, 1863 unsigned long tx_mask, unsigned long rx_mask) 1864{ 1865 struct gfar __iomem *regs = priv->gfargrp[0].regs; 1866 u32 __iomem *baddr; | 1932 gfar_ints_enable(priv); |
1867 | 1933 |
1868 if (priv->mode == MQ_MG_MODE) { 1869 int i = 0; 1870 1871 baddr = &regs->txic0; 1872 for_each_set_bit(i, &tx_mask, priv->num_tx_queues) { 1873 gfar_write(baddr + i, 0); 1874 if (likely(priv->tx_queue[i]->txcoalescing)) 1875 gfar_write(baddr + i, priv->tx_queue[i]->txic); 1876 } 1877 1878 baddr = &regs->rxic0; 1879 for_each_set_bit(i, &rx_mask, priv->num_rx_queues) { 1880 gfar_write(baddr + i, 0); 1881 if (likely(priv->rx_queue[i]->rxcoalescing)) 1882 gfar_write(baddr + i, priv->rx_queue[i]->rxic); 1883 } 1884 } else { 1885 /* Backward compatible case -- even if we enable 1886 * multiple queues, there's only single reg to program 1887 */ 1888 gfar_write(&regs->txic, 0); 1889 if (likely(priv->tx_queue[0]->txcoalescing)) 1890 gfar_write(&regs->txic, priv->tx_queue[0]->txic); 1891 1892 gfar_write(&regs->rxic, 0); 1893 if (unlikely(priv->rx_queue[0]->rxcoalescing)) 1894 gfar_write(&regs->rxic, priv->rx_queue[0]->rxic); 1895 | 1934 priv->ndev->trans_start = jiffies; /* prevent tx timeout */ |
1896} 1897 | 1935} 1936 |
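The removed gfar_configure_coalescing() above walked a queue bitmask and programmed one interrupt-coalescing register per queue in multi-group mode, or the single legacy register otherwise. A compact stand-alone model of that bitmask walk, with for_each_set_bit() spelled out as a plain loop and the registers replaced by an array:

    #include <stdint.h>
    #include <stdio.h>

    #define MAX_QS 8

    static uint32_t txic_regs[MAX_QS];  /* stand-ins for the per-queue txic registers */

    /* Clear each selected register first, then program it only if that
     * queue has coalescing enabled (a nonzero setting), as the driver does. */
    static void program_coalescing(unsigned long tx_mask, const uint32_t *txic)
    {
            for (int i = 0; i < MAX_QS; i++) {
                    if (!(tx_mask & (1UL << i)))
                            continue;               /* for_each_set_bit() */
                    txic_regs[i] = 0;
                    if (txic[i])
                            txic_regs[i] = txic[i];
            }
    }

    int main(void)
    {
            uint32_t txic[MAX_QS] = { 0x1000, 0, 0x2000, 0, 0, 0, 0, 0 };

            program_coalescing(0x5UL, txic);        /* queues 0 and 2 */
            for (int i = 0; i < 3; i++)
                    printf("txic%d = 0x%x\n", i, txic_regs[i]);
            return 0;
    }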
1898void gfar_configure_coalescing_all(struct gfar_private *priv) | 1937static void free_grp_irqs(struct gfar_priv_grp *grp) |
1899{ | 1938{ |
1900 gfar_configure_coalescing(priv, 0xFF, 0xFF); | 1939 free_irq(gfar_irq(grp, TX)->irq, grp); 1940 free_irq(gfar_irq(grp, RX)->irq, grp); 1941 free_irq(gfar_irq(grp, ER)->irq, grp); |
1901} 1902 1903static int register_grp_irqs(struct gfar_priv_grp *grp) 1904{ 1905 struct gfar_private *priv = grp->priv; 1906 struct net_device *dev = priv->ndev; 1907 int err; 1908 --- 42 unchanged lines hidden --- 1951 free_irq(gfar_irq(grp, TX)->irq, grp); 1952tx_irq_fail: 1953 free_irq(gfar_irq(grp, ER)->irq, grp); 1954err_irq_fail: 1955 return err; 1956 1957} 1958 | 1942} 1943 1944static int register_grp_irqs(struct gfar_priv_grp *grp) 1945{ 1946 struct gfar_private *priv = grp->priv; 1947 struct net_device *dev = priv->ndev; 1948 int err; 1949 --- 42 unchanged lines hidden --- 1992 free_irq(gfar_irq(grp, TX)->irq, grp); 1993tx_irq_fail: 1994 free_irq(gfar_irq(grp, ER)->irq, grp); 1995err_irq_fail: 1996 return err; 1997 1998} 1999 |
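register_grp_irqs() is invoked per group with the usual partial-failure rollback: in both the old startup_gfar() and the new gfar_request_irq() below, a failure at group i unwinds groups 0..i-1 before returning. A generic sketch of the pattern (the group functions are faked for the demo):

    #include <stdio.h>

    static int register_group(int i)    /* fake: pretend group 2 fails */
    {
            return (i == 2) ? -1 : 0;
    }

    static void unregister_group(int i)
    {
            printf("freed group %d\n", i);
    }

    static int request_all(int num_grps)
    {
            int i, j, err;

            for (i = 0; i < num_grps; i++) {
                    err = register_group(i);
                    if (err) {
                            for (j = 0; j < i; j++) /* unwind successes */
                                    unregister_group(j);
                            return err;
                    }
            }
            return 0;
    }

    int main(void)
    {
            printf("request_all: %d\n", request_all(4));
            return 0;
    }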
1959/* Bring the controller up and running */ 1960int startup_gfar(struct net_device *ndev) | 2000static void gfar_free_irq(struct gfar_private *priv) |
1961{ | 2001{ |
1962 struct gfar_private *priv = netdev_priv(ndev); 1963 struct gfar __iomem *regs = NULL; 1964 int err, i, j; | 2002 int i; |
1965 | 2003 |
1966 for (i = 0; i < priv->num_grps; i++) { 1967 regs = priv->gfargrp[i].regs; 1968 gfar_write(&regs->imask, IMASK_INIT_CLEAR); | 2004 /* Free the IRQs */ 2005 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) { 2006 for (i = 0; i < priv->num_grps; i++) 2007 free_grp_irqs(&priv->gfargrp[i]); 2008 } else { 2009 for (i = 0; i < priv->num_grps; i++) 2010 free_irq(gfar_irq(&priv->gfargrp[i], TX)->irq, 2011 &priv->gfargrp[i]); |
1969 } | 2012 } |
2013} |
|
1970 | 2014 |
1971 regs = priv->gfargrp[0].regs; 1972 err = gfar_alloc_skb_resources(ndev); 1973 if (err) 1974 return err; | 2015static int gfar_request_irq(struct gfar_private *priv) 2016{ 2017 int err, i, j; |
1975 | 2018 |
1976 gfar_init_mac(ndev); 1977 | |
1978 for (i = 0; i < priv->num_grps; i++) { 1979 err = register_grp_irqs(&priv->gfargrp[i]); 1980 if (err) { 1981 for (j = 0; j < i; j++) 1982 free_grp_irqs(&priv->gfargrp[j]); | 2019 for (i = 0; i < priv->num_grps; i++) { 2020 err = register_grp_irqs(&priv->gfargrp[i]); 2021 if (err) { 2022 for (j = 0; j < i; j++) 2023 free_grp_irqs(&priv->gfargrp[j]); |
1983 goto irq_fail; | 2024 return err; |
1984 } 1985 } 1986 | 2025 } 2026 } 2027 |
1987 /* Start the controller */ 1988 gfar_start(ndev); | 2028 return 0; 2029} |
1989 | 2030 |
2031/* Bring the controller up and running */ 2032int startup_gfar(struct net_device *ndev) 2033{ 2034 struct gfar_private *priv = netdev_priv(ndev); 2035 int err; 2036 2037 gfar_mac_reset(priv); 2038 2039 err = gfar_alloc_skb_resources(ndev); 2040 if (err) 2041 return err; 2042 2043 gfar_init_tx_rx_base(priv); 2044 2045 smp_mb__before_clear_bit(); 2046 clear_bit(GFAR_DOWN, &priv->state); 2047 smp_mb__after_clear_bit(); 2048 2049 /* Start Rx/Tx DMA and enable the interrupts */ 2050 gfar_start(priv); 2051 |
|
1990 phy_start(priv->phydev); 1991 | 2052 phy_start(priv->phydev); 2053 |
1992 gfar_configure_coalescing_all(priv); | 2054 enable_napi(priv); |
1993 | 2055 |
1994 return 0; | 2056 netif_tx_wake_all_queues(ndev); |
1995 | 2057 |
1996irq_fail: 1997 free_skb_resources(priv); 1998 return err; | 2058 return 0; |
1999} 2000 2001/* Called when something needs to use the ethernet device 2002 * Returns 0 for success. 2003 */ 2004static int gfar_enet_open(struct net_device *dev) 2005{ 2006 struct gfar_private *priv = netdev_priv(dev); 2007 int err; 2008 | 2059} 2060 2061/* Called when something needs to use the ethernet device 2062 * Returns 0 for success. 2063 */ 2064static int gfar_enet_open(struct net_device *dev) 2065{ 2066 struct gfar_private *priv = netdev_priv(dev); 2067 int err; 2068 |
2009 enable_napi(priv); 2010 2011 /* Initialize a bunch of registers */ 2012 init_registers(dev); 2013 2014 gfar_set_mac_address(dev); 2015 | |
2016 err = init_phy(dev); | 2069 err = init_phy(dev); |
2070 if (err) 2071 return err; |
|
2017 | 2072 |
2018 if (err) { 2019 disable_napi(priv); | 2073 err = gfar_request_irq(priv); 2074 if (err) |
2020 return err; | 2075 return err; |
2021 } | |
2022 2023 err = startup_gfar(dev); | 2076 2077 err = startup_gfar(dev); |
2024 if (err) { 2025 disable_napi(priv); | 2078 if (err) |
2026 return err; | 2079 return err; |
2027 } | |
2028 | 2080 |
2029 netif_tx_start_all_queues(dev); 2030 | |
2031 device_set_wakeup_enable(&dev->dev, priv->wol_en); 2032 2033 return err; 2034} 2035 2036static inline struct txfcb *gfar_add_fcb(struct sk_buff *skb) 2037{ 2038 struct txfcb *fcb = (struct txfcb *)skb_push(skb, GMAC_FCB_LEN); --- 108 unchanged lines hidden --- 2147 2148 /* make space for additional header when fcb is needed */ 2149 if (fcb_len && unlikely(skb_headroom(skb) < fcb_len)) { 2150 struct sk_buff *skb_new; 2151 2152 skb_new = skb_realloc_headroom(skb, fcb_len); 2153 if (!skb_new) { 2154 dev->stats.tx_errors++; | 2081 device_set_wakeup_enable(&dev->dev, priv->wol_en); 2082 2083 return err; 2084} 2085 2086static inline struct txfcb *gfar_add_fcb(struct sk_buff *skb) 2087{ 2088 struct txfcb *fcb = (struct txfcb *)skb_push(skb, GMAC_FCB_LEN); --- 108 unchanged lines hidden --- 2197 2198 /* make space for additional header when fcb is needed */ 2199 if (fcb_len && unlikely(skb_headroom(skb) < fcb_len)) { 2200 struct sk_buff *skb_new; 2201 2202 skb_new = skb_realloc_headroom(skb, fcb_len); 2203 if (!skb_new) { 2204 dev->stats.tx_errors++; |
2155 kfree_skb(skb); | 2205 dev_kfree_skb_any(skb); |
2156 return NETDEV_TX_OK; 2157 } 2158 2159 if (skb->sk) 2160 skb_set_owner_w(skb_new, skb->sk); | 2206 return NETDEV_TX_OK; 2207 } 2208 2209 if (skb->sk) 2210 skb_set_owner_w(skb_new, skb->sk); |
2161 consume_skb(skb); | 2211 dev_consume_skb_any(skb); |
2162 skb = skb_new; 2163 } 2164 2165 /* total number of fragments in the SKB */ 2166 nr_frags = skb_shinfo(skb)->nr_frags; 2167 2168 /* calculate the required number of TxBDs for this skb */ 2169 if (unlikely(do_tstamp)) --- 176 unchanged lines hidden --- 2346 return NETDEV_TX_OK; 2347} 2348 2349/* Stops the kernel queue, and halts the controller */ 2350static int gfar_close(struct net_device *dev) 2351{ 2352 struct gfar_private *priv = netdev_priv(dev); 2353 | 2212 skb = skb_new; 2213 } 2214 2215 /* total number of fragments in the SKB */ 2216 nr_frags = skb_shinfo(skb)->nr_frags; 2217 2218 /* calculate the required number of TxBDs for this skb */ 2219 if (unlikely(do_tstamp)) --- 176 unchanged lines hidden --- 2396 return NETDEV_TX_OK; 2397} 2398 2399/* Stops the kernel queue, and halts the controller */ 2400static int gfar_close(struct net_device *dev) 2401{ 2402 struct gfar_private *priv = netdev_priv(dev); 2403 |
2354 disable_napi(priv); 2355 | |
2356 cancel_work_sync(&priv->reset_task); 2357 stop_gfar(dev); 2358 2359 /* Disconnect from the PHY */ 2360 phy_disconnect(priv->phydev); 2361 priv->phydev = NULL; 2362 | 2404 cancel_work_sync(&priv->reset_task); 2405 stop_gfar(dev); 2406 2407 /* Disconnect from the PHY */ 2408 phy_disconnect(priv->phydev); 2409 priv->phydev = NULL; 2410 |
2363 netif_tx_stop_all_queues(dev); | 2411 gfar_free_irq(priv); |
2364 2365 return 0; 2366} 2367 2368/* Changes the mac address if the controller is not running. */ 2369static int gfar_set_mac_address(struct net_device *dev) 2370{ 2371 gfar_set_mac_for_addr(dev, 0, dev->dev_addr); 2372 2373 return 0; 2374} 2375 | 2412 2413 return 0; 2414} 2415 2416/* Changes the mac address if the controller is not running. */ 2417static int gfar_set_mac_address(struct net_device *dev) 2418{ 2419 gfar_set_mac_for_addr(dev, 0, dev->dev_addr); 2420 2421 return 0; 2422} 2423 |
2376/* Check if rx parser should be activated */ 2377void gfar_check_rx_parser_mode(struct gfar_private *priv) 2378{ 2379 struct gfar __iomem *regs; 2380 u32 tempval; 2381 2382 regs = priv->gfargrp[0].regs; 2383 2384 tempval = gfar_read(&regs->rctrl); 2385 /* If parse is no longer required, then disable parser */ 2386 if (tempval & RCTRL_REQ_PARSER) { 2387 tempval |= RCTRL_PRSDEP_INIT; 2388 priv->uses_rxfcb = 1; 2389 } else { 2390 tempval &= ~RCTRL_PRSDEP_INIT; 2391 priv->uses_rxfcb = 0; 2392 } 2393 gfar_write(&regs->rctrl, tempval); 2394} 2395 2396/* Enables and disables VLAN insertion/extraction */ 2397void gfar_vlan_mode(struct net_device *dev, netdev_features_t features) 2398{ 2399 struct gfar_private *priv = netdev_priv(dev); 2400 struct gfar __iomem *regs = NULL; 2401 unsigned long flags; 2402 u32 tempval; 2403 2404 regs = priv->gfargrp[0].regs; 2405 local_irq_save(flags); 2406 lock_rx_qs(priv); 2407 2408 if (features & NETIF_F_HW_VLAN_CTAG_TX) { 2409 /* Enable VLAN tag insertion */ 2410 tempval = gfar_read(&regs->tctrl); 2411 tempval |= TCTRL_VLINS; 2412 gfar_write(&regs->tctrl, tempval); 2413 } else { 2414 /* Disable VLAN tag insertion */ 2415 tempval = gfar_read(&regs->tctrl); 2416 tempval &= ~TCTRL_VLINS; 2417 gfar_write(&regs->tctrl, tempval); 2418 } 2419 2420 if (features & NETIF_F_HW_VLAN_CTAG_RX) { 2421 /* Enable VLAN tag extraction */ 2422 tempval = gfar_read(&regs->rctrl); 2423 tempval |= (RCTRL_VLEX | RCTRL_PRSDEP_INIT); 2424 gfar_write(&regs->rctrl, tempval); 2425 priv->uses_rxfcb = 1; 2426 } else { 2427 /* Disable VLAN tag extraction */ 2428 tempval = gfar_read(&regs->rctrl); 2429 tempval &= ~RCTRL_VLEX; 2430 gfar_write(&regs->rctrl, tempval); 2431 2432 gfar_check_rx_parser_mode(priv); 2433 } 2434 2435 gfar_change_mtu(dev, dev->mtu); 2436 2437 unlock_rx_qs(priv); 2438 local_irq_restore(flags); 2439} 2440 |
2441static int gfar_change_mtu(struct net_device *dev, int new_mtu) 2442{ | 2424static int gfar_change_mtu(struct net_device *dev, int new_mtu) 2425{ |
2443 int tempsize, tempval; | |
2444 struct gfar_private *priv = netdev_priv(dev); | 2426 struct gfar_private *priv = netdev_priv(dev); |
2445 struct gfar __iomem *regs = priv->gfargrp[0].regs; 2446 int oldsize = priv->rx_buffer_size; | |
2447 int frame_size = new_mtu + ETH_HLEN; 2448 2449 if ((frame_size < 64) || (frame_size > JUMBO_FRAME_SIZE)) { 2450 netif_err(priv, drv, dev, "Invalid MTU setting\n"); 2451 return -EINVAL; 2452 } 2453 | 2427 int frame_size = new_mtu + ETH_HLEN; 2428 2429 if ((frame_size < 64) || (frame_size > JUMBO_FRAME_SIZE)) { 2430 netif_err(priv, drv, dev, "Invalid MTU setting\n"); 2431 return -EINVAL; 2432 } 2433 |
2454 if (priv->uses_rxfcb) 2455 frame_size += GMAC_FCB_LEN; | 2434 while (test_and_set_bit_lock(GFAR_RESETTING, &priv->state)) 2435 cpu_relax(); |
2456 | 2436 |
2457 frame_size += priv->padding; 2458 2459 tempsize = (frame_size & ~(INCREMENTAL_BUFFER_SIZE - 1)) + 2460 INCREMENTAL_BUFFER_SIZE; 2461 2462 /* Only stop and start the controller if it isn't already 2463 * stopped, and we changed something 2464 */ 2465 if ((oldsize != tempsize) && (dev->flags & IFF_UP)) | 2437 if (dev->flags & IFF_UP) |
2466 stop_gfar(dev); 2467 | 2438 stop_gfar(dev); 2439 |
2468 priv->rx_buffer_size = tempsize; 2469 | |
2470 dev->mtu = new_mtu; 2471 | 2440 dev->mtu = new_mtu; 2441 |
2472 gfar_write(&regs->mrblr, priv->rx_buffer_size); 2473 gfar_write(&regs->maxfrm, priv->rx_buffer_size); | 2442 if (dev->flags & IFF_UP) 2443 startup_gfar(dev); |
2474 | 2444 |
2475 /* If the mtu is larger than the max size for standard 2476 * ethernet frames (ie, a jumbo frame), then set maccfg2 2477 * to allow huge frames, and to check the length 2478 */ 2479 tempval = gfar_read(&regs->maccfg2); | 2445 clear_bit_unlock(GFAR_RESETTING, &priv->state); |
2480 | 2446 |
2481 if (priv->rx_buffer_size > DEFAULT_RX_BUFFER_SIZE || 2482 gfar_has_errata(priv, GFAR_ERRATA_74)) 2483 tempval |= (MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK); 2484 else 2485 tempval &= ~(MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK); | 2447 return 0; 2448} |
2486 | 2449 |
2487 gfar_write(&regs->maccfg2, tempval); | 2450void reset_gfar(struct net_device *ndev) 2451{ 2452 struct gfar_private *priv = netdev_priv(ndev); |
2488 | 2453 |
2489 if ((oldsize != tempsize) && (dev->flags & IFF_UP)) 2490 startup_gfar(dev); | 2454 while (test_and_set_bit_lock(GFAR_RESETTING, &priv->state)) 2455 cpu_relax(); |
2491 | 2456 |
2492 return 0; | 2457 stop_gfar(ndev); 2458 startup_gfar(ndev); 2459 2460 clear_bit_unlock(GFAR_RESETTING, &priv->state); |
2493} 2494 2495/* gfar_reset_task gets scheduled when a packet has not been 2496 * transmitted after a set amount of time. 2497 * For now, assume that clearing out all the structures, and 2498 * starting over will fix the problem. 2499 */ 2500static void gfar_reset_task(struct work_struct *work) 2501{ 2502 struct gfar_private *priv = container_of(work, struct gfar_private, 2503 reset_task); | 2461} 2462 2463/* gfar_reset_task gets scheduled when a packet has not been 2464 * transmitted after a set amount of time. 2465 * For now, assume that clearing out all the structures, and 2466 * starting over will fix the problem. 2467 */ 2468static void gfar_reset_task(struct work_struct *work) 2469{ 2470 struct gfar_private *priv = container_of(work, struct gfar_private, 2471 reset_task); |
2504 struct net_device *dev = priv->ndev; 2505 2506 if (dev->flags & IFF_UP) { 2507 netif_tx_stop_all_queues(dev); 2508 stop_gfar(dev); 2509 startup_gfar(dev); 2510 netif_tx_start_all_queues(dev); 2511 } 2512 2513 netif_tx_schedule_all(dev); | 2472 reset_gfar(priv->ndev); |
2514} 2515 2516static void gfar_timeout(struct net_device *dev) 2517{ 2518 struct gfar_private *priv = netdev_priv(dev); 2519 2520 dev->stats.tx_errors++; 2521 schedule_work(&priv->reset_task); --- 96 unchanged lines hidden --- 2618 2619 howmany++; 2620 spin_lock_irqsave(&tx_queue->txlock, flags); 2621 tx_queue->num_txbdfree += nr_txbds; 2622 spin_unlock_irqrestore(&tx_queue->txlock, flags); 2623 } 2624 2625 /* If we freed a buffer, we can restart transmission, if necessary */ | 2473} 2474 2475static void gfar_timeout(struct net_device *dev) 2476{ 2477 struct gfar_private *priv = netdev_priv(dev); 2478 2479 dev->stats.tx_errors++; 2480 schedule_work(&priv->reset_task); --- 96 unchanged lines hidden --- 2577 2578 howmany++; 2579 spin_lock_irqsave(&tx_queue->txlock, flags); 2580 tx_queue->num_txbdfree += nr_txbds; 2581 spin_unlock_irqrestore(&tx_queue->txlock, flags); 2582 } 2583 2584 /* If we freed a buffer, we can restart transmission, if necessary */ |
2626 if (netif_tx_queue_stopped(txq) && tx_queue->num_txbdfree) 2627 netif_wake_subqueue(dev, tqi); | 2585 if (tx_queue->num_txbdfree && 2586 netif_tx_queue_stopped(txq) && 2587 !(test_bit(GFAR_DOWN, &priv->state))) 2588 netif_wake_subqueue(priv->ndev, tqi); |
2628 2629 /* Update dirty indicators */ 2630 tx_queue->skb_dirtytx = skb_dirtytx; 2631 tx_queue->dirty_tx = bdp; 2632 2633 netdev_tx_completed_queue(txq, howmany, bytes_sent); 2634} 2635 | 2589 2590 /* Update dirty indicators */ 2591 tx_queue->skb_dirtytx = skb_dirtytx; 2592 tx_queue->dirty_tx = bdp; 2593 2594 netdev_tx_completed_queue(txq, howmany, bytes_sent); 2595} 2596 |
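The new MTU and reset paths above serialize against each other by busy-waiting on a GFAR_RESETTING bit (test_and_set_bit_lock()/clear_bit_unlock() with cpu_relax() in between). A user-space approximation using a C11 atomic flag (the helper names are ours; only GFAR_RESETTING itself comes from the driver):

    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_flag resetting = ATOMIC_FLAG_INIT;

    static void reset_device(const char *who)
    {
            /* test_and_set_bit_lock() analogue: spin until we own the bit */
            while (atomic_flag_test_and_set_explicit(&resetting,
                                                     memory_order_acquire))
                    ;       /* cpu_relax() would sit here in kernel code */

            printf("%s: stop_gfar + startup_gfar\n", who);

            /* clear_bit_unlock() analogue: release ordering on the clear */
            atomic_flag_clear_explicit(&resetting, memory_order_release);
    }

    int main(void)
    {
            reset_device("gfar_change_mtu");
            reset_device("gfar_reset_task");
            return 0;
    }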
2636static void gfar_schedule_cleanup(struct gfar_priv_grp *gfargrp) 2637{ 2638 unsigned long flags; 2639 2640 spin_lock_irqsave(&gfargrp->grplock, flags); 2641 if (napi_schedule_prep(&gfargrp->napi)) { 2642 gfar_write(&gfargrp->regs->imask, IMASK_RTX_DISABLED); 2643 __napi_schedule(&gfargrp->napi); 2644 } else { 2645 /* Clear IEVENT, so interrupts aren't called again 2646 * because of the packets that have already arrived. 2647 */ 2648 gfar_write(&gfargrp->regs->ievent, IEVENT_RTX_MASK); 2649 } 2650 spin_unlock_irqrestore(&gfargrp->grplock, flags); 2651 2652} 2653 2654/* Interrupt Handler for Transmit complete */ 2655static irqreturn_t gfar_transmit(int irq, void *grp_id) 2656{ 2657 gfar_schedule_cleanup((struct gfar_priv_grp *)grp_id); 2658 return IRQ_HANDLED; 2659} 2660 | |
2661static void gfar_new_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp, 2662 struct sk_buff *skb) 2663{ 2664 struct net_device *dev = rx_queue->dev; 2665 struct gfar_private *priv = netdev_priv(dev); 2666 dma_addr_t buf; 2667 2668 buf = dma_map_single(priv->dev, skb->data, --- 54 unchanged lines hidden --- 2723 if (status & RXBD_OVERRUN) { 2724 atomic64_inc(&estats->rx_overrun); 2725 stats->rx_crc_errors++; 2726 } 2727} 2728 2729irqreturn_t gfar_receive(int irq, void *grp_id) 2730{ | 2597static void gfar_new_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp, 2598 struct sk_buff *skb) 2599{ 2600 struct net_device *dev = rx_queue->dev; 2601 struct gfar_private *priv = netdev_priv(dev); 2602 dma_addr_t buf; 2603 2604 buf = dma_map_single(priv->dev, skb->data, --- 54 unchanged lines hidden --- 2659 if (status & RXBD_OVERRUN) { 2660 atomic64_inc(&estats->rx_overrun); 2661 stats->rx_crc_errors++; 2662 } 2663} 2664 2665irqreturn_t gfar_receive(int irq, void *grp_id) 2666{ |
2731 gfar_schedule_cleanup((struct gfar_priv_grp *)grp_id); | 2667 struct gfar_priv_grp *grp = (struct gfar_priv_grp *)grp_id; 2668 unsigned long flags; 2669 u32 imask; 2670 2671 if (likely(napi_schedule_prep(&grp->napi_rx))) { 2672 spin_lock_irqsave(&grp->grplock, flags); 2673 imask = gfar_read(&grp->regs->imask); 2674 imask &= IMASK_RX_DISABLED; 2675 gfar_write(&grp->regs->imask, imask); 2676 spin_unlock_irqrestore(&grp->grplock, flags); 2677 __napi_schedule(&grp->napi_rx); 2678 } else { 2679 /* Clear IEVENT, so interrupts aren't called again 2680 * because of the packets that have already arrived. 2681 */ 2682 gfar_write(&grp->regs->ievent, IEVENT_RX_MASK); 2683 } 2684 |
2732 return IRQ_HANDLED; 2733} 2734 | 2685 return IRQ_HANDLED; 2686} 2687 |
2688/* Interrupt Handler for Transmit complete */ 2689static irqreturn_t gfar_transmit(int irq, void *grp_id) 2690{ 2691 struct gfar_priv_grp *grp = (struct gfar_priv_grp *)grp_id; 2692 unsigned long flags; 2693 u32 imask; 2694 2695 if (likely(napi_schedule_prep(&grp->napi_tx))) { 2696 spin_lock_irqsave(&grp->grplock, flags); 2697 imask = gfar_read(&grp->regs->imask); 2698 imask &= IMASK_TX_DISABLED; 2699 gfar_write(&grp->regs->imask, imask); 2700 spin_unlock_irqrestore(&grp->grplock, flags); 2701 __napi_schedule(&grp->napi_tx); 2702 } else { 2703 /* Clear IEVENT, so interrupts aren't called again 2704 * because of the packets that have already arrived. 2705 */ 2706 gfar_write(&grp->regs->ievent, IEVENT_TX_MASK); 2707 } 2708 2709 return IRQ_HANDLED; 2710} 2711 |
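The split Rx/Tx interrupt handlers above share one shape: if NAPI can be scheduled, mask that source in IMASK under the group lock and schedule the poller; otherwise just ack IEVENT so the line deasserts. The matching poll routines re-open IMASK on napi_complete(). A condensed single-threaded model (locking and register I/O stubbed, mask values illustrative):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    static uint32_t imask = 0xffffffffu;    /* all sources enabled */
    static bool napi_scheduled;

    static void irq_handler(uint32_t disabled_mask, uint32_t ack_bits)
    {
            if (!napi_scheduled) {          /* napi_schedule_prep() */
                    imask &= disabled_mask; /* mask source until polled */
                    napi_scheduled = true;  /* __napi_schedule() */
            } else {
                    printf("ack IEVENT 0x%x\n", ack_bits);
            }
    }

    static void poll_complete(uint32_t default_bits)
    {
            napi_scheduled = false;         /* napi_complete() */
            imask |= default_bits;          /* re-enable the source */
    }

    int main(void)
    {
            irq_handler(~0x9u, 0x9u);       /* illustrative bits */
            poll_complete(0x9u);
            printf("imask = 0x%x\n", imask);
            return 0;
    }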
|
2735static inline void gfar_rx_checksum(struct sk_buff *skb, struct rxfcb *fcb) 2736{ 2737 /* If valid headers were found, and valid sums 2738 * were verified, then we tell the kernel that no 2739 * checksumming is necessary. Otherwise, it is [FIXME] 2740 */ 2741 if ((fcb->flags & RXFCB_CSUM_MASK) == (RXFCB_CIP | RXFCB_CTU)) 2742 skb->ip_summed = CHECKSUM_UNNECESSARY; --- 104 unchanged lines hidden --- 2847 2848 if (likely(skb)) { 2849 pkt_len = bdp->length - ETH_FCS_LEN; 2850 /* Remove the FCS from the packet length */ 2851 skb_put(skb, pkt_len); 2852 rx_queue->stats.rx_bytes += pkt_len; 2853 skb_record_rx_queue(skb, rx_queue->qindex); 2854 gfar_process_frame(dev, skb, amount_pull, | 2712static inline void gfar_rx_checksum(struct sk_buff *skb, struct rxfcb *fcb) 2713{ 2714 /* If valid headers were found, and valid sums 2715 * were verified, then we tell the kernel that no 2716 * checksumming is necessary. Otherwise, it is [FIXME] 2717 */ 2718 if ((fcb->flags & RXFCB_CSUM_MASK) == (RXFCB_CIP | RXFCB_CTU)) 2719 skb->ip_summed = CHECKSUM_UNNECESSARY; --- 104 unchanged lines hidden --- 2824 2825 if (likely(skb)) { 2826 pkt_len = bdp->length - ETH_FCS_LEN; 2827 /* Remove the FCS from the packet length */ 2828 skb_put(skb, pkt_len); 2829 rx_queue->stats.rx_bytes += pkt_len; 2830 skb_record_rx_queue(skb, rx_queue->qindex); 2831 gfar_process_frame(dev, skb, amount_pull, |
2855 &rx_queue->grp->napi); | 2832 &rx_queue->grp->napi_rx); |
2856 2857 } else { 2858 netif_warn(priv, rx_err, dev, "Missing skb!\n"); 2859 rx_queue->stats.rx_dropped++; 2860 atomic64_inc(&priv->extra_stats.rx_skbmissing); 2861 } 2862 2863 } --- 12 unchanged lines hidden --- 2876 } 2877 2878 /* Update the current rxbd pointer to be the next one */ 2879 rx_queue->cur_rx = bdp; 2880 2881 return howmany; 2882} 2883 | 2833 2834 } else { 2835 netif_warn(priv, rx_err, dev, "Missing skb!\n"); 2836 rx_queue->stats.rx_dropped++; 2837 atomic64_inc(&priv->extra_stats.rx_skbmissing); 2838 } 2839 2840 } --- 12 unchanged lines hidden --- 2853 } 2854 2855 /* Update the current rxbd pointer to be the next one */ 2856 rx_queue->cur_rx = bdp; 2857 2858 return howmany; 2859} 2860 |
2884static int gfar_poll_sq(struct napi_struct *napi, int budget) | 2861static int gfar_poll_rx_sq(struct napi_struct *napi, int budget) |
2885{ 2886 struct gfar_priv_grp *gfargrp = | 2862{ 2863 struct gfar_priv_grp *gfargrp = |
2887 container_of(napi, struct gfar_priv_grp, napi); | 2864 container_of(napi, struct gfar_priv_grp, napi_rx); |
2888 struct gfar __iomem *regs = gfargrp->regs; | 2865 struct gfar __iomem *regs = gfargrp->regs; |
2889 struct gfar_priv_tx_q *tx_queue = gfargrp->priv->tx_queue[0]; 2890 struct gfar_priv_rx_q *rx_queue = gfargrp->priv->rx_queue[0]; | 2866 struct gfar_priv_rx_q *rx_queue = gfargrp->rx_queue; |
2891 int work_done = 0; 2892 2893 /* Clear IEVENT, so interrupts aren't called again 2894 * because of the packets that have already arrived 2895 */ | 2867 int work_done = 0; 2868 2869 /* Clear IEVENT, so interrupts aren't called again 2870 * because of the packets that have already arrived 2871 */ |
2896 gfar_write(&regs->ievent, IEVENT_RTX_MASK); | 2872 gfar_write(&regs->ievent, IEVENT_RX_MASK); |
2897 | 2873 |
2898 /* run Tx cleanup to completion */ 2899 if (tx_queue->tx_skbuff[tx_queue->skb_dirtytx]) 2900 gfar_clean_tx_ring(tx_queue); 2901 | |
2902 work_done = gfar_clean_rx_ring(rx_queue, budget); 2903 2904 if (work_done < budget) { | 2874 work_done = gfar_clean_rx_ring(rx_queue, budget); 2875 2876 if (work_done < budget) { |
2877 u32 imask; |
|
2905 napi_complete(napi); 2906 /* Clear the halt bit in RSTAT */ 2907 gfar_write(&regs->rstat, gfargrp->rstat); 2908 | 2878 napi_complete(napi); 2879 /* Clear the halt bit in RSTAT */ 2880 gfar_write(&regs->rstat, gfargrp->rstat); 2881 |
2909 gfar_write(&regs->imask, IMASK_DEFAULT); 2910 2911 /* If we are coalescing interrupts, update the timer 2912 * Otherwise, clear it 2913 */ 2914 gfar_write(&regs->txic, 0); 2915 if (likely(tx_queue->txcoalescing)) 2916 gfar_write(&regs->txic, tx_queue->txic); 2917 2918 gfar_write(&regs->rxic, 0); 2919 if (unlikely(rx_queue->rxcoalescing)) 2920 gfar_write(&regs->rxic, rx_queue->rxic); | 2882 spin_lock_irq(&gfargrp->grplock); 2883 imask = gfar_read(&regs->imask); 2884 imask |= IMASK_RX_DEFAULT; 2885 gfar_write(&regs->imask, imask); 2886 spin_unlock_irq(&gfargrp->grplock); |
2921 } 2922 2923 return work_done; 2924} 2925 | 2887 } 2888 2889 return work_done; 2890} 2891 |
2926static int gfar_poll(struct napi_struct *napi, int budget) | 2892static int gfar_poll_tx_sq(struct napi_struct *napi, int budget) |
2927{ 2928 struct gfar_priv_grp *gfargrp = | 2893{ 2894 struct gfar_priv_grp *gfargrp = |
2929 container_of(napi, struct gfar_priv_grp, napi); | 2895 container_of(napi, struct gfar_priv_grp, napi_tx); 2896 struct gfar __iomem *regs = gfargrp->regs; 2897 struct gfar_priv_tx_q *tx_queue = gfargrp->tx_queue; 2898 u32 imask; 2899 2900 /* Clear IEVENT, so interrupts aren't called again 2901 * because of the packets that have already arrived 2902 */ 2903 gfar_write(&regs->ievent, IEVENT_TX_MASK); 2904 2905 /* run Tx cleanup to completion */ 2906 if (tx_queue->tx_skbuff[tx_queue->skb_dirtytx]) 2907 gfar_clean_tx_ring(tx_queue); 2908 2909 napi_complete(napi); 2910 2911 spin_lock_irq(&gfargrp->grplock); 2912 imask = gfar_read(&regs->imask); 2913 imask |= IMASK_TX_DEFAULT; 2914 gfar_write(&regs->imask, imask); 2915 spin_unlock_irq(&gfargrp->grplock); 2916 2917 return 0; 2918} 2919 2920static int gfar_poll_rx(struct napi_struct *napi, int budget) 2921{ 2922 struct gfar_priv_grp *gfargrp = 2923 container_of(napi, struct gfar_priv_grp, napi_rx); |
2930 struct gfar_private *priv = gfargrp->priv; 2931 struct gfar __iomem *regs = gfargrp->regs; | 2924 struct gfar_private *priv = gfargrp->priv; 2925 struct gfar __iomem *regs = gfargrp->regs; |
2932 struct gfar_priv_tx_q *tx_queue = NULL; | |
2933 struct gfar_priv_rx_q *rx_queue = NULL; 2934 int work_done = 0, work_done_per_q = 0; 2935 int i, budget_per_q = 0; | 2926 struct gfar_priv_rx_q *rx_queue = NULL; 2927 int work_done = 0, work_done_per_q = 0; 2928 int i, budget_per_q = 0; |
2936 int has_tx_work = 0; | |
2937 unsigned long rstat_rxf; 2938 int num_act_queues; 2939 2940 /* Clear IEVENT, so interrupts aren't called again 2941 * because of the packets that have already arrived 2942 */ | 2929 unsigned long rstat_rxf; 2930 int num_act_queues; 2931 2932 /* Clear IEVENT, so interrupts aren't called again 2933 * because of the packets that have already arrived 2934 */ |
2943 gfar_write(&regs->ievent, IEVENT_RTX_MASK); | 2935 gfar_write(&regs->ievent, IEVENT_RX_MASK); |
2944 2945 rstat_rxf = gfar_read(&regs->rstat) & RSTAT_RXF_MASK; 2946 2947 num_act_queues = bitmap_weight(&rstat_rxf, MAX_RX_QS); 2948 if (num_act_queues) 2949 budget_per_q = budget/num_act_queues; 2950 | 2936 2937 rstat_rxf = gfar_read(&regs->rstat) & RSTAT_RXF_MASK; 2938 2939 num_act_queues = bitmap_weight(&rstat_rxf, MAX_RX_QS); 2940 if (num_act_queues) 2941 budget_per_q = budget/num_act_queues; 2942 |
2951 for_each_set_bit(i, &gfargrp->tx_bit_map, priv->num_tx_queues) { 2952 tx_queue = priv->tx_queue[i]; 2953 /* run Tx cleanup to completion */ 2954 if (tx_queue->tx_skbuff[tx_queue->skb_dirtytx]) { 2955 gfar_clean_tx_ring(tx_queue); 2956 has_tx_work = 1; 2957 } 2958 } 2959 | |
2960 for_each_set_bit(i, &gfargrp->rx_bit_map, priv->num_rx_queues) { 2961 /* skip queue if not active */ 2962 if (!(rstat_rxf & (RSTAT_CLEAR_RXF0 >> i))) 2963 continue; 2964 2965 rx_queue = priv->rx_queue[i]; 2966 work_done_per_q = 2967 gfar_clean_rx_ring(rx_queue, budget_per_q); --- 6 unchanged lines hidden --- 2974 RSTAT_CLEAR_RXF0 >> i); 2975 num_act_queues--; 2976 2977 if (!num_act_queues) 2978 break; 2979 } 2980 } 2981 | 2943 for_each_set_bit(i, &gfargrp->rx_bit_map, priv->num_rx_queues) { 2944 /* skip queue if not active */ 2945 if (!(rstat_rxf & (RSTAT_CLEAR_RXF0 >> i))) 2946 continue; 2947 2948 rx_queue = priv->rx_queue[i]; 2949 work_done_per_q = 2950 gfar_clean_rx_ring(rx_queue, budget_per_q); --- 6 unchanged lines hidden --- 2957 RSTAT_CLEAR_RXF0 >> i); 2958 num_act_queues--; 2959 2960 if (!num_act_queues) 2961 break; 2962 } 2963 } 2964 |
2982 if (!num_act_queues && !has_tx_work) { 2983 | 2965 if (!num_act_queues) { 2966 u32 imask; |
2984 napi_complete(napi); 2985 2986 /* Clear the halt bit in RSTAT */ 2987 gfar_write(&regs->rstat, gfargrp->rstat); 2988 | 2967 napi_complete(napi); 2968 2969 /* Clear the halt bit in RSTAT */ 2970 gfar_write(&regs->rstat, gfargrp->rstat); 2971 |
2989 gfar_write(&regs->imask, IMASK_DEFAULT); 2990 2991 /* If we are coalescing interrupts, update the timer 2992 * Otherwise, clear it 2993 */ 2994 gfar_configure_coalescing(priv, gfargrp->rx_bit_map, 2995 gfargrp->tx_bit_map); | 2972 spin_lock_irq(&gfargrp->grplock); 2973 imask = gfar_read(&regs->imask); 2974 imask |= IMASK_RX_DEFAULT; 2975 gfar_write(&regs->imask, imask); 2976 spin_unlock_irq(&gfargrp->grplock); |
2996 } 2997 2998 return work_done; 2999} 3000 | 2977 } 2978 2979 return work_done; 2980} 2981 |
2982static int gfar_poll_tx(struct napi_struct *napi, int budget) 2983{ 2984 struct gfar_priv_grp *gfargrp = 2985 container_of(napi, struct gfar_priv_grp, napi_tx); 2986 struct gfar_private *priv = gfargrp->priv; 2987 struct gfar __iomem *regs = gfargrp->regs; 2988 struct gfar_priv_tx_q *tx_queue = NULL; 2989 int has_tx_work = 0; 2990 int i; 2991 2992 /* Clear IEVENT, so interrupts aren't called again 2993 * because of the packets that have already arrived 2994 */ 2995 gfar_write(&regs->ievent, IEVENT_TX_MASK); 2996 2997 for_each_set_bit(i, &gfargrp->tx_bit_map, priv->num_tx_queues) { 2998 tx_queue = priv->tx_queue[i]; 2999 /* run Tx cleanup to completion */ 3000 if (tx_queue->tx_skbuff[tx_queue->skb_dirtytx]) { 3001 gfar_clean_tx_ring(tx_queue); 3002 has_tx_work = 1; 3003 } 3004 } 3005 3006 if (!has_tx_work) { 3007 u32 imask; 3008 napi_complete(napi); 3009 3010 spin_lock_irq(&gfargrp->grplock); 3011 imask = gfar_read(&regs->imask); 3012 imask |= IMASK_TX_DEFAULT; 3013 gfar_write(&regs->imask, imask); 3014 spin_unlock_irq(&gfargrp->grplock); 3015 } 3016 3017 return 0; 3018} 3019 3020 |
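gfar_poll_rx() above splits the NAPI budget evenly across the Rx queues that RSTAT reports active, using bitmap_weight() over the RXF bits. The same arithmetic, worked with a GCC/Clang popcount builtin:

    #include <stdio.h>

    int main(void)
    {
            unsigned long rstat_rxf = 0x0b; /* queues 0, 1 and 3 active */
            int budget = 64;
            int num_act_queues = __builtin_popcountl(rstat_rxf);
            int budget_per_q = num_act_queues ? budget / num_act_queues : 0;

            /* 3 active queues -> 21 descriptors allowed per queue */
            printf("%d queues, budget %d each\n", num_act_queues, budget_per_q);
            return 0;
    }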
|
3001#ifdef CONFIG_NET_POLL_CONTROLLER 3002/* Polling 'interrupt' - used by things like netconsole to send skbs 3003 * without having to re-enable interrupts. It's not called while 3004 * the interrupt routine is executing. 3005 */ 3006static void gfar_netpoll(struct net_device *dev) 3007{ 3008 struct gfar_private *priv = netdev_priv(dev); --- 87 unchanged lines hidden --- 3096 * information through variables in the phydev structure, and this 3097 * function converts those variables into the appropriate 3098 * register values, and can bring down the device if needed. 3099 */ 3100static void adjust_link(struct net_device *dev) 3101{ 3102 struct gfar_private *priv = netdev_priv(dev); 3103 struct gfar __iomem *regs = priv->gfargrp[0].regs; | 3021#ifdef CONFIG_NET_POLL_CONTROLLER 3022/* Polling 'interrupt' - used by things like netconsole to send skbs 3023 * without having to re-enable interrupts. It's not called while 3024 * the interrupt routine is executing. 3025 */ 3026static void gfar_netpoll(struct net_device *dev) 3027{ 3028 struct gfar_private *priv = netdev_priv(dev); --- 87 unchanged lines hidden --- 3116 * information through variables in the phydev structure, and this 3117 * function converts those variables into the appropriate 3118 * register values, and can bring down the device if needed. 3119 */ 3120static void adjust_link(struct net_device *dev) 3121{ 3122 struct gfar_private *priv = netdev_priv(dev); 3123 struct gfar __iomem *regs = priv->gfargrp[0].regs; |
3104 unsigned long flags; | |
3105 struct phy_device *phydev = priv->phydev; 3106 int new_state = 0; 3107 | 3124 struct phy_device *phydev = priv->phydev; 3125 int new_state = 0; 3126 |
3108 local_irq_save(flags); 3109 lock_tx_qs(priv); | 3127 if (test_bit(GFAR_RESETTING, &priv->state)) 3128 return; |
3110 3111 if (phydev->link) { 3112 u32 tempval1 = gfar_read(&regs->maccfg1); 3113 u32 tempval = gfar_read(&regs->maccfg2); 3114 u32 ecntrl = gfar_read(&regs->ecntrl); 3115 3116 /* Now we make sure that we can be in full duplex mode. 3117 * If not, we operate in half-duplex mode. --- 55 unchanged lines hidden --- 3173 new_state = 1; 3174 priv->oldlink = 0; 3175 priv->oldspeed = 0; 3176 priv->oldduplex = -1; 3177 } 3178 3179 if (new_state && netif_msg_link(priv)) 3180 phy_print_status(phydev); | 3129 3130 if (phydev->link) { 3131 u32 tempval1 = gfar_read(&regs->maccfg1); 3132 u32 tempval = gfar_read(&regs->maccfg2); 3133 u32 ecntrl = gfar_read(&regs->ecntrl); 3134 3135 /* Now we make sure that we can be in full duplex mode. 3136 * If not, we operate in half-duplex mode. --- 55 unchanged lines hidden --- 3192 new_state = 1; 3193 priv->oldlink = 0; 3194 priv->oldspeed = 0; 3195 priv->oldduplex = -1; 3196 } 3197 3198 if (new_state && netif_msg_link(priv)) 3199 phy_print_status(phydev); |
3181 unlock_tx_qs(priv); 3182 local_irq_restore(flags); | |
3183} 3184 3185/* Update the hash table based on the current list of multicast 3186 * addresses we subscribe to. Also, change the promiscuity of 3187 * the device based on the flags (this function is called 3188 * whenever dev->flags is changed 3189 */ 3190static void gfar_set_multi(struct net_device *dev) --- 263 unchanged lines hidden --- | 3200} 3201 3202/* Update the hash table based on the current list of multicast 3203 * addresses we subscribe to. Also, change the promiscuity of 3204 * the device based on the flags (this function is called 3205 * whenever dev->flags is changed 3206 */ 3207static void gfar_set_multi(struct net_device *dev) --- 263 unchanged lines hidden --- |