--- greth.c (f01387d2693813eb5271a3448e6a082322c7d75d)
+++ greth.c (e1743a16a043f3d6b707730e46ba33ca931fb553)
 /*
  * Aeroflex Gaisler GRETH 10/100/1G Ethernet MAC.
  *
  * 2005-2010 (c) Aeroflex Gaisler AB
  *
  * This driver supports GRETH 10/100 and GRETH 10/100/1G Ethernet MACs
  * available in the GRLIB VHDL IP core library.
  *
--- 109 unchanged lines hidden ---
 }

 static inline void greth_enable_tx(struct greth_private *greth)
 {
         wmb();
         GRETH_REGORIN(greth->regs->control, GRETH_TXEN);
 }

+static inline void greth_enable_tx_and_irq(struct greth_private *greth)
+{
+        wmb(); /* BDs must be written to memory before enabling TX */
+        GRETH_REGORIN(greth->regs->control, GRETH_TXEN | GRETH_TXI);
+}
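The new helper folds the transmit-enable and interrupt-enable bits into one register update: the barrier first makes the descriptor writes globally visible, then a single GRETH_REGORIN() both starts the DMA engine and requests a TX completion interrupt. Below is a minimal userspace sketch of that publish-then-enable ordering, with a C11 release operation standing in for wmb(); the bit values and the descriptor layout are illustrative, not taken from greth.h:

    #include <stdatomic.h>
    #include <stdint.h>
    #include <stdio.h>

    #define TXEN 0x1u                      /* illustrative bit values */
    #define TXI  0x4u

    struct bd { uint32_t stat; uint32_t addr; };   /* simplified descriptor */

    static struct bd tx_ring[128];
    static _Atomic uint32_t control;       /* stands in for greth->regs->control */

    /* The release ordering plays the role of wmb(): every BD store issued
     * before this call becomes visible before TXEN | TXI can be observed. */
    static void enable_tx_and_irq(void)
    {
            atomic_fetch_or_explicit(&control, TXEN | TXI, memory_order_release);
    }

    int main(void)
    {
            tx_ring[0].addr = 0x1000;      /* plain stores, ordered by the release */
            tx_ring[0].stat = 0x800;
            enable_tx_and_irq();
            printf("control = 0x%x\n", (unsigned)atomic_load(&control));
            return 0;
    }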
 static inline void greth_disable_tx(struct greth_private *greth)
 {
         GRETH_REGANDIN(greth->regs->control, ~GRETH_TXEN);
 }

 static inline void greth_enable_rx(struct greth_private *greth)
 {
         wmb();
--- 308 unchanged lines hidden ---
         greth_enable_tx(greth);
         spin_unlock_irqrestore(&greth->devlock, flags);

 out:
         dev_kfree_skb(skb);
         return err;
 }

+static inline u16 greth_num_free_bds(u16 tx_last, u16 tx_next)
+{
+        if (tx_next < tx_last)
+                return (tx_last - tx_next) - 1;
+        else
+                return GRETH_TXBD_NUM - (tx_next - tx_last) - 1;
+}
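greth_num_free_bds() replaces the driver's old tx_free counter: occupancy is now derived from the consumer index (tx_last) and producer index (tx_next) alone, and one slot is always left unused so a full ring is distinguishable from an empty one. A standalone check of the arithmetic; GRETH_TXBD_NUM is assumed here to be 128, as defined in the driver's greth.h:

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    #define GRETH_TXBD_NUM 128     /* assumed value, per greth.h */

    static uint16_t num_free_bds(uint16_t tx_last, uint16_t tx_next)
    {
            if (tx_next < tx_last)
                    return (tx_last - tx_next) - 1;
            else
                    return GRETH_TXBD_NUM - (tx_next - tx_last) - 1;
    }

    int main(void)
    {
            assert(num_free_bds(0, 0) == GRETH_TXBD_NUM - 1);    /* empty ring */
            assert(num_free_bds(5, 4) == 0);                     /* full: producer one behind consumer */
            assert(num_free_bds(0, 127) == 0);                   /* full at the wrap point */
            assert(num_free_bds(10, 20) == GRETH_TXBD_NUM - 11); /* producer ahead */
            printf("all ring-occupancy checks passed\n");
            return 0;
    }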
 
 static netdev_tx_t
 greth_start_xmit_gbit(struct sk_buff *skb, struct net_device *dev)
 {
         struct greth_private *greth = netdev_priv(dev);
         struct greth_bd *bdp;
-        u32 status = 0, dma_addr, ctrl;
+        u32 status, dma_addr;
         int curr_tx, nr_frags, i, err = NETDEV_TX_OK;
         unsigned long flags;
+        u16 tx_last;

         nr_frags = skb_shinfo(skb)->nr_frags;
+        tx_last = greth->tx_last;
+        rmb(); /* tx_last is updated by the poll task */

-        /* Clean TX Ring */
-        greth_clean_tx_gbit(dev);
-
-        if (greth->tx_free < nr_frags + 1) {
-                spin_lock_irqsave(&greth->devlock, flags);/*save from poll/irq*/
-                ctrl = GRETH_REGLOAD(greth->regs->control);
-                /* Enable TX IRQ only if not already in poll() routine */
-                if (ctrl & GRETH_RXI)
-                        GRETH_REGSAVE(greth->regs->control, ctrl | GRETH_TXI);
+        if (greth_num_free_bds(tx_last, greth->tx_next) < nr_frags + 1) {
                 netif_stop_queue(dev);
-                spin_unlock_irqrestore(&greth->devlock, flags);
                 err = NETDEV_TX_BUSY;
                 goto out;
         }

         if (netif_msg_pktdata(greth))
                 greth_print_tx_packet(skb);

         if (unlikely(skb->len > MAX_FRAME_SIZE)) {
                 dev->stats.tx_errors++;
                 goto out;
         }

         /* Save skb pointer. */
         greth->tx_skbuff[greth->tx_next] = skb;

         /* Linear buf */
         if (nr_frags != 0)
                 status = GRETH_TXBD_MORE;
+        else
+                status = GRETH_BD_IE;
 
         if (skb->ip_summed == CHECKSUM_PARTIAL)
                 status |= GRETH_TXBD_CSALL;
         status |= skb_headlen(skb) & GRETH_BD_LEN;
         if (greth->tx_next == GRETH_TXBD_NUM_MASK)
                 status |= GRETH_BD_WR;

--- 41 unchanged lines hidden ---

                 curr_tx = NEXT_TX(curr_tx);
         }

         wmb();

         /* Enable the descriptor chain by enabling the first descriptor */
         bdp = greth->tx_bd_base + greth->tx_next;
-        greth_write_bd(&bdp->stat, greth_read_bd(&bdp->stat) | GRETH_BD_EN);
-        greth->tx_next = curr_tx;
-        greth->tx_free -= nr_frags + 1;
+        greth_write_bd(&bdp->stat,
+                       greth_read_bd(&bdp->stat) | GRETH_BD_EN);

-        wmb();
-
         spin_lock_irqsave(&greth->devlock, flags); /*save from poll/irq*/
-        greth_enable_tx(greth);
+        greth->tx_next = curr_tx;
+        greth_enable_tx_and_irq(greth);
         spin_unlock_irqrestore(&greth->devlock, flags);

         return NETDEV_TX_OK;

 frag_map_error:
         /* Unmap SKB mappings that succeeded and disable descriptor */
         for (i = 0; greth->tx_next + i != curr_tx; i++) {
                 bdp = greth->tx_bd_base + greth->tx_next + i;
--- 79 unchanged lines hidden ---
                 dev->stats.tx_bytes += greth->tx_bufs_length[greth->tx_last];
                 greth->tx_last = NEXT_TX(greth->tx_last);
                 greth->tx_free++;
         }

         if (greth->tx_free > 0) {
                 netif_wake_queue(dev);
         }
-
 }

 static inline void greth_update_tx_stats(struct net_device *dev, u32 stat)
 {
         /* Check status for errors */
         if (unlikely(stat & GRETH_TXBD_STATUS)) {
                 dev->stats.tx_errors++;
                 if (stat & GRETH_TXBD_ERR_AL)
--- 5 unchanged lines hidden ---
         }
         dev->stats.tx_packets++;
 }

 static void greth_clean_tx_gbit(struct net_device *dev)
 {
         struct greth_private *greth;
         struct greth_bd *bdp, *bdp_last_frag;
-        struct sk_buff *skb;
+        struct sk_buff *skb = NULL;
         u32 stat;
         int nr_frags, i;
+        u16 tx_last;

         greth = netdev_priv(dev);
+        tx_last = greth->tx_last;

-        while (greth->tx_free < GRETH_TXBD_NUM) {
+        while (tx_last != greth->tx_next) {

-                skb = greth->tx_skbuff[greth->tx_last];
+                skb = greth->tx_skbuff[tx_last];

                 nr_frags = skb_shinfo(skb)->nr_frags;

                 /* We only clean fully completed SKBs */
-                bdp_last_frag = greth->tx_bd_base + SKIP_TX(greth->tx_last, nr_frags);
+                bdp_last_frag = greth->tx_bd_base + SKIP_TX(tx_last, nr_frags);

                 GRETH_REGSAVE(greth->regs->status, GRETH_INT_TE | GRETH_INT_TX);
                 mb();
                 stat = greth_read_bd(&bdp_last_frag->stat);

                 if (stat & GRETH_BD_EN)
                         break;

-                greth->tx_skbuff[greth->tx_last] = NULL;
+                greth->tx_skbuff[tx_last] = NULL;

                 greth_update_tx_stats(dev, stat);
                 dev->stats.tx_bytes += skb->len;

-                bdp = greth->tx_bd_base + greth->tx_last;
+                bdp = greth->tx_bd_base + tx_last;

-                greth->tx_last = NEXT_TX(greth->tx_last);
+                tx_last = NEXT_TX(tx_last);

                 dma_unmap_single(greth->dev,
                                  greth_read_bd(&bdp->addr),
                                  skb_headlen(skb),
                                  DMA_TO_DEVICE);

                 for (i = 0; i < nr_frags; i++) {
                         skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
-                        bdp = greth->tx_bd_base + greth->tx_last;
+                        bdp = greth->tx_bd_base + tx_last;

                         dma_unmap_page(greth->dev,
                                        greth_read_bd(&bdp->addr),
                                        skb_frag_size(frag),
                                        DMA_TO_DEVICE);

-                        greth->tx_last = NEXT_TX(greth->tx_last);
+                        tx_last = NEXT_TX(tx_last);
                 }
-                greth->tx_free += nr_frags+1;
                 dev_kfree_skb(skb);
         }
+        if (skb) { /* skb is set only if the above while loop was entered */
+                wmb();
+                greth->tx_last = tx_last;

-        if (netif_queue_stopped(dev) && (greth->tx_free > (MAX_SKB_FRAGS+1)))
-                netif_wake_queue(dev);
+                if (netif_queue_stopped(dev) &&
+                    (greth_num_free_bds(tx_last, greth->tx_next) >
+                     (MAX_SKB_FRAGS+1)))
+                        netif_wake_queue(dev);
+        }
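Taken together, the transmit and cleanup paths now form a single-producer/single-consumer index protocol: greth_start_xmit_gbit() advances tx_next under devlock, while the NAPI cleaner walks a private copy of tx_last and publishes it only after all unmapping is done, with the wmb() here pairing against the rmb() in the xmit fast path. The following compilable userspace model reproduces that protocol, using C11 acquire/release operations in place of the kernel's rmb()/wmb(); the ring size mirrors GRETH_TXBD_NUM (assumed to be 128) and all names are illustrative:

    #include <assert.h>
    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdint.h>
    #include <stdio.h>

    #define RING_SZ 128                     /* mirrors GRETH_TXBD_NUM (assumed) */
    #define NEXT(x) (((x) + 1) & (RING_SZ - 1))
    #define ITEMS   100000

    static uint32_t slot[RING_SZ];          /* payload, written before publish */
    static _Atomic uint16_t tx_next;        /* producer index (xmit path) */
    static _Atomic uint16_t tx_last;        /* consumer index (cleanup path) */

    static uint16_t num_free(uint16_t last, uint16_t next)
    {
            return (next < last) ? (last - next) - 1
                                 : RING_SZ - (next - last) - 1;
    }

    static void *producer(void *arg)
    {
            (void)arg;
            for (uint32_t i = 0; i < ITEMS; ) {
                    /* acquire-load mirrors the rmb() after reading tx_last */
                    uint16_t last = atomic_load_explicit(&tx_last, memory_order_acquire);
                    uint16_t next = atomic_load_explicit(&tx_next, memory_order_relaxed);
                    if (num_free(last, next) < 1)
                            continue;       /* ring full: driver would stop the queue */
                    slot[next] = i++;
                    /* release-store = "publish BDs, then advance tx_next" */
                    atomic_store_explicit(&tx_next, NEXT(next), memory_order_release);
            }
            return NULL;
    }

    static void *consumer(void *arg)
    {
            (void)arg;
            for (uint32_t expect = 0; expect < ITEMS; ) {
                    uint16_t next = atomic_load_explicit(&tx_next, memory_order_acquire);
                    uint16_t last = atomic_load_explicit(&tx_last, memory_order_relaxed);
                    while (last != next) {  /* same loop bound as the cleaner */
                            assert(slot[last] == expect++);
                            last = NEXT(last);
                    }
                    /* release-store = the cleaner's wmb(); greth->tx_last = tx_last */
                    atomic_store_explicit(&tx_last, last, memory_order_release);
            }
            return NULL;
    }

    int main(void)
    {
            pthread_t p, c;
            pthread_create(&p, NULL, producer, NULL);
            pthread_create(&c, NULL, consumer, NULL);
            pthread_join(p, NULL);
            pthread_join(c, NULL);
            printf("SPSC index protocol OK for %d items\n", ITEMS);
            return 0;
    }

The one-slot-reserved convention in num_free() is what lets tx_last == tx_next unambiguously mean "empty", so both the cleaner's loop bound and poll()'s pending-work test below can be plain index comparisons.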
 }

 static int greth_rx(struct net_device *dev, int limit)
 {
         struct greth_private *greth;
         struct greth_bd *bdp;
         struct sk_buff *skb;
         int pkt_len;
--- 226 unchanged lines hidden ---
 {
         struct greth_private *greth;
         int work_done = 0;
         unsigned long flags;
         u32 mask, ctrl;
         greth = container_of(napi, struct greth_private, napi);

 restart_txrx_poll:
-        if (netif_queue_stopped(greth->netdev)) {
-                if (greth->gbit_mac)
-                        greth_clean_tx_gbit(greth->netdev);
-                else
-                        greth_clean_tx(greth->netdev);
-        }
-
         if (greth->gbit_mac) {
+                greth_clean_tx_gbit(greth->netdev);
                 work_done += greth_rx_gbit(greth->netdev, budget - work_done);
         } else {
+                if (netif_queue_stopped(greth->netdev))
+                        greth_clean_tx(greth->netdev);
                 work_done += greth_rx(greth->netdev, budget - work_done);
         }

         if (work_done < budget) {

                 spin_lock_irqsave(&greth->devlock, flags);

                 ctrl = GRETH_REGLOAD(greth->regs->control);
-                if (netif_queue_stopped(greth->netdev)) {
+                if ((greth->gbit_mac && (greth->tx_last != greth->tx_next)) ||
+                    (!greth->gbit_mac && netif_queue_stopped(greth->netdev))) {
                         GRETH_REGSAVE(greth->regs->control,
                                       ctrl | GRETH_TXI | GRETH_RXI);
                         mask = GRETH_INT_RX | GRETH_INT_RE |
                                GRETH_INT_TX | GRETH_INT_TE;
                 } else {
                         GRETH_REGSAVE(greth->regs->control, ctrl | GRETH_RXI);
                         mask = GRETH_INT_RX | GRETH_INT_RE;
                 }
--- 613 unchanged lines hidden ---
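Finally, the interrupt re-arm decision in greth_poll() no longer keys off the queue state for the gigabit core: any outstanding descriptors (tx_last != tx_next) keep the TX interrupt enabled so cleanup is guaranteed to run again. A small sketch of that decision as a pure function; the GRETH_INT_* bit values below are placeholders, not the real greth.h constants:

    #include <stdbool.h>
    #include <stdint.h>

    /* Illustrative interrupt-mask bits; the real values live in greth.h. */
    #define INT_RX 0x1u
    #define INT_RE 0x2u
    #define INT_TX 0x4u
    #define INT_TE 0x8u

    /* Mirrors the re-arm condition above: gigabit MACs keep the TX
     * interrupt armed whenever descriptors are still outstanding, while
     * 10/100 MACs key off the stopped queue as before. */
    static uint32_t rearm_mask(bool gbit_mac, uint16_t tx_last,
                               uint16_t tx_next, bool queue_stopped)
    {
            if ((gbit_mac && tx_last != tx_next) ||
                (!gbit_mac && queue_stopped))
                    return INT_RX | INT_RE | INT_TX | INT_TE;
            return INT_RX | INT_RE;
    }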