drivers/net/ethernet/freescale/gianfar.c: diff between commits aeb12c5ef7cb08d879af22fc0a56cab9e70689ea (old) and 71ff9e3df7e1c5d3293af6b595309124e8c97412 (new)
1/* drivers/net/ethernet/freescale/gianfar.c
2 *
3 * Gianfar Ethernet Driver
4 * This driver is designed for the non-CPM ethernet controllers
5 * on the 85xx and 83xx family of integrated processors
6 * Based on 8260_io/fcc_enet.c
7 *
8 * Author: Andy Fleming

--- 349 unchanged lines hidden ---

358static void gfar_mac_rx_config(struct gfar_private *priv)
359{
360 struct gfar __iomem *regs = priv->gfargrp[0].regs;
361 u32 rctrl = 0;
362
363 if (priv->rx_filer_enable) {
364 rctrl |= RCTRL_FILREN;
365 /* Program the RIR0 reg with the required distribution */
-366 gfar_write(&regs->rir0, DEFAULT_RIR0);
+366 if (priv->poll_mode == GFAR_SQ_POLLING)
+367 gfar_write(&regs->rir0, DEFAULT_2RXQ_RIR0);
+368 else /* GFAR_MQ_POLLING */
+369 gfar_write(&regs->rir0, DEFAULT_8RXQ_RIR0);
370 }
371
372 /* Restore PROMISC mode */
373 if (priv->ndev->flags & IFF_PROMISC)
374 rctrl |= RCTRL_PROM;
375
376 if (priv->ndev->features & NETIF_F_RXCSUM)
377 rctrl |= RCTRL_CHECKSUMMING;

--- 256 unchanged lines hidden ---
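Note on the gfar_mac_rx_config() hunk above: RIR0 (the RX ring-steering register) is no longer always programmed with DEFAULT_RIR0; the value now follows priv->poll_mode, so single-queue-per-group polling spreads RX traffic over two rings (one per interrupt group) while multi-queue polling keeps the eight-ring distribution. A minimal standalone sketch of that selection is below; the two register values are placeholders for illustration, the real DEFAULT_2RXQ_RIR0 and DEFAULT_8RXQ_RIR0 constants are defined in gianfar.h.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

enum gfar_poll_mode { GFAR_SQ_POLLING, GFAR_MQ_POLLING };

/* Placeholder values for illustration only; see gianfar.h for the real ones. */
#define DEFAULT_2RXQ_RIR0 0x04000000u
#define DEFAULT_8RXQ_RIR0 0x05397700u

static uint32_t pick_rir0(enum gfar_poll_mode mode)
{
	/* two RX rings (one per interrupt group) vs. the full eight-ring spread */
	return mode == GFAR_SQ_POLLING ? DEFAULT_2RXQ_RIR0 : DEFAULT_8RXQ_RIR0;
}

int main(void)
{
	printf("SQ polling RIR0: 0x%08" PRIx32 "\n", pick_rir0(GFAR_SQ_POLLING));
	printf("MQ polling RIR0: 0x%08" PRIx32 "\n", pick_rir0(GFAR_MQ_POLLING));
	return 0;
}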

634 napi_enable(&priv->gfargrp[i].napi_tx);
635 }
636}
637
638static int gfar_parse_group(struct device_node *np,
639 struct gfar_private *priv, const char *model)
640{
641 struct gfar_priv_grp *grp = &priv->gfargrp[priv->num_grps];
-639 u32 *queue_mask;
642 int i;
643
644 for (i = 0; i < GFAR_NUM_IRQS; i++) {
645 grp->irqinfo[i] = kzalloc(sizeof(struct gfar_irqinfo),
646 GFP_KERNEL);
647 if (!grp->irqinfo[i])
648 return -ENOMEM;
649 }

--- 12 unchanged lines hidden ---

662 gfar_irq(grp, RX)->irq == NO_IRQ ||
663 gfar_irq(grp, ER)->irq == NO_IRQ)
664 return -EINVAL;
665 }
666
667 grp->priv = priv;
668 spin_lock_init(&grp->grplock);
669 if (priv->mode == MQ_MG_MODE) {
-668 queue_mask = (u32 *)of_get_property(np, "fsl,rx-bit-map", NULL);
-669 grp->rx_bit_map = queue_mask ?
-670 *queue_mask : (DEFAULT_MAPPING >> priv->num_grps);
-671 queue_mask = (u32 *)of_get_property(np, "fsl,tx-bit-map", NULL);
-672 grp->tx_bit_map = queue_mask ?
-673 *queue_mask : (DEFAULT_MAPPING >> priv->num_grps);
+670 u32 *rxq_mask, *txq_mask;
+671 rxq_mask = (u32 *)of_get_property(np, "fsl,rx-bit-map", NULL);
+672 txq_mask = (u32 *)of_get_property(np, "fsl,tx-bit-map", NULL);
+673
+674 if (priv->poll_mode == GFAR_SQ_POLLING) {
+675 /* One Q per interrupt group: Q0 to G0, Q1 to G1 */
+676 grp->rx_bit_map = (DEFAULT_MAPPING >> priv->num_grps);
+677 grp->tx_bit_map = (DEFAULT_MAPPING >> priv->num_grps);
+678 } else { /* GFAR_MQ_POLLING */
+679 grp->rx_bit_map = rxq_mask ?
+680 *rxq_mask : (DEFAULT_MAPPING >> priv->num_grps);
+681 grp->tx_bit_map = txq_mask ?
+682 *txq_mask : (DEFAULT_MAPPING >> priv->num_grps);
+683 }
684 } else {
685 grp->rx_bit_map = 0xFF;
686 grp->tx_bit_map = 0xFF;
687 }
688
689 /* bit_map's MSB is q0 (from q0 to q7) but, for_each_set_bit parses
690 * right to left, so we need to revert the 8 bits to get the q index
691 */
692 grp->rx_bit_map = bitrev8(grp->rx_bit_map);
693 grp->tx_bit_map = bitrev8(grp->tx_bit_map);
694
695 /* Calculate RSTAT, TSTAT, RQUEUE and TQUEUE values,
696 * also assign queues to groups
697 */
698 for_each_set_bit(i, &grp->rx_bit_map, priv->num_rx_queues) {
+699 if (!grp->rx_queue)
+700 grp->rx_queue = priv->rx_queue[i];
701 grp->num_rx_queues++;
702 grp->rstat |= (RSTAT_CLEAR_RHALT >> i);
703 priv->rqueue |= ((RQUEUE_EN0 | RQUEUE_EX0) >> i);
704 priv->rx_queue[i]->grp = grp;
705 }
706
707 for_each_set_bit(i, &grp->tx_bit_map, priv->num_tx_queues) {
+708 if (!grp->tx_queue)
+709 grp->tx_queue = priv->tx_queue[i];
710 grp->num_tx_queues++;
711 grp->tstat |= (TSTAT_CLEAR_THALT >> i);
712 priv->tqueue |= (TQUEUE_EN0 >> i);
713 priv->tx_queue[i]->grp = grp;
714 }
715
716 priv->num_grps++;
717

--- 14 unchanged lines hidden ---
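Note on the gfar_parse_group() hunks above: in multi-group mode the RX/TX bit maps are now derived from priv->poll_mode. Single-queue polling ignores the device-tree "fsl,rx-bit-map"/"fsl,tx-bit-map" properties and gives each interrupt group one queue (DEFAULT_MAPPING >> priv->num_grps), while multi-queue polling keeps honouring them with the same fallback; each group also records its first RX and TX queue in grp->rx_queue and grp->tx_queue for the single-queue pollers. The bitrev8() calls are still needed because the hardware bit map puts q0 in the MSB, whereas for_each_set_bit() walks from bit 0 upward. A small standalone sketch of that reversal, using a hypothetical bit-map value of 0xC0:

#include <stdint.h>
#include <stdio.h>

/* Reverse the bit order of one byte, like the kernel's bitrev8(). */
static uint8_t bitrev8(uint8_t x)
{
	uint8_t r = 0;
	int i;

	for (i = 0; i < 8; i++)
		if (x & (1u << i))
			r |= (uint8_t)(1u << (7 - i));
	return r;
}

int main(void)
{
	/* Hypothetical "fsl,rx-bit-map": MSB is queue 0, so 0xC0 names q0 and q1. */
	uint8_t map = 0xC0;
	uint8_t rev = bitrev8(map);	/* 0x03: bit 0 now means queue 0 */
	int q;

	printf("raw map 0x%02x -> reversed 0x%02x -> queues:", (unsigned)map, (unsigned)rev);
	for (q = 0; q < 8; q++)		/* poor man's for_each_set_bit() */
		if (rev & (1u << q))
			printf(" %d", q);
	printf("\n");
	return 0;
}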

732 const u32 *stash_len;
733 const u32 *stash_idx;
734 unsigned int num_tx_qs, num_rx_qs;
735 u32 *tx_queues, *rx_queues;
736
737 if (!np || !of_device_is_available(np))
738 return -ENODEV;
739
-726 /* parse the num of tx and rx queues */
+740 /* parse the num of HW tx and rx queues */
741 tx_queues = (u32 *)of_get_property(np, "fsl,num_tx_queues", NULL);
-728 num_tx_qs = tx_queues ? *tx_queues : 1;
+742 rx_queues = (u32 *)of_get_property(np, "fsl,num_rx_queues", NULL);
743
+744 if (priv->mode == SQ_SG_MODE) {
+745 num_tx_qs = 1;
+746 num_rx_qs = 1;
+747 } else { /* MQ_MG_MODE */
+748 if (priv->poll_mode == GFAR_SQ_POLLING) {
+749 num_tx_qs = 2; /* one q per int group */
+750 num_rx_qs = 2; /* one q per int group */
+751 } else { /* GFAR_MQ_POLLING */
+752 num_tx_qs = tx_queues ? *tx_queues : 1;
+753 num_rx_qs = rx_queues ? *rx_queues : 1;
+754 }
+755 }
+756
757 if (num_tx_qs > MAX_TX_QS) {
758 pr_err("num_tx_qs(=%d) greater than MAX_TX_QS(=%d)\n",
759 num_tx_qs, MAX_TX_QS);
760 pr_err("Cannot do alloc_etherdev, aborting\n");
761 return -EINVAL;
762 }
763
-737 rx_queues = (u32 *)of_get_property(np, "fsl,num_rx_queues", NULL);
-738 num_rx_qs = rx_queues ? *rx_queues : 1;
-739
764 if (num_rx_qs > MAX_RX_QS) {
765 pr_err("num_rx_qs(=%d) greater than MAX_RX_QS(=%d)\n",
766 num_rx_qs, MAX_RX_QS);
767 pr_err("Cannot do alloc_etherdev, aborting\n");
768 return -EINVAL;
769 }
770
771 *pdev = alloc_etherdev_mq(sizeof(*priv), num_tx_qs);

--- 24 unchanged lines hidden ---
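Note on the gfar_of_init() hunk above: the number of TX/RX queues is no longer read straight from "fsl,num_tx_queues"/"fsl,num_rx_queues". A single-group device gets exactly one queue of each kind, a multi-group ("fsl,etsec2") device in single-queue polling mode gets two (one per interrupt group), and only multi-queue polling still honours the device-tree counts, defaulting to 1 when a property is absent. A standalone sketch of that decision, with dt_tx/dt_rx standing in for the optional properties (0 meaning absent):

#include <stdio.h>

enum gfar_mode { SQ_SG_MODE, MQ_MG_MODE };
enum gfar_poll_mode { GFAR_SQ_POLLING, GFAR_MQ_POLLING };

static void pick_queue_counts(enum gfar_mode mode, enum gfar_poll_mode poll,
			      unsigned int dt_tx, unsigned int dt_rx,
			      unsigned int *num_tx_qs, unsigned int *num_rx_qs)
{
	if (mode == SQ_SG_MODE) {
		*num_tx_qs = 1;
		*num_rx_qs = 1;
	} else if (poll == GFAR_SQ_POLLING) {
		*num_tx_qs = 2;		/* one queue per interrupt group */
		*num_rx_qs = 2;
	} else {			/* GFAR_MQ_POLLING */
		*num_tx_qs = dt_tx ? dt_tx : 1;
		*num_rx_qs = dt_rx ? dt_rx : 1;
	}
}

int main(void)
{
	unsigned int tx, rx;

	pick_queue_counts(MQ_MG_MODE, GFAR_SQ_POLLING, 8, 8, &tx, &rx);
	printf("etsec2 + SQ polling: %u TX, %u RX\n", tx, rx);	/* prints 2 TX, 2 RX */
	return 0;
}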

796 model = of_get_property(np, "model", NULL);
797
798 for (i = 0; i < MAXGROUPS; i++)
799 priv->gfargrp[i].regs = NULL;
800
801 /* Parse and initialize group specific information */
802 if (of_device_is_compatible(np, "fsl,etsec2")) {
803 priv->mode = MQ_MG_MODE;
+804 priv->poll_mode = GFAR_SQ_POLLING;
805 for_each_child_of_node(np, child) {
806 err = gfar_parse_group(child, priv, model);
807 if (err)
808 goto err_grp_init;
809 }
810 } else {
811 priv->mode = SQ_SG_MODE;
+812 priv->poll_mode = GFAR_SQ_POLLING;
813 err = gfar_parse_group(np, priv, model);
814 if (err)
815 goto err_grp_init;
816 }
817
818 stash = of_get_property(np, "bd-stash", NULL);
819
820 if (stash) {

--- 463 unchanged lines hidden ---
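Note on the probe hunks above: both branches now initialize priv->poll_mode, and both pick GFAR_SQ_POLLING, so single-queue-per-group polling is the default whether the node is "fsl,etsec2" (MQ_MG_MODE, one gfar_parse_group() call per child) or a single-group controller (SQ_SG_MODE); nothing in the hunks shown here switches to GFAR_MQ_POLLING. A tiny sketch of that pairing, where is_etsec2 is a stand-in for the of_device_is_compatible() check:

#include <stdbool.h>
#include <stdio.h>

enum gfar_mode { SQ_SG_MODE, MQ_MG_MODE };
enum gfar_poll_mode { GFAR_SQ_POLLING, GFAR_MQ_POLLING };

struct probe_cfg {
	enum gfar_mode mode;
	enum gfar_poll_mode poll_mode;
};

/* The compatible string only decides the group mode; the poll mode starts
 * out as GFAR_SQ_POLLING either way. */
static struct probe_cfg probe_defaults(bool is_etsec2)
{
	struct probe_cfg cfg = {
		.mode = is_etsec2 ? MQ_MG_MODE : SQ_SG_MODE,
		.poll_mode = GFAR_SQ_POLLING,
	};
	return cfg;
}

int main(void)
{
	struct probe_cfg c = probe_defaults(true);

	printf("etsec2: mode=%d poll_mode=%d\n", c.mode, c.poll_mode);
	return 0;
}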

1284
1285 /* Fill in the dev structure */
1286 dev->watchdog_timeo = TX_TIMEOUT;
1287 dev->mtu = 1500;
1288 dev->netdev_ops = &gfar_netdev_ops;
1289 dev->ethtool_ops = &gfar_ethtool_ops;
1290
1291 /* Register for napi ...We are registering NAPI for each grp */
-1266 if (priv->mode == SQ_SG_MODE) {
-1267 netif_napi_add(dev, &priv->gfargrp[0].napi_rx, gfar_poll_rx_sq,
-1268 GFAR_DEV_WEIGHT);
-1269 netif_napi_add(dev, &priv->gfargrp[0].napi_tx, gfar_poll_tx_sq,
-1270 2);
-1271 } else {
-1272 for (i = 0; i < priv->num_grps; i++) {
+1292 for (i = 0; i < priv->num_grps; i++) {
+1293 if (priv->poll_mode == GFAR_SQ_POLLING) {
1294 netif_napi_add(dev, &priv->gfargrp[i].napi_rx,
+1295 gfar_poll_rx_sq, GFAR_DEV_WEIGHT);
+1296 netif_napi_add(dev, &priv->gfargrp[i].napi_tx,
+1297 gfar_poll_tx_sq, 2);
+1298 } else {
+1299 netif_napi_add(dev, &priv->gfargrp[i].napi_rx,
1300 gfar_poll_rx, GFAR_DEV_WEIGHT);
1301 netif_napi_add(dev, &priv->gfargrp[i].napi_tx,
1302 gfar_poll_tx, 2);
1303 }
1304 }
1305
1306 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_CSUM) {
1307 dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG |

--- 1532 unchanged lines hidden ---
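Note on the NAPI registration hunk above: the old code registered the single-queue pollers only on group 0 (SQ_SG_MODE) and the multi-queue pollers per group otherwise; now every group gets its own RX and TX NAPI context and priv->poll_mode just selects which pair of handlers (gfar_poll_rx_sq/gfar_poll_tx_sq or gfar_poll_rx/gfar_poll_tx) is registered. A standalone sketch of that selection loop, with trivial stand-ins for the four callbacks and for the group structure:

#include <stdio.h>

enum gfar_poll_mode { GFAR_SQ_POLLING, GFAR_MQ_POLLING };

/* Stand-ins for gfar_poll_rx_sq/gfar_poll_tx_sq/gfar_poll_rx/gfar_poll_tx. */
static int poll_rx_sq(void) { return 0; }
static int poll_tx_sq(void) { return 0; }
static int poll_rx_mq(void) { return 0; }
static int poll_tx_mq(void) { return 0; }

struct fake_grp {
	int (*rx_poll)(void);
	int (*tx_poll)(void);
};

int main(void)
{
	enum gfar_poll_mode poll_mode = GFAR_SQ_POLLING;
	struct fake_grp grp[2];
	int i;

	/* Every group gets its own pollers; poll_mode only picks the pair. */
	for (i = 0; i < 2; i++) {
		grp[i].rx_poll = poll_mode == GFAR_SQ_POLLING ? poll_rx_sq : poll_rx_mq;
		grp[i].tx_poll = poll_mode == GFAR_SQ_POLLING ? poll_tx_sq : poll_tx_mq;
	}

	printf("registered %s pollers on %d groups\n",
	       poll_mode == GFAR_SQ_POLLING ? "single-queue" : "multi-queue", i);
	return grp[0].rx_poll() + grp[0].tx_poll();
}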

2840 return howmany;
2841}
2842
2843static int gfar_poll_rx_sq(struct napi_struct *napi, int budget)
2844{
2845 struct gfar_priv_grp *gfargrp =
2846 container_of(napi, struct gfar_priv_grp, napi_rx);
2847 struct gfar __iomem *regs = gfargrp->regs;
-2822 struct gfar_priv_rx_q *rx_queue = gfargrp->priv->rx_queue[0];
+2848 struct gfar_priv_rx_q *rx_queue = gfargrp->rx_queue;
2849 int work_done = 0;
2850
2851 /* Clear IEVENT, so interrupts aren't called again
2852 * because of the packets that have already arrived
2853 */
2854 gfar_write(&regs->ievent, IEVENT_RX_MASK);
2855
2856 work_done = gfar_clean_rx_ring(rx_queue, budget);

--- 14 unchanged lines hidden ---

2871 return work_done;
2872}
2873
2874static int gfar_poll_tx_sq(struct napi_struct *napi, int budget)
2875{
2876 struct gfar_priv_grp *gfargrp =
2877 container_of(napi, struct gfar_priv_grp, napi_tx);
2878 struct gfar __iomem *regs = gfargrp->regs;
-2853 struct gfar_priv_tx_q *tx_queue = gfargrp->priv->tx_queue[0];
+2879 struct gfar_priv_tx_q *tx_queue = gfargrp->tx_queue;
2880 u32 imask;
2881
2882 /* Clear IEVENT, so interrupts aren't called again
2883 * because of the packets that have already arrived
2884 */
2885 gfar_write(&regs->ievent, IEVENT_TX_MASK);
2886
2887 /* run Tx cleanup to completion */

--- 565 unchanged lines hidden ---
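Note on the two poller hunks above: gfar_poll_rx_sq() and gfar_poll_tx_sq() used to clean priv->rx_queue[0] and priv->tx_queue[0] unconditionally, which only made sense with a single group; they now use the queue cached in their own group (grp->rx_queue / grp->tx_queue, filled in by gfar_parse_group() earlier in this diff), so two groups can run the single-queue pollers on different rings. A minimal sketch of the per-group pointer idea, with made-up structure names:

#include <stdio.h>

struct fake_rx_q {
	int idx;
};

struct fake_grp {
	struct fake_rx_q *rx_queue;	/* like gfar_priv_grp::rx_queue */
};

/* The ring to clean comes from the group itself rather than from a shared
 * priv->rx_queue[0], so each group services its own ring. */
static int poll_rx_sq(const struct fake_grp *grp)
{
	return grp->rx_queue->idx;
}

int main(void)
{
	struct fake_rx_q q0 = { 0 }, q1 = { 1 };
	struct fake_grp g0 = { &q0 }, g1 = { &q1 };

	printf("group 0 polls ring %d, group 1 polls ring %d\n",
	       poll_rx_sq(&g0), poll_rx_sq(&g1));
	return 0;
}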