Lines matching refs:sp (references to the identifier sp in the SGI Seeq ethernet driver, sgiseeq.c)

53 #define TX_BUFFS_AVAIL(sp) ((sp->tx_old <= sp->tx_new) ? \  argument
54 sp->tx_old + (SEEQ_TX_BUFFERS - 1) - sp->tx_new : \
55 sp->tx_old - sp->tx_new - 1)
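
The two-armed expression above is the usual free-slot count for a circular descriptor ring: one slot is always left unused so that tx_old == tx_new can only mean "empty", never "full". A minimal standalone sketch of the same arithmetic follows; the ring size of 16 and the helper name tx_buffs_avail are illustrative assumptions, not taken from this listing.

#include <stdio.h>

#define SEEQ_TX_BUFFERS 16	/* assumed ring size, for the demo only */

/* Free TX descriptors, mirroring the TX_BUFFS_AVAIL() macro above. */
static int tx_buffs_avail(unsigned int tx_old, unsigned int tx_new)
{
	return (tx_old <= tx_new) ?
		tx_old + (SEEQ_TX_BUFFERS - 1) - tx_new :	/* no wrap between old and new */
		tx_old - tx_new - 1;				/* tx_new has wrapped past tx_old */
}

int main(void)
{
	printf("%d\n", tx_buffs_avail(0, 0));	/* empty ring  -> 15 free */
	printf("%d\n", tx_buffs_avail(5, 4));	/* full ring   ->  0 free */
	printf("%d\n", tx_buffs_avail(3, 10));	/* 7 in flight ->  8 free */
	return 0;
}
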
57 #define VIRT_TO_DMA(sp, v) ((sp)->srings_dma + \ argument
59 (unsigned long)((sp)->rx_desc)))
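
Only the first and last lines of the VIRT_TO_DMA() macro reference sp, so the middle of the expression is not shown here; from the two matched lines, the macro turns a descriptor's kernel virtual address into a bus address by adding the descriptor's byte offset within the shared ring block (whose first array is rx_desc) to the block's DMA handle srings_dma. A simplified, userspace-compilable sketch of that translation, using placeholder types rather than the driver's own:

#include <stdint.h>

/* Placeholder ring context; the field names mirror the listing, the types do not. */
struct fake_private {
	uintptr_t srings_dma;	/* DMA handle of the descriptor block */
	void *rx_desc;		/* kernel virtual address of the same block */
};

/* Virtual-to-DMA translation in the spirit of VIRT_TO_DMA(sp, v) above. */
static uintptr_t virt_to_dma(const struct fake_private *sp, const void *v)
{
	return sp->srings_dma + ((uintptr_t)v - (uintptr_t)sp->rx_desc);
}
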
115 struct sgiseeq_private *sp = netdev_priv(dev); in dma_sync_desc_cpu() local
117 dma_sync_single_for_cpu(dev->dev.parent, VIRT_TO_DMA(sp, addr), in dma_sync_desc_cpu()
123 struct sgiseeq_private *sp = netdev_priv(dev); in dma_sync_desc_dev() local
125 dma_sync_single_for_device(dev->dev.parent, VIRT_TO_DMA(sp, addr), in dma_sync_desc_dev()
146 static inline void seeq_go(struct sgiseeq_private *sp, in seeq_go() argument
150 sregs->rstat = sp->mode | RSTAT_GO_BITS; in seeq_go()
156 struct sgiseeq_private *sp = netdev_priv(dev); in __sgiseeq_set_mac_address() local
157 struct sgiseeq_regs *sregs = sp->sregs; in __sgiseeq_set_mac_address()
167 struct sgiseeq_private *sp = netdev_priv(dev); in sgiseeq_set_mac_address() local
172 spin_lock_irq(&sp->tx_lock); in sgiseeq_set_mac_address()
174 spin_unlock_irq(&sp->tx_lock); in sgiseeq_set_mac_address()
185 struct sgiseeq_private *sp = netdev_priv(dev); in seeq_init_ring() local
189 sp->rx_new = sp->tx_new = 0; in seeq_init_ring()
190 sp->rx_old = sp->tx_old = 0; in seeq_init_ring()
196 sp->tx_desc[i].tdma.cntinfo = TCNTINFO_INIT; in seeq_init_ring()
197 dma_sync_desc_dev(dev, &sp->tx_desc[i]); in seeq_init_ring()
202 if (!sp->rx_desc[i].skb) { in seeq_init_ring()
212 sp->rx_desc[i].skb = skb; in seeq_init_ring()
213 sp->rx_desc[i].rdma.pbuf = dma_addr; in seeq_init_ring()
215 sp->rx_desc[i].rdma.cntinfo = RCNTINFO_INIT; in seeq_init_ring()
216 dma_sync_desc_dev(dev, &sp->rx_desc[i]); in seeq_init_ring()
218 sp->rx_desc[i - 1].rdma.cntinfo |= HPCDMA_EOR; in seeq_init_ring()
219 dma_sync_desc_dev(dev, &sp->rx_desc[i - 1]); in seeq_init_ring()
225 struct sgiseeq_private *sp = netdev_priv(dev); in seeq_purge_ring() local
230 if (sp->tx_desc[i].skb) { in seeq_purge_ring()
231 dev_kfree_skb(sp->tx_desc[i].skb); in seeq_purge_ring()
232 sp->tx_desc[i].skb = NULL; in seeq_purge_ring()
238 if (sp->rx_desc[i].skb) { in seeq_purge_ring()
239 dev_kfree_skb(sp->rx_desc[i].skb); in seeq_purge_ring()
240 sp->rx_desc[i].skb = NULL; in seeq_purge_ring()
291 static int init_seeq(struct net_device *dev, struct sgiseeq_private *sp, in init_seeq() argument
294 struct hpc3_ethregs *hregs = sp->hregs; in init_seeq()
303 if (sp->is_edlc) { in init_seeq()
305 sregs->rw.wregs.control = sp->control; in init_seeq()
311 hregs->rx_ndptr = VIRT_TO_DMA(sp, sp->rx_desc); in init_seeq()
312 hregs->tx_ndptr = VIRT_TO_DMA(sp, sp->tx_desc); in init_seeq()
314 seeq_go(sp, hregs, sregs); in init_seeq()
331 static inline void rx_maybe_restart(struct sgiseeq_private *sp, in rx_maybe_restart() argument
336 hregs->rx_ndptr = VIRT_TO_DMA(sp, sp->rx_desc + sp->rx_new); in rx_maybe_restart()
337 seeq_go(sp, hregs, sregs); in rx_maybe_restart()
341 static inline void sgiseeq_rx(struct net_device *dev, struct sgiseeq_private *sp, in sgiseeq_rx() argument
350 unsigned int orig_end = PREV_RX(sp->rx_new); in sgiseeq_rx()
353 rd = &sp->rx_desc[sp->rx_new]; in sgiseeq_rx()
405 sp->rx_new = NEXT_RX(sp->rx_new); in sgiseeq_rx()
407 rd = &sp->rx_desc[sp->rx_new]; in sgiseeq_rx()
412 dma_sync_desc_cpu(dev, &sp->rx_desc[orig_end]); in sgiseeq_rx()
413 sp->rx_desc[orig_end].rdma.cntinfo &= ~(HPCDMA_EOR); in sgiseeq_rx()
414 dma_sync_desc_dev(dev, &sp->rx_desc[orig_end]); in sgiseeq_rx()
415 dma_sync_desc_cpu(dev, &sp->rx_desc[PREV_RX(sp->rx_new)]); in sgiseeq_rx()
416 sp->rx_desc[PREV_RX(sp->rx_new)].rdma.cntinfo |= HPCDMA_EOR; in sgiseeq_rx()
417 dma_sync_desc_dev(dev, &sp->rx_desc[PREV_RX(sp->rx_new)]); in sgiseeq_rx()
418 rx_maybe_restart(sp, hregs, sregs); in sgiseeq_rx()
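
The sgiseeq_rx() references just above show the receive path moving the end-of-ring marker once the ring index has advanced: HPCDMA_EOR is cleared on the descriptor that previously terminated the ring and set on the descriptor just behind the new rx_new, each change bracketed by dma_sync_desc_cpu()/dma_sync_desc_dev(). A minimal sketch of that marker move; the ring size, the flag value, and the helper name move_eor are assumptions for illustration, and the descriptor syncs are left out.

#include <stdint.h>

#define SEEQ_RX_BUFFERS	16		/* assumed ring size, demo only */
#define HPCDMA_EOR	0x80000000u	/* assumed end-of-ring flag value, demo only */
#define PREV_RX(i)	(((i) == 0) ? (SEEQ_RX_BUFFERS - 1) : (i) - 1)

struct fake_rx_desc {
	uint32_t cntinfo;		/* count/status word carrying HPCDMA_EOR */
};

/* Clear the old end-of-ring marker and plant it one slot behind rx_new. */
static void move_eor(struct fake_rx_desc *rx, unsigned int orig_end, unsigned int rx_new)
{
	rx[orig_end].cntinfo &= ~HPCDMA_EOR;
	rx[PREV_RX(rx_new)].cntinfo |= HPCDMA_EOR;
}
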
421 static inline void tx_maybe_reset_collisions(struct sgiseeq_private *sp, in tx_maybe_reset_collisions() argument
424 if (sp->is_edlc) { in tx_maybe_reset_collisions()
425 sregs->rw.wregs.control = sp->control & ~(SEEQ_CTRL_XCNT); in tx_maybe_reset_collisions()
426 sregs->rw.wregs.control = sp->control; in tx_maybe_reset_collisions()
431 struct sgiseeq_private *sp, in kick_tx() argument
435 int i = sp->tx_old; in kick_tx()
443 td = &sp->tx_desc[i]; in kick_tx()
448 td = &sp->tx_desc[i]; in kick_tx()
453 hregs->tx_ndptr = VIRT_TO_DMA(sp, td); in kick_tx()
458 static inline void sgiseeq_tx(struct net_device *dev, struct sgiseeq_private *sp, in sgiseeq_tx() argument
466 tx_maybe_reset_collisions(sp, sregs); in sgiseeq_tx()
479 for (j = sp->tx_old; j != sp->tx_new; j = NEXT_TX(j)) { in sgiseeq_tx()
480 td = &sp->tx_desc[j]; in sgiseeq_tx()
488 hregs->tx_ndptr = VIRT_TO_DMA(sp, td); in sgiseeq_tx()
494 sp->tx_old = NEXT_TX(sp->tx_old); in sgiseeq_tx()
508 struct sgiseeq_private *sp = netdev_priv(dev); in sgiseeq_interrupt() local
509 struct hpc3_ethregs *hregs = sp->hregs; in sgiseeq_interrupt()
510 struct sgiseeq_regs *sregs = sp->sregs; in sgiseeq_interrupt()
512 spin_lock(&sp->tx_lock); in sgiseeq_interrupt()
518 sgiseeq_rx(dev, sp, hregs, sregs); in sgiseeq_interrupt()
521 if (sp->tx_old != sp->tx_new) in sgiseeq_interrupt()
522 sgiseeq_tx(dev, sp, hregs, sregs); in sgiseeq_interrupt()
524 if ((TX_BUFFS_AVAIL(sp) > 0) && netif_queue_stopped(dev)) { in sgiseeq_interrupt()
527 spin_unlock(&sp->tx_lock); in sgiseeq_interrupt()
534 struct sgiseeq_private *sp = netdev_priv(dev); in sgiseeq_open() local
535 struct sgiseeq_regs *sregs = sp->sregs; in sgiseeq_open()
544 err = init_seeq(dev, sp, sregs); in sgiseeq_open()
560 struct sgiseeq_private *sp = netdev_priv(dev); in sgiseeq_close() local
561 struct sgiseeq_regs *sregs = sp->sregs; in sgiseeq_close()
567 reset_hpc3_and_seeq(sp->hregs, sregs); in sgiseeq_close()
576 struct sgiseeq_private *sp = netdev_priv(dev); in sgiseeq_reset() local
577 struct sgiseeq_regs *sregs = sp->sregs; in sgiseeq_reset()
580 err = init_seeq(dev, sp, sregs); in sgiseeq_reset()
593 struct sgiseeq_private *sp = netdev_priv(dev); in sgiseeq_start_xmit() local
594 struct hpc3_ethregs *hregs = sp->hregs; in sgiseeq_start_xmit()
599 spin_lock_irqsave(&sp->tx_lock, flags); in sgiseeq_start_xmit()
605 spin_unlock_irqrestore(&sp->tx_lock, flags); in sgiseeq_start_xmit()
612 entry = sp->tx_new; in sgiseeq_start_xmit()
613 td = &sp->tx_desc[entry]; in sgiseeq_start_xmit()
635 if (sp->tx_old != sp->tx_new) { in sgiseeq_start_xmit()
638 backend = &sp->tx_desc[PREV_TX(sp->tx_new)]; in sgiseeq_start_xmit()
643 sp->tx_new = NEXT_TX(sp->tx_new); /* Advance. */ in sgiseeq_start_xmit()
647 kick_tx(dev, sp, hregs); in sgiseeq_start_xmit()
649 if (!TX_BUFFS_AVAIL(sp)) in sgiseeq_start_xmit()
651 spin_unlock_irqrestore(&sp->tx_lock, flags); in sgiseeq_start_xmit()
667 struct sgiseeq_private *sp = netdev_priv(dev); in sgiseeq_set_multicast() local
668 unsigned char oldmode = sp->mode; in sgiseeq_set_multicast()
671 sp->mode = SEEQ_RCMD_RANY; in sgiseeq_set_multicast()
673 sp->mode = SEEQ_RCMD_RBMCAST; in sgiseeq_set_multicast()
675 sp->mode = SEEQ_RCMD_RBCAST; in sgiseeq_set_multicast()
681 if (oldmode != sp->mode) in sgiseeq_set_multicast()
689 struct sgiseeq_private *sp = netdev_priv(dev); in setup_tx_ring() local
693 buf[i].tdma.pnext = VIRT_TO_DMA(sp, buf + i + 1); in setup_tx_ring()
698 buf[i].tdma.pnext = VIRT_TO_DMA(sp, buf); in setup_tx_ring()
706 struct sgiseeq_private *sp = netdev_priv(dev); in setup_rx_ring() local
710 buf[i].rdma.pnext = VIRT_TO_DMA(sp, buf + i + 1); in setup_rx_ring()
716 buf[i].rdma.pnext = VIRT_TO_DMA(sp, buf); in setup_rx_ring()
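
setup_tx_ring() and setup_rx_ring() above link each descriptor's pnext field to the DMA address of the following descriptor and point the last descriptor back at the first, so the HPC3 DMA engine walks an endless circular list. A self-contained sketch of that linking, again with simplified placeholder types (fake_desc, fake_ring) rather than the driver's real layout:

#include <stdint.h>

struct fake_desc {
	uint32_t pnext;		/* DMA address of the next descriptor */
	/* buffer pointer, count/status word and skb backpointer elided */
};

struct fake_ring {
	uintptr_t srings_dma;	/* DMA handle of the descriptor block */
	struct fake_desc *base;	/* kernel virtual address of the same block */
};

/* DMA address of one descriptor, as in the VIRT_TO_DMA() sketch earlier. */
static uint32_t desc_dma(const struct fake_ring *r, const struct fake_desc *d)
{
	return (uint32_t)(r->srings_dma + ((uintptr_t)d - (uintptr_t)r->base));
}

/* Circular linking in the spirit of setup_tx_ring()/setup_rx_ring() above. */
static void link_ring(struct fake_ring *r, struct fake_desc *buf, int nbufs)
{
	int i;

	for (i = 0; i < nbufs - 1; i++)
		buf[i].pnext = desc_dma(r, buf + i + 1);
	buf[nbufs - 1].pnext = desc_dma(r, buf);	/* close the ring */
}
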
736 struct sgiseeq_private *sp; in sgiseeq_probe() local
748 sp = netdev_priv(dev); in sgiseeq_probe()
751 sr = dma_alloc_noncoherent(&pdev->dev, sizeof(*sp->srings), in sgiseeq_probe()
752 &sp->srings_dma, DMA_BIDIRECTIONAL, GFP_KERNEL); in sgiseeq_probe()
758 sp->srings = sr; in sgiseeq_probe()
759 sp->rx_desc = sp->srings->rxvector; in sgiseeq_probe()
760 sp->tx_desc = sp->srings->txvector; in sgiseeq_probe()
761 spin_lock_init(&sp->tx_lock); in sgiseeq_probe()
764 setup_rx_ring(dev, sp->rx_desc, SEEQ_RX_BUFFERS); in sgiseeq_probe()
765 setup_tx_ring(dev, sp->tx_desc, SEEQ_TX_BUFFERS); in sgiseeq_probe()
770 gpriv = sp; in sgiseeq_probe()
773 sp->sregs = (struct sgiseeq_regs *) &hpcregs->eth_ext[0]; in sgiseeq_probe()
774 sp->hregs = &hpcregs->ethregs; in sgiseeq_probe()
775 sp->name = sgiseeqstr; in sgiseeq_probe()
776 sp->mode = SEEQ_RCMD_RBCAST; in sgiseeq_probe()
779 sp->hregs->pconfig = 0x161; in sgiseeq_probe()
780 sp->hregs->dconfig = HPC3_EDCFG_FIRQ | HPC3_EDCFG_FEOP | in sgiseeq_probe()
784 sp->hregs->pconfig = 0x161; in sgiseeq_probe()
785 sp->hregs->dconfig = HPC3_EDCFG_FIRQ | HPC3_EDCFG_FEOP | in sgiseeq_probe()
789 hpc3_eth_reset(sp->hregs); in sgiseeq_probe()
791 sp->is_edlc = !(sp->sregs->rw.rregs.collision_tx[0] & 0xff); in sgiseeq_probe()
792 if (sp->is_edlc) in sgiseeq_probe()
793 sp->control = SEEQ_CTRL_XCNT | SEEQ_CTRL_ACCNT | in sgiseeq_probe()
813 dma_free_noncoherent(&pdev->dev, sizeof(*sp->srings), sp->srings, in sgiseeq_probe()
814 sp->srings_dma, DMA_BIDIRECTIONAL); in sgiseeq_probe()
825 struct sgiseeq_private *sp = netdev_priv(dev); in sgiseeq_remove() local
828 dma_free_noncoherent(&pdev->dev, sizeof(*sp->srings), sp->srings, in sgiseeq_remove()
829 sp->srings_dma, DMA_BIDIRECTIONAL); in sgiseeq_remove()