Lines matching refs: np in drivers/net/ethernet/natsemi/natsemi.c (National Semiconductor DP8381x Ethernet driver). Each entry shows the source line number, the matching line, and the enclosing function.
672 struct netdev_private *np = netdev_priv(to_net_dev(dev)); in natsemi_show_dspcfg_workaround() local
674 return sprintf(buf, "%s\n", np->dspcfg_workaround ? "on" : "off"); in natsemi_show_dspcfg_workaround()
681 struct netdev_private *np = netdev_priv(to_net_dev(dev)); in natsemi_set_dspcfg_workaround() local
694 spin_lock_irqsave(&np->lock, flags); in natsemi_set_dspcfg_workaround()
696 np->dspcfg_workaround = new_setting; in natsemi_set_dspcfg_workaround()
698 spin_unlock_irqrestore(&np->lock, flags); in natsemi_set_dspcfg_workaround()
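The pair above is the driver's sysfs show/store attribute for the DSPCFG-workaround flag; the listing elides everything between the store's local declarations (line 681) and the locked update (lines 694-698). A minimal sketch of how such a pair fits together, with the input parsing filled in by assumption (the actual driver may parse buf differently):

    static ssize_t natsemi_show_dspcfg_workaround(struct device *dev,
                                                  struct device_attribute *attr,
                                                  char *buf)
    {
            struct netdev_private *np = netdev_priv(to_net_dev(dev));

            return sprintf(buf, "%s\n", np->dspcfg_workaround ? "on" : "off");
    }

    static ssize_t natsemi_set_dspcfg_workaround(struct device *dev,
                                                 struct device_attribute *attr,
                                                 const char *buf, size_t count)
    {
            struct netdev_private *np = netdev_priv(to_net_dev(dev));
            unsigned long flags;
            int new_setting;

            /* Assumed parsing; the elided lines derive on/off from buf. */
            if (sysfs_streq(buf, "on"))
                    new_setting = 1;
            else if (sysfs_streq(buf, "off"))
                    new_setting = 0;
            else
                    return -EINVAL;

            /* The flag is also read from the timer path, so flip it under the lock. */
            spin_lock_irqsave(&np->lock, flags);
            np->dspcfg_workaround = new_setting;
            spin_unlock_irqrestore(&np->lock, flags);

            return count;
    }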
705 struct netdev_private *np = netdev_priv(dev); in ns_ioaddr() local
707 return np->ioaddr; in ns_ioaddr()
724 struct netdev_private *np = netdev_priv(dev); in move_int_phy() local
740 if (target == np->phy_addr_external) in move_int_phy()
749 struct netdev_private *np = netdev_priv(dev); in natsemi_init_media() local
752 if (np->ignore_phy) in natsemi_init_media()
759 np->speed = (tmp & BMCR_SPEED100)? SPEED_100 : SPEED_10; in natsemi_init_media()
760 np->duplex = (tmp & BMCR_FULLDPLX)? DUPLEX_FULL : DUPLEX_HALF; in natsemi_init_media()
761 np->autoneg = (tmp & BMCR_ANENABLE)? AUTONEG_ENABLE: AUTONEG_DISABLE; in natsemi_init_media()
762 np->advertising= mdio_read(dev, MII_ADVERTISE); in natsemi_init_media()
764 if ((np->advertising & ADVERTISE_ALL) != ADVERTISE_ALL && in natsemi_init_media()
765 netif_msg_probe(np)) { in natsemi_init_media()
768 pci_name(np->pci_dev), in natsemi_init_media()
771 (np->advertising & in natsemi_init_media()
774 (np->advertising & in natsemi_init_media()
778 if (netif_msg_probe(np)) in natsemi_init_media()
781 pci_name(np->pci_dev), mdio_read(dev, MII_BMSR), in natsemi_init_media()
782 np->advertising); in natsemi_init_media()
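Lines 759-762 above cache the live MII state: a single BMCR read yields speed, duplex, and autoneg, and a second read caches the advertisement mask. A hedged sketch of the decode, pulled out into a hypothetical helper (decode_bmcr is not a function in this driver):

    #include <linux/mii.h>
    #include <linux/ethtool.h>

    /* Hypothetical helper; natsemi_init_media() does this inline. */
    static void decode_bmcr(struct netdev_private *np, u16 bmcr)
    {
            np->speed   = (bmcr & BMCR_SPEED100) ? SPEED_100      : SPEED_10;
            np->duplex  = (bmcr & BMCR_FULLDPLX) ? DUPLEX_FULL    : DUPLEX_HALF;
            np->autoneg = (bmcr & BMCR_ANENABLE) ? AUTONEG_ENABLE : AUTONEG_DISABLE;
    }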
805 struct netdev_private *np; in natsemi_probe1() local
869 np = netdev_priv(dev); in natsemi_probe1()
870 np->ioaddr = ioaddr; in natsemi_probe1()
872 netif_napi_add(dev, &np->napi, natsemi_poll); in natsemi_probe1()
873 np->dev = dev; in natsemi_probe1()
875 np->pci_dev = pdev; in natsemi_probe1()
877 np->iosize = iosize; in natsemi_probe1()
878 spin_lock_init(&np->lock); in natsemi_probe1()
879 np->msg_enable = (debug >= 0) ? (1<<debug)-1 : NATSEMI_DEF_MSG; in natsemi_probe1()
880 np->hands_off = 0; in natsemi_probe1()
881 np->intr_status = 0; in natsemi_probe1()
882 np->eeprom_size = natsemi_pci_info[chip_idx].eeprom_size; in natsemi_probe1()
884 np->ignore_phy = 1; in natsemi_probe1()
886 np->ignore_phy = 0; in natsemi_probe1()
887 np->dspcfg_workaround = dspcfg_workaround; in natsemi_probe1()
898 if (np->ignore_phy || readl(ioaddr + ChipConfig) & CfgExtPhy) in natsemi_probe1()
907 np->phy_addr_external = find_mii(dev); in natsemi_probe1()
910 if (!np->ignore_phy && np->phy_addr_external == PHY_ADDR_NONE) { in natsemi_probe1()
912 np->phy_addr_external = PHY_ADDR_INTERNAL; in natsemi_probe1()
915 np->phy_addr_external = PHY_ADDR_INTERNAL; in natsemi_probe1()
922 np->full_duplex = 1; in natsemi_probe1()
926 pci_name(np->pci_dev), option & 15); in natsemi_probe1()
929 np->full_duplex = 1; in natsemi_probe1()
946 np->srr = readl(ioaddr + SiliconRev); in natsemi_probe1()
947 if (netif_msg_hw(np)) in natsemi_probe1()
949 pci_name(np->pci_dev), np->srr); in natsemi_probe1()
958 if (netif_msg_drv(np)) { in natsemi_probe1()
962 (unsigned long long)iostart, pci_name(np->pci_dev), in natsemi_probe1()
966 else if (np->ignore_phy) in natsemi_probe1()
969 printk(", port MII, phy ad %d.\n", np->phy_addr_external); in natsemi_probe1()
1118 struct netdev_private *np = netdev_priv(dev); in mdio_read() local
1128 return miiport_read(dev, np->phy_addr_external, reg); in mdio_read()
1133 struct netdev_private *np = netdev_priv(dev); in mdio_write() local
1140 miiport_write(dev, np->phy_addr_external, reg, data); in mdio_write()
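The MDIO wrappers at lines 1118-1140 hide the PHY address from callers: reads and writes name only a register, and the cached external-PHY address is supplied internally. Reduced sketch assembled from the lines shown (the elided lines also cover the internal-PHY case):

    static int mdio_read(struct net_device *dev, int reg)
    {
            struct netdev_private *np = netdev_priv(dev);

            /* External PHY: go through the bit-banged MII port. */
            return miiport_read(dev, np->phy_addr_external, reg);
    }

    static void mdio_write(struct net_device *dev, int reg, u16 data)
    {
            struct netdev_private *np = netdev_priv(dev);

            miiport_write(dev, np->phy_addr_external, reg, data);
    }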
1145 struct netdev_private *np = netdev_priv(dev); in init_phy_fixup() local
1153 if (np->autoneg == AUTONEG_ENABLE) { in init_phy_fixup()
1156 np->advertising != mdio_read(dev, MII_ADVERTISE)) in init_phy_fixup()
1160 mdio_write(dev, MII_ADVERTISE, np->advertising); in init_phy_fixup()
1165 if (np->speed == SPEED_100) in init_phy_fixup()
1167 if (np->duplex == DUPLEX_FULL) in init_phy_fixup()
1182 np->mii = (mdio_read(dev, MII_PHYSID1) << 16) in init_phy_fixup()
1186 switch (np->mii) { in init_phy_fixup()
1221 np->dspcfg = (np->srr <= SRR_DP83815_C)? in init_phy_fixup()
1223 writew(np->dspcfg, ioaddr + DSPCFG); in init_phy_fixup()
1232 if (np->dspcfg == dspcfg) in init_phy_fixup()
1236 if (netif_msg_link(np)) { in init_phy_fixup()
1258 struct netdev_private *np = netdev_priv(dev); in switch_port_external() local
1266 if (netif_msg_link(np)) { in switch_port_external()
1283 move_int_phy(dev, np->phy_addr_external); in switch_port_external()
1291 struct netdev_private *np = netdev_priv(dev); in switch_port_internal() local
1301 if (netif_msg_link(np)) { in switch_port_internal()
1322 if (i==NATSEMI_HW_TIMEOUT && netif_msg_link(np)) { in switch_port_internal()
1343 struct netdev_private *np = netdev_priv(dev); in find_mii() local
1362 np->mii = (mdio_read(dev, MII_PHYSID1) << 16) in find_mii()
1364 if (netif_msg_probe(np)) { in find_mii()
1366 pci_name(np->pci_dev), np->mii, i); in find_mii()
1392 struct netdev_private *np = netdev_priv(dev); in natsemi_reset() local
1430 } else if (netif_msg_hw(np)) { in natsemi_reset()
1464 struct netdev_private *np = netdev_priv(dev); in reset_rx() local
1467 np->intr_status &= ~RxResetDone; in reset_rx()
1472 np->intr_status |= readl(ioaddr + IntrStatus); in reset_rx()
1473 if (np->intr_status & RxResetDone) in reset_rx()
1480 } else if (netif_msg_hw(np)) { in reset_rx()
1488 struct netdev_private *np = netdev_priv(dev); in natsemi_reload_eeprom() local
1500 pci_name(np->pci_dev), i*50); in natsemi_reload_eeprom()
1501 } else if (netif_msg_hw(np)) { in natsemi_reload_eeprom()
1503 pci_name(np->pci_dev), i*50); in natsemi_reload_eeprom()
1510 struct netdev_private *np = netdev_priv(dev); in natsemi_stop_rxtx() local
1522 } else if (netif_msg_hw(np)) { in natsemi_stop_rxtx()
1530 struct netdev_private *np = netdev_priv(dev); in netdev_open() local
1532 const int irq = np->pci_dev->irq; in netdev_open()
1541 if (netif_msg_ifup(np)) in netdev_open()
1549 napi_enable(&np->napi); in netdev_open()
1552 spin_lock_irq(&np->lock); in netdev_open()
1561 writel(np->cur_rx_mode, ioaddr + RxFilterAddr); in netdev_open()
1562 spin_unlock_irq(&np->lock); in netdev_open()
1566 if (netif_msg_ifup(np)) in netdev_open()
1571 timer_setup(&np->timer, netdev_timer, 0); in netdev_open()
1572 np->timer.expires = round_jiffies(jiffies + NATSEMI_TIMER_FREQ); in netdev_open()
1573 add_timer(&np->timer); in netdev_open()
1580 struct netdev_private *np = netdev_priv(dev); in do_cable_magic() local
1586 if (np->srr >= SRR_DP83816_A5) in do_cable_magic()
1609 np = netdev_priv(dev); in do_cable_magic()
1615 np->dspcfg = data | DSPCFG_LOCK; in do_cable_magic()
1616 writew(np->dspcfg, ioaddr + DSPCFG); in do_cable_magic()
1625 struct netdev_private *np = netdev_priv(dev); in undo_cable_magic() local
1631 if (np->srr >= SRR_DP83816_A5) in undo_cable_magic()
1637 np->dspcfg = data & ~DSPCFG_LOCK; in undo_cable_magic()
1638 writew(np->dspcfg, ioaddr + DSPCFG); in undo_cable_magic()
1644 struct netdev_private *np = netdev_priv(dev); in check_link() local
1646 int duplex = np->duplex; in check_link()
1650 if (np->ignore_phy) in check_link()
1662 if (netif_msg_link(np)) in check_link()
1671 if (netif_msg_link(np)) in check_link()
1677 duplex = np->full_duplex; in check_link()
1681 np->advertising & mdio_read(dev, MII_LPA)); in check_link()
1690 if (duplex ^ !!(np->rx_config & RxAcceptTx)) { in check_link()
1691 if (netif_msg_link(np)) in check_link()
1697 np->rx_config |= RxAcceptTx; in check_link()
1698 np->tx_config |= TxCarrierIgn | TxHeartIgn; in check_link()
1700 np->rx_config &= ~RxAcceptTx; in check_link()
1701 np->tx_config &= ~(TxCarrierIgn | TxHeartIgn); in check_link()
1703 writel(np->tx_config, ioaddr + TxConfig); in check_link()
1704 writel(np->rx_config, ioaddr + RxConfig); in check_link()
1710 struct netdev_private *np = netdev_priv(dev); in init_registers() local
1718 writel(np->ring_dma, ioaddr + RxRingPtr); in init_registers()
1719 writel(np->ring_dma + RX_RING_SIZE * sizeof(struct netdev_desc), in init_registers()
1736 np->tx_config = TxAutoPad | TxCollRetry | TxMxdma_256 | in init_registers()
1738 writel(np->tx_config, ioaddr + TxConfig); in init_registers()
1743 np->rx_config = RxMxdma_256 | RX_DRTH_VAL; in init_registers()
1745 if (np->rx_buf_sz > NATSEMI_LONGPKT) in init_registers()
1746 np->rx_config |= RxAcceptLong; in init_registers()
1748 writel(np->rx_config, ioaddr + RxConfig); in init_registers()
1756 np->SavedClkRun = readl(ioaddr + ClkRun); in init_registers()
1757 writel(np->SavedClkRun & ~PMEEnable, ioaddr + ClkRun); in init_registers()
1758 if (np->SavedClkRun & PMEStatus && netif_msg_wol(np)) { in init_registers()
1789 struct netdev_private *np = from_timer(np, t, timer); in netdev_timer() local
1790 struct net_device *dev = np->dev; in netdev_timer()
1793 const int irq = np->pci_dev->irq; in netdev_timer()
1795 if (netif_msg_timer(np)) { in netdev_timer()
1806 spin_lock_irq(&np->lock); in netdev_timer()
1811 if (np->dspcfg_workaround && dspcfg != np->dspcfg) { in netdev_timer()
1813 spin_unlock_irq(&np->lock); in netdev_timer()
1814 if (netif_msg_drv(np)) in netdev_timer()
1818 spin_lock_irq(&np->lock); in netdev_timer()
1823 spin_unlock_irq(&np->lock); in netdev_timer()
1828 spin_unlock_irq(&np->lock); in netdev_timer()
1833 spin_unlock_irq(&np->lock); in netdev_timer()
1836 spin_lock_irq(&np->lock); in netdev_timer()
1838 spin_unlock_irq(&np->lock); in netdev_timer()
1840 if (np->oom) { in netdev_timer()
1842 np->oom = 0; in netdev_timer()
1845 if (!np->oom) { in netdev_timer()
1853 mod_timer(&np->timer, round_jiffies(jiffies + next_tick)); in netdev_timer()
1855 mod_timer(&np->timer, jiffies + next_tick); in netdev_timer()
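Lines 1571-1573 and 1789-1790 are the two halves of the kernel timer idiom: timer_setup() registers which timer_list field is embedded in netdev_private, and from_timer() (a container_of() wrapper) recovers the enclosing struct in the handler. A reduced sketch of the pairing, with the handler body trimmed to the re-arm:

    static void netdev_timer(struct timer_list *t)
    {
            /* Recover the private struct from the embedded timer_list. */
            struct netdev_private *np = from_timer(np, t, timer);
            struct net_device *dev = np->dev;

            /* ... link check, DSPCFG re-check, and rx refill on dev elided ... */

            mod_timer(&np->timer, round_jiffies(jiffies + NATSEMI_TIMER_FREQ));
    }

    /* In netdev_open(), lines 1571-1573: */
    timer_setup(&np->timer, netdev_timer, 0);
    np->timer.expires = round_jiffies(jiffies + NATSEMI_TIMER_FREQ);
    add_timer(&np->timer);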
1860 struct netdev_private *np = netdev_priv(dev); in dump_ring() local
1862 if (netif_msg_pktdata(np)) { in dump_ring()
1864 printk(KERN_DEBUG " Tx ring at %p:\n", np->tx_ring); in dump_ring()
1867 i, np->tx_ring[i].next_desc, in dump_ring()
1868 np->tx_ring[i].cmd_status, in dump_ring()
1869 np->tx_ring[i].addr); in dump_ring()
1871 printk(KERN_DEBUG " Rx ring %p:\n", np->rx_ring); in dump_ring()
1874 i, np->rx_ring[i].next_desc, in dump_ring()
1875 np->rx_ring[i].cmd_status, in dump_ring()
1876 np->rx_ring[i].addr); in dump_ring()
1883 struct netdev_private *np = netdev_priv(dev); in ns_tx_timeout() local
1885 const int irq = np->pci_dev->irq; in ns_tx_timeout()
1888 spin_lock_irq(&np->lock); in ns_tx_timeout()
1889 if (!np->hands_off) { in ns_tx_timeout()
1890 if (netif_msg_tx_err(np)) in ns_tx_timeout()
1905 spin_unlock_irq(&np->lock); in ns_tx_timeout()
1915 struct netdev_private *np = netdev_priv(dev); in alloc_ring() local
1916 np->rx_ring = dma_alloc_coherent(&np->pci_dev->dev, in alloc_ring()
1918 &np->ring_dma, GFP_KERNEL); in alloc_ring()
1919 if (!np->rx_ring) in alloc_ring()
1921 np->tx_ring = &np->rx_ring[RX_RING_SIZE]; in alloc_ring()
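alloc_ring() (lines 1916-1921) makes a single coherent DMA allocation and carves both descriptor rings out of it: np->tx_ring is simply a pointer RX_RING_SIZE descriptors into the same block. Sketch with the elided size expression filled in by assumption:

    static int alloc_ring(struct net_device *dev)
    {
            struct netdev_private *np = netdev_priv(dev);

            /* Assumed size: rx and tx rings share one coherent allocation. */
            np->rx_ring = dma_alloc_coherent(&np->pci_dev->dev,
                            sizeof(struct netdev_desc) * (RX_RING_SIZE + TX_RING_SIZE),
                            &np->ring_dma, GFP_KERNEL);
            if (!np->rx_ring)
                    return -ENOMEM;
            /* Tx descriptors start right after the rx ring in the same block. */
            np->tx_ring = &np->rx_ring[RX_RING_SIZE];
            return 0;
    }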
1927 struct netdev_private *np = netdev_priv(dev); in refill_rx() local
1930 for (; np->cur_rx - np->dirty_rx > 0; np->dirty_rx++) { in refill_rx()
1932 int entry = np->dirty_rx % RX_RING_SIZE; in refill_rx()
1933 if (np->rx_skbuff[entry] == NULL) { in refill_rx()
1934 unsigned int buflen = np->rx_buf_sz+NATSEMI_PADDING; in refill_rx()
1936 np->rx_skbuff[entry] = skb; in refill_rx()
1939 np->rx_dma[entry] = dma_map_single(&np->pci_dev->dev, in refill_rx()
1942 if (dma_mapping_error(&np->pci_dev->dev, np->rx_dma[entry])) { in refill_rx()
1944 np->rx_skbuff[entry] = NULL; in refill_rx()
1947 np->rx_ring[entry].addr = cpu_to_le32(np->rx_dma[entry]); in refill_rx()
1949 np->rx_ring[entry].cmd_status = cpu_to_le32(np->rx_buf_sz); in refill_rx()
1951 if (np->cur_rx - np->dirty_rx == RX_RING_SIZE) { in refill_rx()
1952 if (netif_msg_rx_err(np)) in refill_rx()
1954 np->oom = 1; in refill_rx()
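The refill loop above (lines 1930-1954) is the streaming-DMA receive pattern: map each fresh skb, verify the mapping before publishing the descriptor to the chip, and record out-of-memory so the timer path (np->oom at lines 1840-1845) can retry. Condensed sketch of the per-entry body, assembled from the lines shown:

    np->rx_skbuff[entry] = skb;
    np->rx_dma[entry] = dma_map_single(&np->pci_dev->dev, skb->data,
                                       buflen, DMA_FROM_DEVICE);
    if (dma_mapping_error(&np->pci_dev->dev, np->rx_dma[entry])) {
            dev_kfree_skb_any(skb);        /* assumed cleanup of the unmapped skb */
            np->rx_skbuff[entry] = NULL;
            break;                         /* leave the hole; retried via np->oom */
    }
    /* Only now hand the buffer to the hardware. */
    np->rx_ring[entry].addr = cpu_to_le32(np->rx_dma[entry]);
    np->rx_ring[entry].cmd_status = cpu_to_le32(np->rx_buf_sz);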
1960 struct netdev_private *np = netdev_priv(dev); in set_bufsize() local
1962 np->rx_buf_sz = ETH_DATA_LEN + NATSEMI_HEADERS; in set_bufsize()
1964 np->rx_buf_sz = dev->mtu + NATSEMI_HEADERS; in set_bufsize()
1970 struct netdev_private *np = netdev_priv(dev); in init_ring() local
1974 np->dirty_tx = np->cur_tx = 0; in init_ring()
1976 np->tx_skbuff[i] = NULL; in init_ring()
1977 np->tx_ring[i].next_desc = cpu_to_le32(np->ring_dma in init_ring()
1980 np->tx_ring[i].cmd_status = 0; in init_ring()
1984 np->dirty_rx = 0; in init_ring()
1985 np->cur_rx = RX_RING_SIZE; in init_ring()
1986 np->oom = 0; in init_ring()
1989 np->rx_head_desc = &np->rx_ring[0]; in init_ring()
1996 np->rx_ring[i].next_desc = cpu_to_le32(np->ring_dma in init_ring()
1999 np->rx_ring[i].cmd_status = cpu_to_le32(DescOwn); in init_ring()
2000 np->rx_skbuff[i] = NULL; in init_ring()
2008 struct netdev_private *np = netdev_priv(dev); in drain_tx() local
2012 if (np->tx_skbuff[i]) { in drain_tx()
2013 dma_unmap_single(&np->pci_dev->dev, np->tx_dma[i], in drain_tx()
2014 np->tx_skbuff[i]->len, DMA_TO_DEVICE); in drain_tx()
2015 dev_kfree_skb(np->tx_skbuff[i]); in drain_tx()
2018 np->tx_skbuff[i] = NULL; in drain_tx()
2024 struct netdev_private *np = netdev_priv(dev); in drain_rx() local
2025 unsigned int buflen = np->rx_buf_sz; in drain_rx()
2030 np->rx_ring[i].cmd_status = 0; in drain_rx()
2031 np->rx_ring[i].addr = cpu_to_le32(0xBADF00D0); /* An invalid address. */ in drain_rx()
2032 if (np->rx_skbuff[i]) { in drain_rx()
2033 dma_unmap_single(&np->pci_dev->dev, np->rx_dma[i], in drain_rx()
2036 dev_kfree_skb(np->rx_skbuff[i]); in drain_rx()
2038 np->rx_skbuff[i] = NULL; in drain_rx()
2050 struct netdev_private *np = netdev_priv(dev); in free_ring() local
2051 dma_free_coherent(&np->pci_dev->dev, in free_ring()
2053 np->rx_ring, np->ring_dma); in free_ring()
2058 struct netdev_private *np = netdev_priv(dev); in reinit_rx() local
2062 np->dirty_rx = 0; in reinit_rx()
2063 np->cur_rx = RX_RING_SIZE; in reinit_rx()
2064 np->rx_head_desc = &np->rx_ring[0]; in reinit_rx()
2067 np->rx_ring[i].cmd_status = cpu_to_le32(DescOwn); in reinit_rx()
2074 struct netdev_private *np = netdev_priv(dev); in reinit_ring() local
2079 np->dirty_tx = np->cur_tx = 0; in reinit_ring()
2081 np->tx_ring[i].cmd_status = 0; in reinit_ring()
2088 struct netdev_private *np = netdev_priv(dev); in start_tx() local
2097 entry = np->cur_tx % TX_RING_SIZE; in start_tx()
2099 np->tx_skbuff[entry] = skb; in start_tx()
2100 np->tx_dma[entry] = dma_map_single(&np->pci_dev->dev, skb->data, in start_tx()
2102 if (dma_mapping_error(&np->pci_dev->dev, np->tx_dma[entry])) { in start_tx()
2103 np->tx_skbuff[entry] = NULL; in start_tx()
2109 np->tx_ring[entry].addr = cpu_to_le32(np->tx_dma[entry]); in start_tx()
2111 spin_lock_irqsave(&np->lock, flags); in start_tx()
2113 if (!np->hands_off) { in start_tx()
2114 np->tx_ring[entry].cmd_status = cpu_to_le32(DescOwn | skb->len); in start_tx()
2118 np->cur_tx++; in start_tx()
2119 if (np->cur_tx - np->dirty_tx >= TX_QUEUE_LEN - 1) { in start_tx()
2121 if (np->cur_tx - np->dirty_tx >= TX_QUEUE_LEN - 1) in start_tx()
2130 spin_unlock_irqrestore(&np->lock, flags); in start_tx()
2132 if (netif_msg_tx_queued(np)) { in start_tx()
2134 dev->name, np->cur_tx, entry); in start_tx()
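start_tx() (lines 2097-2134) is the canonical descriptor handoff: the skb is mapped outside the lock, and the descriptor is published only under np->lock and only if the close/suspend paths have not set hands_off; writing DescOwn together with the length is what transfers ownership to the chip. Condensed sketch from the lines shown (the netif_stop_queue() call at the ring-full check is inferred, not in the matched lines):

    np->tx_dma[entry] = dma_map_single(&np->pci_dev->dev, skb->data,
                                       skb->len, DMA_TO_DEVICE);
    np->tx_ring[entry].addr = cpu_to_le32(np->tx_dma[entry]);

    spin_lock_irqsave(&np->lock, flags);
    if (!np->hands_off) {
            /* DescOwn | skb->len hands the descriptor to the hardware. */
            np->tx_ring[entry].cmd_status = cpu_to_le32(DescOwn | skb->len);
            np->cur_tx++;
            /* Throttle: stop the queue when the ring is nearly full. */
            if (np->cur_tx - np->dirty_tx >= TX_QUEUE_LEN - 1)
                    netif_stop_queue(dev);
    }
    spin_unlock_irqrestore(&np->lock, flags);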
2141 struct netdev_private *np = netdev_priv(dev); in netdev_tx_done() local
2143 for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) { in netdev_tx_done()
2144 int entry = np->dirty_tx % TX_RING_SIZE; in netdev_tx_done()
2145 if (np->tx_ring[entry].cmd_status & cpu_to_le32(DescOwn)) in netdev_tx_done()
2147 if (netif_msg_tx_done(np)) in netdev_tx_done()
2150 dev->name, np->dirty_tx, in netdev_tx_done()
2151 le32_to_cpu(np->tx_ring[entry].cmd_status)); in netdev_tx_done()
2152 if (np->tx_ring[entry].cmd_status & cpu_to_le32(DescPktOK)) { in netdev_tx_done()
2154 dev->stats.tx_bytes += np->tx_skbuff[entry]->len; in netdev_tx_done()
2157 le32_to_cpu(np->tx_ring[entry].cmd_status); in netdev_tx_done()
2168 dma_unmap_single(&np->pci_dev->dev, np->tx_dma[entry], in netdev_tx_done()
2169 np->tx_skbuff[entry]->len, DMA_TO_DEVICE); in netdev_tx_done()
2171 dev_consume_skb_irq(np->tx_skbuff[entry]); in netdev_tx_done()
2172 np->tx_skbuff[entry] = NULL; in netdev_tx_done()
2175 np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 4) { in netdev_tx_done()
2186 struct netdev_private *np = netdev_priv(dev); in intr_handler() local
2192 if (np->hands_off || !readl(ioaddr + IntrEnable)) in intr_handler()
2195 np->intr_status = readl(ioaddr + IntrStatus); in intr_handler()
2197 if (!np->intr_status) in intr_handler()
2200 if (netif_msg_intr(np)) in intr_handler()
2203 dev->name, np->intr_status, in intr_handler()
2206 prefetch(&np->rx_skbuff[np->cur_rx % RX_RING_SIZE]); in intr_handler()
2208 if (napi_schedule_prep(&np->napi)) { in intr_handler()
2211 __napi_schedule(&np->napi); in intr_handler()
2215 dev->name, np->intr_status, in intr_handler()
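Lines 2206-2211 are the interrupt-side half of the NAPI split: napi_schedule_prep() atomically claims the poll, after which the handler masks chip interrupts and schedules the softirq half; natsemi_poll() later re-enables them. Sketch, where natsemi_irq_disable() is assumed to be the driver's interrupt-mask helper (it is not among the lines matched above):

    prefetch(&np->rx_skbuff[np->cur_rx % RX_RING_SIZE]);

    if (napi_schedule_prep(&np->napi)) {
            /* Mask chip interrupts; the poll routine unmasks when done. */
            natsemi_irq_disable(dev);
            __napi_schedule(&np->napi);
    }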
2226 struct netdev_private *np = container_of(napi, struct netdev_private, napi); in natsemi_poll() local
2227 struct net_device *dev = np->dev; in natsemi_poll()
2232 if (netif_msg_intr(np)) in natsemi_poll()
2235 dev->name, np->intr_status, in natsemi_poll()
2240 if (np->intr_status & in natsemi_poll()
2246 if (np->intr_status & in natsemi_poll()
2248 spin_lock(&np->lock); in natsemi_poll()
2250 spin_unlock(&np->lock); in natsemi_poll()
2254 if (np->intr_status & IntrAbnormalSummary) in natsemi_poll()
2255 netdev_error(dev, np->intr_status); in natsemi_poll()
2260 np->intr_status = readl(ioaddr + IntrStatus); in natsemi_poll()
2261 } while (np->intr_status); in natsemi_poll()
2267 spin_lock(&np->lock); in natsemi_poll()
2268 if (!np->hands_off) in natsemi_poll()
2270 spin_unlock(&np->lock); in natsemi_poll()
2279 struct netdev_private *np = netdev_priv(dev); in netdev_rx() local
2280 int entry = np->cur_rx % RX_RING_SIZE; in netdev_rx()
2281 int boguscnt = np->dirty_rx + RX_RING_SIZE - np->cur_rx; in netdev_rx()
2282 s32 desc_status = le32_to_cpu(np->rx_head_desc->cmd_status); in netdev_rx()
2283 unsigned int buflen = np->rx_buf_sz; in netdev_rx()
2289 if (netif_msg_rx_status(np)) in netdev_rx()
2306 if (netif_msg_rx_err(np)) in netdev_rx()
2312 np->cur_rx, desc_status); in netdev_rx()
2320 spin_lock_irqsave(&np->lock, flags); in netdev_rx()
2323 writel(np->ring_dma, ioaddr + RxRingPtr); in netdev_rx()
2325 spin_unlock_irqrestore(&np->lock, flags); in netdev_rx()
2343 } else if (pkt_len > np->rx_buf_sz) { in netdev_rx()
2357 dma_sync_single_for_cpu(&np->pci_dev->dev, in netdev_rx()
2358 np->rx_dma[entry], in netdev_rx()
2362 np->rx_skbuff[entry]->data, pkt_len); in netdev_rx()
2364 dma_sync_single_for_device(&np->pci_dev->dev, in netdev_rx()
2365 np->rx_dma[entry], in netdev_rx()
2369 dma_unmap_single(&np->pci_dev->dev, in netdev_rx()
2370 np->rx_dma[entry], in netdev_rx()
2373 skb_put(skb = np->rx_skbuff[entry], pkt_len); in netdev_rx()
2374 np->rx_skbuff[entry] = NULL; in netdev_rx()
2381 entry = (++np->cur_rx) % RX_RING_SIZE; in netdev_rx()
2382 np->rx_head_desc = &np->rx_ring[entry]; in netdev_rx()
2383 desc_status = le32_to_cpu(np->rx_head_desc->cmd_status); in netdev_rx()
2388 if (np->oom) in netdev_rx()
2389 mod_timer(&np->timer, jiffies + 1); in netdev_rx()
2396 struct netdev_private *np = netdev_priv(dev); in netdev_error() local
2399 spin_lock(&np->lock); in netdev_error()
2403 netif_msg_link(np)) { in netdev_error()
2407 np->advertising, lpa); in netdev_error()
2418 if ((np->tx_config & TxDrthMask) < TX_DRTH_VAL_LIMIT) { in netdev_error()
2419 np->tx_config += TX_DRTH_VAL_INC; in netdev_error()
2420 if (netif_msg_tx_err(np)) in netdev_error()
2423 dev->name, np->tx_config); in netdev_error()
2425 if (netif_msg_tx_err(np)) in netdev_error()
2428 dev->name, np->tx_config); in netdev_error()
2430 writel(np->tx_config, ioaddr + TxConfig); in netdev_error()
2432 if (intr_status & WOLPkt && netif_msg_wol(np)) { in netdev_error()
2438 if (netif_msg_rx_err(np) && netif_msg_intr(np)) { in netdev_error()
2454 spin_unlock(&np->lock); in netdev_error()
2468 struct netdev_private *np = netdev_priv(dev); in get_stats() local
2471 spin_lock_irq(&np->lock); in get_stats()
2472 if (netif_running(dev) && !np->hands_off) in get_stats()
2474 spin_unlock_irq(&np->lock); in get_stats()
2482 struct netdev_private *np = netdev_priv(dev); in natsemi_poll_controller() local
2483 const int irq = np->pci_dev->irq; in natsemi_poll_controller()
2495 struct netdev_private *np = netdev_priv(dev); in __set_rx_mode() local
2524 np->cur_rx_mode = rx_mode; in __set_rx_mode()
2533 struct netdev_private *np = netdev_priv(dev); in natsemi_change_mtu() local
2535 const int irq = np->pci_dev->irq; in natsemi_change_mtu()
2538 spin_lock(&np->lock); in natsemi_change_mtu()
2546 writel(np->ring_dma, ioaddr + RxRingPtr); in natsemi_change_mtu()
2549 spin_unlock(&np->lock); in natsemi_change_mtu()
2557 struct netdev_private *np = netdev_priv(dev); in set_rx_mode() local
2558 spin_lock_irq(&np->lock); in set_rx_mode()
2559 if (!np->hands_off) in set_rx_mode()
2561 spin_unlock_irq(&np->lock); in set_rx_mode()
2566 struct netdev_private *np = netdev_priv(dev); in get_drvinfo() local
2569 strscpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info)); in get_drvinfo()
2579 struct netdev_private *np = netdev_priv(dev); in get_eeprom_len() local
2580 return np->eeprom_size; in get_eeprom_len()
2586 struct netdev_private *np = netdev_priv(dev); in get_link_ksettings() local
2587 spin_lock_irq(&np->lock); in get_link_ksettings()
2589 spin_unlock_irq(&np->lock); in get_link_ksettings()
2596 struct netdev_private *np = netdev_priv(dev); in set_link_ksettings() local
2598 spin_lock_irq(&np->lock); in set_link_ksettings()
2600 spin_unlock_irq(&np->lock); in set_link_ksettings()
2606 struct netdev_private *np = netdev_priv(dev); in get_wol() local
2607 spin_lock_irq(&np->lock); in get_wol()
2610 spin_unlock_irq(&np->lock); in get_wol()
2615 struct netdev_private *np = netdev_priv(dev); in set_wol() local
2617 spin_lock_irq(&np->lock); in set_wol()
2620 spin_unlock_irq(&np->lock); in set_wol()
2626 struct netdev_private *np = netdev_priv(dev); in get_regs() local
2628 spin_lock_irq(&np->lock); in get_regs()
2630 spin_unlock_irq(&np->lock); in get_regs()
2635 struct netdev_private *np = netdev_priv(dev); in get_msglevel() local
2636 return np->msg_enable; in get_msglevel()
2641 struct netdev_private *np = netdev_priv(dev); in set_msglevel() local
2642 np->msg_enable = val; in set_msglevel()
2668 struct netdev_private *np = netdev_priv(dev); in get_eeprom() local
2672 eebuf = kmalloc(np->eeprom_size, GFP_KERNEL); in get_eeprom()
2677 spin_lock_irq(&np->lock); in get_eeprom()
2679 spin_unlock_irq(&np->lock); in get_eeprom()
2704 struct netdev_private *np = netdev_priv(dev); in netdev_set_wol() local
2721 if (np->srr >= SRR_DP83815_D) { in netdev_set_wol()
2734 struct netdev_private *np = netdev_priv(dev); in netdev_get_wol() local
2741 if (np->srr >= SRR_DP83815_D) { in netdev_get_wol()
2770 struct netdev_private *np = netdev_priv(dev); in netdev_set_sopass() local
2775 if (np->srr < SRR_DP83815_D) { in netdev_set_sopass()
2802 struct netdev_private *np = netdev_priv(dev); in netdev_get_sopass() local
2807 if (np->srr < SRR_DP83815_D) { in netdev_get_sopass()
2832 struct netdev_private *np = netdev_priv(dev); in netdev_get_ecmd() local
2837 ecmd->base.speed = np->speed; in netdev_get_ecmd()
2838 ecmd->base.duplex = np->duplex; in netdev_get_ecmd()
2839 ecmd->base.autoneg = np->autoneg; in netdev_get_ecmd()
2842 if (np->advertising & ADVERTISE_10HALF) in netdev_get_ecmd()
2844 if (np->advertising & ADVERTISE_10FULL) in netdev_get_ecmd()
2846 if (np->advertising & ADVERTISE_100HALF) in netdev_get_ecmd()
2848 if (np->advertising & ADVERTISE_100FULL) in netdev_get_ecmd()
2854 ecmd->base.phy_address = np->phy_addr_external; in netdev_get_ecmd()
2891 np->advertising & mdio_read(dev, MII_LPA)); in netdev_get_ecmd()
2915 struct netdev_private *np = netdev_priv(dev); in netdev_set_ecmd() local
2948 if (np->ignore_phy && (ecmd->base.autoneg == AUTONEG_ENABLE || in netdev_set_ecmd()
2969 np->autoneg = ecmd->base.autoneg; in netdev_set_ecmd()
2970 np->phy_addr_external = ecmd->base.phy_address & PhyAddrMask; in netdev_set_ecmd()
2971 if (np->autoneg == AUTONEG_ENABLE) { in netdev_set_ecmd()
2973 np->advertising &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4); in netdev_set_ecmd()
2975 np->advertising |= ADVERTISE_10HALF; in netdev_set_ecmd()
2977 np->advertising |= ADVERTISE_10FULL; in netdev_set_ecmd()
2979 np->advertising |= ADVERTISE_100HALF; in netdev_set_ecmd()
2981 np->advertising |= ADVERTISE_100FULL; in netdev_set_ecmd()
2983 np->speed = ecmd->base.speed; in netdev_set_ecmd()
2984 np->duplex = ecmd->base.duplex; in netdev_set_ecmd()
2986 if (np->duplex == DUPLEX_HALF) in netdev_set_ecmd()
2987 np->full_duplex = 0; in netdev_set_ecmd()
3059 struct netdev_private *np = netdev_priv(dev); in netdev_get_eeprom() local
3062 for (i = 0; i < np->eeprom_size/2; i++) { in netdev_get_eeprom()
3075 struct netdev_private *np = netdev_priv(dev); in netdev_ioctl() local
3079 data->phy_id = np->phy_addr_external; in netdev_ioctl()
3088 if ((data->phy_id & 0x1f) == np->phy_addr_external) in netdev_ioctl()
3102 if ((data->phy_id & 0x1f) == np->phy_addr_external) { in netdev_ioctl()
3104 np->advertising = data->val_in; in netdev_ioctl()
3109 if ((data->phy_id & 0x1f) == np->phy_addr_external) { in netdev_ioctl()
3111 np->advertising = data->val_in; in netdev_ioctl()
3127 struct netdev_private *np = netdev_priv(dev); in enable_wol_mode() local
3129 if (netif_msg_wol(np)) in enable_wol_mode()
3143 writel(np->SavedClkRun | PMEEnable | PMEStatus, ioaddr + ClkRun); in enable_wol_mode()
3160 struct netdev_private *np = netdev_priv(dev); in netdev_close() local
3161 const int irq = np->pci_dev->irq; in netdev_close()
3163 if (netif_msg_ifdown(np)) in netdev_close()
3167 if (netif_msg_pktdata(np)) in netdev_close()
3170 dev->name, np->cur_tx, np->dirty_tx, in netdev_close()
3171 np->cur_rx, np->dirty_rx); in netdev_close()
3173 napi_disable(&np->napi); in netdev_close()
3182 del_timer_sync(&np->timer); in netdev_close()
3184 spin_lock_irq(&np->lock); in netdev_close()
3186 np->hands_off = 1; in netdev_close()
3187 spin_unlock_irq(&np->lock); in netdev_close()
3196 spin_lock_irq(&np->lock); in netdev_close()
3197 np->hands_off = 0; in netdev_close()
3208 spin_unlock_irq(&np->lock); in netdev_close()
3227 writel(np->SavedClkRun, ioaddr + ClkRun); in netdev_close()
3274 struct netdev_private *np = netdev_priv(dev); in natsemi_suspend() local
3279 const int irq = np->pci_dev->irq; in natsemi_suspend()
3281 del_timer_sync(&np->timer); in natsemi_suspend()
3284 spin_lock_irq(&np->lock); in natsemi_suspend()
3287 np->hands_off = 1; in natsemi_suspend()
3291 spin_unlock_irq(&np->lock); in natsemi_suspend()
3294 napi_disable(&np->napi); in natsemi_suspend()
3312 writel(np->SavedClkRun, ioaddr + ClkRun); in natsemi_suspend()
3325 struct netdev_private *np = netdev_priv(dev); in natsemi_resume() local
3331 const int irq = np->pci_dev->irq; in natsemi_resume()
3333 BUG_ON(!np->hands_off); in natsemi_resume()
3336 napi_enable(&np->napi); in natsemi_resume()
3341 spin_lock_irq(&np->lock); in natsemi_resume()
3342 np->hands_off = 0; in natsemi_resume()
3345 spin_unlock_irq(&np->lock); in natsemi_resume()
3348 mod_timer(&np->timer, round_jiffies(jiffies + 1*HZ)); in natsemi_resume()
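A closing note on the hands_off flag that threads through this listing (lines 880, 1889, 2113, 2268, 2472, 2559, 3186, 3287, 3342): netdev_close() and natsemi_suspend() set it under np->lock, and every path that touches hardware (start_tx, intr_handler, set_rx_mode, get_stats) checks it first, so teardown can quiesce the device without racing the datapath. Sketch of the protocol, assembled from the lines shown:

    /* Teardown side (netdev_close / natsemi_suspend): */
    spin_lock_irq(&np->lock);
    np->hands_off = 1;              /* datapath must no longer touch the chip */
    spin_unlock_irq(&np->lock);

    /* Datapath side (e.g. set_rx_mode, lines 2558-2561): */
    spin_lock_irq(&np->lock);
    if (!np->hands_off)
            __set_rx_mode(dev);
    spin_unlock_irq(&np->lock);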