Lines Matching refs:dev

360 static int i596_open(struct net_device *dev);
361 static netdev_tx_t i596_start_xmit(struct sk_buff *skb, struct net_device *dev);
363 static int i596_close(struct net_device *dev);
364 static void i596_add_cmd(struct net_device *dev, struct i596_cmd *cmd);
365 static void i596_tx_timeout (struct net_device *dev, unsigned int txqueue);
367 static void set_multicast_list(struct net_device *dev);
374 static inline void CA(struct net_device *dev) in CA() argument
378 ((struct i596_reg *) dev->base_addr)->ca = 1; in CA()
385 i = *(volatile u32 *) (dev->base_addr); in CA()
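
The CA() lines above show the chip's "channel attention" strobe: on MVME16x the driver sets the ca field of the memory-mapped register block at dev->base_addr, while on BVME6000 a dummy 32-bit read of base_addr does the same job. A minimal sketch of that pattern, assuming a hypothetical register layout (struct i596_reg_sketch) and a board flag supplied by the caller:

#include <linux/netdevice.h>

/* Hypothetical register layout; the real struct i596_reg lives in the driver. */
struct i596_reg_sketch {
	unsigned short porthi;
	unsigned short portlo;
	u32 ca;
};

static inline void ca_sketch(struct net_device *dev, bool mvme_style)
{
	if (mvme_style) {
		/* MVME16x: write the CA bit in the board register block. */
		((struct i596_reg_sketch *) dev->base_addr)->ca = 1;
	} else {
		/* BVME6000: any 32-bit read of the base address pulses CA. */
		u32 dummy = *(volatile u32 *) dev->base_addr;

		(void) dummy;
	}
}
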
391 static inline void MPU_PORT(struct net_device *dev, int c, volatile void *x) in MPU_PORT() argument
395 struct i596_reg *p = (struct i596_reg *) (dev->base_addr); in MPU_PORT()
404 *(volatile u32 *) dev->base_addr = v; in MPU_PORT()
406 *(volatile u32 *) dev->base_addr = v; in MPU_PORT()
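
MPU_PORT() drives the i82596 PORT interface: the command code is ORed into the low bits of a 32-bit address and written to the chip, either as two 16-bit halves through porthi/portlo (MVME16x) or as the same 32-bit word written twice with a short delay (BVME6000). A sketch of both paths, reusing struct i596_reg_sketch from the CA sketch above; the real driver additionally swaps the halves for PORT_RESET:

#include <linux/delay.h>

/* i82596 PORT command codes, SK_-prefixed for the sketch. */
#define SK_PORT_RESET	0x00
#define SK_PORT_ALTSCP	0x02

static inline void mpu_port_sketch(struct net_device *dev, int c,
				   volatile void *x, bool mvme_style)
{
	u32 v = (u32) c | (u32) (unsigned long) x;	/* command in the low bits */

	if (mvme_style) {
		/* MVME16x: feed the PORT word as two 16-bit halves. */
		struct i596_reg_sketch *p =
			(struct i596_reg_sketch *) dev->base_addr;

		p->porthi = v >> 16;
		p->portlo = v;
	} else {
		/* BVME6000: the chip latches the word on two back-to-back writes. */
		*(volatile u32 *) dev->base_addr = v;
		udelay(1);
		*(volatile u32 *) dev->base_addr = v;
	}
}
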
412 static inline int wait_istat(struct net_device *dev, struct i596_private *lp, int delcnt, char *str) in wait_istat() argument
418 dev->name, str, lp->scb.status, lp->scb.command); in wait_istat()
426 static inline int wait_cmd(struct net_device *dev, struct i596_private *lp, int delcnt, char *str) in wait_cmd() argument
432 dev->name, str, lp->scb.status, lp->scb.command); in wait_cmd()
440 static inline int wait_cfg(struct net_device *dev, struct i596_cmd *cmd, int delcnt, char *str) in wait_cfg() argument
447 printk(KERN_ERR "%s: %s.\n", dev->name, str); in wait_cfg()
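
wait_istat(), wait_cmd() and wait_cfg() all follow the same busy-wait shape: spin in udelay(10) steps until a shared-memory status/command word clears, and report a timeout with the device name plus the caller-supplied string. A generic sketch of that poll loop (which word is polled and exactly what gets printed differ per helper):

#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/printk.h>

/*
 * Spin until *field reads zero or delcnt iterations (10 us each) elapse.
 * Returns 0 on success and -1 on timeout, like the wait_*() helpers.
 */
static inline int wait_field_sketch(struct net_device *dev,
				    volatile u16 *field, int delcnt,
				    const char *str)
{
	while (--delcnt && *field)
		udelay(10);

	if (!delcnt) {
		printk(KERN_ERR "%s: %s, field = 0x%04x.\n",
		       dev->name, str, (unsigned int) *field);
		return -1;
	}
	return 0;
}
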
455 static void i596_display_data(struct net_device *dev) in i596_display_data() argument
457 struct i596_private *lp = dev->ml_priv; in i596_display_data()
502 struct net_device *dev = dev_id; in i596_error() local
519 printk(KERN_ERR "%s: Error interrupt\n", dev->name); in i596_error()
520 i596_display_data(dev); in i596_error()
525 static inline void remove_rx_bufs(struct net_device *dev) in remove_rx_bufs() argument
527 struct i596_private *lp = dev->ml_priv; in remove_rx_bufs()
539 static inline int init_rx_bufs(struct net_device *dev) in init_rx_bufs() argument
541 struct i596_private *lp = dev->ml_priv; in init_rx_bufs()
549 struct sk_buff *skb = netdev_alloc_skb(dev, PKT_BUF_SZ); in init_rx_bufs()
552 remove_rx_bufs(dev); in init_rx_bufs()
595 static void rebuild_rx_bufs(struct net_device *dev) in rebuild_rx_bufs() argument
597 struct i596_private *lp = dev->ml_priv; in rebuild_rx_bufs()
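
init_rx_bufs() allocates one PKT_BUF_SZ socket buffer per receive descriptor with netdev_alloc_skb() and unwinds through remove_rx_bufs() if any allocation fails; rebuild_rx_bufs() later re-links the descriptor ring around the same buffers. A reduced sketch of the allocate/unwind half, with the ring size and the per-slot bookkeeping (a flat skb array) as assumptions:

#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

#define SK_RX_RING_SIZE	16	/* assumption; the driver has its own ring size */
#define SK_PKT_BUF_SZ	1536	/* assumption; stands in for PKT_BUF_SZ */

static struct sk_buff *sk_rx_skbs[SK_RX_RING_SIZE];

static void remove_rx_bufs_sketch(struct net_device *dev)
{
	int i;

	for (i = 0; i < SK_RX_RING_SIZE; i++) {
		if (sk_rx_skbs[i]) {
			dev_kfree_skb(sk_rx_skbs[i]);
			sk_rx_skbs[i] = NULL;
		}
	}
}

static int init_rx_bufs_sketch(struct net_device *dev)
{
	int i;

	for (i = 0; i < SK_RX_RING_SIZE; i++) {
		struct sk_buff *skb = netdev_alloc_skb(dev, SK_PKT_BUF_SZ);

		if (!skb) {
			/* Partial allocation: give everything back. */
			remove_rx_bufs_sketch(dev);
			return -ENOMEM;
		}
		sk_rx_skbs[i] = skb;
		/* The real driver also points the RBD at skb->data here. */
	}
	return 0;
}
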
614 static int init_i596_mem(struct net_device *dev) in init_i596_mem() argument
616 struct i596_private *lp = dev->ml_priv; in init_i596_mem()
619 MPU_PORT(dev, PORT_RESET, NULL); in init_i596_mem()
648 MPU_PORT(dev, PORT_ALTSCP, (void *)virt_to_bus((void *)&lp->scp)); in init_i596_mem()
677 DEB(DEB_INIT,printk(KERN_DEBUG "%s: starting i82596.\n", dev->name)); in init_i596_mem()
679 CA(dev); in init_i596_mem()
681 if (wait_istat(dev,lp,1000,"initialization timed out")) in init_i596_mem()
683 DEB(DEB_INIT,printk(KERN_DEBUG "%s: i82596 initialization successful\n", dev->name)); in init_i596_mem()
686 rebuild_rx_bufs(dev); in init_i596_mem()
707 DEB(DEB_INIT,printk(KERN_DEBUG "%s: queuing CmdConfigure\n", dev->name)); in init_i596_mem()
710 i596_add_cmd(dev, &lp->cf_cmd.cmd); in init_i596_mem()
712 DEB(DEB_INIT,printk(KERN_DEBUG "%s: queuing CmdSASetup\n", dev->name)); in init_i596_mem()
713 memcpy(lp->sa_cmd.eth_addr, dev->dev_addr, ETH_ALEN); in init_i596_mem()
715 i596_add_cmd(dev, &lp->sa_cmd.cmd); in init_i596_mem()
717 DEB(DEB_INIT,printk(KERN_DEBUG "%s: queuing CmdTDR\n", dev->name)); in init_i596_mem()
719 i596_add_cmd(dev, &lp->tdr_cmd.cmd); in init_i596_mem()
723 if (wait_cmd(dev,lp,1000,"timed out waiting to issue RX_START")) { in init_i596_mem()
727 DEB(DEB_INIT,printk(KERN_DEBUG "%s: Issuing RX_START\n", dev->name)); in init_i596_mem()
729 CA(dev); in init_i596_mem()
733 if (wait_cmd(dev,lp,1000,"RX_START not processed")) in init_i596_mem()
735 DEB(DEB_INIT,printk(KERN_DEBUG "%s: Receive unit started OK\n", dev->name)); in init_i596_mem()
739 printk(KERN_CRIT "%s: Failed to initialise 82596\n", dev->name); in init_i596_mem()
740 MPU_PORT(dev, PORT_RESET, NULL); in init_i596_mem()
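
init_i596_mem() shows the bring-up order: PORT_RESET, hand the chip the SCP address via PORT_ALTSCP, pulse channel attention, wait for the ISCP busy word to clear, queue CmdConfigure / CmdSASetup (with the MAC copied from dev->dev_addr) / CmdTDR, then issue RX_START and confirm the receive unit started. A condensed sketch of that ordering using the stand-in helpers above; the shared-memory words are plain statics here, where the real driver lays them out in lp and converts addresses with virt_to_bus():

/* Stand-ins for the shared-memory words the chip reads and writes. */
static u32 sk_scp[4];
static volatile u16 sk_iscp_stat, sk_scb_status, sk_scb_command;

static int init_i596_mem_sketch(struct net_device *dev, bool mvme_style)
{
	/* 1. Hard-reset the chip, hand it the SCP address, pulse CA. */
	mpu_port_sketch(dev, SK_PORT_RESET, NULL, mvme_style);
	mpu_port_sketch(dev, SK_PORT_ALTSCP, sk_scp, mvme_style);
	ca_sketch(dev, mvme_style);

	/* 2. The chip clears the ISCP busy word once it has read the SCP. */
	if (wait_field_sketch(dev, &sk_iscp_stat, 1000,
			      "initialization timed out"))
		goto failed;

	/* 3. Queue the one-off setup commands on the command unit:
	 *      CmdConfigure                  (chip parameters)
	 *      CmdSASetup <- dev->dev_addr   (station MAC address)
	 *      CmdTDR                        (cable test, reported later)
	 */

	/* 4. Start the receive unit and confirm the SCB accepted it. */
	if (wait_field_sketch(dev, &sk_scb_command, 1000,
			      "timed out waiting to issue RX_START"))
		goto failed;
	sk_scb_command = 0x0010;	/* RX_START (value assumed) */
	ca_sketch(dev, mvme_style);
	if (wait_field_sketch(dev, &sk_scb_command, 1000,
			      "RX_START not processed"))
		goto failed;
	return 0;

failed:
	printk(KERN_CRIT "%s: Failed to initialise 82596\n", dev->name);
	mpu_port_sketch(dev, SK_PORT_RESET, NULL, mvme_style);
	return -1;
}
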
744 static inline int i596_rx(struct net_device *dev) in i596_rx() argument
746 struct i596_private *lp = dev->ml_priv; in i596_rx()
762 printk(KERN_CRIT "%s: rbd chain broken!\n", dev->name); in i596_rx()
786 newskb = netdev_alloc_skb(dev, PKT_BUF_SZ); in i596_rx()
801 skb = netdev_alloc_skb(dev, pkt_len + 2); in i596_rx()
806 dev->stats.rx_dropped++; in i596_rx()
814 skb->protocol=eth_type_trans(skb,dev); in i596_rx()
821 dev->stats.rx_packets++; in i596_rx()
822 dev->stats.rx_bytes+=pkt_len; in i596_rx()
827 dev->name, rfd->stat)); in i596_rx()
828 dev->stats.rx_errors++; in i596_rx()
830 dev->stats.collisions++; in i596_rx()
832 dev->stats.rx_length_errors++; in i596_rx()
834 dev->stats.rx_over_errors++; in i596_rx()
836 dev->stats.rx_fifo_errors++; in i596_rx()
838 dev->stats.rx_frame_errors++; in i596_rx()
840 dev->stats.rx_crc_errors++; in i596_rx()
842 dev->stats.rx_length_errors++; in i596_rx()
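
i596_rx() walks the receive frame descriptors: a good frame is copied into a freshly allocated skb (or the whole PKT_BUF_SZ buffer is swapped out for larger frames) and handed up with eth_type_trans()/netif_rx(), while a bad frame only updates counters. Two small sketches of those halves, with the RFD status bit masks listed as assumptions:

#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Deliver one good frame of pkt_len bytes sitting at buf. */
static void rx_deliver_sketch(struct net_device *dev, const void *buf,
			      unsigned int pkt_len)
{
	struct sk_buff *skb = netdev_alloc_skb(dev, pkt_len + 2);

	if (!skb) {
		dev->stats.rx_dropped++;
		return;
	}
	skb_reserve(skb, 2);			/* align the IP header */
	skb_put_data(skb, buf, pkt_len);	/* copy out of the DMA buffer */
	skb->protocol = eth_type_trans(skb, dev);
	netif_rx(skb);
	dev->stats.rx_packets++;
	dev->stats.rx_bytes += pkt_len;
}

/* Fold one bad RFD's status word into dev->stats (bit masks assumed). */
static void rx_error_stats_sketch(struct net_device *dev, u16 rfd_stat)
{
	dev->stats.rx_errors++;
	if (rfd_stat & 0x0001)
		dev->stats.collisions++;	/* receive collision */
	if (rfd_stat & 0x0080)
		dev->stats.rx_length_errors++;	/* frame too short   */
	if (rfd_stat & 0x0100)
		dev->stats.rx_over_errors++;	/* DMA overrun       */
	if (rfd_stat & 0x0200)
		dev->stats.rx_fifo_errors++;	/* out of resources  */
	if (rfd_stat & 0x0400)
		dev->stats.rx_frame_errors++;	/* alignment error   */
	if (rfd_stat & 0x0800)
		dev->stats.rx_crc_errors++;	/* CRC error         */
	if (rfd_stat & 0x1000)
		dev->stats.rx_length_errors++;	/* length violation  */
}
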
876 static void i596_cleanup_cmd(struct net_device *dev, struct i596_private *lp) in i596_cleanup_cmd() argument
893 dev->stats.tx_errors++; in i596_cleanup_cmd()
894 dev->stats.tx_aborted_errors++; in i596_cleanup_cmd()
905 wait_cmd(dev,lp,100,"i596_cleanup_cmd timed out"); in i596_cleanup_cmd()
909 static void i596_reset(struct net_device *dev, struct i596_private *lp, in i596_reset() argument
918 wait_cmd(dev,lp,100,"i596_reset timed out"); in i596_reset()
920 netif_stop_queue(dev); in i596_reset()
923 CA(dev); in i596_reset()
926 wait_cmd(dev,lp,1000,"i596_reset 2 timed out"); in i596_reset()
929 i596_cleanup_cmd(dev,lp); in i596_reset()
930 i596_rx(dev); in i596_reset()
932 netif_start_queue(dev); in i596_reset()
933 init_i596_mem(dev); in i596_reset()
936 static void i596_add_cmd(struct net_device *dev, struct i596_cmd *cmd) in i596_add_cmd() argument
938 struct i596_private *lp = dev->ml_priv; in i596_add_cmd()
939 int ioaddr = dev->base_addr; in i596_add_cmd()
955 wait_cmd(dev,lp,100,"i596_add_cmd timed out"); in i596_add_cmd()
958 CA(dev); in i596_add_cmd()
971 printk(KERN_NOTICE "%s: command unit timed out, status resetting.\n", dev->name); in i596_add_cmd()
973 i596_reset(dev, lp, ioaddr); in i596_add_cmd()
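
i596_cleanup_cmd(), i596_reset() and i596_add_cmd() implement the command-unit queue: a new command block is appended under the private lock, and only when the list was empty does the driver wait on the SCB, point it at the block, write CUC_START and pulse CA; if the backlog later stops draining, i596_add_cmd() falls back to i596_reset(). A sketch of the append-and-kick step, with a hypothetical command block type and the SCB/CA stand-ins from above:

#include <linux/spinlock.h>

/* Hypothetical command block; the real driver chains struct i596_cmd. */
struct i596_cmd_sketch {
	struct i596_cmd_sketch *next;
	u16 status;
	u16 command;
};

static struct i596_cmd_sketch *sk_cmd_head, *sk_cmd_tail;
static DEFINE_SPINLOCK(sk_cmd_lock);

static void add_cmd_sketch(struct net_device *dev,
			   struct i596_cmd_sketch *cmd, bool mvme_style)
{
	unsigned long flags;

	cmd->status = 0;
	cmd->next = NULL;

	spin_lock_irqsave(&sk_cmd_lock, flags);
	if (sk_cmd_head) {
		/* Command unit already busy: just chain behind the tail. */
		sk_cmd_tail->next = cmd;
	} else {
		/* Command unit idle: wait for the SCB, then kick it. */
		sk_cmd_head = cmd;
		wait_field_sketch(dev, &sk_scb_command, 100,
				  "i596_add_cmd timed out");
		sk_scb_command = 0x0100;	/* CUC_START (value assumed) */
		ca_sketch(dev, mvme_style);
	}
	sk_cmd_tail = cmd;
	spin_unlock_irqrestore(&sk_cmd_lock, flags);

	/* The real driver also tracks a backlog count here and calls
	 * i596_reset() if commands stop completing in time. */
}
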
977 static int i596_open(struct net_device *dev) in i596_open() argument
981 DEB(DEB_OPEN,printk(KERN_DEBUG "%s: i596_open() irq %d.\n", dev->name, dev->irq)); in i596_open()
983 if (request_irq(dev->irq, i596_interrupt, 0, "i82596", dev)) { in i596_open()
984 printk(KERN_ERR "%s: IRQ %d not free\n", dev->name, dev->irq); in i596_open()
989 if (request_irq(0x56, i596_error, 0, "i82596_error", dev)) { in i596_open()
995 res = init_rx_bufs(dev); in i596_open()
999 netif_start_queue(dev); in i596_open()
1001 if (init_i596_mem(dev)) { in i596_open()
1009 netif_stop_queue(dev); in i596_open()
1010 remove_rx_bufs(dev); in i596_open()
1013 free_irq(0x56, dev); in i596_open()
1016 free_irq(dev->irq, dev); in i596_open()
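
i596_open() is a straight acquire/unwind ladder: request the device IRQ, on MVME16x also the 0x56 error vector (routed to i596_error()), allocate the receive buffers, start the queue, initialize the chip, and on any failure release everything taken so far in reverse order. A sketch of that structure using the earlier stand-ins; the dummy handler and the -EAGAIN returns are assumptions:

#include <linux/errno.h>
#include <linux/interrupt.h>

static irqreturn_t irq_stub_sketch(int irq, void *dev_id)
{
	return IRQ_HANDLED;	/* placeholder; see the interrupt sketch below */
}

static int open_sketch(struct net_device *dev, bool mvme_style)
{
	int res;

	if (request_irq(dev->irq, irq_stub_sketch, 0, "i82596", dev)) {
		printk(KERN_ERR "%s: IRQ %d not free\n", dev->name, dev->irq);
		return -EAGAIN;
	}
	/* MVME16x additionally routes chip error interrupts via vector 0x56. */
	if (mvme_style &&
	    request_irq(0x56, irq_stub_sketch, 0, "i82596_error", dev)) {
		res = -EAGAIN;
		goto err_free_irq;
	}
	res = init_rx_bufs_sketch(dev);
	if (res)
		goto err_free_err_irq;

	netif_start_queue(dev);

	if (init_i596_mem_sketch(dev, mvme_style)) {
		res = -EAGAIN;
		goto err_stop_queue;
	}
	return 0;

err_stop_queue:
	netif_stop_queue(dev);
	remove_rx_bufs_sketch(dev);
err_free_err_irq:
	if (mvme_style)
		free_irq(0x56, dev);
err_free_irq:
	free_irq(dev->irq, dev);
	return res;
}
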
1021 static void i596_tx_timeout (struct net_device *dev, unsigned int txqueue) in i596_tx_timeout() argument
1023 struct i596_private *lp = dev->ml_priv; in i596_tx_timeout()
1024 int ioaddr = dev->base_addr; in i596_tx_timeout()
1028 dev->name)); in i596_tx_timeout()
1030 dev->stats.tx_errors++; in i596_tx_timeout()
1033 if (lp->last_restart == dev->stats.tx_packets) { in i596_tx_timeout()
1036 i596_reset (dev, lp, ioaddr); in i596_tx_timeout()
1041 CA (dev); in i596_tx_timeout()
1042 lp->last_restart = dev->stats.tx_packets; in i596_tx_timeout()
1045 netif_trans_update(dev); /* prevent tx timeout */ in i596_tx_timeout()
1046 netif_wake_queue (dev); in i596_tx_timeout()
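
i596_tx_timeout() decides between a soft and a hard recovery: if tx_packets has not moved since the last restart, the chip is assumed wedged and i596_reset() reinitializes it; otherwise only CUC_START is reissued and the current tx_packets count is remembered. A sketch of that heuristic, with last_restart kept in a stand-in variable:

static unsigned long sk_last_restart;

static void tx_timeout_sketch(struct net_device *dev, bool mvme_style)
{
	dev->stats.tx_errors++;

	if (sk_last_restart == dev->stats.tx_packets) {
		/* Nothing went out since the last kick: hard reset.
		 * (The real driver calls i596_reset(dev, lp, ioaddr).) */
		init_i596_mem_sketch(dev, mvme_style);
	} else {
		/* Progress was made: just restart the command unit. */
		sk_scb_command = 0x0100;	/* CUC_START (value assumed) */
		ca_sketch(dev, mvme_style);
		sk_last_restart = dev->stats.tx_packets;
	}

	netif_trans_update(dev);	/* reset the watchdog timestamp */
	netif_wake_queue(dev);
}
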
1049 static netdev_tx_t i596_start_xmit(struct sk_buff *skb, struct net_device *dev) in i596_start_xmit() argument
1051 struct i596_private *lp = dev->ml_priv; in i596_start_xmit()
1057 dev->name, skb->len, skb->data)); in i596_start_xmit()
1064 netif_stop_queue(dev); in i596_start_xmit()
1071 dev->name); in i596_start_xmit()
1072 dev->stats.tx_dropped++; in i596_start_xmit()
1095 i596_add_cmd(dev, &tx_cmd->cmd); in i596_start_xmit()
1097 dev->stats.tx_packets++; in i596_start_xmit()
1098 dev->stats.tx_bytes += length; in i596_start_xmit()
1101 netif_start_queue(dev); in i596_start_xmit()
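
i596_start_xmit() stops the queue while it works, drops the frame (tx_dropped++) if no TX command block is free, otherwise builds the TX command plus buffer descriptor around skb->data, queues it with i596_add_cmd(), and bumps tx_packets/tx_bytes before restarting the queue. A reduced sketch of that flow; the free-slot test, the static command block and the immediate skb free are simplifications:

#include <linux/if_ether.h>

static struct i596_cmd_sketch sk_tx_cmd;

static netdev_tx_t start_xmit_sketch(struct sk_buff *skb,
				     struct net_device *dev,
				     bool ring_full, bool mvme_style)
{
	/* Frames shorter than the Ethernet minimum get padded up. */
	unsigned int length = max_t(unsigned int, skb->len, ETH_ZLEN);

	netif_stop_queue(dev);

	if (ring_full) {
		/* No free TX command block: count the drop and move on. */
		dev->stats.tx_dropped++;
		dev_kfree_skb_any(skb);
	} else {
		/* The real driver fills a struct tx_cmd and an i596_tbd
		 * pointing at skb->data, then queues it. */
		add_cmd_sketch(dev, &sk_tx_cmd, mvme_style);
		dev->stats.tx_packets++;
		dev->stats.tx_bytes += length;
		dev_kfree_skb_any(skb);	/* sketch only; the driver frees on completion */
	}

	netif_start_queue(dev);
	return NETDEV_TX_OK;
}
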
1124 struct net_device *dev; in i82596_probe() local
1135 dev = alloc_etherdev(0); in i82596_probe()
1136 if (!dev) in i82596_probe()
1147 dev->base_addr = MVME_I596_BASE; in i82596_probe()
1148 dev->irq = (unsigned) MVME16x_IRQ_I596; in i82596_probe()
1162 dev->base_addr = BVME_I596_BASE; in i82596_probe()
1163 dev->irq = (unsigned) BVME_IRQ_I596; in i82596_probe()
1171 dev->mem_start = (int)__get_free_pages(GFP_ATOMIC, 0); in i82596_probe()
1172 if (!dev->mem_start) { in i82596_probe()
1177 DEB(DEB_PROBE,printk(KERN_INFO "%s: 82596 at %#3lx,", dev->name, dev->base_addr)); in i82596_probe()
1181 eth_hw_addr_set(dev, eth_addr); in i82596_probe()
1183 DEB(DEB_PROBE,printk(" IRQ %d.\n", dev->irq)); in i82596_probe()
1188 dev->netdev_ops = &i596_netdev_ops; in i82596_probe()
1189 dev->watchdog_timeo = TX_TIMEOUT; in i82596_probe()
1191 dev->ml_priv = (void *)(dev->mem_start); in i82596_probe()
1193 lp = dev->ml_priv; in i82596_probe()
1196 dev->name, (unsigned long)lp, in i82596_probe()
1201 cache_push(virt_to_phys((void *)(dev->mem_start)), 4096); in i82596_probe()
1202 cache_clear(virt_to_phys((void *)(dev->mem_start)), 4096); in i82596_probe()
1203 kernel_set_cachemode((void *)(dev->mem_start), 4096, IOMAP_NOCACHE_SER); in i82596_probe()
1210 err = register_netdev(dev); in i82596_probe()
1213 return dev; in i82596_probe()
1219 kernel_set_cachemode((void *)(dev->mem_start), 4096, in i82596_probe()
1222 free_page ((u32)(dev->mem_start)); in i82596_probe()
1225 free_netdev(dev); in i82596_probe()
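
i82596_probe() is the classic non-hotplug probe: alloc_etherdev(), fill in base_addr/irq for the detected board (MVME16x or BVME6000), grab one page for the shared descriptor memory (made uncacheable on MVME16x with kernel_set_cachemode()), set the MAC via eth_hw_addr_set(), wire up netdev_ops and watchdog_timeo, register_netdev(), and unwind on error. A sketch with the board constants replaced by placeholders and the ops table left empty:

#include <linux/err.h>
#include <linux/etherdevice.h>
#include <linux/gfp.h>
#include <linux/netdevice.h>

/* Placeholder board parameters; the real values come from the MVME/BVME headers. */
#define SK_I596_BASE	0xfff46000UL
#define SK_I596_IRQ	57

static const struct net_device_ops sk_netdev_ops = {
	/* the real i596_netdev_ops wires up i596_open, i596_close,
	 * i596_start_xmit, i596_tx_timeout and set_multicast_list */
};

static struct net_device *probe_sketch(const u8 *eth_addr)
{
	struct net_device *dev;
	int err;

	dev = alloc_etherdev(0);	/* private data lives in mem_start instead */
	if (!dev)
		return ERR_PTR(-ENOMEM);

	dev->base_addr = SK_I596_BASE;
	dev->irq = SK_I596_IRQ;

	/* One page holds the SCP/ISCP/SCB and the descriptor rings. */
	dev->mem_start = __get_free_pages(GFP_ATOMIC, 0);
	if (!dev->mem_start) {
		err = -ENOMEM;
		goto out_free_netdev;
	}
	dev->ml_priv = (void *) dev->mem_start;

	eth_hw_addr_set(dev, eth_addr);	/* MAC previously read from board PROM */
	dev->netdev_ops = &sk_netdev_ops;
	dev->watchdog_timeo = 5 * HZ;	/* stands in for TX_TIMEOUT */

	err = register_netdev(dev);
	if (err)
		goto out_free_page;
	return dev;

out_free_page:
	free_page(dev->mem_start);
out_free_netdev:
	free_netdev(dev);
	return ERR_PTR(err);
}
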
1231 struct net_device *dev = dev_id; in i596_interrupt() local
1245 if (dev == NULL) { in i596_interrupt()
1250 ioaddr = dev->base_addr; in i596_interrupt()
1251 lp = dev->ml_priv; in i596_interrupt()
1255 wait_cmd(dev,lp,100,"i596 interrupt, timeout"); in i596_interrupt()
1259 dev->name, irq, status)); in i596_interrupt()
1268 DEB(DEB_INTS,printk(KERN_DEBUG "%s: i596 interrupt completed command.\n", dev->name)); in i596_interrupt()
1270 DEB(DEB_INTS,printk(KERN_DEBUG "%s: i596 interrupt command unit inactive %x.\n", dev->name, status & 0x0700)); in i596_interrupt()
1289 dev->stats.tx_errors++; in i596_interrupt()
1291 dev->stats.collisions++; in i596_interrupt()
1293 dev->stats.tx_heartbeat_errors++; in i596_interrupt()
1295 dev->stats.tx_carrier_errors++; in i596_interrupt()
1297 dev->stats.collisions++; in i596_interrupt()
1299 dev->stats.tx_aborted_errors++; in i596_interrupt()
1312 DEB(DEB_TDR,printk(KERN_INFO "%s: link ok.\n", dev->name)); in i596_interrupt()
1315 printk(KERN_ERR "%s: Transceiver problem.\n", dev->name); in i596_interrupt()
1317 printk(KERN_ERR "%s: Termination problem.\n", dev->name); in i596_interrupt()
1319 printk(KERN_ERR "%s: Short circuit.\n", dev->name); in i596_interrupt()
1321 DEB(DEB_TDR,printk(KERN_INFO "%s: Time %d.\n", dev->name, status & 0x07ff)); in i596_interrupt()
1347 DEB(DEB_INTS,printk(KERN_DEBUG "%s: i596 interrupt received a frame.\n", dev->name)); in i596_interrupt()
1348 i596_rx(dev); in i596_interrupt()
1351 if (netif_running(dev)) { in i596_interrupt()
1352 DEB(DEB_ERRORS,printk(KERN_ERR "%s: i596 interrupt receive unit inactive, status 0x%x\n", dev->name, status)); in i596_interrupt()
1354 dev->stats.rx_errors++; in i596_interrupt()
1355 dev->stats.rx_fifo_errors++; in i596_interrupt()
1356 rebuild_rx_bufs(dev); in i596_interrupt()
1360 wait_cmd(dev,lp,100,"i596 interrupt, timeout"); in i596_interrupt()
1380 CA(dev); in i596_interrupt()
1382 DEB(DEB_INTS,printk(KERN_DEBUG "%s: exiting interrupt.\n", dev->name)); in i596_interrupt()
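
i596_interrupt() reads the SCB status word, decides which of the four event classes fired (command complete, CU not active, frame received, RU not ready), handles them (folding TX status into dev->stats, reporting the TDR result, draining received frames, rebuilding buffers if the receive unit stalled), then writes the matching ACK bits back to the SCB command word and pulses CA. A skeleton of that flow using the stand-in SCB words; the status bit values are the usual i82596 SCB bits but should be treated as assumptions here:

#define SK_STAT_CX	0x8000	/* command(s) completed           */
#define SK_STAT_FR	0x4000	/* frame(s) received              */
#define SK_STAT_CNA	0x2000	/* command unit became not active */
#define SK_STAT_RNR	0x1000	/* receive unit became not ready  */

static irqreturn_t interrupt_sketch(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	u16 status, ack_cmd;

	wait_field_sketch(dev, &sk_scb_command, 100, "i596 interrupt, timeout");
	status = sk_scb_status;

	/* Acknowledge exactly the events handled below. */
	ack_cmd = status & (SK_STAT_CX | SK_STAT_FR |
			    SK_STAT_CNA | SK_STAT_RNR);

	if (status & (SK_STAT_CX | SK_STAT_CNA)) {
		/* Walk the completed command list: fold TX status into
		 * dev->stats (collisions, carrier, aborted, ...), report
		 * the CmdTDR result, restart the CU if work remains. */
	}

	if (status & (SK_STAT_FR | SK_STAT_RNR)) {
		/* Drain the receive frame descriptors (i596_rx). */
		if ((status & SK_STAT_RNR) && netif_running(dev)) {
			dev->stats.rx_errors++;
			dev->stats.rx_fifo_errors++;
			/* rebuild_rx_bufs(dev); then restart the RU */
		}
	}

	wait_field_sketch(dev, &sk_scb_command, 100, "i596 interrupt, timeout");
	sk_scb_command = ack_cmd;	/* ACK the events in the SCB */
	ca_sketch(dev, false /* BVME-style CA in this sketch */);

	return IRQ_HANDLED;
}
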
1388 static int i596_close(struct net_device *dev) in i596_close() argument
1390 struct i596_private *lp = dev->ml_priv; in i596_close()
1393 netif_stop_queue(dev); in i596_close()
1396 dev->name, lp->scb.status)); in i596_close()
1400 wait_cmd(dev,lp,100,"close1 timed out"); in i596_close()
1402 CA(dev); in i596_close()
1404 wait_cmd(dev,lp,100,"close2 timed out"); in i596_close()
1407 DEB(DEB_STRUCT,i596_display_data(dev)); in i596_close()
1408 i596_cleanup_cmd(dev,lp); in i596_close()
1429 free_irq(0x56, dev); in i596_close()
1431 free_irq(dev->irq, dev); in i596_close()
1432 remove_rx_bufs(dev); in i596_close()
1441 static void set_multicast_list(struct net_device *dev) in set_multicast_list() argument
1443 struct i596_private *lp = dev->ml_priv; in set_multicast_list()
1447 dev->name, netdev_mc_count(dev), in set_multicast_list()
1448 dev->flags & IFF_PROMISC ? "ON" : "OFF", in set_multicast_list()
1449 dev->flags & IFF_ALLMULTI ? "ON" : "OFF")); in set_multicast_list()
1451 if (wait_cfg(dev, &lp->cf_cmd.cmd, 1000, "config change request timed out")) in set_multicast_list()
1454 if ((dev->flags & IFF_PROMISC) && !(lp->cf_cmd.i596_config[8] & 0x01)) { in set_multicast_list()
1458 if (!(dev->flags & IFF_PROMISC) && (lp->cf_cmd.i596_config[8] & 0x01)) { in set_multicast_list()
1462 if ((dev->flags & IFF_ALLMULTI) && (lp->cf_cmd.i596_config[11] & 0x20)) { in set_multicast_list()
1466 if (!(dev->flags & IFF_ALLMULTI) && !(lp->cf_cmd.i596_config[11] & 0x20)) { in set_multicast_list()
1472 i596_add_cmd(dev, &lp->cf_cmd.cmd); in set_multicast_list()
1475 cnt = netdev_mc_count(dev); in set_multicast_list()
1480 dev->name, cnt); in set_multicast_list()
1483 if (!netdev_mc_empty(dev)) { in set_multicast_list()
1488 if (wait_cfg(dev, &lp->mc_cmd.cmd, 1000, "multicast list change request timed out")) in set_multicast_list()
1494 netdev_for_each_mc_addr(ha, dev) { in set_multicast_list()
1500 dev->name, cp)); in set_multicast_list()
1503 i596_add_cmd(dev, &cmd->cmd); in set_multicast_list()
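
set_multicast_list() does two jobs: it toggles the promiscuous bit (config byte 8, bit 0x01) and the multicast-disable bit (config byte 11, bit 0x20, which IFF_ALLMULTI clears) inside the saved CmdConfigure bytes and requeues that command when anything changed, and it builds a fresh CmdMulticastList block from netdev_for_each_mc_addr() when the list itself changed. A sketch of the flag-toggling half, with the configure bytes and command block as stand-ins:

/* Stand-ins for the saved CmdConfigure bytes and their command block. */
static u8 sk_i596_config[16];
static struct i596_cmd_sketch sk_cf_cmd;

static void set_rx_mode_sketch(struct net_device *dev, bool mvme_style)
{
	bool changed = false;

	/* Byte 8, bit 0x01: promiscuous mode. */
	if ((dev->flags & IFF_PROMISC) && !(sk_i596_config[8] & 0x01)) {
		sk_i596_config[8] |= 0x01;
		changed = true;
	}
	if (!(dev->flags & IFF_PROMISC) && (sk_i596_config[8] & 0x01)) {
		sk_i596_config[8] &= ~0x01;
		changed = true;
	}

	/* Byte 11, bit 0x20: multicast disabled when set, so ALLMULTI clears it. */
	if ((dev->flags & IFF_ALLMULTI) && (sk_i596_config[11] & 0x20)) {
		sk_i596_config[11] &= ~0x20;
		changed = true;
	}
	if (!(dev->flags & IFF_ALLMULTI) && !(sk_i596_config[11] & 0x20)) {
		sk_i596_config[11] |= 0x20;
		changed = true;
	}

	if (changed)
		add_cmd_sketch(dev, &sk_cf_cmd, mvme_style);	/* requeue CmdConfigure */

	/* When the list itself changed, the real driver additionally packs
	 * netdev_mc_count(dev) addresses into a CmdMulticastList block:
	 *   netdev_for_each_mc_addr(ha, dev)
	 *           memcpy(cp, ha->addr, ETH_ALEN), cp += ETH_ALEN;
	 */
}
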