/*
 * sgiseeq.c: Seeq8003 ethernet driver for SGI machines.
 *
 * Copyright (C) 1996 David S. Miller (davem@davemloft.net)
 */

#undef DEBUG

#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/platform_device.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>

#include <asm/sgi/hpc3.h>
#include <asm/sgi/ip22.h>
#include <asm/sgi/seeq.h>

#include "sgiseeq.h"

static char *sgiseeqstr = "SGI Seeq8003";

/*
 * If you want speed, you do something silly, it always has worked for me. So,
 * with that in mind, I've decided to make this driver look completely like a
 * stupid Lance from a driver architecture perspective. Only difference is that
 * here our "ring buffer" looks and acts like a real Lance one does but is
 * laid out like how the HPC DMA and the Seeq want it to. You'd be surprised
 * how a stupid idea like this can pay off in performance, not to mention
 * making this driver 2,000 times easier to write. ;-)
 */

/* Tune these if we tend to run out often etc. */
#define SEEQ_RX_BUFFERS  16
#define SEEQ_TX_BUFFERS  16

#define PKT_BUF_SZ       1584

#define NEXT_RX(i)  (((i) + 1) & (SEEQ_RX_BUFFERS - 1))
#define NEXT_TX(i)  (((i) + 1) & (SEEQ_TX_BUFFERS - 1))
#define PREV_RX(i)  (((i) - 1) & (SEEQ_RX_BUFFERS - 1))
#define PREV_TX(i)  (((i) - 1) & (SEEQ_TX_BUFFERS - 1))

#define TX_BUFFS_AVAIL(sp) ((sp->tx_old <= sp->tx_new) ? \
			    sp->tx_old + (SEEQ_TX_BUFFERS - 1) - sp->tx_new : \
			    sp->tx_old - sp->tx_new - 1)

#define VIRT_TO_DMA(sp, v) ((sp)->srings_dma + \
			    (dma_addr_t)((unsigned long)(v) - \
					 (unsigned long)((sp)->rx_desc)))
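
/*
 * Note: NEXT_*()/PREV_*() rely on the ring sizes being powers of two so
 * that wrapping the index reduces to a simple mask.  VIRT_TO_DMA()
 * translates a descriptor's kernel address into its bus address by
 * offsetting from the base of the descriptor block, which works because
 * the rx and tx descriptors live in one contiguous allocation (see
 * struct sgiseeq_init_block below).
 */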

/* Copy frames shorter than rx_copybreak, otherwise pass on up in
 * a full sized sk_buff.  Value of 100 stolen from tulip.c (!alpha).
 */
static int rx_copybreak = 100;

#define PAD_SIZE    (128 - sizeof(struct hpc_dma_desc) - sizeof(void *))

struct sgiseeq_rx_desc {
	volatile struct hpc_dma_desc rdma;
	u8 padding[PAD_SIZE];
	struct sk_buff *skb;
};

struct sgiseeq_tx_desc {
	volatile struct hpc_dma_desc tdma;
	u8 padding[PAD_SIZE];
	struct sk_buff *skb;
};

/*
 * Warning: This structure is laid out in a certain way because HPC dma
 * descriptors must be 8-byte aligned.  So don't touch this without
 * some care.
 */
struct sgiseeq_init_block {	/* Note the name ;-) */
	struct sgiseeq_rx_desc rxvector[SEEQ_RX_BUFFERS];
	struct sgiseeq_tx_desc txvector[SEEQ_TX_BUFFERS];
};

struct sgiseeq_private {
	struct sgiseeq_init_block *srings;
	dma_addr_t srings_dma;

	/* Ptrs to the descriptors in uncached space. */
	struct sgiseeq_rx_desc *rx_desc;
	struct sgiseeq_tx_desc *tx_desc;

	char *name;
	struct hpc3_ethregs *hregs;
	struct sgiseeq_regs *sregs;

	/* Ring entry counters. */
	unsigned int rx_new, tx_new;
	unsigned int rx_old, tx_old;

	int is_edlc;
	unsigned char control;
	unsigned char mode;

	spinlock_t tx_lock;
};
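
/*
 * The descriptor rings sit in noncoherent memory, so every CPU access is
 * bracketed by explicit cache maintenance: sync for the CPU before
 * reading a descriptor the HPC may have written, and sync for the device
 * after the CPU updates one.  Both helpers sync sizeof(struct
 * sgiseeq_rx_desc) bytes, which also covers tx descriptors since both
 * descriptor types are padded out to the same 128-byte size.
 */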

static inline void dma_sync_desc_cpu(struct net_device *dev, void *addr)
{
	dma_cache_sync(dev->dev.parent, addr, sizeof(struct sgiseeq_rx_desc),
		       DMA_FROM_DEVICE);
}

static inline void dma_sync_desc_dev(struct net_device *dev, void *addr)
{
	dma_cache_sync(dev->dev.parent, addr, sizeof(struct sgiseeq_rx_desc),
		       DMA_TO_DEVICE);
}

static inline void hpc3_eth_reset(struct hpc3_ethregs *hregs)
{
	hregs->reset = HPC3_ERST_CRESET | HPC3_ERST_CLRIRQ;
	udelay(20);
	hregs->reset = 0;
}

static inline void reset_hpc3_and_seeq(struct hpc3_ethregs *hregs,
				       struct sgiseeq_regs *sregs)
{
	hregs->rx_ctrl = hregs->tx_ctrl = 0;
	hpc3_eth_reset(hregs);
}

#define RSTAT_GO_BITS (SEEQ_RCMD_IGOOD | SEEQ_RCMD_IEOF | SEEQ_RCMD_ISHORT | \
		       SEEQ_RCMD_IDRIB | SEEQ_RCMD_ICRC)

static inline void seeq_go(struct sgiseeq_private *sp,
			   struct hpc3_ethregs *hregs,
			   struct sgiseeq_regs *sregs)
{
	sregs->rstat = sp->mode | RSTAT_GO_BITS;
	hregs->rx_ctrl = HPC3_ERXCTRL_ACTIVE;
}

static inline void __sgiseeq_set_mac_address(struct net_device *dev)
{
	struct sgiseeq_private *sp = netdev_priv(dev);
	struct sgiseeq_regs *sregs = sp->sregs;
	int i;

	sregs->tstat = SEEQ_TCMD_RB0;
	for (i = 0; i < 6; i++)
		sregs->rw.eth_addr[i] = dev->dev_addr[i];
}

static int sgiseeq_set_mac_address(struct net_device *dev, void *addr)
{
	struct sgiseeq_private *sp = netdev_priv(dev);
	struct sockaddr *sa = addr;

	memcpy(dev->dev_addr, sa->sa_data, dev->addr_len);

	spin_lock_irq(&sp->tx_lock);
	__sgiseeq_set_mac_address(dev);
	spin_unlock_irq(&sp->tx_lock);

	return 0;
}
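
/*
 * Initial cntinfo values.  Judging by how kick_tx() and sgiseeq_tx()
 * test these bits, a fresh tx descriptor is marked end-of-chain and
 * already-transmitted (EOX | ETXD) so the HPC leaves it alone, while a
 * fresh rx descriptor is handed to the HPC (OWN) with a full-sized
 * byte count.
 */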

#define TCNTINFO_INIT	(HPCDMA_EOX | HPCDMA_ETXD)
#define RCNTCFG_INIT	(HPCDMA_OWN | HPCDMA_EORP | HPCDMA_XIE)
#define RCNTINFO_INIT	(RCNTCFG_INIT | (PKT_BUF_SZ & HPCDMA_BCNT))

static int seeq_init_ring(struct net_device *dev)
{
	struct sgiseeq_private *sp = netdev_priv(dev);
	int i;

	netif_stop_queue(dev);
	sp->rx_new = sp->tx_new = 0;
	sp->rx_old = sp->tx_old = 0;

	__sgiseeq_set_mac_address(dev);

	/* Setup tx ring. */
	for (i = 0; i < SEEQ_TX_BUFFERS; i++) {
		sp->tx_desc[i].tdma.cntinfo = TCNTINFO_INIT;
		dma_sync_desc_dev(dev, &sp->tx_desc[i]);
	}

	/* And now the rx ring. */
	for (i = 0; i < SEEQ_RX_BUFFERS; i++) {
		if (!sp->rx_desc[i].skb) {
			dma_addr_t dma_addr;
			struct sk_buff *skb = netdev_alloc_skb(dev, PKT_BUF_SZ);

			if (skb == NULL)
				return -ENOMEM;
			skb_reserve(skb, 2);
			dma_addr = dma_map_single(dev->dev.parent,
						  skb->data - 2,
						  PKT_BUF_SZ, DMA_FROM_DEVICE);
			sp->rx_desc[i].skb = skb;
			sp->rx_desc[i].rdma.pbuf = dma_addr;
		}
		sp->rx_desc[i].rdma.cntinfo = RCNTINFO_INIT;
		dma_sync_desc_dev(dev, &sp->rx_desc[i]);
	}
	sp->rx_desc[i - 1].rdma.cntinfo |= HPCDMA_EOR;
	dma_sync_desc_dev(dev, &sp->rx_desc[i - 1]);
	return 0;
}

static void seeq_purge_ring(struct net_device *dev)
{
	struct sgiseeq_private *sp = netdev_priv(dev);
	int i;

	/* Clear tx ring. */
	for (i = 0; i < SEEQ_TX_BUFFERS; i++) {
		if (sp->tx_desc[i].skb) {
			dev_kfree_skb(sp->tx_desc[i].skb);
			sp->tx_desc[i].skb = NULL;
		}
	}

	/* And now the rx ring. */
	for (i = 0; i < SEEQ_RX_BUFFERS; i++) {
		if (sp->rx_desc[i].skb) {
			dev_kfree_skb(sp->rx_desc[i].skb);
			sp->rx_desc[i].skb = NULL;
		}
	}
}

#ifdef DEBUG
static struct sgiseeq_private *gpriv;
static struct net_device *gdev;

static void sgiseeq_dump_rings(void)
{
	static int once;
	struct sgiseeq_rx_desc *r = gpriv->rx_desc;
	struct sgiseeq_tx_desc *t = gpriv->tx_desc;
	struct hpc3_ethregs *hregs = gpriv->hregs;
	int i;

	if (once)
		return;
	once++;
	printk("RING DUMP:\n");
	for (i = 0; i < SEEQ_RX_BUFFERS; i++) {
		printk("RX [%d]: @(%p) [%08x,%08x,%08x] ",
		       i, (&r[i]), r[i].rdma.pbuf, r[i].rdma.cntinfo,
		       r[i].rdma.pnext);
		i += 1;
		printk("-- [%d]: @(%p) [%08x,%08x,%08x]\n",
		       i, (&r[i]), r[i].rdma.pbuf, r[i].rdma.cntinfo,
		       r[i].rdma.pnext);
	}
	for (i = 0; i < SEEQ_TX_BUFFERS; i++) {
		printk("TX [%d]: @(%p) [%08x,%08x,%08x] ",
		       i, (&t[i]), t[i].tdma.pbuf, t[i].tdma.cntinfo,
		       t[i].tdma.pnext);
		i += 1;
		printk("-- [%d]: @(%p) [%08x,%08x,%08x]\n",
		       i, (&t[i]), t[i].tdma.pbuf, t[i].tdma.cntinfo,
		       t[i].tdma.pnext);
	}
	printk("INFO: [rx_new = %d rx_old=%d] [tx_new = %d tx_old = %d]\n",
	       gpriv->rx_new, gpriv->rx_old, gpriv->tx_new, gpriv->tx_old);
	printk("RREGS: rx_cbptr[%08x] rx_ndptr[%08x] rx_ctrl[%08x]\n",
	       hregs->rx_cbptr, hregs->rx_ndptr, hregs->rx_ctrl);
	printk("TREGS: tx_cbptr[%08x] tx_ndptr[%08x] tx_ctrl[%08x]\n",
	       hregs->tx_cbptr, hregs->tx_ndptr, hregs->tx_ctrl);
}
#endif

#define TSTAT_INIT_SEEQ (SEEQ_TCMD_IPT|SEEQ_TCMD_I16|SEEQ_TCMD_IC|SEEQ_TCMD_IUF)
#define TSTAT_INIT_EDLC ((TSTAT_INIT_SEEQ) | SEEQ_TCMD_RB2)
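
/*
 * Bring the controller to a known running state: reset the HPC and the
 * Seeq, rebuild the descriptor rings, enable the interrupt sources we
 * care about, point the HPC at the ring bases and start reception.
 */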

static int init_seeq(struct net_device *dev, struct sgiseeq_private *sp,
		     struct sgiseeq_regs *sregs)
{
	struct hpc3_ethregs *hregs = sp->hregs;
	int err;

	reset_hpc3_and_seeq(hregs, sregs);
	err = seeq_init_ring(dev);
	if (err)
		return err;

	/* Setup to field the proper interrupt types. */
	if (sp->is_edlc) {
		sregs->tstat = TSTAT_INIT_EDLC;
		sregs->rw.wregs.control = sp->control;
		sregs->rw.wregs.frame_gap = 0;
	} else {
		sregs->tstat = TSTAT_INIT_SEEQ;
	}

	hregs->rx_ndptr = VIRT_TO_DMA(sp, sp->rx_desc);
	hregs->tx_ndptr = VIRT_TO_DMA(sp, sp->tx_desc);

	seeq_go(sp, hregs, sregs);
	return 0;
}

static void record_rx_errors(struct net_device *dev, unsigned char status)
{
	if (status & SEEQ_RSTAT_OVERF ||
	    status & SEEQ_RSTAT_SFRAME)
		dev->stats.rx_over_errors++;
	if (status & SEEQ_RSTAT_CERROR)
		dev->stats.rx_crc_errors++;
	if (status & SEEQ_RSTAT_DERROR)
		dev->stats.rx_frame_errors++;
	if (status & SEEQ_RSTAT_REOF)
		dev->stats.rx_errors++;
}

static inline void rx_maybe_restart(struct sgiseeq_private *sp,
				    struct hpc3_ethregs *hregs,
				    struct sgiseeq_regs *sregs)
{
	if (!(hregs->rx_ctrl & HPC3_ERXCTRL_ACTIVE)) {
		hregs->rx_ndptr = VIRT_TO_DMA(sp, sp->rx_desc + sp->rx_new);
		seeq_go(sp, hregs, sregs);
	}
}
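
/*
 * Receive path.  Frames no longer than rx_copybreak are copied into a
 * freshly allocated small skb and the original ring buffer is recycled;
 * longer frames are passed up in the full-sized ring skb, which is then
 * replaced with a new allocation.  Frames whose source MAC matches our
 * own address are dropped silently.
 */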

static inline void sgiseeq_rx(struct net_device *dev, struct sgiseeq_private *sp,
			      struct hpc3_ethregs *hregs,
			      struct sgiseeq_regs *sregs)
{
	struct sgiseeq_rx_desc *rd;
	struct sk_buff *skb = NULL;
	struct sk_buff *newskb;
	unsigned char pkt_status;
	int len = 0;
	unsigned int orig_end = PREV_RX(sp->rx_new);

	/* Service every received packet. */
	rd = &sp->rx_desc[sp->rx_new];
	dma_sync_desc_cpu(dev, rd);
	while (!(rd->rdma.cntinfo & HPCDMA_OWN)) {
		len = PKT_BUF_SZ - (rd->rdma.cntinfo & HPCDMA_BCNT) - 3;
		dma_unmap_single(dev->dev.parent, rd->rdma.pbuf,
				 PKT_BUF_SZ, DMA_FROM_DEVICE);
		pkt_status = rd->skb->data[len];
		if (pkt_status & SEEQ_RSTAT_FIG) {
			/* Packet is OK. */
			/* We don't want to receive our own packets */
			if (memcmp(rd->skb->data + 6, dev->dev_addr, ETH_ALEN)) {
				if (len > rx_copybreak) {
					skb = rd->skb;
					newskb = netdev_alloc_skb(dev, PKT_BUF_SZ);
					if (!newskb) {
						newskb = skb;
						skb = NULL;
						goto memory_squeeze;
					}
					skb_reserve(newskb, 2);
				} else {
					skb = netdev_alloc_skb_ip_align(dev, len);
					if (skb)
						skb_copy_to_linear_data(skb, rd->skb->data, len);

					newskb = rd->skb;
				}
memory_squeeze:
				if (skb) {
					skb_put(skb, len);
					skb->protocol = eth_type_trans(skb, dev);
					netif_rx(skb);
					dev->stats.rx_packets++;
					dev->stats.rx_bytes += len;
				} else {
					printk(KERN_NOTICE "%s: Memory squeeze, deferring packet.\n",
						dev->name);
					dev->stats.rx_dropped++;
				}
			} else {
				/* Silently drop my own packets */
				newskb = rd->skb;
			}
		} else {
			record_rx_errors(dev, pkt_status);
			newskb = rd->skb;
		}
		rd->skb = newskb;
		rd->rdma.pbuf = dma_map_single(dev->dev.parent,
					       newskb->data - 2,
					       PKT_BUF_SZ, DMA_FROM_DEVICE);

		/* Return the entry to the ring pool. */
		rd->rdma.cntinfo = RCNTINFO_INIT;
		sp->rx_new = NEXT_RX(sp->rx_new);
		dma_sync_desc_dev(dev, rd);
		rd = &sp->rx_desc[sp->rx_new];
		dma_sync_desc_cpu(dev, rd);
	}
	dma_sync_desc_cpu(dev, &sp->rx_desc[orig_end]);
	sp->rx_desc[orig_end].rdma.cntinfo &= ~(HPCDMA_EOR);
	dma_sync_desc_dev(dev, &sp->rx_desc[orig_end]);
	dma_sync_desc_cpu(dev, &sp->rx_desc[PREV_RX(sp->rx_new)]);
	sp->rx_desc[PREV_RX(sp->rx_new)].rdma.cntinfo |= HPCDMA_EOR;
	dma_sync_desc_dev(dev, &sp->rx_desc[PREV_RX(sp->rx_new)]);
	rx_maybe_restart(sp, hregs, sregs);
}

static inline void tx_maybe_reset_collisions(struct sgiseeq_private *sp,
					     struct sgiseeq_regs *sregs)
{
	if (sp->is_edlc) {
		sregs->rw.wregs.control = sp->control & ~(SEEQ_CTRL_XCNT);
		sregs->rw.wregs.control = sp->control;
	}
}

static inline void kick_tx(struct net_device *dev,
			   struct sgiseeq_private *sp,
			   struct hpc3_ethregs *hregs)
{
	struct sgiseeq_tx_desc *td;
	int i = sp->tx_old;

	/* If the HPC aint doin nothin, and there are more packets
	 * with ETXD cleared and XIU set we must make very certain
	 * that we restart the HPC else we risk locking up the
	 * adapter.  The following code is only safe iff the HPCDMA
	 * is not active!
	 */
	td = &sp->tx_desc[i];
	dma_sync_desc_cpu(dev, td);
	while ((td->tdma.cntinfo & (HPCDMA_XIU | HPCDMA_ETXD)) ==
	      (HPCDMA_XIU | HPCDMA_ETXD)) {
		i = NEXT_TX(i);
		td = &sp->tx_desc[i];
		dma_sync_desc_cpu(dev, td);
	}
	if (td->tdma.cntinfo & HPCDMA_XIU) {
		hregs->tx_ndptr = VIRT_TO_DMA(sp, td);
		hregs->tx_ctrl = HPC3_ETXCTRL_ACTIVE;
	}
}
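
/*
 * Transmit completion.  Record any error the chip flagged, then walk the
 * ring from tx_old to tx_new, retiring descriptors the chip has finished
 * with and restarting the HPC if it stopped before reaching a descriptor
 * that is still queued.
 */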

static inline void sgiseeq_tx(struct net_device *dev, struct sgiseeq_private *sp,
			      struct hpc3_ethregs *hregs,
			      struct sgiseeq_regs *sregs)
{
	struct sgiseeq_tx_desc *td;
	unsigned long status = hregs->tx_ctrl;
	int j;

	tx_maybe_reset_collisions(sp, sregs);

	if (!(status & (HPC3_ETXCTRL_ACTIVE | SEEQ_TSTAT_PTRANS))) {
		/* Oops, HPC detected some sort of error. */
		if (status & SEEQ_TSTAT_R16)
			dev->stats.tx_aborted_errors++;
		if (status & SEEQ_TSTAT_UFLOW)
			dev->stats.tx_fifo_errors++;
		if (status & SEEQ_TSTAT_LCLS)
			dev->stats.collisions++;
	}

	/* Ack 'em... */
	for (j = sp->tx_old; j != sp->tx_new; j = NEXT_TX(j)) {
		td = &sp->tx_desc[j];

		dma_sync_desc_cpu(dev, td);
		if (!(td->tdma.cntinfo & (HPCDMA_XIU)))
			break;
		if (!(td->tdma.cntinfo & (HPCDMA_ETXD))) {
			if (!(status & HPC3_ETXCTRL_ACTIVE)) {
				hregs->tx_ndptr = VIRT_TO_DMA(sp, td);
				hregs->tx_ctrl = HPC3_ETXCTRL_ACTIVE;
			}
			break;
		}
		dev->stats.tx_packets++;
		sp->tx_old = NEXT_TX(sp->tx_old);
		td->tdma.cntinfo &= ~(HPCDMA_XIU | HPCDMA_XIE);
		td->tdma.cntinfo |= HPCDMA_EOX;
		if (td->skb) {
			dev_kfree_skb_any(td->skb);
			td->skb = NULL;
		}
		dma_sync_desc_dev(dev, td);
	}
}

static irqreturn_t sgiseeq_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = (struct net_device *) dev_id;
	struct sgiseeq_private *sp = netdev_priv(dev);
	struct hpc3_ethregs *hregs = sp->hregs;
	struct sgiseeq_regs *sregs = sp->sregs;

	spin_lock(&sp->tx_lock);

	/* Ack the IRQ and set software state. */
	hregs->reset = HPC3_ERST_CLRIRQ;

	/* Always check for received packets. */
	sgiseeq_rx(dev, sp, hregs, sregs);

	/* Only check for tx acks if we have something queued. */
	if (sp->tx_old != sp->tx_new)
		sgiseeq_tx(dev, sp, hregs, sregs);

	if ((TX_BUFFS_AVAIL(sp) > 0) && netif_queue_stopped(dev)) {
		netif_wake_queue(dev);
	}
	spin_unlock(&sp->tx_lock);

	return IRQ_HANDLED;
}

static int sgiseeq_open(struct net_device *dev)
{
	struct sgiseeq_private *sp = netdev_priv(dev);
	struct sgiseeq_regs *sregs = sp->sregs;
	unsigned int irq = dev->irq;
	int err;

	if (request_irq(irq, sgiseeq_interrupt, 0, sgiseeqstr, dev)) {
		printk(KERN_ERR "Seeq8003: Can't get irq %d\n", dev->irq);
		return -EAGAIN;
	}

	err = init_seeq(dev, sp, sregs);
	if (err)
		goto out_free_irq;

	netif_start_queue(dev);

	return 0;

out_free_irq:
	free_irq(irq, dev);

	return err;
}

static int sgiseeq_close(struct net_device *dev)
{
	struct sgiseeq_private *sp = netdev_priv(dev);
	struct sgiseeq_regs *sregs = sp->sregs;
	unsigned int irq = dev->irq;

	netif_stop_queue(dev);

	/* Shutdown the Seeq. */
	reset_hpc3_and_seeq(sp->hregs, sregs);
	free_irq(irq, dev);
	seeq_purge_ring(dev);

	return 0;
}

static inline int sgiseeq_reset(struct net_device *dev)
{
	struct sgiseeq_private *sp = netdev_priv(dev);
	struct sgiseeq_regs *sregs = sp->sregs;
	int err;

	err = init_seeq(dev, sp, sregs);
	if (err)
		return err;

	dev->trans_start = jiffies; /* prevent tx timeout */
	netif_wake_queue(dev);

	return 0;
}
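
/*
 * Queue a frame for transmission.  Frames shorter than ETH_ZLEN are
 * zero-padded via skb_padto() before being handed to the HPC, so a full
 * minimum-length frame always goes out on the wire.
 */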

static int sgiseeq_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct sgiseeq_private *sp = netdev_priv(dev);
	struct hpc3_ethregs *hregs = sp->hregs;
	unsigned long flags;
	struct sgiseeq_tx_desc *td;
	int len, entry;

	spin_lock_irqsave(&sp->tx_lock, flags);

	/* Setup... */
	len = skb->len;
	if (len < ETH_ZLEN) {
		if (skb_padto(skb, ETH_ZLEN)) {
			spin_unlock_irqrestore(&sp->tx_lock, flags);
			return NETDEV_TX_OK;
		}
		len = ETH_ZLEN;
	}

	dev->stats.tx_bytes += len;
	entry = sp->tx_new;
	td = &sp->tx_desc[entry];
	dma_sync_desc_cpu(dev, td);

	/* Create entry.  There are so many races with adding a new
	 * descriptor to the chain:
	 * 1) Assume that the HPC is off processing a DMA chain while
	 *    we are changing all of the following.
	 * 2) Do not allow the HPC to look at a new descriptor until
	 *    we have completely set up its state.  This means, do
	 *    not clear HPCDMA_EOX in the current last descriptor
	 *    until the one we are adding looks consistent and could
	 *    be processed right now.
	 * 3) The tx interrupt code must notice when we've added a new
	 *    entry and the HPC got to the end of the chain before we
	 *    added this new entry and restarted it.
	 */
	td->skb = skb;
	td->tdma.pbuf = dma_map_single(dev->dev.parent, skb->data,
				       len, DMA_TO_DEVICE);
	td->tdma.cntinfo = (len & HPCDMA_BCNT) |
			   HPCDMA_XIU | HPCDMA_EOXP | HPCDMA_XIE | HPCDMA_EOX;
	dma_sync_desc_dev(dev, td);
	if (sp->tx_old != sp->tx_new) {
		struct sgiseeq_tx_desc *backend;

		backend = &sp->tx_desc[PREV_TX(sp->tx_new)];
		dma_sync_desc_cpu(dev, backend);
		backend->tdma.cntinfo &= ~HPCDMA_EOX;
		dma_sync_desc_dev(dev, backend);
	}
	sp->tx_new = NEXT_TX(sp->tx_new); /* Advance. */

	/* Maybe kick the HPC back into motion. */
	if (!(hregs->tx_ctrl & HPC3_ETXCTRL_ACTIVE))
		kick_tx(dev, sp, hregs);

	if (!TX_BUFFS_AVAIL(sp))
		netif_stop_queue(dev);
	spin_unlock_irqrestore(&sp->tx_lock, flags);

	return NETDEV_TX_OK;
}

static void timeout(struct net_device *dev)
{
	printk(KERN_NOTICE "%s: transmit timed out, resetting\n", dev->name);
	sgiseeq_reset(dev);

	dev->trans_start = jiffies; /* prevent tx timeout */
	netif_wake_queue(dev);
}

static void sgiseeq_set_multicast(struct net_device *dev)
{
	struct sgiseeq_private *sp = netdev_priv(dev);
	unsigned char oldmode = sp->mode;

	if (dev->flags & IFF_PROMISC)
		sp->mode = SEEQ_RCMD_RANY;
	else if ((dev->flags & IFF_ALLMULTI) || !netdev_mc_empty(dev))
		sp->mode = SEEQ_RCMD_RBMCAST;
	else
		sp->mode = SEEQ_RCMD_RBCAST;

	/* XXX I know this sucks, but is there a better way to reprogram
	 * XXX the receiver?  At least, this shouldn't happen too often.
	 */

	if (oldmode != sp->mode)
		sgiseeq_reset(dev);
}
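
/*
 * Chain the descriptors through their pnext fields into a circular list,
 * with the last entry pointing back at the first, so the HPC can walk
 * the ring without CPU intervention.
 */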

static inline void setup_tx_ring(struct net_device *dev,
				 struct sgiseeq_tx_desc *buf,
				 int nbufs)
{
	struct sgiseeq_private *sp = netdev_priv(dev);
	int i = 0;

	while (i < (nbufs - 1)) {
		buf[i].tdma.pnext = VIRT_TO_DMA(sp, buf + i + 1);
		buf[i].tdma.pbuf = 0;
		dma_sync_desc_dev(dev, &buf[i]);
		i++;
	}
	buf[i].tdma.pnext = VIRT_TO_DMA(sp, buf);
	dma_sync_desc_dev(dev, &buf[i]);
}

static inline void setup_rx_ring(struct net_device *dev,
				 struct sgiseeq_rx_desc *buf,
				 int nbufs)
{
	struct sgiseeq_private *sp = netdev_priv(dev);
	int i = 0;

	while (i < (nbufs - 1)) {
		buf[i].rdma.pnext = VIRT_TO_DMA(sp, buf + i + 1);
		buf[i].rdma.pbuf = 0;
		dma_sync_desc_dev(dev, &buf[i]);
		i++;
	}
	buf[i].rdma.pbuf = 0;
	buf[i].rdma.pnext = VIRT_TO_DMA(sp, buf);
	dma_sync_desc_dev(dev, &buf[i]);
}

static const struct net_device_ops sgiseeq_netdev_ops = {
	.ndo_open		= sgiseeq_open,
	.ndo_stop		= sgiseeq_close,
	.ndo_start_xmit		= sgiseeq_start_xmit,
	.ndo_tx_timeout		= timeout,
	.ndo_set_rx_mode	= sgiseeq_set_multicast,
	.ndo_set_mac_address	= sgiseeq_set_mac_address,
	.ndo_change_mtu		= eth_change_mtu,
	.ndo_validate_addr	= eth_validate_addr,
};

static int __devinit sgiseeq_probe(struct platform_device *pdev)
{
	struct sgiseeq_platform_data *pd = pdev->dev.platform_data;
	struct hpc3_regs *hpcregs = pd->hpc;
	struct sgiseeq_init_block *sr;
	unsigned int irq = pd->irq;
	struct sgiseeq_private *sp;
	struct net_device *dev;
	int err;

	dev = alloc_etherdev(sizeof (struct sgiseeq_private));
	if (!dev) {
		printk(KERN_ERR "Sgiseeq: Etherdev alloc failed, aborting.\n");
		err = -ENOMEM;
		goto err_out;
	}

	platform_set_drvdata(pdev, dev);
	sp = netdev_priv(dev);

	/* Make private data page aligned */
	sr = dma_alloc_noncoherent(&pdev->dev, sizeof(*sp->srings),
				   &sp->srings_dma, GFP_KERNEL);
	if (!sr) {
		printk(KERN_ERR "Sgiseeq: Page alloc failed, aborting.\n");
		err = -ENOMEM;
		goto err_out_free_dev;
	}
	sp->srings = sr;
	sp->rx_desc = sp->srings->rxvector;
	sp->tx_desc = sp->srings->txvector;

	/* A couple calculations now, saves many cycles later. */
	setup_rx_ring(dev, sp->rx_desc, SEEQ_RX_BUFFERS);
	setup_tx_ring(dev, sp->tx_desc, SEEQ_TX_BUFFERS);

	memcpy(dev->dev_addr, pd->mac, ETH_ALEN);

#ifdef DEBUG
	gpriv = sp;
	gdev = dev;
#endif
	sp->sregs = (struct sgiseeq_regs *) &hpcregs->eth_ext[0];
	sp->hregs = &hpcregs->ethregs;
	sp->name = sgiseeqstr;
	sp->mode = SEEQ_RCMD_RBCAST;

	/* Setup PIO and DMA transfer timing */
	sp->hregs->pconfig = 0x161;
	sp->hregs->dconfig = HPC3_EDCFG_FIRQ | HPC3_EDCFG_FEOP |
			     HPC3_EDCFG_FRXDC | HPC3_EDCFG_PTO | 0x026;

	/* Reset the chip. */
	hpc3_eth_reset(sp->hregs);

	sp->is_edlc = !(sp->sregs->rw.rregs.collision_tx[0] & 0xff);
	if (sp->is_edlc)
		sp->control = SEEQ_CTRL_XCNT | SEEQ_CTRL_ACCNT |
			      SEEQ_CTRL_SFLAG | SEEQ_CTRL_ESHORT |
			      SEEQ_CTRL_ENCARR;

	dev->netdev_ops		= &sgiseeq_netdev_ops;
	dev->watchdog_timeo	= (200 * HZ) / 1000;
	dev->irq		= irq;

	if (register_netdev(dev)) {
		printk(KERN_ERR "Sgiseeq: Cannot register net device, "
		       "aborting.\n");
		err = -ENODEV;
		goto err_out_free_page;
	}

	printk(KERN_INFO "%s: %s %pM\n", dev->name, sgiseeqstr, dev->dev_addr);

	return 0;

err_out_free_page:
	/* The ring block came from dma_alloc_noncoherent(), so free it the
	 * same way the remove path does rather than with free_page(). */
	dma_free_noncoherent(&pdev->dev, sizeof(*sp->srings), sp->srings,
			     sp->srings_dma);
err_out_free_dev:
	free_netdev(dev);

err_out:
	return err;
}

static int __exit sgiseeq_remove(struct platform_device *pdev)
{
	struct net_device *dev = platform_get_drvdata(pdev);
	struct sgiseeq_private *sp = netdev_priv(dev);

	unregister_netdev(dev);
	dma_free_noncoherent(&pdev->dev, sizeof(*sp->srings), sp->srings,
			     sp->srings_dma);
	free_netdev(dev);
	platform_set_drvdata(pdev, NULL);

	return 0;
}

static struct platform_driver sgiseeq_driver = {
	.probe	= sgiseeq_probe,
	.remove	= __exit_p(sgiseeq_remove),
	.driver = {
		.name	= "sgiseeq",
		.owner	= THIS_MODULE,
	}
};

module_platform_driver(sgiseeq_driver);

MODULE_DESCRIPTION("SGI Seeq 8003 driver");
MODULE_AUTHOR("Linux/MIPS Mailing List <linux-mips@linux-mips.org>");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:sgiseeq");