19e13fbf7SJeff Kirsher /* 29e13fbf7SJeff Kirsher * sgiseeq.c: Seeq8003 ethernet driver for SGI machines. 39e13fbf7SJeff Kirsher * 49e13fbf7SJeff Kirsher * Copyright (C) 1996 David S. Miller (davem@davemloft.net) 59e13fbf7SJeff Kirsher */ 69e13fbf7SJeff Kirsher 79e13fbf7SJeff Kirsher #undef DEBUG 89e13fbf7SJeff Kirsher 99e13fbf7SJeff Kirsher #include <linux/dma-mapping.h> 109e13fbf7SJeff Kirsher #include <linux/kernel.h> 119e13fbf7SJeff Kirsher #include <linux/module.h> 129e13fbf7SJeff Kirsher #include <linux/slab.h> 139e13fbf7SJeff Kirsher #include <linux/errno.h> 149e13fbf7SJeff Kirsher #include <linux/types.h> 159e13fbf7SJeff Kirsher #include <linux/interrupt.h> 169e13fbf7SJeff Kirsher #include <linux/string.h> 179e13fbf7SJeff Kirsher #include <linux/delay.h> 189e13fbf7SJeff Kirsher #include <linux/netdevice.h> 199e13fbf7SJeff Kirsher #include <linux/platform_device.h> 209e13fbf7SJeff Kirsher #include <linux/etherdevice.h> 219e13fbf7SJeff Kirsher #include <linux/skbuff.h> 229e13fbf7SJeff Kirsher 239e13fbf7SJeff Kirsher #include <asm/sgi/hpc3.h> 249e13fbf7SJeff Kirsher #include <asm/sgi/ip22.h> 259e13fbf7SJeff Kirsher #include <asm/sgi/seeq.h> 269e13fbf7SJeff Kirsher 279e13fbf7SJeff Kirsher #include "sgiseeq.h" 289e13fbf7SJeff Kirsher 299e13fbf7SJeff Kirsher static char *sgiseeqstr = "SGI Seeq8003"; 309e13fbf7SJeff Kirsher 319e13fbf7SJeff Kirsher /* 329e13fbf7SJeff Kirsher * If you want speed, you do something silly, it always has worked for me. So, 339e13fbf7SJeff Kirsher * with that in mind, I've decided to make this driver look completely like a 349e13fbf7SJeff Kirsher * stupid Lance from a driver architecture perspective. Only difference is that 359e13fbf7SJeff Kirsher * here our "ring buffer" looks and acts like a real Lance one does but is 369e13fbf7SJeff Kirsher * laid out like how the HPC DMA and the Seeq want it to. You'd be surprised 379e13fbf7SJeff Kirsher * how a stupid idea like this can pay off in performance, not to mention 389e13fbf7SJeff Kirsher * making this driver 2,000 times easier to write. ;-) 399e13fbf7SJeff Kirsher */ 409e13fbf7SJeff Kirsher 419e13fbf7SJeff Kirsher /* Tune these if we tend to run out often etc. */ 429e13fbf7SJeff Kirsher #define SEEQ_RX_BUFFERS 16 439e13fbf7SJeff Kirsher #define SEEQ_TX_BUFFERS 16 449e13fbf7SJeff Kirsher 459e13fbf7SJeff Kirsher #define PKT_BUF_SZ 1584 469e13fbf7SJeff Kirsher 479e13fbf7SJeff Kirsher #define NEXT_RX(i) (((i) + 1) & (SEEQ_RX_BUFFERS - 1)) 489e13fbf7SJeff Kirsher #define NEXT_TX(i) (((i) + 1) & (SEEQ_TX_BUFFERS - 1)) 499e13fbf7SJeff Kirsher #define PREV_RX(i) (((i) - 1) & (SEEQ_RX_BUFFERS - 1)) 509e13fbf7SJeff Kirsher #define PREV_TX(i) (((i) - 1) & (SEEQ_TX_BUFFERS - 1)) 519e13fbf7SJeff Kirsher 529e13fbf7SJeff Kirsher #define TX_BUFFS_AVAIL(sp) ((sp->tx_old <= sp->tx_new) ? \ 539e13fbf7SJeff Kirsher sp->tx_old + (SEEQ_TX_BUFFERS - 1) - sp->tx_new : \ 549e13fbf7SJeff Kirsher sp->tx_old - sp->tx_new - 1) 559e13fbf7SJeff Kirsher 569e13fbf7SJeff Kirsher #define VIRT_TO_DMA(sp, v) ((sp)->srings_dma + \ 579e13fbf7SJeff Kirsher (dma_addr_t)((unsigned long)(v) - \ 589e13fbf7SJeff Kirsher (unsigned long)((sp)->rx_desc))) 599e13fbf7SJeff Kirsher 609e13fbf7SJeff Kirsher /* Copy frames shorter than rx_copybreak, otherwise pass on up in 619e13fbf7SJeff Kirsher * a full sized sk_buff. Value of 100 stolen from tulip.c (!alpha). 629e13fbf7SJeff Kirsher */ 639e13fbf7SJeff Kirsher static int rx_copybreak = 100; 649e13fbf7SJeff Kirsher 659e13fbf7SJeff Kirsher #define PAD_SIZE (128 - sizeof(struct hpc_dma_desc) - sizeof(void *)) 669e13fbf7SJeff Kirsher 679e13fbf7SJeff Kirsher struct sgiseeq_rx_desc { 689e13fbf7SJeff Kirsher volatile struct hpc_dma_desc rdma; 699e13fbf7SJeff Kirsher u8 padding[PAD_SIZE]; 709e13fbf7SJeff Kirsher struct sk_buff *skb; 719e13fbf7SJeff Kirsher }; 729e13fbf7SJeff Kirsher 739e13fbf7SJeff Kirsher struct sgiseeq_tx_desc { 749e13fbf7SJeff Kirsher volatile struct hpc_dma_desc tdma; 759e13fbf7SJeff Kirsher u8 padding[PAD_SIZE]; 769e13fbf7SJeff Kirsher struct sk_buff *skb; 779e13fbf7SJeff Kirsher }; 789e13fbf7SJeff Kirsher 799e13fbf7SJeff Kirsher /* 809e13fbf7SJeff Kirsher * Warning: This structure is laid out in a certain way because HPC dma 819e13fbf7SJeff Kirsher * descriptors must be 8-byte aligned. So don't touch this without 829e13fbf7SJeff Kirsher * some care. 839e13fbf7SJeff Kirsher */ 849e13fbf7SJeff Kirsher struct sgiseeq_init_block { /* Note the name ;-) */ 859e13fbf7SJeff Kirsher struct sgiseeq_rx_desc rxvector[SEEQ_RX_BUFFERS]; 869e13fbf7SJeff Kirsher struct sgiseeq_tx_desc txvector[SEEQ_TX_BUFFERS]; 879e13fbf7SJeff Kirsher }; 889e13fbf7SJeff Kirsher 899e13fbf7SJeff Kirsher struct sgiseeq_private { 909e13fbf7SJeff Kirsher struct sgiseeq_init_block *srings; 919e13fbf7SJeff Kirsher dma_addr_t srings_dma; 929e13fbf7SJeff Kirsher 939e13fbf7SJeff Kirsher /* Ptrs to the descriptors in uncached space. */ 949e13fbf7SJeff Kirsher struct sgiseeq_rx_desc *rx_desc; 959e13fbf7SJeff Kirsher struct sgiseeq_tx_desc *tx_desc; 969e13fbf7SJeff Kirsher 979e13fbf7SJeff Kirsher char *name; 989e13fbf7SJeff Kirsher struct hpc3_ethregs *hregs; 999e13fbf7SJeff Kirsher struct sgiseeq_regs *sregs; 1009e13fbf7SJeff Kirsher 1019e13fbf7SJeff Kirsher /* Ring entry counters. */ 1029e13fbf7SJeff Kirsher unsigned int rx_new, tx_new; 1039e13fbf7SJeff Kirsher unsigned int rx_old, tx_old; 1049e13fbf7SJeff Kirsher 1059e13fbf7SJeff Kirsher int is_edlc; 1069e13fbf7SJeff Kirsher unsigned char control; 1079e13fbf7SJeff Kirsher unsigned char mode; 1089e13fbf7SJeff Kirsher 1099e13fbf7SJeff Kirsher spinlock_t tx_lock; 1109e13fbf7SJeff Kirsher }; 1119e13fbf7SJeff Kirsher 1129e13fbf7SJeff Kirsher static inline void dma_sync_desc_cpu(struct net_device *dev, void *addr) 1139e13fbf7SJeff Kirsher { 1149e13fbf7SJeff Kirsher dma_cache_sync(dev->dev.parent, addr, sizeof(struct sgiseeq_rx_desc), 1159e13fbf7SJeff Kirsher DMA_FROM_DEVICE); 1169e13fbf7SJeff Kirsher } 1179e13fbf7SJeff Kirsher 1189e13fbf7SJeff Kirsher static inline void dma_sync_desc_dev(struct net_device *dev, void *addr) 1199e13fbf7SJeff Kirsher { 1209e13fbf7SJeff Kirsher dma_cache_sync(dev->dev.parent, addr, sizeof(struct sgiseeq_rx_desc), 1219e13fbf7SJeff Kirsher DMA_TO_DEVICE); 1229e13fbf7SJeff Kirsher } 1239e13fbf7SJeff Kirsher 1249e13fbf7SJeff Kirsher static inline void hpc3_eth_reset(struct hpc3_ethregs *hregs) 1259e13fbf7SJeff Kirsher { 1269e13fbf7SJeff Kirsher hregs->reset = HPC3_ERST_CRESET | HPC3_ERST_CLRIRQ; 1279e13fbf7SJeff Kirsher udelay(20); 1289e13fbf7SJeff Kirsher hregs->reset = 0; 1299e13fbf7SJeff Kirsher } 1309e13fbf7SJeff Kirsher 1319e13fbf7SJeff Kirsher static inline void reset_hpc3_and_seeq(struct hpc3_ethregs *hregs, 1329e13fbf7SJeff Kirsher struct sgiseeq_regs *sregs) 1339e13fbf7SJeff Kirsher { 1349e13fbf7SJeff Kirsher hregs->rx_ctrl = hregs->tx_ctrl = 0; 1359e13fbf7SJeff Kirsher hpc3_eth_reset(hregs); 1369e13fbf7SJeff Kirsher } 1379e13fbf7SJeff Kirsher 1389e13fbf7SJeff Kirsher #define RSTAT_GO_BITS (SEEQ_RCMD_IGOOD | SEEQ_RCMD_IEOF | SEEQ_RCMD_ISHORT | \ 1399e13fbf7SJeff Kirsher SEEQ_RCMD_IDRIB | SEEQ_RCMD_ICRC) 1409e13fbf7SJeff Kirsher 1419e13fbf7SJeff Kirsher static inline void seeq_go(struct sgiseeq_private *sp, 1429e13fbf7SJeff Kirsher struct hpc3_ethregs *hregs, 1439e13fbf7SJeff Kirsher struct sgiseeq_regs *sregs) 1449e13fbf7SJeff Kirsher { 1459e13fbf7SJeff Kirsher sregs->rstat = sp->mode | RSTAT_GO_BITS; 1469e13fbf7SJeff Kirsher hregs->rx_ctrl = HPC3_ERXCTRL_ACTIVE; 1479e13fbf7SJeff Kirsher } 1489e13fbf7SJeff Kirsher 1499e13fbf7SJeff Kirsher static inline void __sgiseeq_set_mac_address(struct net_device *dev) 1509e13fbf7SJeff Kirsher { 1519e13fbf7SJeff Kirsher struct sgiseeq_private *sp = netdev_priv(dev); 1529e13fbf7SJeff Kirsher struct sgiseeq_regs *sregs = sp->sregs; 1539e13fbf7SJeff Kirsher int i; 1549e13fbf7SJeff Kirsher 1559e13fbf7SJeff Kirsher sregs->tstat = SEEQ_TCMD_RB0; 1569e13fbf7SJeff Kirsher for (i = 0; i < 6; i++) 1579e13fbf7SJeff Kirsher sregs->rw.eth_addr[i] = dev->dev_addr[i]; 1589e13fbf7SJeff Kirsher } 1599e13fbf7SJeff Kirsher 1609e13fbf7SJeff Kirsher static int sgiseeq_set_mac_address(struct net_device *dev, void *addr) 1619e13fbf7SJeff Kirsher { 1629e13fbf7SJeff Kirsher struct sgiseeq_private *sp = netdev_priv(dev); 1639e13fbf7SJeff Kirsher struct sockaddr *sa = addr; 1649e13fbf7SJeff Kirsher 1659e13fbf7SJeff Kirsher memcpy(dev->dev_addr, sa->sa_data, dev->addr_len); 1669e13fbf7SJeff Kirsher 1679e13fbf7SJeff Kirsher spin_lock_irq(&sp->tx_lock); 1689e13fbf7SJeff Kirsher __sgiseeq_set_mac_address(dev); 1699e13fbf7SJeff Kirsher spin_unlock_irq(&sp->tx_lock); 1709e13fbf7SJeff Kirsher 1719e13fbf7SJeff Kirsher return 0; 1729e13fbf7SJeff Kirsher } 1739e13fbf7SJeff Kirsher 1749e13fbf7SJeff Kirsher #define TCNTINFO_INIT (HPCDMA_EOX | HPCDMA_ETXD) 1759e13fbf7SJeff Kirsher #define RCNTCFG_INIT (HPCDMA_OWN | HPCDMA_EORP | HPCDMA_XIE) 1769e13fbf7SJeff Kirsher #define RCNTINFO_INIT (RCNTCFG_INIT | (PKT_BUF_SZ & HPCDMA_BCNT)) 1779e13fbf7SJeff Kirsher 1789e13fbf7SJeff Kirsher static int seeq_init_ring(struct net_device *dev) 1799e13fbf7SJeff Kirsher { 1809e13fbf7SJeff Kirsher struct sgiseeq_private *sp = netdev_priv(dev); 1819e13fbf7SJeff Kirsher int i; 1829e13fbf7SJeff Kirsher 1839e13fbf7SJeff Kirsher netif_stop_queue(dev); 1849e13fbf7SJeff Kirsher sp->rx_new = sp->tx_new = 0; 1859e13fbf7SJeff Kirsher sp->rx_old = sp->tx_old = 0; 1869e13fbf7SJeff Kirsher 1879e13fbf7SJeff Kirsher __sgiseeq_set_mac_address(dev); 1889e13fbf7SJeff Kirsher 1899e13fbf7SJeff Kirsher /* Setup tx ring. */ 1909e13fbf7SJeff Kirsher for(i = 0; i < SEEQ_TX_BUFFERS; i++) { 1919e13fbf7SJeff Kirsher sp->tx_desc[i].tdma.cntinfo = TCNTINFO_INIT; 1929e13fbf7SJeff Kirsher dma_sync_desc_dev(dev, &sp->tx_desc[i]); 1939e13fbf7SJeff Kirsher } 1949e13fbf7SJeff Kirsher 1959e13fbf7SJeff Kirsher /* And now the rx ring. */ 1969e13fbf7SJeff Kirsher for (i = 0; i < SEEQ_RX_BUFFERS; i++) { 1979e13fbf7SJeff Kirsher if (!sp->rx_desc[i].skb) { 1989e13fbf7SJeff Kirsher dma_addr_t dma_addr; 1999e13fbf7SJeff Kirsher struct sk_buff *skb = netdev_alloc_skb(dev, PKT_BUF_SZ); 2009e13fbf7SJeff Kirsher 2019e13fbf7SJeff Kirsher if (skb == NULL) 2029e13fbf7SJeff Kirsher return -ENOMEM; 2039e13fbf7SJeff Kirsher skb_reserve(skb, 2); 2049e13fbf7SJeff Kirsher dma_addr = dma_map_single(dev->dev.parent, 2059e13fbf7SJeff Kirsher skb->data - 2, 2069e13fbf7SJeff Kirsher PKT_BUF_SZ, DMA_FROM_DEVICE); 2079e13fbf7SJeff Kirsher sp->rx_desc[i].skb = skb; 2089e13fbf7SJeff Kirsher sp->rx_desc[i].rdma.pbuf = dma_addr; 2099e13fbf7SJeff Kirsher } 2109e13fbf7SJeff Kirsher sp->rx_desc[i].rdma.cntinfo = RCNTINFO_INIT; 2119e13fbf7SJeff Kirsher dma_sync_desc_dev(dev, &sp->rx_desc[i]); 2129e13fbf7SJeff Kirsher } 2139e13fbf7SJeff Kirsher sp->rx_desc[i - 1].rdma.cntinfo |= HPCDMA_EOR; 2149e13fbf7SJeff Kirsher dma_sync_desc_dev(dev, &sp->rx_desc[i - 1]); 2159e13fbf7SJeff Kirsher return 0; 2169e13fbf7SJeff Kirsher } 2179e13fbf7SJeff Kirsher 2189e13fbf7SJeff Kirsher static void seeq_purge_ring(struct net_device *dev) 2199e13fbf7SJeff Kirsher { 2209e13fbf7SJeff Kirsher struct sgiseeq_private *sp = netdev_priv(dev); 2219e13fbf7SJeff Kirsher int i; 2229e13fbf7SJeff Kirsher 2239e13fbf7SJeff Kirsher /* clear tx ring. */ 2249e13fbf7SJeff Kirsher for (i = 0; i < SEEQ_TX_BUFFERS; i++) { 2259e13fbf7SJeff Kirsher if (sp->tx_desc[i].skb) { 2269e13fbf7SJeff Kirsher dev_kfree_skb(sp->tx_desc[i].skb); 2279e13fbf7SJeff Kirsher sp->tx_desc[i].skb = NULL; 2289e13fbf7SJeff Kirsher } 2299e13fbf7SJeff Kirsher } 2309e13fbf7SJeff Kirsher 2319e13fbf7SJeff Kirsher /* And now the rx ring. */ 2329e13fbf7SJeff Kirsher for (i = 0; i < SEEQ_RX_BUFFERS; i++) { 2339e13fbf7SJeff Kirsher if (sp->rx_desc[i].skb) { 2349e13fbf7SJeff Kirsher dev_kfree_skb(sp->rx_desc[i].skb); 2359e13fbf7SJeff Kirsher sp->rx_desc[i].skb = NULL; 2369e13fbf7SJeff Kirsher } 2379e13fbf7SJeff Kirsher } 2389e13fbf7SJeff Kirsher } 2399e13fbf7SJeff Kirsher 2409e13fbf7SJeff Kirsher #ifdef DEBUG 2419e13fbf7SJeff Kirsher static struct sgiseeq_private *gpriv; 2429e13fbf7SJeff Kirsher static struct net_device *gdev; 2439e13fbf7SJeff Kirsher 2449e13fbf7SJeff Kirsher static void sgiseeq_dump_rings(void) 2459e13fbf7SJeff Kirsher { 2469e13fbf7SJeff Kirsher static int once; 2479e13fbf7SJeff Kirsher struct sgiseeq_rx_desc *r = gpriv->rx_desc; 2489e13fbf7SJeff Kirsher struct sgiseeq_tx_desc *t = gpriv->tx_desc; 2499e13fbf7SJeff Kirsher struct hpc3_ethregs *hregs = gpriv->hregs; 2509e13fbf7SJeff Kirsher int i; 2519e13fbf7SJeff Kirsher 2529e13fbf7SJeff Kirsher if (once) 2539e13fbf7SJeff Kirsher return; 2549e13fbf7SJeff Kirsher once++; 2559e13fbf7SJeff Kirsher printk("RING DUMP:\n"); 2569e13fbf7SJeff Kirsher for (i = 0; i < SEEQ_RX_BUFFERS; i++) { 2579e13fbf7SJeff Kirsher printk("RX [%d]: @(%p) [%08x,%08x,%08x] ", 2589e13fbf7SJeff Kirsher i, (&r[i]), r[i].rdma.pbuf, r[i].rdma.cntinfo, 2599e13fbf7SJeff Kirsher r[i].rdma.pnext); 2609e13fbf7SJeff Kirsher i += 1; 2619e13fbf7SJeff Kirsher printk("-- [%d]: @(%p) [%08x,%08x,%08x]\n", 2629e13fbf7SJeff Kirsher i, (&r[i]), r[i].rdma.pbuf, r[i].rdma.cntinfo, 2639e13fbf7SJeff Kirsher r[i].rdma.pnext); 2649e13fbf7SJeff Kirsher } 2659e13fbf7SJeff Kirsher for (i = 0; i < SEEQ_TX_BUFFERS; i++) { 2669e13fbf7SJeff Kirsher printk("TX [%d]: @(%p) [%08x,%08x,%08x] ", 2679e13fbf7SJeff Kirsher i, (&t[i]), t[i].tdma.pbuf, t[i].tdma.cntinfo, 2689e13fbf7SJeff Kirsher t[i].tdma.pnext); 2699e13fbf7SJeff Kirsher i += 1; 2709e13fbf7SJeff Kirsher printk("-- [%d]: @(%p) [%08x,%08x,%08x]\n", 2719e13fbf7SJeff Kirsher i, (&t[i]), t[i].tdma.pbuf, t[i].tdma.cntinfo, 2729e13fbf7SJeff Kirsher t[i].tdma.pnext); 2739e13fbf7SJeff Kirsher } 2749e13fbf7SJeff Kirsher printk("INFO: [rx_new = %d rx_old=%d] [tx_new = %d tx_old = %d]\n", 2759e13fbf7SJeff Kirsher gpriv->rx_new, gpriv->rx_old, gpriv->tx_new, gpriv->tx_old); 2769e13fbf7SJeff Kirsher printk("RREGS: rx_cbptr[%08x] rx_ndptr[%08x] rx_ctrl[%08x]\n", 2779e13fbf7SJeff Kirsher hregs->rx_cbptr, hregs->rx_ndptr, hregs->rx_ctrl); 2789e13fbf7SJeff Kirsher printk("TREGS: tx_cbptr[%08x] tx_ndptr[%08x] tx_ctrl[%08x]\n", 2799e13fbf7SJeff Kirsher hregs->tx_cbptr, hregs->tx_ndptr, hregs->tx_ctrl); 2809e13fbf7SJeff Kirsher } 2819e13fbf7SJeff Kirsher #endif 2829e13fbf7SJeff Kirsher 2839e13fbf7SJeff Kirsher #define TSTAT_INIT_SEEQ (SEEQ_TCMD_IPT|SEEQ_TCMD_I16|SEEQ_TCMD_IC|SEEQ_TCMD_IUF) 2849e13fbf7SJeff Kirsher #define TSTAT_INIT_EDLC ((TSTAT_INIT_SEEQ) | SEEQ_TCMD_RB2) 2859e13fbf7SJeff Kirsher 2869e13fbf7SJeff Kirsher static int init_seeq(struct net_device *dev, struct sgiseeq_private *sp, 2879e13fbf7SJeff Kirsher struct sgiseeq_regs *sregs) 2889e13fbf7SJeff Kirsher { 2899e13fbf7SJeff Kirsher struct hpc3_ethregs *hregs = sp->hregs; 2909e13fbf7SJeff Kirsher int err; 2919e13fbf7SJeff Kirsher 2929e13fbf7SJeff Kirsher reset_hpc3_and_seeq(hregs, sregs); 2939e13fbf7SJeff Kirsher err = seeq_init_ring(dev); 2949e13fbf7SJeff Kirsher if (err) 2959e13fbf7SJeff Kirsher return err; 2969e13fbf7SJeff Kirsher 2979e13fbf7SJeff Kirsher /* Setup to field the proper interrupt types. */ 2989e13fbf7SJeff Kirsher if (sp->is_edlc) { 2999e13fbf7SJeff Kirsher sregs->tstat = TSTAT_INIT_EDLC; 3009e13fbf7SJeff Kirsher sregs->rw.wregs.control = sp->control; 3019e13fbf7SJeff Kirsher sregs->rw.wregs.frame_gap = 0; 3029e13fbf7SJeff Kirsher } else { 3039e13fbf7SJeff Kirsher sregs->tstat = TSTAT_INIT_SEEQ; 3049e13fbf7SJeff Kirsher } 3059e13fbf7SJeff Kirsher 3069e13fbf7SJeff Kirsher hregs->rx_ndptr = VIRT_TO_DMA(sp, sp->rx_desc); 3079e13fbf7SJeff Kirsher hregs->tx_ndptr = VIRT_TO_DMA(sp, sp->tx_desc); 3089e13fbf7SJeff Kirsher 3099e13fbf7SJeff Kirsher seeq_go(sp, hregs, sregs); 3109e13fbf7SJeff Kirsher return 0; 3119e13fbf7SJeff Kirsher } 3129e13fbf7SJeff Kirsher 3139e13fbf7SJeff Kirsher static void record_rx_errors(struct net_device *dev, unsigned char status) 3149e13fbf7SJeff Kirsher { 3159e13fbf7SJeff Kirsher if (status & SEEQ_RSTAT_OVERF || 3169e13fbf7SJeff Kirsher status & SEEQ_RSTAT_SFRAME) 3179e13fbf7SJeff Kirsher dev->stats.rx_over_errors++; 3189e13fbf7SJeff Kirsher if (status & SEEQ_RSTAT_CERROR) 3199e13fbf7SJeff Kirsher dev->stats.rx_crc_errors++; 3209e13fbf7SJeff Kirsher if (status & SEEQ_RSTAT_DERROR) 3219e13fbf7SJeff Kirsher dev->stats.rx_frame_errors++; 3229e13fbf7SJeff Kirsher if (status & SEEQ_RSTAT_REOF) 3239e13fbf7SJeff Kirsher dev->stats.rx_errors++; 3249e13fbf7SJeff Kirsher } 3259e13fbf7SJeff Kirsher 3269e13fbf7SJeff Kirsher static inline void rx_maybe_restart(struct sgiseeq_private *sp, 3279e13fbf7SJeff Kirsher struct hpc3_ethregs *hregs, 3289e13fbf7SJeff Kirsher struct sgiseeq_regs *sregs) 3299e13fbf7SJeff Kirsher { 3309e13fbf7SJeff Kirsher if (!(hregs->rx_ctrl & HPC3_ERXCTRL_ACTIVE)) { 3319e13fbf7SJeff Kirsher hregs->rx_ndptr = VIRT_TO_DMA(sp, sp->rx_desc + sp->rx_new); 3329e13fbf7SJeff Kirsher seeq_go(sp, hregs, sregs); 3339e13fbf7SJeff Kirsher } 3349e13fbf7SJeff Kirsher } 3359e13fbf7SJeff Kirsher 3369e13fbf7SJeff Kirsher static inline void sgiseeq_rx(struct net_device *dev, struct sgiseeq_private *sp, 3379e13fbf7SJeff Kirsher struct hpc3_ethregs *hregs, 3389e13fbf7SJeff Kirsher struct sgiseeq_regs *sregs) 3399e13fbf7SJeff Kirsher { 3409e13fbf7SJeff Kirsher struct sgiseeq_rx_desc *rd; 3419e13fbf7SJeff Kirsher struct sk_buff *skb = NULL; 3429e13fbf7SJeff Kirsher struct sk_buff *newskb; 3439e13fbf7SJeff Kirsher unsigned char pkt_status; 3449e13fbf7SJeff Kirsher int len = 0; 3459e13fbf7SJeff Kirsher unsigned int orig_end = PREV_RX(sp->rx_new); 3469e13fbf7SJeff Kirsher 3479e13fbf7SJeff Kirsher /* Service every received packet. */ 3489e13fbf7SJeff Kirsher rd = &sp->rx_desc[sp->rx_new]; 3499e13fbf7SJeff Kirsher dma_sync_desc_cpu(dev, rd); 3509e13fbf7SJeff Kirsher while (!(rd->rdma.cntinfo & HPCDMA_OWN)) { 3519e13fbf7SJeff Kirsher len = PKT_BUF_SZ - (rd->rdma.cntinfo & HPCDMA_BCNT) - 3; 3529e13fbf7SJeff Kirsher dma_unmap_single(dev->dev.parent, rd->rdma.pbuf, 3539e13fbf7SJeff Kirsher PKT_BUF_SZ, DMA_FROM_DEVICE); 3549e13fbf7SJeff Kirsher pkt_status = rd->skb->data[len]; 3559e13fbf7SJeff Kirsher if (pkt_status & SEEQ_RSTAT_FIG) { 3569e13fbf7SJeff Kirsher /* Packet is OK. */ 3579e13fbf7SJeff Kirsher /* We don't want to receive our own packets */ 3589116d7b0Sdingtianhong if (!ether_addr_equal(rd->skb->data + 6, dev->dev_addr)) { 3599e13fbf7SJeff Kirsher if (len > rx_copybreak) { 3609e13fbf7SJeff Kirsher skb = rd->skb; 3619e13fbf7SJeff Kirsher newskb = netdev_alloc_skb(dev, PKT_BUF_SZ); 3629e13fbf7SJeff Kirsher if (!newskb) { 3639e13fbf7SJeff Kirsher newskb = skb; 3649e13fbf7SJeff Kirsher skb = NULL; 3659e13fbf7SJeff Kirsher goto memory_squeeze; 3669e13fbf7SJeff Kirsher } 3679e13fbf7SJeff Kirsher skb_reserve(newskb, 2); 3689e13fbf7SJeff Kirsher } else { 3699e13fbf7SJeff Kirsher skb = netdev_alloc_skb_ip_align(dev, len); 3709e13fbf7SJeff Kirsher if (skb) 3719e13fbf7SJeff Kirsher skb_copy_to_linear_data(skb, rd->skb->data, len); 3729e13fbf7SJeff Kirsher 3739e13fbf7SJeff Kirsher newskb = rd->skb; 3749e13fbf7SJeff Kirsher } 3759e13fbf7SJeff Kirsher memory_squeeze: 3769e13fbf7SJeff Kirsher if (skb) { 3779e13fbf7SJeff Kirsher skb_put(skb, len); 3789e13fbf7SJeff Kirsher skb->protocol = eth_type_trans(skb, dev); 3799e13fbf7SJeff Kirsher netif_rx(skb); 3809e13fbf7SJeff Kirsher dev->stats.rx_packets++; 3819e13fbf7SJeff Kirsher dev->stats.rx_bytes += len; 3829e13fbf7SJeff Kirsher } else { 3839e13fbf7SJeff Kirsher dev->stats.rx_dropped++; 3849e13fbf7SJeff Kirsher } 3859e13fbf7SJeff Kirsher } else { 3869e13fbf7SJeff Kirsher /* Silently drop my own packets */ 3879e13fbf7SJeff Kirsher newskb = rd->skb; 3889e13fbf7SJeff Kirsher } 3899e13fbf7SJeff Kirsher } else { 3909e13fbf7SJeff Kirsher record_rx_errors(dev, pkt_status); 3919e13fbf7SJeff Kirsher newskb = rd->skb; 3929e13fbf7SJeff Kirsher } 3939e13fbf7SJeff Kirsher rd->skb = newskb; 3949e13fbf7SJeff Kirsher rd->rdma.pbuf = dma_map_single(dev->dev.parent, 3959e13fbf7SJeff Kirsher newskb->data - 2, 3969e13fbf7SJeff Kirsher PKT_BUF_SZ, DMA_FROM_DEVICE); 3979e13fbf7SJeff Kirsher 3989e13fbf7SJeff Kirsher /* Return the entry to the ring pool. */ 3999e13fbf7SJeff Kirsher rd->rdma.cntinfo = RCNTINFO_INIT; 4009e13fbf7SJeff Kirsher sp->rx_new = NEXT_RX(sp->rx_new); 4019e13fbf7SJeff Kirsher dma_sync_desc_dev(dev, rd); 4029e13fbf7SJeff Kirsher rd = &sp->rx_desc[sp->rx_new]; 4039e13fbf7SJeff Kirsher dma_sync_desc_cpu(dev, rd); 4049e13fbf7SJeff Kirsher } 4059e13fbf7SJeff Kirsher dma_sync_desc_cpu(dev, &sp->rx_desc[orig_end]); 4069e13fbf7SJeff Kirsher sp->rx_desc[orig_end].rdma.cntinfo &= ~(HPCDMA_EOR); 4079e13fbf7SJeff Kirsher dma_sync_desc_dev(dev, &sp->rx_desc[orig_end]); 4089e13fbf7SJeff Kirsher dma_sync_desc_cpu(dev, &sp->rx_desc[PREV_RX(sp->rx_new)]); 4099e13fbf7SJeff Kirsher sp->rx_desc[PREV_RX(sp->rx_new)].rdma.cntinfo |= HPCDMA_EOR; 4109e13fbf7SJeff Kirsher dma_sync_desc_dev(dev, &sp->rx_desc[PREV_RX(sp->rx_new)]); 4119e13fbf7SJeff Kirsher rx_maybe_restart(sp, hregs, sregs); 4129e13fbf7SJeff Kirsher } 4139e13fbf7SJeff Kirsher 4149e13fbf7SJeff Kirsher static inline void tx_maybe_reset_collisions(struct sgiseeq_private *sp, 4159e13fbf7SJeff Kirsher struct sgiseeq_regs *sregs) 4169e13fbf7SJeff Kirsher { 4179e13fbf7SJeff Kirsher if (sp->is_edlc) { 4189e13fbf7SJeff Kirsher sregs->rw.wregs.control = sp->control & ~(SEEQ_CTRL_XCNT); 4199e13fbf7SJeff Kirsher sregs->rw.wregs.control = sp->control; 4209e13fbf7SJeff Kirsher } 4219e13fbf7SJeff Kirsher } 4229e13fbf7SJeff Kirsher 4239e13fbf7SJeff Kirsher static inline void kick_tx(struct net_device *dev, 4249e13fbf7SJeff Kirsher struct sgiseeq_private *sp, 4259e13fbf7SJeff Kirsher struct hpc3_ethregs *hregs) 4269e13fbf7SJeff Kirsher { 4279e13fbf7SJeff Kirsher struct sgiseeq_tx_desc *td; 4289e13fbf7SJeff Kirsher int i = sp->tx_old; 4299e13fbf7SJeff Kirsher 4309e13fbf7SJeff Kirsher /* If the HPC aint doin nothin, and there are more packets 4319e13fbf7SJeff Kirsher * with ETXD cleared and XIU set we must make very certain 4329e13fbf7SJeff Kirsher * that we restart the HPC else we risk locking up the 4339e13fbf7SJeff Kirsher * adapter. The following code is only safe iff the HPCDMA 4349e13fbf7SJeff Kirsher * is not active! 4359e13fbf7SJeff Kirsher */ 4369e13fbf7SJeff Kirsher td = &sp->tx_desc[i]; 4379e13fbf7SJeff Kirsher dma_sync_desc_cpu(dev, td); 4389e13fbf7SJeff Kirsher while ((td->tdma.cntinfo & (HPCDMA_XIU | HPCDMA_ETXD)) == 4399e13fbf7SJeff Kirsher (HPCDMA_XIU | HPCDMA_ETXD)) { 4409e13fbf7SJeff Kirsher i = NEXT_TX(i); 4419e13fbf7SJeff Kirsher td = &sp->tx_desc[i]; 4429e13fbf7SJeff Kirsher dma_sync_desc_cpu(dev, td); 4439e13fbf7SJeff Kirsher } 4449e13fbf7SJeff Kirsher if (td->tdma.cntinfo & HPCDMA_XIU) { 4459e13fbf7SJeff Kirsher hregs->tx_ndptr = VIRT_TO_DMA(sp, td); 4469e13fbf7SJeff Kirsher hregs->tx_ctrl = HPC3_ETXCTRL_ACTIVE; 4479e13fbf7SJeff Kirsher } 4489e13fbf7SJeff Kirsher } 4499e13fbf7SJeff Kirsher 4509e13fbf7SJeff Kirsher static inline void sgiseeq_tx(struct net_device *dev, struct sgiseeq_private *sp, 4519e13fbf7SJeff Kirsher struct hpc3_ethregs *hregs, 4529e13fbf7SJeff Kirsher struct sgiseeq_regs *sregs) 4539e13fbf7SJeff Kirsher { 4549e13fbf7SJeff Kirsher struct sgiseeq_tx_desc *td; 4559e13fbf7SJeff Kirsher unsigned long status = hregs->tx_ctrl; 4569e13fbf7SJeff Kirsher int j; 4579e13fbf7SJeff Kirsher 4589e13fbf7SJeff Kirsher tx_maybe_reset_collisions(sp, sregs); 4599e13fbf7SJeff Kirsher 4609e13fbf7SJeff Kirsher if (!(status & (HPC3_ETXCTRL_ACTIVE | SEEQ_TSTAT_PTRANS))) { 4619e13fbf7SJeff Kirsher /* Oops, HPC detected some sort of error. */ 4629e13fbf7SJeff Kirsher if (status & SEEQ_TSTAT_R16) 4639e13fbf7SJeff Kirsher dev->stats.tx_aborted_errors++; 4649e13fbf7SJeff Kirsher if (status & SEEQ_TSTAT_UFLOW) 4659e13fbf7SJeff Kirsher dev->stats.tx_fifo_errors++; 4669e13fbf7SJeff Kirsher if (status & SEEQ_TSTAT_LCLS) 4679e13fbf7SJeff Kirsher dev->stats.collisions++; 4689e13fbf7SJeff Kirsher } 4699e13fbf7SJeff Kirsher 4709e13fbf7SJeff Kirsher /* Ack 'em... */ 4719e13fbf7SJeff Kirsher for (j = sp->tx_old; j != sp->tx_new; j = NEXT_TX(j)) { 4729e13fbf7SJeff Kirsher td = &sp->tx_desc[j]; 4739e13fbf7SJeff Kirsher 4749e13fbf7SJeff Kirsher dma_sync_desc_cpu(dev, td); 4759e13fbf7SJeff Kirsher if (!(td->tdma.cntinfo & (HPCDMA_XIU))) 4769e13fbf7SJeff Kirsher break; 4779e13fbf7SJeff Kirsher if (!(td->tdma.cntinfo & (HPCDMA_ETXD))) { 4789e13fbf7SJeff Kirsher if (!(status & HPC3_ETXCTRL_ACTIVE)) { 4799e13fbf7SJeff Kirsher hregs->tx_ndptr = VIRT_TO_DMA(sp, td); 4809e13fbf7SJeff Kirsher hregs->tx_ctrl = HPC3_ETXCTRL_ACTIVE; 4819e13fbf7SJeff Kirsher } 4829e13fbf7SJeff Kirsher break; 4839e13fbf7SJeff Kirsher } 4849e13fbf7SJeff Kirsher dev->stats.tx_packets++; 4859e13fbf7SJeff Kirsher sp->tx_old = NEXT_TX(sp->tx_old); 4869e13fbf7SJeff Kirsher td->tdma.cntinfo &= ~(HPCDMA_XIU | HPCDMA_XIE); 4879e13fbf7SJeff Kirsher td->tdma.cntinfo |= HPCDMA_EOX; 4889e13fbf7SJeff Kirsher if (td->skb) { 4899e13fbf7SJeff Kirsher dev_kfree_skb_any(td->skb); 4909e13fbf7SJeff Kirsher td->skb = NULL; 4919e13fbf7SJeff Kirsher } 4929e13fbf7SJeff Kirsher dma_sync_desc_dev(dev, td); 4939e13fbf7SJeff Kirsher } 4949e13fbf7SJeff Kirsher } 4959e13fbf7SJeff Kirsher 4969e13fbf7SJeff Kirsher static irqreturn_t sgiseeq_interrupt(int irq, void *dev_id) 4979e13fbf7SJeff Kirsher { 4989e13fbf7SJeff Kirsher struct net_device *dev = (struct net_device *) dev_id; 4999e13fbf7SJeff Kirsher struct sgiseeq_private *sp = netdev_priv(dev); 5009e13fbf7SJeff Kirsher struct hpc3_ethregs *hregs = sp->hregs; 5019e13fbf7SJeff Kirsher struct sgiseeq_regs *sregs = sp->sregs; 5029e13fbf7SJeff Kirsher 5039e13fbf7SJeff Kirsher spin_lock(&sp->tx_lock); 5049e13fbf7SJeff Kirsher 5059e13fbf7SJeff Kirsher /* Ack the IRQ and set software state. */ 5069e13fbf7SJeff Kirsher hregs->reset = HPC3_ERST_CLRIRQ; 5079e13fbf7SJeff Kirsher 5089e13fbf7SJeff Kirsher /* Always check for received packets. */ 5099e13fbf7SJeff Kirsher sgiseeq_rx(dev, sp, hregs, sregs); 5109e13fbf7SJeff Kirsher 5119e13fbf7SJeff Kirsher /* Only check for tx acks if we have something queued. */ 5129e13fbf7SJeff Kirsher if (sp->tx_old != sp->tx_new) 5139e13fbf7SJeff Kirsher sgiseeq_tx(dev, sp, hregs, sregs); 5149e13fbf7SJeff Kirsher 5159e13fbf7SJeff Kirsher if ((TX_BUFFS_AVAIL(sp) > 0) && netif_queue_stopped(dev)) { 5169e13fbf7SJeff Kirsher netif_wake_queue(dev); 5179e13fbf7SJeff Kirsher } 5189e13fbf7SJeff Kirsher spin_unlock(&sp->tx_lock); 5199e13fbf7SJeff Kirsher 5209e13fbf7SJeff Kirsher return IRQ_HANDLED; 5219e13fbf7SJeff Kirsher } 5229e13fbf7SJeff Kirsher 5239e13fbf7SJeff Kirsher static int sgiseeq_open(struct net_device *dev) 5249e13fbf7SJeff Kirsher { 5259e13fbf7SJeff Kirsher struct sgiseeq_private *sp = netdev_priv(dev); 5269e13fbf7SJeff Kirsher struct sgiseeq_regs *sregs = sp->sregs; 5279e13fbf7SJeff Kirsher unsigned int irq = dev->irq; 5289e13fbf7SJeff Kirsher int err; 5299e13fbf7SJeff Kirsher 5309e13fbf7SJeff Kirsher if (request_irq(irq, sgiseeq_interrupt, 0, sgiseeqstr, dev)) { 5319e13fbf7SJeff Kirsher printk(KERN_ERR "Seeq8003: Can't get irq %d\n", dev->irq); 5329e13fbf7SJeff Kirsher return -EAGAIN; 5339e13fbf7SJeff Kirsher } 5349e13fbf7SJeff Kirsher 5359e13fbf7SJeff Kirsher err = init_seeq(dev, sp, sregs); 5369e13fbf7SJeff Kirsher if (err) 5379e13fbf7SJeff Kirsher goto out_free_irq; 5389e13fbf7SJeff Kirsher 5399e13fbf7SJeff Kirsher netif_start_queue(dev); 5409e13fbf7SJeff Kirsher 5419e13fbf7SJeff Kirsher return 0; 5429e13fbf7SJeff Kirsher 5439e13fbf7SJeff Kirsher out_free_irq: 5449e13fbf7SJeff Kirsher free_irq(irq, dev); 5459e13fbf7SJeff Kirsher 5469e13fbf7SJeff Kirsher return err; 5479e13fbf7SJeff Kirsher } 5489e13fbf7SJeff Kirsher 5499e13fbf7SJeff Kirsher static int sgiseeq_close(struct net_device *dev) 5509e13fbf7SJeff Kirsher { 5519e13fbf7SJeff Kirsher struct sgiseeq_private *sp = netdev_priv(dev); 5529e13fbf7SJeff Kirsher struct sgiseeq_regs *sregs = sp->sregs; 5539e13fbf7SJeff Kirsher unsigned int irq = dev->irq; 5549e13fbf7SJeff Kirsher 5559e13fbf7SJeff Kirsher netif_stop_queue(dev); 5569e13fbf7SJeff Kirsher 5579e13fbf7SJeff Kirsher /* Shutdown the Seeq. */ 5589e13fbf7SJeff Kirsher reset_hpc3_and_seeq(sp->hregs, sregs); 5599e13fbf7SJeff Kirsher free_irq(irq, dev); 5609e13fbf7SJeff Kirsher seeq_purge_ring(dev); 5619e13fbf7SJeff Kirsher 5629e13fbf7SJeff Kirsher return 0; 5639e13fbf7SJeff Kirsher } 5649e13fbf7SJeff Kirsher 5659e13fbf7SJeff Kirsher static inline int sgiseeq_reset(struct net_device *dev) 5669e13fbf7SJeff Kirsher { 5679e13fbf7SJeff Kirsher struct sgiseeq_private *sp = netdev_priv(dev); 5689e13fbf7SJeff Kirsher struct sgiseeq_regs *sregs = sp->sregs; 5699e13fbf7SJeff Kirsher int err; 5709e13fbf7SJeff Kirsher 5719e13fbf7SJeff Kirsher err = init_seeq(dev, sp, sregs); 5729e13fbf7SJeff Kirsher if (err) 5739e13fbf7SJeff Kirsher return err; 5749e13fbf7SJeff Kirsher 575*860e9538SFlorian Westphal netif_trans_update(dev); /* prevent tx timeout */ 5769e13fbf7SJeff Kirsher netif_wake_queue(dev); 5779e13fbf7SJeff Kirsher 5789e13fbf7SJeff Kirsher return 0; 5799e13fbf7SJeff Kirsher } 5809e13fbf7SJeff Kirsher 5819e13fbf7SJeff Kirsher static int sgiseeq_start_xmit(struct sk_buff *skb, struct net_device *dev) 5829e13fbf7SJeff Kirsher { 5839e13fbf7SJeff Kirsher struct sgiseeq_private *sp = netdev_priv(dev); 5849e13fbf7SJeff Kirsher struct hpc3_ethregs *hregs = sp->hregs; 5859e13fbf7SJeff Kirsher unsigned long flags; 5869e13fbf7SJeff Kirsher struct sgiseeq_tx_desc *td; 5879e13fbf7SJeff Kirsher int len, entry; 5889e13fbf7SJeff Kirsher 5899e13fbf7SJeff Kirsher spin_lock_irqsave(&sp->tx_lock, flags); 5909e13fbf7SJeff Kirsher 5919e13fbf7SJeff Kirsher /* Setup... */ 5929e13fbf7SJeff Kirsher len = skb->len; 5939e13fbf7SJeff Kirsher if (len < ETH_ZLEN) { 5949e13fbf7SJeff Kirsher if (skb_padto(skb, ETH_ZLEN)) { 5959e13fbf7SJeff Kirsher spin_unlock_irqrestore(&sp->tx_lock, flags); 5969e13fbf7SJeff Kirsher return NETDEV_TX_OK; 5979e13fbf7SJeff Kirsher } 5989e13fbf7SJeff Kirsher len = ETH_ZLEN; 5999e13fbf7SJeff Kirsher } 6009e13fbf7SJeff Kirsher 6019e13fbf7SJeff Kirsher dev->stats.tx_bytes += len; 6029e13fbf7SJeff Kirsher entry = sp->tx_new; 6039e13fbf7SJeff Kirsher td = &sp->tx_desc[entry]; 6049e13fbf7SJeff Kirsher dma_sync_desc_cpu(dev, td); 6059e13fbf7SJeff Kirsher 6069e13fbf7SJeff Kirsher /* Create entry. There are so many races with adding a new 6079e13fbf7SJeff Kirsher * descriptor to the chain: 6089e13fbf7SJeff Kirsher * 1) Assume that the HPC is off processing a DMA chain while 6099e13fbf7SJeff Kirsher * we are changing all of the following. 6109e13fbf7SJeff Kirsher * 2) Do no allow the HPC to look at a new descriptor until 6119e13fbf7SJeff Kirsher * we have completely set up it's state. This means, do 6129e13fbf7SJeff Kirsher * not clear HPCDMA_EOX in the current last descritptor 6139e13fbf7SJeff Kirsher * until the one we are adding looks consistent and could 6149e13fbf7SJeff Kirsher * be processes right now. 6159e13fbf7SJeff Kirsher * 3) The tx interrupt code must notice when we've added a new 6169e13fbf7SJeff Kirsher * entry and the HPC got to the end of the chain before we 6179e13fbf7SJeff Kirsher * added this new entry and restarted it. 6189e13fbf7SJeff Kirsher */ 6199e13fbf7SJeff Kirsher td->skb = skb; 6209e13fbf7SJeff Kirsher td->tdma.pbuf = dma_map_single(dev->dev.parent, skb->data, 6219e13fbf7SJeff Kirsher len, DMA_TO_DEVICE); 6229e13fbf7SJeff Kirsher td->tdma.cntinfo = (len & HPCDMA_BCNT) | 6239e13fbf7SJeff Kirsher HPCDMA_XIU | HPCDMA_EOXP | HPCDMA_XIE | HPCDMA_EOX; 6249e13fbf7SJeff Kirsher dma_sync_desc_dev(dev, td); 6259e13fbf7SJeff Kirsher if (sp->tx_old != sp->tx_new) { 6269e13fbf7SJeff Kirsher struct sgiseeq_tx_desc *backend; 6279e13fbf7SJeff Kirsher 6289e13fbf7SJeff Kirsher backend = &sp->tx_desc[PREV_TX(sp->tx_new)]; 6299e13fbf7SJeff Kirsher dma_sync_desc_cpu(dev, backend); 6309e13fbf7SJeff Kirsher backend->tdma.cntinfo &= ~HPCDMA_EOX; 6319e13fbf7SJeff Kirsher dma_sync_desc_dev(dev, backend); 6329e13fbf7SJeff Kirsher } 6339e13fbf7SJeff Kirsher sp->tx_new = NEXT_TX(sp->tx_new); /* Advance. */ 6349e13fbf7SJeff Kirsher 6359e13fbf7SJeff Kirsher /* Maybe kick the HPC back into motion. */ 6369e13fbf7SJeff Kirsher if (!(hregs->tx_ctrl & HPC3_ETXCTRL_ACTIVE)) 6379e13fbf7SJeff Kirsher kick_tx(dev, sp, hregs); 6389e13fbf7SJeff Kirsher 6399e13fbf7SJeff Kirsher if (!TX_BUFFS_AVAIL(sp)) 6409e13fbf7SJeff Kirsher netif_stop_queue(dev); 6419e13fbf7SJeff Kirsher spin_unlock_irqrestore(&sp->tx_lock, flags); 6429e13fbf7SJeff Kirsher 6439e13fbf7SJeff Kirsher return NETDEV_TX_OK; 6449e13fbf7SJeff Kirsher } 6459e13fbf7SJeff Kirsher 6469e13fbf7SJeff Kirsher static void timeout(struct net_device *dev) 6479e13fbf7SJeff Kirsher { 6489e13fbf7SJeff Kirsher printk(KERN_NOTICE "%s: transmit timed out, resetting\n", dev->name); 6499e13fbf7SJeff Kirsher sgiseeq_reset(dev); 6509e13fbf7SJeff Kirsher 651*860e9538SFlorian Westphal netif_trans_update(dev); /* prevent tx timeout */ 6529e13fbf7SJeff Kirsher netif_wake_queue(dev); 6539e13fbf7SJeff Kirsher } 6549e13fbf7SJeff Kirsher 6559e13fbf7SJeff Kirsher static void sgiseeq_set_multicast(struct net_device *dev) 6569e13fbf7SJeff Kirsher { 6579e13fbf7SJeff Kirsher struct sgiseeq_private *sp = netdev_priv(dev); 6589e13fbf7SJeff Kirsher unsigned char oldmode = sp->mode; 6599e13fbf7SJeff Kirsher 6609e13fbf7SJeff Kirsher if(dev->flags & IFF_PROMISC) 6619e13fbf7SJeff Kirsher sp->mode = SEEQ_RCMD_RANY; 6629e13fbf7SJeff Kirsher else if ((dev->flags & IFF_ALLMULTI) || !netdev_mc_empty(dev)) 6639e13fbf7SJeff Kirsher sp->mode = SEEQ_RCMD_RBMCAST; 6649e13fbf7SJeff Kirsher else 6659e13fbf7SJeff Kirsher sp->mode = SEEQ_RCMD_RBCAST; 6669e13fbf7SJeff Kirsher 6679e13fbf7SJeff Kirsher /* XXX I know this sucks, but is there a better way to reprogram 6689e13fbf7SJeff Kirsher * XXX the receiver? At least, this shouldn't happen too often. 6699e13fbf7SJeff Kirsher */ 6709e13fbf7SJeff Kirsher 6719e13fbf7SJeff Kirsher if (oldmode != sp->mode) 6729e13fbf7SJeff Kirsher sgiseeq_reset(dev); 6739e13fbf7SJeff Kirsher } 6749e13fbf7SJeff Kirsher 6759e13fbf7SJeff Kirsher static inline void setup_tx_ring(struct net_device *dev, 6769e13fbf7SJeff Kirsher struct sgiseeq_tx_desc *buf, 6779e13fbf7SJeff Kirsher int nbufs) 6789e13fbf7SJeff Kirsher { 6799e13fbf7SJeff Kirsher struct sgiseeq_private *sp = netdev_priv(dev); 6809e13fbf7SJeff Kirsher int i = 0; 6819e13fbf7SJeff Kirsher 6829e13fbf7SJeff Kirsher while (i < (nbufs - 1)) { 6839e13fbf7SJeff Kirsher buf[i].tdma.pnext = VIRT_TO_DMA(sp, buf + i + 1); 6849e13fbf7SJeff Kirsher buf[i].tdma.pbuf = 0; 6859e13fbf7SJeff Kirsher dma_sync_desc_dev(dev, &buf[i]); 6869e13fbf7SJeff Kirsher i++; 6879e13fbf7SJeff Kirsher } 6889e13fbf7SJeff Kirsher buf[i].tdma.pnext = VIRT_TO_DMA(sp, buf); 6899e13fbf7SJeff Kirsher dma_sync_desc_dev(dev, &buf[i]); 6909e13fbf7SJeff Kirsher } 6919e13fbf7SJeff Kirsher 6929e13fbf7SJeff Kirsher static inline void setup_rx_ring(struct net_device *dev, 6939e13fbf7SJeff Kirsher struct sgiseeq_rx_desc *buf, 6949e13fbf7SJeff Kirsher int nbufs) 6959e13fbf7SJeff Kirsher { 6969e13fbf7SJeff Kirsher struct sgiseeq_private *sp = netdev_priv(dev); 6979e13fbf7SJeff Kirsher int i = 0; 6989e13fbf7SJeff Kirsher 6999e13fbf7SJeff Kirsher while (i < (nbufs - 1)) { 7009e13fbf7SJeff Kirsher buf[i].rdma.pnext = VIRT_TO_DMA(sp, buf + i + 1); 7019e13fbf7SJeff Kirsher buf[i].rdma.pbuf = 0; 7029e13fbf7SJeff Kirsher dma_sync_desc_dev(dev, &buf[i]); 7039e13fbf7SJeff Kirsher i++; 7049e13fbf7SJeff Kirsher } 7059e13fbf7SJeff Kirsher buf[i].rdma.pbuf = 0; 7069e13fbf7SJeff Kirsher buf[i].rdma.pnext = VIRT_TO_DMA(sp, buf); 7079e13fbf7SJeff Kirsher dma_sync_desc_dev(dev, &buf[i]); 7089e13fbf7SJeff Kirsher } 7099e13fbf7SJeff Kirsher 7109e13fbf7SJeff Kirsher static const struct net_device_ops sgiseeq_netdev_ops = { 7119e13fbf7SJeff Kirsher .ndo_open = sgiseeq_open, 7129e13fbf7SJeff Kirsher .ndo_stop = sgiseeq_close, 7139e13fbf7SJeff Kirsher .ndo_start_xmit = sgiseeq_start_xmit, 7149e13fbf7SJeff Kirsher .ndo_tx_timeout = timeout, 715afc4b13dSJiri Pirko .ndo_set_rx_mode = sgiseeq_set_multicast, 7169e13fbf7SJeff Kirsher .ndo_set_mac_address = sgiseeq_set_mac_address, 7179e13fbf7SJeff Kirsher .ndo_change_mtu = eth_change_mtu, 7189e13fbf7SJeff Kirsher .ndo_validate_addr = eth_validate_addr, 7199e13fbf7SJeff Kirsher }; 7209e13fbf7SJeff Kirsher 7215911ce0dSBill Pemberton static int sgiseeq_probe(struct platform_device *pdev) 7229e13fbf7SJeff Kirsher { 723d7765428SJingoo Han struct sgiseeq_platform_data *pd = dev_get_platdata(&pdev->dev); 7249e13fbf7SJeff Kirsher struct hpc3_regs *hpcregs = pd->hpc; 7259e13fbf7SJeff Kirsher struct sgiseeq_init_block *sr; 7269e13fbf7SJeff Kirsher unsigned int irq = pd->irq; 7279e13fbf7SJeff Kirsher struct sgiseeq_private *sp; 7289e13fbf7SJeff Kirsher struct net_device *dev; 7299e13fbf7SJeff Kirsher int err; 7309e13fbf7SJeff Kirsher 7319e13fbf7SJeff Kirsher dev = alloc_etherdev(sizeof (struct sgiseeq_private)); 7329e13fbf7SJeff Kirsher if (!dev) { 7339e13fbf7SJeff Kirsher err = -ENOMEM; 7349e13fbf7SJeff Kirsher goto err_out; 7359e13fbf7SJeff Kirsher } 7369e13fbf7SJeff Kirsher 7379e13fbf7SJeff Kirsher platform_set_drvdata(pdev, dev); 7389e13fbf7SJeff Kirsher sp = netdev_priv(dev); 7399e13fbf7SJeff Kirsher 7409e13fbf7SJeff Kirsher /* Make private data page aligned */ 7419e13fbf7SJeff Kirsher sr = dma_alloc_noncoherent(&pdev->dev, sizeof(*sp->srings), 7429e13fbf7SJeff Kirsher &sp->srings_dma, GFP_KERNEL); 7439e13fbf7SJeff Kirsher if (!sr) { 7449e13fbf7SJeff Kirsher printk(KERN_ERR "Sgiseeq: Page alloc failed, aborting.\n"); 7459e13fbf7SJeff Kirsher err = -ENOMEM; 7469e13fbf7SJeff Kirsher goto err_out_free_dev; 7479e13fbf7SJeff Kirsher } 7489e13fbf7SJeff Kirsher sp->srings = sr; 7499e13fbf7SJeff Kirsher sp->rx_desc = sp->srings->rxvector; 7509e13fbf7SJeff Kirsher sp->tx_desc = sp->srings->txvector; 751b8dfc6a0SJean Delvare spin_lock_init(&sp->tx_lock); 7529e13fbf7SJeff Kirsher 7539e13fbf7SJeff Kirsher /* A couple calculations now, saves many cycles later. */ 7549e13fbf7SJeff Kirsher setup_rx_ring(dev, sp->rx_desc, SEEQ_RX_BUFFERS); 7559e13fbf7SJeff Kirsher setup_tx_ring(dev, sp->tx_desc, SEEQ_TX_BUFFERS); 7569e13fbf7SJeff Kirsher 7579e13fbf7SJeff Kirsher memcpy(dev->dev_addr, pd->mac, ETH_ALEN); 7589e13fbf7SJeff Kirsher 7599e13fbf7SJeff Kirsher #ifdef DEBUG 7609e13fbf7SJeff Kirsher gpriv = sp; 7619e13fbf7SJeff Kirsher gdev = dev; 7629e13fbf7SJeff Kirsher #endif 7639e13fbf7SJeff Kirsher sp->sregs = (struct sgiseeq_regs *) &hpcregs->eth_ext[0]; 7649e13fbf7SJeff Kirsher sp->hregs = &hpcregs->ethregs; 7659e13fbf7SJeff Kirsher sp->name = sgiseeqstr; 7669e13fbf7SJeff Kirsher sp->mode = SEEQ_RCMD_RBCAST; 7679e13fbf7SJeff Kirsher 7689e13fbf7SJeff Kirsher /* Setup PIO and DMA transfer timing */ 7699e13fbf7SJeff Kirsher sp->hregs->pconfig = 0x161; 7709e13fbf7SJeff Kirsher sp->hregs->dconfig = HPC3_EDCFG_FIRQ | HPC3_EDCFG_FEOP | 7719e13fbf7SJeff Kirsher HPC3_EDCFG_FRXDC | HPC3_EDCFG_PTO | 0x026; 7729e13fbf7SJeff Kirsher 7739e13fbf7SJeff Kirsher /* Setup PIO and DMA transfer timing */ 7749e13fbf7SJeff Kirsher sp->hregs->pconfig = 0x161; 7759e13fbf7SJeff Kirsher sp->hregs->dconfig = HPC3_EDCFG_FIRQ | HPC3_EDCFG_FEOP | 7769e13fbf7SJeff Kirsher HPC3_EDCFG_FRXDC | HPC3_EDCFG_PTO | 0x026; 7779e13fbf7SJeff Kirsher 7789e13fbf7SJeff Kirsher /* Reset the chip. */ 7799e13fbf7SJeff Kirsher hpc3_eth_reset(sp->hregs); 7809e13fbf7SJeff Kirsher 7819e13fbf7SJeff Kirsher sp->is_edlc = !(sp->sregs->rw.rregs.collision_tx[0] & 0xff); 7829e13fbf7SJeff Kirsher if (sp->is_edlc) 7839e13fbf7SJeff Kirsher sp->control = SEEQ_CTRL_XCNT | SEEQ_CTRL_ACCNT | 7849e13fbf7SJeff Kirsher SEEQ_CTRL_SFLAG | SEEQ_CTRL_ESHORT | 7859e13fbf7SJeff Kirsher SEEQ_CTRL_ENCARR; 7869e13fbf7SJeff Kirsher 7879e13fbf7SJeff Kirsher dev->netdev_ops = &sgiseeq_netdev_ops; 7889e13fbf7SJeff Kirsher dev->watchdog_timeo = (200 * HZ) / 1000; 7899e13fbf7SJeff Kirsher dev->irq = irq; 7909e13fbf7SJeff Kirsher 7919e13fbf7SJeff Kirsher if (register_netdev(dev)) { 7929e13fbf7SJeff Kirsher printk(KERN_ERR "Sgiseeq: Cannot register net device, " 7939e13fbf7SJeff Kirsher "aborting.\n"); 7949e13fbf7SJeff Kirsher err = -ENODEV; 7959e13fbf7SJeff Kirsher goto err_out_free_page; 7969e13fbf7SJeff Kirsher } 7979e13fbf7SJeff Kirsher 7989e13fbf7SJeff Kirsher printk(KERN_INFO "%s: %s %pM\n", dev->name, sgiseeqstr, dev->dev_addr); 7999e13fbf7SJeff Kirsher 8009e13fbf7SJeff Kirsher return 0; 8019e13fbf7SJeff Kirsher 8029e13fbf7SJeff Kirsher err_out_free_page: 8039e13fbf7SJeff Kirsher free_page((unsigned long) sp->srings); 8049e13fbf7SJeff Kirsher err_out_free_dev: 8059e13fbf7SJeff Kirsher free_netdev(dev); 8069e13fbf7SJeff Kirsher 8079e13fbf7SJeff Kirsher err_out: 8089e13fbf7SJeff Kirsher return err; 8099e13fbf7SJeff Kirsher } 8109e13fbf7SJeff Kirsher 8119e13fbf7SJeff Kirsher static int __exit sgiseeq_remove(struct platform_device *pdev) 8129e13fbf7SJeff Kirsher { 8139e13fbf7SJeff Kirsher struct net_device *dev = platform_get_drvdata(pdev); 8149e13fbf7SJeff Kirsher struct sgiseeq_private *sp = netdev_priv(dev); 8159e13fbf7SJeff Kirsher 8169e13fbf7SJeff Kirsher unregister_netdev(dev); 8179e13fbf7SJeff Kirsher dma_free_noncoherent(&pdev->dev, sizeof(*sp->srings), sp->srings, 8189e13fbf7SJeff Kirsher sp->srings_dma); 8199e13fbf7SJeff Kirsher free_netdev(dev); 8209e13fbf7SJeff Kirsher 8219e13fbf7SJeff Kirsher return 0; 8229e13fbf7SJeff Kirsher } 8239e13fbf7SJeff Kirsher 8249e13fbf7SJeff Kirsher static struct platform_driver sgiseeq_driver = { 8259e13fbf7SJeff Kirsher .probe = sgiseeq_probe, 8269e13fbf7SJeff Kirsher .remove = __exit_p(sgiseeq_remove), 8279e13fbf7SJeff Kirsher .driver = { 8289e13fbf7SJeff Kirsher .name = "sgiseeq", 8299e13fbf7SJeff Kirsher } 8309e13fbf7SJeff Kirsher }; 8319e13fbf7SJeff Kirsher 832db62f684SAxel Lin module_platform_driver(sgiseeq_driver); 8339e13fbf7SJeff Kirsher 8349e13fbf7SJeff Kirsher MODULE_DESCRIPTION("SGI Seeq 8003 driver"); 8359e13fbf7SJeff Kirsher MODULE_AUTHOR("Linux/MIPS Mailing List <linux-mips@linux-mips.org>"); 8369e13fbf7SJeff Kirsher MODULE_LICENSE("GPL"); 8379e13fbf7SJeff Kirsher MODULE_ALIAS("platform:sgiseeq"); 838