/*
 * Driver for the IDT RC32434 (Korina) on-chip ethernet controller.
 *
 * Copyright 2004 IDT Inc. (rischelp@idt.com)
 * Copyright 2006 Felix Fietkau <nbd@openwrt.org>
 * Copyright 2008 Florian Fainelli <florian@openwrt.org>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
 * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
 * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * Writing to a DMA status register:
 *
 * When writing to the status register, you should mask the bit you have
 * been testing the status register with. Both Tx and Rx DMA registers
 * should stick to this procedure.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/moduleparam.h>
#include <linux/sched.h>
#include <linux/ctype.h>
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/in.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/errno.h>
#include <linux/platform_device.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/crc32.h>

#include <asm/bootinfo.h>
#include <asm/system.h>
#include <asm/bitops.h>
#include <asm/pgtable.h>
#include <asm/segment.h>
#include <asm/io.h>
#include <asm/dma.h>

#include <asm/mach-rc32434/rb.h>
#include <asm/mach-rc32434/rc32434.h>
#include <asm/mach-rc32434/eth.h>
#include <asm/mach-rc32434/dma_v.h>

#define DRV_NAME	"korina"
#define DRV_VERSION	"0.10"
#define DRV_RELDATE	"04Mar2008"

#define STATION_ADDRESS_HIGH(dev) (((dev)->dev_addr[0] << 8) | \
				   ((dev)->dev_addr[1]))
#define STATION_ADDRESS_LOW(dev)  (((dev)->dev_addr[2] << 24) | \
				   ((dev)->dev_addr[3] << 16) | \
				   ((dev)->dev_addr[4] << 8)  | \
				   ((dev)->dev_addr[5]))

#define MII_CLOCK	1250000	/* no more than 2.5MHz */

/* the following must be powers of two */
#define KORINA_NUM_RDS	64	/* number of receive descriptors */
#define KORINA_NUM_TDS	64	/* number of transmit descriptors */

/* KORINA_RBSIZE is the hardware's default maximum receive
 * frame size in bytes. Having this hardcoded means that there
 * is no support for MTU sizes greater than 1500.
 */
#define KORINA_RBSIZE	1536	/* size of one resource buffer = Ether MTU */
#define KORINA_RDS_MASK	(KORINA_NUM_RDS - 1)
#define KORINA_TDS_MASK	(KORINA_NUM_TDS - 1)
#define RD_RING_SIZE	(KORINA_NUM_RDS * sizeof(struct dma_desc))
#define TD_RING_SIZE	(KORINA_NUM_TDS * sizeof(struct dma_desc))

#define TX_TIMEOUT	(6000 * HZ / 1000)

enum chain_status { desc_filled, desc_empty };
#define IS_DMA_FINISHED(X)	(((X) & (DMA_DESC_FINI)) != 0)
#define IS_DMA_DONE(X)		(((X) & (DMA_DESC_DONE)) != 0)
#define RCVPKT_LENGTH(X)	(((X) & ETH_RX_LEN) >> ETH_RX_LEN_BIT)

/* Information that needs to be kept for each board. */
struct korina_private {
	struct eth_regs *eth_regs;
	struct dma_reg *rx_dma_regs;
	struct dma_reg *tx_dma_regs;
	struct dma_desc *td_ring;	/* transmit descriptor ring */
	struct dma_desc *rd_ring;	/* receive descriptor ring */

	struct sk_buff *tx_skb[KORINA_NUM_TDS];
	struct sk_buff *rx_skb[KORINA_NUM_RDS];

	int rx_next_done;
	int rx_chain_head;
	int rx_chain_tail;
	enum chain_status rx_chain_status;

	int tx_next_done;
	int tx_chain_head;
	int tx_chain_tail;
	enum chain_status tx_chain_status;
	int tx_count;
	int tx_full;

	int rx_irq;
	int tx_irq;
	int ovr_irq;
	int und_irq;

	spinlock_t lock;	/* NIC xmit lock */

	int dma_halt_cnt;
	int dma_run_cnt;
	struct napi_struct napi;
	struct timer_list media_check_timer;
	struct mii_if_info mii_if;
	struct work_struct restart_task;
	struct net_device *dev;
	int phy_addr;
};

extern unsigned int idt_cpu_freq;

static inline void korina_start_dma(struct dma_reg *ch, u32 dma_addr)
{
	writel(0, &ch->dmandptr);
	writel(dma_addr, &ch->dmadptr);
}

static inline void korina_abort_dma(struct net_device *dev,
					struct dma_reg *ch)
{
	if (readl(&ch->dmac) & DMA_CHAN_RUN_BIT) {
		writel(0x10, &ch->dmac);

		while (!(readl(&ch->dmas) & DMA_STAT_HALT))
			dev->trans_start = jiffies;

		writel(0, &ch->dmas);
	}

	writel(0, &ch->dmadptr);
	writel(0, &ch->dmandptr);
}

static inline void korina_chain_dma(struct dma_reg *ch, u32 dma_addr)
{
	writel(dma_addr, &ch->dmandptr);
}

static void korina_abort_tx(struct net_device *dev)
{
	struct korina_private *lp = netdev_priv(dev);

	korina_abort_dma(dev, lp->tx_dma_regs);
}

static void korina_abort_rx(struct net_device *dev)
{
	struct korina_private *lp = netdev_priv(dev);

	korina_abort_dma(dev, lp->rx_dma_regs);
}

static void korina_start_rx(struct korina_private *lp,
					struct dma_desc *rd)
{
	korina_start_dma(lp->rx_dma_regs, CPHYSADDR(rd));
}

static void korina_chain_rx(struct korina_private *lp,
					struct dma_desc *rd)
{
	korina_chain_dma(lp->rx_dma_regs, CPHYSADDR(rd));
}

/* transmit packet */
static int korina_send_packet(struct sk_buff *skb, struct net_device *dev)
{
	struct korina_private *lp = netdev_priv(dev);
	unsigned long flags;
	u32 length;
	u32 chain_prev, chain_next;
	struct dma_desc *td;

	spin_lock_irqsave(&lp->lock, flags);

	td = &lp->td_ring[lp->tx_chain_tail];

	/* stop queue when full, drop pkts if queue already full */
	if (lp->tx_count >= (KORINA_NUM_TDS - 2)) {
		lp->tx_full = 1;

		if (lp->tx_count == (KORINA_NUM_TDS - 2))
			netif_stop_queue(dev);
		else {
			dev->stats.tx_dropped++;
			dev_kfree_skb_any(skb);
			spin_unlock_irqrestore(&lp->lock, flags);

			return NETDEV_TX_BUSY;
		}
	}

	lp->tx_count++;

	lp->tx_skb[lp->tx_chain_tail] = skb;

	length = skb->len;
	dma_cache_wback((u32)skb->data, skb->len);

	/* Setup the transmit descriptor. */
	dma_cache_inv((u32) td, sizeof(*td));
	td->ca = CPHYSADDR(skb->data);
	chain_prev = (lp->tx_chain_tail - 1) & KORINA_TDS_MASK;
	chain_next = (lp->tx_chain_tail + 1) & KORINA_TDS_MASK;

	if (readl(&(lp->tx_dma_regs->dmandptr)) == 0) {
		if (lp->tx_chain_status == desc_empty) {
			/* Update tail */
			td->control = DMA_COUNT(length) |
					DMA_DESC_COF | DMA_DESC_IOF;
			/* Move tail */
			lp->tx_chain_tail = chain_next;
			/* Write to NDPTR */
			writel(CPHYSADDR(&lp->td_ring[lp->tx_chain_head]),
					&lp->tx_dma_regs->dmandptr);
			/* Move head to tail */
			lp->tx_chain_head = lp->tx_chain_tail;
		} else {
			/* Update tail */
			td->control = DMA_COUNT(length) |
					DMA_DESC_COF | DMA_DESC_IOF;
			/* Link to prev */
			lp->td_ring[chain_prev].control &=
					~DMA_DESC_COF;
			/* Link to prev */
			lp->td_ring[chain_prev].link = CPHYSADDR(td);
			/* Move tail */
			lp->tx_chain_tail = chain_next;
			/* Write to NDPTR */
			writel(CPHYSADDR(&lp->td_ring[lp->tx_chain_head]),
					&(lp->tx_dma_regs->dmandptr));
			/* Move head to tail */
			lp->tx_chain_head = lp->tx_chain_tail;
			lp->tx_chain_status = desc_empty;
		}
	} else {
		if (lp->tx_chain_status == desc_empty) {
			/* Update tail */
			td->control = DMA_COUNT(length) |
					DMA_DESC_COF | DMA_DESC_IOF;
			/* Move tail */
			lp->tx_chain_tail = chain_next;
			lp->tx_chain_status = desc_filled;
		} else {
			/* Update tail */
			td->control = DMA_COUNT(length) |
					DMA_DESC_COF | DMA_DESC_IOF;
			lp->td_ring[chain_prev].control &=
					~DMA_DESC_COF;
			lp->td_ring[chain_prev].link = CPHYSADDR(td);
			lp->tx_chain_tail = chain_next;
		}
	}
	dma_cache_wback((u32) td, sizeof(*td));

	dev->trans_start = jiffies;
	spin_unlock_irqrestore(&lp->lock, flags);

	return NETDEV_TX_OK;
}

static int mdio_read(struct net_device *dev, int mii_id, int reg)
{
	struct korina_private *lp = netdev_priv(dev);
	int ret;

	mii_id = ((lp->rx_irq == 0x2c ? 1 : 0) << 8);

	writel(0, &lp->eth_regs->miimcfg);
	writel(0, &lp->eth_regs->miimcmd);
	writel(mii_id | reg, &lp->eth_regs->miimaddr);
	writel(ETH_MII_CMD_SCN, &lp->eth_regs->miimcmd);

	ret = (int)(readl(&lp->eth_regs->miimrdd));
	return ret;
}

static void mdio_write(struct net_device *dev, int mii_id, int reg, int val)
{
	struct korina_private *lp = netdev_priv(dev);

	mii_id = ((lp->rx_irq == 0x2c ? 1 : 0) << 8);

	writel(0, &lp->eth_regs->miimcfg);
	writel(1, &lp->eth_regs->miimcmd);
	writel(mii_id | reg, &lp->eth_regs->miimaddr);
	writel(ETH_MII_CMD_SCN, &lp->eth_regs->miimcmd);
	writel(val, &lp->eth_regs->miimwtd);
}

/* Ethernet Rx DMA interrupt */
static irqreturn_t korina_rx_dma_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct korina_private *lp = netdev_priv(dev);
	u32 dmas, dmasm;
	irqreturn_t retval;

	dmas = readl(&lp->rx_dma_regs->dmas);
	if (dmas & (DMA_STAT_DONE | DMA_STAT_HALT | DMA_STAT_ERR)) {
		dmasm = readl(&lp->rx_dma_regs->dmasm);
		writel(dmasm | (DMA_STAT_DONE |
				DMA_STAT_HALT | DMA_STAT_ERR),
				&lp->rx_dma_regs->dmasm);

		napi_schedule(&lp->napi);

		if (dmas & DMA_STAT_ERR)
			printk(KERN_ERR "%s: DMA error\n", dev->name);

		retval = IRQ_HANDLED;
	} else
		retval = IRQ_NONE;

	return retval;
}

static int korina_rx(struct net_device *dev, int limit)
{
	struct korina_private *lp = netdev_priv(dev);
	struct dma_desc *rd = &lp->rd_ring[lp->rx_next_done];
	struct sk_buff *skb, *skb_new;
	u8 *pkt_buf;
	u32 devcs, pkt_len, dmas;
	int count;

	dma_cache_inv((u32)rd, sizeof(*rd));

	for (count = 0; count < limit; count++) {
		skb = lp->rx_skb[lp->rx_next_done];
		skb_new = NULL;

		devcs = rd->devcs;

		if ((KORINA_RBSIZE - (u32)DMA_COUNT(rd->control)) == 0)
			break;

		/* Update statistics counters */
		if (devcs & ETH_RX_CRC)
			dev->stats.rx_crc_errors++;
		if (devcs & ETH_RX_LOR)
			dev->stats.rx_length_errors++;
		if (devcs & ETH_RX_LE)
			dev->stats.rx_length_errors++;
		if (devcs & ETH_RX_OVR)
			dev->stats.rx_fifo_errors++;
		if (devcs & ETH_RX_CV)
			dev->stats.rx_frame_errors++;
		if (devcs & ETH_RX_CES)
			dev->stats.rx_length_errors++;
		if (devcs & ETH_RX_MP)
			dev->stats.multicast++;

		if ((devcs & ETH_RX_LD) != ETH_RX_LD) {
			/* check that this is a whole packet
			 * WARNING: DMA_FD bit incorrectly set
			 * in Rc32434 (errata ref #077) */
			dev->stats.rx_errors++;
			dev->stats.rx_dropped++;
		} else if ((devcs & ETH_RX_ROK)) {
			pkt_len = RCVPKT_LENGTH(devcs);

			/* must be the (first and) last
			 * descriptor then */
			pkt_buf = (u8 *)lp->rx_skb[lp->rx_next_done]->data;

			/* invalidate the cache */
			dma_cache_inv((unsigned long)pkt_buf, pkt_len - 4);

			/* Malloc up new buffer. */
			skb_new = netdev_alloc_skb_ip_align(dev, KORINA_RBSIZE);

			if (!skb_new)
				break;
			/* Do not count the CRC */
			skb_put(skb, pkt_len - 4);
			skb->protocol = eth_type_trans(skb, dev);

			/* Pass the packet to upper layers */
			netif_receive_skb(skb);
			dev->stats.rx_packets++;
			dev->stats.rx_bytes += pkt_len;

			/* Update the mcast stats */
			if (devcs & ETH_RX_MP)
				dev->stats.multicast++;

			lp->rx_skb[lp->rx_next_done] = skb_new;
		}

		rd->devcs = 0;

		/* Restore descriptor's curr_addr */
		if (skb_new)
			rd->ca = CPHYSADDR(skb_new->data);
		else
			rd->ca = CPHYSADDR(skb->data);

		rd->control = DMA_COUNT(KORINA_RBSIZE) |
			DMA_DESC_COD | DMA_DESC_IOD;
		lp->rd_ring[(lp->rx_next_done - 1) &
			KORINA_RDS_MASK].control &=
			~DMA_DESC_COD;

		lp->rx_next_done = (lp->rx_next_done + 1) & KORINA_RDS_MASK;
		dma_cache_wback((u32)rd, sizeof(*rd));
		rd = &lp->rd_ring[lp->rx_next_done];
		writel(~DMA_STAT_DONE, &lp->rx_dma_regs->dmas);
	}

	dmas = readl(&lp->rx_dma_regs->dmas);

	if (dmas & DMA_STAT_HALT) {
		writel(~(DMA_STAT_HALT | DMA_STAT_ERR),
				&lp->rx_dma_regs->dmas);

		lp->dma_halt_cnt++;
		rd->devcs = 0;
		skb = lp->rx_skb[lp->rx_next_done];
		rd->ca = CPHYSADDR(skb->data);
		dma_cache_wback((u32)rd, sizeof(*rd));
		korina_chain_rx(lp, rd);
	}

	return count;
}

static int korina_poll(struct napi_struct *napi, int budget)
{
	struct korina_private *lp =
		container_of(napi, struct korina_private, napi);
	struct net_device *dev = lp->dev;
	int work_done;

	work_done = korina_rx(dev, budget);
	if (work_done < budget) {
		napi_complete(napi);

		writel(readl(&lp->rx_dma_regs->dmasm) &
			~(DMA_STAT_DONE | DMA_STAT_HALT | DMA_STAT_ERR),
			&lp->rx_dma_regs->dmasm);
	}
	return work_done;
}

/*
 * Set or clear the multicast filter for this adaptor.
 */
static void korina_multicast_list(struct net_device *dev)
{
	struct korina_private *lp = netdev_priv(dev);
	unsigned long flags;
	struct netdev_hw_addr *ha;
	u32 recognise = ETH_ARC_AB;	/* always accept broadcasts */
	int i;

	/* Set promiscuous mode */
	if (dev->flags & IFF_PROMISC)
		recognise |= ETH_ARC_PRO;

	else if ((dev->flags & IFF_ALLMULTI) || (netdev_mc_count(dev) > 4))
		/* All multicast and broadcast */
		recognise |= ETH_ARC_AM;

	/* Build the hash table */
	if (netdev_mc_count(dev) > 4) {
		u16 hash_table[4];
		u32 crc;

		for (i = 0; i < 4; i++)
			hash_table[i] = 0;

		netdev_for_each_mc_addr(ha, dev) {
			crc = ether_crc_le(6, ha->addr);
			crc >>= 26;
			hash_table[crc >> 4] |= 1 << (15 - (crc & 0xf));
		}
		/* Accept filtered multicast */
		recognise |= ETH_ARC_AFM;

		/* Fill the MAC hash tables with their values */
		writel((u32)(hash_table[1] << 16 | hash_table[0]),
					&lp->eth_regs->ethhash0);
		writel((u32)(hash_table[3] << 16 | hash_table[2]),
					&lp->eth_regs->ethhash1);
	}

	spin_lock_irqsave(&lp->lock, flags);
	writel(recognise, &lp->eth_regs->etharc);
	spin_unlock_irqrestore(&lp->lock, flags);
}

static void korina_tx(struct net_device *dev)
{
	struct korina_private *lp = netdev_priv(dev);
	struct dma_desc *td = &lp->td_ring[lp->tx_next_done];
	u32 devcs;
	u32 dmas;

	spin_lock(&lp->lock);

	/* Process all desc that are done */
	while (IS_DMA_FINISHED(td->control)) {
		if (lp->tx_full == 1) {
			netif_wake_queue(dev);
			lp->tx_full = 0;
		}

		devcs = lp->td_ring[lp->tx_next_done].devcs;
		if ((devcs & (ETH_TX_FD | ETH_TX_LD)) !=
				(ETH_TX_FD | ETH_TX_LD)) {
			dev->stats.tx_errors++;
			dev->stats.tx_dropped++;

			/* Should never happen */
			printk(KERN_ERR "%s: split tx ignored\n",
							dev->name);
		} else if (devcs & ETH_TX_TOK) {
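			/* Descriptor transmitted OK (ETH_TX_TOK): count the
			 * packet and its byte length in the device stats. */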
			dev->stats.tx_packets++;
			dev->stats.tx_bytes +=
				lp->tx_skb[lp->tx_next_done]->len;
		} else {
			dev->stats.tx_errors++;
			dev->stats.tx_dropped++;

			/* Underflow */
			if (devcs & ETH_TX_UND)
				dev->stats.tx_fifo_errors++;

			/* Oversized frame */
			if (devcs & ETH_TX_OF)
				dev->stats.tx_aborted_errors++;

			/* Excessive deferrals */
			if (devcs & ETH_TX_ED)
				dev->stats.tx_carrier_errors++;

			/* Collisions: medium busy */
			if (devcs & ETH_TX_EC)
				dev->stats.collisions++;

			/* Late collision */
			if (devcs & ETH_TX_LC)
				dev->stats.tx_window_errors++;
		}

		/* We must always free the original skb */
		if (lp->tx_skb[lp->tx_next_done]) {
			dev_kfree_skb_any(lp->tx_skb[lp->tx_next_done]);
			lp->tx_skb[lp->tx_next_done] = NULL;
		}

		lp->td_ring[lp->tx_next_done].control = DMA_DESC_IOF;
		lp->td_ring[lp->tx_next_done].devcs = ETH_TX_FD | ETH_TX_LD;
		lp->td_ring[lp->tx_next_done].link = 0;
		lp->td_ring[lp->tx_next_done].ca = 0;
		lp->tx_count--;

		/* Go on to next transmission */
		lp->tx_next_done = (lp->tx_next_done + 1) & KORINA_TDS_MASK;
		td = &lp->td_ring[lp->tx_next_done];

	}

	/* Clear the DMA status register */
	dmas = readl(&lp->tx_dma_regs->dmas);
	writel(~dmas, &lp->tx_dma_regs->dmas);

	writel(readl(&lp->tx_dma_regs->dmasm) &
			~(DMA_STAT_FINI | DMA_STAT_ERR),
			&lp->tx_dma_regs->dmasm);

	spin_unlock(&lp->lock);
}

static irqreturn_t
korina_tx_dma_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct korina_private *lp = netdev_priv(dev);
	u32 dmas, dmasm;
	irqreturn_t retval;

	dmas = readl(&lp->tx_dma_regs->dmas);

	if (dmas & (DMA_STAT_FINI | DMA_STAT_ERR)) {
		dmasm = readl(&lp->tx_dma_regs->dmasm);
		writel(dmasm | (DMA_STAT_FINI | DMA_STAT_ERR),
				&lp->tx_dma_regs->dmasm);

		korina_tx(dev);
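
		/* If descriptors were queued while the engine was busy (the
		 * chain is marked filled) and the engine has now gone idle
		 * (NDPTR == 0), restart transmission from the chain head. */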
		if (lp->tx_chain_status == desc_filled &&
			(readl(&(lp->tx_dma_regs->dmandptr)) == 0)) {
			writel(CPHYSADDR(&lp->td_ring[lp->tx_chain_head]),
				&(lp->tx_dma_regs->dmandptr));
			lp->tx_chain_status = desc_empty;
			lp->tx_chain_head = lp->tx_chain_tail;
			dev->trans_start = jiffies;
		}
		if (dmas & DMA_STAT_ERR)
			printk(KERN_ERR "%s: DMA error\n", dev->name);

		retval = IRQ_HANDLED;
	} else
		retval = IRQ_NONE;

	return retval;
}


static void korina_check_media(struct net_device *dev, unsigned int init_media)
{
	struct korina_private *lp = netdev_priv(dev);

	mii_check_media(&lp->mii_if, 0, init_media);

	if (lp->mii_if.full_duplex)
		writel(readl(&lp->eth_regs->ethmac2) | ETH_MAC2_FD,
						&lp->eth_regs->ethmac2);
	else
		writel(readl(&lp->eth_regs->ethmac2) & ~ETH_MAC2_FD,
						&lp->eth_regs->ethmac2);
}

static void korina_poll_media(unsigned long data)
{
	struct net_device *dev = (struct net_device *) data;
	struct korina_private *lp = netdev_priv(dev);

	korina_check_media(dev, 0);
	mod_timer(&lp->media_check_timer, jiffies + HZ);
}

static void korina_set_carrier(struct mii_if_info *mii)
{
	if (mii->force_media) {
		/* autoneg is off: Link is always assumed to be up */
		if (!netif_carrier_ok(mii->dev))
			netif_carrier_on(mii->dev);
	} else	/* Let MII library update carrier status */
		korina_check_media(mii->dev, 0);
}

static int korina_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct korina_private *lp = netdev_priv(dev);
	struct mii_ioctl_data *data = if_mii(rq);
	int rc;

	if (!netif_running(dev))
		return -EINVAL;
	spin_lock_irq(&lp->lock);
	rc = generic_mii_ioctl(&lp->mii_if, data, cmd, NULL);
	spin_unlock_irq(&lp->lock);
	korina_set_carrier(&lp->mii_if);

	return rc;
}

/* ethtool helpers */
static void netdev_get_drvinfo(struct net_device *dev,
				struct ethtool_drvinfo *info)
{
	struct korina_private *lp = netdev_priv(dev);

	strcpy(info->driver, DRV_NAME);
	strcpy(info->version, DRV_VERSION);
	strcpy(info->bus_info, lp->dev->name);
}

static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct korina_private *lp = netdev_priv(dev);
	int rc;

	spin_lock_irq(&lp->lock);
	rc = mii_ethtool_gset(&lp->mii_if, cmd);
	spin_unlock_irq(&lp->lock);

	return rc;
}

static int netdev_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct korina_private *lp = netdev_priv(dev);
	int rc;

	spin_lock_irq(&lp->lock);
	rc = mii_ethtool_sset(&lp->mii_if, cmd);
	spin_unlock_irq(&lp->lock);
	korina_set_carrier(&lp->mii_if);

	return rc;
}

static u32 netdev_get_link(struct net_device *dev)
{
	struct korina_private *lp = netdev_priv(dev);

	return mii_link_ok(&lp->mii_if);
}

static const struct ethtool_ops netdev_ethtool_ops = {
	.get_drvinfo	= netdev_get_drvinfo,
	.get_settings	= netdev_get_settings,
	.set_settings	= netdev_set_settings,
	.get_link	= netdev_get_link,
};

static int korina_alloc_ring(struct net_device *dev)
{
	struct korina_private *lp = netdev_priv(dev);
	struct sk_buff *skb;
	int i;

	/* Initialize the transmit descriptors */
	for (i = 0; i < KORINA_NUM_TDS; i++) {
		lp->td_ring[i].control = DMA_DESC_IOF;
		lp->td_ring[i].devcs = ETH_TX_FD | ETH_TX_LD;
		lp->td_ring[i].ca = 0;
		lp->td_ring[i].link = 0;
	}
	lp->tx_next_done = lp->tx_chain_head = lp->tx_chain_tail =
			lp->tx_full = lp->tx_count = 0;
	lp->tx_chain_status = desc_empty;

	/* Initialize the receive descriptors */
	for (i = 0; i < KORINA_NUM_RDS; i++) {
		skb = netdev_alloc_skb_ip_align(dev, KORINA_RBSIZE);
		if (!skb)
			return -ENOMEM;
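		/* Point this Rx descriptor at the new skb's data buffer and
		 * chain it to the next descriptor in the ring. */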
		lp->rx_skb[i] = skb;
		lp->rd_ring[i].control = DMA_DESC_IOD |
				DMA_COUNT(KORINA_RBSIZE);
		lp->rd_ring[i].devcs = 0;
		lp->rd_ring[i].ca = CPHYSADDR(skb->data);
		lp->rd_ring[i].link = CPHYSADDR(&lp->rd_ring[i+1]);
	}

	/* loop back receive descriptors, so the last
	 * descriptor points to the first one */
	lp->rd_ring[i - 1].link = CPHYSADDR(&lp->rd_ring[0]);
	lp->rd_ring[i - 1].control |= DMA_DESC_COD;

	lp->rx_next_done = 0;
	lp->rx_chain_head = 0;
	lp->rx_chain_tail = 0;
	lp->rx_chain_status = desc_empty;

	return 0;
}

static void korina_free_ring(struct net_device *dev)
{
	struct korina_private *lp = netdev_priv(dev);
	int i;

	for (i = 0; i < KORINA_NUM_RDS; i++) {
		lp->rd_ring[i].control = 0;
		if (lp->rx_skb[i])
			dev_kfree_skb_any(lp->rx_skb[i]);
		lp->rx_skb[i] = NULL;
	}

	for (i = 0; i < KORINA_NUM_TDS; i++) {
		lp->td_ring[i].control = 0;
		if (lp->tx_skb[i])
			dev_kfree_skb_any(lp->tx_skb[i]);
		lp->tx_skb[i] = NULL;
	}
}

/*
 * Initialize the RC32434 ethernet controller.
 */
static int korina_init(struct net_device *dev)
{
	struct korina_private *lp = netdev_priv(dev);

	/* Disable DMA */
	korina_abort_tx(dev);
	korina_abort_rx(dev);

	/* reset ethernet logic */
	writel(0, &lp->eth_regs->ethintfc);
	while ((readl(&lp->eth_regs->ethintfc) & ETH_INT_FC_RIP))
		dev->trans_start = jiffies;

	/* Enable Ethernet Interface */
	writel(ETH_INT_FC_EN, &lp->eth_regs->ethintfc);

	/* Allocate rings */
	if (korina_alloc_ring(dev)) {
		printk(KERN_ERR "%s: descriptor allocation failed\n", dev->name);
		korina_free_ring(dev);
		return -ENOMEM;
	}

	writel(0, &lp->rx_dma_regs->dmas);
	/* Start Rx DMA */
	korina_start_rx(lp, &lp->rd_ring[0]);

	writel(readl(&lp->tx_dma_regs->dmasm) &
			~(DMA_STAT_FINI | DMA_STAT_ERR),
			&lp->tx_dma_regs->dmasm);
	writel(readl(&lp->rx_dma_regs->dmasm) &
			~(DMA_STAT_DONE | DMA_STAT_HALT | DMA_STAT_ERR),
			&lp->rx_dma_regs->dmasm);

	/* Accept only packets destined for this Ethernet device address */
	writel(ETH_ARC_AB, &lp->eth_regs->etharc);

	/* Set all Ether station address registers to their initial values */
	writel(STATION_ADDRESS_LOW(dev), &lp->eth_regs->ethsal0);
	writel(STATION_ADDRESS_HIGH(dev), &lp->eth_regs->ethsah0);

	writel(STATION_ADDRESS_LOW(dev), &lp->eth_regs->ethsal1);
	writel(STATION_ADDRESS_HIGH(dev), &lp->eth_regs->ethsah1);

	writel(STATION_ADDRESS_LOW(dev), &lp->eth_regs->ethsal2);
	writel(STATION_ADDRESS_HIGH(dev), &lp->eth_regs->ethsah2);

	writel(STATION_ADDRESS_LOW(dev), &lp->eth_regs->ethsal3);
	writel(STATION_ADDRESS_HIGH(dev), &lp->eth_regs->ethsah3);


	/* Frame Length Checking, Pad Enable, CRC Enable, Full Duplex set */
	writel(ETH_MAC2_PE | ETH_MAC2_CEN | ETH_MAC2_FD,
			&lp->eth_regs->ethmac2);

	/* Back to back inter-packet-gap */
	writel(0x15, &lp->eth_regs->ethipgt);
	/* Non - Back to back inter-packet-gap */
	writel(0x12, &lp->eth_regs->ethipgr);

	/* Management Clock Prescaler Divisor
	 * Clock independent setting */
	writel(((idt_cpu_freq) / MII_CLOCK + 1) & ~1,
			&lp->eth_regs->ethmcp);

	/* don't transmit until fifo contains 48b */
	writel(48, &lp->eth_regs->ethfifott);

	writel(ETH_MAC1_RE, &lp->eth_regs->ethmac1);

	napi_enable(&lp->napi);
	netif_start_queue(dev);

	return 0;
}

/*
 * Restart the RC32434 ethernet controller.
 */
static void korina_restart_task(struct work_struct *work)
{
	struct korina_private *lp = container_of(work,
			struct korina_private, restart_task);
	struct net_device *dev = lp->dev;

	/*
	 * Disable interrupts
	 */
	disable_irq(lp->rx_irq);
	disable_irq(lp->tx_irq);
	disable_irq(lp->ovr_irq);
	disable_irq(lp->und_irq);

	writel(readl(&lp->tx_dma_regs->dmasm) |
				DMA_STAT_FINI | DMA_STAT_ERR,
				&lp->tx_dma_regs->dmasm);
	writel(readl(&lp->rx_dma_regs->dmasm) |
				DMA_STAT_DONE | DMA_STAT_HALT | DMA_STAT_ERR,
				&lp->rx_dma_regs->dmasm);

	korina_free_ring(dev);

	napi_disable(&lp->napi);

	if (korina_init(dev) < 0) {
		printk(KERN_ERR "%s: cannot restart device\n", dev->name);
		return;
	}
	korina_multicast_list(dev);

	enable_irq(lp->und_irq);
	enable_irq(lp->ovr_irq);
	enable_irq(lp->tx_irq);
	enable_irq(lp->rx_irq);
}

static void korina_clear_and_restart(struct net_device *dev, u32 value)
{
	struct korina_private *lp = netdev_priv(dev);

	netif_stop_queue(dev);
	writel(value, &lp->eth_regs->ethintfc);
	schedule_work(&lp->restart_task);
}

/* Ethernet Tx Underflow interrupt */
static irqreturn_t korina_und_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct korina_private *lp = netdev_priv(dev);
	unsigned int und;

	spin_lock(&lp->lock);

	und = readl(&lp->eth_regs->ethintfc);

	if (und & ETH_INT_FC_UND)
		korina_clear_and_restart(dev, und & ~ETH_INT_FC_UND);

	spin_unlock(&lp->lock);

	return IRQ_HANDLED;
}

static void korina_tx_timeout(struct net_device *dev)
{
	struct korina_private *lp = netdev_priv(dev);

	schedule_work(&lp->restart_task);
}

/* Ethernet Rx Overflow interrupt */
static irqreturn_t
korina_ovr_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct korina_private *lp = netdev_priv(dev);
	unsigned int ovr;

	spin_lock(&lp->lock);
	ovr = readl(&lp->eth_regs->ethintfc);

	if (ovr & ETH_INT_FC_OVR)
		korina_clear_and_restart(dev, ovr & ~ETH_INT_FC_OVR);

	spin_unlock(&lp->lock);

	return IRQ_HANDLED;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void korina_poll_controller(struct net_device *dev)
{
	disable_irq(dev->irq);
	korina_tx_dma_interrupt(dev->irq, dev);
	enable_irq(dev->irq);
}
#endif

static int korina_open(struct net_device *dev)
{
	struct korina_private *lp = netdev_priv(dev);
	int ret;

	/* Initialize */
	ret = korina_init(dev);
	if (ret < 0) {
		printk(KERN_ERR "%s: cannot open device\n", dev->name);
		goto out;
	}

	/* Install the interrupt handler
	 * that handles the Done Finished
	 * Ovr and Und Events */
	ret = request_irq(lp->rx_irq, korina_rx_dma_interrupt,
			IRQF_DISABLED, "Korina ethernet Rx", dev);
	if (ret < 0) {
		printk(KERN_ERR "%s: unable to get Rx DMA IRQ %d\n",
			dev->name, lp->rx_irq);
		goto err_release;
	}
	ret = request_irq(lp->tx_irq, korina_tx_dma_interrupt,
			IRQF_DISABLED, "Korina ethernet Tx", dev);
	if (ret < 0) {
		printk(KERN_ERR "%s: unable to get Tx DMA IRQ %d\n",
			dev->name, lp->tx_irq);
		goto err_free_rx_irq;
	}

	/* Install handler for overrun error. */
	ret = request_irq(lp->ovr_irq, korina_ovr_interrupt,
			IRQF_DISABLED, "Ethernet Overflow", dev);
	if (ret < 0) {
		printk(KERN_ERR "%s: unable to get OVR IRQ %d\n",
			dev->name, lp->ovr_irq);
		goto err_free_tx_irq;
	}

	/* Install handler for underflow error. */
	ret = request_irq(lp->und_irq, korina_und_interrupt,
			IRQF_DISABLED, "Ethernet Underflow", dev);
	if (ret < 0) {
		printk(KERN_ERR "%s: unable to get UND IRQ %d\n",
			dev->name, lp->und_irq);
		goto err_free_ovr_irq;
	}
	mod_timer(&lp->media_check_timer, jiffies + 1);
out:
	return ret;

err_free_ovr_irq:
	free_irq(lp->ovr_irq, dev);
err_free_tx_irq:
	free_irq(lp->tx_irq, dev);
err_free_rx_irq:
	free_irq(lp->rx_irq, dev);
err_release:
	korina_free_ring(dev);
	goto out;
}

static int korina_close(struct net_device *dev)
{
	struct korina_private *lp = netdev_priv(dev);
	u32 tmp;

	del_timer(&lp->media_check_timer);

	/* Disable interrupts */
	disable_irq(lp->rx_irq);
	disable_irq(lp->tx_irq);
	disable_irq(lp->ovr_irq);
	disable_irq(lp->und_irq);

	korina_abort_tx(dev);
	tmp = readl(&lp->tx_dma_regs->dmasm);
	tmp = tmp | DMA_STAT_FINI | DMA_STAT_ERR;
	writel(tmp, &lp->tx_dma_regs->dmasm);

	korina_abort_rx(dev);
	tmp = readl(&lp->rx_dma_regs->dmasm);
	tmp = tmp | DMA_STAT_DONE | DMA_STAT_HALT | DMA_STAT_ERR;
	writel(tmp, &lp->rx_dma_regs->dmasm);

	korina_free_ring(dev);

	napi_disable(&lp->napi);

	cancel_work_sync(&lp->restart_task);

	free_irq(lp->rx_irq, dev);
	free_irq(lp->tx_irq, dev);
	free_irq(lp->ovr_irq, dev);
	free_irq(lp->und_irq, dev);

	return 0;
}

static const struct net_device_ops korina_netdev_ops = {
	.ndo_open		= korina_open,
	.ndo_stop		= korina_close,
	.ndo_start_xmit		= korina_send_packet,
	.ndo_set_multicast_list	= korina_multicast_list,
	.ndo_tx_timeout		= korina_tx_timeout,
	.ndo_do_ioctl		= korina_ioctl,
	.ndo_change_mtu		= eth_change_mtu,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= eth_mac_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= korina_poll_controller,
#endif
};

static int korina_probe(struct platform_device *pdev)
{
	struct korina_device *bif = platform_get_drvdata(pdev);
	struct korina_private *lp;
	struct net_device *dev;
	struct resource *r;
	int rc;

	dev = alloc_etherdev(sizeof(struct korina_private));
	if (!dev) {
		printk(KERN_ERR DRV_NAME ": alloc_etherdev failed\n");
		return -ENOMEM;
	}
	SET_NETDEV_DEV(dev, &pdev->dev);
	lp = netdev_priv(dev);

	bif->dev = dev;
	memcpy(dev->dev_addr, bif->mac, 6);

	lp->rx_irq = platform_get_irq_byname(pdev, "korina_rx");
	lp->tx_irq = platform_get_irq_byname(pdev, "korina_tx");
	lp->ovr_irq = platform_get_irq_byname(pdev, "korina_ovr");
	lp->und_irq = platform_get_irq_byname(pdev, "korina_und");

	r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "korina_regs");
	dev->base_addr = r->start;
	lp->eth_regs = ioremap_nocache(r->start, resource_size(r));
	if (!lp->eth_regs) {
		printk(KERN_ERR DRV_NAME ": cannot remap registers\n");
		rc = -ENXIO;
		goto probe_err_out;
	}

	r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "korina_dma_rx");
	lp->rx_dma_regs = ioremap_nocache(r->start, resource_size(r));
	if (!lp->rx_dma_regs) {
		printk(KERN_ERR DRV_NAME ": cannot remap Rx DMA registers\n");
		rc = -ENXIO;
		goto probe_err_dma_rx;
	}

	r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "korina_dma_tx");
	lp->tx_dma_regs = ioremap_nocache(r->start, resource_size(r));
	if (!lp->tx_dma_regs) {
		printk(KERN_ERR DRV_NAME ": cannot remap Tx DMA registers\n");
		rc = -ENXIO;
		goto probe_err_dma_tx;
	}

	lp->td_ring = kmalloc(TD_RING_SIZE + RD_RING_SIZE, GFP_KERNEL);
	if (!lp->td_ring) {
		printk(KERN_ERR DRV_NAME ": cannot allocate descriptors\n");
		rc = -ENXIO;
		goto probe_err_td_ring;
	}

	dma_cache_inv((unsigned long)(lp->td_ring),
			TD_RING_SIZE + RD_RING_SIZE);

	/* now convert TD_RING pointer to KSEG1 */
	lp->td_ring = (struct dma_desc *)KSEG1ADDR(lp->td_ring);
	lp->rd_ring = &lp->td_ring[KORINA_NUM_TDS];

	spin_lock_init(&lp->lock);
	/* just use the rx dma irq */
	dev->irq = lp->rx_irq;
	lp->dev = dev;

	dev->netdev_ops = &korina_netdev_ops;
	dev->ethtool_ops = &netdev_ethtool_ops;
	dev->watchdog_timeo = TX_TIMEOUT;
	netif_napi_add(dev, &lp->napi, korina_poll, 64);

	lp->phy_addr = (((lp->rx_irq == 0x2c? 1:0) << 8) | 0x05);
	lp->mii_if.dev = dev;
	lp->mii_if.mdio_read = mdio_read;
	lp->mii_if.mdio_write = mdio_write;
	lp->mii_if.phy_id = lp->phy_addr;
	lp->mii_if.phy_id_mask = 0x1f;
	lp->mii_if.reg_num_mask = 0x1f;

	rc = register_netdev(dev);
	if (rc < 0) {
		printk(KERN_ERR DRV_NAME
			": cannot register net device: %d\n", rc);
		goto probe_err_register;
	}
	setup_timer(&lp->media_check_timer, korina_poll_media, (unsigned long) dev);

	INIT_WORK(&lp->restart_task, korina_restart_task);

	printk(KERN_INFO "%s: " DRV_NAME "-" DRV_VERSION " " DRV_RELDATE "\n",
			dev->name);
out:
	return rc;

probe_err_register:
	kfree(lp->td_ring);
probe_err_td_ring:
	iounmap(lp->tx_dma_regs);
probe_err_dma_tx:
	iounmap(lp->rx_dma_regs);
probe_err_dma_rx:
	iounmap(lp->eth_regs);
probe_err_out:
	free_netdev(dev);
	goto out;
}

static int korina_remove(struct platform_device *pdev)
{
	struct korina_device *bif = platform_get_drvdata(pdev);
	struct korina_private *lp = netdev_priv(bif->dev);

	iounmap(lp->eth_regs);
	iounmap(lp->rx_dma_regs);
	iounmap(lp->tx_dma_regs);

	platform_set_drvdata(pdev, NULL);
	unregister_netdev(bif->dev);
	free_netdev(bif->dev);

	return 0;
}

static struct platform_driver korina_driver = {
	.driver.name = "korina",
	.probe = korina_probe,
	.remove = korina_remove,
};

static int __init korina_init_module(void)
{
	return platform_driver_register(&korina_driver);
}

static void korina_cleanup_module(void)
{
	return platform_driver_unregister(&korina_driver);
}

module_init(korina_init_module);
module_exit(korina_cleanup_module);

MODULE_AUTHOR("Philip Rischel <rischelp@idt.com>");
MODULE_AUTHOR("Felix Fietkau <nbd@openwrt.org>");
MODULE_AUTHOR("Florian Fainelli <florian@openwrt.org>");
MODULE_DESCRIPTION("IDT RC32434 (Korina) Ethernet driver");
MODULE_LICENSE("GPL");