/*
 * drivers/net/ethernet/ibm/emac/mal.c
 *
 * Memory Access Layer (MAL) support
 *
 * Copyright 2007 Benjamin Herrenschmidt, IBM Corp.
 *                <benh@kernel.crashing.org>
 *
 * Based on the arch/ppc version of the driver:
 *
 * Copyright (c) 2004, 2005 Zultys Technologies.
 * Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>
 *
 * Based on original work by
 *      Benjamin Herrenschmidt <benh@kernel.crashing.org>,
 *      David Gibson <hermes@gibson.dropbear.id.au>,
 *
 *      Armin Kuster <akuster@mvista.com>
 *      Copyright 2002 MontaVista Software Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 */

#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/of_irq.h>

#include "core.h"
#include <asm/dcr-regs.h>

static int mal_count;

int mal_register_commac(struct mal_instance *mal, struct mal_commac *commac)
{
        unsigned long flags;

        spin_lock_irqsave(&mal->lock, flags);

        MAL_DBG(mal, "reg(%08x, %08x)" NL,
                commac->tx_chan_mask, commac->rx_chan_mask);

        /* Don't let multiple commacs claim the same channel(s) */
        if ((mal->tx_chan_mask & commac->tx_chan_mask) ||
            (mal->rx_chan_mask & commac->rx_chan_mask)) {
                spin_unlock_irqrestore(&mal->lock, flags);
                printk(KERN_WARNING "mal%d: COMMAC channels conflict!\n",
                       mal->index);
                return -EBUSY;
        }

        if (list_empty(&mal->list))
                napi_enable(&mal->napi);
        mal->tx_chan_mask |= commac->tx_chan_mask;
        mal->rx_chan_mask |= commac->rx_chan_mask;
        list_add(&commac->list, &mal->list);

        spin_unlock_irqrestore(&mal->lock, flags);

        return 0;
}

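/*
 * Illustrative sketch of how a MAC driver attaches to the MAL.  None of
 * the caller-side names below appear in this file; they are assumptions
 * for the example only:
 *
 *      commac->ops          = &emac_commac_ops;
 *      commac->dev          = dev;
 *      commac->tx_chan_mask = MAL_CHAN_MASK(tx_chan);
 *      commac->rx_chan_mask = MAL_CHAN_MASK(rx_chan);
 *      err = mal_register_commac(mal, commac);
 *      if (err)
 *              return err;     (-EBUSY if the channels are already claimed)
 *
 * The matching teardown path uses mal_unregister_commac() below.
 */
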
void mal_unregister_commac(struct mal_instance *mal,
                           struct mal_commac *commac)
{
        unsigned long flags;

        spin_lock_irqsave(&mal->lock, flags);

        MAL_DBG(mal, "unreg(%08x, %08x)" NL,
                commac->tx_chan_mask, commac->rx_chan_mask);

        mal->tx_chan_mask &= ~commac->tx_chan_mask;
        mal->rx_chan_mask &= ~commac->rx_chan_mask;
        list_del_init(&commac->list);
        if (list_empty(&mal->list))
                napi_disable(&mal->napi);

        spin_unlock_irqrestore(&mal->lock, flags);
}

int mal_set_rcbs(struct mal_instance *mal, int channel, unsigned long size)
{
        BUG_ON(channel < 0 || channel >= mal->num_rx_chans ||
               size > MAL_MAX_RX_SIZE);

        MAL_DBG(mal, "set_rcbs(%d, %lu)" NL, channel, size);

        if (size & 0xf) {
                printk(KERN_WARNING
                       "mal%d: incorrect RX size %lu for the channel %d\n",
                       mal->index, size, channel);
                return -EINVAL;
        }

        set_mal_dcrn(mal, MAL_RCBS(channel), size >> 4);
        return 0;
}

int mal_tx_bd_offset(struct mal_instance *mal, int channel)
{
        BUG_ON(channel < 0 || channel >= mal->num_tx_chans);

        return channel * NUM_TX_BUFF;
}

int mal_rx_bd_offset(struct mal_instance *mal, int channel)
{
        BUG_ON(channel < 0 || channel >= mal->num_rx_chans);
        return mal->num_tx_chans * NUM_TX_BUFF + channel * NUM_RX_BUFF;
}

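/*
 * The buffer descriptor area is laid out as all TX rings first, then all
 * RX rings, which is what the two offset helpers above encode.  A worked
 * example (ring depths come from Kconfig; the channel counts here are
 * assumptions): with num_tx_chans = 2, num_rx_chans = 2 and
 * NUM_TX_BUFF = NUM_RX_BUFF = 64,
 *
 *      mal_tx_bd_offset(mal, 1) = 1 * 64          =  64
 *      mal_rx_bd_offset(mal, 0) = 2 * 64 + 0 * 64 = 128
 *      mal_rx_bd_offset(mal, 1) = 2 * 64 + 1 * 64 = 192
 *
 * mal_probe() multiplies these offsets by sizeof(struct mal_descriptor)
 * when programming the MAL_TXCTPR/MAL_RXCTPR ring base registers.
 */
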
void mal_enable_tx_channel(struct mal_instance *mal, int channel)
{
        unsigned long flags;

        spin_lock_irqsave(&mal->lock, flags);

        MAL_DBG(mal, "enable_tx(%d)" NL, channel);

        set_mal_dcrn(mal, MAL_TXCASR,
                     get_mal_dcrn(mal, MAL_TXCASR) | MAL_CHAN_MASK(channel));

        spin_unlock_irqrestore(&mal->lock, flags);
}

void mal_disable_tx_channel(struct mal_instance *mal, int channel)
{
        set_mal_dcrn(mal, MAL_TXCARR, MAL_CHAN_MASK(channel));

        MAL_DBG(mal, "disable_tx(%d)" NL, channel);
}

void mal_enable_rx_channel(struct mal_instance *mal, int channel)
{
        unsigned long flags;

        /*
         * On some 4xx PPC's (e.g. 460EX/GT), the rx channel is a multiple
         * of 8, but enabling in MAL_RXCASR needs the divided by 8 value
         * for the bitmask
         */
        if (!(channel % 8))
                channel >>= 3;

        spin_lock_irqsave(&mal->lock, flags);

        MAL_DBG(mal, "enable_rx(%d)" NL, channel);

        set_mal_dcrn(mal, MAL_RXCASR,
                     get_mal_dcrn(mal, MAL_RXCASR) | MAL_CHAN_MASK(channel));

        spin_unlock_irqrestore(&mal->lock, flags);
}

void mal_disable_rx_channel(struct mal_instance *mal, int channel)
{
        /*
         * On some 4xx PPC's (e.g. 460EX/GT), the rx channel is a multiple
         * of 8, but enabling in MAL_RXCASR needs the divided by 8 value
         * for the bitmask
         */
        if (!(channel % 8))
                channel >>= 3;

        set_mal_dcrn(mal, MAL_RXCARR, MAL_CHAN_MASK(channel));

        MAL_DBG(mal, "disable_rx(%d)" NL, channel);
}

void mal_poll_add(struct mal_instance *mal, struct mal_commac *commac)
{
        unsigned long flags;

        spin_lock_irqsave(&mal->lock, flags);

        MAL_DBG(mal, "poll_add(%p)" NL, commac);

        /* starts disabled */
        set_bit(MAL_COMMAC_POLL_DISABLED, &commac->flags);

        list_add_tail(&commac->poll_list, &mal->poll_list);

        spin_unlock_irqrestore(&mal->lock, flags);
}

void mal_poll_del(struct mal_instance *mal, struct mal_commac *commac)
{
        unsigned long flags;

        spin_lock_irqsave(&mal->lock, flags);

        MAL_DBG(mal, "poll_del(%p)" NL, commac);

        list_del(&commac->poll_list);

        spin_unlock_irqrestore(&mal->lock, flags);
}

/* synchronized by mal_poll() */
static inline void mal_enable_eob_irq(struct mal_instance *mal)
{
        MAL_DBG2(mal, "enable_irq" NL);

        // XXX might want to cache MAL_CFG as the DCR read can be slooooow
        set_mal_dcrn(mal, MAL_CFG, get_mal_dcrn(mal, MAL_CFG) | MAL_CFG_EOPIE);
}

/* synchronized by NAPI state */
static inline void mal_disable_eob_irq(struct mal_instance *mal)
{
        // XXX might want to cache MAL_CFG as the DCR read can be slooooow
        set_mal_dcrn(mal, MAL_CFG, get_mal_dcrn(mal, MAL_CFG) & ~MAL_CFG_EOPIE);

        MAL_DBG2(mal, "disable_irq" NL);
}

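/*
 * End-of-buffer (EOB) interrupt handling follows the usual NAPI handshake:
 * mal_schedule_poll() below masks the EOB sources (MAL_CFG_EOPIE) before
 * scheduling the poller, and mal_poll() re-enables them through
 * mal_enable_eob_irq() only once napi_complete_done() succeeds.  mal->lock
 * guards the read-modify-write of MAL_CFG that both helpers above perform.
 */
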
static irqreturn_t mal_serr(int irq, void *dev_instance)
{
        struct mal_instance *mal = dev_instance;

        u32 esr = get_mal_dcrn(mal, MAL_ESR);

        /* Clear the error status register */
        set_mal_dcrn(mal, MAL_ESR, esr);

        MAL_DBG(mal, "SERR %08x" NL, esr);

        if (esr & MAL_ESR_EVB) {
                if (esr & MAL_ESR_DE) {
                        /* We ignore Descriptor error,
                         * TXDE or RXDE interrupt will be generated anyway.
                         */
                        return IRQ_HANDLED;
                }

                if (esr & MAL_ESR_PEIN) {
                        /* PLB error, it's probably buggy hardware or
                         * incorrect physical address in BD (i.e. bug)
                         */
                        if (net_ratelimit())
                                printk(KERN_ERR
                                       "mal%d: system error, "
                                       "PLB (ESR = 0x%08x)\n",
                                       mal->index, esr);
                        return IRQ_HANDLED;
                }

                /* OPB error, it's probably buggy hardware or incorrect
                 * EBC setup
                 */
                if (net_ratelimit())
                        printk(KERN_ERR
                               "mal%d: system error, OPB (ESR = 0x%08x)\n",
                               mal->index, esr);
        }
        return IRQ_HANDLED;
}

static inline void mal_schedule_poll(struct mal_instance *mal)
{
        if (likely(napi_schedule_prep(&mal->napi))) {
                MAL_DBG2(mal, "schedule_poll" NL);
                spin_lock(&mal->lock);
                mal_disable_eob_irq(mal);
                spin_unlock(&mal->lock);
                __napi_schedule(&mal->napi);
        } else
                MAL_DBG2(mal, "already in poll" NL);
}

static irqreturn_t mal_txeob(int irq, void *dev_instance)
{
        struct mal_instance *mal = dev_instance;

        u32 r = get_mal_dcrn(mal, MAL_TXEOBISR);

        MAL_DBG2(mal, "txeob %08x" NL, r);

        mal_schedule_poll(mal);
        set_mal_dcrn(mal, MAL_TXEOBISR, r);

#ifdef CONFIG_PPC_DCR_NATIVE
        if (mal_has_feature(mal, MAL_FTR_CLEAR_ICINTSTAT))
                mtdcri(SDR0, DCRN_SDR_ICINTSTAT,
                       (mfdcri(SDR0, DCRN_SDR_ICINTSTAT) | ICINTSTAT_ICTX));
#endif

        return IRQ_HANDLED;
}

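/*
 * On MALs flagged with MAL_FTR_CLEAR_ICINTSTAT (the 405EZ variant, see
 * mal_probe()), the coalesced interrupt status in SDR0 ICINTSTAT has to be
 * acknowledged as well, which is what the #ifdef block above and its twin
 * in mal_rxeob() below take care of.
 */
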
static irqreturn_t mal_rxeob(int irq, void *dev_instance)
{
        struct mal_instance *mal = dev_instance;

        u32 r = get_mal_dcrn(mal, MAL_RXEOBISR);

        MAL_DBG2(mal, "rxeob %08x" NL, r);

        mal_schedule_poll(mal);
        set_mal_dcrn(mal, MAL_RXEOBISR, r);

#ifdef CONFIG_PPC_DCR_NATIVE
        if (mal_has_feature(mal, MAL_FTR_CLEAR_ICINTSTAT))
                mtdcri(SDR0, DCRN_SDR_ICINTSTAT,
                       (mfdcri(SDR0, DCRN_SDR_ICINTSTAT) | ICINTSTAT_ICRX));
#endif

        return IRQ_HANDLED;
}

static irqreturn_t mal_txde(int irq, void *dev_instance)
{
        struct mal_instance *mal = dev_instance;

        u32 deir = get_mal_dcrn(mal, MAL_TXDEIR);
        set_mal_dcrn(mal, MAL_TXDEIR, deir);

        MAL_DBG(mal, "txde %08x" NL, deir);

        if (net_ratelimit())
                printk(KERN_ERR
                       "mal%d: TX descriptor error (TXDEIR = 0x%08x)\n",
                       mal->index, deir);

        return IRQ_HANDLED;
}

static irqreturn_t mal_rxde(int irq, void *dev_instance)
{
        struct mal_instance *mal = dev_instance;
        struct list_head *l;

        u32 deir = get_mal_dcrn(mal, MAL_RXDEIR);

        MAL_DBG(mal, "rxde %08x" NL, deir);

        list_for_each(l, &mal->list) {
                struct mal_commac *mc = list_entry(l, struct mal_commac, list);
                if (deir & mc->rx_chan_mask) {
                        set_bit(MAL_COMMAC_RX_STOPPED, &mc->flags);
                        mc->ops->rxde(mc->dev);
                }
        }

        mal_schedule_poll(mal);
        set_mal_dcrn(mal, MAL_RXDEIR, deir);

        return IRQ_HANDLED;
}

static irqreturn_t mal_int(int irq, void *dev_instance)
{
        struct mal_instance *mal = dev_instance;
        u32 esr = get_mal_dcrn(mal, MAL_ESR);

        if (esr & MAL_ESR_EVB) {
                /* descriptor error */
                if (esr & MAL_ESR_DE) {
                        if (esr & MAL_ESR_CIDT)
                                return mal_rxde(irq, dev_instance);
                        else
                                return mal_txde(irq, dev_instance);
                } else { /* SERR */
                        return mal_serr(irq, dev_instance);
                }
        }
        return IRQ_HANDLED;
}

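/*
 * mal_int() above is only used when MAL_FTR_COMMON_ERR_INT is set, i.e.
 * when SERR, TX DE and RX DE share one interrupt line (see the handler
 * selection in mal_probe()); it demultiplexes on the ESR bits and hands
 * off to the dedicated handlers.
 */
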
void mal_poll_disable(struct mal_instance *mal, struct mal_commac *commac)
{
        /* Spinlock-type semantics: only one caller may disable poll at a time */
        while (test_and_set_bit(MAL_COMMAC_POLL_DISABLED, &commac->flags))
                msleep(1);

        /* Synchronize with the MAL NAPI poller */
        napi_synchronize(&mal->napi);
}

void mal_poll_enable(struct mal_instance *mal, struct mal_commac *commac)
{
        smp_wmb();
        clear_bit(MAL_COMMAC_POLL_DISABLED, &commac->flags);

        /* Feels better to trigger a poll here to catch up with events that
         * may have happened on this channel while disabled. It will most
         * probably be delayed until the next interrupt but that's mostly a
         * non-issue in the context where this is called.
         */
        napi_schedule(&mal->napi);
}

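/*
 * NAPI poll callback.  Rough flow: reap TX completions on every registered
 * channel, then poll RX up to the budget; if the budget is not exhausted,
 * complete NAPI and unmask the EOB interrupts, then re-check every channel
 * (peek_rx / MAL_COMMAC_RX_STOPPED) for packets that slipped in during that
 * window ("rotting" packets) and, if any are found, reschedule and loop
 * back to the "again" label.
 */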
static int mal_poll(struct napi_struct *napi, int budget)
{
        struct mal_instance *mal = container_of(napi, struct mal_instance, napi);
        struct list_head *l;
        int received = 0;
        unsigned long flags;

        MAL_DBG2(mal, "poll(%d)" NL, budget);
 again:
        /* Process TX skbs */
        list_for_each(l, &mal->poll_list) {
                struct mal_commac *mc =
                        list_entry(l, struct mal_commac, poll_list);
                mc->ops->poll_tx(mc->dev);
        }

        /* Process RX skbs.
         *
         * We _might_ need something smarter here to enforce polling
         * fairness.
         */
        list_for_each(l, &mal->poll_list) {
                struct mal_commac *mc =
                        list_entry(l, struct mal_commac, poll_list);
                int n;
                if (unlikely(test_bit(MAL_COMMAC_POLL_DISABLED, &mc->flags)))
                        continue;
                n = mc->ops->poll_rx(mc->dev, budget - received);
                if (n) {
                        received += n;
                        if (received >= budget)
                                return budget;
                }
        }

        if (napi_complete_done(napi, received)) {
                /* We need to disable IRQs to protect from RXDE IRQ here */
                spin_lock_irqsave(&mal->lock, flags);
                mal_enable_eob_irq(mal);
                spin_unlock_irqrestore(&mal->lock, flags);
        }

        /* Check for "rotting" packet(s) */
        list_for_each(l, &mal->poll_list) {
                struct mal_commac *mc =
                        list_entry(l, struct mal_commac, poll_list);
                if (unlikely(test_bit(MAL_COMMAC_POLL_DISABLED, &mc->flags)))
                        continue;
                if (unlikely(mc->ops->peek_rx(mc->dev) ||
                             test_bit(MAL_COMMAC_RX_STOPPED, &mc->flags))) {
                        MAL_DBG2(mal, "rotting packet" NL);
                        if (!napi_reschedule(napi))
                                goto more_work;

                        spin_lock_irqsave(&mal->lock, flags);
                        mal_disable_eob_irq(mal);
                        spin_unlock_irqrestore(&mal->lock, flags);
                        goto again;
                }
                mc->ops->poll_tx(mc->dev);
        }

 more_work:
        MAL_DBG2(mal, "poll() %d <- %d" NL, budget, received);
        return received;
}

static void mal_reset(struct mal_instance *mal)
{
        int n = 10;

        MAL_DBG(mal, "reset" NL);

        set_mal_dcrn(mal, MAL_CFG, MAL_CFG_SR);

        /* Wait for reset to complete (1 system clock) */
        while ((get_mal_dcrn(mal, MAL_CFG) & MAL_CFG_SR) && n)
                --n;

        if (unlikely(!n))
                printk(KERN_ERR "mal%d: reset timeout\n", mal->index);
}

int mal_get_regs_len(struct mal_instance *mal)
{
        return sizeof(struct emac_ethtool_regs_subhdr) +
            sizeof(struct mal_regs);
}

void *mal_dump_regs(struct mal_instance *mal, void *buf)
{
        struct emac_ethtool_regs_subhdr *hdr = buf;
        struct mal_regs *regs = (struct mal_regs *)(hdr + 1);
        int i;

        hdr->version = mal->version;
        hdr->index = mal->index;

        regs->tx_count = mal->num_tx_chans;
        regs->rx_count = mal->num_rx_chans;

        regs->cfg = get_mal_dcrn(mal, MAL_CFG);
        regs->esr = get_mal_dcrn(mal, MAL_ESR);
        regs->ier = get_mal_dcrn(mal, MAL_IER);
        regs->tx_casr = get_mal_dcrn(mal, MAL_TXCASR);
        regs->tx_carr = get_mal_dcrn(mal, MAL_TXCARR);
        regs->tx_eobisr = get_mal_dcrn(mal, MAL_TXEOBISR);
        regs->tx_deir = get_mal_dcrn(mal, MAL_TXDEIR);
        regs->rx_casr = get_mal_dcrn(mal, MAL_RXCASR);
        regs->rx_carr = get_mal_dcrn(mal, MAL_RXCARR);
        regs->rx_eobisr = get_mal_dcrn(mal, MAL_RXEOBISR);
        regs->rx_deir = get_mal_dcrn(mal, MAL_RXDEIR);

        for (i = 0; i < regs->tx_count; ++i)
                regs->tx_ctpr[i] = get_mal_dcrn(mal, MAL_TXCTPR(i));

        for (i = 0; i < regs->rx_count; ++i) {
                regs->rx_ctpr[i] = get_mal_dcrn(mal, MAL_RXCTPR(i));
                regs->rcbs[i] = get_mal_dcrn(mal, MAL_RCBS(i));
        }
        return regs + 1;
}

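/*
 * mal_probe() binds against a device-tree node along these lines.  This is
 * an illustrative sketch only -- the dcr-reg value, interrupt specifiers
 * and channel counts below are made up, board-specific values:
 *
 *      MAL0: mcmal {
 *              compatible = "ibm,mcmal2", "ibm,mcmal";
 *              dcr-reg = <0x180 0x062>;
 *              num-tx-chans = <2>;
 *              num-rx-chans = <2>;
 *              interrupt-parent = <&UIC0>;
 *              interrupts = <0xa 0x4  0xb 0x4  0xc 0x4  0xd 0x4  0xe 0x4>;
 *      };
 *
 * Interrupt indices 0..4 are parsed below as TX EOB, RX EOB, SERR, TX DE
 * and RX DE respectively; with MAL_FTR_COMMON_ERR_INT the last two reuse
 * the SERR line.
 */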
static int mal_probe(struct platform_device *ofdev)
{
        struct mal_instance *mal;
        int err = 0, i, bd_size;
        int index = mal_count++;
        unsigned int dcr_base;
        const u32 *prop;
        u32 cfg;
        unsigned long irqflags;
        irq_handler_t hdlr_serr, hdlr_txde, hdlr_rxde;

        mal = kzalloc(sizeof(struct mal_instance), GFP_KERNEL);
        if (!mal)
                return -ENOMEM;

        mal->index = index;
        mal->ofdev = ofdev;
        mal->version = of_device_is_compatible(ofdev->dev.of_node, "ibm,mcmal2") ?
                2 : 1;

        MAL_DBG(mal, "probe" NL);

        prop = of_get_property(ofdev->dev.of_node, "num-tx-chans", NULL);
        if (prop == NULL) {
                printk(KERN_ERR
                       "mal%d: can't find MAL num-tx-chans property!\n",
                       index);
                err = -ENODEV;
                goto fail;
        }
        mal->num_tx_chans = prop[0];

        prop = of_get_property(ofdev->dev.of_node, "num-rx-chans", NULL);
        if (prop == NULL) {
                printk(KERN_ERR
                       "mal%d: can't find MAL num-rx-chans property!\n",
                       index);
                err = -ENODEV;
                goto fail;
        }
        mal->num_rx_chans = prop[0];

        dcr_base = dcr_resource_start(ofdev->dev.of_node, 0);
        if (dcr_base == 0) {
                printk(KERN_ERR
                       "mal%d: can't find DCR resource!\n", index);
                err = -ENODEV;
                goto fail;
        }
        mal->dcr_host = dcr_map(ofdev->dev.of_node, dcr_base, 0x100);
        if (!DCR_MAP_OK(mal->dcr_host)) {
                printk(KERN_ERR
                       "mal%d: failed to map DCRs !\n", index);
                err = -ENODEV;
                goto fail;
        }

        if (of_device_is_compatible(ofdev->dev.of_node, "ibm,mcmal-405ez")) {
#if defined(CONFIG_IBM_EMAC_MAL_CLR_ICINTSTAT) && \
                defined(CONFIG_IBM_EMAC_MAL_COMMON_ERR)
                mal->features |= (MAL_FTR_CLEAR_ICINTSTAT |
                                  MAL_FTR_COMMON_ERR_INT);
#else
                printk(KERN_ERR "%pOF: Support for 405EZ not enabled!\n",
                       ofdev->dev.of_node);
                err = -ENODEV;
                goto fail;
#endif
        }

        mal->txeob_irq = irq_of_parse_and_map(ofdev->dev.of_node, 0);
        mal->rxeob_irq = irq_of_parse_and_map(ofdev->dev.of_node, 1);
        mal->serr_irq = irq_of_parse_and_map(ofdev->dev.of_node, 2);

        if (mal_has_feature(mal, MAL_FTR_COMMON_ERR_INT)) {
                mal->txde_irq = mal->rxde_irq = mal->serr_irq;
        } else {
                mal->txde_irq = irq_of_parse_and_map(ofdev->dev.of_node, 3);
                mal->rxde_irq = irq_of_parse_and_map(ofdev->dev.of_node, 4);
        }

        if (!mal->txeob_irq || !mal->rxeob_irq || !mal->serr_irq ||
            !mal->txde_irq || !mal->rxde_irq) {
                printk(KERN_ERR
                       "mal%d: failed to map interrupts !\n", index);
                err = -ENODEV;
                goto fail_unmap;
        }

        INIT_LIST_HEAD(&mal->poll_list);
        INIT_LIST_HEAD(&mal->list);
        spin_lock_init(&mal->lock);

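        /*
         * One MAL can serve several EMACs, so NAPI is attached to a dummy
         * netdev owned by the MAL instance rather than to any particular
         * Ethernet device; mal_poll() then fans the work out to every
         * registered commac.
         */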
        init_dummy_netdev(&mal->dummy_dev);

        netif_napi_add(&mal->dummy_dev, &mal->napi, mal_poll,
                       CONFIG_IBM_EMAC_POLL_WEIGHT);

        /* Load power-on reset defaults */
        mal_reset(mal);

        /* Set the MAL configuration register */
        cfg = (mal->version == 2) ? MAL2_CFG_DEFAULT : MAL1_CFG_DEFAULT;
        cfg |= MAL_CFG_PLBB | MAL_CFG_OPBBL | MAL_CFG_LEA;

        /* Current Axon is not happy with a non-0 priority; it can
         * deadlock, so fix it up here.
         */
        if (of_device_is_compatible(ofdev->dev.of_node, "ibm,mcmal-axon"))
                cfg &= ~(MAL2_CFG_RPP_10 | MAL2_CFG_WPP_10);

        /* Apply configuration */
        set_mal_dcrn(mal, MAL_CFG, cfg);

        /* Allocate space for BD rings */
        BUG_ON(mal->num_tx_chans <= 0 || mal->num_tx_chans > 32);
        BUG_ON(mal->num_rx_chans <= 0 || mal->num_rx_chans > 32);

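        /*
         * Size the descriptor area for all rings at once.  A worked example
         * (ring depths come from Kconfig; the channel counts here are
         * assumptions): 2 TX and 2 RX channels with NUM_TX_BUFF =
         * NUM_RX_BUFF = 64 give 256 descriptors, i.e.
         * 256 * sizeof(struct mal_descriptor) bytes, allocated below as a
         * single coherent DMA block shared by every ring.
         */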
        bd_size = sizeof(struct mal_descriptor) *
                (NUM_TX_BUFF * mal->num_tx_chans +
                 NUM_RX_BUFF * mal->num_rx_chans);
        mal->bd_virt = dma_zalloc_coherent(&ofdev->dev, bd_size, &mal->bd_dma,
                                           GFP_KERNEL);
        if (mal->bd_virt == NULL) {
                err = -ENOMEM;
                goto fail_unmap;
        }

        for (i = 0; i < mal->num_tx_chans; ++i)
                set_mal_dcrn(mal, MAL_TXCTPR(i), mal->bd_dma +
                             sizeof(struct mal_descriptor) *
                             mal_tx_bd_offset(mal, i));

        for (i = 0; i < mal->num_rx_chans; ++i)
                set_mal_dcrn(mal, MAL_RXCTPR(i), mal->bd_dma +
                             sizeof(struct mal_descriptor) *
                             mal_rx_bd_offset(mal, i));

        if (mal_has_feature(mal, MAL_FTR_COMMON_ERR_INT)) {
                irqflags = IRQF_SHARED;
                hdlr_serr = hdlr_txde = hdlr_rxde = mal_int;
        } else {
                irqflags = 0;
                hdlr_serr = mal_serr;
                hdlr_txde = mal_txde;
                hdlr_rxde = mal_rxde;
        }

        err = request_irq(mal->serr_irq, hdlr_serr, irqflags, "MAL SERR", mal);
        if (err)
                goto fail2;
        err = request_irq(mal->txde_irq, hdlr_txde, irqflags, "MAL TX DE", mal);
        if (err)
                goto fail3;
        err = request_irq(mal->txeob_irq, mal_txeob, 0, "MAL TX EOB", mal);
        if (err)
                goto fail4;
        err = request_irq(mal->rxde_irq, hdlr_rxde, irqflags, "MAL RX DE", mal);
        if (err)
                goto fail5;
        err = request_irq(mal->rxeob_irq, mal_rxeob, 0, "MAL RX EOB", mal);
        if (err)
                goto fail6;

        /* Enable all MAL SERR interrupt sources */
        set_mal_dcrn(mal, MAL_IER, MAL_IER_EVENTS);

        /* Enable EOB interrupt */
        mal_enable_eob_irq(mal);

        printk(KERN_INFO
               "MAL v%d %pOF, %d TX channels, %d RX channels\n",
               mal->version, ofdev->dev.of_node,
               mal->num_tx_chans, mal->num_rx_chans);

        /* Advertise this instance to the rest of the world */
        wmb();
        platform_set_drvdata(ofdev, mal);

        return 0;

 fail6:
        free_irq(mal->rxde_irq, mal);
 fail5:
        free_irq(mal->txeob_irq, mal);
 fail4:
        free_irq(mal->txde_irq, mal);
 fail3:
        free_irq(mal->serr_irq, mal);
 fail2:
        dma_free_coherent(&ofdev->dev, bd_size, mal->bd_virt, mal->bd_dma);
 fail_unmap:
        dcr_unmap(mal->dcr_host, 0x100);
 fail:
        kfree(mal);

        return err;
}

static int mal_remove(struct platform_device *ofdev)
{
        struct mal_instance *mal = platform_get_drvdata(ofdev);

        MAL_DBG(mal, "remove" NL);

        /* Synchronize with scheduled polling */
        napi_disable(&mal->napi);

        if (!list_empty(&mal->list))
                /* This is *very* bad */
                WARN(1, KERN_EMERG
                     "mal%d: commac list is not empty on remove!\n",
                     mal->index);

        free_irq(mal->serr_irq, mal);
        free_irq(mal->txde_irq, mal);
        free_irq(mal->txeob_irq, mal);
        free_irq(mal->rxde_irq, mal);
        free_irq(mal->rxeob_irq, mal);

        mal_reset(mal);

        dma_free_coherent(&ofdev->dev,
                          sizeof(struct mal_descriptor) *
                          (NUM_TX_BUFF * mal->num_tx_chans +
                           NUM_RX_BUFF * mal->num_rx_chans), mal->bd_virt,
                          mal->bd_dma);
        kfree(mal);

        return 0;
}

static const struct of_device_id mal_platform_match[] =
{
        {
                .compatible = "ibm,mcmal",
        },
        {
                .compatible = "ibm,mcmal2",
        },
        /* Backward compat */
        {
                .type = "mcmal-dma",
                .compatible = "ibm,mcmal",
        },
        {
                .type = "mcmal-dma",
                .compatible = "ibm,mcmal2",
        },
        {},
};

static struct platform_driver mal_of_driver = {
        .driver = {
                .name = "mcmal",
                .of_match_table = mal_platform_match,
        },
        .probe = mal_probe,
        .remove = mal_remove,
};

int __init mal_init(void)
{
        return platform_driver_register(&mal_of_driver);
}

void mal_exit(void)
{
        platform_driver_unregister(&mal_of_driver);
}