/*
 * drivers/net/ethernet/ibm/emac/mal.c
 *
 * Memory Access Layer (MAL) support
 *
 * Copyright 2007 Benjamin Herrenschmidt, IBM Corp.
 *                <benh@kernel.crashing.org>
 *
 * Based on the arch/ppc version of the driver:
 *
 * Copyright (c) 2004, 2005 Zultys Technologies.
 * Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>
 *
 * Based on original work by
 *      Benjamin Herrenschmidt <benh@kernel.crashing.org>,
 *      David Gibson <hermes@gibson.dropbear.id.au>,
 *
 *      Armin Kuster <akuster@mvista.com>
 *      Copyright 2002 MontaVista Software Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 */

#include <linux/delay.h>
#include <linux/slab.h>

#include "core.h"
#include <asm/dcr-regs.h>

static int mal_count;

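/*
 * Register a MAC ("commac") with this MAL instance, claiming the TX/RX
 * channels named in its channel masks.  The first commac to register
 * also enables the shared NAPI context.
 */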
int __devinit mal_register_commac(struct mal_instance	*mal,
				  struct mal_commac	*commac)
{
	unsigned long flags;

	spin_lock_irqsave(&mal->lock, flags);

	MAL_DBG(mal, "reg(%08x, %08x)" NL,
		commac->tx_chan_mask, commac->rx_chan_mask);

	/* Don't let multiple commacs claim the same channel(s) */
	if ((mal->tx_chan_mask & commac->tx_chan_mask) ||
	    (mal->rx_chan_mask & commac->rx_chan_mask)) {
		spin_unlock_irqrestore(&mal->lock, flags);
		printk(KERN_WARNING "mal%d: COMMAC channels conflict!\n",
		       mal->index);
		return -EBUSY;
	}

	if (list_empty(&mal->list))
		napi_enable(&mal->napi);
	mal->tx_chan_mask |= commac->tx_chan_mask;
	mal->rx_chan_mask |= commac->rx_chan_mask;
	list_add(&commac->list, &mal->list);

	spin_unlock_irqrestore(&mal->lock, flags);

	return 0;
}
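
/*
 * A minimal usage sketch from the MAC side (hypothetical caller; the
 * field and variable names are illustrative, error handling trimmed):
 *
 *	dev->commac.ops          = &emac_commac_ops;
 *	dev->commac.dev          = dev;
 *	dev->commac.tx_chan_mask = MAL_CHAN_MASK(dev->mal_tx_chan);
 *	dev->commac.rx_chan_mask = MAL_CHAN_MASK(dev->mal_rx_chan);
 *	err = mal_register_commac(dev->mal, &dev->commac);
 *	...
 *	mal_unregister_commac(dev->mal, &dev->commac);
 */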

void mal_unregister_commac(struct mal_instance	*mal,
		struct mal_commac *commac)
{
	unsigned long flags;

	spin_lock_irqsave(&mal->lock, flags);

	MAL_DBG(mal, "unreg(%08x, %08x)" NL,
		commac->tx_chan_mask, commac->rx_chan_mask);

	mal->tx_chan_mask &= ~commac->tx_chan_mask;
	mal->rx_chan_mask &= ~commac->rx_chan_mask;
	list_del_init(&commac->list);
	if (list_empty(&mal->list))
		napi_disable(&mal->napi);

	spin_unlock_irqrestore(&mal->lock, flags);
}

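/*
 * Set the RX buffer size for a channel.  The RCBS register holds the
 * size in 16-byte units, so the size must be a multiple of 16; e.g. a
 * 1536-byte buffer is programmed as 1536 >> 4 = 96.
 */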
int mal_set_rcbs(struct mal_instance *mal, int channel, unsigned long size)
{
	BUG_ON(channel < 0 || channel >= mal->num_rx_chans ||
	       size > MAL_MAX_RX_SIZE);

	MAL_DBG(mal, "set_rcbs(%d, %lu)" NL, channel, size);

	if (size & 0xf) {
		printk(KERN_WARNING
		       "mal%d: incorrect RX size %lu for channel %d\n",
		       mal->index, size, channel);
		return -EINVAL;
	}

	set_mal_dcrn(mal, MAL_RCBS(channel), size >> 4);
	return 0;
}

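/*
 * Buffer-descriptor rings for all channels live in one contiguous DMA
 * block: the TX rings for channels 0..num_tx_chans-1 come first,
 * followed by the RX rings.  These helpers return a ring's starting
 * offset into that block, in descriptors.
 */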
int mal_tx_bd_offset(struct mal_instance *mal, int channel)
{
	BUG_ON(channel < 0 || channel >= mal->num_tx_chans);

	return channel * NUM_TX_BUFF;
}

int mal_rx_bd_offset(struct mal_instance *mal, int channel)
{
	BUG_ON(channel < 0 || channel >= mal->num_rx_chans);
	return mal->num_tx_chans * NUM_TX_BUFF + channel * NUM_RX_BUFF;
}

void mal_enable_tx_channel(struct mal_instance *mal, int channel)
{
	unsigned long flags;

	spin_lock_irqsave(&mal->lock, flags);

	MAL_DBG(mal, "enable_tx(%d)" NL, channel);

	set_mal_dcrn(mal, MAL_TXCASR,
		     get_mal_dcrn(mal, MAL_TXCASR) | MAL_CHAN_MASK(channel));

	spin_unlock_irqrestore(&mal->lock, flags);
}

void mal_disable_tx_channel(struct mal_instance *mal, int channel)
{
	set_mal_dcrn(mal, MAL_TXCARR, MAL_CHAN_MASK(channel));

	MAL_DBG(mal, "disable_tx(%d)" NL, channel);
}

void mal_enable_rx_channel(struct mal_instance *mal, int channel)
{
	unsigned long flags;

	/*
	 * On some 4xx PPCs (e.g. 460EX/GT), the RX channel number is a
	 * multiple of 8, but the MAL_RXCASR bitmask wants that number
	 * divided by 8 (e.g. channel 8 is programmed as channel 1).
	 */
	if (!(channel % 8))
		channel >>= 3;

	spin_lock_irqsave(&mal->lock, flags);

	MAL_DBG(mal, "enable_rx(%d)" NL, channel);

	set_mal_dcrn(mal, MAL_RXCASR,
		     get_mal_dcrn(mal, MAL_RXCASR) | MAL_CHAN_MASK(channel));

	spin_unlock_irqrestore(&mal->lock, flags);
}

void mal_disable_rx_channel(struct mal_instance *mal, int channel)
{
	/*
	 * On some 4xx PPCs (e.g. 460EX/GT), the RX channel number is a
	 * multiple of 8, but the MAL_RXCARR bitmask wants that number
	 * divided by 8 (see mal_enable_rx_channel() above).
	 */
	if (!(channel % 8))
		channel >>= 3;

	set_mal_dcrn(mal, MAL_RXCARR, MAL_CHAN_MASK(channel));

	MAL_DBG(mal, "disable_rx(%d)" NL, channel);
}

void mal_poll_add(struct mal_instance *mal, struct mal_commac *commac)
{
	unsigned long flags;

	spin_lock_irqsave(&mal->lock, flags);

	MAL_DBG(mal, "poll_add(%p)" NL, commac);

	/* starts disabled */
	set_bit(MAL_COMMAC_POLL_DISABLED, &commac->flags);

	list_add_tail(&commac->poll_list, &mal->poll_list);

	spin_unlock_irqrestore(&mal->lock, flags);
}

void mal_poll_del(struct mal_instance *mal, struct mal_commac *commac)
{
	unsigned long flags;

	spin_lock_irqsave(&mal->lock, flags);

	MAL_DBG(mal, "poll_del(%p)" NL, commac);

	list_del(&commac->poll_list);

	spin_unlock_irqrestore(&mal->lock, flags);
}

/* synchronized by mal_poll() */
static inline void mal_enable_eob_irq(struct mal_instance *mal)
{
	MAL_DBG2(mal, "enable_irq" NL);

	// XXX might want to cache MAL_CFG as the DCR read can be slooooow
	set_mal_dcrn(mal, MAL_CFG, get_mal_dcrn(mal, MAL_CFG) | MAL_CFG_EOPIE);
}

/* synchronized by NAPI state */
static inline void mal_disable_eob_irq(struct mal_instance *mal)
{
	// XXX might want to cache MAL_CFG as the DCR read can be slooooow
	set_mal_dcrn(mal, MAL_CFG, get_mal_dcrn(mal, MAL_CFG) & ~MAL_CFG_EOPIE);

	MAL_DBG2(mal, "disable_irq" NL);
}

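/*
 * System-error interrupt: snapshot and clear MAL_ESR, then decode it.
 * Descriptor errors are left to the dedicated TXDE/RXDE handlers; PLB
 * and OPB bus errors are only logged (rate-limited), since they usually
 * indicate broken hardware or a configuration bug.
 */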
static irqreturn_t mal_serr(int irq, void *dev_instance)
{
	struct mal_instance *mal = dev_instance;

	u32 esr = get_mal_dcrn(mal, MAL_ESR);

	/* Clear the error status register */
	set_mal_dcrn(mal, MAL_ESR, esr);

	MAL_DBG(mal, "SERR %08x" NL, esr);

	if (esr & MAL_ESR_EVB) {
		if (esr & MAL_ESR_DE) {
			/* We ignore descriptor errors here;
			 * a TXDE or RXDE interrupt will be generated anyway.
			 */
			return IRQ_HANDLED;
		}

		if (esr & MAL_ESR_PEIN) {
			/* PLB error: probably buggy hardware or an
			 * incorrect physical address in a BD (i.e. a bug)
			 */
			if (net_ratelimit())
				printk(KERN_ERR
				       "mal%d: system error, "
				       "PLB (ESR = 0x%08x)\n",
				       mal->index, esr);
			return IRQ_HANDLED;
		}

		/* OPB error: probably buggy hardware or an incorrect
		 * EBC setup
		 */
		if (net_ratelimit())
			printk(KERN_ERR
			       "mal%d: system error, OPB (ESR = 0x%08x)\n",
			       mal->index, esr);
	}
	return IRQ_HANDLED;
}

static inline void mal_schedule_poll(struct mal_instance *mal)
{
	if (likely(napi_schedule_prep(&mal->napi))) {
		MAL_DBG2(mal, "schedule_poll" NL);
		mal_disable_eob_irq(mal);
		__napi_schedule(&mal->napi);
	} else
		MAL_DBG2(mal, "already in poll" NL);
}

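/*
 * The end-of-buffer handlers do no real packet work themselves: they
 * hand the instance off to NAPI (which disables further EOB interrupts)
 * and acknowledge the interrupt status.  On parts with the
 * MAL_FTR_CLEAR_ICINTSTAT quirk (405EZ) the interrupt-controller status
 * has to be cleared by hand as well.
 */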
static irqreturn_t mal_txeob(int irq, void *dev_instance)
{
	struct mal_instance *mal = dev_instance;

	u32 r = get_mal_dcrn(mal, MAL_TXEOBISR);

	MAL_DBG2(mal, "txeob %08x" NL, r);

	mal_schedule_poll(mal);
	set_mal_dcrn(mal, MAL_TXEOBISR, r);

#ifdef CONFIG_PPC_DCR_NATIVE
	if (mal_has_feature(mal, MAL_FTR_CLEAR_ICINTSTAT))
		mtdcri(SDR0, DCRN_SDR_ICINTSTAT,
				(mfdcri(SDR0, DCRN_SDR_ICINTSTAT) | ICINTSTAT_ICTX));
#endif

	return IRQ_HANDLED;
}

static irqreturn_t mal_rxeob(int irq, void *dev_instance)
{
	struct mal_instance *mal = dev_instance;

	u32 r = get_mal_dcrn(mal, MAL_RXEOBISR);

	MAL_DBG2(mal, "rxeob %08x" NL, r);

	mal_schedule_poll(mal);
	set_mal_dcrn(mal, MAL_RXEOBISR, r);

#ifdef CONFIG_PPC_DCR_NATIVE
	if (mal_has_feature(mal, MAL_FTR_CLEAR_ICINTSTAT))
		mtdcri(SDR0, DCRN_SDR_ICINTSTAT,
				(mfdcri(SDR0, DCRN_SDR_ICINTSTAT) | ICINTSTAT_ICRX));
#endif

	return IRQ_HANDLED;
}

static irqreturn_t mal_txde(int irq, void *dev_instance)
{
	struct mal_instance *mal = dev_instance;

	u32 deir = get_mal_dcrn(mal, MAL_TXDEIR);
	set_mal_dcrn(mal, MAL_TXDEIR, deir);

	MAL_DBG(mal, "txde %08x" NL, deir);

	if (net_ratelimit())
		printk(KERN_ERR
		       "mal%d: TX descriptor error (TXDEIR = 0x%08x)\n",
		       mal->index, deir);

	return IRQ_HANDLED;
}

static irqreturn_t mal_rxde(int irq, void *dev_instance)
{
	struct mal_instance *mal = dev_instance;
	struct list_head *l;

	u32 deir = get_mal_dcrn(mal, MAL_RXDEIR);

	MAL_DBG(mal, "rxde %08x" NL, deir);

	list_for_each(l, &mal->list) {
		struct mal_commac *mc = list_entry(l, struct mal_commac, list);
		if (deir & mc->rx_chan_mask) {
			set_bit(MAL_COMMAC_RX_STOPPED, &mc->flags);
			mc->ops->rxde(mc->dev);
		}
	}

	mal_schedule_poll(mal);
	set_mal_dcrn(mal, MAL_RXDEIR, deir);

	return IRQ_HANDLED;
}

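/*
 * Common error handler for parts (e.g. 405EZ) that mux SERR, TXDE and
 * RXDE onto a single interrupt line: inspect MAL_ESR and dispatch to
 * the handler the event would otherwise have reached directly.
 */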
static irqreturn_t mal_int(int irq, void *dev_instance)
{
	struct mal_instance *mal = dev_instance;
	u32 esr = get_mal_dcrn(mal, MAL_ESR);

	if (esr & MAL_ESR_EVB) {
		/* descriptor error */
		if (esr & MAL_ESR_DE) {
			if (esr & MAL_ESR_CIDT)
				return mal_rxde(irq, dev_instance);
			else
				return mal_txde(irq, dev_instance);
		} else { /* SERR */
			return mal_serr(irq, dev_instance);
		}
	}
	return IRQ_HANDLED;
}

void mal_poll_disable(struct mal_instance *mal, struct mal_commac *commac)
{
	/* Spinlock-type semantics: only one caller may disable polling
	 * at a time */
	while (test_and_set_bit(MAL_COMMAC_POLL_DISABLED, &commac->flags))
		msleep(1);

	/* Synchronize with the MAL NAPI poller */
	napi_synchronize(&mal->napi);
}

void mal_poll_enable(struct mal_instance *mal, struct mal_commac *commac)
{
	smp_wmb();
	clear_bit(MAL_COMMAC_POLL_DISABLED, &commac->flags);

	/* Feels better to trigger a poll here to catch up with events that
	 * may have happened on this channel while disabled. It will most
	 * probably be delayed until the next interrupt but that's mostly a
	 * non-issue in the context where this is called.
	 */
	napi_schedule(&mal->napi);
}

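/*
 * NAPI poll handler for the whole MAL.  Each pass completes TX on every
 * registered channel, then receives up to the budget.  When the budget
 * is not exhausted, EOB interrupts are re-enabled and we re-check for
 * packets that raced in ("rotting" packets) or channels stopped by an
 * RXDE event, looping back if anything is still pending.
 */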
static int mal_poll(struct napi_struct *napi, int budget)
{
	struct mal_instance *mal = container_of(napi, struct mal_instance, napi);
	struct list_head *l;
	int received = 0;
	unsigned long flags;

	MAL_DBG2(mal, "poll(%d)" NL, budget);
 again:
	/* Process TX skbs */
	list_for_each(l, &mal->poll_list) {
		struct mal_commac *mc =
			list_entry(l, struct mal_commac, poll_list);
		mc->ops->poll_tx(mc->dev);
	}

	/* Process RX skbs.
	 *
	 * We _might_ need something smarter here to enforce polling
	 * fairness.
	 */
	list_for_each(l, &mal->poll_list) {
		struct mal_commac *mc =
			list_entry(l, struct mal_commac, poll_list);
		int n;
		if (unlikely(test_bit(MAL_COMMAC_POLL_DISABLED, &mc->flags)))
			continue;
		n = mc->ops->poll_rx(mc->dev, budget);
		if (n) {
			received += n;
			budget -= n;
			if (budget <= 0)
				goto more_work; // XXX What if this is the last one?
		}
	}

	/* We need to disable IRQs to protect from RXDE IRQ here */
	spin_lock_irqsave(&mal->lock, flags);
	__napi_complete(napi);
	mal_enable_eob_irq(mal);
	spin_unlock_irqrestore(&mal->lock, flags);

	/* Check for "rotting" packet(s) */
	list_for_each(l, &mal->poll_list) {
		struct mal_commac *mc =
			list_entry(l, struct mal_commac, poll_list);
		if (unlikely(test_bit(MAL_COMMAC_POLL_DISABLED, &mc->flags)))
			continue;
		if (unlikely(mc->ops->peek_rx(mc->dev) ||
			     test_bit(MAL_COMMAC_RX_STOPPED, &mc->flags))) {
			MAL_DBG2(mal, "rotting packet" NL);
			if (napi_reschedule(napi))
				mal_disable_eob_irq(mal);
			else
				MAL_DBG2(mal, "already in poll list" NL);

			if (budget > 0)
				goto again;
			else
				goto more_work;
		}
		mc->ops->poll_tx(mc->dev);
	}

 more_work:
	MAL_DBG2(mal, "poll() %d <- %d" NL, budget, received);
	return received;
}

static void mal_reset(struct mal_instance *mal)
{
	int n = 10;

	MAL_DBG(mal, "reset" NL);

	set_mal_dcrn(mal, MAL_CFG, MAL_CFG_SR);

	/* Wait for reset to complete (1 system clock) */
	while ((get_mal_dcrn(mal, MAL_CFG) & MAL_CFG_SR) && n)
		--n;

	if (unlikely(!n))
		printk(KERN_ERR "mal%d: reset timeout\n", mal->index);
}

int mal_get_regs_len(struct mal_instance *mal)
{
	return sizeof(struct emac_ethtool_regs_subhdr) +
	    sizeof(struct mal_regs);
}

void *mal_dump_regs(struct mal_instance *mal, void *buf)
{
	struct emac_ethtool_regs_subhdr *hdr = buf;
	struct mal_regs *regs = (struct mal_regs *)(hdr + 1);
	int i;

	hdr->version = mal->version;
	hdr->index = mal->index;

	regs->tx_count = mal->num_tx_chans;
	regs->rx_count = mal->num_rx_chans;

	regs->cfg = get_mal_dcrn(mal, MAL_CFG);
	regs->esr = get_mal_dcrn(mal, MAL_ESR);
	regs->ier = get_mal_dcrn(mal, MAL_IER);
	regs->tx_casr = get_mal_dcrn(mal, MAL_TXCASR);
	regs->tx_carr = get_mal_dcrn(mal, MAL_TXCARR);
	regs->tx_eobisr = get_mal_dcrn(mal, MAL_TXEOBISR);
	regs->tx_deir = get_mal_dcrn(mal, MAL_TXDEIR);
	regs->rx_casr = get_mal_dcrn(mal, MAL_RXCASR);
	regs->rx_carr = get_mal_dcrn(mal, MAL_RXCARR);
	regs->rx_eobisr = get_mal_dcrn(mal, MAL_RXEOBISR);
	regs->rx_deir = get_mal_dcrn(mal, MAL_RXDEIR);

	for (i = 0; i < regs->tx_count; ++i)
		regs->tx_ctpr[i] = get_mal_dcrn(mal, MAL_TXCTPR(i));

	for (i = 0; i < regs->rx_count; ++i) {
		regs->rx_ctpr[i] = get_mal_dcrn(mal, MAL_RXCTPR(i));
		regs->rcbs[i] = get_mal_dcrn(mal, MAL_RCBS(i));
	}
	return regs + 1;
}

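/*
 * Probe one MAL node.  The device-tree node supplies the channel
 * counts, a DCR range and five interrupts, in the order TXEOB, RXEOB,
 * SERR, TXDE, RXDE (the last three folded into SERR on parts with a
 * common error interrupt).  An illustrative node (values are made up,
 * not taken from a real board):
 *
 *	MAL0: mcmal {
 *		compatible = "ibm,mcmal2", "ibm,mcmal";
 *		dcr-reg = <0x180 0x062>;
 *		num-tx-chans = <2>;
 *		num-rx-chans = <16>;
 *		interrupts = <...>;
 *	};
 */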
static int __devinit mal_probe(struct platform_device *ofdev)
{
	struct mal_instance *mal;
	int err = 0, i, bd_size;
	int index = mal_count++;
	unsigned int dcr_base;
	const u32 *prop;
	u32 cfg;
	unsigned long irqflags;
	irq_handler_t hdlr_serr, hdlr_txde, hdlr_rxde;

	mal = kzalloc(sizeof(struct mal_instance), GFP_KERNEL);
	if (!mal) {
		printk(KERN_ERR
		       "mal%d: out of memory allocating MAL structure!\n",
		       index);
		return -ENOMEM;
	}
	mal->index = index;
	mal->ofdev = ofdev;
	mal->version = of_device_is_compatible(ofdev->dev.of_node, "ibm,mcmal2") ? 2 : 1;

	MAL_DBG(mal, "probe" NL);

	prop = of_get_property(ofdev->dev.of_node, "num-tx-chans", NULL);
	if (prop == NULL) {
		printk(KERN_ERR
		       "mal%d: can't find MAL num-tx-chans property!\n",
		       index);
		err = -ENODEV;
		goto fail;
	}
	mal->num_tx_chans = prop[0];

	prop = of_get_property(ofdev->dev.of_node, "num-rx-chans", NULL);
	if (prop == NULL) {
		printk(KERN_ERR
		       "mal%d: can't find MAL num-rx-chans property!\n",
		       index);
		err = -ENODEV;
		goto fail;
	}
	mal->num_rx_chans = prop[0];

	dcr_base = dcr_resource_start(ofdev->dev.of_node, 0);
	if (dcr_base == 0) {
		printk(KERN_ERR
		       "mal%d: can't find DCR resource!\n", index);
		err = -ENODEV;
		goto fail;
	}
	mal->dcr_host = dcr_map(ofdev->dev.of_node, dcr_base, 0x100);
	if (!DCR_MAP_OK(mal->dcr_host)) {
		printk(KERN_ERR
		       "mal%d: failed to map DCRs!\n", index);
		err = -ENODEV;
		goto fail;
	}

	if (of_device_is_compatible(ofdev->dev.of_node, "ibm,mcmal-405ez")) {
#if defined(CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT) && \
		defined(CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR)
		mal->features |= (MAL_FTR_CLEAR_ICINTSTAT |
				MAL_FTR_COMMON_ERR_INT);
#else
		printk(KERN_ERR "%s: Support for 405EZ not enabled!\n",
				ofdev->dev.of_node->full_name);
		err = -ENODEV;
		goto fail;
#endif
	}

	mal->txeob_irq = irq_of_parse_and_map(ofdev->dev.of_node, 0);
	mal->rxeob_irq = irq_of_parse_and_map(ofdev->dev.of_node, 1);
	mal->serr_irq = irq_of_parse_and_map(ofdev->dev.of_node, 2);

	if (mal_has_feature(mal, MAL_FTR_COMMON_ERR_INT)) {
		mal->txde_irq = mal->rxde_irq = mal->serr_irq;
	} else {
		mal->txde_irq = irq_of_parse_and_map(ofdev->dev.of_node, 3);
		mal->rxde_irq = irq_of_parse_and_map(ofdev->dev.of_node, 4);
	}

	if (mal->txeob_irq == NO_IRQ || mal->rxeob_irq == NO_IRQ ||
	    mal->serr_irq == NO_IRQ || mal->txde_irq == NO_IRQ ||
	    mal->rxde_irq == NO_IRQ) {
		printk(KERN_ERR
		       "mal%d: failed to map interrupts!\n", index);
		err = -ENODEV;
		goto fail_unmap;
	}

	INIT_LIST_HEAD(&mal->poll_list);
	INIT_LIST_HEAD(&mal->list);
	spin_lock_init(&mal->lock);

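	/*
	 * NAPI needs a struct net_device to hang off; this MAL is shared
	 * by several EMACs, so give it a dummy netdev of its own.
	 */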
	init_dummy_netdev(&mal->dummy_dev);

	netif_napi_add(&mal->dummy_dev, &mal->napi, mal_poll,
		       CONFIG_IBM_NEW_EMAC_POLL_WEIGHT);

	/* Load power-on reset defaults */
	mal_reset(mal);

	/* Set the MAL configuration register */
	cfg = (mal->version == 2) ? MAL2_CFG_DEFAULT : MAL1_CFG_DEFAULT;
	cfg |= MAL_CFG_PLBB | MAL_CFG_OPBBL | MAL_CFG_LEA;

	/* Current Axon is not happy with a non-0 priority; it can
	 * deadlock, so fix it up here
	 */
	if (of_device_is_compatible(ofdev->dev.of_node, "ibm,mcmal-axon"))
		cfg &= ~(MAL2_CFG_RPP_10 | MAL2_CFG_WPP_10);

	/* Apply configuration */
	set_mal_dcrn(mal, MAL_CFG, cfg);

	/* Allocate space for BD rings */
	BUG_ON(mal->num_tx_chans <= 0 || mal->num_tx_chans > 32);
	BUG_ON(mal->num_rx_chans <= 0 || mal->num_rx_chans > 32);

	bd_size = sizeof(struct mal_descriptor) *
		(NUM_TX_BUFF * mal->num_tx_chans +
		 NUM_RX_BUFF * mal->num_rx_chans);
	mal->bd_virt =
		dma_alloc_coherent(&ofdev->dev, bd_size, &mal->bd_dma,
				   GFP_KERNEL);
	if (mal->bd_virt == NULL) {
		printk(KERN_ERR
		       "mal%d: out of memory allocating RX/TX descriptors!\n",
		       index);
		err = -ENOMEM;
		goto fail_unmap;
	}
	memset(mal->bd_virt, 0, bd_size);

	for (i = 0; i < mal->num_tx_chans; ++i)
		set_mal_dcrn(mal, MAL_TXCTPR(i), mal->bd_dma +
			     sizeof(struct mal_descriptor) *
			     mal_tx_bd_offset(mal, i));

	for (i = 0; i < mal->num_rx_chans; ++i)
		set_mal_dcrn(mal, MAL_RXCTPR(i), mal->bd_dma +
			     sizeof(struct mal_descriptor) *
			     mal_rx_bd_offset(mal, i));

	if (mal_has_feature(mal, MAL_FTR_COMMON_ERR_INT)) {
		irqflags = IRQF_SHARED;
		hdlr_serr = hdlr_txde = hdlr_rxde = mal_int;
	} else {
		irqflags = 0;
		hdlr_serr = mal_serr;
		hdlr_txde = mal_txde;
		hdlr_rxde = mal_rxde;
	}

	err = request_irq(mal->serr_irq, hdlr_serr, irqflags, "MAL SERR", mal);
	if (err)
		goto fail2;
	err = request_irq(mal->txde_irq, hdlr_txde, irqflags, "MAL TX DE", mal);
	if (err)
		goto fail3;
	err = request_irq(mal->txeob_irq, mal_txeob, 0, "MAL TX EOB", mal);
	if (err)
		goto fail4;
	err = request_irq(mal->rxde_irq, hdlr_rxde, irqflags, "MAL RX DE", mal);
	if (err)
		goto fail5;
	err = request_irq(mal->rxeob_irq, mal_rxeob, 0, "MAL RX EOB", mal);
	if (err)
		goto fail6;

	/* Enable all MAL SERR interrupt sources */
	if (mal->version == 2)
		set_mal_dcrn(mal, MAL_IER, MAL2_IER_EVENTS);
	else
		set_mal_dcrn(mal, MAL_IER, MAL1_IER_EVENTS);

	/* Enable EOB interrupt */
	mal_enable_eob_irq(mal);

	printk(KERN_INFO
	       "MAL v%d %s, %d TX channels, %d RX channels\n",
	       mal->version, ofdev->dev.of_node->full_name,
	       mal->num_tx_chans, mal->num_rx_chans);

	/* Advertise this instance to the rest of the world */
	wmb();
	dev_set_drvdata(&ofdev->dev, mal);

	mal_dbg_register(mal);

	return 0;

 fail6:
	free_irq(mal->rxde_irq, mal);
 fail5:
	free_irq(mal->txeob_irq, mal);
 fail4:
	free_irq(mal->txde_irq, mal);
 fail3:
	free_irq(mal->serr_irq, mal);
 fail2:
	dma_free_coherent(&ofdev->dev, bd_size, mal->bd_virt, mal->bd_dma);
 fail_unmap:
	dcr_unmap(mal->dcr_host, 0x100);
 fail:
	kfree(mal);

	return err;
}

static int __devexit mal_remove(struct platform_device *ofdev)
{
	struct mal_instance *mal = dev_get_drvdata(&ofdev->dev);

	MAL_DBG(mal, "remove" NL);

	/* Synchronize with scheduled polling */
	napi_disable(&mal->napi);

	if (!list_empty(&mal->list)) {
		/* This is *very* bad */
		printk(KERN_EMERG
		       "mal%d: commac list is not empty on remove!\n",
		       mal->index);
		WARN_ON(1);
	}

	dev_set_drvdata(&ofdev->dev, NULL);

	free_irq(mal->serr_irq, mal);
	free_irq(mal->txde_irq, mal);
	free_irq(mal->txeob_irq, mal);
	free_irq(mal->rxde_irq, mal);
	free_irq(mal->rxeob_irq, mal);

	mal_reset(mal);

	mal_dbg_unregister(mal);

	dma_free_coherent(&ofdev->dev,
			  sizeof(struct mal_descriptor) *
			  (NUM_TX_BUFF * mal->num_tx_chans +
			   NUM_RX_BUFF * mal->num_rx_chans), mal->bd_virt,
			  mal->bd_dma);
	kfree(mal);

	return 0;
}

static struct of_device_id mal_platform_match[] =
{
	{
		.compatible	= "ibm,mcmal",
	},
	{
		.compatible	= "ibm,mcmal2",
	},
	/* Backward compat */
	{
		.type		= "mcmal-dma",
		.compatible	= "ibm,mcmal",
	},
	{
		.type		= "mcmal-dma",
		.compatible	= "ibm,mcmal2",
	},
	{},
};

static struct platform_driver mal_of_driver = {
	.driver = {
		.name = "mcmal",
		.owner = THIS_MODULE,
		.of_match_table = mal_platform_match,
	},
	.probe = mal_probe,
	.remove = mal_remove,
};

int __init mal_init(void)
{
	return platform_driver_register(&mal_of_driver);
}

void mal_exit(void)
{
	platform_driver_unregister(&mal_of_driver);
}