xref: /openbmc/linux/drivers/dma/mv_xor.c (revision ff7b04796d9866327ea76e1393f1e902ef032f84)
/*
 * offload engine driver for the Marvell XOR engine
 * Copyright (C) 2007, 2008, Marvell International Ltd.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/async_tx.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/memory.h>
#include <asm/plat-orion/mv_xor.h>
#include "mv_xor.h"

static void mv_xor_issue_pending(struct dma_chan *chan);

#define to_mv_xor_chan(chan)		\
	container_of(chan, struct mv_xor_chan, common)

#define to_mv_xor_device(dev)		\
	container_of(dev, struct mv_xor_device, common)

#define to_mv_xor_slot(tx)		\
	container_of(tx, struct mv_xor_desc_slot, async_tx)

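/*
 * The mv_desc_* helpers below manipulate the in-memory hardware
 * descriptor (struct mv_xor_desc) through its software wrapper
 * (struct mv_xor_desc_slot).  A note on the bare (1 << 31) writes in
 * mv_desc_init(): as used by this driver, bit 31 of the status word
 * marks the descriptor as owned by the XOR engine and bit 31 of the
 * command word enables the end-of-descriptor interrupt; this is a
 * reading of the code, not a quote from the datasheet.
 */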
static void mv_desc_init(struct mv_xor_desc_slot *desc, unsigned long flags)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;

	hw_desc->status = (1 << 31);
	hw_desc->phy_next_desc = 0;
	hw_desc->desc_command = (1 << 31);
}

static u32 mv_desc_get_dest_addr(struct mv_xor_desc_slot *desc)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;
	return hw_desc->phy_dest_addr;
}

static u32 mv_desc_get_src_addr(struct mv_xor_desc_slot *desc,
				int src_idx)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;
	return hw_desc->phy_src_addr[src_idx];
}

static void mv_desc_set_byte_count(struct mv_xor_desc_slot *desc,
				   u32 byte_count)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;
	hw_desc->byte_count = byte_count;
}

static void mv_desc_set_next_desc(struct mv_xor_desc_slot *desc,
				  u32 next_desc_addr)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;
	BUG_ON(hw_desc->phy_next_desc);
	hw_desc->phy_next_desc = next_desc_addr;
}

static void mv_desc_clear_next_desc(struct mv_xor_desc_slot *desc)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;
	hw_desc->phy_next_desc = 0;
}

static void mv_desc_set_block_fill_val(struct mv_xor_desc_slot *desc, u32 val)
{
	desc->value = val;
}

static void mv_desc_set_dest_addr(struct mv_xor_desc_slot *desc,
				  dma_addr_t addr)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;
	hw_desc->phy_dest_addr = addr;
}

static int mv_chan_memset_slot_count(size_t len)
{
	return 1;
}

#define mv_chan_memcpy_slot_count(c) mv_chan_memset_slot_count(c)

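/*
 * For XOR descriptors each source has an enable bit in the command
 * word: setting source address N also sets command bit N so the engine
 * includes that source in the computation.  Memcpy and memset
 * descriptors leave the command bits untouched.
 */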
static void mv_desc_set_src_addr(struct mv_xor_desc_slot *desc,
				 int index, dma_addr_t addr)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;
	hw_desc->phy_src_addr[index] = addr;
	if (desc->type == DMA_XOR)
		hw_desc->desc_command |= (1 << index);
}

static u32 mv_chan_get_current_desc(struct mv_xor_chan *chan)
{
	return __raw_readl(XOR_CURR_DESC(chan));
}

static void mv_chan_set_next_descriptor(struct mv_xor_chan *chan,
					u32 next_desc_addr)
{
	__raw_writel(next_desc_addr, XOR_NEXT_DESC(chan));
}

static void mv_chan_set_dest_pointer(struct mv_xor_chan *chan, u32 desc_addr)
{
	__raw_writel(desc_addr, XOR_DEST_POINTER(chan));
}

static void mv_chan_set_block_size(struct mv_xor_chan *chan, u32 block_size)
{
	__raw_writel(block_size, XOR_BLOCK_SIZE(chan));
}

static void mv_chan_set_value(struct mv_xor_chan *chan, u32 value)
{
	__raw_writel(value, XOR_INIT_VALUE_LOW(chan));
	__raw_writel(value, XOR_INIT_VALUE_HIGH(chan));
}

static void mv_chan_unmask_interrupts(struct mv_xor_chan *chan)
{
	u32 val = __raw_readl(XOR_INTR_MASK(chan));
	val |= XOR_INTR_MASK_VALUE << (chan->idx * 16);
	__raw_writel(val, XOR_INTR_MASK(chan));
}

static u32 mv_chan_get_intr_cause(struct mv_xor_chan *chan)
{
	u32 intr_cause = __raw_readl(XOR_INTR_CAUSE(chan));
	intr_cause = (intr_cause >> (chan->idx * 16)) & 0xFFFF;
	return intr_cause;
}

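/*
 * Each channel owns a 16-bit field in the shared interrupt cause/mask
 * registers, at bit offset (chan->idx * 16).  Within that field this
 * driver treats bits 4..9 as error conditions; bit 1 is the
 * end-of-chain interrupt cleared by mv_xor_device_clear_eoc_cause()
 * below.
 */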
static int mv_is_err_intr(u32 intr_cause)
{
	if (intr_cause & ((1<<4)|(1<<5)|(1<<6)|(1<<7)|(1<<8)|(1<<9)))
		return 1;

	return 0;
}

static void mv_xor_device_clear_eoc_cause(struct mv_xor_chan *chan)
{
	u32 val = (1 << (1 + (chan->idx * 16)));
	dev_dbg(chan->device->common.dev, "%s, val 0x%08x\n", __func__, val);
	__raw_writel(val, XOR_INTR_CAUSE(chan));
}

static void mv_xor_device_clear_err_status(struct mv_xor_chan *chan)
{
	u32 val = 0xFFFF0000 >> (chan->idx * 16);
	__raw_writel(val, XOR_INTR_CAUSE(chan));
}

static int mv_can_chain(struct mv_xor_desc_slot *desc)
{
	struct mv_xor_desc_slot *chain_old_tail = list_entry(
		desc->chain_node.prev, struct mv_xor_desc_slot, chain_node);

	if (chain_old_tail->type != desc->type)
		return 0;
	if (desc->type == DMA_MEMSET)
		return 0;

	return 1;
}

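/*
 * The low three bits of the channel configuration register select the
 * operation mode.  mv_set_mode() rewrites only those bits and caches
 * the choice in chan->current_type, so mv_xor_start_new_chain() can
 * skip the register write when the next chain is of the same type.
 */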
static void mv_set_mode(struct mv_xor_chan *chan,
			       enum dma_transaction_type type)
{
	u32 op_mode;
	u32 config = __raw_readl(XOR_CONFIG(chan));

	switch (type) {
	case DMA_XOR:
		op_mode = XOR_OPERATION_MODE_XOR;
		break;
	case DMA_MEMCPY:
		op_mode = XOR_OPERATION_MODE_MEMCPY;
		break;
	case DMA_MEMSET:
		op_mode = XOR_OPERATION_MODE_MEMSET;
		break;
	default:
		dev_printk(KERN_ERR, chan->device->common.dev,
			   "error: unsupported operation %d.\n",
			   type);
		BUG();
		return;
	}

	config &= ~0x7;
	config |= op_mode;
	__raw_writel(config, XOR_CONFIG(chan));
	chan->current_type = type;
}

static void mv_chan_activate(struct mv_xor_chan *chan)
{
	u32 activation;

	dev_dbg(chan->device->common.dev, " activate chan.\n");
	activation = __raw_readl(XOR_ACTIVATION(chan));
	activation |= 0x1;
	__raw_writel(activation, XOR_ACTIVATION(chan));
}

static char mv_chan_is_busy(struct mv_xor_chan *chan)
{
	u32 state = __raw_readl(XOR_ACTIVATION(chan));

	state = (state >> 4) & 0x3;

	return (state == 1) ? 1 : 0;
}

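/*
 * Slot accounting is trivial on this engine: a single hardware
 * descriptor covers an entire memcpy, memset or XOR operation
 * (anything up to MV_XOR_MAX_BYTE_COUNT), so this helper and
 * mv_chan_memset_slot_count() above always return 1.
 */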
static int mv_chan_xor_slot_count(size_t len, int src_cnt)
{
	return 1;
}

/**
 * mv_xor_free_slots - flags descriptor slots for reuse
 * @slot: Slot to free
 * Caller must hold &mv_chan->lock while calling this function
 */
static void mv_xor_free_slots(struct mv_xor_chan *mv_chan,
			      struct mv_xor_desc_slot *slot)
{
	dev_dbg(mv_chan->device->common.dev, "%s %d slot %p\n",
		__func__, __LINE__, slot);

	slot->slots_per_op = 0;
}

/*
 * mv_xor_start_new_chain - program the engine to operate on a new chain
 * headed by sw_desc
 * Caller must hold &mv_chan->lock while calling this function
 */
static void mv_xor_start_new_chain(struct mv_xor_chan *mv_chan,
				   struct mv_xor_desc_slot *sw_desc)
{
	dev_dbg(mv_chan->device->common.dev, "%s %d: sw_desc %p\n",
		__func__, __LINE__, sw_desc);
	if (sw_desc->type != mv_chan->current_type)
		mv_set_mode(mv_chan, sw_desc->type);

	if (sw_desc->type == DMA_MEMSET) {
		/* for memset requests we need to program the engine, no
		 * descriptors used.
		 */
		struct mv_xor_desc *hw_desc = sw_desc->hw_desc;
		mv_chan_set_dest_pointer(mv_chan, hw_desc->phy_dest_addr);
		mv_chan_set_block_size(mv_chan, sw_desc->unmap_len);
		mv_chan_set_value(mv_chan, sw_desc->value);
	} else {
		/* set the hardware chain */
		mv_chan_set_next_descriptor(mv_chan, sw_desc->async_tx.phys);
	}
	mv_chan->pending += sw_desc->slot_cnt;
	mv_xor_issue_pending(&mv_chan->common);
}

static dma_cookie_t
mv_xor_run_tx_complete_actions(struct mv_xor_desc_slot *desc,
	struct mv_xor_chan *mv_chan, dma_cookie_t cookie)
{
	BUG_ON(desc->async_tx.cookie < 0);

	if (desc->async_tx.cookie > 0) {
		cookie = desc->async_tx.cookie;

		/* call the callback (must not sleep or submit new
		 * operations to this channel)
		 */
		if (desc->async_tx.callback)
			desc->async_tx.callback(
				desc->async_tx.callback_param);

		/* unmap dma addresses
		 * (unmap_single vs unmap_page?)
		 */
		if (desc->group_head && desc->unmap_len) {
			struct mv_xor_desc_slot *unmap = desc->group_head;
			struct device *dev =
				&mv_chan->device->pdev->dev;
			u32 len = unmap->unmap_len;
			u32 src_cnt = unmap->unmap_src_cnt;
			dma_addr_t addr = mv_desc_get_dest_addr(unmap);

			dma_unmap_page(dev, addr, len, DMA_FROM_DEVICE);
			while (src_cnt--) {
				addr = mv_desc_get_src_addr(unmap, src_cnt);
				dma_unmap_page(dev, addr, len, DMA_TO_DEVICE);
			}
			desc->group_head = NULL;
		}
	}

	/* run dependent operations */
	async_tx_run_dependencies(&desc->async_tx);

	return cookie;
}

static int
mv_xor_clean_completed_slots(struct mv_xor_chan *mv_chan)
{
	struct mv_xor_desc_slot *iter, *_iter;

	dev_dbg(mv_chan->device->common.dev, "%s %d\n", __func__, __LINE__);
	list_for_each_entry_safe(iter, _iter, &mv_chan->completed_slots,
				 completed_node) {

		if (async_tx_test_ack(&iter->async_tx)) {
			list_del(&iter->completed_node);
			mv_xor_free_slots(mv_chan, iter);
		}
	}
	return 0;
}

static int
mv_xor_clean_slot(struct mv_xor_desc_slot *desc,
	struct mv_xor_chan *mv_chan)
{
	dev_dbg(mv_chan->device->common.dev, "%s %d: desc %p flags %d\n",
		__func__, __LINE__, desc, desc->async_tx.flags);
	list_del(&desc->chain_node);
	/* the client is allowed to attach dependent operations
	 * until 'ack' is set
	 */
	if (!async_tx_test_ack(&desc->async_tx)) {
		/* move this slot to the completed_slots */
		list_add_tail(&desc->completed_node, &mv_chan->completed_slots);
		return 0;
	}

	mv_xor_free_slots(mv_chan, desc);
	return 0;
}

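/*
 * Core completion walk, called with mv_chan->lock held.  Descriptors
 * are retired oldest-first: run callbacks and unmaps, stop at the
 * descriptor currently loaded in hardware (or just past it once the
 * channel has gone idle), then restart the engine on whatever remains
 * in the software chain.
 */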
static void __mv_xor_slot_cleanup(struct mv_xor_chan *mv_chan)
{
	struct mv_xor_desc_slot *iter, *_iter;
	dma_cookie_t cookie = 0;
	int busy = mv_chan_is_busy(mv_chan);
	u32 current_desc = mv_chan_get_current_desc(mv_chan);
	int seen_current = 0;

	dev_dbg(mv_chan->device->common.dev, "%s %d\n", __func__, __LINE__);
	dev_dbg(mv_chan->device->common.dev, "current_desc %x\n", current_desc);
	mv_xor_clean_completed_slots(mv_chan);

	/* free completed slots from the chain starting with
	 * the oldest descriptor
	 */

	list_for_each_entry_safe(iter, _iter, &mv_chan->chain,
					chain_node) {
		prefetch(_iter);
		prefetch(&_iter->async_tx);

		/* do not advance past the current descriptor loaded into the
		 * hardware channel, subsequent descriptors are either in
		 * process or have not been submitted
		 */
		if (seen_current)
			break;

		/* stop the search if we reach the current descriptor and the
		 * channel is busy
		 */
		if (iter->async_tx.phys == current_desc) {
			seen_current = 1;
			if (busy)
				break;
		}

		cookie = mv_xor_run_tx_complete_actions(iter, mv_chan, cookie);

		if (mv_xor_clean_slot(iter, mv_chan))
			break;
	}

	if ((busy == 0) && !list_empty(&mv_chan->chain)) {
		struct mv_xor_desc_slot *chain_head;
		chain_head = list_entry(mv_chan->chain.next,
					struct mv_xor_desc_slot,
					chain_node);

		mv_xor_start_new_chain(mv_chan, chain_head);
	}

	if (cookie > 0)
		mv_chan->completed_cookie = cookie;
}

static void
mv_xor_slot_cleanup(struct mv_xor_chan *mv_chan)
{
	spin_lock_bh(&mv_chan->lock);
	__mv_xor_slot_cleanup(mv_chan);
	spin_unlock_bh(&mv_chan->lock);
}

static void mv_xor_tasklet(unsigned long data)
{
	struct mv_xor_chan *chan = (struct mv_xor_chan *) data;
	__mv_xor_slot_cleanup(chan);
}

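/*
 * Two-pass search for free descriptor slots in the all_slots list:
 * pass 0 resumes from last_used for O(1) behaviour in the common case,
 * pass 1 rescans from the head and gives up at the first busy slot.
 * On failure the cleanup tasklet is kicked so completed slots can be
 * reclaimed for a later attempt.
 */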
static struct mv_xor_desc_slot *
mv_xor_alloc_slots(struct mv_xor_chan *mv_chan, int num_slots,
		    int slots_per_op)
{
	struct mv_xor_desc_slot *iter, *_iter, *alloc_start = NULL;
	LIST_HEAD(chain);
	int slots_found, retry = 0;

	/* start search from the last allocated descriptor
	 * if a contiguous allocation cannot be found start searching
	 * from the beginning of the list
	 */
retry:
	slots_found = 0;
	if (retry == 0)
		iter = mv_chan->last_used;
	else
		iter = list_entry(&mv_chan->all_slots,
			struct mv_xor_desc_slot,
			slot_node);

	list_for_each_entry_safe_continue(
		iter, _iter, &mv_chan->all_slots, slot_node) {
		prefetch(_iter);
		prefetch(&_iter->async_tx);
		if (iter->slots_per_op) {
			/* give up after finding the first busy slot
			 * on the second pass through the list
			 */
			if (retry)
				break;

			slots_found = 0;
			continue;
		}

		/* start the allocation if the slot is correctly aligned */
		if (!slots_found++)
			alloc_start = iter;

		if (slots_found == num_slots) {
			struct mv_xor_desc_slot *alloc_tail = NULL;
			struct mv_xor_desc_slot *last_used = NULL;
			iter = alloc_start;
			while (num_slots) {
				int i;

				/* pre-ack all but the last descriptor */
				async_tx_ack(&iter->async_tx);

				list_add_tail(&iter->chain_node, &chain);
				alloc_tail = iter;
				iter->async_tx.cookie = 0;
				iter->slot_cnt = num_slots;
				iter->xor_check_result = NULL;
				for (i = 0; i < slots_per_op; i++) {
					iter->slots_per_op = slots_per_op - i;
					last_used = iter;
					iter = list_entry(iter->slot_node.next,
						struct mv_xor_desc_slot,
						slot_node);
				}
				num_slots -= slots_per_op;
			}
			alloc_tail->group_head = alloc_start;
			alloc_tail->async_tx.cookie = -EBUSY;
			list_splice(&chain, &alloc_tail->async_tx.tx_list);
			mv_chan->last_used = last_used;
			mv_desc_clear_next_desc(alloc_start);
			mv_desc_clear_next_desc(alloc_tail);
			return alloc_tail;
		}
	}
	if (!retry++)
		goto retry;

	/* try to free some slots if the allocation fails */
	tasklet_schedule(&mv_chan->irq_tasklet);

	return NULL;
}

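/*
 * Cookies are monotonically increasing transaction ids; on signed
 * overflow they restart at 1, since dmaengine reserves zero and
 * negative values (such as the -EBUSY placed on freshly allocated
 * slots in mv_xor_alloc_slots()) to mean "not yet a valid cookie".
 */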
static dma_cookie_t
mv_desc_assign_cookie(struct mv_xor_chan *mv_chan,
		      struct mv_xor_desc_slot *desc)
{
	dma_cookie_t cookie = mv_chan->common.cookie;

	if (++cookie < 0)
		cookie = 1;
	mv_chan->common.cookie = desc->async_tx.cookie = cookie;
	return cookie;
}

/************************ DMA engine API functions ****************************/
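/*
 * A minimal sketch of the submission flow against the entry points
 * below, mirroring what mv_xor_memcpy_self_test() does directly (real
 * clients reach these through the generic dmaengine wrappers instead):
 *
 *	tx = mv_xor_prep_dma_memcpy(chan, dest_dma, src_dma, len, 0);
 *	cookie = mv_xor_tx_submit(tx);
 *	mv_xor_issue_pending(chan);
 *	...
 *	if (mv_xor_is_complete(chan, cookie, NULL, NULL) == DMA_SUCCESS)
 *		...the copy has completed; dest_dma may be unmapped...
 */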
static dma_cookie_t
mv_xor_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct mv_xor_desc_slot *sw_desc = to_mv_xor_slot(tx);
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(tx->chan);
	struct mv_xor_desc_slot *grp_start, *old_chain_tail;
	dma_cookie_t cookie;
	int new_hw_chain = 1;

	dev_dbg(mv_chan->device->common.dev,
		"%s sw_desc %p: async_tx %p\n",
		__func__, sw_desc, &sw_desc->async_tx);

	grp_start = sw_desc->group_head;

	spin_lock_bh(&mv_chan->lock);
	cookie = mv_desc_assign_cookie(mv_chan, sw_desc);

	if (list_empty(&mv_chan->chain))
		list_splice_init(&sw_desc->async_tx.tx_list, &mv_chan->chain);
	else {
		new_hw_chain = 0;

		old_chain_tail = list_entry(mv_chan->chain.prev,
					    struct mv_xor_desc_slot,
					    chain_node);
		list_splice_init(&grp_start->async_tx.tx_list,
				 &old_chain_tail->chain_node);

		if (!mv_can_chain(grp_start))
			goto submit_done;

		dev_dbg(mv_chan->device->common.dev, "Append to last desc %x\n",
			old_chain_tail->async_tx.phys);

		/* fix up the hardware chain */
		mv_desc_set_next_desc(old_chain_tail, grp_start->async_tx.phys);

		/* if the channel is not busy */
		if (!mv_chan_is_busy(mv_chan)) {
			u32 current_desc = mv_chan_get_current_desc(mv_chan);
			/*
			 * and the current desc is the end of the chain before
			 * the append, then we need to start the channel
			 */
			if (current_desc == old_chain_tail->async_tx.phys)
				new_hw_chain = 1;
		}
	}

	if (new_hw_chain)
		mv_xor_start_new_chain(mv_chan, grp_start);

submit_done:
	spin_unlock_bh(&mv_chan->lock);

	return cookie;
}

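/*
 * Descriptor slots are carved out of the coherent pool allocated at
 * probe time: plat_data->pool_size / MV_XOR_SLOT_SIZE hardware
 * descriptors, each paired with a kzalloc'd software
 * struct mv_xor_desc_slot.
 */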
/* returns the number of allocated descriptors */
static int mv_xor_alloc_chan_resources(struct dma_chan *chan)
{
	char *hw_desc;
	int idx;
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
	struct mv_xor_desc_slot *slot = NULL;
	struct mv_xor_platform_data *plat_data =
		mv_chan->device->pdev->dev.platform_data;
	int num_descs_in_pool = plat_data->pool_size/MV_XOR_SLOT_SIZE;

	/* Allocate descriptor slots */
	idx = mv_chan->slots_allocated;
	while (idx < num_descs_in_pool) {
		slot = kzalloc(sizeof(*slot), GFP_KERNEL);
		if (!slot) {
			printk(KERN_INFO "MV XOR Channel only initialized"
				" %d descriptor slots\n", idx);
			break;
		}
		hw_desc = (char *) mv_chan->device->dma_desc_pool_virt;
		slot->hw_desc = (void *) &hw_desc[idx * MV_XOR_SLOT_SIZE];

		dma_async_tx_descriptor_init(&slot->async_tx, chan);
		slot->async_tx.tx_submit = mv_xor_tx_submit;
		INIT_LIST_HEAD(&slot->chain_node);
		INIT_LIST_HEAD(&slot->slot_node);
		INIT_LIST_HEAD(&slot->async_tx.tx_list);
		hw_desc = (char *) mv_chan->device->dma_desc_pool;
		slot->async_tx.phys =
			(dma_addr_t) &hw_desc[idx * MV_XOR_SLOT_SIZE];
		slot->idx = idx++;

		spin_lock_bh(&mv_chan->lock);
		mv_chan->slots_allocated = idx;
		list_add_tail(&slot->slot_node, &mv_chan->all_slots);
		spin_unlock_bh(&mv_chan->lock);
	}

	if (mv_chan->slots_allocated && !mv_chan->last_used)
		mv_chan->last_used = list_entry(mv_chan->all_slots.next,
					struct mv_xor_desc_slot,
					slot_node);

	dev_dbg(mv_chan->device->common.dev,
		"allocated %d descriptor slots last_used: %p\n",
		mv_chan->slots_allocated, mv_chan->last_used);

	return mv_chan->slots_allocated ? : -ENOMEM;
}

static struct dma_async_tx_descriptor *
mv_xor_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
		size_t len, unsigned long flags)
{
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
	struct mv_xor_desc_slot *sw_desc, *grp_start;
	int slot_cnt;

	dev_dbg(mv_chan->device->common.dev,
		"%s dest: %x src %x len: %u flags: %ld\n",
		__func__, dest, src, len, flags);
	if (unlikely(len < MV_XOR_MIN_BYTE_COUNT))
		return NULL;

	BUG_ON(unlikely(len > MV_XOR_MAX_BYTE_COUNT));

	spin_lock_bh(&mv_chan->lock);
	slot_cnt = mv_chan_memcpy_slot_count(len);
	sw_desc = mv_xor_alloc_slots(mv_chan, slot_cnt, 1);
	if (sw_desc) {
		sw_desc->type = DMA_MEMCPY;
		sw_desc->async_tx.flags = flags;
		grp_start = sw_desc->group_head;
		mv_desc_init(grp_start, flags);
		mv_desc_set_byte_count(grp_start, len);
		mv_desc_set_dest_addr(sw_desc->group_head, dest);
		mv_desc_set_src_addr(grp_start, 0, src);
		sw_desc->unmap_src_cnt = 1;
		sw_desc->unmap_len = len;
	}
	spin_unlock_bh(&mv_chan->lock);

	dev_dbg(mv_chan->device->common.dev,
		"%s sw_desc %p async_tx %p\n",
		__func__, sw_desc, sw_desc ? &sw_desc->async_tx : NULL);

	return sw_desc ? &sw_desc->async_tx : NULL;
}

static struct dma_async_tx_descriptor *
mv_xor_prep_dma_memset(struct dma_chan *chan, dma_addr_t dest, int value,
		       size_t len, unsigned long flags)
{
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
	struct mv_xor_desc_slot *sw_desc, *grp_start;
	int slot_cnt;

	dev_dbg(mv_chan->device->common.dev,
		"%s dest: %x len: %u flags: %ld\n",
		__func__, dest, len, flags);
	if (unlikely(len < MV_XOR_MIN_BYTE_COUNT))
		return NULL;

	BUG_ON(unlikely(len > MV_XOR_MAX_BYTE_COUNT));

	spin_lock_bh(&mv_chan->lock);
	slot_cnt = mv_chan_memset_slot_count(len);
	sw_desc = mv_xor_alloc_slots(mv_chan, slot_cnt, 1);
	if (sw_desc) {
		sw_desc->type = DMA_MEMSET;
		sw_desc->async_tx.flags = flags;
		grp_start = sw_desc->group_head;
		mv_desc_init(grp_start, flags);
		mv_desc_set_byte_count(grp_start, len);
		mv_desc_set_dest_addr(sw_desc->group_head, dest);
		mv_desc_set_block_fill_val(grp_start, value);
		sw_desc->unmap_src_cnt = 1;
		sw_desc->unmap_len = len;
	}
	spin_unlock_bh(&mv_chan->lock);
	dev_dbg(mv_chan->device->common.dev,
		"%s sw_desc %p async_tx %p\n",
		__func__, sw_desc, sw_desc ? &sw_desc->async_tx : NULL);
	return sw_desc ? &sw_desc->async_tx : NULL;
}

static struct dma_async_tx_descriptor *
mv_xor_prep_dma_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
		    unsigned int src_cnt, size_t len, unsigned long flags)
{
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
	struct mv_xor_desc_slot *sw_desc, *grp_start;
	int slot_cnt;

	if (unlikely(len < MV_XOR_MIN_BYTE_COUNT))
		return NULL;

	BUG_ON(unlikely(len > MV_XOR_MAX_BYTE_COUNT));

	dev_dbg(mv_chan->device->common.dev,
		"%s src_cnt: %d len: %u dest %x flags: %ld\n",
		__func__, src_cnt, len, dest, flags);

	spin_lock_bh(&mv_chan->lock);
	slot_cnt = mv_chan_xor_slot_count(len, src_cnt);
	sw_desc = mv_xor_alloc_slots(mv_chan, slot_cnt, 1);
	if (sw_desc) {
		sw_desc->type = DMA_XOR;
		sw_desc->async_tx.flags = flags;
		grp_start = sw_desc->group_head;
		mv_desc_init(grp_start, flags);
		/* the byte count field is the same as in memcpy desc */
		mv_desc_set_byte_count(grp_start, len);
		mv_desc_set_dest_addr(sw_desc->group_head, dest);
		sw_desc->unmap_src_cnt = src_cnt;
		sw_desc->unmap_len = len;
		while (src_cnt--)
			mv_desc_set_src_addr(grp_start, src_cnt, src[src_cnt]);
	}
	spin_unlock_bh(&mv_chan->lock);
	dev_dbg(mv_chan->device->common.dev,
		"%s sw_desc %p async_tx %p\n",
		__func__, sw_desc, sw_desc ? &sw_desc->async_tx : NULL);
	return sw_desc ? &sw_desc->async_tx : NULL;
}

static void mv_xor_free_chan_resources(struct dma_chan *chan)
{
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
	struct mv_xor_desc_slot *iter, *_iter;
	int in_use_descs = 0;

	mv_xor_slot_cleanup(mv_chan);

	spin_lock_bh(&mv_chan->lock);
	list_for_each_entry_safe(iter, _iter, &mv_chan->chain,
					chain_node) {
		in_use_descs++;
		list_del(&iter->chain_node);
	}
	list_for_each_entry_safe(iter, _iter, &mv_chan->completed_slots,
				 completed_node) {
		in_use_descs++;
		list_del(&iter->completed_node);
	}
	list_for_each_entry_safe_reverse(
		iter, _iter, &mv_chan->all_slots, slot_node) {
		list_del(&iter->slot_node);
		kfree(iter);
		mv_chan->slots_allocated--;
	}
	mv_chan->last_used = NULL;

	dev_dbg(mv_chan->device->common.dev, "%s slots_allocated %d\n",
		__func__, mv_chan->slots_allocated);
	spin_unlock_bh(&mv_chan->lock);

	if (in_use_descs)
		dev_err(mv_chan->device->common.dev,
			"freeing %d in use descriptors!\n", in_use_descs);
}

/**
 * mv_xor_is_complete - poll the status of an XOR transaction
 * @chan: XOR channel handle
 * @cookie: XOR transaction identifier
 */
static enum dma_status mv_xor_is_complete(struct dma_chan *chan,
					  dma_cookie_t cookie,
					  dma_cookie_t *done,
					  dma_cookie_t *used)
{
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
	dma_cookie_t last_used;
	dma_cookie_t last_complete;
	enum dma_status ret;

	last_used = chan->cookie;
	last_complete = mv_chan->completed_cookie;
	mv_chan->is_complete_cookie = cookie;
	if (done)
		*done = last_complete;
	if (used)
		*used = last_used;

	ret = dma_async_is_complete(cookie, last_complete, last_used);
	if (ret == DMA_SUCCESS) {
		mv_xor_clean_completed_slots(mv_chan);
		return ret;
	}
	mv_xor_slot_cleanup(mv_chan);

	last_used = chan->cookie;
	last_complete = mv_chan->completed_cookie;

	if (done)
		*done = last_complete;
	if (used)
		*used = last_used;

	return dma_async_is_complete(cookie, last_complete, last_used);
}

static void mv_dump_xor_regs(struct mv_xor_chan *chan)
{
	u32 val;

	val = __raw_readl(XOR_CONFIG(chan));
	dev_printk(KERN_ERR, chan->device->common.dev,
		   "config       0x%08x.\n", val);

	val = __raw_readl(XOR_ACTIVATION(chan));
	dev_printk(KERN_ERR, chan->device->common.dev,
		   "activation   0x%08x.\n", val);

	val = __raw_readl(XOR_INTR_CAUSE(chan));
	dev_printk(KERN_ERR, chan->device->common.dev,
		   "intr cause   0x%08x.\n", val);

	val = __raw_readl(XOR_INTR_MASK(chan));
	dev_printk(KERN_ERR, chan->device->common.dev,
		   "intr mask    0x%08x.\n", val);

	val = __raw_readl(XOR_ERROR_CAUSE(chan));
	dev_printk(KERN_ERR, chan->device->common.dev,
		   "error cause  0x%08x.\n", val);

	val = __raw_readl(XOR_ERROR_ADDR(chan));
	dev_printk(KERN_ERR, chan->device->common.dev,
		   "error addr   0x%08x.\n", val);
}

static void mv_xor_err_interrupt_handler(struct mv_xor_chan *chan,
					 u32 intr_cause)
{
	if (intr_cause & (1 << 4)) {
		dev_dbg(chan->device->common.dev,
			"ignore this error\n");
		return;
	}

	dev_printk(KERN_ERR, chan->device->common.dev,
		   "error on chan %d. intr cause 0x%08x.\n",
		   chan->idx, intr_cause);

	mv_dump_xor_regs(chan);
	BUG();
}

static irqreturn_t mv_xor_interrupt_handler(int irq, void *data)
{
	struct mv_xor_chan *chan = data;
	u32 intr_cause = mv_chan_get_intr_cause(chan);

	dev_dbg(chan->device->common.dev, "intr cause %x\n", intr_cause);

	if (mv_is_err_intr(intr_cause))
		mv_xor_err_interrupt_handler(chan, intr_cause);

	tasklet_schedule(&chan->irq_tasklet);

	mv_xor_device_clear_eoc_cause(chan);

	return IRQ_HANDLED;
}

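/*
 * Activation is deliberately batched: callers accumulate work by
 * bumping mv_chan->pending (see mv_xor_start_new_chain()), and the
 * engine is only kicked once MV_XOR_THRESHOLD pending operations have
 * built up.
 */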
static void mv_xor_issue_pending(struct dma_chan *chan)
{
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);

	if (mv_chan->pending >= MV_XOR_THRESHOLD) {
		mv_chan->pending = 0;
		mv_chan_activate(mv_chan);
	}
}

/*
 * Perform a transaction to verify the HW works.
 */
#define MV_XOR_TEST_SIZE 2000

static int __devinit mv_xor_memcpy_self_test(struct mv_xor_device *device)
{
	int i;
	void *src, *dest;
	dma_addr_t src_dma, dest_dma;
	struct dma_chan *dma_chan;
	dma_cookie_t cookie;
	struct dma_async_tx_descriptor *tx;
	int err = 0;
	struct mv_xor_chan *mv_chan;

	src = kmalloc(sizeof(u8) * MV_XOR_TEST_SIZE, GFP_KERNEL);
	if (!src)
		return -ENOMEM;

	dest = kzalloc(sizeof(u8) * MV_XOR_TEST_SIZE, GFP_KERNEL);
	if (!dest) {
		kfree(src);
		return -ENOMEM;
	}

	/* Fill in src buffer */
	for (i = 0; i < MV_XOR_TEST_SIZE; i++)
		((u8 *) src)[i] = (u8)i;

	/* Start copy, using first DMA channel */
	dma_chan = container_of(device->common.channels.next,
				struct dma_chan,
				device_node);
	if (mv_xor_alloc_chan_resources(dma_chan) < 1) {
		err = -ENODEV;
		goto out;
	}

	dest_dma = dma_map_single(dma_chan->device->dev, dest,
				  MV_XOR_TEST_SIZE, DMA_FROM_DEVICE);

	src_dma = dma_map_single(dma_chan->device->dev, src,
				 MV_XOR_TEST_SIZE, DMA_TO_DEVICE);

	tx = mv_xor_prep_dma_memcpy(dma_chan, dest_dma, src_dma,
				    MV_XOR_TEST_SIZE, 0);
	cookie = mv_xor_tx_submit(tx);
	mv_xor_issue_pending(dma_chan);
	async_tx_ack(tx);
	msleep(1);

	if (mv_xor_is_complete(dma_chan, cookie, NULL, NULL) !=
	    DMA_SUCCESS) {
		dev_printk(KERN_ERR, dma_chan->device->dev,
			   "Self-test copy timed out, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

	mv_chan = to_mv_xor_chan(dma_chan);
	dma_sync_single_for_cpu(&mv_chan->device->pdev->dev, dest_dma,
				MV_XOR_TEST_SIZE, DMA_FROM_DEVICE);
	if (memcmp(src, dest, MV_XOR_TEST_SIZE)) {
		dev_printk(KERN_ERR, dma_chan->device->dev,
			   "Self-test copy failed compare, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

free_resources:
	mv_xor_free_chan_resources(dma_chan);
out:
	kfree(src);
	kfree(dest);
	return err;
}

#define MV_XOR_NUM_SRC_TEST 4 /* must be <= 15 */
static int __devinit
mv_xor_xor_self_test(struct mv_xor_device *device)
{
	int i, src_idx;
	struct page *dest;
	struct page *xor_srcs[MV_XOR_NUM_SRC_TEST];
	dma_addr_t dma_srcs[MV_XOR_NUM_SRC_TEST];
	dma_addr_t dest_dma;
	struct dma_async_tx_descriptor *tx;
	struct dma_chan *dma_chan;
	dma_cookie_t cookie;
	u8 cmp_byte = 0;
	u32 cmp_word;
	int err = 0;
	struct mv_xor_chan *mv_chan;

	for (src_idx = 0; src_idx < MV_XOR_NUM_SRC_TEST; src_idx++) {
		xor_srcs[src_idx] = alloc_page(GFP_KERNEL);
		if (!xor_srcs[src_idx]) {
			while (src_idx--)
				__free_page(xor_srcs[src_idx]);
			return -ENOMEM;
		}
	}

	dest = alloc_page(GFP_KERNEL);
	if (!dest) {
		while (src_idx--)
			__free_page(xor_srcs[src_idx]);
		return -ENOMEM;
	}
1017*ff7b0479SSaeed Bishara 
1018*ff7b0479SSaeed Bishara 	/* Fill in src buffers */
1019*ff7b0479SSaeed Bishara 	for (src_idx = 0; src_idx < MV_XOR_NUM_SRC_TEST; src_idx++) {
1020*ff7b0479SSaeed Bishara 		u8 *ptr = page_address(xor_srcs[src_idx]);
1021*ff7b0479SSaeed Bishara 		for (i = 0; i < PAGE_SIZE; i++)
1022*ff7b0479SSaeed Bishara 			ptr[i] = (1 << src_idx);
1023*ff7b0479SSaeed Bishara 	}
1024*ff7b0479SSaeed Bishara 
1025*ff7b0479SSaeed Bishara 	for (src_idx = 0; src_idx < MV_XOR_NUM_SRC_TEST; src_idx++)
1026*ff7b0479SSaeed Bishara 		cmp_byte ^= (u8) (1 << src_idx);
1027*ff7b0479SSaeed Bishara 
1028*ff7b0479SSaeed Bishara 	cmp_word = (cmp_byte << 24) | (cmp_byte << 16) |
1029*ff7b0479SSaeed Bishara 		(cmp_byte << 8) | cmp_byte;
1030*ff7b0479SSaeed Bishara 
1031*ff7b0479SSaeed Bishara 	memset(page_address(dest), 0, PAGE_SIZE);
1032*ff7b0479SSaeed Bishara 
1033*ff7b0479SSaeed Bishara 	dma_chan = container_of(device->common.channels.next,
1034*ff7b0479SSaeed Bishara 				struct dma_chan,
1035*ff7b0479SSaeed Bishara 				device_node);
1036*ff7b0479SSaeed Bishara 	if (mv_xor_alloc_chan_resources(dma_chan) < 1) {
1037*ff7b0479SSaeed Bishara 		err = -ENODEV;
1038*ff7b0479SSaeed Bishara 		goto out;
1039*ff7b0479SSaeed Bishara 	}
1040*ff7b0479SSaeed Bishara 
1041*ff7b0479SSaeed Bishara 	/* test xor */
1042*ff7b0479SSaeed Bishara 	dest_dma = dma_map_page(dma_chan->device->dev, dest, 0, PAGE_SIZE,
1043*ff7b0479SSaeed Bishara 				DMA_FROM_DEVICE);
1044*ff7b0479SSaeed Bishara 
1045*ff7b0479SSaeed Bishara 	for (i = 0; i < MV_XOR_NUM_SRC_TEST; i++)
1046*ff7b0479SSaeed Bishara 		dma_srcs[i] = dma_map_page(dma_chan->device->dev, xor_srcs[i],
1047*ff7b0479SSaeed Bishara 					   0, PAGE_SIZE, DMA_TO_DEVICE);
1048*ff7b0479SSaeed Bishara 
1049*ff7b0479SSaeed Bishara 	tx = mv_xor_prep_dma_xor(dma_chan, dest_dma, dma_srcs,
1050*ff7b0479SSaeed Bishara 				 MV_XOR_NUM_SRC_TEST, PAGE_SIZE, 0);
1051*ff7b0479SSaeed Bishara 
1052*ff7b0479SSaeed Bishara 	cookie = mv_xor_tx_submit(tx);
1053*ff7b0479SSaeed Bishara 	mv_xor_issue_pending(dma_chan);
1054*ff7b0479SSaeed Bishara 	async_tx_ack(tx);
1055*ff7b0479SSaeed Bishara 	msleep(8);
1056*ff7b0479SSaeed Bishara 
1057*ff7b0479SSaeed Bishara 	if (mv_xor_is_complete(dma_chan, cookie, NULL, NULL) !=
1058*ff7b0479SSaeed Bishara 	    DMA_SUCCESS) {
1059*ff7b0479SSaeed Bishara 		dev_printk(KERN_ERR, dma_chan->device->dev,
1060*ff7b0479SSaeed Bishara 			   "Self-test xor timed out, disabling\n");
1061*ff7b0479SSaeed Bishara 		err = -ENODEV;
1062*ff7b0479SSaeed Bishara 		goto free_resources;
1063*ff7b0479SSaeed Bishara 	}
1064*ff7b0479SSaeed Bishara 
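	/*
	 * dest was mapped DMA_FROM_DEVICE; hand ownership of the buffer
	 * back to the CPU so the compare loop below sees the engine's
	 * writes rather than stale cache contents.
	 */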
1065*ff7b0479SSaeed Bishara 	mv_chan = to_mv_xor_chan(dma_chan);
1066*ff7b0479SSaeed Bishara 	dma_sync_single_for_cpu(&mv_chan->device->pdev->dev, dest_dma,
1067*ff7b0479SSaeed Bishara 				PAGE_SIZE, DMA_FROM_DEVICE);
1068*ff7b0479SSaeed Bishara 	for (i = 0; i < (PAGE_SIZE / sizeof(u32)); i++) {
1069*ff7b0479SSaeed Bishara 		u32 *ptr = page_address(dest);
1070*ff7b0479SSaeed Bishara 		if (ptr[i] != cmp_word) {
1071*ff7b0479SSaeed Bishara 			dev_printk(KERN_ERR, dma_chan->device->dev,
1072*ff7b0479SSaeed Bishara 				   "Self-test xor failed compare, disabling."
1073*ff7b0479SSaeed Bishara 				   " index %d, data %x, expected %x\n", i,
1074*ff7b0479SSaeed Bishara 				   ptr[i], cmp_word);
1075*ff7b0479SSaeed Bishara 			err = -ENODEV;
1076*ff7b0479SSaeed Bishara 			goto free_resources;
1077*ff7b0479SSaeed Bishara 		}
1078*ff7b0479SSaeed Bishara 	}
1079*ff7b0479SSaeed Bishara 
1080*ff7b0479SSaeed Bishara free_resources:
1081*ff7b0479SSaeed Bishara 	mv_xor_free_chan_resources(dma_chan);
1082*ff7b0479SSaeed Bishara out:
1083*ff7b0479SSaeed Bishara 	src_idx = MV_XOR_NUM_SRC_TEST;
1084*ff7b0479SSaeed Bishara 	while (src_idx--)
1085*ff7b0479SSaeed Bishara 		__free_page(xor_srcs[src_idx]);
1086*ff7b0479SSaeed Bishara 	__free_page(dest);
1087*ff7b0479SSaeed Bishara 	return err;
1088*ff7b0479SSaeed Bishara }
1089*ff7b0479SSaeed Bishara 
1090*ff7b0479SSaeed Bishara static int __devexit mv_xor_remove(struct platform_device *dev)
1091*ff7b0479SSaeed Bishara {
1092*ff7b0479SSaeed Bishara 	struct mv_xor_device *device = platform_get_drvdata(dev);
1093*ff7b0479SSaeed Bishara 	struct dma_chan *chan, *_chan;
1094*ff7b0479SSaeed Bishara 	struct mv_xor_chan *mv_chan;
1095*ff7b0479SSaeed Bishara 	struct mv_xor_platform_data *plat_data = dev->dev.platform_data;
1096*ff7b0479SSaeed Bishara 
1097*ff7b0479SSaeed Bishara 	dma_async_device_unregister(&device->common);
1098*ff7b0479SSaeed Bishara 
1099*ff7b0479SSaeed Bishara 	dma_free_coherent(&dev->dev, plat_data->pool_size,
1100*ff7b0479SSaeed Bishara 			device->dma_desc_pool_virt, device->dma_desc_pool);
1101*ff7b0479SSaeed Bishara 
1102*ff7b0479SSaeed Bishara 	list_for_each_entry_safe(chan, _chan, &device->common.channels,
1103*ff7b0479SSaeed Bishara 				device_node) {
1104*ff7b0479SSaeed Bishara 		mv_chan = to_mv_xor_chan(chan);
1105*ff7b0479SSaeed Bishara 		list_del(&chan->device_node);
1106*ff7b0479SSaeed Bishara 	}
1107*ff7b0479SSaeed Bishara 
1108*ff7b0479SSaeed Bishara 	return 0;
1109*ff7b0479SSaeed Bishara }
1110*ff7b0479SSaeed Bishara 
1111*ff7b0479SSaeed Bishara static int __devinit mv_xor_probe(struct platform_device *pdev)
1112*ff7b0479SSaeed Bishara {
1113*ff7b0479SSaeed Bishara 	int ret = 0;
1114*ff7b0479SSaeed Bishara 	int irq;
1115*ff7b0479SSaeed Bishara 	struct mv_xor_device *adev;
1116*ff7b0479SSaeed Bishara 	struct mv_xor_chan *mv_chan;
1117*ff7b0479SSaeed Bishara 	struct dma_device *dma_dev;
1118*ff7b0479SSaeed Bishara 	struct mv_xor_platform_data *plat_data = pdev->dev.platform_data;
1119*ff7b0479SSaeed Bishara 
1121*ff7b0479SSaeed Bishara 	adev = devm_kzalloc(&pdev->dev, sizeof(*adev), GFP_KERNEL);
1122*ff7b0479SSaeed Bishara 	if (!adev)
1123*ff7b0479SSaeed Bishara 		return -ENOMEM;
1124*ff7b0479SSaeed Bishara 
1125*ff7b0479SSaeed Bishara 	dma_dev = &adev->common;
1126*ff7b0479SSaeed Bishara 
1127*ff7b0479SSaeed Bishara 	/* allocate coherent memory for hardware descriptors
1128*ff7b0479SSaeed Bishara 	 * note: writecombine gives slightly better performance, but
1129*ff7b0479SSaeed Bishara 	 * requires that we explicitly flush the writes
1130*ff7b0479SSaeed Bishara 	 */
1131*ff7b0479SSaeed Bishara 	adev->dma_desc_pool_virt = dma_alloc_writecombine(&pdev->dev,
1132*ff7b0479SSaeed Bishara 							  plat_data->pool_size,
1133*ff7b0479SSaeed Bishara 							  &adev->dma_desc_pool,
1134*ff7b0479SSaeed Bishara 							  GFP_KERNEL);
1135*ff7b0479SSaeed Bishara 	if (!adev->dma_desc_pool_virt)
1136*ff7b0479SSaeed Bishara 		return -ENOMEM;
1137*ff7b0479SSaeed Bishara 
1138*ff7b0479SSaeed Bishara 	adev->id = plat_data->hw_id;
1139*ff7b0479SSaeed Bishara 
1140*ff7b0479SSaeed Bishara 	/* discover transaction capabilities from the platform data */
1141*ff7b0479SSaeed Bishara 	dma_dev->cap_mask = plat_data->cap_mask;
1142*ff7b0479SSaeed Bishara 	adev->pdev = pdev;
1143*ff7b0479SSaeed Bishara 	platform_set_drvdata(pdev, adev);
1144*ff7b0479SSaeed Bishara 
1145*ff7b0479SSaeed Bishara 	adev->shared = platform_get_drvdata(plat_data->shared);
1146*ff7b0479SSaeed Bishara 
1147*ff7b0479SSaeed Bishara 	INIT_LIST_HEAD(&dma_dev->channels);
1148*ff7b0479SSaeed Bishara 
1149*ff7b0479SSaeed Bishara 	/* set base routines */
1150*ff7b0479SSaeed Bishara 	dma_dev->device_alloc_chan_resources = mv_xor_alloc_chan_resources;
1151*ff7b0479SSaeed Bishara 	dma_dev->device_free_chan_resources = mv_xor_free_chan_resources;
1152*ff7b0479SSaeed Bishara 	dma_dev->device_is_tx_complete = mv_xor_is_complete;
1153*ff7b0479SSaeed Bishara 	dma_dev->device_issue_pending = mv_xor_issue_pending;
1154*ff7b0479SSaeed Bishara 	dma_dev->dev = &pdev->dev;
1155*ff7b0479SSaeed Bishara 
1156*ff7b0479SSaeed Bishara 	/* set prep routines based on capability */
1157*ff7b0479SSaeed Bishara 	if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask))
1158*ff7b0479SSaeed Bishara 		dma_dev->device_prep_dma_memcpy = mv_xor_prep_dma_memcpy;
1159*ff7b0479SSaeed Bishara 	if (dma_has_cap(DMA_MEMSET, dma_dev->cap_mask))
1160*ff7b0479SSaeed Bishara 		dma_dev->device_prep_dma_memset = mv_xor_prep_dma_memset;
1161*ff7b0479SSaeed Bishara 	if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
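		/*
		 * Editorial note: the limit of eight presumably matches the
		 * number of source address fields (phy_src_addr[]) in the
		 * hardware descriptor, i.e. a per-descriptor property of
		 * the Marvell XOR engine.
		 */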
1162*ff7b0479SSaeed Bishara 		dma_dev->max_xor = 8;
1163*ff7b0479SSaeed Bishara 		dma_dev->device_prep_dma_xor = mv_xor_prep_dma_xor;
1164*ff7b0479SSaeed Bishara 	}
1165*ff7b0479SSaeed Bishara 
1166*ff7b0479SSaeed Bishara 	mv_chan = devm_kzalloc(&pdev->dev, sizeof(*mv_chan), GFP_KERNEL);
1167*ff7b0479SSaeed Bishara 	if (!mv_chan) {
1168*ff7b0479SSaeed Bishara 		ret = -ENOMEM;
1169*ff7b0479SSaeed Bishara 		goto err_free_dma;
1170*ff7b0479SSaeed Bishara 	}
1171*ff7b0479SSaeed Bishara 	mv_chan->device = adev;
1172*ff7b0479SSaeed Bishara 	mv_chan->idx = plat_data->hw_id;
1173*ff7b0479SSaeed Bishara 	mv_chan->mmr_base = adev->shared->xor_base;
1174*ff7b0479SSaeed Bishara 
1175*ff7b0479SSaeed Bishara 	if (!mv_chan->mmr_base) {
1176*ff7b0479SSaeed Bishara 		ret = -ENOMEM;
1177*ff7b0479SSaeed Bishara 		goto err_free_dma;
1178*ff7b0479SSaeed Bishara 	}
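	/*
	 * Descriptor completion handling runs in tasklet (softirq) context;
	 * the interrupt handler only has to schedule mv_chan->irq_tasklet
	 * instead of walking the descriptor chain with interrupts disabled.
	 */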
1179*ff7b0479SSaeed Bishara 	tasklet_init(&mv_chan->irq_tasklet, mv_xor_tasklet, (unsigned long)
1180*ff7b0479SSaeed Bishara 		     mv_chan);
1181*ff7b0479SSaeed Bishara 
1182*ff7b0479SSaeed Bishara 	/* clear errors before enabling interrupts */
1183*ff7b0479SSaeed Bishara 	mv_xor_device_clear_err_status(mv_chan);
1184*ff7b0479SSaeed Bishara 
1185*ff7b0479SSaeed Bishara 	irq = platform_get_irq(pdev, 0);
1186*ff7b0479SSaeed Bishara 	if (irq < 0) {
1187*ff7b0479SSaeed Bishara 		ret = irq;
1188*ff7b0479SSaeed Bishara 		goto err_free_dma;
1189*ff7b0479SSaeed Bishara 	}
1190*ff7b0479SSaeed Bishara 	ret = devm_request_irq(&pdev->dev, irq,
1191*ff7b0479SSaeed Bishara 			       mv_xor_interrupt_handler,
1192*ff7b0479SSaeed Bishara 			       0, dev_name(&pdev->dev), mv_chan);
1193*ff7b0479SSaeed Bishara 	if (ret)
1194*ff7b0479SSaeed Bishara 		goto err_free_dma;
1195*ff7b0479SSaeed Bishara 
1196*ff7b0479SSaeed Bishara 	mv_chan_unmask_interrupts(mv_chan);
1197*ff7b0479SSaeed Bishara 
1198*ff7b0479SSaeed Bishara 	mv_set_mode(mv_chan, DMA_MEMCPY);
1199*ff7b0479SSaeed Bishara 
1200*ff7b0479SSaeed Bishara 	spin_lock_init(&mv_chan->lock);
1201*ff7b0479SSaeed Bishara 	INIT_LIST_HEAD(&mv_chan->chain);
1202*ff7b0479SSaeed Bishara 	INIT_LIST_HEAD(&mv_chan->completed_slots);
1203*ff7b0479SSaeed Bishara 	INIT_LIST_HEAD(&mv_chan->all_slots);
1204*ff7b0479SSaeed Bishara 	INIT_RCU_HEAD(&mv_chan->common.rcu);
1205*ff7b0479SSaeed Bishara 	mv_chan->common.device = dma_dev;
1206*ff7b0479SSaeed Bishara 
1207*ff7b0479SSaeed Bishara 	list_add_tail(&mv_chan->common.device_node, &dma_dev->channels);
1208*ff7b0479SSaeed Bishara 
1209*ff7b0479SSaeed Bishara 	if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask)) {
1210*ff7b0479SSaeed Bishara 		ret = mv_xor_memcpy_self_test(adev);
1211*ff7b0479SSaeed Bishara 		dev_dbg(&pdev->dev, "memcpy self test returned %d\n", ret);
1212*ff7b0479SSaeed Bishara 		if (ret)
1213*ff7b0479SSaeed Bishara 			goto err_free_dma;
1214*ff7b0479SSaeed Bishara 	}
1215*ff7b0479SSaeed Bishara 
1216*ff7b0479SSaeed Bishara 	if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
1217*ff7b0479SSaeed Bishara 		ret = mv_xor_xor_self_test(adev);
1218*ff7b0479SSaeed Bishara 		dev_dbg(&pdev->dev, "xor self test returned %d\n", ret);
1219*ff7b0479SSaeed Bishara 		if (ret)
1220*ff7b0479SSaeed Bishara 			goto err_free_dma;
1221*ff7b0479SSaeed Bishara 	}
1222*ff7b0479SSaeed Bishara 
1223*ff7b0479SSaeed Bishara 	dev_printk(KERN_INFO, &pdev->dev, "Marvell XOR: ( %s%s%s%s)\n",
1224*ff7b0479SSaeed Bishara 		   dma_has_cap(DMA_XOR, dma_dev->cap_mask) ? "xor " : "",
1225*ff7b0479SSaeed Bishara 		   dma_has_cap(DMA_MEMSET, dma_dev->cap_mask) ? "fill " : "",
1226*ff7b0479SSaeed Bishara 		   dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask) ? "cpy " : "",
1227*ff7b0479SSaeed Bishara 		   dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask) ? "intr " : "");
1229*ff7b0479SSaeed Bishara 
1230*ff7b0479SSaeed Bishara 	ret = dma_async_device_register(dma_dev);
1231*ff7b0479SSaeed Bishara 	if (!ret)
1232*ff7b0479SSaeed Bishara 		goto out;
1233*ff7b0479SSaeed Bishara  err_free_dma:
1234*ff7b0479SSaeed Bishara 	dma_free_coherent(&adev->pdev->dev, plat_data->pool_size,
1235*ff7b0479SSaeed Bishara 			adev->dma_desc_pool_virt, adev->dma_desc_pool);
1236*ff7b0479SSaeed Bishara  out:
1237*ff7b0479SSaeed Bishara 	return ret;
1238*ff7b0479SSaeed Bishara }
1239*ff7b0479SSaeed Bishara 
1240*ff7b0479SSaeed Bishara static void
1241*ff7b0479SSaeed Bishara mv_xor_conf_mbus_windows(struct mv_xor_shared_private *msp,
1242*ff7b0479SSaeed Bishara 			 struct mbus_dram_target_info *dram)
1243*ff7b0479SSaeed Bishara {
1244*ff7b0479SSaeed Bishara 	void __iomem *base = msp->xor_base;
1245*ff7b0479SSaeed Bishara 	u32 win_enable = 0;
1246*ff7b0479SSaeed Bishara 	int i;
1247*ff7b0479SSaeed Bishara 
1248*ff7b0479SSaeed Bishara 	for (i = 0; i < 8; i++) {
1249*ff7b0479SSaeed Bishara 		writel(0, base + WINDOW_BASE(i));
1250*ff7b0479SSaeed Bishara 		writel(0, base + WINDOW_SIZE(i));
1251*ff7b0479SSaeed Bishara 		if (i < 4)
1252*ff7b0479SSaeed Bishara 			writel(0, base + WINDOW_REMAP_HIGH(i));
1253*ff7b0479SSaeed Bishara 	}
1254*ff7b0479SSaeed Bishara 
1255*ff7b0479SSaeed Bishara 	for (i = 0; i < dram->num_cs; i++) {
1256*ff7b0479SSaeed Bishara 		struct mbus_dram_window *cs = dram->cs + i;
1257*ff7b0479SSaeed Bishara 
1258*ff7b0479SSaeed Bishara 		writel((cs->base & 0xffff0000) |
1259*ff7b0479SSaeed Bishara 		       (cs->mbus_attr << 8) |
1260*ff7b0479SSaeed Bishara 		       dram->mbus_dram_target_id, base + WINDOW_BASE(i));
1261*ff7b0479SSaeed Bishara 		writel((cs->size - 1) & 0xffff0000, base + WINDOW_SIZE(i));
1262*ff7b0479SSaeed Bishara 
1263*ff7b0479SSaeed Bishara 		win_enable |= (1 << i);
1264*ff7b0479SSaeed Bishara 		win_enable |= 3 << (16 + (2 * i));
1265*ff7b0479SSaeed Bishara 	}
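	/*
	 * Worked example for a single chip select (i == 0): bit 0 enables
	 * window 0 and the value 3 in bits 17:16 (apparently the per-window
	 * access-control field) grants full access, so win_enable ends up
	 * as 0x00030001.
	 */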
1266*ff7b0479SSaeed Bishara 
1267*ff7b0479SSaeed Bishara 	writel(win_enable, base + WINDOW_BAR_ENABLE(0));
1268*ff7b0479SSaeed Bishara 	writel(win_enable, base + WINDOW_BAR_ENABLE(1));
1269*ff7b0479SSaeed Bishara }
1270*ff7b0479SSaeed Bishara 
1271*ff7b0479SSaeed Bishara static struct platform_driver mv_xor_driver = {
1272*ff7b0479SSaeed Bishara 	.probe		= mv_xor_probe,
1273*ff7b0479SSaeed Bishara 	.remove		= mv_xor_remove,
1274*ff7b0479SSaeed Bishara 	.driver		= {
1275*ff7b0479SSaeed Bishara 		.owner	= THIS_MODULE,
1276*ff7b0479SSaeed Bishara 		.name	= MV_XOR_NAME,
1277*ff7b0479SSaeed Bishara 	},
1278*ff7b0479SSaeed Bishara };
1279*ff7b0479SSaeed Bishara 
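/*
 * Editorial note: the XOR unit is exposed as two platform drivers.
 * MV_XOR_SHARED_NAME (below) binds the register windows common to the whole
 * engine, while MV_XOR_NAME binds one device per channel; each channel's
 * probe reaches the shared state through plat_data->shared.
 */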
1280*ff7b0479SSaeed Bishara static int mv_xor_shared_probe(struct platform_device *pdev)
1281*ff7b0479SSaeed Bishara {
1282*ff7b0479SSaeed Bishara 	struct mv_xor_platform_shared_data *msd = pdev->dev.platform_data;
1283*ff7b0479SSaeed Bishara 	struct mv_xor_shared_private *msp;
1284*ff7b0479SSaeed Bishara 	struct resource *res;
1285*ff7b0479SSaeed Bishara 
1286*ff7b0479SSaeed Bishara 	dev_printk(KERN_NOTICE, &pdev->dev, "Marvell shared XOR driver\n");
1287*ff7b0479SSaeed Bishara 
1288*ff7b0479SSaeed Bishara 	msp = devm_kzalloc(&pdev->dev, sizeof(*msp), GFP_KERNEL);
1289*ff7b0479SSaeed Bishara 	if (!msp)
1290*ff7b0479SSaeed Bishara 		return -ENOMEM;
1291*ff7b0479SSaeed Bishara 
1292*ff7b0479SSaeed Bishara 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1293*ff7b0479SSaeed Bishara 	if (!res)
1294*ff7b0479SSaeed Bishara 		return -ENODEV;
1295*ff7b0479SSaeed Bishara 
1296*ff7b0479SSaeed Bishara 	msp->xor_base = devm_ioremap(&pdev->dev, res->start,
1297*ff7b0479SSaeed Bishara 				     res->end - res->start + 1);
1298*ff7b0479SSaeed Bishara 	if (!msp->xor_base)
1299*ff7b0479SSaeed Bishara 		return -EBUSY;
1300*ff7b0479SSaeed Bishara 
1301*ff7b0479SSaeed Bishara 	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
1302*ff7b0479SSaeed Bishara 	if (!res)
1303*ff7b0479SSaeed Bishara 		return -ENODEV;
1304*ff7b0479SSaeed Bishara 
1305*ff7b0479SSaeed Bishara 	msp->xor_high_base = devm_ioremap(&pdev->dev, res->start,
1306*ff7b0479SSaeed Bishara 					  res->end - res->start + 1);
1307*ff7b0479SSaeed Bishara 	if (!msp->xor_high_base)
1308*ff7b0479SSaeed Bishara 		return -EBUSY;
1309*ff7b0479SSaeed Bishara 
1310*ff7b0479SSaeed Bishara 	platform_set_drvdata(pdev, msp);
1311*ff7b0479SSaeed Bishara 
1312*ff7b0479SSaeed Bishara 	/*
1313*ff7b0479SSaeed Bishara 	 * (Re-)program MBUS remapping windows if we are asked to.
1314*ff7b0479SSaeed Bishara 	 */
1315*ff7b0479SSaeed Bishara 	if (msd != NULL && msd->dram != NULL)
1316*ff7b0479SSaeed Bishara 		mv_xor_conf_mbus_windows(msp, msd->dram);
1317*ff7b0479SSaeed Bishara 
1318*ff7b0479SSaeed Bishara 	return 0;
1319*ff7b0479SSaeed Bishara }
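/*
 * No explicit unwinding is needed above: the devm_-managed allocation and
 * ioremaps are released automatically when probe fails or the device goes
 * away, which is also why mv_xor_shared_remove() below has nothing to do.
 */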
1320*ff7b0479SSaeed Bishara 
1321*ff7b0479SSaeed Bishara static int mv_xor_shared_remove(struct platform_device *pdev)
1322*ff7b0479SSaeed Bishara {
1323*ff7b0479SSaeed Bishara 	return 0;
1324*ff7b0479SSaeed Bishara }
1325*ff7b0479SSaeed Bishara 
1326*ff7b0479SSaeed Bishara static struct platform_driver mv_xor_shared_driver = {
1327*ff7b0479SSaeed Bishara 	.probe		= mv_xor_shared_probe,
1328*ff7b0479SSaeed Bishara 	.remove		= mv_xor_shared_remove,
1329*ff7b0479SSaeed Bishara 	.driver		= {
1330*ff7b0479SSaeed Bishara 		.owner	= THIS_MODULE,
1331*ff7b0479SSaeed Bishara 		.name	= MV_XOR_SHARED_NAME,
1332*ff7b0479SSaeed Bishara 	},
1333*ff7b0479SSaeed Bishara };
1334*ff7b0479SSaeed Bishara 
1335*ff7b0479SSaeed Bishara 
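/*
 * Registration order matters: mv_xor_probe() reads the shared device's
 * drvdata (adev->shared), so the shared driver is registered first, and the
 * channel driver is unwound if its own registration fails.
 */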
1336*ff7b0479SSaeed Bishara static int __init mv_xor_init(void)
1337*ff7b0479SSaeed Bishara {
1338*ff7b0479SSaeed Bishara 	int rc;
1339*ff7b0479SSaeed Bishara 
1340*ff7b0479SSaeed Bishara 	rc = platform_driver_register(&mv_xor_shared_driver);
1341*ff7b0479SSaeed Bishara 	if (!rc) {
1342*ff7b0479SSaeed Bishara 		rc = platform_driver_register(&mv_xor_driver);
1343*ff7b0479SSaeed Bishara 		if (rc)
1344*ff7b0479SSaeed Bishara 			platform_driver_unregister(&mv_xor_shared_driver);
1345*ff7b0479SSaeed Bishara 	}
1346*ff7b0479SSaeed Bishara 	return rc;
1347*ff7b0479SSaeed Bishara }
1348*ff7b0479SSaeed Bishara module_init(mv_xor_init);
1349*ff7b0479SSaeed Bishara 
1350*ff7b0479SSaeed Bishara /* it's currently unsafe to unload this module */
1351*ff7b0479SSaeed Bishara #if 0
1352*ff7b0479SSaeed Bishara static void __exit mv_xor_exit(void)
1353*ff7b0479SSaeed Bishara {
1354*ff7b0479SSaeed Bishara 	platform_driver_unregister(&mv_xor_driver);
1355*ff7b0479SSaeed Bishara 	platform_driver_unregister(&mv_xor_shared_driver);
1357*ff7b0479SSaeed Bishara }
1358*ff7b0479SSaeed Bishara 
1359*ff7b0479SSaeed Bishara module_exit(mv_xor_exit);
1360*ff7b0479SSaeed Bishara #endif
1361*ff7b0479SSaeed Bishara 
1362*ff7b0479SSaeed Bishara MODULE_AUTHOR("Saeed Bishara <saeed@marvell.com>");
1363*ff7b0479SSaeed Bishara MODULE_DESCRIPTION("DMA engine driver for Marvell's XOR engine");
1364*ff7b0479SSaeed Bishara MODULE_LICENSE("GPL");