xref: /openbmc/linux/drivers/dma/mv_xor.c (revision 777572911a732c0d3e6dbc514f9a1206606ffd0b)
1ff7b0479SSaeed Bishara /*
2ff7b0479SSaeed Bishara  * offload engine driver for the Marvell XOR engine
3ff7b0479SSaeed Bishara  * Copyright (C) 2007, 2008, Marvell International Ltd.
4ff7b0479SSaeed Bishara  *
5ff7b0479SSaeed Bishara  * This program is free software; you can redistribute it and/or modify it
6ff7b0479SSaeed Bishara  * under the terms and conditions of the GNU General Public License,
7ff7b0479SSaeed Bishara  * version 2, as published by the Free Software Foundation.
8ff7b0479SSaeed Bishara  *
9ff7b0479SSaeed Bishara  * This program is distributed in the hope it will be useful, but WITHOUT
10ff7b0479SSaeed Bishara  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11ff7b0479SSaeed Bishara  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
12ff7b0479SSaeed Bishara  * more details.
13ff7b0479SSaeed Bishara  */
14ff7b0479SSaeed Bishara 
15ff7b0479SSaeed Bishara #include <linux/init.h>
16ff7b0479SSaeed Bishara #include <linux/module.h>
175a0e3ad6STejun Heo #include <linux/slab.h>
18ff7b0479SSaeed Bishara #include <linux/delay.h>
19ff7b0479SSaeed Bishara #include <linux/dma-mapping.h>
20ff7b0479SSaeed Bishara #include <linux/spinlock.h>
21ff7b0479SSaeed Bishara #include <linux/interrupt.h>
226f166312SLior Amsalem #include <linux/of_device.h>
23ff7b0479SSaeed Bishara #include <linux/platform_device.h>
24ff7b0479SSaeed Bishara #include <linux/memory.h>
25c510182bSAndrew Lunn #include <linux/clk.h>
26f7d12ef5SThomas Petazzoni #include <linux/of.h>
27f7d12ef5SThomas Petazzoni #include <linux/of_irq.h>
28f7d12ef5SThomas Petazzoni #include <linux/irqdomain.h>
29*77757291SThomas Petazzoni #include <linux/cpumask.h>
30c02cecb9SArnd Bergmann #include <linux/platform_data/dma-mv_xor.h>
31d2ebfb33SRussell King - ARM Linux 
32d2ebfb33SRussell King - ARM Linux #include "dmaengine.h"
33ff7b0479SSaeed Bishara #include "mv_xor.h"
34ff7b0479SSaeed Bishara 
356f166312SLior Amsalem enum mv_xor_mode {
366f166312SLior Amsalem 	XOR_MODE_IN_REG,
376f166312SLior Amsalem 	XOR_MODE_IN_DESC,
386f166312SLior Amsalem };
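/*
 * In XOR_MODE_IN_REG the operation (XOR vs. MEMCPY) is programmed once
 * per channel in the configuration register (see mv_chan_set_mode());
 * in XOR_MODE_IN_DESC each hardware descriptor carries its own
 * operation in desc_command (see mv_desc_set_mode()).  The of_device_id
 * table at the bottom of this file selects register mode for
 * "marvell,orion-xor" and descriptor mode for "marvell,armada-380-xor".
 */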
396f166312SLior Amsalem 
40ff7b0479SSaeed Bishara static void mv_xor_issue_pending(struct dma_chan *chan);
41ff7b0479SSaeed Bishara 
42ff7b0479SSaeed Bishara #define to_mv_xor_chan(chan)		\
4398817b99SThomas Petazzoni 	container_of(chan, struct mv_xor_chan, dmachan)
44ff7b0479SSaeed Bishara 
45ff7b0479SSaeed Bishara #define to_mv_xor_slot(tx)		\
46ff7b0479SSaeed Bishara 	container_of(tx, struct mv_xor_desc_slot, async_tx)
47ff7b0479SSaeed Bishara 
48c98c1781SThomas Petazzoni #define mv_chan_to_devp(chan)           \
491ef48a26SThomas Petazzoni 	((chan)->dmadev.dev)
50c98c1781SThomas Petazzoni 
51dfc97661SLior Amsalem static void mv_desc_init(struct mv_xor_desc_slot *desc,
52ba87d137SLior Amsalem 			 dma_addr_t addr, u32 byte_count,
53ba87d137SLior Amsalem 			 enum dma_ctrl_flags flags)
54ff7b0479SSaeed Bishara {
55ff7b0479SSaeed Bishara 	struct mv_xor_desc *hw_desc = desc->hw_desc;
56ff7b0479SSaeed Bishara 
570e7488edSEzequiel Garcia 	hw_desc->status = XOR_DESC_DMA_OWNED;
58ff7b0479SSaeed Bishara 	hw_desc->phy_next_desc = 0;
59ba87d137SLior Amsalem 	/* Enable end-of-descriptor interrupts only for DMA_PREP_INTERRUPT */
60ba87d137SLior Amsalem 	hw_desc->desc_command = (flags & DMA_PREP_INTERRUPT) ?
61ba87d137SLior Amsalem 				XOR_DESC_EOD_INT_EN : 0;
62dfc97661SLior Amsalem 	hw_desc->phy_dest_addr = addr;
63ff7b0479SSaeed Bishara 	hw_desc->byte_count = byte_count;
64ff7b0479SSaeed Bishara }
65ff7b0479SSaeed Bishara 
666f166312SLior Amsalem static void mv_desc_set_mode(struct mv_xor_desc_slot *desc)
676f166312SLior Amsalem {
686f166312SLior Amsalem 	struct mv_xor_desc *hw_desc = desc->hw_desc;
696f166312SLior Amsalem 
706f166312SLior Amsalem 	switch (desc->type) {
716f166312SLior Amsalem 	case DMA_XOR:
726f166312SLior Amsalem 	case DMA_INTERRUPT:
736f166312SLior Amsalem 		hw_desc->desc_command |= XOR_DESC_OPERATION_XOR;
746f166312SLior Amsalem 		break;
756f166312SLior Amsalem 	case DMA_MEMCPY:
766f166312SLior Amsalem 		hw_desc->desc_command |= XOR_DESC_OPERATION_MEMCPY;
776f166312SLior Amsalem 		break;
786f166312SLior Amsalem 	default:
796f166312SLior Amsalem 		BUG();
806f166312SLior Amsalem 		return;
816f166312SLior Amsalem 	}
826f166312SLior Amsalem }
836f166312SLior Amsalem 
84ff7b0479SSaeed Bishara static void mv_desc_set_next_desc(struct mv_xor_desc_slot *desc,
85ff7b0479SSaeed Bishara 				  u32 next_desc_addr)
86ff7b0479SSaeed Bishara {
87ff7b0479SSaeed Bishara 	struct mv_xor_desc *hw_desc = desc->hw_desc;
88ff7b0479SSaeed Bishara 	BUG_ON(hw_desc->phy_next_desc);
89ff7b0479SSaeed Bishara 	hw_desc->phy_next_desc = next_desc_addr;
90ff7b0479SSaeed Bishara }
91ff7b0479SSaeed Bishara 
92ff7b0479SSaeed Bishara static void mv_desc_set_src_addr(struct mv_xor_desc_slot *desc,
93ff7b0479SSaeed Bishara 				 int index, dma_addr_t addr)
94ff7b0479SSaeed Bishara {
95ff7b0479SSaeed Bishara 	struct mv_xor_desc *hw_desc = desc->hw_desc;
96e03bc654SThomas Petazzoni 	hw_desc->phy_src_addr[mv_phy_src_idx(index)] = addr;
97ff7b0479SSaeed Bishara 	if (desc->type == DMA_XOR)
98ff7b0479SSaeed Bishara 		hw_desc->desc_command |= (1 << index);
99ff7b0479SSaeed Bishara }
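/*
 * For a DMA_XOR descriptor, bit <index> of desc_command enables source
 * block <index>: a three-source XOR, for instance, ends up with bits
 * 0-2 set on top of the operation bits.  mv_phy_src_idx() (defined in
 * mv_xor.h) translates the logical source index into the hardware slot
 * ordering used by the descriptor layout.
 */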
100ff7b0479SSaeed Bishara 
101ff7b0479SSaeed Bishara static u32 mv_chan_get_current_desc(struct mv_xor_chan *chan)
102ff7b0479SSaeed Bishara {
1035733c38aSThomas Petazzoni 	return readl_relaxed(XOR_CURR_DESC(chan));
104ff7b0479SSaeed Bishara }
105ff7b0479SSaeed Bishara 
106ff7b0479SSaeed Bishara static void mv_chan_set_next_descriptor(struct mv_xor_chan *chan,
107ff7b0479SSaeed Bishara 					u32 next_desc_addr)
108ff7b0479SSaeed Bishara {
1095733c38aSThomas Petazzoni 	writel_relaxed(next_desc_addr, XOR_NEXT_DESC(chan));
110ff7b0479SSaeed Bishara }
111ff7b0479SSaeed Bishara 
112ff7b0479SSaeed Bishara static void mv_chan_unmask_interrupts(struct mv_xor_chan *chan)
113ff7b0479SSaeed Bishara {
1145733c38aSThomas Petazzoni 	u32 val = readl_relaxed(XOR_INTR_MASK(chan));
115ff7b0479SSaeed Bishara 	val |= XOR_INTR_MASK_VALUE << (chan->idx * 16);
1165733c38aSThomas Petazzoni 	writel_relaxed(val, XOR_INTR_MASK(chan));
117ff7b0479SSaeed Bishara }
118ff7b0479SSaeed Bishara 
119ff7b0479SSaeed Bishara static u32 mv_chan_get_intr_cause(struct mv_xor_chan *chan)
120ff7b0479SSaeed Bishara {
1215733c38aSThomas Petazzoni 	u32 intr_cause = readl_relaxed(XOR_INTR_CAUSE(chan));
122ff7b0479SSaeed Bishara 	intr_cause = (intr_cause >> (chan->idx * 16)) & 0xFFFF;
123ff7b0479SSaeed Bishara 	return intr_cause;
124ff7b0479SSaeed Bishara }
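/*
 * The interrupt cause/mask registers are shared by the channels of an
 * XOR engine; each channel owns a 16-bit slice at bit offset
 * (chan->idx * 16), which is why the cause value is shifted and masked
 * here and why mv_chan_unmask_interrupts() shifts XOR_INTR_MASK_VALUE
 * by the same amount.
 */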
125ff7b0479SSaeed Bishara 
1260951e728SMaxime Ripard static void mv_chan_clear_eoc_cause(struct mv_xor_chan *chan)
127ff7b0479SSaeed Bishara {
128ba87d137SLior Amsalem 	u32 val;
129ba87d137SLior Amsalem 
130ba87d137SLior Amsalem 	val = XOR_INT_END_OF_DESC | XOR_INT_END_OF_CHAIN | XOR_INT_STOPPED;
131ba87d137SLior Amsalem 	val = ~(val << (chan->idx * 16));
132c98c1781SThomas Petazzoni 	dev_dbg(mv_chan_to_devp(chan), "%s, val 0x%08x\n", __func__, val);
1335733c38aSThomas Petazzoni 	writel_relaxed(val, XOR_INTR_CAUSE(chan));
134ff7b0479SSaeed Bishara }
135ff7b0479SSaeed Bishara 
1360951e728SMaxime Ripard static void mv_chan_clear_err_status(struct mv_xor_chan *chan)
137ff7b0479SSaeed Bishara {
138ff7b0479SSaeed Bishara 	u32 val = 0xFFFF0000 >> (chan->idx * 16);
1395733c38aSThomas Petazzoni 	writel_relaxed(val, XOR_INTR_CAUSE(chan));
140ff7b0479SSaeed Bishara }
141ff7b0479SSaeed Bishara 
1420951e728SMaxime Ripard static void mv_chan_set_mode(struct mv_xor_chan *chan,
143ff7b0479SSaeed Bishara 			     enum dma_transaction_type type)
144ff7b0479SSaeed Bishara {
145ff7b0479SSaeed Bishara 	u32 op_mode;
1465733c38aSThomas Petazzoni 	u32 config = readl_relaxed(XOR_CONFIG(chan));
147ff7b0479SSaeed Bishara 
148ff7b0479SSaeed Bishara 	switch (type) {
149ff7b0479SSaeed Bishara 	case DMA_XOR:
150ff7b0479SSaeed Bishara 		op_mode = XOR_OPERATION_MODE_XOR;
151ff7b0479SSaeed Bishara 		break;
152ff7b0479SSaeed Bishara 	case DMA_MEMCPY:
153ff7b0479SSaeed Bishara 		op_mode = XOR_OPERATION_MODE_MEMCPY;
154ff7b0479SSaeed Bishara 		break;
155ff7b0479SSaeed Bishara 	default:
156c98c1781SThomas Petazzoni 		dev_err(mv_chan_to_devp(chan),
1571ba151cdSJoe Perches 			"error: unsupported operation %d\n",
158ff7b0479SSaeed Bishara 			type);
159ff7b0479SSaeed Bishara 		BUG();
160ff7b0479SSaeed Bishara 		return;
161ff7b0479SSaeed Bishara 	}
162ff7b0479SSaeed Bishara 
163ff7b0479SSaeed Bishara 	config &= ~0x7;
164ff7b0479SSaeed Bishara 	config |= op_mode;
165e03bc654SThomas Petazzoni 
1666f166312SLior Amsalem 	if (IS_ENABLED(__BIG_ENDIAN))
1676f166312SLior Amsalem 		config |= XOR_DESCRIPTOR_SWAP;
1686f166312SLior Amsalem 	else
1696f166312SLior Amsalem 		config &= ~XOR_DESCRIPTOR_SWAP;
1706f166312SLior Amsalem 
1716f166312SLior Amsalem 	writel_relaxed(config, XOR_CONFIG(chan));
1726f166312SLior Amsalem 	chan->current_type = type;
1736f166312SLior Amsalem }
1746f166312SLior Amsalem 
1756f166312SLior Amsalem static void mv_chan_set_mode_to_desc(struct mv_xor_chan *chan)
1766f166312SLior Amsalem {
1776f166312SLior Amsalem 	u32 op_mode;
1786f166312SLior Amsalem 	u32 config = readl_relaxed(XOR_CONFIG(chan));
1796f166312SLior Amsalem 
1806f166312SLior Amsalem 	op_mode = XOR_OPERATION_MODE_IN_DESC;
1816f166312SLior Amsalem 
1826f166312SLior Amsalem 	config &= ~0x7;
1836f166312SLior Amsalem 	config |= op_mode;
1846f166312SLior Amsalem 
185e03bc654SThomas Petazzoni #if defined(__BIG_ENDIAN)
186e03bc654SThomas Petazzoni 	config |= XOR_DESCRIPTOR_SWAP;
187e03bc654SThomas Petazzoni #else
188e03bc654SThomas Petazzoni 	config &= ~XOR_DESCRIPTOR_SWAP;
189e03bc654SThomas Petazzoni #endif
190e03bc654SThomas Petazzoni 
1915733c38aSThomas Petazzoni 	writel_relaxed(config, XOR_CONFIG(chan));
192ff7b0479SSaeed Bishara }
193ff7b0479SSaeed Bishara 
194ff7b0479SSaeed Bishara static void mv_chan_activate(struct mv_xor_chan *chan)
195ff7b0479SSaeed Bishara {
196c98c1781SThomas Petazzoni 	dev_dbg(mv_chan_to_devp(chan), " activate chan.\n");
1975a9a55bfSEzequiel Garcia 
1985a9a55bfSEzequiel Garcia 	/* writel ensures all descriptors are flushed before activation */
1995a9a55bfSEzequiel Garcia 	writel(BIT(0), XOR_ACTIVATION(chan));
200ff7b0479SSaeed Bishara }
201ff7b0479SSaeed Bishara 
202ff7b0479SSaeed Bishara static char mv_chan_is_busy(struct mv_xor_chan *chan)
203ff7b0479SSaeed Bishara {
2045733c38aSThomas Petazzoni 	u32 state = readl_relaxed(XOR_ACTIVATION(chan));
205ff7b0479SSaeed Bishara 
206ff7b0479SSaeed Bishara 	state = (state >> 4) & 0x3;
207ff7b0479SSaeed Bishara 
208ff7b0479SSaeed Bishara 	return (state == 1) ? 1 : 0;
209ff7b0479SSaeed Bishara }
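/*
 * Bits [5:4] of the activation register hold the channel state; only a
 * value of 1 (channel actively executing a descriptor chain) is treated
 * as "busy" here.
 */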
210ff7b0479SSaeed Bishara 
211ff7b0479SSaeed Bishara /*
2120951e728SMaxime Ripard  * mv_chan_start_new_chain - program the engine to operate on new
2130951e728SMaxime Ripard  * chain headed by sw_desc
214ff7b0479SSaeed Bishara  * Caller must hold &mv_chan->lock while calling this function
215ff7b0479SSaeed Bishara  */
2160951e728SMaxime Ripard static void mv_chan_start_new_chain(struct mv_xor_chan *mv_chan,
217ff7b0479SSaeed Bishara 				    struct mv_xor_desc_slot *sw_desc)
218ff7b0479SSaeed Bishara {
219c98c1781SThomas Petazzoni 	dev_dbg(mv_chan_to_devp(mv_chan), "%s %d: sw_desc %p\n",
220ff7b0479SSaeed Bishara 		__func__, __LINE__, sw_desc);
221ff7b0479SSaeed Bishara 
222ff7b0479SSaeed Bishara 	/* set the hardware chain */
223ff7b0479SSaeed Bishara 	mv_chan_set_next_descriptor(mv_chan, sw_desc->async_tx.phys);
22448a9db46SBartlomiej Zolnierkiewicz 
225dfc97661SLior Amsalem 	mv_chan->pending++;
22698817b99SThomas Petazzoni 	mv_xor_issue_pending(&mv_chan->dmachan);
227ff7b0479SSaeed Bishara }
228ff7b0479SSaeed Bishara 
229ff7b0479SSaeed Bishara static dma_cookie_t
2300951e728SMaxime Ripard mv_desc_run_tx_complete_actions(struct mv_xor_desc_slot *desc,
2310951e728SMaxime Ripard 				struct mv_xor_chan *mv_chan,
2320951e728SMaxime Ripard 				dma_cookie_t cookie)
233ff7b0479SSaeed Bishara {
234ff7b0479SSaeed Bishara 	BUG_ON(desc->async_tx.cookie < 0);
235ff7b0479SSaeed Bishara 
236ff7b0479SSaeed Bishara 	if (desc->async_tx.cookie > 0) {
237ff7b0479SSaeed Bishara 		cookie = desc->async_tx.cookie;
238ff7b0479SSaeed Bishara 
239ff7b0479SSaeed Bishara 		/* call the callback (must not sleep or submit new
240ff7b0479SSaeed Bishara 		 * operations to this channel)
241ff7b0479SSaeed Bishara 		 */
242ff7b0479SSaeed Bishara 		if (desc->async_tx.callback)
243ff7b0479SSaeed Bishara 			desc->async_tx.callback(
244ff7b0479SSaeed Bishara 				desc->async_tx.callback_param);
245ff7b0479SSaeed Bishara 
246d38a8c62SDan Williams 		dma_descriptor_unmap(&desc->async_tx);
247ff7b0479SSaeed Bishara 	}
248ff7b0479SSaeed Bishara 
249ff7b0479SSaeed Bishara 	/* run dependent operations */
25007f2211eSDan Williams 	dma_run_dependencies(&desc->async_tx);
251ff7b0479SSaeed Bishara 
252ff7b0479SSaeed Bishara 	return cookie;
253ff7b0479SSaeed Bishara }
254ff7b0479SSaeed Bishara 
255ff7b0479SSaeed Bishara static int
2560951e728SMaxime Ripard mv_chan_clean_completed_slots(struct mv_xor_chan *mv_chan)
257ff7b0479SSaeed Bishara {
258ff7b0479SSaeed Bishara 	struct mv_xor_desc_slot *iter, *_iter;
259ff7b0479SSaeed Bishara 
260c98c1781SThomas Petazzoni 	dev_dbg(mv_chan_to_devp(mv_chan), "%s %d\n", __func__, __LINE__);
261ff7b0479SSaeed Bishara 	list_for_each_entry_safe(iter, _iter, &mv_chan->completed_slots,
262fbea28a2SLior Amsalem 				 node) {
263ff7b0479SSaeed Bishara 
264fbea28a2SLior Amsalem 		if (async_tx_test_ack(&iter->async_tx))
265fbea28a2SLior Amsalem 			list_move_tail(&iter->node, &mv_chan->free_slots);
266ff7b0479SSaeed Bishara 	}
267ff7b0479SSaeed Bishara 	return 0;
268ff7b0479SSaeed Bishara }
269ff7b0479SSaeed Bishara 
270ff7b0479SSaeed Bishara static int
2710951e728SMaxime Ripard mv_desc_clean_slot(struct mv_xor_desc_slot *desc,
272ff7b0479SSaeed Bishara 		   struct mv_xor_chan *mv_chan)
273ff7b0479SSaeed Bishara {
274c98c1781SThomas Petazzoni 	dev_dbg(mv_chan_to_devp(mv_chan), "%s %d: desc %p flags %d\n",
275ff7b0479SSaeed Bishara 		__func__, __LINE__, desc, desc->async_tx.flags);
276fbea28a2SLior Amsalem 
277ff7b0479SSaeed Bishara 	/* the client is allowed to attach dependent operations
278ff7b0479SSaeed Bishara 	 * until 'ack' is set
279ff7b0479SSaeed Bishara 	 */
280fbea28a2SLior Amsalem 	if (!async_tx_test_ack(&desc->async_tx))
281ff7b0479SSaeed Bishara 		/* move this slot to the completed_slots */
282fbea28a2SLior Amsalem 		list_move_tail(&desc->node, &mv_chan->completed_slots);
283fbea28a2SLior Amsalem 	else
284fbea28a2SLior Amsalem 		list_move_tail(&desc->node, &mv_chan->free_slots);
285ff7b0479SSaeed Bishara 
286ff7b0479SSaeed Bishara 	return 0;
287ff7b0479SSaeed Bishara }
288ff7b0479SSaeed Bishara 
289fbeec99aSEzequiel Garcia /* This function must be called with the mv_xor_chan spinlock held */
2900951e728SMaxime Ripard static void mv_chan_slot_cleanup(struct mv_xor_chan *mv_chan)
291ff7b0479SSaeed Bishara {
292ff7b0479SSaeed Bishara 	struct mv_xor_desc_slot *iter, *_iter;
293ff7b0479SSaeed Bishara 	dma_cookie_t cookie = 0;
294ff7b0479SSaeed Bishara 	int busy = mv_chan_is_busy(mv_chan);
295ff7b0479SSaeed Bishara 	u32 current_desc = mv_chan_get_current_desc(mv_chan);
2969136291fSLior Amsalem 	int current_cleaned = 0;
2979136291fSLior Amsalem 	struct mv_xor_desc *hw_desc;
298ff7b0479SSaeed Bishara 
299c98c1781SThomas Petazzoni 	dev_dbg(mv_chan_to_devp(mv_chan), "%s %d\n", __func__, __LINE__);
300c98c1781SThomas Petazzoni 	dev_dbg(mv_chan_to_devp(mv_chan), "current_desc %x\n", current_desc);
3010951e728SMaxime Ripard 	mv_chan_clean_completed_slots(mv_chan);
302ff7b0479SSaeed Bishara 
303ff7b0479SSaeed Bishara 	/* free completed slots from the chain starting with
304ff7b0479SSaeed Bishara 	 * the oldest descriptor
305ff7b0479SSaeed Bishara 	 */
306ff7b0479SSaeed Bishara 
307ff7b0479SSaeed Bishara 	list_for_each_entry_safe(iter, _iter, &mv_chan->chain,
308fbea28a2SLior Amsalem 				 node) {
309ff7b0479SSaeed Bishara 
3109136291fSLior Amsalem 		/* clean finished descriptors */
3119136291fSLior Amsalem 		hw_desc = iter->hw_desc;
3129136291fSLior Amsalem 		if (hw_desc->status & XOR_DESC_SUCCESS) {
3130951e728SMaxime Ripard 			cookie = mv_desc_run_tx_complete_actions(iter, mv_chan,
3149136291fSLior Amsalem 								 cookie);
315ff7b0479SSaeed Bishara 
3169136291fSLior Amsalem 			/* done processing desc, clean slot */
3170951e728SMaxime Ripard 			mv_desc_clean_slot(iter, mv_chan);
3189136291fSLior Amsalem 
3199136291fSLior Amsalem 			/* break if we cleaned the current descriptor */
320ff7b0479SSaeed Bishara 			if (iter->async_tx.phys == current_desc) {
3219136291fSLior Amsalem 				current_cleaned = 1;
322ff7b0479SSaeed Bishara 				break;
323ff7b0479SSaeed Bishara 			}
3249136291fSLior Amsalem 		} else {
3259136291fSLior Amsalem 			if (iter->async_tx.phys == current_desc) {
3269136291fSLior Amsalem 				current_cleaned = 0;
327ff7b0479SSaeed Bishara 				break;
328ff7b0479SSaeed Bishara 			}
3299136291fSLior Amsalem 		}
3309136291fSLior Amsalem 	}
331ff7b0479SSaeed Bishara 
332ff7b0479SSaeed Bishara 	if ((busy == 0) && !list_empty(&mv_chan->chain)) {
3339136291fSLior Amsalem 		if (current_cleaned) {
3349136291fSLior Amsalem 			/*
3359136291fSLior Amsalem 			 * current descriptor cleaned and removed, run
3369136291fSLior Amsalem 			 * from list head
3379136291fSLior Amsalem 			 */
3389136291fSLior Amsalem 			iter = list_entry(mv_chan->chain.next,
339ff7b0479SSaeed Bishara 					  struct mv_xor_desc_slot,
340fbea28a2SLior Amsalem 					  node);
3410951e728SMaxime Ripard 			mv_chan_start_new_chain(mv_chan, iter);
3429136291fSLior Amsalem 		} else {
343fbea28a2SLior Amsalem 			if (!list_is_last(&iter->node, &mv_chan->chain)) {
3449136291fSLior Amsalem 				/*
3459136291fSLior Amsalem 				 * descriptors are still waiting after
3469136291fSLior Amsalem 				 * current, trigger them
3479136291fSLior Amsalem 				 */
348fbea28a2SLior Amsalem 				iter = list_entry(iter->node.next,
3499136291fSLior Amsalem 						  struct mv_xor_desc_slot,
350fbea28a2SLior Amsalem 						  node);
3510951e728SMaxime Ripard 				mv_chan_start_new_chain(mv_chan, iter);
3529136291fSLior Amsalem 			} else {
3539136291fSLior Amsalem 				/*
3549136291fSLior Amsalem 				 * some descriptors are still waiting
3559136291fSLior Amsalem 				 * to be cleaned
3569136291fSLior Amsalem 				 */
3579136291fSLior Amsalem 				tasklet_schedule(&mv_chan->irq_tasklet);
3589136291fSLior Amsalem 			}
3599136291fSLior Amsalem 		}
360ff7b0479SSaeed Bishara 	}
361ff7b0479SSaeed Bishara 
362ff7b0479SSaeed Bishara 	if (cookie > 0)
36398817b99SThomas Petazzoni 		mv_chan->dmachan.completed_cookie = cookie;
364ff7b0479SSaeed Bishara }
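/*
 * In short, the cleanup above walks the descriptor chain from the
 * oldest entry: every descriptor whose hardware status has
 * XOR_DESC_SUCCESS set gets its completion actions run and its slot
 * recycled, stopping at the descriptor the engine is currently on.  If
 * the engine has gone idle while work is still queued, the chain is
 * restarted from the appropriate descriptor; otherwise the tasklet is
 * rescheduled so the remaining descriptors are cleaned later.
 */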
365ff7b0479SSaeed Bishara 
366ff7b0479SSaeed Bishara static void mv_xor_tasklet(unsigned long data)
367ff7b0479SSaeed Bishara {
368ff7b0479SSaeed Bishara 	struct mv_xor_chan *chan = (struct mv_xor_chan *) data;
369e43147acSEzequiel Garcia 
370e43147acSEzequiel Garcia 	spin_lock_bh(&chan->lock);
3710951e728SMaxime Ripard 	mv_chan_slot_cleanup(chan);
372e43147acSEzequiel Garcia 	spin_unlock_bh(&chan->lock);
373ff7b0479SSaeed Bishara }
374ff7b0479SSaeed Bishara 
375ff7b0479SSaeed Bishara static struct mv_xor_desc_slot *
3760951e728SMaxime Ripard mv_chan_alloc_slot(struct mv_xor_chan *mv_chan)
377ff7b0479SSaeed Bishara {
378fbea28a2SLior Amsalem 	struct mv_xor_desc_slot *iter;
379ff7b0479SSaeed Bishara 
380fbea28a2SLior Amsalem 	spin_lock_bh(&mv_chan->lock);
381fbea28a2SLior Amsalem 
382fbea28a2SLior Amsalem 	if (!list_empty(&mv_chan->free_slots)) {
383fbea28a2SLior Amsalem 		iter = list_first_entry(&mv_chan->free_slots,
384ff7b0479SSaeed Bishara 					struct mv_xor_desc_slot,
385fbea28a2SLior Amsalem 					node);
386ff7b0479SSaeed Bishara 
387fbea28a2SLior Amsalem 		list_move_tail(&iter->node, &mv_chan->allocated_slots);
388dfc97661SLior Amsalem 
389fbea28a2SLior Amsalem 		spin_unlock_bh(&mv_chan->lock);
390ff7b0479SSaeed Bishara 
391dfc97661SLior Amsalem 		/* pre-ack descriptor */
392ff7b0479SSaeed Bishara 		async_tx_ack(&iter->async_tx);
393dfc97661SLior Amsalem 		iter->async_tx.cookie = -EBUSY;
394dfc97661SLior Amsalem 
395dfc97661SLior Amsalem 		return iter;
396dfc97661SLior Amsalem 
397ff7b0479SSaeed Bishara 	}
398fbea28a2SLior Amsalem 
399fbea28a2SLior Amsalem 	spin_unlock_bh(&mv_chan->lock);
400ff7b0479SSaeed Bishara 
401ff7b0479SSaeed Bishara 	/* try to free some slots if the allocation fails */
402ff7b0479SSaeed Bishara 	tasklet_schedule(&mv_chan->irq_tasklet);
403ff7b0479SSaeed Bishara 
404ff7b0479SSaeed Bishara 	return NULL;
405ff7b0479SSaeed Bishara }
406ff7b0479SSaeed Bishara 
407ff7b0479SSaeed Bishara /************************ DMA engine API functions ****************************/
408ff7b0479SSaeed Bishara static dma_cookie_t
409ff7b0479SSaeed Bishara mv_xor_tx_submit(struct dma_async_tx_descriptor *tx)
410ff7b0479SSaeed Bishara {
411ff7b0479SSaeed Bishara 	struct mv_xor_desc_slot *sw_desc = to_mv_xor_slot(tx);
412ff7b0479SSaeed Bishara 	struct mv_xor_chan *mv_chan = to_mv_xor_chan(tx->chan);
413dfc97661SLior Amsalem 	struct mv_xor_desc_slot *old_chain_tail;
414ff7b0479SSaeed Bishara 	dma_cookie_t cookie;
415ff7b0479SSaeed Bishara 	int new_hw_chain = 1;
416ff7b0479SSaeed Bishara 
417c98c1781SThomas Petazzoni 	dev_dbg(mv_chan_to_devp(mv_chan),
418ff7b0479SSaeed Bishara 		"%s sw_desc %p: async_tx %p\n",
419ff7b0479SSaeed Bishara 		__func__, sw_desc, &sw_desc->async_tx);
420ff7b0479SSaeed Bishara 
421ff7b0479SSaeed Bishara 	spin_lock_bh(&mv_chan->lock);
422884485e1SRussell King - ARM Linux 	cookie = dma_cookie_assign(tx);
423ff7b0479SSaeed Bishara 
424ff7b0479SSaeed Bishara 	if (list_empty(&mv_chan->chain))
425fbea28a2SLior Amsalem 		list_move_tail(&sw_desc->node, &mv_chan->chain);
426ff7b0479SSaeed Bishara 	else {
427ff7b0479SSaeed Bishara 		new_hw_chain = 0;
428ff7b0479SSaeed Bishara 
429ff7b0479SSaeed Bishara 		old_chain_tail = list_entry(mv_chan->chain.prev,
430ff7b0479SSaeed Bishara 					    struct mv_xor_desc_slot,
431fbea28a2SLior Amsalem 					    node);
432fbea28a2SLior Amsalem 		list_move_tail(&sw_desc->node, &mv_chan->chain);
433ff7b0479SSaeed Bishara 
43431fd8f5bSOlof Johansson 		dev_dbg(mv_chan_to_devp(mv_chan), "Append to last desc %pa\n",
43531fd8f5bSOlof Johansson 			&old_chain_tail->async_tx.phys);
436ff7b0479SSaeed Bishara 
437ff7b0479SSaeed Bishara 		/* fix up the hardware chain */
438dfc97661SLior Amsalem 		mv_desc_set_next_desc(old_chain_tail, sw_desc->async_tx.phys);
439ff7b0479SSaeed Bishara 
440ff7b0479SSaeed Bishara 		/* if the channel is not busy */
441ff7b0479SSaeed Bishara 		if (!mv_chan_is_busy(mv_chan)) {
442ff7b0479SSaeed Bishara 			u32 current_desc = mv_chan_get_current_desc(mv_chan);
443ff7b0479SSaeed Bishara 			/*
444ff7b0479SSaeed Bishara 			 * and the current desc is the end of the chain before
445ff7b0479SSaeed Bishara 			 * the append, then we need to start the channel
446ff7b0479SSaeed Bishara 			 */
447ff7b0479SSaeed Bishara 			if (current_desc == old_chain_tail->async_tx.phys)
448ff7b0479SSaeed Bishara 				new_hw_chain = 1;
449ff7b0479SSaeed Bishara 		}
450ff7b0479SSaeed Bishara 	}
451ff7b0479SSaeed Bishara 
452ff7b0479SSaeed Bishara 	if (new_hw_chain)
4530951e728SMaxime Ripard 		mv_chan_start_new_chain(mv_chan, sw_desc);
454ff7b0479SSaeed Bishara 
455ff7b0479SSaeed Bishara 	spin_unlock_bh(&mv_chan->lock);
456ff7b0479SSaeed Bishara 
457ff7b0479SSaeed Bishara 	return cookie;
458ff7b0479SSaeed Bishara }
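/*
 * A minimal sketch of how a dmaengine client exercises this path (the
 * buffer names are made up; only the API calls are real):
 *
 *	struct dma_async_tx_descriptor *tx;
 *	dma_cookie_t cookie;
 *
 *	tx = chan->device->device_prep_dma_memcpy(chan, dst_dma, src_dma,
 *						   len, DMA_PREP_INTERRUPT);
 *	cookie = dmaengine_submit(tx);		ends up in mv_xor_tx_submit()
 *	dma_async_issue_pending(chan);		ends up in mv_xor_issue_pending()
 *	...
 *	status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
 */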
459ff7b0479SSaeed Bishara 
460ff7b0479SSaeed Bishara /* returns the number of allocated descriptors */
461aa1e6f1aSDan Williams static int mv_xor_alloc_chan_resources(struct dma_chan *chan)
462ff7b0479SSaeed Bishara {
46331fd8f5bSOlof Johansson 	void *virt_desc;
46431fd8f5bSOlof Johansson 	dma_addr_t dma_desc;
465ff7b0479SSaeed Bishara 	int idx;
466ff7b0479SSaeed Bishara 	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
467ff7b0479SSaeed Bishara 	struct mv_xor_desc_slot *slot = NULL;
468b503fa01SThomas Petazzoni 	int num_descs_in_pool = MV_XOR_POOL_SIZE/MV_XOR_SLOT_SIZE;
469ff7b0479SSaeed Bishara 
470ff7b0479SSaeed Bishara 	/* Allocate descriptor slots */
471ff7b0479SSaeed Bishara 	idx = mv_chan->slots_allocated;
472ff7b0479SSaeed Bishara 	while (idx < num_descs_in_pool) {
473ff7b0479SSaeed Bishara 		slot = kzalloc(sizeof(*slot), GFP_KERNEL);
474ff7b0479SSaeed Bishara 		if (!slot) {
475b8291ddeSEzequiel Garcia 			dev_info(mv_chan_to_devp(mv_chan),
476b8291ddeSEzequiel Garcia 				 "channel only initialized %d descriptor slots",
477b8291ddeSEzequiel Garcia 				 idx);
478ff7b0479SSaeed Bishara 			break;
479ff7b0479SSaeed Bishara 		}
48031fd8f5bSOlof Johansson 		virt_desc = mv_chan->dma_desc_pool_virt;
48131fd8f5bSOlof Johansson 		slot->hw_desc = virt_desc + idx * MV_XOR_SLOT_SIZE;
482ff7b0479SSaeed Bishara 
483ff7b0479SSaeed Bishara 		dma_async_tx_descriptor_init(&slot->async_tx, chan);
484ff7b0479SSaeed Bishara 		slot->async_tx.tx_submit = mv_xor_tx_submit;
485fbea28a2SLior Amsalem 		INIT_LIST_HEAD(&slot->node);
48631fd8f5bSOlof Johansson 		dma_desc = mv_chan->dma_desc_pool;
48731fd8f5bSOlof Johansson 		slot->async_tx.phys = dma_desc + idx * MV_XOR_SLOT_SIZE;
488ff7b0479SSaeed Bishara 		slot->idx = idx++;
489ff7b0479SSaeed Bishara 
490ff7b0479SSaeed Bishara 		spin_lock_bh(&mv_chan->lock);
491ff7b0479SSaeed Bishara 		mv_chan->slots_allocated = idx;
492fbea28a2SLior Amsalem 		list_add_tail(&slot->node, &mv_chan->free_slots);
493ff7b0479SSaeed Bishara 		spin_unlock_bh(&mv_chan->lock);
494ff7b0479SSaeed Bishara 	}
495ff7b0479SSaeed Bishara 
496c98c1781SThomas Petazzoni 	dev_dbg(mv_chan_to_devp(mv_chan),
497fbea28a2SLior Amsalem 		"allocated %d descriptor slots\n",
498fbea28a2SLior Amsalem 		mv_chan->slots_allocated);
499ff7b0479SSaeed Bishara 
500ff7b0479SSaeed Bishara 	return mv_chan->slots_allocated ? : -ENOMEM;
501ff7b0479SSaeed Bishara }
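/*
 * Descriptor slots are carved out of the single coherent pool allocated
 * in mv_xor_channel_add(): slot i uses the CPU address
 * dma_desc_pool_virt + i * MV_XOR_SLOT_SIZE and the bus address
 * dma_desc_pool + i * MV_XOR_SLOT_SIZE, so a pool of MV_XOR_POOL_SIZE
 * bytes yields MV_XOR_POOL_SIZE / MV_XOR_SLOT_SIZE slots per channel.
 */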
502ff7b0479SSaeed Bishara 
503ff7b0479SSaeed Bishara static struct dma_async_tx_descriptor *
504ff7b0479SSaeed Bishara mv_xor_prep_dma_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
505ff7b0479SSaeed Bishara 		    unsigned int src_cnt, size_t len, unsigned long flags)
506ff7b0479SSaeed Bishara {
507ff7b0479SSaeed Bishara 	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
508dfc97661SLior Amsalem 	struct mv_xor_desc_slot *sw_desc;
509ff7b0479SSaeed Bishara 
510ff7b0479SSaeed Bishara 	if (unlikely(len < MV_XOR_MIN_BYTE_COUNT))
511ff7b0479SSaeed Bishara 		return NULL;
512ff7b0479SSaeed Bishara 
5137912d300SColy Li 	BUG_ON(len > MV_XOR_MAX_BYTE_COUNT);
514ff7b0479SSaeed Bishara 
515c98c1781SThomas Petazzoni 	dev_dbg(mv_chan_to_devp(mv_chan),
51631fd8f5bSOlof Johansson 		"%s src_cnt: %d len: %u dest %pad flags: %ld\n",
51731fd8f5bSOlof Johansson 		__func__, src_cnt, len, &dest, flags);
518ff7b0479SSaeed Bishara 
5190951e728SMaxime Ripard 	sw_desc = mv_chan_alloc_slot(mv_chan);
520ff7b0479SSaeed Bishara 	if (sw_desc) {
521ff7b0479SSaeed Bishara 		sw_desc->type = DMA_XOR;
522ff7b0479SSaeed Bishara 		sw_desc->async_tx.flags = flags;
523ba87d137SLior Amsalem 		mv_desc_init(sw_desc, dest, len, flags);
5246f166312SLior Amsalem 		if (mv_chan->op_in_desc == XOR_MODE_IN_DESC)
5256f166312SLior Amsalem 			mv_desc_set_mode(sw_desc);
526ff7b0479SSaeed Bishara 		while (src_cnt--)
527dfc97661SLior Amsalem 			mv_desc_set_src_addr(sw_desc, src_cnt, src[src_cnt]);
528ff7b0479SSaeed Bishara 	}
529fbea28a2SLior Amsalem 
530c98c1781SThomas Petazzoni 	dev_dbg(mv_chan_to_devp(mv_chan),
531ff7b0479SSaeed Bishara 		"%s sw_desc %p async_tx %p \n",
532ff7b0479SSaeed Bishara 		__func__, sw_desc, &sw_desc->async_tx);
533ff7b0479SSaeed Bishara 	return sw_desc ? &sw_desc->async_tx : NULL;
534ff7b0479SSaeed Bishara }
535ff7b0479SSaeed Bishara 
5363e4f52e2SLior Amsalem static struct dma_async_tx_descriptor *
5373e4f52e2SLior Amsalem mv_xor_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
5383e4f52e2SLior Amsalem 		size_t len, unsigned long flags)
5393e4f52e2SLior Amsalem {
5403e4f52e2SLior Amsalem 	/*
5413e4f52e2SLior Amsalem 	 * A MEMCPY operation is identical to an XOR operation with only
5423e4f52e2SLior Amsalem 	 * a single source address.
5433e4f52e2SLior Amsalem 	 */
5443e4f52e2SLior Amsalem 	return mv_xor_prep_dma_xor(chan, dest, &src, 1, len, flags);
5453e4f52e2SLior Amsalem }
5463e4f52e2SLior Amsalem 
54722843545SLior Amsalem static struct dma_async_tx_descriptor *
54822843545SLior Amsalem mv_xor_prep_dma_interrupt(struct dma_chan *chan, unsigned long flags)
54922843545SLior Amsalem {
55022843545SLior Amsalem 	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
55122843545SLior Amsalem 	dma_addr_t src, dest;
55222843545SLior Amsalem 	size_t len;
55322843545SLior Amsalem 
55422843545SLior Amsalem 	src = mv_chan->dummy_src_addr;
55522843545SLior Amsalem 	dest = mv_chan->dummy_dst_addr;
55622843545SLior Amsalem 	len = MV_XOR_MIN_BYTE_COUNT;
55722843545SLior Amsalem 
55822843545SLior Amsalem 	/*
55922843545SLior Amsalem 	 * We implement the DMA_INTERRUPT operation as a minimum sized
56022843545SLior Amsalem 	 * XOR operation with a single dummy source address.
56122843545SLior Amsalem 	 */
56222843545SLior Amsalem 	return mv_xor_prep_dma_xor(chan, dest, &src, 1, len, flags);
56322843545SLior Amsalem }
56422843545SLior Amsalem 
565ff7b0479SSaeed Bishara static void mv_xor_free_chan_resources(struct dma_chan *chan)
566ff7b0479SSaeed Bishara {
567ff7b0479SSaeed Bishara 	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
568ff7b0479SSaeed Bishara 	struct mv_xor_desc_slot *iter, *_iter;
569ff7b0479SSaeed Bishara 	int in_use_descs = 0;
570ff7b0479SSaeed Bishara 
571ff7b0479SSaeed Bishara 	spin_lock_bh(&mv_chan->lock);
572e43147acSEzequiel Garcia 
5730951e728SMaxime Ripard 	mv_chan_slot_cleanup(mv_chan);
574ff7b0479SSaeed Bishara 
575ff7b0479SSaeed Bishara 	list_for_each_entry_safe(iter, _iter, &mv_chan->chain,
576fbea28a2SLior Amsalem 					node) {
577ff7b0479SSaeed Bishara 		in_use_descs++;
578fbea28a2SLior Amsalem 		list_move_tail(&iter->node, &mv_chan->free_slots);
579ff7b0479SSaeed Bishara 	}
580ff7b0479SSaeed Bishara 	list_for_each_entry_safe(iter, _iter, &mv_chan->completed_slots,
581fbea28a2SLior Amsalem 				 node) {
582ff7b0479SSaeed Bishara 		in_use_descs++;
583fbea28a2SLior Amsalem 		list_move_tail(&iter->node, &mv_chan->free_slots);
584fbea28a2SLior Amsalem 	}
585fbea28a2SLior Amsalem 	list_for_each_entry_safe(iter, _iter, &mv_chan->allocated_slots,
586fbea28a2SLior Amsalem 				 node) {
587fbea28a2SLior Amsalem 		in_use_descs++;
588fbea28a2SLior Amsalem 		list_move_tail(&iter->node, &mv_chan->free_slots);
589ff7b0479SSaeed Bishara 	}
590ff7b0479SSaeed Bishara 	list_for_each_entry_safe_reverse(
591fbea28a2SLior Amsalem 		iter, _iter, &mv_chan->free_slots, node) {
592fbea28a2SLior Amsalem 		list_del(&iter->node);
593ff7b0479SSaeed Bishara 		kfree(iter);
594ff7b0479SSaeed Bishara 		mv_chan->slots_allocated--;
595ff7b0479SSaeed Bishara 	}
596ff7b0479SSaeed Bishara 
597c98c1781SThomas Petazzoni 	dev_dbg(mv_chan_to_devp(mv_chan), "%s slots_allocated %d\n",
598ff7b0479SSaeed Bishara 		__func__, mv_chan->slots_allocated);
599ff7b0479SSaeed Bishara 	spin_unlock_bh(&mv_chan->lock);
600ff7b0479SSaeed Bishara 
601ff7b0479SSaeed Bishara 	if (in_use_descs)
602c98c1781SThomas Petazzoni 		dev_err(mv_chan_to_devp(mv_chan),
603ff7b0479SSaeed Bishara 			"freeing %d in use descriptors!\n", in_use_descs);
604ff7b0479SSaeed Bishara }
605ff7b0479SSaeed Bishara 
606ff7b0479SSaeed Bishara /**
60707934481SLinus Walleij  * mv_xor_status - poll the status of an XOR transaction
608ff7b0479SSaeed Bishara  * @chan: XOR channel handle
609ff7b0479SSaeed Bishara  * @cookie: XOR transaction identifier
61007934481SLinus Walleij  * @txstate: XOR transactions state holder (or NULL)
611ff7b0479SSaeed Bishara  */
61207934481SLinus Walleij static enum dma_status mv_xor_status(struct dma_chan *chan,
613ff7b0479SSaeed Bishara 					  dma_cookie_t cookie,
61407934481SLinus Walleij 					  struct dma_tx_state *txstate)
615ff7b0479SSaeed Bishara {
616ff7b0479SSaeed Bishara 	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
617ff7b0479SSaeed Bishara 	enum dma_status ret;
618ff7b0479SSaeed Bishara 
61996a2af41SRussell King - ARM Linux 	ret = dma_cookie_status(chan, cookie, txstate);
620890766d2SEzequiel Garcia 	if (ret == DMA_COMPLETE)
621ff7b0479SSaeed Bishara 		return ret;
622e43147acSEzequiel Garcia 
623e43147acSEzequiel Garcia 	spin_lock_bh(&mv_chan->lock);
6240951e728SMaxime Ripard 	mv_chan_slot_cleanup(mv_chan);
625e43147acSEzequiel Garcia 	spin_unlock_bh(&mv_chan->lock);
626ff7b0479SSaeed Bishara 
62796a2af41SRussell King - ARM Linux 	return dma_cookie_status(chan, cookie, txstate);
628ff7b0479SSaeed Bishara }
629ff7b0479SSaeed Bishara 
6300951e728SMaxime Ripard static void mv_chan_dump_regs(struct mv_xor_chan *chan)
631ff7b0479SSaeed Bishara {
632ff7b0479SSaeed Bishara 	u32 val;
633ff7b0479SSaeed Bishara 
6345733c38aSThomas Petazzoni 	val = readl_relaxed(XOR_CONFIG(chan));
6351ba151cdSJoe Perches 	dev_err(mv_chan_to_devp(chan), "config       0x%08x\n", val);
636ff7b0479SSaeed Bishara 
6375733c38aSThomas Petazzoni 	val = readl_relaxed(XOR_ACTIVATION(chan));
6381ba151cdSJoe Perches 	dev_err(mv_chan_to_devp(chan), "activation   0x%08x\n", val);
639ff7b0479SSaeed Bishara 
6405733c38aSThomas Petazzoni 	val = readl_relaxed(XOR_INTR_CAUSE(chan));
6411ba151cdSJoe Perches 	dev_err(mv_chan_to_devp(chan), "intr cause   0x%08x\n", val);
642ff7b0479SSaeed Bishara 
6435733c38aSThomas Petazzoni 	val = readl_relaxed(XOR_INTR_MASK(chan));
6441ba151cdSJoe Perches 	dev_err(mv_chan_to_devp(chan), "intr mask    0x%08x\n", val);
645ff7b0479SSaeed Bishara 
6465733c38aSThomas Petazzoni 	val = readl_relaxed(XOR_ERROR_CAUSE(chan));
6471ba151cdSJoe Perches 	dev_err(mv_chan_to_devp(chan), "error cause  0x%08x\n", val);
648ff7b0479SSaeed Bishara 
6495733c38aSThomas Petazzoni 	val = readl_relaxed(XOR_ERROR_ADDR(chan));
6501ba151cdSJoe Perches 	dev_err(mv_chan_to_devp(chan), "error addr   0x%08x\n", val);
651ff7b0479SSaeed Bishara }
652ff7b0479SSaeed Bishara 
6530951e728SMaxime Ripard static void mv_chan_err_interrupt_handler(struct mv_xor_chan *chan,
654ff7b0479SSaeed Bishara 					  u32 intr_cause)
655ff7b0479SSaeed Bishara {
6560e7488edSEzequiel Garcia 	if (intr_cause & XOR_INT_ERR_DECODE) {
6570e7488edSEzequiel Garcia 		dev_dbg(mv_chan_to_devp(chan), "ignoring address decode error\n");
658ff7b0479SSaeed Bishara 		return;
659ff7b0479SSaeed Bishara 	}
660ff7b0479SSaeed Bishara 
6610e7488edSEzequiel Garcia 	dev_err(mv_chan_to_devp(chan), "error on chan %d. intr cause 0x%08x\n",
662ff7b0479SSaeed Bishara 		chan->idx, intr_cause);
663ff7b0479SSaeed Bishara 
6640951e728SMaxime Ripard 	mv_chan_dump_regs(chan);
6650e7488edSEzequiel Garcia 	WARN_ON(1);
666ff7b0479SSaeed Bishara }
667ff7b0479SSaeed Bishara 
668ff7b0479SSaeed Bishara static irqreturn_t mv_xor_interrupt_handler(int irq, void *data)
669ff7b0479SSaeed Bishara {
670ff7b0479SSaeed Bishara 	struct mv_xor_chan *chan = data;
671ff7b0479SSaeed Bishara 	u32 intr_cause = mv_chan_get_intr_cause(chan);
672ff7b0479SSaeed Bishara 
673c98c1781SThomas Petazzoni 	dev_dbg(mv_chan_to_devp(chan), "intr cause %x\n", intr_cause);
674ff7b0479SSaeed Bishara 
6750e7488edSEzequiel Garcia 	if (intr_cause & XOR_INTR_ERRORS)
6760951e728SMaxime Ripard 		mv_chan_err_interrupt_handler(chan, intr_cause);
677ff7b0479SSaeed Bishara 
678ff7b0479SSaeed Bishara 	tasklet_schedule(&chan->irq_tasklet);
679ff7b0479SSaeed Bishara 
6800951e728SMaxime Ripard 	mv_chan_clear_eoc_cause(chan);
681ff7b0479SSaeed Bishara 
682ff7b0479SSaeed Bishara 	return IRQ_HANDLED;
683ff7b0479SSaeed Bishara }
684ff7b0479SSaeed Bishara 
685ff7b0479SSaeed Bishara static void mv_xor_issue_pending(struct dma_chan *chan)
686ff7b0479SSaeed Bishara {
687ff7b0479SSaeed Bishara 	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
688ff7b0479SSaeed Bishara 
689ff7b0479SSaeed Bishara 	if (mv_chan->pending >= MV_XOR_THRESHOLD) {
690ff7b0479SSaeed Bishara 		mv_chan->pending = 0;
691ff7b0479SSaeed Bishara 		mv_chan_activate(mv_chan);
692ff7b0479SSaeed Bishara 	}
693ff7b0479SSaeed Bishara }
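/*
 * Activation is batched: mv_chan_start_new_chain() bumps
 * mv_chan->pending and calls back into this function, but the
 * activation register is only written once the count reaches
 * MV_XOR_THRESHOLD, at which point the counter is reset.
 */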
694ff7b0479SSaeed Bishara 
695ff7b0479SSaeed Bishara /*
696ff7b0479SSaeed Bishara  * Perform a transaction to verify the HW works.
697ff7b0479SSaeed Bishara  */
698ff7b0479SSaeed Bishara 
6990951e728SMaxime Ripard static int mv_chan_memcpy_self_test(struct mv_xor_chan *mv_chan)
700ff7b0479SSaeed Bishara {
701b8c01d25SEzequiel Garcia 	int i, ret;
702ff7b0479SSaeed Bishara 	void *src, *dest;
703ff7b0479SSaeed Bishara 	dma_addr_t src_dma, dest_dma;
704ff7b0479SSaeed Bishara 	struct dma_chan *dma_chan;
705ff7b0479SSaeed Bishara 	dma_cookie_t cookie;
706ff7b0479SSaeed Bishara 	struct dma_async_tx_descriptor *tx;
707d16695a7SEzequiel Garcia 	struct dmaengine_unmap_data *unmap;
708ff7b0479SSaeed Bishara 	int err = 0;
709ff7b0479SSaeed Bishara 
710d16695a7SEzequiel Garcia 	src = kmalloc(sizeof(u8) * PAGE_SIZE, GFP_KERNEL);
711ff7b0479SSaeed Bishara 	if (!src)
712ff7b0479SSaeed Bishara 		return -ENOMEM;
713ff7b0479SSaeed Bishara 
714d16695a7SEzequiel Garcia 	dest = kzalloc(sizeof(u8) * PAGE_SIZE, GFP_KERNEL);
715ff7b0479SSaeed Bishara 	if (!dest) {
716ff7b0479SSaeed Bishara 		kfree(src);
717ff7b0479SSaeed Bishara 		return -ENOMEM;
718ff7b0479SSaeed Bishara 	}
719ff7b0479SSaeed Bishara 
720ff7b0479SSaeed Bishara 	/* Fill in src buffer */
721d16695a7SEzequiel Garcia 	for (i = 0; i < PAGE_SIZE; i++)
722ff7b0479SSaeed Bishara 		((u8 *) src)[i] = (u8)i;
723ff7b0479SSaeed Bishara 
724275cc0c8SThomas Petazzoni 	dma_chan = &mv_chan->dmachan;
725aa1e6f1aSDan Williams 	if (mv_xor_alloc_chan_resources(dma_chan) < 1) {
726ff7b0479SSaeed Bishara 		err = -ENODEV;
727ff7b0479SSaeed Bishara 		goto out;
728ff7b0479SSaeed Bishara 	}
729ff7b0479SSaeed Bishara 
730d16695a7SEzequiel Garcia 	unmap = dmaengine_get_unmap_data(dma_chan->device->dev, 2, GFP_KERNEL);
731d16695a7SEzequiel Garcia 	if (!unmap) {
732d16695a7SEzequiel Garcia 		err = -ENOMEM;
733d16695a7SEzequiel Garcia 		goto free_resources;
734d16695a7SEzequiel Garcia 	}
735ff7b0479SSaeed Bishara 
736d16695a7SEzequiel Garcia 	src_dma = dma_map_page(dma_chan->device->dev, virt_to_page(src), 0,
737d16695a7SEzequiel Garcia 				 PAGE_SIZE, DMA_TO_DEVICE);
738d16695a7SEzequiel Garcia 	unmap->addr[0] = src_dma;
739d16695a7SEzequiel Garcia 
740b8c01d25SEzequiel Garcia 	ret = dma_mapping_error(dma_chan->device->dev, src_dma);
741b8c01d25SEzequiel Garcia 	if (ret) {
742b8c01d25SEzequiel Garcia 		err = -ENOMEM;
743b8c01d25SEzequiel Garcia 		goto free_resources;
744b8c01d25SEzequiel Garcia 	}
745b8c01d25SEzequiel Garcia 	unmap->to_cnt = 1;
746b8c01d25SEzequiel Garcia 
747d16695a7SEzequiel Garcia 	dest_dma = dma_map_page(dma_chan->device->dev, virt_to_page(dest), 0,
748d16695a7SEzequiel Garcia 				  PAGE_SIZE, DMA_FROM_DEVICE);
749d16695a7SEzequiel Garcia 	unmap->addr[1] = dest_dma;
750d16695a7SEzequiel Garcia 
751b8c01d25SEzequiel Garcia 	ret = dma_mapping_error(dma_chan->device->dev, dest_dma);
752b8c01d25SEzequiel Garcia 	if (ret) {
753b8c01d25SEzequiel Garcia 		err = -ENOMEM;
754b8c01d25SEzequiel Garcia 		goto free_resources;
755b8c01d25SEzequiel Garcia 	}
756b8c01d25SEzequiel Garcia 	unmap->from_cnt = 1;
757d16695a7SEzequiel Garcia 	unmap->len = PAGE_SIZE;
758ff7b0479SSaeed Bishara 
759ff7b0479SSaeed Bishara 	tx = mv_xor_prep_dma_memcpy(dma_chan, dest_dma, src_dma,
760d16695a7SEzequiel Garcia 				    PAGE_SIZE, 0);
761b8c01d25SEzequiel Garcia 	if (!tx) {
762b8c01d25SEzequiel Garcia 		dev_err(dma_chan->device->dev,
763b8c01d25SEzequiel Garcia 			"Self-test cannot prepare operation, disabling\n");
764b8c01d25SEzequiel Garcia 		err = -ENODEV;
765b8c01d25SEzequiel Garcia 		goto free_resources;
766b8c01d25SEzequiel Garcia 	}
767b8c01d25SEzequiel Garcia 
768ff7b0479SSaeed Bishara 	cookie = mv_xor_tx_submit(tx);
769b8c01d25SEzequiel Garcia 	if (dma_submit_error(cookie)) {
770b8c01d25SEzequiel Garcia 		dev_err(dma_chan->device->dev,
771b8c01d25SEzequiel Garcia 			"Self-test submit error, disabling\n");
772b8c01d25SEzequiel Garcia 		err = -ENODEV;
773b8c01d25SEzequiel Garcia 		goto free_resources;
774b8c01d25SEzequiel Garcia 	}
775b8c01d25SEzequiel Garcia 
776ff7b0479SSaeed Bishara 	mv_xor_issue_pending(dma_chan);
777ff7b0479SSaeed Bishara 	async_tx_ack(tx);
778ff7b0479SSaeed Bishara 	msleep(1);
779ff7b0479SSaeed Bishara 
78007934481SLinus Walleij 	if (mv_xor_status(dma_chan, cookie, NULL) !=
781b3efb8fcSVinod Koul 	    DMA_COMPLETE) {
782a3fc74bcSThomas Petazzoni 		dev_err(dma_chan->device->dev,
783ff7b0479SSaeed Bishara 			"Self-test copy timed out, disabling\n");
784ff7b0479SSaeed Bishara 		err = -ENODEV;
785ff7b0479SSaeed Bishara 		goto free_resources;
786ff7b0479SSaeed Bishara 	}
787ff7b0479SSaeed Bishara 
788c35064c4SThomas Petazzoni 	dma_sync_single_for_cpu(dma_chan->device->dev, dest_dma,
789d16695a7SEzequiel Garcia 				PAGE_SIZE, DMA_FROM_DEVICE);
790d16695a7SEzequiel Garcia 	if (memcmp(src, dest, PAGE_SIZE)) {
791a3fc74bcSThomas Petazzoni 		dev_err(dma_chan->device->dev,
792ff7b0479SSaeed Bishara 			"Self-test copy failed compare, disabling\n");
793ff7b0479SSaeed Bishara 		err = -ENODEV;
794ff7b0479SSaeed Bishara 		goto free_resources;
795ff7b0479SSaeed Bishara 	}
796ff7b0479SSaeed Bishara 
797ff7b0479SSaeed Bishara free_resources:
798d16695a7SEzequiel Garcia 	dmaengine_unmap_put(unmap);
799ff7b0479SSaeed Bishara 	mv_xor_free_chan_resources(dma_chan);
800ff7b0479SSaeed Bishara out:
801ff7b0479SSaeed Bishara 	kfree(src);
802ff7b0479SSaeed Bishara 	kfree(dest);
803ff7b0479SSaeed Bishara 	return err;
804ff7b0479SSaeed Bishara }
805ff7b0479SSaeed Bishara 
806ff7b0479SSaeed Bishara #define MV_XOR_NUM_SRC_TEST 4 /* must be <= 15 */
807463a1f8bSBill Pemberton static int
8080951e728SMaxime Ripard mv_chan_xor_self_test(struct mv_xor_chan *mv_chan)
809ff7b0479SSaeed Bishara {
810b8c01d25SEzequiel Garcia 	int i, src_idx, ret;
811ff7b0479SSaeed Bishara 	struct page *dest;
812ff7b0479SSaeed Bishara 	struct page *xor_srcs[MV_XOR_NUM_SRC_TEST];
813ff7b0479SSaeed Bishara 	dma_addr_t dma_srcs[MV_XOR_NUM_SRC_TEST];
814ff7b0479SSaeed Bishara 	dma_addr_t dest_dma;
815ff7b0479SSaeed Bishara 	struct dma_async_tx_descriptor *tx;
816d16695a7SEzequiel Garcia 	struct dmaengine_unmap_data *unmap;
817ff7b0479SSaeed Bishara 	struct dma_chan *dma_chan;
818ff7b0479SSaeed Bishara 	dma_cookie_t cookie;
819ff7b0479SSaeed Bishara 	u8 cmp_byte = 0;
820ff7b0479SSaeed Bishara 	u32 cmp_word;
821ff7b0479SSaeed Bishara 	int err = 0;
822d16695a7SEzequiel Garcia 	int src_count = MV_XOR_NUM_SRC_TEST;
823ff7b0479SSaeed Bishara 
824d16695a7SEzequiel Garcia 	for (src_idx = 0; src_idx < src_count; src_idx++) {
825ff7b0479SSaeed Bishara 		xor_srcs[src_idx] = alloc_page(GFP_KERNEL);
826a09b09aeSRoel Kluin 		if (!xor_srcs[src_idx]) {
827a09b09aeSRoel Kluin 			while (src_idx--)
828ff7b0479SSaeed Bishara 				__free_page(xor_srcs[src_idx]);
829ff7b0479SSaeed Bishara 			return -ENOMEM;
830ff7b0479SSaeed Bishara 		}
831ff7b0479SSaeed Bishara 	}
832ff7b0479SSaeed Bishara 
833ff7b0479SSaeed Bishara 	dest = alloc_page(GFP_KERNEL);
834a09b09aeSRoel Kluin 	if (!dest) {
835a09b09aeSRoel Kluin 		while (src_idx--)
836ff7b0479SSaeed Bishara 			__free_page(xor_srcs[src_idx]);
837ff7b0479SSaeed Bishara 		return -ENOMEM;
838ff7b0479SSaeed Bishara 	}
839ff7b0479SSaeed Bishara 
840ff7b0479SSaeed Bishara 	/* Fill in src buffers */
841d16695a7SEzequiel Garcia 	for (src_idx = 0; src_idx < src_count; src_idx++) {
842ff7b0479SSaeed Bishara 		u8 *ptr = page_address(xor_srcs[src_idx]);
843ff7b0479SSaeed Bishara 		for (i = 0; i < PAGE_SIZE; i++)
844ff7b0479SSaeed Bishara 			ptr[i] = (1 << src_idx);
845ff7b0479SSaeed Bishara 	}
846ff7b0479SSaeed Bishara 
847d16695a7SEzequiel Garcia 	for (src_idx = 0; src_idx < src_count; src_idx++)
848ff7b0479SSaeed Bishara 		cmp_byte ^= (u8) (1 << src_idx);
849ff7b0479SSaeed Bishara 
850ff7b0479SSaeed Bishara 	cmp_word = (cmp_byte << 24) | (cmp_byte << 16) |
851ff7b0479SSaeed Bishara 		(cmp_byte << 8) | cmp_byte;
852ff7b0479SSaeed Bishara 
853ff7b0479SSaeed Bishara 	memset(page_address(dest), 0, PAGE_SIZE);
854ff7b0479SSaeed Bishara 
855275cc0c8SThomas Petazzoni 	dma_chan = &mv_chan->dmachan;
856aa1e6f1aSDan Williams 	if (mv_xor_alloc_chan_resources(dma_chan) < 1) {
857ff7b0479SSaeed Bishara 		err = -ENODEV;
858ff7b0479SSaeed Bishara 		goto out;
859ff7b0479SSaeed Bishara 	}
860ff7b0479SSaeed Bishara 
861d16695a7SEzequiel Garcia 	unmap = dmaengine_get_unmap_data(dma_chan->device->dev, src_count + 1,
862d16695a7SEzequiel Garcia 					 GFP_KERNEL);
863d16695a7SEzequiel Garcia 	if (!unmap) {
864d16695a7SEzequiel Garcia 		err = -ENOMEM;
865d16695a7SEzequiel Garcia 		goto free_resources;
866d16695a7SEzequiel Garcia 	}
867ff7b0479SSaeed Bishara 
868d16695a7SEzequiel Garcia 	/* test xor */
869d16695a7SEzequiel Garcia 	for (i = 0; i < src_count; i++) {
870d16695a7SEzequiel Garcia 		unmap->addr[i] = dma_map_page(dma_chan->device->dev, xor_srcs[i],
871ff7b0479SSaeed Bishara 					      0, PAGE_SIZE, DMA_TO_DEVICE);
872d16695a7SEzequiel Garcia 		dma_srcs[i] = unmap->addr[i];
873b8c01d25SEzequiel Garcia 		ret = dma_mapping_error(dma_chan->device->dev, unmap->addr[i]);
874b8c01d25SEzequiel Garcia 		if (ret) {
875b8c01d25SEzequiel Garcia 			err = -ENOMEM;
876b8c01d25SEzequiel Garcia 			goto free_resources;
877b8c01d25SEzequiel Garcia 		}
878d16695a7SEzequiel Garcia 		unmap->to_cnt++;
879d16695a7SEzequiel Garcia 	}
880d16695a7SEzequiel Garcia 
881d16695a7SEzequiel Garcia 	unmap->addr[src_count] = dma_map_page(dma_chan->device->dev, dest, 0, PAGE_SIZE,
882d16695a7SEzequiel Garcia 				      DMA_FROM_DEVICE);
883d16695a7SEzequiel Garcia 	dest_dma = unmap->addr[src_count];
884b8c01d25SEzequiel Garcia 	ret = dma_mapping_error(dma_chan->device->dev, unmap->addr[src_count]);
885b8c01d25SEzequiel Garcia 	if (ret) {
886b8c01d25SEzequiel Garcia 		err = -ENOMEM;
887b8c01d25SEzequiel Garcia 		goto free_resources;
888b8c01d25SEzequiel Garcia 	}
889d16695a7SEzequiel Garcia 	unmap->from_cnt = 1;
890d16695a7SEzequiel Garcia 	unmap->len = PAGE_SIZE;
891ff7b0479SSaeed Bishara 
892ff7b0479SSaeed Bishara 	tx = mv_xor_prep_dma_xor(dma_chan, dest_dma, dma_srcs,
893d16695a7SEzequiel Garcia 				 src_count, PAGE_SIZE, 0);
894b8c01d25SEzequiel Garcia 	if (!tx) {
895b8c01d25SEzequiel Garcia 		dev_err(dma_chan->device->dev,
896b8c01d25SEzequiel Garcia 			"Self-test cannot prepare operation, disabling\n");
897b8c01d25SEzequiel Garcia 		err = -ENODEV;
898b8c01d25SEzequiel Garcia 		goto free_resources;
899b8c01d25SEzequiel Garcia 	}
900ff7b0479SSaeed Bishara 
901ff7b0479SSaeed Bishara 	cookie = mv_xor_tx_submit(tx);
902b8c01d25SEzequiel Garcia 	if (dma_submit_error(cookie)) {
903b8c01d25SEzequiel Garcia 		dev_err(dma_chan->device->dev,
904b8c01d25SEzequiel Garcia 			"Self-test submit error, disabling\n");
905b8c01d25SEzequiel Garcia 		err = -ENODEV;
906b8c01d25SEzequiel Garcia 		goto free_resources;
907b8c01d25SEzequiel Garcia 	}
908b8c01d25SEzequiel Garcia 
909ff7b0479SSaeed Bishara 	mv_xor_issue_pending(dma_chan);
910ff7b0479SSaeed Bishara 	async_tx_ack(tx);
911ff7b0479SSaeed Bishara 	msleep(8);
912ff7b0479SSaeed Bishara 
91307934481SLinus Walleij 	if (mv_xor_status(dma_chan, cookie, NULL) !=
914b3efb8fcSVinod Koul 	    DMA_COMPLETE) {
915a3fc74bcSThomas Petazzoni 		dev_err(dma_chan->device->dev,
916ff7b0479SSaeed Bishara 			"Self-test xor timed out, disabling\n");
917ff7b0479SSaeed Bishara 		err = -ENODEV;
918ff7b0479SSaeed Bishara 		goto free_resources;
919ff7b0479SSaeed Bishara 	}
920ff7b0479SSaeed Bishara 
921c35064c4SThomas Petazzoni 	dma_sync_single_for_cpu(dma_chan->device->dev, dest_dma,
922ff7b0479SSaeed Bishara 				PAGE_SIZE, DMA_FROM_DEVICE);
923ff7b0479SSaeed Bishara 	for (i = 0; i < (PAGE_SIZE / sizeof(u32)); i++) {
924ff7b0479SSaeed Bishara 		u32 *ptr = page_address(dest);
925ff7b0479SSaeed Bishara 		if (ptr[i] != cmp_word) {
926a3fc74bcSThomas Petazzoni 			dev_err(dma_chan->device->dev,
9271ba151cdSJoe Perches 				"Self-test xor failed compare, disabling. index %d, data %x, expected %x\n",
9281ba151cdSJoe Perches 				i, ptr[i], cmp_word);
929ff7b0479SSaeed Bishara 			err = -ENODEV;
930ff7b0479SSaeed Bishara 			goto free_resources;
931ff7b0479SSaeed Bishara 		}
932ff7b0479SSaeed Bishara 	}
933ff7b0479SSaeed Bishara 
934ff7b0479SSaeed Bishara free_resources:
935d16695a7SEzequiel Garcia 	dmaengine_unmap_put(unmap);
936ff7b0479SSaeed Bishara 	mv_xor_free_chan_resources(dma_chan);
937ff7b0479SSaeed Bishara out:
938d16695a7SEzequiel Garcia 	src_idx = src_count;
939ff7b0479SSaeed Bishara 	while (src_idx--)
940ff7b0479SSaeed Bishara 		__free_page(xor_srcs[src_idx]);
941ff7b0479SSaeed Bishara 	__free_page(dest);
942ff7b0479SSaeed Bishara 	return err;
943ff7b0479SSaeed Bishara }
944ff7b0479SSaeed Bishara 
9451ef48a26SThomas Petazzoni static int mv_xor_channel_remove(struct mv_xor_chan *mv_chan)
946ff7b0479SSaeed Bishara {
947ff7b0479SSaeed Bishara 	struct dma_chan *chan, *_chan;
9481ef48a26SThomas Petazzoni 	struct device *dev = mv_chan->dmadev.dev;
949ff7b0479SSaeed Bishara 
9501ef48a26SThomas Petazzoni 	dma_async_device_unregister(&mv_chan->dmadev);
951ff7b0479SSaeed Bishara 
952b503fa01SThomas Petazzoni 	dma_free_coherent(dev, MV_XOR_POOL_SIZE,
9531ef48a26SThomas Petazzoni 			  mv_chan->dma_desc_pool_virt, mv_chan->dma_desc_pool);
95422843545SLior Amsalem 	dma_unmap_single(dev, mv_chan->dummy_src_addr,
95522843545SLior Amsalem 			 MV_XOR_MIN_BYTE_COUNT, DMA_FROM_DEVICE);
95622843545SLior Amsalem 	dma_unmap_single(dev, mv_chan->dummy_dst_addr,
95722843545SLior Amsalem 			 MV_XOR_MIN_BYTE_COUNT, DMA_TO_DEVICE);
958ff7b0479SSaeed Bishara 
9591ef48a26SThomas Petazzoni 	list_for_each_entry_safe(chan, _chan, &mv_chan->dmadev.channels,
960ff7b0479SSaeed Bishara 				 device_node) {
961ff7b0479SSaeed Bishara 		list_del(&chan->device_node);
962ff7b0479SSaeed Bishara 	}
963ff7b0479SSaeed Bishara 
96488eb92cbSThomas Petazzoni 	free_irq(mv_chan->irq, mv_chan);
96588eb92cbSThomas Petazzoni 
966ff7b0479SSaeed Bishara 	return 0;
967ff7b0479SSaeed Bishara }
968ff7b0479SSaeed Bishara 
9691ef48a26SThomas Petazzoni static struct mv_xor_chan *
970297eedbaSThomas Petazzoni mv_xor_channel_add(struct mv_xor_device *xordev,
971a6b4a9d2SThomas Petazzoni 		   struct platform_device *pdev,
9726f166312SLior Amsalem 		   int idx, dma_cap_mask_t cap_mask, int irq, int op_in_desc)
973ff7b0479SSaeed Bishara {
974ff7b0479SSaeed Bishara 	int ret = 0;
975ff7b0479SSaeed Bishara 	struct mv_xor_chan *mv_chan;
976ff7b0479SSaeed Bishara 	struct dma_device *dma_dev;
977ff7b0479SSaeed Bishara 
9781ef48a26SThomas Petazzoni 	mv_chan = devm_kzalloc(&pdev->dev, sizeof(*mv_chan), GFP_KERNEL);
979a577659fSSachin Kamat 	if (!mv_chan)
980a577659fSSachin Kamat 		return ERR_PTR(-ENOMEM);
981ff7b0479SSaeed Bishara 
9829aedbdbaSThomas Petazzoni 	mv_chan->idx = idx;
98388eb92cbSThomas Petazzoni 	mv_chan->irq = irq;
9846f166312SLior Amsalem 	mv_chan->op_in_desc = op_in_desc;
985ff7b0479SSaeed Bishara 
9861ef48a26SThomas Petazzoni 	dma_dev = &mv_chan->dmadev;
987ff7b0479SSaeed Bishara 
98822843545SLior Amsalem 	/*
98922843545SLior Amsalem 	 * These source and destination dummy buffers are used to implement
99022843545SLior Amsalem 	 * a DMA_INTERRUPT operation as a minimum-sized XOR operation.
99122843545SLior Amsalem 	 * Hence, we only need to map the buffers at initialization-time.
99222843545SLior Amsalem 	 */
99322843545SLior Amsalem 	mv_chan->dummy_src_addr = dma_map_single(dma_dev->dev,
99422843545SLior Amsalem 		mv_chan->dummy_src, MV_XOR_MIN_BYTE_COUNT, DMA_FROM_DEVICE);
99522843545SLior Amsalem 	mv_chan->dummy_dst_addr = dma_map_single(dma_dev->dev,
99622843545SLior Amsalem 		mv_chan->dummy_dst, MV_XOR_MIN_BYTE_COUNT, DMA_TO_DEVICE);
99722843545SLior Amsalem 
998ff7b0479SSaeed Bishara 	/* allocate coherent memory for hardware descriptors
999ff7b0479SSaeed Bishara 	 * note: writecombine gives slightly better performance, but
1000ff7b0479SSaeed Bishara 	 * requires that we explicitly flush the writes
1001ff7b0479SSaeed Bishara 	 */
10021ef48a26SThomas Petazzoni 	mv_chan->dma_desc_pool_virt =
1003b503fa01SThomas Petazzoni 	  dma_alloc_writecombine(&pdev->dev, MV_XOR_POOL_SIZE,
10041ef48a26SThomas Petazzoni 				 &mv_chan->dma_desc_pool, GFP_KERNEL);
10051ef48a26SThomas Petazzoni 	if (!mv_chan->dma_desc_pool_virt)
1006a6b4a9d2SThomas Petazzoni 		return ERR_PTR(-ENOMEM);
1007ff7b0479SSaeed Bishara 
1008ff7b0479SSaeed Bishara 	/* discover transaction capabilities from the platform data */
1009a6b4a9d2SThomas Petazzoni 	dma_dev->cap_mask = cap_mask;
1010ff7b0479SSaeed Bishara 
1011ff7b0479SSaeed Bishara 	INIT_LIST_HEAD(&dma_dev->channels);
1012ff7b0479SSaeed Bishara 
1013ff7b0479SSaeed Bishara 	/* set base routines */
1014ff7b0479SSaeed Bishara 	dma_dev->device_alloc_chan_resources = mv_xor_alloc_chan_resources;
1015ff7b0479SSaeed Bishara 	dma_dev->device_free_chan_resources = mv_xor_free_chan_resources;
101607934481SLinus Walleij 	dma_dev->device_tx_status = mv_xor_status;
1017ff7b0479SSaeed Bishara 	dma_dev->device_issue_pending = mv_xor_issue_pending;
1018ff7b0479SSaeed Bishara 	dma_dev->dev = &pdev->dev;
1019ff7b0479SSaeed Bishara 
1020ff7b0479SSaeed Bishara 	/* set prep routines based on capability */
102122843545SLior Amsalem 	if (dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask))
102222843545SLior Amsalem 		dma_dev->device_prep_dma_interrupt = mv_xor_prep_dma_interrupt;
1023ff7b0479SSaeed Bishara 	if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask))
1024ff7b0479SSaeed Bishara 		dma_dev->device_prep_dma_memcpy = mv_xor_prep_dma_memcpy;
1025ff7b0479SSaeed Bishara 	if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
1026c019894eSJoe Perches 		dma_dev->max_xor = 8;
1027ff7b0479SSaeed Bishara 		dma_dev->device_prep_dma_xor = mv_xor_prep_dma_xor;
1028ff7b0479SSaeed Bishara 	}
1029ff7b0479SSaeed Bishara 
1030297eedbaSThomas Petazzoni 	mv_chan->mmr_base = xordev->xor_base;
103182a1402eSEzequiel Garcia 	mv_chan->mmr_high_base = xordev->xor_high_base;
1032ff7b0479SSaeed Bishara 	tasklet_init(&mv_chan->irq_tasklet, mv_xor_tasklet, (unsigned long)
1033ff7b0479SSaeed Bishara 		     mv_chan);
1034ff7b0479SSaeed Bishara 
1035ff7b0479SSaeed Bishara 	/* clear errors before enabling interrupts */
10360951e728SMaxime Ripard 	mv_chan_clear_err_status(mv_chan);
1037ff7b0479SSaeed Bishara 
10382d0a0745SThomas Petazzoni 	ret = request_irq(mv_chan->irq, mv_xor_interrupt_handler,
1039ff7b0479SSaeed Bishara 			  0, dev_name(&pdev->dev), mv_chan);
1040ff7b0479SSaeed Bishara 	if (ret)
1041ff7b0479SSaeed Bishara 		goto err_free_dma;
1042ff7b0479SSaeed Bishara 
1043ff7b0479SSaeed Bishara 	mv_chan_unmask_interrupts(mv_chan);
1044ff7b0479SSaeed Bishara 
10456f166312SLior Amsalem 	if (mv_chan->op_in_desc == XOR_MODE_IN_DESC)
10466f166312SLior Amsalem 		mv_chan_set_mode_to_desc(mv_chan);
10476f166312SLior Amsalem 	else
10480951e728SMaxime Ripard 		mv_chan_set_mode(mv_chan, DMA_XOR);
1049ff7b0479SSaeed Bishara 
1050ff7b0479SSaeed Bishara 	spin_lock_init(&mv_chan->lock);
1051ff7b0479SSaeed Bishara 	INIT_LIST_HEAD(&mv_chan->chain);
1052ff7b0479SSaeed Bishara 	INIT_LIST_HEAD(&mv_chan->completed_slots);
1053fbea28a2SLior Amsalem 	INIT_LIST_HEAD(&mv_chan->free_slots);
1054fbea28a2SLior Amsalem 	INIT_LIST_HEAD(&mv_chan->allocated_slots);
105598817b99SThomas Petazzoni 	mv_chan->dmachan.device = dma_dev;
105698817b99SThomas Petazzoni 	dma_cookie_init(&mv_chan->dmachan);
1057ff7b0479SSaeed Bishara 
105898817b99SThomas Petazzoni 	list_add_tail(&mv_chan->dmachan.device_node, &dma_dev->channels);
1059ff7b0479SSaeed Bishara 
1060ff7b0479SSaeed Bishara 	if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask)) {
10610951e728SMaxime Ripard 		ret = mv_chan_memcpy_self_test(mv_chan);
1062ff7b0479SSaeed Bishara 		dev_dbg(&pdev->dev, "memcpy self test returned %d\n", ret);
1063ff7b0479SSaeed Bishara 		if (ret)
10642d0a0745SThomas Petazzoni 			goto err_free_irq;
1065ff7b0479SSaeed Bishara 	}
1066ff7b0479SSaeed Bishara 
1067ff7b0479SSaeed Bishara 	if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
10680951e728SMaxime Ripard 		ret = mv_chan_xor_self_test(mv_chan);
1069ff7b0479SSaeed Bishara 		dev_dbg(&pdev->dev, "xor self test returned %d\n", ret);
1070ff7b0479SSaeed Bishara 		if (ret)
10712d0a0745SThomas Petazzoni 			goto err_free_irq;
1072ff7b0479SSaeed Bishara 	}
1073ff7b0479SSaeed Bishara 
10746f166312SLior Amsalem 	dev_info(&pdev->dev, "Marvell XOR (%s): ( %s%s%s)\n",
10756f166312SLior Amsalem 		 mv_chan->op_in_desc ? "Descriptor Mode" : "Registers Mode",
1076ff7b0479SSaeed Bishara 		 dma_has_cap(DMA_XOR, dma_dev->cap_mask) ? "xor " : "",
1077ff7b0479SSaeed Bishara 		 dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask) ? "cpy " : "",
1078ff7b0479SSaeed Bishara 		 dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask) ? "intr " : "");
1079ff7b0479SSaeed Bishara 
1080ff7b0479SSaeed Bishara 	dma_async_device_register(dma_dev);
10811ef48a26SThomas Petazzoni 	return mv_chan;
1082ff7b0479SSaeed Bishara 
10832d0a0745SThomas Petazzoni err_free_irq:
10842d0a0745SThomas Petazzoni 	free_irq(mv_chan->irq, mv_chan);
1085ff7b0479SSaeed Bishara  err_free_dma:
1086b503fa01SThomas Petazzoni 	dma_free_coherent(&pdev->dev, MV_XOR_POOL_SIZE,
10871ef48a26SThomas Petazzoni 			  mv_chan->dma_desc_pool_virt, mv_chan->dma_desc_pool);
1088a6b4a9d2SThomas Petazzoni 	return ERR_PTR(ret);
1089ff7b0479SSaeed Bishara }
1090ff7b0479SSaeed Bishara 
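/*
 * Program the XOR engine's address decoding windows so that its DMA
 * accesses see the same DRAM layout as the CPU: all eight windows are
 * first cleared, then one window is opened per DRAM chip select
 * described by the MBus DRAM target info.
 */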
1091ff7b0479SSaeed Bishara static void
1092297eedbaSThomas Petazzoni mv_xor_conf_mbus_windows(struct mv_xor_device *xordev,
109363a9332bSAndrew Lunn 			 const struct mbus_dram_target_info *dram)
1094ff7b0479SSaeed Bishara {
109582a1402eSEzequiel Garcia 	void __iomem *base = xordev->xor_high_base;
1096ff7b0479SSaeed Bishara 	u32 win_enable = 0;
1097ff7b0479SSaeed Bishara 	int i;
1098ff7b0479SSaeed Bishara 
1099ff7b0479SSaeed Bishara 	for (i = 0; i < 8; i++) {
1100ff7b0479SSaeed Bishara 		writel(0, base + WINDOW_BASE(i));
1101ff7b0479SSaeed Bishara 		writel(0, base + WINDOW_SIZE(i));
1102ff7b0479SSaeed Bishara 		if (i < 4)
1103ff7b0479SSaeed Bishara 			writel(0, base + WINDOW_REMAP_HIGH(i));
1104ff7b0479SSaeed Bishara 	}
1105ff7b0479SSaeed Bishara 
1106ff7b0479SSaeed Bishara 	for (i = 0; i < dram->num_cs; i++) {
110763a9332bSAndrew Lunn 		const struct mbus_dram_window *cs = dram->cs + i;
1108ff7b0479SSaeed Bishara 
1109ff7b0479SSaeed Bishara 		writel((cs->base & 0xffff0000) |
1110ff7b0479SSaeed Bishara 		       (cs->mbus_attr << 8) |
1111ff7b0479SSaeed Bishara 		       dram->mbus_dram_target_id, base + WINDOW_BASE(i));
1112ff7b0479SSaeed Bishara 		writel((cs->size - 1) & 0xffff0000, base + WINDOW_SIZE(i));
1113ff7b0479SSaeed Bishara 
1114ff7b0479SSaeed Bishara 		win_enable |= (1 << i);
1115ff7b0479SSaeed Bishara 		win_enable |= 3 << (16 + (2 * i));
1116ff7b0479SSaeed Bishara 	}
1117ff7b0479SSaeed Bishara 
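	/*
	 * Enable the windows configured above on both channels of the
	 * engine and clear the window override control registers.
	 */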
1118ff7b0479SSaeed Bishara 	writel(win_enable, base + WINDOW_BAR_ENABLE(0));
1119ff7b0479SSaeed Bishara 	writel(win_enable, base + WINDOW_BAR_ENABLE(1));
1120c4b4b732SThomas Petazzoni 	writel(0, base + WINDOW_OVERRIDE_CTRL(0));
1121c4b4b732SThomas Petazzoni 	writel(0, base + WINDOW_OVERRIDE_CTRL(1));
1122ff7b0479SSaeed Bishara }
1123ff7b0479SSaeed Bishara 
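/*
 * The .data field selects how the operation mode is programmed:
 * Orion-style engines take it from a per-channel register
 * (XOR_MODE_IN_REG), while Armada 38x engines encode it in each
 * descriptor (XOR_MODE_IN_DESC).
 */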
11246f166312SLior Amsalem static const struct of_device_id mv_xor_dt_ids[] = {
11256f166312SLior Amsalem 	{ .compatible = "marvell,orion-xor", .data = (void *)XOR_MODE_IN_REG },
11266f166312SLior Amsalem 	{ .compatible = "marvell,armada-380-xor", .data = (void *)XOR_MODE_IN_DESC },
11276f166312SLior Amsalem 	{},
11286f166312SLior Amsalem };
11296f166312SLior Amsalem MODULE_DEVICE_TABLE(of, mv_xor_dt_ids);
11306f166312SLior Amsalem 
1131*77757291SThomas Petazzoni static unsigned int mv_xor_engine_count;
1132*77757291SThomas Petazzoni 
1133c2714334SLinus Torvalds static int mv_xor_probe(struct platform_device *pdev)
1134ff7b0479SSaeed Bishara {
113563a9332bSAndrew Lunn 	const struct mbus_dram_target_info *dram;
1136297eedbaSThomas Petazzoni 	struct mv_xor_device *xordev;
1137d4adcc01SJingoo Han 	struct mv_xor_platform_data *pdata = dev_get_platdata(&pdev->dev);
1138ff7b0479SSaeed Bishara 	struct resource *res;
1139*77757291SThomas Petazzoni 	unsigned int max_engines, max_channels;
114060d151f3SThomas Petazzoni 	int i, ret;
11416f166312SLior Amsalem 	int op_in_desc;
1142ff7b0479SSaeed Bishara 
11431ba151cdSJoe Perches 	dev_notice(&pdev->dev, "Marvell shared XOR driver\n");
1144ff7b0479SSaeed Bishara 
1145297eedbaSThomas Petazzoni 	xordev = devm_kzalloc(&pdev->dev, sizeof(*xordev), GFP_KERNEL);
1146297eedbaSThomas Petazzoni 	if (!xordev)
1147ff7b0479SSaeed Bishara 		return -ENOMEM;
1148ff7b0479SSaeed Bishara 
1149ff7b0479SSaeed Bishara 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1150ff7b0479SSaeed Bishara 	if (!res)
1151ff7b0479SSaeed Bishara 		return -ENODEV;
1152ff7b0479SSaeed Bishara 
1153297eedbaSThomas Petazzoni 	xordev->xor_base = devm_ioremap(&pdev->dev, res->start,
11544de1ba15SH Hartley Sweeten 					resource_size(res));
1155297eedbaSThomas Petazzoni 	if (!xordev->xor_base)
1156ff7b0479SSaeed Bishara 		return -EBUSY;
1157ff7b0479SSaeed Bishara 
1158ff7b0479SSaeed Bishara 	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
1159ff7b0479SSaeed Bishara 	if (!res)
1160ff7b0479SSaeed Bishara 		return -ENODEV;
1161ff7b0479SSaeed Bishara 
1162297eedbaSThomas Petazzoni 	xordev->xor_high_base = devm_ioremap(&pdev->dev, res->start,
11634de1ba15SH Hartley Sweeten 					     resource_size(res));
1164297eedbaSThomas Petazzoni 	if (!xordev->xor_high_base)
1165ff7b0479SSaeed Bishara 		return -EBUSY;
1166ff7b0479SSaeed Bishara 
1167297eedbaSThomas Petazzoni 	platform_set_drvdata(pdev, xordev);
1168ff7b0479SSaeed Bishara 
1169ff7b0479SSaeed Bishara 	/*
1170ff7b0479SSaeed Bishara 	 * (Re-)program MBUS remapping windows if we are asked to.
1171ff7b0479SSaeed Bishara 	 */
117263a9332bSAndrew Lunn 	dram = mv_mbus_dram_info();
117363a9332bSAndrew Lunn 	if (dram)
1174297eedbaSThomas Petazzoni 		mv_xor_conf_mbus_windows(xordev, dram);
1175ff7b0479SSaeed Bishara 
1176c510182bSAndrew Lunn 	/* Not all platforms can gate the clock, so it is not
1177c510182bSAndrew Lunn 	 * an error if the clock does not exist.
1178c510182bSAndrew Lunn 	 */
1179297eedbaSThomas Petazzoni 	xordev->clk = clk_get(&pdev->dev, NULL);
1180297eedbaSThomas Petazzoni 	if (!IS_ERR(xordev->clk))
1181297eedbaSThomas Petazzoni 		clk_prepare_enable(xordev->clk);
1182c510182bSAndrew Lunn 
1183*77757291SThomas Petazzoni 	/*
1184*77757291SThomas Petazzoni 	 * For async_tx to perform well we don't want more than one
1185*77757291SThomas Petazzoni 	 * channel per CPU, so limit the number of engines and
1186*77757291SThomas Petazzoni 	 * channels accordingly. Note that we also want to use
1187*77757291SThomas Petazzoni 	 * channels from separate engines when possible.
1188*77757291SThomas Petazzoni 	 */
1190*77757291SThomas Petazzoni 	max_engines = num_present_cpus();
1191*77757291SThomas Petazzoni 	max_channels = min_t(unsigned int,
1192*77757291SThomas Petazzoni 			     MV_XOR_MAX_CHANNELS,
1193*77757291SThomas Petazzoni 			     DIV_ROUND_UP(num_present_cpus(), 2));
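	/*
	 * Example (assuming MV_XOR_MAX_CHANNELS is 2): on a quad-core
	 * SoC this gives max_engines = 4 and max_channels =
	 * min(2, DIV_ROUND_UP(4, 2)) = 2, i.e. at most two channels
	 * per engine and no more engines than CPUs.
	 */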
1194*77757291SThomas Petazzoni 
1195*77757291SThomas Petazzoni 	if (mv_xor_engine_count >= max_engines)
1196*77757291SThomas Petazzoni 		return 0;
1197*77757291SThomas Petazzoni 
1198f7d12ef5SThomas Petazzoni 	if (pdev->dev.of_node) {
1199f7d12ef5SThomas Petazzoni 		struct device_node *np;
1200f7d12ef5SThomas Petazzoni 		int i = 0;
12016f166312SLior Amsalem 		const struct of_device_id *of_id =
12026f166312SLior Amsalem 			of_match_device(mv_xor_dt_ids,
12036f166312SLior Amsalem 					&pdev->dev);
1204f7d12ef5SThomas Petazzoni 
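		/*
		 * In the DT case each channel is described by a child
		 * node of the engine node, with its own interrupt.
		 */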
1205f7d12ef5SThomas Petazzoni 		for_each_child_of_node(pdev->dev.of_node, np) {
12060be8253fSRussell King 			struct mv_xor_chan *chan;
1207f7d12ef5SThomas Petazzoni 			dma_cap_mask_t cap_mask;
1208f7d12ef5SThomas Petazzoni 			int irq;
12096f166312SLior Amsalem 			op_in_desc = (int)of_id->data;
1210f7d12ef5SThomas Petazzoni 
1211*77757291SThomas Petazzoni 			if (i >= max_channels)
1212*77757291SThomas Petazzoni 				continue;
1213*77757291SThomas Petazzoni 
1214f7d12ef5SThomas Petazzoni 			dma_cap_zero(cap_mask);
1215f7d12ef5SThomas Petazzoni 			dma_cap_set(DMA_MEMCPY, cap_mask);
1216f7d12ef5SThomas Petazzoni 			dma_cap_set(DMA_XOR, cap_mask);
1217f7d12ef5SThomas Petazzoni 			dma_cap_set(DMA_INTERRUPT, cap_mask);
1218f7d12ef5SThomas Petazzoni 
1219f7d12ef5SThomas Petazzoni 			irq = irq_of_parse_and_map(np, 0);
1220f8eb9e7dSThomas Petazzoni 			if (!irq) {
1221f8eb9e7dSThomas Petazzoni 				ret = -ENODEV;
1222f7d12ef5SThomas Petazzoni 				goto err_channel_add;
1223f7d12ef5SThomas Petazzoni 			}
1224f7d12ef5SThomas Petazzoni 
12250be8253fSRussell King 			chan = mv_xor_channel_add(xordev, pdev, i,
12266f166312SLior Amsalem 						  cap_mask, irq, op_in_desc);
12270be8253fSRussell King 			if (IS_ERR(chan)) {
12280be8253fSRussell King 				ret = PTR_ERR(chan);
1229f7d12ef5SThomas Petazzoni 				irq_dispose_mapping(irq);
1230f7d12ef5SThomas Petazzoni 				goto err_channel_add;
1231f7d12ef5SThomas Petazzoni 			}
1232f7d12ef5SThomas Petazzoni 
12330be8253fSRussell King 			xordev->channels[i] = chan;
1234f7d12ef5SThomas Petazzoni 			i++;
1235f7d12ef5SThomas Petazzoni 		}
1236f7d12ef5SThomas Petazzoni 	} else if (pdata && pdata->channels) {
1237*77757291SThomas Petazzoni 		for (i = 0; i < max_channels; i++) {
1238e39f6ec1SThomas Petazzoni 			struct mv_xor_channel_data *cd;
12390be8253fSRussell King 			struct mv_xor_chan *chan;
124060d151f3SThomas Petazzoni 			int irq;
124160d151f3SThomas Petazzoni 
124260d151f3SThomas Petazzoni 			cd = &pdata->channels[i];
124360d151f3SThomas Petazzoni 			if (!cd) {
124460d151f3SThomas Petazzoni 				ret = -ENODEV;
124560d151f3SThomas Petazzoni 				goto err_channel_add;
124660d151f3SThomas Petazzoni 			}
124760d151f3SThomas Petazzoni 
124860d151f3SThomas Petazzoni 			irq = platform_get_irq(pdev, i);
124960d151f3SThomas Petazzoni 			if (irq < 0) {
125060d151f3SThomas Petazzoni 				ret = irq;
125160d151f3SThomas Petazzoni 				goto err_channel_add;
125260d151f3SThomas Petazzoni 			}
125360d151f3SThomas Petazzoni 
12540be8253fSRussell King 			chan = mv_xor_channel_add(xordev, pdev, i,
12556f166312SLior Amsalem 						  cd->cap_mask, irq,
12566f166312SLior Amsalem 						  XOR_MODE_IN_REG);
12570be8253fSRussell King 			if (IS_ERR(chan)) {
12580be8253fSRussell King 				ret = PTR_ERR(chan);
125960d151f3SThomas Petazzoni 				goto err_channel_add;
126060d151f3SThomas Petazzoni 			}
12610be8253fSRussell King 
12620be8253fSRussell King 			xordev->channels[i] = chan;
126360d151f3SThomas Petazzoni 		}
126460d151f3SThomas Petazzoni 	}
126560d151f3SThomas Petazzoni 
1266ff7b0479SSaeed Bishara 	return 0;
126760d151f3SThomas Petazzoni 
126860d151f3SThomas Petazzoni err_channel_add:
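	/*
	 * Tear down any channels that were already registered and, in
	 * the DT case, release their IRQ mappings.
	 */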
126960d151f3SThomas Petazzoni 	for (i = 0; i < MV_XOR_MAX_CHANNELS; i++)
1270f7d12ef5SThomas Petazzoni 		if (xordev->channels[i]) {
1271ab6e439fSThomas Petazzoni 			mv_xor_channel_remove(xordev->channels[i]);
1272f7d12ef5SThomas Petazzoni 			if (pdev->dev.of_node)
1273f7d12ef5SThomas Petazzoni 				irq_dispose_mapping(xordev->channels[i]->irq);
1274f7d12ef5SThomas Petazzoni 		}
127560d151f3SThomas Petazzoni 
1276dab92064SThomas Petazzoni 	if (!IS_ERR(xordev->clk)) {
1277297eedbaSThomas Petazzoni 		clk_disable_unprepare(xordev->clk);
1278297eedbaSThomas Petazzoni 		clk_put(xordev->clk);
1279dab92064SThomas Petazzoni 	}
1280dab92064SThomas Petazzoni 
128160d151f3SThomas Petazzoni 	return ret;
1282ff7b0479SSaeed Bishara }
1283ff7b0479SSaeed Bishara 
1284c2714334SLinus Torvalds static int mv_xor_remove(struct platform_device *pdev)
1285ff7b0479SSaeed Bishara {
1286297eedbaSThomas Petazzoni 	struct mv_xor_device *xordev = platform_get_drvdata(pdev);
128760d151f3SThomas Petazzoni 	int i;
128860d151f3SThomas Petazzoni 
128960d151f3SThomas Petazzoni 	for (i = 0; i < MV_XOR_MAX_CHANNELS; i++) {
1290297eedbaSThomas Petazzoni 		if (xordev->channels[i])
1291297eedbaSThomas Petazzoni 			mv_xor_channel_remove(xordev->channels[i]);
129260d151f3SThomas Petazzoni 	}
1293c510182bSAndrew Lunn 
1294297eedbaSThomas Petazzoni 	if (!IS_ERR(xordev->clk)) {
1295297eedbaSThomas Petazzoni 		clk_disable_unprepare(xordev->clk);
1296297eedbaSThomas Petazzoni 		clk_put(xordev->clk);
1297c510182bSAndrew Lunn 	}
1298c510182bSAndrew Lunn 
1299ff7b0479SSaeed Bishara 	return 0;
1300ff7b0479SSaeed Bishara }
1301ff7b0479SSaeed Bishara 
1302ff7b0479SSaeed Bishara static struct platform_driver mv_xor_driver = {
1303ff7b0479SSaeed Bishara 	.probe		= mv_xor_probe,
1304a7d6e3ecSBill Pemberton 	.remove		= mv_xor_remove,
1305ff7b0479SSaeed Bishara 	.driver		= {
1306ff7b0479SSaeed Bishara 		.name	        = MV_XOR_NAME,
1307f7d12ef5SThomas Petazzoni 		.of_match_table = of_match_ptr(mv_xor_dt_ids),
1308ff7b0479SSaeed Bishara 	},
1309ff7b0479SSaeed Bishara };
1310ff7b0479SSaeed Bishara 
1311ff7b0479SSaeed Bishara 
1312ff7b0479SSaeed Bishara static int __init mv_xor_init(void)
1313ff7b0479SSaeed Bishara {
131461971656SThomas Petazzoni 	return platform_driver_register(&mv_xor_driver);
1315ff7b0479SSaeed Bishara }
1316ff7b0479SSaeed Bishara module_init(mv_xor_init);
1317ff7b0479SSaeed Bishara 
1318ff7b0479SSaeed Bishara /* it's currently unsafe to unload this module */
1319ff7b0479SSaeed Bishara #if 0
1320ff7b0479SSaeed Bishara static void __exit mv_xor_exit(void)
1321ff7b0479SSaeed Bishara {
1322ff7b0479SSaeed Bishara 	platform_driver_unregister(&mv_xor_driver);
1323ff7b0479SSaeed Bishara 	return;
1324ff7b0479SSaeed Bishara }
1325ff7b0479SSaeed Bishara 
1326ff7b0479SSaeed Bishara module_exit(mv_xor_exit);
1327ff7b0479SSaeed Bishara #endif
1328ff7b0479SSaeed Bishara 
1329ff7b0479SSaeed Bishara MODULE_AUTHOR("Saeed Bishara <saeed@marvell.com>");
1330ff7b0479SSaeed Bishara MODULE_DESCRIPTION("DMA engine driver for Marvell's XOR engine");
1331ff7b0479SSaeed Bishara MODULE_LICENSE("GPL");
1332