/*
 * offload engine driver for the Marvell XOR engine
 * Copyright (C) 2007, 2008, Marvell International Ltd.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */

#include <linux/init.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/memory.h>
#include <linux/clk.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/irqdomain.h>
#include <linux/cpumask.h>
#include <linux/platform_data/dma-mv_xor.h>

#include "dmaengine.h"
#include "mv_xor.h"

enum mv_xor_type {
	XOR_ORION,
	XOR_ARMADA_38X,
	XOR_ARMADA_37XX,
};

enum mv_xor_mode {
	XOR_MODE_IN_REG,
	XOR_MODE_IN_DESC,
};

static void mv_xor_issue_pending(struct dma_chan *chan);

#define to_mv_xor_chan(chan)		\
	container_of(chan, struct mv_xor_chan, dmachan)

#define to_mv_xor_slot(tx)		\
	container_of(tx, struct mv_xor_desc_slot, async_tx)

#define mv_chan_to_devp(chan)           \
	((chan)->dmadev.dev)

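/*
 * mv_desc_init - initialize a hardware descriptor: mark it as owned by
 * the DMA engine, clear the next-descriptor link and program the
 * destination address and byte count.
 */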
static void mv_desc_init(struct mv_xor_desc_slot *desc,
			 dma_addr_t addr, u32 byte_count,
			 enum dma_ctrl_flags flags)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;

	hw_desc->status = XOR_DESC_DMA_OWNED;
	hw_desc->phy_next_desc = 0;
	/* Enable end-of-descriptor interrupts only for DMA_PREP_INTERRUPT */
	hw_desc->desc_command = (flags & DMA_PREP_INTERRUPT) ?
				XOR_DESC_EOD_INT_EN : 0;
	hw_desc->phy_dest_addr = addr;
	hw_desc->byte_count = byte_count;
}

static void mv_desc_set_mode(struct mv_xor_desc_slot *desc)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;

	switch (desc->type) {
	case DMA_XOR:
	case DMA_INTERRUPT:
		hw_desc->desc_command |= XOR_DESC_OPERATION_XOR;
		break;
	case DMA_MEMCPY:
		hw_desc->desc_command |= XOR_DESC_OPERATION_MEMCPY;
		break;
	default:
		BUG();
		return;
	}
}

static void mv_desc_set_next_desc(struct mv_xor_desc_slot *desc,
				  u32 next_desc_addr)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;
	BUG_ON(hw_desc->phy_next_desc);
	hw_desc->phy_next_desc = next_desc_addr;
}

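/*
 * mv_desc_set_src_addr - program source address @index of the hardware
 * descriptor; for XOR operations the matching source-enable bit in the
 * descriptor command field is set as well.
 */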
static void mv_desc_set_src_addr(struct mv_xor_desc_slot *desc,
				 int index, dma_addr_t addr)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;
	hw_desc->phy_src_addr[mv_phy_src_idx(index)] = addr;
	if (desc->type == DMA_XOR)
		hw_desc->desc_command |= (1 << index);
}

static u32 mv_chan_get_current_desc(struct mv_xor_chan *chan)
{
	return readl_relaxed(XOR_CURR_DESC(chan));
}

static void mv_chan_set_next_descriptor(struct mv_xor_chan *chan,
					u32 next_desc_addr)
{
	writel_relaxed(next_desc_addr, XOR_NEXT_DESC(chan));
}

static void mv_chan_unmask_interrupts(struct mv_xor_chan *chan)
{
	u32 val = readl_relaxed(XOR_INTR_MASK(chan));
	val |= XOR_INTR_MASK_VALUE << (chan->idx * 16);
	writel_relaxed(val, XOR_INTR_MASK(chan));
}

static u32 mv_chan_get_intr_cause(struct mv_xor_chan *chan)
{
	u32 intr_cause = readl_relaxed(XOR_INTR_CAUSE(chan));
	intr_cause = (intr_cause >> (chan->idx * 16)) & 0xFFFF;
	return intr_cause;
}

static void mv_chan_clear_eoc_cause(struct mv_xor_chan *chan)
{
	u32 val;

	val = XOR_INT_END_OF_DESC | XOR_INT_END_OF_CHAIN | XOR_INT_STOPPED;
	val = ~(val << (chan->idx * 16));
	dev_dbg(mv_chan_to_devp(chan), "%s, val 0x%08x\n", __func__, val);
	writel_relaxed(val, XOR_INTR_CAUSE(chan));
}

static void mv_chan_clear_err_status(struct mv_xor_chan *chan)
{
	u32 val = 0xFFFF0000 >> (chan->idx * 16);
	writel_relaxed(val, XOR_INTR_CAUSE(chan));
}

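/*
 * mv_chan_set_mode - program the operation mode in the low bits of the
 * channel configuration register and select the descriptor byte-swap
 * setting that matches the CPU endianness.
 */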
static void mv_chan_set_mode(struct mv_xor_chan *chan,
			     u32 op_mode)
{
	u32 config = readl_relaxed(XOR_CONFIG(chan));

	config &= ~0x7;
	config |= op_mode;

#if defined(__BIG_ENDIAN)
	config |= XOR_DESCRIPTOR_SWAP;
#else
	config &= ~XOR_DESCRIPTOR_SWAP;
#endif

	writel_relaxed(config, XOR_CONFIG(chan));
}

static void mv_chan_activate(struct mv_xor_chan *chan)
{
	dev_dbg(mv_chan_to_devp(chan), " activate chan.\n");

	/* writel ensures all descriptors are flushed before activation */
	writel(BIT(0), XOR_ACTIVATION(chan));
}

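/*
 * mv_chan_is_busy - return 1 while the channel status field in the
 * activation register reports the engine as active.
 */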
static char mv_chan_is_busy(struct mv_xor_chan *chan)
{
	u32 state = readl_relaxed(XOR_ACTIVATION(chan));

	state = (state >> 4) & 0x3;

	return (state == 1) ? 1 : 0;
}

/*
 * mv_chan_start_new_chain - program the engine to operate on a new
 * chain headed by sw_desc
 * Caller must hold &mv_chan->lock while calling this function
 */
static void mv_chan_start_new_chain(struct mv_xor_chan *mv_chan,
				    struct mv_xor_desc_slot *sw_desc)
{
	dev_dbg(mv_chan_to_devp(mv_chan), "%s %d: sw_desc %p\n",
		__func__, __LINE__, sw_desc);

	/* set the hardware chain */
	mv_chan_set_next_descriptor(mv_chan, sw_desc->async_tx.phys);

	mv_chan->pending++;
	mv_xor_issue_pending(&mv_chan->dmachan);
}

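/*
 * mv_desc_run_tx_complete_actions - invoke the client callback for a
 * completed descriptor, drop its DMA mappings and kick any dependent
 * transactions; returns the descriptor's cookie, or the cookie passed
 * in if none was assigned.
 */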
static dma_cookie_t
mv_desc_run_tx_complete_actions(struct mv_xor_desc_slot *desc,
				struct mv_xor_chan *mv_chan,
				dma_cookie_t cookie)
{
	BUG_ON(desc->async_tx.cookie < 0);

	if (desc->async_tx.cookie > 0) {
		cookie = desc->async_tx.cookie;

		/* call the callback (must not sleep or submit new
		 * operations to this channel)
		 */
		if (desc->async_tx.callback)
			desc->async_tx.callback(
				desc->async_tx.callback_param);

		dma_descriptor_unmap(&desc->async_tx);
	}

	/* run dependent operations */
	dma_run_dependencies(&desc->async_tx);

	return cookie;
}

static int
mv_chan_clean_completed_slots(struct mv_xor_chan *mv_chan)
{
	struct mv_xor_desc_slot *iter, *_iter;

	dev_dbg(mv_chan_to_devp(mv_chan), "%s %d\n", __func__, __LINE__);
	list_for_each_entry_safe(iter, _iter, &mv_chan->completed_slots,
				 node) {

		if (async_tx_test_ack(&iter->async_tx))
			list_move_tail(&iter->node, &mv_chan->free_slots);
	}
	return 0;
}

static int
mv_desc_clean_slot(struct mv_xor_desc_slot *desc,
		   struct mv_xor_chan *mv_chan)
{
	dev_dbg(mv_chan_to_devp(mv_chan), "%s %d: desc %p flags %d\n",
		__func__, __LINE__, desc, desc->async_tx.flags);

	/* the client is allowed to attach dependent operations
	 * until 'ack' is set
	 */
	if (!async_tx_test_ack(&desc->async_tx))
		/* move this slot to the completed_slots */
		list_move_tail(&desc->node, &mv_chan->completed_slots);
	else
		list_move_tail(&desc->node, &mv_chan->free_slots);

	return 0;
}

/* This function must be called with the mv_xor_chan spinlock held */
static void mv_chan_slot_cleanup(struct mv_xor_chan *mv_chan)
{
	struct mv_xor_desc_slot *iter, *_iter;
	dma_cookie_t cookie = 0;
	int busy = mv_chan_is_busy(mv_chan);
	u32 current_desc = mv_chan_get_current_desc(mv_chan);
	int current_cleaned = 0;
	struct mv_xor_desc *hw_desc;

	dev_dbg(mv_chan_to_devp(mv_chan), "%s %d\n", __func__, __LINE__);
	dev_dbg(mv_chan_to_devp(mv_chan), "current_desc %x\n", current_desc);
	mv_chan_clean_completed_slots(mv_chan);

	/* free completed slots from the chain starting with
	 * the oldest descriptor
	 */

	list_for_each_entry_safe(iter, _iter, &mv_chan->chain,
				 node) {

		/* clean finished descriptors */
		hw_desc = iter->hw_desc;
		if (hw_desc->status & XOR_DESC_SUCCESS) {
			cookie = mv_desc_run_tx_complete_actions(iter, mv_chan,
								 cookie);

			/* done processing desc, clean slot */
			mv_desc_clean_slot(iter, mv_chan);

			/* break if we cleaned the current descriptor */
			if (iter->async_tx.phys == current_desc) {
				current_cleaned = 1;
				break;
			}
		} else {
			if (iter->async_tx.phys == current_desc) {
				current_cleaned = 0;
				break;
			}
		}
	}

	if ((busy == 0) && !list_empty(&mv_chan->chain)) {
		if (current_cleaned) {
			/*
			 * current descriptor cleaned and removed, run
			 * from list head
			 */
			iter = list_entry(mv_chan->chain.next,
					  struct mv_xor_desc_slot,
					  node);
			mv_chan_start_new_chain(mv_chan, iter);
		} else {
			if (!list_is_last(&iter->node, &mv_chan->chain)) {
				/*
				 * descriptors are still waiting after
				 * current, trigger them
				 */
				iter = list_entry(iter->node.next,
						  struct mv_xor_desc_slot,
						  node);
				mv_chan_start_new_chain(mv_chan, iter);
			} else {
				/*
				 * some descriptors are still waiting
				 * to be cleaned
				 */
				tasklet_schedule(&mv_chan->irq_tasklet);
			}
		}
	}

	if (cookie > 0)
		mv_chan->dmachan.completed_cookie = cookie;
}

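/* Deferred work: run the descriptor cleanup under the channel lock */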
static void mv_xor_tasklet(unsigned long data)
{
	struct mv_xor_chan *chan = (struct mv_xor_chan *) data;

	spin_lock_bh(&chan->lock);
	mv_chan_slot_cleanup(chan);
	spin_unlock_bh(&chan->lock);
}

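/*
 * mv_chan_alloc_slot - take a descriptor slot from the free list and
 * move it to the allocated list; if no slot is free, schedule the
 * cleanup tasklet in the hope that it releases one, and return NULL.
 */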
static struct mv_xor_desc_slot *
mv_chan_alloc_slot(struct mv_xor_chan *mv_chan)
{
	struct mv_xor_desc_slot *iter;

	spin_lock_bh(&mv_chan->lock);

	if (!list_empty(&mv_chan->free_slots)) {
		iter = list_first_entry(&mv_chan->free_slots,
					struct mv_xor_desc_slot,
					node);

		list_move_tail(&iter->node, &mv_chan->allocated_slots);

		spin_unlock_bh(&mv_chan->lock);

		/* pre-ack descriptor */
		async_tx_ack(&iter->async_tx);
		iter->async_tx.cookie = -EBUSY;

		return iter;

	}

	spin_unlock_bh(&mv_chan->lock);

	/* try to free some slots if the allocation fails */
	tasklet_schedule(&mv_chan->irq_tasklet);

	return NULL;
}

/************************ DMA engine API functions ****************************/
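/*
 * mv_xor_tx_submit - append the descriptor to the channel chain and
 * link it after the old hardware chain tail; the engine is restarted
 * if the chain was empty, or if it had already stopped at the old tail.
 */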
static dma_cookie_t
mv_xor_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct mv_xor_desc_slot *sw_desc = to_mv_xor_slot(tx);
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(tx->chan);
	struct mv_xor_desc_slot *old_chain_tail;
	dma_cookie_t cookie;
	int new_hw_chain = 1;

	dev_dbg(mv_chan_to_devp(mv_chan),
		"%s sw_desc %p: async_tx %p\n",
		__func__, sw_desc, &sw_desc->async_tx);

	spin_lock_bh(&mv_chan->lock);
	cookie = dma_cookie_assign(tx);

	if (list_empty(&mv_chan->chain))
		list_move_tail(&sw_desc->node, &mv_chan->chain);
	else {
		new_hw_chain = 0;

		old_chain_tail = list_entry(mv_chan->chain.prev,
					    struct mv_xor_desc_slot,
					    node);
		list_move_tail(&sw_desc->node, &mv_chan->chain);

		dev_dbg(mv_chan_to_devp(mv_chan), "Append to last desc %pa\n",
			&old_chain_tail->async_tx.phys);

		/* fix up the hardware chain */
		mv_desc_set_next_desc(old_chain_tail, sw_desc->async_tx.phys);

		/* if the channel is not busy */
		if (!mv_chan_is_busy(mv_chan)) {
			u32 current_desc = mv_chan_get_current_desc(mv_chan);
			/*
			 * and the current desc is the end of the chain before
			 * the append, then we need to start the channel
			 */
			if (current_desc == old_chain_tail->async_tx.phys)
				new_hw_chain = 1;
		}
	}

	if (new_hw_chain)
		mv_chan_start_new_chain(mv_chan, sw_desc);

	spin_unlock_bh(&mv_chan->lock);

	return cookie;
}

/* returns the number of allocated descriptors */
static int mv_xor_alloc_chan_resources(struct dma_chan *chan)
{
	void *virt_desc;
	dma_addr_t dma_desc;
	int idx;
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
	struct mv_xor_desc_slot *slot = NULL;
	int num_descs_in_pool = MV_XOR_POOL_SIZE/MV_XOR_SLOT_SIZE;

	/* Allocate descriptor slots */
	idx = mv_chan->slots_allocated;
	while (idx < num_descs_in_pool) {
		slot = kzalloc(sizeof(*slot), GFP_KERNEL);
		if (!slot) {
			dev_info(mv_chan_to_devp(mv_chan),
				 "channel only initialized %d descriptor slots",
				 idx);
			break;
		}
		virt_desc = mv_chan->dma_desc_pool_virt;
		slot->hw_desc = virt_desc + idx * MV_XOR_SLOT_SIZE;

		dma_async_tx_descriptor_init(&slot->async_tx, chan);
		slot->async_tx.tx_submit = mv_xor_tx_submit;
		INIT_LIST_HEAD(&slot->node);
		dma_desc = mv_chan->dma_desc_pool;
		slot->async_tx.phys = dma_desc + idx * MV_XOR_SLOT_SIZE;
		slot->idx = idx++;

		spin_lock_bh(&mv_chan->lock);
		mv_chan->slots_allocated = idx;
		list_add_tail(&slot->node, &mv_chan->free_slots);
		spin_unlock_bh(&mv_chan->lock);
	}

	dev_dbg(mv_chan_to_devp(mv_chan),
		"allocated %d descriptor slots\n",
		mv_chan->slots_allocated);

	return mv_chan->slots_allocated ? : -ENOMEM;
}

/*
 * Check if the source or destination is a PCIe/IO address (non-SDRAM)
 * and add a new MBus window if necessary. A cache of already-mapped
 * windows is kept so that the MMIO mapped registers don't have to be
 * accessed on every check, which speeds up this path.
 */
static int mv_xor_add_io_win(struct mv_xor_chan *mv_chan, u32 addr)
{
	struct mv_xor_device *xordev = mv_chan->xordev;
	void __iomem *base = mv_chan->mmr_high_base;
	u32 win_enable;
	u32 size;
	u8 target, attr;
	int ret;
	int i;

	/* Nothing needs to get done for the Armada 3700 */
	if (xordev->xor_type == XOR_ARMADA_37XX)
		return 0;

	/*
	 * Loop over the cached windows to check if the requested area
	 * is already mapped. If this is the case, nothing needs to be
	 * done and we can return.
	 */
	for (i = 0; i < WINDOW_COUNT; i++) {
		if (addr >= xordev->win_start[i] &&
		    addr <= xordev->win_end[i]) {
			/* Window is already mapped */
			return 0;
		}
	}

	/*
	 * The window is not mapped, so we need to create a new mapping
	 */

	/* If no IO window is found, addr has to be located in SDRAM */
	ret = mvebu_mbus_get_io_win_info(addr, &size, &target, &attr);
	if (ret < 0)
		return 0;

	/*
	 * Mask the base addr 'addr' according to 'size' read back from the
	 * MBus window. Otherwise we might end up with an address located
	 * somewhere in the middle of this area here.
	 */
	size -= 1;
	addr &= ~size;

	/*
	 * Reading either of the two enable registers is enough, as they
	 * are always programmed with identical values.
	 */
	win_enable = readl(base + WINDOW_BAR_ENABLE(0));

	/* Set 'i' to the first free window to write the new values to */
	i = ffs(~win_enable) - 1;
	if (i >= WINDOW_COUNT)
		return -ENOMEM;

	writel((addr & 0xffff0000) | (attr << 8) | target,
	       base + WINDOW_BASE(i));
	writel(size & 0xffff0000, base + WINDOW_SIZE(i));

	/* Fill the caching variables for later use */
	xordev->win_start[i] = addr;
	xordev->win_end[i] = addr + size;

	win_enable |= (1 << i);
	win_enable |= 3 << (16 + (2 * i));
	writel(win_enable, base + WINDOW_BAR_ENABLE(0));
	writel(win_enable, base + WINDOW_BAR_ENABLE(1));

	return 0;
}

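/*
 * mv_xor_prep_dma_xor - prepare an XOR descriptor: check the length
 * bounds, make sure MBus windows cover the destination and all source
 * addresses, then allocate a slot and fill in its hardware descriptor.
 */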
static struct dma_async_tx_descriptor *
mv_xor_prep_dma_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
		    unsigned int src_cnt, size_t len, unsigned long flags)
{
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
	struct mv_xor_desc_slot *sw_desc;
	int ret;

	if (unlikely(len < MV_XOR_MIN_BYTE_COUNT))
		return NULL;

	BUG_ON(len > MV_XOR_MAX_BYTE_COUNT);

	dev_dbg(mv_chan_to_devp(mv_chan),
		"%s src_cnt: %d len: %zu dest %pad flags: %ld\n",
		__func__, src_cnt, len, &dest, flags);

	/* Check if a new window needs to get added for 'dest' */
	ret = mv_xor_add_io_win(mv_chan, dest);
	if (ret)
		return NULL;

	sw_desc = mv_chan_alloc_slot(mv_chan);
	if (sw_desc) {
		sw_desc->type = DMA_XOR;
		sw_desc->async_tx.flags = flags;
		mv_desc_init(sw_desc, dest, len, flags);
		if (mv_chan->op_in_desc == XOR_MODE_IN_DESC)
			mv_desc_set_mode(sw_desc);
		while (src_cnt--) {
			/* Check if a new window needs to get added for 'src' */
			ret = mv_xor_add_io_win(mv_chan, src[src_cnt]);
			if (ret)
				return NULL;
			mv_desc_set_src_addr(sw_desc, src_cnt, src[src_cnt]);
		}
	}

	dev_dbg(mv_chan_to_devp(mv_chan),
		"%s sw_desc %p async_tx %p \n",
		__func__, sw_desc, &sw_desc->async_tx);
	return sw_desc ? &sw_desc->async_tx : NULL;
}

static struct dma_async_tx_descriptor *
mv_xor_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
		size_t len, unsigned long flags)
{
	/*
	 * A MEMCPY operation is identical to an XOR operation with only
	 * a single source address.
	 */
	return mv_xor_prep_dma_xor(chan, dest, &src, 1, len, flags);
}

static struct dma_async_tx_descriptor *
mv_xor_prep_dma_interrupt(struct dma_chan *chan, unsigned long flags)
{
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
	dma_addr_t src, dest;
	size_t len;

	src = mv_chan->dummy_src_addr;
	dest = mv_chan->dummy_dst_addr;
	len = MV_XOR_MIN_BYTE_COUNT;

	/*
	 * We implement the DMA_INTERRUPT operation as a minimum sized
	 * XOR operation with a single dummy source address.
	 */
	return mv_xor_prep_dma_xor(chan, dest, &src, 1, len, flags);
}

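/*
 * mv_xor_free_chan_resources - run a final cleanup pass, then move all
 * remaining slots to the free list and release them; descriptors that
 * were still in use are reported.
 */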
static void mv_xor_free_chan_resources(struct dma_chan *chan)
{
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
	struct mv_xor_desc_slot *iter, *_iter;
	int in_use_descs = 0;

	spin_lock_bh(&mv_chan->lock);

	mv_chan_slot_cleanup(mv_chan);

	list_for_each_entry_safe(iter, _iter, &mv_chan->chain,
					node) {
		in_use_descs++;
		list_move_tail(&iter->node, &mv_chan->free_slots);
	}
	list_for_each_entry_safe(iter, _iter, &mv_chan->completed_slots,
				 node) {
		in_use_descs++;
		list_move_tail(&iter->node, &mv_chan->free_slots);
	}
	list_for_each_entry_safe(iter, _iter, &mv_chan->allocated_slots,
				 node) {
		in_use_descs++;
		list_move_tail(&iter->node, &mv_chan->free_slots);
	}
	list_for_each_entry_safe_reverse(
		iter, _iter, &mv_chan->free_slots, node) {
		list_del(&iter->node);
		kfree(iter);
		mv_chan->slots_allocated--;
	}

	dev_dbg(mv_chan_to_devp(mv_chan), "%s slots_allocated %d\n",
		__func__, mv_chan->slots_allocated);
	spin_unlock_bh(&mv_chan->lock);

	if (in_use_descs)
		dev_err(mv_chan_to_devp(mv_chan),
			"freeing %d in use descriptors!\n", in_use_descs);
}

/**
 * mv_xor_status - poll the status of an XOR transaction
 * @chan: XOR channel handle
 * @cookie: XOR transaction identifier
 * @txstate: XOR transactions state holder (or NULL)
 */
static enum dma_status mv_xor_status(struct dma_chan *chan,
					  dma_cookie_t cookie,
					  struct dma_tx_state *txstate)
{
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
	enum dma_status ret;

	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret == DMA_COMPLETE)
		return ret;

	spin_lock_bh(&mv_chan->lock);
	mv_chan_slot_cleanup(mv_chan);
	spin_unlock_bh(&mv_chan->lock);

	return dma_cookie_status(chan, cookie, txstate);
}

static void mv_chan_dump_regs(struct mv_xor_chan *chan)
{
	u32 val;

	val = readl_relaxed(XOR_CONFIG(chan));
	dev_err(mv_chan_to_devp(chan), "config       0x%08x\n", val);

	val = readl_relaxed(XOR_ACTIVATION(chan));
	dev_err(mv_chan_to_devp(chan), "activation   0x%08x\n", val);

	val = readl_relaxed(XOR_INTR_CAUSE(chan));
	dev_err(mv_chan_to_devp(chan), "intr cause   0x%08x\n", val);

	val = readl_relaxed(XOR_INTR_MASK(chan));
	dev_err(mv_chan_to_devp(chan), "intr mask    0x%08x\n", val);

	val = readl_relaxed(XOR_ERROR_CAUSE(chan));
	dev_err(mv_chan_to_devp(chan), "error cause  0x%08x\n", val);

	val = readl_relaxed(XOR_ERROR_ADDR(chan));
	dev_err(mv_chan_to_devp(chan), "error addr   0x%08x\n", val);
}

static void mv_chan_err_interrupt_handler(struct mv_xor_chan *chan,
					  u32 intr_cause)
{
	if (intr_cause & XOR_INT_ERR_DECODE) {
		dev_dbg(mv_chan_to_devp(chan), "ignoring address decode error\n");
		return;
	}

	dev_err(mv_chan_to_devp(chan), "error on chan %d. intr cause 0x%08x\n",
		chan->idx, intr_cause);

	mv_chan_dump_regs(chan);
	WARN_ON(1);
}

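/*
 * Interrupt top half: handle error causes directly, defer completion
 * processing to the tasklet and acknowledge the end-of-chain causes.
 */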
static irqreturn_t mv_xor_interrupt_handler(int irq, void *data)
{
	struct mv_xor_chan *chan = data;
	u32 intr_cause = mv_chan_get_intr_cause(chan);

	dev_dbg(mv_chan_to_devp(chan), "intr cause %x\n", intr_cause);

	if (intr_cause & XOR_INTR_ERRORS)
		mv_chan_err_interrupt_handler(chan, intr_cause);

	tasklet_schedule(&chan->irq_tasklet);

	mv_chan_clear_eoc_cause(chan);

	return IRQ_HANDLED;
}

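/*
 * mv_xor_issue_pending - activate the engine once the number of pending
 * descriptors reaches MV_XOR_THRESHOLD, then reset the pending count.
 */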
static void mv_xor_issue_pending(struct dma_chan *chan)
{
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);

	if (mv_chan->pending >= MV_XOR_THRESHOLD) {
		mv_chan->pending = 0;
		mv_chan_activate(mv_chan);
	}
}

/*
 * Perform a transaction to verify the HW works.
 */

static int mv_chan_memcpy_self_test(struct mv_xor_chan *mv_chan)
{
	int i, ret;
	void *src, *dest;
	dma_addr_t src_dma, dest_dma;
	struct dma_chan *dma_chan;
	dma_cookie_t cookie;
	struct dma_async_tx_descriptor *tx;
	struct dmaengine_unmap_data *unmap;
	int err = 0;

	src = kmalloc(sizeof(u8) * PAGE_SIZE, GFP_KERNEL);
	if (!src)
		return -ENOMEM;

	dest = kzalloc(sizeof(u8) * PAGE_SIZE, GFP_KERNEL);
	if (!dest) {
		kfree(src);
		return -ENOMEM;
	}

	/* Fill in src buffer */
	for (i = 0; i < PAGE_SIZE; i++)
		((u8 *) src)[i] = (u8)i;

	dma_chan = &mv_chan->dmachan;
	if (mv_xor_alloc_chan_resources(dma_chan) < 1) {
		err = -ENODEV;
		goto out;
	}

	unmap = dmaengine_get_unmap_data(dma_chan->device->dev, 2, GFP_KERNEL);
	if (!unmap) {
		err = -ENOMEM;
		goto free_resources;
	}

	src_dma = dma_map_page(dma_chan->device->dev, virt_to_page(src),
			       (size_t)src & ~PAGE_MASK, PAGE_SIZE,
			       DMA_TO_DEVICE);
	unmap->addr[0] = src_dma;

	ret = dma_mapping_error(dma_chan->device->dev, src_dma);
	if (ret) {
		err = -ENOMEM;
		goto free_resources;
	}
	unmap->to_cnt = 1;

	dest_dma = dma_map_page(dma_chan->device->dev, virt_to_page(dest),
				(size_t)dest & ~PAGE_MASK, PAGE_SIZE,
				DMA_FROM_DEVICE);
	unmap->addr[1] = dest_dma;

	ret = dma_mapping_error(dma_chan->device->dev, dest_dma);
	if (ret) {
		err = -ENOMEM;
		goto free_resources;
	}
	unmap->from_cnt = 1;
	unmap->len = PAGE_SIZE;

	tx = mv_xor_prep_dma_memcpy(dma_chan, dest_dma, src_dma,
				    PAGE_SIZE, 0);
	if (!tx) {
		dev_err(dma_chan->device->dev,
			"Self-test cannot prepare operation, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

	cookie = mv_xor_tx_submit(tx);
	if (dma_submit_error(cookie)) {
		dev_err(dma_chan->device->dev,
			"Self-test submit error, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

	mv_xor_issue_pending(dma_chan);
	async_tx_ack(tx);
	msleep(1);

	if (mv_xor_status(dma_chan, cookie, NULL) !=
	    DMA_COMPLETE) {
		dev_err(dma_chan->device->dev,
			"Self-test copy timed out, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

	dma_sync_single_for_cpu(dma_chan->device->dev, dest_dma,
				PAGE_SIZE, DMA_FROM_DEVICE);
	if (memcmp(src, dest, PAGE_SIZE)) {
		dev_err(dma_chan->device->dev,
			"Self-test copy failed compare, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

free_resources:
	dmaengine_unmap_put(unmap);
	mv_xor_free_chan_resources(dma_chan);
out:
	kfree(src);
	kfree(dest);
	return err;
}

#define MV_XOR_NUM_SRC_TEST 4 /* must be <= 15 */
static int
mv_chan_xor_self_test(struct mv_xor_chan *mv_chan)
{
	int i, src_idx, ret;
	struct page *dest;
	struct page *xor_srcs[MV_XOR_NUM_SRC_TEST];
	dma_addr_t dma_srcs[MV_XOR_NUM_SRC_TEST];
	dma_addr_t dest_dma;
	struct dma_async_tx_descriptor *tx;
	struct dmaengine_unmap_data *unmap;
	struct dma_chan *dma_chan;
	dma_cookie_t cookie;
	u8 cmp_byte = 0;
	u32 cmp_word;
	int err = 0;
	int src_count = MV_XOR_NUM_SRC_TEST;

	for (src_idx = 0; src_idx < src_count; src_idx++) {
		xor_srcs[src_idx] = alloc_page(GFP_KERNEL);
		if (!xor_srcs[src_idx]) {
			while (src_idx--)
				__free_page(xor_srcs[src_idx]);
			return -ENOMEM;
		}
	}

	dest = alloc_page(GFP_KERNEL);
	if (!dest) {
		while (src_idx--)
			__free_page(xor_srcs[src_idx]);
		return -ENOMEM;
	}

	/* Fill in src buffers */
	for (src_idx = 0; src_idx < src_count; src_idx++) {
		u8 *ptr = page_address(xor_srcs[src_idx]);
		for (i = 0; i < PAGE_SIZE; i++)
			ptr[i] = (1 << src_idx);
	}

	for (src_idx = 0; src_idx < src_count; src_idx++)
		cmp_byte ^= (u8) (1 << src_idx);

	cmp_word = (cmp_byte << 24) | (cmp_byte << 16) |
		(cmp_byte << 8) | cmp_byte;

	memset(page_address(dest), 0, PAGE_SIZE);

	dma_chan = &mv_chan->dmachan;
	if (mv_xor_alloc_chan_resources(dma_chan) < 1) {
		err = -ENODEV;
		goto out;
	}

	unmap = dmaengine_get_unmap_data(dma_chan->device->dev, src_count + 1,
					 GFP_KERNEL);
	if (!unmap) {
		err = -ENOMEM;
		goto free_resources;
	}

	/* test xor */
	for (i = 0; i < src_count; i++) {
		unmap->addr[i] = dma_map_page(dma_chan->device->dev, xor_srcs[i],
					      0, PAGE_SIZE, DMA_TO_DEVICE);
		dma_srcs[i] = unmap->addr[i];
		ret = dma_mapping_error(dma_chan->device->dev, unmap->addr[i]);
		if (ret) {
			err = -ENOMEM;
			goto free_resources;
		}
		unmap->to_cnt++;
	}

	unmap->addr[src_count] = dma_map_page(dma_chan->device->dev, dest, 0, PAGE_SIZE,
				      DMA_FROM_DEVICE);
	dest_dma = unmap->addr[src_count];
	ret = dma_mapping_error(dma_chan->device->dev, unmap->addr[src_count]);
	if (ret) {
		err = -ENOMEM;
		goto free_resources;
	}
	unmap->from_cnt = 1;
	unmap->len = PAGE_SIZE;

	tx = mv_xor_prep_dma_xor(dma_chan, dest_dma, dma_srcs,
				 src_count, PAGE_SIZE, 0);
	if (!tx) {
		dev_err(dma_chan->device->dev,
			"Self-test cannot prepare operation, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

	cookie = mv_xor_tx_submit(tx);
	if (dma_submit_error(cookie)) {
		dev_err(dma_chan->device->dev,
			"Self-test submit error, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

	mv_xor_issue_pending(dma_chan);
	async_tx_ack(tx);
	msleep(8);

	if (mv_xor_status(dma_chan, cookie, NULL) !=
	    DMA_COMPLETE) {
		dev_err(dma_chan->device->dev,
			"Self-test xor timed out, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

	dma_sync_single_for_cpu(dma_chan->device->dev, dest_dma,
				PAGE_SIZE, DMA_FROM_DEVICE);
	for (i = 0; i < (PAGE_SIZE / sizeof(u32)); i++) {
		u32 *ptr = page_address(dest);
		if (ptr[i] != cmp_word) {
			dev_err(dma_chan->device->dev,
				"Self-test xor failed compare, disabling. index %d, data %x, expected %x\n",
				i, ptr[i], cmp_word);
			err = -ENODEV;
			goto free_resources;
		}
	}

free_resources:
	dmaengine_unmap_put(unmap);
	mv_xor_free_chan_resources(dma_chan);
out:
	src_idx = src_count;
	while (src_idx--)
		__free_page(xor_srcs[src_idx]);
	__free_page(dest);
	return err;
}

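/*
 * mv_xor_channel_remove - tear down one channel: unregister it from the
 * dmaengine core, release the descriptor pool, unmap the dummy buffers
 * and free the interrupt line.
 */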
static int mv_xor_channel_remove(struct mv_xor_chan *mv_chan)
{
	struct dma_chan *chan, *_chan;
	struct device *dev = mv_chan->dmadev.dev;

	dma_async_device_unregister(&mv_chan->dmadev);

	dma_free_coherent(dev, MV_XOR_POOL_SIZE,
			  mv_chan->dma_desc_pool_virt, mv_chan->dma_desc_pool);
	dma_unmap_single(dev, mv_chan->dummy_src_addr,
			 MV_XOR_MIN_BYTE_COUNT, DMA_FROM_DEVICE);
	dma_unmap_single(dev, mv_chan->dummy_dst_addr,
			 MV_XOR_MIN_BYTE_COUNT, DMA_TO_DEVICE);

	list_for_each_entry_safe(chan, _chan, &mv_chan->dmadev.channels,
				 device_node) {
		list_del(&chan->device_node);
	}

	free_irq(mv_chan->irq, mv_chan);

	return 0;
}

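/*
 * mv_xor_channel_add - set up one channel: map the dummy buffers,
 * allocate the coherent descriptor pool, install the dmaengine
 * callbacks, request the interrupt and run the self-tests before
 * registering the channel with the dmaengine core.
 */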
10291ef48a26SThomas Petazzoni static struct mv_xor_chan *
1030297eedbaSThomas Petazzoni mv_xor_channel_add(struct mv_xor_device *xordev,
1031a6b4a9d2SThomas Petazzoni 		   struct platform_device *pdev,
1032dd130c65SGregory CLEMENT 		   int idx, dma_cap_mask_t cap_mask, int irq)
1033ff7b0479SSaeed Bishara {
1034ff7b0479SSaeed Bishara 	int ret = 0;
1035ff7b0479SSaeed Bishara 	struct mv_xor_chan *mv_chan;
1036ff7b0479SSaeed Bishara 	struct dma_device *dma_dev;
1037ff7b0479SSaeed Bishara 
10381ef48a26SThomas Petazzoni 	mv_chan = devm_kzalloc(&pdev->dev, sizeof(*mv_chan), GFP_KERNEL);
1039a577659fSSachin Kamat 	if (!mv_chan)
1040a577659fSSachin Kamat 		return ERR_PTR(-ENOMEM);
1041ff7b0479SSaeed Bishara 
10429aedbdbaSThomas Petazzoni 	mv_chan->idx = idx;
104388eb92cbSThomas Petazzoni 	mv_chan->irq = irq;
1044dd130c65SGregory CLEMENT 	if (xordev->xor_type == XOR_ORION)
1045dd130c65SGregory CLEMENT 		mv_chan->op_in_desc = XOR_MODE_IN_REG;
1046dd130c65SGregory CLEMENT 	else
1047dd130c65SGregory CLEMENT 		mv_chan->op_in_desc = XOR_MODE_IN_DESC;
1048ff7b0479SSaeed Bishara 
10491ef48a26SThomas Petazzoni 	dma_dev = &mv_chan->dmadev;
1050*77ff7a70SStefan Roese 	mv_chan->xordev = xordev;
1051ff7b0479SSaeed Bishara 
105222843545SLior Amsalem 	/*
105322843545SLior Amsalem 	 * These source and destination dummy buffers are used to implement
105422843545SLior Amsalem 	 * a DMA_INTERRUPT operation as a minimum-sized XOR operation.
105522843545SLior Amsalem 	 * Hence, we only need to map the buffers at initialization-time.
105622843545SLior Amsalem 	 */
105722843545SLior Amsalem 	mv_chan->dummy_src_addr = dma_map_single(dma_dev->dev,
105822843545SLior Amsalem 		mv_chan->dummy_src, MV_XOR_MIN_BYTE_COUNT, DMA_FROM_DEVICE);
105922843545SLior Amsalem 	mv_chan->dummy_dst_addr = dma_map_single(dma_dev->dev,
106022843545SLior Amsalem 		mv_chan->dummy_dst, MV_XOR_MIN_BYTE_COUNT, DMA_TO_DEVICE);
106122843545SLior Amsalem 
1062ff7b0479SSaeed Bishara 	/* allocate coherent memory for hardware descriptors
1063ff7b0479SSaeed Bishara 	 * note: writecombine gives slightly better performance, but
1064ff7b0479SSaeed Bishara 	 * requires that we explicitly flush the writes
1065ff7b0479SSaeed Bishara 	 */
10661ef48a26SThomas Petazzoni 	mv_chan->dma_desc_pool_virt =
1067f6e45661SLuis R. Rodriguez 	  dma_alloc_wc(&pdev->dev, MV_XOR_POOL_SIZE, &mv_chan->dma_desc_pool,
1068f6e45661SLuis R. Rodriguez 		       GFP_KERNEL);
10691ef48a26SThomas Petazzoni 	if (!mv_chan->dma_desc_pool_virt)
1070a6b4a9d2SThomas Petazzoni 		return ERR_PTR(-ENOMEM);
1071ff7b0479SSaeed Bishara 
1072ff7b0479SSaeed Bishara 	/* discover transaction capabilites from the platform data */
1073a6b4a9d2SThomas Petazzoni 	dma_dev->cap_mask = cap_mask;
1074ff7b0479SSaeed Bishara 
1075ff7b0479SSaeed Bishara 	INIT_LIST_HEAD(&dma_dev->channels);
1076ff7b0479SSaeed Bishara 
1077ff7b0479SSaeed Bishara 	/* set base routines */
1078ff7b0479SSaeed Bishara 	dma_dev->device_alloc_chan_resources = mv_xor_alloc_chan_resources;
1079ff7b0479SSaeed Bishara 	dma_dev->device_free_chan_resources = mv_xor_free_chan_resources;
108007934481SLinus Walleij 	dma_dev->device_tx_status = mv_xor_status;
1081ff7b0479SSaeed Bishara 	dma_dev->device_issue_pending = mv_xor_issue_pending;
1082ff7b0479SSaeed Bishara 	dma_dev->dev = &pdev->dev;
1083ff7b0479SSaeed Bishara 
1084ff7b0479SSaeed Bishara 	/* set prep routines based on capability */
108522843545SLior Amsalem 	if (dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask))
108622843545SLior Amsalem 		dma_dev->device_prep_dma_interrupt = mv_xor_prep_dma_interrupt;
1087ff7b0479SSaeed Bishara 	if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask))
1088ff7b0479SSaeed Bishara 		dma_dev->device_prep_dma_memcpy = mv_xor_prep_dma_memcpy;
1089ff7b0479SSaeed Bishara 	if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
1090c019894eSJoe Perches 		dma_dev->max_xor = 8;
1091ff7b0479SSaeed Bishara 		dma_dev->device_prep_dma_xor = mv_xor_prep_dma_xor;
1092ff7b0479SSaeed Bishara 	}
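
	/*
	 * Illustrative client-side usage of the prep routines set up
	 * above (a sketch only; dest, srcs, src_cnt, len and done_fn
	 * are placeholder values a dmaengine client would provide):
	 *
	 *	struct dma_async_tx_descriptor *tx;
	 *
	 *	tx = chan->device->device_prep_dma_xor(chan, dest, srcs,
	 *					       src_cnt, len,
	 *					       DMA_PREP_INTERRUPT);
	 *	tx->callback = done_fn;
	 *	dmaengine_submit(tx);
	 *	dma_async_issue_pending(chan);
	 */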
1093ff7b0479SSaeed Bishara 
1094297eedbaSThomas Petazzoni 	mv_chan->mmr_base = xordev->xor_base;
109582a1402eSEzequiel Garcia 	mv_chan->mmr_high_base = xordev->xor_high_base;
1096ff7b0479SSaeed Bishara 	tasklet_init(&mv_chan->irq_tasklet, mv_xor_tasklet,
1097ff7b0479SSaeed Bishara 		     (unsigned long)mv_chan);
1098ff7b0479SSaeed Bishara 
1099ff7b0479SSaeed Bishara 	/* clear errors before enabling interrupts */
11000951e728SMaxime Ripard 	mv_chan_clear_err_status(mv_chan);
1101ff7b0479SSaeed Bishara 
11022d0a0745SThomas Petazzoni 	ret = request_irq(mv_chan->irq, mv_xor_interrupt_handler,
1103ff7b0479SSaeed Bishara 			  0, dev_name(&pdev->dev), mv_chan);
1104ff7b0479SSaeed Bishara 	if (ret)
1105ff7b0479SSaeed Bishara 		goto err_free_dma;
1106ff7b0479SSaeed Bishara 
1107ff7b0479SSaeed Bishara 	mv_chan_unmask_interrupts(mv_chan);
1108ff7b0479SSaeed Bishara 
11096f166312SLior Amsalem 	if (mv_chan->op_in_desc == XOR_MODE_IN_DESC)
111081aafb3eSThomas Petazzoni 		mv_chan_set_mode(mv_chan, XOR_OPERATION_MODE_IN_DESC);
11116f166312SLior Amsalem 	else
111281aafb3eSThomas Petazzoni 		mv_chan_set_mode(mv_chan, XOR_OPERATION_MODE_XOR);
1113ff7b0479SSaeed Bishara 
1114ff7b0479SSaeed Bishara 	spin_lock_init(&mv_chan->lock);
1115ff7b0479SSaeed Bishara 	INIT_LIST_HEAD(&mv_chan->chain);
1116ff7b0479SSaeed Bishara 	INIT_LIST_HEAD(&mv_chan->completed_slots);
1117fbea28a2SLior Amsalem 	INIT_LIST_HEAD(&mv_chan->free_slots);
1118fbea28a2SLior Amsalem 	INIT_LIST_HEAD(&mv_chan->allocated_slots);
111998817b99SThomas Petazzoni 	mv_chan->dmachan.device = dma_dev;
112098817b99SThomas Petazzoni 	dma_cookie_init(&mv_chan->dmachan);
1121ff7b0479SSaeed Bishara 
112298817b99SThomas Petazzoni 	list_add_tail(&mv_chan->dmachan.device_node, &dma_dev->channels);
1123ff7b0479SSaeed Bishara 
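	/* Run the self-tests before the channel is made visible to clients. */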
1124ff7b0479SSaeed Bishara 	if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask)) {
11250951e728SMaxime Ripard 		ret = mv_chan_memcpy_self_test(mv_chan);
1126ff7b0479SSaeed Bishara 		dev_dbg(&pdev->dev, "memcpy self test returned %d\n", ret);
1127ff7b0479SSaeed Bishara 		if (ret)
11282d0a0745SThomas Petazzoni 			goto err_free_irq;
1129ff7b0479SSaeed Bishara 	}
1130ff7b0479SSaeed Bishara 
1131ff7b0479SSaeed Bishara 	if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
11320951e728SMaxime Ripard 		ret = mv_chan_xor_self_test(mv_chan);
1133ff7b0479SSaeed Bishara 		dev_dbg(&pdev->dev, "xor self test returned %d\n", ret);
1134ff7b0479SSaeed Bishara 		if (ret)
11352d0a0745SThomas Petazzoni 			goto err_free_irq;
1136ff7b0479SSaeed Bishara 	}
1137ff7b0479SSaeed Bishara 
11386f166312SLior Amsalem 	dev_info(&pdev->dev, "Marvell XOR (%s): ( %s%s%s)\n",
11396f166312SLior Amsalem 		 mv_chan->op_in_desc ? "Descriptor Mode" : "Registers Mode",
1140ff7b0479SSaeed Bishara 		 dma_has_cap(DMA_XOR, dma_dev->cap_mask) ? "xor " : "",
1141ff7b0479SSaeed Bishara 		 dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask) ? "cpy " : "",
1142ff7b0479SSaeed Bishara 		 dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask) ? "intr " : "");
1143ff7b0479SSaeed Bishara 
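	/*
	 * Make the channel visible: dma_async_device_register() walks
	 * the dma_dev->channels list that this channel was added to
	 * above. Note that its return value is not checked here.
	 */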
1144ff7b0479SSaeed Bishara 	dma_async_device_register(dma_dev);
11451ef48a26SThomas Petazzoni 	return mv_chan;
1146ff7b0479SSaeed Bishara 
11472d0a0745SThomas Petazzoni err_free_irq:
11482d0a0745SThomas Petazzoni 	free_irq(mv_chan->irq, mv_chan);
1149ff7b0479SSaeed Bishara err_free_dma:
1150b503fa01SThomas Petazzoni 	dma_free_coherent(&pdev->dev, MV_XOR_POOL_SIZE,
11511ef48a26SThomas Petazzoni 			  mv_chan->dma_desc_pool_virt, mv_chan->dma_desc_pool);
1152a6b4a9d2SThomas Petazzoni 	return ERR_PTR(ret);
1153ff7b0479SSaeed Bishara }
1154ff7b0479SSaeed Bishara 
1155ff7b0479SSaeed Bishara static void
1156297eedbaSThomas Petazzoni mv_xor_conf_mbus_windows(struct mv_xor_device *xordev,
115763a9332bSAndrew Lunn 			 const struct mbus_dram_target_info *dram)
1158ff7b0479SSaeed Bishara {
115982a1402eSEzequiel Garcia 	void __iomem *base = xordev->xor_high_base;
1160ff7b0479SSaeed Bishara 	u32 win_enable = 0;
1161ff7b0479SSaeed Bishara 	int i;
1162ff7b0479SSaeed Bishara 
1163ff7b0479SSaeed Bishara 	for (i = 0; i < 8; i++) {
1164ff7b0479SSaeed Bishara 		writel(0, base + WINDOW_BASE(i));
1165ff7b0479SSaeed Bishara 		writel(0, base + WINDOW_SIZE(i));
1166ff7b0479SSaeed Bishara 		if (i < 4)
1167ff7b0479SSaeed Bishara 			writel(0, base + WINDOW_REMAP_HIGH(i));
1168ff7b0479SSaeed Bishara 	}
1169ff7b0479SSaeed Bishara 
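	/*
	 * Program one window per DRAM chip-select. Assuming the usual
	 * Marvell MBus register layout: the base register packs the
	 * 64KB-aligned base address with the attribute and target ID,
	 * the size register holds (size - 1) with its low 16 bits
	 * cleared, and the enable mask sets bit i to activate window i
	 * plus two access-protection bits per window (3 = read/write).
	 */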
1170ff7b0479SSaeed Bishara 	for (i = 0; i < dram->num_cs; i++) {
117163a9332bSAndrew Lunn 		const struct mbus_dram_window *cs = dram->cs + i;
1172ff7b0479SSaeed Bishara 
1173ff7b0479SSaeed Bishara 		writel((cs->base & 0xffff0000) |
1174ff7b0479SSaeed Bishara 		       (cs->mbus_attr << 8) |
1175ff7b0479SSaeed Bishara 		       dram->mbus_dram_target_id, base + WINDOW_BASE(i));
1176ff7b0479SSaeed Bishara 		writel((cs->size - 1) & 0xffff0000, base + WINDOW_SIZE(i));
1177ff7b0479SSaeed Bishara 
1178*77ff7a70SStefan Roese 		/* Fill the caching variables for later use */
1179*77ff7a70SStefan Roese 		xordev->win_start[i] = cs->base;
1180*77ff7a70SStefan Roese 		xordev->win_end[i] = cs->base + cs->size - 1;
1181*77ff7a70SStefan Roese 
1182ff7b0479SSaeed Bishara 		win_enable |= (1 << i);
1183ff7b0479SSaeed Bishara 		win_enable |= 3 << (16 + (2 * i));
1184ff7b0479SSaeed Bishara 	}
1185ff7b0479SSaeed Bishara 
1186ff7b0479SSaeed Bishara 	writel(win_enable, base + WINDOW_BAR_ENABLE(0));
1187ff7b0479SSaeed Bishara 	writel(win_enable, base + WINDOW_BAR_ENABLE(1));
1188c4b4b732SThomas Petazzoni 	writel(0, base + WINDOW_OVERRIDE_CTRL(0));
1189c4b4b732SThomas Petazzoni 	writel(0, base + WINDOW_OVERRIDE_CTRL(1));
1190ff7b0479SSaeed Bishara }
1191ff7b0479SSaeed Bishara 
1192ac5f0f3fSMarcin Wojtas static void
1193ac5f0f3fSMarcin Wojtas mv_xor_conf_mbus_windows_a3700(struct mv_xor_device *xordev)
1194ac5f0f3fSMarcin Wojtas {
1195ac5f0f3fSMarcin Wojtas 	void __iomem *base = xordev->xor_high_base;
1196ac5f0f3fSMarcin Wojtas 	u32 win_enable = 0;
1197ac5f0f3fSMarcin Wojtas 	int i;
1198ac5f0f3fSMarcin Wojtas 
1199ac5f0f3fSMarcin Wojtas 	for (i = 0; i < 8; i++) {
1200ac5f0f3fSMarcin Wojtas 		writel(0, base + WINDOW_BASE(i));
1201ac5f0f3fSMarcin Wojtas 		writel(0, base + WINDOW_SIZE(i));
1202ac5f0f3fSMarcin Wojtas 		if (i < 4)
1203ac5f0f3fSMarcin Wojtas 			writel(0, base + WINDOW_REMAP_HIGH(i));
1204ac5f0f3fSMarcin Wojtas 	}
1205ac5f0f3fSMarcin Wojtas 	/*
1206ac5f0f3fSMarcin Wojtas 	 * For the Armada 3700, open the default 4GB MBus window. The
1207ac5f0f3fSMarcin Wojtas 	 * DRAM-related configuration is done at the AXIS level.
1208ac5f0f3fSMarcin Wojtas 	 */
1209ac5f0f3fSMarcin Wojtas 	writel(0xffff0000, base + WINDOW_SIZE(0));
1210ac5f0f3fSMarcin Wojtas 	win_enable |= 1;
1211ac5f0f3fSMarcin Wojtas 	win_enable |= 3 << 16;
1212ac5f0f3fSMarcin Wojtas 
1213ac5f0f3fSMarcin Wojtas 	writel(win_enable, base + WINDOW_BAR_ENABLE(0));
1214ac5f0f3fSMarcin Wojtas 	writel(win_enable, base + WINDOW_BAR_ENABLE(1));
1215ac5f0f3fSMarcin Wojtas 	writel(0, base + WINDOW_OVERRIDE_CTRL(0));
1216ac5f0f3fSMarcin Wojtas 	writel(0, base + WINDOW_OVERRIDE_CTRL(1));
1217ac5f0f3fSMarcin Wojtas }
1218ac5f0f3fSMarcin Wojtas 
12198b648436SThomas Petazzoni /*
12208b648436SThomas Petazzoni  * Since this XOR driver is basically used only for RAID5, we don't
12218b648436SThomas Petazzoni  * need to care about synchronizing ->suspend with DMA activity,
12228b648436SThomas Petazzoni  * because the DMA engine will naturally be quiet due to the block
12238b648436SThomas Petazzoni  * devices being suspended.
12248b648436SThomas Petazzoni  */
12258b648436SThomas Petazzoni static int mv_xor_suspend(struct platform_device *pdev, pm_message_t state)
12268b648436SThomas Petazzoni {
12278b648436SThomas Petazzoni 	struct mv_xor_device *xordev = platform_get_drvdata(pdev);
12288b648436SThomas Petazzoni 	int i;
12298b648436SThomas Petazzoni 
12308b648436SThomas Petazzoni 	for (i = 0; i < MV_XOR_MAX_CHANNELS; i++) {
12318b648436SThomas Petazzoni 		struct mv_xor_chan *mv_chan = xordev->channels[i];
12328b648436SThomas Petazzoni 
12338b648436SThomas Petazzoni 		if (!mv_chan)
12348b648436SThomas Petazzoni 			continue;
12358b648436SThomas Petazzoni 
12368b648436SThomas Petazzoni 		mv_chan->saved_config_reg =
12378b648436SThomas Petazzoni 			readl_relaxed(XOR_CONFIG(mv_chan));
12388b648436SThomas Petazzoni 		mv_chan->saved_int_mask_reg =
12398b648436SThomas Petazzoni 			readl_relaxed(XOR_INTR_MASK(mv_chan));
12408b648436SThomas Petazzoni 	}
12418b648436SThomas Petazzoni 
12428b648436SThomas Petazzoni 	return 0;
12438b648436SThomas Petazzoni }
12448b648436SThomas Petazzoni 
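/*
 * Restore the per-channel configuration and interrupt-mask registers
 * saved in mv_xor_suspend(), then reprogram the MBus windows, which
 * also need to be set up again after a suspend/resume cycle.
 */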
12458b648436SThomas Petazzoni static int mv_xor_resume(struct platform_device *dev)
12468b648436SThomas Petazzoni {
12478b648436SThomas Petazzoni 	struct mv_xor_device *xordev = platform_get_drvdata(dev);
12488b648436SThomas Petazzoni 	const struct mbus_dram_target_info *dram;
12498b648436SThomas Petazzoni 	int i;
12508b648436SThomas Petazzoni 
12518b648436SThomas Petazzoni 	for (i = 0; i < MV_XOR_MAX_CHANNELS; i++) {
12528b648436SThomas Petazzoni 		struct mv_xor_chan *mv_chan = xordev->channels[i];
12538b648436SThomas Petazzoni 
12548b648436SThomas Petazzoni 		if (!mv_chan)
12558b648436SThomas Petazzoni 			continue;
12568b648436SThomas Petazzoni 
12578b648436SThomas Petazzoni 		writel_relaxed(mv_chan->saved_config_reg,
12588b648436SThomas Petazzoni 			       XOR_CONFIG(mv_chan));
12598b648436SThomas Petazzoni 		writel_relaxed(mv_chan->saved_int_mask_reg,
12608b648436SThomas Petazzoni 			       XOR_INTR_MASK(mv_chan));
12618b648436SThomas Petazzoni 	}
12628b648436SThomas Petazzoni 
1263ac5f0f3fSMarcin Wojtas 	if (xordev->xor_type == XOR_ARMADA_37XX) {
1264ac5f0f3fSMarcin Wojtas 		mv_xor_conf_mbus_windows_a3700(xordev);
1265ac5f0f3fSMarcin Wojtas 		return 0;
1266ac5f0f3fSMarcin Wojtas 	}
1267ac5f0f3fSMarcin Wojtas 
12688b648436SThomas Petazzoni 	dram = mv_mbus_dram_info();
12698b648436SThomas Petazzoni 	if (dram)
12708b648436SThomas Petazzoni 		mv_xor_conf_mbus_windows(xordev, dram);
12718b648436SThomas Petazzoni 
12728b648436SThomas Petazzoni 	return 0;
12738b648436SThomas Petazzoni }
12748b648436SThomas Petazzoni 
12756f166312SLior Amsalem static const struct of_device_id mv_xor_dt_ids[] = {
1276dd130c65SGregory CLEMENT 	{ .compatible = "marvell,orion-xor", .data = (void *)XOR_ORION },
1277dd130c65SGregory CLEMENT 	{ .compatible = "marvell,armada-380-xor", .data = (void *)XOR_ARMADA_38X },
1278ac5f0f3fSMarcin Wojtas 	{ .compatible = "marvell,armada-3700-xor", .data = (void *)XOR_ARMADA_37XX },
12796f166312SLior Amsalem 	{},
12806f166312SLior Amsalem };
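
/*
 * Sketch of a device-tree node matched by the table above (register
 * addresses and interrupt numbers are placeholders, not taken from a
 * real board file):
 *
 *	xor@60900 {
 *		compatible = "marvell,orion-xor";
 *		reg = <0x60900 0x100
 *		       0x60b00 0x100>;
 *
 *		xor00 {
 *			interrupts = <51>;
 *		};
 *		xor01 {
 *			interrupts = <52>;
 *		};
 *	};
 */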
12816f166312SLior Amsalem 
128277757291SThomas Petazzoni static unsigned int mv_xor_engine_count;
1283ff7b0479SSaeed Bishara 
1284c2714334SLinus Torvalds static int mv_xor_probe(struct platform_device *pdev)
1285ff7b0479SSaeed Bishara {
128663a9332bSAndrew Lunn 	const struct mbus_dram_target_info *dram;
1287297eedbaSThomas Petazzoni 	struct mv_xor_device *xordev;
1288d4adcc01SJingoo Han 	struct mv_xor_platform_data *pdata = dev_get_platdata(&pdev->dev);
1289ff7b0479SSaeed Bishara 	struct resource *res;
129077757291SThomas Petazzoni 	unsigned int max_engines, max_channels;
129160d151f3SThomas Petazzoni 	int i, ret;
1292ff7b0479SSaeed Bishara 
12931ba151cdSJoe Perches 	dev_notice(&pdev->dev, "Marvell shared XOR driver\n");
1294ff7b0479SSaeed Bishara 
1295297eedbaSThomas Petazzoni 	xordev = devm_kzalloc(&pdev->dev, sizeof(*xordev), GFP_KERNEL);
1296297eedbaSThomas Petazzoni 	if (!xordev)
1297ff7b0479SSaeed Bishara 		return -ENOMEM;
1298ff7b0479SSaeed Bishara 
1299ff7b0479SSaeed Bishara 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1300ff7b0479SSaeed Bishara 	if (!res)
1301ff7b0479SSaeed Bishara 		return -ENODEV;
1302ff7b0479SSaeed Bishara 
1303297eedbaSThomas Petazzoni 	xordev->xor_base = devm_ioremap(&pdev->dev, res->start,
13044de1ba15SH Hartley Sweeten 					resource_size(res));
1305297eedbaSThomas Petazzoni 	if (!xordev->xor_base)
1306ff7b0479SSaeed Bishara 		return -EBUSY;
1307ff7b0479SSaeed Bishara 
1308ff7b0479SSaeed Bishara 	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
1309ff7b0479SSaeed Bishara 	if (!res)
1310ff7b0479SSaeed Bishara 		return -ENODEV;
1311ff7b0479SSaeed Bishara 
1312297eedbaSThomas Petazzoni 	xordev->xor_high_base = devm_ioremap(&pdev->dev, res->start,
13134de1ba15SH Hartley Sweeten 					     resource_size(res));
1314297eedbaSThomas Petazzoni 	if (!xordev->xor_high_base)
1315ff7b0479SSaeed Bishara 		return -EBUSY;
1316ff7b0479SSaeed Bishara 
1317297eedbaSThomas Petazzoni 	platform_set_drvdata(pdev, xordev);
1318ff7b0479SSaeed Bishara 
1320dd130c65SGregory CLEMENT 	/*
1321dd130c65SGregory CLEMENT 	 * We need to know which type of XOR device we use before
1322dd130c65SGregory CLEMENT 	 * setting up. In the non-DT case it can only be the legacy one.
1323dd130c65SGregory CLEMENT 	 */
1324dd130c65SGregory CLEMENT 	xordev->xor_type = XOR_ORION;
1325dd130c65SGregory CLEMENT 	if (pdev->dev.of_node) {
1326dd130c65SGregory CLEMENT 		const struct of_device_id *of_id =
1327dd130c65SGregory CLEMENT 			of_match_device(mv_xor_dt_ids,
1328dd130c65SGregory CLEMENT 					&pdev->dev);
1329dd130c65SGregory CLEMENT 
1330dd130c65SGregory CLEMENT 		xordev->xor_type = (uintptr_t)of_id->data;
1331dd130c65SGregory CLEMENT 	}
1332dd130c65SGregory CLEMENT 
1333ff7b0479SSaeed Bishara 	/*
1334ff7b0479SSaeed Bishara 	 * (Re-)program MBUS remapping windows if we are asked to.
1335ff7b0479SSaeed Bishara 	 */
1336ac5f0f3fSMarcin Wojtas 	if (xordev->xor_type == XOR_ARMADA_37XX) {
1337ac5f0f3fSMarcin Wojtas 		mv_xor_conf_mbus_windows_a3700(xordev);
1338ac5f0f3fSMarcin Wojtas 	} else {
133963a9332bSAndrew Lunn 		dram = mv_mbus_dram_info();
134063a9332bSAndrew Lunn 		if (dram)
1341297eedbaSThomas Petazzoni 			mv_xor_conf_mbus_windows(xordev, dram);
1342ac5f0f3fSMarcin Wojtas 	}
1343ff7b0479SSaeed Bishara 
1344c510182bSAndrew Lunn 	/* Not all platforms can gate the clock, so it is not
1345c510182bSAndrew Lunn 	 * an error if the clock does not exist.
1346c510182bSAndrew Lunn 	 */
1347297eedbaSThomas Petazzoni 	xordev->clk = clk_get(&pdev->dev, NULL);
1348297eedbaSThomas Petazzoni 	if (!IS_ERR(xordev->clk))
1349297eedbaSThomas Petazzoni 		clk_prepare_enable(xordev->clk);
1350c510182bSAndrew Lunn 
135177757291SThomas Petazzoni 	/*
135277757291SThomas Petazzoni 	 * We don't want more than one channel per CPU, in order for
135377757291SThomas Petazzoni 	 * async_tx to perform well. So we limit the number of engines
135477757291SThomas Petazzoni 	 * and channels to take this constraint into account. Note
135577757291SThomas Petazzoni 	 * that we also want to use channels from separate engines
1356ac5f0f3fSMarcin Wojtas 	 * when possible. On the dual-CPU Armada 3700 SoC, which has
1357ac5f0f3fSMarcin Wojtas 	 * a single XOR engine, allow using both of its channels.
135877757291SThomas Petazzoni 	 */
135977757291SThomas Petazzoni 	max_engines = num_present_cpus();
1360ac5f0f3fSMarcin Wojtas 	if (xordev->xor_type == XOR_ARMADA_37XX)
1361ac5f0f3fSMarcin Wojtas 		max_channels = num_present_cpus();
1362ac5f0f3fSMarcin Wojtas 	else
136377757291SThomas Petazzoni 		max_channels = min_t(unsigned int,
136477757291SThomas Petazzoni 				     MV_XOR_MAX_CHANNELS,
136577757291SThomas Petazzoni 				     DIV_ROUND_UP(num_present_cpus(), 2));
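
	/*
	 * Worked example: on a 4-CPU SoC other than Armada 3700 this
	 * gives max_engines = 4 and max_channels =
	 * min(MV_XOR_MAX_CHANNELS, DIV_ROUND_UP(4, 2)), i.e. at most
	 * two channels for this engine.
	 */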
136677757291SThomas Petazzoni 
136777757291SThomas Petazzoni 	if (mv_xor_engine_count >= max_engines)
136877757291SThomas Petazzoni 		return 0;
136977757291SThomas Petazzoni 
1370f7d12ef5SThomas Petazzoni 	if (pdev->dev.of_node) {
1371f7d12ef5SThomas Petazzoni 		struct device_node *np;
1372f7d12ef5SThomas Petazzoni 		int i = 0;
1373f7d12ef5SThomas Petazzoni 
1374f7d12ef5SThomas Petazzoni 		for_each_child_of_node(pdev->dev.of_node, np) {
13750be8253fSRussell King 			struct mv_xor_chan *chan;
1376f7d12ef5SThomas Petazzoni 			dma_cap_mask_t cap_mask;
1377f7d12ef5SThomas Petazzoni 			int irq;
1378f7d12ef5SThomas Petazzoni 
137977757291SThomas Petazzoni 			if (i >= max_channels)
138077757291SThomas Petazzoni 				continue;
138177757291SThomas Petazzoni 
1382f7d12ef5SThomas Petazzoni 			dma_cap_zero(cap_mask);
1383f7d12ef5SThomas Petazzoni 			dma_cap_set(DMA_MEMCPY, cap_mask);
1384f7d12ef5SThomas Petazzoni 			dma_cap_set(DMA_XOR, cap_mask);
1385f7d12ef5SThomas Petazzoni 			dma_cap_set(DMA_INTERRUPT, cap_mask);
1386f7d12ef5SThomas Petazzoni 
1387f7d12ef5SThomas Petazzoni 			irq = irq_of_parse_and_map(np, 0);
1388f8eb9e7dSThomas Petazzoni 			if (!irq) {
1389f8eb9e7dSThomas Petazzoni 				ret = -ENODEV;
1390f7d12ef5SThomas Petazzoni 				goto err_channel_add;
1391f7d12ef5SThomas Petazzoni 			}
1392f7d12ef5SThomas Petazzoni 
13930be8253fSRussell King 			chan = mv_xor_channel_add(xordev, pdev, i,
1394dd130c65SGregory CLEMENT 						  cap_mask, irq);
13950be8253fSRussell King 			if (IS_ERR(chan)) {
13960be8253fSRussell King 				ret = PTR_ERR(chan);
1397f7d12ef5SThomas Petazzoni 				irq_dispose_mapping(irq);
1398f7d12ef5SThomas Petazzoni 				goto err_channel_add;
1399f7d12ef5SThomas Petazzoni 			}
1400f7d12ef5SThomas Petazzoni 
14010be8253fSRussell King 			xordev->channels[i] = chan;
1402f7d12ef5SThomas Petazzoni 			i++;
1403f7d12ef5SThomas Petazzoni 		}
1404f7d12ef5SThomas Petazzoni 	} else if (pdata && pdata->channels) {
140577757291SThomas Petazzoni 		for (i = 0; i < max_channels; i++) {
1406e39f6ec1SThomas Petazzoni 			struct mv_xor_channel_data *cd;
14070be8253fSRussell King 			struct mv_xor_chan *chan;
140860d151f3SThomas Petazzoni 			int irq;
140960d151f3SThomas Petazzoni 
141060d151f3SThomas Petazzoni 			cd = &pdata->channels[i];
141560d151f3SThomas Petazzoni 
141660d151f3SThomas Petazzoni 			irq = platform_get_irq(pdev, i);
141760d151f3SThomas Petazzoni 			if (irq < 0) {
141860d151f3SThomas Petazzoni 				ret = irq;
141960d151f3SThomas Petazzoni 				goto err_channel_add;
142060d151f3SThomas Petazzoni 			}
142160d151f3SThomas Petazzoni 
14220be8253fSRussell King 			chan = mv_xor_channel_add(xordev, pdev, i,
1423dd130c65SGregory CLEMENT 						  cd->cap_mask, irq);
14240be8253fSRussell King 			if (IS_ERR(chan)) {
14250be8253fSRussell King 				ret = PTR_ERR(chan);
142660d151f3SThomas Petazzoni 				goto err_channel_add;
142760d151f3SThomas Petazzoni 			}
14280be8253fSRussell King 
14290be8253fSRussell King 			xordev->channels[i] = chan;
143060d151f3SThomas Petazzoni 		}
143160d151f3SThomas Petazzoni 	}
143260d151f3SThomas Petazzoni 
1433ff7b0479SSaeed Bishara 	return 0;
143460d151f3SThomas Petazzoni 
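/*
 * Error unwind: remove every channel that was successfully added
 * (disposing of its IRQ mapping in the DT case) before releasing
 * the clock.
 */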
143560d151f3SThomas Petazzoni err_channel_add:
143660d151f3SThomas Petazzoni 	for (i = 0; i < MV_XOR_MAX_CHANNELS; i++)
1437f7d12ef5SThomas Petazzoni 		if (xordev->channels[i]) {
1438ab6e439fSThomas Petazzoni 			mv_xor_channel_remove(xordev->channels[i]);
1439f7d12ef5SThomas Petazzoni 			if (pdev->dev.of_node)
1440f7d12ef5SThomas Petazzoni 				irq_dispose_mapping(xordev->channels[i]->irq);
1441f7d12ef5SThomas Petazzoni 		}
144260d151f3SThomas Petazzoni 
1443dab92064SThomas Petazzoni 	if (!IS_ERR(xordev->clk)) {
1444297eedbaSThomas Petazzoni 		clk_disable_unprepare(xordev->clk);
1445297eedbaSThomas Petazzoni 		clk_put(xordev->clk);
1446dab92064SThomas Petazzoni 	}
1447dab92064SThomas Petazzoni 
144860d151f3SThomas Petazzoni 	return ret;
1449ff7b0479SSaeed Bishara }
1450ff7b0479SSaeed Bishara 
1451ff7b0479SSaeed Bishara static struct platform_driver mv_xor_driver = {
1452ff7b0479SSaeed Bishara 	.probe		= mv_xor_probe,
14538b648436SThomas Petazzoni 	.suspend        = mv_xor_suspend,
14548b648436SThomas Petazzoni 	.resume         = mv_xor_resume,
1455ff7b0479SSaeed Bishara 	.driver		= {
1456ff7b0479SSaeed Bishara 		.name	        = MV_XOR_NAME,
1457f7d12ef5SThomas Petazzoni 		.of_match_table = of_match_ptr(mv_xor_dt_ids),
1458ff7b0479SSaeed Bishara 	},
1459ff7b0479SSaeed Bishara };
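
/*
 * In the driver definition above, of_match_ptr() resolves to NULL
 * when CONFIG_OF is disabled, so the match table is only referenced
 * on device-tree enabled builds.
 */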
1460ff7b0479SSaeed Bishara 
1461ff7b0479SSaeed Bishara 
1462ff7b0479SSaeed Bishara static int __init mv_xor_init(void)
1463ff7b0479SSaeed Bishara {
146461971656SThomas Petazzoni 	return platform_driver_register(&mv_xor_driver);
1465ff7b0479SSaeed Bishara }
146625cf68daSPaul Gortmaker device_initcall(mv_xor_init);
1467ff7b0479SSaeed Bishara 
146825cf68daSPaul Gortmaker /*
1469ff7b0479SSaeed Bishara MODULE_AUTHOR("Saeed Bishara <saeed@marvell.com>");
1470ff7b0479SSaeed Bishara MODULE_DESCRIPTION("DMA engine driver for Marvell's XOR engine");
1471ff7b0479SSaeed Bishara MODULE_LICENSE("GPL");
147225cf68daSPaul Gortmaker */
1473