// SPDX-License-Identifier: GPL-2.0-only
/*
 * offload engine driver for the Marvell XOR engine
 * Copyright (C) 2007, 2008, Marvell International Ltd.
 */

#include <linux/init.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/memory.h>
#include <linux/clk.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/irqdomain.h>
#include <linux/cpumask.h>
#include <linux/platform_data/dma-mv_xor.h>

#include "dmaengine.h"
#include "mv_xor.h"

enum mv_xor_type {
	XOR_ORION,
	XOR_ARMADA_38X,
	XOR_ARMADA_37XX,
};

enum mv_xor_mode {
	XOR_MODE_IN_REG,
	XOR_MODE_IN_DESC,
};
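
/*
 * Older Orion XOR engines take the operation (XOR vs. MEMCPY) from the
 * channel configuration register (see mv_chan_set_mode()), while the
 * Armada 38x and newer engines encode the operation in each hardware
 * descriptor instead (see mv_desc_set_mode()). The choice is recorded
 * per channel in mv_xor_channel_add().
 */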

static void mv_xor_issue_pending(struct dma_chan *chan);

#define to_mv_xor_chan(chan)		\
	container_of(chan, struct mv_xor_chan, dmachan)

#define to_mv_xor_slot(tx)		\
	container_of(tx, struct mv_xor_desc_slot, async_tx)

#define mv_chan_to_devp(chan)           \
	((chan)->dmadev.dev)

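/*
 * A slot's hardware descriptor is handed over to the engine marked
 * XOR_DESC_DMA_OWNED; completion is detected later by checking for
 * XOR_DESC_SUCCESS in the status word (see mv_chan_slot_cleanup()).
 */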
static void mv_desc_init(struct mv_xor_desc_slot *desc,
			 dma_addr_t addr, u32 byte_count,
			 enum dma_ctrl_flags flags)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;

	hw_desc->status = XOR_DESC_DMA_OWNED;
	hw_desc->phy_next_desc = 0;
	/* Enable end-of-descriptor interrupts only for DMA_PREP_INTERRUPT */
	hw_desc->desc_command = (flags & DMA_PREP_INTERRUPT) ?
				XOR_DESC_EOD_INT_EN : 0;
	hw_desc->phy_dest_addr = addr;
	hw_desc->byte_count = byte_count;
}

static void mv_desc_set_mode(struct mv_xor_desc_slot *desc)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;

	switch (desc->type) {
	case DMA_XOR:
	case DMA_INTERRUPT:
		hw_desc->desc_command |= XOR_DESC_OPERATION_XOR;
		break;
	case DMA_MEMCPY:
		hw_desc->desc_command |= XOR_DESC_OPERATION_MEMCPY;
		break;
	default:
		BUG();
		return;
	}
}

static void mv_desc_set_next_desc(struct mv_xor_desc_slot *desc,
				  u32 next_desc_addr)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;
	BUG_ON(hw_desc->phy_next_desc);
	hw_desc->phy_next_desc = next_desc_addr;
}

static void mv_desc_set_src_addr(struct mv_xor_desc_slot *desc,
				 int index, dma_addr_t addr)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;
	hw_desc->phy_src_addr[mv_phy_src_idx(index)] = addr;
	if (desc->type == DMA_XOR)
		hw_desc->desc_command |= (1 << index);
}

static u32 mv_chan_get_current_desc(struct mv_xor_chan *chan)
{
	return readl_relaxed(XOR_CURR_DESC(chan));
}

static void mv_chan_set_next_descriptor(struct mv_xor_chan *chan,
					u32 next_desc_addr)
{
	writel_relaxed(next_desc_addr, XOR_NEXT_DESC(chan));
}

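/*
 * The interrupt cause and mask registers are shared between the (up to
 * two) channels of an engine; chan->idx selects the low or high 16-bit
 * half of those registers, hence the "idx * 16" shifts below.
 */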
static void mv_chan_unmask_interrupts(struct mv_xor_chan *chan)
{
	u32 val = readl_relaxed(XOR_INTR_MASK(chan));
	val |= XOR_INTR_MASK_VALUE << (chan->idx * 16);
	writel_relaxed(val, XOR_INTR_MASK(chan));
}

static u32 mv_chan_get_intr_cause(struct mv_xor_chan *chan)
{
	u32 intr_cause = readl_relaxed(XOR_INTR_CAUSE(chan));
	intr_cause = (intr_cause >> (chan->idx * 16)) & 0xFFFF;
	return intr_cause;
}

static void mv_chan_clear_eoc_cause(struct mv_xor_chan *chan)
{
	u32 val;

	val = XOR_INT_END_OF_DESC | XOR_INT_END_OF_CHAIN | XOR_INT_STOPPED;
	val = ~(val << (chan->idx * 16));
	dev_dbg(mv_chan_to_devp(chan), "%s, val 0x%08x\n", __func__, val);
	writel_relaxed(val, XOR_INTR_CAUSE(chan));
}

static void mv_chan_clear_err_status(struct mv_xor_chan *chan)
{
	u32 val = 0xFFFF0000 >> (chan->idx * 16);
	writel_relaxed(val, XOR_INTR_CAUSE(chan));
}

static void mv_chan_set_mode(struct mv_xor_chan *chan,
			     u32 op_mode)
{
	u32 config = readl_relaxed(XOR_CONFIG(chan));

	config &= ~0x7;
	config |= op_mode;

#if defined(__BIG_ENDIAN)
	config |= XOR_DESCRIPTOR_SWAP;
#else
	config &= ~XOR_DESCRIPTOR_SWAP;
#endif

	writel_relaxed(config, XOR_CONFIG(chan));
}

static void mv_chan_activate(struct mv_xor_chan *chan)
{
	dev_dbg(mv_chan_to_devp(chan), " activate chan.\n");

	/* writel ensures all descriptors are flushed before activation */
	writel(BIT(0), XOR_ACTIVATION(chan));
}

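/*
 * Bits [5:4] of the activation register hold the channel status; a
 * value of 1 means the channel is busy executing a descriptor chain.
 */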
static char mv_chan_is_busy(struct mv_xor_chan *chan)
{
	u32 state = readl_relaxed(XOR_ACTIVATION(chan));

	state = (state >> 4) & 0x3;

	return (state == 1) ? 1 : 0;
}

/*
 * mv_chan_start_new_chain - program the engine to operate on new
 * chain headed by sw_desc
 * Caller must hold &mv_chan->lock while calling this function
 */
static void mv_chan_start_new_chain(struct mv_xor_chan *mv_chan,
				    struct mv_xor_desc_slot *sw_desc)
{
	dev_dbg(mv_chan_to_devp(mv_chan), "%s %d: sw_desc %p\n",
		__func__, __LINE__, sw_desc);

	/* set the hardware chain */
	mv_chan_set_next_descriptor(mv_chan, sw_desc->async_tx.phys);

	mv_chan->pending++;
	mv_xor_issue_pending(&mv_chan->dmachan);
}

static dma_cookie_t
mv_desc_run_tx_complete_actions(struct mv_xor_desc_slot *desc,
				struct mv_xor_chan *mv_chan,
				dma_cookie_t cookie)
{
	BUG_ON(desc->async_tx.cookie < 0);

	if (desc->async_tx.cookie > 0) {
		cookie = desc->async_tx.cookie;

		dma_descriptor_unmap(&desc->async_tx);
		/* call the callback (must not sleep or submit new
		 * operations to this channel)
		 */
		dmaengine_desc_get_callback_invoke(&desc->async_tx, NULL);
	}

	/* run dependent operations */
	dma_run_dependencies(&desc->async_tx);

	return cookie;
}

static int
mv_chan_clean_completed_slots(struct mv_xor_chan *mv_chan)
{
	struct mv_xor_desc_slot *iter, *_iter;

	dev_dbg(mv_chan_to_devp(mv_chan), "%s %d\n", __func__, __LINE__);
	list_for_each_entry_safe(iter, _iter, &mv_chan->completed_slots,
				 node) {

		if (async_tx_test_ack(&iter->async_tx)) {
			list_move_tail(&iter->node, &mv_chan->free_slots);
			if (!list_empty(&iter->sg_tx_list)) {
				list_splice_tail_init(&iter->sg_tx_list,
							&mv_chan->free_slots);
			}
		}
	}
	return 0;
}

static int
mv_desc_clean_slot(struct mv_xor_desc_slot *desc,
		   struct mv_xor_chan *mv_chan)
{
	dev_dbg(mv_chan_to_devp(mv_chan), "%s %d: desc %p flags %d\n",
		__func__, __LINE__, desc, desc->async_tx.flags);

	/* the client is allowed to attach dependent operations
	 * until 'ack' is set
	 */
	if (!async_tx_test_ack(&desc->async_tx)) {
		/* move this slot to the completed_slots */
		list_move_tail(&desc->node, &mv_chan->completed_slots);
		if (!list_empty(&desc->sg_tx_list)) {
			list_splice_tail_init(&desc->sg_tx_list,
					      &mv_chan->completed_slots);
		}
	} else {
		list_move_tail(&desc->node, &mv_chan->free_slots);
		if (!list_empty(&desc->sg_tx_list)) {
			list_splice_tail_init(&desc->sg_tx_list,
					      &mv_chan->free_slots);
		}
	}

	return 0;
}

/* This function must be called with the mv_xor_chan spinlock held */
static void mv_chan_slot_cleanup(struct mv_xor_chan *mv_chan)
{
	struct mv_xor_desc_slot *iter, *_iter;
	dma_cookie_t cookie = 0;
	int busy = mv_chan_is_busy(mv_chan);
	u32 current_desc = mv_chan_get_current_desc(mv_chan);
	int current_cleaned = 0;
	struct mv_xor_desc *hw_desc;

	dev_dbg(mv_chan_to_devp(mv_chan), "%s %d\n", __func__, __LINE__);
	dev_dbg(mv_chan_to_devp(mv_chan), "current_desc %x\n", current_desc);
	mv_chan_clean_completed_slots(mv_chan);

	/* free completed slots from the chain starting with
	 * the oldest descriptor
	 */

	list_for_each_entry_safe(iter, _iter, &mv_chan->chain,
				 node) {

		/* clean finished descriptors */
		hw_desc = iter->hw_desc;
		if (hw_desc->status & XOR_DESC_SUCCESS) {
			cookie = mv_desc_run_tx_complete_actions(iter, mv_chan,
								 cookie);

			/* done processing desc, clean slot */
			mv_desc_clean_slot(iter, mv_chan);

			/* break if we cleaned the current descriptor */
			if (iter->async_tx.phys == current_desc) {
				current_cleaned = 1;
				break;
			}
		} else {
			if (iter->async_tx.phys == current_desc) {
				current_cleaned = 0;
				break;
			}
		}
	}

	if ((busy == 0) && !list_empty(&mv_chan->chain)) {
		if (current_cleaned) {
			/*
			 * current descriptor cleaned and removed, run
			 * from list head
			 */
			iter = list_entry(mv_chan->chain.next,
					  struct mv_xor_desc_slot,
					  node);
			mv_chan_start_new_chain(mv_chan, iter);
		} else {
			if (!list_is_last(&iter->node, &mv_chan->chain)) {
				/*
				 * descriptors are still waiting after
				 * current, trigger them
				 */
				iter = list_entry(iter->node.next,
						  struct mv_xor_desc_slot,
						  node);
				mv_chan_start_new_chain(mv_chan, iter);
			} else {
				/*
				 * some descriptors are still waiting
				 * to be cleaned
				 */
				tasklet_schedule(&mv_chan->irq_tasklet);
			}
		}
	}

	if (cookie > 0)
		mv_chan->dmachan.completed_cookie = cookie;
}

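/*
 * Descriptor cleanup is deferred to this tasklet, which takes the
 * channel lock as required by mv_chan_slot_cleanup().
 */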
static void mv_xor_tasklet(struct tasklet_struct *t)
{
	struct mv_xor_chan *chan = from_tasklet(chan, t, irq_tasklet);

	spin_lock(&chan->lock);
	mv_chan_slot_cleanup(chan);
	spin_unlock(&chan->lock);
}

static struct mv_xor_desc_slot *
mv_chan_alloc_slot(struct mv_xor_chan *mv_chan)
{
	struct mv_xor_desc_slot *iter;

	spin_lock_bh(&mv_chan->lock);

	if (!list_empty(&mv_chan->free_slots)) {
		iter = list_first_entry(&mv_chan->free_slots,
					struct mv_xor_desc_slot,
					node);

		list_move_tail(&iter->node, &mv_chan->allocated_slots);

		spin_unlock_bh(&mv_chan->lock);

		/* pre-ack descriptor */
		async_tx_ack(&iter->async_tx);
		iter->async_tx.cookie = -EBUSY;

		return iter;

	}

	spin_unlock_bh(&mv_chan->lock);

	/* try to free some slots if the allocation fails */
	tasklet_schedule(&mv_chan->irq_tasklet);

	return NULL;
}

/************************ DMA engine API functions ****************************/
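
/*
 * A dmaengine client typically drives this channel in the sequence
 * below (illustrative sketch only, mirroring mv_chan_xor_self_test()
 * further down; error handling omitted):
 *
 *	tx = mv_xor_prep_dma_xor(chan, dest_dma, src_dmas, src_cnt,
 *				 len, flags);
 *	cookie = mv_xor_tx_submit(tx);
 *	mv_xor_issue_pending(chan);
 *	...
 *	status = mv_xor_status(chan, cookie, NULL);
 */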
static dma_cookie_t
mv_xor_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct mv_xor_desc_slot *sw_desc = to_mv_xor_slot(tx);
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(tx->chan);
	struct mv_xor_desc_slot *old_chain_tail;
	dma_cookie_t cookie;
	int new_hw_chain = 1;

	dev_dbg(mv_chan_to_devp(mv_chan),
		"%s sw_desc %p: async_tx %p\n",
		__func__, sw_desc, &sw_desc->async_tx);

	spin_lock_bh(&mv_chan->lock);
	cookie = dma_cookie_assign(tx);

	if (list_empty(&mv_chan->chain))
		list_move_tail(&sw_desc->node, &mv_chan->chain);
	else {
		new_hw_chain = 0;

		old_chain_tail = list_entry(mv_chan->chain.prev,
					    struct mv_xor_desc_slot,
					    node);
		list_move_tail(&sw_desc->node, &mv_chan->chain);

		dev_dbg(mv_chan_to_devp(mv_chan), "Append to last desc %pa\n",
			&old_chain_tail->async_tx.phys);

		/* fix up the hardware chain */
		mv_desc_set_next_desc(old_chain_tail, sw_desc->async_tx.phys);

		/* if the channel is not busy */
		if (!mv_chan_is_busy(mv_chan)) {
			u32 current_desc = mv_chan_get_current_desc(mv_chan);
			/*
			 * and the current desc is the end of the chain before
			 * the append, then we need to start the channel
			 */
			if (current_desc == old_chain_tail->async_tx.phys)
				new_hw_chain = 1;
		}
	}

	if (new_hw_chain)
		mv_chan_start_new_chain(mv_chan, sw_desc);

	spin_unlock_bh(&mv_chan->lock);

	return cookie;
}

/* returns the number of allocated descriptors */
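/*
 * The slots are carved out of a single coherent pool allocated in
 * mv_xor_channel_add(): slot 'idx' uses the hardware descriptor at
 * offset idx * MV_XOR_SLOT_SIZE in both the CPU (dma_desc_pool_virt)
 * and DMA (dma_desc_pool) views of that pool.
 */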
static int mv_xor_alloc_chan_resources(struct dma_chan *chan)
{
	void *virt_desc;
	dma_addr_t dma_desc;
	int idx;
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
	struct mv_xor_desc_slot *slot = NULL;
	int num_descs_in_pool = MV_XOR_POOL_SIZE/MV_XOR_SLOT_SIZE;

	/* Allocate descriptor slots */
	idx = mv_chan->slots_allocated;
	while (idx < num_descs_in_pool) {
		slot = kzalloc(sizeof(*slot), GFP_KERNEL);
		if (!slot) {
			dev_info(mv_chan_to_devp(mv_chan),
				 "channel only initialized %d descriptor slots",
				 idx);
			break;
		}
		virt_desc = mv_chan->dma_desc_pool_virt;
		slot->hw_desc = virt_desc + idx * MV_XOR_SLOT_SIZE;

		dma_async_tx_descriptor_init(&slot->async_tx, chan);
		slot->async_tx.tx_submit = mv_xor_tx_submit;
		INIT_LIST_HEAD(&slot->node);
		INIT_LIST_HEAD(&slot->sg_tx_list);
		dma_desc = mv_chan->dma_desc_pool;
		slot->async_tx.phys = dma_desc + idx * MV_XOR_SLOT_SIZE;
		slot->idx = idx++;

		spin_lock_bh(&mv_chan->lock);
		mv_chan->slots_allocated = idx;
		list_add_tail(&slot->node, &mv_chan->free_slots);
		spin_unlock_bh(&mv_chan->lock);
	}

	dev_dbg(mv_chan_to_devp(mv_chan),
		"allocated %d descriptor slots\n",
		mv_chan->slots_allocated);

	return mv_chan->slots_allocated ? : -ENOMEM;
}

/*
 * Check if source or destination is a PCIe/IO address (non-SDRAM) and add
 * a new MBus window if necessary. Use a cache for these checks so that
 * the MMIO-mapped registers don't have to be accessed every time,
 * to speed up this process.
 */
static int mv_xor_add_io_win(struct mv_xor_chan *mv_chan, u32 addr)
{
	struct mv_xor_device *xordev = mv_chan->xordev;
	void __iomem *base = mv_chan->mmr_high_base;
	u32 win_enable;
	u32 size;
	u8 target, attr;
	int ret;
	int i;

	/* Nothing needs to get done for the Armada 3700 */
	if (xordev->xor_type == XOR_ARMADA_37XX)
		return 0;

	/*
	 * Loop over the cached windows to check if the requested area
	 * is already mapped. If this is the case, nothing needs to be
	 * done and we can return.
	 */
	for (i = 0; i < WINDOW_COUNT; i++) {
		if (addr >= xordev->win_start[i] &&
		    addr <= xordev->win_end[i]) {
			/* Window is already mapped */
			return 0;
		}
	}

	/*
	 * The window is not mapped, so we need to create the new mapping
	 */

	/* If no IO window is found, the addr has to be located in SDRAM */
	ret = mvebu_mbus_get_io_win_info(addr, &size, &target, &attr);
	if (ret < 0)
		return 0;

	/*
	 * Mask the base addr 'addr' according to 'size' read back from the
	 * MBus window. Otherwise we might end up with an address located
	 * somewhere in the middle of this area here.
	 */
	size -= 1;
	addr &= ~size;

	/*
	 * Reading either of the two enable registers is enough, as they are
	 * always programmed to identical values.
	 */
	win_enable = readl(base + WINDOW_BAR_ENABLE(0));

	/* Set 'i' to the first free window to write the new values to */
	i = ffs(~win_enable) - 1;
	if (i >= WINDOW_COUNT)
		return -ENOMEM;

	writel((addr & 0xffff0000) | (attr << 8) | target,
	       base + WINDOW_BASE(i));
	writel(size & 0xffff0000, base + WINDOW_SIZE(i));

	/* Fill the caching variables for later use */
	xordev->win_start[i] = addr;
	xordev->win_end[i] = addr + size;

	win_enable |= (1 << i);
	win_enable |= 3 << (16 + (2 * i));
	writel(win_enable, base + WINDOW_BAR_ENABLE(0));
	writel(win_enable, base + WINDOW_BAR_ENABLE(1));

	return 0;
}

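/*
 * A single hardware descriptor covers the whole transfer: the length is
 * bounded by MV_XOR_MAX_BYTE_COUNT (enforced by the BUG_ON below) and
 * the number of sources by the max_xor = 8 advertised in
 * mv_xor_channel_add().
 */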
static struct dma_async_tx_descriptor *
mv_xor_prep_dma_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
		    unsigned int src_cnt, size_t len, unsigned long flags)
{
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
	struct mv_xor_desc_slot *sw_desc;
	int ret;

	if (unlikely(len < MV_XOR_MIN_BYTE_COUNT))
		return NULL;

	BUG_ON(len > MV_XOR_MAX_BYTE_COUNT);

	dev_dbg(mv_chan_to_devp(mv_chan),
		"%s src_cnt: %d len: %zu dest %pad flags: %ld\n",
		__func__, src_cnt, len, &dest, flags);

	/* Check if a new window needs to get added for 'dest' */
	ret = mv_xor_add_io_win(mv_chan, dest);
	if (ret)
		return NULL;

	sw_desc = mv_chan_alloc_slot(mv_chan);
	if (sw_desc) {
		sw_desc->type = DMA_XOR;
		sw_desc->async_tx.flags = flags;
		mv_desc_init(sw_desc, dest, len, flags);
		if (mv_chan->op_in_desc == XOR_MODE_IN_DESC)
			mv_desc_set_mode(sw_desc);
		while (src_cnt--) {
			/* Check if a new window needs to get added for 'src' */
			ret = mv_xor_add_io_win(mv_chan, src[src_cnt]);
			if (ret)
				return NULL;
			mv_desc_set_src_addr(sw_desc, src_cnt, src[src_cnt]);
		}
	}

	dev_dbg(mv_chan_to_devp(mv_chan),
		"%s sw_desc %p async_tx %p \n",
		__func__, sw_desc, &sw_desc->async_tx);
	return sw_desc ? &sw_desc->async_tx : NULL;
}

static struct dma_async_tx_descriptor *
mv_xor_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
		size_t len, unsigned long flags)
{
	/*
	 * A MEMCPY operation is identical to an XOR operation with only
	 * a single source address.
	 */
	return mv_xor_prep_dma_xor(chan, dest, &src, 1, len, flags);
}

static struct dma_async_tx_descriptor *
mv_xor_prep_dma_interrupt(struct dma_chan *chan, unsigned long flags)
{
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
	dma_addr_t src, dest;
	size_t len;

	src = mv_chan->dummy_src_addr;
	dest = mv_chan->dummy_dst_addr;
	len = MV_XOR_MIN_BYTE_COUNT;

	/*
	 * We implement the DMA_INTERRUPT operation as a minimum sized
	 * XOR operation with a single dummy source address.
	 */
	return mv_xor_prep_dma_xor(chan, dest, &src, 1, len, flags);
}

static void mv_xor_free_chan_resources(struct dma_chan *chan)
{
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
	struct mv_xor_desc_slot *iter, *_iter;
	int in_use_descs = 0;

	spin_lock_bh(&mv_chan->lock);

	mv_chan_slot_cleanup(mv_chan);

	list_for_each_entry_safe(iter, _iter, &mv_chan->chain,
					node) {
		in_use_descs++;
		list_move_tail(&iter->node, &mv_chan->free_slots);
	}
	list_for_each_entry_safe(iter, _iter, &mv_chan->completed_slots,
				 node) {
		in_use_descs++;
		list_move_tail(&iter->node, &mv_chan->free_slots);
	}
	list_for_each_entry_safe(iter, _iter, &mv_chan->allocated_slots,
				 node) {
		in_use_descs++;
		list_move_tail(&iter->node, &mv_chan->free_slots);
	}
	list_for_each_entry_safe_reverse(
		iter, _iter, &mv_chan->free_slots, node) {
		list_del(&iter->node);
		kfree(iter);
		mv_chan->slots_allocated--;
	}

	dev_dbg(mv_chan_to_devp(mv_chan), "%s slots_allocated %d\n",
		__func__, mv_chan->slots_allocated);
	spin_unlock_bh(&mv_chan->lock);

	if (in_use_descs)
		dev_err(mv_chan_to_devp(mv_chan),
			"freeing %d in use descriptors!\n", in_use_descs);
}

/**
 * mv_xor_status - poll the status of an XOR transaction
 * @chan: XOR channel handle
 * @cookie: XOR transaction identifier
 * @txstate: XOR transaction state holder (or NULL)
 */
static enum dma_status mv_xor_status(struct dma_chan *chan,
					  dma_cookie_t cookie,
					  struct dma_tx_state *txstate)
{
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
	enum dma_status ret;

	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret == DMA_COMPLETE)
		return ret;

	spin_lock_bh(&mv_chan->lock);
	mv_chan_slot_cleanup(mv_chan);
	spin_unlock_bh(&mv_chan->lock);

	return dma_cookie_status(chan, cookie, txstate);
}

static void mv_chan_dump_regs(struct mv_xor_chan *chan)
{
	u32 val;

	val = readl_relaxed(XOR_CONFIG(chan));
	dev_err(mv_chan_to_devp(chan), "config       0x%08x\n", val);

	val = readl_relaxed(XOR_ACTIVATION(chan));
	dev_err(mv_chan_to_devp(chan), "activation   0x%08x\n", val);

	val = readl_relaxed(XOR_INTR_CAUSE(chan));
	dev_err(mv_chan_to_devp(chan), "intr cause   0x%08x\n", val);

	val = readl_relaxed(XOR_INTR_MASK(chan));
	dev_err(mv_chan_to_devp(chan), "intr mask    0x%08x\n", val);

	val = readl_relaxed(XOR_ERROR_CAUSE(chan));
	dev_err(mv_chan_to_devp(chan), "error cause  0x%08x\n", val);

	val = readl_relaxed(XOR_ERROR_ADDR(chan));
	dev_err(mv_chan_to_devp(chan), "error addr   0x%08x\n", val);
}

static void mv_chan_err_interrupt_handler(struct mv_xor_chan *chan,
					  u32 intr_cause)
{
	if (intr_cause & XOR_INT_ERR_DECODE) {
		dev_dbg(mv_chan_to_devp(chan), "ignoring address decode error\n");
		return;
	}

	dev_err(mv_chan_to_devp(chan), "error on chan %d. intr cause 0x%08x\n",
		chan->idx, intr_cause);

	mv_chan_dump_regs(chan);
	WARN_ON(1);
}

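/*
 * Hard interrupt handler: error causes are handled immediately, while
 * normal end-of-descriptor/end-of-chain causes are acknowledged here
 * and the actual descriptor cleanup is deferred to the tasklet.
 */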
static irqreturn_t mv_xor_interrupt_handler(int irq, void *data)
{
	struct mv_xor_chan *chan = data;
	u32 intr_cause = mv_chan_get_intr_cause(chan);

	dev_dbg(mv_chan_to_devp(chan), "intr cause %x\n", intr_cause);

	if (intr_cause & XOR_INTR_ERRORS)
		mv_chan_err_interrupt_handler(chan, intr_cause);

	tasklet_schedule(&chan->irq_tasklet);

	mv_chan_clear_eoc_cause(chan);

	return IRQ_HANDLED;
}

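/*
 * The channel is only (re)activated once at least MV_XOR_THRESHOLD
 * descriptors are pending; mv_chan_start_new_chain() increments the
 * pending count and calls here after chaining each new descriptor.
 */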
static void mv_xor_issue_pending(struct dma_chan *chan)
{
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);

	if (mv_chan->pending >= MV_XOR_THRESHOLD) {
		mv_chan->pending = 0;
		mv_chan_activate(mv_chan);
	}
}

/*
 * Perform a transaction to verify the HW works.
 */

static int mv_chan_memcpy_self_test(struct mv_xor_chan *mv_chan)
{
	int i, ret;
	void *src, *dest;
	dma_addr_t src_dma, dest_dma;
	struct dma_chan *dma_chan;
	dma_cookie_t cookie;
	struct dma_async_tx_descriptor *tx;
	struct dmaengine_unmap_data *unmap;
	int err = 0;

	src = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!src)
		return -ENOMEM;

	dest = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!dest) {
		kfree(src);
		return -ENOMEM;
	}

	/* Fill in src buffer */
	for (i = 0; i < PAGE_SIZE; i++)
		((u8 *) src)[i] = (u8)i;

	dma_chan = &mv_chan->dmachan;
	if (mv_xor_alloc_chan_resources(dma_chan) < 1) {
		err = -ENODEV;
		goto out;
	}

	unmap = dmaengine_get_unmap_data(dma_chan->device->dev, 2, GFP_KERNEL);
	if (!unmap) {
		err = -ENOMEM;
		goto free_resources;
	}

	src_dma = dma_map_page(dma_chan->device->dev, virt_to_page(src),
			       offset_in_page(src), PAGE_SIZE,
			       DMA_TO_DEVICE);
	unmap->addr[0] = src_dma;

	ret = dma_mapping_error(dma_chan->device->dev, src_dma);
	if (ret) {
		err = -ENOMEM;
		goto free_resources;
	}
	unmap->to_cnt = 1;

	dest_dma = dma_map_page(dma_chan->device->dev, virt_to_page(dest),
				offset_in_page(dest), PAGE_SIZE,
				DMA_FROM_DEVICE);
	unmap->addr[1] = dest_dma;

	ret = dma_mapping_error(dma_chan->device->dev, dest_dma);
	if (ret) {
		err = -ENOMEM;
		goto free_resources;
	}
	unmap->from_cnt = 1;
	unmap->len = PAGE_SIZE;

	tx = mv_xor_prep_dma_memcpy(dma_chan, dest_dma, src_dma,
				    PAGE_SIZE, 0);
	if (!tx) {
		dev_err(dma_chan->device->dev,
			"Self-test cannot prepare operation, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

	cookie = mv_xor_tx_submit(tx);
	if (dma_submit_error(cookie)) {
		dev_err(dma_chan->device->dev,
			"Self-test submit error, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

	mv_xor_issue_pending(dma_chan);
	async_tx_ack(tx);
	msleep(1);

	if (mv_xor_status(dma_chan, cookie, NULL) !=
	    DMA_COMPLETE) {
		dev_err(dma_chan->device->dev,
			"Self-test copy timed out, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

	dma_sync_single_for_cpu(dma_chan->device->dev, dest_dma,
				PAGE_SIZE, DMA_FROM_DEVICE);
	if (memcmp(src, dest, PAGE_SIZE)) {
		dev_err(dma_chan->device->dev,
			"Self-test copy failed compare, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

free_resources:
	dmaengine_unmap_put(unmap);
	mv_xor_free_chan_resources(dma_chan);
out:
	kfree(src);
	kfree(dest);
	return err;
}

#define MV_XOR_NUM_SRC_TEST 4 /* must be <= 15 */
static int
mv_chan_xor_self_test(struct mv_xor_chan *mv_chan)
{
	int i, src_idx, ret;
	struct page *dest;
	struct page *xor_srcs[MV_XOR_NUM_SRC_TEST];
	dma_addr_t dma_srcs[MV_XOR_NUM_SRC_TEST];
	dma_addr_t dest_dma;
	struct dma_async_tx_descriptor *tx;
	struct dmaengine_unmap_data *unmap;
	struct dma_chan *dma_chan;
	dma_cookie_t cookie;
	u8 cmp_byte = 0;
	u32 cmp_word;
	int err = 0;
	int src_count = MV_XOR_NUM_SRC_TEST;

	for (src_idx = 0; src_idx < src_count; src_idx++) {
		xor_srcs[src_idx] = alloc_page(GFP_KERNEL);
		if (!xor_srcs[src_idx]) {
			while (src_idx--)
				__free_page(xor_srcs[src_idx]);
			return -ENOMEM;
		}
	}

	dest = alloc_page(GFP_KERNEL);
	if (!dest) {
		while (src_idx--)
			__free_page(xor_srcs[src_idx]);
		return -ENOMEM;
	}

	/* Fill in src buffers */
	for (src_idx = 0; src_idx < src_count; src_idx++) {
		u8 *ptr = page_address(xor_srcs[src_idx]);
		for (i = 0; i < PAGE_SIZE; i++)
			ptr[i] = (1 << src_idx);
	}

	for (src_idx = 0; src_idx < src_count; src_idx++)
		cmp_byte ^= (u8) (1 << src_idx);

	cmp_word = (cmp_byte << 24) | (cmp_byte << 16) |
		(cmp_byte << 8) | cmp_byte;

	memset(page_address(dest), 0, PAGE_SIZE);

	dma_chan = &mv_chan->dmachan;
	if (mv_xor_alloc_chan_resources(dma_chan) < 1) {
		err = -ENODEV;
		goto out;
	}

	unmap = dmaengine_get_unmap_data(dma_chan->device->dev, src_count + 1,
					 GFP_KERNEL);
	if (!unmap) {
		err = -ENOMEM;
		goto free_resources;
	}

	/* test xor */
	for (i = 0; i < src_count; i++) {
		unmap->addr[i] = dma_map_page(dma_chan->device->dev, xor_srcs[i],
					      0, PAGE_SIZE, DMA_TO_DEVICE);
		dma_srcs[i] = unmap->addr[i];
		ret = dma_mapping_error(dma_chan->device->dev, unmap->addr[i]);
		if (ret) {
			err = -ENOMEM;
			goto free_resources;
		}
		unmap->to_cnt++;
	}

	unmap->addr[src_count] = dma_map_page(dma_chan->device->dev, dest, 0, PAGE_SIZE,
				      DMA_FROM_DEVICE);
	dest_dma = unmap->addr[src_count];
	ret = dma_mapping_error(dma_chan->device->dev, unmap->addr[src_count]);
	if (ret) {
		err = -ENOMEM;
		goto free_resources;
	}
	unmap->from_cnt = 1;
	unmap->len = PAGE_SIZE;

	tx = mv_xor_prep_dma_xor(dma_chan, dest_dma, dma_srcs,
				 src_count, PAGE_SIZE, 0);
	if (!tx) {
		dev_err(dma_chan->device->dev,
			"Self-test cannot prepare operation, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

	cookie = mv_xor_tx_submit(tx);
	if (dma_submit_error(cookie)) {
		dev_err(dma_chan->device->dev,
			"Self-test submit error, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

	mv_xor_issue_pending(dma_chan);
	async_tx_ack(tx);
	msleep(8);

	if (mv_xor_status(dma_chan, cookie, NULL) !=
	    DMA_COMPLETE) {
		dev_err(dma_chan->device->dev,
			"Self-test xor timed out, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

	dma_sync_single_for_cpu(dma_chan->device->dev, dest_dma,
				PAGE_SIZE, DMA_FROM_DEVICE);
	for (i = 0; i < (PAGE_SIZE / sizeof(u32)); i++) {
		u32 *ptr = page_address(dest);
		if (ptr[i] != cmp_word) {
			dev_err(dma_chan->device->dev,
				"Self-test xor failed compare, disabling. index %d, data %x, expected %x\n",
				i, ptr[i], cmp_word);
			err = -ENODEV;
			goto free_resources;
		}
	}

free_resources:
	dmaengine_unmap_put(unmap);
	mv_xor_free_chan_resources(dma_chan);
out:
	src_idx = src_count;
	while (src_idx--)
		__free_page(xor_srcs[src_idx]);
	__free_page(dest);
	return err;
}

static int mv_xor_channel_remove(struct mv_xor_chan *mv_chan)
{
	struct dma_chan *chan, *_chan;
	struct device *dev = mv_chan->dmadev.dev;

	dma_async_device_unregister(&mv_chan->dmadev);

	dma_free_coherent(dev, MV_XOR_POOL_SIZE,
			  mv_chan->dma_desc_pool_virt, mv_chan->dma_desc_pool);
	dma_unmap_single(dev, mv_chan->dummy_src_addr,
			 MV_XOR_MIN_BYTE_COUNT, DMA_FROM_DEVICE);
	dma_unmap_single(dev, mv_chan->dummy_dst_addr,
			 MV_XOR_MIN_BYTE_COUNT, DMA_TO_DEVICE);

	list_for_each_entry_safe(chan, _chan, &mv_chan->dmadev.channels,
				 device_node) {
		list_del(&chan->device_node);
	}

	free_irq(mv_chan->irq, mv_chan);

	return 0;
}

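/*
 * Set up one XOR channel: map the dummy DMA_INTERRUPT buffers, allocate
 * the coherent descriptor pool, advertise the supported operations to
 * the dmaengine core, and hook up the interrupt before unmasking it.
 */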
10331ef48a26SThomas Petazzoni static struct mv_xor_chan *
mv_xor_channel_add(struct mv_xor_device * xordev,struct platform_device * pdev,int idx,dma_cap_mask_t cap_mask,int irq)1034297eedbaSThomas Petazzoni mv_xor_channel_add(struct mv_xor_device *xordev,
1035a6b4a9d2SThomas Petazzoni 		   struct platform_device *pdev,
1036dd130c65SGregory CLEMENT 		   int idx, dma_cap_mask_t cap_mask, int irq)
1037ff7b0479SSaeed Bishara {
1038ff7b0479SSaeed Bishara 	int ret = 0;
1039ff7b0479SSaeed Bishara 	struct mv_xor_chan *mv_chan;
1040ff7b0479SSaeed Bishara 	struct dma_device *dma_dev;
1041ff7b0479SSaeed Bishara 
10421ef48a26SThomas Petazzoni 	mv_chan = devm_kzalloc(&pdev->dev, sizeof(*mv_chan), GFP_KERNEL);
1043a577659fSSachin Kamat 	if (!mv_chan)
1044a577659fSSachin Kamat 		return ERR_PTR(-ENOMEM);
1045ff7b0479SSaeed Bishara 
10469aedbdbaSThomas Petazzoni 	mv_chan->idx = idx;
104788eb92cbSThomas Petazzoni 	mv_chan->irq = irq;
1048dd130c65SGregory CLEMENT 	if (xordev->xor_type == XOR_ORION)
1049dd130c65SGregory CLEMENT 		mv_chan->op_in_desc = XOR_MODE_IN_REG;
1050dd130c65SGregory CLEMENT 	else
1051dd130c65SGregory CLEMENT 		mv_chan->op_in_desc = XOR_MODE_IN_DESC;
1052ff7b0479SSaeed Bishara 
10531ef48a26SThomas Petazzoni 	dma_dev = &mv_chan->dmadev;
10543e5daee5SRobin Murphy 	dma_dev->dev = &pdev->dev;
105577ff7a70SStefan Roese 	mv_chan->xordev = xordev;
1056ff7b0479SSaeed Bishara 
105722843545SLior Amsalem 	/*
105822843545SLior Amsalem 	 * These source and destination dummy buffers are used to implement
105922843545SLior Amsalem 	 * a DMA_INTERRUPT operation as a minimum-sized XOR operation.
106022843545SLior Amsalem 	 * Hence, we only need to map the buffers at initialization time.
106122843545SLior Amsalem 	 */
106222843545SLior Amsalem 	mv_chan->dummy_src_addr = dma_map_single(dma_dev->dev,
106322843545SLior Amsalem 		mv_chan->dummy_src, MV_XOR_MIN_BYTE_COUNT, DMA_FROM_DEVICE);
106422843545SLior Amsalem 	mv_chan->dummy_dst_addr = dma_map_single(dma_dev->dev,
106522843545SLior Amsalem 		mv_chan->dummy_dst, MV_XOR_MIN_BYTE_COUNT, DMA_TO_DEVICE);
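	/*
	 * (mv_xor_prep_dma_interrupt(), earlier in this file, consumes
	 * these by issuing a single-source XOR of MV_XOR_MIN_BYTE_COUNT
	 * bytes from dummy_src_addr to dummy_dst_addr.)
	 */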
106622843545SLior Amsalem 
1067ff7b0479SSaeed Bishara 	/* allocate coherent memory for hardware descriptors
1068ff7b0479SSaeed Bishara 	 * note: writecombine gives slightly better performance, but
1069ff7b0479SSaeed Bishara 	 * requires that we explicitly flush the writes
1070ff7b0479SSaeed Bishara 	 */
10711ef48a26SThomas Petazzoni 	mv_chan->dma_desc_pool_virt =
1072f6e45661SLuis R. Rodriguez 	  dma_alloc_wc(&pdev->dev, MV_XOR_POOL_SIZE, &mv_chan->dma_desc_pool,
1073f6e45661SLuis R. Rodriguez 		       GFP_KERNEL);
10741ef48a26SThomas Petazzoni 	if (!mv_chan->dma_desc_pool_virt)
1075a6b4a9d2SThomas Petazzoni 		return ERR_PTR(-ENOMEM);
1076ff7b0479SSaeed Bishara 
1077ff7b0479SSaeed Bishara 	/* discover transaction capabilities from the platform data */
1078a6b4a9d2SThomas Petazzoni 	dma_dev->cap_mask = cap_mask;
1079ff7b0479SSaeed Bishara 
1080ff7b0479SSaeed Bishara 	INIT_LIST_HEAD(&dma_dev->channels);
1081ff7b0479SSaeed Bishara 
1082ff7b0479SSaeed Bishara 	/* set base routines */
1083ff7b0479SSaeed Bishara 	dma_dev->device_alloc_chan_resources = mv_xor_alloc_chan_resources;
1084ff7b0479SSaeed Bishara 	dma_dev->device_free_chan_resources = mv_xor_free_chan_resources;
108507934481SLinus Walleij 	dma_dev->device_tx_status = mv_xor_status;
1086ff7b0479SSaeed Bishara 	dma_dev->device_issue_pending = mv_xor_issue_pending;
1087ff7b0479SSaeed Bishara 
1088ff7b0479SSaeed Bishara 	/* set prep routines based on capability */
108922843545SLior Amsalem 	if (dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask))
109022843545SLior Amsalem 		dma_dev->device_prep_dma_interrupt = mv_xor_prep_dma_interrupt;
1091ff7b0479SSaeed Bishara 	if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask))
1092ff7b0479SSaeed Bishara 		dma_dev->device_prep_dma_memcpy = mv_xor_prep_dma_memcpy;
1093ff7b0479SSaeed Bishara 	if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
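		/* the hardware handles at most eight source operands per XOR descriptor */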
1094c019894eSJoe Perches 		dma_dev->max_xor = 8;
1095ff7b0479SSaeed Bishara 		dma_dev->device_prep_dma_xor = mv_xor_prep_dma_xor;
1096ff7b0479SSaeed Bishara 	}
1097ff7b0479SSaeed Bishara 
1098297eedbaSThomas Petazzoni 	mv_chan->mmr_base = xordev->xor_base;
109982a1402eSEzequiel Garcia 	mv_chan->mmr_high_base = xordev->xor_high_base;
110034ca9a53SAllen Pais 	tasklet_setup(&mv_chan->irq_tasklet, mv_xor_tasklet);
1101ff7b0479SSaeed Bishara 
1102ff7b0479SSaeed Bishara 	/* clear errors before enabling interrupts */
11030951e728SMaxime Ripard 	mv_chan_clear_err_status(mv_chan);
1104ff7b0479SSaeed Bishara 
11052d0a0745SThomas Petazzoni 	ret = request_irq(mv_chan->irq, mv_xor_interrupt_handler,
1106ff7b0479SSaeed Bishara 			  0, dev_name(&pdev->dev), mv_chan);
1107ff7b0479SSaeed Bishara 	if (ret)
1108ff7b0479SSaeed Bishara 		goto err_free_dma;
1109ff7b0479SSaeed Bishara 
1110ff7b0479SSaeed Bishara 	mv_chan_unmask_interrupts(mv_chan);
1111ff7b0479SSaeed Bishara 
11126f166312SLior Amsalem 	if (mv_chan->op_in_desc == XOR_MODE_IN_DESC)
111381aafb3eSThomas Petazzoni 		mv_chan_set_mode(mv_chan, XOR_OPERATION_MODE_IN_DESC);
11146f166312SLior Amsalem 	else
111581aafb3eSThomas Petazzoni 		mv_chan_set_mode(mv_chan, XOR_OPERATION_MODE_XOR);
1116ff7b0479SSaeed Bishara 
1117ff7b0479SSaeed Bishara 	spin_lock_init(&mv_chan->lock);
1118ff7b0479SSaeed Bishara 	INIT_LIST_HEAD(&mv_chan->chain);
1119ff7b0479SSaeed Bishara 	INIT_LIST_HEAD(&mv_chan->completed_slots);
1120fbea28a2SLior Amsalem 	INIT_LIST_HEAD(&mv_chan->free_slots);
1121fbea28a2SLior Amsalem 	INIT_LIST_HEAD(&mv_chan->allocated_slots);
112298817b99SThomas Petazzoni 	mv_chan->dmachan.device = dma_dev;
112398817b99SThomas Petazzoni 	dma_cookie_init(&mv_chan->dmachan);
1124ff7b0479SSaeed Bishara 
112598817b99SThomas Petazzoni 	list_add_tail(&mv_chan->dmachan.device_node, &dma_dev->channels);
1126ff7b0479SSaeed Bishara 
1127ff7b0479SSaeed Bishara 	if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask)) {
11280951e728SMaxime Ripard 		ret = mv_chan_memcpy_self_test(mv_chan);
1129ff7b0479SSaeed Bishara 		dev_dbg(&pdev->dev, "memcpy self test returned %d\n", ret);
1130ff7b0479SSaeed Bishara 		if (ret)
11312d0a0745SThomas Petazzoni 			goto err_free_irq;
1132ff7b0479SSaeed Bishara 	}
1133ff7b0479SSaeed Bishara 
1134ff7b0479SSaeed Bishara 	if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
11350951e728SMaxime Ripard 		ret = mv_chan_xor_self_test(mv_chan);
1136ff7b0479SSaeed Bishara 		dev_dbg(&pdev->dev, "xor self test returned %d\n", ret);
1137ff7b0479SSaeed Bishara 		if (ret)
11382d0a0745SThomas Petazzoni 			goto err_free_irq;
1139ff7b0479SSaeed Bishara 	}
1140ff7b0479SSaeed Bishara 
1141c678fa66SDave Jiang 	dev_info(&pdev->dev, "Marvell XOR (%s): ( %s%s%s)\n",
11426f166312SLior Amsalem 		 mv_chan->op_in_desc ? "Descriptor Mode" : "Registers Mode",
1143ff7b0479SSaeed Bishara 		 dma_has_cap(DMA_XOR, dma_dev->cap_mask) ? "xor " : "",
1144ff7b0479SSaeed Bishara 		 dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask) ? "cpy " : "",
1145ff7b0479SSaeed Bishara 		 dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask) ? "intr " : "");
1146ff7b0479SSaeed Bishara 
11477c97381eSAditya Pakki 	ret = dma_async_device_register(dma_dev);
11487c97381eSAditya Pakki 	if (ret)
11497c97381eSAditya Pakki 		goto err_free_irq;
11507c97381eSAditya Pakki 
11511ef48a26SThomas Petazzoni 	return mv_chan;
1152ff7b0479SSaeed Bishara 
11532d0a0745SThomas Petazzoni err_free_irq:
11542d0a0745SThomas Petazzoni 	free_irq(mv_chan->irq, mv_chan);
1155ff7b0479SSaeed Bishara err_free_dma:
1156b503fa01SThomas Petazzoni 	dma_free_coherent(&pdev->dev, MV_XOR_POOL_SIZE,
11571ef48a26SThomas Petazzoni 			  mv_chan->dma_desc_pool_virt, mv_chan->dma_desc_pool);
1158a6b4a9d2SThomas Petazzoni 	return ERR_PTR(ret);
1159ff7b0479SSaeed Bishara }
1160ff7b0479SSaeed Bishara 
1161ff7b0479SSaeed Bishara static void
1162297eedbaSThomas Petazzoni mv_xor_conf_mbus_windows(struct mv_xor_device *xordev,
116363a9332bSAndrew Lunn 			 const struct mbus_dram_target_info *dram)
1164ff7b0479SSaeed Bishara {
116582a1402eSEzequiel Garcia 	void __iomem *base = xordev->xor_high_base;
1166ff7b0479SSaeed Bishara 	u32 win_enable = 0;
1167ff7b0479SSaeed Bishara 	int i;
1168ff7b0479SSaeed Bishara 
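	/*
	 * Start from a clean state: disable all eight address decoding
	 * windows. Only the first four windows have a high-address
	 * remap register.
	 */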
1169ff7b0479SSaeed Bishara 	for (i = 0; i < 8; i++) {
1170ff7b0479SSaeed Bishara 		writel(0, base + WINDOW_BASE(i));
1171ff7b0479SSaeed Bishara 		writel(0, base + WINDOW_SIZE(i));
1172ff7b0479SSaeed Bishara 		if (i < 4)
1173ff7b0479SSaeed Bishara 			writel(0, base + WINDOW_REMAP_HIGH(i));
1174ff7b0479SSaeed Bishara 	}
1175ff7b0479SSaeed Bishara 
1176ff7b0479SSaeed Bishara 	for (i = 0; i < dram->num_cs; i++) {
117763a9332bSAndrew Lunn 		const struct mbus_dram_window *cs = dram->cs + i;
1178ff7b0479SSaeed Bishara 
1179ff7b0479SSaeed Bishara 		writel((cs->base & 0xffff0000) |
1180ff7b0479SSaeed Bishara 		       (cs->mbus_attr << 8) |
1181ff7b0479SSaeed Bishara 		       dram->mbus_dram_target_id, base + WINDOW_BASE(i));
1182ff7b0479SSaeed Bishara 		writel((cs->size - 1) & 0xffff0000, base + WINDOW_SIZE(i));
1183ff7b0479SSaeed Bishara 
118477ff7a70SStefan Roese 		/* Fill the caching variables for later use */
118577ff7a70SStefan Roese 		xordev->win_start[i] = cs->base;
118677ff7a70SStefan Roese 		xordev->win_end[i] = cs->base + cs->size - 1;
118777ff7a70SStefan Roese 
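		/*
		 * Bit i of win_enable enables window i; bits 16 + 2*i
		 * form a two-bit per-window access control field, where
		 * 0x3 presumably grants full read/write access.
		 */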
1188ff7b0479SSaeed Bishara 		win_enable |= (1 << i);
1189ff7b0479SSaeed Bishara 		win_enable |= 3 << (16 + (2 * i));
1190ff7b0479SSaeed Bishara 	}
1191ff7b0479SSaeed Bishara 
1192ff7b0479SSaeed Bishara 	writel(win_enable, base + WINDOW_BAR_ENABLE(0));
1193ff7b0479SSaeed Bishara 	writel(win_enable, base + WINDOW_BAR_ENABLE(1));
1194c4b4b732SThomas Petazzoni 	writel(0, base + WINDOW_OVERRIDE_CTRL(0));
1195c4b4b732SThomas Petazzoni 	writel(0, base + WINDOW_OVERRIDE_CTRL(1));
1196ff7b0479SSaeed Bishara }
1197ff7b0479SSaeed Bishara 
1198ac5f0f3fSMarcin Wojtas static void
1199ac5f0f3fSMarcin Wojtas mv_xor_conf_mbus_windows_a3700(struct mv_xor_device *xordev)
1200ac5f0f3fSMarcin Wojtas {
1201ac5f0f3fSMarcin Wojtas 	void __iomem *base = xordev->xor_high_base;
1202ac5f0f3fSMarcin Wojtas 	u32 win_enable = 0;
1203ac5f0f3fSMarcin Wojtas 	int i;
1204ac5f0f3fSMarcin Wojtas 
1205ac5f0f3fSMarcin Wojtas 	for (i = 0; i < 8; i++) {
1206ac5f0f3fSMarcin Wojtas 		writel(0, base + WINDOW_BASE(i));
1207ac5f0f3fSMarcin Wojtas 		writel(0, base + WINDOW_SIZE(i));
1208ac5f0f3fSMarcin Wojtas 		if (i < 4)
1209ac5f0f3fSMarcin Wojtas 			writel(0, base + WINDOW_REMAP_HIGH(i));
1210ac5f0f3fSMarcin Wojtas 	}
1211ac5f0f3fSMarcin Wojtas 	/*
1212ac5f0f3fSMarcin Wojtas 	 * For Armada 3700, open a default 4GB MBus window; the DRAM-
1213ac5f0f3fSMarcin Wojtas 	 * related configuration is done at the AXIS level.
1214ac5f0f3fSMarcin Wojtas 	 */
1215ac5f0f3fSMarcin Wojtas 	writel(0xffff0000, base + WINDOW_SIZE(0));
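	/* the size register holds (size - 1) in its upper 16 bits, so 0xffff0000 covers the full 4 GB */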
1216ac5f0f3fSMarcin Wojtas 	win_enable |= 1;
1217ac5f0f3fSMarcin Wojtas 	win_enable |= 3 << 16;
1218ac5f0f3fSMarcin Wojtas 
1219ac5f0f3fSMarcin Wojtas 	writel(win_enable, base + WINDOW_BAR_ENABLE(0));
1220ac5f0f3fSMarcin Wojtas 	writel(win_enable, base + WINDOW_BAR_ENABLE(1));
1221ac5f0f3fSMarcin Wojtas 	writel(0, base + WINDOW_OVERRIDE_CTRL(0));
1222ac5f0f3fSMarcin Wojtas 	writel(0, base + WINDOW_OVERRIDE_CTRL(1));
1223ac5f0f3fSMarcin Wojtas }
1224ac5f0f3fSMarcin Wojtas 
12258b648436SThomas Petazzoni /*
12268b648436SThomas Petazzoni  * Since this XOR driver is basically used only for RAID5, we don't
12278b648436SThomas Petazzoni  * need to care about synchronizing ->suspend with DMA activity,
12288b648436SThomas Petazzoni  * because the DMA engine will naturally be quiet due to the block
12298b648436SThomas Petazzoni  * devices being suspended.
12308b648436SThomas Petazzoni  */
12318b648436SThomas Petazzoni static int mv_xor_suspend(struct platform_device *pdev, pm_message_t state)
12328b648436SThomas Petazzoni {
12338b648436SThomas Petazzoni 	struct mv_xor_device *xordev = platform_get_drvdata(pdev);
12348b648436SThomas Petazzoni 	int i;
12358b648436SThomas Petazzoni 
12368b648436SThomas Petazzoni 	for (i = 0; i < MV_XOR_MAX_CHANNELS; i++) {
12378b648436SThomas Petazzoni 		struct mv_xor_chan *mv_chan = xordev->channels[i];
12388b648436SThomas Petazzoni 
12398b648436SThomas Petazzoni 		if (!mv_chan)
12408b648436SThomas Petazzoni 			continue;
12418b648436SThomas Petazzoni 
12428b648436SThomas Petazzoni 		mv_chan->saved_config_reg =
12438b648436SThomas Petazzoni 			readl_relaxed(XOR_CONFIG(mv_chan));
12448b648436SThomas Petazzoni 		mv_chan->saved_int_mask_reg =
12458b648436SThomas Petazzoni 			readl_relaxed(XOR_INTR_MASK(mv_chan));
12468b648436SThomas Petazzoni 	}
12478b648436SThomas Petazzoni 
12488b648436SThomas Petazzoni 	return 0;
12498b648436SThomas Petazzoni }
12508b648436SThomas Petazzoni 
12518b648436SThomas Petazzoni static int mv_xor_resume(struct platform_device *dev)
12528b648436SThomas Petazzoni {
12538b648436SThomas Petazzoni 	struct mv_xor_device *xordev = platform_get_drvdata(dev);
12548b648436SThomas Petazzoni 	const struct mbus_dram_target_info *dram;
12558b648436SThomas Petazzoni 	int i;
12568b648436SThomas Petazzoni 
12578b648436SThomas Petazzoni 	for (i = 0; i < MV_XOR_MAX_CHANNELS; i++) {
12588b648436SThomas Petazzoni 		struct mv_xor_chan *mv_chan = xordev->channels[i];
12598b648436SThomas Petazzoni 
12608b648436SThomas Petazzoni 		if (!mv_chan)
12618b648436SThomas Petazzoni 			continue;
12628b648436SThomas Petazzoni 
12638b648436SThomas Petazzoni 		writel_relaxed(mv_chan->saved_config_reg,
12648b648436SThomas Petazzoni 			       XOR_CONFIG(mv_chan));
12658b648436SThomas Petazzoni 		writel_relaxed(mv_chan->saved_int_mask_reg,
12668b648436SThomas Petazzoni 			       XOR_INTR_MASK(mv_chan));
12678b648436SThomas Petazzoni 	}
12688b648436SThomas Petazzoni 
1269ac5f0f3fSMarcin Wojtas 	if (xordev->xor_type == XOR_ARMADA_37XX) {
1270ac5f0f3fSMarcin Wojtas 		mv_xor_conf_mbus_windows_a3700(xordev);
1271ac5f0f3fSMarcin Wojtas 		return 0;
1272ac5f0f3fSMarcin Wojtas 	}
1273ac5f0f3fSMarcin Wojtas 
12748b648436SThomas Petazzoni 	dram = mv_mbus_dram_info();
12758b648436SThomas Petazzoni 	if (dram)
12768b648436SThomas Petazzoni 		mv_xor_conf_mbus_windows(xordev, dram);
12778b648436SThomas Petazzoni 
12788b648436SThomas Petazzoni 	return 0;
12798b648436SThomas Petazzoni }
12808b648436SThomas Petazzoni 
12816f166312SLior Amsalem static const struct of_device_id mv_xor_dt_ids[] = {
1282dd130c65SGregory CLEMENT 	{ .compatible = "marvell,orion-xor", .data = (void *)XOR_ORION },
1283dd130c65SGregory CLEMENT 	{ .compatible = "marvell,armada-380-xor", .data = (void *)XOR_ARMADA_38X },
1284ac5f0f3fSMarcin Wojtas 	{ .compatible = "marvell,armada-3700-xor", .data = (void *)XOR_ARMADA_37XX },
12856f166312SLior Amsalem 	{},
12866f166312SLior Amsalem };
12876f166312SLior Amsalem 
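/*
 * A minimal device tree node matched by the table above; register
 * offsets, clock handle and interrupt numbers are illustrative only:
 *
 *	xor@60900 {
 *		compatible = "marvell,orion-xor";
 *		reg = <0x60900 0x100
 *		       0x60b00 0x100>;
 *		clocks = <&coreclk 0>;
 *
 *		xor00 {
 *			interrupts = <51>;
 *		};
 *		xor01 {
 *			interrupts = <52>;
 *		};
 *	};
 *
 * Each child node describes one channel; mv_xor_probe() below walks
 * them with for_each_child_of_node() and maps each channel's interrupt.
 */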
128877757291SThomas Petazzoni static unsigned int mv_xor_engine_count;
1289ff7b0479SSaeed Bishara 
1290c2714334SLinus Torvalds static int mv_xor_probe(struct platform_device *pdev)
1291ff7b0479SSaeed Bishara {
129263a9332bSAndrew Lunn 	const struct mbus_dram_target_info *dram;
1293297eedbaSThomas Petazzoni 	struct mv_xor_device *xordev;
1294d4adcc01SJingoo Han 	struct mv_xor_platform_data *pdata = dev_get_platdata(&pdev->dev);
1295ff7b0479SSaeed Bishara 	struct resource *res;
129677757291SThomas Petazzoni 	unsigned int max_engines, max_channels;
129760d151f3SThomas Petazzoni 	int i, ret;
1298ff7b0479SSaeed Bishara 
12991ba151cdSJoe Perches 	dev_notice(&pdev->dev, "Marvell shared XOR driver\n");
1300ff7b0479SSaeed Bishara 
1301297eedbaSThomas Petazzoni 	xordev = devm_kzalloc(&pdev->dev, sizeof(*xordev), GFP_KERNEL);
1302297eedbaSThomas Petazzoni 	if (!xordev)
1303ff7b0479SSaeed Bishara 		return -ENOMEM;
1304ff7b0479SSaeed Bishara 
1305ff7b0479SSaeed Bishara 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1306ff7b0479SSaeed Bishara 	if (!res)
1307ff7b0479SSaeed Bishara 		return -ENODEV;
1308ff7b0479SSaeed Bishara 
1309297eedbaSThomas Petazzoni 	xordev->xor_base = devm_ioremap(&pdev->dev, res->start,
13104de1ba15SH Hartley Sweeten 					resource_size(res));
1311297eedbaSThomas Petazzoni 	if (!xordev->xor_base)
1312ff7b0479SSaeed Bishara 		return -EBUSY;
1313ff7b0479SSaeed Bishara 
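	/*
	 * The second memory resource is the "high" register window,
	 * which holds the address decoding (MBus window) registers
	 * programmed by mv_xor_conf_mbus_windows*() above.
	 */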
1314ff7b0479SSaeed Bishara 	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
1315ff7b0479SSaeed Bishara 	if (!res)
1316ff7b0479SSaeed Bishara 		return -ENODEV;
1317ff7b0479SSaeed Bishara 
1318297eedbaSThomas Petazzoni 	xordev->xor_high_base = devm_ioremap(&pdev->dev, res->start,
13194de1ba15SH Hartley Sweeten 					     resource_size(res));
1320297eedbaSThomas Petazzoni 	if (!xordev->xor_high_base)
1321ff7b0479SSaeed Bishara 		return -EBUSY;
1322ff7b0479SSaeed Bishara 
1323297eedbaSThomas Petazzoni 	platform_set_drvdata(pdev, xordev);
1324ff7b0479SSaeed Bishara 
1326dd130c65SGregory CLEMENT 	/*
1327dd130c65SGregory CLEMENT 	 * We need to know which type of XOR device we use before
1328dd130c65SGregory CLEMENT 	 * setting up. In the non-DT case it can only be the legacy one.
1329dd130c65SGregory CLEMENT 	 */
1330dd130c65SGregory CLEMENT 	xordev->xor_type = XOR_ORION;
1331dd130c65SGregory CLEMENT 	if (pdev->dev.of_node) {
1332dd130c65SGregory CLEMENT 		const struct of_device_id *of_id =
1333dd130c65SGregory CLEMENT 			of_match_device(mv_xor_dt_ids,
1334dd130c65SGregory CLEMENT 					&pdev->dev);
1335dd130c65SGregory CLEMENT 
1336dd130c65SGregory CLEMENT 		xordev->xor_type = (uintptr_t)of_id->data;
1337dd130c65SGregory CLEMENT 	}
1338dd130c65SGregory CLEMENT 
1339ff7b0479SSaeed Bishara 	/*
1340ff7b0479SSaeed Bishara 	 * (Re-)program MBUS remapping windows if we are asked to.
1341ff7b0479SSaeed Bishara 	 */
1342ac5f0f3fSMarcin Wojtas 	if (xordev->xor_type == XOR_ARMADA_37XX) {
1343ac5f0f3fSMarcin Wojtas 		mv_xor_conf_mbus_windows_a3700(xordev);
1344ac5f0f3fSMarcin Wojtas 	} else {
134563a9332bSAndrew Lunn 		dram = mv_mbus_dram_info();
134663a9332bSAndrew Lunn 		if (dram)
1347297eedbaSThomas Petazzoni 			mv_xor_conf_mbus_windows(xordev, dram);
1348ac5f0f3fSMarcin Wojtas 	}
1349ff7b0479SSaeed Bishara 
1350c510182bSAndrew Lunn 	/* Not all platforms can gate the clock, so it is not
1351c510182bSAndrew Lunn 	 * an error if the clock does not exist.
1352c510182bSAndrew Lunn 	 */
1353297eedbaSThomas Petazzoni 	xordev->clk = clk_get(&pdev->dev, NULL);
1354297eedbaSThomas Petazzoni 	if (!IS_ERR(xordev->clk))
1355297eedbaSThomas Petazzoni 		clk_prepare_enable(xordev->clk);
1356c510182bSAndrew Lunn 
135777757291SThomas Petazzoni 	/*
135877757291SThomas Petazzoni 	 * We don't want more than one channel per CPU, so that
135977757291SThomas Petazzoni 	 * async_tx performs well; limit the number of engines and
136077757291SThomas Petazzoni 	 * channels to respect this constraint. Note that we also
136177757291SThomas Petazzoni 	 * want to use channels from separate engines when possible.
1362ac5f0f3fSMarcin Wojtas 	 * On the dual-CPU Armada 3700 SoC, which has a single XOR
1363ac5f0f3fSMarcin Wojtas 	 * engine, allow using both of its channels.
136477757291SThomas Petazzoni 	 */
136577757291SThomas Petazzoni 	max_engines = num_present_cpus();
1366ac5f0f3fSMarcin Wojtas 	if (xordev->xor_type == XOR_ARMADA_37XX)
1367ac5f0f3fSMarcin Wojtas 		max_channels = num_present_cpus();
1368ac5f0f3fSMarcin Wojtas 	else
136977757291SThomas Petazzoni 		max_channels = min_t(unsigned int,
137077757291SThomas Petazzoni 				     MV_XOR_MAX_CHANNELS,
137177757291SThomas Petazzoni 				     DIV_ROUND_UP(num_present_cpus(), 2));
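	/*
	 * e.g. a quad-core SoC allows up to four engines, each
	 * contributing min(MV_XOR_MAX_CHANNELS, 2) channels; engines
	 * probed beyond the limit are left unused (the check below
	 * returns success without registering any channels).
	 */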
137277757291SThomas Petazzoni 
137377757291SThomas Petazzoni 	if (mv_xor_engine_count >= max_engines)
137477757291SThomas Petazzoni 		return 0;
137577757291SThomas Petazzoni 
1376f7d12ef5SThomas Petazzoni 	if (pdev->dev.of_node) {
1377f7d12ef5SThomas Petazzoni 		struct device_node *np;
1378f7d12ef5SThomas Petazzoni 		int i = 0;
1379f7d12ef5SThomas Petazzoni 
1380f7d12ef5SThomas Petazzoni 		for_each_child_of_node(pdev->dev.of_node, np) {
13810be8253fSRussell King 			struct mv_xor_chan *chan;
1382f7d12ef5SThomas Petazzoni 			dma_cap_mask_t cap_mask;
1383f7d12ef5SThomas Petazzoni 			int irq;
1384f7d12ef5SThomas Petazzoni 
138577757291SThomas Petazzoni 			if (i >= max_channels)
138677757291SThomas Petazzoni 				continue;
138777757291SThomas Petazzoni 
1388f7d12ef5SThomas Petazzoni 			dma_cap_zero(cap_mask);
1389f7d12ef5SThomas Petazzoni 			dma_cap_set(DMA_MEMCPY, cap_mask);
1390f7d12ef5SThomas Petazzoni 			dma_cap_set(DMA_XOR, cap_mask);
1391f7d12ef5SThomas Petazzoni 			dma_cap_set(DMA_INTERRUPT, cap_mask);
1392f7d12ef5SThomas Petazzoni 
1393f7d12ef5SThomas Petazzoni 			irq = irq_of_parse_and_map(np, 0);
1394f8eb9e7dSThomas Petazzoni 			if (!irq) {
1395f8eb9e7dSThomas Petazzoni 				ret = -ENODEV;
1396f7d12ef5SThomas Petazzoni 				goto err_channel_add;
1397f7d12ef5SThomas Petazzoni 			}
1398f7d12ef5SThomas Petazzoni 
13990be8253fSRussell King 			chan = mv_xor_channel_add(xordev, pdev, i,
1400dd130c65SGregory CLEMENT 						  cap_mask, irq);
14010be8253fSRussell King 			if (IS_ERR(chan)) {
14020be8253fSRussell King 				ret = PTR_ERR(chan);
1403f7d12ef5SThomas Petazzoni 				irq_dispose_mapping(irq);
1404f7d12ef5SThomas Petazzoni 				goto err_channel_add;
1405f7d12ef5SThomas Petazzoni 			}
1406f7d12ef5SThomas Petazzoni 
14070be8253fSRussell King 			xordev->channels[i] = chan;
1408f7d12ef5SThomas Petazzoni 			i++;
1409f7d12ef5SThomas Petazzoni 		}
1410f7d12ef5SThomas Petazzoni 	} else if (pdata && pdata->channels) {
141177757291SThomas Petazzoni 		for (i = 0; i < max_channels; i++) {
1412e39f6ec1SThomas Petazzoni 			struct mv_xor_channel_data *cd;
14130be8253fSRussell King 			struct mv_xor_chan *chan;
141460d151f3SThomas Petazzoni 			int irq;
141560d151f3SThomas Petazzoni 
141660d151f3SThomas Petazzoni 			cd = &pdata->channels[i];
141760d151f3SThomas Petazzoni 			irq = platform_get_irq(pdev, i);
141860d151f3SThomas Petazzoni 			if (irq < 0) {
141960d151f3SThomas Petazzoni 				ret = irq;
142060d151f3SThomas Petazzoni 				goto err_channel_add;
142160d151f3SThomas Petazzoni 			}
142260d151f3SThomas Petazzoni 
14230be8253fSRussell King 			chan = mv_xor_channel_add(xordev, pdev, i,
1424dd130c65SGregory CLEMENT 						  cd->cap_mask, irq);
14250be8253fSRussell King 			if (IS_ERR(chan)) {
14260be8253fSRussell King 				ret = PTR_ERR(chan);
142760d151f3SThomas Petazzoni 				goto err_channel_add;
142860d151f3SThomas Petazzoni 			}
14290be8253fSRussell King 
14300be8253fSRussell King 			xordev->channels[i] = chan;
143160d151f3SThomas Petazzoni 		}
143260d151f3SThomas Petazzoni 	}
143360d151f3SThomas Petazzoni 
1434ff7b0479SSaeed Bishara 	return 0;
143560d151f3SThomas Petazzoni 
143660d151f3SThomas Petazzoni err_channel_add:
143760d151f3SThomas Petazzoni 	for (i = 0; i < MV_XOR_MAX_CHANNELS; i++)
1438f7d12ef5SThomas Petazzoni 		if (xordev->channels[i]) {
1439ab6e439fSThomas Petazzoni 			mv_xor_channel_remove(xordev->channels[i]);
1440f7d12ef5SThomas Petazzoni 			if (pdev->dev.of_node)
1441f7d12ef5SThomas Petazzoni 				irq_dispose_mapping(xordev->channels[i]->irq);
1442f7d12ef5SThomas Petazzoni 		}
144360d151f3SThomas Petazzoni 
1444dab92064SThomas Petazzoni 	if (!IS_ERR(xordev->clk)) {
1445297eedbaSThomas Petazzoni 		clk_disable_unprepare(xordev->clk);
1446297eedbaSThomas Petazzoni 		clk_put(xordev->clk);
1447dab92064SThomas Petazzoni 	}
1448dab92064SThomas Petazzoni 
144960d151f3SThomas Petazzoni 	return ret;
1450ff7b0479SSaeed Bishara }
1451ff7b0479SSaeed Bishara 
1452ff7b0479SSaeed Bishara static struct platform_driver mv_xor_driver = {
1453ff7b0479SSaeed Bishara 	.probe		= mv_xor_probe,
14548b648436SThomas Petazzoni 	.suspend        = mv_xor_suspend,
14558b648436SThomas Petazzoni 	.resume         = mv_xor_resume,
1456ff7b0479SSaeed Bishara 	.driver		= {
1457ff7b0479SSaeed Bishara 		.name	        = MV_XOR_NAME,
1458*890bcd49SKrzysztof Kozlowski 		.of_match_table = mv_xor_dt_ids,
1459ff7b0479SSaeed Bishara 	},
1460ff7b0479SSaeed Bishara };
1461ff7b0479SSaeed Bishara 
1462812608d1SGeliang Tang builtin_platform_driver(mv_xor_driver);
1463ff7b0479SSaeed Bishara 
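/*
 * The MODULE_* tags below are deliberately left commented out: the
 * driver is builtin-only (builtin_platform_driver() above), but the
 * authorship and license information is kept for reference.
 */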
146425cf68daSPaul Gortmaker /*
1465ff7b0479SSaeed Bishara MODULE_AUTHOR("Saeed Bishara <saeed@marvell.com>");
1466ff7b0479SSaeed Bishara MODULE_DESCRIPTION("DMA engine driver for Marvell's XOR engine");
1467ff7b0479SSaeed Bishara MODULE_LICENSE("GPL");
146825cf68daSPaul Gortmaker */
1469