xref: /openbmc/linux/drivers/dma/mv_xor.c (revision 812608d1)
1ff7b0479SSaeed Bishara /*
2ff7b0479SSaeed Bishara  * offload engine driver for the Marvell XOR engine
3ff7b0479SSaeed Bishara  * Copyright (C) 2007, 2008, Marvell International Ltd.
4ff7b0479SSaeed Bishara  *
5ff7b0479SSaeed Bishara  * This program is free software; you can redistribute it and/or modify it
6ff7b0479SSaeed Bishara  * under the terms and conditions of the GNU General Public License,
7ff7b0479SSaeed Bishara  * version 2, as published by the Free Software Foundation.
8ff7b0479SSaeed Bishara  *
9ff7b0479SSaeed Bishara  * This program is distributed in the hope it will be useful, but WITHOUT
10ff7b0479SSaeed Bishara  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11ff7b0479SSaeed Bishara  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
12ff7b0479SSaeed Bishara  * more details.
13ff7b0479SSaeed Bishara  */
14ff7b0479SSaeed Bishara 
15ff7b0479SSaeed Bishara #include <linux/init.h>
165a0e3ad6STejun Heo #include <linux/slab.h>
17ff7b0479SSaeed Bishara #include <linux/delay.h>
18ff7b0479SSaeed Bishara #include <linux/dma-mapping.h>
19ff7b0479SSaeed Bishara #include <linux/spinlock.h>
20ff7b0479SSaeed Bishara #include <linux/interrupt.h>
216f166312SLior Amsalem #include <linux/of_device.h>
22ff7b0479SSaeed Bishara #include <linux/platform_device.h>
23ff7b0479SSaeed Bishara #include <linux/memory.h>
24c510182bSAndrew Lunn #include <linux/clk.h>
25f7d12ef5SThomas Petazzoni #include <linux/of.h>
26f7d12ef5SThomas Petazzoni #include <linux/of_irq.h>
27f7d12ef5SThomas Petazzoni #include <linux/irqdomain.h>
2877757291SThomas Petazzoni #include <linux/cpumask.h>
29c02cecb9SArnd Bergmann #include <linux/platform_data/dma-mv_xor.h>
30d2ebfb33SRussell King - ARM Linux 
31d2ebfb33SRussell King - ARM Linux #include "dmaengine.h"
32ff7b0479SSaeed Bishara #include "mv_xor.h"
33ff7b0479SSaeed Bishara 
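/*
 * Controller flavours handled by this driver; the type is checked where
 * the hardware differs, e.g. mv_xor_add_io_win() below skips the MBus
 * window handling on the Armada 37xx.
 */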
34dd130c65SGregory CLEMENT enum mv_xor_type {
35dd130c65SGregory CLEMENT 	XOR_ORION,
36dd130c65SGregory CLEMENT 	XOR_ARMADA_38X,
37ac5f0f3fSMarcin Wojtas 	XOR_ARMADA_37XX,
38dd130c65SGregory CLEMENT };
39dd130c65SGregory CLEMENT 
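/*
 * The operation type (XOR vs. MEMCPY) is either selected once per channel
 * in its configuration register (XOR_MODE_IN_REG) or encoded in every
 * hardware descriptor (XOR_MODE_IN_DESC) on controllers that support it;
 * see mv_desc_set_mode() and mv_chan_set_mode() below.
 */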
406f166312SLior Amsalem enum mv_xor_mode {
416f166312SLior Amsalem 	XOR_MODE_IN_REG,
426f166312SLior Amsalem 	XOR_MODE_IN_DESC,
436f166312SLior Amsalem };
446f166312SLior Amsalem 
45ff7b0479SSaeed Bishara static void mv_xor_issue_pending(struct dma_chan *chan);
46ff7b0479SSaeed Bishara 
47ff7b0479SSaeed Bishara #define to_mv_xor_chan(chan)		\
4898817b99SThomas Petazzoni 	container_of(chan, struct mv_xor_chan, dmachan)
49ff7b0479SSaeed Bishara 
50ff7b0479SSaeed Bishara #define to_mv_xor_slot(tx)		\
51ff7b0479SSaeed Bishara 	container_of(tx, struct mv_xor_desc_slot, async_tx)
52ff7b0479SSaeed Bishara 
53c98c1781SThomas Petazzoni #define mv_chan_to_devp(chan)           \
541ef48a26SThomas Petazzoni 	((chan)->dmadev.dev)
55c98c1781SThomas Petazzoni 
56dfc97661SLior Amsalem static void mv_desc_init(struct mv_xor_desc_slot *desc,
57ba87d137SLior Amsalem 			 dma_addr_t addr, u32 byte_count,
58ba87d137SLior Amsalem 			 enum dma_ctrl_flags flags)
59ff7b0479SSaeed Bishara {
60ff7b0479SSaeed Bishara 	struct mv_xor_desc *hw_desc = desc->hw_desc;
61ff7b0479SSaeed Bishara 
620e7488edSEzequiel Garcia 	hw_desc->status = XOR_DESC_DMA_OWNED;
63ff7b0479SSaeed Bishara 	hw_desc->phy_next_desc = 0;
64ba87d137SLior Amsalem 	/* Enable end-of-descriptor interrupts only for DMA_PREP_INTERRUPT */
65ba87d137SLior Amsalem 	hw_desc->desc_command = (flags & DMA_PREP_INTERRUPT) ?
66ba87d137SLior Amsalem 				XOR_DESC_EOD_INT_EN : 0;
67dfc97661SLior Amsalem 	hw_desc->phy_dest_addr = addr;
68ff7b0479SSaeed Bishara 	hw_desc->byte_count = byte_count;
69ff7b0479SSaeed Bishara }
70ff7b0479SSaeed Bishara 
71c5db858bSStefan Roese /* Populate the descriptor */
72c5db858bSStefan Roese static void mv_xor_config_sg_ll_desc(struct mv_xor_desc_slot *desc,
73c5db858bSStefan Roese 				     dma_addr_t dma_src, dma_addr_t dma_dst,
74c5db858bSStefan Roese 				     u32 len, struct mv_xor_desc_slot *prev)
75c5db858bSStefan Roese {
76c5db858bSStefan Roese 	struct mv_xor_desc *hw_desc = desc->hw_desc;
77c5db858bSStefan Roese 
78c5db858bSStefan Roese 	hw_desc->status = XOR_DESC_DMA_OWNED;
79c5db858bSStefan Roese 	hw_desc->phy_next_desc = 0;
80c5db858bSStefan Roese 	/* Configure for XOR with only one src address -> MEMCPY */
81c5db858bSStefan Roese 	hw_desc->desc_command = XOR_DESC_OPERATION_XOR | (0x1 << 0);
82c5db858bSStefan Roese 	hw_desc->phy_dest_addr = dma_dst;
83c5db858bSStefan Roese 	hw_desc->phy_src_addr[0] = dma_src;
84c5db858bSStefan Roese 	hw_desc->byte_count = len;
85c5db858bSStefan Roese 
86c5db858bSStefan Roese 	if (prev) {
87c5db858bSStefan Roese 		struct mv_xor_desc *hw_prev = prev->hw_desc;
88c5db858bSStefan Roese 
89c5db858bSStefan Roese 		hw_prev->phy_next_desc = desc->async_tx.phys;
90c5db858bSStefan Roese 	}
91c5db858bSStefan Roese }
92c5db858bSStefan Roese 
93c5db858bSStefan Roese static void mv_xor_desc_config_eod(struct mv_xor_desc_slot *desc)
94c5db858bSStefan Roese {
95c5db858bSStefan Roese 	struct mv_xor_desc *hw_desc = desc->hw_desc;
96c5db858bSStefan Roese 
97c5db858bSStefan Roese 	/* Enable end-of-descriptor interrupt */
98c5db858bSStefan Roese 	hw_desc->desc_command |= XOR_DESC_EOD_INT_EN;
99c5db858bSStefan Roese }
100c5db858bSStefan Roese 
1016f166312SLior Amsalem static void mv_desc_set_mode(struct mv_xor_desc_slot *desc)
1026f166312SLior Amsalem {
1036f166312SLior Amsalem 	struct mv_xor_desc *hw_desc = desc->hw_desc;
1046f166312SLior Amsalem 
1056f166312SLior Amsalem 	switch (desc->type) {
1066f166312SLior Amsalem 	case DMA_XOR:
1076f166312SLior Amsalem 	case DMA_INTERRUPT:
1086f166312SLior Amsalem 		hw_desc->desc_command |= XOR_DESC_OPERATION_XOR;
1096f166312SLior Amsalem 		break;
1106f166312SLior Amsalem 	case DMA_MEMCPY:
1116f166312SLior Amsalem 		hw_desc->desc_command |= XOR_DESC_OPERATION_MEMCPY;
1126f166312SLior Amsalem 		break;
1136f166312SLior Amsalem 	default:
1146f166312SLior Amsalem 		BUG();
1156f166312SLior Amsalem 		return;
1166f166312SLior Amsalem 	}
1176f166312SLior Amsalem }
1186f166312SLior Amsalem 
119ff7b0479SSaeed Bishara static void mv_desc_set_next_desc(struct mv_xor_desc_slot *desc,
120ff7b0479SSaeed Bishara 				  u32 next_desc_addr)
121ff7b0479SSaeed Bishara {
122ff7b0479SSaeed Bishara 	struct mv_xor_desc *hw_desc = desc->hw_desc;
123ff7b0479SSaeed Bishara 	BUG_ON(hw_desc->phy_next_desc);
124ff7b0479SSaeed Bishara 	hw_desc->phy_next_desc = next_desc_addr;
125ff7b0479SSaeed Bishara }
126ff7b0479SSaeed Bishara 
127ff7b0479SSaeed Bishara static void mv_desc_set_src_addr(struct mv_xor_desc_slot *desc,
128ff7b0479SSaeed Bishara 				 int index, dma_addr_t addr)
129ff7b0479SSaeed Bishara {
130ff7b0479SSaeed Bishara 	struct mv_xor_desc *hw_desc = desc->hw_desc;
131e03bc654SThomas Petazzoni 	hw_desc->phy_src_addr[mv_phy_src_idx(index)] = addr;
132ff7b0479SSaeed Bishara 	if (desc->type == DMA_XOR)
133ff7b0479SSaeed Bishara 		hw_desc->desc_command |= (1 << index);
134ff7b0479SSaeed Bishara }
135ff7b0479SSaeed Bishara 
136ff7b0479SSaeed Bishara static u32 mv_chan_get_current_desc(struct mv_xor_chan *chan)
137ff7b0479SSaeed Bishara {
1385733c38aSThomas Petazzoni 	return readl_relaxed(XOR_CURR_DESC(chan));
139ff7b0479SSaeed Bishara }
140ff7b0479SSaeed Bishara 
141ff7b0479SSaeed Bishara static void mv_chan_set_next_descriptor(struct mv_xor_chan *chan,
142ff7b0479SSaeed Bishara 					u32 next_desc_addr)
143ff7b0479SSaeed Bishara {
1445733c38aSThomas Petazzoni 	writel_relaxed(next_desc_addr, XOR_NEXT_DESC(chan));
145ff7b0479SSaeed Bishara }
146ff7b0479SSaeed Bishara 
147ff7b0479SSaeed Bishara static void mv_chan_unmask_interrupts(struct mv_xor_chan *chan)
148ff7b0479SSaeed Bishara {
1495733c38aSThomas Petazzoni 	u32 val = readl_relaxed(XOR_INTR_MASK(chan));
150ff7b0479SSaeed Bishara 	val |= XOR_INTR_MASK_VALUE << (chan->idx * 16);
1515733c38aSThomas Petazzoni 	writel_relaxed(val, XOR_INTR_MASK(chan));
152ff7b0479SSaeed Bishara }
153ff7b0479SSaeed Bishara 
154ff7b0479SSaeed Bishara static u32 mv_chan_get_intr_cause(struct mv_xor_chan *chan)
155ff7b0479SSaeed Bishara {
1565733c38aSThomas Petazzoni 	u32 intr_cause = readl_relaxed(XOR_INTR_CAUSE(chan));
157ff7b0479SSaeed Bishara 	intr_cause = (intr_cause >> (chan->idx * 16)) & 0xFFFF;
158ff7b0479SSaeed Bishara 	return intr_cause;
159ff7b0479SSaeed Bishara }
160ff7b0479SSaeed Bishara 
1610951e728SMaxime Ripard static void mv_chan_clear_eoc_cause(struct mv_xor_chan *chan)
162ff7b0479SSaeed Bishara {
163ba87d137SLior Amsalem 	u32 val;
164ba87d137SLior Amsalem 
165ba87d137SLior Amsalem 	val = XOR_INT_END_OF_DESC | XOR_INT_END_OF_CHAIN | XOR_INT_STOPPED;
166ba87d137SLior Amsalem 	val = ~(val << (chan->idx * 16));
167c98c1781SThomas Petazzoni 	dev_dbg(mv_chan_to_devp(chan), "%s, val 0x%08x\n", __func__, val);
1685733c38aSThomas Petazzoni 	writel_relaxed(val, XOR_INTR_CAUSE(chan));
169ff7b0479SSaeed Bishara }
170ff7b0479SSaeed Bishara 
1710951e728SMaxime Ripard static void mv_chan_clear_err_status(struct mv_xor_chan *chan)
172ff7b0479SSaeed Bishara {
173ff7b0479SSaeed Bishara 	u32 val = 0xFFFF0000 >> (chan->idx * 16);
1745733c38aSThomas Petazzoni 	writel_relaxed(val, XOR_INTR_CAUSE(chan));
175ff7b0479SSaeed Bishara }
176ff7b0479SSaeed Bishara 
1770951e728SMaxime Ripard static void mv_chan_set_mode(struct mv_xor_chan *chan,
17881aafb3eSThomas Petazzoni 			     u32 op_mode)
179ff7b0479SSaeed Bishara {
1805733c38aSThomas Petazzoni 	u32 config = readl_relaxed(XOR_CONFIG(chan));
181ff7b0479SSaeed Bishara 
1826f166312SLior Amsalem 	config &= ~0x7;
1836f166312SLior Amsalem 	config |= op_mode;
1846f166312SLior Amsalem 
185e03bc654SThomas Petazzoni #if defined(__BIG_ENDIAN)
186e03bc654SThomas Petazzoni 	config |= XOR_DESCRIPTOR_SWAP;
187e03bc654SThomas Petazzoni #else
188e03bc654SThomas Petazzoni 	config &= ~XOR_DESCRIPTOR_SWAP;
189e03bc654SThomas Petazzoni #endif
190e03bc654SThomas Petazzoni 
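	/*
	 * Bits [2:0] of the channel configuration register select the
	 * operation mode programmed above; XOR_DESCRIPTOR_SWAP makes the
	 * engine byte-swap the descriptors it fetches, which is needed when
	 * the CPU runs big-endian.
	 */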
1915733c38aSThomas Petazzoni 	writel_relaxed(config, XOR_CONFIG(chan));
192ff7b0479SSaeed Bishara }
193ff7b0479SSaeed Bishara 
194ff7b0479SSaeed Bishara static void mv_chan_activate(struct mv_xor_chan *chan)
195ff7b0479SSaeed Bishara {
196c98c1781SThomas Petazzoni 	dev_dbg(mv_chan_to_devp(chan), " activate chan.\n");
1975a9a55bfSEzequiel Garcia 
1985a9a55bfSEzequiel Garcia 	/* writel ensures all descriptors are flushed before activation */
1995a9a55bfSEzequiel Garcia 	writel(BIT(0), XOR_ACTIVATION(chan));
200ff7b0479SSaeed Bishara }
201ff7b0479SSaeed Bishara 
202ff7b0479SSaeed Bishara static char mv_chan_is_busy(struct mv_xor_chan *chan)
203ff7b0479SSaeed Bishara {
2045733c38aSThomas Petazzoni 	u32 state = readl_relaxed(XOR_ACTIVATION(chan));
205ff7b0479SSaeed Bishara 
206ff7b0479SSaeed Bishara 	state = (state >> 4) & 0x3;
207ff7b0479SSaeed Bishara 
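	/*
	 * Bits [5:4] of the activation register encode the channel state;
	 * only state 1 (channel actively processing a chain) is reported as
	 * busy here.
	 */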
208ff7b0479SSaeed Bishara 	return (state == 1) ? 1 : 0;
209ff7b0479SSaeed Bishara }
210ff7b0479SSaeed Bishara 
211ff7b0479SSaeed Bishara /*
2120951e728SMaxime Ripard  * mv_chan_start_new_chain - program the engine to operate on a new
2130951e728SMaxime Ripard  * chain headed by sw_desc
214ff7b0479SSaeed Bishara  * Caller must hold &mv_chan->lock while calling this function
215ff7b0479SSaeed Bishara  */
2160951e728SMaxime Ripard static void mv_chan_start_new_chain(struct mv_xor_chan *mv_chan,
217ff7b0479SSaeed Bishara 				    struct mv_xor_desc_slot *sw_desc)
218ff7b0479SSaeed Bishara {
219c98c1781SThomas Petazzoni 	dev_dbg(mv_chan_to_devp(mv_chan), "%s %d: sw_desc %p\n",
220ff7b0479SSaeed Bishara 		__func__, __LINE__, sw_desc);
221ff7b0479SSaeed Bishara 
222ff7b0479SSaeed Bishara 	/* set the hardware chain */
223ff7b0479SSaeed Bishara 	mv_chan_set_next_descriptor(mv_chan, sw_desc->async_tx.phys);
22448a9db46SBartlomiej Zolnierkiewicz 
225dfc97661SLior Amsalem 	mv_chan->pending++;
22698817b99SThomas Petazzoni 	mv_xor_issue_pending(&mv_chan->dmachan);
227ff7b0479SSaeed Bishara }
228ff7b0479SSaeed Bishara 
229ff7b0479SSaeed Bishara static dma_cookie_t
2300951e728SMaxime Ripard mv_desc_run_tx_complete_actions(struct mv_xor_desc_slot *desc,
2310951e728SMaxime Ripard 				struct mv_xor_chan *mv_chan,
2320951e728SMaxime Ripard 				dma_cookie_t cookie)
233ff7b0479SSaeed Bishara {
234ff7b0479SSaeed Bishara 	BUG_ON(desc->async_tx.cookie < 0);
235ff7b0479SSaeed Bishara 
236ff7b0479SSaeed Bishara 	if (desc->async_tx.cookie > 0) {
237ff7b0479SSaeed Bishara 		cookie = desc->async_tx.cookie;
238ff7b0479SSaeed Bishara 
2398058e258SDave Jiang 		dma_descriptor_unmap(&desc->async_tx);
240ff7b0479SSaeed Bishara 		/* call the callback (must not sleep or submit new
241ff7b0479SSaeed Bishara 		 * operations to this channel)
242ff7b0479SSaeed Bishara 		 */
243ee7681a4SDave Jiang 		dmaengine_desc_get_callback_invoke(&desc->async_tx, NULL);
244ff7b0479SSaeed Bishara 	}
245ff7b0479SSaeed Bishara 
246ff7b0479SSaeed Bishara 	/* run dependent operations */
24707f2211eSDan Williams 	dma_run_dependencies(&desc->async_tx);
248ff7b0479SSaeed Bishara 
249ff7b0479SSaeed Bishara 	return cookie;
250ff7b0479SSaeed Bishara }
251ff7b0479SSaeed Bishara 
252ff7b0479SSaeed Bishara static int
2530951e728SMaxime Ripard mv_chan_clean_completed_slots(struct mv_xor_chan *mv_chan)
254ff7b0479SSaeed Bishara {
255ff7b0479SSaeed Bishara 	struct mv_xor_desc_slot *iter, *_iter;
256ff7b0479SSaeed Bishara 
257c98c1781SThomas Petazzoni 	dev_dbg(mv_chan_to_devp(mv_chan), "%s %d\n", __func__, __LINE__);
258ff7b0479SSaeed Bishara 	list_for_each_entry_safe(iter, _iter, &mv_chan->completed_slots,
259fbea28a2SLior Amsalem 				 node) {
260ff7b0479SSaeed Bishara 
261c5db858bSStefan Roese 		if (async_tx_test_ack(&iter->async_tx)) {
262fbea28a2SLior Amsalem 			list_move_tail(&iter->node, &mv_chan->free_slots);
263c5db858bSStefan Roese 			if (!list_empty(&iter->sg_tx_list)) {
264c5db858bSStefan Roese 				list_splice_tail_init(&iter->sg_tx_list,
265c5db858bSStefan Roese 							&mv_chan->free_slots);
266c5db858bSStefan Roese 			}
267c5db858bSStefan Roese 		}
268ff7b0479SSaeed Bishara 	}
269ff7b0479SSaeed Bishara 	return 0;
270ff7b0479SSaeed Bishara }
271ff7b0479SSaeed Bishara 
272ff7b0479SSaeed Bishara static int
2730951e728SMaxime Ripard mv_desc_clean_slot(struct mv_xor_desc_slot *desc,
274ff7b0479SSaeed Bishara 		   struct mv_xor_chan *mv_chan)
275ff7b0479SSaeed Bishara {
276c98c1781SThomas Petazzoni 	dev_dbg(mv_chan_to_devp(mv_chan), "%s %d: desc %p flags %d\n",
277ff7b0479SSaeed Bishara 		__func__, __LINE__, desc, desc->async_tx.flags);
278fbea28a2SLior Amsalem 
279ff7b0479SSaeed Bishara 	/* the client is allowed to attach dependent operations
280ff7b0479SSaeed Bishara 	 * until 'ack' is set
281ff7b0479SSaeed Bishara 	 */
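	/*
	 * Descriptors that were chained together for a scatter-gather
	 * transfer travel on the first descriptor's sg_tx_list and are
	 * recycled together with it.
	 */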
282c5db858bSStefan Roese 	if (!async_tx_test_ack(&desc->async_tx)) {
283ff7b0479SSaeed Bishara 		/* move this slot to the completed_slots */
284fbea28a2SLior Amsalem 		list_move_tail(&desc->node, &mv_chan->completed_slots);
285c5db858bSStefan Roese 		if (!list_empty(&desc->sg_tx_list)) {
286c5db858bSStefan Roese 			list_splice_tail_init(&desc->sg_tx_list,
287c5db858bSStefan Roese 					      &mv_chan->completed_slots);
288c5db858bSStefan Roese 		}
289c5db858bSStefan Roese 	} else {
290fbea28a2SLior Amsalem 		list_move_tail(&desc->node, &mv_chan->free_slots);
291c5db858bSStefan Roese 		if (!list_empty(&desc->sg_tx_list)) {
292c5db858bSStefan Roese 			list_splice_tail_init(&desc->sg_tx_list,
293c5db858bSStefan Roese 					      &mv_chan->free_slots);
294c5db858bSStefan Roese 		}
295c5db858bSStefan Roese 	}
296ff7b0479SSaeed Bishara 
297ff7b0479SSaeed Bishara 	return 0;
298ff7b0479SSaeed Bishara }
299ff7b0479SSaeed Bishara 
300fbeec99aSEzequiel Garcia /* This function must be called with the mv_xor_chan spinlock held */
3010951e728SMaxime Ripard static void mv_chan_slot_cleanup(struct mv_xor_chan *mv_chan)
302ff7b0479SSaeed Bishara {
303ff7b0479SSaeed Bishara 	struct mv_xor_desc_slot *iter, *_iter;
304ff7b0479SSaeed Bishara 	dma_cookie_t cookie = 0;
305ff7b0479SSaeed Bishara 	int busy = mv_chan_is_busy(mv_chan);
306ff7b0479SSaeed Bishara 	u32 current_desc = mv_chan_get_current_desc(mv_chan);
3079136291fSLior Amsalem 	int current_cleaned = 0;
3089136291fSLior Amsalem 	struct mv_xor_desc *hw_desc;
309ff7b0479SSaeed Bishara 
310c98c1781SThomas Petazzoni 	dev_dbg(mv_chan_to_devp(mv_chan), "%s %d\n", __func__, __LINE__);
311c98c1781SThomas Petazzoni 	dev_dbg(mv_chan_to_devp(mv_chan), "current_desc %x\n", current_desc);
3120951e728SMaxime Ripard 	mv_chan_clean_completed_slots(mv_chan);
313ff7b0479SSaeed Bishara 
314ff7b0479SSaeed Bishara 	/* free completed slots from the chain starting with
315ff7b0479SSaeed Bishara 	 * the oldest descriptor
316ff7b0479SSaeed Bishara 	 */
317ff7b0479SSaeed Bishara 
318ff7b0479SSaeed Bishara 	list_for_each_entry_safe(iter, _iter, &mv_chan->chain,
319fbea28a2SLior Amsalem 				 node) {
320ff7b0479SSaeed Bishara 
3219136291fSLior Amsalem 		/* clean finished descriptors */
3229136291fSLior Amsalem 		hw_desc = iter->hw_desc;
3239136291fSLior Amsalem 		if (hw_desc->status & XOR_DESC_SUCCESS) {
3240951e728SMaxime Ripard 			cookie = mv_desc_run_tx_complete_actions(iter, mv_chan,
3259136291fSLior Amsalem 								 cookie);
326ff7b0479SSaeed Bishara 
3279136291fSLior Amsalem 			/* done processing desc, clean slot */
3280951e728SMaxime Ripard 			mv_desc_clean_slot(iter, mv_chan);
3299136291fSLior Amsalem 
3309136291fSLior Amsalem 			/* break if we cleaned the current descriptor */
331ff7b0479SSaeed Bishara 			if (iter->async_tx.phys == current_desc) {
3329136291fSLior Amsalem 				current_cleaned = 1;
333ff7b0479SSaeed Bishara 				break;
334ff7b0479SSaeed Bishara 			}
3359136291fSLior Amsalem 		} else {
3369136291fSLior Amsalem 			if (iter->async_tx.phys == current_desc) {
3379136291fSLior Amsalem 				current_cleaned = 0;
338ff7b0479SSaeed Bishara 				break;
339ff7b0479SSaeed Bishara 			}
3409136291fSLior Amsalem 		}
3419136291fSLior Amsalem 	}
342ff7b0479SSaeed Bishara 
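	/*
	 * If the engine has stopped but descriptors are still queued on the
	 * chain, restart it: from the head of the chain when the descriptor
	 * the engine stopped on was cleaned and removed above, otherwise from
	 * the descriptor following the one it stopped on.
	 */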
343ff7b0479SSaeed Bishara 	if ((busy == 0) && !list_empty(&mv_chan->chain)) {
3449136291fSLior Amsalem 		if (current_cleaned) {
3459136291fSLior Amsalem 			/*
3469136291fSLior Amsalem 			 * current descriptor cleaned and removed, run
3479136291fSLior Amsalem 			 * from list head
3489136291fSLior Amsalem 			 */
3499136291fSLior Amsalem 			iter = list_entry(mv_chan->chain.next,
350ff7b0479SSaeed Bishara 					  struct mv_xor_desc_slot,
351fbea28a2SLior Amsalem 					  node);
3520951e728SMaxime Ripard 			mv_chan_start_new_chain(mv_chan, iter);
3539136291fSLior Amsalem 		} else {
354fbea28a2SLior Amsalem 			if (!list_is_last(&iter->node, &mv_chan->chain)) {
3559136291fSLior Amsalem 				/*
3569136291fSLior Amsalem 				 * descriptors are still waiting after
3579136291fSLior Amsalem 				 * current, trigger them
3589136291fSLior Amsalem 				 */
359fbea28a2SLior Amsalem 				iter = list_entry(iter->node.next,
3609136291fSLior Amsalem 						  struct mv_xor_desc_slot,
361fbea28a2SLior Amsalem 						  node);
3620951e728SMaxime Ripard 				mv_chan_start_new_chain(mv_chan, iter);
3639136291fSLior Amsalem 			} else {
3649136291fSLior Amsalem 				/*
3659136291fSLior Amsalem 				 * some descriptors are still waiting
3669136291fSLior Amsalem 				 * to be cleaned
3679136291fSLior Amsalem 				 */
3689136291fSLior Amsalem 				tasklet_schedule(&mv_chan->irq_tasklet);
3699136291fSLior Amsalem 			}
3709136291fSLior Amsalem 		}
371ff7b0479SSaeed Bishara 	}
372ff7b0479SSaeed Bishara 
373ff7b0479SSaeed Bishara 	if (cookie > 0)
37498817b99SThomas Petazzoni 		mv_chan->dmachan.completed_cookie = cookie;
375ff7b0479SSaeed Bishara }
376ff7b0479SSaeed Bishara 
377ff7b0479SSaeed Bishara static void mv_xor_tasklet(unsigned long data)
378ff7b0479SSaeed Bishara {
379ff7b0479SSaeed Bishara 	struct mv_xor_chan *chan = (struct mv_xor_chan *) data;
380e43147acSEzequiel Garcia 
381e43147acSEzequiel Garcia 	spin_lock_bh(&chan->lock);
3820951e728SMaxime Ripard 	mv_chan_slot_cleanup(chan);
383e43147acSEzequiel Garcia 	spin_unlock_bh(&chan->lock);
384ff7b0479SSaeed Bishara }
385ff7b0479SSaeed Bishara 
386ff7b0479SSaeed Bishara static struct mv_xor_desc_slot *
3870951e728SMaxime Ripard mv_chan_alloc_slot(struct mv_xor_chan *mv_chan)
388ff7b0479SSaeed Bishara {
389fbea28a2SLior Amsalem 	struct mv_xor_desc_slot *iter;
390ff7b0479SSaeed Bishara 
391fbea28a2SLior Amsalem 	spin_lock_bh(&mv_chan->lock);
392fbea28a2SLior Amsalem 
393fbea28a2SLior Amsalem 	if (!list_empty(&mv_chan->free_slots)) {
394fbea28a2SLior Amsalem 		iter = list_first_entry(&mv_chan->free_slots,
395ff7b0479SSaeed Bishara 					struct mv_xor_desc_slot,
396fbea28a2SLior Amsalem 					node);
397ff7b0479SSaeed Bishara 
398fbea28a2SLior Amsalem 		list_move_tail(&iter->node, &mv_chan->allocated_slots);
399dfc97661SLior Amsalem 
400fbea28a2SLior Amsalem 		spin_unlock_bh(&mv_chan->lock);
401ff7b0479SSaeed Bishara 
402dfc97661SLior Amsalem 		/* pre-ack descriptor */
403ff7b0479SSaeed Bishara 		async_tx_ack(&iter->async_tx);
404dfc97661SLior Amsalem 		iter->async_tx.cookie = -EBUSY;
405dfc97661SLior Amsalem 
406dfc97661SLior Amsalem 		return iter;
407dfc97661SLior Amsalem 
408ff7b0479SSaeed Bishara 	}
409fbea28a2SLior Amsalem 
410fbea28a2SLior Amsalem 	spin_unlock_bh(&mv_chan->lock);
411ff7b0479SSaeed Bishara 
412ff7b0479SSaeed Bishara 	/* try to free some slots if the allocation fails */
413ff7b0479SSaeed Bishara 	tasklet_schedule(&mv_chan->irq_tasklet);
414ff7b0479SSaeed Bishara 
415ff7b0479SSaeed Bishara 	return NULL;
416ff7b0479SSaeed Bishara }
417ff7b0479SSaeed Bishara 
418ff7b0479SSaeed Bishara /************************ DMA engine API functions ****************************/
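/*
 * A typical dmaengine client drives the callbacks below much like the
 * self-tests later in this file do: prepare a descriptor with one of the
 * prep helpers, submit it through tx_submit(), kick the engine with
 * issue_pending() and then poll for completion via the status callback.
 */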
419ff7b0479SSaeed Bishara static dma_cookie_t
420ff7b0479SSaeed Bishara mv_xor_tx_submit(struct dma_async_tx_descriptor *tx)
421ff7b0479SSaeed Bishara {
422ff7b0479SSaeed Bishara 	struct mv_xor_desc_slot *sw_desc = to_mv_xor_slot(tx);
423ff7b0479SSaeed Bishara 	struct mv_xor_chan *mv_chan = to_mv_xor_chan(tx->chan);
424dfc97661SLior Amsalem 	struct mv_xor_desc_slot *old_chain_tail;
425ff7b0479SSaeed Bishara 	dma_cookie_t cookie;
426ff7b0479SSaeed Bishara 	int new_hw_chain = 1;
427ff7b0479SSaeed Bishara 
428c98c1781SThomas Petazzoni 	dev_dbg(mv_chan_to_devp(mv_chan),
429ff7b0479SSaeed Bishara 		"%s sw_desc %p: async_tx %p\n",
430ff7b0479SSaeed Bishara 		__func__, sw_desc, &sw_desc->async_tx);
431ff7b0479SSaeed Bishara 
432ff7b0479SSaeed Bishara 	spin_lock_bh(&mv_chan->lock);
433884485e1SRussell King - ARM Linux 	cookie = dma_cookie_assign(tx);
434ff7b0479SSaeed Bishara 
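	/*
	 * If the channel's chain is empty the hardware must be (re)started;
	 * otherwise the descriptor is appended to the existing chain and the
	 * engine is only restarted when it already consumed the old tail
	 * (checked below).
	 */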
435ff7b0479SSaeed Bishara 	if (list_empty(&mv_chan->chain))
436fbea28a2SLior Amsalem 		list_move_tail(&sw_desc->node, &mv_chan->chain);
437ff7b0479SSaeed Bishara 	else {
438ff7b0479SSaeed Bishara 		new_hw_chain = 0;
439ff7b0479SSaeed Bishara 
440ff7b0479SSaeed Bishara 		old_chain_tail = list_entry(mv_chan->chain.prev,
441ff7b0479SSaeed Bishara 					    struct mv_xor_desc_slot,
442fbea28a2SLior Amsalem 					    node);
443fbea28a2SLior Amsalem 		list_move_tail(&sw_desc->node, &mv_chan->chain);
444ff7b0479SSaeed Bishara 
44531fd8f5bSOlof Johansson 		dev_dbg(mv_chan_to_devp(mv_chan), "Append to last desc %pa\n",
44631fd8f5bSOlof Johansson 			&old_chain_tail->async_tx.phys);
447ff7b0479SSaeed Bishara 
448ff7b0479SSaeed Bishara 		/* fix up the hardware chain */
449dfc97661SLior Amsalem 		mv_desc_set_next_desc(old_chain_tail, sw_desc->async_tx.phys);
450ff7b0479SSaeed Bishara 
451ff7b0479SSaeed Bishara 		/* if the channel is not busy */
452ff7b0479SSaeed Bishara 		if (!mv_chan_is_busy(mv_chan)) {
453ff7b0479SSaeed Bishara 			u32 current_desc = mv_chan_get_current_desc(mv_chan);
454ff7b0479SSaeed Bishara 			/*
455ff7b0479SSaeed Bishara 			 * and the current descriptor was the end of the chain
456ff7b0479SSaeed Bishara 			 * before the append, then we need to start the channel
457ff7b0479SSaeed Bishara 			 */
458ff7b0479SSaeed Bishara 			if (current_desc == old_chain_tail->async_tx.phys)
459ff7b0479SSaeed Bishara 				new_hw_chain = 1;
460ff7b0479SSaeed Bishara 		}
461ff7b0479SSaeed Bishara 	}
462ff7b0479SSaeed Bishara 
463ff7b0479SSaeed Bishara 	if (new_hw_chain)
4640951e728SMaxime Ripard 		mv_chan_start_new_chain(mv_chan, sw_desc);
465ff7b0479SSaeed Bishara 
466ff7b0479SSaeed Bishara 	spin_unlock_bh(&mv_chan->lock);
467ff7b0479SSaeed Bishara 
468ff7b0479SSaeed Bishara 	return cookie;
469ff7b0479SSaeed Bishara }
470ff7b0479SSaeed Bishara 
471ff7b0479SSaeed Bishara /* returns the number of allocated descriptors */
472aa1e6f1aSDan Williams static int mv_xor_alloc_chan_resources(struct dma_chan *chan)
473ff7b0479SSaeed Bishara {
47431fd8f5bSOlof Johansson 	void *virt_desc;
47531fd8f5bSOlof Johansson 	dma_addr_t dma_desc;
476ff7b0479SSaeed Bishara 	int idx;
477ff7b0479SSaeed Bishara 	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
478ff7b0479SSaeed Bishara 	struct mv_xor_desc_slot *slot = NULL;
479b503fa01SThomas Petazzoni 	int num_descs_in_pool = MV_XOR_POOL_SIZE/MV_XOR_SLOT_SIZE;
480ff7b0479SSaeed Bishara 
481ff7b0479SSaeed Bishara 	/* Allocate descriptor slots */
482ff7b0479SSaeed Bishara 	idx = mv_chan->slots_allocated;
483ff7b0479SSaeed Bishara 	while (idx < num_descs_in_pool) {
484ff7b0479SSaeed Bishara 		slot = kzalloc(sizeof(*slot), GFP_KERNEL);
485ff7b0479SSaeed Bishara 		if (!slot) {
486b8291ddeSEzequiel Garcia 			dev_info(mv_chan_to_devp(mv_chan),
487b8291ddeSEzequiel Garcia 				 "channel only initialized %d descriptor slots",
488b8291ddeSEzequiel Garcia 				 idx);
489ff7b0479SSaeed Bishara 			break;
490ff7b0479SSaeed Bishara 		}
49131fd8f5bSOlof Johansson 		virt_desc = mv_chan->dma_desc_pool_virt;
49231fd8f5bSOlof Johansson 		slot->hw_desc = virt_desc + idx * MV_XOR_SLOT_SIZE;
493ff7b0479SSaeed Bishara 
494ff7b0479SSaeed Bishara 		dma_async_tx_descriptor_init(&slot->async_tx, chan);
495ff7b0479SSaeed Bishara 		slot->async_tx.tx_submit = mv_xor_tx_submit;
496fbea28a2SLior Amsalem 		INIT_LIST_HEAD(&slot->node);
497c5db858bSStefan Roese 		INIT_LIST_HEAD(&slot->sg_tx_list);
49831fd8f5bSOlof Johansson 		dma_desc = mv_chan->dma_desc_pool;
49931fd8f5bSOlof Johansson 		slot->async_tx.phys = dma_desc + idx * MV_XOR_SLOT_SIZE;
500ff7b0479SSaeed Bishara 		slot->idx = idx++;
501ff7b0479SSaeed Bishara 
502ff7b0479SSaeed Bishara 		spin_lock_bh(&mv_chan->lock);
503ff7b0479SSaeed Bishara 		mv_chan->slots_allocated = idx;
504fbea28a2SLior Amsalem 		list_add_tail(&slot->node, &mv_chan->free_slots);
505ff7b0479SSaeed Bishara 		spin_unlock_bh(&mv_chan->lock);
506ff7b0479SSaeed Bishara 	}
507ff7b0479SSaeed Bishara 
508c98c1781SThomas Petazzoni 	dev_dbg(mv_chan_to_devp(mv_chan),
509fbea28a2SLior Amsalem 		"allocated %d descriptor slots\n",
510fbea28a2SLior Amsalem 		mv_chan->slots_allocated);
511ff7b0479SSaeed Bishara 
512ff7b0479SSaeed Bishara 	return mv_chan->slots_allocated ? : -ENOMEM;
513ff7b0479SSaeed Bishara }
514ff7b0479SSaeed Bishara 
51577ff7a70SStefan Roese /*
51677ff7a70SStefan Roese  * Check if source or destination is a PCIe/IO address (non-SDRAM) and add
51777ff7a70SStefan Roese  * a new MBus window if necessary. A cache of the configured windows is
51877ff7a70SStefan Roese  * used so that the MMIO-mapped registers don't have to be read for every
51977ff7a70SStefan Roese  * check, which speeds up this process.
52077ff7a70SStefan Roese  */
52177ff7a70SStefan Roese static int mv_xor_add_io_win(struct mv_xor_chan *mv_chan, u32 addr)
52277ff7a70SStefan Roese {
52377ff7a70SStefan Roese 	struct mv_xor_device *xordev = mv_chan->xordev;
52477ff7a70SStefan Roese 	void __iomem *base = mv_chan->mmr_high_base;
52577ff7a70SStefan Roese 	u32 win_enable;
52677ff7a70SStefan Roese 	u32 size;
52777ff7a70SStefan Roese 	u8 target, attr;
52877ff7a70SStefan Roese 	int ret;
52977ff7a70SStefan Roese 	int i;
53077ff7a70SStefan Roese 
53177ff7a70SStefan Roese 	/* Nothing needs to get done for the Armada 3700 */
53277ff7a70SStefan Roese 	if (xordev->xor_type == XOR_ARMADA_37XX)
53377ff7a70SStefan Roese 		return 0;
53477ff7a70SStefan Roese 
53577ff7a70SStefan Roese 	/*
53677ff7a70SStefan Roese 	 * Loop over the cached windows to check whether the requested area
53777ff7a70SStefan Roese 	 * is already mapped. If it is, nothing needs to be done and we can
53877ff7a70SStefan Roese 	 * return.
53977ff7a70SStefan Roese 	 */
54077ff7a70SStefan Roese 	for (i = 0; i < WINDOW_COUNT; i++) {
54177ff7a70SStefan Roese 		if (addr >= xordev->win_start[i] &&
54277ff7a70SStefan Roese 		    addr <= xordev->win_end[i]) {
54377ff7a70SStefan Roese 			/* Window is already mapped */
54477ff7a70SStefan Roese 			return 0;
54577ff7a70SStefan Roese 		}
54677ff7a70SStefan Roese 	}
54777ff7a70SStefan Roese 
54877ff7a70SStefan Roese 	/*
54977ff7a70SStefan Roese 	 * The window is not mapped, so we need to create the new mapping
55077ff7a70SStefan Roese 	 */
55177ff7a70SStefan Roese 
55277ff7a70SStefan Roese 	/* If no IO window is found, the address has to be located in SDRAM */
55377ff7a70SStefan Roese 	ret = mvebu_mbus_get_io_win_info(addr, &size, &target, &attr);
55477ff7a70SStefan Roese 	if (ret < 0)
55577ff7a70SStefan Roese 		return 0;
55677ff7a70SStefan Roese 
55777ff7a70SStefan Roese 	/*
55877ff7a70SStefan Roese 	 * Mask the base address 'addr' according to the 'size' read back from
55977ff7a70SStefan Roese 	 * the MBus window. Otherwise we might end up with an address located
56077ff7a70SStefan Roese 	 * somewhere in the middle of that area.
56177ff7a70SStefan Roese 	 */
56277ff7a70SStefan Roese 	size -= 1;
56377ff7a70SStefan Roese 	addr &= ~size;
56477ff7a70SStefan Roese 
56577ff7a70SStefan Roese 	/*
56677ff7a70SStefan Roese 	 * Reading either of the two enable registers is enough, as they are
56777ff7a70SStefan Roese 	 * always programmed with identical values
56877ff7a70SStefan Roese 	 */
56977ff7a70SStefan Roese 	win_enable = readl(base + WINDOW_BAR_ENABLE(0));
57077ff7a70SStefan Roese 
57177ff7a70SStefan Roese 	/* Set 'i' to the first free window to write the new values to */
57277ff7a70SStefan Roese 	i = ffs(~win_enable) - 1;
57377ff7a70SStefan Roese 	if (i >= WINDOW_COUNT)
57477ff7a70SStefan Roese 		return -ENOMEM;
57577ff7a70SStefan Roese 
57677ff7a70SStefan Roese 	writel((addr & 0xffff0000) | (attr << 8) | target,
57777ff7a70SStefan Roese 	       base + WINDOW_BASE(i));
57877ff7a70SStefan Roese 	writel(size & 0xffff0000, base + WINDOW_SIZE(i));
57977ff7a70SStefan Roese 
58077ff7a70SStefan Roese 	/* Fill the caching variables for later use */
58177ff7a70SStefan Roese 	xordev->win_start[i] = addr;
58277ff7a70SStefan Roese 	xordev->win_end[i] = addr + size;
58377ff7a70SStefan Roese 
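	/*
	 * Enable the new window in both BAR enable registers; the value
	 * written at bit 16 + 2 * i appears to be the 2-bit access-control
	 * field for this window, with 3 granting full read/write access.
	 */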
58477ff7a70SStefan Roese 	win_enable |= (1 << i);
58577ff7a70SStefan Roese 	win_enable |= 3 << (16 + (2 * i));
58677ff7a70SStefan Roese 	writel(win_enable, base + WINDOW_BAR_ENABLE(0));
58777ff7a70SStefan Roese 	writel(win_enable, base + WINDOW_BAR_ENABLE(1));
58877ff7a70SStefan Roese 
58977ff7a70SStefan Roese 	return 0;
59077ff7a70SStefan Roese }
59177ff7a70SStefan Roese 
592ff7b0479SSaeed Bishara static struct dma_async_tx_descriptor *
593ff7b0479SSaeed Bishara mv_xor_prep_dma_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
594ff7b0479SSaeed Bishara 		    unsigned int src_cnt, size_t len, unsigned long flags)
595ff7b0479SSaeed Bishara {
596ff7b0479SSaeed Bishara 	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
597dfc97661SLior Amsalem 	struct mv_xor_desc_slot *sw_desc;
59877ff7a70SStefan Roese 	int ret;
599ff7b0479SSaeed Bishara 
600ff7b0479SSaeed Bishara 	if (unlikely(len < MV_XOR_MIN_BYTE_COUNT))
601ff7b0479SSaeed Bishara 		return NULL;
602ff7b0479SSaeed Bishara 
6037912d300SColy Li 	BUG_ON(len > MV_XOR_MAX_BYTE_COUNT);
604ff7b0479SSaeed Bishara 
605c98c1781SThomas Petazzoni 	dev_dbg(mv_chan_to_devp(mv_chan),
606bc822e12SGregory CLEMENT 		"%s src_cnt: %d len: %zu dest %pad flags: %ld\n",
60731fd8f5bSOlof Johansson 		__func__, src_cnt, len, &dest, flags);
608ff7b0479SSaeed Bishara 
60977ff7a70SStefan Roese 	/* Check if a new window needs to get added for 'dest' */
61077ff7a70SStefan Roese 	ret = mv_xor_add_io_win(mv_chan, dest);
61177ff7a70SStefan Roese 	if (ret)
61277ff7a70SStefan Roese 		return NULL;
61377ff7a70SStefan Roese 
6140951e728SMaxime Ripard 	sw_desc = mv_chan_alloc_slot(mv_chan);
615ff7b0479SSaeed Bishara 	if (sw_desc) {
616ff7b0479SSaeed Bishara 		sw_desc->type = DMA_XOR;
617ff7b0479SSaeed Bishara 		sw_desc->async_tx.flags = flags;
618ba87d137SLior Amsalem 		mv_desc_init(sw_desc, dest, len, flags);
6196f166312SLior Amsalem 		if (mv_chan->op_in_desc == XOR_MODE_IN_DESC)
6206f166312SLior Amsalem 			mv_desc_set_mode(sw_desc);
62177ff7a70SStefan Roese 		while (src_cnt--) {
62277ff7a70SStefan Roese 			/* Check if a new window needs to get added for 'src' */
62377ff7a70SStefan Roese 			ret = mv_xor_add_io_win(mv_chan, src[src_cnt]);
62477ff7a70SStefan Roese 			if (ret)
62577ff7a70SStefan Roese 				return NULL;
626dfc97661SLior Amsalem 			mv_desc_set_src_addr(sw_desc, src_cnt, src[src_cnt]);
627ff7b0479SSaeed Bishara 		}
62877ff7a70SStefan Roese 	}
629fbea28a2SLior Amsalem 
630c98c1781SThomas Petazzoni 	dev_dbg(mv_chan_to_devp(mv_chan),
631ff7b0479SSaeed Bishara 		"%s sw_desc %p async_tx %p \n",
632ff7b0479SSaeed Bishara 		__func__, sw_desc, &sw_desc->async_tx);
633ff7b0479SSaeed Bishara 	return sw_desc ? &sw_desc->async_tx : NULL;
634ff7b0479SSaeed Bishara }
635ff7b0479SSaeed Bishara 
6363e4f52e2SLior Amsalem static struct dma_async_tx_descriptor *
6373e4f52e2SLior Amsalem mv_xor_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
6383e4f52e2SLior Amsalem 		size_t len, unsigned long flags)
6393e4f52e2SLior Amsalem {
6403e4f52e2SLior Amsalem 	/*
6413e4f52e2SLior Amsalem 	 * A MEMCPY operation is identical to an XOR operation with only
6423e4f52e2SLior Amsalem 	 * a single source address.
6433e4f52e2SLior Amsalem 	 */
6443e4f52e2SLior Amsalem 	return mv_xor_prep_dma_xor(chan, dest, &src, 1, len, flags);
6453e4f52e2SLior Amsalem }
6463e4f52e2SLior Amsalem 
64722843545SLior Amsalem static struct dma_async_tx_descriptor *
64822843545SLior Amsalem mv_xor_prep_dma_interrupt(struct dma_chan *chan, unsigned long flags)
64922843545SLior Amsalem {
65022843545SLior Amsalem 	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
65122843545SLior Amsalem 	dma_addr_t src, dest;
65222843545SLior Amsalem 	size_t len;
65322843545SLior Amsalem 
65422843545SLior Amsalem 	src = mv_chan->dummy_src_addr;
65522843545SLior Amsalem 	dest = mv_chan->dummy_dst_addr;
65622843545SLior Amsalem 	len = MV_XOR_MIN_BYTE_COUNT;
65722843545SLior Amsalem 
65822843545SLior Amsalem 	/*
65922843545SLior Amsalem 	 * We implement the DMA_INTERRUPT operation as a minimum sized
66022843545SLior Amsalem 	 * XOR operation with a single dummy source address.
66122843545SLior Amsalem 	 */
66222843545SLior Amsalem 	return mv_xor_prep_dma_xor(chan, dest, &src, 1, len, flags);
66322843545SLior Amsalem }
66422843545SLior Amsalem 
665c5db858bSStefan Roese /**
666c5db858bSStefan Roese  * mv_xor_prep_dma_sg - prepare descriptors for a memory sg transaction
667c5db858bSStefan Roese  * @chan: DMA channel
668c5db858bSStefan Roese  * @dst_sg: Destination scatter list
669c5db858bSStefan Roese  * @dst_sg_len: Number of entries in destination scatter list
670c5db858bSStefan Roese  * @src_sg: Source scatter list
671c5db858bSStefan Roese  * @src_sg_len: Number of entries in source scatter list
672c5db858bSStefan Roese  * @flags: transfer ack flags
673c5db858bSStefan Roese  *
674c5db858bSStefan Roese  * Return: Async transaction descriptor on success and NULL on failure
675c5db858bSStefan Roese  */
676c5db858bSStefan Roese static struct dma_async_tx_descriptor *
677c5db858bSStefan Roese mv_xor_prep_dma_sg(struct dma_chan *chan, struct scatterlist *dst_sg,
678c5db858bSStefan Roese 		   unsigned int dst_sg_len, struct scatterlist *src_sg,
679c5db858bSStefan Roese 		   unsigned int src_sg_len, unsigned long flags)
680c5db858bSStefan Roese {
681c5db858bSStefan Roese 	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
682c5db858bSStefan Roese 	struct mv_xor_desc_slot *new;
683c5db858bSStefan Roese 	struct mv_xor_desc_slot *first = NULL;
684c5db858bSStefan Roese 	struct mv_xor_desc_slot *prev = NULL;
685c5db858bSStefan Roese 	size_t len, dst_avail, src_avail;
686c5db858bSStefan Roese 	dma_addr_t dma_dst, dma_src;
687c5db858bSStefan Roese 	int desc_cnt = 0;
688c5db858bSStefan Roese 	int ret;
689c5db858bSStefan Roese 
690c5db858bSStefan Roese 	dev_dbg(mv_chan_to_devp(mv_chan),
691c5db858bSStefan Roese 		"%s dst_sg_len: %d src_sg_len: %d flags: %ld\n",
692c5db858bSStefan Roese 		__func__, dst_sg_len, src_sg_len, flags);
693c5db858bSStefan Roese 
694c5db858bSStefan Roese 	dst_avail = sg_dma_len(dst_sg);
695c5db858bSStefan Roese 	src_avail = sg_dma_len(src_sg);
696c5db858bSStefan Roese 
697c5db858bSStefan Roese 	/* Run until we are out of scatterlist entries */
698c5db858bSStefan Roese 	while (true) {
699c5db858bSStefan Roese 		/* Allocate and populate the descriptor */
700c5db858bSStefan Roese 		desc_cnt++;
701c5db858bSStefan Roese 		new = mv_chan_alloc_slot(mv_chan);
702c5db858bSStefan Roese 		if (!new) {
703c5db858bSStefan Roese 			dev_err(mv_chan_to_devp(mv_chan),
704c5db858bSStefan Roese 				"Out of descriptors (desc_cnt=%d)!\n",
705c5db858bSStefan Roese 				desc_cnt);
706c5db858bSStefan Roese 			goto err;
707c5db858bSStefan Roese 		}
708c5db858bSStefan Roese 
709c5db858bSStefan Roese 		len = min_t(size_t, src_avail, dst_avail);
710c5db858bSStefan Roese 		len = min_t(size_t, len, MV_XOR_MAX_BYTE_COUNT);
711c5db858bSStefan Roese 		if (len == 0)
712c5db858bSStefan Roese 			goto fetch;
713c5db858bSStefan Roese 
714c5db858bSStefan Roese 		if (len < MV_XOR_MIN_BYTE_COUNT) {
715c5db858bSStefan Roese 			dev_err(mv_chan_to_devp(mv_chan),
716c5db858bSStefan Roese 				"Transfer size of %zu too small!\n", len);
717c5db858bSStefan Roese 			goto err;
718c5db858bSStefan Roese 		}
719c5db858bSStefan Roese 
720c5db858bSStefan Roese 		dma_dst = sg_dma_address(dst_sg) + sg_dma_len(dst_sg) -
721c5db858bSStefan Roese 			dst_avail;
722c5db858bSStefan Roese 		dma_src = sg_dma_address(src_sg) + sg_dma_len(src_sg) -
723c5db858bSStefan Roese 			src_avail;
724c5db858bSStefan Roese 
725c5db858bSStefan Roese 		/* Check if a new window needs to get added for 'dst' */
726c5db858bSStefan Roese 		ret = mv_xor_add_io_win(mv_chan, dma_dst);
727c5db858bSStefan Roese 		if (ret)
728c5db858bSStefan Roese 			goto err;
729c5db858bSStefan Roese 
730c5db858bSStefan Roese 		/* Check if a new window needs to get added for 'src' */
731c5db858bSStefan Roese 		ret = mv_xor_add_io_win(mv_chan, dma_src);
732c5db858bSStefan Roese 		if (ret)
733c5db858bSStefan Roese 			goto err;
734c5db858bSStefan Roese 
735c5db858bSStefan Roese 		/* Populate the descriptor */
736c5db858bSStefan Roese 		mv_xor_config_sg_ll_desc(new, dma_src, dma_dst, len, prev);
737c5db858bSStefan Roese 		prev = new;
738c5db858bSStefan Roese 		dst_avail -= len;
739c5db858bSStefan Roese 		src_avail -= len;
740c5db858bSStefan Roese 
741c5db858bSStefan Roese 		if (!first)
742c5db858bSStefan Roese 			first = new;
743c5db858bSStefan Roese 		else
744c5db858bSStefan Roese 			list_move_tail(&new->node, &first->sg_tx_list);
745c5db858bSStefan Roese 
746c5db858bSStefan Roese fetch:
747c5db858bSStefan Roese 		/* Fetch the next dst scatterlist entry */
748c5db858bSStefan Roese 		if (dst_avail == 0) {
749c5db858bSStefan Roese 			if (dst_sg_len == 0)
750c5db858bSStefan Roese 				break;
751c5db858bSStefan Roese 
752c5db858bSStefan Roese 			/* Fetch the next entry: if there are no more: done */
753c5db858bSStefan Roese 			dst_sg = sg_next(dst_sg);
754c5db858bSStefan Roese 			if (dst_sg == NULL)
755c5db858bSStefan Roese 				break;
756c5db858bSStefan Roese 
757c5db858bSStefan Roese 			dst_sg_len--;
758c5db858bSStefan Roese 			dst_avail = sg_dma_len(dst_sg);
759c5db858bSStefan Roese 		}
760c5db858bSStefan Roese 
761c5db858bSStefan Roese 		/* Fetch the next src scatterlist entry */
762c5db858bSStefan Roese 		if (src_avail == 0) {
763c5db858bSStefan Roese 			if (src_sg_len == 0)
764c5db858bSStefan Roese 				break;
765c5db858bSStefan Roese 
766c5db858bSStefan Roese 			/* Fetch the next entry: if there are no more: done */
767c5db858bSStefan Roese 			src_sg = sg_next(src_sg);
768c5db858bSStefan Roese 			if (src_sg == NULL)
769c5db858bSStefan Roese 				break;
770c5db858bSStefan Roese 
771c5db858bSStefan Roese 			src_sg_len--;
772c5db858bSStefan Roese 			src_avail = sg_dma_len(src_sg);
773c5db858bSStefan Roese 		}
774c5db858bSStefan Roese 	}
775c5db858bSStefan Roese 
776c5db858bSStefan Roese 	/* Set the EOD flag in the last descriptor */
777c5db858bSStefan Roese 	mv_xor_desc_config_eod(new);
778c5db858bSStefan Roese 	first->async_tx.flags = flags;
779c5db858bSStefan Roese 
780c5db858bSStefan Roese 	return &first->async_tx;
781c5db858bSStefan Roese 
782c5db858bSStefan Roese err:
783c5db858bSStefan Roese 	/* Cleanup: Move all descriptors back into the free list */
784c5db858bSStefan Roese 	spin_lock_bh(&mv_chan->lock);
785c5db858bSStefan Roese 	mv_desc_clean_slot(first, mv_chan);
786c5db858bSStefan Roese 	spin_unlock_bh(&mv_chan->lock);
787c5db858bSStefan Roese 
788c5db858bSStefan Roese 	return NULL;
789c5db858bSStefan Roese }
790c5db858bSStefan Roese 
791ff7b0479SSaeed Bishara static void mv_xor_free_chan_resources(struct dma_chan *chan)
792ff7b0479SSaeed Bishara {
793ff7b0479SSaeed Bishara 	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
794ff7b0479SSaeed Bishara 	struct mv_xor_desc_slot *iter, *_iter;
795ff7b0479SSaeed Bishara 	int in_use_descs = 0;
796ff7b0479SSaeed Bishara 
797ff7b0479SSaeed Bishara 	spin_lock_bh(&mv_chan->lock);
798e43147acSEzequiel Garcia 
7990951e728SMaxime Ripard 	mv_chan_slot_cleanup(mv_chan);
800ff7b0479SSaeed Bishara 
801ff7b0479SSaeed Bishara 	list_for_each_entry_safe(iter, _iter, &mv_chan->chain,
802fbea28a2SLior Amsalem 					node) {
803ff7b0479SSaeed Bishara 		in_use_descs++;
804fbea28a2SLior Amsalem 		list_move_tail(&iter->node, &mv_chan->free_slots);
805ff7b0479SSaeed Bishara 	}
806ff7b0479SSaeed Bishara 	list_for_each_entry_safe(iter, _iter, &mv_chan->completed_slots,
807fbea28a2SLior Amsalem 				 node) {
808ff7b0479SSaeed Bishara 		in_use_descs++;
809fbea28a2SLior Amsalem 		list_move_tail(&iter->node, &mv_chan->free_slots);
810fbea28a2SLior Amsalem 	}
811fbea28a2SLior Amsalem 	list_for_each_entry_safe(iter, _iter, &mv_chan->allocated_slots,
812fbea28a2SLior Amsalem 				 node) {
813fbea28a2SLior Amsalem 		in_use_descs++;
814fbea28a2SLior Amsalem 		list_move_tail(&iter->node, &mv_chan->free_slots);
815ff7b0479SSaeed Bishara 	}
816ff7b0479SSaeed Bishara 	list_for_each_entry_safe_reverse(
817fbea28a2SLior Amsalem 		iter, _iter, &mv_chan->free_slots, node) {
818fbea28a2SLior Amsalem 		list_del(&iter->node);
819ff7b0479SSaeed Bishara 		kfree(iter);
820ff7b0479SSaeed Bishara 		mv_chan->slots_allocated--;
821ff7b0479SSaeed Bishara 	}
822ff7b0479SSaeed Bishara 
823c98c1781SThomas Petazzoni 	dev_dbg(mv_chan_to_devp(mv_chan), "%s slots_allocated %d\n",
824ff7b0479SSaeed Bishara 		__func__, mv_chan->slots_allocated);
825ff7b0479SSaeed Bishara 	spin_unlock_bh(&mv_chan->lock);
826ff7b0479SSaeed Bishara 
827ff7b0479SSaeed Bishara 	if (in_use_descs)
828c98c1781SThomas Petazzoni 		dev_err(mv_chan_to_devp(mv_chan),
829ff7b0479SSaeed Bishara 			"freeing %d in use descriptors!\n", in_use_descs);
830ff7b0479SSaeed Bishara }
831ff7b0479SSaeed Bishara 
832ff7b0479SSaeed Bishara /**
83307934481SLinus Walleij  * mv_xor_status - poll the status of an XOR transaction
834ff7b0479SSaeed Bishara  * @chan: XOR channel handle
835ff7b0479SSaeed Bishara  * @cookie: XOR transaction identifier
83607934481SLinus Walleij  * @txstate: XOR transactions state holder (or NULL)
837ff7b0479SSaeed Bishara  */
83807934481SLinus Walleij static enum dma_status mv_xor_status(struct dma_chan *chan,
839ff7b0479SSaeed Bishara 					  dma_cookie_t cookie,
84007934481SLinus Walleij 					  struct dma_tx_state *txstate)
841ff7b0479SSaeed Bishara {
842ff7b0479SSaeed Bishara 	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
843ff7b0479SSaeed Bishara 	enum dma_status ret;
844ff7b0479SSaeed Bishara 
84596a2af41SRussell King - ARM Linux 	ret = dma_cookie_status(chan, cookie, txstate);
846890766d2SEzequiel Garcia 	if (ret == DMA_COMPLETE)
847ff7b0479SSaeed Bishara 		return ret;
848e43147acSEzequiel Garcia 
849e43147acSEzequiel Garcia 	spin_lock_bh(&mv_chan->lock);
8500951e728SMaxime Ripard 	mv_chan_slot_cleanup(mv_chan);
851e43147acSEzequiel Garcia 	spin_unlock_bh(&mv_chan->lock);
852ff7b0479SSaeed Bishara 
85396a2af41SRussell King - ARM Linux 	return dma_cookie_status(chan, cookie, txstate);
854ff7b0479SSaeed Bishara }
855ff7b0479SSaeed Bishara 
8560951e728SMaxime Ripard static void mv_chan_dump_regs(struct mv_xor_chan *chan)
857ff7b0479SSaeed Bishara {
858ff7b0479SSaeed Bishara 	u32 val;
859ff7b0479SSaeed Bishara 
8605733c38aSThomas Petazzoni 	val = readl_relaxed(XOR_CONFIG(chan));
8611ba151cdSJoe Perches 	dev_err(mv_chan_to_devp(chan), "config       0x%08x\n", val);
862ff7b0479SSaeed Bishara 
8635733c38aSThomas Petazzoni 	val = readl_relaxed(XOR_ACTIVATION(chan));
8641ba151cdSJoe Perches 	dev_err(mv_chan_to_devp(chan), "activation   0x%08x\n", val);
865ff7b0479SSaeed Bishara 
8665733c38aSThomas Petazzoni 	val = readl_relaxed(XOR_INTR_CAUSE(chan));
8671ba151cdSJoe Perches 	dev_err(mv_chan_to_devp(chan), "intr cause   0x%08x\n", val);
868ff7b0479SSaeed Bishara 
8695733c38aSThomas Petazzoni 	val = readl_relaxed(XOR_INTR_MASK(chan));
8701ba151cdSJoe Perches 	dev_err(mv_chan_to_devp(chan), "intr mask    0x%08x\n", val);
871ff7b0479SSaeed Bishara 
8725733c38aSThomas Petazzoni 	val = readl_relaxed(XOR_ERROR_CAUSE(chan));
8731ba151cdSJoe Perches 	dev_err(mv_chan_to_devp(chan), "error cause  0x%08x\n", val);
874ff7b0479SSaeed Bishara 
8755733c38aSThomas Petazzoni 	val = readl_relaxed(XOR_ERROR_ADDR(chan));
8761ba151cdSJoe Perches 	dev_err(mv_chan_to_devp(chan), "error addr   0x%08x\n", val);
877ff7b0479SSaeed Bishara }
878ff7b0479SSaeed Bishara 
8790951e728SMaxime Ripard static void mv_chan_err_interrupt_handler(struct mv_xor_chan *chan,
880ff7b0479SSaeed Bishara 					  u32 intr_cause)
881ff7b0479SSaeed Bishara {
8820e7488edSEzequiel Garcia 	if (intr_cause & XOR_INT_ERR_DECODE) {
8830e7488edSEzequiel Garcia 		dev_dbg(mv_chan_to_devp(chan), "ignoring address decode error\n");
884ff7b0479SSaeed Bishara 		return;
885ff7b0479SSaeed Bishara 	}
886ff7b0479SSaeed Bishara 
8870e7488edSEzequiel Garcia 	dev_err(mv_chan_to_devp(chan), "error on chan %d. intr cause 0x%08x\n",
888ff7b0479SSaeed Bishara 		chan->idx, intr_cause);
889ff7b0479SSaeed Bishara 
8900951e728SMaxime Ripard 	mv_chan_dump_regs(chan);
8910e7488edSEzequiel Garcia 	WARN_ON(1);
892ff7b0479SSaeed Bishara }
893ff7b0479SSaeed Bishara 
894ff7b0479SSaeed Bishara static irqreturn_t mv_xor_interrupt_handler(int irq, void *data)
895ff7b0479SSaeed Bishara {
896ff7b0479SSaeed Bishara 	struct mv_xor_chan *chan = data;
897ff7b0479SSaeed Bishara 	u32 intr_cause = mv_chan_get_intr_cause(chan);
898ff7b0479SSaeed Bishara 
899c98c1781SThomas Petazzoni 	dev_dbg(mv_chan_to_devp(chan), "intr cause %x\n", intr_cause);
900ff7b0479SSaeed Bishara 
9010e7488edSEzequiel Garcia 	if (intr_cause & XOR_INTR_ERRORS)
9020951e728SMaxime Ripard 		mv_chan_err_interrupt_handler(chan, intr_cause);
903ff7b0479SSaeed Bishara 
904ff7b0479SSaeed Bishara 	tasklet_schedule(&chan->irq_tasklet);
905ff7b0479SSaeed Bishara 
9060951e728SMaxime Ripard 	mv_chan_clear_eoc_cause(chan);
907ff7b0479SSaeed Bishara 
908ff7b0479SSaeed Bishara 	return IRQ_HANDLED;
909ff7b0479SSaeed Bishara }
910ff7b0479SSaeed Bishara 
911ff7b0479SSaeed Bishara static void mv_xor_issue_pending(struct dma_chan *chan)
912ff7b0479SSaeed Bishara {
913ff7b0479SSaeed Bishara 	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
914ff7b0479SSaeed Bishara 
915ff7b0479SSaeed Bishara 	if (mv_chan->pending >= MV_XOR_THRESHOLD) {
916ff7b0479SSaeed Bishara 		mv_chan->pending = 0;
917ff7b0479SSaeed Bishara 		mv_chan_activate(mv_chan);
918ff7b0479SSaeed Bishara 	}
919ff7b0479SSaeed Bishara }
920ff7b0479SSaeed Bishara 
921ff7b0479SSaeed Bishara /*
922ff7b0479SSaeed Bishara  * Perform a transaction to verify the HW works.
923ff7b0479SSaeed Bishara  */
924ff7b0479SSaeed Bishara 
9250951e728SMaxime Ripard static int mv_chan_memcpy_self_test(struct mv_xor_chan *mv_chan)
926ff7b0479SSaeed Bishara {
927b8c01d25SEzequiel Garcia 	int i, ret;
928ff7b0479SSaeed Bishara 	void *src, *dest;
929ff7b0479SSaeed Bishara 	dma_addr_t src_dma, dest_dma;
930ff7b0479SSaeed Bishara 	struct dma_chan *dma_chan;
931ff7b0479SSaeed Bishara 	dma_cookie_t cookie;
932ff7b0479SSaeed Bishara 	struct dma_async_tx_descriptor *tx;
933d16695a7SEzequiel Garcia 	struct dmaengine_unmap_data *unmap;
934ff7b0479SSaeed Bishara 	int err = 0;
935ff7b0479SSaeed Bishara 
936d16695a7SEzequiel Garcia 	src = kmalloc(sizeof(u8) * PAGE_SIZE, GFP_KERNEL);
937ff7b0479SSaeed Bishara 	if (!src)
938ff7b0479SSaeed Bishara 		return -ENOMEM;
939ff7b0479SSaeed Bishara 
940d16695a7SEzequiel Garcia 	dest = kzalloc(sizeof(u8) * PAGE_SIZE, GFP_KERNEL);
941ff7b0479SSaeed Bishara 	if (!dest) {
942ff7b0479SSaeed Bishara 		kfree(src);
943ff7b0479SSaeed Bishara 		return -ENOMEM;
944ff7b0479SSaeed Bishara 	}
945ff7b0479SSaeed Bishara 
946ff7b0479SSaeed Bishara 	/* Fill in src buffer */
947d16695a7SEzequiel Garcia 	for (i = 0; i < PAGE_SIZE; i++)
948ff7b0479SSaeed Bishara 		((u8 *) src)[i] = (u8)i;
949ff7b0479SSaeed Bishara 
950275cc0c8SThomas Petazzoni 	dma_chan = &mv_chan->dmachan;
951aa1e6f1aSDan Williams 	if (mv_xor_alloc_chan_resources(dma_chan) < 1) {
952ff7b0479SSaeed Bishara 		err = -ENODEV;
953ff7b0479SSaeed Bishara 		goto out;
954ff7b0479SSaeed Bishara 	}
955ff7b0479SSaeed Bishara 
956d16695a7SEzequiel Garcia 	unmap = dmaengine_get_unmap_data(dma_chan->device->dev, 2, GFP_KERNEL);
957d16695a7SEzequiel Garcia 	if (!unmap) {
958d16695a7SEzequiel Garcia 		err = -ENOMEM;
959d16695a7SEzequiel Garcia 		goto free_resources;
960d16695a7SEzequiel Garcia 	}
961ff7b0479SSaeed Bishara 
96251564635SStefan Roese 	src_dma = dma_map_page(dma_chan->device->dev, virt_to_page(src),
96351564635SStefan Roese 			       (size_t)src & ~PAGE_MASK, PAGE_SIZE,
96451564635SStefan Roese 			       DMA_TO_DEVICE);
965d16695a7SEzequiel Garcia 	unmap->addr[0] = src_dma;
966d16695a7SEzequiel Garcia 
967b8c01d25SEzequiel Garcia 	ret = dma_mapping_error(dma_chan->device->dev, src_dma);
968b8c01d25SEzequiel Garcia 	if (ret) {
969b8c01d25SEzequiel Garcia 		err = -ENOMEM;
970b8c01d25SEzequiel Garcia 		goto free_resources;
971b8c01d25SEzequiel Garcia 	}
972b8c01d25SEzequiel Garcia 	unmap->to_cnt = 1;
973b8c01d25SEzequiel Garcia 
97451564635SStefan Roese 	dest_dma = dma_map_page(dma_chan->device->dev, virt_to_page(dest),
97551564635SStefan Roese 				(size_t)dest & ~PAGE_MASK, PAGE_SIZE,
97651564635SStefan Roese 				DMA_FROM_DEVICE);
977d16695a7SEzequiel Garcia 	unmap->addr[1] = dest_dma;
978d16695a7SEzequiel Garcia 
979b8c01d25SEzequiel Garcia 	ret = dma_mapping_error(dma_chan->device->dev, dest_dma);
980b8c01d25SEzequiel Garcia 	if (ret) {
981b8c01d25SEzequiel Garcia 		err = -ENOMEM;
982b8c01d25SEzequiel Garcia 		goto free_resources;
983b8c01d25SEzequiel Garcia 	}
984b8c01d25SEzequiel Garcia 	unmap->from_cnt = 1;
985d16695a7SEzequiel Garcia 	unmap->len = PAGE_SIZE;
986ff7b0479SSaeed Bishara 
987ff7b0479SSaeed Bishara 	tx = mv_xor_prep_dma_memcpy(dma_chan, dest_dma, src_dma,
988d16695a7SEzequiel Garcia 				    PAGE_SIZE, 0);
989b8c01d25SEzequiel Garcia 	if (!tx) {
990b8c01d25SEzequiel Garcia 		dev_err(dma_chan->device->dev,
991b8c01d25SEzequiel Garcia 			"Self-test cannot prepare operation, disabling\n");
992b8c01d25SEzequiel Garcia 		err = -ENODEV;
993b8c01d25SEzequiel Garcia 		goto free_resources;
994b8c01d25SEzequiel Garcia 	}
995b8c01d25SEzequiel Garcia 
996ff7b0479SSaeed Bishara 	cookie = mv_xor_tx_submit(tx);
997b8c01d25SEzequiel Garcia 	if (dma_submit_error(cookie)) {
998b8c01d25SEzequiel Garcia 		dev_err(dma_chan->device->dev,
999b8c01d25SEzequiel Garcia 			"Self-test submit error, disabling\n");
1000b8c01d25SEzequiel Garcia 		err = -ENODEV;
1001b8c01d25SEzequiel Garcia 		goto free_resources;
1002b8c01d25SEzequiel Garcia 	}
1003b8c01d25SEzequiel Garcia 
1004ff7b0479SSaeed Bishara 	mv_xor_issue_pending(dma_chan);
1005ff7b0479SSaeed Bishara 	async_tx_ack(tx);
1006ff7b0479SSaeed Bishara 	msleep(1);
1007ff7b0479SSaeed Bishara 
100807934481SLinus Walleij 	if (mv_xor_status(dma_chan, cookie, NULL) !=
1009b3efb8fcSVinod Koul 	    DMA_COMPLETE) {
1010a3fc74bcSThomas Petazzoni 		dev_err(dma_chan->device->dev,
1011ff7b0479SSaeed Bishara 			"Self-test copy timed out, disabling\n");
1012ff7b0479SSaeed Bishara 		err = -ENODEV;
1013ff7b0479SSaeed Bishara 		goto free_resources;
1014ff7b0479SSaeed Bishara 	}
1015ff7b0479SSaeed Bishara 
1016c35064c4SThomas Petazzoni 	dma_sync_single_for_cpu(dma_chan->device->dev, dest_dma,
1017d16695a7SEzequiel Garcia 				PAGE_SIZE, DMA_FROM_DEVICE);
1018d16695a7SEzequiel Garcia 	if (memcmp(src, dest, PAGE_SIZE)) {
1019a3fc74bcSThomas Petazzoni 		dev_err(dma_chan->device->dev,
1020ff7b0479SSaeed Bishara 			"Self-test copy failed compare, disabling\n");
1021ff7b0479SSaeed Bishara 		err = -ENODEV;
1022ff7b0479SSaeed Bishara 		goto free_resources;
1023ff7b0479SSaeed Bishara 	}
1024ff7b0479SSaeed Bishara 
1025ff7b0479SSaeed Bishara free_resources:
1026d16695a7SEzequiel Garcia 	dmaengine_unmap_put(unmap);
1027ff7b0479SSaeed Bishara 	mv_xor_free_chan_resources(dma_chan);
1028ff7b0479SSaeed Bishara out:
1029ff7b0479SSaeed Bishara 	kfree(src);
1030ff7b0479SSaeed Bishara 	kfree(dest);
1031ff7b0479SSaeed Bishara 	return err;
1032ff7b0479SSaeed Bishara }
1033ff7b0479SSaeed Bishara 
1034ff7b0479SSaeed Bishara #define MV_XOR_NUM_SRC_TEST 4 /* must be <= 15 */
1035463a1f8bSBill Pemberton static int
10360951e728SMaxime Ripard mv_chan_xor_self_test(struct mv_xor_chan *mv_chan)
1037ff7b0479SSaeed Bishara {
1038b8c01d25SEzequiel Garcia 	int i, src_idx, ret;
1039ff7b0479SSaeed Bishara 	struct page *dest;
1040ff7b0479SSaeed Bishara 	struct page *xor_srcs[MV_XOR_NUM_SRC_TEST];
1041ff7b0479SSaeed Bishara 	dma_addr_t dma_srcs[MV_XOR_NUM_SRC_TEST];
1042ff7b0479SSaeed Bishara 	dma_addr_t dest_dma;
1043ff7b0479SSaeed Bishara 	struct dma_async_tx_descriptor *tx;
1044d16695a7SEzequiel Garcia 	struct dmaengine_unmap_data *unmap;
1045ff7b0479SSaeed Bishara 	struct dma_chan *dma_chan;
1046ff7b0479SSaeed Bishara 	dma_cookie_t cookie;
1047ff7b0479SSaeed Bishara 	u8 cmp_byte = 0;
1048ff7b0479SSaeed Bishara 	u32 cmp_word;
1049ff7b0479SSaeed Bishara 	int err = 0;
1050d16695a7SEzequiel Garcia 	int src_count = MV_XOR_NUM_SRC_TEST;
1051ff7b0479SSaeed Bishara 
1052d16695a7SEzequiel Garcia 	for (src_idx = 0; src_idx < src_count; src_idx++) {
1053ff7b0479SSaeed Bishara 		xor_srcs[src_idx] = alloc_page(GFP_KERNEL);
1054a09b09aeSRoel Kluin 		if (!xor_srcs[src_idx]) {
1055a09b09aeSRoel Kluin 			while (src_idx--)
1056ff7b0479SSaeed Bishara 				__free_page(xor_srcs[src_idx]);
1057ff7b0479SSaeed Bishara 			return -ENOMEM;
1058ff7b0479SSaeed Bishara 		}
1059ff7b0479SSaeed Bishara 	}
1060ff7b0479SSaeed Bishara 
1061ff7b0479SSaeed Bishara 	dest = alloc_page(GFP_KERNEL);
1062a09b09aeSRoel Kluin 	if (!dest) {
1063a09b09aeSRoel Kluin 		while (src_idx--)
1064ff7b0479SSaeed Bishara 			__free_page(xor_srcs[src_idx]);
1065ff7b0479SSaeed Bishara 		return -ENOMEM;
1066ff7b0479SSaeed Bishara 	}
1067ff7b0479SSaeed Bishara 
1068ff7b0479SSaeed Bishara 	/* Fill in src buffers */
1069d16695a7SEzequiel Garcia 	for (src_idx = 0; src_idx < src_count; src_idx++) {
1070ff7b0479SSaeed Bishara 		u8 *ptr = page_address(xor_srcs[src_idx]);
1071ff7b0479SSaeed Bishara 		for (i = 0; i < PAGE_SIZE; i++)
1072ff7b0479SSaeed Bishara 			ptr[i] = (1 << src_idx);
1073ff7b0479SSaeed Bishara 	}
1074ff7b0479SSaeed Bishara 
1075d16695a7SEzequiel Garcia 	for (src_idx = 0; src_idx < src_count; src_idx++)
1076ff7b0479SSaeed Bishara 		cmp_byte ^= (u8) (1 << src_idx);
1077ff7b0479SSaeed Bishara 
1078ff7b0479SSaeed Bishara 	cmp_word = (cmp_byte << 24) | (cmp_byte << 16) |
1079ff7b0479SSaeed Bishara 		(cmp_byte << 8) | cmp_byte;
1080ff7b0479SSaeed Bishara 
1081ff7b0479SSaeed Bishara 	memset(page_address(dest), 0, PAGE_SIZE);
1082ff7b0479SSaeed Bishara 
1083275cc0c8SThomas Petazzoni 	dma_chan = &mv_chan->dmachan;
1084aa1e6f1aSDan Williams 	if (mv_xor_alloc_chan_resources(dma_chan) < 1) {
1085ff7b0479SSaeed Bishara 		err = -ENODEV;
1086ff7b0479SSaeed Bishara 		goto out;
1087ff7b0479SSaeed Bishara 	}
1088ff7b0479SSaeed Bishara 
1089d16695a7SEzequiel Garcia 	unmap = dmaengine_get_unmap_data(dma_chan->device->dev, src_count + 1,
1090d16695a7SEzequiel Garcia 					 GFP_KERNEL);
1091d16695a7SEzequiel Garcia 	if (!unmap) {
1092d16695a7SEzequiel Garcia 		err = -ENOMEM;
1093d16695a7SEzequiel Garcia 		goto free_resources;
1094d16695a7SEzequiel Garcia 	}
1095ff7b0479SSaeed Bishara 
1096d16695a7SEzequiel Garcia 	/* test xor */
1097d16695a7SEzequiel Garcia 	for (i = 0; i < src_count; i++) {
1098d16695a7SEzequiel Garcia 		unmap->addr[i] = dma_map_page(dma_chan->device->dev, xor_srcs[i],
1099ff7b0479SSaeed Bishara 					      0, PAGE_SIZE, DMA_TO_DEVICE);
1100d16695a7SEzequiel Garcia 		dma_srcs[i] = unmap->addr[i];
1101b8c01d25SEzequiel Garcia 		ret = dma_mapping_error(dma_chan->device->dev, unmap->addr[i]);
1102b8c01d25SEzequiel Garcia 		if (ret) {
1103b8c01d25SEzequiel Garcia 			err = -ENOMEM;
1104b8c01d25SEzequiel Garcia 			goto free_resources;
1105b8c01d25SEzequiel Garcia 		}
1106d16695a7SEzequiel Garcia 		unmap->to_cnt++;
1107d16695a7SEzequiel Garcia 	}
1108d16695a7SEzequiel Garcia 
1109d16695a7SEzequiel Garcia 	unmap->addr[src_count] = dma_map_page(dma_chan->device->dev, dest, 0, PAGE_SIZE,
1110d16695a7SEzequiel Garcia 				      DMA_FROM_DEVICE);
1111d16695a7SEzequiel Garcia 	dest_dma = unmap->addr[src_count];
1112b8c01d25SEzequiel Garcia 	ret = dma_mapping_error(dma_chan->device->dev, unmap->addr[src_count]);
1113b8c01d25SEzequiel Garcia 	if (ret) {
1114b8c01d25SEzequiel Garcia 		err = -ENOMEM;
1115b8c01d25SEzequiel Garcia 		goto free_resources;
1116b8c01d25SEzequiel Garcia 	}
1117d16695a7SEzequiel Garcia 	unmap->from_cnt = 1;
1118d16695a7SEzequiel Garcia 	unmap->len = PAGE_SIZE;
1119ff7b0479SSaeed Bishara 
1120ff7b0479SSaeed Bishara 	tx = mv_xor_prep_dma_xor(dma_chan, dest_dma, dma_srcs,
1121d16695a7SEzequiel Garcia 				 src_count, PAGE_SIZE, 0);
1122b8c01d25SEzequiel Garcia 	if (!tx) {
1123b8c01d25SEzequiel Garcia 		dev_err(dma_chan->device->dev,
1124b8c01d25SEzequiel Garcia 			"Self-test cannot prepare operation, disabling\n");
1125b8c01d25SEzequiel Garcia 		err = -ENODEV;
1126b8c01d25SEzequiel Garcia 		goto free_resources;
1127b8c01d25SEzequiel Garcia 	}
1128ff7b0479SSaeed Bishara 
1129ff7b0479SSaeed Bishara 	cookie = mv_xor_tx_submit(tx);
1130b8c01d25SEzequiel Garcia 	if (dma_submit_error(cookie)) {
1131b8c01d25SEzequiel Garcia 		dev_err(dma_chan->device->dev,
1132b8c01d25SEzequiel Garcia 			"Self-test submit error, disabling\n");
1133b8c01d25SEzequiel Garcia 		err = -ENODEV;
1134b8c01d25SEzequiel Garcia 		goto free_resources;
1135b8c01d25SEzequiel Garcia 	}
1136b8c01d25SEzequiel Garcia 
1137ff7b0479SSaeed Bishara 	mv_xor_issue_pending(dma_chan);
1138ff7b0479SSaeed Bishara 	async_tx_ack(tx);
1139ff7b0479SSaeed Bishara 	msleep(8);
1140ff7b0479SSaeed Bishara 
114107934481SLinus Walleij 	if (mv_xor_status(dma_chan, cookie, NULL) !=
1142b3efb8fcSVinod Koul 	    DMA_COMPLETE) {
1143a3fc74bcSThomas Petazzoni 		dev_err(dma_chan->device->dev,
1144ff7b0479SSaeed Bishara 			"Self-test xor timed out, disabling\n");
1145ff7b0479SSaeed Bishara 		err = -ENODEV;
1146ff7b0479SSaeed Bishara 		goto free_resources;
1147ff7b0479SSaeed Bishara 	}
1148ff7b0479SSaeed Bishara 
1149c35064c4SThomas Petazzoni 	dma_sync_single_for_cpu(dma_chan->device->dev, dest_dma,
1150ff7b0479SSaeed Bishara 				PAGE_SIZE, DMA_FROM_DEVICE);
1151ff7b0479SSaeed Bishara 	for (i = 0; i < (PAGE_SIZE / sizeof(u32)); i++) {
1152ff7b0479SSaeed Bishara 		u32 *ptr = page_address(dest);
1153ff7b0479SSaeed Bishara 		if (ptr[i] != cmp_word) {
1154a3fc74bcSThomas Petazzoni 			dev_err(dma_chan->device->dev,
11551ba151cdSJoe Perches 				"Self-test xor failed compare, disabling. index %d, data %x, expected %x\n",
11561ba151cdSJoe Perches 				i, ptr[i], cmp_word);
1157ff7b0479SSaeed Bishara 			err = -ENODEV;
1158ff7b0479SSaeed Bishara 			goto free_resources;
1159ff7b0479SSaeed Bishara 		}
1160ff7b0479SSaeed Bishara 	}
1161ff7b0479SSaeed Bishara 
1162ff7b0479SSaeed Bishara free_resources:
1163d16695a7SEzequiel Garcia 	dmaengine_unmap_put(unmap);
1164ff7b0479SSaeed Bishara 	mv_xor_free_chan_resources(dma_chan);
1165ff7b0479SSaeed Bishara out:
1166d16695a7SEzequiel Garcia 	src_idx = src_count;
1167ff7b0479SSaeed Bishara 	while (src_idx--)
1168ff7b0479SSaeed Bishara 		__free_page(xor_srcs[src_idx]);
1169ff7b0479SSaeed Bishara 	__free_page(dest);
1170ff7b0479SSaeed Bishara 	return err;
1171ff7b0479SSaeed Bishara }
1172ff7b0479SSaeed Bishara 
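/*
 * Tear down one XOR channel: unregister it from the dmaengine core,
 * release the coherent descriptor pool, unmap the dummy DMA_INTERRUPT
 * buffers, drop the channel from the device's channel list and free
 * its interrupt line.
 */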
11731ef48a26SThomas Petazzoni static int mv_xor_channel_remove(struct mv_xor_chan *mv_chan)
1174ff7b0479SSaeed Bishara {
1175ff7b0479SSaeed Bishara 	struct dma_chan *chan, *_chan;
11761ef48a26SThomas Petazzoni 	struct device *dev = mv_chan->dmadev.dev;
1177ff7b0479SSaeed Bishara 
11781ef48a26SThomas Petazzoni 	dma_async_device_unregister(&mv_chan->dmadev);
1179ff7b0479SSaeed Bishara 
1180b503fa01SThomas Petazzoni 	dma_free_coherent(dev, MV_XOR_POOL_SIZE,
11811ef48a26SThomas Petazzoni 			  mv_chan->dma_desc_pool_virt, mv_chan->dma_desc_pool);
118222843545SLior Amsalem 	dma_unmap_single(dev, mv_chan->dummy_src_addr,
118322843545SLior Amsalem 			 MV_XOR_MIN_BYTE_COUNT, DMA_FROM_DEVICE);
118422843545SLior Amsalem 	dma_unmap_single(dev, mv_chan->dummy_dst_addr,
118522843545SLior Amsalem 			 MV_XOR_MIN_BYTE_COUNT, DMA_TO_DEVICE);
1186ff7b0479SSaeed Bishara 
11871ef48a26SThomas Petazzoni 	list_for_each_entry_safe(chan, _chan, &mv_chan->dmadev.channels,
1188ff7b0479SSaeed Bishara 				 device_node) {
1189ff7b0479SSaeed Bishara 		list_del(&chan->device_node);
1190ff7b0479SSaeed Bishara 	}
1191ff7b0479SSaeed Bishara 
119288eb92cbSThomas Petazzoni 	free_irq(mv_chan->irq, mv_chan);
119388eb92cbSThomas Petazzoni 
1194ff7b0479SSaeed Bishara 	return 0;
1195ff7b0479SSaeed Bishara }
1196ff7b0479SSaeed Bishara 
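/*
 * Bring up one XOR channel: allocate the per-channel state and the
 * coherent descriptor pool, map the dummy buffers used to emulate
 * DMA_INTERRUPT, install the prep/status/issue callbacks according to
 * the capability mask, request the interrupt, run the memcpy and xor
 * self-tests and finally register the channel with the dmaengine core.
 */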
11971ef48a26SThomas Petazzoni static struct mv_xor_chan *
1198297eedbaSThomas Petazzoni mv_xor_channel_add(struct mv_xor_device *xordev,
1199a6b4a9d2SThomas Petazzoni 		   struct platform_device *pdev,
1200dd130c65SGregory CLEMENT 		   int idx, dma_cap_mask_t cap_mask, int irq)
1201ff7b0479SSaeed Bishara {
1202ff7b0479SSaeed Bishara 	int ret = 0;
1203ff7b0479SSaeed Bishara 	struct mv_xor_chan *mv_chan;
1204ff7b0479SSaeed Bishara 	struct dma_device *dma_dev;
1205ff7b0479SSaeed Bishara 
12061ef48a26SThomas Petazzoni 	mv_chan = devm_kzalloc(&pdev->dev, sizeof(*mv_chan), GFP_KERNEL);
1207a577659fSSachin Kamat 	if (!mv_chan)
1208a577659fSSachin Kamat 		return ERR_PTR(-ENOMEM);
1209ff7b0479SSaeed Bishara 
12109aedbdbaSThomas Petazzoni 	mv_chan->idx = idx;
121188eb92cbSThomas Petazzoni 	mv_chan->irq = irq;
1212dd130c65SGregory CLEMENT 	if (xordev->xor_type == XOR_ORION)
1213dd130c65SGregory CLEMENT 		mv_chan->op_in_desc = XOR_MODE_IN_REG;
1214dd130c65SGregory CLEMENT 	else
1215dd130c65SGregory CLEMENT 		mv_chan->op_in_desc = XOR_MODE_IN_DESC;
1216ff7b0479SSaeed Bishara 
12171ef48a26SThomas Petazzoni 	dma_dev = &mv_chan->dmadev;
121877ff7a70SStefan Roese 	mv_chan->xordev = xordev;
1219ff7b0479SSaeed Bishara 
122022843545SLior Amsalem 	/*
122122843545SLior Amsalem 	 * These source and destination dummy buffers are used to implement
122222843545SLior Amsalem 	 * a DMA_INTERRUPT operation as a minimum-sized XOR operation.
122322843545SLior Amsalem 	 * Hence, we only need to map the buffers at initialization-time.
122422843545SLior Amsalem 	 */
122522843545SLior Amsalem 	mv_chan->dummy_src_addr = dma_map_single(dma_dev->dev,
122622843545SLior Amsalem 		mv_chan->dummy_src, MV_XOR_MIN_BYTE_COUNT, DMA_FROM_DEVICE);
122722843545SLior Amsalem 	mv_chan->dummy_dst_addr = dma_map_single(dma_dev->dev,
122822843545SLior Amsalem 		mv_chan->dummy_dst, MV_XOR_MIN_BYTE_COUNT, DMA_TO_DEVICE);
122922843545SLior Amsalem 
1230ff7b0479SSaeed Bishara 	/* allocate coherent memory for hardware descriptors
1231ff7b0479SSaeed Bishara 	 * note: writecombine gives slightly better performance, but
1232ff7b0479SSaeed Bishara 	 * requires that we explicitly flush the writes
1233ff7b0479SSaeed Bishara 	 */
12341ef48a26SThomas Petazzoni 	mv_chan->dma_desc_pool_virt =
1235f6e45661SLuis R. Rodriguez 	  dma_alloc_wc(&pdev->dev, MV_XOR_POOL_SIZE, &mv_chan->dma_desc_pool,
1236f6e45661SLuis R. Rodriguez 		       GFP_KERNEL);
12371ef48a26SThomas Petazzoni 	if (!mv_chan->dma_desc_pool_virt)
1238a6b4a9d2SThomas Petazzoni 		return ERR_PTR(-ENOMEM);
1239ff7b0479SSaeed Bishara 
1240ff7b0479SSaeed Bishara 	/* discover transaction capabilities from the platform data */
1241a6b4a9d2SThomas Petazzoni 	dma_dev->cap_mask = cap_mask;
1242ff7b0479SSaeed Bishara 
1243ff7b0479SSaeed Bishara 	INIT_LIST_HEAD(&dma_dev->channels);
1244ff7b0479SSaeed Bishara 
1245ff7b0479SSaeed Bishara 	/* set base routines */
1246ff7b0479SSaeed Bishara 	dma_dev->device_alloc_chan_resources = mv_xor_alloc_chan_resources;
1247ff7b0479SSaeed Bishara 	dma_dev->device_free_chan_resources = mv_xor_free_chan_resources;
124807934481SLinus Walleij 	dma_dev->device_tx_status = mv_xor_status;
1249ff7b0479SSaeed Bishara 	dma_dev->device_issue_pending = mv_xor_issue_pending;
1250ff7b0479SSaeed Bishara 	dma_dev->dev = &pdev->dev;
1251ff7b0479SSaeed Bishara 
1252ff7b0479SSaeed Bishara 	/* set prep routines based on capability */
125322843545SLior Amsalem 	if (dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask))
125422843545SLior Amsalem 		dma_dev->device_prep_dma_interrupt = mv_xor_prep_dma_interrupt;
1255ff7b0479SSaeed Bishara 	if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask))
1256ff7b0479SSaeed Bishara 		dma_dev->device_prep_dma_memcpy = mv_xor_prep_dma_memcpy;
1257c5db858bSStefan Roese 	if (dma_has_cap(DMA_SG, dma_dev->cap_mask))
1258c5db858bSStefan Roese 		dma_dev->device_prep_dma_sg = mv_xor_prep_dma_sg;
1259ff7b0479SSaeed Bishara 	if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
1260c019894eSJoe Perches 		dma_dev->max_xor = 8;
1261ff7b0479SSaeed Bishara 		dma_dev->device_prep_dma_xor = mv_xor_prep_dma_xor;
1262ff7b0479SSaeed Bishara 	}
1263ff7b0479SSaeed Bishara 
1264297eedbaSThomas Petazzoni 	mv_chan->mmr_base = xordev->xor_base;
126582a1402eSEzequiel Garcia 	mv_chan->mmr_high_base = xordev->xor_high_base;
1266ff7b0479SSaeed Bishara 	tasklet_init(&mv_chan->irq_tasklet, mv_xor_tasklet, (unsigned long)
1267ff7b0479SSaeed Bishara 		     mv_chan);
1268ff7b0479SSaeed Bishara 
1269ff7b0479SSaeed Bishara 	/* clear errors before enabling interrupts */
12700951e728SMaxime Ripard 	mv_chan_clear_err_status(mv_chan);
1271ff7b0479SSaeed Bishara 
12722d0a0745SThomas Petazzoni 	ret = request_irq(mv_chan->irq, mv_xor_interrupt_handler,
1273ff7b0479SSaeed Bishara 			  0, dev_name(&pdev->dev), mv_chan);
1274ff7b0479SSaeed Bishara 	if (ret)
1275ff7b0479SSaeed Bishara 		goto err_free_dma;
1276ff7b0479SSaeed Bishara 
1277ff7b0479SSaeed Bishara 	mv_chan_unmask_interrupts(mv_chan);
1278ff7b0479SSaeed Bishara 
12796f166312SLior Amsalem 	if (mv_chan->op_in_desc == XOR_MODE_IN_DESC)
128081aafb3eSThomas Petazzoni 		mv_chan_set_mode(mv_chan, XOR_OPERATION_MODE_IN_DESC);
12816f166312SLior Amsalem 	else
128281aafb3eSThomas Petazzoni 		mv_chan_set_mode(mv_chan, XOR_OPERATION_MODE_XOR);
1283ff7b0479SSaeed Bishara 
1284ff7b0479SSaeed Bishara 	spin_lock_init(&mv_chan->lock);
1285ff7b0479SSaeed Bishara 	INIT_LIST_HEAD(&mv_chan->chain);
1286ff7b0479SSaeed Bishara 	INIT_LIST_HEAD(&mv_chan->completed_slots);
1287fbea28a2SLior Amsalem 	INIT_LIST_HEAD(&mv_chan->free_slots);
1288fbea28a2SLior Amsalem 	INIT_LIST_HEAD(&mv_chan->allocated_slots);
128998817b99SThomas Petazzoni 	mv_chan->dmachan.device = dma_dev;
129098817b99SThomas Petazzoni 	dma_cookie_init(&mv_chan->dmachan);
1291ff7b0479SSaeed Bishara 
129298817b99SThomas Petazzoni 	list_add_tail(&mv_chan->dmachan.device_node, &dma_dev->channels);
1293ff7b0479SSaeed Bishara 
1294ff7b0479SSaeed Bishara 	if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask)) {
12950951e728SMaxime Ripard 		ret = mv_chan_memcpy_self_test(mv_chan);
1296ff7b0479SSaeed Bishara 		dev_dbg(&pdev->dev, "memcpy self test returned %d\n", ret);
1297ff7b0479SSaeed Bishara 		if (ret)
12982d0a0745SThomas Petazzoni 			goto err_free_irq;
1299ff7b0479SSaeed Bishara 	}
1300ff7b0479SSaeed Bishara 
1301ff7b0479SSaeed Bishara 	if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
13020951e728SMaxime Ripard 		ret = mv_chan_xor_self_test(mv_chan);
1303ff7b0479SSaeed Bishara 		dev_dbg(&pdev->dev, "xor self test returned %d\n", ret);
1304ff7b0479SSaeed Bishara 		if (ret)
13052d0a0745SThomas Petazzoni 			goto err_free_irq;
1306ff7b0479SSaeed Bishara 	}
1307ff7b0479SSaeed Bishara 
1308c5db858bSStefan Roese 	dev_info(&pdev->dev, "Marvell XOR (%s): ( %s%s%s%s)\n",
13096f166312SLior Amsalem 		 mv_chan->op_in_desc ? "Descriptor Mode" : "Registers Mode",
1310ff7b0479SSaeed Bishara 		 dma_has_cap(DMA_XOR, dma_dev->cap_mask) ? "xor " : "",
1311ff7b0479SSaeed Bishara 		 dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask) ? "cpy " : "",
1312c5db858bSStefan Roese 		 dma_has_cap(DMA_SG, dma_dev->cap_mask) ? "sg " : "",
1313ff7b0479SSaeed Bishara 		 dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask) ? "intr " : "");
1314ff7b0479SSaeed Bishara 
1315ff7b0479SSaeed Bishara 	dma_async_device_register(dma_dev);
13161ef48a26SThomas Petazzoni 	return mv_chan;
1317ff7b0479SSaeed Bishara 
13182d0a0745SThomas Petazzoni err_free_irq:
13192d0a0745SThomas Petazzoni 	free_irq(mv_chan->irq, mv_chan);
1320ff7b0479SSaeed Bishara err_free_dma:
1321b503fa01SThomas Petazzoni 	dma_free_coherent(&pdev->dev, MV_XOR_POOL_SIZE,
13221ef48a26SThomas Petazzoni 			  mv_chan->dma_desc_pool_virt, mv_chan->dma_desc_pool);
1323a6b4a9d2SThomas Petazzoni 	return ERR_PTR(ret);
1324ff7b0479SSaeed Bishara }
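/*
 * Channels registered above are normally consumed through the generic
 * async_tx layer rather than driven directly.  A minimal client sketch
 * (error handling omitted; page pointers and source count are
 * placeholders):
 *
 *	struct async_submit_ctl submit;
 *	struct dma_async_tx_descriptor *tx;
 *
 *	init_async_submit(&submit, ASYNC_TX_XOR_ZERO_DST, NULL,
 *			  NULL, NULL, NULL);
 *	tx = async_xor(dest_page, src_pages, 0, src_cnt, PAGE_SIZE,
 *		       &submit);
 */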
1325ff7b0479SSaeed Bishara 
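/*
 * Program the XOR engine's MBUS address decoding windows: all eight
 * windows are first cleared, then one window is opened per DRAM
 * chip-select with its base, size and attributes, the covered ranges
 * are cached for later address checks, and the resulting enable mask
 * is applied to both channels of the engine.
 */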
1326ff7b0479SSaeed Bishara static void
1327297eedbaSThomas Petazzoni mv_xor_conf_mbus_windows(struct mv_xor_device *xordev,
132863a9332bSAndrew Lunn 			 const struct mbus_dram_target_info *dram)
1329ff7b0479SSaeed Bishara {
133082a1402eSEzequiel Garcia 	void __iomem *base = xordev->xor_high_base;
1331ff7b0479SSaeed Bishara 	u32 win_enable = 0;
1332ff7b0479SSaeed Bishara 	int i;
1333ff7b0479SSaeed Bishara 
1334ff7b0479SSaeed Bishara 	for (i = 0; i < 8; i++) {
1335ff7b0479SSaeed Bishara 		writel(0, base + WINDOW_BASE(i));
1336ff7b0479SSaeed Bishara 		writel(0, base + WINDOW_SIZE(i));
1337ff7b0479SSaeed Bishara 		if (i < 4)
1338ff7b0479SSaeed Bishara 			writel(0, base + WINDOW_REMAP_HIGH(i));
1339ff7b0479SSaeed Bishara 	}
1340ff7b0479SSaeed Bishara 
1341ff7b0479SSaeed Bishara 	for (i = 0; i < dram->num_cs; i++) {
134263a9332bSAndrew Lunn 		const struct mbus_dram_window *cs = dram->cs + i;
1343ff7b0479SSaeed Bishara 
1344ff7b0479SSaeed Bishara 		writel((cs->base & 0xffff0000) |
1345ff7b0479SSaeed Bishara 		       (cs->mbus_attr << 8) |
1346ff7b0479SSaeed Bishara 		       dram->mbus_dram_target_id, base + WINDOW_BASE(i));
1347ff7b0479SSaeed Bishara 		writel((cs->size - 1) & 0xffff0000, base + WINDOW_SIZE(i));
1348ff7b0479SSaeed Bishara 
134977ff7a70SStefan Roese 		/* Fill the caching variables for later use */
135077ff7a70SStefan Roese 		xordev->win_start[i] = cs->base;
135177ff7a70SStefan Roese 		xordev->win_end[i] = cs->base + cs->size - 1;
135277ff7a70SStefan Roese 
1353ff7b0479SSaeed Bishara 		win_enable |= (1 << i);
1354ff7b0479SSaeed Bishara 		win_enable |= 3 << (16 + (2 * i));
1355ff7b0479SSaeed Bishara 	}
1356ff7b0479SSaeed Bishara 
1357ff7b0479SSaeed Bishara 	writel(win_enable, base + WINDOW_BAR_ENABLE(0));
1358ff7b0479SSaeed Bishara 	writel(win_enable, base + WINDOW_BAR_ENABLE(1));
1359c4b4b732SThomas Petazzoni 	writel(0, base + WINDOW_OVERRIDE_CTRL(0));
1360c4b4b732SThomas Petazzoni 	writel(0, base + WINDOW_OVERRIDE_CTRL(1));
1361ff7b0479SSaeed Bishara }
1362ff7b0479SSaeed Bishara 
1363ac5f0f3fSMarcin Wojtas static void
1364ac5f0f3fSMarcin Wojtas mv_xor_conf_mbus_windows_a3700(struct mv_xor_device *xordev)
1365ac5f0f3fSMarcin Wojtas {
1366ac5f0f3fSMarcin Wojtas 	void __iomem *base = xordev->xor_high_base;
1367ac5f0f3fSMarcin Wojtas 	u32 win_enable = 0;
1368ac5f0f3fSMarcin Wojtas 	int i;
1369ac5f0f3fSMarcin Wojtas 
1370ac5f0f3fSMarcin Wojtas 	for (i = 0; i < 8; i++) {
1371ac5f0f3fSMarcin Wojtas 		writel(0, base + WINDOW_BASE(i));
1372ac5f0f3fSMarcin Wojtas 		writel(0, base + WINDOW_SIZE(i));
1373ac5f0f3fSMarcin Wojtas 		if (i < 4)
1374ac5f0f3fSMarcin Wojtas 			writel(0, base + WINDOW_REMAP_HIGH(i));
1375ac5f0f3fSMarcin Wojtas 	}
1376ac5f0f3fSMarcin Wojtas 	/*
1377ac5f0f3fSMarcin Wojtas 	 * For Armada 3700, open the default 4GB Mbus window. The DRAM-
1378ac5f0f3fSMarcin Wojtas 	 * related configuration is done at the AXIS level.
1379ac5f0f3fSMarcin Wojtas 	 */
1380ac5f0f3fSMarcin Wojtas 	writel(0xffff0000, base + WINDOW_SIZE(0));
1381ac5f0f3fSMarcin Wojtas 	win_enable |= 1;
1382ac5f0f3fSMarcin Wojtas 	win_enable |= 3 << 16;
1383ac5f0f3fSMarcin Wojtas 
1384ac5f0f3fSMarcin Wojtas 	writel(win_enable, base + WINDOW_BAR_ENABLE(0));
1385ac5f0f3fSMarcin Wojtas 	writel(win_enable, base + WINDOW_BAR_ENABLE(1));
1386ac5f0f3fSMarcin Wojtas 	writel(0, base + WINDOW_OVERRIDE_CTRL(0));
1387ac5f0f3fSMarcin Wojtas 	writel(0, base + WINDOW_OVERRIDE_CTRL(1));
1388ac5f0f3fSMarcin Wojtas }
1389ac5f0f3fSMarcin Wojtas 
13908b648436SThomas Petazzoni /*
13918b648436SThomas Petazzoni  * Since this XOR driver is basically used only for RAID5, we don't
13928b648436SThomas Petazzoni  * need to care about synchronizing ->suspend with DMA activity,
13938b648436SThomas Petazzoni  * because the DMA engine will naturally be quiet due to the block
13948b648436SThomas Petazzoni  * devices being suspended.
13958b648436SThomas Petazzoni  */
13968b648436SThomas Petazzoni static int mv_xor_suspend(struct platform_device *pdev, pm_message_t state)
13978b648436SThomas Petazzoni {
13988b648436SThomas Petazzoni 	struct mv_xor_device *xordev = platform_get_drvdata(pdev);
13998b648436SThomas Petazzoni 	int i;
14008b648436SThomas Petazzoni 
14018b648436SThomas Petazzoni 	for (i = 0; i < MV_XOR_MAX_CHANNELS; i++) {
14028b648436SThomas Petazzoni 		struct mv_xor_chan *mv_chan = xordev->channels[i];
14038b648436SThomas Petazzoni 
14048b648436SThomas Petazzoni 		if (!mv_chan)
14058b648436SThomas Petazzoni 			continue;
14068b648436SThomas Petazzoni 
14078b648436SThomas Petazzoni 		mv_chan->saved_config_reg =
14088b648436SThomas Petazzoni 			readl_relaxed(XOR_CONFIG(mv_chan));
14098b648436SThomas Petazzoni 		mv_chan->saved_int_mask_reg =
14108b648436SThomas Petazzoni 			readl_relaxed(XOR_INTR_MASK(mv_chan));
14118b648436SThomas Petazzoni 	}
14128b648436SThomas Petazzoni 
14138b648436SThomas Petazzoni 	return 0;
14148b648436SThomas Petazzoni }
14158b648436SThomas Petazzoni 
14168b648436SThomas Petazzoni static int mv_xor_resume(struct platform_device *dev)
14178b648436SThomas Petazzoni {
14188b648436SThomas Petazzoni 	struct mv_xor_device *xordev = platform_get_drvdata(dev);
14198b648436SThomas Petazzoni 	const struct mbus_dram_target_info *dram;
14208b648436SThomas Petazzoni 	int i;
14218b648436SThomas Petazzoni 
14228b648436SThomas Petazzoni 	for (i = 0; i < MV_XOR_MAX_CHANNELS; i++) {
14238b648436SThomas Petazzoni 		struct mv_xor_chan *mv_chan = xordev->channels[i];
14248b648436SThomas Petazzoni 
14258b648436SThomas Petazzoni 		if (!mv_chan)
14268b648436SThomas Petazzoni 			continue;
14278b648436SThomas Petazzoni 
14288b648436SThomas Petazzoni 		writel_relaxed(mv_chan->saved_config_reg,
14298b648436SThomas Petazzoni 			       XOR_CONFIG(mv_chan));
14308b648436SThomas Petazzoni 		writel_relaxed(mv_chan->saved_int_mask_reg,
14318b648436SThomas Petazzoni 			       XOR_INTR_MASK(mv_chan));
14328b648436SThomas Petazzoni 	}
14338b648436SThomas Petazzoni 
1434ac5f0f3fSMarcin Wojtas 	if (xordev->xor_type == XOR_ARMADA_37XX) {
1435ac5f0f3fSMarcin Wojtas 		mv_xor_conf_mbus_windows_a3700(xordev);
1436ac5f0f3fSMarcin Wojtas 		return 0;
1437ac5f0f3fSMarcin Wojtas 	}
1438ac5f0f3fSMarcin Wojtas 
14398b648436SThomas Petazzoni 	dram = mv_mbus_dram_info();
14408b648436SThomas Petazzoni 	if (dram)
14418b648436SThomas Petazzoni 		mv_xor_conf_mbus_windows(xordev, dram);
14428b648436SThomas Petazzoni 
14438b648436SThomas Petazzoni 	return 0;
14448b648436SThomas Petazzoni }
14458b648436SThomas Petazzoni 
14466f166312SLior Amsalem static const struct of_device_id mv_xor_dt_ids[] = {
1447dd130c65SGregory CLEMENT 	{ .compatible = "marvell,orion-xor", .data = (void *)XOR_ORION },
1448dd130c65SGregory CLEMENT 	{ .compatible = "marvell,armada-380-xor", .data = (void *)XOR_ARMADA_38X },
1449ac5f0f3fSMarcin Wojtas 	{ .compatible = "marvell,armada-3700-xor", .data = (void *)XOR_ARMADA_37XX },
14506f166312SLior Amsalem 	{},
14516f166312SLior Amsalem };
14526f166312SLior Amsalem 
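/*
 * A minimal sketch of a matching device-tree node, assuming the two
 * register ranges and the one interrupt per channel sub-node that the
 * probe routine below parses (addresses, interrupt numbers and node
 * names are placeholders only):
 *
 *	xor@60900 {
 *		compatible = "marvell,orion-xor";
 *		reg = <0x60900 0x100>, <0x60b00 0x100>;
 *
 *		xor00 {
 *			interrupts = <51>;
 *		};
 *		xor01 {
 *			interrupts = <52>;
 *		};
 *	};
 */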
145377757291SThomas Petazzoni static unsigned int mv_xor_engine_count;
1454ff7b0479SSaeed Bishara 
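/*
 * Probe the shared XOR engine: map the low and high register ranges,
 * determine the engine type (Orion, Armada 38x or Armada 3700) from the
 * device-tree match data, program the MBUS windows, enable the optional
 * clock and then instantiate up to max_channels channels, either from
 * device-tree child nodes or from legacy platform data.
 */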
1455c2714334SLinus Torvalds static int mv_xor_probe(struct platform_device *pdev)
1456ff7b0479SSaeed Bishara {
145763a9332bSAndrew Lunn 	const struct mbus_dram_target_info *dram;
1458297eedbaSThomas Petazzoni 	struct mv_xor_device *xordev;
1459d4adcc01SJingoo Han 	struct mv_xor_platform_data *pdata = dev_get_platdata(&pdev->dev);
1460ff7b0479SSaeed Bishara 	struct resource *res;
146177757291SThomas Petazzoni 	unsigned int max_engines, max_channels;
146260d151f3SThomas Petazzoni 	int i, ret;
1463ff7b0479SSaeed Bishara 
14641ba151cdSJoe Perches 	dev_notice(&pdev->dev, "Marvell shared XOR driver\n");
1465ff7b0479SSaeed Bishara 
1466297eedbaSThomas Petazzoni 	xordev = devm_kzalloc(&pdev->dev, sizeof(*xordev), GFP_KERNEL);
1467297eedbaSThomas Petazzoni 	if (!xordev)
1468ff7b0479SSaeed Bishara 		return -ENOMEM;
1469ff7b0479SSaeed Bishara 
1470ff7b0479SSaeed Bishara 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1471ff7b0479SSaeed Bishara 	if (!res)
1472ff7b0479SSaeed Bishara 		return -ENODEV;
1473ff7b0479SSaeed Bishara 
1474297eedbaSThomas Petazzoni 	xordev->xor_base = devm_ioremap(&pdev->dev, res->start,
14754de1ba15SH Hartley Sweeten 					resource_size(res));
1476297eedbaSThomas Petazzoni 	if (!xordev->xor_base)
1477ff7b0479SSaeed Bishara 		return -EBUSY;
1478ff7b0479SSaeed Bishara 
1479ff7b0479SSaeed Bishara 	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
1480ff7b0479SSaeed Bishara 	if (!res)
1481ff7b0479SSaeed Bishara 		return -ENODEV;
1482ff7b0479SSaeed Bishara 
1483297eedbaSThomas Petazzoni 	xordev->xor_high_base = devm_ioremap(&pdev->dev, res->start,
14844de1ba15SH Hartley Sweeten 					     resource_size(res));
1485297eedbaSThomas Petazzoni 	if (!xordev->xor_high_base)
1486ff7b0479SSaeed Bishara 		return -EBUSY;
1487ff7b0479SSaeed Bishara 
1488297eedbaSThomas Petazzoni 	platform_set_drvdata(pdev, xordev);
1489ff7b0479SSaeed Bishara 
1491dd130c65SGregory CLEMENT 	/*
1492dd130c65SGregory CLEMENT 	 * We need to know which type of XOR device is in use before
1493dd130c65SGregory CLEMENT 	 * setting it up. In the non-DT case it can only be the legacy one.
1494dd130c65SGregory CLEMENT 	 */
1495dd130c65SGregory CLEMENT 	xordev->xor_type = XOR_ORION;
1496dd130c65SGregory CLEMENT 	if (pdev->dev.of_node) {
1497dd130c65SGregory CLEMENT 		const struct of_device_id *of_id =
1498dd130c65SGregory CLEMENT 			of_match_device(mv_xor_dt_ids,
1499dd130c65SGregory CLEMENT 					&pdev->dev);
1500dd130c65SGregory CLEMENT 
1501dd130c65SGregory CLEMENT 		xordev->xor_type = (uintptr_t)of_id->data;
1502dd130c65SGregory CLEMENT 	}
1503dd130c65SGregory CLEMENT 
1504ff7b0479SSaeed Bishara 	/*
1505ff7b0479SSaeed Bishara 	 * (Re-)program MBUS remapping windows if we are asked to.
1506ff7b0479SSaeed Bishara 	 */
1507ac5f0f3fSMarcin Wojtas 	if (xordev->xor_type == XOR_ARMADA_37XX) {
1508ac5f0f3fSMarcin Wojtas 		mv_xor_conf_mbus_windows_a3700(xordev);
1509ac5f0f3fSMarcin Wojtas 	} else {
151063a9332bSAndrew Lunn 		dram = mv_mbus_dram_info();
151163a9332bSAndrew Lunn 		if (dram)
1512297eedbaSThomas Petazzoni 			mv_xor_conf_mbus_windows(xordev, dram);
1513ac5f0f3fSMarcin Wojtas 	}
1514ff7b0479SSaeed Bishara 
1515c510182bSAndrew Lunn 	/* Not all platforms can gate the clock, so it is not
1516c510182bSAndrew Lunn 	 * an error if the clock does not exist.
1517c510182bSAndrew Lunn 	 */
1518297eedbaSThomas Petazzoni 	xordev->clk = clk_get(&pdev->dev, NULL);
1519297eedbaSThomas Petazzoni 	if (!IS_ERR(xordev->clk))
1520297eedbaSThomas Petazzoni 		clk_prepare_enable(xordev->clk);
1521c510182bSAndrew Lunn 
152277757291SThomas Petazzoni 	/*
152377757291SThomas Petazzoni 	 * We don't want to have more than one channel per CPU in
152477757291SThomas Petazzoni 	 * order for async_tx to perform well. So we limit the number
152577757291SThomas Petazzoni 	 * of engines and channels so that we take into account this
152677757291SThomas Petazzoni 	 * constraint. Note that we also want to use channels from
1527ac5f0f3fSMarcin Wojtas 	 * separate engines when possible.  The dual-CPU Armada 3700
1528ac5f0f3fSMarcin Wojtas 	 * SoC has a single XOR engine, so both of its channels are used.
152977757291SThomas Petazzoni 	 */
153077757291SThomas Petazzoni 	max_engines = num_present_cpus();
1531ac5f0f3fSMarcin Wojtas 	if (xordev->xor_type == XOR_ARMADA_37XX)
1532ac5f0f3fSMarcin Wojtas 		max_channels = num_present_cpus();
1533ac5f0f3fSMarcin Wojtas 	else
153477757291SThomas Petazzoni 		max_channels = min_t(unsigned int,
153577757291SThomas Petazzoni 				     MV_XOR_MAX_CHANNELS,
153677757291SThomas Petazzoni 				     DIV_ROUND_UP(num_present_cpus(), 2));
153777757291SThomas Petazzoni 
153877757291SThomas Petazzoni 	if (mv_xor_engine_count >= max_engines)
153977757291SThomas Petazzoni 		return 0;
154077757291SThomas Petazzoni 
1541f7d12ef5SThomas Petazzoni 	if (pdev->dev.of_node) {
1542f7d12ef5SThomas Petazzoni 		struct device_node *np;
1543f7d12ef5SThomas Petazzoni 		int i = 0;
1544f7d12ef5SThomas Petazzoni 
1545f7d12ef5SThomas Petazzoni 		for_each_child_of_node(pdev->dev.of_node, np) {
15460be8253fSRussell King 			struct mv_xor_chan *chan;
1547f7d12ef5SThomas Petazzoni 			dma_cap_mask_t cap_mask;
1548f7d12ef5SThomas Petazzoni 			int irq;
1549f7d12ef5SThomas Petazzoni 
155077757291SThomas Petazzoni 			if (i >= max_channels)
155177757291SThomas Petazzoni 				continue;
155277757291SThomas Petazzoni 
1553f7d12ef5SThomas Petazzoni 			dma_cap_zero(cap_mask);
1554f7d12ef5SThomas Petazzoni 			dma_cap_set(DMA_MEMCPY, cap_mask);
1555c5db858bSStefan Roese 			dma_cap_set(DMA_SG, cap_mask);
1556f7d12ef5SThomas Petazzoni 			dma_cap_set(DMA_XOR, cap_mask);
1557f7d12ef5SThomas Petazzoni 			dma_cap_set(DMA_INTERRUPT, cap_mask);
1558f7d12ef5SThomas Petazzoni 
1559f7d12ef5SThomas Petazzoni 			irq = irq_of_parse_and_map(np, 0);
1560f8eb9e7dSThomas Petazzoni 			if (!irq) {
1561f8eb9e7dSThomas Petazzoni 				ret = -ENODEV;
1562f7d12ef5SThomas Petazzoni 				goto err_channel_add;
1563f7d12ef5SThomas Petazzoni 			}
1564f7d12ef5SThomas Petazzoni 
15650be8253fSRussell King 			chan = mv_xor_channel_add(xordev, pdev, i,
1566dd130c65SGregory CLEMENT 						  cap_mask, irq);
15670be8253fSRussell King 			if (IS_ERR(chan)) {
15680be8253fSRussell King 				ret = PTR_ERR(chan);
1569f7d12ef5SThomas Petazzoni 				irq_dispose_mapping(irq);
1570f7d12ef5SThomas Petazzoni 				goto err_channel_add;
1571f7d12ef5SThomas Petazzoni 			}
1572f7d12ef5SThomas Petazzoni 
15730be8253fSRussell King 			xordev->channels[i] = chan;
1574f7d12ef5SThomas Petazzoni 			i++;
1575f7d12ef5SThomas Petazzoni 		}
1576f7d12ef5SThomas Petazzoni 	} else if (pdata && pdata->channels) {
157777757291SThomas Petazzoni 		for (i = 0; i < max_channels; i++) {
1578e39f6ec1SThomas Petazzoni 			struct mv_xor_channel_data *cd;
15790be8253fSRussell King 			struct mv_xor_chan *chan;
158060d151f3SThomas Petazzoni 			int irq;
158160d151f3SThomas Petazzoni 
158260d151f3SThomas Petazzoni 			cd = &pdata->channels[i];
158360d151f3SThomas Petazzoni 			if (!cd) {
158460d151f3SThomas Petazzoni 				ret = -ENODEV;
158560d151f3SThomas Petazzoni 				goto err_channel_add;
158660d151f3SThomas Petazzoni 			}
158760d151f3SThomas Petazzoni 
158860d151f3SThomas Petazzoni 			irq = platform_get_irq(pdev, i);
158960d151f3SThomas Petazzoni 			if (irq < 0) {
159060d151f3SThomas Petazzoni 				ret = irq;
159160d151f3SThomas Petazzoni 				goto err_channel_add;
159260d151f3SThomas Petazzoni 			}
159360d151f3SThomas Petazzoni 
15940be8253fSRussell King 			chan = mv_xor_channel_add(xordev, pdev, i,
1595dd130c65SGregory CLEMENT 						  cd->cap_mask, irq);
15960be8253fSRussell King 			if (IS_ERR(chan)) {
15970be8253fSRussell King 				ret = PTR_ERR(chan);
159860d151f3SThomas Petazzoni 				goto err_channel_add;
159960d151f3SThomas Petazzoni 			}
16000be8253fSRussell King 
16010be8253fSRussell King 			xordev->channels[i] = chan;
160260d151f3SThomas Petazzoni 		}
160360d151f3SThomas Petazzoni 	}
160460d151f3SThomas Petazzoni 
1605ff7b0479SSaeed Bishara 	return 0;
160660d151f3SThomas Petazzoni 
160760d151f3SThomas Petazzoni err_channel_add:
160860d151f3SThomas Petazzoni 	for (i = 0; i < MV_XOR_MAX_CHANNELS; i++)
1609f7d12ef5SThomas Petazzoni 		if (xordev->channels[i]) {
1610ab6e439fSThomas Petazzoni 			mv_xor_channel_remove(xordev->channels[i]);
1611f7d12ef5SThomas Petazzoni 			if (pdev->dev.of_node)
1612f7d12ef5SThomas Petazzoni 				irq_dispose_mapping(xordev->channels[i]->irq);
1613f7d12ef5SThomas Petazzoni 		}
161460d151f3SThomas Petazzoni 
1615dab92064SThomas Petazzoni 	if (!IS_ERR(xordev->clk)) {
1616297eedbaSThomas Petazzoni 		clk_disable_unprepare(xordev->clk);
1617297eedbaSThomas Petazzoni 		clk_put(xordev->clk);
1618dab92064SThomas Petazzoni 	}
1619dab92064SThomas Petazzoni 
162060d151f3SThomas Petazzoni 	return ret;
1621ff7b0479SSaeed Bishara }
1622ff7b0479SSaeed Bishara 
1623ff7b0479SSaeed Bishara static struct platform_driver mv_xor_driver = {
1624ff7b0479SSaeed Bishara 	.probe		= mv_xor_probe,
16258b648436SThomas Petazzoni 	.suspend        = mv_xor_suspend,
16268b648436SThomas Petazzoni 	.resume         = mv_xor_resume,
1627ff7b0479SSaeed Bishara 	.driver		= {
1628ff7b0479SSaeed Bishara 		.name	        = MV_XOR_NAME,
1629f7d12ef5SThomas Petazzoni 		.of_match_table = of_match_ptr(mv_xor_dt_ids),
1630ff7b0479SSaeed Bishara 	},
1631ff7b0479SSaeed Bishara };
1632ff7b0479SSaeed Bishara 
1633812608d1SGeliang Tang builtin_platform_driver(mv_xor_driver);
1634ff7b0479SSaeed Bishara 
163525cf68daSPaul Gortmaker /*
1636ff7b0479SSaeed Bishara MODULE_AUTHOR("Saeed Bishara <saeed@marvell.com>");
1637ff7b0479SSaeed Bishara MODULE_DESCRIPTION("DMA engine driver for Marvell's XOR engine");
1638ff7b0479SSaeed Bishara MODULE_LICENSE("GPL");
163925cf68daSPaul Gortmaker */
1640