xref: /openbmc/linux/drivers/dma/mv_xor.c (revision 78700c0a)
1 /*
2  * offload engine driver for the Marvell XOR engine
3  * Copyright (C) 2007, 2008, Marvell International Ltd.
4  *
5  * This program is free software; you can redistribute it and/or modify it
6  * under the terms and conditions of the GNU General Public License,
7  * version 2, as published by the Free Software Foundation.
8  *
9  * This program is distributed in the hope it will be useful, but WITHOUT
10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
12  * more details.
13  */
14 
15 #include <linux/init.h>
16 #include <linux/slab.h>
17 #include <linux/delay.h>
18 #include <linux/dma-mapping.h>
19 #include <linux/spinlock.h>
20 #include <linux/interrupt.h>
21 #include <linux/of_device.h>
22 #include <linux/platform_device.h>
23 #include <linux/memory.h>
24 #include <linux/clk.h>
25 #include <linux/of.h>
26 #include <linux/of_irq.h>
27 #include <linux/irqdomain.h>
28 #include <linux/cpumask.h>
29 #include <linux/platform_data/dma-mv_xor.h>
30 
31 #include "dmaengine.h"
32 #include "mv_xor.h"
33 
34 enum mv_xor_type {
35 	XOR_ORION,
36 	XOR_ARMADA_38X,
37 	XOR_ARMADA_37XX,
38 };
39 
40 enum mv_xor_mode {
41 	XOR_MODE_IN_REG,
42 	XOR_MODE_IN_DESC,
43 };
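/*
 * Older controllers (Orion family) select the operation globally in the
 * channel configuration register, while newer ones (Armada 38x/37xx)
 * encode it in each hardware descriptor; the two values above record
 * which scheme a channel uses (see mv_chan_set_mode() and
 * mv_desc_set_mode()).
 */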
44 
45 static void mv_xor_issue_pending(struct dma_chan *chan);
46 
47 #define to_mv_xor_chan(chan)		\
48 	container_of(chan, struct mv_xor_chan, dmachan)
49 
50 #define to_mv_xor_slot(tx)		\
51 	container_of(tx, struct mv_xor_desc_slot, async_tx)
52 
53 #define mv_chan_to_devp(chan)           \
54 	((chan)->dmadev.dev)
55 
56 static void mv_desc_init(struct mv_xor_desc_slot *desc,
57 			 dma_addr_t addr, u32 byte_count,
58 			 enum dma_ctrl_flags flags)
59 {
60 	struct mv_xor_desc *hw_desc = desc->hw_desc;
61 
62 	hw_desc->status = XOR_DESC_DMA_OWNED;
63 	hw_desc->phy_next_desc = 0;
64 	/* Enable end-of-descriptor interrupts only for DMA_PREP_INTERRUPT */
65 	hw_desc->desc_command = (flags & DMA_PREP_INTERRUPT) ?
66 				XOR_DESC_EOD_INT_EN : 0;
67 	hw_desc->phy_dest_addr = addr;
68 	hw_desc->byte_count = byte_count;
69 }
70 
71 static void mv_desc_set_mode(struct mv_xor_desc_slot *desc)
72 {
73 	struct mv_xor_desc *hw_desc = desc->hw_desc;
74 
75 	switch (desc->type) {
76 	case DMA_XOR:
77 	case DMA_INTERRUPT:
78 		hw_desc->desc_command |= XOR_DESC_OPERATION_XOR;
79 		break;
80 	case DMA_MEMCPY:
81 		hw_desc->desc_command |= XOR_DESC_OPERATION_MEMCPY;
82 		break;
83 	default:
84 		BUG();
85 		return;
86 	}
87 }
88 
89 static void mv_desc_set_next_desc(struct mv_xor_desc_slot *desc,
90 				  u32 next_desc_addr)
91 {
92 	struct mv_xor_desc *hw_desc = desc->hw_desc;
93 	BUG_ON(hw_desc->phy_next_desc);
94 	hw_desc->phy_next_desc = next_desc_addr;
95 }
96 
97 static void mv_desc_set_src_addr(struct mv_xor_desc_slot *desc,
98 				 int index, dma_addr_t addr)
99 {
100 	struct mv_xor_desc *hw_desc = desc->hw_desc;
101 	hw_desc->phy_src_addr[mv_phy_src_idx(index)] = addr;
102 	if (desc->type == DMA_XOR)
103 		hw_desc->desc_command |= (1 << index);
104 }
105 
106 static u32 mv_chan_get_current_desc(struct mv_xor_chan *chan)
107 {
108 	return readl_relaxed(XOR_CURR_DESC(chan));
109 }
110 
111 static void mv_chan_set_next_descriptor(struct mv_xor_chan *chan,
112 					u32 next_desc_addr)
113 {
114 	writel_relaxed(next_desc_addr, XOR_NEXT_DESC(chan));
115 }
116 
117 static void mv_chan_unmask_interrupts(struct mv_xor_chan *chan)
118 {
119 	u32 val = readl_relaxed(XOR_INTR_MASK(chan));
120 	val |= XOR_INTR_MASK_VALUE << (chan->idx * 16);
121 	writel_relaxed(val, XOR_INTR_MASK(chan));
122 }
123 
124 static u32 mv_chan_get_intr_cause(struct mv_xor_chan *chan)
125 {
126 	u32 intr_cause = readl_relaxed(XOR_INTR_CAUSE(chan));
127 	intr_cause = (intr_cause >> (chan->idx * 16)) & 0xFFFF;
128 	return intr_cause;
129 }
130 
131 static void mv_chan_clear_eoc_cause(struct mv_xor_chan *chan)
132 {
133 	u32 val;
134 
135 	val = XOR_INT_END_OF_DESC | XOR_INT_END_OF_CHAIN | XOR_INT_STOPPED;
136 	val = ~(val << (chan->idx * 16));
137 	dev_dbg(mv_chan_to_devp(chan), "%s, val 0x%08x\n", __func__, val);
138 	writel_relaxed(val, XOR_INTR_CAUSE(chan));
139 }
140 
141 static void mv_chan_clear_err_status(struct mv_xor_chan *chan)
142 {
143 	u32 val = 0xFFFF0000 >> (chan->idx * 16);
144 	writel_relaxed(val, XOR_INTR_CAUSE(chan));
145 }
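/*
 * Note on the helpers above: the interrupt cause and mask registers are
 * shared by the channels of an engine, each channel owning a 16-bit
 * slice selected by "chan->idx * 16".  The cause register appears to be
 * acknowledged by writing 0 to the bits being cleared, which is why
 * mv_chan_clear_eoc_cause() and mv_chan_clear_err_status() write a value
 * with 0s at the bits they acknowledge.
 */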
146 
147 static void mv_chan_set_mode(struct mv_xor_chan *chan,
148 			     u32 op_mode)
149 {
150 	u32 config = readl_relaxed(XOR_CONFIG(chan));
151 
152 	config &= ~0x7;
153 	config |= op_mode;
154 
155 #if defined(__BIG_ENDIAN)
156 	config |= XOR_DESCRIPTOR_SWAP;
157 #else
158 	config &= ~XOR_DESCRIPTOR_SWAP;
159 #endif
160 
161 	writel_relaxed(config, XOR_CONFIG(chan));
162 }
163 
164 static void mv_chan_activate(struct mv_xor_chan *chan)
165 {
166 	dev_dbg(mv_chan_to_devp(chan), " activate chan.\n");
167 
168 	/* writel ensures all descriptors are flushed before activation */
169 	writel(BIT(0), XOR_ACTIVATION(chan));
170 }
171 
172 static char mv_chan_is_busy(struct mv_xor_chan *chan)
173 {
174 	u32 state = readl_relaxed(XOR_ACTIVATION(chan));
175 
176 	state = (state >> 4) & 0x3;
177 
178 	return (state == 1) ? 1 : 0;
179 }
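/*
 * Bits [5:4] of the activation register hold the channel state; the
 * channel is reported busy only when this field reads 1 (active); any
 * other state is treated as not busy.
 */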
180 
181 /*
182  * mv_chan_start_new_chain - program the engine to operate on a new
183  * chain headed by sw_desc
184  * Caller must hold &mv_chan->lock while calling this function
185  */
186 static void mv_chan_start_new_chain(struct mv_xor_chan *mv_chan,
187 				    struct mv_xor_desc_slot *sw_desc)
188 {
189 	dev_dbg(mv_chan_to_devp(mv_chan), "%s %d: sw_desc %p\n",
190 		__func__, __LINE__, sw_desc);
191 
192 	/* set the hardware chain */
193 	mv_chan_set_next_descriptor(mv_chan, sw_desc->async_tx.phys);
194 
195 	mv_chan->pending++;
196 	mv_xor_issue_pending(&mv_chan->dmachan);
197 }
198 
199 static dma_cookie_t
200 mv_desc_run_tx_complete_actions(struct mv_xor_desc_slot *desc,
201 				struct mv_xor_chan *mv_chan,
202 				dma_cookie_t cookie)
203 {
204 	BUG_ON(desc->async_tx.cookie < 0);
205 
206 	if (desc->async_tx.cookie > 0) {
207 		cookie = desc->async_tx.cookie;
208 
209 		/* call the callback (must not sleep or submit new
210 		 * operations to this channel)
211 		 */
212 		if (desc->async_tx.callback)
213 			desc->async_tx.callback(
214 				desc->async_tx.callback_param);
215 
216 		dma_descriptor_unmap(&desc->async_tx);
217 	}
218 
219 	/* run dependent operations */
220 	dma_run_dependencies(&desc->async_tx);
221 
222 	return cookie;
223 }
224 
225 static int
226 mv_chan_clean_completed_slots(struct mv_xor_chan *mv_chan)
227 {
228 	struct mv_xor_desc_slot *iter, *_iter;
229 
230 	dev_dbg(mv_chan_to_devp(mv_chan), "%s %d\n", __func__, __LINE__);
231 	list_for_each_entry_safe(iter, _iter, &mv_chan->completed_slots,
232 				 node) {
233 
234 		if (async_tx_test_ack(&iter->async_tx))
235 			list_move_tail(&iter->node, &mv_chan->free_slots);
236 	}
237 	return 0;
238 }
239 
240 static int
241 mv_desc_clean_slot(struct mv_xor_desc_slot *desc,
242 		   struct mv_xor_chan *mv_chan)
243 {
244 	dev_dbg(mv_chan_to_devp(mv_chan), "%s %d: desc %p flags %d\n",
245 		__func__, __LINE__, desc, desc->async_tx.flags);
246 
247 	/* the client is allowed to attach dependent operations
248 	 * until 'ack' is set
249 	 */
250 	if (!async_tx_test_ack(&desc->async_tx))
251 		/* move this slot to the completed_slots */
252 		list_move_tail(&desc->node, &mv_chan->completed_slots);
253 	else
254 		list_move_tail(&desc->node, &mv_chan->free_slots);
255 
256 	return 0;
257 }
258 
259 /* This function must be called with the mv_xor_chan spinlock held */
260 static void mv_chan_slot_cleanup(struct mv_xor_chan *mv_chan)
261 {
262 	struct mv_xor_desc_slot *iter, *_iter;
263 	dma_cookie_t cookie = 0;
264 	int busy = mv_chan_is_busy(mv_chan);
265 	u32 current_desc = mv_chan_get_current_desc(mv_chan);
266 	int current_cleaned = 0;
267 	struct mv_xor_desc *hw_desc;
268 
269 	dev_dbg(mv_chan_to_devp(mv_chan), "%s %d\n", __func__, __LINE__);
270 	dev_dbg(mv_chan_to_devp(mv_chan), "current_desc %x\n", current_desc);
271 	mv_chan_clean_completed_slots(mv_chan);
272 
273 	/* free completed slots from the chain starting with
274 	 * the oldest descriptor
275 	 */
276 
277 	list_for_each_entry_safe(iter, _iter, &mv_chan->chain,
278 				 node) {
279 
280 		/* clean finished descriptors */
281 		hw_desc = iter->hw_desc;
282 		if (hw_desc->status & XOR_DESC_SUCCESS) {
283 			cookie = mv_desc_run_tx_complete_actions(iter, mv_chan,
284 								 cookie);
285 
286 			/* done processing desc, clean slot */
287 			mv_desc_clean_slot(iter, mv_chan);
288 
289 			/* break if we cleaned the current descriptor */
290 			if (iter->async_tx.phys == current_desc) {
291 				current_cleaned = 1;
292 				break;
293 			}
294 		} else {
295 			if (iter->async_tx.phys == current_desc) {
296 				current_cleaned = 0;
297 				break;
298 			}
299 		}
300 	}
301 
302 	if ((busy == 0) && !list_empty(&mv_chan->chain)) {
303 		if (current_cleaned) {
304 			/*
305 			 * current descriptor cleaned and removed, run
306 			 * from list head
307 			 */
308 			iter = list_entry(mv_chan->chain.next,
309 					  struct mv_xor_desc_slot,
310 					  node);
311 			mv_chan_start_new_chain(mv_chan, iter);
312 		} else {
313 			if (!list_is_last(&iter->node, &mv_chan->chain)) {
314 				/*
315 				 * descriptors are still waiting after
316 				 * current, trigger them
317 				 */
318 				iter = list_entry(iter->node.next,
319 						  struct mv_xor_desc_slot,
320 						  node);
321 				mv_chan_start_new_chain(mv_chan, iter);
322 			} else {
323 				/*
324 				 * some descriptors are still waiting
325 				 * to be cleaned
326 				 */
327 				tasklet_schedule(&mv_chan->irq_tasklet);
328 			}
329 		}
330 	}
331 
332 	if (cookie > 0)
333 		mv_chan->dmachan.completed_cookie = cookie;
334 }
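/*
 * The cleanup above walks the software chain from the oldest descriptor,
 * completing every slot the hardware has marked XOR_DESC_SUCCESS, and
 * stops once it reaches the descriptor the engine currently points at.
 * If the engine has gone idle while descriptors are still queued, the
 * hardware chain is restarted from the first descriptor left to run; if
 * the stopped-on descriptor has not completed yet and nothing follows
 * it, the tasklet is rescheduled to clean up later instead.
 */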
335 
336 static void mv_xor_tasklet(unsigned long data)
337 {
338 	struct mv_xor_chan *chan = (struct mv_xor_chan *) data;
339 
340 	spin_lock_bh(&chan->lock);
341 	mv_chan_slot_cleanup(chan);
342 	spin_unlock_bh(&chan->lock);
343 }
344 
345 static struct mv_xor_desc_slot *
346 mv_chan_alloc_slot(struct mv_xor_chan *mv_chan)
347 {
348 	struct mv_xor_desc_slot *iter;
349 
350 	spin_lock_bh(&mv_chan->lock);
351 
352 	if (!list_empty(&mv_chan->free_slots)) {
353 		iter = list_first_entry(&mv_chan->free_slots,
354 					struct mv_xor_desc_slot,
355 					node);
356 
357 		list_move_tail(&iter->node, &mv_chan->allocated_slots);
358 
359 		spin_unlock_bh(&mv_chan->lock);
360 
361 		/* pre-ack descriptor */
362 		async_tx_ack(&iter->async_tx);
363 		iter->async_tx.cookie = -EBUSY;
364 
365 		return iter;
366 
367 	}
368 
369 	spin_unlock_bh(&mv_chan->lock);
370 
371 	/* try to free some slots if the allocation fails */
372 	tasklet_schedule(&mv_chan->irq_tasklet);
373 
374 	return NULL;
375 }
376 
377 /************************ DMA engine API functions ****************************/
378 static dma_cookie_t
379 mv_xor_tx_submit(struct dma_async_tx_descriptor *tx)
380 {
381 	struct mv_xor_desc_slot *sw_desc = to_mv_xor_slot(tx);
382 	struct mv_xor_chan *mv_chan = to_mv_xor_chan(tx->chan);
383 	struct mv_xor_desc_slot *old_chain_tail;
384 	dma_cookie_t cookie;
385 	int new_hw_chain = 1;
386 
387 	dev_dbg(mv_chan_to_devp(mv_chan),
388 		"%s sw_desc %p: async_tx %p\n",
389 		__func__, sw_desc, &sw_desc->async_tx);
390 
391 	spin_lock_bh(&mv_chan->lock);
392 	cookie = dma_cookie_assign(tx);
393 
394 	if (list_empty(&mv_chan->chain))
395 		list_move_tail(&sw_desc->node, &mv_chan->chain);
396 	else {
397 		new_hw_chain = 0;
398 
399 		old_chain_tail = list_entry(mv_chan->chain.prev,
400 					    struct mv_xor_desc_slot,
401 					    node);
402 		list_move_tail(&sw_desc->node, &mv_chan->chain);
403 
404 		dev_dbg(mv_chan_to_devp(mv_chan), "Append to last desc %pa\n",
405 			&old_chain_tail->async_tx.phys);
406 
407 		/* fix up the hardware chain */
408 		mv_desc_set_next_desc(old_chain_tail, sw_desc->async_tx.phys);
409 
410 		/* if the channel is not busy */
411 		if (!mv_chan_is_busy(mv_chan)) {
412 			u32 current_desc = mv_chan_get_current_desc(mv_chan);
413 			/*
414 			 * and the current desc is the end of the chain before
415 			 * the append, then we need to start the channel
416 			 */
417 			if (current_desc == old_chain_tail->async_tx.phys)
418 				new_hw_chain = 1;
419 		}
420 	}
421 
422 	if (new_hw_chain)
423 		mv_chan_start_new_chain(mv_chan, sw_desc);
424 
425 	spin_unlock_bh(&mv_chan->lock);
426 
427 	return cookie;
428 }
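/*
 * Submission keeps the software and hardware chains in sync: a new
 * descriptor is normally just linked after the current tail with
 * mv_desc_set_next_desc() and the engine picks it up by itself.  A new
 * hardware chain only has to be started when the software chain was
 * empty, or when the idle engine already stopped on what used to be the
 * tail and would therefore never see the appended descriptor.
 */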
429 
430 /* returns the number of allocated descriptors */
431 static int mv_xor_alloc_chan_resources(struct dma_chan *chan)
432 {
433 	void *virt_desc;
434 	dma_addr_t dma_desc;
435 	int idx;
436 	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
437 	struct mv_xor_desc_slot *slot = NULL;
438 	int num_descs_in_pool = MV_XOR_POOL_SIZE/MV_XOR_SLOT_SIZE;
439 
440 	/* Allocate descriptor slots */
441 	idx = mv_chan->slots_allocated;
442 	while (idx < num_descs_in_pool) {
443 		slot = kzalloc(sizeof(*slot), GFP_KERNEL);
444 		if (!slot) {
445 			dev_info(mv_chan_to_devp(mv_chan),
446 				 "channel only initialized %d descriptor slots\n",
447 				 idx);
448 			break;
449 		}
450 		virt_desc = mv_chan->dma_desc_pool_virt;
451 		slot->hw_desc = virt_desc + idx * MV_XOR_SLOT_SIZE;
452 
453 		dma_async_tx_descriptor_init(&slot->async_tx, chan);
454 		slot->async_tx.tx_submit = mv_xor_tx_submit;
455 		INIT_LIST_HEAD(&slot->node);
456 		dma_desc = mv_chan->dma_desc_pool;
457 		slot->async_tx.phys = dma_desc + idx * MV_XOR_SLOT_SIZE;
458 		slot->idx = idx++;
459 
460 		spin_lock_bh(&mv_chan->lock);
461 		mv_chan->slots_allocated = idx;
462 		list_add_tail(&slot->node, &mv_chan->free_slots);
463 		spin_unlock_bh(&mv_chan->lock);
464 	}
465 
466 	dev_dbg(mv_chan_to_devp(mv_chan),
467 		"allocated %d descriptor slots\n",
468 		mv_chan->slots_allocated);
469 
470 	return mv_chan->slots_allocated ? : -ENOMEM;
471 }
472 
473 static struct dma_async_tx_descriptor *
474 mv_xor_prep_dma_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
475 		    unsigned int src_cnt, size_t len, unsigned long flags)
476 {
477 	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
478 	struct mv_xor_desc_slot *sw_desc;
479 
480 	if (unlikely(len < MV_XOR_MIN_BYTE_COUNT))
481 		return NULL;
482 
483 	BUG_ON(len > MV_XOR_MAX_BYTE_COUNT);
484 
485 	dev_dbg(mv_chan_to_devp(mv_chan),
486 		"%s src_cnt: %d len: %zu dest %pad flags: %ld\n",
487 		__func__, src_cnt, len, &dest, flags);
488 
489 	sw_desc = mv_chan_alloc_slot(mv_chan);
490 	if (sw_desc) {
491 		sw_desc->type = DMA_XOR;
492 		sw_desc->async_tx.flags = flags;
493 		mv_desc_init(sw_desc, dest, len, flags);
494 		if (mv_chan->op_in_desc == XOR_MODE_IN_DESC)
495 			mv_desc_set_mode(sw_desc);
496 		while (src_cnt--)
497 			mv_desc_set_src_addr(sw_desc, src_cnt, src[src_cnt]);
498 	}
499 
500 	dev_dbg(mv_chan_to_devp(mv_chan),
501 		"%s sw_desc %p async_tx %p\n",
502 		__func__, sw_desc, &sw_desc->async_tx);
503 	return sw_desc ? &sw_desc->async_tx : NULL;
504 }
505 
506 static struct dma_async_tx_descriptor *
507 mv_xor_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
508 		size_t len, unsigned long flags)
509 {
510 	/*
511 	 * A MEMCPY operation is identical to an XOR operation with only
512 	 * a single source address.
513 	 */
514 	return mv_xor_prep_dma_xor(chan, dest, &src, 1, len, flags);
515 }
516 
517 static struct dma_async_tx_descriptor *
518 mv_xor_prep_dma_interrupt(struct dma_chan *chan, unsigned long flags)
519 {
520 	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
521 	dma_addr_t src, dest;
522 	size_t len;
523 
524 	src = mv_chan->dummy_src_addr;
525 	dest = mv_chan->dummy_dst_addr;
526 	len = MV_XOR_MIN_BYTE_COUNT;
527 
528 	/*
529 	 * We implement the DMA_INTERRUPT operation as a minimum sized
530 	 * XOR operation with a single dummy source address.
531 	 */
532 	return mv_xor_prep_dma_xor(chan, dest, &src, 1, len, flags);
533 }
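/*
 * Illustrative dmaengine client usage of such a channel (hypothetical
 * names like "my_done" and "my_ctx" are placeholders, not part of this
 * driver):
 *
 *	tx = dmaengine_prep_dma_memcpy(chan, dst_dma, src_dma, len,
 *				       DMA_PREP_INTERRUPT);
 *	tx->callback = my_done;
 *	tx->callback_param = my_ctx;
 *	cookie = dmaengine_submit(tx);
 *	dma_async_issue_pending(chan);
 */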
534 
535 static void mv_xor_free_chan_resources(struct dma_chan *chan)
536 {
537 	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
538 	struct mv_xor_desc_slot *iter, *_iter;
539 	int in_use_descs = 0;
540 
541 	spin_lock_bh(&mv_chan->lock);
542 
543 	mv_chan_slot_cleanup(mv_chan);
544 
545 	list_for_each_entry_safe(iter, _iter, &mv_chan->chain,
546 					node) {
547 		in_use_descs++;
548 		list_move_tail(&iter->node, &mv_chan->free_slots);
549 	}
550 	list_for_each_entry_safe(iter, _iter, &mv_chan->completed_slots,
551 				 node) {
552 		in_use_descs++;
553 		list_move_tail(&iter->node, &mv_chan->free_slots);
554 	}
555 	list_for_each_entry_safe(iter, _iter, &mv_chan->allocated_slots,
556 				 node) {
557 		in_use_descs++;
558 		list_move_tail(&iter->node, &mv_chan->free_slots);
559 	}
560 	list_for_each_entry_safe_reverse(
561 		iter, _iter, &mv_chan->free_slots, node) {
562 		list_del(&iter->node);
563 		kfree(iter);
564 		mv_chan->slots_allocated--;
565 	}
566 
567 	dev_dbg(mv_chan_to_devp(mv_chan), "%s slots_allocated %d\n",
568 		__func__, mv_chan->slots_allocated);
569 	spin_unlock_bh(&mv_chan->lock);
570 
571 	if (in_use_descs)
572 		dev_err(mv_chan_to_devp(mv_chan),
573 			"freeing %d in use descriptors!\n", in_use_descs);
574 }
575 
576 /**
577  * mv_xor_status - poll the status of an XOR transaction
578  * @chan: XOR channel handle
579  * @cookie: XOR transaction identifier
580  * @txstate: XOR transactions state holder (or NULL)
581  */
582 static enum dma_status mv_xor_status(struct dma_chan *chan,
583 					  dma_cookie_t cookie,
584 					  struct dma_tx_state *txstate)
585 {
586 	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
587 	enum dma_status ret;
588 
589 	ret = dma_cookie_status(chan, cookie, txstate);
590 	if (ret == DMA_COMPLETE)
591 		return ret;
592 
593 	spin_lock_bh(&mv_chan->lock);
594 	mv_chan_slot_cleanup(mv_chan);
595 	spin_unlock_bh(&mv_chan->lock);
596 
597 	return dma_cookie_status(chan, cookie, txstate);
598 }
599 
600 static void mv_chan_dump_regs(struct mv_xor_chan *chan)
601 {
602 	u32 val;
603 
604 	val = readl_relaxed(XOR_CONFIG(chan));
605 	dev_err(mv_chan_to_devp(chan), "config       0x%08x\n", val);
606 
607 	val = readl_relaxed(XOR_ACTIVATION(chan));
608 	dev_err(mv_chan_to_devp(chan), "activation   0x%08x\n", val);
609 
610 	val = readl_relaxed(XOR_INTR_CAUSE(chan));
611 	dev_err(mv_chan_to_devp(chan), "intr cause   0x%08x\n", val);
612 
613 	val = readl_relaxed(XOR_INTR_MASK(chan));
614 	dev_err(mv_chan_to_devp(chan), "intr mask    0x%08x\n", val);
615 
616 	val = readl_relaxed(XOR_ERROR_CAUSE(chan));
617 	dev_err(mv_chan_to_devp(chan), "error cause  0x%08x\n", val);
618 
619 	val = readl_relaxed(XOR_ERROR_ADDR(chan));
620 	dev_err(mv_chan_to_devp(chan), "error addr   0x%08x\n", val);
621 }
622 
623 static void mv_chan_err_interrupt_handler(struct mv_xor_chan *chan,
624 					  u32 intr_cause)
625 {
626 	if (intr_cause & XOR_INT_ERR_DECODE) {
627 		dev_dbg(mv_chan_to_devp(chan), "ignoring address decode error\n");
628 		return;
629 	}
630 
631 	dev_err(mv_chan_to_devp(chan), "error on chan %d. intr cause 0x%08x\n",
632 		chan->idx, intr_cause);
633 
634 	mv_chan_dump_regs(chan);
635 	WARN_ON(1);
636 }
637 
638 static irqreturn_t mv_xor_interrupt_handler(int irq, void *data)
639 {
640 	struct mv_xor_chan *chan = data;
641 	u32 intr_cause = mv_chan_get_intr_cause(chan);
642 
643 	dev_dbg(mv_chan_to_devp(chan), "intr cause %x\n", intr_cause);
644 
645 	if (intr_cause & XOR_INTR_ERRORS)
646 		mv_chan_err_interrupt_handler(chan, intr_cause);
647 
648 	tasklet_schedule(&chan->irq_tasklet);
649 
650 	mv_chan_clear_eoc_cause(chan);
651 
652 	return IRQ_HANDLED;
653 }
654 
655 static void mv_xor_issue_pending(struct dma_chan *chan)
656 {
657 	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
658 
659 	if (mv_chan->pending >= MV_XOR_THRESHOLD) {
660 		mv_chan->pending = 0;
661 		mv_chan_activate(mv_chan);
662 	}
663 }
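/*
 * Activation is deferred until at least MV_XOR_THRESHOLD descriptors
 * have been queued since the last activation, batching the MMIO write
 * that kicks the engine.  mv_chan_start_new_chain() bumps the pending
 * count and calls back into this function, so internal restarts go
 * through the same throttle.
 */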
664 
665 /*
666  * Perform a transaction to verify the HW works.
667  */
668 
669 static int mv_chan_memcpy_self_test(struct mv_xor_chan *mv_chan)
670 {
671 	int i, ret;
672 	void *src, *dest;
673 	dma_addr_t src_dma, dest_dma;
674 	struct dma_chan *dma_chan;
675 	dma_cookie_t cookie;
676 	struct dma_async_tx_descriptor *tx;
677 	struct dmaengine_unmap_data *unmap;
678 	int err = 0;
679 
680 	src = kmalloc(sizeof(u8) * PAGE_SIZE, GFP_KERNEL);
681 	if (!src)
682 		return -ENOMEM;
683 
684 	dest = kzalloc(sizeof(u8) * PAGE_SIZE, GFP_KERNEL);
685 	if (!dest) {
686 		kfree(src);
687 		return -ENOMEM;
688 	}
689 
690 	/* Fill in src buffer */
691 	for (i = 0; i < PAGE_SIZE; i++)
692 		((u8 *) src)[i] = (u8)i;
693 
694 	dma_chan = &mv_chan->dmachan;
695 	if (mv_xor_alloc_chan_resources(dma_chan) < 1) {
696 		err = -ENODEV;
697 		goto out;
698 	}
699 
700 	unmap = dmaengine_get_unmap_data(dma_chan->device->dev, 2, GFP_KERNEL);
701 	if (!unmap) {
702 		err = -ENOMEM;
703 		goto free_resources;
704 	}
705 
706 	src_dma = dma_map_page(dma_chan->device->dev, virt_to_page(src), 0,
707 				 PAGE_SIZE, DMA_TO_DEVICE);
708 	unmap->addr[0] = src_dma;
709 
710 	ret = dma_mapping_error(dma_chan->device->dev, src_dma);
711 	if (ret) {
712 		err = -ENOMEM;
713 		goto free_resources;
714 	}
715 	unmap->to_cnt = 1;
716 
717 	dest_dma = dma_map_page(dma_chan->device->dev, virt_to_page(dest), 0,
718 				  PAGE_SIZE, DMA_FROM_DEVICE);
719 	unmap->addr[1] = dest_dma;
720 
721 	ret = dma_mapping_error(dma_chan->device->dev, dest_dma);
722 	if (ret) {
723 		err = -ENOMEM;
724 		goto free_resources;
725 	}
726 	unmap->from_cnt = 1;
727 	unmap->len = PAGE_SIZE;
728 
729 	tx = mv_xor_prep_dma_memcpy(dma_chan, dest_dma, src_dma,
730 				    PAGE_SIZE, 0);
731 	if (!tx) {
732 		dev_err(dma_chan->device->dev,
733 			"Self-test cannot prepare operation, disabling\n");
734 		err = -ENODEV;
735 		goto free_resources;
736 	}
737 
738 	cookie = mv_xor_tx_submit(tx);
739 	if (dma_submit_error(cookie)) {
740 		dev_err(dma_chan->device->dev,
741 			"Self-test submit error, disabling\n");
742 		err = -ENODEV;
743 		goto free_resources;
744 	}
745 
746 	mv_xor_issue_pending(dma_chan);
747 	async_tx_ack(tx);
748 	msleep(1);
749 
750 	if (mv_xor_status(dma_chan, cookie, NULL) !=
751 	    DMA_COMPLETE) {
752 		dev_err(dma_chan->device->dev,
753 			"Self-test copy timed out, disabling\n");
754 		err = -ENODEV;
755 		goto free_resources;
756 	}
757 
758 	dma_sync_single_for_cpu(dma_chan->device->dev, dest_dma,
759 				PAGE_SIZE, DMA_FROM_DEVICE);
760 	if (memcmp(src, dest, PAGE_SIZE)) {
761 		dev_err(dma_chan->device->dev,
762 			"Self-test copy failed compare, disabling\n");
763 		err = -ENODEV;
764 		goto free_resources;
765 	}
766 
767 free_resources:
768 	dmaengine_unmap_put(unmap);
769 	mv_xor_free_chan_resources(dma_chan);
770 out:
771 	kfree(src);
772 	kfree(dest);
773 	return err;
774 }
775 
776 #define MV_XOR_NUM_SRC_TEST 4 /* must be <= 15 */
777 static int
778 mv_chan_xor_self_test(struct mv_xor_chan *mv_chan)
779 {
780 	int i, src_idx, ret;
781 	struct page *dest;
782 	struct page *xor_srcs[MV_XOR_NUM_SRC_TEST];
783 	dma_addr_t dma_srcs[MV_XOR_NUM_SRC_TEST];
784 	dma_addr_t dest_dma;
785 	struct dma_async_tx_descriptor *tx;
786 	struct dmaengine_unmap_data *unmap;
787 	struct dma_chan *dma_chan;
788 	dma_cookie_t cookie;
789 	u8 cmp_byte = 0;
790 	u32 cmp_word;
791 	int err = 0;
792 	int src_count = MV_XOR_NUM_SRC_TEST;
793 
794 	for (src_idx = 0; src_idx < src_count; src_idx++) {
795 		xor_srcs[src_idx] = alloc_page(GFP_KERNEL);
796 		if (!xor_srcs[src_idx]) {
797 			while (src_idx--)
798 				__free_page(xor_srcs[src_idx]);
799 			return -ENOMEM;
800 		}
801 	}
802 
803 	dest = alloc_page(GFP_KERNEL);
804 	if (!dest) {
805 		while (src_idx--)
806 			__free_page(xor_srcs[src_idx]);
807 		return -ENOMEM;
808 	}
809 
810 	/* Fill in src buffers */
811 	for (src_idx = 0; src_idx < src_count; src_idx++) {
812 		u8 *ptr = page_address(xor_srcs[src_idx]);
813 		for (i = 0; i < PAGE_SIZE; i++)
814 			ptr[i] = (1 << src_idx);
815 	}
816 
817 	for (src_idx = 0; src_idx < src_count; src_idx++)
818 		cmp_byte ^= (u8) (1 << src_idx);
819 
820 	cmp_word = (cmp_byte << 24) | (cmp_byte << 16) |
821 		(cmp_byte << 8) | cmp_byte;
822 
823 	memset(page_address(dest), 0, PAGE_SIZE);
824 
825 	dma_chan = &mv_chan->dmachan;
826 	if (mv_xor_alloc_chan_resources(dma_chan) < 1) {
827 		err = -ENODEV;
828 		goto out;
829 	}
830 
831 	unmap = dmaengine_get_unmap_data(dma_chan->device->dev, src_count + 1,
832 					 GFP_KERNEL);
833 	if (!unmap) {
834 		err = -ENOMEM;
835 		goto free_resources;
836 	}
837 
838 	/* test xor */
839 	for (i = 0; i < src_count; i++) {
840 		unmap->addr[i] = dma_map_page(dma_chan->device->dev, xor_srcs[i],
841 					      0, PAGE_SIZE, DMA_TO_DEVICE);
842 		dma_srcs[i] = unmap->addr[i];
843 		ret = dma_mapping_error(dma_chan->device->dev, unmap->addr[i]);
844 		if (ret) {
845 			err = -ENOMEM;
846 			goto free_resources;
847 		}
848 		unmap->to_cnt++;
849 	}
850 
851 	unmap->addr[src_count] = dma_map_page(dma_chan->device->dev, dest, 0, PAGE_SIZE,
852 				      DMA_FROM_DEVICE);
853 	dest_dma = unmap->addr[src_count];
854 	ret = dma_mapping_error(dma_chan->device->dev, unmap->addr[src_count]);
855 	if (ret) {
856 		err = -ENOMEM;
857 		goto free_resources;
858 	}
859 	unmap->from_cnt = 1;
860 	unmap->len = PAGE_SIZE;
861 
862 	tx = mv_xor_prep_dma_xor(dma_chan, dest_dma, dma_srcs,
863 				 src_count, PAGE_SIZE, 0);
864 	if (!tx) {
865 		dev_err(dma_chan->device->dev,
866 			"Self-test cannot prepare operation, disabling\n");
867 		err = -ENODEV;
868 		goto free_resources;
869 	}
870 
871 	cookie = mv_xor_tx_submit(tx);
872 	if (dma_submit_error(cookie)) {
873 		dev_err(dma_chan->device->dev,
874 			"Self-test submit error, disabling\n");
875 		err = -ENODEV;
876 		goto free_resources;
877 	}
878 
879 	mv_xor_issue_pending(dma_chan);
880 	async_tx_ack(tx);
881 	msleep(8);
882 
883 	if (mv_xor_status(dma_chan, cookie, NULL) !=
884 	    DMA_COMPLETE) {
885 		dev_err(dma_chan->device->dev,
886 			"Self-test xor timed out, disabling\n");
887 		err = -ENODEV;
888 		goto free_resources;
889 	}
890 
891 	dma_sync_single_for_cpu(dma_chan->device->dev, dest_dma,
892 				PAGE_SIZE, DMA_FROM_DEVICE);
893 	for (i = 0; i < (PAGE_SIZE / sizeof(u32)); i++) {
894 		u32 *ptr = page_address(dest);
895 		if (ptr[i] != cmp_word) {
896 			dev_err(dma_chan->device->dev,
897 				"Self-test xor failed compare, disabling. index %d, data %x, expected %x\n",
898 				i, ptr[i], cmp_word);
899 			err = -ENODEV;
900 			goto free_resources;
901 		}
902 	}
903 
904 free_resources:
905 	dmaengine_unmap_put(unmap);
906 	mv_xor_free_chan_resources(dma_chan);
907 out:
908 	src_idx = src_count;
909 	while (src_idx--)
910 		__free_page(xor_srcs[src_idx]);
911 	__free_page(dest);
912 	return err;
913 }
914 
915 static int mv_xor_channel_remove(struct mv_xor_chan *mv_chan)
916 {
917 	struct dma_chan *chan, *_chan;
918 	struct device *dev = mv_chan->dmadev.dev;
919 
920 	dma_async_device_unregister(&mv_chan->dmadev);
921 
922 	dma_free_coherent(dev, MV_XOR_POOL_SIZE,
923 			  mv_chan->dma_desc_pool_virt, mv_chan->dma_desc_pool);
924 	dma_unmap_single(dev, mv_chan->dummy_src_addr,
925 			 MV_XOR_MIN_BYTE_COUNT, DMA_FROM_DEVICE);
926 	dma_unmap_single(dev, mv_chan->dummy_dst_addr,
927 			 MV_XOR_MIN_BYTE_COUNT, DMA_TO_DEVICE);
928 
929 	list_for_each_entry_safe(chan, _chan, &mv_chan->dmadev.channels,
930 				 device_node) {
931 		list_del(&chan->device_node);
932 	}
933 
934 	free_irq(mv_chan->irq, mv_chan);
935 
936 	return 0;
937 }
938 
939 static struct mv_xor_chan *
940 mv_xor_channel_add(struct mv_xor_device *xordev,
941 		   struct platform_device *pdev,
942 		   int idx, dma_cap_mask_t cap_mask, int irq)
943 {
944 	int ret = 0;
945 	struct mv_xor_chan *mv_chan;
946 	struct dma_device *dma_dev;
947 
948 	mv_chan = devm_kzalloc(&pdev->dev, sizeof(*mv_chan), GFP_KERNEL);
949 	if (!mv_chan)
950 		return ERR_PTR(-ENOMEM);
951 
952 	mv_chan->idx = idx;
953 	mv_chan->irq = irq;
954 	if (xordev->xor_type == XOR_ORION)
955 		mv_chan->op_in_desc = XOR_MODE_IN_REG;
956 	else
957 		mv_chan->op_in_desc = XOR_MODE_IN_DESC;
958 
959 	dma_dev = &mv_chan->dmadev;
960 
961 	/*
962 	 * These source and destination dummy buffers are used to implement
963 	 * a DMA_INTERRUPT operation as a minimum-sized XOR operation.
964 	 * Hence, we only need to map the buffers at initialization-time.
965 	 */
966 	mv_chan->dummy_src_addr = dma_map_single(dma_dev->dev,
967 		mv_chan->dummy_src, MV_XOR_MIN_BYTE_COUNT, DMA_FROM_DEVICE);
968 	mv_chan->dummy_dst_addr = dma_map_single(dma_dev->dev,
969 		mv_chan->dummy_dst, MV_XOR_MIN_BYTE_COUNT, DMA_TO_DEVICE);
970 
971 	/* allocate coherent memory for hardware descriptors
972 	 * note: writecombine gives slightly better performance, but
973 	 * requires that we explicitly flush the writes
974 	 */
975 	mv_chan->dma_desc_pool_virt =
976 	  dma_alloc_wc(&pdev->dev, MV_XOR_POOL_SIZE, &mv_chan->dma_desc_pool,
977 		       GFP_KERNEL);
978 	if (!mv_chan->dma_desc_pool_virt)
979 		return ERR_PTR(-ENOMEM);
980 
981 	/* discover transaction capabilities from the platform data */
982 	dma_dev->cap_mask = cap_mask;
983 
984 	INIT_LIST_HEAD(&dma_dev->channels);
985 
986 	/* set base routines */
987 	dma_dev->device_alloc_chan_resources = mv_xor_alloc_chan_resources;
988 	dma_dev->device_free_chan_resources = mv_xor_free_chan_resources;
989 	dma_dev->device_tx_status = mv_xor_status;
990 	dma_dev->device_issue_pending = mv_xor_issue_pending;
991 	dma_dev->dev = &pdev->dev;
992 
993 	/* set prep routines based on capability */
994 	if (dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask))
995 		dma_dev->device_prep_dma_interrupt = mv_xor_prep_dma_interrupt;
996 	if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask))
997 		dma_dev->device_prep_dma_memcpy = mv_xor_prep_dma_memcpy;
998 	if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
999 		dma_dev->max_xor = 8;
1000 		dma_dev->device_prep_dma_xor = mv_xor_prep_dma_xor;
1001 	}
1002 
1003 	mv_chan->mmr_base = xordev->xor_base;
1004 	mv_chan->mmr_high_base = xordev->xor_high_base;
1005 	tasklet_init(&mv_chan->irq_tasklet, mv_xor_tasklet, (unsigned long)
1006 		     mv_chan);
1007 
1008 	/* clear errors before enabling interrupts */
1009 	mv_chan_clear_err_status(mv_chan);
1010 
1011 	ret = request_irq(mv_chan->irq, mv_xor_interrupt_handler,
1012 			  0, dev_name(&pdev->dev), mv_chan);
1013 	if (ret)
1014 		goto err_free_dma;
1015 
1016 	mv_chan_unmask_interrupts(mv_chan);
1017 
1018 	if (mv_chan->op_in_desc == XOR_MODE_IN_DESC)
1019 		mv_chan_set_mode(mv_chan, XOR_OPERATION_MODE_IN_DESC);
1020 	else
1021 		mv_chan_set_mode(mv_chan, XOR_OPERATION_MODE_XOR);
1022 
1023 	spin_lock_init(&mv_chan->lock);
1024 	INIT_LIST_HEAD(&mv_chan->chain);
1025 	INIT_LIST_HEAD(&mv_chan->completed_slots);
1026 	INIT_LIST_HEAD(&mv_chan->free_slots);
1027 	INIT_LIST_HEAD(&mv_chan->allocated_slots);
1028 	mv_chan->dmachan.device = dma_dev;
1029 	dma_cookie_init(&mv_chan->dmachan);
1030 
1031 	list_add_tail(&mv_chan->dmachan.device_node, &dma_dev->channels);
1032 
1033 	if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask)) {
1034 		ret = mv_chan_memcpy_self_test(mv_chan);
1035 		dev_dbg(&pdev->dev, "memcpy self test returned %d\n", ret);
1036 		if (ret)
1037 			goto err_free_irq;
1038 	}
1039 
1040 	if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
1041 		ret = mv_chan_xor_self_test(mv_chan);
1042 		dev_dbg(&pdev->dev, "xor self test returned %d\n", ret);
1043 		if (ret)
1044 			goto err_free_irq;
1045 	}
1046 
1047 	dev_info(&pdev->dev, "Marvell XOR (%s): ( %s%s%s)\n",
1048 		 mv_chan->op_in_desc ? "Descriptor Mode" : "Registers Mode",
1049 		 dma_has_cap(DMA_XOR, dma_dev->cap_mask) ? "xor " : "",
1050 		 dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask) ? "cpy " : "",
1051 		 dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask) ? "intr " : "");
1052 
1053 	dma_async_device_register(dma_dev);
1054 	return mv_chan;
1055 
1056 err_free_irq:
1057 	free_irq(mv_chan->irq, mv_chan);
1058  err_free_dma:
1059 	dma_free_coherent(&pdev->dev, MV_XOR_POOL_SIZE,
1060 			  mv_chan->dma_desc_pool_virt, mv_chan->dma_desc_pool);
1061 	return ERR_PTR(ret);
1062 }
1063 
1064 static void
1065 mv_xor_conf_mbus_windows(struct mv_xor_device *xordev,
1066 			 const struct mbus_dram_target_info *dram)
1067 {
1068 	void __iomem *base = xordev->xor_high_base;
1069 	u32 win_enable = 0;
1070 	int i;
1071 
1072 	for (i = 0; i < 8; i++) {
1073 		writel(0, base + WINDOW_BASE(i));
1074 		writel(0, base + WINDOW_SIZE(i));
1075 		if (i < 4)
1076 			writel(0, base + WINDOW_REMAP_HIGH(i));
1077 	}
1078 
1079 	for (i = 0; i < dram->num_cs; i++) {
1080 		const struct mbus_dram_window *cs = dram->cs + i;
1081 
1082 		writel((cs->base & 0xffff0000) |
1083 		       (cs->mbus_attr << 8) |
1084 		       dram->mbus_dram_target_id, base + WINDOW_BASE(i));
1085 		writel((cs->size - 1) & 0xffff0000, base + WINDOW_SIZE(i));
1086 
1087 		win_enable |= (1 << i);
1088 		win_enable |= 3 << (16 + (2 * i));
1089 	}
1090 
1091 	writel(win_enable, base + WINDOW_BAR_ENABLE(0));
1092 	writel(win_enable, base + WINDOW_BAR_ENABLE(1));
1093 	writel(0, base + WINDOW_OVERRIDE_CTRL(0));
1094 	writel(0, base + WINDOW_OVERRIDE_CTRL(1));
1095 }
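/*
 * Each DRAM chip select described by the MBus info gets one address
 * decoding window in the XOR engine; only the upper 16 bits of the base
 * and size are programmable (hence the 0xffff0000 masks), and the
 * enable/access bits are written identically for both channels of the
 * engine.
 */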
1096 
1097 static void
1098 mv_xor_conf_mbus_windows_a3700(struct mv_xor_device *xordev)
1099 {
1100 	void __iomem *base = xordev->xor_high_base;
1101 	u32 win_enable = 0;
1102 	int i;
1103 
1104 	for (i = 0; i < 8; i++) {
1105 		writel(0, base + WINDOW_BASE(i));
1106 		writel(0, base + WINDOW_SIZE(i));
1107 		if (i < 4)
1108 			writel(0, base + WINDOW_REMAP_HIGH(i));
1109 	}
1110 	/*
1111 	 * For Armada 3700, open a default 4GB MBus window. The
1112 	 * DRAM-related configuration is done at the AXIS level.
1113 	 */
1114 	writel(0xffff0000, base + WINDOW_SIZE(0));
1115 	win_enable |= 1;
1116 	win_enable |= 3 << 16;
1117 
1118 	writel(win_enable, base + WINDOW_BAR_ENABLE(0));
1119 	writel(win_enable, base + WINDOW_BAR_ENABLE(1));
1120 	writel(0, base + WINDOW_OVERRIDE_CTRL(0));
1121 	writel(0, base + WINDOW_OVERRIDE_CTRL(1));
1122 }
1123 
1124 /*
1125  * Since this XOR driver is basically used only for RAID5, we don't
1126  * need to care about synchronizing ->suspend with DMA activity,
1127  * because the DMA engine will naturally be quiet due to the block
1128  * devices being suspended.
1129  */
1130 static int mv_xor_suspend(struct platform_device *pdev, pm_message_t state)
1131 {
1132 	struct mv_xor_device *xordev = platform_get_drvdata(pdev);
1133 	int i;
1134 
1135 	for (i = 0; i < MV_XOR_MAX_CHANNELS; i++) {
1136 		struct mv_xor_chan *mv_chan = xordev->channels[i];
1137 
1138 		if (!mv_chan)
1139 			continue;
1140 
1141 		mv_chan->saved_config_reg =
1142 			readl_relaxed(XOR_CONFIG(mv_chan));
1143 		mv_chan->saved_int_mask_reg =
1144 			readl_relaxed(XOR_INTR_MASK(mv_chan));
1145 	}
1146 
1147 	return 0;
1148 }
1149 
1150 static int mv_xor_resume(struct platform_device *dev)
1151 {
1152 	struct mv_xor_device *xordev = platform_get_drvdata(dev);
1153 	const struct mbus_dram_target_info *dram;
1154 	int i;
1155 
1156 	for (i = 0; i < MV_XOR_MAX_CHANNELS; i++) {
1157 		struct mv_xor_chan *mv_chan = xordev->channels[i];
1158 
1159 		if (!mv_chan)
1160 			continue;
1161 
1162 		writel_relaxed(mv_chan->saved_config_reg,
1163 			       XOR_CONFIG(mv_chan));
1164 		writel_relaxed(mv_chan->saved_int_mask_reg,
1165 			       XOR_INTR_MASK(mv_chan));
1166 	}
1167 
1168 	if (xordev->xor_type == XOR_ARMADA_37XX) {
1169 		mv_xor_conf_mbus_windows_a3700(xordev);
1170 		return 0;
1171 	}
1172 
1173 	dram = mv_mbus_dram_info();
1174 	if (dram)
1175 		mv_xor_conf_mbus_windows(xordev, dram);
1176 
1177 	return 0;
1178 }
1179 
1180 static const struct of_device_id mv_xor_dt_ids[] = {
1181 	{ .compatible = "marvell,orion-xor", .data = (void *)XOR_ORION },
1182 	{ .compatible = "marvell,armada-380-xor", .data = (void *)XOR_ARMADA_38X },
1183 	{ .compatible = "marvell,armada-3700-xor", .data = (void *)XOR_ARMADA_37XX },
1184 	{},
1185 };
1186 
1187 static unsigned int mv_xor_engine_count;
1188 
1189 static int mv_xor_probe(struct platform_device *pdev)
1190 {
1191 	const struct mbus_dram_target_info *dram;
1192 	struct mv_xor_device *xordev;
1193 	struct mv_xor_platform_data *pdata = dev_get_platdata(&pdev->dev);
1194 	struct resource *res;
1195 	unsigned int max_engines, max_channels;
1196 	int i, ret;
1197 
1198 	dev_notice(&pdev->dev, "Marvell shared XOR driver\n");
1199 
1200 	xordev = devm_kzalloc(&pdev->dev, sizeof(*xordev), GFP_KERNEL);
1201 	if (!xordev)
1202 		return -ENOMEM;
1203 
1204 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1205 	if (!res)
1206 		return -ENODEV;
1207 
1208 	xordev->xor_base = devm_ioremap(&pdev->dev, res->start,
1209 					resource_size(res));
1210 	if (!xordev->xor_base)
1211 		return -EBUSY;
1212 
1213 	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
1214 	if (!res)
1215 		return -ENODEV;
1216 
1217 	xordev->xor_high_base = devm_ioremap(&pdev->dev, res->start,
1218 					     resource_size(res));
1219 	if (!xordev->xor_high_base)
1220 		return -EBUSY;
1221 
1222 	platform_set_drvdata(pdev, xordev);
1223 
1224 
1225 	/*
1226 	 * We need to know which type of XOR device we use before
1227 	 * setting it up. In the non-DT case it can only be the legacy one.
1228 	 */
1229 	xordev->xor_type = XOR_ORION;
1230 	if (pdev->dev.of_node) {
1231 		const struct of_device_id *of_id =
1232 			of_match_device(mv_xor_dt_ids,
1233 					&pdev->dev);
1234 
1235 		xordev->xor_type = (uintptr_t)of_id->data;
1236 	}
1237 
1238 	/*
1239 	 * (Re-)program MBUS remapping windows if we are asked to.
1240 	 */
1241 	if (xordev->xor_type == XOR_ARMADA_37XX) {
1242 		mv_xor_conf_mbus_windows_a3700(xordev);
1243 	} else {
1244 		dram = mv_mbus_dram_info();
1245 		if (dram)
1246 			mv_xor_conf_mbus_windows(xordev, dram);
1247 	}
1248 
1249 	/* Not all platforms can gate the clock, so it is not
1250 	 * an error if the clock does not exist.
1251 	 */
1252 	xordev->clk = clk_get(&pdev->dev, NULL);
1253 	if (!IS_ERR(xordev->clk))
1254 		clk_prepare_enable(xordev->clk);
1255 
1256 	/*
1257 	 * We don't want to have more than one channel per CPU in
1258 	 * order for async_tx to perform well. So we limit the number
1259 	 * of engines and channels so that we take into account this
1260 	 * constraint. Note that we also want to use channels from
1261 	 * separate engines when possible.  For the dual-CPU Armada 3700
1262 	 * SoC, which has a single XOR engine, allow using both channels.
1263 	 */
1264 	max_engines = num_present_cpus();
1265 	if (xordev->xor_type == XOR_ARMADA_37XX)
1266 		max_channels =	num_present_cpus();
1267 	else
1268 		max_channels = min_t(unsigned int,
1269 				     MV_XOR_MAX_CHANNELS,
1270 				     DIV_ROUND_UP(num_present_cpus(), 2));
1271 
1272 	if (mv_xor_engine_count >= max_engines)
1273 		return 0;
1274 
1275 	if (pdev->dev.of_node) {
1276 		struct device_node *np;
1277 		int i = 0;
1278 
1279 		for_each_child_of_node(pdev->dev.of_node, np) {
1280 			struct mv_xor_chan *chan;
1281 			dma_cap_mask_t cap_mask;
1282 			int irq;
1283 
1284 			if (i >= max_channels)
1285 				continue;
1286 
1287 			dma_cap_zero(cap_mask);
1288 			dma_cap_set(DMA_MEMCPY, cap_mask);
1289 			dma_cap_set(DMA_XOR, cap_mask);
1290 			dma_cap_set(DMA_INTERRUPT, cap_mask);
1291 
1292 			irq = irq_of_parse_and_map(np, 0);
1293 			if (!irq) {
1294 				ret = -ENODEV;
1295 				goto err_channel_add;
1296 			}
1297 
1298 			chan = mv_xor_channel_add(xordev, pdev, i,
1299 						  cap_mask, irq);
1300 			if (IS_ERR(chan)) {
1301 				ret = PTR_ERR(chan);
1302 				irq_dispose_mapping(irq);
1303 				goto err_channel_add;
1304 			}
1305 
1306 			xordev->channels[i] = chan;
1307 			i++;
1308 		}
1309 	} else if (pdata && pdata->channels) {
1310 		for (i = 0; i < max_channels; i++) {
1311 			struct mv_xor_channel_data *cd;
1312 			struct mv_xor_chan *chan;
1313 			int irq;
1314 
1315 			cd = &pdata->channels[i];
1316 			if (!cd) {
1317 				ret = -ENODEV;
1318 				goto err_channel_add;
1319 			}
1320 
1321 			irq = platform_get_irq(pdev, i);
1322 			if (irq < 0) {
1323 				ret = irq;
1324 				goto err_channel_add;
1325 			}
1326 
1327 			chan = mv_xor_channel_add(xordev, pdev, i,
1328 						  cd->cap_mask, irq);
1329 			if (IS_ERR(chan)) {
1330 				ret = PTR_ERR(chan);
1331 				goto err_channel_add;
1332 			}
1333 
1334 			xordev->channels[i] = chan;
1335 		}
1336 	}
1337 
1338 	return 0;
1339 
1340 err_channel_add:
1341 	for (i = 0; i < MV_XOR_MAX_CHANNELS; i++)
1342 		if (xordev->channels[i]) {
1343 			mv_xor_channel_remove(xordev->channels[i]);
1344 			if (pdev->dev.of_node)
1345 				irq_dispose_mapping(xordev->channels[i]->irq);
1346 		}
1347 
1348 	if (!IS_ERR(xordev->clk)) {
1349 		clk_disable_unprepare(xordev->clk);
1350 		clk_put(xordev->clk);
1351 	}
1352 
1353 	return ret;
1354 }
1355 
1356 static struct platform_driver mv_xor_driver = {
1357 	.probe		= mv_xor_probe,
1358 	.suspend        = mv_xor_suspend,
1359 	.resume         = mv_xor_resume,
1360 	.driver		= {
1361 		.name	        = MV_XOR_NAME,
1362 		.of_match_table = of_match_ptr(mv_xor_dt_ids),
1363 	},
1364 };
1365 
1366 
1367 static int __init mv_xor_init(void)
1368 {
1369 	return platform_driver_register(&mv_xor_driver);
1370 }
1371 device_initcall(mv_xor_init);
1372 
1373 /*
1374 MODULE_AUTHOR("Saeed Bishara <saeed@marvell.com>");
1375 MODULE_DESCRIPTION("DMA engine driver for Marvell's XOR engine");
1376 MODULE_LICENSE("GPL");
1377 */
1378