xref: /openbmc/linux/drivers/dma/mv_xor.c (revision 88eb92cb)
1 /*
2  * offload engine driver for the Marvell XOR engine
3  * Copyright (C) 2007, 2008, Marvell International Ltd.
4  *
5  * This program is free software; you can redistribute it and/or modify it
6  * under the terms and conditions of the GNU General Public License,
7  * version 2, as published by the Free Software Foundation.
8  *
9  * This program is distributed in the hope it will be useful, but WITHOUT
10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
12  * more details.
13  *
14  * You should have received a copy of the GNU General Public License along with
15  * this program; if not, write to the Free Software Foundation, Inc.,
16  * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
17  */
18 
19 #include <linux/init.h>
20 #include <linux/module.h>
21 #include <linux/slab.h>
22 #include <linux/delay.h>
23 #include <linux/dma-mapping.h>
24 #include <linux/spinlock.h>
25 #include <linux/interrupt.h>
26 #include <linux/platform_device.h>
27 #include <linux/memory.h>
28 #include <linux/clk.h>
29 #include <linux/platform_data/dma-mv_xor.h>
30 
31 #include "dmaengine.h"
32 #include "mv_xor.h"
33 
34 static void mv_xor_issue_pending(struct dma_chan *chan);
35 
36 #define to_mv_xor_chan(chan)		\
37 	container_of(chan, struct mv_xor_chan, dmachan)
38 
39 #define to_mv_xor_slot(tx)		\
40 	container_of(tx, struct mv_xor_desc_slot, async_tx)
41 
42 #define mv_chan_to_devp(chan)           \
43 	((chan)->dmadev.dev)
44 
45 static void mv_desc_init(struct mv_xor_desc_slot *desc, unsigned long flags)
46 {
47 	struct mv_xor_desc *hw_desc = desc->hw_desc;
48 
49 	hw_desc->status = (1 << 31);
50 	hw_desc->phy_next_desc = 0;
51 	hw_desc->desc_command = (1 << 31);
52 }
53 
54 static u32 mv_desc_get_dest_addr(struct mv_xor_desc_slot *desc)
55 {
56 	struct mv_xor_desc *hw_desc = desc->hw_desc;
57 	return hw_desc->phy_dest_addr;
58 }
59 
60 static u32 mv_desc_get_src_addr(struct mv_xor_desc_slot *desc,
61 				int src_idx)
62 {
63 	struct mv_xor_desc *hw_desc = desc->hw_desc;
64 	return hw_desc->phy_src_addr[src_idx];
65 }
66 
67 
68 static void mv_desc_set_byte_count(struct mv_xor_desc_slot *desc,
69 				   u32 byte_count)
70 {
71 	struct mv_xor_desc *hw_desc = desc->hw_desc;
72 	hw_desc->byte_count = byte_count;
73 }
74 
75 static void mv_desc_set_next_desc(struct mv_xor_desc_slot *desc,
76 				  u32 next_desc_addr)
77 {
78 	struct mv_xor_desc *hw_desc = desc->hw_desc;
79 	BUG_ON(hw_desc->phy_next_desc);
80 	hw_desc->phy_next_desc = next_desc_addr;
81 }
82 
83 static void mv_desc_clear_next_desc(struct mv_xor_desc_slot *desc)
84 {
85 	struct mv_xor_desc *hw_desc = desc->hw_desc;
86 	hw_desc->phy_next_desc = 0;
87 }
88 
89 static void mv_desc_set_block_fill_val(struct mv_xor_desc_slot *desc, u32 val)
90 {
91 	desc->value = val;
92 }
93 
94 static void mv_desc_set_dest_addr(struct mv_xor_desc_slot *desc,
95 				  dma_addr_t addr)
96 {
97 	struct mv_xor_desc *hw_desc = desc->hw_desc;
98 	hw_desc->phy_dest_addr = addr;
99 }
100 
101 static int mv_chan_memset_slot_count(size_t len)
102 {
103 	return 1;
104 }
105 
106 #define mv_chan_memcpy_slot_count(c) mv_chan_memset_slot_count(c)
107 
108 static void mv_desc_set_src_addr(struct mv_xor_desc_slot *desc,
109 				 int index, dma_addr_t addr)
110 {
111 	struct mv_xor_desc *hw_desc = desc->hw_desc;
112 	hw_desc->phy_src_addr[index] = addr;
113 	if (desc->type == DMA_XOR)
114 		hw_desc->desc_command |= (1 << index);
115 }
116 
117 static u32 mv_chan_get_current_desc(struct mv_xor_chan *chan)
118 {
119 	return __raw_readl(XOR_CURR_DESC(chan));
120 }
121 
122 static void mv_chan_set_next_descriptor(struct mv_xor_chan *chan,
123 					u32 next_desc_addr)
124 {
125 	__raw_writel(next_desc_addr, XOR_NEXT_DESC(chan));
126 }
127 
128 static void mv_chan_set_dest_pointer(struct mv_xor_chan *chan, u32 desc_addr)
129 {
130 	__raw_writel(desc_addr, XOR_DEST_POINTER(chan));
131 }
132 
133 static void mv_chan_set_block_size(struct mv_xor_chan *chan, u32 block_size)
134 {
135 	__raw_writel(block_size, XOR_BLOCK_SIZE(chan));
136 }
137 
138 static void mv_chan_set_value(struct mv_xor_chan *chan, u32 value)
139 {
140 	__raw_writel(value, XOR_INIT_VALUE_LOW(chan));
141 	__raw_writel(value, XOR_INIT_VALUE_HIGH(chan));
142 }
143 
144 static void mv_chan_unmask_interrupts(struct mv_xor_chan *chan)
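/*
 * Each XOR channel owns a 16-bit slice of the shared interrupt mask and
 * cause registers, selected by chan->idx.  The helpers below shift their
 * masks accordingly; within a slice, bits 4..9 report error conditions
 * (see mv_is_err_intr()).
 */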
145 {
146 	u32 val = __raw_readl(XOR_INTR_MASK(chan));
147 	val |= XOR_INTR_MASK_VALUE << (chan->idx * 16);
148 	__raw_writel(val, XOR_INTR_MASK(chan));
149 }
150 
151 static u32 mv_chan_get_intr_cause(struct mv_xor_chan *chan)
152 {
153 	u32 intr_cause = __raw_readl(XOR_INTR_CAUSE(chan));
154 	intr_cause = (intr_cause >> (chan->idx * 16)) & 0xFFFF;
155 	return intr_cause;
156 }
157 
158 static int mv_is_err_intr(u32 intr_cause)
159 {
160 	if (intr_cause & ((1<<4)|(1<<5)|(1<<6)|(1<<7)|(1<<8)|(1<<9)))
161 		return 1;
162 
163 	return 0;
164 }
165 
166 static void mv_xor_device_clear_eoc_cause(struct mv_xor_chan *chan)
167 {
168 	u32 val = ~(1 << (chan->idx * 16));
169 	dev_dbg(mv_chan_to_devp(chan), "%s, val 0x%08x\n", __func__, val);
170 	__raw_writel(val, XOR_INTR_CAUSE(chan));
171 }
172 
173 static void mv_xor_device_clear_err_status(struct mv_xor_chan *chan)
174 {
175 	u32 val = 0xFFFF0000 >> (chan->idx * 16);
176 	__raw_writel(val, XOR_INTR_CAUSE(chan));
177 }
178 
179 static int mv_can_chain(struct mv_xor_desc_slot *desc)
180 {
181 	struct mv_xor_desc_slot *chain_old_tail = list_entry(
182 		desc->chain_node.prev, struct mv_xor_desc_slot, chain_node);
183 
184 	if (chain_old_tail->type != desc->type)
185 		return 0;
186 	if (desc->type == DMA_MEMSET)
187 		return 0;
188 
189 	return 1;
190 }
191 
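/*
 * The operation mode lives in the low three bits of the per-channel
 * config register and must match the type of the descriptor chain that
 * is about to run; mv_xor_start_new_chain() switches it whenever the
 * descriptor type changes.
 */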
192 static void mv_set_mode(struct mv_xor_chan *chan,
193 			       enum dma_transaction_type type)
194 {
195 	u32 op_mode;
196 	u32 config = __raw_readl(XOR_CONFIG(chan));
197 
198 	switch (type) {
199 	case DMA_XOR:
200 		op_mode = XOR_OPERATION_MODE_XOR;
201 		break;
202 	case DMA_MEMCPY:
203 		op_mode = XOR_OPERATION_MODE_MEMCPY;
204 		break;
205 	case DMA_MEMSET:
206 		op_mode = XOR_OPERATION_MODE_MEMSET;
207 		break;
208 	default:
209 		dev_err(mv_chan_to_devp(chan),
210 			"error: unsupported operation %d.\n",
211 			type);
212 		BUG();
213 		return;
214 	}
215 
216 	config &= ~0x7;
217 	config |= op_mode;
218 	__raw_writel(config, XOR_CONFIG(chan));
219 	chan->current_type = type;
220 }
221 
222 static void mv_chan_activate(struct mv_xor_chan *chan)
223 {
224 	u32 activation;
225 
226 	dev_dbg(mv_chan_to_devp(chan), " activate chan.\n");
227 	activation = __raw_readl(XOR_ACTIVATION(chan));
228 	activation |= 0x1;
229 	__raw_writel(activation, XOR_ACTIVATION(chan));
230 }
231 
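/*
 * Channel state is reported in bits [5:4] of the activation register;
 * this driver treats a value of 1 there as "transfer in progress".
 */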
232 static char mv_chan_is_busy(struct mv_xor_chan *chan)
233 {
234 	u32 state = __raw_readl(XOR_ACTIVATION(chan));
235 
236 	state = (state >> 4) & 0x3;
237 
238 	return (state == 1) ? 1 : 0;
239 }
240 
241 static int mv_chan_xor_slot_count(size_t len, int src_cnt)
242 {
243 	return 1;
244 }
245 
246 /**
247  * mv_xor_free_slots - flags descriptor slots for reuse
248  * @slot: Slot to free
249  * Caller must hold &mv_chan->lock while calling this function
250  */
251 static void mv_xor_free_slots(struct mv_xor_chan *mv_chan,
252 			      struct mv_xor_desc_slot *slot)
253 {
254 	dev_dbg(mv_chan_to_devp(mv_chan), "%s %d slot %p\n",
255 		__func__, __LINE__, slot);
256 
257 	slot->slots_per_op = 0;
258 
259 }
260 
261 /*
262  * mv_xor_start_new_chain - program the engine to operate on a new chain
263  * headed by sw_desc
264  * Caller must hold &mv_chan->lock while calling this function
265  */
266 static void mv_xor_start_new_chain(struct mv_xor_chan *mv_chan,
267 				   struct mv_xor_desc_slot *sw_desc)
268 {
269 	dev_dbg(mv_chan_to_devp(mv_chan), "%s %d: sw_desc %p\n",
270 		__func__, __LINE__, sw_desc);
271 	if (sw_desc->type != mv_chan->current_type)
272 		mv_set_mode(mv_chan, sw_desc->type);
273 
274 	if (sw_desc->type == DMA_MEMSET) {
275 		/* for memset requests we program the engine directly;
276 		 * no descriptors are used.
277 		 */
278 		struct mv_xor_desc *hw_desc = sw_desc->hw_desc;
279 		mv_chan_set_dest_pointer(mv_chan, hw_desc->phy_dest_addr);
280 		mv_chan_set_block_size(mv_chan, sw_desc->unmap_len);
281 		mv_chan_set_value(mv_chan, sw_desc->value);
282 	} else {
283 		/* set the hardware chain */
284 		mv_chan_set_next_descriptor(mv_chan, sw_desc->async_tx.phys);
285 	}
286 	mv_chan->pending += sw_desc->slot_cnt;
287 	mv_xor_issue_pending(&mv_chan->dmachan);
288 }
289 
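/*
 * Complete one descriptor: invoke the client callback, unmap the source
 * and destination pages unless the submitter set the
 * DMA_COMPL_SKIP_*_UNMAP flags, then kick any dependent transactions.
 * Returns the descriptor's cookie, when it carries one, so the caller
 * can advance completed_cookie.
 */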
290 static dma_cookie_t
291 mv_xor_run_tx_complete_actions(struct mv_xor_desc_slot *desc,
292 	struct mv_xor_chan *mv_chan, dma_cookie_t cookie)
293 {
294 	BUG_ON(desc->async_tx.cookie < 0);
295 
296 	if (desc->async_tx.cookie > 0) {
297 		cookie = desc->async_tx.cookie;
298 
299 		/* call the callback (must not sleep or submit new
300 		 * operations to this channel)
301 		 */
302 		if (desc->async_tx.callback)
303 			desc->async_tx.callback(
304 				desc->async_tx.callback_param);
305 
306 		/* unmap dma addresses
307 		 * (unmap_single vs unmap_page?)
308 		 */
309 		if (desc->group_head && desc->unmap_len) {
310 			struct mv_xor_desc_slot *unmap = desc->group_head;
311 			struct device *dev = mv_chan_to_devp(mv_chan);
312 			u32 len = unmap->unmap_len;
313 			enum dma_ctrl_flags flags = desc->async_tx.flags;
314 			u32 src_cnt;
315 			dma_addr_t addr;
316 			dma_addr_t dest;
317 
318 			src_cnt = unmap->unmap_src_cnt;
319 			dest = mv_desc_get_dest_addr(unmap);
320 			if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
321 				enum dma_data_direction dir;
322 
323 				if (src_cnt > 1) /* is xor ? */
324 					dir = DMA_BIDIRECTIONAL;
325 				else
326 					dir = DMA_FROM_DEVICE;
327 				dma_unmap_page(dev, dest, len, dir);
328 			}
329 
330 			if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
331 				while (src_cnt--) {
332 					addr = mv_desc_get_src_addr(unmap,
333 								    src_cnt);
334 					if (addr == dest)
335 						continue;
336 					dma_unmap_page(dev, addr, len,
337 						       DMA_TO_DEVICE);
338 				}
339 			}
340 			desc->group_head = NULL;
341 		}
342 	}
343 
344 	/* run dependent operations */
345 	dma_run_dependencies(&desc->async_tx);
346 
347 	return cookie;
348 }
349 
350 static int
351 mv_xor_clean_completed_slots(struct mv_xor_chan *mv_chan)
352 {
353 	struct mv_xor_desc_slot *iter, *_iter;
354 
355 	dev_dbg(mv_chan_to_devp(mv_chan), "%s %d\n", __func__, __LINE__);
356 	list_for_each_entry_safe(iter, _iter, &mv_chan->completed_slots,
357 				 completed_node) {
358 
359 		if (async_tx_test_ack(&iter->async_tx)) {
360 			list_del(&iter->completed_node);
361 			mv_xor_free_slots(mv_chan, iter);
362 		}
363 	}
364 	return 0;
365 }
366 
367 static int
368 mv_xor_clean_slot(struct mv_xor_desc_slot *desc,
369 	struct mv_xor_chan *mv_chan)
370 {
371 	dev_dbg(mv_chan_to_devp(mv_chan), "%s %d: desc %p flags %d\n",
372 		__func__, __LINE__, desc, desc->async_tx.flags);
373 	list_del(&desc->chain_node);
374 	/* the client is allowed to attach dependent operations
375 	 * until 'ack' is set
376 	 */
377 	if (!async_tx_test_ack(&desc->async_tx)) {
378 		/* move this slot to the completed_slots */
379 		list_add_tail(&desc->completed_node, &mv_chan->completed_slots);
380 		return 0;
381 	}
382 
383 	mv_xor_free_slots(mv_chan, desc);
384 	return 0;
385 }
386 
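/*
 * Walk the channel's chain from the oldest descriptor and complete
 * everything the hardware has already moved past.  The walk stops at the
 * descriptor currently loaded in the engine (completing it only if the
 * engine is idle).  If the engine has gone idle while work is still
 * chained, it is restarted from the chain head.
 */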
387 static void __mv_xor_slot_cleanup(struct mv_xor_chan *mv_chan)
388 {
389 	struct mv_xor_desc_slot *iter, *_iter;
390 	dma_cookie_t cookie = 0;
391 	int busy = mv_chan_is_busy(mv_chan);
392 	u32 current_desc = mv_chan_get_current_desc(mv_chan);
393 	int seen_current = 0;
394 
395 	dev_dbg(mv_chan_to_devp(mv_chan), "%s %d\n", __func__, __LINE__);
396 	dev_dbg(mv_chan_to_devp(mv_chan), "current_desc %x\n", current_desc);
397 	mv_xor_clean_completed_slots(mv_chan);
398 
399 	/* free completed slots from the chain starting with
400 	 * the oldest descriptor
401 	 */
402 
403 	list_for_each_entry_safe(iter, _iter, &mv_chan->chain,
404 					chain_node) {
405 		prefetch(_iter);
406 		prefetch(&_iter->async_tx);
407 
408 		/* do not advance past the current descriptor loaded into the
409 		 * hardware channel; subsequent descriptors are either in
410 		 * progress or have not been submitted
411 		 */
412 		if (seen_current)
413 			break;
414 
415 		/* stop the search if we reach the current descriptor and the
416 		 * channel is busy
417 		 */
418 		if (iter->async_tx.phys == current_desc) {
419 			seen_current = 1;
420 			if (busy)
421 				break;
422 		}
423 
424 		cookie = mv_xor_run_tx_complete_actions(iter, mv_chan, cookie);
425 
426 		if (mv_xor_clean_slot(iter, mv_chan))
427 			break;
428 	}
429 
430 	if ((busy == 0) && !list_empty(&mv_chan->chain)) {
431 		struct mv_xor_desc_slot *chain_head;
432 		chain_head = list_entry(mv_chan->chain.next,
433 					struct mv_xor_desc_slot,
434 					chain_node);
435 
436 		mv_xor_start_new_chain(mv_chan, chain_head);
437 	}
438 
439 	if (cookie > 0)
440 		mv_chan->dmachan.completed_cookie = cookie;
441 }
442 
443 static void
444 mv_xor_slot_cleanup(struct mv_xor_chan *mv_chan)
445 {
446 	spin_lock_bh(&mv_chan->lock);
447 	__mv_xor_slot_cleanup(mv_chan);
448 	spin_unlock_bh(&mv_chan->lock);
449 }
450 
451 static void mv_xor_tasklet(unsigned long data)
452 {
453 	struct mv_xor_chan *chan = (struct mv_xor_chan *) data;
454 	mv_xor_slot_cleanup(chan);
455 }
456 
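/*
 * Software descriptor slots were carved out of the coherent descriptor
 * pool by mv_xor_alloc_chan_resources(); a slot with a non-zero
 * slots_per_op is considered in use (mv_xor_free_slots() clears it).
 * The search below is two-pass: first from last_used, then from the
 * head of all_slots.
 */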
457 static struct mv_xor_desc_slot *
458 mv_xor_alloc_slots(struct mv_xor_chan *mv_chan, int num_slots,
459 		    int slots_per_op)
460 {
461 	struct mv_xor_desc_slot *iter, *_iter, *alloc_start = NULL;
462 	LIST_HEAD(chain);
463 	int slots_found, retry = 0;
464 
465 	/* start search from the last allocated descriptor;
466 	 * if a contiguous allocation cannot be found, start searching
467 	 * from the beginning of the list
468 	 */
469 retry:
470 	slots_found = 0;
471 	if (retry == 0)
472 		iter = mv_chan->last_used;
473 	else
474 		iter = list_entry(&mv_chan->all_slots,
475 			struct mv_xor_desc_slot,
476 			slot_node);
477 
478 	list_for_each_entry_safe_continue(
479 		iter, _iter, &mv_chan->all_slots, slot_node) {
480 		prefetch(_iter);
481 		prefetch(&_iter->async_tx);
482 		if (iter->slots_per_op) {
483 			/* give up after finding the first busy slot
484 			 * on the second pass through the list
485 			 */
486 			if (retry)
487 				break;
488 
489 			slots_found = 0;
490 			continue;
491 		}
492 
493 		/* start the allocation if the slot is correctly aligned */
494 		if (!slots_found++)
495 			alloc_start = iter;
496 
497 		if (slots_found == num_slots) {
498 			struct mv_xor_desc_slot *alloc_tail = NULL;
499 			struct mv_xor_desc_slot *last_used = NULL;
500 			iter = alloc_start;
501 			while (num_slots) {
502 				int i;
503 
504 				/* pre-ack all but the last descriptor */
505 				async_tx_ack(&iter->async_tx);
506 
507 				list_add_tail(&iter->chain_node, &chain);
508 				alloc_tail = iter;
509 				iter->async_tx.cookie = 0;
510 				iter->slot_cnt = num_slots;
511 				iter->xor_check_result = NULL;
512 				for (i = 0; i < slots_per_op; i++) {
513 					iter->slots_per_op = slots_per_op - i;
514 					last_used = iter;
515 					iter = list_entry(iter->slot_node.next,
516 						struct mv_xor_desc_slot,
517 						slot_node);
518 				}
519 				num_slots -= slots_per_op;
520 			}
521 			alloc_tail->group_head = alloc_start;
522 			alloc_tail->async_tx.cookie = -EBUSY;
523 			list_splice(&chain, &alloc_tail->tx_list);
524 			mv_chan->last_used = last_used;
525 			mv_desc_clear_next_desc(alloc_start);
526 			mv_desc_clear_next_desc(alloc_tail);
527 			return alloc_tail;
528 		}
529 	}
530 	if (!retry++)
531 		goto retry;
532 
533 	/* try to free some slots if the allocation fails */
534 	tasklet_schedule(&mv_chan->irq_tasklet);
535 
536 	return NULL;
537 }
538 
539 /************************ DMA engine API functions ****************************/
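/*
 * tx_submit appends the prepared descriptors to the software chain under
 * the channel lock.  The hardware is kicked immediately only when the
 * chain was empty, or when the engine has already finished the previous
 * tail; otherwise the new descriptors are linked behind the current tail
 * (when chainable) and picked up by the cleanup path later.
 */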
540 static dma_cookie_t
541 mv_xor_tx_submit(struct dma_async_tx_descriptor *tx)
542 {
543 	struct mv_xor_desc_slot *sw_desc = to_mv_xor_slot(tx);
544 	struct mv_xor_chan *mv_chan = to_mv_xor_chan(tx->chan);
545 	struct mv_xor_desc_slot *grp_start, *old_chain_tail;
546 	dma_cookie_t cookie;
547 	int new_hw_chain = 1;
548 
549 	dev_dbg(mv_chan_to_devp(mv_chan),
550 		"%s sw_desc %p: async_tx %p\n",
551 		__func__, sw_desc, &sw_desc->async_tx);
552 
553 	grp_start = sw_desc->group_head;
554 
555 	spin_lock_bh(&mv_chan->lock);
556 	cookie = dma_cookie_assign(tx);
557 
558 	if (list_empty(&mv_chan->chain))
559 		list_splice_init(&sw_desc->tx_list, &mv_chan->chain);
560 	else {
561 		new_hw_chain = 0;
562 
563 		old_chain_tail = list_entry(mv_chan->chain.prev,
564 					    struct mv_xor_desc_slot,
565 					    chain_node);
566 		list_splice_init(&grp_start->tx_list,
567 				 &old_chain_tail->chain_node);
568 
569 		if (!mv_can_chain(grp_start))
570 			goto submit_done;
571 
572 		dev_dbg(mv_chan_to_devp(mv_chan), "Append to last desc %x\n",
573 			old_chain_tail->async_tx.phys);
574 
575 		/* fix up the hardware chain */
576 		mv_desc_set_next_desc(old_chain_tail, grp_start->async_tx.phys);
577 
578 		/* if the channel is not busy */
579 		if (!mv_chan_is_busy(mv_chan)) {
580 			u32 current_desc = mv_chan_get_current_desc(mv_chan);
581 			/*
582 			 * and the current descriptor is the end of the chain
583 			 * before the append, then we need to start the channel
584 			 */
585 			if (current_desc == old_chain_tail->async_tx.phys)
586 				new_hw_chain = 1;
587 		}
588 	}
589 
590 	if (new_hw_chain)
591 		mv_xor_start_new_chain(mv_chan, grp_start);
592 
593 submit_done:
594 	spin_unlock_bh(&mv_chan->lock);
595 
596 	return cookie;
597 }
598 
599 /* returns the number of allocated descriptors */
600 static int mv_xor_alloc_chan_resources(struct dma_chan *chan)
601 {
602 	char *hw_desc;
603 	int idx;
604 	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
605 	struct mv_xor_desc_slot *slot = NULL;
606 	int num_descs_in_pool = MV_XOR_POOL_SIZE/MV_XOR_SLOT_SIZE;
607 
608 	/* Allocate descriptor slots */
609 	idx = mv_chan->slots_allocated;
610 	while (idx < num_descs_in_pool) {
611 		slot = kzalloc(sizeof(*slot), GFP_KERNEL);
612 		if (!slot) {
613 			printk(KERN_INFO "MV XOR Channel only initialized"
614 				" %d descriptor slots\n", idx);
615 			break;
616 		}
617 		hw_desc = (char *) mv_chan->dma_desc_pool_virt;
618 		slot->hw_desc = (void *) &hw_desc[idx * MV_XOR_SLOT_SIZE];
619 
620 		dma_async_tx_descriptor_init(&slot->async_tx, chan);
621 		slot->async_tx.tx_submit = mv_xor_tx_submit;
622 		INIT_LIST_HEAD(&slot->chain_node);
623 		INIT_LIST_HEAD(&slot->slot_node);
624 		INIT_LIST_HEAD(&slot->tx_list);
625 		hw_desc = (char *) mv_chan->dma_desc_pool;
626 		slot->async_tx.phys =
627 			(dma_addr_t) &hw_desc[idx * MV_XOR_SLOT_SIZE];
628 		slot->idx = idx++;
629 
630 		spin_lock_bh(&mv_chan->lock);
631 		mv_chan->slots_allocated = idx;
632 		list_add_tail(&slot->slot_node, &mv_chan->all_slots);
633 		spin_unlock_bh(&mv_chan->lock);
634 	}
635 
636 	if (mv_chan->slots_allocated && !mv_chan->last_used)
637 		mv_chan->last_used = list_entry(mv_chan->all_slots.next,
638 					struct mv_xor_desc_slot,
639 					slot_node);
640 
641 	dev_dbg(mv_chan_to_devp(mv_chan),
642 		"allocated %d descriptor slots last_used: %p\n",
643 		mv_chan->slots_allocated, mv_chan->last_used);
644 
645 	return mv_chan->slots_allocated ? : -ENOMEM;
646 }
647 
648 static struct dma_async_tx_descriptor *
649 mv_xor_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
650 		size_t len, unsigned long flags)
651 {
652 	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
653 	struct mv_xor_desc_slot *sw_desc, *grp_start;
654 	int slot_cnt;
655 
656 	dev_dbg(mv_chan_to_devp(mv_chan),
657 		"%s dest: %x src %x len: %u flags: %ld\n",
658 		__func__, dest, src, len, flags);
659 	if (unlikely(len < MV_XOR_MIN_BYTE_COUNT))
660 		return NULL;
661 
662 	BUG_ON(len > MV_XOR_MAX_BYTE_COUNT);
663 
664 	spin_lock_bh(&mv_chan->lock);
665 	slot_cnt = mv_chan_memcpy_slot_count(len);
666 	sw_desc = mv_xor_alloc_slots(mv_chan, slot_cnt, 1);
667 	if (sw_desc) {
668 		sw_desc->type = DMA_MEMCPY;
669 		sw_desc->async_tx.flags = flags;
670 		grp_start = sw_desc->group_head;
671 		mv_desc_init(grp_start, flags);
672 		mv_desc_set_byte_count(grp_start, len);
673 		mv_desc_set_dest_addr(sw_desc->group_head, dest);
674 		mv_desc_set_src_addr(grp_start, 0, src);
675 		sw_desc->unmap_src_cnt = 1;
676 		sw_desc->unmap_len = len;
677 	}
678 	spin_unlock_bh(&mv_chan->lock);
679 
680 	dev_dbg(mv_chan_to_devp(mv_chan),
681 		"%s sw_desc %p async_tx %p\n",
682 		__func__, sw_desc, sw_desc ? &sw_desc->async_tx : NULL);
683 
684 	return sw_desc ? &sw_desc->async_tx : NULL;
685 }
686 
687 static struct dma_async_tx_descriptor *
688 mv_xor_prep_dma_memset(struct dma_chan *chan, dma_addr_t dest, int value,
689 		       size_t len, unsigned long flags)
690 {
691 	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
692 	struct mv_xor_desc_slot *sw_desc, *grp_start;
693 	int slot_cnt;
694 
695 	dev_dbg(mv_chan_to_devp(mv_chan),
696 		"%s dest: %x len: %u flags: %ld\n",
697 		__func__, dest, len, flags);
698 	if (unlikely(len < MV_XOR_MIN_BYTE_COUNT))
699 		return NULL;
700 
701 	BUG_ON(len > MV_XOR_MAX_BYTE_COUNT);
702 
703 	spin_lock_bh(&mv_chan->lock);
704 	slot_cnt = mv_chan_memset_slot_count(len);
705 	sw_desc = mv_xor_alloc_slots(mv_chan, slot_cnt, 1);
706 	if (sw_desc) {
707 		sw_desc->type = DMA_MEMSET;
708 		sw_desc->async_tx.flags = flags;
709 		grp_start = sw_desc->group_head;
710 		mv_desc_init(grp_start, flags);
711 		mv_desc_set_byte_count(grp_start, len);
712 		mv_desc_set_dest_addr(sw_desc->group_head, dest);
713 		mv_desc_set_block_fill_val(grp_start, value);
714 		sw_desc->unmap_src_cnt = 1;
715 		sw_desc->unmap_len = len;
716 	}
717 	spin_unlock_bh(&mv_chan->lock);
718 	dev_dbg(mv_chan_to_devp(mv_chan),
719 		"%s sw_desc %p async_tx %p\n",
720 		__func__, sw_desc, sw_desc ? &sw_desc->async_tx : NULL);
721 	return sw_desc ? &sw_desc->async_tx : NULL;
722 }
723 
724 static struct dma_async_tx_descriptor *
725 mv_xor_prep_dma_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
726 		    unsigned int src_cnt, size_t len, unsigned long flags)
727 {
728 	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
729 	struct mv_xor_desc_slot *sw_desc, *grp_start;
730 	int slot_cnt;
731 
732 	if (unlikely(len < MV_XOR_MIN_BYTE_COUNT))
733 		return NULL;
734 
735 	BUG_ON(len > MV_XOR_MAX_BYTE_COUNT);
736 
737 	dev_dbg(mv_chan_to_devp(mv_chan),
738 		"%s src_cnt: %d len: %u dest %x flags: %ld\n",
739 		__func__, src_cnt, len, dest, flags);
740 
741 	spin_lock_bh(&mv_chan->lock);
742 	slot_cnt = mv_chan_xor_slot_count(len, src_cnt);
743 	sw_desc = mv_xor_alloc_slots(mv_chan, slot_cnt, 1);
744 	if (sw_desc) {
745 		sw_desc->type = DMA_XOR;
746 		sw_desc->async_tx.flags = flags;
747 		grp_start = sw_desc->group_head;
748 		mv_desc_init(grp_start, flags);
749 		/* the byte count field is the same as in memcpy desc*/
750 		mv_desc_set_byte_count(grp_start, len);
751 		mv_desc_set_dest_addr(sw_desc->group_head, dest);
752 		sw_desc->unmap_src_cnt = src_cnt;
753 		sw_desc->unmap_len = len;
754 		while (src_cnt--)
755 			mv_desc_set_src_addr(grp_start, src_cnt, src[src_cnt]);
756 	}
757 	spin_unlock_bh(&mv_chan->lock);
758 	dev_dbg(mv_chan_to_devp(mv_chan),
759 		"%s sw_desc %p async_tx %p\n",
760 		__func__, sw_desc, sw_desc ? &sw_desc->async_tx : NULL);
761 	return sw_desc ? &sw_desc->async_tx : NULL;
762 }
763 
764 static void mv_xor_free_chan_resources(struct dma_chan *chan)
765 {
766 	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
767 	struct mv_xor_desc_slot *iter, *_iter;
768 	int in_use_descs = 0;
769 
770 	mv_xor_slot_cleanup(mv_chan);
771 
772 	spin_lock_bh(&mv_chan->lock);
773 	list_for_each_entry_safe(iter, _iter, &mv_chan->chain,
774 					chain_node) {
775 		in_use_descs++;
776 		list_del(&iter->chain_node);
777 	}
778 	list_for_each_entry_safe(iter, _iter, &mv_chan->completed_slots,
779 				 completed_node) {
780 		in_use_descs++;
781 		list_del(&iter->completed_node);
782 	}
783 	list_for_each_entry_safe_reverse(
784 		iter, _iter, &mv_chan->all_slots, slot_node) {
785 		list_del(&iter->slot_node);
786 		kfree(iter);
787 		mv_chan->slots_allocated--;
788 	}
789 	mv_chan->last_used = NULL;
790 
791 	dev_dbg(mv_chan_to_devp(mv_chan), "%s slots_allocated %d\n",
792 		__func__, mv_chan->slots_allocated);
793 	spin_unlock_bh(&mv_chan->lock);
794 
795 	if (in_use_descs)
796 		dev_err(mv_chan_to_devp(mv_chan),
797 			"freeing %d in use descriptors!\n", in_use_descs);
798 }
799 
800 /**
801  * mv_xor_status - poll the status of an XOR transaction
802  * @chan: XOR channel handle
803  * @cookie: XOR transaction identifier
804  * @txstate: XOR transactions state holder (or NULL)
805  */
806 static enum dma_status mv_xor_status(struct dma_chan *chan,
807 					  dma_cookie_t cookie,
808 					  struct dma_tx_state *txstate)
809 {
810 	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
811 	enum dma_status ret;
812 
813 	ret = dma_cookie_status(chan, cookie, txstate);
814 	if (ret == DMA_SUCCESS) {
815 		mv_xor_clean_completed_slots(mv_chan);
816 		return ret;
817 	}
818 	mv_xor_slot_cleanup(mv_chan);
819 
820 	return dma_cookie_status(chan, cookie, txstate);
821 }
822 
823 static void mv_dump_xor_regs(struct mv_xor_chan *chan)
824 {
825 	u32 val;
826 
827 	val = __raw_readl(XOR_CONFIG(chan));
828 	dev_err(mv_chan_to_devp(chan),
829 		"config       0x%08x.\n", val);
830 
831 	val = __raw_readl(XOR_ACTIVATION(chan));
832 	dev_err(mv_chan_to_devp(chan),
833 		"activation   0x%08x.\n", val);
834 
835 	val = __raw_readl(XOR_INTR_CAUSE(chan));
836 	dev_err(mv_chan_to_devp(chan),
837 		"intr cause   0x%08x.\n", val);
838 
839 	val = __raw_readl(XOR_INTR_MASK(chan));
840 	dev_err(mv_chan_to_devp(chan),
841 		"intr mask    0x%08x.\n", val);
842 
843 	val = __raw_readl(XOR_ERROR_CAUSE(chan));
844 	dev_err(mv_chan_to_devp(chan),
845 		"error cause  0x%08x.\n", val);
846 
847 	val = __raw_readl(XOR_ERROR_ADDR(chan));
848 	dev_err(mv_chan_to_devp(chan),
849 		"error addr   0x%08x.\n", val);
850 }
851 
852 static void mv_xor_err_interrupt_handler(struct mv_xor_chan *chan,
853 					 u32 intr_cause)
854 {
855 	if (intr_cause & (1 << 4)) {
856 	     dev_dbg(mv_chan_to_devp(chan),
857 		     "ignore this error\n");
858 	     return;
859 	}
860 
861 	dev_err(mv_chan_to_devp(chan),
862 		"error on chan %d. intr cause 0x%08x.\n",
863 		chan->idx, intr_cause);
864 
865 	mv_dump_xor_regs(chan);
866 	BUG();
867 }
868 
869 static irqreturn_t mv_xor_interrupt_handler(int irq, void *data)
870 {
871 	struct mv_xor_chan *chan = data;
872 	u32 intr_cause = mv_chan_get_intr_cause(chan);
873 
874 	dev_dbg(mv_chan_to_devp(chan), "intr cause %x\n", intr_cause);
875 
876 	if (mv_is_err_intr(intr_cause))
877 		mv_xor_err_interrupt_handler(chan, intr_cause);
878 
879 	tasklet_schedule(&chan->irq_tasklet);
880 
881 	mv_xor_device_clear_eoc_cause(chan);
882 
883 	return IRQ_HANDLED;
884 }
885 
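/*
 * Channel activation is batched: mv_xor_start_new_chain() accumulates the
 * number of queued descriptors in mv_chan->pending, and the activation
 * register is only written once that count reaches MV_XOR_THRESHOLD.
 */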
886 static void mv_xor_issue_pending(struct dma_chan *chan)
887 {
888 	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
889 
890 	if (mv_chan->pending >= MV_XOR_THRESHOLD) {
891 		mv_chan->pending = 0;
892 		mv_chan_activate(mv_chan);
893 	}
894 }
895 
896 /*
897  * Perform a transaction to verify the HW works.
898  */
899 #define MV_XOR_TEST_SIZE 2000
900 
901 static int __devinit mv_xor_memcpy_self_test(struct mv_xor_chan *mv_chan)
902 {
903 	int i;
904 	void *src, *dest;
905 	dma_addr_t src_dma, dest_dma;
906 	struct dma_chan *dma_chan;
907 	dma_cookie_t cookie;
908 	struct dma_async_tx_descriptor *tx;
909 	int err = 0;
910 
911 	src = kmalloc(sizeof(u8) * MV_XOR_TEST_SIZE, GFP_KERNEL);
912 	if (!src)
913 		return -ENOMEM;
914 
915 	dest = kzalloc(sizeof(u8) * MV_XOR_TEST_SIZE, GFP_KERNEL);
916 	if (!dest) {
917 		kfree(src);
918 		return -ENOMEM;
919 	}
920 
921 	/* Fill in src buffer */
922 	for (i = 0; i < MV_XOR_TEST_SIZE; i++)
923 		((u8 *) src)[i] = (u8)i;
924 
925 	dma_chan = &mv_chan->dmachan;
926 	if (mv_xor_alloc_chan_resources(dma_chan) < 1) {
927 		err = -ENODEV;
928 		goto out;
929 	}
930 
931 	dest_dma = dma_map_single(dma_chan->device->dev, dest,
932 				  MV_XOR_TEST_SIZE, DMA_FROM_DEVICE);
933 
934 	src_dma = dma_map_single(dma_chan->device->dev, src,
935 				 MV_XOR_TEST_SIZE, DMA_TO_DEVICE);
936 
937 	tx = mv_xor_prep_dma_memcpy(dma_chan, dest_dma, src_dma,
938 				    MV_XOR_TEST_SIZE, 0);
939 	cookie = mv_xor_tx_submit(tx);
940 	mv_xor_issue_pending(dma_chan);
941 	async_tx_ack(tx);
942 	msleep(1);
943 
944 	if (mv_xor_status(dma_chan, cookie, NULL) !=
945 	    DMA_SUCCESS) {
946 		dev_err(dma_chan->device->dev,
947 			"Self-test copy timed out, disabling\n");
948 		err = -ENODEV;
949 		goto free_resources;
950 	}
951 
952 	dma_sync_single_for_cpu(dma_chan->device->dev, dest_dma,
953 				MV_XOR_TEST_SIZE, DMA_FROM_DEVICE);
954 	if (memcmp(src, dest, MV_XOR_TEST_SIZE)) {
955 		dev_err(dma_chan->device->dev,
956 			"Self-test copy failed compare, disabling\n");
957 		err = -ENODEV;
958 		goto free_resources;
959 	}
960 
961 free_resources:
962 	mv_xor_free_chan_resources(dma_chan);
963 out:
964 	kfree(src);
965 	kfree(dest);
966 	return err;
967 }
968 
969 #define MV_XOR_NUM_SRC_TEST 4 /* must be <= 15 */
970 static int __devinit
971 mv_xor_xor_self_test(struct mv_xor_chan *mv_chan)
972 {
973 	int i, src_idx;
974 	struct page *dest;
975 	struct page *xor_srcs[MV_XOR_NUM_SRC_TEST];
976 	dma_addr_t dma_srcs[MV_XOR_NUM_SRC_TEST];
977 	dma_addr_t dest_dma;
978 	struct dma_async_tx_descriptor *tx;
979 	struct dma_chan *dma_chan;
980 	dma_cookie_t cookie;
981 	u8 cmp_byte = 0;
982 	u32 cmp_word;
983 	int err = 0;
984 
985 	for (src_idx = 0; src_idx < MV_XOR_NUM_SRC_TEST; src_idx++) {
986 		xor_srcs[src_idx] = alloc_page(GFP_KERNEL);
987 		if (!xor_srcs[src_idx]) {
988 			while (src_idx--)
989 				__free_page(xor_srcs[src_idx]);
990 			return -ENOMEM;
991 		}
992 	}
993 
994 	dest = alloc_page(GFP_KERNEL);
995 	if (!dest) {
996 		while (src_idx--)
997 			__free_page(xor_srcs[src_idx]);
998 		return -ENOMEM;
999 	}
1000 
1001 	/* Fill in src buffers */
1002 	for (src_idx = 0; src_idx < MV_XOR_NUM_SRC_TEST; src_idx++) {
1003 		u8 *ptr = page_address(xor_srcs[src_idx]);
1004 		for (i = 0; i < PAGE_SIZE; i++)
1005 			ptr[i] = (1 << src_idx);
1006 	}
1007 
1008 	for (src_idx = 0; src_idx < MV_XOR_NUM_SRC_TEST; src_idx++)
1009 		cmp_byte ^= (u8) (1 << src_idx);
1010 
1011 	cmp_word = (cmp_byte << 24) | (cmp_byte << 16) |
1012 		(cmp_byte << 8) | cmp_byte;
1013 
1014 	memset(page_address(dest), 0, PAGE_SIZE);
1015 
1016 	dma_chan = &mv_chan->dmachan;
1017 	if (mv_xor_alloc_chan_resources(dma_chan) < 1) {
1018 		err = -ENODEV;
1019 		goto out;
1020 	}
1021 
1022 	/* test xor */
1023 	dest_dma = dma_map_page(dma_chan->device->dev, dest, 0, PAGE_SIZE,
1024 				DMA_FROM_DEVICE);
1025 
1026 	for (i = 0; i < MV_XOR_NUM_SRC_TEST; i++)
1027 		dma_srcs[i] = dma_map_page(dma_chan->device->dev, xor_srcs[i],
1028 					   0, PAGE_SIZE, DMA_TO_DEVICE);
1029 
1030 	tx = mv_xor_prep_dma_xor(dma_chan, dest_dma, dma_srcs,
1031 				 MV_XOR_NUM_SRC_TEST, PAGE_SIZE, 0);
1032 
1033 	cookie = mv_xor_tx_submit(tx);
1034 	mv_xor_issue_pending(dma_chan);
1035 	async_tx_ack(tx);
1036 	msleep(8);
1037 
1038 	if (mv_xor_status(dma_chan, cookie, NULL) !=
1039 	    DMA_SUCCESS) {
1040 		dev_err(dma_chan->device->dev,
1041 			"Self-test xor timed out, disabling\n");
1042 		err = -ENODEV;
1043 		goto free_resources;
1044 	}
1045 
1046 	dma_sync_single_for_cpu(dma_chan->device->dev, dest_dma,
1047 				PAGE_SIZE, DMA_FROM_DEVICE);
1048 	for (i = 0; i < (PAGE_SIZE / sizeof(u32)); i++) {
1049 		u32 *ptr = page_address(dest);
1050 		if (ptr[i] != cmp_word) {
1051 			dev_err(dma_chan->device->dev,
1052 				"Self-test xor failed compare, disabling."
1053 				" index %d, data %x, expected %x\n", i,
1054 				ptr[i], cmp_word);
1055 			err = -ENODEV;
1056 			goto free_resources;
1057 		}
1058 	}
1059 
1060 free_resources:
1061 	mv_xor_free_chan_resources(dma_chan);
1062 out:
1063 	src_idx = MV_XOR_NUM_SRC_TEST;
1064 	while (src_idx--)
1065 		__free_page(xor_srcs[src_idx]);
1066 	__free_page(dest);
1067 	return err;
1068 }
1069 
1070 static int mv_xor_channel_remove(struct mv_xor_chan *mv_chan)
1071 {
1072 	struct dma_chan *chan, *_chan;
1073 	struct device *dev = mv_chan->dmadev.dev;
1074 
1075 	dma_async_device_unregister(&mv_chan->dmadev);
1076 
1077 	dma_free_coherent(dev, MV_XOR_POOL_SIZE,
1078 			  mv_chan->dma_desc_pool_virt, mv_chan->dma_desc_pool);
1079 
1080 	list_for_each_entry_safe(chan, _chan, &mv_chan->dmadev.channels,
1081 				 device_node) {
1082 		list_del(&chan->device_node);
1083 	}
1084 
1085 	free_irq(mv_chan->irq, mv_chan);
1086 	devm_free_irq(dev, mv_chan->irq, mv_chan);
1087 	return 0;
1088 }
1089 
1090 static struct mv_xor_chan *
1091 mv_xor_channel_add(struct mv_xor_device *xordev,
1092 		   struct platform_device *pdev,
1093 		   int idx, dma_cap_mask_t cap_mask, int irq)
1094 {
1095 	int ret = 0;
1096 	struct mv_xor_chan *mv_chan;
1097 	struct dma_device *dma_dev;
1098 
1099 	mv_chan = devm_kzalloc(&pdev->dev, sizeof(*mv_chan), GFP_KERNEL);
1100 	if (!mv_chan) {
1101 	if (!mv_chan)
1102 		return ERR_PTR(-ENOMEM);
1105 	mv_chan->idx = idx;
1106 	mv_chan->irq = irq;
1107 
1108 	dma_dev = &mv_chan->dmadev;
1109 
1110 	/* allocate coherent memory for hardware descriptors
1111 	 * note: writecombine gives slightly better performance, but
1112 	 * requires that we explicitly flush the writes
1113 	 */
1114 	mv_chan->dma_desc_pool_virt =
1115 	  dma_alloc_writecombine(&pdev->dev, MV_XOR_POOL_SIZE,
1116 				 &mv_chan->dma_desc_pool, GFP_KERNEL);
1117 	if (!mv_chan->dma_desc_pool_virt)
1118 		return ERR_PTR(-ENOMEM);
1119 
1120 	/* discover transaction capabilities from the platform data */
1121 	dma_dev->cap_mask = cap_mask;
1122 
1123 	INIT_LIST_HEAD(&dma_dev->channels);
1124 
1125 	/* set base routines */
1126 	dma_dev->device_alloc_chan_resources = mv_xor_alloc_chan_resources;
1127 	dma_dev->device_free_chan_resources = mv_xor_free_chan_resources;
1128 	dma_dev->device_tx_status = mv_xor_status;
1129 	dma_dev->device_issue_pending = mv_xor_issue_pending;
1130 	dma_dev->dev = &pdev->dev;
1131 
1132 	/* set prep routines based on capability */
1133 	if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask))
1134 		dma_dev->device_prep_dma_memcpy = mv_xor_prep_dma_memcpy;
1135 	if (dma_has_cap(DMA_MEMSET, dma_dev->cap_mask))
1136 		dma_dev->device_prep_dma_memset = mv_xor_prep_dma_memset;
1137 	if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
1138 		dma_dev->max_xor = 8;
1139 		dma_dev->device_prep_dma_xor = mv_xor_prep_dma_xor;
1140 	}
1141 
1142 	mv_chan->mmr_base = xordev->xor_base;
1143 	if (!mv_chan->mmr_base) {
1144 		ret = -ENOMEM;
1145 		goto err_free_dma;
1146 	}
1147 	tasklet_init(&mv_chan->irq_tasklet, mv_xor_tasklet, (unsigned long)
1148 		     mv_chan);
1149 
1150 	/* clear errors before enabling interrupts */
1151 	mv_xor_device_clear_err_status(mv_chan);
1152 
1153 	ret = devm_request_irq(&pdev->dev, mv_chan->irq,
1154 			       mv_xor_interrupt_handler,
1155 			       0, dev_name(&pdev->dev), mv_chan);
1156 	if (ret)
1157 		goto err_free_dma;
1158 
1159 	mv_chan_unmask_interrupts(mv_chan);
1160 
1161 	mv_set_mode(mv_chan, DMA_MEMCPY);
1162 
1163 	spin_lock_init(&mv_chan->lock);
1164 	INIT_LIST_HEAD(&mv_chan->chain);
1165 	INIT_LIST_HEAD(&mv_chan->completed_slots);
1166 	INIT_LIST_HEAD(&mv_chan->all_slots);
1167 	mv_chan->dmachan.device = dma_dev;
1168 	dma_cookie_init(&mv_chan->dmachan);
1169 
1170 	list_add_tail(&mv_chan->dmachan.device_node, &dma_dev->channels);
1171 
1172 	if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask)) {
1173 		ret = mv_xor_memcpy_self_test(mv_chan);
1174 		dev_dbg(&pdev->dev, "memcpy self test returned %d\n", ret);
1175 		if (ret)
1176 			goto err_free_dma;
1177 	}
1178 
1179 	if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
1180 		ret = mv_xor_xor_self_test(mv_chan);
1181 		dev_dbg(&pdev->dev, "xor self test returned %d\n", ret);
1182 		if (ret)
1183 			goto err_free_dma;
1184 	}
1185 
1186 	dev_info(&pdev->dev, "Marvell XOR: "
1187 	  "( %s%s%s%s)\n",
1188 	  dma_has_cap(DMA_XOR, dma_dev->cap_mask) ? "xor " : "",
1189 	  dma_has_cap(DMA_MEMSET, dma_dev->cap_mask)  ? "fill " : "",
1190 	  dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask) ? "cpy " : "",
1191 	  dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask) ? "intr " : "");
1192 
1193 	dma_async_device_register(dma_dev);
1194 	return mv_chan;
1195 
1196  err_free_dma:
1197 	dma_free_coherent(&pdev->dev, MV_XOR_POOL_SIZE,
1198 			  mv_chan->dma_desc_pool_virt, mv_chan->dma_desc_pool);
1199 	return ERR_PTR(ret);
1200 }
1201 
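/*
 * Program the engine's address decoding windows to mirror the CPU's DRAM
 * chip-select layout: all eight windows are cleared first, then one
 * window is opened per chip select and enabled through both
 * WINDOW_BAR_ENABLE registers.
 */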
1202 static void
1203 mv_xor_conf_mbus_windows(struct mv_xor_device *xordev,
1204 			 const struct mbus_dram_target_info *dram)
1205 {
1206 	void __iomem *base = xordev->xor_base;
1207 	u32 win_enable = 0;
1208 	int i;
1209 
1210 	for (i = 0; i < 8; i++) {
1211 		writel(0, base + WINDOW_BASE(i));
1212 		writel(0, base + WINDOW_SIZE(i));
1213 		if (i < 4)
1214 			writel(0, base + WINDOW_REMAP_HIGH(i));
1215 	}
1216 
1217 	for (i = 0; i < dram->num_cs; i++) {
1218 		const struct mbus_dram_window *cs = dram->cs + i;
1219 
1220 		writel((cs->base & 0xffff0000) |
1221 		       (cs->mbus_attr << 8) |
1222 		       dram->mbus_dram_target_id, base + WINDOW_BASE(i));
1223 		writel((cs->size - 1) & 0xffff0000, base + WINDOW_SIZE(i));
1224 
1225 		win_enable |= (1 << i);
1226 		win_enable |= 3 << (16 + (2 * i));
1227 	}
1228 
1229 	writel(win_enable, base + WINDOW_BAR_ENABLE(0));
1230 	writel(win_enable, base + WINDOW_BAR_ENABLE(1));
1231 }
1232 
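/*
 * Probe maps the two register ranges, reprograms the MBUS windows when
 * DRAM topology information is available, enables the (optional) gate
 * clock, and then registers one DMA channel per entry described in the
 * platform data.
 */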
1233 static int mv_xor_probe(struct platform_device *pdev)
1234 {
1235 	const struct mbus_dram_target_info *dram;
1236 	struct mv_xor_device *xordev;
1237 	struct mv_xor_platform_data *pdata = pdev->dev.platform_data;
1238 	struct resource *res;
1239 	int i, ret;
1240 
1241 	dev_notice(&pdev->dev, "Marvell XOR driver\n");
1242 
1243 	xordev = devm_kzalloc(&pdev->dev, sizeof(*xordev), GFP_KERNEL);
1244 	if (!xordev)
1245 		return -ENOMEM;
1246 
1247 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1248 	if (!res)
1249 		return -ENODEV;
1250 
1251 	xordev->xor_base = devm_ioremap(&pdev->dev, res->start,
1252 					resource_size(res));
1253 	if (!xordev->xor_base)
1254 		return -EBUSY;
1255 
1256 	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
1257 	if (!res)
1258 		return -ENODEV;
1259 
1260 	xordev->xor_high_base = devm_ioremap(&pdev->dev, res->start,
1261 					     resource_size(res));
1262 	if (!xordev->xor_high_base)
1263 		return -EBUSY;
1264 
1265 	platform_set_drvdata(pdev, xordev);
1266 
1267 	/*
1268 	 * (Re-)program MBUS remapping windows if we are asked to.
1269 	 */
1270 	dram = mv_mbus_dram_info();
1271 	if (dram)
1272 		mv_xor_conf_mbus_windows(xordev, dram);
1273 
1274 	/* Not all platforms can gate the clock, so it is not
1275 	 * an error if the clock does not exist.
1276 	 */
1277 	xordev->clk = clk_get(&pdev->dev, NULL);
1278 	if (!IS_ERR(xordev->clk))
1279 		clk_prepare_enable(xordev->clk);
1280 
1281 	if (pdata && pdata->channels) {
1282 		for (i = 0; i < MV_XOR_MAX_CHANNELS; i++) {
1283 			struct mv_xor_channel_data *cd;
1284 			int irq;
1285 
1286 			cd = &pdata->channels[i];
1287 			if (!cd) {
1288 				ret = -ENODEV;
1289 				goto err_channel_add;
1290 			}
1291 
1292 			irq = platform_get_irq(pdev, i);
1293 			if (irq < 0) {
1294 				ret = irq;
1295 				goto err_channel_add;
1296 			}
1297 
1298 			xordev->channels[i] =
1299 				mv_xor_channel_add(xordev, pdev, i,
1300 						   cd->cap_mask, irq);
1301 			if (IS_ERR(xordev->channels[i])) {
1302 				ret = PTR_ERR(xordev->channels[i]);
1303 				goto err_channel_add;
1304 			}
1305 		}
1306 	}
1307 
1308 	return 0;
1309 
1310 err_channel_add:
1311 	for (i = 0; i < MV_XOR_MAX_CHANNELS; i++)
1312 		if (xordev->channels[i])
1313 			mv_xor_channel_remove(xordev->channels[i]);
1314 
1315 	if (!IS_ERR(xordev->clk)) {
1316 		clk_disable_unprepare(xordev->clk);
1317 		clk_put(xordev->clk);
1318 	}
1317 	return ret;
1318 }
1319 
1320 static int mv_xor_remove(struct platform_device *pdev)
1321 {
1322 	struct mv_xor_device *xordev = platform_get_drvdata(pdev);
1323 	int i;
1324 
1325 	for (i = 0; i < MV_XOR_MAX_CHANNELS; i++) {
1326 		if (xordev->channels[i])
1327 			mv_xor_channel_remove(xordev->channels[i]);
1328 	}
1329 
1330 	if (!IS_ERR(xordev->clk)) {
1331 		clk_disable_unprepare(xordev->clk);
1332 		clk_put(xordev->clk);
1333 	}
1334 
1335 	return 0;
1336 }
1337 
1338 static struct platform_driver mv_xor_driver = {
1339 	.probe		= mv_xor_probe,
1340 	.remove		= mv_xor_remove,
1341 	.driver		= {
1342 		.owner	= THIS_MODULE,
1343 		.name	= MV_XOR_NAME,
1344 	},
1345 };
1346 
1347 
1348 static int __init mv_xor_init(void)
1349 {
1350 	return platform_driver_register(&mv_xor_driver);
1351 }
1352 module_init(mv_xor_init);
1353 
1354 /* it's currently unsafe to unload this module */
1355 #if 0
1356 static void __exit mv_xor_exit(void)
1357 {
1358 	platform_driver_unregister(&mv_xor_driver);
1359 	return;
1360 }
1361 
1362 module_exit(mv_xor_exit);
1363 #endif
1364 
1365 MODULE_AUTHOR("Saeed Bishara <saeed@marvell.com>");
1366 MODULE_DESCRIPTION("DMA engine driver for Marvell's XOR engine");
1367 MODULE_LICENSE("GPL");
1368