xref: /openbmc/linux/drivers/dma/nbpfaxi.c (revision 4f6cce39)
1 /*
2  * Copyright (C) 2013-2014 Renesas Electronics Europe Ltd.
3  * Author: Guennadi Liakhovetski <g.liakhovetski@gmx.de>
4  *
5  * This program is free software; you can redistribute it and/or modify
6  * it under the terms of version 2 of the GNU General Public License as
7  * published by the Free Software Foundation.
8  */
9 
10 #include <linux/bitmap.h>
11 #include <linux/bitops.h>
12 #include <linux/clk.h>
13 #include <linux/dma-mapping.h>
14 #include <linux/dmaengine.h>
15 #include <linux/err.h>
16 #include <linux/interrupt.h>
17 #include <linux/io.h>
18 #include <linux/log2.h>
19 #include <linux/module.h>
20 #include <linux/of.h>
21 #include <linux/of_device.h>
22 #include <linux/of_dma.h>
23 #include <linux/platform_device.h>
24 #include <linux/slab.h>
25 
26 #include <dt-bindings/dma/nbpfaxi.h>
27 
28 #include "dmaengine.h"
29 
30 #define NBPF_REG_CHAN_OFFSET	0
31 #define NBPF_REG_CHAN_SIZE	0x40
32 
33 /* Channel Current Transaction Byte register */
34 #define NBPF_CHAN_CUR_TR_BYTE	0x20
35 
36 /* Channel Status register */
37 #define NBPF_CHAN_STAT	0x24
38 #define NBPF_CHAN_STAT_EN	1
39 #define NBPF_CHAN_STAT_TACT	4
40 #define NBPF_CHAN_STAT_ERR	0x10
41 #define NBPF_CHAN_STAT_END	0x20
42 #define NBPF_CHAN_STAT_TC	0x40
43 #define NBPF_CHAN_STAT_DER	0x400
44 
45 /* Channel Control register */
46 #define NBPF_CHAN_CTRL	0x28
47 #define NBPF_CHAN_CTRL_SETEN	1
48 #define NBPF_CHAN_CTRL_CLREN	2
49 #define NBPF_CHAN_CTRL_STG	4
50 #define NBPF_CHAN_CTRL_SWRST	8
51 #define NBPF_CHAN_CTRL_CLRRQ	0x10
52 #define NBPF_CHAN_CTRL_CLREND	0x20
53 #define NBPF_CHAN_CTRL_CLRTC	0x40
54 #define NBPF_CHAN_CTRL_SETSUS	0x100
55 #define NBPF_CHAN_CTRL_CLRSUS	0x200
56 
57 /* Channel Configuration register */
58 #define NBPF_CHAN_CFG	0x2c
59 #define NBPF_CHAN_CFG_SEL	7		/* terminal SELect: 0..7 */
60 #define NBPF_CHAN_CFG_REQD	8		/* REQuest Direction: DMAREQ is 0: input, 1: output */
61 #define NBPF_CHAN_CFG_LOEN	0x10		/* LOw ENable: low DMA request line is: 0: inactive, 1: active */
62 #define NBPF_CHAN_CFG_HIEN	0x20		/* HIgh ENable: high DMA request line is: 0: inactive, 1: active */
63 #define NBPF_CHAN_CFG_LVL	0x40		/* LeVeL: DMA request line is sensed as 0: edge, 1: level */
64 #define NBPF_CHAN_CFG_AM	0x700		/* ACK Mode: 0: Pulse mode, 1: Level mode, b'1x: Bus Cycle */
65 #define NBPF_CHAN_CFG_SDS	0xf000		/* Source Data Size: 0: 8 bits,... , 7: 1024 bits */
66 #define NBPF_CHAN_CFG_DDS	0xf0000		/* Destination Data Size: as above */
67 #define NBPF_CHAN_CFG_SAD	0x100000	/* Source ADdress counting: 0: increment, 1: fixed */
68 #define NBPF_CHAN_CFG_DAD	0x200000	/* Destination ADdress counting: 0: increment, 1: fixed */
69 #define NBPF_CHAN_CFG_TM	0x400000	/* Transfer Mode: 0: single, 1: block TM */
70 #define NBPF_CHAN_CFG_DEM	0x1000000	/* DMAEND interrupt Mask */
71 #define NBPF_CHAN_CFG_TCM	0x2000000	/* DMATCO interrupt Mask */
72 #define NBPF_CHAN_CFG_SBE	0x8000000	/* Sweep Buffer Enable */
73 #define NBPF_CHAN_CFG_RSEL	0x10000000	/* RM: Register Set sELect */
74 #define NBPF_CHAN_CFG_RSW	0x20000000	/* RM: Register Select sWitch */
75 #define NBPF_CHAN_CFG_REN	0x40000000	/* RM: Register Set Enable */
76 #define NBPF_CHAN_CFG_DMS	0x80000000	/* 0: register mode (RM), 1: link mode (LM) */
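
/*
 * Editorial note: the SDS and DDS fields hold log2 of the transfer unit in
 * bytes (0: 1 byte / 8 bits, ..., 7: 128 bytes / 1024 bits). A 16-byte
 * RAM-side unit is therefore encoded as 4, i.e. 0x4000 in SDS (bits 12..15)
 * or 0x40000 in DDS (bits 16..19); see nbpf_prep_one() below.
 */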
77 
78 #define NBPF_CHAN_NXLA	0x38
79 #define NBPF_CHAN_CRLA	0x3c
80 
81 /* Link Header field */
82 #define NBPF_HEADER_LV	1
83 #define NBPF_HEADER_LE	2
84 #define NBPF_HEADER_WBD	4
85 #define NBPF_HEADER_DIM	8
86 
87 #define NBPF_CTRL	0x300
88 #define NBPF_CTRL_PR	1		/* 0: fixed priority, 1: round robin */
89 #define NBPF_CTRL_LVINT	2		/* DMAEND and DMAERR signalling: 0: pulse, 1: level */
90 
91 #define NBPF_DSTAT_ER	0x314
92 #define NBPF_DSTAT_END	0x318
93 
94 #define NBPF_DMA_BUSWIDTHS \
95 	(BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED) | \
96 	 BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
97 	 BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
98 	 BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | \
99 	 BIT(DMA_SLAVE_BUSWIDTH_8_BYTES))
100 
101 struct nbpf_config {
102 	int num_channels;
103 	int buffer_size;
104 };
105 
106 /*
107  * We've got 3 types of objects used to describe DMA transfers:
108  * 1. a high-level descriptor, containing a struct dma_async_tx_descriptor
109  *	object, used to communicate with the user
110  * 2. hardware DMA link descriptors, that we pass to DMAC for DMA transfer
111  *	queuing, these must be DMAable, using either the streaming DMA API or
112  *	allocated from coherent memory - one per SG segment
113  * 3. per-SG-segment software descriptors, used to manage the HW link descriptors
114  *	from (2). They do not have to be DMAable. They can either be (a) allocated
115  *	together with link descriptors as mixed (DMA / CPU) objects, or (b)
116  *	separately. Even if allocated separately it would be best to link them
117  *	to link descriptors once during channel resource allocation and always
118  *	use them as a single object.
119  * Therefore for both cases (a) and (b) at run-time objects (2) and (3) shall be
120  * treated as a single SG segment descriptor.
121  */
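
/*
 * Editorial illustration: for a 3-segment scatterlist a single struct
 * nbpf_desc (1) is used; its ->sg list holds three struct nbpf_link_desc
 * entries (3), and each entry's ->hwdesc points at a DMA-able struct
 * nbpf_link_reg (2), which the controller walks via the ->next links.
 */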
122 
123 struct nbpf_link_reg {
124 	u32	header;
125 	u32	src_addr;
126 	u32	dst_addr;
127 	u32	transaction_size;
128 	u32	config;
129 	u32	interval;
130 	u32	extension;
131 	u32	next;
132 } __packed;
133 
134 struct nbpf_device;
135 struct nbpf_channel;
136 struct nbpf_desc;
137 
138 struct nbpf_link_desc {
139 	struct nbpf_link_reg *hwdesc;
140 	dma_addr_t hwdesc_dma_addr;
141 	struct nbpf_desc *desc;
142 	struct list_head node;
143 };
144 
145 /**
146  * struct nbpf_desc - DMA transfer descriptor
147  * @async_tx:	dmaengine object
148  * @user_wait:	waiting for a user ack
149  * @length:	total transfer length
150  * @sg:		list of hardware descriptors, represented by struct nbpf_link_desc
151  * @node:	member in channel descriptor lists
152  */
153 struct nbpf_desc {
154 	struct dma_async_tx_descriptor async_tx;
155 	bool user_wait;
156 	size_t length;
157 	struct nbpf_channel *chan;
158 	struct list_head sg;
159 	struct list_head node;
160 };
161 
162 /* Take a wild guess: allocate 4 segments per descriptor */
163 #define NBPF_SEGMENTS_PER_DESC 4
164 #define NBPF_DESCS_PER_PAGE ((PAGE_SIZE - sizeof(struct list_head)) /	\
165 	(sizeof(struct nbpf_desc) +					\
166 	 NBPF_SEGMENTS_PER_DESC *					\
167 	 (sizeof(struct nbpf_link_desc) + sizeof(struct nbpf_link_reg))))
168 #define NBPF_SEGMENTS_PER_PAGE (NBPF_SEGMENTS_PER_DESC * NBPF_DESCS_PER_PAGE)
169 
170 struct nbpf_desc_page {
171 	struct list_head node;
172 	struct nbpf_desc desc[NBPF_DESCS_PER_PAGE];
173 	struct nbpf_link_desc ldesc[NBPF_SEGMENTS_PER_PAGE];
174 	struct nbpf_link_reg hwdesc[NBPF_SEGMENTS_PER_PAGE];
175 };
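
/*
 * Editorial example of the packing above, with purely illustrative sizes:
 * if PAGE_SIZE were 4096 and the three structures came to roughly 160, 40
 * and 32 bytes, one descriptor plus its 4 segments would take about
 * 160 + 4 * (40 + 32) = 448 bytes, giving NBPF_DESCS_PER_PAGE = 9 and
 * NBPF_SEGMENTS_PER_PAGE = 36. The real sizes depend on the architecture and
 * config; the BUILD_BUG_ON() in nbpf_probe() checks that a page suffices.
 */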
176 
177 /**
178  * struct nbpf_channel - one DMAC channel
179  * @dma_chan:	standard dmaengine channel object
180  * @base:	register address base
181  * @nbpf:	DMAC
182  * @name:	IRQ name
183  * @irq:	IRQ number
184  * @slave_src_addr, @slave_dst_addr: addresses for slave DMA (RX / TX)
185  * @slave_src_width, @slave_dst_width: slave data sizes, log2-encoded as used in the SDS / DDS fields
186  * @slave_src_burst, @slave_dst_burst: maximum slave burst sizes, same encoding
187  * @terminal:	DMA terminal, assigned to this channel
188  * @dmarq_cfg:	DMA request line configuration - high / low, edge / level for NBPF_CHAN_CFG
189  * @flags:	configuration flags from DT
190  * @lock:	protect descriptor lists
191  * @free_links:	list of free link descriptors
192  * @free:	list of free descriptors
193  * @queued:	list of queued descriptors
194  * @active:	list of descriptors, scheduled for processing
195  * @done:	list of completed descriptors, waiting post-processing
196  * @desc_page:	list of additionally allocated descriptor pages - if any
197  */
198 struct nbpf_channel {
199 	struct dma_chan dma_chan;
200 	struct tasklet_struct tasklet;
201 	void __iomem *base;
202 	struct nbpf_device *nbpf;
203 	char name[16];
204 	int irq;
205 	dma_addr_t slave_src_addr;
206 	size_t slave_src_width;
207 	size_t slave_src_burst;
208 	dma_addr_t slave_dst_addr;
209 	size_t slave_dst_width;
210 	size_t slave_dst_burst;
211 	unsigned int terminal;
212 	u32 dmarq_cfg;
213 	unsigned long flags;
214 	spinlock_t lock;
215 	struct list_head free_links;
216 	struct list_head free;
217 	struct list_head queued;
218 	struct list_head active;
219 	struct list_head done;
220 	struct list_head desc_page;
221 	struct nbpf_desc *running;
222 	bool paused;
223 };
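
/*
 * Editorial note on descriptor flow: a descriptor starts on the free list,
 * moves to queued in nbpf_tx_submit(), to active in nbpf_issue_pending()
 * (the head of which becomes ->running via nbpf_start()), to done in the
 * channel IRQ handler, and is recycled back to free by the tasklet once the
 * user has acknowledged it.
 */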
224 
225 struct nbpf_device {
226 	struct dma_device dma_dev;
227 	void __iomem *base;
228 	u32 max_burst_mem_read;
229 	u32 max_burst_mem_write;
230 	struct clk *clk;
231 	const struct nbpf_config *config;
232 	unsigned int eirq;
233 	struct nbpf_channel chan[];
234 };
235 
236 enum nbpf_model {
237 	NBPF1B4,
238 	NBPF1B8,
239 	NBPF1B16,
240 	NBPF4B4,
241 	NBPF4B8,
242 	NBPF4B16,
243 	NBPF8B4,
244 	NBPF8B8,
245 	NBPF8B16,
246 };
247 
248 static struct nbpf_config nbpf_cfg[] = {
249 	[NBPF1B4] = {
250 		.num_channels = 1,
251 		.buffer_size = 4,
252 	},
253 	[NBPF1B8] = {
254 		.num_channels = 1,
255 		.buffer_size = 8,
256 	},
257 	[NBPF1B16] = {
258 		.num_channels = 1,
259 		.buffer_size = 16,
260 	},
261 	[NBPF4B4] = {
262 		.num_channels = 4,
263 		.buffer_size = 4,
264 	},
265 	[NBPF4B8] = {
266 		.num_channels = 4,
267 		.buffer_size = 8,
268 	},
269 	[NBPF4B16] = {
270 		.num_channels = 4,
271 		.buffer_size = 16,
272 	},
273 	[NBPF8B4] = {
274 		.num_channels = 8,
275 		.buffer_size = 4,
276 	},
277 	[NBPF8B8] = {
278 		.num_channels = 8,
279 		.buffer_size = 8,
280 	},
281 	[NBPF8B16] = {
282 		.num_channels = 8,
283 		.buffer_size = 16,
284 	},
285 };
286 
287 #define nbpf_to_chan(d) container_of(d, struct nbpf_channel, dma_chan)
288 
289 /*
290  * dmaengine drivers seem to have a lot in common and instead of sharing more
291  * code, they reimplement those common algorithms independently. In this driver
292  * we try to separate the hardware-specific part from the (largely) generic
293  * part. This improves code readability and makes it possible in the future to
294  * reuse the generic code in the form of a helper library. That generic code should
295  * be suitable for various DMA controllers, using transfer descriptors in RAM
296  * and pushing one SG list at a time to the DMA controller.
297  */
298 
299 /*		Hardware-specific part		*/
300 
301 static inline u32 nbpf_chan_read(struct nbpf_channel *chan,
302 				 unsigned int offset)
303 {
304 	u32 data = ioread32(chan->base + offset);
305 	dev_dbg(chan->dma_chan.device->dev, "%s(0x%p + 0x%x) = 0x%x\n",
306 		__func__, chan->base, offset, data);
307 	return data;
308 }
309 
310 static inline void nbpf_chan_write(struct nbpf_channel *chan,
311 				   unsigned int offset, u32 data)
312 {
313 	iowrite32(data, chan->base + offset);
314 	dev_dbg(chan->dma_chan.device->dev, "%s(0x%p + 0x%x) = 0x%x\n",
315 		__func__, chan->base, offset, data);
316 }
317 
318 static inline u32 nbpf_read(struct nbpf_device *nbpf,
319 			    unsigned int offset)
320 {
321 	u32 data = ioread32(nbpf->base + offset);
322 	dev_dbg(nbpf->dma_dev.dev, "%s(0x%p + 0x%x) = 0x%x\n",
323 		__func__, nbpf->base, offset, data);
324 	return data;
325 }
326 
327 static inline void nbpf_write(struct nbpf_device *nbpf,
328 			      unsigned int offset, u32 data)
329 {
330 	iowrite32(data, nbpf->base + offset);
331 	dev_dbg(nbpf->dma_dev.dev, "%s(0x%p + 0x%x) = 0x%x\n",
332 		__func__, nbpf->base, offset, data);
333 }
334 
335 static void nbpf_chan_halt(struct nbpf_channel *chan)
336 {
337 	nbpf_chan_write(chan, NBPF_CHAN_CTRL, NBPF_CHAN_CTRL_CLREN);
338 }
339 
340 static bool nbpf_status_get(struct nbpf_channel *chan)
341 {
342 	u32 status = nbpf_read(chan->nbpf, NBPF_DSTAT_END);
343 
344 	return status & BIT(chan - chan->nbpf->chan);
345 }
346 
347 static void nbpf_status_ack(struct nbpf_channel *chan)
348 {
349 	nbpf_chan_write(chan, NBPF_CHAN_CTRL, NBPF_CHAN_CTRL_CLREND);
350 }
351 
352 static u32 nbpf_error_get(struct nbpf_device *nbpf)
353 {
354 	return nbpf_read(nbpf, NBPF_DSTAT_ER);
355 }
356 
357 static struct nbpf_channel *nbpf_error_get_channel(struct nbpf_device *nbpf, u32 error)
358 {
359 	return nbpf->chan + __ffs(error);
360 }
361 
362 static void nbpf_error_clear(struct nbpf_channel *chan)
363 {
364 	u32 status;
365 	int i;
366 
367 	/* Stop the channel, make sure DMA has been aborted */
368 	nbpf_chan_halt(chan);
369 
370 	for (i = 1000; i; i--) {
371 		status = nbpf_chan_read(chan, NBPF_CHAN_STAT);
372 		if (!(status & NBPF_CHAN_STAT_TACT))
373 			break;
374 		cpu_relax();
375 	}
376 
377 	if (!i)
378 		dev_err(chan->dma_chan.device->dev,
379 			"%s(): abort timeout, channel status 0x%x\n", __func__, status);
380 
381 	nbpf_chan_write(chan, NBPF_CHAN_CTRL, NBPF_CHAN_CTRL_SWRST);
382 }
383 
384 static int nbpf_start(struct nbpf_desc *desc)
385 {
386 	struct nbpf_channel *chan = desc->chan;
387 	struct nbpf_link_desc *ldesc = list_first_entry(&desc->sg, struct nbpf_link_desc, node);
388 
389 	nbpf_chan_write(chan, NBPF_CHAN_NXLA, (u32)ldesc->hwdesc_dma_addr);
390 	nbpf_chan_write(chan, NBPF_CHAN_CTRL, NBPF_CHAN_CTRL_SETEN | NBPF_CHAN_CTRL_CLRSUS);
391 	chan->paused = false;
392 
393 	/* Software trigger MEMCPY - only MEMCPY uses the block mode */
394 	if (ldesc->hwdesc->config & NBPF_CHAN_CFG_TM)
395 		nbpf_chan_write(chan, NBPF_CHAN_CTRL, NBPF_CHAN_CTRL_STG);
396 
397 	dev_dbg(chan->nbpf->dma_dev.dev, "%s(): next 0x%x, cur 0x%x\n", __func__,
398 		nbpf_chan_read(chan, NBPF_CHAN_NXLA), nbpf_chan_read(chan, NBPF_CHAN_CRLA));
399 
400 	return 0;
401 }
402 
403 static void nbpf_chan_prepare(struct nbpf_channel *chan)
404 {
405 	chan->dmarq_cfg = (chan->flags & NBPF_SLAVE_RQ_HIGH ? NBPF_CHAN_CFG_HIEN : 0) |
406 		(chan->flags & NBPF_SLAVE_RQ_LOW ? NBPF_CHAN_CFG_LOEN : 0) |
407 		(chan->flags & NBPF_SLAVE_RQ_LEVEL ?
408 		 NBPF_CHAN_CFG_LVL | (NBPF_CHAN_CFG_AM & 0x200) : 0) |
409 		chan->terminal;
410 }
411 
412 static void nbpf_chan_prepare_default(struct nbpf_channel *chan)
413 {
414 	/* Don't output DMAACK */
415 	chan->dmarq_cfg = NBPF_CHAN_CFG_AM & 0x400;
416 	chan->terminal = 0;
417 	chan->flags = 0;
418 }
419 
420 static void nbpf_chan_configure(struct nbpf_channel *chan)
421 {
422 	/*
423 	 * We assume that only the link mode and DMA request line configuration
424 	 * have to be set in the configuration register manually. Dynamic
425 	 * per-transfer configuration will be loaded from transfer descriptors.
426 	 */
427 	nbpf_chan_write(chan, NBPF_CHAN_CFG, NBPF_CHAN_CFG_DMS | chan->dmarq_cfg);
428 }
429 
430 static u32 nbpf_xfer_ds(struct nbpf_device *nbpf, size_t size,
431 			enum dma_transfer_direction direction)
432 {
433 	int max_burst = nbpf->config->buffer_size * 8;
434 
435 	if (nbpf->max_burst_mem_read || nbpf->max_burst_mem_write) {
436 		switch (direction) {
437 		case DMA_MEM_TO_MEM:
438 			max_burst = min_not_zero(nbpf->max_burst_mem_read,
439 						 nbpf->max_burst_mem_write);
440 			break;
441 		case DMA_MEM_TO_DEV:
442 			if (nbpf->max_burst_mem_read)
443 				max_burst = nbpf->max_burst_mem_read;
444 			break;
445 		case DMA_DEV_TO_MEM:
446 			if (nbpf->max_burst_mem_write)
447 				max_burst = nbpf->max_burst_mem_write;
448 			break;
449 		case DMA_DEV_TO_DEV:
450 		default:
451 			break;
452 		}
453 	}
454 
455 	/* Maximum supported bursts depend on the buffer size */
456 	return min_t(int, __ffs(size), ilog2(max_burst));
457 }
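
/*
 * Editorial example: with buffer_size = 16 the default max_burst is 128
 * bytes, so ilog2(max_burst) = 7. A 4096-byte transfer has __ffs(4096) = 12
 * and therefore uses 2^7 = 128-byte units, while an odd-length transfer has
 * __ffs() = 0 and falls back to single-byte units.
 */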
458 
459 static size_t nbpf_xfer_size(struct nbpf_device *nbpf,
460 			     enum dma_slave_buswidth width, u32 burst)
461 {
462 	size_t size;
463 
464 	if (!burst)
465 		burst = 1;
466 
467 	switch (width) {
468 	case DMA_SLAVE_BUSWIDTH_8_BYTES:
469 		size = 8 * burst;
470 		break;
471 
472 	case DMA_SLAVE_BUSWIDTH_4_BYTES:
473 		size = 4 * burst;
474 		break;
475 
476 	case DMA_SLAVE_BUSWIDTH_2_BYTES:
477 		size = 2 * burst;
478 		break;
479 
480 	default:
481 		pr_warn("%s(): invalid bus width %u\n", __func__, width);
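		/* fall through: treat an invalid width as one byte */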
482 	case DMA_SLAVE_BUSWIDTH_1_BYTE:
483 		size = burst;
484 	}
485 
486 	return nbpf_xfer_ds(nbpf, size, DMA_TRANS_NONE);
487 }
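
/*
 * Editorial example: a slave with 4-byte registers and maxburst 16 yields
 * size = 64 here, so the function returns min(__ffs(64), ...) = 6, i.e.
 * 64-byte units, unless capped by the controller buffer size above.
 */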
488 
489 /*
490  * We need a way to recognise slaves whose data is sent "raw" over the bus,
491  * i.e. where it isn't known in advance how many bytes will be received. The
492  * slave driver then has to provide a "large enough" buffer and either read it
493  * when it is full, or detect that some data has arrived and, after a timeout
494  * with no further data, receive what's already there. We want to handle such
495  * slaves in a special way to keep an optimised mode for other users, for whom
496  * the amount of data is known in advance. So far there's no explicit way to
497  * recognise such slaves, so we use a data-width check to distinguish between
498  * the SD host and the PL011 UART.
499  */
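
/*
 * Editorial illustration of that check: a PL011-style UART with a byte-wide
 * data register ends up with slave_src_width == 0 (1-byte units), so
 * can_burst in nbpf_prep_one() is false and the RAM-side unit is clamped to
 * the slave burst, whereas a 64-bit-wide SD host FIFO gives
 * slave_src_width >= 3 and allows device-side bursts.
 */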
500 
501 static int nbpf_prep_one(struct nbpf_link_desc *ldesc,
502 			 enum dma_transfer_direction direction,
503 			 dma_addr_t src, dma_addr_t dst, size_t size, bool last)
504 {
505 	struct nbpf_link_reg *hwdesc = ldesc->hwdesc;
506 	struct nbpf_desc *desc = ldesc->desc;
507 	struct nbpf_channel *chan = desc->chan;
508 	struct device *dev = chan->dma_chan.device->dev;
509 	size_t mem_xfer, slave_xfer;
510 	bool can_burst;
511 
512 	hwdesc->header = NBPF_HEADER_WBD | NBPF_HEADER_LV |
513 		(last ? NBPF_HEADER_LE : 0);
514 
515 	hwdesc->src_addr = src;
516 	hwdesc->dst_addr = dst;
517 	hwdesc->transaction_size = size;
518 
519 	/*
520 	 * set config: SAD, DAD, DDS, SDS, etc.
521 	 * Note on transfer sizes: the DMAC can perform unaligned DMA transfers,
522 	 * but it is important to have transaction size a multiple of both
523 	 * receiver and transmitter transfer sizes. It is also possible to use
524 	 * different RAM and device transfer sizes, and it does work well with
525 	 * some devices, e.g. with V08R07S01E SD host controllers, which can use
526 	 * 128 byte transfers. But this doesn't work with other devices,
527 	 * especially when the transaction size is unknown. This is the case,
528 	 * e.g. with serial drivers like amba-pl011.c. For reception it sets up
529 	 * a transaction size of 4K and, if fewer bytes are received, it
530 	 * pauses DMA and reads out the data received via DMA as well as the
531 	 * bytes left in the Rx FIFO. For this to work with the RAM side using burst
532 	 * transfers we enable the SBE bit and terminate the transfer in our
533 	 * .device_pause handler.
534 	 */
535 	mem_xfer = nbpf_xfer_ds(chan->nbpf, size, direction);
536 
537 	switch (direction) {
538 	case DMA_DEV_TO_MEM:
539 		can_burst = chan->slave_src_width >= 3;
540 		slave_xfer = min(mem_xfer, can_burst ?
541 				 chan->slave_src_burst : chan->slave_src_width);
542 		/*
543 		 * Is the slave narrower than 64 bits, i.e. isn't using the full
544 		 * bus width and cannot use bursts?
545 		 */
546 		if (mem_xfer > chan->slave_src_burst && !can_burst)
547 			mem_xfer = chan->slave_src_burst;
548 		/* Device-to-RAM DMA is unreliable without REQD set */
549 		hwdesc->config = NBPF_CHAN_CFG_SAD | (NBPF_CHAN_CFG_DDS & (mem_xfer << 16)) |
550 			(NBPF_CHAN_CFG_SDS & (slave_xfer << 12)) | NBPF_CHAN_CFG_REQD |
551 			NBPF_CHAN_CFG_SBE;
552 		break;
553 
554 	case DMA_MEM_TO_DEV:
555 		slave_xfer = min(mem_xfer, chan->slave_dst_width >= 3 ?
556 				 chan->slave_dst_burst : chan->slave_dst_width);
557 		hwdesc->config = NBPF_CHAN_CFG_DAD | (NBPF_CHAN_CFG_SDS & (mem_xfer << 12)) |
558 			(NBPF_CHAN_CFG_DDS & (slave_xfer << 16)) | NBPF_CHAN_CFG_REQD;
559 		break;
560 
561 	case DMA_MEM_TO_MEM:
562 		hwdesc->config = NBPF_CHAN_CFG_TCM | NBPF_CHAN_CFG_TM |
563 			(NBPF_CHAN_CFG_SDS & (mem_xfer << 12)) |
564 			(NBPF_CHAN_CFG_DDS & (mem_xfer << 16));
565 		break;
566 
567 	default:
568 		return -EINVAL;
569 	}
570 
571 	hwdesc->config |= chan->dmarq_cfg | (last ? 0 : NBPF_CHAN_CFG_DEM) |
572 		NBPF_CHAN_CFG_DMS;
573 
574 	dev_dbg(dev, "%s(): desc @ %pad: hdr 0x%x, cfg 0x%x, %zu @ %pad -> %pad\n",
575 		__func__, &ldesc->hwdesc_dma_addr, hwdesc->header,
576 		hwdesc->config, size, &src, &dst);
577 
578 	dma_sync_single_for_device(dev, ldesc->hwdesc_dma_addr, sizeof(*hwdesc),
579 				   DMA_TO_DEVICE);
580 
581 	return 0;
582 }
583 
584 static size_t nbpf_bytes_left(struct nbpf_channel *chan)
585 {
586 	return nbpf_chan_read(chan, NBPF_CHAN_CUR_TR_BYTE);
587 }
588 
589 static void nbpf_configure(struct nbpf_device *nbpf)
590 {
591 	nbpf_write(nbpf, NBPF_CTRL, NBPF_CTRL_LVINT);
592 }
593 
594 /*		Generic part			*/
595 
596 /* DMA ENGINE functions */
597 static void nbpf_issue_pending(struct dma_chan *dchan)
598 {
599 	struct nbpf_channel *chan = nbpf_to_chan(dchan);
600 	unsigned long flags;
601 
602 	dev_dbg(dchan->device->dev, "Entry %s()\n", __func__);
603 
604 	spin_lock_irqsave(&chan->lock, flags);
605 	if (list_empty(&chan->queued))
606 		goto unlock;
607 
608 	list_splice_tail_init(&chan->queued, &chan->active);
609 
610 	if (!chan->running) {
611 		struct nbpf_desc *desc = list_first_entry(&chan->active,
612 						struct nbpf_desc, node);
613 		if (!nbpf_start(desc))
614 			chan->running = desc;
615 	}
616 
617 unlock:
618 	spin_unlock_irqrestore(&chan->lock, flags);
619 }
620 
621 static enum dma_status nbpf_tx_status(struct dma_chan *dchan,
622 		dma_cookie_t cookie, struct dma_tx_state *state)
623 {
624 	struct nbpf_channel *chan = nbpf_to_chan(dchan);
625 	enum dma_status status = dma_cookie_status(dchan, cookie, state);
626 
627 	if (state) {
628 		dma_cookie_t running;
629 		unsigned long flags;
630 
631 		spin_lock_irqsave(&chan->lock, flags);
632 		running = chan->running ? chan->running->async_tx.cookie : -EINVAL;
633 
634 		if (cookie == running) {
635 			state->residue = nbpf_bytes_left(chan);
636 			dev_dbg(dchan->device->dev, "%s(): residue %u\n", __func__,
637 				state->residue);
638 		} else if (status == DMA_IN_PROGRESS) {
639 			struct nbpf_desc *desc;
640 			bool found = false;
641 
642 			list_for_each_entry(desc, &chan->active, node)
643 				if (desc->async_tx.cookie == cookie) {
644 					found = true;
645 					break;
646 				}
647 
648 			if (!found)
649 				list_for_each_entry(desc, &chan->queued, node)
650 					if (desc->async_tx.cookie == cookie) {
651 						found = true;
652 						break;
653 
654 					}
655 
656 			state->residue = found ? desc->length : 0;
657 		}
658 
659 		spin_unlock_irqrestore(&chan->lock, flags);
660 	}
661 
662 	if (chan->paused)
663 		status = DMA_PAUSED;
664 
665 	return status;
666 }
667 
668 static dma_cookie_t nbpf_tx_submit(struct dma_async_tx_descriptor *tx)
669 {
670 	struct nbpf_desc *desc = container_of(tx, struct nbpf_desc, async_tx);
671 	struct nbpf_channel *chan = desc->chan;
672 	unsigned long flags;
673 	dma_cookie_t cookie;
674 
675 	spin_lock_irqsave(&chan->lock, flags);
676 	cookie = dma_cookie_assign(tx);
677 	list_add_tail(&desc->node, &chan->queued);
678 	spin_unlock_irqrestore(&chan->lock, flags);
679 
680 	dev_dbg(chan->dma_chan.device->dev, "Entry %s(%d)\n", __func__, cookie);
681 
682 	return cookie;
683 }
684 
685 static int nbpf_desc_page_alloc(struct nbpf_channel *chan)
686 {
687 	struct dma_chan *dchan = &chan->dma_chan;
688 	struct nbpf_desc_page *dpage = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
689 	struct nbpf_link_desc *ldesc;
690 	struct nbpf_link_reg *hwdesc;
691 	struct nbpf_desc *desc;
692 	LIST_HEAD(head);
693 	LIST_HEAD(lhead);
694 	int i;
695 	struct device *dev = dchan->device->dev;
696 
697 	if (!dpage)
698 		return -ENOMEM;
699 
700 	dev_dbg(dev, "%s(): alloc %lu descriptors, %lu segments, total alloc %zu\n",
701 		__func__, NBPF_DESCS_PER_PAGE, NBPF_SEGMENTS_PER_PAGE, sizeof(*dpage));
702 
703 	for (i = 0, ldesc = dpage->ldesc, hwdesc = dpage->hwdesc;
704 	     i < ARRAY_SIZE(dpage->ldesc);
705 	     i++, ldesc++, hwdesc++) {
706 		ldesc->hwdesc = hwdesc;
707 		list_add_tail(&ldesc->node, &lhead);
708 		ldesc->hwdesc_dma_addr = dma_map_single(dchan->device->dev,
709 					hwdesc, sizeof(*hwdesc), DMA_TO_DEVICE);
710 
711 		dev_dbg(dev, "%s(): mapped 0x%p to %pad\n", __func__,
712 			hwdesc, &ldesc->hwdesc_dma_addr);
713 	}
714 
715 	for (i = 0, desc = dpage->desc;
716 	     i < ARRAY_SIZE(dpage->desc);
717 	     i++, desc++) {
718 		dma_async_tx_descriptor_init(&desc->async_tx, dchan);
719 		desc->async_tx.tx_submit = nbpf_tx_submit;
720 		desc->chan = chan;
721 		INIT_LIST_HEAD(&desc->sg);
722 		list_add_tail(&desc->node, &head);
723 	}
724 
725 	/*
726 	 * This function cannot be called from interrupt context, so there is no
727 	 * need to save the IRQ flags
728 	 */
729 	spin_lock_irq(&chan->lock);
730 	list_splice_tail(&lhead, &chan->free_links);
731 	list_splice_tail(&head, &chan->free);
732 	list_add(&dpage->node, &chan->desc_page);
733 	spin_unlock_irq(&chan->lock);
734 
735 	return ARRAY_SIZE(dpage->desc);
736 }
737 
738 static void nbpf_desc_put(struct nbpf_desc *desc)
739 {
740 	struct nbpf_channel *chan = desc->chan;
741 	struct nbpf_link_desc *ldesc, *tmp;
742 	unsigned long flags;
743 
744 	spin_lock_irqsave(&chan->lock, flags);
745 	list_for_each_entry_safe(ldesc, tmp, &desc->sg, node)
746 		list_move(&ldesc->node, &chan->free_links);
747 
748 	list_add(&desc->node, &chan->free);
749 	spin_unlock_irqrestore(&chan->lock, flags);
750 }
751 
752 static void nbpf_scan_acked(struct nbpf_channel *chan)
753 {
754 	struct nbpf_desc *desc, *tmp;
755 	unsigned long flags;
756 	LIST_HEAD(head);
757 
758 	spin_lock_irqsave(&chan->lock, flags);
759 	list_for_each_entry_safe(desc, tmp, &chan->done, node)
760 		if (async_tx_test_ack(&desc->async_tx) && desc->user_wait) {
761 			list_move(&desc->node, &head);
762 			desc->user_wait = false;
763 		}
764 	spin_unlock_irqrestore(&chan->lock, flags);
765 
766 	list_for_each_entry_safe(desc, tmp, &head, node) {
767 		list_del(&desc->node);
768 		nbpf_desc_put(desc);
769 	}
770 }
771 
772 /*
773  * We have to allocate descriptors with the channel lock dropped. This means
774  * that by the time we re-acquire the lock, the buffers may already have been
775  * taken, so we have to re-check after re-acquiring the lock and possibly
776  * retry, if the buffers are gone again.
777  */
778 static struct nbpf_desc *nbpf_desc_get(struct nbpf_channel *chan, size_t len)
779 {
780 	struct nbpf_desc *desc = NULL;
781 	struct nbpf_link_desc *ldesc, *prev = NULL;
782 
783 	nbpf_scan_acked(chan);
784 
785 	spin_lock_irq(&chan->lock);
786 
787 	do {
788 		int i = 0, ret;
789 
790 		if (list_empty(&chan->free)) {
791 			/* No more free descriptors */
792 			spin_unlock_irq(&chan->lock);
793 			ret = nbpf_desc_page_alloc(chan);
794 			if (ret < 0)
795 				return NULL;
796 			spin_lock_irq(&chan->lock);
797 			continue;
798 		}
799 		desc = list_first_entry(&chan->free, struct nbpf_desc, node);
800 		list_del(&desc->node);
801 
802 		do {
803 			if (list_empty(&chan->free_links)) {
804 				/* No more free link descriptors */
805 				spin_unlock_irq(&chan->lock);
806 				ret = nbpf_desc_page_alloc(chan);
807 				if (ret < 0) {
808 					nbpf_desc_put(desc);
809 					return NULL;
810 				}
811 				spin_lock_irq(&chan->lock);
812 				continue;
813 			}
814 
815 			ldesc = list_first_entry(&chan->free_links,
816 						 struct nbpf_link_desc, node);
817 			ldesc->desc = desc;
818 			if (prev)
819 				prev->hwdesc->next = (u32)ldesc->hwdesc_dma_addr;
820 
821 			prev = ldesc;
822 			list_move_tail(&ldesc->node, &desc->sg);
823 
824 			i++;
825 		} while (i < len);
826 	} while (!desc);
827 
828 	prev->hwdesc->next = 0;
829 
830 	spin_unlock_irq(&chan->lock);
831 
832 	return desc;
833 }
834 
835 static void nbpf_chan_idle(struct nbpf_channel *chan)
836 {
837 	struct nbpf_desc *desc, *tmp;
838 	unsigned long flags;
839 	LIST_HEAD(head);
840 
841 	spin_lock_irqsave(&chan->lock, flags);
842 
843 	list_splice_init(&chan->done, &head);
844 	list_splice_init(&chan->active, &head);
845 	list_splice_init(&chan->queued, &head);
846 
847 	chan->running = NULL;
848 
849 	spin_unlock_irqrestore(&chan->lock, flags);
850 
851 	list_for_each_entry_safe(desc, tmp, &head, node) {
852 		dev_dbg(chan->nbpf->dma_dev.dev, "%s(): force-free desc %p cookie %d\n",
853 			__func__, desc, desc->async_tx.cookie);
854 		list_del(&desc->node);
855 		nbpf_desc_put(desc);
856 	}
857 }
858 
859 static int nbpf_pause(struct dma_chan *dchan)
860 {
861 	struct nbpf_channel *chan = nbpf_to_chan(dchan);
862 
863 	dev_dbg(dchan->device->dev, "Entry %s\n", __func__);
864 
865 	chan->paused = true;
866 	nbpf_chan_write(chan, NBPF_CHAN_CTRL, NBPF_CHAN_CTRL_SETSUS);
867 	/* See comment in nbpf_prep_one() */
868 	nbpf_chan_write(chan, NBPF_CHAN_CTRL, NBPF_CHAN_CTRL_CLREN);
869 
870 	return 0;
871 }
872 
873 static int nbpf_terminate_all(struct dma_chan *dchan)
874 {
875 	struct nbpf_channel *chan = nbpf_to_chan(dchan);
876 
877 	dev_dbg(dchan->device->dev, "Entry %s\n", __func__);
878 	dev_dbg(dchan->device->dev, "Terminating\n");
879 
880 	nbpf_chan_halt(chan);
881 	nbpf_chan_idle(chan);
882 
883 	return 0;
884 }
885 
886 static int nbpf_config(struct dma_chan *dchan,
887 		       struct dma_slave_config *config)
888 {
889 	struct nbpf_channel *chan = nbpf_to_chan(dchan);
890 
891 	dev_dbg(dchan->device->dev, "Entry %s\n", __func__);
892 
893 	/*
894 	 * We could check config->slave_id to match chan->terminal here,
895 	 * but with DT they would be coming from the same source, so
896 	 * such a check would be superflous
897 	 */
898 
899 	chan->slave_dst_addr = config->dst_addr;
900 	chan->slave_dst_width = nbpf_xfer_size(chan->nbpf,
901 					       config->dst_addr_width, 1);
902 	chan->slave_dst_burst = nbpf_xfer_size(chan->nbpf,
903 					       config->dst_addr_width,
904 					       config->dst_maxburst);
905 	chan->slave_src_addr = config->src_addr;
906 	chan->slave_src_width = nbpf_xfer_size(chan->nbpf,
907 					       config->src_addr_width, 1);
908 	chan->slave_src_burst = nbpf_xfer_size(chan->nbpf,
909 					       config->src_addr_width,
910 					       config->src_maxburst);
911 
912 	return 0;
913 }
914 
915 static struct dma_async_tx_descriptor *nbpf_prep_sg(struct nbpf_channel *chan,
916 		struct scatterlist *src_sg, struct scatterlist *dst_sg,
917 		size_t len, enum dma_transfer_direction direction,
918 		unsigned long flags)
919 {
920 	struct nbpf_link_desc *ldesc;
921 	struct scatterlist *mem_sg;
922 	struct nbpf_desc *desc;
923 	bool inc_src, inc_dst;
924 	size_t data_len = 0;
925 	int i = 0;
926 
927 	switch (direction) {
928 	case DMA_DEV_TO_MEM:
929 		mem_sg = dst_sg;
930 		inc_src = false;
931 		inc_dst = true;
932 		break;
933 
934 	case DMA_MEM_TO_DEV:
935 		mem_sg = src_sg;
936 		inc_src = true;
937 		inc_dst = false;
938 		break;
939 
940 	default:
941 	case DMA_MEM_TO_MEM:
942 		mem_sg = src_sg;
943 		inc_src = true;
944 		inc_dst = true;
945 	}
946 
947 	desc = nbpf_desc_get(chan, len);
948 	if (!desc)
949 		return NULL;
950 
951 	desc->async_tx.flags = flags;
952 	desc->async_tx.cookie = -EBUSY;
953 	desc->user_wait = false;
954 
955 	/*
956 	 * This is a private descriptor list, and we own the descriptor. No need
957 	 * to lock.
958 	 */
959 	list_for_each_entry(ldesc, &desc->sg, node) {
960 		int ret = nbpf_prep_one(ldesc, direction,
961 					sg_dma_address(src_sg),
962 					sg_dma_address(dst_sg),
963 					sg_dma_len(mem_sg),
964 					i == len - 1);
965 		if (ret < 0) {
966 			nbpf_desc_put(desc);
967 			return NULL;
968 		}
969 		data_len += sg_dma_len(mem_sg);
970 		if (inc_src)
971 			src_sg = sg_next(src_sg);
972 		if (inc_dst)
973 			dst_sg = sg_next(dst_sg);
974 		mem_sg = direction == DMA_DEV_TO_MEM ? dst_sg : src_sg;
975 		i++;
976 	}
977 
978 	desc->length = data_len;
979 
980 	/* The user has to return the descriptor to us ASAP via .tx_submit() */
981 	return &desc->async_tx;
982 }
983 
984 static struct dma_async_tx_descriptor *nbpf_prep_memcpy(
985 	struct dma_chan *dchan, dma_addr_t dst, dma_addr_t src,
986 	size_t len, unsigned long flags)
987 {
988 	struct nbpf_channel *chan = nbpf_to_chan(dchan);
989 	struct scatterlist dst_sg;
990 	struct scatterlist src_sg;
991 
992 	sg_init_table(&dst_sg, 1);
993 	sg_init_table(&src_sg, 1);
994 
995 	sg_dma_address(&dst_sg) = dst;
996 	sg_dma_address(&src_sg) = src;
997 
998 	sg_dma_len(&dst_sg) = len;
999 	sg_dma_len(&src_sg) = len;
1000 
1001 	dev_dbg(dchan->device->dev, "%s(): %zu @ %pad -> %pad\n",
1002 		__func__, len, &src, &dst);
1003 
1004 	return nbpf_prep_sg(chan, &src_sg, &dst_sg, 1,
1005 			    DMA_MEM_TO_MEM, flags);
1006 }
1007 
1008 static struct dma_async_tx_descriptor *nbpf_prep_memcpy_sg(
1009 	struct dma_chan *dchan,
1010 	struct scatterlist *dst_sg, unsigned int dst_nents,
1011 	struct scatterlist *src_sg, unsigned int src_nents,
1012 	unsigned long flags)
1013 {
1014 	struct nbpf_channel *chan = nbpf_to_chan(dchan);
1015 
1016 	if (dst_nents != src_nents)
1017 		return NULL;
1018 
1019 	return nbpf_prep_sg(chan, src_sg, dst_sg, src_nents,
1020 			    DMA_MEM_TO_MEM, flags);
1021 }
1022 
1023 static struct dma_async_tx_descriptor *nbpf_prep_slave_sg(
1024 	struct dma_chan *dchan, struct scatterlist *sgl, unsigned int sg_len,
1025 	enum dma_transfer_direction direction, unsigned long flags, void *context)
1026 {
1027 	struct nbpf_channel *chan = nbpf_to_chan(dchan);
1028 	struct scatterlist slave_sg;
1029 
1030 	dev_dbg(dchan->device->dev, "Entry %s()\n", __func__);
1031 
1032 	sg_init_table(&slave_sg, 1);
1033 
1034 	switch (direction) {
1035 	case DMA_MEM_TO_DEV:
1036 		sg_dma_address(&slave_sg) = chan->slave_dst_addr;
1037 		return nbpf_prep_sg(chan, sgl, &slave_sg, sg_len,
1038 				    direction, flags);
1039 
1040 	case DMA_DEV_TO_MEM:
1041 		sg_dma_address(&slave_sg) = chan->slave_src_addr;
1042 		return nbpf_prep_sg(chan, &slave_sg, sgl, sg_len,
1043 				    direction, flags);
1044 
1045 	default:
1046 		return NULL;
1047 	}
1048 }
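
/*
 * Editorial illustration (not part of this driver): a minimal sketch of how a
 * client (slave) driver might use one of these channels through the generic
 * dmaengine API, assuming its DT node carries suitable "dmas" / "dma-names"
 * properties. The device, the "rx" name and MYDEV_RX_FIFO are hypothetical;
 * the dmaengine calls are the standard ones backed by the operations
 * installed in nbpf_probe() below.
 */
#if 0
static int mydev_start_rx_dma(struct device *dev, dma_addr_t buf, size_t len)
{
	struct dma_slave_config cfg = {
		.direction	= DMA_DEV_TO_MEM,
		.src_addr	= MYDEV_RX_FIFO,	/* device FIFO bus address */
		.src_addr_width	= DMA_SLAVE_BUSWIDTH_1_BYTE,
		.src_maxburst	= 1,
	};
	struct dma_async_tx_descriptor *tx;
	struct dma_chan *chan;
	dma_cookie_t cookie;
	int ret;

	chan = dma_request_chan(dev, "rx");	/* resolved via nbpf_of_xlate() */
	if (IS_ERR(chan))
		return PTR_ERR(chan);

	ret = dmaengine_slave_config(chan, &cfg);	/* handled by nbpf_config() */
	if (ret)
		goto release;

	/* buf must already be DMA-mapped for the DMA_DEV_TO_MEM direction */
	tx = dmaengine_prep_slave_single(chan, buf, len, DMA_DEV_TO_MEM,
					 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!tx) {
		ret = -ENOMEM;
		goto release;
	}

	cookie = dmaengine_submit(tx);		/* queues via nbpf_tx_submit() */
	ret = dma_submit_error(cookie);
	if (ret)
		goto release;

	dma_async_issue_pending(chan);		/* starts via nbpf_issue_pending() */
	return 0;

release:
	dma_release_channel(chan);
	return ret;
}
#endif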
1049 
1050 static int nbpf_alloc_chan_resources(struct dma_chan *dchan)
1051 {
1052 	struct nbpf_channel *chan = nbpf_to_chan(dchan);
1053 	int ret;
1054 
1055 	INIT_LIST_HEAD(&chan->free);
1056 	INIT_LIST_HEAD(&chan->free_links);
1057 	INIT_LIST_HEAD(&chan->queued);
1058 	INIT_LIST_HEAD(&chan->active);
1059 	INIT_LIST_HEAD(&chan->done);
1060 
1061 	ret = nbpf_desc_page_alloc(chan);
1062 	if (ret < 0)
1063 		return ret;
1064 
1065 	dev_dbg(dchan->device->dev, "Entry %s(): terminal %u\n", __func__,
1066 		chan->terminal);
1067 
1068 	nbpf_chan_configure(chan);
1069 
1070 	return ret;
1071 }
1072 
1073 static void nbpf_free_chan_resources(struct dma_chan *dchan)
1074 {
1075 	struct nbpf_channel *chan = nbpf_to_chan(dchan);
1076 	struct nbpf_desc_page *dpage, *tmp;
1077 
1078 	dev_dbg(dchan->device->dev, "Entry %s()\n", __func__);
1079 
1080 	nbpf_chan_halt(chan);
1081 	nbpf_chan_idle(chan);
1082 	/* Clean up in case a channel is re-used for MEMCPY after slave DMA */
1083 	nbpf_chan_prepare_default(chan);
1084 
1085 	list_for_each_entry_safe(dpage, tmp, &chan->desc_page, node) {
1086 		struct nbpf_link_desc *ldesc;
1087 		int i;
1088 		list_del(&dpage->node);
1089 		for (i = 0, ldesc = dpage->ldesc;
1090 		     i < ARRAY_SIZE(dpage->ldesc);
1091 		     i++, ldesc++)
1092 			dma_unmap_single(dchan->device->dev, ldesc->hwdesc_dma_addr,
1093 					 sizeof(*ldesc->hwdesc), DMA_TO_DEVICE);
1094 		free_page((unsigned long)dpage);
1095 	}
1096 }
1097 
1098 static struct dma_chan *nbpf_of_xlate(struct of_phandle_args *dma_spec,
1099 				      struct of_dma *ofdma)
1100 {
1101 	struct nbpf_device *nbpf = ofdma->of_dma_data;
1102 	struct dma_chan *dchan;
1103 	struct nbpf_channel *chan;
1104 
1105 	if (dma_spec->args_count != 2)
1106 		return NULL;
1107 
1108 	dchan = dma_get_any_slave_channel(&nbpf->dma_dev);
1109 	if (!dchan)
1110 		return NULL;
1111 
1112 	dev_dbg(dchan->device->dev, "Entry %s(%s)\n", __func__,
1113 		dma_spec->np->name);
1114 
1115 	chan = nbpf_to_chan(dchan);
1116 
1117 	chan->terminal = dma_spec->args[0];
1118 	chan->flags = dma_spec->args[1];
1119 
1120 	nbpf_chan_prepare(chan);
1121 	nbpf_chan_configure(chan);
1122 
1123 	return dchan;
1124 }
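
/*
 * Editorial note: with the two-cell "dmas" binding handled above, a client
 * node names the terminal and the request-line flags from
 * <dt-bindings/dma/nbpfaxi.h>, e.g. (terminal number purely illustrative):
 *
 *	dmas = <&dmac 6 (NBPF_SLAVE_RQ_HIGH | NBPF_SLAVE_RQ_LEVEL)>;
 *	dma-names = "rx";
 */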
1125 
1126 static void nbpf_chan_tasklet(unsigned long data)
1127 {
1128 	struct nbpf_channel *chan = (struct nbpf_channel *)data;
1129 	struct nbpf_desc *desc, *tmp;
1130 	struct dmaengine_desc_callback cb;
1131 
1132 	while (!list_empty(&chan->done)) {
1133 		bool found = false, must_put, recycling = false;
1134 
1135 		spin_lock_irq(&chan->lock);
1136 
1137 		list_for_each_entry_safe(desc, tmp, &chan->done, node) {
1138 			if (!desc->user_wait) {
1139 				/* Newly completed descriptor, have to process */
1140 				found = true;
1141 				break;
1142 			} else if (async_tx_test_ack(&desc->async_tx)) {
1143 				/*
1144 				 * This descriptor was waiting for a user ACK,
1145 				 * it can be recycled now.
1146 				 */
1147 				list_del(&desc->node);
1148 				spin_unlock_irq(&chan->lock);
1149 				nbpf_desc_put(desc);
1150 				recycling = true;
1151 				break;
1152 			}
1153 		}
1154 
1155 		if (recycling)
1156 			continue;
1157 
1158 		if (!found) {
1159 			/* This can happen if TERMINATE_ALL has been called */
1160 			spin_unlock_irq(&chan->lock);
1161 			break;
1162 		}
1163 
1164 		dma_cookie_complete(&desc->async_tx);
1165 
1166 		/*
1167 		 * Decide now, while holding the lock: once it is released we must
1168 		 * not dereference desc if it stays on the "done" list
1169 		 */
1170 		if (async_tx_test_ack(&desc->async_tx)) {
1171 			list_del(&desc->node);
1172 			must_put = true;
1173 		} else {
1174 			desc->user_wait = true;
1175 			must_put = false;
1176 		}
1177 
1178 		dmaengine_desc_get_callback(&desc->async_tx, &cb);
1179 
1180 		/* ack and callback completed descriptor */
1181 		spin_unlock_irq(&chan->lock);
1182 
1183 		dmaengine_desc_callback_invoke(&cb, NULL);
1184 
1185 		if (must_put)
1186 			nbpf_desc_put(desc);
1187 	}
1188 }
1189 
1190 static irqreturn_t nbpf_chan_irq(int irq, void *dev)
1191 {
1192 	struct nbpf_channel *chan = dev;
1193 	bool done = nbpf_status_get(chan);
1194 	struct nbpf_desc *desc;
1195 	irqreturn_t ret;
1196 	bool bh = false;
1197 
1198 	if (!done)
1199 		return IRQ_NONE;
1200 
1201 	nbpf_status_ack(chan);
1202 
1203 	dev_dbg(&chan->dma_chan.dev->device, "%s()\n", __func__);
1204 
1205 	spin_lock(&chan->lock);
1206 	desc = chan->running;
1207 	if (WARN_ON(!desc)) {
1208 		ret = IRQ_NONE;
1209 		goto unlock;
1210 	} else {
1211 		ret = IRQ_HANDLED;
1212 		bh = true;
1213 	}
1214 
1215 	list_move_tail(&desc->node, &chan->done);
1216 	chan->running = NULL;
1217 
1218 	if (!list_empty(&chan->active)) {
1219 		desc = list_first_entry(&chan->active,
1220 					struct nbpf_desc, node);
1221 		if (!nbpf_start(desc))
1222 			chan->running = desc;
1223 	}
1224 
1225 unlock:
1226 	spin_unlock(&chan->lock);
1227 
1228 	if (bh)
1229 		tasklet_schedule(&chan->tasklet);
1230 
1231 	return ret;
1232 }
1233 
1234 static irqreturn_t nbpf_err_irq(int irq, void *dev)
1235 {
1236 	struct nbpf_device *nbpf = dev;
1237 	u32 error = nbpf_error_get(nbpf);
1238 
1239 	dev_warn(nbpf->dma_dev.dev, "DMA error IRQ %u\n", irq);
1240 
1241 	if (!error)
1242 		return IRQ_NONE;
1243 
1244 	do {
1245 		struct nbpf_channel *chan = nbpf_error_get_channel(nbpf, error);
1246 		/* On error: abort all queued transfers, no callback */
1247 		nbpf_error_clear(chan);
1248 		nbpf_chan_idle(chan);
1249 		error = nbpf_error_get(nbpf);
1250 	} while (error);
1251 
1252 	return IRQ_HANDLED;
1253 }
1254 
1255 static int nbpf_chan_probe(struct nbpf_device *nbpf, int n)
1256 {
1257 	struct dma_device *dma_dev = &nbpf->dma_dev;
1258 	struct nbpf_channel *chan = nbpf->chan + n;
1259 	int ret;
1260 
1261 	chan->nbpf = nbpf;
1262 	chan->base = nbpf->base + NBPF_REG_CHAN_OFFSET + NBPF_REG_CHAN_SIZE * n;
1263 	INIT_LIST_HEAD(&chan->desc_page);
1264 	spin_lock_init(&chan->lock);
1265 	chan->dma_chan.device = dma_dev;
1266 	dma_cookie_init(&chan->dma_chan);
1267 	nbpf_chan_prepare_default(chan);
1268 
1269 	dev_dbg(dma_dev->dev, "%s(): channel %d: -> %p\n", __func__, n, chan->base);
1270 
1271 	snprintf(chan->name, sizeof(chan->name), "nbpf %d", n);
1272 
1273 	tasklet_init(&chan->tasklet, nbpf_chan_tasklet, (unsigned long)chan);
1274 	ret = devm_request_irq(dma_dev->dev, chan->irq,
1275 			nbpf_chan_irq, IRQF_SHARED,
1276 			chan->name, chan);
1277 	if (ret < 0)
1278 		return ret;
1279 
1280 	/* Add the channel to DMA device channel list */
1281 	list_add_tail(&chan->dma_chan.device_node,
1282 		      &dma_dev->channels);
1283 
1284 	return 0;
1285 }
1286 
1287 static const struct of_device_id nbpf_match[] = {
1288 	{.compatible = "renesas,nbpfaxi64dmac1b4",	.data = &nbpf_cfg[NBPF1B4]},
1289 	{.compatible = "renesas,nbpfaxi64dmac1b8",	.data = &nbpf_cfg[NBPF1B8]},
1290 	{.compatible = "renesas,nbpfaxi64dmac1b16",	.data = &nbpf_cfg[NBPF1B16]},
1291 	{.compatible = "renesas,nbpfaxi64dmac4b4",	.data = &nbpf_cfg[NBPF4B4]},
1292 	{.compatible = "renesas,nbpfaxi64dmac4b8",	.data = &nbpf_cfg[NBPF4B8]},
1293 	{.compatible = "renesas,nbpfaxi64dmac4b16",	.data = &nbpf_cfg[NBPF4B16]},
1294 	{.compatible = "renesas,nbpfaxi64dmac8b4",	.data = &nbpf_cfg[NBPF8B4]},
1295 	{.compatible = "renesas,nbpfaxi64dmac8b8",	.data = &nbpf_cfg[NBPF8B8]},
1296 	{.compatible = "renesas,nbpfaxi64dmac8b16",	.data = &nbpf_cfg[NBPF8B16]},
1297 	{}
1298 };
1299 MODULE_DEVICE_TABLE(of, nbpf_match);
1300 
1301 static int nbpf_probe(struct platform_device *pdev)
1302 {
1303 	struct device *dev = &pdev->dev;
1304 	const struct of_device_id *of_id = of_match_device(nbpf_match, dev);
1305 	struct device_node *np = dev->of_node;
1306 	struct nbpf_device *nbpf;
1307 	struct dma_device *dma_dev;
1308 	struct resource *iomem, *irq_res;
1309 	const struct nbpf_config *cfg;
1310 	int num_channels;
1311 	int ret, irq, eirq, i;
1312 	int irqbuf[9] /* maximum 8 channels + error IRQ */;
1313 	unsigned int irqs = 0;
1314 
1315 	BUILD_BUG_ON(sizeof(struct nbpf_desc_page) > PAGE_SIZE);
1316 
1317 	/* DT only */
1318 	if (!np || !of_id || !of_id->data)
1319 		return -ENODEV;
1320 
1321 	cfg = of_id->data;
1322 	num_channels = cfg->num_channels;
1323 
1324 	nbpf = devm_kzalloc(dev, sizeof(*nbpf) + num_channels *
1325 			    sizeof(nbpf->chan[0]), GFP_KERNEL);
1326 	if (!nbpf)
1327 		return -ENOMEM;
1328 
1329 	dma_dev = &nbpf->dma_dev;
1330 	dma_dev->dev = dev;
1331 
1332 	iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1333 	nbpf->base = devm_ioremap_resource(dev, iomem);
1334 	if (IS_ERR(nbpf->base))
1335 		return PTR_ERR(nbpf->base);
1336 
1337 	nbpf->clk = devm_clk_get(dev, NULL);
1338 	if (IS_ERR(nbpf->clk))
1339 		return PTR_ERR(nbpf->clk);
1340 
1341 	of_property_read_u32(np, "max-burst-mem-read",
1342 			     &nbpf->max_burst_mem_read);
1343 	of_property_read_u32(np, "max-burst-mem-write",
1344 			     &nbpf->max_burst_mem_write);
1345 
1346 	nbpf->config = cfg;
1347 
1348 	for (i = 0; irqs < ARRAY_SIZE(irqbuf); i++) {
1349 		irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, i);
1350 		if (!irq_res)
1351 			break;
1352 
1353 		for (irq = irq_res->start; irq <= irq_res->end;
1354 		     irq++, irqs++)
1355 			irqbuf[irqs] = irq;
1356 	}
1357 
1358 	/*
1359 	 * 3 IRQ resource schemes are supported:
1360 	 * 1. 1 shared IRQ for error and all channels
1361 	 * 2. 2 IRQs: one for error and one shared for all channels
1362 	 * 3. 1 IRQ for error and a separate IRQ for each channel
1363 	 */
1364 	if (irqs != 1 && irqs != 2 && irqs != num_channels + 1)
1365 		return -ENXIO;
1366 
1367 	if (irqs == 1) {
1368 		eirq = irqbuf[0];
1369 
1370 		for (i = 0; i < num_channels; i++)
1371 			nbpf->chan[i].irq = irqbuf[0];
1372 	} else {
1373 		eirq = platform_get_irq_byname(pdev, "error");
1374 		if (eirq < 0)
1375 			return eirq;
1376 
1377 		if (irqs == num_channels + 1) {
1378 			struct nbpf_channel *chan;
1379 
1380 			for (i = 0, chan = nbpf->chan; i < num_channels;
1381 			     i++, chan++) {
1382 				/* Skip the error IRQ */
1383 				if (irqbuf[i] == eirq)
1384 					i++;
1385 				chan->irq = irqbuf[i];
1386 			}
1387 
1388 			if (chan != nbpf->chan + num_channels)
1389 				return -EINVAL;
1390 		} else {
1391 			/* 2 IRQs and more than one channel */
1392 			if (irqbuf[0] == eirq)
1393 				irq = irqbuf[1];
1394 			else
1395 				irq = irqbuf[0];
1396 
1397 			for (i = 0; i < num_channels; i++)
1398 				nbpf->chan[i].irq = irq;
1399 		}
1400 	}
1401 
1402 	ret = devm_request_irq(dev, eirq, nbpf_err_irq,
1403 			       IRQF_SHARED, "dma error", nbpf);
1404 	if (ret < 0)
1405 		return ret;
1406 	nbpf->eirq = eirq;
1407 
1408 	INIT_LIST_HEAD(&dma_dev->channels);
1409 
1410 	/* Create DMA Channel */
1411 	for (i = 0; i < num_channels; i++) {
1412 		ret = nbpf_chan_probe(nbpf, i);
1413 		if (ret < 0)
1414 			return ret;
1415 	}
1416 
1417 	dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
1418 	dma_cap_set(DMA_SLAVE, dma_dev->cap_mask);
1419 	dma_cap_set(DMA_PRIVATE, dma_dev->cap_mask);
1420 	dma_cap_set(DMA_SG, dma_dev->cap_mask);
1421 
1422 	/* Common and MEMCPY operations */
1423 	dma_dev->device_alloc_chan_resources
1424 		= nbpf_alloc_chan_resources;
1425 	dma_dev->device_free_chan_resources = nbpf_free_chan_resources;
1426 	dma_dev->device_prep_dma_sg = nbpf_prep_memcpy_sg;
1427 	dma_dev->device_prep_dma_memcpy = nbpf_prep_memcpy;
1428 	dma_dev->device_tx_status = nbpf_tx_status;
1429 	dma_dev->device_issue_pending = nbpf_issue_pending;
1430 
1431 	/*
1432 	 * If we drop support for unaligned MEMCPY buffer addresses and / or
1433 	 * lengths by setting
1434 	 * dma_dev->copy_align = 4;
1435 	 * then we can set transfer length to 4 bytes in nbpf_prep_one() for
1436 	 * DMA_MEM_TO_MEM
1437 	 */
1438 
1439 	/* Compulsory for DMA_SLAVE fields */
1440 	dma_dev->device_prep_slave_sg = nbpf_prep_slave_sg;
1441 	dma_dev->device_config = nbpf_config;
1442 	dma_dev->device_pause = nbpf_pause;
1443 	dma_dev->device_terminate_all = nbpf_terminate_all;
1444 
1445 	dma_dev->src_addr_widths = NBPF_DMA_BUSWIDTHS;
1446 	dma_dev->dst_addr_widths = NBPF_DMA_BUSWIDTHS;
1447 	dma_dev->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
1448 
1449 	platform_set_drvdata(pdev, nbpf);
1450 
1451 	ret = clk_prepare_enable(nbpf->clk);
1452 	if (ret < 0)
1453 		return ret;
1454 
1455 	nbpf_configure(nbpf);
1456 
1457 	ret = dma_async_device_register(dma_dev);
1458 	if (ret < 0)
1459 		goto e_clk_off;
1460 
1461 	ret = of_dma_controller_register(np, nbpf_of_xlate, nbpf);
1462 	if (ret < 0)
1463 		goto e_dma_dev_unreg;
1464 
1465 	return 0;
1466 
1467 e_dma_dev_unreg:
1468 	dma_async_device_unregister(dma_dev);
1469 e_clk_off:
1470 	clk_disable_unprepare(nbpf->clk);
1471 
1472 	return ret;
1473 }
1474 
1475 static int nbpf_remove(struct platform_device *pdev)
1476 {
1477 	struct nbpf_device *nbpf = platform_get_drvdata(pdev);
1478 	int i;
1479 
1480 	devm_free_irq(&pdev->dev, nbpf->eirq, nbpf);
1481 
1482 	for (i = 0; i < nbpf->config->num_channels; i++) {
1483 		struct nbpf_channel *chan = nbpf->chan + i;
1484 
1485 		devm_free_irq(&pdev->dev, chan->irq, chan);
1486 
1487 		tasklet_kill(&chan->tasklet);
1488 	}
1489 
1490 	of_dma_controller_free(pdev->dev.of_node);
1491 	dma_async_device_unregister(&nbpf->dma_dev);
1492 	clk_disable_unprepare(nbpf->clk);
1493 
1494 	return 0;
1495 }
1496 
1497 static const struct platform_device_id nbpf_ids[] = {
1498 	{"nbpfaxi64dmac1b4",	(kernel_ulong_t)&nbpf_cfg[NBPF1B4]},
1499 	{"nbpfaxi64dmac1b8",	(kernel_ulong_t)&nbpf_cfg[NBPF1B8]},
1500 	{"nbpfaxi64dmac1b16",	(kernel_ulong_t)&nbpf_cfg[NBPF1B16]},
1501 	{"nbpfaxi64dmac4b4",	(kernel_ulong_t)&nbpf_cfg[NBPF4B4]},
1502 	{"nbpfaxi64dmac4b8",	(kernel_ulong_t)&nbpf_cfg[NBPF4B8]},
1503 	{"nbpfaxi64dmac4b16",	(kernel_ulong_t)&nbpf_cfg[NBPF4B16]},
1504 	{"nbpfaxi64dmac8b4",	(kernel_ulong_t)&nbpf_cfg[NBPF8B4]},
1505 	{"nbpfaxi64dmac8b8",	(kernel_ulong_t)&nbpf_cfg[NBPF8B8]},
1506 	{"nbpfaxi64dmac8b16",	(kernel_ulong_t)&nbpf_cfg[NBPF8B16]},
1507 	{},
1508 };
1509 MODULE_DEVICE_TABLE(platform, nbpf_ids);
1510 
1511 #ifdef CONFIG_PM
1512 static int nbpf_runtime_suspend(struct device *dev)
1513 {
1514 	struct nbpf_device *nbpf = platform_get_drvdata(to_platform_device(dev));
1515 	clk_disable_unprepare(nbpf->clk);
1516 	return 0;
1517 }
1518 
1519 static int nbpf_runtime_resume(struct device *dev)
1520 {
1521 	struct nbpf_device *nbpf = platform_get_drvdata(to_platform_device(dev));
1522 	return clk_prepare_enable(nbpf->clk);
1523 }
1524 #endif
1525 
1526 static const struct dev_pm_ops nbpf_pm_ops = {
1527 	SET_RUNTIME_PM_OPS(nbpf_runtime_suspend, nbpf_runtime_resume, NULL)
1528 };
1529 
1530 static struct platform_driver nbpf_driver = {
1531 	.driver = {
1532 		.name = "dma-nbpf",
1533 		.of_match_table = nbpf_match,
1534 		.pm = &nbpf_pm_ops,
1535 	},
1536 	.id_table = nbpf_ids,
1537 	.probe = nbpf_probe,
1538 	.remove = nbpf_remove,
1539 };
1540 
1541 module_platform_driver(nbpf_driver);
1542 
1543 MODULE_AUTHOR("Guennadi Liakhovetski <g.liakhovetski@gmx.de>");
1544 MODULE_DESCRIPTION("dmaengine driver for NBPFAXI64* DMACs");
1545 MODULE_LICENSE("GPL v2");
1546