xref: /openbmc/linux/drivers/dma/sa11x0-dma.c (revision d0e22329)
1 /*
2  * SA11x0 DMAengine support
3  *
4  * Copyright (C) 2012 Russell King
5  *   Derived in part from arch/arm/mach-sa1100/dma.c,
6  *   Copyright (C) 2000, 2001 by Nicolas Pitre
7  *
8  * This program is free software; you can redistribute it and/or modify
9  * it under the terms of the GNU General Public License version 2 as
10  * published by the Free Software Foundation.
11  */
12 #include <linux/sched.h>
13 #include <linux/device.h>
14 #include <linux/dmaengine.h>
15 #include <linux/init.h>
16 #include <linux/interrupt.h>
17 #include <linux/kernel.h>
18 #include <linux/module.h>
19 #include <linux/platform_device.h>
20 #include <linux/slab.h>
21 #include <linux/spinlock.h>
22 
23 #include "virt-dma.h"
24 
/* Hardware geometry and alignment limits */
#define NR_PHY_CHAN	6	/* number of physical DMA channels */
#define DMA_ALIGN	3	/* address/length alignment mask (word aligned) */
#define DMA_MAX_SIZE	0x1fff	/* maximum byte count per hardware buffer */
#define DMA_CHUNK_SIZE	0x1000	/* NOTE(review): appears unused in this file */

/* Per-channel register offsets (each channel spans DMA_SIZE bytes) */
#define DMA_DDAR	0x00	/* device address and control register */
#define DMA_DCSR_S	0x04	/* control/status: write-1-to-set */
#define DMA_DCSR_C	0x08	/* control/status: write-1-to-clear */
#define DMA_DCSR_R	0x0c	/* control/status: read */
#define DMA_DBSA	0x10	/* buffer A start address */
#define DMA_DBTA	0x14	/* buffer A transfer count */
#define DMA_DBSB	0x18	/* buffer B start address */
#define DMA_DBTB	0x1c	/* buffer B transfer count */
#define DMA_SIZE	0x20	/* register stride between physical channels */

/* DCSR bits */
#define DCSR_RUN	(1 << 0)	/* channel enabled */
#define DCSR_IE		(1 << 1)	/* interrupt enable */
#define DCSR_ERROR	(1 << 2)	/* error status */
#define DCSR_DONEA	(1 << 3)	/* buffer A completed */
#define DCSR_STRTA	(1 << 4)	/* buffer A started */
#define DCSR_DONEB	(1 << 5)	/* buffer B completed */
#define DCSR_STRTB	(1 << 6)	/* buffer B started */
#define DCSR_BIU	(1 << 7)	/* buffer in use (A/B selector) */

/* DDAR control bits and device selection field (bits 7:4) */
#define DDAR_RW		(1 << 0)	/* 0 = W, 1 = R */
#define DDAR_E		(1 << 1)	/* 0 = LE, 1 = BE */
#define DDAR_BS		(1 << 2)	/* 0 = BS4, 1 = BS8 */
#define DDAR_DW		(1 << 3)	/* 0 = 8b, 1 = 16b */
#define DDAR_Ser0UDCTr	(0x0 << 4)
#define DDAR_Ser0UDCRc	(0x1 << 4)
#define DDAR_Ser1SDLCTr	(0x2 << 4)
#define DDAR_Ser1SDLCRc	(0x3 << 4)
#define DDAR_Ser1UARTTr	(0x4 << 4)
#define DDAR_Ser1UARTRc	(0x5 << 4)
#define DDAR_Ser2ICPTr	(0x6 << 4)
#define DDAR_Ser2ICPRc	(0x7 << 4)
#define DDAR_Ser3UARTTr	(0x8 << 4)
#define DDAR_Ser3UARTRc	(0x9 << 4)
#define DDAR_Ser4MCP0Tr	(0xa << 4)
#define DDAR_Ser4MCP0Rc	(0xb << 4)
#define DDAR_Ser4MCP1Tr	(0xc << 4)
#define DDAR_Ser4MCP1Rc	(0xd << 4)
#define DDAR_Ser4SSPTr	(0xe << 4)
#define DDAR_Ser4SSPRc	(0xf << 4)
69 
/* One hardware buffer: values programmed into DBSx (addr) and DBTx (len) */
struct sa11x0_dma_sg {
	u32			addr;
	u32			len;
};
74 
75 struct sa11x0_dma_desc {
76 	struct virt_dma_desc	vd;
77 
78 	u32			ddar;
79 	size_t			size;
80 	unsigned		period;
81 	bool			cyclic;
82 
83 	unsigned		sglen;
84 	struct sa11x0_dma_sg	sg[0];
85 };
86 
87 struct sa11x0_dma_phy;
88 
/* Virtual (client-visible) DMA channel */
struct sa11x0_dma_chan {
	struct virt_dma_chan	vc;

	/* protected by c->vc.lock */
	struct sa11x0_dma_phy	*phy;	/* physical channel, NULL while unassigned */
	enum dma_status		status;	/* DMA_IN_PROGRESS or DMA_PAUSED */

	/* protected by d->lock */
	struct list_head	node;	/* link on d->chan_pending when waiting */

	u32			ddar;	/* device address/control word, built by device_config */
	const char		*name;	/* fixed name from chan_desc[], matched by filter fn */
};
102 
/* One of the six physical DMA channels */
struct sa11x0_dma_phy {
	void __iomem		*base;	/* channel register base */
	struct sa11x0_dma_dev	*dev;
	unsigned		num;	/* channel index, 0..NR_PHY_CHAN-1 */

	struct sa11x0_dma_chan	*vchan;	/* owning virtual channel, protected by dev->lock */

	/* Protected by c->vc.lock */
	unsigned		sg_load;	/* next sg entry to load into hardware */
	struct sa11x0_dma_desc	*txd_load;	/* descriptor currently being loaded */
	unsigned		sg_done;	/* number of sg entries completed */
	struct sa11x0_dma_desc	*txd_done;	/* descriptor currently completing */
	u32			dbs[2];		/* saved DBSA/DBSB across suspend */
	u32			dbt[2];		/* saved DBTA/DBTB across suspend */
	u32			dcsr;		/* saved DCSR across suspend */
};
119 
/* Driver instance: the dmaengine device plus its physical channels */
struct sa11x0_dma_dev {
	struct dma_device	slave;
	void __iomem		*base;		/* controller register base */
	spinlock_t		lock;		/* protects chan_pending and phy[].vchan */
	struct tasklet_struct	task;		/* assigns pending vchans to free pchans */
	struct list_head	chan_pending;	/* vchans waiting for a physical channel */
	struct sa11x0_dma_phy	phy[NR_PHY_CHAN];
};
128 
129 static struct sa11x0_dma_chan *to_sa11x0_dma_chan(struct dma_chan *chan)
130 {
131 	return container_of(chan, struct sa11x0_dma_chan, vc.chan);
132 }
133 
134 static struct sa11x0_dma_dev *to_sa11x0_dma(struct dma_device *dmadev)
135 {
136 	return container_of(dmadev, struct sa11x0_dma_dev, slave);
137 }
138 
139 static struct sa11x0_dma_desc *sa11x0_dma_next_desc(struct sa11x0_dma_chan *c)
140 {
141 	struct virt_dma_desc *vd = vchan_next_desc(&c->vc);
142 
143 	return vd ? container_of(vd, struct sa11x0_dma_desc, vd) : NULL;
144 }
145 
146 static void sa11x0_dma_free_desc(struct virt_dma_desc *vd)
147 {
148 	kfree(container_of(vd, struct sa11x0_dma_desc, vd));
149 }
150 
/*
 * Take @txd off the issued list and make it the descriptor currently
 * being loaded onto physical channel @p.  Called with the vchan lock
 * held (txd_load/sg_load are protected by c->vc.lock).
 */
static void sa11x0_dma_start_desc(struct sa11x0_dma_phy *p, struct sa11x0_dma_desc *txd)
{
	list_del(&txd->vd.node);
	p->txd_load = txd;
	p->sg_load = 0;

	dev_vdbg(p->dev->slave.dev, "pchan %u: txd %p[%x]: starting: DDAR:%x\n",
		p->num, &txd->vd, txd->vd.tx.cookie, txd->ddar);
}
160 
/*
 * Load the next sg entry of the in-flight descriptor into whichever of
 * the two hardware buffers (A/B) is free on physical channel @p, and
 * start it.  Called with the vchan lock held.  May chain to the next
 * queued descriptor if it shares the same DDAR settings.
 */
static void noinline sa11x0_dma_start_sg(struct sa11x0_dma_phy *p,
	struct sa11x0_dma_chan *c)
{
	struct sa11x0_dma_desc *txd = p->txd_load;
	struct sa11x0_dma_sg *sg;
	void __iomem *base = p->base;
	unsigned dbsx, dbtx;
	u32 dcsr;

	/* Nothing is currently being loaded onto this physical channel */
	if (!txd)
		return;

	dcsr = readl_relaxed(base + DMA_DCSR_R);

	/* Don't try to load the next transfer if both buffers are started */
	if ((dcsr & (DCSR_STRTA | DCSR_STRTB)) == (DCSR_STRTA | DCSR_STRTB))
		return;

	if (p->sg_load == txd->sglen) {
		if (!txd->cyclic) {
			struct sa11x0_dma_desc *txn = sa11x0_dma_next_desc(c);

			/*
			 * We have reached the end of the current descriptor.
			 * Peek at the next descriptor, and if compatible with
			 * the current, start processing it.
			 */
			if (txn && txn->ddar == txd->ddar) {
				txd = txn;
				sa11x0_dma_start_desc(p, txn);
			} else {
				p->txd_load = NULL;
				return;
			}
		} else {
			/* Cyclic: reset back to beginning */
			p->sg_load = 0;
		}
	}

	sg = &txd->sg[p->sg_load++];

	/*
	 * Select buffer to load according to channel status: use buffer A
	 * when B is both in use and started, or when A is neither in use
	 * nor started; otherwise use buffer B.
	 */
	if (((dcsr & (DCSR_BIU | DCSR_STRTB)) == (DCSR_BIU | DCSR_STRTB)) ||
	    ((dcsr & (DCSR_BIU | DCSR_STRTA)) == 0)) {
		dbsx = DMA_DBSA;
		dbtx = DMA_DBTA;
		dcsr = DCSR_STRTA | DCSR_IE | DCSR_RUN;
	} else {
		dbsx = DMA_DBSB;
		dbtx = DMA_DBTB;
		dcsr = DCSR_STRTB | DCSR_IE | DCSR_RUN;
	}

	/*
	 * Address/length writes may be relaxed; the final writel() carries
	 * the barrier so they reach the device before the start bit is set.
	 */
	writel_relaxed(sg->addr, base + dbsx);
	writel_relaxed(sg->len, base + dbtx);
	writel(dcsr, base + DMA_DCSR_S);

	dev_dbg(p->dev->slave.dev, "pchan %u: load: DCSR:%02x DBS%c:%08x DBT%c:%08x\n",
		p->num, dcsr,
		'A' + (dbsx == DMA_DBSB), sg->addr,
		'A' + (dbtx == DMA_DBTB), sg->len);
}
224 
/*
 * Account one completed hardware buffer on @p.  When the whole
 * descriptor has finished, either complete its cookie (slave_sg) or
 * fire the cyclic callback at each period boundary, then try to load
 * the next sg entry.  Called from the IRQ handler with the vchan lock
 * held.
 */
static void noinline sa11x0_dma_complete(struct sa11x0_dma_phy *p,
	struct sa11x0_dma_chan *c)
{
	struct sa11x0_dma_desc *txd = p->txd_done;

	if (++p->sg_done == txd->sglen) {
		if (!txd->cyclic) {
			vchan_cookie_complete(&txd->vd);

			p->sg_done = 0;
			p->txd_done = p->txd_load;

			/* No further work queued: let the tasklet free or
			 * reassign this physical channel */
			if (!p->txd_done)
				tasklet_schedule(&p->dev->task);
		} else {
			if ((p->sg_done % txd->period) == 0)
				vchan_cyclic_callback(&txd->vd);

			/* Cyclic: reset back to beginning */
			p->sg_done = 0;
		}
	}

	sa11x0_dma_start_sg(p, c);
}
250 
/*
 * Per-physical-channel interrupt handler.  Acknowledges error/done
 * status and advances the transfer on the owning virtual channel.
 * Each completed buffer (A and/or B) is accounted separately.
 */
static irqreturn_t sa11x0_dma_irq(int irq, void *dev_id)
{
	struct sa11x0_dma_phy *p = dev_id;
	struct sa11x0_dma_dev *d = p->dev;
	struct sa11x0_dma_chan *c;
	u32 dcsr;

	dcsr = readl_relaxed(p->base + DMA_DCSR_R);
	if (!(dcsr & (DCSR_ERROR | DCSR_DONEA | DCSR_DONEB)))
		return IRQ_NONE;

	/* Clear reported status bits */
	writel_relaxed(dcsr & (DCSR_ERROR | DCSR_DONEA | DCSR_DONEB),
		p->base + DMA_DCSR_C);

	dev_dbg(d->slave.dev, "pchan %u: irq: DCSR:%02x\n", p->num, dcsr);

	if (dcsr & DCSR_ERROR) {
		dev_err(d->slave.dev, "pchan %u: error. DCSR:%02x DDAR:%08x DBSA:%08x DBTA:%08x DBSB:%08x DBTB:%08x\n",
			p->num, dcsr,
			readl_relaxed(p->base + DMA_DDAR),
			readl_relaxed(p->base + DMA_DBSA),
			readl_relaxed(p->base + DMA_DBTA),
			readl_relaxed(p->base + DMA_DBSB),
			readl_relaxed(p->base + DMA_DBTB));
	}

	c = p->vchan;
	if (c) {
		unsigned long flags;

		spin_lock_irqsave(&c->vc.lock, flags);
		/*
		 * Now that we're holding the lock, check that the vchan
		 * really is associated with this pchan before touching the
		 * hardware.  This should always succeed, because we won't
		 * change p->vchan or c->phy while the channel is actively
		 * transferring.
		 */
		if (c->phy == p) {
			if (dcsr & DCSR_DONEA)
				sa11x0_dma_complete(p, c);
			if (dcsr & DCSR_DONEB)
				sa11x0_dma_complete(p, c);
		}
		spin_unlock_irqrestore(&c->vc.lock, flags);
	}

	return IRQ_HANDLED;
}
301 
/*
 * Start the next issued descriptor on the physical channel bound to @c.
 * Programs DDAR and kicks both hardware buffers.  Called with the vchan
 * lock held and c->phy valid.
 */
static void sa11x0_dma_start_txd(struct sa11x0_dma_chan *c)
{
	struct sa11x0_dma_desc *txd = sa11x0_dma_next_desc(c);

	/* If the issued list is empty, we have no further txds to process */
	if (txd) {
		struct sa11x0_dma_phy *p = c->phy;

		sa11x0_dma_start_desc(p, txd);
		p->txd_done = txd;
		p->sg_done = 0;

		/* The channel should not have any transfers started */
		WARN_ON(readl_relaxed(p->base + DMA_DCSR_R) &
				      (DCSR_STRTA | DCSR_STRTB));

		/* Clear the run and start bits before changing DDAR */
		writel_relaxed(DCSR_RUN | DCSR_STRTA | DCSR_STRTB,
			       p->base + DMA_DCSR_C);
		writel_relaxed(txd->ddar, p->base + DMA_DDAR);

		/* Try to start both buffers */
		sa11x0_dma_start_sg(p, c);
		sa11x0_dma_start_sg(p, c);
	}
}
328 
/*
 * Channel allocator tasklet.  Runs in three phases:
 *  1. release physical channels whose virtual channel has no more work,
 *  2. under d->lock, hand free physical channels to pending vchans,
 *  3. start the first descriptor on each newly bound pair.
 * Phases 1 and 3 take the per-vchan lock; phase 2 takes the device lock,
 * so the two locks are never held together in the same order twice.
 */
static void sa11x0_dma_tasklet(unsigned long arg)
{
	struct sa11x0_dma_dev *d = (struct sa11x0_dma_dev *)arg;
	struct sa11x0_dma_phy *p;
	struct sa11x0_dma_chan *c;
	unsigned pch, pch_alloc = 0;

	dev_dbg(d->slave.dev, "tasklet enter\n");

	list_for_each_entry(c, &d->slave.channels, vc.chan.device_node) {
		spin_lock_irq(&c->vc.lock);
		p = c->phy;
		if (p && !p->txd_done) {
			sa11x0_dma_start_txd(c);
			if (!p->txd_done) {
				/* No current txd associated with this channel */
				dev_dbg(d->slave.dev, "pchan %u: free\n", p->num);

				/* Mark this channel free */
				c->phy = NULL;
				p->vchan = NULL;
			}
		}
		spin_unlock_irq(&c->vc.lock);
	}

	spin_lock_irq(&d->lock);
	for (pch = 0; pch < NR_PHY_CHAN; pch++) {
		p = &d->phy[pch];

		if (p->vchan == NULL && !list_empty(&d->chan_pending)) {
			c = list_first_entry(&d->chan_pending,
				struct sa11x0_dma_chan, node);
			list_del_init(&c->node);

			pch_alloc |= 1 << pch;

			/* Mark this channel allocated */
			p->vchan = c;

			dev_dbg(d->slave.dev, "pchan %u: alloc vchan %p\n", pch, &c->vc);
		}
	}
	spin_unlock_irq(&d->lock);

	for (pch = 0; pch < NR_PHY_CHAN; pch++) {
		if (pch_alloc & (1 << pch)) {
			p = &d->phy[pch];
			c = p->vchan;

			spin_lock_irq(&c->vc.lock);
			c->phy = p;

			sa11x0_dma_start_txd(c);
			spin_unlock_irq(&c->vc.lock);
		}
	}

	dev_dbg(d->slave.dev, "tasklet exit\n");
}
389 
390 
/*
 * dmaengine free_chan_resources callback: remove the channel from the
 * device's pending list and free any descriptors still held by the
 * virtual channel layer.
 */
static void sa11x0_dma_free_chan_resources(struct dma_chan *chan)
{
	struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
	struct sa11x0_dma_dev *d = to_sa11x0_dma(chan->device);
	unsigned long flags;

	spin_lock_irqsave(&d->lock, flags);
	list_del_init(&c->node);
	spin_unlock_irqrestore(&d->lock, flags);

	vchan_free_chan_resources(&c->vc);
}
403 
/*
 * Return the current bus address of the in-progress transfer on @p.
 * The DCSR_BIU and start bits select which buffer's address register
 * (DBSA/DBSB) reflects the active transfer.
 */
static dma_addr_t sa11x0_dma_pos(struct sa11x0_dma_phy *p)
{
	unsigned reg;
	u32 dcsr;

	dcsr = readl_relaxed(p->base + DMA_DCSR_R);

	/* Buffer A is active when it is started but not in use, or when
	 * B is in use but not started; otherwise read buffer B. */
	if ((dcsr & (DCSR_BIU | DCSR_STRTA)) == DCSR_STRTA ||
	    (dcsr & (DCSR_BIU | DCSR_STRTB)) == DCSR_BIU)
		reg = DMA_DBSA;
	else
		reg = DMA_DBSB;

	return readl_relaxed(p->base + reg);
}
419 
/*
 * dmaengine tx_status callback.  Reports completion state for @cookie
 * and, when @state is supplied, computes the residue: the descriptor's
 * full size if still on the issue queue, otherwise the bytes left in
 * the hardware based on the current transfer position.
 */
static enum dma_status sa11x0_dma_tx_status(struct dma_chan *chan,
	dma_cookie_t cookie, struct dma_tx_state *state)
{
	struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
	struct sa11x0_dma_dev *d = to_sa11x0_dma(chan->device);
	struct sa11x0_dma_phy *p;
	struct virt_dma_desc *vd;
	unsigned long flags;
	enum dma_status ret;

	ret = dma_cookie_status(&c->vc.chan, cookie, state);
	if (ret == DMA_COMPLETE)
		return ret;

	/* Caller doesn't want the residue: report paused/in-progress only */
	if (!state)
		return c->status;

	spin_lock_irqsave(&c->vc.lock, flags);
	p = c->phy;

	/*
	 * If the cookie is on our issue queue, then the residue is
	 * its total size.
	 */
	vd = vchan_find_desc(&c->vc, cookie);
	if (vd) {
		state->residue = container_of(vd, struct sa11x0_dma_desc, vd)->size;
	} else if (!p) {
		/* Not queued and no physical channel: nothing outstanding */
		state->residue = 0;
	} else {
		struct sa11x0_dma_desc *txd;
		size_t bytes = 0;

		/* The cookie may be the descriptor completing or the one
		 * being loaded onto the hardware */
		if (p->txd_done && p->txd_done->vd.tx.cookie == cookie)
			txd = p->txd_done;
		else if (p->txd_load && p->txd_load->vd.tx.cookie == cookie)
			txd = p->txd_load;
		else
			txd = NULL;

		ret = c->status;
		if (txd) {
			dma_addr_t addr = sa11x0_dma_pos(p);
			unsigned i;

			dev_vdbg(d->slave.dev, "tx_status: addr:%pad\n", &addr);

			/* Find the sg entry containing the current position;
			 * count its remaining bytes */
			for (i = 0; i < txd->sglen; i++) {
				dev_vdbg(d->slave.dev, "tx_status: [%u] %x+%x\n",
					i, txd->sg[i].addr, txd->sg[i].len);
				if (addr >= txd->sg[i].addr &&
				    addr < txd->sg[i].addr + txd->sg[i].len) {
					unsigned len;

					len = txd->sg[i].len -
						(addr - txd->sg[i].addr);
					dev_vdbg(d->slave.dev, "tx_status: [%u] +%x\n",
						i, len);
					bytes += len;
					i++;
					break;
				}
			}
			/* ...plus every sg entry not yet started */
			for (; i < txd->sglen; i++) {
				dev_vdbg(d->slave.dev, "tx_status: [%u] %x+%x ++\n",
					i, txd->sg[i].addr, txd->sg[i].len);
				bytes += txd->sg[i].len;
			}
		}
		state->residue = bytes;
	}
	spin_unlock_irqrestore(&c->vc.lock, flags);

	dev_vdbg(d->slave.dev, "tx_status: bytes 0x%x\n", state->residue);

	return ret;
}
497 
498 /*
499  * Move pending txds to the issued list, and re-init pending list.
500  * If not already pending, add this channel to the list of pending
501  * channels and trigger the tasklet to run.
502  */
static void sa11x0_dma_issue_pending(struct dma_chan *chan)
{
	struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
	struct sa11x0_dma_dev *d = to_sa11x0_dma(chan->device);
	unsigned long flags;

	spin_lock_irqsave(&c->vc.lock, flags);
	if (vchan_issue_pending(&c->vc)) {
		/* Only queue for a physical channel if we don't have one;
		 * an active pchan picks up new work via start_sg chaining
		 * or the tasklet. */
		if (!c->phy) {
			spin_lock(&d->lock);
			if (list_empty(&c->node)) {
				list_add_tail(&c->node, &d->chan_pending);
				tasklet_schedule(&d->task);
				dev_dbg(d->slave.dev, "vchan %p: issued\n", &c->vc);
			}
			spin_unlock(&d->lock);
		}
	} else
		dev_dbg(d->slave.dev, "vchan %p: nothing to issue\n", &c->vc);
	spin_unlock_irqrestore(&c->vc.lock, flags);
}
524 
/*
 * dmaengine prep_slave_sg callback.  Validates direction and alignment,
 * then builds a descriptor whose hardware sg list splits any scatterlist
 * entry larger than DMA_MAX_SIZE into roughly equal, aligned chunks.
 */
static struct dma_async_tx_descriptor *sa11x0_dma_prep_slave_sg(
	struct dma_chan *chan, struct scatterlist *sg, unsigned int sglen,
	enum dma_transfer_direction dir, unsigned long flags, void *context)
{
	struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
	struct sa11x0_dma_desc *txd;
	struct scatterlist *sgent;
	unsigned i, j = sglen;
	size_t size = 0;

	/* SA11x0 channels can only operate in their native direction */
	if (dir != (c->ddar & DDAR_RW ? DMA_DEV_TO_MEM : DMA_MEM_TO_DEV)) {
		dev_err(chan->device->dev, "vchan %p: bad DMA direction: DDAR:%08x dir:%u\n",
			&c->vc, c->ddar, dir);
		return NULL;
	}

	/* Do not allow zero-sized txds */
	if (sglen == 0)
		return NULL;

	/* First pass: count extra entries needed for oversized segments
	 * (j was seeded with sglen above) and reject unaligned buffers */
	for_each_sg(sg, sgent, sglen, i) {
		dma_addr_t addr = sg_dma_address(sgent);
		unsigned int len = sg_dma_len(sgent);

		if (len > DMA_MAX_SIZE)
			j += DIV_ROUND_UP(len, DMA_MAX_SIZE & ~DMA_ALIGN) - 1;
		if (addr & DMA_ALIGN) {
			dev_dbg(chan->device->dev, "vchan %p: bad buffer alignment: %pad\n",
				&c->vc, &addr);
			return NULL;
		}
	}

	txd = kzalloc(struct_size(txd, sg, j), GFP_ATOMIC);
	if (!txd) {
		dev_dbg(chan->device->dev, "vchan %p: kzalloc failed\n", &c->vc);
		return NULL;
	}

	/* Second pass: fill the hardware sg list */
	j = 0;
	for_each_sg(sg, sgent, sglen, i) {
		dma_addr_t addr = sg_dma_address(sgent);
		unsigned len = sg_dma_len(sgent);

		size += len;

		do {
			unsigned tlen = len;

			/*
			 * Check whether the transfer will fit.  If not, try
			 * to split the transfer up such that we end up with
			 * equal chunks - but make sure that we preserve the
			 * alignment.  This avoids small segments.
			 */
			if (tlen > DMA_MAX_SIZE) {
				unsigned mult = DIV_ROUND_UP(tlen,
					DMA_MAX_SIZE & ~DMA_ALIGN);

				tlen = (tlen / mult) & ~DMA_ALIGN;
			}

			txd->sg[j].addr = addr;
			txd->sg[j].len = tlen;

			addr += tlen;
			len -= tlen;
			j++;
		} while (len);
	}

	txd->ddar = c->ddar;
	txd->size = size;
	txd->sglen = j;

	dev_dbg(chan->device->dev, "vchan %p: txd %p: size %zu nr %u\n",
		&c->vc, &txd->vd, txd->size, txd->sglen);

	return vchan_tx_prep(&c->vc, &txd->vd, flags);
}
606 
/*
 * dmaengine prep_dma_cyclic callback.  Builds a cyclic descriptor over a
 * contiguous buffer of @size bytes with a callback every @period bytes;
 * each period is split into sgperiod aligned chunks of at most
 * DMA_MAX_SIZE.  Assumes size is a whole multiple of period (the WARN_ONs
 * below catch a mismatch).
 */
static struct dma_async_tx_descriptor *sa11x0_dma_prep_dma_cyclic(
	struct dma_chan *chan, dma_addr_t addr, size_t size, size_t period,
	enum dma_transfer_direction dir, unsigned long flags)
{
	struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
	struct sa11x0_dma_desc *txd;
	unsigned i, j, k, sglen, sgperiod;

	/* SA11x0 channels can only operate in their native direction */
	if (dir != (c->ddar & DDAR_RW ? DMA_DEV_TO_MEM : DMA_MEM_TO_DEV)) {
		dev_err(chan->device->dev, "vchan %p: bad DMA direction: DDAR:%08x dir:%u\n",
			&c->vc, c->ddar, dir);
		return NULL;
	}

	/* Chunks per period, and total chunks over the whole buffer */
	sgperiod = DIV_ROUND_UP(period, DMA_MAX_SIZE & ~DMA_ALIGN);
	sglen = size * sgperiod / period;

	/* Do not allow zero-sized txds */
	if (sglen == 0)
		return NULL;

	txd = kzalloc(struct_size(txd, sg, sglen), GFP_ATOMIC);
	if (!txd) {
		dev_dbg(chan->device->dev, "vchan %p: kzalloc failed\n", &c->vc);
		return NULL;
	}

	for (i = k = 0; i < size / period; i++) {
		size_t tlen, len = period;

		for (j = 0; j < sgperiod; j++, k++) {
			tlen = len;

			/* Split oversized chunks into equal aligned pieces,
			 * mirroring the logic in prep_slave_sg */
			if (tlen > DMA_MAX_SIZE) {
				unsigned mult = DIV_ROUND_UP(tlen, DMA_MAX_SIZE & ~DMA_ALIGN);
				tlen = (tlen / mult) & ~DMA_ALIGN;
			}

			txd->sg[k].addr = addr;
			txd->sg[k].len = tlen;
			addr += tlen;
			len -= tlen;
		}

		WARN_ON(len != 0);
	}

	WARN_ON(k != sglen);

	txd->ddar = c->ddar;
	txd->size = size;
	txd->sglen = sglen;
	txd->cyclic = 1;
	txd->period = sgperiod;

	return vchan_tx_prep(&c->vc, &txd->vd, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
}
665 
/*
 * dmaengine device_config callback.  Encodes the slave's address, bus
 * width (8/16-bit) and burst size (4/8) into the channel's DDAR word.
 * The device-select field and RW bit are fixed per channel and preserved.
 */
static int sa11x0_dma_device_config(struct dma_chan *chan,
				    struct dma_slave_config *cfg)
{
	struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
	/* Keep only the device-select field (bits 7:4) and direction bit */
	u32 ddar = c->ddar & ((0xf << 4) | DDAR_RW);
	dma_addr_t addr;
	enum dma_slave_buswidth width;
	u32 maxburst;

	/* DDAR_RW set means this channel reads from the device */
	if (ddar & DDAR_RW) {
		addr = cfg->src_addr;
		width = cfg->src_addr_width;
		maxburst = cfg->src_maxburst;
	} else {
		addr = cfg->dst_addr;
		width = cfg->dst_addr_width;
		maxburst = cfg->dst_maxburst;
	}

	/* Hardware supports only 8/16-bit width and burst of 4 or 8 */
	if ((width != DMA_SLAVE_BUSWIDTH_1_BYTE &&
	     width != DMA_SLAVE_BUSWIDTH_2_BYTES) ||
	    (maxburst != 4 && maxburst != 8))
		return -EINVAL;

	if (width == DMA_SLAVE_BUSWIDTH_2_BYTES)
		ddar |= DDAR_DW;
	if (maxburst == 8)
		ddar |= DDAR_BS;

	dev_dbg(c->vc.chan.device->dev, "vchan %p: dma_slave_config addr %pad width %u burst %u\n",
		&c->vc, &addr, width, maxburst);

	/* DDAR address encoding: top nibble kept in place, bits 21:2 of
	 * the device address shifted into the upper field — matches the
	 * SA-11x0 DDAR layout (confirm against the developer's manual) */
	c->ddar = ddar | (addr & 0xf0000000) | (addr & 0x003ffffc) << 6;

	return 0;
}
702 
703 static int sa11x0_dma_device_pause(struct dma_chan *chan)
704 {
705 	struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
706 	struct sa11x0_dma_dev *d = to_sa11x0_dma(chan->device);
707 	struct sa11x0_dma_phy *p;
708 	LIST_HEAD(head);
709 	unsigned long flags;
710 
711 	dev_dbg(d->slave.dev, "vchan %p: pause\n", &c->vc);
712 	spin_lock_irqsave(&c->vc.lock, flags);
713 	if (c->status == DMA_IN_PROGRESS) {
714 		c->status = DMA_PAUSED;
715 
716 		p = c->phy;
717 		if (p) {
718 			writel(DCSR_RUN | DCSR_IE, p->base + DMA_DCSR_C);
719 		} else {
720 			spin_lock(&d->lock);
721 			list_del_init(&c->node);
722 			spin_unlock(&d->lock);
723 		}
724 	}
725 	spin_unlock_irqrestore(&c->vc.lock, flags);
726 
727 	return 0;
728 }
729 
730 static int sa11x0_dma_device_resume(struct dma_chan *chan)
731 {
732 	struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
733 	struct sa11x0_dma_dev *d = to_sa11x0_dma(chan->device);
734 	struct sa11x0_dma_phy *p;
735 	LIST_HEAD(head);
736 	unsigned long flags;
737 
738 	dev_dbg(d->slave.dev, "vchan %p: resume\n", &c->vc);
739 	spin_lock_irqsave(&c->vc.lock, flags);
740 	if (c->status == DMA_PAUSED) {
741 		c->status = DMA_IN_PROGRESS;
742 
743 		p = c->phy;
744 		if (p) {
745 			writel(DCSR_RUN | DCSR_IE, p->base + DMA_DCSR_S);
746 		} else if (!list_empty(&c->vc.desc_issued)) {
747 			spin_lock(&d->lock);
748 			list_add_tail(&c->node, &d->chan_pending);
749 			spin_unlock(&d->lock);
750 		}
751 	}
752 	spin_unlock_irqrestore(&c->vc.lock, flags);
753 
754 	return 0;
755 }
756 
/*
 * dmaengine terminate_all callback.  Stops the hardware, collects every
 * outstanding descriptor (queued, loading and completing) onto a local
 * list, releases the physical channel, then frees the descriptors
 * outside the lock.
 */
static int sa11x0_dma_device_terminate_all(struct dma_chan *chan)
{
	struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
	struct sa11x0_dma_dev *d = to_sa11x0_dma(chan->device);
	struct sa11x0_dma_phy *p;
	LIST_HEAD(head);
	unsigned long flags;

	dev_dbg(d->slave.dev, "vchan %p: terminate all\n", &c->vc);
	/* Clear the tx descriptor lists */
	spin_lock_irqsave(&c->vc.lock, flags);
	vchan_get_all_descriptors(&c->vc, &head);

	p = c->phy;
	if (p) {
		dev_dbg(d->slave.dev, "pchan %u: terminating\n", p->num);
		/* vchan is assigned to a pchan - stop the channel */
		writel(DCSR_RUN | DCSR_IE |
		       DCSR_STRTA | DCSR_DONEA |
		       DCSR_STRTB | DCSR_DONEB,
		       p->base + DMA_DCSR_C);

		/* txd_load may be the same descriptor as txd_done; only
		 * add it once */
		if (p->txd_load) {
			if (p->txd_load != p->txd_done)
				list_add_tail(&p->txd_load->vd.node, &head);
			p->txd_load = NULL;
		}
		if (p->txd_done) {
			list_add_tail(&p->txd_done->vd.node, &head);
			p->txd_done = NULL;
		}
		c->phy = NULL;
		spin_lock(&d->lock);
		p->vchan = NULL;
		spin_unlock(&d->lock);
		/* Let the tasklet hand the freed pchan to a waiting vchan */
		tasklet_schedule(&d->task);
	}
	spin_unlock_irqrestore(&c->vc.lock, flags);
	vchan_dma_desc_free_list(&c->vc, &head);

	return 0;
}
799 
/* Static description of one virtual channel: its DDAR word and name */
struct sa11x0_dma_channel_desc {
	u32 ddar;
	const char *name;
};
804 
/* CD(dev, extra): DDAR device selector ORed with extra bits (DDAR_RW for
 * receive channels); the channel name is the stringified selector. */
#define CD(d1, d2) { .ddar = DDAR_##d1 | d2, .name = #d1 }
static const struct sa11x0_dma_channel_desc chan_desc[] = {
	CD(Ser0UDCTr, 0),
	CD(Ser0UDCRc, DDAR_RW),
	CD(Ser1SDLCTr, 0),
	CD(Ser1SDLCRc, DDAR_RW),
	CD(Ser1UARTTr, 0),
	CD(Ser1UARTRc, DDAR_RW),
	CD(Ser2ICPTr, 0),
	CD(Ser2ICPRc, DDAR_RW),
	CD(Ser3UARTTr, 0),
	CD(Ser3UARTRc, DDAR_RW),
	CD(Ser4MCP0Tr, 0),
	CD(Ser4MCP0Rc, DDAR_RW),
	CD(Ser4MCP1Tr, 0),
	CD(Ser4MCP1Rc, DDAR_RW),
	CD(Ser4SSPTr, 0),
	CD(Ser4SSPRc, DDAR_RW),
};
824 
/* Slave map: (device name, channel request name) -> filter parameter,
 * matched by sa11x0_dma_filter_fn against the channel name */
static const struct dma_slave_map sa11x0_dma_map[] = {
	{ "sa11x0-ir", "tx", "Ser2ICPTr" },
	{ "sa11x0-ir", "rx", "Ser2ICPRc" },
	{ "sa11x0-ssp", "tx", "Ser4SSPTr" },
	{ "sa11x0-ssp", "rx", "Ser4SSPRc" },
};
831 
832 static bool sa11x0_dma_filter_fn(struct dma_chan *chan, void *param)
833 {
834 	struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
835 	const char *p = param;
836 
837 	return !strcmp(c->name, p);
838 }
839 
840 static int sa11x0_dma_init_dmadev(struct dma_device *dmadev,
841 	struct device *dev)
842 {
843 	unsigned i;
844 
845 	INIT_LIST_HEAD(&dmadev->channels);
846 	dmadev->dev = dev;
847 	dmadev->device_free_chan_resources = sa11x0_dma_free_chan_resources;
848 	dmadev->device_config = sa11x0_dma_device_config;
849 	dmadev->device_pause = sa11x0_dma_device_pause;
850 	dmadev->device_resume = sa11x0_dma_device_resume;
851 	dmadev->device_terminate_all = sa11x0_dma_device_terminate_all;
852 	dmadev->device_tx_status = sa11x0_dma_tx_status;
853 	dmadev->device_issue_pending = sa11x0_dma_issue_pending;
854 
855 	for (i = 0; i < ARRAY_SIZE(chan_desc); i++) {
856 		struct sa11x0_dma_chan *c;
857 
858 		c = kzalloc(sizeof(*c), GFP_KERNEL);
859 		if (!c) {
860 			dev_err(dev, "no memory for channel %u\n", i);
861 			return -ENOMEM;
862 		}
863 
864 		c->status = DMA_IN_PROGRESS;
865 		c->ddar = chan_desc[i].ddar;
866 		c->name = chan_desc[i].name;
867 		INIT_LIST_HEAD(&c->node);
868 
869 		c->vc.desc_free = sa11x0_dma_free_desc;
870 		vchan_init(&c->vc, dmadev);
871 	}
872 
873 	return dma_async_device_register(dmadev);
874 }
875 
876 static int sa11x0_dma_request_irq(struct platform_device *pdev, int nr,
877 	void *data)
878 {
879 	int irq = platform_get_irq(pdev, nr);
880 
881 	if (irq <= 0)
882 		return -ENXIO;
883 
884 	return request_irq(irq, sa11x0_dma_irq, 0, dev_name(&pdev->dev), data);
885 }
886 
/* Release the IRQ claimed by sa11x0_dma_request_irq() for channel @nr. */
static void sa11x0_dma_free_irq(struct platform_device *pdev, int nr,
	void *data)
{
	int irq;

	irq = platform_get_irq(pdev, nr);
	if (irq <= 0)
		return;

	free_irq(irq, data);
}
894 
/*
 * Tear down every virtual channel created by sa11x0_dma_init_dmadev():
 * unlink it, stop its virt-dma tasklet, and free it.
 */
static void sa11x0_dma_free_channels(struct dma_device *dmadev)
{
	struct sa11x0_dma_chan *c, *cn;

	list_for_each_entry_safe(c, cn, &dmadev->channels, vc.chan.device_node) {
		list_del(&c->vc.chan.device_node);
		tasklet_kill(&c->vc.task);
		kfree(c);
	}
}
905 
/*
 * Platform probe: map the controller registers, quiesce and claim the
 * IRQ of each physical channel, then register the dmaengine device.
 * Uses goto-based unwind; each label undoes the steps completed before
 * its failure point.
 */
static int sa11x0_dma_probe(struct platform_device *pdev)
{
	struct sa11x0_dma_dev *d;
	struct resource *res;
	unsigned i;
	int ret;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENXIO;

	d = kzalloc(sizeof(*d), GFP_KERNEL);
	if (!d) {
		ret = -ENOMEM;
		goto err_alloc;
	}

	spin_lock_init(&d->lock);
	INIT_LIST_HEAD(&d->chan_pending);

	/* Hook up the slave map so clients can use dma_request_chan() */
	d->slave.filter.fn = sa11x0_dma_filter_fn;
	d->slave.filter.mapcnt = ARRAY_SIZE(sa11x0_dma_map);
	d->slave.filter.map = sa11x0_dma_map;

	d->base = ioremap(res->start, resource_size(res));
	if (!d->base) {
		ret = -ENOMEM;
		goto err_ioremap;
	}

	tasklet_init(&d->task, sa11x0_dma_tasklet, (unsigned long)d);

	for (i = 0; i < NR_PHY_CHAN; i++) {
		struct sa11x0_dma_phy *p = &d->phy[i];

		p->dev = d;
		p->num = i;
		p->base = d->base + i * DMA_SIZE;
		/* Stop the channel and clear any stale status */
		writel_relaxed(DCSR_RUN | DCSR_IE | DCSR_ERROR |
			DCSR_DONEA | DCSR_STRTA | DCSR_DONEB | DCSR_STRTB,
			p->base + DMA_DCSR_C);
		writel_relaxed(0, p->base + DMA_DDAR);

		ret = sa11x0_dma_request_irq(pdev, i, p);
		if (ret) {
			/* Free only the IRQs claimed so far */
			while (i) {
				i--;
				sa11x0_dma_free_irq(pdev, i, &d->phy[i]);
			}
			goto err_irq;
		}
	}

	dma_cap_set(DMA_SLAVE, d->slave.cap_mask);
	dma_cap_set(DMA_CYCLIC, d->slave.cap_mask);
	d->slave.device_prep_slave_sg = sa11x0_dma_prep_slave_sg;
	d->slave.device_prep_dma_cyclic = sa11x0_dma_prep_dma_cyclic;
	d->slave.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
	d->slave.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
	d->slave.src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
				   BIT(DMA_SLAVE_BUSWIDTH_2_BYTES);
	d->slave.dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
				   BIT(DMA_SLAVE_BUSWIDTH_2_BYTES);
	ret = sa11x0_dma_init_dmadev(&d->slave, &pdev->dev);
	if (ret) {
		dev_warn(d->slave.dev, "failed to register slave async device: %d\n",
			ret);
		goto err_slave_reg;
	}

	platform_set_drvdata(pdev, d);
	return 0;

 err_slave_reg:
	sa11x0_dma_free_channels(&d->slave);
	for (i = 0; i < NR_PHY_CHAN; i++)
		sa11x0_dma_free_irq(pdev, i, &d->phy[i]);
 err_irq:
	tasklet_kill(&d->task);
	iounmap(d->base);
 err_ioremap:
	kfree(d);
 err_alloc:
	return ret;
}
991 
/*
 * Platform remove: unregister from dmaengine, then unwind everything
 * probe set up (channels, IRQs, tasklet, mapping, device struct).
 */
static int sa11x0_dma_remove(struct platform_device *pdev)
{
	struct sa11x0_dma_dev *d = platform_get_drvdata(pdev);
	unsigned pch;

	dma_async_device_unregister(&d->slave);

	sa11x0_dma_free_channels(&d->slave);
	for (pch = 0; pch < NR_PHY_CHAN; pch++)
		sa11x0_dma_free_irq(pdev, pch, &d->phy[pch]);
	tasklet_kill(&d->task);
	iounmap(d->base);
	kfree(d);

	return 0;
}
1008 
/*
 * Suspend (noirq): stop each physical channel and save its register
 * state into the phy struct.  The buffer registers are normalized so
 * that dbs/dbt[0] always holds the buffer the hardware would service
 * first on resume (when DCSR_BIU indicates B is in use, A and B are
 * swapped and the start bits mirrored accordingly).
 */
static int sa11x0_dma_suspend(struct device *dev)
{
	struct sa11x0_dma_dev *d = dev_get_drvdata(dev);
	unsigned pch;

	for (pch = 0; pch < NR_PHY_CHAN; pch++) {
		struct sa11x0_dma_phy *p = &d->phy[pch];
		u32 dcsr, saved_dcsr;

		dcsr = saved_dcsr = readl_relaxed(p->base + DMA_DCSR_R);
		if (dcsr & DCSR_RUN) {
			/* Stop the channel, then re-read the settled state */
			writel(DCSR_RUN | DCSR_IE, p->base + DMA_DCSR_C);
			dcsr = readl_relaxed(p->base + DMA_DCSR_R);
		}

		saved_dcsr &= DCSR_RUN | DCSR_IE;
		if (dcsr & DCSR_BIU) {
			/* B was in use: save B as slot 0 and swap the
			 * start bits so resume restarts in order */
			p->dbs[0] = readl_relaxed(p->base + DMA_DBSB);
			p->dbt[0] = readl_relaxed(p->base + DMA_DBTB);
			p->dbs[1] = readl_relaxed(p->base + DMA_DBSA);
			p->dbt[1] = readl_relaxed(p->base + DMA_DBTA);
			saved_dcsr |= (dcsr & DCSR_STRTA ? DCSR_STRTB : 0) |
				      (dcsr & DCSR_STRTB ? DCSR_STRTA : 0);
		} else {
			p->dbs[0] = readl_relaxed(p->base + DMA_DBSA);
			p->dbt[0] = readl_relaxed(p->base + DMA_DBTA);
			p->dbs[1] = readl_relaxed(p->base + DMA_DBSB);
			p->dbt[1] = readl_relaxed(p->base + DMA_DBTB);
			saved_dcsr |= dcsr & (DCSR_STRTA | DCSR_STRTB);
		}
		p->dcsr = saved_dcsr;

		writel(DCSR_STRTA | DCSR_STRTB, p->base + DMA_DCSR_C);
	}

	return 0;
}
1046 
/*
 * Resume (noirq): re-program each physical channel that had a transfer
 * outstanding from the state saved by sa11x0_dma_suspend().  Channels
 * with no in-flight descriptor are left idle.
 */
static int sa11x0_dma_resume(struct device *dev)
{
	struct sa11x0_dma_dev *d = dev_get_drvdata(dev);
	unsigned pch;

	for (pch = 0; pch < NR_PHY_CHAN; pch++) {
		struct sa11x0_dma_phy *p = &d->phy[pch];
		struct sa11x0_dma_desc *txd = NULL;
		u32 dcsr = readl_relaxed(p->base + DMA_DCSR_R);

		/* The channel is expected to be fully stopped after suspend */
		WARN_ON(dcsr & (DCSR_BIU | DCSR_STRTA | DCSR_STRTB | DCSR_RUN));

		if (p->txd_done)
			txd = p->txd_done;
		else if (p->txd_load)
			txd = p->txd_load;

		if (!txd)
			continue;

		writel_relaxed(txd->ddar, p->base + DMA_DDAR);

		/* Restore buffers in the normalized order saved at suspend,
		 * then the saved control bits to restart the transfer */
		writel_relaxed(p->dbs[0], p->base + DMA_DBSA);
		writel_relaxed(p->dbt[0], p->base + DMA_DBTA);
		writel_relaxed(p->dbs[1], p->base + DMA_DBSB);
		writel_relaxed(p->dbt[1], p->base + DMA_DBTB);
		writel_relaxed(p->dcsr, p->base + DMA_DCSR_S);
	}

	return 0;
}
1078 
/* All PM transitions use the same save/restore pair, at noirq time so
 * client drivers have already quiesced their DMA activity */
static const struct dev_pm_ops sa11x0_dma_pm_ops = {
	.suspend_noirq = sa11x0_dma_suspend,
	.resume_noirq = sa11x0_dma_resume,
	.freeze_noirq = sa11x0_dma_suspend,
	.thaw_noirq = sa11x0_dma_resume,
	.poweroff_noirq = sa11x0_dma_suspend,
	.restore_noirq = sa11x0_dma_resume,
};
1087 
/* Platform driver binding against the "sa11x0-dma" platform device */
static struct platform_driver sa11x0_dma_driver = {
	.driver = {
		.name	= "sa11x0-dma",
		.pm	= &sa11x0_dma_pm_ops,
	},
	.probe		= sa11x0_dma_probe,
	.remove		= sa11x0_dma_remove,
};
1096 
static int __init sa11x0_dma_init(void)
{
	/* Registered at subsys_initcall level — presumably so the DMA
	 * provider is available before client drivers probe (NOTE:
	 * inferred from the initcall level, not stated here). */
	return platform_driver_register(&sa11x0_dma_driver);
}
subsys_initcall(sa11x0_dma_init);
1102 
/* Module unload: unregister the platform driver */
static void __exit sa11x0_dma_exit(void)
{
	platform_driver_unregister(&sa11x0_dma_driver);
}
module_exit(sa11x0_dma_exit);
1108 
1109 MODULE_AUTHOR("Russell King");
1110 MODULE_DESCRIPTION("SA-11x0 DMA driver");
1111 MODULE_LICENSE("GPL v2");
1112 MODULE_ALIAS("platform:sa11x0-dma");
1113