/*
 * Texas Instruments CPDMA Driver
 *
 * Copyright (C) 2010 Texas Instruments
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation version 2.
 *
 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
 * kind, whether express or implied; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/delay.h>

#include "davinci_cpdma.h"

/* DMA Registers */
#define CPDMA_TXIDVER		0x00
#define CPDMA_TXCONTROL		0x04
#define CPDMA_TXTEARDOWN	0x08
#define CPDMA_RXIDVER		0x10
#define CPDMA_RXCONTROL		0x14
#define CPDMA_RXTEARDOWN	0x18
#define CPDMA_SOFTRESET		0x1c
#define CPDMA_TXINTSTATRAW	0x80
#define CPDMA_TXINTSTATMASKED	0x84
#define CPDMA_TXINTMASKSET	0x88
#define CPDMA_TXINTMASKCLEAR	0x8c
#define CPDMA_MACINVECTOR	0x90
#define CPDMA_MACEOIVECTOR	0x94
#define CPDMA_RXINTSTATRAW	0xa0
#define CPDMA_RXINTSTATMASKED	0xa4
#define CPDMA_RXINTMASKSET	0xa8
#define CPDMA_RXINTMASKCLEAR	0xac
#define CPDMA_DMAINTSTATRAW	0xb0
#define CPDMA_DMAINTSTATMASKED	0xb4
#define CPDMA_DMAINTMASKSET	0xb8
#define CPDMA_DMAINTMASKCLEAR	0xbc
#define CPDMA_DMAINT_HOSTERR	BIT(1)

/* the following exist only if has_ext_regs is set */
#define CPDMA_DMACONTROL	0x20
#define CPDMA_DMASTATUS		0x24
#define CPDMA_RXBUFFOFS		0x28
#define CPDMA_EM_CONTROL	0x2c

/* Descriptor mode bits */
#define CPDMA_DESC_SOP		BIT(31)
#define CPDMA_DESC_EOP		BIT(30)
#define CPDMA_DESC_OWNER	BIT(29)
#define CPDMA_DESC_EOQ		BIT(28)
#define CPDMA_DESC_TD_COMPLETE	BIT(27)
#define CPDMA_DESC_PASS_CRC	BIT(26)
#define CPDMA_DESC_TO_PORT_EN	BIT(20)
#define CPDMA_TO_PORT_SHIFT	16
#define CPDMA_DESC_PORT_MASK	(BIT(18) | BIT(17) | BIT(16))
#define CPDMA_DESC_CRC_LEN	4

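/*
 * Value that a channel's completion pointer (CP) register reads back once
 * a teardown has completed; cpdma_chan_stop() polls for it and then writes
 * it back to acknowledge the teardown.
 */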
#define CPDMA_TEARDOWN_VALUE	0xfffffffc

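/*
 * The hardware fields below mirror the four-word descriptor layout that the
 * CPDMA engine walks in memory: hw_next chains descriptors, hw_buffer and
 * hw_len describe the data buffer, and hw_mode carries the CPDMA_DESC_*
 * flags together with the packet length.  The software fields are never
 * touched by hardware; they preserve the caller's token and the original
 * mapping so that __cpdma_chan_free() can unmap the buffer and invoke the
 * completion handler.
 */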
struct cpdma_desc {
	/* hardware fields */
	u32			hw_next;
	u32			hw_buffer;
	u32			hw_len;
	u32			hw_mode;
	/* software fields */
	void			*sw_token;
	u32			sw_buffer;
	u32			sw_len;
};

struct cpdma_desc_pool {
	phys_addr_t		phys;
	u32			hw_addr;
	void __iomem		*iomap;		/* ioremap map */
	void			*cpumap;	/* dma_alloc map */
	int			desc_size, mem_size;
	int			num_desc, used_desc;
	unsigned long		*bitmap;
	struct device		*dev;
	spinlock_t		lock;
};

enum cpdma_state {
	CPDMA_STATE_IDLE,
	CPDMA_STATE_ACTIVE,
	CPDMA_STATE_TEARDOWN,
};

static const char *cpdma_state_str[] = { "idle", "active", "teardown" };

struct cpdma_ctlr {
	enum cpdma_state	state;
	struct cpdma_params	params;
	struct device		*dev;
	struct cpdma_desc_pool	*pool;
	spinlock_t		lock;
	struct cpdma_chan	*channels[2 * CPDMA_MAX_CHANNELS];
};

struct cpdma_chan {
	struct cpdma_desc __iomem	*head, *tail;
	void __iomem			*hdp, *cp, *rxfree;
	enum cpdma_state		state;
	struct cpdma_ctlr		*ctlr;
	int				chan_num;
	spinlock_t			lock;
	int				count;
	u32				mask;
	cpdma_handler_fn		handler;
	enum dma_data_direction		dir;
	struct cpdma_chan_stats		stats;
	/* offsets into dmaregs */
	int	int_set, int_clear, td;
};

/* The following make access to common cpdma_ctlr params more readable */
#define dmaregs		params.dmaregs
#define num_chan	params.num_chan

/* various accessors */
#define dma_reg_read(ctlr, ofs)		__raw_readl((ctlr)->dmaregs + (ofs))
#define chan_read(chan, fld)		__raw_readl((chan)->fld)
#define desc_read(desc, fld)		__raw_readl(&(desc)->fld)
#define dma_reg_write(ctlr, ofs, v)	__raw_writel(v, (ctlr)->dmaregs + (ofs))
#define chan_write(chan, fld, v)	__raw_writel(v, (chan)->fld)
#define desc_write(desc, fld, v)	__raw_writel((u32)(v), &(desc)->fld)

#define cpdma_desc_to_port(chan, mode, directed)			\
	do {								\
		if (!is_rx_chan(chan) && ((directed == 1) ||		\
					  (directed == 2)))		\
			mode |= (CPDMA_DESC_TO_PORT_EN |		\
				 (directed << CPDMA_TO_PORT_SHIFT));	\
	} while (0)
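
/*
 * A directed value of 1 or 2 on a tx channel sets CPDMA_DESC_TO_PORT_EN
 * and encodes the port number in the CPDMA_DESC_PORT_MASK bits of the
 * descriptor mode word; rx channels and other values leave the mode word
 * untouched.  Switch drivers (e.g. cpsw) can use this to force a packet
 * out of a specific slave port.
 */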

/*
 * Utility constructs for a cpdma descriptor pool.  Some devices (e.g. davinci
 * emac) have dedicated on-chip memory for these descriptors.  Some other
 * devices (e.g. cpsw switches) use plain old memory.  Descriptor pools
 * abstract out these details.
 */
static struct cpdma_desc_pool *
cpdma_desc_pool_create(struct device *dev, u32 phys, u32 hw_addr,
		       int size, int align)
{
	int bitmap_size;
	struct cpdma_desc_pool *pool;

	pool = kzalloc(sizeof(*pool), GFP_KERNEL);
	if (!pool)
		return NULL;

	spin_lock_init(&pool->lock);

	pool->dev	= dev;
	pool->mem_size	= size;
	pool->desc_size	= ALIGN(sizeof(struct cpdma_desc), align);
	pool->num_desc	= size / pool->desc_size;

	/* round up so a descriptor count that is not a multiple of
	 * BITS_PER_LONG still gets a large enough bitmap */
	bitmap_size  = BITS_TO_LONGS(pool->num_desc) * sizeof(long);
	pool->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
	if (!pool->bitmap)
		goto fail;

	if (phys) {
		pool->phys  = phys;
		pool->iomap = ioremap(phys, size);
		pool->hw_addr = hw_addr;
	} else {
		pool->cpumap = dma_alloc_coherent(dev, size, &pool->phys,
						  GFP_KERNEL);
		pool->iomap = pool->cpumap;
		pool->hw_addr = pool->phys;
	}

	if (pool->iomap)
		return pool;

fail:
	kfree(pool->bitmap);
	kfree(pool);
	return NULL;
}

static void cpdma_desc_pool_destroy(struct cpdma_desc_pool *pool)
{
	unsigned long flags;

	if (!pool)
		return;

	spin_lock_irqsave(&pool->lock, flags);
	WARN_ON(pool->used_desc);
	kfree(pool->bitmap);
	if (pool->cpumap) {
		dma_free_coherent(pool->dev, pool->mem_size, pool->cpumap,
				  pool->phys);
	} else {
		iounmap(pool->iomap);
	}
	spin_unlock_irqrestore(&pool->lock, flags);
	kfree(pool);
}

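/*
 * Translate between the CPU-visible descriptor mapping (pool->iomap) and
 * the address space the DMA engine uses (based at pool->hw_addr).  The two
 * can differ when descriptors live in dedicated on-chip RAM that is
 * ioremapped at one address but seen by the hardware at another.
 */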
static inline dma_addr_t desc_phys(struct cpdma_desc_pool *pool,
				   struct cpdma_desc __iomem *desc)
{
	if (!desc)
		return 0;
	return pool->hw_addr + (__force long)desc - (__force long)pool->iomap;
}

static inline struct cpdma_desc __iomem *
desc_from_phys(struct cpdma_desc_pool *pool, dma_addr_t dma)
{
	return dma ? pool->iomap + dma - pool->hw_addr : NULL;
}

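/*
 * Allocate num_desc contiguous descriptors from the pool bitmap.  The pool
 * is statically split in half: rx allocations come from the lower half and
 * tx allocations from the upper half, so that one direction cannot starve
 * the other of descriptors.
 */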
static struct cpdma_desc __iomem *
cpdma_desc_alloc(struct cpdma_desc_pool *pool, int num_desc, bool is_rx)
{
	unsigned long flags;
	int index;
	int desc_start;
	int desc_end;
	struct cpdma_desc __iomem *desc = NULL;

	spin_lock_irqsave(&pool->lock, flags);

	if (is_rx) {
		desc_start = 0;
		desc_end = pool->num_desc/2;
	} else {
		desc_start = pool->num_desc/2;
		desc_end = pool->num_desc;
	}

	index = bitmap_find_next_zero_area(pool->bitmap,
				desc_end, desc_start, num_desc, 0);
	if (index < desc_end) {
		bitmap_set(pool->bitmap, index, num_desc);
		desc = pool->iomap + pool->desc_size * index;
		pool->used_desc++;
	}

	spin_unlock_irqrestore(&pool->lock, flags);
	return desc;
}

static void cpdma_desc_free(struct cpdma_desc_pool *pool,
			    struct cpdma_desc __iomem *desc, int num_desc)
{
	unsigned long flags, index;

	index = ((unsigned long)desc - (unsigned long)pool->iomap) /
		pool->desc_size;
	spin_lock_irqsave(&pool->lock, flags);
	bitmap_clear(pool->bitmap, index, num_desc);
	pool->used_desc--;
	spin_unlock_irqrestore(&pool->lock, flags);
}

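/*
 * A minimal usage sketch (error handling, interrupt wiring and the choice
 * of channel numbers are the caller's business and are omitted here):
 *
 *	ctlr = cpdma_ctlr_create(&params);
 *	chan = cpdma_chan_create(ctlr, chan_num, handler);
 *	cpdma_ctlr_start(ctlr);
 *	cpdma_chan_submit(chan, token, data, len, 0);
 *	...
 *	cpdma_chan_process(chan, budget);	(e.g. from a NAPI poll)
 *	...
 *	cpdma_ctlr_stop(ctlr);
 *	cpdma_ctlr_destroy(ctlr);
 */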
struct cpdma_ctlr *cpdma_ctlr_create(struct cpdma_params *params)
{
	struct cpdma_ctlr *ctlr;

	ctlr = kzalloc(sizeof(*ctlr), GFP_KERNEL);
	if (!ctlr)
		return NULL;

	ctlr->state = CPDMA_STATE_IDLE;
	ctlr->params = *params;
	ctlr->dev = params->dev;
	spin_lock_init(&ctlr->lock);

	ctlr->pool = cpdma_desc_pool_create(ctlr->dev,
					    ctlr->params.desc_mem_phys,
					    ctlr->params.desc_hw_addr,
					    ctlr->params.desc_mem_size,
					    ctlr->params.desc_align);
	if (!ctlr->pool) {
		kfree(ctlr);
		return NULL;
	}

	if (WARN_ON(ctlr->num_chan > CPDMA_MAX_CHANNELS))
		ctlr->num_chan = CPDMA_MAX_CHANNELS;
	return ctlr;
}
EXPORT_SYMBOL_GPL(cpdma_ctlr_create);

int cpdma_ctlr_start(struct cpdma_ctlr *ctlr)
{
	unsigned long flags;
	int i;

	spin_lock_irqsave(&ctlr->lock, flags);
	if (ctlr->state != CPDMA_STATE_IDLE) {
		spin_unlock_irqrestore(&ctlr->lock, flags);
		return -EBUSY;
	}

	if (ctlr->params.has_soft_reset) {
		unsigned timeout = 10 * 100; /* 10 ms */

		dma_reg_write(ctlr, CPDMA_SOFTRESET, 1);
		while (timeout) {
			if (dma_reg_read(ctlr, CPDMA_SOFTRESET) == 0)
				break;
			udelay(10);
			timeout--;
		}
		WARN_ON(!timeout);
	}

	for (i = 0; i < ctlr->num_chan; i++) {
		__raw_writel(0, ctlr->params.txhdp + 4 * i);
		__raw_writel(0, ctlr->params.rxhdp + 4 * i);
		__raw_writel(0, ctlr->params.txcp + 4 * i);
		__raw_writel(0, ctlr->params.rxcp + 4 * i);
	}

	dma_reg_write(ctlr, CPDMA_RXINTMASKCLEAR, 0xffffffff);
	dma_reg_write(ctlr, CPDMA_TXINTMASKCLEAR, 0xffffffff);

	dma_reg_write(ctlr, CPDMA_TXCONTROL, 1);
	dma_reg_write(ctlr, CPDMA_RXCONTROL, 1);

	ctlr->state = CPDMA_STATE_ACTIVE;

	for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) {
		if (ctlr->channels[i])
			cpdma_chan_start(ctlr->channels[i]);
	}
	spin_unlock_irqrestore(&ctlr->lock, flags);
	return 0;
}
EXPORT_SYMBOL_GPL(cpdma_ctlr_start);

int cpdma_ctlr_stop(struct cpdma_ctlr *ctlr)
{
	unsigned long flags;
	int i;

	spin_lock_irqsave(&ctlr->lock, flags);
	if (ctlr->state == CPDMA_STATE_TEARDOWN) {
		spin_unlock_irqrestore(&ctlr->lock, flags);
		return -EINVAL;
	}

	ctlr->state = CPDMA_STATE_TEARDOWN;

	for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) {
		if (ctlr->channels[i])
			cpdma_chan_stop(ctlr->channels[i]);
	}

	dma_reg_write(ctlr, CPDMA_RXINTMASKCLEAR, 0xffffffff);
	dma_reg_write(ctlr, CPDMA_TXINTMASKCLEAR, 0xffffffff);

	dma_reg_write(ctlr, CPDMA_TXCONTROL, 0);
	dma_reg_write(ctlr, CPDMA_RXCONTROL, 0);

	ctlr->state = CPDMA_STATE_IDLE;

	spin_unlock_irqrestore(&ctlr->lock, flags);
	return 0;
}
EXPORT_SYMBOL_GPL(cpdma_ctlr_stop);

int cpdma_ctlr_dump(struct cpdma_ctlr *ctlr)
{
	struct device *dev = ctlr->dev;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&ctlr->lock, flags);

	dev_info(dev, "CPDMA: state: %s\n", cpdma_state_str[ctlr->state]);

	dev_info(dev, "CPDMA: txidver: %x\n",
		 dma_reg_read(ctlr, CPDMA_TXIDVER));
	dev_info(dev, "CPDMA: txcontrol: %x\n",
		 dma_reg_read(ctlr, CPDMA_TXCONTROL));
	dev_info(dev, "CPDMA: txteardown: %x\n",
		 dma_reg_read(ctlr, CPDMA_TXTEARDOWN));
	dev_info(dev, "CPDMA: rxidver: %x\n",
		 dma_reg_read(ctlr, CPDMA_RXIDVER));
	dev_info(dev, "CPDMA: rxcontrol: %x\n",
		 dma_reg_read(ctlr, CPDMA_RXCONTROL));
	dev_info(dev, "CPDMA: softreset: %x\n",
		 dma_reg_read(ctlr, CPDMA_SOFTRESET));
	dev_info(dev, "CPDMA: rxteardown: %x\n",
		 dma_reg_read(ctlr, CPDMA_RXTEARDOWN));
	dev_info(dev, "CPDMA: txintstatraw: %x\n",
		 dma_reg_read(ctlr, CPDMA_TXINTSTATRAW));
	dev_info(dev, "CPDMA: txintstatmasked: %x\n",
		 dma_reg_read(ctlr, CPDMA_TXINTSTATMASKED));
	dev_info(dev, "CPDMA: txintmaskset: %x\n",
		 dma_reg_read(ctlr, CPDMA_TXINTMASKSET));
	dev_info(dev, "CPDMA: txintmaskclear: %x\n",
		 dma_reg_read(ctlr, CPDMA_TXINTMASKCLEAR));
	dev_info(dev, "CPDMA: macinvector: %x\n",
		 dma_reg_read(ctlr, CPDMA_MACINVECTOR));
	dev_info(dev, "CPDMA: maceoivector: %x\n",
		 dma_reg_read(ctlr, CPDMA_MACEOIVECTOR));
	dev_info(dev, "CPDMA: rxintstatraw: %x\n",
		 dma_reg_read(ctlr, CPDMA_RXINTSTATRAW));
	dev_info(dev, "CPDMA: rxintstatmasked: %x\n",
		 dma_reg_read(ctlr, CPDMA_RXINTSTATMASKED));
	dev_info(dev, "CPDMA: rxintmaskset: %x\n",
		 dma_reg_read(ctlr, CPDMA_RXINTMASKSET));
	dev_info(dev, "CPDMA: rxintmaskclear: %x\n",
		 dma_reg_read(ctlr, CPDMA_RXINTMASKCLEAR));
	dev_info(dev, "CPDMA: dmaintstatraw: %x\n",
		 dma_reg_read(ctlr, CPDMA_DMAINTSTATRAW));
	dev_info(dev, "CPDMA: dmaintstatmasked: %x\n",
		 dma_reg_read(ctlr, CPDMA_DMAINTSTATMASKED));
	dev_info(dev, "CPDMA: dmaintmaskset: %x\n",
		 dma_reg_read(ctlr, CPDMA_DMAINTMASKSET));
	dev_info(dev, "CPDMA: dmaintmaskclear: %x\n",
		 dma_reg_read(ctlr, CPDMA_DMAINTMASKCLEAR));

	/* these registers exist only when has_ext_regs is set */
	if (ctlr->params.has_ext_regs) {
		dev_info(dev, "CPDMA: dmacontrol: %x\n",
			 dma_reg_read(ctlr, CPDMA_DMACONTROL));
		dev_info(dev, "CPDMA: dmastatus: %x\n",
			 dma_reg_read(ctlr, CPDMA_DMASTATUS));
		dev_info(dev, "CPDMA: rxbuffofs: %x\n",
			 dma_reg_read(ctlr, CPDMA_RXBUFFOFS));
	}

	for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++)
		if (ctlr->channels[i])
			cpdma_chan_dump(ctlr->channels[i]);

	spin_unlock_irqrestore(&ctlr->lock, flags);
	return 0;
}
EXPORT_SYMBOL_GPL(cpdma_ctlr_dump);

int cpdma_ctlr_destroy(struct cpdma_ctlr *ctlr)
{
	int ret = 0, i;

	if (!ctlr)
		return -EINVAL;

	/*
	 * Do not take ctlr->lock here: cpdma_ctlr_stop() and
	 * cpdma_chan_destroy() both acquire it themselves, so holding it
	 * across these calls would deadlock on the non-recursive spinlock.
	 */
	if (ctlr->state != CPDMA_STATE_IDLE)
		cpdma_ctlr_stop(ctlr);

	for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++)
		cpdma_chan_destroy(ctlr->channels[i]);

	cpdma_desc_pool_destroy(ctlr->pool);
	kfree(ctlr);
	return ret;
}
EXPORT_SYMBOL_GPL(cpdma_ctlr_destroy);

int cpdma_ctlr_int_ctrl(struct cpdma_ctlr *ctlr, bool enable)
{
	unsigned long flags;
	int i, reg;

	spin_lock_irqsave(&ctlr->lock, flags);
	if (ctlr->state != CPDMA_STATE_ACTIVE) {
		spin_unlock_irqrestore(&ctlr->lock, flags);
		return -EINVAL;
	}

	reg = enable ? CPDMA_DMAINTMASKSET : CPDMA_DMAINTMASKCLEAR;
	dma_reg_write(ctlr, reg, CPDMA_DMAINT_HOSTERR);

	for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) {
		if (ctlr->channels[i])
			cpdma_chan_int_ctrl(ctlr->channels[i], enable);
	}

	spin_unlock_irqrestore(&ctlr->lock, flags);
	return 0;
}
EXPORT_SYMBOL_GPL(cpdma_ctlr_int_ctrl);

void cpdma_ctlr_eoi(struct cpdma_ctlr *ctlr, u32 value)
{
	dma_reg_write(ctlr, CPDMA_MACEOIVECTOR, value);
}
EXPORT_SYMBOL_GPL(cpdma_ctlr_eoi);

struct cpdma_chan *cpdma_chan_create(struct cpdma_ctlr *ctlr, int chan_num,
				     cpdma_handler_fn handler)
{
	struct cpdma_chan *chan;
	int ret, offset = (chan_num % CPDMA_MAX_CHANNELS) * 4;
	unsigned long flags;

	if (__chan_linear(chan_num) >= ctlr->num_chan)
		return NULL;

	ret = -ENOMEM;
	chan = kzalloc(sizeof(*chan), GFP_KERNEL);
	if (!chan)
		goto err_chan_alloc;

	spin_lock_irqsave(&ctlr->lock, flags);
	ret = -EBUSY;
	if (ctlr->channels[chan_num])
		goto err_chan_busy;

	chan->ctlr	= ctlr;
	chan->state	= CPDMA_STATE_IDLE;
	chan->chan_num	= chan_num;
	chan->handler	= handler;

	if (is_rx_chan(chan)) {
		chan->hdp	= ctlr->params.rxhdp + offset;
		chan->cp	= ctlr->params.rxcp + offset;
		chan->rxfree	= ctlr->params.rxfree + offset;
		chan->int_set	= CPDMA_RXINTMASKSET;
		chan->int_clear	= CPDMA_RXINTMASKCLEAR;
		chan->td	= CPDMA_RXTEARDOWN;
		chan->dir	= DMA_FROM_DEVICE;
	} else {
		chan->hdp	= ctlr->params.txhdp + offset;
		chan->cp	= ctlr->params.txcp + offset;
		chan->int_set	= CPDMA_TXINTMASKSET;
		chan->int_clear	= CPDMA_TXINTMASKCLEAR;
		chan->td	= CPDMA_TXTEARDOWN;
		chan->dir	= DMA_TO_DEVICE;
	}
	chan->mask = BIT(chan_linear(chan));

	spin_lock_init(&chan->lock);

	ctlr->channels[chan_num] = chan;
	spin_unlock_irqrestore(&ctlr->lock, flags);
	return chan;

err_chan_busy:
	spin_unlock_irqrestore(&ctlr->lock, flags);
	kfree(chan);
err_chan_alloc:
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(cpdma_chan_create);

int cpdma_chan_destroy(struct cpdma_chan *chan)
{
	struct cpdma_ctlr *ctlr;
	unsigned long flags;

	if (!chan)
		return -EINVAL;
	ctlr = chan->ctlr;

	spin_lock_irqsave(&ctlr->lock, flags);
	if (chan->state != CPDMA_STATE_IDLE)
		cpdma_chan_stop(chan);
	ctlr->channels[chan->chan_num] = NULL;
	spin_unlock_irqrestore(&ctlr->lock, flags);
	kfree(chan);
	return 0;
}
EXPORT_SYMBOL_GPL(cpdma_chan_destroy);

int cpdma_chan_get_stats(struct cpdma_chan *chan,
			 struct cpdma_chan_stats *stats)
{
	unsigned long flags;

	if (!chan)
		return -EINVAL;
	spin_lock_irqsave(&chan->lock, flags);
	memcpy(stats, &chan->stats, sizeof(*stats));
	spin_unlock_irqrestore(&chan->lock, flags);
	return 0;
}
EXPORT_SYMBOL_GPL(cpdma_chan_get_stats);

int cpdma_chan_dump(struct cpdma_chan *chan)
{
	unsigned long flags;
	struct device *dev = chan->ctlr->dev;

	spin_lock_irqsave(&chan->lock, flags);

	dev_info(dev, "channel %d (%s %d) state %s\n",
		 chan->chan_num, is_rx_chan(chan) ? "rx" : "tx",
		 chan_linear(chan), cpdma_state_str[chan->state]);
	dev_info(dev, "\thdp: %x\n", chan_read(chan, hdp));
	dev_info(dev, "\tcp: %x\n", chan_read(chan, cp));
	if (chan->rxfree) {
		dev_info(dev, "\trxfree: %x\n",
			 chan_read(chan, rxfree));
	}

	dev_info(dev, "\tstats head_enqueue: %d\n",
		 chan->stats.head_enqueue);
	dev_info(dev, "\tstats tail_enqueue: %d\n",
		 chan->stats.tail_enqueue);
	dev_info(dev, "\tstats pad_enqueue: %d\n",
		 chan->stats.pad_enqueue);
	dev_info(dev, "\tstats misqueued: %d\n",
		 chan->stats.misqueued);
	dev_info(dev, "\tstats desc_alloc_fail: %d\n",
		 chan->stats.desc_alloc_fail);
	dev_info(dev, "\tstats pad_alloc_fail: %d\n",
		 chan->stats.pad_alloc_fail);
	dev_info(dev, "\tstats runt_receive_buff: %d\n",
		 chan->stats.runt_receive_buff);
	dev_info(dev, "\tstats runt_transmit_buff: %d\n",
		 chan->stats.runt_transmit_buff);
	dev_info(dev, "\tstats empty_dequeue: %d\n",
		 chan->stats.empty_dequeue);
	dev_info(dev, "\tstats busy_dequeue: %d\n",
		 chan->stats.busy_dequeue);
	dev_info(dev, "\tstats good_dequeue: %d\n",
		 chan->stats.good_dequeue);
	dev_info(dev, "\tstats requeue: %d\n",
		 chan->stats.requeue);
	dev_info(dev, "\tstats teardown_dequeue: %d\n",
		 chan->stats.teardown_dequeue);

	spin_unlock_irqrestore(&chan->lock, flags);
	return 0;
}

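/*
 * Append a descriptor to a channel's queue and, when necessary, (re)start
 * the hardware.  An idle queue is kicked off by writing the head descriptor
 * pointer (HDP).  Otherwise the descriptor is chained onto the current
 * tail; if the hardware had already raised EOQ on that tail (i.e. it
 * stopped before seeing the new link), the HDP write restarts it and the
 * event is counted as a misqueue.  Called with chan->lock held.
 */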
static void __cpdma_chan_submit(struct cpdma_chan *chan,
				struct cpdma_desc __iomem *desc)
{
	struct cpdma_ctlr		*ctlr = chan->ctlr;
	struct cpdma_desc __iomem	*prev = chan->tail;
	struct cpdma_desc_pool		*pool = ctlr->pool;
	dma_addr_t			desc_dma;
	u32				mode;

	desc_dma = desc_phys(pool, desc);

	/* simple case - idle channel */
	if (!chan->head) {
		chan->stats.head_enqueue++;
		chan->head = desc;
		chan->tail = desc;
		if (chan->state == CPDMA_STATE_ACTIVE)
			chan_write(chan, hdp, desc_dma);
		return;
	}

	/* first chain the descriptor at the tail of the list */
	desc_write(prev, hw_next, desc_dma);
	chan->tail = desc;
	chan->stats.tail_enqueue++;

	/* next check if EOQ has been triggered already */
	mode = desc_read(prev, hw_mode);
	if (((mode & (CPDMA_DESC_EOQ | CPDMA_DESC_OWNER)) == CPDMA_DESC_EOQ) &&
	    (chan->state == CPDMA_STATE_ACTIVE)) {
		desc_write(prev, hw_mode, mode & ~CPDMA_DESC_EOQ);
		chan_write(chan, hdp, desc_dma);
		chan->stats.misqueued++;
	}
}

int cpdma_chan_submit(struct cpdma_chan *chan, void *token, void *data,
		      int len, int directed)
{
	struct cpdma_ctlr		*ctlr = chan->ctlr;
	struct cpdma_desc __iomem	*desc;
	dma_addr_t			buffer;
	unsigned long			flags;
	u32				mode;
	int				ret = 0;

	spin_lock_irqsave(&chan->lock, flags);

	if (chan->state == CPDMA_STATE_TEARDOWN) {
		ret = -EINVAL;
		goto unlock_ret;
	}

	desc = cpdma_desc_alloc(ctlr->pool, 1, is_rx_chan(chan));
	if (!desc) {
		chan->stats.desc_alloc_fail++;
		ret = -ENOMEM;
		goto unlock_ret;
	}

	if (len < ctlr->params.min_packet_size) {
		len = ctlr->params.min_packet_size;
		chan->stats.runt_transmit_buff++;
	}

	buffer = dma_map_single(ctlr->dev, data, len, chan->dir);
	ret = dma_mapping_error(ctlr->dev, buffer);
	if (ret) {
		cpdma_desc_free(ctlr->pool, desc, 1);
		ret = -EINVAL;
		goto unlock_ret;
	}

	mode = CPDMA_DESC_OWNER | CPDMA_DESC_SOP | CPDMA_DESC_EOP;
	cpdma_desc_to_port(chan, mode, directed);

	desc_write(desc, hw_next,   0);
	desc_write(desc, hw_buffer, buffer);
	desc_write(desc, hw_len,    len);
	desc_write(desc, hw_mode,   mode | len);
	desc_write(desc, sw_token,  token);
	desc_write(desc, sw_buffer, buffer);
	desc_write(desc, sw_len,    len);

	__cpdma_chan_submit(chan, desc);

	if (chan->state == CPDMA_STATE_ACTIVE && chan->rxfree)
		chan_write(chan, rxfree, 1);

	chan->count++;

unlock_ret:
	spin_unlock_irqrestore(&chan->lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(cpdma_chan_submit);

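/*
 * Report whether at least one descriptor is still free in the tx half of
 * the pool (see cpdma_desc_alloc() for the rx/tx split).  Callers can use
 * this to decide when to stop their transmit queues.
 */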
bool cpdma_check_free_tx_desc(struct cpdma_chan *chan)
{
	unsigned long flags;
	int index;
	bool ret;
	struct cpdma_ctlr	*ctlr = chan->ctlr;
	struct cpdma_desc_pool	*pool = ctlr->pool;

	spin_lock_irqsave(&pool->lock, flags);

	index = bitmap_find_next_zero_area(pool->bitmap,
				pool->num_desc, pool->num_desc/2, 1, 0);

	ret = index < pool->num_desc;

	spin_unlock_irqrestore(&pool->lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(cpdma_check_free_tx_desc);

static void __cpdma_chan_free(struct cpdma_chan *chan,
			      struct cpdma_desc __iomem *desc,
			      int outlen, int status)
{
	struct cpdma_ctlr		*ctlr = chan->ctlr;
	struct cpdma_desc_pool		*pool = ctlr->pool;
	dma_addr_t			buff_dma;
	int				origlen;
	void				*token;

	token      = (void *)desc_read(desc, sw_token);
	buff_dma   = desc_read(desc, sw_buffer);
	origlen    = desc_read(desc, sw_len);

	dma_unmap_single(ctlr->dev, buff_dma, origlen, chan->dir);
	cpdma_desc_free(pool, desc, 1);
	(*chan->handler)(token, outlen, status);
}

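/*
 * Retire the descriptor at the head of the channel's queue if the hardware
 * is done with it.  Returns the descriptor's status bits on success,
 * -ENOENT if the queue is empty, or -EBUSY if the head is still owned by
 * hardware.  If EOQ was raised while more descriptors were queued behind
 * this one, the channel is restarted from the new head.  The completion
 * callback runs with chan->lock dropped.
 */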
static int __cpdma_chan_process(struct cpdma_chan *chan)
{
	struct cpdma_ctlr		*ctlr = chan->ctlr;
	struct cpdma_desc __iomem	*desc;
	int				status, outlen;
	int				cb_status = 0;
	struct cpdma_desc_pool		*pool = ctlr->pool;
	dma_addr_t			desc_dma;
	unsigned long			flags;

	spin_lock_irqsave(&chan->lock, flags);

	desc = chan->head;
	if (!desc) {
		chan->stats.empty_dequeue++;
		status = -ENOENT;
		goto unlock_ret;
	}
	desc_dma = desc_phys(pool, desc);

	status	= desc_read(desc, hw_mode);
	outlen	= status & 0x7ff;
	if (status & CPDMA_DESC_OWNER) {
		chan->stats.busy_dequeue++;
		status = -EBUSY;
		goto unlock_ret;
	}

	if (status & CPDMA_DESC_PASS_CRC)
		outlen -= CPDMA_DESC_CRC_LEN;

	status	= status & (CPDMA_DESC_EOQ | CPDMA_DESC_TD_COMPLETE |
			    CPDMA_DESC_PORT_MASK);

	chan->head = desc_from_phys(pool, desc_read(desc, hw_next));
	chan_write(chan, cp, desc_dma);
	chan->count--;
	chan->stats.good_dequeue++;

	if (status & CPDMA_DESC_EOQ) {
		chan->stats.requeue++;
		chan_write(chan, hdp, desc_phys(pool, chan->head));
	}

	spin_unlock_irqrestore(&chan->lock, flags);
	if (unlikely(status & CPDMA_DESC_TD_COMPLETE))
		cb_status = -ENOSYS;
	else
		cb_status = status;

	__cpdma_chan_free(chan, desc, outlen, cb_status);
	return status;

unlock_ret:
	spin_unlock_irqrestore(&chan->lock, flags);
	return status;
}

int cpdma_chan_process(struct cpdma_chan *chan, int quota)
{
	int used = 0, ret = 0;

	if (chan->state != CPDMA_STATE_ACTIVE)
		return -EINVAL;

	while (used < quota) {
		ret = __cpdma_chan_process(chan);
		if (ret < 0)
			break;
		used++;
	}
	return used;
}
EXPORT_SYMBOL_GPL(cpdma_chan_process);

int cpdma_chan_start(struct cpdma_chan *chan)
{
	struct cpdma_ctlr	*ctlr = chan->ctlr;
	struct cpdma_desc_pool	*pool = ctlr->pool;
	unsigned long		flags;

	spin_lock_irqsave(&chan->lock, flags);
	if (chan->state != CPDMA_STATE_IDLE) {
		spin_unlock_irqrestore(&chan->lock, flags);
		return -EBUSY;
	}
	if (ctlr->state != CPDMA_STATE_ACTIVE) {
		spin_unlock_irqrestore(&chan->lock, flags);
		return -EINVAL;
	}
	dma_reg_write(ctlr, chan->int_set, chan->mask);
	chan->state = CPDMA_STATE_ACTIVE;
	if (chan->head) {
		chan_write(chan, hdp, desc_phys(pool, chan->head));
		if (chan->rxfree)
			chan_write(chan, rxfree, chan->count);
	}

	spin_unlock_irqrestore(&chan->lock, flags);
	return 0;
}
EXPORT_SYMBOL_GPL(cpdma_chan_start);

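/*
 * Tear down a channel: mask its interrupt, request a hardware teardown and
 * poll the completion pointer for CPDMA_TEARDOWN_VALUE, drain completed
 * packets through __cpdma_chan_process() until the TD_COMPLETE marker is
 * seen, then release any descriptors still queued, invoking each callback
 * with -ENOSYS.
 */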
int cpdma_chan_stop(struct cpdma_chan *chan)
{
	struct cpdma_ctlr	*ctlr = chan->ctlr;
	struct cpdma_desc_pool	*pool = ctlr->pool;
	unsigned long		flags;
	int			ret;
	unsigned		timeout;

	spin_lock_irqsave(&chan->lock, flags);
	if (chan->state == CPDMA_STATE_TEARDOWN) {
		spin_unlock_irqrestore(&chan->lock, flags);
		return -EINVAL;
	}

	chan->state = CPDMA_STATE_TEARDOWN;
	dma_reg_write(ctlr, chan->int_clear, chan->mask);

	/* trigger teardown */
	dma_reg_write(ctlr, chan->td, chan_linear(chan));

	/* wait for teardown complete */
	timeout = 100 * 100; /* 100 ms */
	while (timeout) {
		u32 cp = chan_read(chan, cp);

		if ((cp & CPDMA_TEARDOWN_VALUE) == CPDMA_TEARDOWN_VALUE)
			break;
		udelay(10);
		timeout--;
	}
	WARN_ON(!timeout);
	chan_write(chan, cp, CPDMA_TEARDOWN_VALUE);

	/* handle completed packets */
	spin_unlock_irqrestore(&chan->lock, flags);
	do {
		ret = __cpdma_chan_process(chan);
		if (ret < 0)
			break;
	} while ((ret & CPDMA_DESC_TD_COMPLETE) == 0);
	spin_lock_irqsave(&chan->lock, flags);

	/* remaining packets haven't been tx/rx'ed, clean them up */
	while (chan->head) {
		struct cpdma_desc __iomem *desc = chan->head;
		dma_addr_t next_dma;

		next_dma = desc_read(desc, hw_next);
		chan->head = desc_from_phys(pool, next_dma);
		chan->count--;
		chan->stats.teardown_dequeue++;

		/* issue callback without locks held */
		spin_unlock_irqrestore(&chan->lock, flags);
		__cpdma_chan_free(chan, desc, 0, -ENOSYS);
		spin_lock_irqsave(&chan->lock, flags);
	}

	chan->state = CPDMA_STATE_IDLE;
	spin_unlock_irqrestore(&chan->lock, flags);
	return 0;
}
EXPORT_SYMBOL_GPL(cpdma_chan_stop);

int cpdma_chan_int_ctrl(struct cpdma_chan *chan, bool enable)
{
	unsigned long flags;

	spin_lock_irqsave(&chan->lock, flags);
	if (chan->state != CPDMA_STATE_ACTIVE) {
		spin_unlock_irqrestore(&chan->lock, flags);
		return -EINVAL;
	}

	dma_reg_write(chan->ctlr, enable ? chan->int_set : chan->int_clear,
		      chan->mask);
	spin_unlock_irqrestore(&chan->lock, flags);

	return 0;
}

struct cpdma_control_info {
	u32		reg;
	u32		shift, mask;
	int		access;
#define ACCESS_RO	BIT(0)
#define ACCESS_WO	BIT(1)
#define ACCESS_RW	(ACCESS_RO | ACCESS_WO)
};

static struct cpdma_control_info controls[] = {
	[CPDMA_CMD_IDLE]	  = {CPDMA_DMACONTROL,	3,  1,      ACCESS_WO},
	[CPDMA_COPY_ERROR_FRAMES] = {CPDMA_DMACONTROL,	4,  1,      ACCESS_RW},
	[CPDMA_RX_OFF_LEN_UPDATE] = {CPDMA_DMACONTROL,	2,  1,      ACCESS_RW},
	[CPDMA_RX_OWNERSHIP_FLIP] = {CPDMA_DMACONTROL,	1,  1,      ACCESS_RW},
	[CPDMA_TX_PRIO_FIXED]	  = {CPDMA_DMACONTROL,	0,  1,      ACCESS_RW},
	[CPDMA_STAT_IDLE]	  = {CPDMA_DMASTATUS,	31, 1,      ACCESS_RO},
	[CPDMA_STAT_TX_ERR_CODE]  = {CPDMA_DMASTATUS,	20, 0xf,    ACCESS_RW},
	[CPDMA_STAT_TX_ERR_CHAN]  = {CPDMA_DMASTATUS,	16, 0x7,    ACCESS_RW},
	[CPDMA_STAT_RX_ERR_CODE]  = {CPDMA_DMASTATUS,	12, 0xf,    ACCESS_RW},
	[CPDMA_STAT_RX_ERR_CHAN]  = {CPDMA_DMASTATUS,	8,  0x7,    ACCESS_RW},
	[CPDMA_RX_BUFFER_OFFSET]  = {CPDMA_RXBUFFOFS,	0,  0xffff, ACCESS_RW},
};

int cpdma_control_get(struct cpdma_ctlr *ctlr, int control)
{
	unsigned long flags;
	struct cpdma_control_info *info = &controls[control];
	int ret;

	spin_lock_irqsave(&ctlr->lock, flags);

	ret = -ENOTSUPP;
	if (!ctlr->params.has_ext_regs)
		goto unlock_ret;

	ret = -EINVAL;
	if (ctlr->state != CPDMA_STATE_ACTIVE)
		goto unlock_ret;

	ret = -ENOENT;
	if (control < 0 || control >= ARRAY_SIZE(controls))
		goto unlock_ret;

	ret = -EPERM;
	if ((info->access & ACCESS_RO) != ACCESS_RO)
		goto unlock_ret;

	ret = (dma_reg_read(ctlr, info->reg) >> info->shift) & info->mask;

unlock_ret:
	spin_unlock_irqrestore(&ctlr->lock, flags);
	return ret;
}

int cpdma_control_set(struct cpdma_ctlr *ctlr, int control, int value)
{
	unsigned long flags;
	struct cpdma_control_info *info = &controls[control];
	int ret;
	u32 val;

	spin_lock_irqsave(&ctlr->lock, flags);

	ret = -ENOTSUPP;
	if (!ctlr->params.has_ext_regs)
		goto unlock_ret;

	ret = -EINVAL;
	if (ctlr->state != CPDMA_STATE_ACTIVE)
		goto unlock_ret;

	ret = -ENOENT;
	if (control < 0 || control >= ARRAY_SIZE(controls))
		goto unlock_ret;

	ret = -EPERM;
	if ((info->access & ACCESS_WO) != ACCESS_WO)
		goto unlock_ret;

	val  = dma_reg_read(ctlr, info->reg);
	val &= ~(info->mask << info->shift);
	val |= (value & info->mask) << info->shift;
	dma_reg_write(ctlr, info->reg, val);
	ret = 0;

unlock_ret:
	spin_unlock_irqrestore(&ctlr->lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(cpdma_control_set);

MODULE_LICENSE("GPL");