// SPDX-License-Identifier: GPL-2.0
/*
 * Texas Instruments CPDMA Driver
 *
 * Copyright (C) 2010 Texas Instruments
 *
 */
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/genalloc.h>
#include "davinci_cpdma.h"

/* DMA Registers */
#define CPDMA_TXIDVER		0x00
#define CPDMA_TXCONTROL		0x04
#define CPDMA_TXTEARDOWN	0x08
#define CPDMA_RXIDVER		0x10
#define CPDMA_RXCONTROL		0x14
#define CPDMA_SOFTRESET		0x1c
#define CPDMA_RXTEARDOWN	0x18
#define CPDMA_TX_PRI0_RATE	0x30
#define CPDMA_TXINTSTATRAW	0x80
#define CPDMA_TXINTSTATMASKED	0x84
#define CPDMA_TXINTMASKSET	0x88
#define CPDMA_TXINTMASKCLEAR	0x8c
#define CPDMA_MACINVECTOR	0x90
#define CPDMA_MACEOIVECTOR	0x94
#define CPDMA_RXINTSTATRAW	0xa0
#define CPDMA_RXINTSTATMASKED	0xa4
#define CPDMA_RXINTMASKSET	0xa8
#define CPDMA_RXINTMASKCLEAR	0xac
#define CPDMA_DMAINTSTATRAW	0xb0
#define CPDMA_DMAINTSTATMASKED	0xb4
#define CPDMA_DMAINTMASKSET	0xb8
#define CPDMA_DMAINTMASKCLEAR	0xbc
#define CPDMA_DMAINT_HOSTERR	BIT(1)

/* the following exist only if has_ext_regs is set */
#define CPDMA_DMACONTROL	0x20
#define CPDMA_DMASTATUS		0x24
#define CPDMA_RXBUFFOFS		0x28
#define CPDMA_EM_CONTROL	0x2c

/* Descriptor mode bits */
#define CPDMA_DESC_SOP		BIT(31)
#define CPDMA_DESC_EOP		BIT(30)
#define CPDMA_DESC_OWNER	BIT(29)
#define CPDMA_DESC_EOQ		BIT(28)
#define CPDMA_DESC_TD_COMPLETE	BIT(27)
#define CPDMA_DESC_PASS_CRC	BIT(26)
#define CPDMA_DESC_TO_PORT_EN	BIT(20)
#define CPDMA_TO_PORT_SHIFT	16
#define CPDMA_DESC_PORT_MASK	(BIT(18) | BIT(17) | BIT(16))
#define CPDMA_DESC_CRC_LEN	4

#define CPDMA_TEARDOWN_VALUE	0xfffffffc

#define CPDMA_MAX_RLIM_CNT	16384

struct cpdma_desc {
	/* hardware fields */
	u32			hw_next;
	u32			hw_buffer;
	u32			hw_len;
	u32			hw_mode;
	/* software fields */
	void			*sw_token;
	u32			sw_buffer;
	u32			sw_len;
};

struct cpdma_desc_pool {
	phys_addr_t		phys;
	dma_addr_t		hw_addr;
	void __iomem		*iomap;		/* ioremap map */
	void			*cpumap;	/* dma_alloc map */
	int			desc_size, mem_size;
	int			num_desc;
	struct device		*dev;
	struct gen_pool		*gen_pool;
};

enum cpdma_state {
	CPDMA_STATE_IDLE,
	CPDMA_STATE_ACTIVE,
	CPDMA_STATE_TEARDOWN,
};

struct cpdma_ctlr {
	enum cpdma_state	state;
	struct cpdma_params	params;
	struct device		*dev;
	struct cpdma_desc_pool	*pool;
	spinlock_t		lock;
	struct cpdma_chan	*channels[2 * CPDMA_MAX_CHANNELS];
	int chan_num;
	int			num_rx_desc; /* RX descriptors number */
	int			num_tx_desc; /* TX descriptors number */
};

struct cpdma_chan {
	struct cpdma_desc __iomem	*head, *tail;
	void __iomem			*hdp, *cp, *rxfree;
	enum cpdma_state		state;
	struct cpdma_ctlr		*ctlr;
	int				chan_num;
	spinlock_t			lock;
	int				count;
	u32				desc_num;
	u32				mask;
	cpdma_handler_fn		handler;
	enum dma_data_direction		dir;
	struct cpdma_chan_stats		stats;
	/* offsets into dmaregs */
	int	int_set, int_clear, td;
	int				weight;
	u32				rate_factor;
	u32				rate;
};

struct cpdma_control_info {
	u32		reg;
	u32		shift, mask;
	int		access;
#define ACCESS_RO	BIT(0)
#define ACCESS_WO	BIT(1)
#define ACCESS_RW	(ACCESS_RO | ACCESS_WO)
};

struct submit_info {
	struct cpdma_chan *chan;
	int directed;
	void *token;
	void *data_virt;
	dma_addr_t data_dma;
	int len;
};

static struct cpdma_control_info controls[] = {
	[CPDMA_TX_RLIM]		  = {CPDMA_DMACONTROL,	8,  0xffff, ACCESS_RW},
	[CPDMA_CMD_IDLE]	  = {CPDMA_DMACONTROL,	3,  1,      ACCESS_WO},
	[CPDMA_COPY_ERROR_FRAMES] = {CPDMA_DMACONTROL,	4,  1,      ACCESS_RW},
	[CPDMA_RX_OFF_LEN_UPDATE] = {CPDMA_DMACONTROL,	2,  1,      ACCESS_RW},
	[CPDMA_RX_OWNERSHIP_FLIP] = {CPDMA_DMACONTROL,	1,  1,      ACCESS_RW},
	[CPDMA_TX_PRIO_FIXED]	  = {CPDMA_DMACONTROL,	0,  1,      ACCESS_RW},
	[CPDMA_STAT_IDLE]	  = {CPDMA_DMASTATUS,	31, 1,      ACCESS_RO},
	[CPDMA_STAT_TX_ERR_CODE]  = {CPDMA_DMASTATUS,	20, 0xf,    ACCESS_RW},
	[CPDMA_STAT_TX_ERR_CHAN]  = {CPDMA_DMASTATUS,	16, 0x7,    ACCESS_RW},
	[CPDMA_STAT_RX_ERR_CODE]  = {CPDMA_DMASTATUS,	12, 0xf,    ACCESS_RW},
	[CPDMA_STAT_RX_ERR_CHAN]  = {CPDMA_DMASTATUS,	8,  0x7,    ACCESS_RW},
	[CPDMA_RX_BUFFER_OFFSET]  = {CPDMA_RXBUFFOFS,	0,  0xffff, ACCESS_RW},
};

#define tx_chan_num(chan)	(chan)
#define rx_chan_num(chan)	((chan) + CPDMA_MAX_CHANNELS)
#define is_rx_chan(chan)	((chan)->chan_num >= CPDMA_MAX_CHANNELS)
#define is_tx_chan(chan)	(!is_rx_chan(chan))
#define __chan_linear(chan_num)	((chan_num) & (CPDMA_MAX_CHANNELS - 1))
#define chan_linear(chan)	__chan_linear((chan)->chan_num)
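/* Editor's note, illustrative only: how the macros above map a channel onto
 * ctlr->channels[], assuming CPDMA_MAX_CHANNELS == 8 purely for the sake of
 * the arithmetic:
 *
 *	tx_chan_num(3)	-> 3		(channels[3])
 *	rx_chan_num(3)	-> 3 + 8 = 11	(channels[11])
 *	is_rx_chan()	-> chan_num >= 8, so index 11 is an RX channel
 *	chan_linear()	-> chan_num & (8 - 1) = 3, the hardware channel
 *
 * TX channels occupy the first half of channels[] and RX channels the second
 * half; the hardware only ever sees the linear index.
 */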

/* The following make access to common cpdma_ctlr params more readable */
#define dmaregs		params.dmaregs
#define num_chan	params.num_chan

/* various accessors */
#define dma_reg_read(ctlr, ofs)		readl((ctlr)->dmaregs + (ofs))
#define chan_read(chan, fld)		readl((chan)->fld)
#define desc_read(desc, fld)		readl(&(desc)->fld)
#define dma_reg_write(ctlr, ofs, v)	writel(v, (ctlr)->dmaregs + (ofs))
#define chan_write(chan, fld, v)	writel(v, (chan)->fld)
#define desc_write(desc, fld, v)	writel((u32)(v), &(desc)->fld)

#define cpdma_desc_to_port(chan, mode, directed)			\
	do {								\
		if (!is_rx_chan(chan) && ((directed == 1) ||		\
					  (directed == 2)))		\
			mode |= (CPDMA_DESC_TO_PORT_EN |		\
				 (directed << CPDMA_TO_PORT_SHIFT));	\
	} while (0)
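/* Editor's note, illustrative only: for a TX channel with directed == 1 the
 * macro above turns a plain mode word into a directed-to-port descriptor:
 *
 *	mode = CPDMA_DESC_OWNER | CPDMA_DESC_SOP | CPDMA_DESC_EOP;
 *	cpdma_desc_to_port(chan, mode, 1);
 *	mode now also carries CPDMA_DESC_TO_PORT_EN | (1 << CPDMA_TO_PORT_SHIFT),
 *	i.e. BIT(20) | BIT(16).
 *
 * RX channels, and directed values other than 1 or 2, leave mode untouched.
 */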

#define CPDMA_DMA_EXT_MAP		BIT(16)

static void cpdma_desc_pool_destroy(struct cpdma_ctlr *ctlr)
{
	struct cpdma_desc_pool *pool = ctlr->pool;

	if (!pool)
		return;

	WARN(gen_pool_size(pool->gen_pool) != gen_pool_avail(pool->gen_pool),
	     "cpdma_desc_pool size %zd != avail %zd",
	     gen_pool_size(pool->gen_pool),
	     gen_pool_avail(pool->gen_pool));
	if (pool->cpumap)
		dma_free_coherent(ctlr->dev, pool->mem_size, pool->cpumap,
				  pool->phys);
}

/*
 * Utility constructs for a cpdma descriptor pool.  Some devices (e.g. davinci
 * emac) have dedicated on-chip memory for these descriptors.  Some other
 * devices (e.g. cpsw switches) use plain old memory.  Descriptor pools
 * abstract out these details.
 */
static int cpdma_desc_pool_create(struct cpdma_ctlr *ctlr)
{
	struct cpdma_params *cpdma_params = &ctlr->params;
	struct cpdma_desc_pool *pool;
	int ret = -ENOMEM;

	pool = devm_kzalloc(ctlr->dev, sizeof(*pool), GFP_KERNEL);
	if (!pool)
		goto gen_pool_create_fail;
	ctlr->pool = pool;

	pool->mem_size	= cpdma_params->desc_mem_size;
	pool->desc_size	= ALIGN(sizeof(struct cpdma_desc),
				cpdma_params->desc_align);
	pool->num_desc	= pool->mem_size / pool->desc_size;

	if (cpdma_params->descs_pool_size) {
		/* Recalculate the memory size required for the cpdma
		 * descriptor pool based on the number of descriptors
		 * specified by the user; if that size exceeds the CPPI
		 * internal RAM size (desc_mem_size), switch to DDR.
		 */
		pool->num_desc = cpdma_params->descs_pool_size;
		pool->mem_size = pool->desc_size * pool->num_desc;
		if (pool->mem_size > cpdma_params->desc_mem_size)
			cpdma_params->desc_mem_phys = 0;
	}

	pool->gen_pool = devm_gen_pool_create(ctlr->dev, ilog2(pool->desc_size),
					      -1, "cpdma");
	if (IS_ERR(pool->gen_pool)) {
		ret = PTR_ERR(pool->gen_pool);
		dev_err(ctlr->dev, "pool create failed %d\n", ret);
		goto gen_pool_create_fail;
	}

	if (cpdma_params->desc_mem_phys) {
		pool->phys  = cpdma_params->desc_mem_phys;
		pool->iomap = devm_ioremap(ctlr->dev, pool->phys,
					   pool->mem_size);
		pool->hw_addr = cpdma_params->desc_hw_addr;
	} else {
		pool->cpumap = dma_alloc_coherent(ctlr->dev,  pool->mem_size,
						  &pool->hw_addr, GFP_KERNEL);
		pool->iomap = (void __iomem __force *)pool->cpumap;
		pool->phys = pool->hw_addr; /* assumes no IOMMU, don't use this value */
	}

	if (!pool->iomap)
		goto gen_pool_create_fail;

	ret = gen_pool_add_virt(pool->gen_pool, (unsigned long)pool->iomap,
				pool->phys, pool->mem_size, -1);
	if (ret < 0) {
		dev_err(ctlr->dev, "pool add failed %d\n", ret);
		goto gen_pool_add_virt_fail;
	}

	return 0;

gen_pool_add_virt_fail:
	cpdma_desc_pool_destroy(ctlr);
gen_pool_create_fail:
	ctlr->pool = NULL;
	return ret;
}
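
/* Editor's worked example (numbers assumed, not from the original source):
 * with a 4 KiB on-chip CPPI RAM (desc_mem_size = 0x1000) and desc_align = 16,
 * cpdma_desc_pool_create() above ends up with:
 *
 *	desc_size = ALIGN(sizeof(struct cpdma_desc), 16) = 32
 *	num_desc  = 0x1000 / 32 = 128 descriptors
 *
 * If the user instead requests descs_pool_size = 256, mem_size becomes
 * 256 * 32 = 8 KiB, which no longer fits the internal RAM, so desc_mem_phys
 * is cleared and the pool falls back to DDR via dma_alloc_coherent().
 */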

static inline dma_addr_t desc_phys(struct cpdma_desc_pool *pool,
		  struct cpdma_desc __iomem *desc)
{
	if (!desc)
		return 0;
	return pool->hw_addr + (__force long)desc - (__force long)pool->iomap;
}

static inline struct cpdma_desc __iomem *
desc_from_phys(struct cpdma_desc_pool *pool, dma_addr_t dma)
{
	return dma ? pool->iomap + dma - pool->hw_addr : NULL;
}
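
/* Editor's note, illustrative addresses assumed: desc_phys()/desc_from_phys()
 * translate between the CPU view of the pool (iomap) and the DMA address the
 * hardware sees.  With iomap == 0xd0830000, hw_addr == 0x4a102000 and a
 * 32-byte desc_size, the third descriptor sits at offset 2 * 32 = 0x40:
 *
 *	desc			 = iomap + 0x40   = 0xd0830040
 *	desc_phys(pool, desc)	 = hw_addr + 0x40 = 0x4a102040
 *	desc_from_phys(pool, 0x4a102040) gives back iomap + 0x40
 *
 * A zero DMA address maps to NULL, which is how end-of-chain is represented.
 */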

static struct cpdma_desc __iomem *
cpdma_desc_alloc(struct cpdma_desc_pool *pool)
{
	return (struct cpdma_desc __iomem *)
		gen_pool_alloc(pool->gen_pool, pool->desc_size);
}

static void cpdma_desc_free(struct cpdma_desc_pool *pool,
			    struct cpdma_desc __iomem *desc, int num_desc)
{
	gen_pool_free(pool->gen_pool, (unsigned long)desc, pool->desc_size);
}

static int _cpdma_control_set(struct cpdma_ctlr *ctlr, int control, int value)
{
	struct cpdma_control_info *info = &controls[control];
	u32 val;

	if (!ctlr->params.has_ext_regs)
		return -ENOTSUPP;

	if (ctlr->state != CPDMA_STATE_ACTIVE)
		return -EINVAL;

	if (control < 0 || control >= ARRAY_SIZE(controls))
		return -ENOENT;

	if ((info->access & ACCESS_WO) != ACCESS_WO)
		return -EPERM;

	val  = dma_reg_read(ctlr, info->reg);
	val &= ~(info->mask << info->shift);
	val |= (value & info->mask) << info->shift;
	dma_reg_write(ctlr, info->reg, val);

	return 0;
}

static int _cpdma_control_get(struct cpdma_ctlr *ctlr, int control)
{
	struct cpdma_control_info *info = &controls[control];
	int ret;

	if (!ctlr->params.has_ext_regs)
		return -ENOTSUPP;

	if (ctlr->state != CPDMA_STATE_ACTIVE)
		return -EINVAL;

	if (control < 0 || control >= ARRAY_SIZE(controls))
		return -ENOENT;

	if ((info->access & ACCESS_RO) != ACCESS_RO)
		return -EPERM;

	ret = (dma_reg_read(ctlr, info->reg) >> info->shift) & info->mask;
	return ret;
}
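
/* Editor's worked example: using the controls[] table,
 * _cpdma_control_set(ctlr, CPDMA_RX_BUFFER_OFFSET, 2) boils down to a
 * read-modify-write of the CPDMA_RXBUFFOFS register:
 *
 *	val  = dma_reg_read(ctlr, CPDMA_RXBUFFOFS);
 *	val &= ~(0xffff << 0);
 *	val |= (2 & 0xffff) << 0;
 *	dma_reg_write(ctlr, CPDMA_RXBUFFOFS, val);
 *
 * Controls without ACCESS_WO (e.g. CPDMA_STAT_IDLE) reject the write with
 * -EPERM, and both helpers require has_ext_regs and an ACTIVE controller.
 */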

/* cpdma_chan_set_chan_shaper - set shaper for a channel
 * Has to be called under ctlr lock
 */
static int cpdma_chan_set_chan_shaper(struct cpdma_chan *chan)
{
	struct cpdma_ctlr *ctlr = chan->ctlr;
	u32 rate_reg;
	u32 rmask;
	int ret;

	if (!chan->rate)
		return 0;

	rate_reg = CPDMA_TX_PRI0_RATE + 4 * chan->chan_num;
	dma_reg_write(ctlr, rate_reg, chan->rate_factor);

	rmask = _cpdma_control_get(ctlr, CPDMA_TX_RLIM);
	rmask |= chan->mask;

	ret = _cpdma_control_set(ctlr, CPDMA_TX_RLIM, rmask);
	return ret;
}

static int cpdma_chan_on(struct cpdma_chan *chan)
{
	struct cpdma_ctlr *ctlr = chan->ctlr;
	struct cpdma_desc_pool	*pool = ctlr->pool;
	unsigned long flags;

	spin_lock_irqsave(&chan->lock, flags);
	if (chan->state != CPDMA_STATE_IDLE) {
		spin_unlock_irqrestore(&chan->lock, flags);
		return -EBUSY;
	}
	if (ctlr->state != CPDMA_STATE_ACTIVE) {
		spin_unlock_irqrestore(&chan->lock, flags);
		return -EINVAL;
	}
	dma_reg_write(ctlr, chan->int_set, chan->mask);
	chan->state = CPDMA_STATE_ACTIVE;
	if (chan->head) {
		chan_write(chan, hdp, desc_phys(pool, chan->head));
		if (chan->rxfree)
			chan_write(chan, rxfree, chan->count);
	}

	spin_unlock_irqrestore(&chan->lock, flags);
	return 0;
}

/* cpdma_chan_fit_rate - set rate for a channel and check if it's possible.
 * rmask - on success, holds the mask of rate limited channels
 * Returns 0 on success, -EINVAL if the new rate breaks the required
 * ordering of rate limited channels.
 */
static int cpdma_chan_fit_rate(struct cpdma_chan *ch, u32 rate,
			       u32 *rmask, int *prio_mode)
{
	struct cpdma_ctlr *ctlr = ch->ctlr;
	struct cpdma_chan *chan;
	u32 old_rate = ch->rate;
	u32 new_rmask = 0;
	int rlim = 0;
	int i;

	for (i = tx_chan_num(0); i < tx_chan_num(CPDMA_MAX_CHANNELS); i++) {
		chan = ctlr->channels[i];
		if (!chan)
			continue;

		if (chan == ch)
			chan->rate = rate;

		if (chan->rate) {
			rlim = 1;
			new_rmask |= chan->mask;
			continue;
		}

		if (rlim)
			goto err;
	}

	*rmask = new_rmask;
	*prio_mode = rlim;
	return 0;

err:
	ch->rate = old_rate;
	dev_err(ctlr->dev, "Upper cpdma ch%d is not rate limited\n",
		chan->chan_num);
	return -EINVAL;
}

static int cpdma_chan_set_factors(struct cpdma_ctlr *ctlr,
				  struct cpdma_chan *ch)
{
	u32 delta = UINT_MAX, prev_delta = UINT_MAX, best_delta = UINT_MAX;
	u32 best_send_cnt = 0, best_idle_cnt = 0;
	u32 new_rate, best_rate = 0, rate_reg;
	u64 send_cnt, idle_cnt;
	u32 min_send_cnt, freq;
	u64 divident, divisor;

	if (!ch->rate) {
		ch->rate_factor = 0;
		goto set_factor;
	}

	freq = ctlr->params.bus_freq_mhz * 1000 * 32;
	if (!freq) {
		dev_err(ctlr->dev, "The bus frequency is not set\n");
		return -EINVAL;
	}

	min_send_cnt = freq - ch->rate;
	send_cnt = DIV_ROUND_UP(min_send_cnt, ch->rate);
	while (send_cnt <= CPDMA_MAX_RLIM_CNT) {
		divident = ch->rate * send_cnt;
		divisor = min_send_cnt;
		idle_cnt = DIV_ROUND_CLOSEST_ULL(divident, divisor);

		divident = freq * idle_cnt;
		divisor = idle_cnt + send_cnt;
		new_rate = DIV_ROUND_CLOSEST_ULL(divident, divisor);

		delta = new_rate >= ch->rate ? new_rate - ch->rate : delta;
		if (delta < best_delta) {
			best_delta = delta;
			best_send_cnt = send_cnt;
			best_idle_cnt = idle_cnt;
			best_rate = new_rate;

			if (!delta)
				break;
		}

		if (prev_delta >= delta) {
			prev_delta = delta;
			send_cnt++;
			continue;
		}

		idle_cnt++;
		divident = freq * idle_cnt;
		send_cnt = DIV_ROUND_CLOSEST_ULL(divident, ch->rate);
		send_cnt -= idle_cnt;
		prev_delta = UINT_MAX;
	}

	ch->rate = best_rate;
	ch->rate_factor = best_send_cnt | (best_idle_cnt << 16);

set_factor:
	rate_reg = CPDMA_TX_PRI0_RATE + 4 * ch->chan_num;
	dma_reg_write(ctlr, rate_reg, ch->rate_factor);
	return 0;
}

struct cpdma_ctlr *cpdma_ctlr_create(struct cpdma_params *params)
{
	struct cpdma_ctlr *ctlr;

	ctlr = devm_kzalloc(params->dev, sizeof(*ctlr), GFP_KERNEL);
	if (!ctlr)
		return NULL;

	ctlr->state = CPDMA_STATE_IDLE;
	ctlr->params = *params;
	ctlr->dev = params->dev;
	ctlr->chan_num = 0;
	spin_lock_init(&ctlr->lock);

	if (cpdma_desc_pool_create(ctlr))
		return NULL;
	/* split pool equally between RX/TX by default */
	ctlr->num_tx_desc = ctlr->pool->num_desc / 2;
	ctlr->num_rx_desc = ctlr->pool->num_desc - ctlr->num_tx_desc;

	if (WARN_ON(ctlr->num_chan > CPDMA_MAX_CHANNELS))
		ctlr->num_chan = CPDMA_MAX_CHANNELS;
	return ctlr;
}

int cpdma_ctlr_start(struct cpdma_ctlr *ctlr)
{
	struct cpdma_chan *chan;
	unsigned long flags;
	int i, prio_mode;

	spin_lock_irqsave(&ctlr->lock, flags);
	if (ctlr->state != CPDMA_STATE_IDLE) {
		spin_unlock_irqrestore(&ctlr->lock, flags);
		return -EBUSY;
	}

	if (ctlr->params.has_soft_reset) {
		unsigned timeout = 10 * 100;

		dma_reg_write(ctlr, CPDMA_SOFTRESET, 1);
		while (timeout) {
			if (dma_reg_read(ctlr, CPDMA_SOFTRESET) == 0)
				break;
			udelay(10);
			timeout--;
		}
		WARN_ON(!timeout);
	}

	for (i = 0; i < ctlr->num_chan; i++) {
		writel(0, ctlr->params.txhdp + 4 * i);
		writel(0, ctlr->params.rxhdp + 4 * i);
		writel(0, ctlr->params.txcp + 4 * i);
		writel(0, ctlr->params.rxcp + 4 * i);
	}

	dma_reg_write(ctlr, CPDMA_RXINTMASKCLEAR, 0xffffffff);
	dma_reg_write(ctlr, CPDMA_TXINTMASKCLEAR, 0xffffffff);

	dma_reg_write(ctlr, CPDMA_TXCONTROL, 1);
	dma_reg_write(ctlr, CPDMA_RXCONTROL, 1);

	ctlr->state = CPDMA_STATE_ACTIVE;

	prio_mode = 0;
	for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) {
		chan = ctlr->channels[i];
		if (chan) {
			cpdma_chan_set_chan_shaper(chan);
			cpdma_chan_on(chan);

			/* off prio mode if all tx channels are rate limited */
			if (is_tx_chan(chan) && !chan->rate)
				prio_mode = 1;
		}
	}

	_cpdma_control_set(ctlr, CPDMA_TX_PRIO_FIXED, prio_mode);
	_cpdma_control_set(ctlr, CPDMA_RX_BUFFER_OFFSET, 0);

	spin_unlock_irqrestore(&ctlr->lock, flags);
	return 0;
}

int cpdma_ctlr_stop(struct cpdma_ctlr *ctlr)
{
	unsigned long flags;
	int i;

	spin_lock_irqsave(&ctlr->lock, flags);
	if (ctlr->state != CPDMA_STATE_ACTIVE) {
		spin_unlock_irqrestore(&ctlr->lock, flags);
		return -EINVAL;
	}

	ctlr->state = CPDMA_STATE_TEARDOWN;
	spin_unlock_irqrestore(&ctlr->lock, flags);

	for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) {
		if (ctlr->channels[i])
			cpdma_chan_stop(ctlr->channels[i]);
	}

	spin_lock_irqsave(&ctlr->lock, flags);
	dma_reg_write(ctlr, CPDMA_RXINTMASKCLEAR, 0xffffffff);
	dma_reg_write(ctlr, CPDMA_TXINTMASKCLEAR, 0xffffffff);

	dma_reg_write(ctlr, CPDMA_TXCONTROL, 0);
	dma_reg_write(ctlr, CPDMA_RXCONTROL, 0);

	ctlr->state = CPDMA_STATE_IDLE;

	spin_unlock_irqrestore(&ctlr->lock, flags);
	return 0;
}

int cpdma_ctlr_destroy(struct cpdma_ctlr *ctlr)
{
	int ret = 0, i;

	if (!ctlr)
		return -EINVAL;

	if (ctlr->state != CPDMA_STATE_IDLE)
		cpdma_ctlr_stop(ctlr);

	for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++)
		cpdma_chan_destroy(ctlr->channels[i]);

	cpdma_desc_pool_destroy(ctlr);
	return ret;
}

int cpdma_ctlr_int_ctrl(struct cpdma_ctlr *ctlr, bool enable)
{
	unsigned long flags;
	int i;

	spin_lock_irqsave(&ctlr->lock, flags);
	if (ctlr->state != CPDMA_STATE_ACTIVE) {
		spin_unlock_irqrestore(&ctlr->lock, flags);
		return -EINVAL;
	}

	for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) {
		if (ctlr->channels[i])
			cpdma_chan_int_ctrl(ctlr->channels[i], enable);
	}

	spin_unlock_irqrestore(&ctlr->lock, flags);
	return 0;
}

void cpdma_ctlr_eoi(struct cpdma_ctlr *ctlr, u32 value)
{
	dma_reg_write(ctlr, CPDMA_MACEOIVECTOR, value);
}

u32 cpdma_ctrl_rxchs_state(struct cpdma_ctlr *ctlr)
{
	return dma_reg_read(ctlr, CPDMA_RXINTSTATMASKED);
}

u32 cpdma_ctrl_txchs_state(struct cpdma_ctlr *ctlr)
{
	return dma_reg_read(ctlr, CPDMA_TXINTSTATMASKED);
}

static void cpdma_chan_set_descs(struct cpdma_ctlr *ctlr,
				 int rx, int desc_num,
				 int per_ch_desc)
{
	struct cpdma_chan *chan, *most_chan = NULL;
	int desc_cnt = desc_num;
	int most_dnum = 0;
	int min, max, i;

	if (!desc_num)
		return;

	if (rx) {
		min = rx_chan_num(0);
		max = rx_chan_num(CPDMA_MAX_CHANNELS);
	} else {
		min = tx_chan_num(0);
		max = tx_chan_num(CPDMA_MAX_CHANNELS);
	}

	for (i = min; i < max; i++) {
		chan = ctlr->channels[i];
		if (!chan)
			continue;

		if (chan->weight)
			chan->desc_num = (chan->weight * desc_num) / 100;
		else
			chan->desc_num = per_ch_desc;

		desc_cnt -= chan->desc_num;

		if (most_dnum < chan->desc_num) {
			most_dnum = chan->desc_num;
			most_chan = chan;
		}
	}
	/* hand any remainder to the channel that got the most descriptors */
	if (most_chan)
		most_chan->desc_num += desc_cnt;
}

/* cpdma_chan_split_pool - splits the controller pool between all channels.
 * Has to be called under ctlr lock.
 */
static int cpdma_chan_split_pool(struct cpdma_ctlr *ctlr)
{
	int tx_per_ch_desc = 0, rx_per_ch_desc = 0;
	int free_rx_num = 0, free_tx_num = 0;
	int rx_weight = 0, tx_weight = 0;
	int tx_desc_num, rx_desc_num;
	struct cpdma_chan *chan;
	int i;

	if (!ctlr->chan_num)
		return 0;

	for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) {
		chan = ctlr->channels[i];
		if (!chan)
			continue;

		if (is_rx_chan(chan)) {
			if (!chan->weight)
				free_rx_num++;
			rx_weight += chan->weight;
		} else {
			if (!chan->weight)
				free_tx_num++;
			tx_weight += chan->weight;
		}
	}

	if (rx_weight > 100 || tx_weight > 100)
		return -EINVAL;

	tx_desc_num = ctlr->num_tx_desc;
	rx_desc_num = ctlr->num_rx_desc;

	if (free_tx_num) {
		tx_per_ch_desc = tx_desc_num - (tx_weight * tx_desc_num) / 100;
		tx_per_ch_desc /= free_tx_num;
	}
	if (free_rx_num) {
		rx_per_ch_desc = rx_desc_num - (rx_weight * rx_desc_num) / 100;
		rx_per_ch_desc /= free_rx_num;
	}

	cpdma_chan_set_descs(ctlr, 0, tx_desc_num, tx_per_ch_desc);
	cpdma_chan_set_descs(ctlr, 1, rx_desc_num, rx_per_ch_desc);

	return 0;
}
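
/* Editor's worked example (numbers assumed): with ctlr->num_tx_desc = 64 and
 * two TX channels, ch0 with weight = 30 and ch1 with weight = 0,
 * cpdma_chan_split_pool() computes:
 *
 *	tx_weight      = 30, free_tx_num = 1
 *	tx_per_ch_desc = (64 - (30 * 64) / 100) / 1 = 45
 *
 * and cpdma_chan_set_descs() then hands out ch0->desc_num = 19 and
 * ch1->desc_num = 45; any descriptors left over by the integer divisions
 * (none in this case) go to the channel that already received the most.
 */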

/* cpdma_chan_set_weight - set the weight of a channel in percent.
 * TX and RX channels have separate weight budgets, i.e. 100% for RX
 * and 100% for TX. The weight is used to split the cpdma resources,
 * including the number of descriptors, between the channels in the
 * required proportion. The channel rate alone is not enough to derive
 * the weight, as the maximum rate of the interface would also be
 * needed. If weight == 0, the channel uses whatever descriptors are
 * left over by the weighted channels.
 */
int cpdma_chan_set_weight(struct cpdma_chan *ch, int weight)
{
	struct cpdma_ctlr *ctlr = ch->ctlr;
	unsigned long flags, ch_flags;
	int ret;

	spin_lock_irqsave(&ctlr->lock, flags);
	spin_lock_irqsave(&ch->lock, ch_flags);
	if (ch->weight == weight) {
		spin_unlock_irqrestore(&ch->lock, ch_flags);
		spin_unlock_irqrestore(&ctlr->lock, flags);
		return 0;
	}
	ch->weight = weight;
	spin_unlock_irqrestore(&ch->lock, ch_flags);

	/* re-split pool using new channel weight */
	ret = cpdma_chan_split_pool(ctlr);
	spin_unlock_irqrestore(&ctlr->lock, flags);
	return ret;
}

/* cpdma_chan_get_min_rate - get minimum allowed rate for channel
 * Should be called before cpdma_chan_set_rate.
 * Returns min rate in Kb/s
 */
u32 cpdma_chan_get_min_rate(struct cpdma_ctlr *ctlr)
{
	unsigned int divident, divisor;

	divident = ctlr->params.bus_freq_mhz * 32 * 1000;
	divisor = 1 + CPDMA_MAX_RLIM_CNT;

	return DIV_ROUND_UP(divident, divisor);
}
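
/* Editor's worked example (bus_freq_mhz = 350 assumed):
 *
 *	divident = 350 * 32 * 1000        = 11200000	(max rate in Kb/s)
 *	divisor  = 1 + CPDMA_MAX_RLIM_CNT = 16385
 *	min rate = DIV_ROUND_UP(11200000, 16385) = 684 Kb/s
 *
 * Rates below this cannot be expressed by the send/idle counters programmed
 * in cpdma_chan_set_factors(), which are capped at CPDMA_MAX_RLIM_CNT.
 */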

/* cpdma_chan_set_rate - limits bandwidth for a transmit channel.
 * The bandwidth limited channels have to be configured in order,
 * beginning from the lowest one.
 * ch - transmit channel the bandwidth is configured for
 * rate - bandwidth in Kb/s; if 0, the shaper is turned off
 */
int cpdma_chan_set_rate(struct cpdma_chan *ch, u32 rate)
{
	unsigned long flags, ch_flags;
	struct cpdma_ctlr *ctlr;
	int ret, prio_mode;
	u32 rmask;

	if (!ch || !is_tx_chan(ch))
		return -EINVAL;

	if (ch->rate == rate)
		return rate;

	ctlr = ch->ctlr;
	spin_lock_irqsave(&ctlr->lock, flags);
	spin_lock_irqsave(&ch->lock, ch_flags);

	ret = cpdma_chan_fit_rate(ch, rate, &rmask, &prio_mode);
	if (ret)
		goto err;

	ret = cpdma_chan_set_factors(ctlr, ch);
	if (ret)
		goto err;

	spin_unlock_irqrestore(&ch->lock, ch_flags);

	/* on shapers */
	_cpdma_control_set(ctlr, CPDMA_TX_RLIM, rmask);
	_cpdma_control_set(ctlr, CPDMA_TX_PRIO_FIXED, prio_mode);
	spin_unlock_irqrestore(&ctlr->lock, flags);
	return ret;

err:
	spin_unlock_irqrestore(&ch->lock, ch_flags);
	spin_unlock_irqrestore(&ctlr->lock, flags);
	return ret;
}

u32 cpdma_chan_get_rate(struct cpdma_chan *ch)
{
	unsigned long flags;
	u32 rate;

	spin_lock_irqsave(&ch->lock, flags);
	rate = ch->rate;
	spin_unlock_irqrestore(&ch->lock, flags);

	return rate;
}

struct cpdma_chan *cpdma_chan_create(struct cpdma_ctlr *ctlr, int chan_num,
				     cpdma_handler_fn handler, int rx_type)
{
	int offset = chan_num * 4;
	struct cpdma_chan *chan;
	unsigned long flags;

	chan_num = rx_type ? rx_chan_num(chan_num) : tx_chan_num(chan_num);

	if (__chan_linear(chan_num) >= ctlr->num_chan)
		return ERR_PTR(-EINVAL);

	chan = devm_kzalloc(ctlr->dev, sizeof(*chan), GFP_KERNEL);
	if (!chan)
		return ERR_PTR(-ENOMEM);

	spin_lock_irqsave(&ctlr->lock, flags);
	if (ctlr->channels[chan_num]) {
		spin_unlock_irqrestore(&ctlr->lock, flags);
		devm_kfree(ctlr->dev, chan);
		return ERR_PTR(-EBUSY);
	}

	chan->ctlr	= ctlr;
	chan->state	= CPDMA_STATE_IDLE;
	chan->chan_num	= chan_num;
	chan->handler	= handler;
	chan->rate	= 0;
	chan->weight	= 0;

	if (is_rx_chan(chan)) {
		chan->hdp	= ctlr->params.rxhdp + offset;
		chan->cp	= ctlr->params.rxcp + offset;
		chan->rxfree	= ctlr->params.rxfree + offset;
		chan->int_set	= CPDMA_RXINTMASKSET;
		chan->int_clear	= CPDMA_RXINTMASKCLEAR;
		chan->td	= CPDMA_RXTEARDOWN;
		chan->dir	= DMA_FROM_DEVICE;
	} else {
		chan->hdp	= ctlr->params.txhdp + offset;
		chan->cp	= ctlr->params.txcp + offset;
		chan->int_set	= CPDMA_TXINTMASKSET;
		chan->int_clear	= CPDMA_TXINTMASKCLEAR;
		chan->td	= CPDMA_TXTEARDOWN;
		chan->dir	= DMA_TO_DEVICE;
	}
	chan->mask = BIT(chan_linear(chan));

	spin_lock_init(&chan->lock);

	ctlr->channels[chan_num] = chan;
	ctlr->chan_num++;

	cpdma_chan_split_pool(ctlr);

	spin_unlock_irqrestore(&ctlr->lock, flags);
	return chan;
}
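
/* Editor's illustrative sketch, kept out of the build: how a glue driver
 * (e.g. an Ethernet MAC) might typically pair one TX and one RX channel
 * after cpdma_ctlr_create().  The function and handler names here are
 * hypothetical placeholders, not part of this driver.
 */
#if 0
static int example_open_channels(struct cpdma_ctlr *cpdma,
				 cpdma_handler_fn tx_handler,
				 cpdma_handler_fn rx_handler)
{
	struct cpdma_chan *txch, *rxch;

	txch = cpdma_chan_create(cpdma, 0, tx_handler, 0);	/* TX chan 0 */
	if (IS_ERR(txch))
		return PTR_ERR(txch);

	rxch = cpdma_chan_create(cpdma, 0, rx_handler, 1);	/* RX chan 0 */
	if (IS_ERR(rxch)) {
		cpdma_chan_destroy(txch);
		return PTR_ERR(rxch);
	}

	/* RX buffers can already be queued with cpdma_chan_idle_submit()
	 * while the channels are still idle; DMA only starts once the
	 * controller and channels are started.
	 */
	return cpdma_ctlr_start(cpdma);
}
#endif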

int cpdma_chan_get_rx_buf_num(struct cpdma_chan *chan)
{
	unsigned long flags;
	int desc_num;

	spin_lock_irqsave(&chan->lock, flags);
	desc_num = chan->desc_num;
	spin_unlock_irqrestore(&chan->lock, flags);

	return desc_num;
}

int cpdma_chan_destroy(struct cpdma_chan *chan)
{
	struct cpdma_ctlr *ctlr;
	unsigned long flags;

	if (!chan)
		return -EINVAL;
	ctlr = chan->ctlr;

	spin_lock_irqsave(&ctlr->lock, flags);
	if (chan->state != CPDMA_STATE_IDLE)
		cpdma_chan_stop(chan);
	ctlr->channels[chan->chan_num] = NULL;
	ctlr->chan_num--;
	devm_kfree(ctlr->dev, chan);
	cpdma_chan_split_pool(ctlr);

	spin_unlock_irqrestore(&ctlr->lock, flags);
	return 0;
}

int cpdma_chan_get_stats(struct cpdma_chan *chan,
			 struct cpdma_chan_stats *stats)
{
	unsigned long flags;
	if (!chan)
		return -EINVAL;
	spin_lock_irqsave(&chan->lock, flags);
	memcpy(stats, &chan->stats, sizeof(*stats));
	spin_unlock_irqrestore(&chan->lock, flags);
	return 0;
}

static void __cpdma_chan_submit(struct cpdma_chan *chan,
				struct cpdma_desc __iomem *desc)
{
	struct cpdma_ctlr		*ctlr = chan->ctlr;
	struct cpdma_desc __iomem	*prev = chan->tail;
	struct cpdma_desc_pool		*pool = ctlr->pool;
	dma_addr_t			desc_dma;
	u32				mode;

	desc_dma = desc_phys(pool, desc);

	/* simple case - idle channel */
	if (!chan->head) {
		chan->stats.head_enqueue++;
		chan->head = desc;
		chan->tail = desc;
		if (chan->state == CPDMA_STATE_ACTIVE)
			chan_write(chan, hdp, desc_dma);
		return;
	}

	/* first chain the descriptor at the tail of the list */
	desc_write(prev, hw_next, desc_dma);
	chan->tail = desc;
	chan->stats.tail_enqueue++;

	/* next check if EOQ has been triggered already */
	mode = desc_read(prev, hw_mode);
	if (((mode & (CPDMA_DESC_EOQ | CPDMA_DESC_OWNER)) == CPDMA_DESC_EOQ) &&
	    (chan->state == CPDMA_STATE_ACTIVE)) {
		desc_write(prev, hw_mode, mode & ~CPDMA_DESC_EOQ);
		chan_write(chan, hdp, desc_dma);
		chan->stats.misqueued++;
	}
}

static int cpdma_chan_submit_si(struct submit_info *si)
{
	struct cpdma_chan		*chan = si->chan;
	struct cpdma_ctlr		*ctlr = chan->ctlr;
	int				len = si->len;
	int				swlen = len;
	struct cpdma_desc __iomem	*desc;
	dma_addr_t			buffer;
	u32				mode;
	int				ret;

	if (chan->count >= chan->desc_num)	{
		chan->stats.desc_alloc_fail++;
		return -ENOMEM;
	}

	desc = cpdma_desc_alloc(ctlr->pool);
	if (!desc) {
		chan->stats.desc_alloc_fail++;
		return -ENOMEM;
	}

	if (len < ctlr->params.min_packet_size) {
		len = ctlr->params.min_packet_size;
		chan->stats.runt_transmit_buff++;
	}

	mode = CPDMA_DESC_OWNER | CPDMA_DESC_SOP | CPDMA_DESC_EOP;
	cpdma_desc_to_port(chan, mode, si->directed);

	if (si->data_dma) {
		buffer = si->data_dma;
		dma_sync_single_for_device(ctlr->dev, buffer, len, chan->dir);
		swlen |= CPDMA_DMA_EXT_MAP;
	} else {
		buffer = dma_map_single(ctlr->dev, si->data_virt, len, chan->dir);
		ret = dma_mapping_error(ctlr->dev, buffer);
		if (ret) {
			cpdma_desc_free(ctlr->pool, desc, 1);
			return -EINVAL;
		}
	}

	/* Relaxed IO accessors can be used here as there is a read barrier
	 * at the end of the write sequence.
	 */
	writel_relaxed(0, &desc->hw_next);
	writel_relaxed(buffer, &desc->hw_buffer);
	writel_relaxed(len, &desc->hw_len);
	writel_relaxed(mode | len, &desc->hw_mode);
	writel_relaxed((uintptr_t)si->token, &desc->sw_token);
	writel_relaxed(buffer, &desc->sw_buffer);
	writel_relaxed(swlen, &desc->sw_len);
	desc_read(desc, sw_len);

	__cpdma_chan_submit(chan, desc);

	if (chan->state == CPDMA_STATE_ACTIVE && chan->rxfree)
		chan_write(chan, rxfree, 1);

	chan->count++;
	return 0;
}

int cpdma_chan_idle_submit(struct cpdma_chan *chan, void *token, void *data,
			   int len, int directed)
{
	struct submit_info si;
	unsigned long flags;
	int ret;

	si.chan = chan;
	si.token = token;
	si.data_virt = data;
	si.data_dma = 0;
	si.len = len;
	si.directed = directed;

	spin_lock_irqsave(&chan->lock, flags);
	if (chan->state == CPDMA_STATE_TEARDOWN) {
		spin_unlock_irqrestore(&chan->lock, flags);
		return -EINVAL;
	}

	ret = cpdma_chan_submit_si(&si);
	spin_unlock_irqrestore(&chan->lock, flags);
	return ret;
}

int cpdma_chan_idle_submit_mapped(struct cpdma_chan *chan, void *token,
				  dma_addr_t data, int len, int directed)
{
	struct submit_info si;
	unsigned long flags;
	int ret;

	si.chan = chan;
	si.token = token;
	si.data_virt = NULL;
	si.data_dma = data;
	si.len = len;
	si.directed = directed;

	spin_lock_irqsave(&chan->lock, flags);
	if (chan->state == CPDMA_STATE_TEARDOWN) {
		spin_unlock_irqrestore(&chan->lock, flags);
		return -EINVAL;
	}

	ret = cpdma_chan_submit_si(&si);
	spin_unlock_irqrestore(&chan->lock, flags);
	return ret;
}

int cpdma_chan_submit(struct cpdma_chan *chan, void *token, void *data,
		      int len, int directed)
{
	struct submit_info si;
	unsigned long flags;
	int ret;

	si.chan = chan;
	si.token = token;
	si.data_virt = data;
	si.data_dma = 0;
	si.len = len;
	si.directed = directed;

	spin_lock_irqsave(&chan->lock, flags);
	if (chan->state != CPDMA_STATE_ACTIVE) {
		spin_unlock_irqrestore(&chan->lock, flags);
		return -EINVAL;
	}

	ret = cpdma_chan_submit_si(&si);
	spin_unlock_irqrestore(&chan->lock, flags);
	return ret;
}

int cpdma_chan_submit_mapped(struct cpdma_chan *chan, void *token,
			     dma_addr_t data, int len, int directed)
{
	struct submit_info si;
	unsigned long flags;
	int ret;

	si.chan = chan;
	si.token = token;
	si.data_virt = NULL;
	si.data_dma = data;
	si.len = len;
	si.directed = directed;

	spin_lock_irqsave(&chan->lock, flags);
	if (chan->state != CPDMA_STATE_ACTIVE) {
		spin_unlock_irqrestore(&chan->lock, flags);
		return -EINVAL;
	}

	ret = cpdma_chan_submit_si(&si);
	spin_unlock_irqrestore(&chan->lock, flags);
	return ret;
}
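
/* Editor's illustrative sketch, kept out of the build: the shape of a typical
 * TX submission.  The token is opaque to cpdma and is handed back to the
 * channel's cpdma_handler_fn on completion (or teardown), which is where the
 * buffer would be unmapped/freed.  Names below are hypothetical.
 */
#if 0
static int example_xmit(struct cpdma_chan *txch, void *token,
			void *buf, int len)
{
	/* directed == 0: do not force the packet to a specific port */
	return cpdma_chan_submit(txch, token, buf, len, 0);
}
#endif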

bool cpdma_check_free_tx_desc(struct cpdma_chan *chan)
{
	struct cpdma_ctlr	*ctlr = chan->ctlr;
	struct cpdma_desc_pool	*pool = ctlr->pool;
	bool			free_tx_desc;
	unsigned long		flags;

	spin_lock_irqsave(&chan->lock, flags);
	free_tx_desc = (chan->count < chan->desc_num) &&
			 gen_pool_avail(pool->gen_pool);
	spin_unlock_irqrestore(&chan->lock, flags);
	return free_tx_desc;
}

static void __cpdma_chan_free(struct cpdma_chan *chan,
			      struct cpdma_desc __iomem *desc,
			      int outlen, int status)
{
	struct cpdma_ctlr		*ctlr = chan->ctlr;
	struct cpdma_desc_pool		*pool = ctlr->pool;
	dma_addr_t			buff_dma;
	int				origlen;
	uintptr_t			token;

	token      = desc_read(desc, sw_token);
	origlen    = desc_read(desc, sw_len);

	buff_dma   = desc_read(desc, sw_buffer);
	if (origlen & CPDMA_DMA_EXT_MAP) {
		origlen &= ~CPDMA_DMA_EXT_MAP;
		dma_sync_single_for_cpu(ctlr->dev, buff_dma, origlen,
					chan->dir);
	} else {
		dma_unmap_single(ctlr->dev, buff_dma, origlen, chan->dir);
	}

	cpdma_desc_free(pool, desc, 1);
	(*chan->handler)((void *)token, outlen, status);
}

static int __cpdma_chan_process(struct cpdma_chan *chan)
{
	struct cpdma_ctlr		*ctlr = chan->ctlr;
	struct cpdma_desc __iomem	*desc;
	int				status, outlen;
	int				cb_status = 0;
	struct cpdma_desc_pool		*pool = ctlr->pool;
	dma_addr_t			desc_dma;
	unsigned long			flags;

	spin_lock_irqsave(&chan->lock, flags);

	desc = chan->head;
	if (!desc) {
		chan->stats.empty_dequeue++;
		status = -ENOENT;
		goto unlock_ret;
	}
	desc_dma = desc_phys(pool, desc);

	status	= desc_read(desc, hw_mode);
	outlen	= status & 0x7ff;
	if (status & CPDMA_DESC_OWNER) {
		chan->stats.busy_dequeue++;
		status = -EBUSY;
		goto unlock_ret;
	}

	if (status & CPDMA_DESC_PASS_CRC)
		outlen -= CPDMA_DESC_CRC_LEN;

	status	= status & (CPDMA_DESC_EOQ | CPDMA_DESC_TD_COMPLETE |
			    CPDMA_DESC_PORT_MASK | CPDMA_RX_VLAN_ENCAP);

	chan->head = desc_from_phys(pool, desc_read(desc, hw_next));
	chan_write(chan, cp, desc_dma);
	chan->count--;
	chan->stats.good_dequeue++;

	if ((status & CPDMA_DESC_EOQ) && chan->head) {
		chan->stats.requeue++;
		chan_write(chan, hdp, desc_phys(pool, chan->head));
	}

	spin_unlock_irqrestore(&chan->lock, flags);
	if (unlikely(status & CPDMA_DESC_TD_COMPLETE))
		cb_status = -ENOSYS;
	else
		cb_status = status;

	__cpdma_chan_free(chan, desc, outlen, cb_status);
	return status;

unlock_ret:
	spin_unlock_irqrestore(&chan->lock, flags);
	return status;
}

int cpdma_chan_process(struct cpdma_chan *chan, int quota)
{
	int used = 0, ret = 0;

	if (chan->state != CPDMA_STATE_ACTIVE)
		return -EINVAL;

	while (used < quota) {
		ret = __cpdma_chan_process(chan);
		if (ret < 0)
			break;
		used++;
	}
	return used;
}
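
/* Editor's illustrative sketch, kept out of the build: cpdma_chan_process()
 * is meant to be driven from a poll loop (NAPI-style), with quota acting as
 * the budget and the return value being the number of completed descriptors,
 * each of which has invoked the channel handler.  Names are hypothetical.
 */
#if 0
static int example_poll(struct cpdma_chan *ch, int budget)
{
	int done = cpdma_chan_process(ch, budget);

	if (done < budget)
		cpdma_chan_int_ctrl(ch, true);	/* re-arm the channel irq */

	return done < 0 ? 0 : done;
}
#endif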

int cpdma_chan_start(struct cpdma_chan *chan)
{
	struct cpdma_ctlr *ctlr = chan->ctlr;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&ctlr->lock, flags);
	ret = cpdma_chan_set_chan_shaper(chan);
	spin_unlock_irqrestore(&ctlr->lock, flags);
	if (ret)
		return ret;

	ret = cpdma_chan_on(chan);
	if (ret)
		return ret;

	return 0;
}

int cpdma_chan_stop(struct cpdma_chan *chan)
{
	struct cpdma_ctlr	*ctlr = chan->ctlr;
	struct cpdma_desc_pool	*pool = ctlr->pool;
	unsigned long		flags;
	int			ret;
	unsigned		timeout;

	spin_lock_irqsave(&chan->lock, flags);
	if (chan->state == CPDMA_STATE_TEARDOWN) {
		spin_unlock_irqrestore(&chan->lock, flags);
		return -EINVAL;
	}

	chan->state = CPDMA_STATE_TEARDOWN;
	dma_reg_write(ctlr, chan->int_clear, chan->mask);

	/* trigger teardown */
	dma_reg_write(ctlr, chan->td, chan_linear(chan));

	/* wait for teardown complete */
	timeout = 100 * 100; /* 100 ms */
	while (timeout) {
		u32 cp = chan_read(chan, cp);
		if ((cp & CPDMA_TEARDOWN_VALUE) == CPDMA_TEARDOWN_VALUE)
			break;
		udelay(10);
		timeout--;
	}
	WARN_ON(!timeout);
	chan_write(chan, cp, CPDMA_TEARDOWN_VALUE);

	/* handle completed packets */
	spin_unlock_irqrestore(&chan->lock, flags);
	do {
		ret = __cpdma_chan_process(chan);
		if (ret < 0)
			break;
	} while ((ret & CPDMA_DESC_TD_COMPLETE) == 0);
	spin_lock_irqsave(&chan->lock, flags);

	/* remaining packets haven't been tx/rx'ed, clean them up */
	while (chan->head) {
		struct cpdma_desc __iomem *desc = chan->head;
		dma_addr_t next_dma;

		next_dma = desc_read(desc, hw_next);
		chan->head = desc_from_phys(pool, next_dma);
		chan->count--;
		chan->stats.teardown_dequeue++;

		/* issue callback without locks held */
		spin_unlock_irqrestore(&chan->lock, flags);
		__cpdma_chan_free(chan, desc, 0, -ENOSYS);
		spin_lock_irqsave(&chan->lock, flags);
	}

	chan->state = CPDMA_STATE_IDLE;
	spin_unlock_irqrestore(&chan->lock, flags);
	return 0;
}

int cpdma_chan_int_ctrl(struct cpdma_chan *chan, bool enable)
{
	unsigned long flags;

	spin_lock_irqsave(&chan->lock, flags);
	if (chan->state != CPDMA_STATE_ACTIVE) {
		spin_unlock_irqrestore(&chan->lock, flags);
		return -EINVAL;
	}

	dma_reg_write(chan->ctlr, enable ? chan->int_set : chan->int_clear,
		      chan->mask);
	spin_unlock_irqrestore(&chan->lock, flags);

	return 0;
}

int cpdma_control_get(struct cpdma_ctlr *ctlr, int control)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&ctlr->lock, flags);
	ret = _cpdma_control_get(ctlr, control);
	spin_unlock_irqrestore(&ctlr->lock, flags);

	return ret;
}

int cpdma_control_set(struct cpdma_ctlr *ctlr, int control, int value)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&ctlr->lock, flags);
	ret = _cpdma_control_set(ctlr, control, value);
	spin_unlock_irqrestore(&ctlr->lock, flags);

	return ret;
}

int cpdma_get_num_rx_descs(struct cpdma_ctlr *ctlr)
{
	return ctlr->num_rx_desc;
}

int cpdma_get_num_tx_descs(struct cpdma_ctlr *ctlr)
{
	return ctlr->num_tx_desc;
}

int cpdma_set_num_rx_descs(struct cpdma_ctlr *ctlr, int num_rx_desc)
{
	unsigned long flags;
	int temp, ret;

	spin_lock_irqsave(&ctlr->lock, flags);

	temp = ctlr->num_rx_desc;
	ctlr->num_rx_desc = num_rx_desc;
	ctlr->num_tx_desc = ctlr->pool->num_desc - ctlr->num_rx_desc;
	ret = cpdma_chan_split_pool(ctlr);
	if (ret) {
		ctlr->num_rx_desc = temp;
		ctlr->num_tx_desc = ctlr->pool->num_desc - ctlr->num_rx_desc;
	}

	spin_unlock_irqrestore(&ctlr->lock, flags);

	return ret;
}