xref: /openbmc/linux/drivers/soc/ti/knav_qmss_queue.c (revision 9a29ad52)
1 /*
2  * Keystone Queue Manager subsystem driver
3  *
4  * Copyright (C) 2014 Texas Instruments Incorporated - http://www.ti.com
5  * Authors:	Sandeep Nair <sandeep_n@ti.com>
6  *		Cyril Chemparathy <cyril@ti.com>
7  *		Santosh Shilimkar <santosh.shilimkar@ti.com>
8  *
9  * This program is free software; you can redistribute it and/or
10  * modify it under the terms of the GNU General Public License
11  * version 2 as published by the Free Software Foundation.
12  *
13  * This program is distributed in the hope that it will be useful, but
14  * WITHOUT ANY WARRANTY; without even the implied warranty of
15  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
16  * General Public License for more details.
17  */
18 
19 #include <linux/debugfs.h>
20 #include <linux/dma-mapping.h>
21 #include <linux/firmware.h>
22 #include <linux/interrupt.h>
23 #include <linux/io.h>
24 #include <linux/module.h>
25 #include <linux/of_address.h>
26 #include <linux/of_device.h>
27 #include <linux/of_irq.h>
28 #include <linux/pm_runtime.h>
29 #include <linux/slab.h>
30 #include <linux/soc/ti/knav_qmss.h>
31 
32 #include "knav_qmss.h"
33 
34 static struct knav_device *kdev;
35 static DEFINE_MUTEX(knav_dev_lock);
36 
37 /* Queue manager register indices in DTS */
38 #define KNAV_QUEUE_PEEK_REG_INDEX	0
39 #define KNAV_QUEUE_STATUS_REG_INDEX	1
40 #define KNAV_QUEUE_CONFIG_REG_INDEX	2
41 #define KNAV_QUEUE_REGION_REG_INDEX	3
42 #define KNAV_QUEUE_PUSH_REG_INDEX	4
43 #define KNAV_QUEUE_POP_REG_INDEX	5
44 
45 /* Queue manager register indices in DTS for QMSS in K2G NAVSS.
46  * There are no status and vbusm push registers on this version
47  * of QMSS. Push registers are the same as pop registers, so all
48  * indices above 1 are redefined.
49  */
50 #define KNAV_L_QUEUE_CONFIG_REG_INDEX	1
51 #define KNAV_L_QUEUE_REGION_REG_INDEX	2
52 #define KNAV_L_QUEUE_PUSH_REG_INDEX	3
53 
54 /* PDSP register indices in DTS */
55 #define KNAV_QUEUE_PDSP_IRAM_REG_INDEX	0
56 #define KNAV_QUEUE_PDSP_REGS_REG_INDEX	1
57 #define KNAV_QUEUE_PDSP_INTD_REG_INDEX	2
58 #define KNAV_QUEUE_PDSP_CMD_REG_INDEX	3
59 
60 #define knav_queue_idx_to_inst(kdev, idx)			\
61 	(kdev->instances + (idx << kdev->inst_shift))
62 
63 #define for_each_handle_rcu(qh, inst)			\
64 	list_for_each_entry_rcu(qh, &inst->handles, list)
65 
66 #define for_each_instance(idx, inst, kdev)		\
67 	for (idx = 0, inst = kdev->instances;		\
68 	     idx < (kdev)->num_queues_in_use;			\
69 	     idx++, inst = knav_queue_idx_to_inst(kdev, idx))
70 
71 /* All firmware file names end up here. List the firmware file names below,
72  * newest first. The array is searched from the start until a firmware
73  * file is found.
74  */
75 const char *knav_acc_firmwares[] = {"ks2_qmss_pdsp_acc48.bin"};
76 
77 static bool device_ready;
78 bool knav_qmss_device_ready(void)
79 {
80 	return device_ready;
81 }
82 EXPORT_SYMBOL_GPL(knav_qmss_device_ready);
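
/* Illustrative client-side check (a hedged sketch; the -EPROBE_DEFER
 * policy shown is the caller's choice, not mandated by this API):
 *
 *	if (!knav_qmss_device_ready())
 *		return -EPROBE_DEFER;
 */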
83 
84 /**
85  * knav_queue_notify() - qmss queue notifier call
86  *
87  * @inst:		qmss queue instance like accumulator
88  */
89 void knav_queue_notify(struct knav_queue_inst *inst)
90 {
91 	struct knav_queue *qh;
92 
93 	if (!inst)
94 		return;
95 
96 	rcu_read_lock();
97 	for_each_handle_rcu(qh, inst) {
98 		if (atomic_read(&qh->notifier_enabled) <= 0)
99 			continue;
100 		if (WARN_ON(!qh->notifier_fn))
101 			continue;
102 		this_cpu_inc(qh->stats->notifies);
103 		qh->notifier_fn(qh->notifier_fn_arg);
104 	}
105 	rcu_read_unlock();
106 }
107 EXPORT_SYMBOL_GPL(knav_queue_notify);
108 
109 static irqreturn_t knav_queue_int_handler(int irq, void *_instdata)
110 {
111 	struct knav_queue_inst *inst = _instdata;
112 
113 	knav_queue_notify(inst);
114 	return IRQ_HANDLED;
115 }
116 
117 static int knav_queue_setup_irq(struct knav_range_info *range,
118 			  struct knav_queue_inst *inst)
119 {
120 	unsigned queue = inst->id - range->queue_base;
121 	unsigned long cpu_map;
122 	int ret = 0, irq;
123 
124 	if (range->flags & RANGE_HAS_IRQ) {
125 		irq = range->irqs[queue].irq;
126 		cpu_map = range->irqs[queue].cpu_map;
127 		ret = request_irq(irq, knav_queue_int_handler, 0,
128 					inst->irq_name, inst);
129 		if (ret)
130 			return ret;
131 		disable_irq(irq);
132 		if (cpu_map) {
133 			ret = irq_set_affinity_hint(irq, to_cpumask(&cpu_map));
134 			if (ret) {
135 				dev_warn(range->kdev->dev,
136 					 "Failed to set IRQ affinity\n");
137 				return ret;
138 			}
139 		}
140 	}
141 	return ret;
142 }
143 
144 static void knav_queue_free_irq(struct knav_queue_inst *inst)
145 {
146 	struct knav_range_info *range = inst->range;
147 	unsigned queue = inst->id - inst->range->queue_base;
148 	int irq;
149 
150 	if (range->flags & RANGE_HAS_IRQ) {
151 		irq = range->irqs[queue].irq;
152 		irq_set_affinity_hint(irq, NULL);
153 		free_irq(irq, inst);
154 	}
155 }
156 
157 static inline bool knav_queue_is_busy(struct knav_queue_inst *inst)
158 {
159 	return !list_empty(&inst->handles);
160 }
161 
162 static inline bool knav_queue_is_reserved(struct knav_queue_inst *inst)
163 {
164 	return inst->range->flags & RANGE_RESERVED;
165 }
166 
167 static inline bool knav_queue_is_shared(struct knav_queue_inst *inst)
168 {
169 	struct knav_queue *tmp;
170 
171 	rcu_read_lock();
172 	for_each_handle_rcu(tmp, inst) {
173 		if (tmp->flags & KNAV_QUEUE_SHARED) {
174 			rcu_read_unlock();
175 			return true;
176 		}
177 	}
178 	rcu_read_unlock();
179 	return false;
180 }
181 
182 static inline bool knav_queue_match_type(struct knav_queue_inst *inst,
183 						unsigned type)
184 {
185 	if ((type == KNAV_QUEUE_QPEND) &&
186 	    (inst->range->flags & RANGE_HAS_IRQ)) {
187 		return true;
188 	} else if ((type == KNAV_QUEUE_ACC) &&
189 		(inst->range->flags & RANGE_HAS_ACCUMULATOR)) {
190 		return true;
191 	} else if ((type == KNAV_QUEUE_GP) &&
192 		!(inst->range->flags &
193 			(RANGE_HAS_ACCUMULATOR | RANGE_HAS_IRQ))) {
194 		return true;
195 	}
196 	return false;
197 }
198 
199 static inline struct knav_queue_inst *
200 knav_queue_match_id_to_inst(struct knav_device *kdev, unsigned id)
201 {
202 	struct knav_queue_inst *inst;
203 	int idx;
204 
205 	for_each_instance(idx, inst, kdev) {
206 		if (inst->id == id)
207 			return inst;
208 	}
209 	return NULL;
210 }
211 
212 static inline struct knav_queue_inst *knav_queue_find_by_id(int id)
213 {
214 	if (kdev->base_id <= id &&
215 	    kdev->base_id + kdev->num_queues > id) {
216 		id -= kdev->base_id;
217 		return knav_queue_match_id_to_inst(kdev, id);
218 	}
219 	return NULL;
220 }
221 
222 static struct knav_queue *__knav_queue_open(struct knav_queue_inst *inst,
223 				      const char *name, unsigned flags)
224 {
225 	struct knav_queue *qh;
226 	unsigned id;
227 	int ret = 0;
228 
229 	qh = devm_kzalloc(inst->kdev->dev, sizeof(*qh), GFP_KERNEL);
230 	if (!qh)
231 		return ERR_PTR(-ENOMEM);
232 
233 	qh->stats = alloc_percpu(struct knav_queue_stats);
234 	if (!qh->stats) {
235 		ret = -ENOMEM;
236 		goto err;
237 	}
238 
239 	qh->flags = flags;
240 	qh->inst = inst;
241 	id = inst->id - inst->qmgr->start_queue;
242 	qh->reg_push = &inst->qmgr->reg_push[id];
243 	qh->reg_pop = &inst->qmgr->reg_pop[id];
244 	qh->reg_peek = &inst->qmgr->reg_peek[id];
245 
246 	/* first opener? */
247 	if (!knav_queue_is_busy(inst)) {
248 		struct knav_range_info *range = inst->range;
249 
250 		inst->name = kstrndup(name, KNAV_NAME_SIZE - 1, GFP_KERNEL);
251 		if (range->ops && range->ops->open_queue)
252 			ret = range->ops->open_queue(range, inst, flags);
253 
254 		if (ret)
255 			goto err;
256 	}
257 	list_add_tail_rcu(&qh->list, &inst->handles);
258 	return qh;
259 
260 err:
261 	if (qh->stats)
262 		free_percpu(qh->stats);
263 	devm_kfree(inst->kdev->dev, qh);
264 	return ERR_PTR(ret);
265 }
266 
267 static struct knav_queue *
268 knav_queue_open_by_id(const char *name, unsigned id, unsigned flags)
269 {
270 	struct knav_queue_inst *inst;
271 	struct knav_queue *qh;
272 
273 	mutex_lock(&knav_dev_lock);
274 
275 	qh = ERR_PTR(-ENODEV);
276 	inst = knav_queue_find_by_id(id);
277 	if (!inst)
278 		goto unlock_ret;
279 
280 	qh = ERR_PTR(-EEXIST);
281 	if (!(flags & KNAV_QUEUE_SHARED) && knav_queue_is_busy(inst))
282 		goto unlock_ret;
283 
284 	qh = ERR_PTR(-EBUSY);
285 	if ((flags & KNAV_QUEUE_SHARED) &&
286 	    (knav_queue_is_busy(inst) && !knav_queue_is_shared(inst)))
287 		goto unlock_ret;
288 
289 	qh = __knav_queue_open(inst, name, flags);
290 
291 unlock_ret:
292 	mutex_unlock(&knav_dev_lock);
293 
294 	return qh;
295 }
296 
297 static struct knav_queue *knav_queue_open_by_type(const char *name,
298 						unsigned type, unsigned flags)
299 {
300 	struct knav_queue_inst *inst;
301 	struct knav_queue *qh = ERR_PTR(-EINVAL);
302 	int idx;
303 
304 	mutex_lock(&knav_dev_lock);
305 
306 	for_each_instance(idx, inst, kdev) {
307 		if (knav_queue_is_reserved(inst))
308 			continue;
309 		if (!knav_queue_match_type(inst, type))
310 			continue;
311 		if (knav_queue_is_busy(inst))
312 			continue;
313 		qh = __knav_queue_open(inst, name, flags);
314 		goto unlock_ret;
315 	}
316 
317 unlock_ret:
318 	mutex_unlock(&knav_dev_lock);
319 	return qh;
320 }
321 
322 static void knav_queue_set_notify(struct knav_queue_inst *inst, bool enabled)
323 {
324 	struct knav_range_info *range = inst->range;
325 
326 	if (range->ops && range->ops->set_notify)
327 		range->ops->set_notify(range, inst, enabled);
328 }
329 
330 static int knav_queue_enable_notifier(struct knav_queue *qh)
331 {
332 	struct knav_queue_inst *inst = qh->inst;
333 	bool first;
334 
335 	if (WARN_ON(!qh->notifier_fn))
336 		return -EINVAL;
337 
338 	/* Adjust the per handle notifier count */
339 	first = (atomic_inc_return(&qh->notifier_enabled) == 1);
340 	if (!first)
341 		return 0; /* nothing to do */
342 
343 	/* Now adjust the per instance notifier count */
344 	first = (atomic_inc_return(&inst->num_notifiers) == 1);
345 	if (first)
346 		knav_queue_set_notify(inst, true);
347 
348 	return 0;
349 }
350 
351 static int knav_queue_disable_notifier(struct knav_queue *qh)
352 {
353 	struct knav_queue_inst *inst = qh->inst;
354 	bool last;
355 
356 	last = (atomic_dec_return(&qh->notifier_enabled) == 0);
357 	if (!last)
358 		return 0; /* nothing to do */
359 
360 	last = (atomic_dec_return(&inst->num_notifiers) == 0);
361 	if (last)
362 		knav_queue_set_notify(inst, false);
363 
364 	return 0;
365 }
366 
367 static int knav_queue_set_notifier(struct knav_queue *qh,
368 				struct knav_queue_notify_config *cfg)
369 {
370 	knav_queue_notify_fn old_fn = qh->notifier_fn;
371 
372 	if (!cfg)
373 		return -EINVAL;
374 
375 	if (!(qh->inst->range->flags & (RANGE_HAS_ACCUMULATOR | RANGE_HAS_IRQ)))
376 		return -ENOTSUPP;
377 
378 	if (!cfg->fn && old_fn)
379 		knav_queue_disable_notifier(qh);
380 
381 	qh->notifier_fn = cfg->fn;
382 	qh->notifier_fn_arg = cfg->fn_arg;
383 
384 	if (cfg->fn && !old_fn)
385 		knav_queue_enable_notifier(qh);
386 
387 	return 0;
388 }
389 
390 static int knav_gp_set_notify(struct knav_range_info *range,
391 			       struct knav_queue_inst *inst,
392 			       bool enabled)
393 {
394 	unsigned queue;
395 
396 	if (range->flags & RANGE_HAS_IRQ) {
397 		queue = inst->id - range->queue_base;
398 		if (enabled)
399 			enable_irq(range->irqs[queue].irq);
400 		else
401 			disable_irq_nosync(range->irqs[queue].irq);
402 	}
403 	return 0;
404 }
405 
406 static int knav_gp_open_queue(struct knav_range_info *range,
407 				struct knav_queue_inst *inst, unsigned flags)
408 {
409 	return knav_queue_setup_irq(range, inst);
410 }
411 
412 static int knav_gp_close_queue(struct knav_range_info *range,
413 				struct knav_queue_inst *inst)
414 {
415 	knav_queue_free_irq(inst);
416 	return 0;
417 }
418 
419 struct knav_range_ops knav_gp_range_ops = {
420 	.set_notify	= knav_gp_set_notify,
421 	.open_queue	= knav_gp_open_queue,
422 	.close_queue	= knav_gp_close_queue,
423 };
424 
425 
426 static int knav_queue_get_count(void *qhandle)
427 {
428 	struct knav_queue *qh = qhandle;
429 	struct knav_queue_inst *inst = qh->inst;
430 
431 	return readl_relaxed(&qh->reg_peek[0].entry_count) +
432 		atomic_read(&inst->desc_count);
433 }
434 
435 static void knav_queue_debug_show_instance(struct seq_file *s,
436 					struct knav_queue_inst *inst)
437 {
438 	struct knav_device *kdev = inst->kdev;
439 	struct knav_queue *qh;
440 	int cpu = 0;
441 	int pushes = 0;
442 	int pops = 0;
443 	int push_errors = 0;
444 	int pop_errors = 0;
445 	int notifies = 0;
446 
447 	if (!knav_queue_is_busy(inst))
448 		return;
449 
450 	seq_printf(s, "\tqueue id %d (%s)\n",
451 		   kdev->base_id + inst->id, inst->name);
452 	for_each_handle_rcu(qh, inst) {
453 		for_each_possible_cpu(cpu) {
454 			pushes += per_cpu_ptr(qh->stats, cpu)->pushes;
455 			pops += per_cpu_ptr(qh->stats, cpu)->pops;
456 			push_errors += per_cpu_ptr(qh->stats, cpu)->push_errors;
457 			pop_errors += per_cpu_ptr(qh->stats, cpu)->pop_errors;
458 			notifies += per_cpu_ptr(qh->stats, cpu)->notifies;
459 		}
460 
461 		seq_printf(s, "\t\thandle %p: pushes %8d, pops %8d, count %8d, notifies %8d, push errors %8d, pop errors %8d\n",
462 				qh,
463 				pushes,
464 				pops,
465 				knav_queue_get_count(qh),
466 				notifies,
467 				push_errors,
468 				pop_errors);
469 	}
470 }
471 
472 static int knav_queue_debug_show(struct seq_file *s, void *v)
473 {
474 	struct knav_queue_inst *inst;
475 	int idx;
476 
477 	mutex_lock(&knav_dev_lock);
478 	seq_printf(s, "%s: %u-%u\n",
479 		   dev_name(kdev->dev), kdev->base_id,
480 		   kdev->base_id + kdev->num_queues - 1);
481 	for_each_instance(idx, inst, kdev)
482 		knav_queue_debug_show_instance(s, inst);
483 	mutex_unlock(&knav_dev_lock);
484 
485 	return 0;
486 }
487 
488 static int knav_queue_debug_open(struct inode *inode, struct file *file)
489 {
490 	return single_open(file, knav_queue_debug_show, NULL);
491 }
492 
493 static const struct file_operations knav_queue_debug_ops = {
494 	.open		= knav_queue_debug_open,
495 	.read		= seq_read,
496 	.llseek		= seq_lseek,
497 	.release	= single_release,
498 };
499 
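/* Poll @addr until the bits selected by @flags (or the whole word, when
 * @flags is zero) read back as zero, or until @timeout ms elapse.
 */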
500 static inline int knav_queue_pdsp_wait(u32 * __iomem addr, unsigned timeout,
501 					u32 flags)
502 {
503 	unsigned long end;
504 	u32 val = 0;
505 
506 	end = jiffies + msecs_to_jiffies(timeout);
507 	while (time_after(end, jiffies)) {
508 		val = readl_relaxed(addr);
509 		if (flags)
510 			val &= flags;
511 		if (!val)
512 			break;
513 		cpu_relax();
514 	}
515 	return val ? -ETIMEDOUT : 0;
516 }
517 
518 
519 static int knav_queue_flush(struct knav_queue *qh)
520 {
521 	struct knav_queue_inst *inst = qh->inst;
522 	unsigned id = inst->id - inst->qmgr->start_queue;
523 
524 	atomic_set(&inst->desc_count, 0);
525 	writel_relaxed(0, &inst->qmgr->reg_push[id].ptr_size_thresh);
526 	return 0;
527 }
528 
529 /**
530  * knav_queue_open()	- open a hardware queue
531  * @name:		name to give the queue handle
532  * @id:			desired queue number if any, or specifies the type
533  *			  of queue
534  * @flags:		the following flags are applicable to queues:
535  *	KNAV_QUEUE_SHARED - allow the queue to be shared. Queues are
536  *			     exclusive by default.
537  *			     Subsequent attempts to open a shared queue should
538  *			     also have this flag.
539  *
540  * Returns a handle to the open hardware queue if successful. Use IS_ERR()
541  * to check the returned value for error codes.
542  */
543 void *knav_queue_open(const char *name, unsigned id,
544 					unsigned flags)
545 {
546 	struct knav_queue *qh = ERR_PTR(-EINVAL);
547 
548 	switch (id) {
549 	case KNAV_QUEUE_QPEND:
550 	case KNAV_QUEUE_ACC:
551 	case KNAV_QUEUE_GP:
552 		qh = knav_queue_open_by_type(name, id, flags);
553 		break;
554 
555 	default:
556 		qh = knav_queue_open_by_id(name, id, flags);
557 		break;
558 	}
559 	return qh;
560 }
561 EXPORT_SYMBOL_GPL(knav_queue_open);
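
/* Example (an illustrative sketch; the queue name and the choice of a
 * general purpose queue are assumptions, not requirements):
 *
 *	void *qh;
 *
 *	qh = knav_queue_open("rx-ring", KNAV_QUEUE_GP, 0);
 *	if (IS_ERR(qh))
 *		return PTR_ERR(qh);
 *	...
 *	knav_queue_close(qh);
 */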
562 
563 /**
564  * knav_queue_close()	- close a hardware queue handle
565  * @qhandle:		handle to close
566  */
567 void knav_queue_close(void *qhandle)
568 {
569 	struct knav_queue *qh = qhandle;
570 	struct knav_queue_inst *inst = qh->inst;
571 
572 	while (atomic_read(&qh->notifier_enabled) > 0)
573 		knav_queue_disable_notifier(qh);
574 
575 	mutex_lock(&knav_dev_lock);
576 	list_del_rcu(&qh->list);
577 	mutex_unlock(&knav_dev_lock);
578 	synchronize_rcu();
579 	if (!knav_queue_is_busy(inst)) {
580 		struct knav_range_info *range = inst->range;
581 
582 		if (range->ops && range->ops->close_queue)
583 			range->ops->close_queue(range, inst);
584 	}
585 	free_percpu(qh->stats);
586 	devm_kfree(inst->kdev->dev, qh);
587 }
588 EXPORT_SYMBOL_GPL(knav_queue_close);
589 
590 /**
591  * knav_queue_device_control()	- Perform control operations on a queue
592  * @qhandle:			queue handle
593  * @cmd:			control commands
594  * @arg:			command argument
595  *
596  * Returns 0 on success, errno otherwise.
597  */
598 int knav_queue_device_control(void *qhandle, enum knav_queue_ctrl_cmd cmd,
599 				unsigned long arg)
600 {
601 	struct knav_queue *qh = qhandle;
602 	struct knav_queue_notify_config *cfg;
603 	int ret;
604 
605 	switch ((int)cmd) {
606 	case KNAV_QUEUE_GET_ID:
607 		ret = qh->inst->kdev->base_id + qh->inst->id;
608 		break;
609 
610 	case KNAV_QUEUE_FLUSH:
611 		ret = knav_queue_flush(qh);
612 		break;
613 
614 	case KNAV_QUEUE_SET_NOTIFIER:
615 		cfg = (void *)arg;
616 		ret = knav_queue_set_notifier(qh, cfg);
617 		break;
618 
619 	case KNAV_QUEUE_ENABLE_NOTIFY:
620 		ret = knav_queue_enable_notifier(qh);
621 		break;
622 
623 	case KNAV_QUEUE_DISABLE_NOTIFY:
624 		ret = knav_queue_disable_notifier(qh);
625 		break;
626 
627 	case KNAV_QUEUE_GET_COUNT:
628 		ret = knav_queue_get_count(qh);
629 		break;
630 
631 	default:
632 		ret = -ENOTSUPP;
633 		break;
634 	}
635 	return ret;
636 }
637 EXPORT_SYMBOL_GPL(knav_queue_device_control);
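
/* Example: installing a notification callback through the control
 * interface (a hedged sketch; "my_notify" and "my_data" are
 * hypothetical names, not part of this driver):
 *
 *	struct knav_queue_notify_config cfg = {
 *		.fn	= my_notify,
 *		.fn_arg	= my_data,
 *	};
 *	int ret;
 *
 *	ret = knav_queue_device_control(qh, KNAV_QUEUE_SET_NOTIFIER,
 *					(unsigned long)&cfg);
 */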
638 
639 
640 
641 /**
642  * knav_queue_push()	- push data (or descriptor) to the tail of a queue
643  * @qhandle:		hardware queue handle
644  * @dma:		DMA address of data to push
645  * @size:		size of data to push
646  * @flags:		can be used to pass additional information
647  *
648  * Returns 0 on success, errno otherwise.
649  */
650 int knav_queue_push(void *qhandle, dma_addr_t dma,
651 					unsigned size, unsigned flags)
652 {
653 	struct knav_queue *qh = qhandle;
654 	u32 val;
655 
656 	val = (u32)dma | ((size / 16) - 1);
657 	writel_relaxed(val, &qh->reg_push[0].ptr_size_thresh);
658 
659 	this_cpu_inc(qh->stats->pushes);
660 	return 0;
661 }
662 EXPORT_SYMBOL_GPL(knav_queue_push);
663 
664 /**
665  * knav_queue_pop()	- pop data (or descriptor) from the head of a queue
666  * @qhandle:		hardware queue handle
667  * @size:		(optional) size of the data popped
668  *
669  * Returns a DMA address on success, 0 on failure.
670  */
671 dma_addr_t knav_queue_pop(void *qhandle, unsigned *size)
672 {
673 	struct knav_queue *qh = qhandle;
674 	struct knav_queue_inst *inst = qh->inst;
675 	dma_addr_t dma;
676 	u32 val, idx;
677 
678 	/* is this an accumulator-serviced queue? */
679 	if (inst->descs) {
680 		if (unlikely(atomic_dec_return(&inst->desc_count) < 0)) {
681 			atomic_inc(&inst->desc_count);
682 			return 0;
683 		}
684 		idx  = atomic_inc_return(&inst->desc_head);
685 		idx &= ACC_DESCS_MASK;
686 		val = inst->descs[idx];
687 	} else {
688 		val = readl_relaxed(&qh->reg_pop[0].ptr_size_thresh);
689 		if (unlikely(!val))
690 			return 0;
691 	}
692 
693 	dma = val & DESC_PTR_MASK;
694 	if (size)
695 		*size = ((val & DESC_SIZE_MASK) + 1) * 16;
696 
697 	this_cpu_inc(qh->stats->pops);
698 	return dma;
699 }
700 EXPORT_SYMBOL_GPL(knav_queue_pop);
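
/* Example: a push/pop round trip (an illustrative sketch; the 64-byte
 * descriptor size is an assumption, note that sizes are encoded in
 * 16-byte units by knav_queue_push()):
 *
 *	unsigned size;
 *	dma_addr_t dma;
 *
 *	knav_queue_push(qh, desc_dma, 64, 0);
 *	dma = knav_queue_pop(qh, &size);
 *	if (!dma)
 *		return -ENOENT;
 */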
701 
702 /* carve out descriptors and push into queue */
703 static void kdesc_fill_pool(struct knav_pool *pool)
704 {
705 	struct knav_region *region;
706 	int i;
707 
708 	region = pool->region;
709 	pool->desc_size = region->desc_size;
710 	for (i = 0; i < pool->num_desc; i++) {
711 		int index = pool->region_offset + i;
712 		dma_addr_t dma_addr;
713 		unsigned dma_size;
714 		dma_addr = region->dma_start + (region->desc_size * index);
715 		dma_size = ALIGN(pool->desc_size, SMP_CACHE_BYTES);
716 		dma_sync_single_for_device(pool->dev, dma_addr, dma_size,
717 					   DMA_TO_DEVICE);
718 		knav_queue_push(pool->queue, dma_addr, dma_size, 0);
719 	}
720 }
721 
722 /* pop out descriptors and close the queue */
723 static void kdesc_empty_pool(struct knav_pool *pool)
724 {
725 	dma_addr_t dma;
726 	unsigned size;
727 	void *desc;
728 	int i;
729 
730 	if (!pool->queue)
731 		return;
732 
733 	for (i = 0;; i++) {
734 		dma = knav_queue_pop(pool->queue, &size);
735 		if (!dma)
736 			break;
737 		desc = knav_pool_desc_dma_to_virt(pool, dma);
738 		if (!desc) {
739 			dev_dbg(pool->kdev->dev,
740 				"couldn't unmap desc, continuing\n");
741 			continue;
742 		}
743 	}
744 	WARN_ON(i != pool->num_desc);
745 	knav_queue_close(pool->queue);
746 }
747 
748 
749 /* Get the DMA address of a descriptor */
750 dma_addr_t knav_pool_desc_virt_to_dma(void *ph, void *virt)
751 {
752 	struct knav_pool *pool = ph;
753 	return pool->region->dma_start + (virt - pool->region->virt_start);
754 }
755 EXPORT_SYMBOL_GPL(knav_pool_desc_virt_to_dma);
756 
757 void *knav_pool_desc_dma_to_virt(void *ph, dma_addr_t dma)
758 {
759 	struct knav_pool *pool = ph;
760 	return pool->region->virt_start + (dma - pool->region->dma_start);
761 }
762 EXPORT_SYMBOL_GPL(knav_pool_desc_dma_to_virt);
763 
764 /**
765  * knav_pool_create()	- Create a pool of descriptors
766  * @name:		name to give the pool handle
767  * @num_desc:		number of descriptors in the pool
768  * @region_id:	QMSS region id from which the descriptors are to be
769  *			  allocated.
770  *
771  * Returns a pool handle on success.
772  * Use IS_ERR_OR_NULL() to identify error values on return.
773  */
774 void *knav_pool_create(const char *name,
775 					int num_desc, int region_id)
776 {
777 	struct knav_region *reg_itr, *region = NULL;
778 	struct knav_pool *pool, *pi;
779 	struct list_head *node;
780 	unsigned last_offset;
781 	bool slot_found;
782 	int ret;
783 
784 	if (!kdev)
785 		return ERR_PTR(-EPROBE_DEFER);
786 
787 	if (!kdev->dev)
788 		return ERR_PTR(-ENODEV);
789 
790 	pool = devm_kzalloc(kdev->dev, sizeof(*pool), GFP_KERNEL);
791 	if (!pool) {
792 		dev_err(kdev->dev, "out of memory allocating pool\n");
793 		return ERR_PTR(-ENOMEM);
794 	}
795 
796 	for_each_region(kdev, reg_itr) {
797 		if (reg_itr->id != region_id)
798 			continue;
799 		region = reg_itr;
800 		break;
801 	}
802 
803 	if (!region) {
804 		dev_err(kdev->dev, "region-id(%d) not found\n", region_id);
805 		ret = -EINVAL;
806 		goto err;
807 	}
808 
809 	pool->queue = knav_queue_open(name, KNAV_QUEUE_GP, 0);
810 	if (IS_ERR_OR_NULL(pool->queue)) {
811 		dev_err(kdev->dev,
812 			"failed to open queue for pool(%s), error %ld\n",
813 			name, PTR_ERR(pool->queue));
814 		ret = PTR_ERR(pool->queue);
815 		goto err;
816 	}
817 
818 	pool->name = kstrndup(name, KNAV_NAME_SIZE - 1, GFP_KERNEL);
819 	pool->kdev = kdev;
820 	pool->dev = kdev->dev;
821 
822 	mutex_lock(&knav_dev_lock);
823 
824 	if (num_desc > (region->num_desc - region->used_desc)) {
825 		dev_err(kdev->dev, "out of descs in region(%d) for pool(%s)\n",
826 			region_id, name);
827 		ret = -ENOMEM;
828 		goto err_unlock;
829 	}
830 
831 	/* The region maintains a list of pools sorted by region offset;
832 	 * use the first free slot which is large enough to accommodate
833 	 * the request.
834 	 */
835 	last_offset = 0;
836 	slot_found = false;
837 	node = &region->pools;
838 	list_for_each_entry(pi, &region->pools, region_inst) {
839 		if ((pi->region_offset - last_offset) >= num_desc) {
840 			slot_found = true;
841 			break;
842 		}
843 		last_offset = pi->region_offset + pi->num_desc;
844 	}
845 	node = &pi->region_inst;
846 
847 	if (slot_found) {
848 		pool->region = region;
849 		pool->num_desc = num_desc;
850 		pool->region_offset = last_offset;
851 		region->used_desc += num_desc;
852 		list_add_tail(&pool->list, &kdev->pools);
853 		list_add_tail(&pool->region_inst, node);
854 	} else {
855 		dev_err(kdev->dev, "pool(%s) create failed: fragmented desc pool in region(%d)\n",
856 			name, region_id);
857 		ret = -ENOMEM;
858 		goto err_unlock;
859 	}
860 
861 	mutex_unlock(&knav_dev_lock);
862 	kdesc_fill_pool(pool);
863 	return pool;
864 
865 err_unlock:
866 	mutex_unlock(&knav_dev_lock);
867 err:
868 	kfree(pool->name);
869 	devm_kfree(kdev->dev, pool);
870 	return ERR_PTR(ret);
871 }
872 EXPORT_SYMBOL_GPL(knav_pool_create);
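
/* Example: carving a descriptor pool out of a region (an illustrative
 * sketch; the pool name, descriptor count and region id are
 * assumptions):
 *
 *	void *pool;
 *
 *	pool = knav_pool_create("rx-pool", 512, 12);
 *	if (IS_ERR_OR_NULL(pool))
 *		return PTR_ERR(pool);
 *	...
 *	knav_pool_destroy(pool);
 */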
873 
874 /**
875  * knav_pool_destroy()	- Free a pool of descriptors
876  * @ph:			pool handle
877  */
878 void knav_pool_destroy(void *ph)
879 {
880 	struct knav_pool *pool = ph;
881 
882 	if (!pool)
883 		return;
884 
885 	if (!pool->region)
886 		return;
887 
888 	kdesc_empty_pool(pool);
889 	mutex_lock(&knav_dev_lock);
890 
891 	pool->region->used_desc -= pool->num_desc;
892 	list_del(&pool->region_inst);
893 	list_del(&pool->list);
894 
895 	mutex_unlock(&knav_dev_lock);
896 	kfree(pool->name);
897 	devm_kfree(kdev->dev, pool);
898 }
899 EXPORT_SYMBOL_GPL(knav_pool_destroy);
900 
901 
902 /**
903  * knav_pool_desc_get()	- Get a descriptor from the pool
904  * @ph:			pool handle
905  *
906  * Returns a descriptor from the pool, or ERR_PTR(-ENOMEM) when empty.
907  */
908 void *knav_pool_desc_get(void *ph)
909 {
910 	struct knav_pool *pool = ph;
911 	dma_addr_t dma;
912 	unsigned size;
913 	void *data;
914 
915 	dma = knav_queue_pop(pool->queue, &size);
916 	if (unlikely(!dma))
917 		return ERR_PTR(-ENOMEM);
918 	data = knav_pool_desc_dma_to_virt(pool, dma);
919 	return data;
920 }
921 EXPORT_SYMBOL_GPL(knav_pool_desc_get);
922 
923 /**
924  * knav_pool_desc_put()	- return a descriptor to the pool
925  * @ph:			pool handle
 * @desc:			virtual address of the descriptor to return
926  */
927 void knav_pool_desc_put(void *ph, void *desc)
928 {
929 	struct knav_pool *pool = ph;
930 	dma_addr_t dma;
931 	dma = knav_pool_desc_virt_to_dma(pool, desc);
932 	knav_queue_push(pool->queue, dma, pool->region->desc_size, 0);
933 }
934 EXPORT_SYMBOL_GPL(knav_pool_desc_put);
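
/* Example: get/put pairing (illustrative; assumes a pool created with
 * knav_pool_create() above):
 *
 *	void *desc;
 *
 *	desc = knav_pool_desc_get(pool);
 *	if (IS_ERR(desc))
 *		return PTR_ERR(desc);
 *	...
 *	knav_pool_desc_put(pool, desc);
 */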
935 
936 /**
937  * knav_pool_desc_map()	- Map descriptor for DMA transfer
938  * @ph:				pool handle
939  * @desc:			virtual address of descriptor to map
940  * @size:			size of descriptor to map
941  * @dma:			DMA address return pointer
942  * @dma_sz:			adjusted (cache-aligned) mapped size return pointer
943  *
944  * Returns 0 on success, errno otherwise.
945  */
946 int knav_pool_desc_map(void *ph, void *desc, unsigned size,
947 					dma_addr_t *dma, unsigned *dma_sz)
948 {
949 	struct knav_pool *pool = ph;
950 	*dma = knav_pool_desc_virt_to_dma(pool, desc);
951 	size = min(size, pool->region->desc_size);
952 	size = ALIGN(size, SMP_CACHE_BYTES);
953 	*dma_sz = size;
954 	dma_sync_single_for_device(pool->dev, *dma, size, DMA_TO_DEVICE);
955 
956 	/* Ensure the descriptor write reaches memory */
957 	__iowmb();
958 
959 	return 0;
960 }
961 EXPORT_SYMBOL_GPL(knav_pool_desc_map);
962 
963 /**
964  * knav_pool_desc_unmap()	- Unmap descriptor after DMA transfer
965  * @ph:				pool handle
966  * @dma:			DMA address of descriptor to unmap
967  * @dma_sz:			size of descriptor to unmap
968  *
969  * Returns descriptor address on success, Use IS_ERR_OR_NULL() to identify
970  * error values on return.
971  */
972 void *knav_pool_desc_unmap(void *ph, dma_addr_t dma, unsigned dma_sz)
973 {
974 	struct knav_pool *pool = ph;
975 	unsigned desc_sz;
976 	void *desc;
977 
978 	desc_sz = min(dma_sz, pool->region->desc_size);
979 	desc = knav_pool_desc_dma_to_virt(pool, dma);
980 	dma_sync_single_for_cpu(pool->dev, dma, desc_sz, DMA_FROM_DEVICE);
981 	prefetch(desc);
982 	return desc;
983 }
984 EXPORT_SYMBOL_GPL(knav_pool_desc_unmap);
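
/* Example: map/unmap pairing around a hardware hand-off (a hedged
 * sketch; "struct my_desc" is hypothetical and error handling is
 * elided):
 *
 *	struct my_desc *desc = knav_pool_desc_get(pool);
 *	dma_addr_t dma;
 *	unsigned dma_sz;
 *
 *	knav_pool_desc_map(pool, desc, sizeof(*desc), &dma, &dma_sz);
 *	knav_queue_push(qh, dma, dma_sz, 0);
 *	...
 *	dma = knav_queue_pop(qh, &dma_sz);
 *	desc = knav_pool_desc_unmap(pool, dma, dma_sz);
 */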
985 
986 /**
987  * knav_pool_count()	- Get the number of descriptors in pool.
988  * @ph:			pool handle
989  * Returns number of elements in the pool.
990  */
991 int knav_pool_count(void *ph)
992 {
993 	struct knav_pool *pool = ph;
994 	return knav_queue_get_count(pool->queue);
995 }
996 EXPORT_SYMBOL_GPL(knav_pool_count);
997 
998 static void knav_queue_setup_region(struct knav_device *kdev,
999 					struct knav_region *region)
1000 {
1001 	unsigned hw_num_desc, hw_desc_size, size;
1002 	struct knav_reg_region __iomem  *regs;
1003 	struct knav_qmgr_info *qmgr;
1004 	struct knav_pool *pool;
1005 	int id = region->id;
1006 	struct page *page;
1007 
1008 	/* unused region? */
1009 	if (!region->num_desc) {
1010 		dev_warn(kdev->dev, "unused region %s\n", region->name);
1011 		return;
1012 	}
1013 
1014 	/* get hardware descriptor value */
1015 	hw_num_desc = ilog2(region->num_desc - 1) + 1;
1016 
1017 	/* did we force fit ourselves into nothingness? */
1018 	if (region->num_desc < 32) {
1019 		region->num_desc = 0;
1020 		dev_warn(kdev->dev, "too few descriptors in region %s\n",
1021 			 region->name);
1022 		return;
1023 	}
1024 
1025 	size = region->num_desc * region->desc_size;
1026 	region->virt_start = alloc_pages_exact(size, GFP_KERNEL | GFP_DMA |
1027 						GFP_DMA32);
1028 	if (!region->virt_start) {
1029 		region->num_desc = 0;
1030 		dev_err(kdev->dev, "memory alloc failed for region %s\n",
1031 			region->name);
1032 		return;
1033 	}
1034 	region->virt_end = region->virt_start + size;
1035 	page = virt_to_page(region->virt_start);
1036 
1037 	region->dma_start = dma_map_page(kdev->dev, page, 0, size,
1038 					 DMA_BIDIRECTIONAL);
1039 	if (dma_mapping_error(kdev->dev, region->dma_start)) {
1040 		dev_err(kdev->dev, "dma map failed for region %s\n",
1041 			region->name);
1042 		goto fail;
1043 	}
1044 	region->dma_end = region->dma_start + size;
1045 
1046 	pool = devm_kzalloc(kdev->dev, sizeof(*pool), GFP_KERNEL);
1047 	if (!pool) {
1048 		dev_err(kdev->dev, "out of memory allocating dummy pool\n");
1049 		goto fail;
1050 	}
1051 	pool->num_desc = 0;
1052 	pool->region_offset = region->num_desc;
1053 	list_add(&pool->region_inst, &region->pools);
1054 
1055 	dev_dbg(kdev->dev,
1056 		"region %s (%d): size:%d, link:%d@%d, dma:%pad-%pad, virt:%p-%p\n",
1057 		region->name, id, region->desc_size, region->num_desc,
1058 		region->link_index, &region->dma_start, &region->dma_end,
1059 		region->virt_start, region->virt_end);
1060 
1061 	hw_desc_size = (region->desc_size / 16) - 1;
1062 	hw_num_desc -= 5;
1063 
1064 	for_each_qmgr(kdev, qmgr) {
1065 		regs = qmgr->reg_region + id;
1066 		writel_relaxed((u32)region->dma_start, &regs->base);
1067 		writel_relaxed(region->link_index, &regs->start_index);
1068 		writel_relaxed(hw_desc_size << 16 | hw_num_desc,
1069 			       &regs->size_count);
1070 	}
1071 	return;
1072 
1073 fail:
1074 	if (region->dma_start)
1075 		dma_unmap_page(kdev->dev, region->dma_start, size,
1076 				DMA_BIDIRECTIONAL);
1077 	if (region->virt_start)
1078 		free_pages_exact(region->virt_start, size);
1079 	region->num_desc = 0;
1080 	return;
1081 }
1082 
1083 static const char *knav_queue_find_name(struct device_node *node)
1084 {
1085 	const char *name;
1086 
1087 	if (of_property_read_string(node, "label", &name) < 0)
1088 		name = node->name;
1089 	if (!name)
1090 		name = "unknown";
1091 	return name;
1092 }
1093 
1094 static int knav_queue_setup_regions(struct knav_device *kdev,
1095 					struct device_node *regions)
1096 {
1097 	struct device *dev = kdev->dev;
1098 	struct knav_region *region;
1099 	struct device_node *child;
1100 	u32 temp[2];
1101 	int ret;
1102 
1103 	for_each_child_of_node(regions, child) {
1104 		region = devm_kzalloc(dev, sizeof(*region), GFP_KERNEL);
1105 		if (!region) {
1106 			dev_err(dev, "out of memory allocating region\n");
1107 			return -ENOMEM;
1108 		}
1109 
1110 		region->name = knav_queue_find_name(child);
1111 		of_property_read_u32(child, "id", &region->id);
1112 		ret = of_property_read_u32_array(child, "region-spec", temp, 2);
1113 		if (!ret) {
1114 			region->num_desc  = temp[0];
1115 			region->desc_size = temp[1];
1116 		} else {
1117 			dev_err(dev, "invalid region info %s\n", region->name);
1118 			devm_kfree(dev, region);
1119 			continue;
1120 		}
1121 
1122 		if (!of_get_property(child, "link-index", NULL)) {
1123 			dev_err(dev, "No link info for %s\n", region->name);
1124 			devm_kfree(dev, region);
1125 			continue;
1126 		}
1127 		ret = of_property_read_u32(child, "link-index",
1128 					   &region->link_index);
1129 		if (ret) {
1130 			dev_err(dev, "link index not found for %s\n",
1131 				region->name);
1132 			devm_kfree(dev, region);
1133 			continue;
1134 		}
1135 
1136 		INIT_LIST_HEAD(&region->pools);
1137 		list_add_tail(&region->list, &kdev->regions);
1138 	}
1139 	if (list_empty(&kdev->regions)) {
1140 		dev_err(dev, "no valid region information found\n");
1141 		return -ENODEV;
1142 	}
1143 
1144 	/* Next, we run through the regions and set things up */
1145 	for_each_region(kdev, region)
1146 		knav_queue_setup_region(kdev, region);
1147 
1148 	return 0;
1149 }
1150 
1151 static int knav_get_link_ram(struct knav_device *kdev,
1152 				       const char *name,
1153 				       struct knav_link_ram_block *block)
1154 {
1155 	struct platform_device *pdev = to_platform_device(kdev->dev);
1156 	struct device_node *node = pdev->dev.of_node;
1157 	u32 temp[2];
1158 
1159 	/*
1160 	 * Note: link ram resources are specified in "entry" sized units. In
1161 	 * reality, although entries are ~40 bits in hardware, we treat them as
1162 	 * 64-bit entities here.
1163 	 *
1164 	 * For example, to specify the internal link ram for Keystone-I class
1165 	 * devices, we would set the linkram0 resource to 0x80000-0x83fff.
1166 	 *
1167 	 * This gets a bit weird when other link rams are used.  For example,
1168 	 * if the range specified is 0x0c000000-0x0c003fff (i.e., 16K entries
1169 	 * in MSMC SRAM), the actual memory used is 0x0c000000-0x0c020000,
1170 	 * which accounts for 64-bits per entry, for 16K entries.
1171 	 */
1172 	if (!of_property_read_u32_array(node, name, temp, 2)) {
1173 		if (temp[0]) {
1174 			/*
1175 			 * queue_base specified => using internal or onchip
1176 			 * link ram. WARNING: we do not "reserve" this block
1177 			 */
1178 			block->dma = (dma_addr_t)temp[0];
1179 			block->virt = NULL;
1180 			block->size = temp[1];
1181 		} else {
1182 			block->size = temp[1];
1183 			/* queue_base not specified => allocate requested size */
1184 			block->virt = dmam_alloc_coherent(kdev->dev,
1185 						  8 * block->size, &block->dma,
1186 						  GFP_KERNEL);
1187 			if (!block->virt) {
1188 				dev_err(kdev->dev, "failed to alloc linkram\n");
1189 				return -ENOMEM;
1190 			}
1191 		}
1192 	} else {
1193 		return -ENODEV;
1194 	}
1195 	return 0;
1196 }
1197 
1198 static int knav_queue_setup_link_ram(struct knav_device *kdev)
1199 {
1200 	struct knav_link_ram_block *block;
1201 	struct knav_qmgr_info *qmgr;
1202 
1203 	for_each_qmgr(kdev, qmgr) {
1204 		block = &kdev->link_rams[0];
1205 		dev_dbg(kdev->dev, "linkram0: dma:%pad, virt:%p, size:%x\n",
1206 			&block->dma, block->virt, block->size);
1207 		writel_relaxed((u32)block->dma, &qmgr->reg_config->link_ram_base0);
1208 		if (kdev->version == QMSS_66AK2G)
1209 			writel_relaxed(block->size,
1210 				       &qmgr->reg_config->link_ram_size0);
1211 		else
1212 			writel_relaxed(block->size - 1,
1213 				       &qmgr->reg_config->link_ram_size0);
1214 		block++;
1215 		if (!block->size)
1216 			continue;
1217 
1218 		dev_dbg(kdev->dev, "linkram1: dma:%pad, virt:%p, size:%x\n",
1219 			&block->dma, block->virt, block->size);
1220 		writel_relaxed(block->dma, &qmgr->reg_config->link_ram_base1);
1221 	}
1222 
1223 	return 0;
1224 }
1225 
1226 static int knav_setup_queue_range(struct knav_device *kdev,
1227 					struct device_node *node)
1228 {
1229 	struct device *dev = kdev->dev;
1230 	struct knav_range_info *range;
1231 	struct knav_qmgr_info *qmgr;
1232 	u32 temp[2], start, end, id, index;
1233 	int ret, i;
1234 
1235 	range = devm_kzalloc(dev, sizeof(*range), GFP_KERNEL);
1236 	if (!range) {
1237 		dev_err(dev, "out of memory allocating range\n");
1238 		return -ENOMEM;
1239 	}
1240 
1241 	range->kdev = kdev;
1242 	range->name = knav_queue_find_name(node);
1243 	ret = of_property_read_u32_array(node, "qrange", temp, 2);
1244 	if (!ret) {
1245 		range->queue_base = temp[0] - kdev->base_id;
1246 		range->num_queues = temp[1];
1247 	} else {
1248 		dev_err(dev, "invalid queue range %s\n", range->name);
1249 		devm_kfree(dev, range);
1250 		return -EINVAL;
1251 	}
1252 
1253 	for (i = 0; i < RANGE_MAX_IRQS; i++) {
1254 		struct of_phandle_args oirq;
1255 
1256 		if (of_irq_parse_one(node, i, &oirq))
1257 			break;
1258 
1259 		range->irqs[i].irq = irq_create_of_mapping(&oirq);
1260 		if (range->irqs[i].irq == IRQ_NONE)
1261 			break;
1262 
1263 		range->num_irqs++;
1264 
1265 		if (IS_ENABLED(CONFIG_SMP) && oirq.args_count == 3)
1266 			range->irqs[i].cpu_map =
1267 				(oirq.args[2] & 0x0000ff00) >> 8;
1268 	}
1269 
1270 	range->num_irqs = min(range->num_irqs, range->num_queues);
1271 	if (range->num_irqs)
1272 		range->flags |= RANGE_HAS_IRQ;
1273 
1274 	if (of_get_property(node, "qalloc-by-id", NULL))
1275 		range->flags |= RANGE_RESERVED;
1276 
1277 	if (of_get_property(node, "accumulator", NULL)) {
1278 		ret = knav_init_acc_range(kdev, node, range);
1279 		if (ret < 0) {
1280 			devm_kfree(dev, range);
1281 			return ret;
1282 		}
1283 	} else {
1284 		range->ops = &knav_gp_range_ops;
1285 	}
1286 
1287 	/* set threshold to 1, and flush out the queues */
1288 	for_each_qmgr(kdev, qmgr) {
1289 		start = max(qmgr->start_queue, range->queue_base);
1290 		end   = min(qmgr->start_queue + qmgr->num_queues,
1291 			    range->queue_base + range->num_queues);
1292 		for (id = start; id < end; id++) {
1293 			index = id - qmgr->start_queue;
1294 			writel_relaxed(THRESH_GTE | 1,
1295 				       &qmgr->reg_peek[index].ptr_size_thresh);
1296 			writel_relaxed(0,
1297 				       &qmgr->reg_push[index].ptr_size_thresh);
1298 		}
1299 	}
1300 
1301 	list_add_tail(&range->list, &kdev->queue_ranges);
1302 	dev_dbg(dev, "added range %s: %d-%d, %d irqs%s%s%s\n",
1303 		range->name, range->queue_base,
1304 		range->queue_base + range->num_queues - 1,
1305 		range->num_irqs,
1306 		(range->flags & RANGE_HAS_IRQ) ? ", has irq" : "",
1307 		(range->flags & RANGE_RESERVED) ? ", reserved" : "",
1308 		(range->flags & RANGE_HAS_ACCUMULATOR) ? ", acc" : "");
1309 	kdev->num_queues_in_use += range->num_queues;
1310 	return 0;
1311 }
1312 
1313 static int knav_setup_queue_pools(struct knav_device *kdev,
1314 				   struct device_node *queue_pools)
1315 {
1316 	struct device_node *type, *range;
1317 	int ret;
1318 
1319 	for_each_child_of_node(queue_pools, type) {
1320 		for_each_child_of_node(type, range) {
1321 			ret = knav_setup_queue_range(kdev, range);
1322 			/* return value ignored, we init the rest... */
1323 		}
1324 	}
1325 
1326 	/* ... and barf if they all failed! */
1327 	if (list_empty(&kdev->queue_ranges)) {
1328 		dev_err(kdev->dev, "no valid queue range found\n");
1329 		return -ENODEV;
1330 	}
1331 	return 0;
1332 }
1333 
1334 static void knav_free_queue_range(struct knav_device *kdev,
1335 				  struct knav_range_info *range)
1336 {
1337 	if (range->ops && range->ops->free_range)
1338 		range->ops->free_range(range);
1339 	list_del(&range->list);
1340 	devm_kfree(kdev->dev, range);
1341 }
1342 
1343 static void knav_free_queue_ranges(struct knav_device *kdev)
1344 {
1345 	struct knav_range_info *range;
1346 
1347 	for (;;) {
1348 		range = first_queue_range(kdev);
1349 		if (!range)
1350 			break;
1351 		knav_free_queue_range(kdev, range);
1352 	}
1353 }
1354 
1355 static void knav_queue_free_regions(struct knav_device *kdev)
1356 {
1357 	struct knav_region *region;
1358 	struct knav_pool *pool, *tmp;
1359 	unsigned size;
1360 
1361 	for (;;) {
1362 		region = first_region(kdev);
1363 		if (!region)
1364 			break;
1365 		list_for_each_entry_safe(pool, tmp, &region->pools, region_inst)
1366 			knav_pool_destroy(pool);
1367 
1368 		size = region->virt_end - region->virt_start;
1369 		if (size)
1370 			free_pages_exact(region->virt_start, size);
1371 		list_del(&region->list);
1372 		devm_kfree(kdev->dev, region);
1373 	}
1374 }
1375 
1376 static void __iomem *knav_queue_map_reg(struct knav_device *kdev,
1377 					struct device_node *node, int index)
1378 {
1379 	struct resource res;
1380 	void __iomem *regs;
1381 	int ret;
1382 
1383 	ret = of_address_to_resource(node, index, &res);
1384 	if (ret) {
1385 		dev_err(kdev->dev, "Can't translate of node(%s) address for index(%d)\n",
1386 			node->name, index);
1387 		return ERR_PTR(ret);
1388 	}
1389 
1390 	regs = devm_ioremap_resource(kdev->dev, &res);
1391 	if (IS_ERR(regs))
1392 		dev_err(kdev->dev, "Failed to map register base for index(%d) node(%s)\n",
1393 			index, node->name);
1394 	return regs;
1395 }
1396 
1397 static int knav_queue_init_qmgrs(struct knav_device *kdev,
1398 					struct device_node *qmgrs)
1399 {
1400 	struct device *dev = kdev->dev;
1401 	struct knav_qmgr_info *qmgr;
1402 	struct device_node *child;
1403 	u32 temp[2];
1404 	int ret;
1405 
1406 	for_each_child_of_node(qmgrs, child) {
1407 		qmgr = devm_kzalloc(dev, sizeof(*qmgr), GFP_KERNEL);
1408 		if (!qmgr) {
1409 			dev_err(dev, "out of memory allocating qmgr\n");
1410 			return -ENOMEM;
1411 		}
1412 
1413 		ret = of_property_read_u32_array(child, "managed-queues",
1414 						 temp, 2);
1415 		if (!ret) {
1416 			qmgr->start_queue = temp[0];
1417 			qmgr->num_queues = temp[1];
1418 		} else {
1419 			dev_err(dev, "invalid qmgr queue range\n");
1420 			devm_kfree(dev, qmgr);
1421 			continue;
1422 		}
1423 
1424 		dev_info(dev, "qmgr start queue %d, number of queues %d\n",
1425 			 qmgr->start_queue, qmgr->num_queues);
1426 
1427 		qmgr->reg_peek =
1428 			knav_queue_map_reg(kdev, child,
1429 					   KNAV_QUEUE_PEEK_REG_INDEX);
1430 
1431 		if (kdev->version == QMSS) {
1432 			qmgr->reg_status =
1433 				knav_queue_map_reg(kdev, child,
1434 						   KNAV_QUEUE_STATUS_REG_INDEX);
1435 		}
1436 
1437 		qmgr->reg_config =
1438 			knav_queue_map_reg(kdev, child,
1439 					   (kdev->version == QMSS_66AK2G) ?
1440 					   KNAV_L_QUEUE_CONFIG_REG_INDEX :
1441 					   KNAV_QUEUE_CONFIG_REG_INDEX);
1442 		qmgr->reg_region =
1443 			knav_queue_map_reg(kdev, child,
1444 					   (kdev->version == QMSS_66AK2G) ?
1445 					   KNAV_L_QUEUE_REGION_REG_INDEX :
1446 					   KNAV_QUEUE_REGION_REG_INDEX);
1447 
1448 		qmgr->reg_push =
1449 			knav_queue_map_reg(kdev, child,
1450 					   (kdev->version == QMSS_66AK2G) ?
1451 					    KNAV_L_QUEUE_PUSH_REG_INDEX :
1452 					    KNAV_QUEUE_PUSH_REG_INDEX);
1453 
1454 		if (kdev->version == QMSS) {
1455 			qmgr->reg_pop =
1456 				knav_queue_map_reg(kdev, child,
1457 						   KNAV_QUEUE_POP_REG_INDEX);
1458 		}
1459 
1460 		if (IS_ERR(qmgr->reg_peek) ||
1461 		    ((kdev->version == QMSS) &&
1462 		    (IS_ERR(qmgr->reg_status) || IS_ERR(qmgr->reg_pop))) ||
1463 		    IS_ERR(qmgr->reg_config) || IS_ERR(qmgr->reg_region) ||
1464 		    IS_ERR(qmgr->reg_push)) {
1465 			dev_err(dev, "failed to map qmgr regs\n");
1466 			if (kdev->version == QMSS) {
1467 				if (!IS_ERR(qmgr->reg_status))
1468 					devm_iounmap(dev, qmgr->reg_status);
1469 				if (!IS_ERR(qmgr->reg_pop))
1470 					devm_iounmap(dev, qmgr->reg_pop);
1471 			}
1472 			if (!IS_ERR(qmgr->reg_peek))
1473 				devm_iounmap(dev, qmgr->reg_peek);
1474 			if (!IS_ERR(qmgr->reg_config))
1475 				devm_iounmap(dev, qmgr->reg_config);
1476 			if (!IS_ERR(qmgr->reg_region))
1477 				devm_iounmap(dev, qmgr->reg_region);
1478 			if (!IS_ERR(qmgr->reg_push))
1479 				devm_iounmap(dev, qmgr->reg_push);
1480 			devm_kfree(dev, qmgr);
1481 			continue;
1482 		}
1483 
1484 		/* Use same push register for pop as well */
1485 		if (kdev->version == QMSS_66AK2G)
1486 			qmgr->reg_pop = qmgr->reg_push;
1487 
1488 		list_add_tail(&qmgr->list, &kdev->qmgrs);
1489 		dev_info(dev, "added qmgr start queue %d, num of queues %d, reg_peek %p, reg_status %p, reg_config %p, reg_region %p, reg_push %p, reg_pop %p\n",
1490 			 qmgr->start_queue, qmgr->num_queues,
1491 			 qmgr->reg_peek, qmgr->reg_status,
1492 			 qmgr->reg_config, qmgr->reg_region,
1493 			 qmgr->reg_push, qmgr->reg_pop);
1494 	}
1495 	return 0;
1496 }
1497 
1498 static int knav_queue_init_pdsps(struct knav_device *kdev,
1499 					struct device_node *pdsps)
1500 {
1501 	struct device *dev = kdev->dev;
1502 	struct knav_pdsp_info *pdsp;
1503 	struct device_node *child;
1504 
1505 	for_each_child_of_node(pdsps, child) {
1506 		pdsp = devm_kzalloc(dev, sizeof(*pdsp), GFP_KERNEL);
1507 		if (!pdsp) {
1508 			dev_err(dev, "out of memory allocating pdsp\n");
1509 			return -ENOMEM;
1510 		}
1511 		pdsp->name = knav_queue_find_name(child);
1512 		pdsp->iram =
1513 			knav_queue_map_reg(kdev, child,
1514 					   KNAV_QUEUE_PDSP_IRAM_REG_INDEX);
1515 		pdsp->regs =
1516 			knav_queue_map_reg(kdev, child,
1517 					   KNAV_QUEUE_PDSP_REGS_REG_INDEX);
1518 		pdsp->intd =
1519 			knav_queue_map_reg(kdev, child,
1520 					   KNAV_QUEUE_PDSP_INTD_REG_INDEX);
1521 		pdsp->command =
1522 			knav_queue_map_reg(kdev, child,
1523 					   KNAV_QUEUE_PDSP_CMD_REG_INDEX);
1524 
1525 		if (IS_ERR(pdsp->command) || IS_ERR(pdsp->iram) ||
1526 		    IS_ERR(pdsp->regs) || IS_ERR(pdsp->intd)) {
1527 			dev_err(dev, "failed to map pdsp %s regs\n",
1528 				pdsp->name);
1529 			if (!IS_ERR(pdsp->command))
1530 				devm_iounmap(dev, pdsp->command);
1531 			if (!IS_ERR(pdsp->iram))
1532 				devm_iounmap(dev, pdsp->iram);
1533 			if (!IS_ERR(pdsp->regs))
1534 				devm_iounmap(dev, pdsp->regs);
1535 			if (!IS_ERR(pdsp->intd))
1536 				devm_iounmap(dev, pdsp->intd);
1537 			devm_kfree(dev, pdsp);
1538 			continue;
1539 		}
1540 		of_property_read_u32(child, "id", &pdsp->id);
1541 		list_add_tail(&pdsp->list, &kdev->pdsps);
1542 		dev_dbg(dev, "added pdsp %s: command %p, iram %p, regs %p, intd %p\n",
1543 			pdsp->name, pdsp->command, pdsp->iram, pdsp->regs,
1544 			pdsp->intd);
1545 	}
1546 	return 0;
1547 }
1548 
1549 static int knav_queue_stop_pdsp(struct knav_device *kdev,
1550 			  struct knav_pdsp_info *pdsp)
1551 {
1552 	u32 val, timeout = 1000;
1553 	int ret;
1554 
1555 	val = readl_relaxed(&pdsp->regs->control) & ~PDSP_CTRL_ENABLE;
1556 	writel_relaxed(val, &pdsp->regs->control);
1557 	ret = knav_queue_pdsp_wait(&pdsp->regs->control, timeout,
1558 					PDSP_CTRL_RUNNING);
1559 	if (ret < 0) {
1560 		dev_err(kdev->dev, "timed out on pdsp %s stop\n", pdsp->name);
1561 		return ret;
1562 	}
1563 	pdsp->loaded = false;
1564 	pdsp->started = false;
1565 	return 0;
1566 }
1567 
1568 static int knav_queue_load_pdsp(struct knav_device *kdev,
1569 			  struct knav_pdsp_info *pdsp)
1570 {
1571 	int i, ret, fwlen;
1572 	const struct firmware *fw;
1573 	bool found = false;
1574 	u32 *fwdata;
1575 
1576 	for (i = 0; i < ARRAY_SIZE(knav_acc_firmwares); i++) {
1577 		if (knav_acc_firmwares[i]) {
1578 			ret = request_firmware_direct(&fw,
1579 						      knav_acc_firmwares[i],
1580 						      kdev->dev);
1581 			if (!ret) {
1582 				found = true;
1583 				break;
1584 			}
1585 		}
1586 	}
1587 
1588 	if (!found) {
1589 		dev_err(kdev->dev, "failed to get firmware for pdsp\n");
1590 		return -ENODEV;
1591 	}
1592 
1593 	dev_info(kdev->dev, "firmware file %s downloaded for PDSP\n",
1594 		 knav_acc_firmwares[i]);
1595 
1596 	writel_relaxed(pdsp->id + 1, pdsp->command + 0x18);
1597 	/* download the firmware */
1598 	fwdata = (u32 *)fw->data;
1599 	fwlen = (fw->size + sizeof(u32) - 1) / sizeof(u32);
1600 	for (i = 0; i < fwlen; i++)
1601 		writel_relaxed(be32_to_cpu(fwdata[i]), pdsp->iram + i);
1602 
1603 	release_firmware(fw);
1604 	return 0;
1605 }
1606 
1607 static int knav_queue_start_pdsp(struct knav_device *kdev,
1608 			   struct knav_pdsp_info *pdsp)
1609 {
1610 	u32 val, timeout = 1000;
1611 	int ret;
1612 
1613 	/* write a command for sync */
1614 	writel_relaxed(0xffffffff, pdsp->command);
1615 	while (readl_relaxed(pdsp->command) != 0xffffffff)
1616 		cpu_relax();
1617 
1618 	/* soft reset the PDSP */
1619 	val  = readl_relaxed(&pdsp->regs->control);
1620 	val &= ~(PDSP_CTRL_PC_MASK | PDSP_CTRL_SOFT_RESET);
1621 	writel_relaxed(val, &pdsp->regs->control);
1622 
1623 	/* enable pdsp */
1624 	val = readl_relaxed(&pdsp->regs->control) | PDSP_CTRL_ENABLE;
1625 	writel_relaxed(val, &pdsp->regs->control);
1626 
1627 	/* wait for command register to clear */
1628 	ret = knav_queue_pdsp_wait(pdsp->command, timeout, 0);
1629 	if (ret < 0) {
1630 		dev_err(kdev->dev,
1631 			"timed out on pdsp %s command register wait\n",
1632 			pdsp->name);
1633 		return ret;
1634 	}
1635 	return 0;
1636 }
1637 
1638 static void knav_queue_stop_pdsps(struct knav_device *kdev)
1639 {
1640 	struct knav_pdsp_info *pdsp;
1641 
1642 	/* disable all pdsps */
1643 	for_each_pdsp(kdev, pdsp)
1644 		knav_queue_stop_pdsp(kdev, pdsp);
1645 }
1646 
1647 static int knav_queue_start_pdsps(struct knav_device *kdev)
1648 {
1649 	struct knav_pdsp_info *pdsp;
1650 	int ret;
1651 
1652 	knav_queue_stop_pdsps(kdev);
1653 	/* now load them all. We return success even if a pdsp is not
1654 	 * loaded, as the accumulator channels are optional and depend on
1655 	 * firmware being available in the system. We set the loaded and
1656 	 * started flags; the acc range init checks them and initializes
1657 	 * the range only if its pdsp is started.
1658 	 */
1659 	for_each_pdsp(kdev, pdsp) {
1660 		ret = knav_queue_load_pdsp(kdev, pdsp);
1661 		if (!ret)
1662 			pdsp->loaded = true;
1663 	}
1664 
1665 	for_each_pdsp(kdev, pdsp) {
1666 		if (pdsp->loaded) {
1667 			ret = knav_queue_start_pdsp(kdev, pdsp);
1668 			if (!ret)
1669 				pdsp->started = true;
1670 		}
1671 	}
1672 	return 0;
1673 }
1674 
1675 static inline struct knav_qmgr_info *knav_find_qmgr(unsigned id)
1676 {
1677 	struct knav_qmgr_info *qmgr;
1678 
1679 	for_each_qmgr(kdev, qmgr) {
1680 		if ((id >= qmgr->start_queue) &&
1681 		    (id < qmgr->start_queue + qmgr->num_queues))
1682 			return qmgr;
1683 	}
1684 	return NULL;
1685 }
1686 
1687 static int knav_queue_init_queue(struct knav_device *kdev,
1688 					struct knav_range_info *range,
1689 					struct knav_queue_inst *inst,
1690 					unsigned id)
1691 {
1692 	char irq_name[KNAV_NAME_SIZE];
1693 	inst->qmgr = knav_find_qmgr(id);
1694 	if (!inst->qmgr)
1695 		return -1;
1696 
1697 	INIT_LIST_HEAD(&inst->handles);
1698 	inst->kdev = kdev;
1699 	inst->range = range;
1700 	inst->irq_num = -1;
1701 	inst->id = id;
1702 	scnprintf(irq_name, sizeof(irq_name), "hwqueue-%d", id);
1703 	inst->irq_name = kstrndup(irq_name, sizeof(irq_name), GFP_KERNEL);
1704 
1705 	if (range->ops && range->ops->init_queue)
1706 		return range->ops->init_queue(range, inst);
1707 	else
1708 		return 0;
1709 }
1710 
1711 static int knav_queue_init_queues(struct knav_device *kdev)
1712 {
1713 	struct knav_range_info *range;
1714 	int size, id, base_idx;
1715 	int idx = 0, ret = 0;
1716 
1717 	/* how much do we need for instance data? */
1718 	size = sizeof(struct knav_queue_inst);
1719 
1720 	/* round this up to a power of 2 to keep the index-to-instance
1721 	 * arithmetic fast.
1722 	 */
1723 	kdev->inst_shift = order_base_2(size);
1724 	size = (1 << kdev->inst_shift) * kdev->num_queues_in_use;
1725 	kdev->instances = devm_kzalloc(kdev->dev, size, GFP_KERNEL);
1726 	if (!kdev->instances)
1727 		return -ENOMEM;
1728 
1729 	for_each_queue_range(kdev, range) {
1730 		if (range->ops && range->ops->init_range)
1731 			range->ops->init_range(range);
1732 		base_idx = idx;
1733 		for (id = range->queue_base;
1734 		     id < range->queue_base + range->num_queues; id++, idx++) {
1735 			ret = knav_queue_init_queue(kdev, range,
1736 					knav_queue_idx_to_inst(kdev, idx), id);
1737 			if (ret < 0)
1738 				return ret;
1739 		}
1740 		range->queue_base_inst =
1741 			knav_queue_idx_to_inst(kdev, base_idx);
1742 	}
1743 	return 0;
1744 }
1745 
1746 /* Match table for of_platform binding */
1747 static const struct of_device_id keystone_qmss_of_match[] = {
1748 	{
1749 		.compatible = "ti,keystone-navigator-qmss",
1750 	},
1751 	{
1752 		.compatible = "ti,66ak2g-navss-qm",
1753 		.data	= (void *)QMSS_66AK2G,
1754 	},
1755 	{},
1756 };
1757 MODULE_DEVICE_TABLE(of, keystone_qmss_of_match);
1758 
1759 static int knav_queue_probe(struct platform_device *pdev)
1760 {
1761 	struct device_node *node = pdev->dev.of_node;
1762 	struct device_node *qmgrs, *queue_pools, *regions, *pdsps;
1763 	const struct of_device_id *match;
1764 	struct device *dev = &pdev->dev;
1765 	u32 temp[2];
1766 	int ret;
1767 
1768 	if (!node) {
1769 		dev_err(dev, "device tree info unavailable\n");
1770 		return -ENODEV;
1771 	}
1772 
1773 	kdev = devm_kzalloc(dev, sizeof(struct knav_device), GFP_KERNEL);
1774 	if (!kdev) {
1775 		dev_err(dev, "memory allocation failed\n");
1776 		return -ENOMEM;
1777 	}
1778 
1779 	match = of_match_device(of_match_ptr(keystone_qmss_of_match), dev);
1780 	if (match && match->data)
1781 		kdev->version = QMSS_66AK2G;
1782 
1783 	platform_set_drvdata(pdev, kdev);
1784 	kdev->dev = dev;
1785 	INIT_LIST_HEAD(&kdev->queue_ranges);
1786 	INIT_LIST_HEAD(&kdev->qmgrs);
1787 	INIT_LIST_HEAD(&kdev->pools);
1788 	INIT_LIST_HEAD(&kdev->regions);
1789 	INIT_LIST_HEAD(&kdev->pdsps);
1790 
1791 	pm_runtime_enable(&pdev->dev);
1792 	ret = pm_runtime_get_sync(&pdev->dev);
1793 	if (ret < 0) {
1794 		dev_err(dev, "Failed to enable QMSS\n");
1795 		return ret;
1796 	}
1797 
1798 	if (of_property_read_u32_array(node, "queue-range", temp, 2)) {
1799 		dev_err(dev, "queue-range not specified\n");
1800 		ret = -ENODEV;
1801 		goto err;
1802 	}
1803 	kdev->base_id    = temp[0];
1804 	kdev->num_queues = temp[1];
1805 
1806 	/* Initialize queue managers using device tree configuration */
1807 	qmgrs =  of_get_child_by_name(node, "qmgrs");
1808 	if (!qmgrs) {
1809 		dev_err(dev, "queue manager info not specified\n");
1810 		ret = -ENODEV;
1811 		goto err;
1812 	}
1813 	ret = knav_queue_init_qmgrs(kdev, qmgrs);
1814 	of_node_put(qmgrs);
1815 	if (ret)
1816 		goto err;
1817 
1818 	/* get pdsp configuration values from device tree */
1819 	pdsps =  of_get_child_by_name(node, "pdsps");
1820 	if (pdsps) {
1821 		ret = knav_queue_init_pdsps(kdev, pdsps);
1822 		if (ret)
1823 			goto err;
1824 
1825 		ret = knav_queue_start_pdsps(kdev);
1826 		if (ret)
1827 			goto err;
1828 	}
1829 	of_node_put(pdsps);
1830 
1831 	/* get usable queue range values from device tree */
1832 	queue_pools = of_get_child_by_name(node, "queue-pools");
1833 	if (!queue_pools) {
1834 		dev_err(dev, "queue-pools not specified\n");
1835 		ret = -ENODEV;
1836 		goto err;
1837 	}
1838 	ret = knav_setup_queue_pools(kdev, queue_pools);
1839 	of_node_put(queue_pools);
1840 	if (ret)
1841 		goto err;
1842 
1843 	ret = knav_get_link_ram(kdev, "linkram0", &kdev->link_rams[0]);
1844 	if (ret) {
1845 		dev_err(kdev->dev, "could not setup linking ram\n");
1846 		goto err;
1847 	}
1848 
1849 	ret = knav_get_link_ram(kdev, "linkram1", &kdev->link_rams[1]);
1850 	if (ret) {
1851 		/*
1852 		 * nothing really, we have one linking ram already, so we just
1853 		 * live within our means
1854 		 */
1855 	}
1856 
1857 	ret = knav_queue_setup_link_ram(kdev);
1858 	if (ret)
1859 		goto err;
1860 
1861 	regions =  of_get_child_by_name(node, "descriptor-regions");
1862 	if (!regions) {
1863 		dev_err(dev, "descriptor-regions not specified\n");
1864 		goto err;
1865 	}
1866 	ret = knav_queue_setup_regions(kdev, regions);
1867 	of_node_put(regions);
1868 	if (ret)
1869 		goto err;
1870 
1871 	ret = knav_queue_init_queues(kdev);
1872 	if (ret < 0) {
1873 		dev_err(dev, "hwqueue initialization failed\n");
1874 		goto err;
1875 	}
1876 
1877 	debugfs_create_file("qmss", S_IFREG | S_IRUGO, NULL, NULL,
1878 			    &knav_queue_debug_ops);
1879 	device_ready = true;
1880 	return 0;
1881 
1882 err:
1883 	knav_queue_stop_pdsps(kdev);
1884 	knav_queue_free_regions(kdev);
1885 	knav_free_queue_ranges(kdev);
1886 	pm_runtime_put_sync(&pdev->dev);
1887 	pm_runtime_disable(&pdev->dev);
1888 	return ret;
1889 }
1890 
1891 static int knav_queue_remove(struct platform_device *pdev)
1892 {
1893 	/* TODO: Free resources */
1894 	pm_runtime_put_sync(&pdev->dev);
1895 	pm_runtime_disable(&pdev->dev);
1896 	return 0;
1897 }
1898 
1899 static struct platform_driver keystone_qmss_driver = {
1900 	.probe		= knav_queue_probe,
1901 	.remove		= knav_queue_remove,
1902 	.driver		= {
1903 		.name	= "keystone-navigator-qmss",
1904 		.of_match_table = keystone_qmss_of_match,
1905 	},
1906 };
1907 module_platform_driver(keystone_qmss_driver);
1908 
1909 MODULE_LICENSE("GPL v2");
1910 MODULE_DESCRIPTION("TI QMSS driver for Keystone SOCs");
1911 MODULE_AUTHOR("Sandeep Nair <sandeep_n@ti.com>");
1912 MODULE_AUTHOR("Santosh Shilimkar <santosh.shilimkar@ti.com>");
1913