// SPDX-License-Identifier: GPL-2.0
/*
 * Interconnect framework core driver
 *
 * Copyright (c) 2017-2019, Linaro Ltd.
 * Author: Georgi Djakov <georgi.djakov@linaro.org>
 */

#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/idr.h>
#include <linux/init.h>
#include <linux/interconnect.h>
#include <linux/interconnect-provider.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/overflow.h>

#include "internal.h"

#define CREATE_TRACE_POINTS
#include "trace.h"

static DEFINE_IDR(icc_idr);
static LIST_HEAD(icc_providers);
static DEFINE_MUTEX(icc_lock);
static struct dentry *icc_debugfs_dir;

static void icc_summary_show_one(struct seq_file *s, struct icc_node *n)
{
	if (!n)
		return;

	seq_printf(s, "%-42s %12u %12u\n",
		   n->name, n->avg_bw, n->peak_bw);
}

static int icc_summary_show(struct seq_file *s, void *data)
{
	struct icc_provider *provider;

	seq_puts(s, " node                                  tag          avg         peak\n");
	seq_puts(s, "--------------------------------------------------------------------\n");

	mutex_lock(&icc_lock);

	list_for_each_entry(provider, &icc_providers, provider_list) {
		struct icc_node *n;

		list_for_each_entry(n, &provider->nodes, node_list) {
			struct icc_req *r;

			icc_summary_show_one(s, n);
			hlist_for_each_entry(r, &n->req_list, req_node) {
				if (!r->dev)
					continue;

				seq_printf(s, "  %-27s %12u %12u %12u\n",
					   dev_name(r->dev), r->tag, r->avg_bw,
					   r->peak_bw);
			}
		}
	}

	mutex_unlock(&icc_lock);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(icc_summary);

static void icc_graph_show_link(struct seq_file *s, int level,
				struct icc_node *n, struct icc_node *m)
{
	seq_printf(s, "%s\"%d:%s\" -> \"%d:%s\"\n",
		   level == 2 ? "\t\t" : "\t",
		   n->id, n->name, m->id, m->name);
}

static void icc_graph_show_node(struct seq_file *s, struct icc_node *n)
{
	seq_printf(s, "\t\t\"%d:%s\" [label=\"%d:%s",
		   n->id, n->name, n->id, n->name);
	seq_printf(s, "\n\t\t\t|avg_bw=%ukBps", n->avg_bw);
	seq_printf(s, "\n\t\t\t|peak_bw=%ukBps", n->peak_bw);
	seq_puts(s, "\"]\n");
}

static int icc_graph_show(struct seq_file *s, void *data)
{
	struct icc_provider *provider;
	struct icc_node *n;
	int cluster_index = 0;
	int i;

	seq_puts(s, "digraph {\n\trankdir = LR\n\tnode [shape = record]\n");
	mutex_lock(&icc_lock);

	/* draw providers as cluster subgraphs */
	cluster_index = 0;
	list_for_each_entry(provider, &icc_providers, provider_list) {
		seq_printf(s, "\tsubgraph cluster_%d {\n", ++cluster_index);
		if (provider->dev)
			seq_printf(s, "\t\tlabel = \"%s\"\n",
				   dev_name(provider->dev));

		/* draw nodes */
		list_for_each_entry(n, &provider->nodes, node_list)
			icc_graph_show_node(s, n);

		/* draw internal links */
		list_for_each_entry(n, &provider->nodes, node_list)
			for (i = 0; i < n->num_links; ++i)
				if (n->provider == n->links[i]->provider)
					icc_graph_show_link(s, 2, n,
							    n->links[i]);

		seq_puts(s, "\t}\n");
	}

	/* draw external links */
	list_for_each_entry(provider, &icc_providers, provider_list)
		list_for_each_entry(n, &provider->nodes, node_list)
			for (i = 0; i < n->num_links; ++i)
				if (n->provider != n->links[i]->provider)
					icc_graph_show_link(s, 1, n,
							    n->links[i]);

	mutex_unlock(&icc_lock);
	seq_puts(s, "}");

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(icc_graph);

static struct icc_node *node_find(const int id)
{
	return idr_find(&icc_idr, id);
}

static struct icc_path *path_init(struct device *dev, struct icc_node *dst,
				  ssize_t num_nodes)
{
	struct icc_node *node = dst;
	struct icc_path *path;
	int i;

	path = kzalloc(struct_size(path, reqs, num_nodes), GFP_KERNEL);
	if (!path)
		return ERR_PTR(-ENOMEM);

	path->num_nodes = num_nodes;

	for (i = num_nodes - 1; i >= 0; i--) {
		node->provider->users++;
		hlist_add_head(&path->reqs[i].req_node, &node->req_list);
		path->reqs[i].node = node;
		path->reqs[i].dev = dev;
		path->reqs[i].enabled = true;
		/* reference to previous node was saved during path traversal */
		node = node->reverse;
	}

	return path;
}

static struct icc_path *path_find(struct device *dev, struct icc_node *src,
				  struct icc_node *dst)
{
	struct icc_path *path = ERR_PTR(-EPROBE_DEFER);
	struct icc_node *n, *node = NULL;
	struct list_head traverse_list;
	struct list_head edge_list;
	struct list_head visited_list;
	size_t i, depth = 1;
	bool found = false;

	INIT_LIST_HEAD(&traverse_list);
	INIT_LIST_HEAD(&edge_list);
	INIT_LIST_HEAD(&visited_list);

	list_add(&src->search_list, &traverse_list);
	src->reverse = NULL;

	do {
		list_for_each_entry_safe(node, n, &traverse_list, search_list) {
			if (node == dst) {
				found = true;
				list_splice_init(&edge_list, &visited_list);
				list_splice_init(&traverse_list, &visited_list);
				break;
			}
			for (i = 0; i < node->num_links; i++) {
				struct icc_node *tmp = node->links[i];

				if (!tmp) {
					path = ERR_PTR(-ENOENT);
					goto out;
				}

				if (tmp->is_traversed)
					continue;

				tmp->is_traversed = true;
				tmp->reverse = node;
				list_add_tail(&tmp->search_list, &edge_list);
			}
		}

		if (found)
			break;

		list_splice_init(&traverse_list, &visited_list);
		list_splice_init(&edge_list, &traverse_list);

		/* count the hops including the source */
		depth++;

	} while (!list_empty(&traverse_list));

out:

	/* reset the traversed state */
	list_for_each_entry_reverse(n, &visited_list, search_list)
		n->is_traversed = false;

	if (found)
		path = path_init(dev, dst, depth);

	return path;
}

/*
 * We want the path to honor all bandwidth requests, so the average and peak
 * bandwidth requirements from each consumer are aggregated at each node.
 * The aggregation is platform specific, so each platform can customize it by
 * implementing its own aggregate() function.
 */

static int aggregate_requests(struct icc_node *node)
{
	struct icc_provider *p = node->provider;
	struct icc_req *r;

	node->avg_bw = 0;
	node->peak_bw = 0;

	if (p->pre_aggregate)
		p->pre_aggregate(node);

	hlist_for_each_entry(r, &node->req_list, req_node) {
		if (!r->enabled)
			continue;
		p->aggregate(node, r->tag, r->avg_bw, r->peak_bw,
			     &node->avg_bw, &node->peak_bw);
	}

	return 0;
}

static int apply_constraints(struct icc_path *path)
{
	struct icc_node *next, *prev = NULL;
	int ret = -EINVAL;
	int i;

	for (i = 0; i < path->num_nodes; i++) {
		next = path->reqs[i].node;

		/*
		 * Both endpoints should be valid master-slave pairs of the
		 * same interconnect provider that will be configured.
		 */
		if (!prev || next->provider != prev->provider) {
			prev = next;
			continue;
		}

		/* set the constraints */
		ret = next->provider->set(prev, next);
		if (ret)
			goto out;

		prev = next;
	}
out:
	return ret;
}

int icc_std_aggregate(struct icc_node *node, u32 tag, u32 avg_bw,
		      u32 peak_bw, u32 *agg_avg, u32 *agg_peak)
{
	*agg_avg += avg_bw;
	*agg_peak = max(*agg_peak, peak_bw);

	return 0;
}
EXPORT_SYMBOL_GPL(icc_std_aggregate);
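
/*
 * Illustrative sketch (not part of this file): a provider that needs
 * something other than icc_std_aggregate() supplies its own callback with
 * the same signature via the ->aggregate hook used above. The name
 * foo_aggregate() and the 3/2 scaling are made up for the example:
 *
 *	static int foo_aggregate(struct icc_node *node, u32 tag, u32 avg_bw,
 *				 u32 peak_bw, u32 *agg_avg, u32 *agg_peak)
 *	{
 *		*agg_avg += avg_bw * 3 / 2;
 *		*agg_peak = max(*agg_peak, peak_bw);
 *
 *		return 0;
 *	}
 */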

/**
 * of_icc_xlate_onecell() - Translate function using a single index.
 * @spec: OF phandle args to map into an interconnect node.
 * @data: private data (pointer to struct icc_onecell_data)
 *
 * This is a generic translate function that can be used to model simple
 * interconnect providers that have one device tree node and provide
 * multiple interconnect nodes. A single cell is used as an index into
 * an array of icc nodes specified in the icc_onecell_data struct when
 * registering the provider.
 */
struct icc_node *of_icc_xlate_onecell(struct of_phandle_args *spec,
				      void *data)
{
	struct icc_onecell_data *icc_data = data;
	unsigned int idx = spec->args[0];

	if (idx >= icc_data->num_nodes) {
		pr_err("%s: invalid index %u\n", __func__, idx);
		return ERR_PTR(-EINVAL);
	}

	return icc_data->nodes[idx];
}
EXPORT_SYMBOL_GPL(of_icc_xlate_onecell);
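
/*
 * Minimal usage sketch (illustrative only, assuming the num_nodes/nodes[]
 * layout of struct icc_onecell_data that is dereferenced above; the names
 * foo_provider and foo_node_* are hypothetical). A provider with a flat
 * array of nodes can hand that array over and let of_icc_xlate_onecell()
 * resolve the single-cell DT specifier:
 *
 *	static struct icc_onecell_data *onecell;
 *
 *	onecell = kzalloc(struct_size(onecell, nodes, num_nodes), GFP_KERNEL);
 *	onecell->num_nodes = num_nodes;
 *	onecell->nodes[0] = foo_node_master;
 *	onecell->nodes[1] = foo_node_slave;
 *
 *	foo_provider.xlate = of_icc_xlate_onecell;
 *	foo_provider.data = onecell;
 */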

/**
 * of_icc_get_from_provider() - Look-up interconnect node
 * @spec: OF phandle args to use for look-up
 *
 * Looks for an interconnect provider under the node specified by @spec and,
 * if found, uses the provider's xlate function to map the phandle args to an
 * interconnect node.
 *
 * Returns a valid pointer to struct icc_node on success or ERR_PTR()
 * on failure.
 */
static struct icc_node *of_icc_get_from_provider(struct of_phandle_args *spec)
{
	struct icc_node *node = ERR_PTR(-EPROBE_DEFER);
	struct icc_provider *provider;

	if (!spec || spec->args_count != 1)
		return ERR_PTR(-EINVAL);

	mutex_lock(&icc_lock);
	list_for_each_entry(provider, &icc_providers, provider_list) {
		if (provider->dev->of_node == spec->np)
			node = provider->xlate(spec, provider->data);
		if (!IS_ERR(node))
			break;
	}
	mutex_unlock(&icc_lock);

	return node;
}

static void devm_icc_release(struct device *dev, void *res)
{
	icc_put(*(struct icc_path **)res);
}

/**
 * devm_of_icc_get() - resource-managed version of of_icc_get()
 * @dev: device pointer for the consumer device
 * @name: interconnect path name
 *
 * Gets a path just like of_icc_get(), but the path is tracked via devres and
 * is automatically released with icc_put() when @dev is unbound.
 */
struct icc_path *devm_of_icc_get(struct device *dev, const char *name)
{
	struct icc_path **ptr, *path;

	ptr = devres_alloc(devm_icc_release, sizeof(**ptr), GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	path = of_icc_get(dev, name);
	if (!IS_ERR(path)) {
		*ptr = path;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return path;
}
EXPORT_SYMBOL_GPL(devm_of_icc_get);

/**
 * of_icc_get_by_index() - get a path handle from a DT node based on index
 * @dev: device pointer for the consumer device
 * @idx: interconnect path index
 *
 * This function will search for a path between two endpoints and return an
 * icc_path handle on success. Use icc_put() to release constraints when they
 * are not needed anymore.
 * If the interconnect API is disabled, NULL is returned and the consumer
 * drivers will still build. Drivers are free to handle this specifically,
 * but they don't have to.
 *
 * Return: icc_path pointer on success or ERR_PTR() on error. NULL is returned
 * when the API is disabled or the "interconnects" DT property is missing.
 */
struct icc_path *of_icc_get_by_index(struct device *dev, int idx)
{
	struct icc_path *path;
	struct icc_node *src_node, *dst_node;
	struct device_node *np;
	struct of_phandle_args src_args, dst_args;
	int ret;

	if (!dev || !dev->of_node)
		return ERR_PTR(-ENODEV);

	np = dev->of_node;

	/*
	 * When the consumer DT node does not have an "interconnects" property,
	 * return a NULL path to skip setting constraints.
	 */
	if (!of_find_property(np, "interconnects", NULL))
		return NULL;

	/*
	 * We use a combination of phandle and specifier for endpoint. For now
	 * let's support only global ids and extend this in the future if
	 * needed, without breaking DT compatibility.
	 */
	ret = of_parse_phandle_with_args(np, "interconnects",
					 "#interconnect-cells", idx * 2,
					 &src_args);
	if (ret)
		return ERR_PTR(ret);

	of_node_put(src_args.np);

	ret = of_parse_phandle_with_args(np, "interconnects",
					 "#interconnect-cells", idx * 2 + 1,
					 &dst_args);
	if (ret)
		return ERR_PTR(ret);

	of_node_put(dst_args.np);

	src_node = of_icc_get_from_provider(&src_args);

	if (IS_ERR(src_node)) {
		if (PTR_ERR(src_node) != -EPROBE_DEFER)
			dev_err(dev, "error finding src node: %ld\n",
				PTR_ERR(src_node));
		return ERR_CAST(src_node);
	}

	dst_node = of_icc_get_from_provider(&dst_args);

	if (IS_ERR(dst_node)) {
		if (PTR_ERR(dst_node) != -EPROBE_DEFER)
			dev_err(dev, "error finding dst node: %ld\n",
				PTR_ERR(dst_node));
		return ERR_CAST(dst_node);
	}

	mutex_lock(&icc_lock);
	path = path_find(dev, src_node, dst_node);
	mutex_unlock(&icc_lock);
	if (IS_ERR(path)) {
		dev_err(dev, "%s: invalid path=%ld\n", __func__, PTR_ERR(path));
		return path;
	}

	path->name = kasprintf(GFP_KERNEL, "%s-%s",
			       src_node->name, dst_node->name);
	if (!path->name) {
		kfree(path);
		return ERR_PTR(-ENOMEM);
	}

	return path;
}
EXPORT_SYMBOL_GPL(of_icc_get_by_index);

/**
 * of_icc_get() - get a path handle from a DT node based on name
 * @dev: device pointer for the consumer device
 * @name: interconnect path name
 *
 * This function will search for a path between two endpoints and return an
 * icc_path handle on success. Use icc_put() to release constraints when they
 * are not needed anymore.
 * If the interconnect API is disabled, NULL is returned and the consumer
 * drivers will still build. Drivers are free to handle this specifically,
 * but they don't have to.
 *
 * Return: icc_path pointer on success or ERR_PTR() on error. NULL is returned
 * when the API is disabled or the "interconnects" DT property is missing.
 */
struct icc_path *of_icc_get(struct device *dev, const char *name)
{
	struct device_node *np;
	int idx = 0;

	if (!dev || !dev->of_node)
		return ERR_PTR(-ENODEV);

	np = dev->of_node;

	/*
	 * When the consumer DT node does not have an "interconnects" property,
	 * return a NULL path to skip setting constraints.
	 */
	if (!of_find_property(np, "interconnects", NULL))
		return NULL;

	/*
	 * We use a combination of phandle and specifier for endpoint. For now
	 * let's support only global ids and extend this in the future if
	 * needed, without breaking DT compatibility.
	 */
	if (name) {
		idx = of_property_match_string(np, "interconnect-names", name);
		if (idx < 0)
			return ERR_PTR(idx);
	}

	return of_icc_get_by_index(dev, idx);
}
EXPORT_SYMBOL_GPL(of_icc_get);
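
/*
 * Typical consumer sequence (illustrative sketch, not part of this file).
 * The path name "cpu-mem" and the bandwidth numbers are hypothetical; real
 * consumers take them from their "interconnect-names" DT property and from
 * their actual throughput needs:
 *
 *	path = of_icc_get(dev, "cpu-mem");
 *	if (IS_ERR(path))
 *		return PTR_ERR(path);
 *
 *	ret = icc_set_bw(path, 800000, 1600000);	(kBps, avg then peak)
 *	...
 *	icc_put(path);
 *
 * devm_of_icc_get() can be used instead when the path should be released
 * automatically together with the consumer device.
 */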

/**
 * icc_set_tag() - set an optional tag on a path
 * @path: the path we want to tag
 * @tag: the tag value
 *
 * This function allows consumers to append a tag to the requests associated
 * with a path, so that a different aggregation could be done based on this tag.
 */
void icc_set_tag(struct icc_path *path, u32 tag)
{
	int i;

	if (!path)
		return;

	mutex_lock(&icc_lock);

	for (i = 0; i < path->num_nodes; i++)
		path->reqs[i].tag = tag;

	mutex_unlock(&icc_lock);
}
EXPORT_SYMBOL_GPL(icc_set_tag);

/**
 * icc_set_bw() - set bandwidth constraints on an interconnect path
 * @path: reference to the path returned by icc_get()
 * @avg_bw: average bandwidth in kilobytes per second
 * @peak_bw: peak bandwidth in kilobytes per second
 *
 * This function is used by an interconnect consumer to express its own needs
 * in terms of bandwidth for a previously requested path between two endpoints.
 * The requests are aggregated and each node is updated accordingly. The entire
 * path is locked by a mutex to ensure that the set() is completed.
 * The @path can be NULL when the "interconnects" DT property is missing,
 * in which case no constraints will be set.
 *
 * Returns 0 on success, or an appropriate error code otherwise.
 */
int icc_set_bw(struct icc_path *path, u32 avg_bw, u32 peak_bw)
{
	struct icc_node *node;
	u32 old_avg, old_peak;
	size_t i;
	int ret;

	if (!path)
		return 0;

	if (WARN_ON(IS_ERR(path) || !path->num_nodes))
		return -EINVAL;

	mutex_lock(&icc_lock);

	old_avg = path->reqs[0].avg_bw;
	old_peak = path->reqs[0].peak_bw;

	for (i = 0; i < path->num_nodes; i++) {
		node = path->reqs[i].node;

		/* update the consumer request for this path */
		path->reqs[i].avg_bw = avg_bw;
		path->reqs[i].peak_bw = peak_bw;

		/* aggregate requests for this node */
		aggregate_requests(node);

		trace_icc_set_bw(path, node, i, avg_bw, peak_bw);
	}

	ret = apply_constraints(path);
	if (ret) {
		pr_debug("interconnect: error applying constraints (%d)\n",
			 ret);

		for (i = 0; i < path->num_nodes; i++) {
			node = path->reqs[i].node;
			path->reqs[i].avg_bw = old_avg;
			path->reqs[i].peak_bw = old_peak;
			aggregate_requests(node);
		}
		apply_constraints(path);
	}

	mutex_unlock(&icc_lock);

	trace_icc_set_bw_end(path, ret);

	return ret;
}
EXPORT_SYMBOL_GPL(icc_set_bw);

static int __icc_enable(struct icc_path *path, bool enable)
{
	int i;

	if (!path)
		return 0;

	if (WARN_ON(IS_ERR(path) || !path->num_nodes))
		return -EINVAL;

	mutex_lock(&icc_lock);

	for (i = 0; i < path->num_nodes; i++)
		path->reqs[i].enabled = enable;

	mutex_unlock(&icc_lock);

	return icc_set_bw(path, path->reqs[0].avg_bw,
			  path->reqs[0].peak_bw);
}

/**
 * icc_enable() - re-enable the requests on an interconnect path
 * @path: interconnect path
 *
 * Mark all requests on @path as enabled again and re-apply the stored
 * bandwidth values.
 */
int icc_enable(struct icc_path *path)
{
	return __icc_enable(path, true);
}
EXPORT_SYMBOL_GPL(icc_enable);

/**
 * icc_disable() - disable the requests on an interconnect path
 * @path: interconnect path
 *
 * Mark all requests on @path as disabled so that they are skipped during
 * aggregation, without discarding the stored bandwidth values.
 */
int icc_disable(struct icc_path *path)
{
	return __icc_enable(path, false);
}
EXPORT_SYMBOL_GPL(icc_disable);
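
/*
 * Illustrative sketch (not part of this file): a consumer can park its
 * bandwidth votes across a low-power transition without forgetting them by
 * bracketing it with icc_disable()/icc_enable(). The foo_suspend()/
 * foo_resume() callbacks and priv->path below are hypothetical:
 *
 *	static int foo_suspend(struct device *dev)
 *	{
 *		struct foo_priv *priv = dev_get_drvdata(dev);
 *
 *		return icc_disable(priv->path);
 *	}
 *
 *	static int foo_resume(struct device *dev)
 *	{
 *		struct foo_priv *priv = dev_get_drvdata(dev);
 *
 *		return icc_enable(priv->path);
 *	}
 */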

/**
 * icc_get() - return a handle for path between two endpoints
 * @dev: the device requesting the path
 * @src_id: source device port id
 * @dst_id: destination device port id
 *
 * This function will search for a path between two endpoints and return an
 * icc_path handle on success. Use icc_put() to release
 * constraints when they are not needed anymore.
 * If the interconnect API is disabled, NULL is returned and the consumer
 * drivers will still build. Drivers are free to handle this specifically,
 * but they don't have to.
 *
 * Return: icc_path pointer on success, ERR_PTR() on error or NULL if the
 * interconnect API is disabled.
 */
struct icc_path *icc_get(struct device *dev, const int src_id, const int dst_id)
{
	struct icc_node *src, *dst;
	struct icc_path *path = ERR_PTR(-EPROBE_DEFER);

	mutex_lock(&icc_lock);

	src = node_find(src_id);
	if (!src)
		goto out;

	dst = node_find(dst_id);
	if (!dst)
		goto out;

	path = path_find(dev, src, dst);
	if (IS_ERR(path)) {
		dev_err(dev, "%s: invalid path=%ld\n", __func__, PTR_ERR(path));
		goto out;
	}

	path->name = kasprintf(GFP_KERNEL, "%s-%s", src->name, dst->name);
	if (!path->name) {
		kfree(path);
		path = ERR_PTR(-ENOMEM);
	}
out:
	mutex_unlock(&icc_lock);
	return path;
}
EXPORT_SYMBOL_GPL(icc_get);

/**
 * icc_put() - release the reference to the icc_path
 * @path: interconnect path
 *
 * Use this function to release the constraints on a path when the path is
 * no longer needed. The constraints will be re-aggregated.
 */
void icc_put(struct icc_path *path)
{
	struct icc_node *node;
	size_t i;
	int ret;

	if (!path || WARN_ON(IS_ERR(path)))
		return;

	ret = icc_set_bw(path, 0, 0);
	if (ret)
		pr_err("%s: error (%d)\n", __func__, ret);

	mutex_lock(&icc_lock);
	for (i = 0; i < path->num_nodes; i++) {
		node = path->reqs[i].node;
		hlist_del(&path->reqs[i].req_node);
		if (!WARN_ON(!node->provider->users))
			node->provider->users--;
	}
	mutex_unlock(&icc_lock);

	kfree_const(path->name);
	kfree(path);
}
EXPORT_SYMBOL_GPL(icc_put);

static struct icc_node *icc_node_create_nolock(int id)
{
	struct icc_node *node;

	/* check if node already exists */
	node = node_find(id);
	if (node)
		return node;

	node = kzalloc(sizeof(*node), GFP_KERNEL);
	if (!node)
		return ERR_PTR(-ENOMEM);

	id = idr_alloc(&icc_idr, node, id, id + 1, GFP_KERNEL);
	if (id < 0) {
		WARN(1, "%s: couldn't get idr\n", __func__);
		kfree(node);
		return ERR_PTR(id);
	}

	node->id = id;

	return node;
}

/**
 * icc_node_create() - create a node
 * @id: node id
 *
 * Return: icc_node pointer on success, or ERR_PTR() on error
 */
struct icc_node *icc_node_create(int id)
{
	struct icc_node *node;

	mutex_lock(&icc_lock);

	node = icc_node_create_nolock(id);

	mutex_unlock(&icc_lock);

	return node;
}
EXPORT_SYMBOL_GPL(icc_node_create);

/**
 * icc_node_destroy() - destroy a node
 * @id: node id
 */
void icc_node_destroy(int id)
{
	struct icc_node *node;

	mutex_lock(&icc_lock);

	node = node_find(id);
	if (node) {
		idr_remove(&icc_idr, node->id);
		WARN_ON(!hlist_empty(&node->req_list));
	}

	mutex_unlock(&icc_lock);

	kfree(node);
}
EXPORT_SYMBOL_GPL(icc_node_destroy);

/**
 * icc_link_create() - create a link between two nodes
 * @node: source node
 * @dst_id: destination node id
 *
 * Create a link between two nodes. The nodes might belong to different
 * interconnect providers and the @dst_id node might not exist yet (if its
 * provider driver has not been probed). In that case just create the
 * @dst_id node now; the rest of its data will be filled in once the actual
 * provider driver is probed.
 *
 * Return: 0 on success, or an error code otherwise
 */
int icc_link_create(struct icc_node *node, const int dst_id)
{
	struct icc_node *dst;
	struct icc_node **new;
	int ret = 0;

	if (!node->provider)
		return -EINVAL;

	mutex_lock(&icc_lock);

	dst = node_find(dst_id);
	if (!dst) {
		dst = icc_node_create_nolock(dst_id);

		if (IS_ERR(dst)) {
			ret = PTR_ERR(dst);
			goto out;
		}
	}

	new = krealloc(node->links,
		       (node->num_links + 1) * sizeof(*node->links),
		       GFP_KERNEL);
	if (!new) {
		ret = -ENOMEM;
		goto out;
	}

	node->links = new;
	node->links[node->num_links++] = dst;

out:
	mutex_unlock(&icc_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(icc_link_create);

/**
 * icc_link_destroy() - destroy a link between two nodes
 * @src: pointer to source node
 * @dst: pointer to destination node
 *
 * Return: 0 on success, or an error code otherwise
 */
int icc_link_destroy(struct icc_node *src, struct icc_node *dst)
{
	struct icc_node **new;
	size_t slot;
	int ret = 0;

	if (IS_ERR_OR_NULL(src))
		return -EINVAL;

	if (IS_ERR_OR_NULL(dst))
		return -EINVAL;

	mutex_lock(&icc_lock);

	for (slot = 0; slot < src->num_links; slot++)
		if (src->links[slot] == dst)
			break;

	if (WARN_ON(slot == src->num_links)) {
		ret = -ENXIO;
		goto out;
	}

	src->links[slot] = src->links[--src->num_links];

	new = krealloc(src->links, src->num_links * sizeof(*src->links),
		       GFP_KERNEL);
	if (new)
		src->links = new;

out:
	mutex_unlock(&icc_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(icc_link_destroy);

/**
 * icc_node_add() - add interconnect node to interconnect provider
 * @node: pointer to the interconnect node
 * @provider: pointer to the interconnect provider
 */
void icc_node_add(struct icc_node *node, struct icc_provider *provider)
{
	mutex_lock(&icc_lock);

	node->provider = provider;
	list_add_tail(&node->node_list, &provider->nodes);

	mutex_unlock(&icc_lock);
}
EXPORT_SYMBOL_GPL(icc_node_add);

/**
 * icc_node_del() - delete interconnect node from interconnect provider
 * @node: pointer to the interconnect node
 */
void icc_node_del(struct icc_node *node)
{
	mutex_lock(&icc_lock);

	list_del(&node->node_list);

	mutex_unlock(&icc_lock);
}
EXPORT_SYMBOL_GPL(icc_node_del);

/**
 * icc_nodes_remove() - remove all previously added nodes from provider
 * @provider: the interconnect provider we are removing nodes from
 *
 * Return: 0 on success, or an error code otherwise
 */
int icc_nodes_remove(struct icc_provider *provider)
{
	struct icc_node *n, *tmp;

	if (WARN_ON(IS_ERR_OR_NULL(provider)))
		return -EINVAL;

	list_for_each_entry_safe_reverse(n, tmp, &provider->nodes, node_list) {
		icc_node_del(n);
		icc_node_destroy(n->id);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(icc_nodes_remove);

/**
 * icc_provider_add() - add a new interconnect provider
 * @provider: the interconnect provider that will be added into topology
 *
 * Return: 0 on success, or an error code otherwise
 */
int icc_provider_add(struct icc_provider *provider)
{
	if (WARN_ON(!provider->set))
		return -EINVAL;
	if (WARN_ON(!provider->xlate))
		return -EINVAL;

	mutex_lock(&icc_lock);

	INIT_LIST_HEAD(&provider->nodes);
	list_add_tail(&provider->provider_list, &icc_providers);

	mutex_unlock(&icc_lock);

	dev_dbg(provider->dev, "interconnect provider added to topology\n");

	return 0;
}
EXPORT_SYMBOL_GPL(icc_provider_add);
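
/*
 * Rough provider registration sequence (illustrative sketch, not part of
 * this file; the foo_* callbacks, data and node ids are hypothetical).
 * A provider driver registers itself first and then populates nodes and
 * links, since icc_node_add() and icc_link_create() expect the provider
 * to be in place:
 *
 *	provider->dev = &pdev->dev;
 *	provider->set = foo_set;
 *	provider->aggregate = icc_std_aggregate;
 *	provider->xlate = of_icc_xlate_onecell;
 *	provider->data = foo_onecell_data;
 *
 *	ret = icc_provider_add(provider);
 *	if (ret)
 *		return ret;
 *
 *	node = icc_node_create(FOO_MASTER_ID);
 *	if (IS_ERR(node))
 *		goto err;
 *	node->name = "foo_master";
 *	icc_node_add(node, provider);
 *	icc_link_create(node, FOO_SLAVE_ID);
 */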

/**
 * icc_provider_del() - delete previously added interconnect provider
 * @provider: the interconnect provider that will be removed from topology
 *
 * Return: 0 on success, or an error code otherwise
 */
int icc_provider_del(struct icc_provider *provider)
{
	mutex_lock(&icc_lock);
	if (provider->users) {
		pr_warn("interconnect provider still has %d users\n",
			provider->users);
		mutex_unlock(&icc_lock);
		return -EBUSY;
	}

	if (!list_empty(&provider->nodes)) {
		pr_warn("interconnect provider still has nodes\n");
		mutex_unlock(&icc_lock);
		return -EBUSY;
	}

	list_del(&provider->provider_list);
	mutex_unlock(&icc_lock);

	return 0;
}
EXPORT_SYMBOL_GPL(icc_provider_del);
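
/*
 * Matching teardown sketch (illustrative, not part of this file): a provider
 * driver's remove path would typically drop its nodes first and then
 * unregister itself:
 *
 *	icc_nodes_remove(provider);
 *	icc_provider_del(provider);
 */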

static int __init icc_init(void)
{
	icc_debugfs_dir = debugfs_create_dir("interconnect", NULL);
	debugfs_create_file("interconnect_summary", 0444,
			    icc_debugfs_dir, NULL, &icc_summary_fops);
	debugfs_create_file("interconnect_graph", 0444,
			    icc_debugfs_dir, NULL, &icc_graph_fops);
	return 0;
}

device_initcall(icc_init);

MODULE_AUTHOR("Georgi Djakov <georgi.djakov@linaro.org>");
MODULE_DESCRIPTION("Interconnect Driver Core");
MODULE_LICENSE("GPL v2");