xref: /openbmc/linux/drivers/interconnect/core.c (revision 62e59c4e)
// SPDX-License-Identifier: GPL-2.0
/*
 * Interconnect framework core driver
 *
 * Copyright (c) 2017-2019, Linaro Ltd.
 * Author: Georgi Djakov <georgi.djakov@linaro.org>
 */

#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/idr.h>
#include <linux/init.h>
#include <linux/interconnect.h>
#include <linux/interconnect-provider.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/overflow.h>

static DEFINE_IDR(icc_idr);
static LIST_HEAD(icc_providers);
static DEFINE_MUTEX(icc_lock);
static struct dentry *icc_debugfs_dir;

/**
 * struct icc_req - constraints that are attached to each node
 * @req_node: entry in list of requests for the particular @node
 * @node: the interconnect node to which this constraint applies
 * @dev: reference to the device that sets the constraints
 * @avg_bw: an integer describing the average bandwidth in kBps
 * @peak_bw: an integer describing the peak bandwidth in kBps
 */
struct icc_req {
	struct hlist_node req_node;
	struct icc_node *node;
	struct device *dev;
	u32 avg_bw;
	u32 peak_bw;
};

/**
 * struct icc_path - interconnect path structure
 * @num_nodes: number of hops (nodes)
 * @reqs: array of the requests applicable to this path of nodes
 */
struct icc_path {
	size_t num_nodes;
	struct icc_req reqs[];
};

static void icc_summary_show_one(struct seq_file *s, struct icc_node *n)
{
	if (!n)
		return;

	seq_printf(s, "%-30s %12u %12u\n",
		   n->name, n->avg_bw, n->peak_bw);
}

static int icc_summary_show(struct seq_file *s, void *data)
{
	struct icc_provider *provider;

	seq_puts(s, " node                                   avg         peak\n");
	seq_puts(s, "--------------------------------------------------------\n");

	mutex_lock(&icc_lock);

	list_for_each_entry(provider, &icc_providers, provider_list) {
		struct icc_node *n;

		list_for_each_entry(n, &provider->nodes, node_list) {
			struct icc_req *r;

			icc_summary_show_one(s, n);
			hlist_for_each_entry(r, &n->req_list, req_node) {
				if (!r->dev)
					continue;

				seq_printf(s, "    %-26s %12u %12u\n",
					   dev_name(r->dev), r->avg_bw,
					   r->peak_bw);
			}
		}
	}

	mutex_unlock(&icc_lock);

	return 0;
}

static int icc_summary_open(struct inode *inode, struct file *file)
{
	return single_open(file, icc_summary_show, inode->i_private);
}

static const struct file_operations icc_summary_fops = {
	.open		= icc_summary_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static struct icc_node *node_find(const int id)
{
	return idr_find(&icc_idr, id);
}

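/*
 * path_init() - allocate a path and attach its per-node requests
 *
 * Walks the node chain recorded during graph traversal (the ->reverse
 * pointers, starting from @dst) and fills reqs[] so that reqs[0] is the
 * source and reqs[num_nodes - 1] is the destination. Each request is added
 * to the req_list of its node and the provider user count is incremented
 * for every node on the path.
 */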
static struct icc_path *path_init(struct device *dev, struct icc_node *dst,
				  ssize_t num_nodes)
{
	struct icc_node *node = dst;
	struct icc_path *path;
	int i;

	path = kzalloc(struct_size(path, reqs, num_nodes), GFP_KERNEL);
	if (!path)
		return ERR_PTR(-ENOMEM);

	path->num_nodes = num_nodes;

	for (i = num_nodes - 1; i >= 0; i--) {
		node->provider->users++;
		hlist_add_head(&path->reqs[i].req_node, &node->req_list);
		path->reqs[i].node = node;
		path->reqs[i].dev = dev;
		/* reference to previous node was saved during path traversal */
		node = node->reverse;
	}

	return path;
}

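/*
 * path_find() - breadth-first search for the shortest path between two nodes
 *
 * Starting from @src, the graph is explored level by level; the search_list
 * member of each node links it into the local traverse/edge/visited lists.
 * When @dst is reached, the traversal state is cleared and path_init() builds
 * the path from the ->reverse back-pointers. Returns the new path, or an
 * ERR_PTR(): -ENOENT on a NULL link, -EPROBE_DEFER if no path was found.
 * Must be called with icc_lock held.
 */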
static struct icc_path *path_find(struct device *dev, struct icc_node *src,
				  struct icc_node *dst)
{
	struct icc_path *path = ERR_PTR(-EPROBE_DEFER);
	struct icc_node *n, *node = NULL;
	struct list_head traverse_list;
	struct list_head edge_list;
	struct list_head visited_list;
	size_t i, depth = 1;
	bool found = false;

	INIT_LIST_HEAD(&traverse_list);
	INIT_LIST_HEAD(&edge_list);
	INIT_LIST_HEAD(&visited_list);

	list_add(&src->search_list, &traverse_list);
	src->reverse = NULL;

	do {
		list_for_each_entry_safe(node, n, &traverse_list, search_list) {
			if (node == dst) {
				found = true;
				list_splice_init(&edge_list, &visited_list);
				list_splice_init(&traverse_list, &visited_list);
				break;
			}
			for (i = 0; i < node->num_links; i++) {
				struct icc_node *tmp = node->links[i];

				if (!tmp) {
					path = ERR_PTR(-ENOENT);
					goto out;
				}

				if (tmp->is_traversed)
					continue;

				tmp->is_traversed = true;
				tmp->reverse = node;
				list_add_tail(&tmp->search_list, &edge_list);
			}
		}

		if (found)
			break;

		list_splice_init(&traverse_list, &visited_list);
		list_splice_init(&edge_list, &traverse_list);

		/* count the hops including the source */
		depth++;

	} while (!list_empty(&traverse_list));

out:

	/* reset the traversed state */
	list_for_each_entry_reverse(n, &visited_list, search_list)
		n->is_traversed = false;

	if (found)
		path = path_init(dev, dst, depth);

	return path;
}

/*
 * We want the path to honor all bandwidth requests, so the average and peak
 * bandwidth requirements from each consumer are aggregated at each node.
 * The aggregation is platform specific, so each platform can customize it by
 * implementing its own aggregate() function.
 */
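/*
 * As an illustrative sketch only (the callback name is made up), a minimal
 * provider aggregate() implementation could sum the average requests and
 * keep the maximum of the peak requests:
 *
 *	static int foo_aggregate(struct icc_node *node, u32 avg_bw,
 *				 u32 peak_bw, u32 *agg_avg, u32 *agg_peak)
 *	{
 *		*agg_avg += avg_bw;
 *		*agg_peak = max(*agg_peak, peak_bw);
 *
 *		return 0;
 *	}
 *
 * Real providers may weigh, scale or bucket the values differently.
 */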

static int aggregate_requests(struct icc_node *node)
{
	struct icc_provider *p = node->provider;
	struct icc_req *r;

	node->avg_bw = 0;
	node->peak_bw = 0;

	hlist_for_each_entry(r, &node->req_list, req_node)
		p->aggregate(node, r->avg_bw, r->peak_bw,
			     &node->avg_bw, &node->peak_bw);

	return 0;
}

static int apply_constraints(struct icc_path *path)
{
	struct icc_node *next, *prev = NULL;
	int ret = -EINVAL;
	int i;

	for (i = 0; i < path->num_nodes; i++) {
		next = path->reqs[i].node;

		/*
		 * Both endpoints should be valid master-slave pairs of the
		 * same interconnect provider that will be configured.
		 */
		if (!prev || next->provider != prev->provider) {
			prev = next;
			continue;
		}

		/* set the constraints */
		ret = next->provider->set(prev, next);
		if (ret)
			goto out;

		prev = next;
	}
out:
	return ret;
}

/**
 * of_icc_xlate_onecell() - Translate function using a single index.
 * @spec: OF phandle args to map into an interconnect node.
 * @data: private data (pointer to struct icc_onecell_data)
 *
 * This is a generic translate function that can be used to model simple
 * interconnect providers that have one device tree node and provide
 * multiple interconnect nodes. A single cell is used as an index into
 * an array of icc nodes specified in the icc_onecell_data struct when
 * registering the provider.
 */
struct icc_node *of_icc_xlate_onecell(struct of_phandle_args *spec,
				      void *data)
{
	struct icc_onecell_data *icc_data = data;
	unsigned int idx = spec->args[0];

	if (idx >= icc_data->num_nodes) {
		pr_err("%s: invalid index %u\n", __func__, idx);
		return ERR_PTR(-EINVAL);
	}

	return icc_data->nodes[idx];
}
EXPORT_SYMBOL_GPL(of_icc_xlate_onecell);
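
/*
 * A provider with a single device tree node can typically reuse this helper
 * by pointing its ->xlate at of_icc_xlate_onecell() and its ->data at a
 * struct icc_onecell_data listing the provider's nodes, roughly:
 *
 *	provider->xlate = of_icc_xlate_onecell;
 *	provider->data = onecell_data;
 *
 * (onecell_data is an illustrative name for a struct icc_onecell_data
 * pointer.) Its nodes[] array is then indexed by the single specifier cell
 * in the consumer's "interconnects" property.
 */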

/**
 * of_icc_get_from_provider() - Look-up interconnect node
 * @spec: OF phandle args to use for look-up
 *
 * Looks for an interconnect provider registered for the device tree node in
 * @spec and, if found, uses the provider's xlate function to map the phandle
 * args to an interconnect node.
 *
 * Returns a valid pointer to struct icc_node on success or ERR_PTR()
 * on failure.
 */
static struct icc_node *of_icc_get_from_provider(struct of_phandle_args *spec)
{
	struct icc_node *node = ERR_PTR(-EPROBE_DEFER);
	struct icc_provider *provider;

	if (!spec || spec->args_count != 1)
		return ERR_PTR(-EINVAL);

	mutex_lock(&icc_lock);
	list_for_each_entry(provider, &icc_providers, provider_list) {
		if (provider->dev->of_node == spec->np)
			node = provider->xlate(spec, provider->data);
		if (!IS_ERR(node))
			break;
	}
	mutex_unlock(&icc_lock);

	return node;
}

/**
 * of_icc_get() - get a path handle from a DT node based on name
 * @dev: device pointer for the consumer device
 * @name: interconnect path name
 *
 * This function will search for a path between two endpoints and return an
 * icc_path handle on success. Use icc_put() to release constraints when they
 * are not needed anymore.
 * If the interconnect API is disabled, NULL is returned so that consumer
 * drivers still build. Drivers may handle this case explicitly, but are not
 * required to.
 *
 * Return: icc_path pointer on success or ERR_PTR() on error. NULL is returned
 * when the API is disabled or the "interconnects" DT property is missing.
 */
struct icc_path *of_icc_get(struct device *dev, const char *name)
{
	struct icc_path *path = ERR_PTR(-EPROBE_DEFER);
	struct icc_node *src_node, *dst_node;
	struct device_node *np = NULL;
	struct of_phandle_args src_args, dst_args;
	int idx = 0;
	int ret;

	if (!dev || !dev->of_node)
		return ERR_PTR(-ENODEV);

	np = dev->of_node;

	/*
	 * When the consumer DT node does not have an "interconnects" property,
	 * return a NULL path to skip setting constraints.
	 */
	if (!of_find_property(np, "interconnects", NULL))
		return NULL;

	/*
	 * We use a combination of phandle and specifier for endpoint. For now
	 * let's support only global IDs and extend this in the future if
	 * needed without breaking DT compatibility.
	 */
	if (name) {
		idx = of_property_match_string(np, "interconnect-names", name);
		if (idx < 0)
			return ERR_PTR(idx);
	}

	ret = of_parse_phandle_with_args(np, "interconnects",
					 "#interconnect-cells", idx * 2,
					 &src_args);
	if (ret)
		return ERR_PTR(ret);

	of_node_put(src_args.np);

	ret = of_parse_phandle_with_args(np, "interconnects",
					 "#interconnect-cells", idx * 2 + 1,
					 &dst_args);
	if (ret)
		return ERR_PTR(ret);

	of_node_put(dst_args.np);

	src_node = of_icc_get_from_provider(&src_args);

	if (IS_ERR(src_node)) {
		if (PTR_ERR(src_node) != -EPROBE_DEFER)
			dev_err(dev, "error finding src node: %ld\n",
				PTR_ERR(src_node));
		return ERR_CAST(src_node);
	}

	dst_node = of_icc_get_from_provider(&dst_args);

	if (IS_ERR(dst_node)) {
		if (PTR_ERR(dst_node) != -EPROBE_DEFER)
			dev_err(dev, "error finding dst node: %ld\n",
				PTR_ERR(dst_node));
		return ERR_CAST(dst_node);
	}

	mutex_lock(&icc_lock);
	path = path_find(dev, src_node, dst_node);
	if (IS_ERR(path))
		dev_err(dev, "%s: invalid path=%ld\n", __func__, PTR_ERR(path));
	mutex_unlock(&icc_lock);

	return path;
}
EXPORT_SYMBOL_GPL(of_icc_get);
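
/*
 * An illustrative consumer DT fragment that of_icc_get() can resolve by path
 * name (the provider label and endpoint macros are made up):
 *
 *	interconnects = <&noc MASTER_FOO &noc SLAVE_BAR>;
 *	interconnect-names = "foo-mem";
 *
 * A driver would then request the path with of_icc_get(dev, "foo-mem").
 * Each phandle+specifier pair names one endpoint (source, then destination),
 * and the provider node must use #interconnect-cells = <1> for the global-ID
 * scheme handled above.
 */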

/**
 * icc_set_bw() - set bandwidth constraints on an interconnect path
 * @path: reference to the path returned by icc_get()
 * @avg_bw: average bandwidth in kilobytes per second
 * @peak_bw: peak bandwidth in kilobytes per second
 *
 * This function is used by an interconnect consumer to express its own needs
 * in terms of bandwidth for a previously requested path between two endpoints.
 * The requests are aggregated and each node is updated accordingly. The entire
 * path is locked by a mutex to ensure that the set() is completed.
 * The @path can be NULL when the "interconnects" DT property is missing,
 * in which case no constraints will be set.
 *
 * Returns 0 on success, or an appropriate error code otherwise.
 */
int icc_set_bw(struct icc_path *path, u32 avg_bw, u32 peak_bw)
{
	struct icc_node *node;
	u32 old_avg, old_peak;
	size_t i;
	int ret;

	if (!path || !path->num_nodes)
		return 0;

	mutex_lock(&icc_lock);

	old_avg = path->reqs[0].avg_bw;
	old_peak = path->reqs[0].peak_bw;

	for (i = 0; i < path->num_nodes; i++) {
		node = path->reqs[i].node;

		/* update the consumer request for this path */
		path->reqs[i].avg_bw = avg_bw;
		path->reqs[i].peak_bw = peak_bw;

		/* aggregate requests for this node */
		aggregate_requests(node);
	}

	ret = apply_constraints(path);
	if (ret) {
		pr_debug("interconnect: error applying constraints (%d)\n",
			 ret);

		for (i = 0; i < path->num_nodes; i++) {
			node = path->reqs[i].node;
			path->reqs[i].avg_bw = old_avg;
			path->reqs[i].peak_bw = old_peak;
			aggregate_requests(node);
		}
		apply_constraints(path);
	}

	mutex_unlock(&icc_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(icc_set_bw);
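
/*
 * A rough sketch of typical consumer usage (the path name and bandwidth
 * values are illustrative, error handling trimmed; values are in kBps,
 * average first, then peak):
 *
 *	struct icc_path *path;
 *
 *	path = of_icc_get(dev, "cpu-mem");
 *	if (IS_ERR(path))
 *		return PTR_ERR(path);
 *
 *	icc_set_bw(path, 800000, 1600000);
 *	...
 *	icc_put(path);
 *
 * A NULL path (missing "interconnects" property) is accepted by both
 * icc_set_bw() and icc_put(), so consumers do not need a special case.
 */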

/**
 * icc_get() - return a handle for path between two endpoints
 * @dev: the device requesting the path
 * @src_id: source device port id
 * @dst_id: destination device port id
 *
 * This function will search for a path between two endpoints and return an
 * icc_path handle on success. Use icc_put() to release
 * constraints when they are not needed anymore.
 * If the interconnect API is disabled, NULL is returned so that consumer
 * drivers still build. Drivers may handle this case explicitly, but are not
 * required to.
 *
 * Return: icc_path pointer on success, ERR_PTR() on error or NULL if the
 * interconnect API is disabled.
 */
struct icc_path *icc_get(struct device *dev, const int src_id, const int dst_id)
{
	struct icc_node *src, *dst;
	struct icc_path *path = ERR_PTR(-EPROBE_DEFER);

	mutex_lock(&icc_lock);

	src = node_find(src_id);
	if (!src)
		goto out;

	dst = node_find(dst_id);
	if (!dst)
		goto out;

	path = path_find(dev, src, dst);
	if (IS_ERR(path))
		dev_err(dev, "%s: invalid path=%ld\n", __func__, PTR_ERR(path));

out:
	mutex_unlock(&icc_lock);
	return path;
}
EXPORT_SYMBOL_GPL(icc_get);
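
/*
 * icc_get() is the non-DT variant of of_icc_get(): the endpoints are chosen
 * by their global node IDs, typically taken from platform data. An
 * illustrative call with made-up IDs:
 *
 *	path = icc_get(dev, MASTER_FOO_ID, SLAVE_BAR_ID);
 *
 * The returned path is then used with icc_set_bw() and icc_put() exactly as
 * in the sketch above.
 */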

/**
 * icc_put() - release the reference to the icc_path
 * @path: interconnect path
 *
 * Use this function to release the constraints on a path when the path is
 * no longer needed. The constraints will be re-aggregated.
 */
void icc_put(struct icc_path *path)
{
	struct icc_node *node;
	size_t i;
	int ret;

	if (!path || WARN_ON(IS_ERR(path)))
		return;

	ret = icc_set_bw(path, 0, 0);
	if (ret)
		pr_err("%s: error (%d)\n", __func__, ret);

	mutex_lock(&icc_lock);
	for (i = 0; i < path->num_nodes; i++) {
		node = path->reqs[i].node;
		hlist_del(&path->reqs[i].req_node);
		if (!WARN_ON(!node->provider->users))
			node->provider->users--;
	}
	mutex_unlock(&icc_lock);

	kfree(path);
}
EXPORT_SYMBOL_GPL(icc_put);

static struct icc_node *icc_node_create_nolock(int id)
{
	struct icc_node *node;

	/* check if node already exists */
	node = node_find(id);
	if (node)
		return node;

	node = kzalloc(sizeof(*node), GFP_KERNEL);
	if (!node)
		return ERR_PTR(-ENOMEM);

	id = idr_alloc(&icc_idr, node, id, id + 1, GFP_KERNEL);
	if (id < 0) {
		WARN(1, "%s: couldn't get idr\n", __func__);
		kfree(node);
		return ERR_PTR(id);
	}

	node->id = id;

	return node;
}

/**
 * icc_node_create() - create a node
 * @id: node id
 *
 * Return: icc_node pointer on success, or ERR_PTR() on error
 */
struct icc_node *icc_node_create(int id)
{
	struct icc_node *node;

	mutex_lock(&icc_lock);

	node = icc_node_create_nolock(id);

	mutex_unlock(&icc_lock);

	return node;
}
EXPORT_SYMBOL_GPL(icc_node_create);

/**
 * icc_node_destroy() - destroy a node
 * @id: node id
 */
void icc_node_destroy(int id)
{
	struct icc_node *node;

	mutex_lock(&icc_lock);

	node = node_find(id);
	if (node) {
		idr_remove(&icc_idr, node->id);
		WARN_ON(!hlist_empty(&node->req_list));
	}

	mutex_unlock(&icc_lock);

	kfree(node);
}
EXPORT_SYMBOL_GPL(icc_node_destroy);

/**
 * icc_link_create() - create a link between two nodes
 * @node: pointer to the source node
 * @dst_id: destination node id
 *
 * Create a link between two nodes. The nodes might belong to different
 * interconnect providers and the @dst_id node might not exist yet (if its
 * provider driver has not probed). In that case a placeholder node is
 * created for @dst_id and the rest of its data is filled in when the actual
 * provider driver is probed.
 *
 * Return: 0 on success, or an error code otherwise
 */
int icc_link_create(struct icc_node *node, const int dst_id)
{
	struct icc_node *dst;
	struct icc_node **new;
	int ret = 0;

	if (!node->provider)
		return -EINVAL;

	mutex_lock(&icc_lock);

	dst = node_find(dst_id);
	if (!dst) {
		dst = icc_node_create_nolock(dst_id);

		if (IS_ERR(dst)) {
			ret = PTR_ERR(dst);
			goto out;
		}
	}

	new = krealloc(node->links,
		       (node->num_links + 1) * sizeof(*node->links),
		       GFP_KERNEL);
	if (!new) {
		ret = -ENOMEM;
		goto out;
	}

	node->links = new;
	node->links[node->num_links++] = dst;

out:
	mutex_unlock(&icc_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(icc_link_create);

/**
 * icc_link_destroy() - destroy a link between two nodes
 * @src: pointer to source node
 * @dst: pointer to destination node
 *
 * Return: 0 on success, or an error code otherwise
 */
int icc_link_destroy(struct icc_node *src, struct icc_node *dst)
{
	struct icc_node **new;
	size_t slot;
	int ret = 0;

	if (IS_ERR_OR_NULL(src))
		return -EINVAL;

	if (IS_ERR_OR_NULL(dst))
		return -EINVAL;

	mutex_lock(&icc_lock);

	for (slot = 0; slot < src->num_links; slot++)
		if (src->links[slot] == dst)
			break;

	if (WARN_ON(slot == src->num_links)) {
		ret = -ENXIO;
		goto out;
	}

	src->links[slot] = src->links[--src->num_links];

	new = krealloc(src->links, src->num_links * sizeof(*src->links),
		       GFP_KERNEL);
	if (new)
		src->links = new;

out:
	mutex_unlock(&icc_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(icc_link_destroy);

/**
 * icc_node_add() - add interconnect node to interconnect provider
 * @node: pointer to the interconnect node
 * @provider: pointer to the interconnect provider
 */
void icc_node_add(struct icc_node *node, struct icc_provider *provider)
{
	mutex_lock(&icc_lock);

	node->provider = provider;
	list_add_tail(&node->node_list, &provider->nodes);

	mutex_unlock(&icc_lock);
}
EXPORT_SYMBOL_GPL(icc_node_add);

/**
 * icc_node_del() - delete interconnect node from interconnect provider
 * @node: pointer to the interconnect node
 */
void icc_node_del(struct icc_node *node)
{
	mutex_lock(&icc_lock);

	list_del(&node->node_list);

	mutex_unlock(&icc_lock);
}
EXPORT_SYMBOL_GPL(icc_node_del);

/**
 * icc_provider_add() - add a new interconnect provider
 * @provider: the interconnect provider that will be added into topology
 *
 * Return: 0 on success, or an error code otherwise
 */
int icc_provider_add(struct icc_provider *provider)
{
	if (WARN_ON(!provider->set))
		return -EINVAL;
	if (WARN_ON(!provider->xlate))
		return -EINVAL;

	mutex_lock(&icc_lock);

	INIT_LIST_HEAD(&provider->nodes);
	list_add_tail(&provider->provider_list, &icc_providers);

	mutex_unlock(&icc_lock);

	dev_dbg(provider->dev, "interconnect provider added to topology\n");

	return 0;
}
EXPORT_SYMBOL_GPL(icc_provider_add);
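
/*
 * A rough provider registration sketch built from the functions above.
 * The callback names, node ID macros and node name are illustrative and
 * error handling is trimmed:
 *
 *	provider->dev = &pdev->dev;
 *	provider->set = foo_set;
 *	provider->aggregate = foo_aggregate;
 *	provider->xlate = of_icc_xlate_onecell;
 *	provider->data = onecell_data;
 *	icc_provider_add(provider);
 *
 *	node = icc_node_create(MASTER_FOO_ID);
 *	node->name = "master_foo";
 *	icc_node_add(node, provider);
 *	icc_link_create(node, SLAVE_BAR_ID);
 *
 * On removal, a provider is expected to delete its nodes (icc_node_del()
 * plus icc_node_destroy()) before calling icc_provider_del(), which refuses
 * to remove a provider that still has nodes or users.
 */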

/**
 * icc_provider_del() - delete previously added interconnect provider
 * @provider: the interconnect provider that will be removed from topology
 *
 * Return: 0 on success, or an error code otherwise
 */
int icc_provider_del(struct icc_provider *provider)
{
	mutex_lock(&icc_lock);
	if (provider->users) {
		pr_warn("interconnect provider still has %d users\n",
			provider->users);
		mutex_unlock(&icc_lock);
		return -EBUSY;
	}

	if (!list_empty(&provider->nodes)) {
		pr_warn("interconnect provider still has nodes\n");
		mutex_unlock(&icc_lock);
		return -EBUSY;
	}

	list_del(&provider->provider_list);
	mutex_unlock(&icc_lock);

	return 0;
}
EXPORT_SYMBOL_GPL(icc_provider_del);

static int __init icc_init(void)
{
	icc_debugfs_dir = debugfs_create_dir("interconnect", NULL);
	debugfs_create_file("interconnect_summary", 0444,
			    icc_debugfs_dir, NULL, &icc_summary_fops);
	return 0;
}

static void __exit icc_exit(void)
{
	debugfs_remove_recursive(icc_debugfs_dir);
}
module_init(icc_init);
module_exit(icc_exit);

MODULE_AUTHOR("Georgi Djakov <georgi.djakov@linaro.org>");
MODULE_DESCRIPTION("Interconnect Driver Core");
MODULE_LICENSE("GPL v2");