xref: /openbmc/linux/arch/powerpc/kernel/cacheinfo.c (revision b627b4ed)
/*
 * Processor cache information made available to userspace via sysfs;
 * intended to be compatible with the x86 intel_cacheinfo implementation.
 *
 * Copyright 2008 IBM Corporation
 * Author: Nathan Lynch
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 */

#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/kobject.h>
#include <linux/list.h>
#include <linux/notifier.h>
#include <linux/of.h>
#include <linux/percpu.h>
#include <linux/slab.h>		/* kmalloc/kzalloc/kfree */
#include <asm/prom.h>

#include "cacheinfo.h"

/* per-cpu object for tracking:
 * - a "cache" kobject for the top-level directory
 * - a list of "index" objects representing the cpu's local cache hierarchy
 */
struct cache_dir {
	struct kobject *kobj; /* bare (not embedded) kobject for cache
			       * directory */
	struct cache_index_dir *index; /* list of index objects */
};
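
/* Added commentary (not in the original source): when everything
 * succeeds, these objects back a sysfs layout of the form
 *
 *	/sys/devices/system/cpu/cpuN/cache/
 *	/sys/devices/system/cpu/cpuN/cache/index0/	(e.g. L1 data)
 *	/sys/devices/system/cpu/cpuN/cache/index1/	(e.g. L1 instruction)
 *	/sys/devices/system/cpu/cpuN/cache/index2/	(e.g. L2)
 *
 * with one indexM directory per cache visible to cpuN, in local-list
 * order (see cacheinfo_sysfs_populate()).
 */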

/* "index" object: each cpu's cache directory has an index
 * subdirectory corresponding to a cache object associated with the
 * cpu.  This object's lifetime is managed via the embedded kobject.
 */
struct cache_index_dir {
	struct kobject kobj;
	struct cache_index_dir *next; /* next index in parent directory */
	struct cache *cache;
};

/* Template for determining which OF properties to query for a given
 * cache type */
struct cache_type_info {
	const char *name;
	const char *size_prop;

	/* Allow for both [di]-cache-line-size and
	 * [di]-cache-block-size properties.  According to the PowerPC
	 * Processor binding, -line-size should be provided if it
	 * differs from the cache block size (the unit operated on by
	 * cache block instructions), so we look for -line-size first.
	 * See cache_get_line_size(). */

	const char *line_size_props[2];
	const char *nr_sets_prop;
};

/* These are used to index the cache_type_info array. */
#define CACHE_TYPE_UNIFIED     0
#define CACHE_TYPE_INSTRUCTION 1
#define CACHE_TYPE_DATA        2

static const struct cache_type_info cache_type_info[] = {
	{
		/* The PowerPC Processor binding says the [di]-cache-*
		 * properties must be equal on unified caches, so just
		 * use the d-cache properties. */
		.name            = "Unified",
		.size_prop       = "d-cache-size",
		.line_size_props = { "d-cache-line-size",
				     "d-cache-block-size", },
		.nr_sets_prop    = "d-cache-sets",
	},
	{
		.name            = "Instruction",
		.size_prop       = "i-cache-size",
		.line_size_props = { "i-cache-line-size",
				     "i-cache-block-size", },
		.nr_sets_prop    = "i-cache-sets",
	},
	{
		.name            = "Data",
		.size_prop       = "d-cache-size",
		.line_size_props = { "d-cache-line-size",
				     "d-cache-block-size", },
		.nr_sets_prop    = "d-cache-sets",
	},
};
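
/* Hypothetical device tree fragment (added for illustration; the
 * property names are the ones queried above, the values are invented)
 * describing a 32KB, 8-way L1 data cache with 128-byte lines:
 *
 *	d-cache-size      = <0x8000>;	// 32768 bytes
 *	d-cache-line-size = <128>;
 *	d-cache-sets      = <32>;	// 32768 / (8 ways * 128 bytes)
 *
 * A unified cache node would also carry an empty "cache-unified"
 * property (see cache_node_is_unified() below).
 */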

/* Cache object: each instance of this corresponds to a distinct cache
 * in the system.  There are separate objects for Harvard caches: one
 * each for instruction and data, and each refers to the same OF node.
 * The refcount of the OF node is elevated for the lifetime of the
 * cache object.  A cache object is released when its shared_cpu_map
 * becomes empty (see cache_cpu_clear).
 *
 * A cache object is on two lists: an unsorted global list
 * (cache_list) of cache objects; and a singly-linked list
 * representing the local cache hierarchy, which is ordered by level
 * (e.g. L1d -> L1i -> L2 -> L3).
 */
struct cache {
	struct device_node *ofnode;    /* OF node for this cache, may be cpu */
	struct cpumask shared_cpu_map; /* online CPUs using this cache */
	int type;                      /* split cache disambiguation */
	int level;                     /* level not explicit in device tree */
	struct list_head list;         /* global list of cache objects */
	struct cache *next_local;      /* next cache of >= level */
};
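
/* Illustrative sketch (added commentary): a cpu with a split L1 in
 * front of a shared L2 yields three cache objects on the cpu's local
 * list, with the split pair sharing one OF node:
 *
 *	L1d -> L1i -> L2 -> NULL	(linked via next_local)
 *	 \______/
 *	same ofnode
 *
 * All three objects also sit on the global cache_list.
 */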

static DEFINE_PER_CPU(struct cache_dir *, cache_dir_pcpu);

/* traversal/modification of this list occurs only at cpu hotplug time;
 * access is serialized by cpu hotplug locking
 */
static LIST_HEAD(cache_list);

static struct cache_index_dir *kobj_to_cache_index_dir(struct kobject *k)
{
	return container_of(k, struct cache_index_dir, kobj);
}

static const char *cache_type_string(const struct cache *cache)
{
	return cache_type_info[cache->type].name;
}

static void __cpuinit cache_init(struct cache *cache, int type, int level, struct device_node *ofnode)
{
	cache->type = type;
	cache->level = level;
	cache->ofnode = of_node_get(ofnode);
	INIT_LIST_HEAD(&cache->list);
	list_add(&cache->list, &cache_list);
}

static struct cache *__cpuinit new_cache(int type, int level, struct device_node *ofnode)
{
	struct cache *cache;

	cache = kzalloc(sizeof(*cache), GFP_KERNEL);
	if (cache)
		cache_init(cache, type, level, ofnode);

	return cache;
}

static void release_cache_debugcheck(struct cache *cache)
{
	struct cache *iter;

	list_for_each_entry(iter, &cache_list, list)
		WARN_ONCE(iter->next_local == cache,
			  "cache for %s(%s) refers to cache for %s(%s)\n",
			  iter->ofnode->full_name,
			  cache_type_string(iter),
			  cache->ofnode->full_name,
			  cache_type_string(cache));
}

static void release_cache(struct cache *cache)
{
	if (!cache)
		return;

	pr_debug("freeing L%d %s cache for %s\n", cache->level,
		 cache_type_string(cache), cache->ofnode->full_name);

	release_cache_debugcheck(cache);
	list_del(&cache->list);
	of_node_put(cache->ofnode);
	kfree(cache);
}

static void cache_cpu_set(struct cache *cache, int cpu)
{
	struct cache *next = cache;

	while (next) {
		WARN_ONCE(cpumask_test_cpu(cpu, &next->shared_cpu_map),
			  "CPU %i already accounted in %s(%s)\n",
			  cpu, next->ofnode->full_name,
			  cache_type_string(next));
		cpumask_set_cpu(cpu, &next->shared_cpu_map);
		next = next->next_local;
	}
}

static int cache_size(const struct cache *cache, unsigned int *ret)
{
	const char *propname;
	const u32 *cache_size;

	propname = cache_type_info[cache->type].size_prop;

	cache_size = of_get_property(cache->ofnode, propname, NULL);
	if (!cache_size)
		return -ENODEV;

	*ret = *cache_size;
	return 0;
}

static int cache_size_kb(const struct cache *cache, unsigned int *ret)
{
	unsigned int size;

	if (cache_size(cache, &size))
		return -ENODEV;

	*ret = size / 1024;
	return 0;
}

/* not cache_line_size() because that's a macro in include/linux/cache.h */
static int cache_get_line_size(const struct cache *cache, unsigned int *ret)
{
	const u32 *line_size;
	int i, lim;

	lim = ARRAY_SIZE(cache_type_info[cache->type].line_size_props);

	for (i = 0; i < lim; i++) {
		const char *propname;

		propname = cache_type_info[cache->type].line_size_props[i];
		line_size = of_get_property(cache->ofnode, propname, NULL);
		if (line_size)
			break;
	}

	if (!line_size)
		return -ENODEV;

	*ret = *line_size;
	return 0;
}

static int cache_nr_sets(const struct cache *cache, unsigned int *ret)
{
	const char *propname;
	const u32 *nr_sets;

	propname = cache_type_info[cache->type].nr_sets_prop;

	nr_sets = of_get_property(cache->ofnode, propname, NULL);
	if (!nr_sets)
		return -ENODEV;

	*ret = *nr_sets;
	return 0;
}

static int cache_associativity(const struct cache *cache, unsigned int *ret)
{
	unsigned int line_size;
	unsigned int nr_sets;
	unsigned int size;

	if (cache_nr_sets(cache, &nr_sets))
		goto err;

	/* If the cache is fully associative, there is no need to
	 * check the other properties.
	 */
	if (nr_sets == 1) {
		*ret = 0;
		return 0;
	}

	if (cache_get_line_size(cache, &line_size))
		goto err;
	if (cache_size(cache, &size))
		goto err;

	if (!(nr_sets > 0 && size > 0 && line_size > 0))
		goto err;

	*ret = (size / nr_sets) / line_size;
	return 0;
err:
	return -ENODEV;
}
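
/* Worked example (added commentary): with the hypothetical values used
 * earlier -- size = 32768, nr_sets = 32, line_size = 128 -- this yields
 * (32768 / 32) / 128 = 8 ways.  An nr_sets value of 1 marks a fully
 * associative cache, which is reported as 0 ways.
 */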

/* helper for dealing with split caches */
static struct cache *cache_find_first_sibling(struct cache *cache)
{
	struct cache *iter;

	if (cache->type == CACHE_TYPE_UNIFIED)
		return cache;

	list_for_each_entry(iter, &cache_list, list)
		if (iter->ofnode == cache->ofnode && iter->next_local == cache)
			return iter;

	return cache;
}
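
/* Added commentary: for a split L1 created by
 * cache_do_one_devnode_split() below, dcache->next_local == icache and
 * both share an OF node, so a lookup that lands on the icache object is
 * redirected to the dcache at the head of the pair.
 */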

/* return the first cache on a local list matching node */
static struct cache *cache_lookup_by_node(const struct device_node *node)
{
	struct cache *cache = NULL;
	struct cache *iter;

	list_for_each_entry(iter, &cache_list, list) {
		if (iter->ofnode != node)
			continue;
		cache = cache_find_first_sibling(iter);
		break;
	}

	return cache;
}

static bool cache_node_is_unified(const struct device_node *np)
{
	return of_get_property(np, "cache-unified", NULL);
}

static struct cache *__cpuinit cache_do_one_devnode_unified(struct device_node *node, int level)
{
	struct cache *cache;

	pr_debug("creating L%d ucache for %s\n", level, node->full_name);

	cache = new_cache(CACHE_TYPE_UNIFIED, level, node);

	return cache;
}

static struct cache *__cpuinit cache_do_one_devnode_split(struct device_node *node, int level)
{
	struct cache *dcache, *icache;

	pr_debug("creating L%d dcache and icache for %s\n", level,
		 node->full_name);

	dcache = new_cache(CACHE_TYPE_DATA, level, node);
	icache = new_cache(CACHE_TYPE_INSTRUCTION, level, node);

	if (!dcache || !icache)
		goto err;

	dcache->next_local = icache;

	return dcache;
err:
	release_cache(dcache);
	release_cache(icache);
	return NULL;
}

static struct cache *__cpuinit cache_do_one_devnode(struct device_node *node, int level)
{
	struct cache *cache;

	if (cache_node_is_unified(node))
		cache = cache_do_one_devnode_unified(node, level);
	else
		cache = cache_do_one_devnode_split(node, level);

	return cache;
}

static struct cache *__cpuinit cache_lookup_or_instantiate(struct device_node *node, int level)
{
	struct cache *cache;

	cache = cache_lookup_by_node(node);

	WARN_ONCE(cache && cache->level != level,
		  "cache level mismatch on lookup (got %d, expected %d)\n",
		  cache->level, level);

	if (!cache)
		cache = cache_do_one_devnode(node, level);

	return cache;
}

static void __cpuinit link_cache_lists(struct cache *smaller, struct cache *bigger)
{
	while (smaller->next_local) {
		if (smaller->next_local == bigger)
			return; /* already linked */
		smaller = smaller->next_local;
	}

	smaller->next_local = bigger;
}

static void __cpuinit do_subsidiary_caches_debugcheck(struct cache *cache)
{
	WARN_ON_ONCE(cache->level != 1);
	WARN_ON_ONCE(strcmp(cache->ofnode->type, "cpu"));
}

static void __cpuinit do_subsidiary_caches(struct cache *cache)
{
	struct device_node *subcache_node;
	int level = cache->level;

	do_subsidiary_caches_debugcheck(cache);

	while ((subcache_node = of_find_next_cache_node(cache->ofnode))) {
		struct cache *subcache;

		level++;
		subcache = cache_lookup_or_instantiate(subcache_node, level);
		of_node_put(subcache_node);
		if (!subcache)
			break;

		link_cache_lists(cache, subcache);
		cache = subcache;
	}
}
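
/* Added commentary: on powerpc, of_find_next_cache_node() follows the
 * "l2-cache" (or "next-level-cache") phandle from one node to the next
 * level, so this loop walks cpu -> L2 -> L3 -> ... until the chain
 * ends, assigning level numbers as it goes since the device tree does
 * not encode them explicitly.
 */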

static struct cache *__cpuinit cache_chain_instantiate(unsigned int cpu_id)
{
	struct device_node *cpu_node;
	struct cache *cpu_cache = NULL;

	pr_debug("creating cache object(s) for CPU %i\n", cpu_id);

	cpu_node = of_get_cpu_node(cpu_id, NULL);
	WARN_ONCE(!cpu_node, "no OF node found for CPU %i\n", cpu_id);
	if (!cpu_node)
		goto out;

	cpu_cache = cache_lookup_or_instantiate(cpu_node, 1);
	if (!cpu_cache)
		goto out;

	do_subsidiary_caches(cpu_cache);

	cache_cpu_set(cpu_cache, cpu_id);
out:
	of_node_put(cpu_node);

	return cpu_cache;
}

static struct cache_dir *__cpuinit cacheinfo_create_cache_dir(unsigned int cpu_id)
{
	struct cache_dir *cache_dir;
	struct sys_device *sysdev;
	struct kobject *kobj = NULL;

	sysdev = get_cpu_sysdev(cpu_id);
	WARN_ONCE(!sysdev, "no sysdev for CPU %i\n", cpu_id);
	if (!sysdev)
		goto err;

	kobj = kobject_create_and_add("cache", &sysdev->kobj);
	if (!kobj)
		goto err;

	cache_dir = kzalloc(sizeof(*cache_dir), GFP_KERNEL);
	if (!cache_dir)
		goto err;

	cache_dir->kobj = kobj;

	WARN_ON_ONCE(per_cpu(cache_dir_pcpu, cpu_id) != NULL);

	per_cpu(cache_dir_pcpu, cpu_id) = cache_dir;

	return cache_dir;
err:
	kobject_put(kobj);
	return NULL;
}

static void cache_index_release(struct kobject *kobj)
{
	struct cache_index_dir *index;

	index = kobj_to_cache_index_dir(kobj);

	pr_debug("freeing index directory for L%d %s cache\n",
		 index->cache->level, cache_type_string(index->cache));

	kfree(index);
}

static ssize_t cache_index_show(struct kobject *k, struct attribute *attr, char *buf)
{
	struct kobj_attribute *kobj_attr;

	kobj_attr = container_of(attr, struct kobj_attribute, attr);

	return kobj_attr->show(k, kobj_attr, buf);
}

static struct cache *index_kobj_to_cache(struct kobject *k)
{
	struct cache_index_dir *index;

	index = kobj_to_cache_index_dir(k);

	return index->cache;
}

static ssize_t size_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
{
	unsigned int size_kb;
	struct cache *cache;

	cache = index_kobj_to_cache(k);

	if (cache_size_kb(cache, &size_kb))
		return -ENODEV;

	return sprintf(buf, "%uK\n", size_kb);
}

static struct kobj_attribute cache_size_attr =
	__ATTR(size, 0444, size_show, NULL);

static ssize_t line_size_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
{
	unsigned int line_size;
	struct cache *cache;

	cache = index_kobj_to_cache(k);

	if (cache_get_line_size(cache, &line_size))
		return -ENODEV;

	return sprintf(buf, "%u\n", line_size);
}

static struct kobj_attribute cache_line_size_attr =
	__ATTR(coherency_line_size, 0444, line_size_show, NULL);

static ssize_t nr_sets_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
{
	unsigned int nr_sets;
	struct cache *cache;

	cache = index_kobj_to_cache(k);

	if (cache_nr_sets(cache, &nr_sets))
		return -ENODEV;

	return sprintf(buf, "%u\n", nr_sets);
}

static struct kobj_attribute cache_nr_sets_attr =
	__ATTR(number_of_sets, 0444, nr_sets_show, NULL);

static ssize_t associativity_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
{
	unsigned int associativity;
	struct cache *cache;

	cache = index_kobj_to_cache(k);

	if (cache_associativity(cache, &associativity))
		return -ENODEV;

	return sprintf(buf, "%u\n", associativity);
}

static struct kobj_attribute cache_assoc_attr =
	__ATTR(ways_of_associativity, 0444, associativity_show, NULL);

static ssize_t type_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
{
	struct cache *cache;

	cache = index_kobj_to_cache(k);

	return sprintf(buf, "%s\n", cache_type_string(cache));
}

static struct kobj_attribute cache_type_attr =
	__ATTR(type, 0444, type_show, NULL);

static ssize_t level_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
{
	struct cache_index_dir *index;
	struct cache *cache;

	index = kobj_to_cache_index_dir(k);
	cache = index->cache;

	return sprintf(buf, "%d\n", cache->level);
}

static struct kobj_attribute cache_level_attr =
	__ATTR(level, 0444, level_show, NULL);

static ssize_t shared_cpu_map_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
{
	struct cache_index_dir *index;
	struct cache *cache;
	int len;
	int n = 0;

	index = kobj_to_cache_index_dir(k);
	cache = index->cache;
	len = PAGE_SIZE - 2;

	if (len > 1) {
		n = cpumask_scnprintf(buf, len, &cache->shared_cpu_map);
		buf[n++] = '\n';
		buf[n] = '\0';
	}
	return n;
}

static struct kobj_attribute cache_shared_cpu_map_attr =
	__ATTR(shared_cpu_map, 0444, shared_cpu_map_show, NULL);
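
/* Illustrative session (added commentary; the values are hypothetical
 * and the shared_cpu_map field width depends on the kernel's cpumask
 * size):
 *
 *	$ cd /sys/devices/system/cpu/cpu0/cache/index0
 *	$ cat type level size coherency_line_size shared_cpu_map
 *	Data
 *	1
 *	32K
 *	128
 *	00000003
 */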

/* Attributes which should always be created -- the kobject/sysfs core
 * does this automatically via kobj_type->default_attrs.  This is the
 * minimum data required to uniquely identify a cache.
 */
static struct attribute *cache_index_default_attrs[] = {
	&cache_type_attr.attr,
	&cache_level_attr.attr,
	&cache_shared_cpu_map_attr.attr,
	NULL,
};

/* Attributes which should be created if the cache device node has the
 * right properties -- see cacheinfo_create_index_opt_attrs().
 */
static struct kobj_attribute *cache_index_opt_attrs[] = {
	&cache_size_attr,
	&cache_line_size_attr,
	&cache_nr_sets_attr,
	&cache_assoc_attr,
};

static struct sysfs_ops cache_index_ops = {
	.show = cache_index_show,
};

static struct kobj_type cache_index_type = {
	.release = cache_index_release,
	.sysfs_ops = &cache_index_ops,
	.default_attrs = cache_index_default_attrs,
};
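
/* Added commentary: the embedded kobject owns each index object's
 * lifetime.  The final kobject_put() -- from remove_index_dirs() on
 * cpu offline, or from the error path in cacheinfo_create_index_dir()
 * -- ends up in cache_index_release(), the one place a cache_index_dir
 * is freed.
 */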

static void __cpuinit cacheinfo_create_index_opt_attrs(struct cache_index_dir *dir)
{
	const char *cache_name;
	const char *cache_type;
	struct cache *cache;
	char *buf;
	int i;

	buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!buf)
		return;

	cache = dir->cache;
	cache_name = cache->ofnode->full_name;
	cache_type = cache_type_string(cache);

	/* We don't want to create an attribute that can't provide a
	 * meaningful value.  Check the return value of each optional
	 * attribute's ->show method before registering the
	 * attribute.
	 */
	for (i = 0; i < ARRAY_SIZE(cache_index_opt_attrs); i++) {
		struct kobj_attribute *attr;
		ssize_t rc;

		attr = cache_index_opt_attrs[i];

		rc = attr->show(&dir->kobj, attr, buf);
		if (rc <= 0) {
			pr_debug("not creating %s attribute for "
				 "%s(%s) (rc = %zd)\n",
				 attr->attr.name, cache_name,
				 cache_type, rc);
			continue;
		}
		if (sysfs_create_file(&dir->kobj, &attr->attr))
			pr_debug("could not create %s attribute for %s(%s)\n",
				 attr->attr.name, cache_name, cache_type);
	}

	kfree(buf);
}

static void __cpuinit cacheinfo_create_index_dir(struct cache *cache, int index, struct cache_dir *cache_dir)
{
	struct cache_index_dir *index_dir;
	int rc;

	index_dir = kzalloc(sizeof(*index_dir), GFP_KERNEL);
	if (!index_dir)
		return;

	index_dir->cache = cache;

	rc = kobject_init_and_add(&index_dir->kobj, &cache_index_type,
				  cache_dir->kobj, "index%d", index);
	if (rc) {
		/* Once kobject_init_and_add() has run, teardown must go
		 * through kobject_put(), which frees index_dir via
		 * cache_index_release(); a bare kfree() here would leak
		 * the kobject's allocated name. */
		kobject_put(&index_dir->kobj);
		return;
	}

	index_dir->next = cache_dir->index;
	cache_dir->index = index_dir;

	cacheinfo_create_index_opt_attrs(index_dir);
}

static void __cpuinit cacheinfo_sysfs_populate(unsigned int cpu_id, struct cache *cache_list)
{
	struct cache_dir *cache_dir;
	struct cache *cache;
	int index = 0;

	cache_dir = cacheinfo_create_cache_dir(cpu_id);
	if (!cache_dir)
		return;

	cache = cache_list;
	while (cache) {
		cacheinfo_create_index_dir(cache, index, cache_dir);
		index++;
		cache = cache->next_local;
	}
}

void __cpuinit cacheinfo_cpu_online(unsigned int cpu_id)
{
	struct cache *cache;

	cache = cache_chain_instantiate(cpu_id);
	if (!cache)
		return;

	cacheinfo_sysfs_populate(cpu_id, cache);
}
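
/* Added commentary: this entry point and cacheinfo_cpu_offline() below
 * are expected to be called from the cpu hotplug path (on this vintage
 * of the kernel, from register_cpu_online()/unregister_cpu_online() in
 * arch/powerpc/kernel/sysfs.c), which provides the serialization that
 * the cache_list comment above relies on.
 */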

#ifdef CONFIG_HOTPLUG_CPU /* functions needed for cpu offline */

static struct cache *cache_lookup_by_cpu(unsigned int cpu_id)
{
	struct device_node *cpu_node;
	struct cache *cache;

	cpu_node = of_get_cpu_node(cpu_id, NULL);
	WARN_ONCE(!cpu_node, "no OF node found for CPU %i\n", cpu_id);
	if (!cpu_node)
		return NULL;

	cache = cache_lookup_by_node(cpu_node);
	of_node_put(cpu_node);

	return cache;
}

static void remove_index_dirs(struct cache_dir *cache_dir)
{
	struct cache_index_dir *index;

	index = cache_dir->index;

	while (index) {
		struct cache_index_dir *next;

		next = index->next;
		kobject_put(&index->kobj);
		index = next;
	}
}

static void remove_cache_dir(struct cache_dir *cache_dir)
{
	remove_index_dirs(cache_dir);

	kobject_put(cache_dir->kobj);

	kfree(cache_dir);
}

static void cache_cpu_clear(struct cache *cache, int cpu)
{
	while (cache) {
		struct cache *next = cache->next_local;

		WARN_ONCE(!cpumask_test_cpu(cpu, &cache->shared_cpu_map),
			  "CPU %i not accounted in %s(%s)\n",
			  cpu, cache->ofnode->full_name,
			  cache_type_string(cache));

		cpumask_clear_cpu(cpu, &cache->shared_cpu_map);

		/* Release the cache object if all the cpus using it
		 * are offline */
		if (cpumask_empty(&cache->shared_cpu_map))
			release_cache(cache);

		cache = next;
	}
}

void cacheinfo_cpu_offline(unsigned int cpu_id)
{
	struct cache_dir *cache_dir;
	struct cache *cache;

	/* Prevent userspace from seeing inconsistent state - remove
	 * the sysfs hierarchy first */
	cache_dir = per_cpu(cache_dir_pcpu, cpu_id);

	/* careful, sysfs population may have failed */
	if (cache_dir)
		remove_cache_dir(cache_dir);

	per_cpu(cache_dir_pcpu, cpu_id) = NULL;

	/* clear the CPU's bit in its cache chain, possibly freeing
	 * cache objects */
	cache = cache_lookup_by_cpu(cpu_id);
	if (cache)
		cache_cpu_clear(cache, cpu_id);
}
#endif /* CONFIG_HOTPLUG_CPU */