xref: /openbmc/linux/arch/powerpc/kernel/cacheinfo.c (revision 93d90ad7)
/*
 * Processor cache information made available to userspace via sysfs;
 * intended to be compatible with the x86 intel_cacheinfo implementation.
 *
 * Copyright 2008 IBM Corporation
 * Author: Nathan Lynch
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 */

#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/kernel.h>
#include <linux/kobject.h>
#include <linux/list.h>
#include <linux/notifier.h>
#include <linux/of.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <asm/prom.h>

#include "cacheinfo.h"

/* per-cpu object for tracking:
 * - a "cache" kobject for the top-level directory
 * - a list of "index" objects representing the cpu's local cache hierarchy
 */
struct cache_dir {
	struct kobject *kobj; /* bare (not embedded) kobject for cache
			       * directory */
	struct cache_index_dir *index; /* list of index objects */
};

/* "index" object: each cpu's cache directory has an index
 * subdirectory corresponding to a cache object associated with the
 * cpu.  This object's lifetime is managed via the embedded kobject.
 */
struct cache_index_dir {
	struct kobject kobj;
	struct cache_index_dir *next; /* next index in parent directory */
	struct cache *cache;
};

/* Template for determining which OF properties to query for a given
 * cache type */
struct cache_type_info {
	const char *name;
	const char *size_prop;

	/* Allow for both [di]-cache-line-size and
	 * [di]-cache-block-size properties.  According to the PowerPC
	 * Processor binding, -line-size should be provided if it
	 * differs from the cache block size (that which is operated
	 * on by cache instructions), so we look for -line-size first.
	 * See cache_get_line_size(). */

	const char *line_size_props[2];
	const char *nr_sets_prop;
};

/* These are used to index the cache_type_info array. */
#define CACHE_TYPE_UNIFIED     0
#define CACHE_TYPE_INSTRUCTION 1
#define CACHE_TYPE_DATA        2

static const struct cache_type_info cache_type_info[] = {
	{
		/* PowerPC Processor binding says the [di]-cache-*
		 * properties must be equal on unified caches, so just
		 * use d-cache properties. */
		.name            = "Unified",
		.size_prop       = "d-cache-size",
		.line_size_props = { "d-cache-line-size",
				     "d-cache-block-size", },
		.nr_sets_prop    = "d-cache-sets",
	},
	{
		.name            = "Instruction",
		.size_prop       = "i-cache-size",
		.line_size_props = { "i-cache-line-size",
				     "i-cache-block-size", },
		.nr_sets_prop    = "i-cache-sets",
	},
	{
		.name            = "Data",
		.size_prop       = "d-cache-size",
		.line_size_props = { "d-cache-line-size",
				     "d-cache-block-size", },
		.nr_sets_prop    = "d-cache-sets",
	},
};
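
/* For illustration only: a hypothetical cpu node describing a split
 * 32K/32K L1 might carry the properties named above, e.g.
 *
 *	d-cache-size       = <0x8000>;
 *	d-cache-block-size = <0x80>;
 *	d-cache-sets       = <0x20>;
 *	i-cache-size       = <0x8000>;
 *	i-cache-block-size = <0x80>;
 *	i-cache-sets       = <0x20>;
 *
 * (values are made up; real systems vary).
 */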

/* Cache object: each instance of this corresponds to a distinct cache
 * in the system.  There are separate objects for Harvard caches: one
 * each for instruction and data, and each refers to the same OF node.
 * The refcount of the OF node is elevated for the lifetime of the
 * cache object.  A cache object is released when its shared_cpu_map
 * is cleared (see cache_cpu_clear).
 *
 * A cache object is on two lists: an unsorted global list
 * (cache_list) of cache objects; and a singly-linked list
 * representing the local cache hierarchy, which is ordered by level
 * (e.g. L1d -> L1i -> L2 -> L3).
 */
struct cache {
	struct device_node *ofnode;    /* OF node for this cache, may be cpu */
	struct cpumask shared_cpu_map; /* online CPUs using this cache */
	int type;                      /* split cache disambiguation */
	int level;                     /* level not explicit in device tree */
	struct list_head list;         /* global list of cache objects */
	struct cache *next_local;      /* next cache of >= level */
};
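
/* For example, on a hypothetical cpu with split L1 caches and a
 * unified L2, the local list headed at the L1 data cache would be:
 *
 *	L1d (Data) -> L1i (Instruction) -> L2 (Unified)
 *
 * The L1d and L1i objects share the cpu's OF node; the L2 object
 * refers to the cache node found from the cpu node via
 * of_find_next_cache_node() (see do_subsidiary_caches).
 */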

static DEFINE_PER_CPU(struct cache_dir *, cache_dir_pcpu);

/* traversal/modification of this list occurs only at cpu hotplug time;
 * access is serialized by cpu hotplug locking
 */
static LIST_HEAD(cache_list);

static struct cache_index_dir *kobj_to_cache_index_dir(struct kobject *k)
{
	return container_of(k, struct cache_index_dir, kobj);
}

static const char *cache_type_string(const struct cache *cache)
{
	return cache_type_info[cache->type].name;
}

static void cache_init(struct cache *cache, int type, int level,
		       struct device_node *ofnode)
{
	cache->type = type;
	cache->level = level;
	cache->ofnode = of_node_get(ofnode);
	INIT_LIST_HEAD(&cache->list);
	list_add(&cache->list, &cache_list);
}

static struct cache *new_cache(int type, int level, struct device_node *ofnode)
{
	struct cache *cache;

	cache = kzalloc(sizeof(*cache), GFP_KERNEL);
	if (cache)
		cache_init(cache, type, level, ofnode);

	return cache;
}

static void release_cache_debugcheck(struct cache *cache)
{
	struct cache *iter;

	list_for_each_entry(iter, &cache_list, list)
		WARN_ONCE(iter->next_local == cache,
			  "cache for %s(%s) refers to cache for %s(%s)\n",
			  iter->ofnode->full_name,
			  cache_type_string(iter),
			  cache->ofnode->full_name,
			  cache_type_string(cache));
}

static void release_cache(struct cache *cache)
{
	if (!cache)
		return;

	pr_debug("freeing L%d %s cache for %s\n", cache->level,
		 cache_type_string(cache), cache->ofnode->full_name);

	release_cache_debugcheck(cache);
	list_del(&cache->list);
	of_node_put(cache->ofnode);
	kfree(cache);
}

static void cache_cpu_set(struct cache *cache, int cpu)
{
	struct cache *next = cache;

	while (next) {
		WARN_ONCE(cpumask_test_cpu(cpu, &next->shared_cpu_map),
			  "CPU %i already accounted in %s(%s)\n",
			  cpu, next->ofnode->full_name,
			  cache_type_string(next));
		cpumask_set_cpu(cpu, &next->shared_cpu_map);
		next = next->next_local;
	}
}
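
/* Worked example (hypothetical topology): if an L2 is shared by cpus
 * 0-3, each cpu brought online resolves to the same L2 object via
 * cache_lookup_or_instantiate(), so once all four are up the L2's
 * shared_cpu_map has bits 0-3 set and shared_cpu_map_show() prints
 * "f".
 */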

static int cache_size(const struct cache *cache, unsigned int *ret)
{
	const char *propname;
	const __be32 *cache_size;

	propname = cache_type_info[cache->type].size_prop;

	cache_size = of_get_property(cache->ofnode, propname, NULL);
	if (!cache_size)
		return -ENODEV;

	*ret = of_read_number(cache_size, 1);
	return 0;
}
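
/* of_read_number(..., 1) decodes a single big-endian cell, so a
 * hypothetical "d-cache-size = <0x8000>;" property reads back as
 * 32768 bytes here.
 */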

static int cache_size_kb(const struct cache *cache, unsigned int *ret)
{
	unsigned int size;

	if (cache_size(cache, &size))
		return -ENODEV;

	*ret = size / 1024;
	return 0;
}

/* not cache_line_size() because that's a macro in include/linux/cache.h */
static int cache_get_line_size(const struct cache *cache, unsigned int *ret)
{
	const __be32 *line_size = NULL;
	int i, lim;

	lim = ARRAY_SIZE(cache_type_info[cache->type].line_size_props);

	for (i = 0; i < lim; i++) {
		const char *propname;

		propname = cache_type_info[cache->type].line_size_props[i];
		line_size = of_get_property(cache->ofnode, propname, NULL);
		if (line_size)
			break;
	}

	if (!line_size)
		return -ENODEV;

	*ret = of_read_number(line_size, 1);
	return 0;
}

static int cache_nr_sets(const struct cache *cache, unsigned int *ret)
{
	const char *propname;
	const __be32 *nr_sets;

	propname = cache_type_info[cache->type].nr_sets_prop;

	nr_sets = of_get_property(cache->ofnode, propname, NULL);
	if (!nr_sets)
		return -ENODEV;

	*ret = of_read_number(nr_sets, 1);
	return 0;
}

static int cache_associativity(const struct cache *cache, unsigned int *ret)
{
	unsigned int line_size;
	unsigned int nr_sets;
	unsigned int size;

	if (cache_nr_sets(cache, &nr_sets))
		goto err;

	/* If the cache is fully associative, there is no need to
	 * check the other properties.
	 */
	if (nr_sets == 1) {
		*ret = 0;
		return 0;
	}

	if (cache_get_line_size(cache, &line_size))
		goto err;
	if (cache_size(cache, &size))
		goto err;

	if (!(nr_sets > 0 && size > 0 && line_size > 0))
		goto err;

	*ret = (size / nr_sets) / line_size;
	return 0;
err:
	return -ENODEV;
}
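
/* Worked example with made-up numbers: a 32K cache with 128-byte
 * lines organized into 32 sets yields (32768 / 32) / 128 = 8 ways.
 * The nr_sets == 1 convention above reports fully associative caches
 * as 0 ways.
 */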

/* helper for dealing with split caches */
static struct cache *cache_find_first_sibling(struct cache *cache)
{
	struct cache *iter;

	if (cache->type == CACHE_TYPE_UNIFIED)
		return cache;

	list_for_each_entry(iter, &cache_list, list)
		if (iter->ofnode == cache->ofnode && iter->next_local == cache)
			return iter;

	return cache;
}
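
/* A split cache is represented by two objects sharing one OF node,
 * with the icache reachable from the dcache via next_local (see
 * cache_do_one_devnode_split below), so lookups are steered to the
 * head of that pair.
 */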

/* return the first cache on a local list matching node */
static struct cache *cache_lookup_by_node(const struct device_node *node)
{
	struct cache *cache = NULL;
	struct cache *iter;

	list_for_each_entry(iter, &cache_list, list) {
		if (iter->ofnode != node)
			continue;
		cache = cache_find_first_sibling(iter);
		break;
	}

	return cache;
}

static bool cache_node_is_unified(const struct device_node *np)
{
	/* It is the presence of the "cache-unified" property that
	 * matters, not its value. */
	return of_get_property(np, "cache-unified", NULL);
}

static struct cache *cache_do_one_devnode_unified(struct device_node *node,
						  int level)
{
	struct cache *cache;

	pr_debug("creating L%d ucache for %s\n", level, node->full_name);

	cache = new_cache(CACHE_TYPE_UNIFIED, level, node);

	return cache;
}

static struct cache *cache_do_one_devnode_split(struct device_node *node,
						int level)
{
	struct cache *dcache, *icache;

	pr_debug("creating L%d dcache and icache for %s\n", level,
		 node->full_name);

	dcache = new_cache(CACHE_TYPE_DATA, level, node);
	icache = new_cache(CACHE_TYPE_INSTRUCTION, level, node);

	if (!dcache || !icache)
		goto err;

	dcache->next_local = icache;

	return dcache;
err:
	release_cache(dcache);
	release_cache(icache);
	return NULL;
}

static struct cache *cache_do_one_devnode(struct device_node *node, int level)
{
	struct cache *cache;

	if (cache_node_is_unified(node))
		cache = cache_do_one_devnode_unified(node, level);
	else
		cache = cache_do_one_devnode_split(node, level);

	return cache;
}

static struct cache *cache_lookup_or_instantiate(struct device_node *node,
						 int level)
{
	struct cache *cache;

	cache = cache_lookup_by_node(node);

	WARN_ONCE(cache && cache->level != level,
		  "cache level mismatch on lookup (got %d, expected %d)\n",
		  cache->level, level);

	if (!cache)
		cache = cache_do_one_devnode(node, level);

	return cache;
}

static void link_cache_lists(struct cache *smaller, struct cache *bigger)
{
	while (smaller->next_local) {
		if (smaller->next_local == bigger)
			return; /* already linked */
		smaller = smaller->next_local;
	}

	smaller->next_local = bigger;
}

static void do_subsidiary_caches_debugcheck(struct cache *cache)
{
	WARN_ON_ONCE(cache->level != 1);
	WARN_ON_ONCE(strcmp(cache->ofnode->type, "cpu"));
}

static void do_subsidiary_caches(struct cache *cache)
{
	struct device_node *subcache_node;
	int level = cache->level;

	do_subsidiary_caches_debugcheck(cache);

	while ((subcache_node = of_find_next_cache_node(cache->ofnode))) {
		struct cache *subcache;

		level++;
		subcache = cache_lookup_or_instantiate(subcache_node, level);
		of_node_put(subcache_node);
		if (!subcache)
			break;

		link_cache_lists(cache, subcache);
		cache = subcache;
	}
}
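
/* To illustrate, with a hypothetical device tree along the lines of:
 *
 *	cpu@0 {
 *		...
 *		l2-cache = <&L2>;
 *	};
 *	L2: l2-cache {
 *		cache-unified;
 *		l2-cache = <&L3>;
 *	};
 *	L3: l3-cache {
 *		cache-unified;
 *	};
 *
 * of_find_next_cache_node() follows the phandles, and the walk above
 * instantiates unified L2 and L3 objects and links them after the
 * cpu's L1 cache(s) on the local list.
 */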

static struct cache *cache_chain_instantiate(unsigned int cpu_id)
{
	struct device_node *cpu_node;
	struct cache *cpu_cache = NULL;

	pr_debug("creating cache object(s) for CPU %i\n", cpu_id);

	cpu_node = of_get_cpu_node(cpu_id, NULL);
	WARN_ONCE(!cpu_node, "no OF node found for CPU %i\n", cpu_id);
	if (!cpu_node)
		goto out;

	cpu_cache = cache_lookup_or_instantiate(cpu_node, 1);
	if (!cpu_cache)
		goto out;

	do_subsidiary_caches(cpu_cache);

	cache_cpu_set(cpu_cache, cpu_id);
out:
	of_node_put(cpu_node);

	return cpu_cache;
}

static struct cache_dir *cacheinfo_create_cache_dir(unsigned int cpu_id)
{
	struct cache_dir *cache_dir;
	struct device *dev;
	struct kobject *kobj = NULL;

	dev = get_cpu_device(cpu_id);
	WARN_ONCE(!dev, "no dev for CPU %i\n", cpu_id);
	if (!dev)
		goto err;

	kobj = kobject_create_and_add("cache", &dev->kobj);
	if (!kobj)
		goto err;

	cache_dir = kzalloc(sizeof(*cache_dir), GFP_KERNEL);
	if (!cache_dir)
		goto err;

	cache_dir->kobj = kobj;

	WARN_ON_ONCE(per_cpu(cache_dir_pcpu, cpu_id) != NULL);

	per_cpu(cache_dir_pcpu, cpu_id) = cache_dir;

	return cache_dir;
err:
	/* kobject_put() handles NULL, covering the pre-creation failures */
	kobject_put(kobj);
	return NULL;
}

static void cache_index_release(struct kobject *kobj)
{
	struct cache_index_dir *index;

	index = kobj_to_cache_index_dir(kobj);

	pr_debug("freeing index directory for L%d %s cache\n",
		 index->cache->level, cache_type_string(index->cache));

	kfree(index);
}

static ssize_t cache_index_show(struct kobject *k, struct attribute *attr, char *buf)
{
	struct kobj_attribute *kobj_attr;

	kobj_attr = container_of(attr, struct kobj_attribute, attr);

	return kobj_attr->show(k, kobj_attr, buf);
}

static struct cache *index_kobj_to_cache(struct kobject *k)
{
	struct cache_index_dir *index;

	index = kobj_to_cache_index_dir(k);

	return index->cache;
}

static ssize_t size_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
{
	unsigned int size_kb;
	struct cache *cache;

	cache = index_kobj_to_cache(k);

	if (cache_size_kb(cache, &size_kb))
		return -ENODEV;

	return sprintf(buf, "%uK\n", size_kb);
}

static struct kobj_attribute cache_size_attr =
	__ATTR(size, 0444, size_show, NULL);

static ssize_t line_size_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
{
	unsigned int line_size;
	struct cache *cache;

	cache = index_kobj_to_cache(k);

	if (cache_get_line_size(cache, &line_size))
		return -ENODEV;

	return sprintf(buf, "%u\n", line_size);
}

static struct kobj_attribute cache_line_size_attr =
	__ATTR(coherency_line_size, 0444, line_size_show, NULL);

static ssize_t nr_sets_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
{
	unsigned int nr_sets;
	struct cache *cache;

	cache = index_kobj_to_cache(k);

	if (cache_nr_sets(cache, &nr_sets))
		return -ENODEV;

	return sprintf(buf, "%u\n", nr_sets);
}

static struct kobj_attribute cache_nr_sets_attr =
	__ATTR(number_of_sets, 0444, nr_sets_show, NULL);

static ssize_t associativity_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
{
	unsigned int associativity;
	struct cache *cache;

	cache = index_kobj_to_cache(k);

	if (cache_associativity(cache, &associativity))
		return -ENODEV;

	return sprintf(buf, "%u\n", associativity);
}

static struct kobj_attribute cache_assoc_attr =
	__ATTR(ways_of_associativity, 0444, associativity_show, NULL);

static ssize_t type_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
{
	struct cache *cache;

	cache = index_kobj_to_cache(k);

	return sprintf(buf, "%s\n", cache_type_string(cache));
}

static struct kobj_attribute cache_type_attr =
	__ATTR(type, 0444, type_show, NULL);

static ssize_t level_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
{
	struct cache_index_dir *index;
	struct cache *cache;

	index = kobj_to_cache_index_dir(k);
	cache = index->cache;

	return sprintf(buf, "%d\n", cache->level);
}

static struct kobj_attribute cache_level_attr =
	__ATTR(level, 0444, level_show, NULL);

static ssize_t shared_cpu_map_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
{
	struct cache_index_dir *index;
	struct cache *cache;
	int len;
	int n = 0;

	index = kobj_to_cache_index_dir(k);
	cache = index->cache;
	len = PAGE_SIZE - 2;

	if (len > 1) {
		n = cpumask_scnprintf(buf, len, &cache->shared_cpu_map);
		buf[n++] = '\n';
		buf[n] = '\0';
	}
	return n;
}

static struct kobj_attribute cache_shared_cpu_map_attr =
	__ATTR(shared_cpu_map, 0444, shared_cpu_map_show, NULL);

/* Attributes which should always be created -- the kobject/sysfs core
 * does this automatically via kobj_type->default_attrs.  This is the
 * minimum data required to uniquely identify a cache.
 */
static struct attribute *cache_index_default_attrs[] = {
	&cache_type_attr.attr,
	&cache_level_attr.attr,
	&cache_shared_cpu_map_attr.attr,
	NULL,
};

/* Attributes which should be created if the cache device node has the
 * right properties -- see cacheinfo_create_index_opt_attrs
 */
static struct kobj_attribute *cache_index_opt_attrs[] = {
	&cache_size_attr,
	&cache_line_size_attr,
	&cache_nr_sets_attr,
	&cache_assoc_attr,
};

static const struct sysfs_ops cache_index_ops = {
	.show = cache_index_show,
};

static struct kobj_type cache_index_type = {
	.release = cache_index_release,
	.sysfs_ops = &cache_index_ops,
	.default_attrs = cache_index_default_attrs,
};
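
/* The resulting sysfs layout for a given cpu looks like (paths shown
 * for illustration):
 *
 *	/sys/devices/system/cpu/cpuN/cache/
 *	/sys/devices/system/cpu/cpuN/cache/index0/
 *	/sys/devices/system/cpu/cpuN/cache/index1/
 *	...
 *
 * with one indexM directory per cache on the cpu's local list.  Each
 * indexM directory always carries type, level and shared_cpu_map,
 * while size, coherency_line_size, number_of_sets and
 * ways_of_associativity appear only when the underlying device tree
 * properties do.
 */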

static void cacheinfo_create_index_opt_attrs(struct cache_index_dir *dir)
{
	const char *cache_name;
	const char *cache_type;
	struct cache *cache;
	char *buf;
	int i;

	buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!buf)
		return;

	cache = dir->cache;
	cache_name = cache->ofnode->full_name;
	cache_type = cache_type_string(cache);

	/* We don't want to create an attribute that can't provide a
	 * meaningful value.  Check the return value of each optional
	 * attribute's ->show method before registering the
	 * attribute.
	 */
	for (i = 0; i < ARRAY_SIZE(cache_index_opt_attrs); i++) {
		struct kobj_attribute *attr;
		ssize_t rc;

		attr = cache_index_opt_attrs[i];

		rc = attr->show(&dir->kobj, attr, buf);
		if (rc <= 0) {
			pr_debug("not creating %s attribute for %s(%s) (rc = %zd)\n",
				 attr->attr.name, cache_name, cache_type, rc);
			continue;
		}
		if (sysfs_create_file(&dir->kobj, &attr->attr))
			pr_debug("could not create %s attribute for %s(%s)\n",
				 attr->attr.name, cache_name, cache_type);
	}

	kfree(buf);
}

static void cacheinfo_create_index_dir(struct cache *cache, int index,
				       struct cache_dir *cache_dir)
{
	struct cache_index_dir *index_dir;
	int rc;

	index_dir = kzalloc(sizeof(*index_dir), GFP_KERNEL);
	if (!index_dir)
		return;

	index_dir->cache = cache;

	rc = kobject_init_and_add(&index_dir->kobj, &cache_index_type,
				  cache_dir->kobj, "index%d", index);
	if (rc) {
		/* Once kobject_init_and_add() has run, the kobject
		 * must be disposed of with kobject_put(), which frees
		 * index_dir via cache_index_release(). */
		kobject_put(&index_dir->kobj);
		return;
	}

	index_dir->next = cache_dir->index;
	cache_dir->index = index_dir;

	cacheinfo_create_index_opt_attrs(index_dir);
}

static void cacheinfo_sysfs_populate(unsigned int cpu_id,
				     struct cache *cache_list)
{
	struct cache_dir *cache_dir;
	struct cache *cache;
	int index = 0;

	cache_dir = cacheinfo_create_cache_dir(cpu_id);
	if (!cache_dir)
		return;

	cache = cache_list;
	while (cache) {
		cacheinfo_create_index_dir(cache, index, cache_dir);
		index++;
		cache = cache->next_local;
	}
}

void cacheinfo_cpu_online(unsigned int cpu_id)
{
	struct cache *cache;

	cache = cache_chain_instantiate(cpu_id);
	if (!cache)
		return;

	cacheinfo_sysfs_populate(cpu_id, cache);
}

/* functions needed to remove cache entry for cpu offline or suspend/resume */

#if (defined(CONFIG_PPC_PSERIES) && defined(CONFIG_SUSPEND)) || \
    defined(CONFIG_HOTPLUG_CPU)

static struct cache *cache_lookup_by_cpu(unsigned int cpu_id)
{
	struct device_node *cpu_node;
	struct cache *cache;

	cpu_node = of_get_cpu_node(cpu_id, NULL);
	WARN_ONCE(!cpu_node, "no OF node found for CPU %i\n", cpu_id);
	if (!cpu_node)
		return NULL;

	cache = cache_lookup_by_node(cpu_node);
	of_node_put(cpu_node);

	return cache;
}

static void remove_index_dirs(struct cache_dir *cache_dir)
{
	struct cache_index_dir *index;

	index = cache_dir->index;

	while (index) {
		struct cache_index_dir *next;

		next = index->next;
		kobject_put(&index->kobj);
		index = next;
	}
}

static void remove_cache_dir(struct cache_dir *cache_dir)
{
	remove_index_dirs(cache_dir);

	/* Remove cache dir from sysfs */
	kobject_del(cache_dir->kobj);

	kobject_put(cache_dir->kobj);

	kfree(cache_dir);
}

static void cache_cpu_clear(struct cache *cache, int cpu)
{
	while (cache) {
		struct cache *next = cache->next_local;

		WARN_ONCE(!cpumask_test_cpu(cpu, &cache->shared_cpu_map),
			  "CPU %i not accounted in %s(%s)\n",
			  cpu, cache->ofnode->full_name,
			  cache_type_string(cache));

		cpumask_clear_cpu(cpu, &cache->shared_cpu_map);

		/* Release the cache object if all the cpus using it
		 * are offline */
		if (cpumask_empty(&cache->shared_cpu_map))
			release_cache(cache);

		cache = next;
	}
}

void cacheinfo_cpu_offline(unsigned int cpu_id)
{
	struct cache_dir *cache_dir;
	struct cache *cache;

	/* Prevent userspace from seeing inconsistent state - remove
	 * the sysfs hierarchy first */
	cache_dir = per_cpu(cache_dir_pcpu, cpu_id);

	/* careful, sysfs population may have failed */
	if (cache_dir)
		remove_cache_dir(cache_dir);

	per_cpu(cache_dir_pcpu, cpu_id) = NULL;

	/* clear the CPU's bit in its cache chain, possibly freeing
	 * cache objects */
	cache = cache_lookup_by_cpu(cpu_id);
	if (cache)
		cache_cpu_clear(cache, cpu_id);
}
#endif /* (CONFIG_PPC_PSERIES && CONFIG_SUSPEND) || CONFIG_HOTPLUG_CPU */