xref: /openbmc/linux/arch/powerpc/kernel/cacheinfo.c (revision e657c18a)
/*
 * Processor cache information made available to userspace via sysfs;
 * intended to be compatible with x86 intel_cacheinfo implementation.
 *
 * Copyright 2008 IBM Corporation
 * Author: Nathan Lynch
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 */

#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/kernel.h>
#include <linux/kobject.h>
#include <linux/list.h>
#include <linux/notifier.h>
#include <linux/of.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <asm/prom.h>
#include <asm/cputhreads.h>
#include <asm/smp.h>

#include "cacheinfo.h"

/* per-cpu object for tracking:
 * - a "cache" kobject for the top-level directory
 * - a list of "index" objects representing the cpu's local cache hierarchy
 */
struct cache_dir {
	struct kobject *kobj; /* bare (not embedded) kobject for cache
			       * directory */
	struct cache_index_dir *index; /* list of index objects */
};

/* "index" object: each cpu's cache directory has an index
 * subdirectory corresponding to a cache object associated with the
 * cpu.  This object's lifetime is managed via the embedded kobject.
 */
struct cache_index_dir {
	struct kobject kobj;
	struct cache_index_dir *next; /* next index in parent directory */
	struct cache *cache;
};

/* Template for determining which OF properties to query for a given
 * cache type */
struct cache_type_info {
	const char *name;
	const char *size_prop;

	/* Allow for both [di]-cache-line-size and
	 * [di]-cache-block-size properties.  According to the PowerPC
	 * Processor binding, -line-size should be provided if it
	 * differs from the cache block size (that which is operated
	 * on by cache instructions), so we look for -line-size first.
	 * See cache_get_line_size(). */

	const char *line_size_props[2];
	const char *nr_sets_prop;
};

/* These are used to index the cache_type_info array. */
#define CACHE_TYPE_UNIFIED     0 /* cache-size, cache-block-size, etc. */
#define CACHE_TYPE_UNIFIED_D   1 /* d-cache-size, d-cache-block-size, etc. */
#define CACHE_TYPE_INSTRUCTION 2
#define CACHE_TYPE_DATA        3

static const struct cache_type_info cache_type_info[] = {
	{
		/* Embedded systems that use cache-size, cache-block-size,
		 * etc. for the Unified (typically L2) cache. */
		.name            = "Unified",
		.size_prop       = "cache-size",
		.line_size_props = { "cache-line-size",
				     "cache-block-size", },
		.nr_sets_prop    = "cache-sets",
	},
	{
		/* PowerPC Processor binding says the [di]-cache-*
		 * must be equal on unified caches, so just use
		 * d-cache properties. */
		.name            = "Unified",
		.size_prop       = "d-cache-size",
		.line_size_props = { "d-cache-line-size",
				     "d-cache-block-size", },
		.nr_sets_prop    = "d-cache-sets",
	},
	{
		.name            = "Instruction",
		.size_prop       = "i-cache-size",
		.line_size_props = { "i-cache-line-size",
				     "i-cache-block-size", },
		.nr_sets_prop    = "i-cache-sets",
	},
	{
		.name            = "Data",
		.size_prop       = "d-cache-size",
		.line_size_props = { "d-cache-line-size",
				     "d-cache-block-size", },
		.nr_sets_prop    = "d-cache-sets",
	},
};
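
/*
 * For illustration only -- a made-up device tree fragment, not from
 * any particular platform -- exercising both flavors of unified-cache
 * property matched by the table above:
 *
 *	cpu@0 {
 *		i-cache-size      = <0x8000>;	// split L1: "Instruction"
 *		i-cache-line-size = <64>;	// and "Data" entries
 *		i-cache-sets      = <128>;
 *		d-cache-size      = <0x8000>;
 *		d-cache-line-size = <64>;
 *		d-cache-sets      = <128>;
 *		l2-cache          = <&L2_0>;	// link to next level
 *	};
 *
 *	L2_0: l2-cache {
 *		cache-unified;			// matched by the embedded
 *		cache-size      = <0x80000>;	// CACHE_TYPE_UNIFIED entry
 *		cache-line-size = <128>;
 *		cache-sets      = <512>;
 *	};
 */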

/* Cache object: each instance of this corresponds to a distinct cache
 * in the system.  There are separate objects for Harvard caches: one
 * each for instruction and data, and each refers to the same OF node.
 * The refcount of the OF node is elevated for the lifetime of the
 * cache object.  A cache object is released when its shared_cpu_map
 * is cleared (see cache_cpu_clear).
 *
 * A cache object is on two lists: an unsorted global list
 * (cache_list) of cache objects; and a singly-linked list
 * representing the local cache hierarchy, which is ordered by level
 * (e.g. L1d -> L1i -> L2 -> L3).
 */
struct cache {
	struct device_node *ofnode;    /* OF node for this cache, may be cpu */
	struct cpumask shared_cpu_map; /* online CPUs using this cache */
	int type;                      /* split cache disambiguation */
	int level;                     /* level not explicit in device tree */
	struct list_head list;         /* global list of cache objects */
	struct cache *next_local;      /* next cache of >= level */
};
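
/*
 * Hypothetical example of the two lists for a two-CPU system with
 * split L1s and a shared unified L2:
 *
 *	cpu0: L1d -> L1i -> L2 <- L1i <- L1d :cpu1	(next_local chains)
 *
 * Both per-cpu chains terminate at the same L2 object, whose
 * shared_cpu_map contains both CPUs; all five objects also sit on the
 * global cache_list, in no particular order.
 */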

static DEFINE_PER_CPU(struct cache_dir *, cache_dir_pcpu);

/* traversal/modification of this list occurs only at cpu hotplug time;
 * access is serialized by cpu hotplug locking
 */
static LIST_HEAD(cache_list);

static struct cache_index_dir *kobj_to_cache_index_dir(struct kobject *k)
{
	return container_of(k, struct cache_index_dir, kobj);
}

static const char *cache_type_string(const struct cache *cache)
{
	return cache_type_info[cache->type].name;
}

static void cache_init(struct cache *cache, int type, int level,
		       struct device_node *ofnode)
{
	cache->type = type;
	cache->level = level;
	cache->ofnode = of_node_get(ofnode);
	INIT_LIST_HEAD(&cache->list);
	list_add(&cache->list, &cache_list);
}

static struct cache *new_cache(int type, int level, struct device_node *ofnode)
{
	struct cache *cache;

	cache = kzalloc(sizeof(*cache), GFP_KERNEL);
	if (cache)
		cache_init(cache, type, level, ofnode);

	return cache;
}

static void release_cache_debugcheck(struct cache *cache)
{
	struct cache *iter;

	list_for_each_entry(iter, &cache_list, list)
		WARN_ONCE(iter->next_local == cache,
			  "cache for %pOF(%s) refers to cache for %pOF(%s)\n",
			  iter->ofnode,
			  cache_type_string(iter),
			  cache->ofnode,
			  cache_type_string(cache));
}

static void release_cache(struct cache *cache)
{
	if (!cache)
		return;

	pr_debug("freeing L%d %s cache for %pOF\n", cache->level,
		 cache_type_string(cache), cache->ofnode);

	release_cache_debugcheck(cache);
	list_del(&cache->list);
	of_node_put(cache->ofnode);
	kfree(cache);
}

static void cache_cpu_set(struct cache *cache, int cpu)
{
	struct cache *next = cache;

	while (next) {
		WARN_ONCE(cpumask_test_cpu(cpu, &next->shared_cpu_map),
			  "CPU %i already accounted in %pOF(%s)\n",
			  cpu, next->ofnode,
			  cache_type_string(next));
		cpumask_set_cpu(cpu, &next->shared_cpu_map);
		next = next->next_local;
	}
}

static int cache_size(const struct cache *cache, unsigned int *ret)
{
	const char *propname;
	const __be32 *cache_size;

	propname = cache_type_info[cache->type].size_prop;

	cache_size = of_get_property(cache->ofnode, propname, NULL);
	if (!cache_size)
		return -ENODEV;

	*ret = of_read_number(cache_size, 1);
	return 0;
}
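
/*
 * Example for cache_size(): a data cache whose node carries
 * "d-cache-size = <0x8000>;" yields *ret == 32768, since
 * of_read_number() decodes the single big-endian cell pointed to by
 * the property value.
 */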

static int cache_size_kb(const struct cache *cache, unsigned int *ret)
{
	unsigned int size;

	if (cache_size(cache, &size))
		return -ENODEV;

	*ret = size / 1024;
	return 0;
}

/* not cache_line_size() because that's a macro in include/linux/cache.h */
static int cache_get_line_size(const struct cache *cache, unsigned int *ret)
{
	const __be32 *line_size;
	int i, lim;

	lim = ARRAY_SIZE(cache_type_info[cache->type].line_size_props);

	for (i = 0; i < lim; i++) {
		const char *propname;

		propname = cache_type_info[cache->type].line_size_props[i];
		line_size = of_get_property(cache->ofnode, propname, NULL);
		if (line_size)
			break;
	}

	if (!line_size)
		return -ENODEV;

	*ret = of_read_number(line_size, 1);
	return 0;
}
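
/*
 * Example for cache_get_line_size(): if a data cache's node provides
 * only "d-cache-block-size = <128>;", the "d-cache-line-size" lookup
 * fails and the block size, 128, is reported as the line size instead.
 */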

static int cache_nr_sets(const struct cache *cache, unsigned int *ret)
{
	const char *propname;
	const __be32 *nr_sets;

	propname = cache_type_info[cache->type].nr_sets_prop;

	nr_sets = of_get_property(cache->ofnode, propname, NULL);
	if (!nr_sets)
		return -ENODEV;

	*ret = of_read_number(nr_sets, 1);
	return 0;
}

static int cache_associativity(const struct cache *cache, unsigned int *ret)
{
	unsigned int line_size;
	unsigned int nr_sets;
	unsigned int size;

	if (cache_nr_sets(cache, &nr_sets))
		goto err;

	/* If the cache is fully associative, there is no need to
	 * check the other properties.
	 */
	if (nr_sets == 1) {
		*ret = 0;
		return 0;
	}

	if (cache_get_line_size(cache, &line_size))
		goto err;
	if (cache_size(cache, &size))
		goto err;

	if (!(nr_sets > 0 && size > 0 && line_size > 0))
		goto err;

	*ret = (size / nr_sets) / line_size;
	return 0;
err:
	return -ENODEV;
}
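
/*
 * Worked example for cache_associativity(), with made-up numbers:
 * size = 32768, nr_sets = 128, line_size = 64 gives
 * (32768 / 128) / 64 = 4, i.e. 4-way set associative.  A node
 * reporting nr_sets == 1 is fully associative and is encoded as
 * 0 ways, per the early return above.
 */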

/* helper for dealing with split caches */
static struct cache *cache_find_first_sibling(struct cache *cache)
{
	struct cache *iter;

	if (cache->type == CACHE_TYPE_UNIFIED ||
	    cache->type == CACHE_TYPE_UNIFIED_D)
		return cache;

	list_for_each_entry(iter, &cache_list, list)
		if (iter->ofnode == cache->ofnode && iter->next_local == cache)
			return iter;

	return cache;
}
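
/*
 * Example for cache_find_first_sibling(): in a split L1 built by
 * cache_do_one_devnode_split(), dcache->next_local == icache and both
 * share one OF node, so passing the icache in returns the dcache,
 * the head of the sibling pair.
 */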

/* return the first cache on a local list matching node */
static struct cache *cache_lookup_by_node(const struct device_node *node)
{
	struct cache *cache = NULL;
	struct cache *iter;

	list_for_each_entry(iter, &cache_list, list) {
		if (iter->ofnode != node)
			continue;
		cache = cache_find_first_sibling(iter);
		break;
	}

	return cache;
}

static bool cache_node_is_unified(const struct device_node *np)
{
	return of_get_property(np, "cache-unified", NULL);
}

/*
 * Unified caches can have two different sets of tags.  Most embedded
 * systems use cache-size, etc. for the unified cache size, while Open
 * Firmware systems use d-cache-size, etc.  Check on initialization
 * which type we have, and return the appropriate structure type.
 * Assume it's embedded if it isn't Open Firmware.  If a third variant
 * ever appears, there will be missing entries in
 * /sys/devices/system/cpu/cpu0/cache/index2/, and this code will need
 * to be extended further.
 */
static int cache_is_unified_d(const struct device_node *np)
{
	return of_get_property(np,
		cache_type_info[CACHE_TYPE_UNIFIED_D].size_prop, NULL) ?
		CACHE_TYPE_UNIFIED_D : CACHE_TYPE_UNIFIED;
}

/*
 * Create the cache object for a unified cache; cache_is_unified_d()
 * picks which property set (embedded vs. Open Firmware) describes it.
 */
static struct cache *cache_do_one_devnode_unified(struct device_node *node, int level)
{
	pr_debug("creating L%d ucache for %pOF\n", level, node);

	return new_cache(cache_is_unified_d(node), level, node);
}

static struct cache *cache_do_one_devnode_split(struct device_node *node,
						int level)
{
	struct cache *dcache, *icache;

	pr_debug("creating L%d dcache and icache for %pOF\n", level,
		 node);

	dcache = new_cache(CACHE_TYPE_DATA, level, node);
	icache = new_cache(CACHE_TYPE_INSTRUCTION, level, node);

	if (!dcache || !icache)
		goto err;

	dcache->next_local = icache;

	return dcache;
err:
	release_cache(dcache);
	release_cache(icache);
	return NULL;
}

static struct cache *cache_do_one_devnode(struct device_node *node, int level)
{
	struct cache *cache;

	if (cache_node_is_unified(node))
		cache = cache_do_one_devnode_unified(node, level);
	else
		cache = cache_do_one_devnode_split(node, level);

	return cache;
}

static struct cache *cache_lookup_or_instantiate(struct device_node *node,
						 int level)
{
	struct cache *cache;

	cache = cache_lookup_by_node(node);

	WARN_ONCE(cache && cache->level != level,
		  "cache level mismatch on lookup (got %d, expected %d)\n",
		  cache->level, level);

	if (!cache)
		cache = cache_do_one_devnode(node, level);

	return cache;
}

static void link_cache_lists(struct cache *smaller, struct cache *bigger)
{
	while (smaller->next_local) {
		if (smaller->next_local == bigger)
			return; /* already linked */
		smaller = smaller->next_local;
	}

	smaller->next_local = bigger;
}
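
/*
 * Example for link_cache_lists(): linking a new L2 ("bigger") after a
 * split L1 chain ("smaller" being the L1d), the walk advances past
 * L1i, then appends, giving L1d -> L1i -> L2.  Repeating the call
 * with the same pair is a no-op.
 */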

static void do_subsidiary_caches_debugcheck(struct cache *cache)
{
	WARN_ON_ONCE(cache->level != 1);
	WARN_ON_ONCE(!of_node_is_type(cache->ofnode, "cpu"));
}

static void do_subsidiary_caches(struct cache *cache)
{
	struct device_node *subcache_node;
	int level = cache->level;

	do_subsidiary_caches_debugcheck(cache);

	while ((subcache_node = of_find_next_cache_node(cache->ofnode))) {
		struct cache *subcache;

		level++;
		subcache = cache_lookup_or_instantiate(subcache_node, level);
		of_node_put(subcache_node);
		if (!subcache)
			break;

		link_cache_lists(cache, subcache);
		cache = subcache;
	}
}
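
/*
 * of_find_next_cache_node() follows the "l2-cache" (or
 * "next-level-cache") phandle out of the given node, so starting from
 * the cpu node the loop above instantiates and links L2, then L3, and
 * so on until the chain runs out.
 */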

static struct cache *cache_chain_instantiate(unsigned int cpu_id)
{
	struct device_node *cpu_node;
	struct cache *cpu_cache = NULL;

	pr_debug("creating cache object(s) for CPU %i\n", cpu_id);

	cpu_node = of_get_cpu_node(cpu_id, NULL);
	WARN_ONCE(!cpu_node, "no OF node found for CPU %i\n", cpu_id);
	if (!cpu_node)
		goto out;

	cpu_cache = cache_lookup_or_instantiate(cpu_node, 1);
	if (!cpu_cache)
		goto out;

	do_subsidiary_caches(cpu_cache);

	cache_cpu_set(cpu_cache, cpu_id);
out:
	of_node_put(cpu_node);

	return cpu_cache;
}

static struct cache_dir *cacheinfo_create_cache_dir(unsigned int cpu_id)
{
	struct cache_dir *cache_dir;
	struct device *dev;
	struct kobject *kobj = NULL;

	dev = get_cpu_device(cpu_id);
	WARN_ONCE(!dev, "no dev for CPU %i\n", cpu_id);
	if (!dev)
		goto err;

	kobj = kobject_create_and_add("cache", &dev->kobj);
	if (!kobj)
		goto err;

	cache_dir = kzalloc(sizeof(*cache_dir), GFP_KERNEL);
	if (!cache_dir)
		goto err;

	cache_dir->kobj = kobj;

	WARN_ON_ONCE(per_cpu(cache_dir_pcpu, cpu_id) != NULL);

	per_cpu(cache_dir_pcpu, cpu_id) = cache_dir;

	return cache_dir;
err:
	kobject_put(kobj);
	return NULL;
}

static void cache_index_release(struct kobject *kobj)
{
	struct cache_index_dir *index;

	index = kobj_to_cache_index_dir(kobj);

	pr_debug("freeing index directory for L%d %s cache\n",
		 index->cache->level, cache_type_string(index->cache));

	kfree(index);
}

static ssize_t cache_index_show(struct kobject *k, struct attribute *attr, char *buf)
{
	struct kobj_attribute *kobj_attr;

	kobj_attr = container_of(attr, struct kobj_attribute, attr);

	return kobj_attr->show(k, kobj_attr, buf);
}

static struct cache *index_kobj_to_cache(struct kobject *k)
{
	struct cache_index_dir *index;

	index = kobj_to_cache_index_dir(k);

	return index->cache;
}

static ssize_t size_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
{
	unsigned int size_kb;
	struct cache *cache;

	cache = index_kobj_to_cache(k);

	if (cache_size_kb(cache, &size_kb))
		return -ENODEV;

	return sprintf(buf, "%uK\n", size_kb);
}

static struct kobj_attribute cache_size_attr =
	__ATTR(size, 0444, size_show, NULL);

static ssize_t line_size_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
{
	unsigned int line_size;
	struct cache *cache;

	cache = index_kobj_to_cache(k);

	if (cache_get_line_size(cache, &line_size))
		return -ENODEV;

	return sprintf(buf, "%u\n", line_size);
}

static struct kobj_attribute cache_line_size_attr =
	__ATTR(coherency_line_size, 0444, line_size_show, NULL);

static ssize_t nr_sets_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
{
	unsigned int nr_sets;
	struct cache *cache;

	cache = index_kobj_to_cache(k);

	if (cache_nr_sets(cache, &nr_sets))
		return -ENODEV;

	return sprintf(buf, "%u\n", nr_sets);
}

static struct kobj_attribute cache_nr_sets_attr =
	__ATTR(number_of_sets, 0444, nr_sets_show, NULL);

static ssize_t associativity_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
{
	unsigned int associativity;
	struct cache *cache;

	cache = index_kobj_to_cache(k);

	if (cache_associativity(cache, &associativity))
		return -ENODEV;

	return sprintf(buf, "%u\n", associativity);
}

static struct kobj_attribute cache_assoc_attr =
	__ATTR(ways_of_associativity, 0444, associativity_show, NULL);

static ssize_t type_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
{
	struct cache *cache;

	cache = index_kobj_to_cache(k);

	return sprintf(buf, "%s\n", cache_type_string(cache));
}

static struct kobj_attribute cache_type_attr =
	__ATTR(type, 0444, type_show, NULL);

static ssize_t level_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
{
	struct cache_index_dir *index;
	struct cache *cache;

	index = kobj_to_cache_index_dir(k);
	cache = index->cache;

	return sprintf(buf, "%d\n", cache->level);
}

static struct kobj_attribute cache_level_attr =
	__ATTR(level, 0444, level_show, NULL);

static unsigned int index_dir_to_cpu(struct cache_index_dir *index)
{
	struct kobject *index_dir_kobj = &index->kobj;
	struct kobject *cache_dir_kobj = index_dir_kobj->parent;
	struct kobject *cpu_dev_kobj = cache_dir_kobj->parent;
	struct device *dev = kobj_to_dev(cpu_dev_kobj);

	return dev->id;
}
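
/*
 * The kobject ancestry in index_dir_to_cpu() mirrors the sysfs path:
 * for, say, /sys/devices/system/cpu/cpu3/cache/index0, the index
 * kobject's parent is the "cache" directory and its grandparent is
 * the cpu3 device, so dev->id yields 3.
 */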

/*
 * On big-core systems, each core has two groups of CPUs, each of which
 * has its own L1 cache.  The thread-siblings sharing the L1 cache with
 * @cpu can be obtained via cpu_smallcore_mask().
 */
static const struct cpumask *get_big_core_shared_cpu_map(int cpu, struct cache *cache)
{
	if (cache->level == 1)
		return cpu_smallcore_mask(cpu);

	return &cache->shared_cpu_map;
}

static ssize_t shared_cpu_map_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
{
	struct cache_index_dir *index;
	struct cache *cache;
	const struct cpumask *mask;
	int ret, cpu;

	index = kobj_to_cache_index_dir(k);
	cache = index->cache;

	if (has_big_cores) {
		cpu = index_dir_to_cpu(index);
		mask = get_big_core_shared_cpu_map(cpu, cache);
	} else {
		mask = &cache->shared_cpu_map;
	}

	/* "%*pb\n" already supplies the trailing newline; don't append
	 * a second one by hand. */
	ret = scnprintf(buf, PAGE_SIZE, "%*pb\n", cpumask_pr_args(mask));
	return ret;
}
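
/*
 * Hypothetical shared_cpu_map_show() output, assuming nr_cpu_ids == 32
 * and an L2 shared by CPUs 0-7 (%*pb renders the cpumask as a
 * fixed-width hex bitmap):
 *
 *	# cat /sys/devices/system/cpu/cpu0/cache/index2/shared_cpu_map
 *	000000ff
 */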

static struct kobj_attribute cache_shared_cpu_map_attr =
	__ATTR(shared_cpu_map, 0444, shared_cpu_map_show, NULL);

/* Attributes which should always be created -- the kobject/sysfs core
 * does this automatically via kobj_type->default_attrs.  This is the
 * minimum data required to uniquely identify a cache.
 */
static struct attribute *cache_index_default_attrs[] = {
	&cache_type_attr.attr,
	&cache_level_attr.attr,
	&cache_shared_cpu_map_attr.attr,
	NULL,
};

/* Attributes which should be created if the cache device node has the
 * right properties -- see cacheinfo_create_index_opt_attrs
 */
static struct kobj_attribute *cache_index_opt_attrs[] = {
	&cache_size_attr,
	&cache_line_size_attr,
	&cache_nr_sets_attr,
	&cache_assoc_attr,
};

static const struct sysfs_ops cache_index_ops = {
	.show = cache_index_show,
};

static struct kobj_type cache_index_type = {
	.release = cache_index_release,
	.sysfs_ops = &cache_index_ops,
	.default_attrs = cache_index_default_attrs,
};

static void cacheinfo_create_index_opt_attrs(struct cache_index_dir *dir)
{
	const char *cache_type;
	struct cache *cache;
	char *buf;
	int i;

	buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!buf)
		return;

	cache = dir->cache;
	cache_type = cache_type_string(cache);

	/* We don't want to create an attribute that can't provide a
	 * meaningful value.  Check the return value of each optional
	 * attribute's ->show method before registering the
	 * attribute.
	 */
	for (i = 0; i < ARRAY_SIZE(cache_index_opt_attrs); i++) {
		struct kobj_attribute *attr;
		ssize_t rc;

		attr = cache_index_opt_attrs[i];

		rc = attr->show(&dir->kobj, attr, buf);
		if (rc <= 0) {
			pr_debug("not creating %s attribute for %pOF(%s) (rc = %zd)\n",
				 attr->attr.name, cache->ofnode,
				 cache_type, rc);
			continue;
		}
		if (sysfs_create_file(&dir->kobj, &attr->attr))
			pr_debug("could not create %s attribute for %pOF(%s)\n",
				 attr->attr.name, cache->ofnode, cache_type);
	}

	kfree(buf);
}

static void cacheinfo_create_index_dir(struct cache *cache, int index,
				       struct cache_dir *cache_dir)
{
	struct cache_index_dir *index_dir;
	int rc;

	index_dir = kzalloc(sizeof(*index_dir), GFP_KERNEL);
	if (!index_dir)
		return;

	index_dir->cache = cache;

	rc = kobject_init_and_add(&index_dir->kobj, &cache_index_type,
				  cache_dir->kobj, "index%d", index);
	if (rc) {
		/* Once kobject_init_and_add() has run, the kobject's
		 * refcount owns index_dir; drop it with kobject_put()
		 * (which ends in cache_index_release()) instead of
		 * calling kfree() directly. */
		kobject_put(&index_dir->kobj);
		return;
	}

	index_dir->next = cache_dir->index;
	cache_dir->index = index_dir;

	cacheinfo_create_index_opt_attrs(index_dir);
}

static void cacheinfo_sysfs_populate(unsigned int cpu_id,
				     struct cache *cache_list)
{
	struct cache_dir *cache_dir;
	struct cache *cache;
	int index = 0;

	cache_dir = cacheinfo_create_cache_dir(cpu_id);
	if (!cache_dir)
		return;

	cache = cache_list;
	while (cache) {
		cacheinfo_create_index_dir(cache, index, cache_dir);
		index++;
		cache = cache->next_local;
	}
}
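
/*
 * For a CPU with a split L1 and a unified L2, the local chain
 * (L1d -> L1i -> L2) makes cacheinfo_sysfs_populate() produce, e.g.:
 *
 *	/sys/devices/system/cpu/cpuN/cache/index0/	(Data, level 1)
 *	/sys/devices/system/cpu/cpuN/cache/index1/	(Instruction, level 1)
 *	/sys/devices/system/cpu/cpuN/cache/index2/	(Unified, level 2)
 *
 * mirroring the index layout of the x86 intel_cacheinfo ABI.
 */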

void cacheinfo_cpu_online(unsigned int cpu_id)
{
	struct cache *cache;

	cache = cache_chain_instantiate(cpu_id);
	if (!cache)
		return;

	cacheinfo_sysfs_populate(cpu_id, cache);
}

/* functions needed to remove cache entry for cpu offline or suspend/resume */

#if (defined(CONFIG_PPC_PSERIES) && defined(CONFIG_SUSPEND)) || \
    defined(CONFIG_HOTPLUG_CPU)

static struct cache *cache_lookup_by_cpu(unsigned int cpu_id)
{
	struct device_node *cpu_node;
	struct cache *cache;

	cpu_node = of_get_cpu_node(cpu_id, NULL);
	WARN_ONCE(!cpu_node, "no OF node found for CPU %i\n", cpu_id);
	if (!cpu_node)
		return NULL;

	cache = cache_lookup_by_node(cpu_node);
	of_node_put(cpu_node);

	return cache;
}

static void remove_index_dirs(struct cache_dir *cache_dir)
{
	struct cache_index_dir *index;

	index = cache_dir->index;

	while (index) {
		struct cache_index_dir *next;

		next = index->next;
		kobject_put(&index->kobj);
		index = next;
	}
}

static void remove_cache_dir(struct cache_dir *cache_dir)
{
	remove_index_dirs(cache_dir);

	/* Remove cache dir from sysfs */
	kobject_del(cache_dir->kobj);

	kobject_put(cache_dir->kobj);

	kfree(cache_dir);
}

static void cache_cpu_clear(struct cache *cache, int cpu)
{
	while (cache) {
		struct cache *next = cache->next_local;

		WARN_ONCE(!cpumask_test_cpu(cpu, &cache->shared_cpu_map),
			  "CPU %i not accounted in %pOF(%s)\n",
			  cpu, cache->ofnode,
			  cache_type_string(cache));

		cpumask_clear_cpu(cpu, &cache->shared_cpu_map);

		/* Release the cache object if all the cpus using it
		 * are offline */
		if (cpumask_empty(&cache->shared_cpu_map))
			release_cache(cache);

		cache = next;
	}
}

void cacheinfo_cpu_offline(unsigned int cpu_id)
{
	struct cache_dir *cache_dir;
	struct cache *cache;

	/* Prevent userspace from seeing inconsistent state - remove
	 * the sysfs hierarchy first */
	cache_dir = per_cpu(cache_dir_pcpu, cpu_id);

	/* careful, sysfs population may have failed */
	if (cache_dir)
		remove_cache_dir(cache_dir);

	per_cpu(cache_dir_pcpu, cpu_id) = NULL;

	/* clear the CPU's bit in its cache chain, possibly freeing
	 * cache objects */
	cache = cache_lookup_by_cpu(cpu_id);
	if (cache)
		cache_cpu_clear(cache, cpu_id);
}
#endif /* (CONFIG_PPC_PSERIES && CONFIG_SUSPEND) || CONFIG_HOTPLUG_CPU */