xref: /openbmc/linux/arch/powerpc/kernel/cacheinfo.c (revision 14474950)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Processor cache information made available to userspace via sysfs;
 * intended to be compatible with x86 intel_cacheinfo implementation.
 *
 * Copyright 2008 IBM Corporation
 * Author: Nathan Lynch
 */

#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/kernel.h>
#include <linux/kobject.h>
#include <linux/list.h>
#include <linux/notifier.h>
#include <linux/of.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <asm/prom.h>
#include <asm/cputhreads.h>
#include <asm/smp.h>

#include "cacheinfo.h"

/* per-cpu object for tracking:
 * - a "cache" kobject for the top-level directory
 * - a list of "index" objects representing the cpu's local cache hierarchy
 */
struct cache_dir {
	struct kobject *kobj; /* bare (not embedded) kobject for cache
			       * directory */
	struct cache_index_dir *index; /* list of index objects */
};

/* "index" object: each cpu's cache directory has an index
 * subdirectory corresponding to a cache object associated with the
 * cpu.  This object's lifetime is managed via the embedded kobject.
 */
struct cache_index_dir {
	struct kobject kobj;
	struct cache_index_dir *next; /* next index in parent directory */
	struct cache *cache;
};

/* Template for determining which OF properties to query for a given
 * cache type */
struct cache_type_info {
	const char *name;
	const char *size_prop;

	/* Allow for both [di]-cache-line-size and
	 * [di]-cache-block-size properties.  According to the PowerPC
	 * Processor binding, -line-size should be provided if it
	 * differs from the cache block size (that which is operated
	 * on by cache instructions), so we look for -line-size first.
	 * See cache_get_line_size(). */

	const char *line_size_props[2];
	const char *nr_sets_prop;
};

/* These are used to index the cache_type_info array. */
#define CACHE_TYPE_UNIFIED     0 /* cache-size, cache-block-size, etc. */
#define CACHE_TYPE_UNIFIED_D   1 /* d-cache-size, d-cache-block-size, etc. */
#define CACHE_TYPE_INSTRUCTION 2
#define CACHE_TYPE_DATA        3

static const struct cache_type_info cache_type_info[] = {
	{
		/* Embedded systems that use cache-size, cache-block-size,
		 * etc. for the Unified (typically L2) cache. */
		.name            = "Unified",
		.size_prop       = "cache-size",
		.line_size_props = { "cache-line-size",
				     "cache-block-size", },
		.nr_sets_prop    = "cache-sets",
	},
	{
		/* PowerPC Processor binding says the [di]-cache-*
		 * must be equal on unified caches, so just use
		 * d-cache properties. */
		.name            = "Unified",
		.size_prop       = "d-cache-size",
		.line_size_props = { "d-cache-line-size",
				     "d-cache-block-size", },
		.nr_sets_prop    = "d-cache-sets",
	},
	{
		.name            = "Instruction",
		.size_prop       = "i-cache-size",
		.line_size_props = { "i-cache-line-size",
				     "i-cache-block-size", },
		.nr_sets_prop    = "i-cache-sets",
	},
	{
		.name            = "Data",
		.size_prop       = "d-cache-size",
		.line_size_props = { "d-cache-line-size",
				     "d-cache-block-size", },
		.nr_sets_prop    = "d-cache-sets",
	},
};

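/* For illustration, a device tree might describe an L2 that uses the
 * second (Open Firmware style) set of properties like this -- the node
 * and values below are made up, not taken from any particular machine:
 *
 *	l2-cache {
 *		device_type = "cache";
 *		cache-unified;
 *		d-cache-size = <0x80000>;
 *		d-cache-line-size = <128>;
 *		d-cache-sets = <0x200>;
 *	};
 *
 * cache_is_unified_d() keys off the presence of d-cache-size to pick
 * CACHE_TYPE_UNIFIED_D over CACHE_TYPE_UNIFIED for such a node.
 */
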
/* Cache object: each instance of this corresponds to a distinct cache
 * in the system.  There are separate objects for Harvard caches: one
 * each for instruction and data, and each refers to the same OF node.
 * The refcount of the OF node is elevated for the lifetime of the
 * cache object.  A cache object is released when its shared_cpu_map
 * is cleared (see cache_cpu_clear).
 *
 * A cache object is on two lists: an unsorted global list
 * (cache_list) of cache objects; and a singly-linked list
 * representing the local cache hierarchy, which is ordered by level
 * (e.g. L1d -> L1i -> L2 -> L3).
 */
struct cache {
	struct device_node *ofnode;    /* OF node for this cache, may be cpu */
	struct cpumask shared_cpu_map; /* online CPUs using this cache */
	int type;                      /* split cache disambiguation */
	int level;                     /* level not explicit in device tree */
	struct list_head list;         /* global list of cache objects */
	struct cache *next_local;      /* next cache of >= level */
};

static DEFINE_PER_CPU(struct cache_dir *, cache_dir_pcpu);

/* traversal/modification of this list occurs only at cpu hotplug time;
 * access is serialized by cpu hotplug locking
 */
static LIST_HEAD(cache_list);

static struct cache_index_dir *kobj_to_cache_index_dir(struct kobject *k)
{
	return container_of(k, struct cache_index_dir, kobj);
}

static const char *cache_type_string(const struct cache *cache)
{
	return cache_type_info[cache->type].name;
}

static void cache_init(struct cache *cache, int type, int level,
		       struct device_node *ofnode)
{
	cache->type = type;
	cache->level = level;
	cache->ofnode = of_node_get(ofnode);
	INIT_LIST_HEAD(&cache->list);
	list_add(&cache->list, &cache_list);
}

static struct cache *new_cache(int type, int level, struct device_node *ofnode)
{
	struct cache *cache;

	cache = kzalloc(sizeof(*cache), GFP_KERNEL);
	if (cache)
		cache_init(cache, type, level, ofnode);

	return cache;
}

static void release_cache_debugcheck(struct cache *cache)
{
	struct cache *iter;

	list_for_each_entry(iter, &cache_list, list)
		WARN_ONCE(iter->next_local == cache,
			  "cache for %pOF(%s) refers to cache for %pOF(%s)\n",
			  iter->ofnode,
			  cache_type_string(iter),
			  cache->ofnode,
			  cache_type_string(cache));
}

static void release_cache(struct cache *cache)
{
	if (!cache)
		return;

	pr_debug("freeing L%d %s cache for %pOF\n", cache->level,
		 cache_type_string(cache), cache->ofnode);

	release_cache_debugcheck(cache);
	list_del(&cache->list);
	of_node_put(cache->ofnode);
	kfree(cache);
}

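/* Mark a CPU as sharing every cache in its local hierarchy: walk the
 * next_local chain from the L1 onward, setting the CPU's bit in each
 * cache's shared_cpu_map.
 */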
static void cache_cpu_set(struct cache *cache, int cpu)
{
	struct cache *next = cache;

	while (next) {
		WARN_ONCE(cpumask_test_cpu(cpu, &next->shared_cpu_map),
			  "CPU %i already accounted in %pOF(%s)\n",
			  cpu, next->ofnode,
			  cache_type_string(next));
		cpumask_set_cpu(cpu, &next->shared_cpu_map);
		next = next->next_local;
	}
}

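/* The cache_size()/cache_get_line_size()/cache_nr_sets() getters below
 * all follow the same pattern: look up the OF property named in the
 * cache_type_info entry for this cache's type and decode it as a
 * single 32-bit big-endian cell.
 */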
static int cache_size(const struct cache *cache, unsigned int *ret)
{
	const char *propname;
	const __be32 *cache_size;

	propname = cache_type_info[cache->type].size_prop;

	cache_size = of_get_property(cache->ofnode, propname, NULL);
	if (!cache_size)
		return -ENODEV;

	*ret = of_read_number(cache_size, 1);
	return 0;
}

static int cache_size_kb(const struct cache *cache, unsigned int *ret)
{
	unsigned int size;

	if (cache_size(cache, &size))
		return -ENODEV;

	*ret = size / 1024;
	return 0;
}

/* not cache_line_size() because that's a macro in include/linux/cache.h */
static int cache_get_line_size(const struct cache *cache, unsigned int *ret)
{
	const __be32 *line_size;
	int i, lim;

	lim = ARRAY_SIZE(cache_type_info[cache->type].line_size_props);

	for (i = 0; i < lim; i++) {
		const char *propname;

		propname = cache_type_info[cache->type].line_size_props[i];
		line_size = of_get_property(cache->ofnode, propname, NULL);
		if (line_size)
			break;
	}

	if (!line_size)
		return -ENODEV;

	*ret = of_read_number(line_size, 1);
	return 0;
}

static int cache_nr_sets(const struct cache *cache, unsigned int *ret)
{
	const char *propname;
	const __be32 *nr_sets;

	propname = cache_type_info[cache->type].nr_sets_prop;

	nr_sets = of_get_property(cache->ofnode, propname, NULL);
	if (!nr_sets)
		return -ENODEV;

	*ret = of_read_number(nr_sets, 1);
	return 0;
}

static int cache_associativity(const struct cache *cache, unsigned int *ret)
{
	unsigned int line_size;
	unsigned int nr_sets;
	unsigned int size;

	if (cache_nr_sets(cache, &nr_sets))
		goto err;

	/* If the cache is fully associative, there is no need to
	 * check the other properties.
	 */
	if (nr_sets == 1) {
		*ret = 0;
		return 0;
	}

	if (cache_get_line_size(cache, &line_size))
		goto err;
	if (cache_size(cache, &size))
		goto err;

	if (!(nr_sets > 0 && size > 0 && line_size > 0))
		goto err;

	*ret = (size / nr_sets) / line_size;
	return 0;
err:
	return -ENODEV;
}

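/* Worked example for cache_associativity(), using the illustrative
 * values from the device tree fragment above: a 512KB cache with
 * 128-byte lines and 512 sets yields (524288 / 512) / 128 = 8, i.e.
 * an 8-way set-associative cache.
 */
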
/* Helper for dealing with split caches: the two halves of a split
 * cache share one OF node, and the data cache is linked immediately
 * ahead of the instruction cache on the local list (see
 * cache_do_one_devnode_split()).  Given either half, return the first
 * sibling; unified caches are returned unchanged.
 */
static struct cache *cache_find_first_sibling(struct cache *cache)
{
	struct cache *iter;

	if (cache->type == CACHE_TYPE_UNIFIED ||
	    cache->type == CACHE_TYPE_UNIFIED_D)
		return cache;

	list_for_each_entry(iter, &cache_list, list)
		if (iter->ofnode == cache->ofnode && iter->next_local == cache)
			return iter;

	return cache;
}

/* return the first cache on a local list matching node */
static struct cache *cache_lookup_by_node(const struct device_node *node)
{
	struct cache *cache = NULL;
	struct cache *iter;

	list_for_each_entry(iter, &cache_list, list) {
		if (iter->ofnode != node)
			continue;
		cache = cache_find_first_sibling(iter);
		break;
	}

	return cache;
}

static bool cache_node_is_unified(const struct device_node *np)
{
	return of_get_property(np, "cache-unified", NULL);
}

/*
 * Unified caches can have two different sets of tags.  Most embedded
 * systems use cache-size, etc. for the unified cache size, while Open
 * Firmware systems use d-cache-size, etc.  Check on initialization
 * which type we have, and return the appropriate structure type.
 * Assume it's embedded if it isn't Open Firmware.  If a third variant
 * ever appears, entries will be missing from
 * /sys/devices/system/cpu/cpu0/cache/index2/, and this code will need
 * to be extended further.
 */
static int cache_is_unified_d(const struct device_node *np)
{
	return of_get_property(np,
		cache_type_info[CACHE_TYPE_UNIFIED_D].size_prop, NULL) ?
		CACHE_TYPE_UNIFIED_D : CACHE_TYPE_UNIFIED;
}

static struct cache *cache_do_one_devnode_unified(struct device_node *node, int level)
{
	pr_debug("creating L%d ucache for %pOF\n", level, node);

	return new_cache(cache_is_unified_d(node), level, node);
}

static struct cache *cache_do_one_devnode_split(struct device_node *node,
						int level)
{
	struct cache *dcache, *icache;

	pr_debug("creating L%d dcache and icache for %pOF\n", level,
		 node);

	dcache = new_cache(CACHE_TYPE_DATA, level, node);
	icache = new_cache(CACHE_TYPE_INSTRUCTION, level, node);

	if (!dcache || !icache)
		goto err;

	dcache->next_local = icache;

	return dcache;
err:
	release_cache(dcache);
	release_cache(icache);
	return NULL;
}

static struct cache *cache_do_one_devnode(struct device_node *node, int level)
{
	struct cache *cache;

	if (cache_node_is_unified(node))
		cache = cache_do_one_devnode_unified(node, level);
	else
		cache = cache_do_one_devnode_split(node, level);

	return cache;
}

static struct cache *cache_lookup_or_instantiate(struct device_node *node,
						 int level)
{
	struct cache *cache;

	cache = cache_lookup_by_node(node);

	WARN_ONCE(cache && cache->level != level,
		  "cache level mismatch on lookup (got %d, expected %d)\n",
		  cache->level, level);

	if (!cache)
		cache = cache_do_one_devnode(node, level);

	return cache;
}

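/* Append "bigger" (the next cache level) to the end of the local list
 * headed by "smaller", unless it is already linked there.
 */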
static void link_cache_lists(struct cache *smaller, struct cache *bigger)
{
	while (smaller->next_local) {
		if (smaller->next_local == bigger)
			return; /* already linked */
		smaller = smaller->next_local;
	}

	smaller->next_local = bigger;
}

static void do_subsidiary_caches_debugcheck(struct cache *cache)
{
	WARN_ON_ONCE(cache->level != 1);
	WARN_ON_ONCE(!of_node_is_type(cache->ofnode, "cpu"));
}

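/* Starting from a CPU node's L1 cache object, follow the device
 * tree's next-level-cache (a.k.a. l2-cache) phandles via
 * of_find_next_cache_node(), instantiating and linking one cache
 * object per level until the chain ends.
 */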
static void do_subsidiary_caches(struct cache *cache)
{
	struct device_node *subcache_node;
	int level = cache->level;

	do_subsidiary_caches_debugcheck(cache);

	while ((subcache_node = of_find_next_cache_node(cache->ofnode))) {
		struct cache *subcache;

		level++;
		subcache = cache_lookup_or_instantiate(subcache_node, level);
		of_node_put(subcache_node);
		if (!subcache)
			break;

		link_cache_lists(cache, subcache);
		cache = subcache;
	}
}

static struct cache *cache_chain_instantiate(unsigned int cpu_id)
{
	struct device_node *cpu_node;
	struct cache *cpu_cache = NULL;

	pr_debug("creating cache object(s) for CPU %i\n", cpu_id);

	cpu_node = of_get_cpu_node(cpu_id, NULL);
	WARN_ONCE(!cpu_node, "no OF node found for CPU %i\n", cpu_id);
	if (!cpu_node)
		goto out;

	cpu_cache = cache_lookup_or_instantiate(cpu_node, 1);
	if (!cpu_cache)
		goto out;

	do_subsidiary_caches(cpu_cache);

	cache_cpu_set(cpu_cache, cpu_id);
out:
	of_node_put(cpu_node);

	return cpu_cache;
}

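/* Create the "cache" kobject under the CPU's sysfs device directory,
 * i.e. /sys/devices/system/cpu/cpuN/cache, and record it in the
 * per-cpu cache_dir_pcpu pointer.
 */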
static struct cache_dir *cacheinfo_create_cache_dir(unsigned int cpu_id)
{
	struct cache_dir *cache_dir;
	struct device *dev;
	struct kobject *kobj = NULL;

	dev = get_cpu_device(cpu_id);
	WARN_ONCE(!dev, "no dev for CPU %i\n", cpu_id);
	if (!dev)
		goto err;

	kobj = kobject_create_and_add("cache", &dev->kobj);
	if (!kobj)
		goto err;

	cache_dir = kzalloc(sizeof(*cache_dir), GFP_KERNEL);
	if (!cache_dir)
		goto err;

	cache_dir->kobj = kobj;

	WARN_ON_ONCE(per_cpu(cache_dir_pcpu, cpu_id) != NULL);

	per_cpu(cache_dir_pcpu, cpu_id) = cache_dir;

	return cache_dir;
err:
	kobject_put(kobj);
	return NULL;
}

static void cache_index_release(struct kobject *kobj)
{
	struct cache_index_dir *index;

	index = kobj_to_cache_index_dir(kobj);

	pr_debug("freeing index directory for L%d %s cache\n",
		 index->cache->level, cache_type_string(index->cache));

	kfree(index);
}

static ssize_t cache_index_show(struct kobject *k, struct attribute *attr, char *buf)
{
	struct kobj_attribute *kobj_attr;

	kobj_attr = container_of(attr, struct kobj_attribute, attr);

	return kobj_attr->show(k, kobj_attr, buf);
}

static struct cache *index_kobj_to_cache(struct kobject *k)
{
	struct cache_index_dir *index;

	index = kobj_to_cache_index_dir(k);

	return index->cache;
}

static ssize_t size_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
{
	unsigned int size_kb;
	struct cache *cache;

	cache = index_kobj_to_cache(k);

	if (cache_size_kb(cache, &size_kb))
		return -ENODEV;

	return sprintf(buf, "%uK\n", size_kb);
}

static struct kobj_attribute cache_size_attr =
	__ATTR(size, 0444, size_show, NULL);


static ssize_t line_size_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
{
	unsigned int line_size;
	struct cache *cache;

	cache = index_kobj_to_cache(k);

	if (cache_get_line_size(cache, &line_size))
		return -ENODEV;

	return sprintf(buf, "%u\n", line_size);
}

static struct kobj_attribute cache_line_size_attr =
	__ATTR(coherency_line_size, 0444, line_size_show, NULL);

static ssize_t nr_sets_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
{
	unsigned int nr_sets;
	struct cache *cache;

	cache = index_kobj_to_cache(k);

	if (cache_nr_sets(cache, &nr_sets))
		return -ENODEV;

	return sprintf(buf, "%u\n", nr_sets);
}

static struct kobj_attribute cache_nr_sets_attr =
	__ATTR(number_of_sets, 0444, nr_sets_show, NULL);

static ssize_t associativity_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
{
	unsigned int associativity;
	struct cache *cache;

	cache = index_kobj_to_cache(k);

	if (cache_associativity(cache, &associativity))
		return -ENODEV;

	return sprintf(buf, "%u\n", associativity);
}

static struct kobj_attribute cache_assoc_attr =
	__ATTR(ways_of_associativity, 0444, associativity_show, NULL);

static ssize_t type_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
{
	struct cache *cache;

	cache = index_kobj_to_cache(k);

	return sprintf(buf, "%s\n", cache_type_string(cache));
}

static struct kobj_attribute cache_type_attr =
	__ATTR(type, 0444, type_show, NULL);

static ssize_t level_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
{
	struct cache_index_dir *index;
	struct cache *cache;

	index = kobj_to_cache_index_dir(k);
	cache = index->cache;

	return sprintf(buf, "%d\n", cache->level);
}

static struct kobj_attribute cache_level_attr =
	__ATTR(level, 0444, level_show, NULL);

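/* Recover the owning CPU id by walking up the kobject hierarchy:
 * indexN -> cache -> the CPU's device kobject, whose device holds the
 * CPU number in dev->id.
 */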
static unsigned int index_dir_to_cpu(struct cache_index_dir *index)
{
	struct kobject *index_dir_kobj = &index->kobj;
	struct kobject *cache_dir_kobj = index_dir_kobj->parent;
	struct kobject *cpu_dev_kobj = cache_dir_kobj->parent;
	struct device *dev = kobj_to_dev(cpu_dev_kobj);

	return dev->id;
}

/*
 * On big-core systems, each core has two groups of CPUs each of which
 * has its own L1-cache. The thread-siblings which share l1-cache with
 * @cpu can be obtained via cpu_smallcore_mask().
 */
static const struct cpumask *get_big_core_shared_cpu_map(int cpu, struct cache *cache)
{
	if (cache->level == 1)
		return cpu_smallcore_mask(cpu);

	return &cache->shared_cpu_map;
}

static ssize_t shared_cpu_map_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
{
	struct cache_index_dir *index;
	struct cache *cache;
	const struct cpumask *mask;
	int ret, cpu;

	index = kobj_to_cache_index_dir(k);
	cache = index->cache;

	if (has_big_cores) {
		cpu = index_dir_to_cpu(index);
		mask = get_big_core_shared_cpu_map(cpu, cache);
	} else {
		mask = &cache->shared_cpu_map;
	}

	/* the "%*pb\n" format already supplies the trailing newline;
	 * appending another by hand would double it up */
	ret = scnprintf(buf, PAGE_SIZE, "%*pb\n",
			cpumask_pr_args(mask));
	return ret;
}

static struct kobj_attribute cache_shared_cpu_map_attr =
	__ATTR(shared_cpu_map, 0444, shared_cpu_map_show, NULL);

/* Attributes which should always be created -- the kobject/sysfs core
 * does this automatically via kobj_type->default_attrs.  This is the
 * minimum data required to uniquely identify a cache.
 */
static struct attribute *cache_index_default_attrs[] = {
	&cache_type_attr.attr,
	&cache_level_attr.attr,
	&cache_shared_cpu_map_attr.attr,
	NULL,
};

/* Attributes which should be created if the cache device node has the
 * right properties -- see cacheinfo_create_index_opt_attrs
 */
static struct kobj_attribute *cache_index_opt_attrs[] = {
	&cache_size_attr,
	&cache_line_size_attr,
	&cache_nr_sets_attr,
	&cache_assoc_attr,
};

static const struct sysfs_ops cache_index_ops = {
	.show = cache_index_show,
};

static struct kobj_type cache_index_type = {
	.release = cache_index_release,
	.sysfs_ops = &cache_index_ops,
	.default_attrs = cache_index_default_attrs,
};

static void cacheinfo_create_index_opt_attrs(struct cache_index_dir *dir)
{
	const char *cache_type;
	struct cache *cache;
	char *buf;
	int i;

	buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!buf)
		return;

	cache = dir->cache;
	cache_type = cache_type_string(cache);

	/* We don't want to create an attribute that can't provide a
	 * meaningful value.  Check the return value of each optional
	 * attribute's ->show method before registering the
	 * attribute.
	 */
	for (i = 0; i < ARRAY_SIZE(cache_index_opt_attrs); i++) {
		struct kobj_attribute *attr;
		ssize_t rc;

		attr = cache_index_opt_attrs[i];

		rc = attr->show(&dir->kobj, attr, buf);
		if (rc <= 0) {
			pr_debug("not creating %s attribute for "
				 "%pOF(%s) (rc = %zd)\n",
				 attr->attr.name, cache->ofnode,
				 cache_type, rc);
			continue;
		}
		if (sysfs_create_file(&dir->kobj, &attr->attr))
			pr_debug("could not create %s attribute for %pOF(%s)\n",
				 attr->attr.name, cache->ofnode, cache_type);
	}

	kfree(buf);
}

static void cacheinfo_create_index_dir(struct cache *cache, int index,
				       struct cache_dir *cache_dir)
{
	struct cache_index_dir *index_dir;
	int rc;

	index_dir = kzalloc(sizeof(*index_dir), GFP_KERNEL);
	if (!index_dir)
		return;

	index_dir->cache = cache;

	rc = kobject_init_and_add(&index_dir->kobj, &cache_index_type,
				  cache_dir->kobj, "index%d", index);
	if (rc) {
		kobject_put(&index_dir->kobj);
		return;
	}

	index_dir->next = cache_dir->index;
	cache_dir->index = index_dir;

	cacheinfo_create_index_opt_attrs(index_dir);
}

static void cacheinfo_sysfs_populate(unsigned int cpu_id,
				     struct cache *cache_list)
{
	struct cache_dir *cache_dir;
	struct cache *cache;
	int index = 0;

	cache_dir = cacheinfo_create_cache_dir(cpu_id);
	if (!cache_dir)
		return;

	cache = cache_list;
	while (cache) {
		cacheinfo_create_index_dir(cache, index, cache_dir);
		index++;
		cache = cache->next_local;
	}
}

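/* The resulting sysfs layout for each online CPU looks like this
 * (illustrative; index assignment follows the local list order, e.g.
 * L1d -> L1i -> L2 on a split-L1 system):
 *
 *	/sys/devices/system/cpu/cpuN/cache/index0/
 *	/sys/devices/system/cpu/cpuN/cache/index1/
 *	/sys/devices/system/cpu/cpuN/cache/index2/
 *	...
 *
 * with each indexM directory containing the type/level/shared_cpu_map
 * attributes plus whichever optional attributes probed successfully.
 */
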
void cacheinfo_cpu_online(unsigned int cpu_id)
{
	struct cache *cache;

	cache = cache_chain_instantiate(cpu_id);
	if (!cache)
		return;

	cacheinfo_sysfs_populate(cpu_id, cache);
}

/* functions needed to remove cache entry for cpu offline or suspend/resume */

#if (defined(CONFIG_PPC_PSERIES) && defined(CONFIG_SUSPEND)) || \
    defined(CONFIG_HOTPLUG_CPU)

static struct cache *cache_lookup_by_cpu(unsigned int cpu_id)
{
	struct device_node *cpu_node;
	struct cache *cache;

	cpu_node = of_get_cpu_node(cpu_id, NULL);
	WARN_ONCE(!cpu_node, "no OF node found for CPU %i\n", cpu_id);
	if (!cpu_node)
		return NULL;

	cache = cache_lookup_by_node(cpu_node);
	of_node_put(cpu_node);

	return cache;
}

static void remove_index_dirs(struct cache_dir *cache_dir)
{
	struct cache_index_dir *index;

	index = cache_dir->index;

	while (index) {
		struct cache_index_dir *next;

		next = index->next;
		kobject_put(&index->kobj);
		index = next;
	}
}

static void remove_cache_dir(struct cache_dir *cache_dir)
{
	remove_index_dirs(cache_dir);

	/* Remove cache dir from sysfs */
	kobject_del(cache_dir->kobj);

	kobject_put(cache_dir->kobj);

	kfree(cache_dir);
}

static void cache_cpu_clear(struct cache *cache, int cpu)
{
	while (cache) {
		struct cache *next = cache->next_local;

		WARN_ONCE(!cpumask_test_cpu(cpu, &cache->shared_cpu_map),
			  "CPU %i not accounted in %pOF(%s)\n",
			  cpu, cache->ofnode,
			  cache_type_string(cache));

		cpumask_clear_cpu(cpu, &cache->shared_cpu_map);

		/* Release the cache object if all the cpus using it
		 * are offline */
		if (cpumask_empty(&cache->shared_cpu_map))
			release_cache(cache);

		cache = next;
	}
}

void cacheinfo_cpu_offline(unsigned int cpu_id)
{
	struct cache_dir *cache_dir;
	struct cache *cache;

	/* Prevent userspace from seeing inconsistent state - remove
	 * the sysfs hierarchy first */
	cache_dir = per_cpu(cache_dir_pcpu, cpu_id);

	/* careful, sysfs population may have failed */
	if (cache_dir)
		remove_cache_dir(cache_dir);

	per_cpu(cache_dir_pcpu, cpu_id) = NULL;

	/* clear the CPU's bit in its cache chain, possibly freeing
	 * cache objects */
	cache = cache_lookup_by_cpu(cpu_id);
	if (cache)
		cache_cpu_clear(cache, cpu_id);
}

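/* Tear down and rebuild the cache hierarchy for all online CPUs, with
 * cpus_read_lock() held by the caller; used, for example, around
 * pseries partition suspend/migration, where the underlying device
 * tree properties may change.
 */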
void cacheinfo_teardown(void)
{
	unsigned int cpu;

	lockdep_assert_cpus_held();

	for_each_online_cpu(cpu)
		cacheinfo_cpu_offline(cpu);
}

void cacheinfo_rebuild(void)
{
	unsigned int cpu;

	lockdep_assert_cpus_held();

	for_each_online_cpu(cpu)
		cacheinfo_cpu_online(cpu);
}

#endif /* (CONFIG_PPC_PSERIES && CONFIG_SUSPEND) || CONFIG_HOTPLUG_CPU */