// SPDX-License-Identifier: GPL-2.0
/*
 * cacheinfo support - processor cache information via sysfs
 *
 * Based on arch/x86/kernel/cpu/intel_cacheinfo.c
 * Author: Sudeep Holla <sudeep.holla@arm.com>
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/acpi.h>
#include <linux/bitops.h>
#include <linux/cacheinfo.h>
#include <linux/compiler.h>
#include <linux/cpu.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/of_device.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/sysfs.h>

/* pointer to per cpu cacheinfo */
static DEFINE_PER_CPU(struct cpu_cacheinfo, ci_cpu_cacheinfo);
#define ci_cacheinfo(cpu)	(&per_cpu(ci_cpu_cacheinfo, cpu))
#define cache_leaves(cpu)	(ci_cacheinfo(cpu)->num_leaves)
#define per_cpu_cacheinfo(cpu)	(ci_cacheinfo(cpu)->info_list)
#define per_cpu_cacheinfo_idx(cpu, idx)		\
				(per_cpu_cacheinfo(cpu) + (idx))

struct cpu_cacheinfo *get_cpu_cacheinfo(unsigned int cpu)
{
	return ci_cacheinfo(cpu);
}

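/*
 * Illustrative sketch (not part of this file): a consumer can walk a
 * CPU's cache leaves via get_cpu_cacheinfo(), assuming the cacheinfo
 * for @cpu has already been detected and populated:
 *
 *	struct cpu_cacheinfo *cci = get_cpu_cacheinfo(cpu);
 *	unsigned int i;
 *
 *	for (i = 0; i < cci->num_leaves; i++) {
 *		struct cacheinfo *leaf = cci->info_list + i;
 *
 *		pr_info("cpu%u: L%u cache, %u bytes\n",
 *			cpu, leaf->level, leaf->size);
 *	}
 */
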
static inline bool cache_leaves_are_shared(struct cacheinfo *this_leaf,
					   struct cacheinfo *sib_leaf)
{
	/*
	 * For non-DT/ACPI systems, assume unique level 1 caches and
	 * system-wide shared caches for all other levels. This is used
	 * only if the arch-specific code has not populated shared_cpu_map.
	 */
	if (!(IS_ENABLED(CONFIG_OF) || IS_ENABLED(CONFIG_ACPI)))
		return this_leaf->level != 1;

	if ((sib_leaf->attributes & CACHE_ID) &&
	    (this_leaf->attributes & CACHE_ID))
		return sib_leaf->id == this_leaf->id;

	return sib_leaf->fw_token == this_leaf->fw_token;
}

bool last_level_cache_is_valid(unsigned int cpu)
{
	struct cacheinfo *llc;

	if (!cache_leaves(cpu))
		return false;

	llc = per_cpu_cacheinfo_idx(cpu, cache_leaves(cpu) - 1);

	return (llc->attributes & CACHE_ID) || !!llc->fw_token;
}

bool last_level_cache_is_shared(unsigned int cpu_x, unsigned int cpu_y)
{
	struct cacheinfo *llc_x, *llc_y;

	if (!last_level_cache_is_valid(cpu_x) ||
	    !last_level_cache_is_valid(cpu_y))
		return false;

	llc_x = per_cpu_cacheinfo_idx(cpu_x, cache_leaves(cpu_x) - 1);
	llc_y = per_cpu_cacheinfo_idx(cpu_y, cache_leaves(cpu_y) - 1);

	return cache_leaves_are_shared(llc_x, llc_y);
}

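/*
 * Illustrative sketch (not part of this file): a topology or scheduler
 * layer could use last_level_cache_is_shared() to decide whether a
 * migration between two CPUs stays within one last level cache. The
 * helper below is hypothetical:
 *
 *	static bool migration_stays_in_llc(unsigned int src, unsigned int dst)
 *	{
 *		return src == dst || last_level_cache_is_shared(src, dst);
 *	}
 */
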
#ifdef CONFIG_OF
/* OF properties to query for a given cache type */
struct cache_type_info {
	const char *size_prop;
	const char *line_size_props[2];
	const char *nr_sets_prop;
};

static const struct cache_type_info cache_type_info[] = {
	{
		.size_prop       = "cache-size",
		.line_size_props = { "cache-line-size",
				     "cache-block-size", },
		.nr_sets_prop    = "cache-sets",
	}, {
		.size_prop       = "i-cache-size",
		.line_size_props = { "i-cache-line-size",
				     "i-cache-block-size", },
		.nr_sets_prop    = "i-cache-sets",
	}, {
		.size_prop       = "d-cache-size",
		.line_size_props = { "d-cache-line-size",
				     "d-cache-block-size", },
		.nr_sets_prop    = "d-cache-sets",
	},
};

static inline int get_cacheinfo_idx(enum cache_type type)
{
	if (type == CACHE_TYPE_UNIFIED)
		return 0;
	return type;
}

static void cache_size(struct cacheinfo *this_leaf, struct device_node *np)
{
	const char *propname;
	int ct_idx;

	ct_idx = get_cacheinfo_idx(this_leaf->type);
	propname = cache_type_info[ct_idx].size_prop;

	of_property_read_u32(np, propname, &this_leaf->size);
}

/* not cache_line_size() because that's a macro in include/linux/cache.h */
static void cache_get_line_size(struct cacheinfo *this_leaf,
				struct device_node *np)
{
	int i, lim, ct_idx;

	ct_idx = get_cacheinfo_idx(this_leaf->type);
	lim = ARRAY_SIZE(cache_type_info[ct_idx].line_size_props);

	for (i = 0; i < lim; i++) {
		int ret;
		u32 line_size;
		const char *propname;

		propname = cache_type_info[ct_idx].line_size_props[i];
		ret = of_property_read_u32(np, propname, &line_size);
		if (!ret) {
			this_leaf->coherency_line_size = line_size;
			break;
		}
	}
}

static void cache_nr_sets(struct cacheinfo *this_leaf, struct device_node *np)
{
	const char *propname;
	int ct_idx;

	ct_idx = get_cacheinfo_idx(this_leaf->type);
	propname = cache_type_info[ct_idx].nr_sets_prop;

	of_property_read_u32(np, propname, &this_leaf->number_of_sets);
}

static void cache_associativity(struct cacheinfo *this_leaf)
{
	unsigned int line_size = this_leaf->coherency_line_size;
	unsigned int nr_sets = this_leaf->number_of_sets;
	unsigned int size = this_leaf->size;

	/*
	 * If the cache is fully associative (nr_sets == 1), there is no
	 * need to compute the ways from the other properties.
	 */
	if (nr_sets > 1 && size > 0 && line_size > 0)
		this_leaf->ways_of_associativity = (size / nr_sets) / line_size;
}

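/*
 * Worked example (illustrative): for a 32 KiB cache with 128 sets and
 * 64-byte lines, cache_associativity() computes
 *
 *	ways = (size / nr_sets) / line_size
 *	     = (32768 / 128) / 64
 *	     = 4
 *
 * i.e. a 4-way set-associative cache. With nr_sets == 1 the cache is
 * fully associative and ways_of_associativity is left at 0.
 */
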
static bool cache_node_is_unified(struct cacheinfo *this_leaf,
				  struct device_node *np)
{
	return of_property_read_bool(np, "cache-unified");
}

static void cache_of_set_props(struct cacheinfo *this_leaf,
			       struct device_node *np)
{
	/*
	 * init_cache_level() must set up the cache level correctly,
	 * overriding the architecturally specified levels, so if the
	 * type is still NONE at this stage, the node should describe
	 * a unified cache.
	 */
	if (this_leaf->type == CACHE_TYPE_NOCACHE &&
	    cache_node_is_unified(this_leaf, np))
		this_leaf->type = CACHE_TYPE_UNIFIED;
	cache_size(this_leaf, np);
	cache_get_line_size(this_leaf, np);
	cache_nr_sets(this_leaf, np);
	cache_associativity(this_leaf);
}

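/*
 * Illustrative DT fragment (an assumption, not taken from this file)
 * showing the standard cache bindings the walk below consumes: a CPU
 * node with per-level L1 properties, pointing at a unified L2 via
 * next-level-cache:
 *
 *	cpu@0 {
 *		device_type = "cpu";
 *		d-cache-size = <0x8000>;
 *		d-cache-line-size = <64>;
 *		d-cache-sets = <128>;
 *		next-level-cache = <&l2>;
 *	};
 *
 *	l2: l2-cache {
 *		compatible = "cache";
 *		cache-unified;
 *		cache-level = <2>;
 *		cache-size = <0x40000>;
 *		cache-line-size = <64>;
 *		cache-sets = <1024>;
 *	};
 */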
static int cache_setup_of_node(unsigned int cpu)
{
	struct device_node *np, *prev;
	struct cacheinfo *this_leaf;
	unsigned int index = 0;

	np = of_cpu_device_node_get(cpu);
	if (!np) {
		pr_err("Failed to find cpu%d device node\n", cpu);
		return -ENOENT;
	}

	prev = np;

	while (index < cache_leaves(cpu)) {
		this_leaf = per_cpu_cacheinfo_idx(cpu, index);
		if (this_leaf->level != 1) {
			np = of_find_next_cache_node(np);
			of_node_put(prev);
			prev = np;
			if (!np)
				break;
		}
		cache_of_set_props(this_leaf, np);
		this_leaf->fw_token = np;
		index++;
	}

	of_node_put(np);

	if (index != cache_leaves(cpu)) /* not all OF nodes populated */
		return -ENOENT;

	return 0;
}
#else
static inline int cache_setup_of_node(unsigned int cpu) { return 0; }
#endif

int __weak cache_setup_acpi(unsigned int cpu)
{
	return -ENOTSUPP;
}

unsigned int coherency_max_size;

static int cache_setup_properties(unsigned int cpu)
{
	int ret = 0;

	if (of_have_populated_dt())
		ret = cache_setup_of_node(cpu);
	else if (!acpi_disabled)
		ret = cache_setup_acpi(cpu);

	return ret;
}

static int cache_shared_cpu_map_setup(unsigned int cpu)
{
	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
	struct cacheinfo *this_leaf, *sib_leaf;
	unsigned int index;
	int ret = 0;

	if (this_cpu_ci->cpu_map_populated)
		return 0;

	/*
	 * Skip setting up the cache properties if the LLC is already
	 * valid; in that case the cache attributes were populated early,
	 * before all the CPUs were brought online, and only the shared
	 * cpu_map needs updating here.
	 */
	if (!last_level_cache_is_valid(cpu)) {
		ret = cache_setup_properties(cpu);
		if (ret)
			return ret;
	}

	for (index = 0; index < cache_leaves(cpu); index++) {
		unsigned int i;

		this_leaf = per_cpu_cacheinfo_idx(cpu, index);

		cpumask_set_cpu(cpu, &this_leaf->shared_cpu_map);
		for_each_online_cpu(i) {
			struct cpu_cacheinfo *sib_cpu_ci = get_cpu_cacheinfo(i);

			if (i == cpu || !sib_cpu_ci->info_list)
				continue; /* skip if itself or no cacheinfo */

			sib_leaf = per_cpu_cacheinfo_idx(i, index);
			if (cache_leaves_are_shared(this_leaf, sib_leaf)) {
				cpumask_set_cpu(cpu, &sib_leaf->shared_cpu_map);
				cpumask_set_cpu(i, &this_leaf->shared_cpu_map);
			}
		}
		/* record the maximum cache line size */
		if (this_leaf->coherency_line_size > coherency_max_size)
			coherency_max_size = this_leaf->coherency_line_size;
	}

	return 0;
}
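
/*
 * Illustrative result (an assumed topology, not derived from this file):
 * on a 4-CPU system with per-CPU L1 caches and one L2 shared by CPUs
 * {0,1} and another by CPUs {2,3}, the setup above yields
 *
 *	L1 leaves: shared_cpu_map = 1 << cpu     (0x1, 0x2, 0x4, 0x8)
 *	L2 leaves: shared_cpu_map = 0x3 on cpu0/1, 0xc on cpu2/3
 */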

static void cache_shared_cpu_map_remove(unsigned int cpu)
{
	struct cacheinfo *this_leaf, *sib_leaf;
	unsigned int sibling, index;

	for (index = 0; index < cache_leaves(cpu); index++) {
		this_leaf = per_cpu_cacheinfo_idx(cpu, index);
		for_each_cpu(sibling, &this_leaf->shared_cpu_map) {
			struct cpu_cacheinfo *sib_cpu_ci =
						get_cpu_cacheinfo(sibling);

			if (sibling == cpu || !sib_cpu_ci->info_list)
				continue; /* skip if itself or no cacheinfo */

			sib_leaf = per_cpu_cacheinfo_idx(sibling, index);
			cpumask_clear_cpu(cpu, &sib_leaf->shared_cpu_map);
			cpumask_clear_cpu(sibling, &this_leaf->shared_cpu_map);
		}
	}
}

static void free_cache_attributes(unsigned int cpu)
{
	if (!per_cpu_cacheinfo(cpu))
		return;

	cache_shared_cpu_map_remove(cpu);

	kfree(per_cpu_cacheinfo(cpu));
	per_cpu_cacheinfo(cpu) = NULL;
	cache_leaves(cpu) = 0;
}

int __weak init_cache_level(unsigned int cpu)
{
	return -ENOENT;
}

int __weak populate_cache_leaves(unsigned int cpu)
{
	return -ENOENT;
}

int detect_cache_attributes(unsigned int cpu)
{
	int ret;

	/*
	 * Since early detection of the cacheinfo is allowed via this
	 * function and it also gets called as a CPU hotplug callback via
	 * cacheinfo_cpu_online(), the initialisation can be skipped and
	 * only the CPU maps updated; the CPU online status will already
	 * have been updated when called via the cacheinfo_cpu_online()
	 * path.
	 */
	if (per_cpu_cacheinfo(cpu))
		goto update_cpu_map;

	if (init_cache_level(cpu) || !cache_leaves(cpu))
		return -ENOENT;

	per_cpu_cacheinfo(cpu) = kcalloc(cache_leaves(cpu),
					 sizeof(struct cacheinfo), GFP_ATOMIC);
	if (per_cpu_cacheinfo(cpu) == NULL) {
		cache_leaves(cpu) = 0;
		return -ENOMEM;
	}

	/*
	 * populate_cache_leaves() may completely set up the cache leaves
	 * and shared_cpu_map, or it may leave them partially set up.
	 */
	ret = populate_cache_leaves(cpu);
	if (ret)
		goto free_ci;

update_cpu_map:
	/*
	 * For systems using DT for the cache hierarchy, fw_token and
	 * shared_cpu_map will be set up here only if they have not been
	 * populated already.
	 */
	ret = cache_shared_cpu_map_setup(cpu);
	if (ret) {
		pr_warn("Unable to detect cache hierarchy for CPU %d\n", cpu);
		goto free_ci;
	}

	return 0;

free_ci:
	free_cache_attributes(cpu);
	return ret;
}
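
/*
 * Illustrative sketch (not part of this file): arch code may invoke
 * detect_cache_attributes() early, e.g. from its SMP bringup path, so
 * that cacheinfo is available before the sysfs nodes are created; the
 * function name below is hypothetical:
 *
 *	static void arch_init_cacheinfo(unsigned int cpu)
 *	{
 *		if (detect_cache_attributes(cpu))
 *			pr_warn("cpu%u: no cacheinfo detected\n", cpu);
 *	}
 */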

/* pointer to cpuX/cache device */
static DEFINE_PER_CPU(struct device *, ci_cache_dev);
#define per_cpu_cache_dev(cpu)	(per_cpu(ci_cache_dev, cpu))

static cpumask_t cache_dev_map;

/* pointer to array of devices for cpuX/cache/indexY */
static DEFINE_PER_CPU(struct device **, ci_index_dev);
#define per_cpu_index_dev(cpu)	(per_cpu(ci_index_dev, cpu))
#define per_cache_index_dev(cpu, idx)	((per_cpu_index_dev(cpu))[idx])

#define show_one(file_name, object)				\
static ssize_t file_name##_show(struct device *dev,		\
		struct device_attribute *attr, char *buf)	\
{								\
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);	\
	return sysfs_emit(buf, "%u\n", this_leaf->object);	\
}

show_one(id, id);
show_one(level, level);
show_one(coherency_line_size, coherency_line_size);
show_one(number_of_sets, number_of_sets);
show_one(physical_line_partition, physical_line_partition);
show_one(ways_of_associativity, ways_of_associativity);
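
/*
 * For reference (expansion sketch): show_one(level, level) above expands
 * to roughly
 *
 *	static ssize_t level_show(struct device *dev,
 *			struct device_attribute *attr, char *buf)
 *	{
 *		struct cacheinfo *this_leaf = dev_get_drvdata(dev);
 *		return sysfs_emit(buf, "%u\n", this_leaf->level);
 *	}
 *
 * which backs the DEVICE_ATTR_RO(level) declaration further down.
 */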

static ssize_t size_show(struct device *dev,
			 struct device_attribute *attr, char *buf)
{
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%uK\n", this_leaf->size >> 10);
}

static ssize_t shared_cpu_map_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);
	const struct cpumask *mask = &this_leaf->shared_cpu_map;

	return sysfs_emit(buf, "%*pb\n", nr_cpu_ids, mask);
}

static ssize_t shared_cpu_list_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);
	const struct cpumask *mask = &this_leaf->shared_cpu_map;

	return sysfs_emit(buf, "%*pbl\n", nr_cpu_ids, mask);
}

static ssize_t type_show(struct device *dev,
			 struct device_attribute *attr, char *buf)
{
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);
	const char *output;

	switch (this_leaf->type) {
	case CACHE_TYPE_DATA:
		output = "Data";
		break;
	case CACHE_TYPE_INST:
		output = "Instruction";
		break;
	case CACHE_TYPE_UNIFIED:
		output = "Unified";
		break;
	default:
		return -EINVAL;
	}

	return sysfs_emit(buf, "%s\n", output);
}

static ssize_t allocation_policy_show(struct device *dev,
				      struct device_attribute *attr, char *buf)
{
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);
	unsigned int ci_attr = this_leaf->attributes;
	const char *output;

	if ((ci_attr & CACHE_READ_ALLOCATE) && (ci_attr & CACHE_WRITE_ALLOCATE))
		output = "ReadWriteAllocate";
	else if (ci_attr & CACHE_READ_ALLOCATE)
		output = "ReadAllocate";
	else if (ci_attr & CACHE_WRITE_ALLOCATE)
		output = "WriteAllocate";
	else
		return 0;

	return sysfs_emit(buf, "%s\n", output);
}

static ssize_t write_policy_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);
	unsigned int ci_attr = this_leaf->attributes;
	int n = 0;

	if (ci_attr & CACHE_WRITE_THROUGH)
		n = sysfs_emit(buf, "WriteThrough\n");
	else if (ci_attr & CACHE_WRITE_BACK)
		n = sysfs_emit(buf, "WriteBack\n");
	return n;
}

static DEVICE_ATTR_RO(id);
static DEVICE_ATTR_RO(level);
static DEVICE_ATTR_RO(type);
static DEVICE_ATTR_RO(coherency_line_size);
static DEVICE_ATTR_RO(ways_of_associativity);
static DEVICE_ATTR_RO(number_of_sets);
static DEVICE_ATTR_RO(size);
static DEVICE_ATTR_RO(allocation_policy);
static DEVICE_ATTR_RO(write_policy);
static DEVICE_ATTR_RO(shared_cpu_map);
static DEVICE_ATTR_RO(shared_cpu_list);
static DEVICE_ATTR_RO(physical_line_partition);

static struct attribute *cache_default_attrs[] = {
	&dev_attr_id.attr,
	&dev_attr_type.attr,
	&dev_attr_level.attr,
	&dev_attr_shared_cpu_map.attr,
	&dev_attr_shared_cpu_list.attr,
	&dev_attr_coherency_line_size.attr,
	&dev_attr_ways_of_associativity.attr,
	&dev_attr_number_of_sets.attr,
	&dev_attr_size.attr,
	&dev_attr_allocation_policy.attr,
	&dev_attr_write_policy.attr,
	&dev_attr_physical_line_partition.attr,
	NULL
};

static umode_t
cache_default_attrs_is_visible(struct kobject *kobj,
			       struct attribute *attr, int unused)
{
	struct device *dev = kobj_to_dev(kobj);
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);
	const struct cpumask *mask = &this_leaf->shared_cpu_map;
	umode_t mode = attr->mode;

	if ((attr == &dev_attr_id.attr) && (this_leaf->attributes & CACHE_ID))
		return mode;
	if ((attr == &dev_attr_type.attr) && this_leaf->type)
		return mode;
	if ((attr == &dev_attr_level.attr) && this_leaf->level)
		return mode;
	if ((attr == &dev_attr_shared_cpu_map.attr) && !cpumask_empty(mask))
		return mode;
	if ((attr == &dev_attr_shared_cpu_list.attr) && !cpumask_empty(mask))
		return mode;
	if ((attr == &dev_attr_coherency_line_size.attr) &&
	    this_leaf->coherency_line_size)
		return mode;
	if ((attr == &dev_attr_ways_of_associativity.attr) &&
	    this_leaf->size) /* allow 0 = full associativity */
		return mode;
	if ((attr == &dev_attr_number_of_sets.attr) &&
	    this_leaf->number_of_sets)
		return mode;
	if ((attr == &dev_attr_size.attr) && this_leaf->size)
		return mode;
	if ((attr == &dev_attr_write_policy.attr) &&
	    (this_leaf->attributes & CACHE_WRITE_POLICY_MASK))
		return mode;
	if ((attr == &dev_attr_allocation_policy.attr) &&
	    (this_leaf->attributes & CACHE_ALLOCATE_POLICY_MASK))
		return mode;
	if ((attr == &dev_attr_physical_line_partition.attr) &&
	    this_leaf->physical_line_partition)
		return mode;

	return 0;
}

static const struct attribute_group cache_default_group = {
	.attrs = cache_default_attrs,
	.is_visible = cache_default_attrs_is_visible,
};

static const struct attribute_group *cache_default_groups[] = {
	&cache_default_group,
	NULL,
};

static const struct attribute_group *cache_private_groups[] = {
	&cache_default_group,
	NULL, /* Placeholder for the private group */
	NULL,
};

const struct attribute_group *
__weak cache_get_priv_group(struct cacheinfo *this_leaf)
{
	return NULL;
}

static const struct attribute_group **
cache_get_attribute_groups(struct cacheinfo *this_leaf)
{
	const struct attribute_group *priv_group =
			cache_get_priv_group(this_leaf);

	if (!priv_group)
		return cache_default_groups;

	if (!cache_private_groups[1])
		cache_private_groups[1] = priv_group;

	return cache_private_groups;
}
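
/*
 * Illustrative sketch (hypothetical, not from this file): an arch can
 * provide a non-weak cache_get_priv_group() to expose extra private
 * attributes next to the defaults; "some_arch_thing" is a placeholder
 * attribute name, not a real one:
 *
 *	static struct attribute *arch_cache_attrs[] = {
 *		&dev_attr_some_arch_thing.attr,
 *		NULL
 *	};
 *	ATTRIBUTE_GROUPS(arch_cache);
 *
 *	const struct attribute_group *
 *	cache_get_priv_group(struct cacheinfo *this_leaf)
 *	{
 *		return &arch_cache_group;
 *	}
 */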

/* Add/Remove cache interface for CPU device */
static void cpu_cache_sysfs_exit(unsigned int cpu)
{
	int i;
	struct device *ci_dev;

	if (per_cpu_index_dev(cpu)) {
		for (i = 0; i < cache_leaves(cpu); i++) {
			ci_dev = per_cache_index_dev(cpu, i);
			if (!ci_dev)
				continue;
			device_unregister(ci_dev);
		}
		kfree(per_cpu_index_dev(cpu));
		per_cpu_index_dev(cpu) = NULL;
	}
	device_unregister(per_cpu_cache_dev(cpu));
	per_cpu_cache_dev(cpu) = NULL;
}

static int cpu_cache_sysfs_init(unsigned int cpu)
{
	struct device *dev = get_cpu_device(cpu);

	if (per_cpu_cacheinfo(cpu) == NULL)
		return -ENOENT;

	per_cpu_cache_dev(cpu) = cpu_device_create(dev, NULL, NULL, "cache");
	if (IS_ERR(per_cpu_cache_dev(cpu)))
		return PTR_ERR(per_cpu_cache_dev(cpu));

	/* Allocate all required memory */
	per_cpu_index_dev(cpu) = kcalloc(cache_leaves(cpu),
					 sizeof(struct device *), GFP_KERNEL);
	if (unlikely(per_cpu_index_dev(cpu) == NULL))
		goto err_out;

	return 0;

err_out:
	cpu_cache_sysfs_exit(cpu);
	return -ENOMEM;
}

static int cache_add_dev(unsigned int cpu)
{
	unsigned int i;
	int rc;
	struct device *ci_dev, *parent;
	struct cacheinfo *this_leaf;
	const struct attribute_group **cache_groups;

	rc = cpu_cache_sysfs_init(cpu);
	if (unlikely(rc < 0))
		return rc;

	parent = per_cpu_cache_dev(cpu);
	for (i = 0; i < cache_leaves(cpu); i++) {
		this_leaf = per_cpu_cacheinfo_idx(cpu, i);
		if (this_leaf->disable_sysfs)
			continue;
		if (this_leaf->type == CACHE_TYPE_NOCACHE)
			break;
		cache_groups = cache_get_attribute_groups(this_leaf);
		ci_dev = cpu_device_create(parent, this_leaf, cache_groups,
					   "index%1u", i);
		if (IS_ERR(ci_dev)) {
			rc = PTR_ERR(ci_dev);
			goto err;
		}
		per_cache_index_dev(cpu, i) = ci_dev;
	}
	cpumask_set_cpu(cpu, &cache_dev_map);

	return 0;
err:
	cpu_cache_sysfs_exit(cpu);
	return rc;
}

static int cacheinfo_cpu_online(unsigned int cpu)
{
	int rc = detect_cache_attributes(cpu);

	if (rc)
		return rc;
	rc = cache_add_dev(cpu);
	if (rc)
		free_cache_attributes(cpu);
	return rc;
}

static int cacheinfo_cpu_pre_down(unsigned int cpu)
{
	if (cpumask_test_and_clear_cpu(cpu, &cache_dev_map))
		cpu_cache_sysfs_exit(cpu);

	free_cache_attributes(cpu);
	return 0;
}

static int __init cacheinfo_sysfs_init(void)
{
	return cpuhp_setup_state(CPUHP_AP_BASE_CACHEINFO_ONLINE,
				 "base/cacheinfo:online",
				 cacheinfo_cpu_online, cacheinfo_cpu_pre_down);
}
device_initcall(cacheinfo_sysfs_init);
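
/*
 * Resulting sysfs layout (illustrative values, an assumption rather than
 * output captured from a real system):
 *
 *	/sys/devices/system/cpu/cpu0/cache/index0/type   -> "Data"
 *	/sys/devices/system/cpu/cpu0/cache/index0/level  -> "1"
 *	/sys/devices/system/cpu/cpu0/cache/index0/size   -> "32K"
 *	/sys/devices/system/cpu/cpu0/cache/index2/shared_cpu_list -> "0-3"
 */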