/*
 * cacheinfo support - processor cache information via sysfs
 *
 * Based on arch/x86/kernel/cpu/intel_cacheinfo.c
 * Author: Sudeep Holla <sudeep.holla@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
 * kind, whether express or implied; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/acpi.h>
#include <linux/bitops.h>
#include <linux/cacheinfo.h>
#include <linux/compiler.h>
#include <linux/cpu.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/of.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/sysfs.h>

/* pointer to per cpu cacheinfo */
static DEFINE_PER_CPU(struct cpu_cacheinfo, ci_cpu_cacheinfo);
#define ci_cacheinfo(cpu)	(&per_cpu(ci_cpu_cacheinfo, cpu))
#define cache_leaves(cpu)	(ci_cacheinfo(cpu)->num_leaves)
#define per_cpu_cacheinfo(cpu)	(ci_cacheinfo(cpu)->info_list)

struct cpu_cacheinfo *get_cpu_cacheinfo(unsigned int cpu)
{
	return ci_cacheinfo(cpu);
}

#ifdef CONFIG_OF
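/*
 * Walk the device tree starting from the CPU node itself, following
 * of_find_next_cache_node() for each successive level, and attach the
 * matching device_node to every cache leaf. Level 1 leaves are described
 * by the CPU node directly, hence the of_node_get() on the cpu node.
 */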
static int cache_setup_of_node(unsigned int cpu)
{
	struct device_node *np;
	struct cacheinfo *this_leaf;
	struct device *cpu_dev = get_cpu_device(cpu);
	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
	unsigned int index = 0;

	/* skip if of_node is already populated */
	if (this_cpu_ci->info_list->of_node)
		return 0;

	if (!cpu_dev) {
		pr_err("No cpu device for CPU %d\n", cpu);
		return -ENODEV;
	}
	np = cpu_dev->of_node;
	if (!np) {
		pr_err("Failed to find cpu%d device node\n", cpu);
		return -ENOENT;
	}

	while (index < cache_leaves(cpu)) {
		this_leaf = this_cpu_ci->info_list + index;
		if (this_leaf->level != 1)
			np = of_find_next_cache_node(np);
		else
			np = of_node_get(np); /* cpu node itself */
		if (!np)
			break;
		this_leaf->of_node = np;
		index++;
	}

	if (index != cache_leaves(cpu)) /* not all OF nodes populated */
		return -ENOENT;

	return 0;
}

static inline bool cache_leaves_are_shared(struct cacheinfo *this_leaf,
					   struct cacheinfo *sib_leaf)
{
	return sib_leaf->of_node == this_leaf->of_node;
}

/* OF properties to query for a given cache type */
struct cache_type_info {
	const char *size_prop;
	const char *line_size_props[2];
	const char *nr_sets_prop;
};

static const struct cache_type_info cache_type_info[] = {
	{
		.size_prop       = "cache-size",
		.line_size_props = { "cache-line-size",
				     "cache-block-size", },
		.nr_sets_prop    = "cache-sets",
	}, {
		.size_prop       = "i-cache-size",
		.line_size_props = { "i-cache-line-size",
				     "i-cache-block-size", },
		.nr_sets_prop    = "i-cache-sets",
	}, {
		.size_prop       = "d-cache-size",
		.line_size_props = { "d-cache-line-size",
				     "d-cache-block-size", },
		.nr_sets_prop    = "d-cache-sets",
	},
};

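/*
 * Illustrative DT fragment (made-up values) with the properties the
 * table above is matched against; a unified L2 might look like:
 *
 *	l2: l2-cache {
 *		compatible = "cache";
 *		cache-unified;
 *		cache-size = <0x80000>;
 *		cache-line-size = <64>;
 *		cache-sets = <2048>;
 *	};
 *
 * With the enum values from <linux/cacheinfo.h>, CACHE_TYPE_INST (1)
 * and CACHE_TYPE_DATA (2) index straight into cache_type_info[], while
 * CACHE_TYPE_UNIFIED lies outside the array bounds and is remapped to
 * slot 0, which holds the generic "cache-*" property names.
 */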
static inline int get_cacheinfo_idx(enum cache_type type)
{
	if (type == CACHE_TYPE_UNIFIED)
		return 0;
	return type;
}

static void cache_size(struct cacheinfo *this_leaf)
{
	const char *propname;
	const __be32 *cache_size;
	int ct_idx;

	ct_idx = get_cacheinfo_idx(this_leaf->type);
	propname = cache_type_info[ct_idx].size_prop;

	cache_size = of_get_property(this_leaf->of_node, propname, NULL);
	if (cache_size)
		this_leaf->size = of_read_number(cache_size, 1);
}

/* not cache_line_size() because that's a macro in include/linux/cache.h */
static void cache_get_line_size(struct cacheinfo *this_leaf)
{
	const __be32 *line_size;
	int i, lim, ct_idx;

	ct_idx = get_cacheinfo_idx(this_leaf->type);
	lim = ARRAY_SIZE(cache_type_info[ct_idx].line_size_props);

	for (i = 0; i < lim; i++) {
		const char *propname;

		propname = cache_type_info[ct_idx].line_size_props[i];
		line_size = of_get_property(this_leaf->of_node, propname, NULL);
		if (line_size)
			break;
	}

	if (line_size)
		this_leaf->coherency_line_size = of_read_number(line_size, 1);
}

static void cache_nr_sets(struct cacheinfo *this_leaf)
{
	const char *propname;
	const __be32 *nr_sets;
	int ct_idx;

	ct_idx = get_cacheinfo_idx(this_leaf->type);
	propname = cache_type_info[ct_idx].nr_sets_prop;

	nr_sets = of_get_property(this_leaf->of_node, propname, NULL);
	if (nr_sets)
		this_leaf->number_of_sets = of_read_number(nr_sets, 1);
}

static void cache_associativity(struct cacheinfo *this_leaf)
{
	unsigned int line_size = this_leaf->coherency_line_size;
	unsigned int nr_sets = this_leaf->number_of_sets;
	unsigned int size = this_leaf->size;

	/*
	 * If the cache is fully associative, there is no need to
	 * check the other properties.
	 */
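	/*
	 * Worked example with made-up numbers: a 32 KiB cache with
	 * 64-byte lines and 128 sets yields (32768 / 128) / 64 = 4 ways.
	 */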
	if (nr_sets > 1 && size > 0 && line_size > 0)
		this_leaf->ways_of_associativity = (size / nr_sets) / line_size;
}

static bool cache_node_is_unified(struct cacheinfo *this_leaf)
{
	return of_property_read_bool(this_leaf->of_node, "cache-unified");
}

static void cache_of_override_properties(unsigned int cpu)
{
	int index;
	struct cacheinfo *this_leaf;
	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);

	for (index = 0; index < cache_leaves(cpu); index++) {
		this_leaf = this_cpu_ci->info_list + index;
		/*
		 * init_cache_level() must set up the cache levels correctly,
		 * overriding any architecturally specified levels, so if the
		 * type is still NOCACHE at this stage it should be unified
		 */
		if (this_leaf->type == CACHE_TYPE_NOCACHE &&
		    cache_node_is_unified(this_leaf))
			this_leaf->type = CACHE_TYPE_UNIFIED;
		cache_size(this_leaf);
		cache_get_line_size(this_leaf);
		cache_nr_sets(this_leaf);
		cache_associativity(this_leaf);
	}
}
#else
static void cache_of_override_properties(unsigned int cpu) { }
static inline int cache_setup_of_node(unsigned int cpu) { return 0; }
static inline bool cache_leaves_are_shared(struct cacheinfo *this_leaf,
					   struct cacheinfo *sib_leaf)
{
	/*
	 * For non-DT systems, assume a unique level 1 cache per CPU and
	 * system-wide shared caches for all other levels. This is used
	 * only if the arch specific code has not populated shared_cpu_map
	 */
	return this_leaf->level != 1;
}
#endif

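/*
 * Build each leaf's shared_cpu_map by comparing this CPU's leaves against
 * the same-index leaf of every other online CPU: when two leaves are
 * deemed shared, both masks are updated symmetrically so the sibling
 * does not have to repeat the walk.
 */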
static int cache_shared_cpu_map_setup(unsigned int cpu)
{
	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
	struct cacheinfo *this_leaf, *sib_leaf;
	unsigned int index;
	int ret = 0;

	if (this_cpu_ci->cpu_map_populated)
		return 0;

	if (of_have_populated_dt())
		ret = cache_setup_of_node(cpu);
	else if (!acpi_disabled)
		/* No cache property/hierarchy support yet in ACPI */
		ret = -ENOTSUPP;
	if (ret)
		return ret;

	for (index = 0; index < cache_leaves(cpu); index++) {
		unsigned int i;

		this_leaf = this_cpu_ci->info_list + index;
		/* skip if shared_cpu_map is already populated */
		if (!cpumask_empty(&this_leaf->shared_cpu_map))
			continue;

		cpumask_set_cpu(cpu, &this_leaf->shared_cpu_map);
		for_each_online_cpu(i) {
			struct cpu_cacheinfo *sib_cpu_ci = get_cpu_cacheinfo(i);

			if (i == cpu || !sib_cpu_ci->info_list)
				continue; /* skip if itself or no cacheinfo */
			sib_leaf = sib_cpu_ci->info_list + index;
			if (cache_leaves_are_shared(this_leaf, sib_leaf)) {
				cpumask_set_cpu(cpu, &sib_leaf->shared_cpu_map);
				cpumask_set_cpu(i, &this_leaf->shared_cpu_map);
			}
		}
	}

	return 0;
}

static void cache_shared_cpu_map_remove(unsigned int cpu)
{
	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
	struct cacheinfo *this_leaf, *sib_leaf;
	unsigned int sibling, index;

	for (index = 0; index < cache_leaves(cpu); index++) {
		this_leaf = this_cpu_ci->info_list + index;
		for_each_cpu(sibling, &this_leaf->shared_cpu_map) {
			struct cpu_cacheinfo *sib_cpu_ci;

			if (sibling == cpu) /* skip itself */
				continue;

			sib_cpu_ci = get_cpu_cacheinfo(sibling);
			if (!sib_cpu_ci->info_list)
				continue;

			sib_leaf = sib_cpu_ci->info_list + index;
			cpumask_clear_cpu(cpu, &sib_leaf->shared_cpu_map);
			cpumask_clear_cpu(sibling, &this_leaf->shared_cpu_map);
		}
		of_node_put(this_leaf->of_node);
	}
}

static void cache_override_properties(unsigned int cpu)
{
	if (of_have_populated_dt())
		return cache_of_override_properties(cpu);
}

static void free_cache_attributes(unsigned int cpu)
{
	if (!per_cpu_cacheinfo(cpu))
		return;

	cache_shared_cpu_map_remove(cpu);

	kfree(per_cpu_cacheinfo(cpu));
	per_cpu_cacheinfo(cpu) = NULL;
}

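/*
 * Weak defaults: architectures that support cacheinfo are expected to
 * provide their own init_cache_level() and populate_cache_leaves()
 * (see e.g. arch/arm64/kernel/cacheinfo.c); the -ENOENT stubs below
 * make detect_cache_attributes() fail gracefully everywhere else.
 */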
int __weak init_cache_level(unsigned int cpu)
{
	return -ENOENT;
}

int __weak populate_cache_leaves(unsigned int cpu)
{
	return -ENOENT;
}

static int detect_cache_attributes(unsigned int cpu)
{
	int ret;

	if (init_cache_level(cpu) || !cache_leaves(cpu))
		return -ENOENT;

	per_cpu_cacheinfo(cpu) = kcalloc(cache_leaves(cpu),
					 sizeof(struct cacheinfo), GFP_KERNEL);
	if (per_cpu_cacheinfo(cpu) == NULL)
		return -ENOMEM;

	ret = populate_cache_leaves(cpu);
	if (ret)
		goto free_ci;
	/*
	 * For systems using DT for cache hierarchy, of_node and shared_cpu_map
	 * will be set up here only if they are not populated already
	 */
	ret = cache_shared_cpu_map_setup(cpu);
	if (ret) {
		pr_warn("Unable to detect cache hierarchy for CPU %d\n", cpu);
		goto free_ci;
	}

	cache_override_properties(cpu);
	return 0;

free_ci:
	free_cache_attributes(cpu);
	return ret;
}

/* pointer to cpuX/cache device */
static DEFINE_PER_CPU(struct device *, ci_cache_dev);
#define per_cpu_cache_dev(cpu)	(per_cpu(ci_cache_dev, cpu))

static cpumask_t cache_dev_map;

/* pointer to array of devices for cpuX/cache/indexY */
static DEFINE_PER_CPU(struct device **, ci_index_dev);
#define per_cpu_index_dev(cpu)	(per_cpu(ci_index_dev, cpu))
#define per_cache_index_dev(cpu, idx)	((per_cpu_index_dev(cpu))[idx])

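/*
 * show_one(file_name, object) generates a trivial sysfs show handler;
 * for instance, show_one(level, level) below expands to roughly:
 *
 *	static ssize_t level_show(struct device *dev,
 *			struct device_attribute *attr, char *buf)
 *	{
 *		struct cacheinfo *this_leaf = dev_get_drvdata(dev);
 *		return sprintf(buf, "%u\n", this_leaf->level);
 *	}
 */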
#define show_one(file_name, object)				\
static ssize_t file_name##_show(struct device *dev,		\
		struct device_attribute *attr, char *buf)	\
{								\
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);	\
	return sprintf(buf, "%u\n", this_leaf->object);		\
}

show_one(id, id);
show_one(level, level);
show_one(coherency_line_size, coherency_line_size);
show_one(number_of_sets, number_of_sets);
show_one(physical_line_partition, physical_line_partition);
show_one(ways_of_associativity, ways_of_associativity);

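/* size is reported in KiB: a 32768-byte cache reads back as "32K" */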
static ssize_t size_show(struct device *dev,
			 struct device_attribute *attr, char *buf)
{
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);

	return sprintf(buf, "%uK\n", this_leaf->size >> 10);
}

static ssize_t shared_cpumap_show_func(struct device *dev, bool list, char *buf)
{
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);
	const struct cpumask *mask = &this_leaf->shared_cpu_map;

	return cpumap_print_to_pagebuf(list, buf, mask);
}

static ssize_t shared_cpu_map_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	return shared_cpumap_show_func(dev, false, buf);
}

static ssize_t shared_cpu_list_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	return shared_cpumap_show_func(dev, true, buf);
}

static ssize_t type_show(struct device *dev,
			 struct device_attribute *attr, char *buf)
{
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);

	switch (this_leaf->type) {
	case CACHE_TYPE_DATA:
		return sprintf(buf, "Data\n");
	case CACHE_TYPE_INST:
		return sprintf(buf, "Instruction\n");
	case CACHE_TYPE_UNIFIED:
		return sprintf(buf, "Unified\n");
	default:
		return -EINVAL;
	}
}

static ssize_t allocation_policy_show(struct device *dev,
				      struct device_attribute *attr, char *buf)
{
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);
	unsigned int ci_attr = this_leaf->attributes;
	int n = 0;

	if ((ci_attr & CACHE_READ_ALLOCATE) && (ci_attr & CACHE_WRITE_ALLOCATE))
		n = sprintf(buf, "ReadWriteAllocate\n");
	else if (ci_attr & CACHE_READ_ALLOCATE)
		n = sprintf(buf, "ReadAllocate\n");
	else if (ci_attr & CACHE_WRITE_ALLOCATE)
		n = sprintf(buf, "WriteAllocate\n");
	return n;
}

static ssize_t write_policy_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);
	unsigned int ci_attr = this_leaf->attributes;
	int n = 0;

	if (ci_attr & CACHE_WRITE_THROUGH)
		n = sprintf(buf, "WriteThrough\n");
	else if (ci_attr & CACHE_WRITE_BACK)
		n = sprintf(buf, "WriteBack\n");
	return n;
}

static DEVICE_ATTR_RO(id);
static DEVICE_ATTR_RO(level);
static DEVICE_ATTR_RO(type);
static DEVICE_ATTR_RO(coherency_line_size);
static DEVICE_ATTR_RO(ways_of_associativity);
static DEVICE_ATTR_RO(number_of_sets);
static DEVICE_ATTR_RO(size);
static DEVICE_ATTR_RO(allocation_policy);
static DEVICE_ATTR_RO(write_policy);
static DEVICE_ATTR_RO(shared_cpu_map);
static DEVICE_ATTR_RO(shared_cpu_list);
static DEVICE_ATTR_RO(physical_line_partition);

static struct attribute *cache_default_attrs[] = {
	&dev_attr_id.attr,
	&dev_attr_type.attr,
	&dev_attr_level.attr,
	&dev_attr_shared_cpu_map.attr,
	&dev_attr_shared_cpu_list.attr,
	&dev_attr_coherency_line_size.attr,
	&dev_attr_ways_of_associativity.attr,
	&dev_attr_number_of_sets.attr,
	&dev_attr_size.attr,
	&dev_attr_allocation_policy.attr,
	&dev_attr_write_policy.attr,
	&dev_attr_physical_line_partition.attr,
	NULL
};

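/*
 * Hide attributes whose backing field was never populated so userspace
 * only sees meaningful files. ways_of_associativity is keyed on size
 * rather than its own value because 0 ways legitimately means fully
 * associative.
 */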
static umode_t
cache_default_attrs_is_visible(struct kobject *kobj,
			       struct attribute *attr, int unused)
{
	struct device *dev = kobj_to_dev(kobj);
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);
	const struct cpumask *mask = &this_leaf->shared_cpu_map;
	umode_t mode = attr->mode;

	if ((attr == &dev_attr_id.attr) && (this_leaf->attributes & CACHE_ID))
		return mode;
	if ((attr == &dev_attr_type.attr) && this_leaf->type)
		return mode;
	if ((attr == &dev_attr_level.attr) && this_leaf->level)
		return mode;
	if ((attr == &dev_attr_shared_cpu_map.attr) && !cpumask_empty(mask))
		return mode;
	if ((attr == &dev_attr_shared_cpu_list.attr) && !cpumask_empty(mask))
		return mode;
	if ((attr == &dev_attr_coherency_line_size.attr) &&
	    this_leaf->coherency_line_size)
		return mode;
	if ((attr == &dev_attr_ways_of_associativity.attr) &&
	    this_leaf->size) /* allow 0 = full associativity */
		return mode;
	if ((attr == &dev_attr_number_of_sets.attr) &&
	    this_leaf->number_of_sets)
		return mode;
	if ((attr == &dev_attr_size.attr) && this_leaf->size)
		return mode;
	if ((attr == &dev_attr_write_policy.attr) &&
	    (this_leaf->attributes & CACHE_WRITE_POLICY_MASK))
		return mode;
	if ((attr == &dev_attr_allocation_policy.attr) &&
	    (this_leaf->attributes & CACHE_ALLOCATE_POLICY_MASK))
		return mode;
	if ((attr == &dev_attr_physical_line_partition.attr) &&
	    this_leaf->physical_line_partition)
		return mode;

	return 0;
}

static const struct attribute_group cache_default_group = {
	.attrs = cache_default_attrs,
	.is_visible = cache_default_attrs_is_visible,
};

static const struct attribute_group *cache_default_groups[] = {
	&cache_default_group,
	NULL,
};

static const struct attribute_group *cache_private_groups[] = {
	&cache_default_group,
	NULL, /* placeholder for the private group */
	NULL,
};

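/*
 * Optional arch hook: returning a non-NULL group here (as x86 does for
 * its vendor-specific cache attributes) gets it spliced into the
 * otherwise-NULL placeholder slot of cache_private_groups[] above.
 */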
const struct attribute_group *
__weak cache_get_priv_group(struct cacheinfo *this_leaf)
{
	return NULL;
}

static const struct attribute_group **
cache_get_attribute_groups(struct cacheinfo *this_leaf)
{
	const struct attribute_group *priv_group =
			cache_get_priv_group(this_leaf);

	if (!priv_group)
		return cache_default_groups;

	if (!cache_private_groups[1])
		cache_private_groups[1] = priv_group;

	return cache_private_groups;
}

/* Add/Remove cache interface for CPU device */
static void cpu_cache_sysfs_exit(unsigned int cpu)
{
	int i;
	struct device *ci_dev;

	if (per_cpu_index_dev(cpu)) {
		for (i = 0; i < cache_leaves(cpu); i++) {
			ci_dev = per_cache_index_dev(cpu, i);
			if (!ci_dev)
				continue;
			device_unregister(ci_dev);
		}
		kfree(per_cpu_index_dev(cpu));
		per_cpu_index_dev(cpu) = NULL;
	}
	device_unregister(per_cpu_cache_dev(cpu));
	per_cpu_cache_dev(cpu) = NULL;
}

static int cpu_cache_sysfs_init(unsigned int cpu)
{
	struct device *dev = get_cpu_device(cpu);

	if (per_cpu_cacheinfo(cpu) == NULL)
		return -ENOENT;

	per_cpu_cache_dev(cpu) = cpu_device_create(dev, NULL, NULL, "cache");
	if (IS_ERR(per_cpu_cache_dev(cpu)))
		return PTR_ERR(per_cpu_cache_dev(cpu));

	/* Allocate all required memory */
	per_cpu_index_dev(cpu) = kcalloc(cache_leaves(cpu),
					 sizeof(struct device *), GFP_KERNEL);
	if (unlikely(per_cpu_index_dev(cpu) == NULL))
		goto err_out;

	return 0;

err_out:
	cpu_cache_sysfs_exit(cpu);
	return -ENOMEM;
}

static int cache_add_dev(unsigned int cpu)
{
	unsigned int i;
	int rc;
	struct device *ci_dev, *parent;
	struct cacheinfo *this_leaf;
	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
	const struct attribute_group **cache_groups;

	rc = cpu_cache_sysfs_init(cpu);
	if (unlikely(rc < 0))
		return rc;

	parent = per_cpu_cache_dev(cpu);
	for (i = 0; i < cache_leaves(cpu); i++) {
		this_leaf = this_cpu_ci->info_list + i;
		if (this_leaf->disable_sysfs)
			continue;
		cache_groups = cache_get_attribute_groups(this_leaf);
		ci_dev = cpu_device_create(parent, this_leaf, cache_groups,
					   "index%1u", i);
		if (IS_ERR(ci_dev)) {
			rc = PTR_ERR(ci_dev);
			goto err;
		}
		per_cache_index_dev(cpu, i) = ci_dev;
	}
	cpumask_set_cpu(cpu, &cache_dev_map);

	return 0;
err:
	cpu_cache_sysfs_exit(cpu);
	return rc;
}

static int cacheinfo_cpu_online(unsigned int cpu)
{
	int rc = detect_cache_attributes(cpu);

	if (rc)
		return rc;
	rc = cache_add_dev(cpu);
	if (rc)
		free_cache_attributes(cpu);
	return rc;
}

static int cacheinfo_cpu_pre_down(unsigned int cpu)
{
	if (cpumask_test_and_clear_cpu(cpu, &cache_dev_map))
		cpu_cache_sysfs_exit(cpu);

	free_cache_attributes(cpu);
	return 0;
}

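/*
 * CPUHP_AP_ONLINE_DYN allocates a dynamic hotplug state, so the online
 * callback also runs for every CPU already up at init time, and the
 * teardown callback runs before each CPU goes offline.
 */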
static int __init cacheinfo_sysfs_init(void)
{
	return cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "base/cacheinfo:online",
				 cacheinfo_cpu_online, cacheinfo_cpu_pre_down);
}
device_initcall(cacheinfo_sysfs_init);