/* xref: /openbmc/linux/drivers/base/cacheinfo.c (revision 7a306e3e) */
// SPDX-License-Identifier: GPL-2.0
/*
 * cacheinfo support - processor cache information via sysfs
 *
 * Based on arch/x86/kernel/cpu/intel_cacheinfo.c
 * Author: Sudeep Holla <sudeep.holla@arm.com>
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/acpi.h>
#include <linux/bitops.h>
#include <linux/cacheinfo.h>
#include <linux/compiler.h>
#include <linux/cpu.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/of_device.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/sysfs.h>

/* pointer to per cpu cacheinfo */
static DEFINE_PER_CPU(struct cpu_cacheinfo, ci_cpu_cacheinfo);
#define ci_cacheinfo(cpu)	(&per_cpu(ci_cpu_cacheinfo, cpu))
#define cache_leaves(cpu)	(ci_cacheinfo(cpu)->num_leaves)
#define per_cpu_cacheinfo(cpu)	(ci_cacheinfo(cpu)->info_list)
#define per_cpu_cacheinfo_idx(cpu, idx)		\
				(per_cpu_cacheinfo(cpu) + (idx))

struct cpu_cacheinfo *get_cpu_cacheinfo(unsigned int cpu)
{
	return ci_cacheinfo(cpu);
}

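/*
 * cache_leaves_are_shared - decide whether two cache leaves describe the
 * same physical cache. Leaves match on the firmware-assigned cache ID when
 * both sides carry one, otherwise on the firmware node (fw_token) that
 * described them.
 */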
static inline bool cache_leaves_are_shared(struct cacheinfo *this_leaf,
					   struct cacheinfo *sib_leaf)
{
	/*
	 * For non DT/ACPI systems, assume unique level 1 caches,
	 * system-wide shared caches for all other levels.
	 */
	if (!(IS_ENABLED(CONFIG_OF) || IS_ENABLED(CONFIG_ACPI)))
		return (this_leaf->level != 1) && (sib_leaf->level != 1);

	if ((sib_leaf->attributes & CACHE_ID) &&
	    (this_leaf->attributes & CACHE_ID))
		return sib_leaf->id == this_leaf->id;

	return sib_leaf->fw_token == this_leaf->fw_token;
}

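/*
 * last_level_cache_is_valid - check that the last-level cache leaf of @cpu
 * carries enough firmware information (a cache ID or an fw_token) to be
 * compared against the leaves of other CPUs.
 */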
bool last_level_cache_is_valid(unsigned int cpu)
{
	struct cacheinfo *llc;

	if (!cache_leaves(cpu))
		return false;

	llc = per_cpu_cacheinfo_idx(cpu, cache_leaves(cpu) - 1);

	return (llc->attributes & CACHE_ID) || !!llc->fw_token;
}

bool last_level_cache_is_shared(unsigned int cpu_x, unsigned int cpu_y)
{
	struct cacheinfo *llc_x, *llc_y;

	if (!last_level_cache_is_valid(cpu_x) ||
	    !last_level_cache_is_valid(cpu_y))
		return false;

	llc_x = per_cpu_cacheinfo_idx(cpu_x, cache_leaves(cpu_x) - 1);
	llc_y = per_cpu_cacheinfo_idx(cpu_y, cache_leaves(cpu_y) - 1);

	return cache_leaves_are_shared(llc_x, llc_y);
}

#ifdef CONFIG_OF
/* OF properties to query for a given cache type */
struct cache_type_info {
	const char *size_prop;
	const char *line_size_props[2];
	const char *nr_sets_prop;
};

static const struct cache_type_info cache_type_info[] = {
	{
		.size_prop       = "cache-size",
		.line_size_props = { "cache-line-size",
				     "cache-block-size", },
		.nr_sets_prop    = "cache-sets",
	}, {
		.size_prop       = "i-cache-size",
		.line_size_props = { "i-cache-line-size",
				     "i-cache-block-size", },
		.nr_sets_prop    = "i-cache-sets",
	}, {
		.size_prop       = "d-cache-size",
		.line_size_props = { "d-cache-line-size",
				     "d-cache-block-size", },
		.nr_sets_prop    = "d-cache-sets",
	},
};

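/*
 * Map an enum cache_type value to an index into cache_type_info[]: unified
 * caches use the plain "cache-*" properties at index 0, while the enum
 * values for instruction (1) and data (2) caches index the "i-cache-*" and
 * "d-cache-*" entries directly.
 */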
static inline int get_cacheinfo_idx(enum cache_type type)
{
	if (type == CACHE_TYPE_UNIFIED)
		return 0;
	return type;
}

static void cache_size(struct cacheinfo *this_leaf, struct device_node *np)
{
	const char *propname;
	int ct_idx;

	ct_idx = get_cacheinfo_idx(this_leaf->type);
	propname = cache_type_info[ct_idx].size_prop;

	of_property_read_u32(np, propname, &this_leaf->size);
}

/* not cache_line_size() because that's a macro in include/linux/cache.h */
static void cache_get_line_size(struct cacheinfo *this_leaf,
				struct device_node *np)
{
	int i, lim, ct_idx;

	ct_idx = get_cacheinfo_idx(this_leaf->type);
	lim = ARRAY_SIZE(cache_type_info[ct_idx].line_size_props);

	for (i = 0; i < lim; i++) {
		int ret;
		u32 line_size;
		const char *propname;

		propname = cache_type_info[ct_idx].line_size_props[i];
		ret = of_property_read_u32(np, propname, &line_size);
		if (!ret) {
			this_leaf->coherency_line_size = line_size;
			break;
		}
	}
}

static void cache_nr_sets(struct cacheinfo *this_leaf, struct device_node *np)
{
	const char *propname;
	int ct_idx;

	ct_idx = get_cacheinfo_idx(this_leaf->type);
	propname = cache_type_info[ct_idx].nr_sets_prop;

	of_property_read_u32(np, propname, &this_leaf->number_of_sets);
}

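/*
 * Derive the number of ways from size, sets and line size:
 * ways = size / (nr_sets * line_size). For example, a 32KiB cache with
 * 128 sets and 64-byte lines has 32768 / 128 / 64 = 4 ways.
 */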
static void cache_associativity(struct cacheinfo *this_leaf)
{
	unsigned int line_size = this_leaf->coherency_line_size;
	unsigned int nr_sets = this_leaf->number_of_sets;
	unsigned int size = this_leaf->size;

	/*
	 * If the cache is fully associative, there is no need to
	 * check the other properties.
	 */
	if (!(nr_sets == 1) && (nr_sets > 0 && size > 0 && line_size > 0))
		this_leaf->ways_of_associativity = (size / nr_sets) / line_size;
}

static bool cache_node_is_unified(struct cacheinfo *this_leaf,
				  struct device_node *np)
{
	return of_property_read_bool(np, "cache-unified");
}

static void cache_of_set_props(struct cacheinfo *this_leaf,
			       struct device_node *np)
{
	/*
	 * init_cache_level must setup the cache level correctly
	 * overriding the architecturally specified levels, so
	 * if type is NONE at this stage, it should be unified
	 */
	if (this_leaf->type == CACHE_TYPE_NOCACHE &&
	    cache_node_is_unified(this_leaf, np))
		this_leaf->type = CACHE_TYPE_UNIFIED;
	cache_size(this_leaf, np);
	cache_get_line_size(this_leaf, np);
	cache_nr_sets(this_leaf, np);
	cache_associativity(this_leaf);
}

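/*
 * Walk the cache hierarchy for @cpu: level 1 leaves are described by the
 * CPU node itself, while each higher level is reached by following the
 * "next-level-cache" (or legacy "l2-cache") phandle via
 * of_find_next_cache_node(). Returns -ENOENT if the DT describes fewer
 * nodes than the expected number of leaves.
 */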
static int cache_setup_of_node(unsigned int cpu)
{
	struct device_node *np, *prev;
	struct cacheinfo *this_leaf;
	unsigned int index = 0;

	np = of_cpu_device_node_get(cpu);
	if (!np) {
		pr_err("Failed to find cpu%d device node\n", cpu);
		return -ENOENT;
	}

	prev = np;

	while (index < cache_leaves(cpu)) {
		this_leaf = per_cpu_cacheinfo_idx(cpu, index);
		if (this_leaf->level != 1) {
			np = of_find_next_cache_node(np);
			of_node_put(prev);
			prev = np;
			if (!np)
				break;
		}
		cache_of_set_props(this_leaf, np);
		this_leaf->fw_token = np;
		index++;
	}

	of_node_put(np);

	if (index != cache_leaves(cpu)) /* not all OF nodes populated */
		return -ENOENT;

	return 0;
}

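/*
 * Count the cache leaves described by a single DT node from the presence
 * of the per-type size properties. If none are present, fall back on
 * "cache-unified" to choose between one unified leaf and a split
 * instruction/data pair.
 */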
static int of_count_cache_leaves(struct device_node *np)
{
	unsigned int leaves = 0;

	if (of_property_read_bool(np, "cache-size"))
		++leaves;
	if (of_property_read_bool(np, "i-cache-size"))
		++leaves;
	if (of_property_read_bool(np, "d-cache-size"))
		++leaves;

	if (!leaves) {
		/*
		 * The '[i-|d-|]cache-size' property is required, but
		 * if absent, fallback on the 'cache-unified' property.
		 */
		if (of_property_read_bool(np, "cache-unified"))
			return 1;
		else
			return 2;
	}

	return leaves;
}

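/*
 * init_of_cache_level - discover the number of cache levels and leaves for
 * @cpu from the DT. Every node in the chain must be compatible with "cache"
 * and carry a strictly increasing "cache-level"; any violation invalidates
 * the whole hierarchy and returns -EINVAL.
 */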
int init_of_cache_level(unsigned int cpu)
{
	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
	struct device_node *np = of_cpu_device_node_get(cpu);
	struct device_node *prev = NULL;
	unsigned int levels = 0, leaves, level;

	leaves = of_count_cache_leaves(np);
	if (leaves > 0)
		levels = 1;

	prev = np;
	while ((np = of_find_next_cache_node(np))) {
		of_node_put(prev);
		prev = np;
		if (!of_device_is_compatible(np, "cache"))
			goto err_out;
		if (of_property_read_u32(np, "cache-level", &level))
			goto err_out;
		if (level <= levels)
			goto err_out;

		leaves += of_count_cache_leaves(np);
		levels = level;
	}

	of_node_put(np);
	this_cpu_ci->num_levels = levels;
	this_cpu_ci->num_leaves = leaves;

	return 0;

err_out:
	of_node_put(np);
	return -EINVAL;
}

#else
static inline int cache_setup_of_node(unsigned int cpu) { return 0; }
int init_of_cache_level(unsigned int cpu) { return 0; }
#endif

int __weak cache_setup_acpi(unsigned int cpu)
{
	return -ENOTSUPP;
}

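/*
 * coherency_max_size tracks the largest coherency_line_size seen across
 * all cache leaves; it is updated in cache_shared_cpu_map_setup() below.
 */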
unsigned int coherency_max_size;

static int cache_setup_properties(unsigned int cpu)
{
	int ret = 0;

	if (of_have_populated_dt())
		ret = cache_setup_of_node(cpu);
	else if (!acpi_disabled)
		ret = cache_setup_acpi(cpu);

	return ret;
}

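/*
 * Build shared_cpu_map for every leaf of @cpu: after making sure the cache
 * properties are populated, compare each leaf against the leaves of all
 * other online CPUs and mark both sides of every match, recording the
 * maximum line size seen along the way.
 */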
static int cache_shared_cpu_map_setup(unsigned int cpu)
{
	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
	struct cacheinfo *this_leaf, *sib_leaf;
	unsigned int index, sib_index;
	int ret = 0;

	if (this_cpu_ci->cpu_map_populated)
		return 0;

	/*
	 * skip setting up cache properties if LLC is valid, just need
	 * to update the shared cpu_map if the cache attributes were
	 * populated early before all the cpus are brought online
	 */
	if (!last_level_cache_is_valid(cpu)) {
		ret = cache_setup_properties(cpu);
		if (ret)
			return ret;
	}

	for (index = 0; index < cache_leaves(cpu); index++) {
		unsigned int i;

		this_leaf = per_cpu_cacheinfo_idx(cpu, index);

		cpumask_set_cpu(cpu, &this_leaf->shared_cpu_map);
		for_each_online_cpu(i) {
			struct cpu_cacheinfo *sib_cpu_ci = get_cpu_cacheinfo(i);

			if (i == cpu || !sib_cpu_ci->info_list)
				continue; /* skip if itself or no cacheinfo */
			for (sib_index = 0; sib_index < cache_leaves(i); sib_index++) {
				sib_leaf = per_cpu_cacheinfo_idx(i, sib_index);
				if (cache_leaves_are_shared(this_leaf, sib_leaf)) {
					cpumask_set_cpu(cpu, &sib_leaf->shared_cpu_map);
					cpumask_set_cpu(i, &this_leaf->shared_cpu_map);
					break;
				}
			}
		}
		/* record the maximum cache line size */
		if (this_leaf->coherency_line_size > coherency_max_size)
			coherency_max_size = this_leaf->coherency_line_size;
	}

	return 0;
}

static void cache_shared_cpu_map_remove(unsigned int cpu)
{
	struct cacheinfo *this_leaf, *sib_leaf;
	unsigned int sibling, index, sib_index;

	for (index = 0; index < cache_leaves(cpu); index++) {
		this_leaf = per_cpu_cacheinfo_idx(cpu, index);
		for_each_cpu(sibling, &this_leaf->shared_cpu_map) {
			struct cpu_cacheinfo *sib_cpu_ci =
						get_cpu_cacheinfo(sibling);

			if (sibling == cpu || !sib_cpu_ci->info_list)
				continue; /* skip if itself or no cacheinfo */

			for (sib_index = 0; sib_index < cache_leaves(sibling); sib_index++) {
				sib_leaf = per_cpu_cacheinfo_idx(sibling, sib_index);
				if (cache_leaves_are_shared(this_leaf, sib_leaf)) {
					cpumask_clear_cpu(cpu, &sib_leaf->shared_cpu_map);
					cpumask_clear_cpu(sibling, &this_leaf->shared_cpu_map);
					break;
				}
			}
		}
	}
}

static void free_cache_attributes(unsigned int cpu)
{
	if (!per_cpu_cacheinfo(cpu))
		return;

	cache_shared_cpu_map_remove(cpu);
}

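/*
 * Weak arch hooks: architectures override these to report the number of
 * cache levels/leaves and to fill in the cacheinfo leaves. The defaults
 * simply report that no information is available.
 */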
int __weak early_cache_level(unsigned int cpu)
{
	return -ENOENT;
}

int __weak init_cache_level(unsigned int cpu)
{
	return -ENOENT;
}

int __weak populate_cache_leaves(unsigned int cpu)
{
	return -ENOENT;
}

static inline int allocate_cache_info(int cpu)
{
	per_cpu_cacheinfo(cpu) = kcalloc(cache_leaves(cpu),
					 sizeof(struct cacheinfo), GFP_ATOMIC);
	if (!per_cpu_cacheinfo(cpu)) {
		cache_leaves(cpu) = 0;
		return -ENOMEM;
	}

	return 0;
}

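/*
 * fetch_cache_info - early (pre-hotplug) sizing and allocation of the
 * cacheinfo list for @cpu, using DT or ACPI. If firmware gives nothing
 * usable, fall back on the arch's early_cache_level() guess and flag the
 * result as provisional via early_ci_levels so it can be corrected later.
 */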
int fetch_cache_info(unsigned int cpu)
{
	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
	unsigned int levels = 0, split_levels = 0;
	int ret;

	if (acpi_disabled) {
		ret = init_of_cache_level(cpu);
	} else {
		ret = acpi_get_cache_info(cpu, &levels, &split_levels);
		if (!ret) {
			this_cpu_ci->num_levels = levels;
			/*
			 * This assumes that:
			 * - there cannot be any split caches (data/instruction)
			 *   above a unified cache
			 * - data/instruction caches come by pair
			 */
			this_cpu_ci->num_leaves = levels + split_levels;
		}
	}

	if (ret || !cache_leaves(cpu)) {
		ret = early_cache_level(cpu);
		if (ret)
			return ret;

		if (!cache_leaves(cpu))
			return -ENOENT;

		this_cpu_ci->early_ci_levels = true;
	}

	return allocate_cache_info(cpu);
}

static inline int init_level_allocate_ci(unsigned int cpu)
{
	unsigned int early_leaves = cache_leaves(cpu);

	/*
	 * Since early initialization/allocation of the cacheinfo is allowed
	 * via fetch_cache_info() and this also gets called as CPU hotplug
	 * callbacks via cacheinfo_cpu_online, the init/alloc can be skipped
	 * as it will happen only once (the cacheinfo memory is never freed).
	 * Just populate the cacheinfo. However, if the cacheinfo has been
	 * allocated early through the arch-specific early_cache_level() call,
	 * there is a chance the info is wrong (this can happen on arm64). In
	 * that case, call init_cache_level() anyway to give the arch-specific
	 * code a chance to make things right.
	 */
	if (per_cpu_cacheinfo(cpu) && !ci_cacheinfo(cpu)->early_ci_levels)
		return 0;

	if (init_cache_level(cpu) || !cache_leaves(cpu))
		return -ENOENT;

	/*
	 * Now that we have properly initialized the cache level info, make
	 * sure we don't try to do that again the next time we are called
	 * (e.g. as CPU hotplug callbacks).
	 */
	ci_cacheinfo(cpu)->early_ci_levels = false;

	if (cache_leaves(cpu) <= early_leaves)
		return 0;

	kfree(per_cpu_cacheinfo(cpu));
	return allocate_cache_info(cpu);
}

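/*
 * detect_cache_attributes - hotplug-time entry point: make sure the level
 * info and the info_list allocation are in place, let the architecture
 * populate the leaves, then build the shared CPU maps.
 */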
int detect_cache_attributes(unsigned int cpu)
{
	int ret;

	ret = init_level_allocate_ci(cpu);
	if (ret)
		return ret;

	/*
	 * populate_cache_leaves() may completely setup the cache leaves and
	 * shared_cpu_map or it may leave it partially setup.
	 */
	ret = populate_cache_leaves(cpu);
	if (ret)
		goto free_ci;

	/*
	 * For systems using DT for cache hierarchy, fw_token
	 * and shared_cpu_map will be set up here only if they are
	 * not populated already
	 */
	ret = cache_shared_cpu_map_setup(cpu);
	if (ret) {
		pr_warn("Unable to detect cache hierarchy for CPU %d\n", cpu);
		goto free_ci;
	}

	return 0;

free_ci:
	free_cache_attributes(cpu);
	return ret;
}

/* pointer to cpuX/cache device */
static DEFINE_PER_CPU(struct device *, ci_cache_dev);
#define per_cpu_cache_dev(cpu)	(per_cpu(ci_cache_dev, cpu))

static cpumask_t cache_dev_map;

/* pointer to array of devices for cpuX/cache/indexY */
static DEFINE_PER_CPU(struct device **, ci_index_dev);
#define per_cpu_index_dev(cpu)	(per_cpu(ci_index_dev, cpu))
#define per_cache_index_dev(cpu, idx)	((per_cpu_index_dev(cpu))[idx])

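/*
 * show_one() stamps out a sysfs show handler per attribute, e.g.
 * show_one(level, level) expands to a level_show() that emits
 * this_leaf->level as an unsigned decimal.
 */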
#define show_one(file_name, object)				\
static ssize_t file_name##_show(struct device *dev,		\
		struct device_attribute *attr, char *buf)	\
{								\
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);	\
	return sysfs_emit(buf, "%u\n", this_leaf->object);	\
}

show_one(id, id);
show_one(level, level);
show_one(coherency_line_size, coherency_line_size);
show_one(number_of_sets, number_of_sets);
show_one(physical_line_partition, physical_line_partition);
show_one(ways_of_associativity, ways_of_associativity);

static ssize_t size_show(struct device *dev,
			 struct device_attribute *attr, char *buf)
{
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%uK\n", this_leaf->size >> 10);
}

static ssize_t shared_cpu_map_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);
	const struct cpumask *mask = &this_leaf->shared_cpu_map;

	return sysfs_emit(buf, "%*pb\n", nr_cpu_ids, mask);
}

static ssize_t shared_cpu_list_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);
	const struct cpumask *mask = &this_leaf->shared_cpu_map;

	return sysfs_emit(buf, "%*pbl\n", nr_cpu_ids, mask);
}

static ssize_t type_show(struct device *dev,
			 struct device_attribute *attr, char *buf)
{
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);
	const char *output;

	switch (this_leaf->type) {
	case CACHE_TYPE_DATA:
		output = "Data";
		break;
	case CACHE_TYPE_INST:
		output = "Instruction";
		break;
	case CACHE_TYPE_UNIFIED:
		output = "Unified";
		break;
	default:
		return -EINVAL;
	}

	return sysfs_emit(buf, "%s\n", output);
}

static ssize_t allocation_policy_show(struct device *dev,
				      struct device_attribute *attr, char *buf)
{
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);
	unsigned int ci_attr = this_leaf->attributes;
	const char *output;

	if ((ci_attr & CACHE_READ_ALLOCATE) && (ci_attr & CACHE_WRITE_ALLOCATE))
		output = "ReadWriteAllocate";
	else if (ci_attr & CACHE_READ_ALLOCATE)
		output = "ReadAllocate";
	else if (ci_attr & CACHE_WRITE_ALLOCATE)
		output = "WriteAllocate";
	else
		return 0;

	return sysfs_emit(buf, "%s\n", output);
}

static ssize_t write_policy_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);
	unsigned int ci_attr = this_leaf->attributes;
	int n = 0;

	if (ci_attr & CACHE_WRITE_THROUGH)
		n = sysfs_emit(buf, "WriteThrough\n");
	else if (ci_attr & CACHE_WRITE_BACK)
		n = sysfs_emit(buf, "WriteBack\n");
	return n;
}

static DEVICE_ATTR_RO(id);
static DEVICE_ATTR_RO(level);
static DEVICE_ATTR_RO(type);
static DEVICE_ATTR_RO(coherency_line_size);
static DEVICE_ATTR_RO(ways_of_associativity);
static DEVICE_ATTR_RO(number_of_sets);
static DEVICE_ATTR_RO(size);
static DEVICE_ATTR_RO(allocation_policy);
static DEVICE_ATTR_RO(write_policy);
static DEVICE_ATTR_RO(shared_cpu_map);
static DEVICE_ATTR_RO(shared_cpu_list);
static DEVICE_ATTR_RO(physical_line_partition);

static struct attribute *cache_default_attrs[] = {
	&dev_attr_id.attr,
	&dev_attr_type.attr,
	&dev_attr_level.attr,
	&dev_attr_shared_cpu_map.attr,
	&dev_attr_shared_cpu_list.attr,
	&dev_attr_coherency_line_size.attr,
	&dev_attr_ways_of_associativity.attr,
	&dev_attr_number_of_sets.attr,
	&dev_attr_size.attr,
	&dev_attr_allocation_policy.attr,
	&dev_attr_write_policy.attr,
	&dev_attr_physical_line_partition.attr,
	NULL
};

static umode_t
cache_default_attrs_is_visible(struct kobject *kobj,
			       struct attribute *attr, int unused)
{
	struct device *dev = kobj_to_dev(kobj);
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);
	const struct cpumask *mask = &this_leaf->shared_cpu_map;
	umode_t mode = attr->mode;

	if ((attr == &dev_attr_id.attr) && (this_leaf->attributes & CACHE_ID))
		return mode;
	if ((attr == &dev_attr_type.attr) && this_leaf->type)
		return mode;
	if ((attr == &dev_attr_level.attr) && this_leaf->level)
		return mode;
	if ((attr == &dev_attr_shared_cpu_map.attr) && !cpumask_empty(mask))
		return mode;
	if ((attr == &dev_attr_shared_cpu_list.attr) && !cpumask_empty(mask))
		return mode;
	if ((attr == &dev_attr_coherency_line_size.attr) &&
	    this_leaf->coherency_line_size)
		return mode;
	if ((attr == &dev_attr_ways_of_associativity.attr) &&
	    this_leaf->size) /* allow 0 = full associativity */
		return mode;
	if ((attr == &dev_attr_number_of_sets.attr) &&
	    this_leaf->number_of_sets)
		return mode;
	if ((attr == &dev_attr_size.attr) && this_leaf->size)
		return mode;
	if ((attr == &dev_attr_write_policy.attr) &&
	    (this_leaf->attributes & CACHE_WRITE_POLICY_MASK))
		return mode;
	if ((attr == &dev_attr_allocation_policy.attr) &&
	    (this_leaf->attributes & CACHE_ALLOCATE_POLICY_MASK))
		return mode;
	if ((attr == &dev_attr_physical_line_partition.attr) &&
	    this_leaf->physical_line_partition)
		return mode;

	return 0;
}

static const struct attribute_group cache_default_group = {
	.attrs = cache_default_attrs,
	.is_visible = cache_default_attrs_is_visible,
};

static const struct attribute_group *cache_default_groups[] = {
	&cache_default_group,
	NULL,
};

static const struct attribute_group *cache_private_groups[] = {
	&cache_default_group,
	NULL, /* Place holder for private group */
	NULL,
};

const struct attribute_group *
__weak cache_get_priv_group(struct cacheinfo *this_leaf)
{
	return NULL;
}

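/*
 * Note that cache_private_groups[] is a single static array: the arch's
 * private attribute group is cached in slot 1 on first use, which assumes
 * every leaf that has a private group shares the same one.
 */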
static const struct attribute_group **
cache_get_attribute_groups(struct cacheinfo *this_leaf)
{
	const struct attribute_group *priv_group =
			cache_get_priv_group(this_leaf);

	if (!priv_group)
		return cache_default_groups;

	if (!cache_private_groups[1])
		cache_private_groups[1] = priv_group;

	return cache_private_groups;
}

/* Add/Remove cache interface for CPU device */
static void cpu_cache_sysfs_exit(unsigned int cpu)
{
	int i;
	struct device *ci_dev;

	if (per_cpu_index_dev(cpu)) {
		for (i = 0; i < cache_leaves(cpu); i++) {
			ci_dev = per_cache_index_dev(cpu, i);
			if (!ci_dev)
				continue;
			device_unregister(ci_dev);
		}
		kfree(per_cpu_index_dev(cpu));
		per_cpu_index_dev(cpu) = NULL;
	}
	device_unregister(per_cpu_cache_dev(cpu));
	per_cpu_cache_dev(cpu) = NULL;
}

static int cpu_cache_sysfs_init(unsigned int cpu)
{
	struct device *dev = get_cpu_device(cpu);

	if (per_cpu_cacheinfo(cpu) == NULL)
		return -ENOENT;

	per_cpu_cache_dev(cpu) = cpu_device_create(dev, NULL, NULL, "cache");
	if (IS_ERR(per_cpu_cache_dev(cpu)))
		return PTR_ERR(per_cpu_cache_dev(cpu));

	/* Allocate all required memory */
	per_cpu_index_dev(cpu) = kcalloc(cache_leaves(cpu),
					 sizeof(struct device *), GFP_KERNEL);
	if (unlikely(per_cpu_index_dev(cpu) == NULL))
		goto err_out;

	return 0;

err_out:
	cpu_cache_sysfs_exit(cpu);
	return -ENOMEM;
}

static int cache_add_dev(unsigned int cpu)
{
	unsigned int i;
	int rc;
	struct device *ci_dev, *parent;
	struct cacheinfo *this_leaf;
	const struct attribute_group **cache_groups;

	rc = cpu_cache_sysfs_init(cpu);
	if (unlikely(rc < 0))
		return rc;

	parent = per_cpu_cache_dev(cpu);
	for (i = 0; i < cache_leaves(cpu); i++) {
		this_leaf = per_cpu_cacheinfo_idx(cpu, i);
		if (this_leaf->disable_sysfs)
			continue;
		if (this_leaf->type == CACHE_TYPE_NOCACHE)
			break;
		cache_groups = cache_get_attribute_groups(this_leaf);
		ci_dev = cpu_device_create(parent, this_leaf, cache_groups,
					   "index%1u", i);
		if (IS_ERR(ci_dev)) {
			rc = PTR_ERR(ci_dev);
			goto err;
		}
		per_cache_index_dev(cpu, i) = ci_dev;
	}
	cpumask_set_cpu(cpu, &cache_dev_map);

	return 0;
err:
	cpu_cache_sysfs_exit(cpu);
	return rc;
}

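/*
 * CPU hotplug callbacks, registered below at CPUHP_AP_BASE_CACHEINFO_ONLINE:
 * on online, detect the attributes and create the sysfs hierarchy; on
 * pre-down, tear down sysfs (if it was created) and clear this CPU from all
 * shared maps.
 */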
static int cacheinfo_cpu_online(unsigned int cpu)
{
	int rc = detect_cache_attributes(cpu);

	if (rc)
		return rc;
	rc = cache_add_dev(cpu);
	if (rc)
		free_cache_attributes(cpu);
	return rc;
}

static int cacheinfo_cpu_pre_down(unsigned int cpu)
{
	if (cpumask_test_and_clear_cpu(cpu, &cache_dev_map))
		cpu_cache_sysfs_exit(cpu);

	free_cache_attributes(cpu);
	return 0;
}

static int __init cacheinfo_sysfs_init(void)
{
	return cpuhp_setup_state(CPUHP_AP_BASE_CACHEINFO_ONLINE,
				 "base/cacheinfo:online",
				 cacheinfo_cpu_online, cacheinfo_cpu_pre_down);
}
device_initcall(cacheinfo_sysfs_init);
857