// SPDX-License-Identifier: GPL-2.0
/*
 * cacheinfo support - processor cache information via sysfs
 *
 * Based on arch/x86/kernel/cpu/intel_cacheinfo.c
 * Author: Sudeep Holla <sudeep.holla@arm.com>
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/acpi.h>
#include <linux/bitops.h>
#include <linux/cacheinfo.h>
#include <linux/compiler.h>
#include <linux/cpu.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/of.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/sysfs.h>

/* pointer to per cpu cacheinfo */
static DEFINE_PER_CPU(struct cpu_cacheinfo, ci_cpu_cacheinfo);
#define ci_cacheinfo(cpu)	(&per_cpu(ci_cpu_cacheinfo, cpu))
#define cache_leaves(cpu)	(ci_cacheinfo(cpu)->num_leaves)
#define per_cpu_cacheinfo(cpu)	(ci_cacheinfo(cpu)->info_list)
#define per_cpu_cacheinfo_idx(cpu, idx)		\
				(per_cpu_cacheinfo(cpu) + (idx))

/* Set if no cache information is found in DT/ACPI. */
static bool use_arch_info;

struct cpu_cacheinfo *get_cpu_cacheinfo(unsigned int cpu)
{
	return ci_cacheinfo(cpu);
}

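/*
 * cache_leaves_are_shared - decide whether two leaves describe the same
 * physical cache. Matching cache IDs take precedence (when both leaves
 * carry CACHE_ID); otherwise fall back to comparing firmware tokens,
 * e.g. the same DT node. Illustrative example: if cpu0 and cpu1 each
 * report an L2 leaf with id 0x100, the two leaves are treated as one
 * shared cache.
 */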
static inline bool cache_leaves_are_shared(struct cacheinfo *this_leaf,
					   struct cacheinfo *sib_leaf)
{
	/*
	 * For non-DT/ACPI systems, assume unique level 1 caches,
	 * system-wide shared caches for all other levels.
	 */
	if (!(IS_ENABLED(CONFIG_OF) || IS_ENABLED(CONFIG_ACPI)) ||
	    use_arch_info)
		return (this_leaf->level != 1) && (sib_leaf->level != 1);

	if ((sib_leaf->attributes & CACHE_ID) &&
	    (this_leaf->attributes & CACHE_ID))
		return sib_leaf->id == this_leaf->id;

	return sib_leaf->fw_token == this_leaf->fw_token;
}

bool last_level_cache_is_valid(unsigned int cpu)
{
	struct cacheinfo *llc;

	if (!cache_leaves(cpu))
		return false;

	llc = per_cpu_cacheinfo_idx(cpu, cache_leaves(cpu) - 1);

	return (llc->attributes & CACHE_ID) || !!llc->fw_token;
}

bool last_level_cache_is_shared(unsigned int cpu_x, unsigned int cpu_y)
{
	struct cacheinfo *llc_x, *llc_y;

	if (!last_level_cache_is_valid(cpu_x) ||
	    !last_level_cache_is_valid(cpu_y))
		return false;

	llc_x = per_cpu_cacheinfo_idx(cpu_x, cache_leaves(cpu_x) - 1);
	llc_y = per_cpu_cacheinfo_idx(cpu_y, cache_leaves(cpu_y) - 1);

	return cache_leaves_are_shared(llc_x, llc_y);
}

#ifdef CONFIG_OF

static bool of_check_cache_nodes(struct device_node *np);

/* OF properties to query for a given cache type */
struct cache_type_info {
	const char *size_prop;
	const char *line_size_props[2];
	const char *nr_sets_prop;
};

static const struct cache_type_info cache_type_info[] = {
	{
		.size_prop       = "cache-size",
		.line_size_props = { "cache-line-size",
				     "cache-block-size", },
		.nr_sets_prop    = "cache-sets",
	}, {
		.size_prop       = "i-cache-size",
		.line_size_props = { "i-cache-line-size",
				     "i-cache-block-size", },
		.nr_sets_prop    = "i-cache-sets",
	}, {
		.size_prop       = "d-cache-size",
		.line_size_props = { "d-cache-line-size",
				     "d-cache-block-size", },
		.nr_sets_prop    = "d-cache-sets",
	},
};

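/*
 * For reference, a CPU node might carry L1 properties such as the
 * following (all values purely illustrative):
 *
 *	cpu@0 {
 *		d-cache-size = <0x8000>;
 *		d-cache-line-size = <64>;
 *		d-cache-sets = <128>;
 *		i-cache-size = <0x8000>;
 *		i-cache-line-size = <64>;
 *		i-cache-sets = <128>;
 *		next-level-cache = <&l2>;
 *	};
 */
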
static inline int get_cacheinfo_idx(enum cache_type type)
{
	if (type == CACHE_TYPE_UNIFIED)
		return 0;
	return type;
}

static void cache_size(struct cacheinfo *this_leaf, struct device_node *np)
{
	const char *propname;
	int ct_idx;

	ct_idx = get_cacheinfo_idx(this_leaf->type);
	propname = cache_type_info[ct_idx].size_prop;

	of_property_read_u32(np, propname, &this_leaf->size);
}

/* not cache_line_size() because that's a macro in include/linux/cache.h */
static void cache_get_line_size(struct cacheinfo *this_leaf,
				struct device_node *np)
{
	int i, lim, ct_idx;

	ct_idx = get_cacheinfo_idx(this_leaf->type);
	lim = ARRAY_SIZE(cache_type_info[ct_idx].line_size_props);

	for (i = 0; i < lim; i++) {
		int ret;
		u32 line_size;
		const char *propname;

		propname = cache_type_info[ct_idx].line_size_props[i];
		ret = of_property_read_u32(np, propname, &line_size);
		if (!ret) {
			this_leaf->coherency_line_size = line_size;
			break;
		}
	}
}

static void cache_nr_sets(struct cacheinfo *this_leaf, struct device_node *np)
{
	const char *propname;
	int ct_idx;

	ct_idx = get_cacheinfo_idx(this_leaf->type);
	propname = cache_type_info[ct_idx].nr_sets_prop;

	of_property_read_u32(np, propname, &this_leaf->number_of_sets);
}

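/*
 * Derive the ways of associativity as size / (nr_sets * line_size).
 * Worked example (numbers purely illustrative): a 32 KiB cache with
 * 128 sets and 64-byte lines gives 32768 / (128 * 64) = 4 ways.
 */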
static void cache_associativity(struct cacheinfo *this_leaf)
{
	unsigned int line_size = this_leaf->coherency_line_size;
	unsigned int nr_sets = this_leaf->number_of_sets;
	unsigned int size = this_leaf->size;

	/*
	 * If the cache is fully associative, there is no need to
	 * check the other properties.
	 */
	if (!(nr_sets == 1) && (nr_sets > 0 && size > 0 && line_size > 0))
		this_leaf->ways_of_associativity = (size / nr_sets) / line_size;
}

static bool cache_node_is_unified(struct cacheinfo *this_leaf,
				  struct device_node *np)
{
	return of_property_read_bool(np, "cache-unified");
}

static void cache_of_set_props(struct cacheinfo *this_leaf,
			       struct device_node *np)
{
	/*
	 * init_cache_level() must set up the cache level correctly,
	 * overriding the architecturally specified levels, so if the
	 * type is still NONE at this stage, it should be unified.
	 */
	if (this_leaf->type == CACHE_TYPE_NOCACHE &&
	    cache_node_is_unified(this_leaf, np))
		this_leaf->type = CACHE_TYPE_UNIFIED;
	cache_size(this_leaf, np);
	cache_get_line_size(this_leaf, np);
	cache_nr_sets(this_leaf, np);
	cache_associativity(this_leaf);
}

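/*
 * Leaves beyond level 1 are found by following the next-level-cache
 * phandle chain from the CPU node. An illustrative L2 node (all values
 * hypothetical):
 *
 *	l2: l2-cache {
 *		compatible = "cache";
 *		cache-unified;
 *		cache-level = <2>;
 *		cache-size = <0x80000>;
 *		cache-line-size = <64>;
 *		cache-sets = <1024>;
 *	};
 */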
static int cache_setup_of_node(unsigned int cpu)
{
	struct device_node *np, *prev;
	struct cacheinfo *this_leaf;
	unsigned int index = 0;

	np = of_cpu_device_node_get(cpu);
	if (!np) {
		pr_err("Failed to find cpu%d device node\n", cpu);
		return -ENOENT;
	}

	if (!of_check_cache_nodes(np)) {
		of_node_put(np);
		return -ENOENT;
	}

	prev = np;

	while (index < cache_leaves(cpu)) {
		this_leaf = per_cpu_cacheinfo_idx(cpu, index);
		if (this_leaf->level != 1) {
			np = of_find_next_cache_node(np);
			of_node_put(prev);
			prev = np;
			if (!np)
				break;
		}
		cache_of_set_props(this_leaf, np);
		this_leaf->fw_token = np;
		index++;
	}

	of_node_put(np);

	if (index != cache_leaves(cpu)) /* not all OF nodes populated */
		return -ENOENT;

	return 0;
}

static bool of_check_cache_nodes(struct device_node *np)
{
	struct device_node *next;

	if (of_property_present(np, "cache-size")   ||
	    of_property_present(np, "i-cache-size") ||
	    of_property_present(np, "d-cache-size") ||
	    of_property_present(np, "cache-unified"))
		return true;

	next = of_find_next_cache_node(np);
	if (next) {
		of_node_put(next);
		return true;
	}

	return false;
}

static int of_count_cache_leaves(struct device_node *np)
{
	unsigned int leaves = 0;

	if (of_property_read_bool(np, "cache-size"))
		++leaves;
	if (of_property_read_bool(np, "i-cache-size"))
		++leaves;
	if (of_property_read_bool(np, "d-cache-size"))
		++leaves;

	if (!leaves) {
		/*
		 * The '[i-|d-|]cache-size' property is required, but if it
		 * is absent, fall back on the 'cache-unified' property to
		 * decide between a single unified leaf and a split
		 * instruction/data pair.
		 */
		if (of_property_read_bool(np, "cache-unified"))
			return 1;
		else
			return 2;
	}

	return leaves;
}

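/*
 * Walk the CPU node and its next-level-cache chain, accumulating cache
 * levels and leaves. For instance, split L1 i/d caches plus a unified
 * L2 node like the hypothetical one above yield num_levels = 2 and
 * num_leaves = 3.
 */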
int init_of_cache_level(unsigned int cpu)
{
	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
	struct device_node *np = of_cpu_device_node_get(cpu);
	struct device_node *prev = NULL;
	unsigned int levels = 0, leaves, level;

	if (!of_check_cache_nodes(np)) {
		of_node_put(np);
		return -ENOENT;
	}

	leaves = of_count_cache_leaves(np);
	if (leaves > 0)
		levels = 1;

	prev = np;
	while ((np = of_find_next_cache_node(np))) {
		of_node_put(prev);
		prev = np;
		if (!of_device_is_compatible(np, "cache"))
			goto err_out;
		if (of_property_read_u32(np, "cache-level", &level))
			goto err_out;
		if (level <= levels)
			goto err_out;

		leaves += of_count_cache_leaves(np);
		levels = level;
	}

	of_node_put(np);
	this_cpu_ci->num_levels = levels;
	this_cpu_ci->num_leaves = leaves;

	return 0;

err_out:
	of_node_put(np);
	return -EINVAL;
}

#else
static inline int cache_setup_of_node(unsigned int cpu) { return 0; }
int init_of_cache_level(unsigned int cpu) { return 0; }
#endif

int __weak cache_setup_acpi(unsigned int cpu)
{
	return -ENOTSUPP;
}

unsigned int coherency_max_size;

static int cache_setup_properties(unsigned int cpu)
{
	int ret = 0;

	if (of_have_populated_dt())
		ret = cache_setup_of_node(cpu);
	else if (!acpi_disabled)
		ret = cache_setup_acpi(cpu);

	/* Assume no cache information is available in DT/ACPI from now on. */
	if (ret && use_arch_cache_info())
		use_arch_info = true;

	return ret;
}

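/*
 * Build shared_cpu_map for every leaf of @cpu by comparing it against
 * the leaves of all other online CPUs. Illustrative example: on a part
 * with per-CPU L1 caches and one L2 shared by cpu0-cpu3, each CPU's L2
 * leaf ends up with shared_cpu_map spanning cpu0-cpu3, while each L1
 * leaf contains only its own CPU.
 */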
static int cache_shared_cpu_map_setup(unsigned int cpu)
{
	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
	struct cacheinfo *this_leaf, *sib_leaf;
	unsigned int index, sib_index;
	int ret = 0;

	if (this_cpu_ci->cpu_map_populated)
		return 0;

	/*
	 * Skip setting up the cache properties if the LLC is valid; we only
	 * need to update the shared cpu_map if the cache attributes were
	 * populated early, before all the CPUs were brought online.
	 */
	if (!last_level_cache_is_valid(cpu) && !use_arch_info) {
		ret = cache_setup_properties(cpu);
		if (ret)
			return ret;
	}

	for (index = 0; index < cache_leaves(cpu); index++) {
		unsigned int i;

		this_leaf = per_cpu_cacheinfo_idx(cpu, index);

		cpumask_set_cpu(cpu, &this_leaf->shared_cpu_map);
		for_each_online_cpu(i) {
			struct cpu_cacheinfo *sib_cpu_ci = get_cpu_cacheinfo(i);

			if (i == cpu || !sib_cpu_ci->info_list)
				continue; /* skip if itself or no cacheinfo */
			for (sib_index = 0; sib_index < cache_leaves(i); sib_index++) {
				sib_leaf = per_cpu_cacheinfo_idx(i, sib_index);

				/*
				 * Comparing cache IDs only makes sense if the leaves
				 * belong to the same cache level of same type. Skip
				 * the check if level and type do not match.
				 */
				if (sib_leaf->level != this_leaf->level ||
				    sib_leaf->type != this_leaf->type)
					continue;

				if (cache_leaves_are_shared(this_leaf, sib_leaf)) {
					cpumask_set_cpu(cpu, &sib_leaf->shared_cpu_map);
					cpumask_set_cpu(i, &this_leaf->shared_cpu_map);
					break;
				}
			}
		}
		/* record the maximum cache line size */
		if (this_leaf->coherency_line_size > coherency_max_size)
			coherency_max_size = this_leaf->coherency_line_size;
	}

	/* shared_cpu_map is now populated for the cpu */
	this_cpu_ci->cpu_map_populated = true;
	return 0;
}

static void cache_shared_cpu_map_remove(unsigned int cpu)
{
	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
	struct cacheinfo *this_leaf, *sib_leaf;
	unsigned int sibling, index, sib_index;

	for (index = 0; index < cache_leaves(cpu); index++) {
		this_leaf = per_cpu_cacheinfo_idx(cpu, index);
		for_each_cpu(sibling, &this_leaf->shared_cpu_map) {
			struct cpu_cacheinfo *sib_cpu_ci =
						get_cpu_cacheinfo(sibling);

			if (sibling == cpu || !sib_cpu_ci->info_list)
				continue; /* skip if itself or no cacheinfo */

			for (sib_index = 0; sib_index < cache_leaves(sibling); sib_index++) {
				sib_leaf = per_cpu_cacheinfo_idx(sibling, sib_index);

				/*
				 * Comparing cache IDs only makes sense if the leaves
				 * belong to the same cache level of same type. Skip
				 * the check if level and type do not match.
				 */
				if (sib_leaf->level != this_leaf->level ||
				    sib_leaf->type != this_leaf->type)
					continue;

				if (cache_leaves_are_shared(this_leaf, sib_leaf)) {
					cpumask_clear_cpu(cpu, &sib_leaf->shared_cpu_map);
					cpumask_clear_cpu(sibling, &this_leaf->shared_cpu_map);
					break;
				}
			}
		}
	}

	/* cpu is no longer populated in the shared map */
	this_cpu_ci->cpu_map_populated = false;
}

static void free_cache_attributes(unsigned int cpu)
{
	if (!per_cpu_cacheinfo(cpu))
		return;

	cache_shared_cpu_map_remove(cpu);
}

int __weak early_cache_level(unsigned int cpu)
{
	return -ENOENT;
}

int __weak init_cache_level(unsigned int cpu)
{
	return -ENOENT;
}

int __weak populate_cache_leaves(unsigned int cpu)
{
	return -ENOENT;
}

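/*
 * Architectures override the weak hooks above. A minimal sketch of such
 * an override (purely illustrative, not taken from any real arch): set
 * the counts in init_cache_level(), then tag each leaf's type and level
 * in populate_cache_leaves() and let DT/ACPI fill in the rest:
 *
 *	int init_cache_level(unsigned int cpu)
 *	{
 *		struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
 *
 *		this_cpu_ci->num_levels = 2;
 *		this_cpu_ci->num_leaves = 3;
 *		return 0;
 *	}
 *
 *	int populate_cache_leaves(unsigned int cpu)
 *	{
 *		struct cacheinfo *leaf = get_cpu_cacheinfo(cpu)->info_list;
 *
 *		leaf[0].type = CACHE_TYPE_DATA;    leaf[0].level = 1;
 *		leaf[1].type = CACHE_TYPE_INST;    leaf[1].level = 1;
 *		leaf[2].type = CACHE_TYPE_UNIFIED; leaf[2].level = 2;
 *		return 0;
 *	}
 */
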
static inline
int allocate_cache_info(int cpu)
{
	per_cpu_cacheinfo(cpu) = kcalloc(cache_leaves(cpu),
					 sizeof(struct cacheinfo), GFP_ATOMIC);
	if (!per_cpu_cacheinfo(cpu)) {
		cache_leaves(cpu) = 0;
		return -ENOMEM;
	}

	return 0;
}

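/*
 * fetch_cache_info() is the early entry point: it sizes the cacheinfo
 * list from DT/ACPI alone so that it can be allocated before secondary
 * CPUs are brought online. For example, an ACPI PPTT reporting
 * levels = 2 with split_levels = 1 yields num_leaves = 3.
 */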
int fetch_cache_info(unsigned int cpu)
{
	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
	unsigned int levels = 0, split_levels = 0;
	int ret;

	if (acpi_disabled) {
		ret = init_of_cache_level(cpu);
	} else {
		ret = acpi_get_cache_info(cpu, &levels, &split_levels);
		if (!ret) {
			this_cpu_ci->num_levels = levels;
			/*
			 * This assumes that:
			 * - there cannot be any split caches (data/instruction)
			 *   above a unified cache
			 * - data/instruction caches come by pair
			 */
			this_cpu_ci->num_leaves = levels + split_levels;
		}
	}

	if (ret || !cache_leaves(cpu)) {
		ret = early_cache_level(cpu);
		if (ret)
			return ret;

		if (!cache_leaves(cpu))
			return -ENOENT;

		this_cpu_ci->early_ci_levels = true;
	}

	return allocate_cache_info(cpu);
}

static inline int init_level_allocate_ci(unsigned int cpu)
{
	unsigned int early_leaves = cache_leaves(cpu);

	/*
	 * Since early initialization/allocation of the cacheinfo is allowed
	 * via fetch_cache_info() and this also gets called as CPU hotplug
	 * callbacks via cacheinfo_cpu_online, the init/alloc can be skipped
	 * as it will happen only once (the cacheinfo memory is never freed).
	 * Just populate the cacheinfo. However, if the cacheinfo has been
	 * allocated early through the arch-specific early_cache_level() call,
	 * there is a chance the info is wrong (this can happen on arm64). In
	 * that case, call init_cache_level() anyway to give the arch-specific
	 * code a chance to make things right.
	 */
	if (per_cpu_cacheinfo(cpu) && !ci_cacheinfo(cpu)->early_ci_levels)
		return 0;

	if (init_cache_level(cpu) || !cache_leaves(cpu))
		return -ENOENT;

	/*
	 * Now that we have properly initialized the cache level info, make
	 * sure we don't try to do that again the next time we are called
	 * (e.g. as CPU hotplug callbacks).
	 */
	ci_cacheinfo(cpu)->early_ci_levels = false;

	if (cache_leaves(cpu) <= early_leaves)
		return 0;

	kfree(per_cpu_cacheinfo(cpu));
	return allocate_cache_info(cpu);
}

int detect_cache_attributes(unsigned int cpu)
{
	int ret;

	ret = init_level_allocate_ci(cpu);
	if (ret)
		return ret;

	/*
	 * If the LLC is valid, the cache leaves were already populated, so
	 * just go on to update the CPU map.
	 */
	if (!last_level_cache_is_valid(cpu)) {
		/*
		 * populate_cache_leaves() may completely set up the cache
		 * leaves and shared_cpu_map, or it may leave them partially
		 * set up.
		 */
		ret = populate_cache_leaves(cpu);
		if (ret)
			goto free_ci;
	}

	/*
	 * For systems using DT for the cache hierarchy, fw_token and
	 * shared_cpu_map will be set up here only if they are not
	 * populated already.
	 */
	ret = cache_shared_cpu_map_setup(cpu);
	if (ret) {
		pr_warn("Unable to detect cache hierarchy for CPU %d\n", cpu);
		goto free_ci;
	}

	return 0;

free_ci:
	free_cache_attributes(cpu);
	return ret;
}

/* pointer to cpuX/cache device */
static DEFINE_PER_CPU(struct device *, ci_cache_dev);
#define per_cpu_cache_dev(cpu)	(per_cpu(ci_cache_dev, cpu))

static cpumask_t cache_dev_map;

/* pointer to array of devices for cpuX/cache/indexY */
static DEFINE_PER_CPU(struct device **, ci_index_dev);
#define per_cpu_index_dev(cpu)	(per_cpu(ci_index_dev, cpu))
#define per_cache_index_dev(cpu, idx)	((per_cpu_index_dev(cpu))[idx])

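/*
 * The attributes below surface each leaf under sysfs, in a layout such
 * as (index numbering and values are machine-dependent):
 *
 *	/sys/devices/system/cpu/cpu0/cache/index0/level:1
 *	/sys/devices/system/cpu/cpu0/cache/index0/type:Data
 *	/sys/devices/system/cpu/cpu0/cache/index0/size:32K
 *	/sys/devices/system/cpu/cpu0/cache/index0/shared_cpu_list:0
 */
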
#define show_one(file_name, object)				\
static ssize_t file_name##_show(struct device *dev,		\
		struct device_attribute *attr, char *buf)	\
{								\
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);	\
	return sysfs_emit(buf, "%u\n", this_leaf->object);	\
}

show_one(id, id);
show_one(level, level);
show_one(coherency_line_size, coherency_line_size);
show_one(number_of_sets, number_of_sets);
show_one(physical_line_partition, physical_line_partition);
show_one(ways_of_associativity, ways_of_associativity);

static ssize_t size_show(struct device *dev,
			 struct device_attribute *attr, char *buf)
{
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%uK\n", this_leaf->size >> 10);
}

static ssize_t shared_cpu_map_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);
	const struct cpumask *mask = &this_leaf->shared_cpu_map;

	return sysfs_emit(buf, "%*pb\n", nr_cpu_ids, mask);
}

static ssize_t shared_cpu_list_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);
	const struct cpumask *mask = &this_leaf->shared_cpu_map;

	return sysfs_emit(buf, "%*pbl\n", nr_cpu_ids, mask);
}

static ssize_t type_show(struct device *dev,
			 struct device_attribute *attr, char *buf)
{
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);
	const char *output;

	switch (this_leaf->type) {
	case CACHE_TYPE_DATA:
		output = "Data";
		break;
	case CACHE_TYPE_INST:
		output = "Instruction";
		break;
	case CACHE_TYPE_UNIFIED:
		output = "Unified";
		break;
	default:
		return -EINVAL;
	}

	return sysfs_emit(buf, "%s\n", output);
}

static ssize_t allocation_policy_show(struct device *dev,
				      struct device_attribute *attr, char *buf)
{
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);
	unsigned int ci_attr = this_leaf->attributes;
	const char *output;

	if ((ci_attr & CACHE_READ_ALLOCATE) && (ci_attr & CACHE_WRITE_ALLOCATE))
		output = "ReadWriteAllocate";
	else if (ci_attr & CACHE_READ_ALLOCATE)
		output = "ReadAllocate";
	else if (ci_attr & CACHE_WRITE_ALLOCATE)
		output = "WriteAllocate";
	else
		return 0;

	return sysfs_emit(buf, "%s\n", output);
}

static ssize_t write_policy_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);
	unsigned int ci_attr = this_leaf->attributes;
	int n = 0;

	if (ci_attr & CACHE_WRITE_THROUGH)
		n = sysfs_emit(buf, "WriteThrough\n");
	else if (ci_attr & CACHE_WRITE_BACK)
		n = sysfs_emit(buf, "WriteBack\n");
	return n;
}

static DEVICE_ATTR_RO(id);
static DEVICE_ATTR_RO(level);
static DEVICE_ATTR_RO(type);
static DEVICE_ATTR_RO(coherency_line_size);
static DEVICE_ATTR_RO(ways_of_associativity);
static DEVICE_ATTR_RO(number_of_sets);
static DEVICE_ATTR_RO(size);
static DEVICE_ATTR_RO(allocation_policy);
static DEVICE_ATTR_RO(write_policy);
static DEVICE_ATTR_RO(shared_cpu_map);
static DEVICE_ATTR_RO(shared_cpu_list);
static DEVICE_ATTR_RO(physical_line_partition);

static struct attribute *cache_default_attrs[] = {
	&dev_attr_id.attr,
	&dev_attr_type.attr,
	&dev_attr_level.attr,
	&dev_attr_shared_cpu_map.attr,
	&dev_attr_shared_cpu_list.attr,
	&dev_attr_coherency_line_size.attr,
	&dev_attr_ways_of_associativity.attr,
	&dev_attr_number_of_sets.attr,
	&dev_attr_size.attr,
	&dev_attr_allocation_policy.attr,
	&dev_attr_write_policy.attr,
	&dev_attr_physical_line_partition.attr,
	NULL
};

static umode_t
cache_default_attrs_is_visible(struct kobject *kobj,
			       struct attribute *attr, int unused)
{
	struct device *dev = kobj_to_dev(kobj);
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);
	const struct cpumask *mask = &this_leaf->shared_cpu_map;
	umode_t mode = attr->mode;

	if ((attr == &dev_attr_id.attr) && (this_leaf->attributes & CACHE_ID))
		return mode;
	if ((attr == &dev_attr_type.attr) && this_leaf->type)
		return mode;
	if ((attr == &dev_attr_level.attr) && this_leaf->level)
		return mode;
	if ((attr == &dev_attr_shared_cpu_map.attr) && !cpumask_empty(mask))
		return mode;
	if ((attr == &dev_attr_shared_cpu_list.attr) && !cpumask_empty(mask))
		return mode;
	if ((attr == &dev_attr_coherency_line_size.attr) &&
	    this_leaf->coherency_line_size)
		return mode;
	if ((attr == &dev_attr_ways_of_associativity.attr) &&
	    this_leaf->size) /* allow 0 = full associativity */
		return mode;
	if ((attr == &dev_attr_number_of_sets.attr) &&
	    this_leaf->number_of_sets)
		return mode;
	if ((attr == &dev_attr_size.attr) && this_leaf->size)
		return mode;
	if ((attr == &dev_attr_write_policy.attr) &&
	    (this_leaf->attributes & CACHE_WRITE_POLICY_MASK))
		return mode;
	if ((attr == &dev_attr_allocation_policy.attr) &&
	    (this_leaf->attributes & CACHE_ALLOCATE_POLICY_MASK))
		return mode;
	if ((attr == &dev_attr_physical_line_partition.attr) &&
	    this_leaf->physical_line_partition)
		return mode;

	return 0;
}

static const struct attribute_group cache_default_group = {
	.attrs = cache_default_attrs,
	.is_visible = cache_default_attrs_is_visible,
};

static const struct attribute_group *cache_default_groups[] = {
	&cache_default_group,
	NULL,
};

static const struct attribute_group *cache_private_groups[] = {
	&cache_default_group,
	NULL, /* placeholder for private group */
	NULL,
};

const struct attribute_group *
__weak cache_get_priv_group(struct cacheinfo *this_leaf)
{
	return NULL;
}

static const struct attribute_group **
cache_get_attribute_groups(struct cacheinfo *this_leaf)
{
	const struct attribute_group *priv_group =
			cache_get_priv_group(this_leaf);

	if (!priv_group)
		return cache_default_groups;

	if (!cache_private_groups[1])
		cache_private_groups[1] = priv_group;

	return cache_private_groups;
}

/* Add/Remove cache interface for CPU device */
static void cpu_cache_sysfs_exit(unsigned int cpu)
{
	int i;
	struct device *ci_dev;

	if (per_cpu_index_dev(cpu)) {
		for (i = 0; i < cache_leaves(cpu); i++) {
			ci_dev = per_cache_index_dev(cpu, i);
			if (!ci_dev)
				continue;
			device_unregister(ci_dev);
		}
		kfree(per_cpu_index_dev(cpu));
		per_cpu_index_dev(cpu) = NULL;
	}
	device_unregister(per_cpu_cache_dev(cpu));
	per_cpu_cache_dev(cpu) = NULL;
}

static int cpu_cache_sysfs_init(unsigned int cpu)
{
	struct device *dev = get_cpu_device(cpu);

	if (per_cpu_cacheinfo(cpu) == NULL)
		return -ENOENT;

	per_cpu_cache_dev(cpu) = cpu_device_create(dev, NULL, NULL, "cache");
	if (IS_ERR(per_cpu_cache_dev(cpu)))
		return PTR_ERR(per_cpu_cache_dev(cpu));

	/* Allocate all required memory */
	per_cpu_index_dev(cpu) = kcalloc(cache_leaves(cpu),
					 sizeof(struct device *), GFP_KERNEL);
	if (unlikely(per_cpu_index_dev(cpu) == NULL))
		goto err_out;

	return 0;

err_out:
	cpu_cache_sysfs_exit(cpu);
	return -ENOMEM;
}

static int cache_add_dev(unsigned int cpu)
{
	unsigned int i;
	int rc;
	struct device *ci_dev, *parent;
	struct cacheinfo *this_leaf;
	const struct attribute_group **cache_groups;

	rc = cpu_cache_sysfs_init(cpu);
	if (unlikely(rc < 0))
		return rc;

	parent = per_cpu_cache_dev(cpu);
	for (i = 0; i < cache_leaves(cpu); i++) {
		this_leaf = per_cpu_cacheinfo_idx(cpu, i);
		if (this_leaf->disable_sysfs)
			continue;
		if (this_leaf->type == CACHE_TYPE_NOCACHE)
			break;
		cache_groups = cache_get_attribute_groups(this_leaf);
		ci_dev = cpu_device_create(parent, this_leaf, cache_groups,
					   "index%1u", i);
		if (IS_ERR(ci_dev)) {
			rc = PTR_ERR(ci_dev);
			goto err;
		}
		per_cache_index_dev(cpu, i) = ci_dev;
	}
	cpumask_set_cpu(cpu, &cache_dev_map);

	return 0;
err:
	cpu_cache_sysfs_exit(cpu);
	return rc;
}

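/*
 * CPU hotplug lifecycle: on each online transition, detect the cache
 * attributes and register the sysfs devices; on the way down, remove
 * the sysfs nodes and clear this CPU from the shared cpu maps.
 */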
static int cacheinfo_cpu_online(unsigned int cpu)
{
	int rc = detect_cache_attributes(cpu);

	if (rc)
		return rc;
	rc = cache_add_dev(cpu);
	if (rc)
		free_cache_attributes(cpu);
	return rc;
}

static int cacheinfo_cpu_pre_down(unsigned int cpu)
{
	if (cpumask_test_and_clear_cpu(cpu, &cache_dev_map))
		cpu_cache_sysfs_exit(cpu);

	free_cache_attributes(cpu);
	return 0;
}

static int __init cacheinfo_sysfs_init(void)
{
	return cpuhp_setup_state(CPUHP_AP_BASE_CACHEINFO_ONLINE,
				 "base/cacheinfo:online",
				 cacheinfo_cpu_online, cacheinfo_cpu_pre_down);
}
device_initcall(cacheinfo_sysfs_init);