// SPDX-License-Identifier: GPL-2.0
/*
 * cacheinfo support - processor cache information via sysfs
 *
 * Based on arch/x86/kernel/cpu/intel_cacheinfo.c
 * Author: Sudeep Holla <sudeep.holla@arm.com>
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/acpi.h>
#include <linux/bitops.h>
#include <linux/cacheinfo.h>
#include <linux/compiler.h>
#include <linux/cpu.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/of_device.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/sysfs.h>

/* pointer to per cpu cacheinfo */
static DEFINE_PER_CPU(struct cpu_cacheinfo, ci_cpu_cacheinfo);
#define ci_cacheinfo(cpu)	(&per_cpu(ci_cpu_cacheinfo, cpu))
#define cache_leaves(cpu)	(ci_cacheinfo(cpu)->num_leaves)
#define per_cpu_cacheinfo(cpu)	(ci_cacheinfo(cpu)->info_list)

struct cpu_cacheinfo *get_cpu_cacheinfo(unsigned int cpu)
{
	return ci_cacheinfo(cpu);
}

#ifdef CONFIG_OF
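/*
 * cache_setup_of_node() below stores the cache's device_node in fw_token,
 * so two leaves share a cache exactly when they reference the same OF node.
 */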
static inline bool cache_leaves_are_shared(struct cacheinfo *this_leaf,
					   struct cacheinfo *sib_leaf)
{
	return sib_leaf->fw_token == this_leaf->fw_token;
}

/* OF properties to query for a given cache type */
struct cache_type_info {
	const char *size_prop;
	const char *line_size_props[2];
	const char *nr_sets_prop;
};

static const struct cache_type_info cache_type_info[] = {
	{
		.size_prop       = "cache-size",
		.line_size_props = { "cache-line-size",
				     "cache-block-size", },
		.nr_sets_prop    = "cache-sets",
	}, {
		.size_prop       = "i-cache-size",
		.line_size_props = { "i-cache-line-size",
				     "i-cache-block-size", },
		.nr_sets_prop    = "i-cache-sets",
	}, {
		.size_prop       = "d-cache-size",
		.line_size_props = { "d-cache-line-size",
				     "d-cache-block-size", },
		.nr_sets_prop    = "d-cache-sets",
	},
};

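/*
 * For reference, a hypothetical DT node carrying the unified-cache
 * properties queried above (here a 512 KiB L2) might look like:
 *
 *	l2: l2-cache {
 *		compatible = "cache";
 *		cache-unified;
 *		cache-size = <0x80000>;
 *		cache-line-size = <64>;
 *		cache-sets = <1024>;
 *	};
 */

/*
 * enum cache_type encodes CACHE_TYPE_INST as BIT(0) and CACHE_TYPE_DATA
 * as BIT(1), so those values double as indices 1 and 2 into
 * cache_type_info[]; only the unified type needs remapping to entry 0.
 */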
static inline int get_cacheinfo_idx(enum cache_type type)
{
	if (type == CACHE_TYPE_UNIFIED)
		return 0;
	return type;
}

static void cache_size(struct cacheinfo *this_leaf, struct device_node *np)
{
	const char *propname;
	int ct_idx;

	ct_idx = get_cacheinfo_idx(this_leaf->type);
	propname = cache_type_info[ct_idx].size_prop;

	of_property_read_u32(np, propname, &this_leaf->size);
}

/* not cache_line_size() because that's a macro in include/linux/cache.h */
static void cache_get_line_size(struct cacheinfo *this_leaf,
				struct device_node *np)
{
	int i, lim, ct_idx;

	ct_idx = get_cacheinfo_idx(this_leaf->type);
	lim = ARRAY_SIZE(cache_type_info[ct_idx].line_size_props);

	for (i = 0; i < lim; i++) {
		int ret;
		u32 line_size;
		const char *propname;

		propname = cache_type_info[ct_idx].line_size_props[i];
		ret = of_property_read_u32(np, propname, &line_size);
		if (!ret) {
			this_leaf->coherency_line_size = line_size;
			break;
		}
	}
}

static void cache_nr_sets(struct cacheinfo *this_leaf, struct device_node *np)
{
	const char *propname;
	int ct_idx;

	ct_idx = get_cacheinfo_idx(this_leaf->type);
	propname = cache_type_info[ct_idx].nr_sets_prop;

	of_property_read_u32(np, propname, &this_leaf->number_of_sets);
}

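/*
 * ways = size / (nr_sets * line_size); e.g. a 32 KiB cache with 128 sets
 * of 64-byte lines works out to 32768 / (128 * 64) = 4 ways.
 */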
static void cache_associativity(struct cacheinfo *this_leaf)
{
	unsigned int line_size = this_leaf->coherency_line_size;
	unsigned int nr_sets = this_leaf->number_of_sets;
	unsigned int size = this_leaf->size;

	/*
	 * If the cache is fully associative, there is no need to
	 * check the other properties.
	 */
	if (nr_sets > 1 && size > 0 && line_size > 0)
		this_leaf->ways_of_associativity = (size / nr_sets) / line_size;
}

static bool cache_node_is_unified(struct cacheinfo *this_leaf,
				  struct device_node *np)
{
	return of_property_read_bool(np, "cache-unified");
}

static void cache_of_set_props(struct cacheinfo *this_leaf,
			       struct device_node *np)
{
	/*
	 * init_cache_level() must set up the cache level correctly,
	 * overriding the architecturally specified levels, so if the
	 * type is still NONE at this stage, the cache is taken to be
	 * unified.
	 */
	if (this_leaf->type == CACHE_TYPE_NOCACHE &&
	    cache_node_is_unified(this_leaf, np))
		this_leaf->type = CACHE_TYPE_UNIFIED;
	cache_size(this_leaf, np);
	cache_get_line_size(this_leaf, np);
	cache_nr_sets(this_leaf, np);
	cache_associativity(this_leaf);
}

static int cache_setup_of_node(unsigned int cpu)
{
	struct device_node *np;
	struct cacheinfo *this_leaf;
	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
	unsigned int index = 0;

	/* skip if fw_token is already populated */
	if (this_cpu_ci->info_list->fw_token)
		return 0;

	np = of_cpu_device_node_get(cpu);
	if (!np) {
		pr_err("Failed to find cpu%d device node\n", cpu);
		return -ENOENT;
	}

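	/*
	 * Level 1 caches are described by properties in the CPU node
	 * itself; higher levels are reached by following the
	 * "l2-cache"/"next-level-cache" phandle chain via
	 * of_find_next_cache_node().
	 */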
	while (index < cache_leaves(cpu)) {
		this_leaf = this_cpu_ci->info_list + index;
		if (this_leaf->level != 1)
			np = of_find_next_cache_node(np);
		else
			np = of_node_get(np); /* cpu node itself */
		if (!np)
			break;
		cache_of_set_props(this_leaf, np);
		this_leaf->fw_token = np;
		index++;
	}

	if (index != cache_leaves(cpu)) /* not all OF nodes populated */
		return -ENOENT;

	return 0;
}
#else
static inline int cache_setup_of_node(unsigned int cpu) { return 0; }
static inline bool cache_leaves_are_shared(struct cacheinfo *this_leaf,
					   struct cacheinfo *sib_leaf)
{
	/*
	 * For non-DT/ACPI systems, assume private level 1 caches and
	 * system-wide shared caches for all other levels. This is used
	 * only if arch specific code has not populated shared_cpu_map.
	 */
	return this_leaf->level != 1;
}
#endif

int __weak cache_setup_acpi(unsigned int cpu)
{
	return -ENOTSUPP;
}

unsigned int coherency_max_size;

static int cache_shared_cpu_map_setup(unsigned int cpu)
{
	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
	struct cacheinfo *this_leaf, *sib_leaf;
	unsigned int index;
	int ret = 0;

	if (this_cpu_ci->cpu_map_populated)
		return 0;

	if (of_have_populated_dt())
		ret = cache_setup_of_node(cpu);
	else if (!acpi_disabled)
		ret = cache_setup_acpi(cpu);

	if (ret)
		return ret;

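	/*
	 * shared_cpu_map is kept symmetric: whenever a sibling shares a
	 * leaf with this CPU, each CPU is added to the other's mask so
	 * both sysfs views agree.
	 */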
	for (index = 0; index < cache_leaves(cpu); index++) {
		unsigned int i;

		this_leaf = this_cpu_ci->info_list + index;
		/* skip if shared_cpu_map is already populated */
		if (!cpumask_empty(&this_leaf->shared_cpu_map))
			continue;

		cpumask_set_cpu(cpu, &this_leaf->shared_cpu_map);
		for_each_online_cpu(i) {
			struct cpu_cacheinfo *sib_cpu_ci = get_cpu_cacheinfo(i);

			if (i == cpu || !sib_cpu_ci->info_list)
				continue; /* skip if itself or no cacheinfo */
			sib_leaf = sib_cpu_ci->info_list + index;
			if (cache_leaves_are_shared(this_leaf, sib_leaf)) {
				cpumask_set_cpu(cpu, &sib_leaf->shared_cpu_map);
				cpumask_set_cpu(i, &this_leaf->shared_cpu_map);
			}
		}
		/* record the maximum cache line size */
		if (this_leaf->coherency_line_size > coherency_max_size)
			coherency_max_size = this_leaf->coherency_line_size;
	}

	return 0;
}

static void cache_shared_cpu_map_remove(unsigned int cpu)
{
	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
	struct cacheinfo *this_leaf, *sib_leaf;
	unsigned int sibling, index;

	for (index = 0; index < cache_leaves(cpu); index++) {
		this_leaf = this_cpu_ci->info_list + index;
		for_each_cpu(sibling, &this_leaf->shared_cpu_map) {
			struct cpu_cacheinfo *sib_cpu_ci;

			if (sibling == cpu) /* skip itself */
				continue;

			sib_cpu_ci = get_cpu_cacheinfo(sibling);
			if (!sib_cpu_ci->info_list)
				continue;

			sib_leaf = sib_cpu_ci->info_list + index;
			cpumask_clear_cpu(cpu, &sib_leaf->shared_cpu_map);
			cpumask_clear_cpu(sibling, &this_leaf->shared_cpu_map);
		}
		if (of_have_populated_dt())
			of_node_put(this_leaf->fw_token);
	}
}

static void free_cache_attributes(unsigned int cpu)
{
	if (!per_cpu_cacheinfo(cpu))
		return;

	cache_shared_cpu_map_remove(cpu);

	kfree(per_cpu_cacheinfo(cpu));
	per_cpu_cacheinfo(cpu) = NULL;
	cache_leaves(cpu) = 0;
}

int __weak init_cache_level(unsigned int cpu)
{
	return -ENOENT;
}

int __weak populate_cache_leaves(unsigned int cpu)
{
	return -ENOENT;
}

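/*
 * detect_cache_attributes() drives the whole sequence: the architecture
 * reports how many leaves exist (init_cache_level()), the per-cpu array
 * is allocated, the arch fills in each leaf (populate_cache_leaves()),
 * and anything still missing is completed from the DT or ACPI
 * description via cache_shared_cpu_map_setup().
 */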
static int detect_cache_attributes(unsigned int cpu)
{
	int ret;

	if (init_cache_level(cpu) || !cache_leaves(cpu))
		return -ENOENT;

	per_cpu_cacheinfo(cpu) = kcalloc(cache_leaves(cpu),
					 sizeof(struct cacheinfo), GFP_KERNEL);
	if (per_cpu_cacheinfo(cpu) == NULL)
		return -ENOMEM;

	/*
	 * populate_cache_leaves() may completely set up the cache leaves
	 * and shared_cpu_map, or it may leave them partially set up.
	 */
	ret = populate_cache_leaves(cpu);
	if (ret)
		goto free_ci;
	/*
	 * For systems using DT for the cache hierarchy, fw_token and
	 * shared_cpu_map will be set up here only if they are not
	 * populated already.
	 */
	ret = cache_shared_cpu_map_setup(cpu);
	if (ret) {
		pr_warn("Unable to detect cache hierarchy for CPU %d\n", cpu);
		goto free_ci;
	}

	return 0;

free_ci:
	free_cache_attributes(cpu);
	return ret;
}

/* pointer to cpuX/cache device */
static DEFINE_PER_CPU(struct device *, ci_cache_dev);
#define per_cpu_cache_dev(cpu)	(per_cpu(ci_cache_dev, cpu))

static cpumask_t cache_dev_map;

/* pointer to array of devices for cpuX/cache/indexY */
static DEFINE_PER_CPU(struct device **, ci_index_dev);
#define per_cpu_index_dev(cpu)	(per_cpu(ci_index_dev, cpu))
#define per_cache_index_dev(cpu, idx)	((per_cpu_index_dev(cpu))[idx])

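/*
 * These devices surface in sysfs under
 * /sys/devices/system/cpu/cpuX/cache/indexY/, one attribute file per
 * entry in cache_default_attrs, e.g.:
 *
 *	$ cat /sys/devices/system/cpu/cpu0/cache/index0/type
 *	Data
 */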
#define show_one(file_name, object)				\
static ssize_t file_name##_show(struct device *dev,		\
		struct device_attribute *attr, char *buf)	\
{								\
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);	\
	return sysfs_emit(buf, "%u\n", this_leaf->object);	\
}

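/*
 * e.g. show_one(level, level) defines the level_show() referenced by
 * DEVICE_ATTR_RO(level) below.
 */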
show_one(id, id);
show_one(level, level);
show_one(coherency_line_size, coherency_line_size);
show_one(number_of_sets, number_of_sets);
show_one(physical_line_partition, physical_line_partition);
show_one(ways_of_associativity, ways_of_associativity);

static ssize_t size_show(struct device *dev,
			 struct device_attribute *attr, char *buf)
{
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%uK\n", this_leaf->size >> 10);
}

static ssize_t shared_cpu_map_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);
	const struct cpumask *mask = &this_leaf->shared_cpu_map;

	return sysfs_emit(buf, "%*pb\n", nr_cpu_ids, mask);
}

static ssize_t shared_cpu_list_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);
	const struct cpumask *mask = &this_leaf->shared_cpu_map;

	return sysfs_emit(buf, "%*pbl\n", nr_cpu_ids, mask);
}

static ssize_t type_show(struct device *dev,
			 struct device_attribute *attr, char *buf)
{
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);
	const char *output;

	switch (this_leaf->type) {
	case CACHE_TYPE_DATA:
		output = "Data";
		break;
	case CACHE_TYPE_INST:
		output = "Instruction";
		break;
	case CACHE_TYPE_UNIFIED:
		output = "Unified";
		break;
	default:
		return -EINVAL;
	}

	return sysfs_emit(buf, "%s\n", output);
}

static ssize_t allocation_policy_show(struct device *dev,
				      struct device_attribute *attr, char *buf)
{
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);
	unsigned int ci_attr = this_leaf->attributes;
	const char *output;

	if ((ci_attr & CACHE_READ_ALLOCATE) && (ci_attr & CACHE_WRITE_ALLOCATE))
		output = "ReadWriteAllocate";
	else if (ci_attr & CACHE_READ_ALLOCATE)
		output = "ReadAllocate";
	else if (ci_attr & CACHE_WRITE_ALLOCATE)
		output = "WriteAllocate";
	else
		return 0;

	return sysfs_emit(buf, "%s\n", output);
}

static ssize_t write_policy_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);
	unsigned int ci_attr = this_leaf->attributes;
	int n = 0;

	if (ci_attr & CACHE_WRITE_THROUGH)
		n = sysfs_emit(buf, "WriteThrough\n");
	else if (ci_attr & CACHE_WRITE_BACK)
		n = sysfs_emit(buf, "WriteBack\n");
	return n;
}

static DEVICE_ATTR_RO(id);
static DEVICE_ATTR_RO(level);
static DEVICE_ATTR_RO(type);
static DEVICE_ATTR_RO(coherency_line_size);
static DEVICE_ATTR_RO(ways_of_associativity);
static DEVICE_ATTR_RO(number_of_sets);
static DEVICE_ATTR_RO(size);
static DEVICE_ATTR_RO(allocation_policy);
static DEVICE_ATTR_RO(write_policy);
static DEVICE_ATTR_RO(shared_cpu_map);
static DEVICE_ATTR_RO(shared_cpu_list);
static DEVICE_ATTR_RO(physical_line_partition);

static struct attribute *cache_default_attrs[] = {
	&dev_attr_id.attr,
	&dev_attr_type.attr,
	&dev_attr_level.attr,
	&dev_attr_shared_cpu_map.attr,
	&dev_attr_shared_cpu_list.attr,
	&dev_attr_coherency_line_size.attr,
	&dev_attr_ways_of_associativity.attr,
	&dev_attr_number_of_sets.attr,
	&dev_attr_size.attr,
	&dev_attr_allocation_policy.attr,
	&dev_attr_write_policy.attr,
	&dev_attr_physical_line_partition.attr,
	NULL
};

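/*
 * Returning 0 from an .is_visible callback hides the attribute, so files
 * whose backing fields were never populated by the architecture or
 * firmware simply do not appear in sysfs.
 */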
static umode_t
cache_default_attrs_is_visible(struct kobject *kobj,
			       struct attribute *attr, int unused)
{
	struct device *dev = kobj_to_dev(kobj);
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);
	const struct cpumask *mask = &this_leaf->shared_cpu_map;
	umode_t mode = attr->mode;

	if ((attr == &dev_attr_id.attr) && (this_leaf->attributes & CACHE_ID))
		return mode;
	if ((attr == &dev_attr_type.attr) && this_leaf->type)
		return mode;
	if ((attr == &dev_attr_level.attr) && this_leaf->level)
		return mode;
	if ((attr == &dev_attr_shared_cpu_map.attr) && !cpumask_empty(mask))
		return mode;
	if ((attr == &dev_attr_shared_cpu_list.attr) && !cpumask_empty(mask))
		return mode;
	if ((attr == &dev_attr_coherency_line_size.attr) &&
	    this_leaf->coherency_line_size)
		return mode;
	if ((attr == &dev_attr_ways_of_associativity.attr) &&
	    this_leaf->size) /* allow 0 = full associativity */
		return mode;
	if ((attr == &dev_attr_number_of_sets.attr) &&
	    this_leaf->number_of_sets)
		return mode;
	if ((attr == &dev_attr_size.attr) && this_leaf->size)
		return mode;
	if ((attr == &dev_attr_write_policy.attr) &&
	    (this_leaf->attributes & CACHE_WRITE_POLICY_MASK))
		return mode;
	if ((attr == &dev_attr_allocation_policy.attr) &&
	    (this_leaf->attributes & CACHE_ALLOCATE_POLICY_MASK))
		return mode;
	if ((attr == &dev_attr_physical_line_partition.attr) &&
	    this_leaf->physical_line_partition)
		return mode;

	return 0;
}

static const struct attribute_group cache_default_group = {
	.attrs = cache_default_attrs,
	.is_visible = cache_default_attrs_is_visible,
};

static const struct attribute_group *cache_default_groups[] = {
	&cache_default_group,
	NULL,
};

static const struct attribute_group *cache_private_groups[] = {
	&cache_default_group,
	NULL, /* placeholder for private group */
	NULL,
};

const struct attribute_group *
__weak cache_get_priv_group(struct cacheinfo *this_leaf)
{
	return NULL;
}

static const struct attribute_group **
cache_get_attribute_groups(struct cacheinfo *this_leaf)
{
	const struct attribute_group *priv_group =
			cache_get_priv_group(this_leaf);

	if (!priv_group)
		return cache_default_groups;

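	/*
	 * The placeholder slot is filled once, by the first leaf that
	 * supplies a private group; later leaves reuse the same pointer.
	 */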
	if (!cache_private_groups[1])
		cache_private_groups[1] = priv_group;

	return cache_private_groups;
}

/* Add/Remove cache interface for CPU device */
static void cpu_cache_sysfs_exit(unsigned int cpu)
{
	int i;
	struct device *ci_dev;

	if (per_cpu_index_dev(cpu)) {
		for (i = 0; i < cache_leaves(cpu); i++) {
			ci_dev = per_cache_index_dev(cpu, i);
			if (!ci_dev)
				continue;
			device_unregister(ci_dev);
		}
		kfree(per_cpu_index_dev(cpu));
		per_cpu_index_dev(cpu) = NULL;
	}
	device_unregister(per_cpu_cache_dev(cpu));
	per_cpu_cache_dev(cpu) = NULL;
}

static int cpu_cache_sysfs_init(unsigned int cpu)
{
	struct device *dev = get_cpu_device(cpu);

	if (per_cpu_cacheinfo(cpu) == NULL)
		return -ENOENT;

	per_cpu_cache_dev(cpu) = cpu_device_create(dev, NULL, NULL, "cache");
	if (IS_ERR(per_cpu_cache_dev(cpu)))
		return PTR_ERR(per_cpu_cache_dev(cpu));

	/* Allocate all required memory */
	per_cpu_index_dev(cpu) = kcalloc(cache_leaves(cpu),
					 sizeof(struct device *), GFP_KERNEL);
	if (unlikely(per_cpu_index_dev(cpu) == NULL))
		goto err_out;

	return 0;

err_out:
	cpu_cache_sysfs_exit(cpu);
	return -ENOMEM;
}

static int cache_add_dev(unsigned int cpu)
{
	unsigned int i;
	int rc;
	struct device *ci_dev, *parent;
	struct cacheinfo *this_leaf;
	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
	const struct attribute_group **cache_groups;

	rc = cpu_cache_sysfs_init(cpu);
	if (unlikely(rc < 0))
		return rc;

	parent = per_cpu_cache_dev(cpu);
	for (i = 0; i < cache_leaves(cpu); i++) {
		this_leaf = this_cpu_ci->info_list + i;
		if (this_leaf->disable_sysfs)
			continue;
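		/*
		 * A NOCACHE leaf ends registration here; any remaining
		 * leaves are assumed to be unpopulated as well.
		 */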
		if (this_leaf->type == CACHE_TYPE_NOCACHE)
			break;
		cache_groups = cache_get_attribute_groups(this_leaf);
		ci_dev = cpu_device_create(parent, this_leaf, cache_groups,
					   "index%1u", i);
		if (IS_ERR(ci_dev)) {
			rc = PTR_ERR(ci_dev);
			goto err;
		}
		per_cache_index_dev(cpu, i) = ci_dev;
	}
	cpumask_set_cpu(cpu, &cache_dev_map);

	return 0;
err:
	cpu_cache_sysfs_exit(cpu);
	return rc;
}

static int cacheinfo_cpu_online(unsigned int cpu)
{
	int rc = detect_cache_attributes(cpu);

	if (rc)
		return rc;
	rc = cache_add_dev(cpu);
	if (rc)
		free_cache_attributes(cpu);
	return rc;
}

static int cacheinfo_cpu_pre_down(unsigned int cpu)
{
	if (cpumask_test_and_clear_cpu(cpu, &cache_dev_map))
		cpu_cache_sysfs_exit(cpu);

	free_cache_attributes(cpu);
	return 0;
}

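/*
 * Registering a CPUHP_AP state ties the lifecycle to CPU hotplug:
 * cacheinfo_cpu_online() runs on every CPU as it comes up (including at
 * boot) and cacheinfo_cpu_pre_down() tears everything down again before
 * a CPU is taken offline.
 */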
static int __init cacheinfo_sysfs_init(void)
{
	return cpuhp_setup_state(CPUHP_AP_BASE_CACHEINFO_ONLINE,
				 "base/cacheinfo:online",
				 cacheinfo_cpu_online, cacheinfo_cpu_pre_down);
}
device_initcall(cacheinfo_sysfs_init);