xref: /openbmc/linux/arch/powerpc/kernel/cacheinfo.c (revision 93197a36a9c16a85fb24cf5a8639f7bf9af838a3)
/*
 * Processor cache information made available to userspace via sysfs;
 * intended to be compatible with x86 intel_cacheinfo implementation.
 *
 * Copyright 2008 IBM Corporation
 * Author: Nathan Lynch
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 */

#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/kobject.h>
#include <linux/list.h>
#include <linux/notifier.h>
#include <linux/of.h>
#include <linux/percpu.h>
#include <asm/prom.h>

#include "cacheinfo.h"

/* per-cpu object for tracking:
 * - a "cache" kobject for the top-level directory
 * - a list of "index" objects representing the cpu's local cache hierarchy
 */
struct cache_dir {
	struct kobject *kobj; /* bare (not embedded) kobject for cache
			       * directory */
	struct cache_index_dir *index; /* list of index objects */
};

/* "index" object: each cpu's cache directory has an index
 * subdirectory corresponding to a cache object associated with the
 * cpu.  This object's lifetime is managed via the embedded kobject.
 */
struct cache_index_dir {
	struct kobject kobj;
	struct cache_index_dir *next; /* next index in parent directory */
	struct cache *cache;
};

/* Template for determining which OF properties to query for a given
 * cache type */
struct cache_type_info {
	const char *name;
	const char *size_prop;

	/* Allow for both [di]-cache-line-size and
	 * [di]-cache-block-size properties.  According to the PowerPC
	 * Processor binding, -line-size should be provided if it
	 * differs from the cache block size (that which is operated
	 * on by cache instructions), so we look for -line-size first.
	 * See cache_get_line_size(). */

	const char *line_size_props[2];
	const char *nr_sets_prop;
};

/* These are used to index the cache_type_info array. */
#define CACHE_TYPE_UNIFIED     0
#define CACHE_TYPE_INSTRUCTION 1
#define CACHE_TYPE_DATA        2

static const struct cache_type_info cache_type_info[] = {
	{
		/* PowerPC Processor binding says the [di]-cache-*
		 * must be equal on unified caches, so just use
		 * d-cache properties. */
		.name            = "Unified",
		.size_prop       = "d-cache-size",
		.line_size_props = { "d-cache-line-size",
				     "d-cache-block-size", },
		.nr_sets_prop    = "d-cache-sets",
	},
	{
		.name            = "Instruction",
		.size_prop       = "i-cache-size",
		.line_size_props = { "i-cache-line-size",
				     "i-cache-block-size", },
		.nr_sets_prop    = "i-cache-sets",
	},
	{
		.name            = "Data",
		.size_prop       = "d-cache-size",
		.line_size_props = { "d-cache-line-size",
				     "d-cache-block-size", },
		.nr_sets_prop    = "d-cache-sets",
	},
};

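/* Illustrative device tree fragment (hypothetical values, not from any
 * particular platform) showing the properties consulted above.  The L1
 * properties live in the cpu node itself; higher levels are separate
 * cache nodes reached via the cpu node's next-level cache phandle:
 *
 *	cpu@0 {
 *		device_type = "cpu";
 *		d-cache-size = <0x8000>;	// 32K
 *		d-cache-block-size = <0x80>;	// 128-byte blocks
 *		d-cache-sets = <0x40>;
 *		i-cache-size = <0x8000>;
 *		i-cache-block-size = <0x80>;
 *		i-cache-sets = <0x40>;
 *		l2-cache = <&L2_0>;		// next cache level
 *	};
 *
 *	L2_0: l2-cache {
 *		cache-unified;
 *		d-cache-size = <0x80000>;	// 512K
 *		d-cache-block-size = <0x80>;
 *		d-cache-sets = <0x200>;
 *	};
 */
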
/* Cache object: each instance of this corresponds to a distinct cache
 * in the system.  There are separate objects for Harvard caches: one
 * each for instruction and data, and each refers to the same OF node.
 * The refcount of the OF node is elevated for the lifetime of the
 * cache object.  A cache object is released when its shared_cpu_map
 * is cleared (see cache_cpu_clear).
 *
 * A cache object is on two lists: an unsorted global list
 * (cache_list) of cache objects; and a singly-linked list
 * representing the local cache hierarchy, which is ordered by level
 * (e.g. L1d -> L1i -> L2 -> L3).
 */
struct cache {
	struct device_node *ofnode;    /* OF node for this cache, may be cpu */
	struct cpumask shared_cpu_map; /* online CPUs using this cache */
	int type;                      /* split cache disambiguation */
	int level;                     /* level not explicit in device tree */
	struct list_head list;         /* global list of cache objects */
	struct cache *next_local;      /* next cache of >= level */
};

static DEFINE_PER_CPU(struct cache_dir *, cache_dir);

/* traversal/modification of this list occurs only at cpu hotplug time;
 * access is serialized by cpu hotplug locking
 */
static LIST_HEAD(cache_list);

static struct cache_index_dir *kobj_to_cache_index_dir(struct kobject *k)
{
	return container_of(k, struct cache_index_dir, kobj);
}

static const char *cache_type_string(const struct cache *cache)
{
	return cache_type_info[cache->type].name;
}

static void __cpuinit cache_init(struct cache *cache, int type, int level, struct device_node *ofnode)
{
	cache->type = type;
	cache->level = level;
	cache->ofnode = of_node_get(ofnode);
	INIT_LIST_HEAD(&cache->list);
	list_add(&cache->list, &cache_list);
}

static struct cache *__cpuinit new_cache(int type, int level, struct device_node *ofnode)
{
	struct cache *cache;

	cache = kzalloc(sizeof(*cache), GFP_KERNEL);
	if (cache)
		cache_init(cache, type, level, ofnode);

	return cache;
}

static void release_cache_debugcheck(struct cache *cache)
{
	struct cache *iter;

	list_for_each_entry(iter, &cache_list, list)
		WARN_ONCE(iter->next_local == cache,
			  "cache for %s(%s) refers to cache for %s(%s)\n",
			  iter->ofnode->full_name,
			  cache_type_string(iter),
			  cache->ofnode->full_name,
			  cache_type_string(cache));
}

static void release_cache(struct cache *cache)
{
	if (!cache)
		return;

	pr_debug("freeing L%d %s cache for %s\n", cache->level,
		 cache_type_string(cache), cache->ofnode->full_name);

	release_cache_debugcheck(cache);
	list_del(&cache->list);
	of_node_put(cache->ofnode);
	kfree(cache);
}

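/* Mark @cpu as sharing every cache in the chain starting at @cache;
 * called when the CPU is brought online.
 */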
static void cache_cpu_set(struct cache *cache, int cpu)
{
	struct cache *next = cache;

	while (next) {
		WARN_ONCE(cpumask_test_cpu(cpu, &next->shared_cpu_map),
			  "CPU %i already accounted in %s(%s)\n",
			  cpu, next->ofnode->full_name,
			  cache_type_string(next));
		cpumask_set_cpu(cpu, &next->shared_cpu_map);
		next = next->next_local;
	}
}

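/* Read this cache's size in bytes from the appropriate device tree
 * property (d-cache-size or i-cache-size).
 */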
static int cache_size(const struct cache *cache, unsigned int *ret)
{
	const char *propname;
	const u32 *cache_size;

	propname = cache_type_info[cache->type].size_prop;

	cache_size = of_get_property(cache->ofnode, propname, NULL);
	if (!cache_size)
		return -ENODEV;

	*ret = *cache_size;
	return 0;
}

static int cache_size_kb(const struct cache *cache, unsigned int *ret)
{
	unsigned int size;

	if (cache_size(cache, &size))
		return -ENODEV;

	*ret = size / 1024;
	return 0;
}

/* not cache_line_size() because that's a macro in include/linux/cache.h */
static int cache_get_line_size(const struct cache *cache, unsigned int *ret)
{
	const u32 *line_size;
	int i, lim;

	lim = ARRAY_SIZE(cache_type_info[cache->type].line_size_props);

	for (i = 0; i < lim; i++) {
		const char *propname;

		propname = cache_type_info[cache->type].line_size_props[i];
		line_size = of_get_property(cache->ofnode, propname, NULL);
		if (line_size)
			break;
	}

	if (!line_size)
		return -ENODEV;

	*ret = *line_size;
	return 0;
}

static int cache_nr_sets(const struct cache *cache, unsigned int *ret)
{
	const char *propname;
	const u32 *nr_sets;

	propname = cache_type_info[cache->type].nr_sets_prop;

	nr_sets = of_get_property(cache->ofnode, propname, NULL);
	if (!nr_sets)
		return -ENODEV;

	*ret = *nr_sets;
	return 0;
}

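/* Derive the associativity from the other properties:
 *
 *	ways = size / (nr_sets * line_size)
 *
 * A nr_sets value of 1 is taken to mean fully associative, which is
 * reported as 0.
 */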
static int cache_associativity(const struct cache *cache, unsigned int *ret)
{
	unsigned int line_size;
	unsigned int nr_sets;
	unsigned int size;

	if (cache_nr_sets(cache, &nr_sets))
		goto err;

	/* If the cache is fully associative, there is no need to
	 * check the other properties.
	 */
	if (nr_sets == 1) {
		*ret = 0;
		return 0;
	}

	if (cache_get_line_size(cache, &line_size))
		goto err;
	if (cache_size(cache, &size))
		goto err;

	if (!(nr_sets > 0 && size > 0 && line_size > 0))
		goto err;

	*ret = (size / nr_sets) / line_size;
	return 0;
err:
	return -ENODEV;
}

/* helper for dealing with split caches */
static struct cache *cache_find_first_sibling(struct cache *cache)
{
	struct cache *iter;

	if (cache->type == CACHE_TYPE_UNIFIED)
		return cache;

	list_for_each_entry(iter, &cache_list, list)
		if (iter->ofnode == cache->ofnode && iter->next_local == cache)
			return iter;

	return cache;
}

/* return the first cache on a local list matching node */
static struct cache *cache_lookup_by_node(const struct device_node *node)
{
	struct cache *cache = NULL;
	struct cache *iter;

	list_for_each_entry(iter, &cache_list, list) {
		if (iter->ofnode != node)
			continue;
		cache = cache_find_first_sibling(iter);
		break;
	}

	return cache;
}

static bool cache_node_is_unified(const struct device_node *np)
{
	return of_get_property(np, "cache-unified", NULL);
}

static struct cache *__cpuinit cache_do_one_devnode_unified(struct device_node *node, int level)
{
	struct cache *cache;

	pr_debug("creating L%d ucache for %s\n", level, node->full_name);

	cache = new_cache(CACHE_TYPE_UNIFIED, level, node);

	return cache;
}

static struct cache *__cpuinit cache_do_one_devnode_split(struct device_node *node, int level)
{
	struct cache *dcache, *icache;

	pr_debug("creating L%d dcache and icache for %s\n", level,
		 node->full_name);

	dcache = new_cache(CACHE_TYPE_DATA, level, node);
	icache = new_cache(CACHE_TYPE_INSTRUCTION, level, node);

	if (!dcache || !icache)
		goto err;

	dcache->next_local = icache;

	return dcache;
err:
	release_cache(dcache);
	release_cache(icache);
	return NULL;
}

static struct cache *__cpuinit cache_do_one_devnode(struct device_node *node, int level)
{
	struct cache *cache;

	if (cache_node_is_unified(node))
		cache = cache_do_one_devnode_unified(node, level);
	else
		cache = cache_do_one_devnode_split(node, level);

	return cache;
}

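/* Return the cache object(s) already instantiated for @node, or create
 * them if this is the first CPU to reference that node.
 */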
static struct cache *__cpuinit cache_lookup_or_instantiate(struct device_node *node, int level)
{
	struct cache *cache;

	cache = cache_lookup_by_node(node);

	WARN_ONCE(cache && cache->level != level,
		  "cache level mismatch on lookup (got %d, expected %d)\n",
		  cache->level, level);

	if (!cache)
		cache = cache_do_one_devnode(node, level);

	return cache;
}

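/* Append @bigger (the next cache level) to the end of @smaller's local
 * chain, unless it is already linked there.
 */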
static void __cpuinit link_cache_lists(struct cache *smaller, struct cache *bigger)
{
	while (smaller->next_local) {
		if (smaller->next_local == bigger)
			return; /* already linked */
		smaller = smaller->next_local;
	}

	smaller->next_local = bigger;
}

static void __cpuinit do_subsidiary_caches_debugcheck(struct cache *cache)
{
	WARN_ON_ONCE(cache->level != 1);
	WARN_ON_ONCE(strcmp(cache->ofnode->type, "cpu"));
}

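/* Starting from the L1 cache (whose OF node is the cpu node itself),
 * follow the chain of next-level cache nodes returned by
 * of_find_next_cache_node(), instantiating a cache object for each
 * level and linking it into the CPU's local hierarchy.
 */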
static void __cpuinit do_subsidiary_caches(struct cache *cache)
{
	struct device_node *subcache_node;
	int level = cache->level;

	do_subsidiary_caches_debugcheck(cache);

	while ((subcache_node = of_find_next_cache_node(cache->ofnode))) {
		struct cache *subcache;

		level++;
		subcache = cache_lookup_or_instantiate(subcache_node, level);
		of_node_put(subcache_node);
		if (!subcache)
			break;

		link_cache_lists(cache, subcache);
		cache = subcache;
	}
}

static struct cache *__cpuinit cache_chain_instantiate(unsigned int cpu_id)
{
	struct device_node *cpu_node;
	struct cache *cpu_cache = NULL;

	pr_debug("creating cache object(s) for CPU %i\n", cpu_id);

	cpu_node = of_get_cpu_node(cpu_id, NULL);
	WARN_ONCE(!cpu_node, "no OF node found for CPU %i\n", cpu_id);
	if (!cpu_node)
		goto out;

	cpu_cache = cache_lookup_or_instantiate(cpu_node, 1);
	if (!cpu_cache)
		goto out;

	do_subsidiary_caches(cpu_cache);

	cache_cpu_set(cpu_cache, cpu_id);
out:
	of_node_put(cpu_node);

	return cpu_cache;
}

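/* Create the per-cpu "cache" kobject (the directory that holds the
 * index subdirectories) under the CPU's sysdev kobject, and record it
 * in the per-cpu cache_dir pointer.
 */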
static struct cache_dir *__cpuinit cacheinfo_create_cache_dir(unsigned int cpu_id)
{
	struct cache_dir *cache_dir;
	struct sys_device *sysdev;
	struct kobject *kobj = NULL;

	sysdev = get_cpu_sysdev(cpu_id);
	WARN_ONCE(!sysdev, "no sysdev for CPU %i\n", cpu_id);
	if (!sysdev)
		goto err;

	kobj = kobject_create_and_add("cache", &sysdev->kobj);
	if (!kobj)
		goto err;

	cache_dir = kzalloc(sizeof(*cache_dir), GFP_KERNEL);
	if (!cache_dir)
		goto err;

	cache_dir->kobj = kobj;

	WARN_ON_ONCE(per_cpu(cache_dir, cpu_id) != NULL);

	per_cpu(cache_dir, cpu_id) = cache_dir;

	return cache_dir;
err:
	kobject_put(kobj);
	return NULL;
}

static void cache_index_release(struct kobject *kobj)
{
	struct cache_index_dir *index;

	index = kobj_to_cache_index_dir(kobj);

	pr_debug("freeing index directory for L%d %s cache\n",
		 index->cache->level, cache_type_string(index->cache));

	kfree(index);
}

static ssize_t cache_index_show(struct kobject *k, struct attribute *attr, char *buf)
{
	struct kobj_attribute *kobj_attr;

	kobj_attr = container_of(attr, struct kobj_attribute, attr);

	return kobj_attr->show(k, kobj_attr, buf);
}

static struct cache *index_kobj_to_cache(struct kobject *k)
{
	struct cache_index_dir *index;

	index = kobj_to_cache_index_dir(k);

	return index->cache;
}

static ssize_t size_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
{
	unsigned int size_kb;
	struct cache *cache;

	cache = index_kobj_to_cache(k);

	if (cache_size_kb(cache, &size_kb))
		return -ENODEV;

	return sprintf(buf, "%uK\n", size_kb);
}

static struct kobj_attribute cache_size_attr =
	__ATTR(size, 0444, size_show, NULL);


static ssize_t line_size_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
{
	unsigned int line_size;
	struct cache *cache;

	cache = index_kobj_to_cache(k);

	if (cache_get_line_size(cache, &line_size))
		return -ENODEV;

	return sprintf(buf, "%u\n", line_size);
}

static struct kobj_attribute cache_line_size_attr =
	__ATTR(coherency_line_size, 0444, line_size_show, NULL);

static ssize_t nr_sets_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
{
	unsigned int nr_sets;
	struct cache *cache;

	cache = index_kobj_to_cache(k);

	if (cache_nr_sets(cache, &nr_sets))
		return -ENODEV;

	return sprintf(buf, "%u\n", nr_sets);
}

static struct kobj_attribute cache_nr_sets_attr =
	__ATTR(number_of_sets, 0444, nr_sets_show, NULL);

static ssize_t associativity_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
{
	unsigned int associativity;
	struct cache *cache;

	cache = index_kobj_to_cache(k);

	if (cache_associativity(cache, &associativity))
		return -ENODEV;

	return sprintf(buf, "%u\n", associativity);
}

static struct kobj_attribute cache_assoc_attr =
	__ATTR(ways_of_associativity, 0444, associativity_show, NULL);

static ssize_t type_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
{
	struct cache *cache;

	cache = index_kobj_to_cache(k);

	return sprintf(buf, "%s\n", cache_type_string(cache));
}

static struct kobj_attribute cache_type_attr =
	__ATTR(type, 0444, type_show, NULL);

static ssize_t level_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
{
	struct cache_index_dir *index;
	struct cache *cache;

	index = kobj_to_cache_index_dir(k);
	cache = index->cache;

	return sprintf(buf, "%d\n", cache->level);
}

static struct kobj_attribute cache_level_attr =
	__ATTR(level, 0444, level_show, NULL);

static ssize_t shared_cpu_map_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
{
	struct cache_index_dir *index;
	struct cache *cache;
	int len;
	int n = 0;

	index = kobj_to_cache_index_dir(k);
	cache = index->cache;
	len = PAGE_SIZE - 2;

	if (len > 1) {
		n = cpumask_scnprintf(buf, len, &cache->shared_cpu_map);
		buf[n++] = '\n';
		buf[n] = '\0';
	}
	return n;
}

static struct kobj_attribute cache_shared_cpu_map_attr =
	__ATTR(shared_cpu_map, 0444, shared_cpu_map_show, NULL);

/* Attributes which should always be created -- the kobject/sysfs core
 * does this automatically via kobj_type->default_attrs.  This is the
 * minimum data required to uniquely identify a cache.
 */
static struct attribute *cache_index_default_attrs[] = {
	&cache_type_attr.attr,
	&cache_level_attr.attr,
	&cache_shared_cpu_map_attr.attr,
	NULL,
};

/* Attributes which should be created if the cache device node has the
 * right properties -- see cacheinfo_create_index_opt_attrs
 */
static struct kobj_attribute *cache_index_opt_attrs[] = {
	&cache_size_attr,
	&cache_line_size_attr,
	&cache_nr_sets_attr,
	&cache_assoc_attr,
};

static struct sysfs_ops cache_index_ops = {
	.show = cache_index_show,
};

static struct kobj_type cache_index_type = {
	.release = cache_index_release,
	.sysfs_ops = &cache_index_ops,
	.default_attrs = cache_index_default_attrs,
};

static void __cpuinit cacheinfo_create_index_opt_attrs(struct cache_index_dir *dir)
{
	const char *cache_name;
	const char *cache_type;
	struct cache *cache;
	char *buf;
	int i;

	buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!buf)
		return;

	cache = dir->cache;
	cache_name = cache->ofnode->full_name;
	cache_type = cache_type_string(cache);

	/* We don't want to create an attribute that can't provide a
	 * meaningful value.  Check the return value of each optional
	 * attribute's ->show method before registering the
	 * attribute.
	 */
	for (i = 0; i < ARRAY_SIZE(cache_index_opt_attrs); i++) {
		struct kobj_attribute *attr;
		ssize_t rc;

		attr = cache_index_opt_attrs[i];

		rc = attr->show(&dir->kobj, attr, buf);
		if (rc <= 0) {
			pr_debug("not creating %s attribute for "
				 "%s(%s) (rc = %zd)\n",
				 attr->attr.name, cache_name,
				 cache_type, rc);
			continue;
		}
		if (sysfs_create_file(&dir->kobj, &attr->attr))
			pr_debug("could not create %s attribute for %s(%s)\n",
				 attr->attr.name, cache_name, cache_type);
	}

	kfree(buf);
}

static void __cpuinit cacheinfo_create_index_dir(struct cache *cache, int index, struct cache_dir *cache_dir)
{
	struct cache_index_dir *index_dir;
	int rc;

	index_dir = kzalloc(sizeof(*index_dir), GFP_KERNEL);
	if (!index_dir)
		return;

	index_dir->cache = cache;

	rc = kobject_init_and_add(&index_dir->kobj, &cache_index_type,
				  cache_dir->kobj, "index%d", index);
	if (rc) {
		/* the kobject has been initialized, so drop the
		 * reference and free index_dir through
		 * cache_index_release() rather than a bare kfree() */
		kobject_put(&index_dir->kobj);
		return;
	}

	index_dir->next = cache_dir->index;
	cache_dir->index = index_dir;

	cacheinfo_create_index_opt_attrs(index_dir);
}

static void __cpuinit cacheinfo_sysfs_populate(unsigned int cpu_id, struct cache *cache_list)
{
	struct cache_dir *cache_dir;
	struct cache *cache;
	int index = 0;

	cache_dir = cacheinfo_create_cache_dir(cpu_id);
	if (!cache_dir)
		return;

	cache = cache_list;
	while (cache) {
		cacheinfo_create_index_dir(cache, index, cache_dir);
		index++;
		cache = cache->next_local;
	}
}

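/* Entry point called when a CPU comes online.  The result is a sysfs
 * hierarchy along these lines (a sketch -- the exact set of files
 * depends on which device tree properties are present):
 *
 *	/sys/devices/system/cpu/cpuN/cache/
 *		index0/		(e.g. L1 data)
 *			type  level  shared_cpu_map
 *			size  coherency_line_size  number_of_sets
 *			ways_of_associativity
 *		index1/		(e.g. L1 instruction)
 *		index2/		(e.g. L2)
 *		...
 */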
void __cpuinit cacheinfo_cpu_online(unsigned int cpu_id)
{
	struct cache *cache;

	cache = cache_chain_instantiate(cpu_id);
	if (!cache)
		return;

	cacheinfo_sysfs_populate(cpu_id, cache);
}

#ifdef CONFIG_HOTPLUG_CPU /* functions needed for cpu offline */

static struct cache *cache_lookup_by_cpu(unsigned int cpu_id)
{
	struct device_node *cpu_node;
	struct cache *cache;

	cpu_node = of_get_cpu_node(cpu_id, NULL);
	WARN_ONCE(!cpu_node, "no OF node found for CPU %i\n", cpu_id);
	if (!cpu_node)
		return NULL;

	cache = cache_lookup_by_node(cpu_node);
	of_node_put(cpu_node);

	return cache;
}

static void remove_index_dirs(struct cache_dir *cache_dir)
{
	struct cache_index_dir *index;

	index = cache_dir->index;

	while (index) {
		struct cache_index_dir *next;

		next = index->next;
		kobject_put(&index->kobj);
		index = next;
	}
}

static void remove_cache_dir(struct cache_dir *cache_dir)
{
	remove_index_dirs(cache_dir);

	kobject_put(cache_dir->kobj);

	kfree(cache_dir);
}

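/* Clear @cpu from the shared_cpu_map of every cache in its local
 * chain; any cache left with an empty map has no remaining online
 * users and is freed.
 */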
static void cache_cpu_clear(struct cache *cache, int cpu)
{
	while (cache) {
		struct cache *next = cache->next_local;

		WARN_ONCE(!cpumask_test_cpu(cpu, &cache->shared_cpu_map),
			  "CPU %i not accounted in %s(%s)\n",
			  cpu, cache->ofnode->full_name,
			  cache_type_string(cache));

		cpumask_clear_cpu(cpu, &cache->shared_cpu_map);

		/* Release the cache object if all the cpus using it
		 * are offline */
		if (cpumask_empty(&cache->shared_cpu_map))
			release_cache(cache);

		cache = next;
	}
}

void cacheinfo_cpu_offline(unsigned int cpu_id)
{
	struct cache_dir *cache_dir;
	struct cache *cache;

	/* Prevent userspace from seeing inconsistent state - remove
	 * the sysfs hierarchy first */
	cache_dir = per_cpu(cache_dir, cpu_id);

	/* careful, sysfs population may have failed */
	if (cache_dir)
		remove_cache_dir(cache_dir);

	per_cpu(cache_dir, cpu_id) = NULL;

	/* clear the CPU's bit in its cache chain, possibly freeing
	 * cache objects */
	cache = cache_lookup_by_cpu(cpu_id);
	if (cache)
		cache_cpu_clear(cache, cpu_id);
}
#endif /* CONFIG_HOTPLUG_CPU */