/*
 * Processor cache information made available to userspace via sysfs;
 * intended to be compatible with x86 intel_cacheinfo implementation.
 *
 * Copyright 2008 IBM Corporation
 * Author: Nathan Lynch
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 */

#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/kernel.h>
#include <linux/kobject.h>
#include <linux/list.h>
#include <linux/notifier.h>
#include <linux/of.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <asm/prom.h>
#include <asm/cputhreads.h>
#include <asm/smp.h>

#include "cacheinfo.h"

/* per-cpu object for tracking:
 * - a "cache" kobject for the top-level directory
 * - a list of "index" objects representing the cpu's local cache hierarchy
 */
struct cache_dir {
	struct kobject *kobj; /* bare (not embedded) kobject for cache
			       * directory */
	struct cache_index_dir *index; /* list of index objects */
};

/* "index" object: each cpu's cache directory has an index
 * subdirectory corresponding to a cache object associated with the
 * cpu.  This object's lifetime is managed via the embedded kobject.
 */
struct cache_index_dir {
	struct kobject kobj;
	struct cache_index_dir *next; /* next index in parent directory */
	struct cache *cache;
};

/* Template for determining which OF properties to query for a given
 * cache type */
struct cache_type_info {
	const char *name;
	const char *size_prop;

	/* Allow for both [di]-cache-line-size and
	 * [di]-cache-block-size properties.  According to the PowerPC
	 * Processor binding, -line-size should be provided if it
	 * differs from the cache block size (that which is operated
	 * on by cache instructions), so we look for -line-size first.
	 * See cache_get_line_size(). */

	const char *line_size_props[2];
	const char *nr_sets_prop;
};

/* These are used to index the cache_type_info array. */
#define CACHE_TYPE_UNIFIED     0 /* cache-size, cache-block-size, etc. */
#define CACHE_TYPE_UNIFIED_D   1 /* d-cache-size, d-cache-block-size, etc */
#define CACHE_TYPE_INSTRUCTION 2
#define CACHE_TYPE_DATA        3

static const struct cache_type_info cache_type_info[] = {
	{
		/* Embedded systems that use cache-size, cache-block-size,
		 * etc. for the Unified (typically L2) cache. */
		.name            = "Unified",
		.size_prop       = "cache-size",
		.line_size_props = { "cache-line-size",
				     "cache-block-size", },
		.nr_sets_prop    = "cache-sets",
	},
	{
		/* PowerPC Processor binding says the [di]-cache-*
		 * must be equal on unified caches, so just use
		 * d-cache properties. */
		.name            = "Unified",
		.size_prop       = "d-cache-size",
		.line_size_props = { "d-cache-line-size",
				     "d-cache-block-size", },
		.nr_sets_prop    = "d-cache-sets",
	},
	{
		.name            = "Instruction",
		.size_prop       = "i-cache-size",
		.line_size_props = { "i-cache-line-size",
				     "i-cache-block-size", },
		.nr_sets_prop    = "i-cache-sets",
	},
	{
		.name            = "Data",
		.size_prop       = "d-cache-size",
		.line_size_props = { "d-cache-line-size",
				     "d-cache-block-size", },
		.nr_sets_prop    = "d-cache-sets",
	},
};
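
/*
 * Illustrative (hypothetical) device tree fragment showing the kind of
 * properties the templates above are matched against: a split L1
 * described by the cpu node itself, and a unified L2 in its own node
 * using the Open Firmware style d-cache-* names (CACHE_TYPE_UNIFIED_D):
 *
 *	cpu@0 {
 *		d-cache-size       = <0x8000>;
 *		d-cache-block-size = <0x80>;
 *		d-cache-sets       = <0x40>;
 *		i-cache-size       = <0x8000>;
 *		i-cache-block-size = <0x80>;
 *		i-cache-sets       = <0x40>;
 *		l2-cache           = <&L2_0>;
 *	};
 *
 *	L2_0: l2-cache {
 *		cache-unified;
 *		d-cache-size       = <0x80000>;
 *		d-cache-block-size = <0x80>;
 *		d-cache-sets       = <0x200>;
 *	};
 */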

/* Cache object: each instance of this corresponds to a distinct cache
 * in the system.  There are separate objects for Harvard caches: one
 * each for instruction and data, and each refers to the same OF node.
 * The refcount of the OF node is elevated for the lifetime of the
 * cache object.  A cache object is released when its shared_cpu_map
 * is cleared (see cache_cpu_clear).
 *
 * A cache object is on two lists: an unsorted global list
 * (cache_list) of cache objects; and a singly-linked list
 * representing the local cache hierarchy, which is ordered by level
 * (e.g. L1d -> L1i -> L2 -> L3).
 */
struct cache {
	struct device_node *ofnode;    /* OF node for this cache, may be cpu */
	struct cpumask shared_cpu_map; /* online CPUs using this cache */
	int type;                      /* split cache disambiguation */
	int level;                     /* level not explicit in device tree */
	struct list_head list;         /* global list of cache objects */
	struct cache *next_local;      /* next cache of >= level */
};

static DEFINE_PER_CPU(struct cache_dir *, cache_dir_pcpu);

/* traversal/modification of this list occurs only at cpu hotplug time;
 * access is serialized by cpu hotplug locking
 */
static LIST_HEAD(cache_list);

static struct cache_index_dir *kobj_to_cache_index_dir(struct kobject *k)
{
	return container_of(k, struct cache_index_dir, kobj);
}

static const char *cache_type_string(const struct cache *cache)
{
	return cache_type_info[cache->type].name;
}

static void cache_init(struct cache *cache, int type, int level,
		       struct device_node *ofnode)
{
	cache->type = type;
	cache->level = level;
	cache->ofnode = of_node_get(ofnode);
	INIT_LIST_HEAD(&cache->list);
	list_add(&cache->list, &cache_list);
}

static struct cache *new_cache(int type, int level, struct device_node *ofnode)
{
	struct cache *cache;

	cache = kzalloc(sizeof(*cache), GFP_KERNEL);
	if (cache)
		cache_init(cache, type, level, ofnode);

	return cache;
}

static void release_cache_debugcheck(struct cache *cache)
{
	struct cache *iter;

	list_for_each_entry(iter, &cache_list, list)
		WARN_ONCE(iter->next_local == cache,
			  "cache for %pOF(%s) refers to cache for %pOF(%s)\n",
			  iter->ofnode,
			  cache_type_string(iter),
			  cache->ofnode,
			  cache_type_string(cache));
}

static void release_cache(struct cache *cache)
{
	if (!cache)
		return;

	pr_debug("freeing L%d %s cache for %pOF\n", cache->level,
		 cache_type_string(cache), cache->ofnode);

	release_cache_debugcheck(cache);
	list_del(&cache->list);
	of_node_put(cache->ofnode);
	kfree(cache);
}

static void cache_cpu_set(struct cache *cache, int cpu)
{
	struct cache *next = cache;

	while (next) {
		WARN_ONCE(cpumask_test_cpu(cpu, &next->shared_cpu_map),
			  "CPU %i already accounted in %pOF(%s)\n",
			  cpu, next->ofnode,
			  cache_type_string(next));
		cpumask_set_cpu(cpu, &next->shared_cpu_map);
		next = next->next_local;
	}
}

static int cache_size(const struct cache *cache, unsigned int *ret)
{
	const char *propname;
	const __be32 *cache_size;

	propname = cache_type_info[cache->type].size_prop;

	cache_size = of_get_property(cache->ofnode, propname, NULL);
	if (!cache_size)
		return -ENODEV;

	*ret = of_read_number(cache_size, 1);
	return 0;
}

static int cache_size_kb(const struct cache *cache, unsigned int *ret)
{
	unsigned int size;

	if (cache_size(cache, &size))
		return -ENODEV;

	*ret = size / 1024;
	return 0;
}

/* not cache_line_size() because that's a macro in include/linux/cache.h */
static int cache_get_line_size(const struct cache *cache, unsigned int *ret)
{
	const __be32 *line_size;
	int i, lim;

	lim = ARRAY_SIZE(cache_type_info[cache->type].line_size_props);

	for (i = 0; i < lim; i++) {
		const char *propname;

		propname = cache_type_info[cache->type].line_size_props[i];
		line_size = of_get_property(cache->ofnode, propname, NULL);
		if (line_size)
			break;
	}

	if (!line_size)
		return -ENODEV;

	*ret = of_read_number(line_size, 1);
	return 0;
}

static int cache_nr_sets(const struct cache *cache, unsigned int *ret)
{
	const char *propname;
	const __be32 *nr_sets;

	propname = cache_type_info[cache->type].nr_sets_prop;

	nr_sets = of_get_property(cache->ofnode, propname, NULL);
	if (!nr_sets)
		return -ENODEV;

	*ret = of_read_number(nr_sets, 1);
	return 0;
}
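
/*
 * Worked example (hypothetical numbers): a 32 KiB cache with 128-byte
 * lines organised into 32 sets reports (32768 / 32) / 128 = 8, i.e. an
 * 8-way set-associative cache.  A value of 0 denotes a fully
 * associative cache (the nr_sets == 1 case below).
 */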

static int cache_associativity(const struct cache *cache, unsigned int *ret)
{
	unsigned int line_size;
	unsigned int nr_sets;
	unsigned int size;

	if (cache_nr_sets(cache, &nr_sets))
		goto err;

	/* If the cache is fully associative, there is no need to
	 * check the other properties.
	 */
	if (nr_sets == 1) {
		*ret = 0;
		return 0;
	}

	if (cache_get_line_size(cache, &line_size))
		goto err;
	if (cache_size(cache, &size))
		goto err;

	if (!(nr_sets > 0 && size > 0 && line_size > 0))
		goto err;

	*ret = (size / nr_sets) / line_size;
	return 0;
err:
	return -ENODEV;
}

/* helper for dealing with split caches */
static struct cache *cache_find_first_sibling(struct cache *cache)
{
	struct cache *iter;

	if (cache->type == CACHE_TYPE_UNIFIED ||
	    cache->type == CACHE_TYPE_UNIFIED_D)
		return cache;

	list_for_each_entry(iter, &cache_list, list)
		if (iter->ofnode == cache->ofnode && iter->next_local == cache)
			return iter;

	return cache;
}

/* return the first cache on a local list matching node */
static struct cache *cache_lookup_by_node(const struct device_node *node)
{
	struct cache *cache = NULL;
	struct cache *iter;

	list_for_each_entry(iter, &cache_list, list) {
		if (iter->ofnode != node)
			continue;
		cache = cache_find_first_sibling(iter);
		break;
	}

	return cache;
}

static bool cache_node_is_unified(const struct device_node *np)
{
	return of_get_property(np, "cache-unified", NULL);
}

/*
 * Unified caches can have two different sets of tags.  Most embedded
 * use cache-size, etc. for the unified cache size, but open firmware systems
 * use d-cache-size, etc.  Check on initialization for which type we have, and
 * return the appropriate structure type.  Assume it's embedded if it isn't
 * open firmware.  If it's yet a 3rd type, then there will be missing entries
 * in /sys/devices/system/cpu/cpu0/cache/index2/, and this code will need
 * to be extended further.
 */
static int cache_is_unified_d(const struct device_node *np)
{
	return of_get_property(np,
		cache_type_info[CACHE_TYPE_UNIFIED_D].size_prop, NULL) ?
		CACHE_TYPE_UNIFIED_D : CACHE_TYPE_UNIFIED;
}

/*
 * Instantiate the cache object for a node describing a unified cache,
 * using cache_is_unified_d() to pick the property-name template the
 * node actually provides.
 */
static struct cache *cache_do_one_devnode_unified(struct device_node *node, int level)
{
	pr_debug("creating L%d ucache for %pOF\n", level, node);

	return new_cache(cache_is_unified_d(node), level, node);
}

static struct cache *cache_do_one_devnode_split(struct device_node *node,
						int level)
{
	struct cache *dcache, *icache;

	pr_debug("creating L%d dcache and icache for %pOF\n", level,
		 node);

	dcache = new_cache(CACHE_TYPE_DATA, level, node);
	icache = new_cache(CACHE_TYPE_INSTRUCTION, level, node);

	if (!dcache || !icache)
		goto err;

	dcache->next_local = icache;

	return dcache;
err:
	release_cache(dcache);
	release_cache(icache);
	return NULL;
}

static struct cache *cache_do_one_devnode(struct device_node *node, int level)
{
	struct cache *cache;

	if (cache_node_is_unified(node))
		cache = cache_do_one_devnode_unified(node, level);
	else
		cache = cache_do_one_devnode_split(node, level);

	return cache;
}

static struct cache *cache_lookup_or_instantiate(struct device_node *node,
						 int level)
{
	struct cache *cache;

	cache = cache_lookup_by_node(node);

	WARN_ONCE(cache && cache->level != level,
		  "cache level mismatch on lookup (got %d, expected %d)\n",
		  cache->level, level);

	if (!cache)
		cache = cache_do_one_devnode(node, level);

	return cache;
}

static void link_cache_lists(struct cache *smaller, struct cache *bigger)
{
	while (smaller->next_local) {
		if (smaller->next_local == bigger)
			return; /* already linked */
		smaller = smaller->next_local;
	}

	smaller->next_local = bigger;
}

static void do_subsidiary_caches_debugcheck(struct cache *cache)
{
	WARN_ON_ONCE(cache->level != 1);
	WARN_ON_ONCE(strcmp(cache->ofnode->type, "cpu"));
}

static void do_subsidiary_caches(struct cache *cache)
{
	struct device_node *subcache_node;
	int level = cache->level;

	do_subsidiary_caches_debugcheck(cache);

	while ((subcache_node = of_find_next_cache_node(cache->ofnode))) {
		struct cache *subcache;

		level++;
		subcache = cache_lookup_or_instantiate(subcache_node, level);
		of_node_put(subcache_node);
		if (!subcache)
			break;

		link_cache_lists(cache, subcache);
		cache = subcache;
	}
}

static struct cache *cache_chain_instantiate(unsigned int cpu_id)
{
	struct device_node *cpu_node;
	struct cache *cpu_cache = NULL;

	pr_debug("creating cache object(s) for CPU %i\n", cpu_id);

	cpu_node = of_get_cpu_node(cpu_id, NULL);
	WARN_ONCE(!cpu_node, "no OF node found for CPU %i\n", cpu_id);
	if (!cpu_node)
		goto out;

	cpu_cache = cache_lookup_or_instantiate(cpu_node, 1);
	if (!cpu_cache)
		goto out;

	do_subsidiary_caches(cpu_cache);

	cache_cpu_set(cpu_cache, cpu_id);
out:
	of_node_put(cpu_node);

	return cpu_cache;
}
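
/*
 * On a typical device tree the chain built above comes out ordered
 * L1d -> L1i -> L2 -> L3: the split L1 pair is described by the cpu
 * node itself, and each higher level is found by
 * of_find_next_cache_node(), which follows the "l2-cache" or
 * "next-level-cache" phandle of the previous node.
 */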

static struct cache_dir *cacheinfo_create_cache_dir(unsigned int cpu_id)
{
	struct cache_dir *cache_dir;
	struct device *dev;
	struct kobject *kobj = NULL;

	dev = get_cpu_device(cpu_id);
	WARN_ONCE(!dev, "no dev for CPU %i\n", cpu_id);
	if (!dev)
		goto err;

	kobj = kobject_create_and_add("cache", &dev->kobj);
	if (!kobj)
		goto err;

	cache_dir = kzalloc(sizeof(*cache_dir), GFP_KERNEL);
	if (!cache_dir)
		goto err;

	cache_dir->kobj = kobj;

	WARN_ON_ONCE(per_cpu(cache_dir_pcpu, cpu_id) != NULL);

	per_cpu(cache_dir_pcpu, cpu_id) = cache_dir;

	return cache_dir;
err:
	kobject_put(kobj);
	return NULL;
}

static void cache_index_release(struct kobject *kobj)
{
	struct cache_index_dir *index;

	index = kobj_to_cache_index_dir(kobj);

	pr_debug("freeing index directory for L%d %s cache\n",
		 index->cache->level, cache_type_string(index->cache));

	kfree(index);
}

static ssize_t cache_index_show(struct kobject *k, struct attribute *attr, char *buf)
{
	struct kobj_attribute *kobj_attr;

	kobj_attr = container_of(attr, struct kobj_attribute, attr);

	return kobj_attr->show(k, kobj_attr, buf);
}

static struct cache *index_kobj_to_cache(struct kobject *k)
{
	struct cache_index_dir *index;

	index = kobj_to_cache_index_dir(k);

	return index->cache;
}

static ssize_t size_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
{
	unsigned int size_kb;
	struct cache *cache;

	cache = index_kobj_to_cache(k);

	if (cache_size_kb(cache, &size_kb))
		return -ENODEV;

	return sprintf(buf, "%uK\n", size_kb);
}

static struct kobj_attribute cache_size_attr =
	__ATTR(size, 0444, size_show, NULL);


static ssize_t line_size_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
{
	unsigned int line_size;
	struct cache *cache;

	cache = index_kobj_to_cache(k);

	if (cache_get_line_size(cache, &line_size))
		return -ENODEV;

	return sprintf(buf, "%u\n", line_size);
}

static struct kobj_attribute cache_line_size_attr =
	__ATTR(coherency_line_size, 0444, line_size_show, NULL);

static ssize_t nr_sets_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
{
	unsigned int nr_sets;
	struct cache *cache;

	cache = index_kobj_to_cache(k);

	if (cache_nr_sets(cache, &nr_sets))
		return -ENODEV;

	return sprintf(buf, "%u\n", nr_sets);
}

static struct kobj_attribute cache_nr_sets_attr =
	__ATTR(number_of_sets, 0444, nr_sets_show, NULL);

static ssize_t associativity_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
{
	unsigned int associativity;
	struct cache *cache;

	cache = index_kobj_to_cache(k);

	if (cache_associativity(cache, &associativity))
		return -ENODEV;

	return sprintf(buf, "%u\n", associativity);
}

static struct kobj_attribute cache_assoc_attr =
	__ATTR(ways_of_associativity, 0444, associativity_show, NULL);

static ssize_t type_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
{
	struct cache *cache;

	cache = index_kobj_to_cache(k);

	return sprintf(buf, "%s\n", cache_type_string(cache));
}

static struct kobj_attribute cache_type_attr =
	__ATTR(type, 0444, type_show, NULL);

static ssize_t level_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
{
	struct cache_index_dir *index;
	struct cache *cache;

	index = kobj_to_cache_index_dir(k);
	cache = index->cache;

	return sprintf(buf, "%d\n", cache->level);
}

static struct kobj_attribute cache_level_attr =
	__ATTR(level, 0444, level_show, NULL);
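
/*
 * Recovering the CPU number from an index directory relies on the
 * kobject ancestry established at creation time: each "index%d"
 * kobject is parented to the per-cpu "cache" kobject (see
 * cacheinfo_create_index_dir()), which in turn is parented to the CPU
 * device's kobject (see cacheinfo_create_cache_dir()), and that
 * device's ->id is the logical CPU number.
 */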

static unsigned int index_dir_to_cpu(struct cache_index_dir *index)
{
	struct kobject *index_dir_kobj = &index->kobj;
	struct kobject *cache_dir_kobj = index_dir_kobj->parent;
	struct kobject *cpu_dev_kobj = cache_dir_kobj->parent;
	struct device *dev = kobj_to_dev(cpu_dev_kobj);

	return dev->id;
}

/*
 * On big-core systems, each core has two groups of CPUs each of which
 * has its own L1-cache. The thread-siblings which share l1-cache with
 * @cpu can be obtained via cpu_smallcore_mask().
 */
static const struct cpumask *get_big_core_shared_cpu_map(int cpu, struct cache *cache)
{
	if (cache->level == 1)
		return cpu_smallcore_mask(cpu);

	return &cache->shared_cpu_map;
}

static ssize_t shared_cpu_map_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
{
	struct cache_index_dir *index;
	struct cache *cache;
	const struct cpumask *mask;
	int ret, cpu;

	index = kobj_to_cache_index_dir(k);
	cache = index->cache;

	if (has_big_cores) {
		cpu = index_dir_to_cpu(index);
		mask = get_big_core_shared_cpu_map(cpu, cache);
	} else {
		mask = &cache->shared_cpu_map;
	}

	ret = scnprintf(buf, PAGE_SIZE - 1, "%*pb\n",
			cpumask_pr_args(mask));
	buf[ret++] = '\n';
	buf[ret] = '\0';
	return ret;
}

static struct kobj_attribute cache_shared_cpu_map_attr =
	__ATTR(shared_cpu_map, 0444, shared_cpu_map_show, NULL);
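
/*
 * "%*pb" above renders the mask as a hex bitmap (the "%*pbl" form would
 * give a CPU list instead), matching the shared_cpu_map format used by
 * the x86 intel_cacheinfo implementation this file aims to stay
 * compatible with.
 */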

/* Attributes which should always be created -- the kobject/sysfs core
 * does this automatically via kobj_type->default_attrs.  This is the
 * minimum data required to uniquely identify a cache.
 */
static struct attribute *cache_index_default_attrs[] = {
	&cache_type_attr.attr,
	&cache_level_attr.attr,
	&cache_shared_cpu_map_attr.attr,
	NULL,
};

/* Attributes which should be created if the cache device node has the
 * right properties -- see cacheinfo_create_index_opt_attrs
 */
static struct kobj_attribute *cache_index_opt_attrs[] = {
	&cache_size_attr,
	&cache_line_size_attr,
	&cache_nr_sets_attr,
	&cache_assoc_attr,
};

static const struct sysfs_ops cache_index_ops = {
	.show = cache_index_show,
};

static struct kobj_type cache_index_type = {
	.release = cache_index_release,
	.sysfs_ops = &cache_index_ops,
	.default_attrs = cache_index_default_attrs,
};

static void cacheinfo_create_index_opt_attrs(struct cache_index_dir *dir)
{
	const char *cache_type;
	struct cache *cache;
	char *buf;
	int i;

	buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!buf)
		return;

	cache = dir->cache;
	cache_type = cache_type_string(cache);

	/* We don't want to create an attribute that can't provide a
	 * meaningful value.  Check the return value of each optional
	 * attribute's ->show method before registering the
	 * attribute.
	 */
	for (i = 0; i < ARRAY_SIZE(cache_index_opt_attrs); i++) {
		struct kobj_attribute *attr;
		ssize_t rc;

		attr = cache_index_opt_attrs[i];

		rc = attr->show(&dir->kobj, attr, buf);
		if (rc <= 0) {
			pr_debug("not creating %s attribute for "
				 "%pOF(%s) (rc = %zd)\n",
				 attr->attr.name, cache->ofnode,
				 cache_type, rc);
			continue;
		}
		if (sysfs_create_file(&dir->kobj, &attr->attr))
			pr_debug("could not create %s attribute for %pOF(%s)\n",
				 attr->attr.name, cache->ofnode, cache_type);
	}

	kfree(buf);
}

static void cacheinfo_create_index_dir(struct cache *cache, int index,
				       struct cache_dir *cache_dir)
{
	struct cache_index_dir *index_dir;
	int rc;

	index_dir = kzalloc(sizeof(*index_dir), GFP_KERNEL);
	if (!index_dir)
		goto err;

	index_dir->cache = cache;

	rc = kobject_init_and_add(&index_dir->kobj, &cache_index_type,
				  cache_dir->kobj, "index%d", index);
	if (rc)
		goto err;

	index_dir->next = cache_dir->index;
	cache_dir->index = index_dir;

	cacheinfo_create_index_opt_attrs(index_dir);

	return;
err:
	kfree(index_dir);
}

static void cacheinfo_sysfs_populate(unsigned int cpu_id,
				     struct cache *cache_list)
{
	struct cache_dir *cache_dir;
	struct cache *cache;
	int index = 0;

	cache_dir = cacheinfo_create_cache_dir(cpu_id);
	if (!cache_dir)
		return;

	cache = cache_list;
	while (cache) {
		cacheinfo_create_index_dir(cache, index, cache_dir);
		index++;
		cache = cache->next_local;
	}
}

void cacheinfo_cpu_online(unsigned int cpu_id)
{
	struct cache *cache;

	cache = cache_chain_instantiate(cpu_id);
	if (!cache)
		return;

	cacheinfo_sysfs_populate(cpu_id, cache);
}
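
/*
 * For one online CPU the hierarchy created above typically looks like
 * (index numbering follows the local chain, so the exact layout depends
 * on what the device tree describes):
 *
 *	/sys/devices/system/cpu/cpuN/cache/index0/	L1 data
 *	/sys/devices/system/cpu/cpuN/cache/index1/	L1 instruction
 *	/sys/devices/system/cpu/cpuN/cache/index2/	L2 (unified)
 *
 * with each index directory exposing type, level and shared_cpu_map,
 * plus whichever of size, coherency_line_size, number_of_sets and
 * ways_of_associativity the corresponding OF node provides.
 */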

/* functions needed to remove cache entry for cpu offline or suspend/resume */

#if (defined(CONFIG_PPC_PSERIES) && defined(CONFIG_SUSPEND)) || \
    defined(CONFIG_HOTPLUG_CPU)

static struct cache *cache_lookup_by_cpu(unsigned int cpu_id)
{
	struct device_node *cpu_node;
	struct cache *cache;

	cpu_node = of_get_cpu_node(cpu_id, NULL);
	WARN_ONCE(!cpu_node, "no OF node found for CPU %i\n", cpu_id);
	if (!cpu_node)
		return NULL;

	cache = cache_lookup_by_node(cpu_node);
	of_node_put(cpu_node);

	return cache;
}

static void remove_index_dirs(struct cache_dir *cache_dir)
{
	struct cache_index_dir *index;

	index = cache_dir->index;

	while (index) {
		struct cache_index_dir *next;

		next = index->next;
		kobject_put(&index->kobj);
		index = next;
	}
}

static void remove_cache_dir(struct cache_dir *cache_dir)
{
	remove_index_dirs(cache_dir);

	/* Remove cache dir from sysfs */
	kobject_del(cache_dir->kobj);

	kobject_put(cache_dir->kobj);

	kfree(cache_dir);
}

static void cache_cpu_clear(struct cache *cache, int cpu)
{
	while (cache) {
		struct cache *next = cache->next_local;

		WARN_ONCE(!cpumask_test_cpu(cpu, &cache->shared_cpu_map),
			  "CPU %i not accounted in %pOF(%s)\n",
			  cpu, cache->ofnode,
			  cache_type_string(cache));

		cpumask_clear_cpu(cpu, &cache->shared_cpu_map);

		/* Release the cache object if all the cpus using it
		 * are offline */
		if (cpumask_empty(&cache->shared_cpu_map))
			release_cache(cache);

		cache = next;
	}
}

void cacheinfo_cpu_offline(unsigned int cpu_id)
{
	struct cache_dir *cache_dir;
	struct cache *cache;

	/* Prevent userspace from seeing inconsistent state - remove
	 * the sysfs hierarchy first */
	cache_dir = per_cpu(cache_dir_pcpu, cpu_id);

	/* careful, sysfs population may have failed */
	if (cache_dir)
		remove_cache_dir(cache_dir);

	per_cpu(cache_dir_pcpu, cpu_id) = NULL;

	/* clear the CPU's bit in its cache chain, possibly freeing
	 * cache objects */
	cache = cache_lookup_by_cpu(cpu_id);
	if (cache)
		cache_cpu_clear(cache, cpu_id);
}
#endif /* (CONFIG_PPC_PSERIES && CONFIG_SUSPEND) || CONFIG_HOTPLUG_CPU */