// SPDX-License-Identifier: GPL-2.0-only
/*
 * Processor cache information made available to userspace via sysfs;
 * intended to be compatible with x86 intel_cacheinfo implementation.
 *
 * Copyright 2008 IBM Corporation
 * Author: Nathan Lynch
 */

#define pr_fmt(fmt) "cacheinfo: " fmt

#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/kernel.h>
#include <linux/kobject.h>
#include <linux/list.h>
#include <linux/notifier.h>
#include <linux/of.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <asm/prom.h>
#include <asm/cputhreads.h>
#include <asm/smp.h>

#include "cacheinfo.h"

/* per-cpu object for tracking:
 * - a "cache" kobject for the top-level directory
 * - a list of "index" objects representing the cpu's local cache hierarchy
 */
struct cache_dir {
	struct kobject *kobj; /* bare (not embedded) kobject for cache
			       * directory */
	struct cache_index_dir *index; /* list of index objects */
};

/* "index" object: each cpu's cache directory has an index
 * subdirectory corresponding to a cache object associated with the
 * cpu.  This object's lifetime is managed via the embedded kobject.
 */
struct cache_index_dir {
	struct kobject kobj;
	struct cache_index_dir *next; /* next index in parent directory */
	struct cache *cache;
};

/* Template for determining which OF properties to query for a given
 * cache type */
struct cache_type_info {
	const char *name;
	const char *size_prop;

	/* Allow for both [di]-cache-line-size and
	 * [di]-cache-block-size properties.  According to the PowerPC
	 * Processor binding, -line-size should be provided if it
	 * differs from the cache block size (that which is operated
	 * on by cache instructions), so we look for -line-size first.
	 * See cache_get_line_size(). */

	const char *line_size_props[2];
	const char *nr_sets_prop;
};

/* These are used to index the cache_type_info array. */
#define CACHE_TYPE_UNIFIED     0 /* cache-size, cache-block-size, etc. */
#define CACHE_TYPE_UNIFIED_D   1 /* d-cache-size, d-cache-block-size, etc. */
#define CACHE_TYPE_INSTRUCTION 2
#define CACHE_TYPE_DATA        3

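/*
 * Illustrative device tree fragment (hypothetical values) showing the
 * properties consulted by the table below.  A split L1 is described by
 * the cpu node itself; higher levels are reached by following the
 * cache phandle chain (see do_subsidiary_caches()):
 *
 *	cpu@0 {
 *		i-cache-size = <0x8000>;
 *		i-cache-line-size = <128>;
 *		i-cache-sets = <32>;
 *		d-cache-size = <0x8000>;
 *		d-cache-line-size = <128>;
 *		d-cache-sets = <64>;
 *		l2-cache = <&L2_0>;
 *	};
 */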
static const struct cache_type_info cache_type_info[] = {
	{
		/* Embedded systems that use cache-size, cache-block-size,
		 * etc. for the Unified (typically L2) cache. */
		.name            = "Unified",
		.size_prop       = "cache-size",
		.line_size_props = { "cache-line-size",
				     "cache-block-size", },
		.nr_sets_prop    = "cache-sets",
	},
	{
		/* PowerPC Processor binding says the [di]-cache-*
		 * must be equal on unified caches, so just use
		 * d-cache properties. */
		.name            = "Unified",
		.size_prop       = "d-cache-size",
		.line_size_props = { "d-cache-line-size",
				     "d-cache-block-size", },
		.nr_sets_prop    = "d-cache-sets",
	},
	{
		.name            = "Instruction",
		.size_prop       = "i-cache-size",
		.line_size_props = { "i-cache-line-size",
				     "i-cache-block-size", },
		.nr_sets_prop    = "i-cache-sets",
	},
	{
		.name            = "Data",
		.size_prop       = "d-cache-size",
		.line_size_props = { "d-cache-line-size",
				     "d-cache-block-size", },
		.nr_sets_prop    = "d-cache-sets",
	},
};

/* Cache object: each instance of this corresponds to a distinct cache
 * in the system.  There are separate objects for Harvard caches: one
 * each for instruction and data, and each refers to the same OF node.
 * The refcount of the OF node is elevated for the lifetime of the
 * cache object.  A cache object is released when its shared_cpu_map
 * is cleared (see cache_cpu_clear).
 *
 * A cache object is on two lists: an unsorted global list
 * (cache_list) of cache objects; and a singly-linked list
 * representing the local cache hierarchy, which is ordered by level
 * (e.g. L1d -> L1i -> L2 -> L3).
 */
struct cache {
	struct device_node *ofnode;    /* OF node for this cache, may be cpu */
	struct cpumask shared_cpu_map; /* online CPUs using this cache */
	int type;                      /* split cache disambiguation */
	int level;                     /* level not explicit in device tree */
	struct list_head list;         /* global list of cache objects */
	struct cache *next_local;      /* next cache of >= level */
};

static DEFINE_PER_CPU(struct cache_dir *, cache_dir_pcpu);

/* traversal/modification of this list occurs only at cpu hotplug time;
 * access is serialized by cpu hotplug locking
 */
static LIST_HEAD(cache_list);

static struct cache_index_dir *kobj_to_cache_index_dir(struct kobject *k)
{
	return container_of(k, struct cache_index_dir, kobj);
}

static const char *cache_type_string(const struct cache *cache)
{
	return cache_type_info[cache->type].name;
}

static void cache_init(struct cache *cache, int type, int level,
		       struct device_node *ofnode)
{
	cache->type = type;
	cache->level = level;
	cache->ofnode = of_node_get(ofnode);
	INIT_LIST_HEAD(&cache->list);
	list_add(&cache->list, &cache_list);
}

static struct cache *new_cache(int type, int level, struct device_node *ofnode)
{
	struct cache *cache;

	cache = kzalloc(sizeof(*cache), GFP_KERNEL);
	if (cache)
		cache_init(cache, type, level, ofnode);

	return cache;
}

static void release_cache_debugcheck(struct cache *cache)
{
	struct cache *iter;

	list_for_each_entry(iter, &cache_list, list)
		WARN_ONCE(iter->next_local == cache,
			  "cache for %pOF(%s) refers to cache for %pOF(%s)\n",
			  iter->ofnode,
			  cache_type_string(iter),
			  cache->ofnode,
			  cache_type_string(cache));
}

static void release_cache(struct cache *cache)
{
	if (!cache)
		return;

	pr_debug("freeing L%d %s cache for %pOF\n", cache->level,
		 cache_type_string(cache), cache->ofnode);

	release_cache_debugcheck(cache);
	list_del(&cache->list);
	of_node_put(cache->ofnode);
	kfree(cache);
}

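/*
 * Mark @cpu as sharing every cache on @cache's local chain (L1 through
 * the last level), warning if the CPU is already accounted for in any
 * of them.  Called when the CPU's cache chain is instantiated at
 * online time.
 */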
static void cache_cpu_set(struct cache *cache, int cpu)
{
	struct cache *next = cache;

	while (next) {
		WARN_ONCE(cpumask_test_cpu(cpu, &next->shared_cpu_map),
			  "CPU %i already accounted in %pOF(%s)\n",
			  cpu, next->ofnode,
			  cache_type_string(next));
		cpumask_set_cpu(cpu, &next->shared_cpu_map);
		next = next->next_local;
	}
}

static int cache_size(const struct cache *cache, unsigned int *ret)
{
	const char *propname;
	const __be32 *cache_size;

	propname = cache_type_info[cache->type].size_prop;

	cache_size = of_get_property(cache->ofnode, propname, NULL);
	if (!cache_size)
		return -ENODEV;

	*ret = of_read_number(cache_size, 1);
	return 0;
}

static int cache_size_kb(const struct cache *cache, unsigned int *ret)
{
	unsigned int size;

	if (cache_size(cache, &size))
		return -ENODEV;

	*ret = size / 1024;
	return 0;
}

/* not cache_line_size() because that's a macro in include/linux/cache.h */
static int cache_get_line_size(const struct cache *cache, unsigned int *ret)
{
	const __be32 *line_size;
	int i, lim;

	lim = ARRAY_SIZE(cache_type_info[cache->type].line_size_props);

	for (i = 0; i < lim; i++) {
		const char *propname;

		propname = cache_type_info[cache->type].line_size_props[i];
		line_size = of_get_property(cache->ofnode, propname, NULL);
		if (line_size)
			break;
	}

	if (!line_size)
		return -ENODEV;

	*ret = of_read_number(line_size, 1);
	return 0;
}

static int cache_nr_sets(const struct cache *cache, unsigned int *ret)
{
	const char *propname;
	const __be32 *nr_sets;

	propname = cache_type_info[cache->type].nr_sets_prop;

	nr_sets = of_get_property(cache->ofnode, propname, NULL);
	if (!nr_sets)
		return -ENODEV;

	*ret = of_read_number(nr_sets, 1);
	return 0;
}

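/*
 * Derive the number of ways from the properties read above:
 * associativity = size / (nr_sets * line_size).  For example, a
 * hypothetical 32KiB cache with 64 sets and 128-byte lines works out
 * to (32768 / 64) / 128 = 4 ways.  A fully associative cache
 * (nr_sets == 1) is reported as 0.
 */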
static int cache_associativity(const struct cache *cache, unsigned int *ret)
{
	unsigned int line_size;
	unsigned int nr_sets;
	unsigned int size;

	if (cache_nr_sets(cache, &nr_sets))
		goto err;

	/* If the cache is fully associative, there is no need to
	 * check the other properties.
	 */
	if (nr_sets == 1) {
		*ret = 0;
		return 0;
	}

	if (cache_get_line_size(cache, &line_size))
		goto err;
	if (cache_size(cache, &size))
		goto err;

	if (!(nr_sets > 0 && size > 0 && line_size > 0))
		goto err;

	*ret = (size / nr_sets) / line_size;
	return 0;
err:
	return -ENODEV;
}

/* helper for dealing with split caches */
static struct cache *cache_find_first_sibling(struct cache *cache)
{
	struct cache *iter;

	if (cache->type == CACHE_TYPE_UNIFIED ||
	    cache->type == CACHE_TYPE_UNIFIED_D)
		return cache;

	list_for_each_entry(iter, &cache_list, list)
		if (iter->ofnode == cache->ofnode && iter->next_local == cache)
			return iter;

	return cache;
}

/* return the first cache on a local list matching node */
static struct cache *cache_lookup_by_node(const struct device_node *node)
{
	struct cache *cache = NULL;
	struct cache *iter;

	list_for_each_entry(iter, &cache_list, list) {
		if (iter->ofnode != node)
			continue;
		cache = cache_find_first_sibling(iter);
		break;
	}

	return cache;
}

static bool cache_node_is_unified(const struct device_node *np)
{
	return of_get_property(np, "cache-unified", NULL);
}

/*
 * Unified caches can have two different sets of tags.  Most embedded
 * systems use cache-size, etc. for the unified cache size, but Open
 * Firmware systems use d-cache-size, etc.  Check on initialization for
 * which type we have, and return the appropriate structure type.
 * Assume it's embedded if it isn't Open Firmware.  If it's yet a third
 * type, then there will be missing entries in
 * /sys/devices/system/cpu/cpu0/cache/index2/, and this code will need
 * to be extended further.
 */
static int cache_is_unified_d(const struct device_node *np)
{
	return of_get_property(np,
		cache_type_info[CACHE_TYPE_UNIFIED_D].size_prop, NULL) ?
		CACHE_TYPE_UNIFIED_D : CACHE_TYPE_UNIFIED;
}

static struct cache *cache_do_one_devnode_unified(struct device_node *node, int level)
{
	pr_debug("creating L%d ucache for %pOF\n", level, node);

	return new_cache(cache_is_unified_d(node), level, node);
}

static struct cache *cache_do_one_devnode_split(struct device_node *node,
						int level)
{
	struct cache *dcache, *icache;

	pr_debug("creating L%d dcache and icache for %pOF\n", level,
		 node);

	dcache = new_cache(CACHE_TYPE_DATA, level, node);
	icache = new_cache(CACHE_TYPE_INSTRUCTION, level, node);

	if (!dcache || !icache)
		goto err;

	dcache->next_local = icache;

	return dcache;
err:
	release_cache(dcache);
	release_cache(icache);
	return NULL;
}

static struct cache *cache_do_one_devnode(struct device_node *node, int level)
{
	struct cache *cache;

	if (cache_node_is_unified(node))
		cache = cache_do_one_devnode_unified(node, level);
	else
		cache = cache_do_one_devnode_split(node, level);

	return cache;
}

static struct cache *cache_lookup_or_instantiate(struct device_node *node,
						 int level)
{
	struct cache *cache;

	cache = cache_lookup_by_node(node);

	WARN_ONCE(cache && cache->level != level,
		  "cache level mismatch on lookup (got %d, expected %d)\n",
		  cache->level, level);

	if (!cache)
		cache = cache_do_one_devnode(node, level);

	return cache;
}

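/*
 * Append @bigger (the next cache level) to the tail of @smaller's
 * next_local chain unless it is already present; linking is
 * idempotent, so repeated instantiation for sibling CPUs that share
 * the same caches is safe.
 */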
static void link_cache_lists(struct cache *smaller, struct cache *bigger)
{
	while (smaller->next_local) {
		if (smaller->next_local == bigger)
			return; /* already linked */
		smaller = smaller->next_local;
	}

	smaller->next_local = bigger;
}

static void do_subsidiary_caches_debugcheck(struct cache *cache)
{
	WARN_ON_ONCE(cache->level != 1);
	WARN_ON_ONCE(!of_node_is_type(cache->ofnode, "cpu"));
}

static void do_subsidiary_caches(struct cache *cache)
{
	struct device_node *subcache_node;
	int level = cache->level;

	do_subsidiary_caches_debugcheck(cache);

	while ((subcache_node = of_find_next_cache_node(cache->ofnode))) {
		struct cache *subcache;

		level++;
		subcache = cache_lookup_or_instantiate(subcache_node, level);
		of_node_put(subcache_node);
		if (!subcache)
			break;

		link_cache_lists(cache, subcache);
		cache = subcache;
	}
}

static struct cache *cache_chain_instantiate(unsigned int cpu_id)
{
	struct device_node *cpu_node;
	struct cache *cpu_cache = NULL;

	pr_debug("creating cache object(s) for CPU %i\n", cpu_id);

	cpu_node = of_get_cpu_node(cpu_id, NULL);
	WARN_ONCE(!cpu_node, "no OF node found for CPU %i\n", cpu_id);
	if (!cpu_node)
		goto out;

	cpu_cache = cache_lookup_or_instantiate(cpu_node, 1);
	if (!cpu_cache)
		goto out;

	do_subsidiary_caches(cpu_cache);

	cache_cpu_set(cpu_cache, cpu_id);
out:
	of_node_put(cpu_node);

	return cpu_cache;
}

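/*
 * Create the top-level "cache" directory kobject under the CPU's
 * sysfs device directory and record it in cache_dir_pcpu so that it
 * can be torn down when the CPU goes offline.
 */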
static struct cache_dir *cacheinfo_create_cache_dir(unsigned int cpu_id)
{
	struct cache_dir *cache_dir;
	struct device *dev;
	struct kobject *kobj = NULL;

	dev = get_cpu_device(cpu_id);
	WARN_ONCE(!dev, "no dev for CPU %i\n", cpu_id);
	if (!dev)
		goto err;

	kobj = kobject_create_and_add("cache", &dev->kobj);
	if (!kobj)
		goto err;

	cache_dir = kzalloc(sizeof(*cache_dir), GFP_KERNEL);
	if (!cache_dir)
		goto err;

	cache_dir->kobj = kobj;

	WARN_ON_ONCE(per_cpu(cache_dir_pcpu, cpu_id) != NULL);

	per_cpu(cache_dir_pcpu, cpu_id) = cache_dir;

	return cache_dir;
err:
	kobject_put(kobj);
	return NULL;
}

static void cache_index_release(struct kobject *kobj)
{
	struct cache_index_dir *index;

	index = kobj_to_cache_index_dir(kobj);

	pr_debug("freeing index directory for L%d %s cache\n",
		 index->cache->level, cache_type_string(index->cache));

	kfree(index);
}

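/*
 * Single sysfs ->show dispatcher for the index kobjects: every
 * attribute in an index directory is a struct kobj_attribute, so
 * recover it from the raw attribute and forward to its show method.
 */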
static ssize_t cache_index_show(struct kobject *k, struct attribute *attr, char *buf)
{
	struct kobj_attribute *kobj_attr;

	kobj_attr = container_of(attr, struct kobj_attribute, attr);

	return kobj_attr->show(k, kobj_attr, buf);
}

static struct cache *index_kobj_to_cache(struct kobject *k)
{
	struct cache_index_dir *index;

	index = kobj_to_cache_index_dir(k);

	return index->cache;
}

static ssize_t size_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
{
	unsigned int size_kb;
	struct cache *cache;

	cache = index_kobj_to_cache(k);

	if (cache_size_kb(cache, &size_kb))
		return -ENODEV;

	return sprintf(buf, "%uK\n", size_kb);
}

static struct kobj_attribute cache_size_attr =
	__ATTR(size, 0444, size_show, NULL);

static ssize_t line_size_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
{
	unsigned int line_size;
	struct cache *cache;

	cache = index_kobj_to_cache(k);

	if (cache_get_line_size(cache, &line_size))
		return -ENODEV;

	return sprintf(buf, "%u\n", line_size);
}

static struct kobj_attribute cache_line_size_attr =
	__ATTR(coherency_line_size, 0444, line_size_show, NULL);

static ssize_t nr_sets_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
{
	unsigned int nr_sets;
	struct cache *cache;

	cache = index_kobj_to_cache(k);

	if (cache_nr_sets(cache, &nr_sets))
		return -ENODEV;

	return sprintf(buf, "%u\n", nr_sets);
}

static struct kobj_attribute cache_nr_sets_attr =
	__ATTR(number_of_sets, 0444, nr_sets_show, NULL);

static ssize_t associativity_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
{
	unsigned int associativity;
	struct cache *cache;

	cache = index_kobj_to_cache(k);

	if (cache_associativity(cache, &associativity))
		return -ENODEV;

	return sprintf(buf, "%u\n", associativity);
}

static struct kobj_attribute cache_assoc_attr =
	__ATTR(ways_of_associativity, 0444, associativity_show, NULL);

static ssize_t type_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
{
	struct cache *cache;

	cache = index_kobj_to_cache(k);

	return sprintf(buf, "%s\n", cache_type_string(cache));
}

static struct kobj_attribute cache_type_attr =
	__ATTR(type, 0444, type_show, NULL);

static ssize_t level_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
{
	struct cache_index_dir *index;
	struct cache *cache;

	index = kobj_to_cache_index_dir(k);
	cache = index->cache;

	return sprintf(buf, "%d\n", cache->level);
}

static struct kobj_attribute cache_level_attr =
	__ATTR(level, 0444, level_show, NULL);

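/*
 * Recover the owning CPU from an index directory by walking the
 * kobject hierarchy: indexN -> cache -> the cpuN device, whose id is
 * the logical CPU number.
 */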
static unsigned int index_dir_to_cpu(struct cache_index_dir *index)
{
	struct kobject *index_dir_kobj = &index->kobj;
	struct kobject *cache_dir_kobj = index_dir_kobj->parent;
	struct kobject *cpu_dev_kobj = cache_dir_kobj->parent;
	struct device *dev = kobj_to_dev(cpu_dev_kobj);

	return dev->id;
}

/*
 * On big-core systems, each core has two groups of CPUs each of which
 * has its own L1-cache. The thread-siblings which share l1-cache with
 * @cpu can be obtained via cpu_smallcore_mask().
 */
static const struct cpumask *get_big_core_shared_cpu_map(int cpu, struct cache *cache)
{
	if (cache->level == 1)
		return cpu_smallcore_mask(cpu);

	return &cache->shared_cpu_map;
}

static ssize_t
show_shared_cpumap(struct kobject *k, struct kobj_attribute *attr, char *buf, bool list)
{
	struct cache_index_dir *index;
	struct cache *cache;
	const struct cpumask *mask;
	int cpu;

	index = kobj_to_cache_index_dir(k);
	cache = index->cache;

	if (has_big_cores) {
		cpu = index_dir_to_cpu(index);
		mask = get_big_core_shared_cpu_map(cpu, cache);
	} else {
		mask = &cache->shared_cpu_map;
	}

	return cpumap_print_to_pagebuf(list, buf, mask);
}

static ssize_t shared_cpu_map_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
{
	return show_shared_cpumap(k, attr, buf, false);
}

static ssize_t shared_cpu_list_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
{
	return show_shared_cpumap(k, attr, buf, true);
}

static struct kobj_attribute cache_shared_cpu_map_attr =
	__ATTR(shared_cpu_map, 0444, shared_cpu_map_show, NULL);

static struct kobj_attribute cache_shared_cpu_list_attr =
	__ATTR(shared_cpu_list, 0444, shared_cpu_list_show, NULL);

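/*
 * Taken together, the attributes above populate a hierarchy of the
 * form (index contents vary with the device tree):
 *
 *	/sys/devices/system/cpu/cpuN/cache/
 *		index0/		(e.g. L1 Data)
 *			type level shared_cpu_map shared_cpu_list
 *			size coherency_line_size number_of_sets
 *			ways_of_associativity
 *		index1/		(e.g. L1 Instruction)
 *		index2/		(e.g. L2 Unified)
 */
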
/* Attributes which should always be created -- the kobject/sysfs core
 * does this automatically via kobj_type->default_attrs.  This is the
 * minimum data required to uniquely identify a cache.
 */
static struct attribute *cache_index_default_attrs[] = {
	&cache_type_attr.attr,
	&cache_level_attr.attr,
	&cache_shared_cpu_map_attr.attr,
	&cache_shared_cpu_list_attr.attr,
	NULL,
};

/* Attributes which should be created if the cache device node has the
 * right properties -- see cacheinfo_create_index_opt_attrs
 */
static struct kobj_attribute *cache_index_opt_attrs[] = {
	&cache_size_attr,
	&cache_line_size_attr,
	&cache_nr_sets_attr,
	&cache_assoc_attr,
};

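/*
 * Only a ->show operation is provided: every attribute in an index
 * directory is read-only (mode 0444), so there is no store path.
 */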
static const struct sysfs_ops cache_index_ops = {
	.show = cache_index_show,
};

static struct kobj_type cache_index_type = {
	.release = cache_index_release,
	.sysfs_ops = &cache_index_ops,
	.default_attrs = cache_index_default_attrs,
};

static void cacheinfo_create_index_opt_attrs(struct cache_index_dir *dir)
{
	const char *cache_type;
	struct cache *cache;
	char *buf;
	int i;

	buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!buf)
		return;

	cache = dir->cache;
	cache_type = cache_type_string(cache);

	/* We don't want to create an attribute that can't provide a
	 * meaningful value.  Check the return value of each optional
	 * attribute's ->show method before registering the
	 * attribute.
	 */
	for (i = 0; i < ARRAY_SIZE(cache_index_opt_attrs); i++) {
		struct kobj_attribute *attr;
		ssize_t rc;

		attr = cache_index_opt_attrs[i];

		rc = attr->show(&dir->kobj, attr, buf);
		if (rc <= 0) {
			pr_debug("not creating %s attribute for "
				 "%pOF(%s) (rc = %zd)\n",
				 attr->attr.name, cache->ofnode,
				 cache_type, rc);
			continue;
		}
		if (sysfs_create_file(&dir->kobj, &attr->attr))
			pr_debug("could not create %s attribute for %pOF(%s)\n",
				 attr->attr.name, cache->ofnode, cache_type);
	}

	kfree(buf);
}

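/*
 * Create one indexN directory for @cache under @cache_dir.  Note that
 * once kobject_init_and_add() has been called, the error path must use
 * kobject_put() so that index_dir is freed through
 * cache_index_release() rather than kfree().
 */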
static void cacheinfo_create_index_dir(struct cache *cache, int index,
				       struct cache_dir *cache_dir)
{
	struct cache_index_dir *index_dir;
	int rc;

	index_dir = kzalloc(sizeof(*index_dir), GFP_KERNEL);
	if (!index_dir)
		return;

	index_dir->cache = cache;

	rc = kobject_init_and_add(&index_dir->kobj, &cache_index_type,
				  cache_dir->kobj, "index%d", index);
	if (rc) {
		kobject_put(&index_dir->kobj);
		return;
	}

	index_dir->next = cache_dir->index;
	cache_dir->index = index_dir;

	cacheinfo_create_index_opt_attrs(index_dir);
}

static void cacheinfo_sysfs_populate(unsigned int cpu_id,
				     struct cache *cache_list)
{
	struct cache_dir *cache_dir;
	struct cache *cache;
	int index = 0;

	cache_dir = cacheinfo_create_cache_dir(cpu_id);
	if (!cache_dir)
		return;

	cache = cache_list;
	while (cache) {
		cacheinfo_create_index_dir(cache, index, cache_dir);
		index++;
		cache = cache->next_local;
	}
}

void cacheinfo_cpu_online(unsigned int cpu_id)
{
	struct cache *cache;

	cache = cache_chain_instantiate(cpu_id);
	if (!cache)
		return;

	cacheinfo_sysfs_populate(cpu_id, cache);
}

/* functions needed to remove cache entry for cpu offline or suspend/resume */

#if (defined(CONFIG_PPC_PSERIES) && defined(CONFIG_SUSPEND)) || \
	defined(CONFIG_HOTPLUG_CPU)

static struct cache *cache_lookup_by_cpu(unsigned int cpu_id)
{
	struct device_node *cpu_node;
	struct cache *cache;

	cpu_node = of_get_cpu_node(cpu_id, NULL);
	WARN_ONCE(!cpu_node, "no OF node found for CPU %i\n", cpu_id);
	if (!cpu_node)
		return NULL;

	cache = cache_lookup_by_node(cpu_node);
	of_node_put(cpu_node);

	return cache;
}

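/*
 * Drop the reference on each index kobject; when the last reference
 * goes away the directory is removed from sysfs and the
 * cache_index_dir is freed via cache_index_release().
 */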
static void remove_index_dirs(struct cache_dir *cache_dir)
{
	struct cache_index_dir *index;

	index = cache_dir->index;

	while (index) {
		struct cache_index_dir *next;

		next = index->next;
		kobject_put(&index->kobj);
		index = next;
	}
}

static void remove_cache_dir(struct cache_dir *cache_dir)
{
	remove_index_dirs(cache_dir);

	/* Remove cache dir from sysfs */
	kobject_del(cache_dir->kobj);

	kobject_put(cache_dir->kobj);

	kfree(cache_dir);
}

static void cache_cpu_clear(struct cache *cache, int cpu)
{
	while (cache) {
		struct cache *next = cache->next_local;

		WARN_ONCE(!cpumask_test_cpu(cpu, &cache->shared_cpu_map),
			  "CPU %i not accounted in %pOF(%s)\n",
			  cpu, cache->ofnode,
			  cache_type_string(cache));

		cpumask_clear_cpu(cpu, &cache->shared_cpu_map);

		/* Release the cache object if all the cpus using it
		 * are offline */
		if (cpumask_empty(&cache->shared_cpu_map))
			release_cache(cache);

		cache = next;
	}
}

void cacheinfo_cpu_offline(unsigned int cpu_id)
{
	struct cache_dir *cache_dir;
	struct cache *cache;

	/* Prevent userspace from seeing inconsistent state - remove
	 * the sysfs hierarchy first */
	cache_dir = per_cpu(cache_dir_pcpu, cpu_id);

	/* careful, sysfs population may have failed */
	if (cache_dir)
		remove_cache_dir(cache_dir);

	per_cpu(cache_dir_pcpu, cpu_id) = NULL;

	/* clear the CPU's bit in its cache chain, possibly freeing
	 * cache objects */
	cache = cache_lookup_by_cpu(cpu_id);
	if (cache)
		cache_cpu_clear(cache, cpu_id);
}

void cacheinfo_teardown(void)
{
	unsigned int cpu;

	lockdep_assert_cpus_held();

	for_each_online_cpu(cpu)
		cacheinfo_cpu_offline(cpu);
}

void cacheinfo_rebuild(void)
{
	unsigned int cpu;

	lockdep_assert_cpus_held();

	for_each_online_cpu(cpu)
		cacheinfo_cpu_online(cpu);
}

#endif /* (CONFIG_PPC_PSERIES && CONFIG_SUSPEND) || CONFIG_HOTPLUG_CPU */