// SPDX-License-Identifier: GPL-2.0-only
/*
 * Processor cache information made available to userspace via sysfs;
 * intended to be compatible with x86 intel_cacheinfo implementation.
 *
 * Copyright 2008 IBM Corporation
 * Author: Nathan Lynch
 */

#define pr_fmt(fmt) "cacheinfo: " fmt

#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/kernel.h>
#include <linux/kobject.h>
#include <linux/list.h>
#include <linux/notifier.h>
#include <linux/of.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <asm/prom.h>
#include <asm/cputhreads.h>
#include <asm/smp.h>

#include "cacheinfo.h"

/* per-cpu object for tracking:
 * - a "cache" kobject for the top-level directory
 * - a list of "index" objects representing the cpu's local cache hierarchy
 */
struct cache_dir {
	struct kobject *kobj; /* bare (not embedded) kobject for cache
			       * directory */
	struct cache_index_dir *index; /* list of index objects */
};

/* "index" object: each cpu's cache directory has an index
 * subdirectory corresponding to a cache object associated with the
 * cpu. This object's lifetime is managed via the embedded kobject.
 */
struct cache_index_dir {
	struct kobject kobj;
	struct cache_index_dir *next; /* next index in parent directory */
	struct cache *cache;
};

/* Template for determining which OF properties to query for a given
 * cache type */
struct cache_type_info {
	const char *name;
	const char *size_prop;

	/* Allow for both [di]-cache-line-size and
	 * [di]-cache-block-size properties. According to the PowerPC
	 * Processor binding, -line-size should be provided if it
	 * differs from the cache block size (that which is operated
	 * on by cache instructions), so we look for -line-size first.
	 * See cache_get_line_size(). */

	const char *line_size_props[2];
	const char *nr_sets_prop;
};

/* These are used to index the cache_type_info array. */
#define CACHE_TYPE_UNIFIED     0 /* cache-size, cache-block-size, etc. */
#define CACHE_TYPE_UNIFIED_D   1 /* d-cache-size, d-cache-block-size, etc. */
#define CACHE_TYPE_INSTRUCTION 2
#define CACHE_TYPE_DATA        3

static const struct cache_type_info cache_type_info[] = {
	{
		/* Embedded systems that use cache-size, cache-block-size,
		 * etc. for the Unified (typically L2) cache. */
		.name            = "Unified",
		.size_prop       = "cache-size",
		.line_size_props = { "cache-line-size",
				     "cache-block-size", },
		.nr_sets_prop    = "cache-sets",
	},
	{
		/* PowerPC Processor binding says the [di]-cache-*
		 * must be equal on unified caches, so just use
		 * d-cache properties. */
		.name            = "Unified",
		.size_prop       = "d-cache-size",
		.line_size_props = { "d-cache-line-size",
				     "d-cache-block-size", },
		.nr_sets_prop    = "d-cache-sets",
	},
	{
		.name            = "Instruction",
		.size_prop       = "i-cache-size",
		.line_size_props = { "i-cache-line-size",
				     "i-cache-block-size", },
		.nr_sets_prop    = "i-cache-sets",
	},
	{
		.name            = "Data",
		.size_prop       = "d-cache-size",
		.line_size_props = { "d-cache-line-size",
				     "d-cache-block-size", },
		.nr_sets_prop    = "d-cache-sets",
	},
};
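/*
 * For illustration only -- the values below are hypothetical. A cpu
 * node following the PowerPC Processor binding might carry properties
 * such as:
 *
 *	d-cache-size      = <0x8000>;
 *	d-cache-line-size = <128>;
 *	d-cache-sets      = <64>;
 *	i-cache-size      = <0x8000>;
 *
 * The lookup helpers below simply select the property names from
 * cache_type_info according to the cache object's type.
 */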
/* Cache object: each instance of this corresponds to a distinct cache
 * in the system. There are separate objects for Harvard caches: one
 * each for instruction and data, and each refers to the same OF node.
 * The refcount of the OF node is elevated for the lifetime of the
 * cache object. A cache object is released when its shared_cpu_map
 * is cleared (see cache_cpu_clear).
 *
 * A cache object is on two lists: an unsorted global list
 * (cache_list) of cache objects; and a singly-linked list
 * representing the local cache hierarchy, which is ordered by level
 * (e.g. L1d -> L1i -> L2 -> L3).
 */
struct cache {
	struct device_node *ofnode;    /* OF node for this cache, may be cpu */
	struct cpumask shared_cpu_map; /* online CPUs using this cache */
	int type;                      /* split cache disambiguation */
	int level;                     /* level not explicit in device tree */
	int group_id;                  /* id of the group of threads that share this cache */
	struct list_head list;         /* global list of cache objects */
	struct cache *next_local;      /* next cache of >= level */
};

static DEFINE_PER_CPU(struct cache_dir *, cache_dir_pcpu);

/* traversal/modification of this list occurs only at cpu hotplug time;
 * access is serialized by cpu hotplug locking
 */
static LIST_HEAD(cache_list);

static struct cache_index_dir *kobj_to_cache_index_dir(struct kobject *k)
{
	return container_of(k, struct cache_index_dir, kobj);
}

static const char *cache_type_string(const struct cache *cache)
{
	return cache_type_info[cache->type].name;
}

static void cache_init(struct cache *cache, int type, int level,
		       struct device_node *ofnode, int group_id)
{
	cache->type = type;
	cache->level = level;
	cache->ofnode = of_node_get(ofnode);
	cache->group_id = group_id;
	INIT_LIST_HEAD(&cache->list);
	list_add(&cache->list, &cache_list);
}

static struct cache *new_cache(int type, int level,
			       struct device_node *ofnode, int group_id)
{
	struct cache *cache;

	cache = kzalloc(sizeof(*cache), GFP_KERNEL);
	if (cache)
		cache_init(cache, type, level, ofnode, group_id);

	return cache;
}

static void release_cache_debugcheck(struct cache *cache)
{
	struct cache *iter;

	list_for_each_entry(iter, &cache_list, list)
		WARN_ONCE(iter->next_local == cache,
			  "cache for %pOFP(%s) refers to cache for %pOFP(%s)\n",
			  iter->ofnode,
			  cache_type_string(iter),
			  cache->ofnode,
			  cache_type_string(cache));
}

static void release_cache(struct cache *cache)
{
	if (!cache)
		return;

	pr_debug("freeing L%d %s cache for %pOFP\n", cache->level,
		 cache_type_string(cache), cache->ofnode);

	release_cache_debugcheck(cache);
	list_del(&cache->list);
	of_node_put(cache->ofnode);
	kfree(cache);
}

static void cache_cpu_set(struct cache *cache, int cpu)
{
	struct cache *next = cache;

	while (next) {
		WARN_ONCE(cpumask_test_cpu(cpu, &next->shared_cpu_map),
			  "CPU %i already accounted in %pOFP(%s)\n",
			  cpu, next->ofnode,
			  cache_type_string(next));
		cpumask_set_cpu(cpu, &next->shared_cpu_map);
		next = next->next_local;
	}
}

static int cache_size(const struct cache *cache, unsigned int *ret)
{
	const char *propname;
	const __be32 *cache_size;

	propname = cache_type_info[cache->type].size_prop;

	cache_size = of_get_property(cache->ofnode, propname, NULL);
	if (!cache_size)
		return -ENODEV;

	*ret = of_read_number(cache_size, 1);
	return 0;
}

static int cache_size_kb(const struct cache *cache, unsigned int *ret)
{
	unsigned int size;

	if (cache_size(cache, &size))
		return -ENODEV;

	*ret = size / 1024;
	return 0;
}

/* not cache_line_size() because that's a macro in include/linux/cache.h */
static int cache_get_line_size(const struct cache *cache, unsigned int *ret)
{
	const __be32 *line_size;
	int i, lim;

	lim = ARRAY_SIZE(cache_type_info[cache->type].line_size_props);

	for (i = 0; i < lim; i++) {
		const char *propname;

		propname = cache_type_info[cache->type].line_size_props[i];
		line_size = of_get_property(cache->ofnode, propname, NULL);
		if (line_size)
			break;
	}

	if (!line_size)
		return -ENODEV;

	*ret = of_read_number(line_size, 1);
	return 0;
}

static int cache_nr_sets(const struct cache *cache, unsigned int *ret)
{
	const char *propname;
	const __be32 *nr_sets;

	propname = cache_type_info[cache->type].nr_sets_prop;

	nr_sets = of_get_property(cache->ofnode, propname, NULL);
	if (!nr_sets)
		return -ENODEV;

	*ret = of_read_number(nr_sets, 1);
	return 0;
}

static int cache_associativity(const struct cache *cache, unsigned int *ret)
{
	unsigned int line_size;
	unsigned int nr_sets;
	unsigned int size;

	if (cache_nr_sets(cache, &nr_sets))
		goto err;

	/* If the cache is fully associative, there is no need to
	 * check the other properties.
	 */
	if (nr_sets == 1) {
		*ret = 0;
		return 0;
	}

	if (cache_get_line_size(cache, &line_size))
		goto err;
	if (cache_size(cache, &size))
		goto err;

	if (!(nr_sets > 0 && size > 0 && line_size > 0))
		goto err;

	*ret = (size / nr_sets) / line_size;
	return 0;
err:
	return -ENODEV;
}
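/*
 * Worked example (hypothetical numbers): a 32KB cache with 64 sets
 * and 128-byte lines has (32768 / 64) / 128 = 4 ways of
 * associativity. An nr_sets value of 1 is taken to mean fully
 * associative, which ways_of_associativity reports as 0.
 */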
/* helper for dealing with split caches */
static struct cache *cache_find_first_sibling(struct cache *cache)
{
	struct cache *iter;

	if (cache->type == CACHE_TYPE_UNIFIED ||
	    cache->type == CACHE_TYPE_UNIFIED_D)
		return cache;

	list_for_each_entry(iter, &cache_list, list)
		if (iter->ofnode == cache->ofnode &&
		    iter->group_id == cache->group_id &&
		    iter->next_local == cache)
			return iter;

	return cache;
}

/* return the first cache on a local list matching node and thread-group id */
static struct cache *cache_lookup_by_node_group(const struct device_node *node,
						int group_id)
{
	struct cache *cache = NULL;
	struct cache *iter;

	list_for_each_entry(iter, &cache_list, list) {
		if (iter->ofnode != node ||
		    iter->group_id != group_id)
			continue;
		cache = cache_find_first_sibling(iter);
		break;
	}

	return cache;
}

static bool cache_node_is_unified(const struct device_node *np)
{
	return of_get_property(np, "cache-unified", NULL);
}

/*
 * Unified caches can have two different sets of tags. Most embedded
 * systems use cache-size, etc. for the unified cache size, but open
 * firmware systems use d-cache-size, etc. Check on initialization for
 * which type we have, and return the appropriate structure type.
 * Assume it's embedded if it isn't open firmware. If it's yet a 3rd
 * type, then there will be missing entries in
 * /sys/devices/system/cpu/cpu0/cache/index2/, and this code will need
 * to be extended further.
 */
static int cache_is_unified_d(const struct device_node *np)
{
	return of_get_property(np,
		cache_type_info[CACHE_TYPE_UNIFIED_D].size_prop, NULL) ?
		CACHE_TYPE_UNIFIED_D : CACHE_TYPE_UNIFIED;
}

static struct cache *cache_do_one_devnode_unified(struct device_node *node, int group_id,
						  int level)
{
	pr_debug("creating L%d ucache for %pOFP\n", level, node);

	return new_cache(cache_is_unified_d(node), level, node, group_id);
}

static struct cache *cache_do_one_devnode_split(struct device_node *node, int group_id,
						int level)
{
	struct cache *dcache, *icache;

	pr_debug("creating L%d dcache and icache for %pOFP\n", level,
		 node);

	dcache = new_cache(CACHE_TYPE_DATA, level, node, group_id);
	icache = new_cache(CACHE_TYPE_INSTRUCTION, level, node, group_id);

	if (!dcache || !icache)
		goto err;

	dcache->next_local = icache;

	return dcache;
err:
	release_cache(dcache);
	release_cache(icache);
	return NULL;
}

static struct cache *cache_do_one_devnode(struct device_node *node, int group_id, int level)
{
	struct cache *cache;

	if (cache_node_is_unified(node))
		cache = cache_do_one_devnode_unified(node, group_id, level);
	else
		cache = cache_do_one_devnode_split(node, group_id, level);

	return cache;
}

static struct cache *cache_lookup_or_instantiate(struct device_node *node,
						 int group_id,
						 int level)
{
	struct cache *cache;

	cache = cache_lookup_by_node_group(node, group_id);

	WARN_ONCE(cache && cache->level != level,
		  "cache level mismatch on lookup (got %d, expected %d)\n",
		  cache->level, level);

	if (!cache)
		cache = cache_do_one_devnode(node, group_id, level);

	return cache;
}

static void link_cache_lists(struct cache *smaller, struct cache *bigger)
{
	while (smaller->next_local) {
		if (smaller->next_local == bigger)
			return; /* already linked */
		smaller = smaller->next_local;
	}

	smaller->next_local = bigger;

	/*
	 * The cache->next_local list sorts by level ascending:
	 * L1d -> L1i -> L2 -> L3 ...
	 */
	WARN_ONCE((smaller->level == 1 && bigger->level > 2) ||
		  (smaller->level > 1 && bigger->level != smaller->level + 1),
		  "linking L%i cache %pOFP to L%i cache %pOFP; skipped a level?\n",
		  smaller->level, smaller->ofnode, bigger->level, bigger->ofnode);
}

static void do_subsidiary_caches_debugcheck(struct cache *cache)
{
	WARN_ONCE(cache->level != 1,
		  "instantiating cache chain from L%d %s cache for "
		  "%pOFP instead of an L1\n", cache->level,
		  cache_type_string(cache), cache->ofnode);
	WARN_ONCE(!of_node_is_type(cache->ofnode, "cpu"),
		  "instantiating cache chain from node %pOFP of type '%s' "
		  "instead of a cpu node\n", cache->ofnode,
		  of_node_get_device_type(cache->ofnode));
}

/*
 * If sub-groups of threads in a core containing @cpu_id share the
 * L@level-cache (information obtained via "ibm,thread-groups"
 * device-tree property), then we identify the group by the first
 * thread-sibling in the group. We define this to be the group-id.
 *
 * In the absence of any thread-group information for L@level-cache,
 * this function returns -1.
 */
static int get_group_id(unsigned int cpu_id, int level)
{
	if (has_big_cores && level == 1)
		return cpumask_first(per_cpu(thread_group_l1_cache_map,
					     cpu_id));
	else if (thread_group_shares_l2 && level == 2)
		return cpumask_first(per_cpu(thread_group_l2_cache_map,
					     cpu_id));
	return -1;
}
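/*
 * Illustrative (hypothetical) example: if the threads of a core are
 * split into two groups {0,2,4,6} and {1,3,5,7}, each with a private
 * L1, then thread_group_l1_cache_map for CPU 5 contains {1,3,5,7},
 * so get_group_id(5, 1) returns 1 while get_group_id(0, 1) returns 0.
 * Threads with the same group-id share the cache at that level.
 */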
static void do_subsidiary_caches(struct cache *cache, unsigned int cpu_id)
{
	struct device_node *subcache_node;
	int level = cache->level;

	do_subsidiary_caches_debugcheck(cache);

	while ((subcache_node = of_find_next_cache_node(cache->ofnode))) {
		struct cache *subcache;
		int group_id;

		level++;
		group_id = get_group_id(cpu_id, level);
		subcache = cache_lookup_or_instantiate(subcache_node, group_id, level);
		of_node_put(subcache_node);
		if (!subcache)
			break;

		link_cache_lists(cache, subcache);
		cache = subcache;
	}
}
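/*
 * Note: of_find_next_cache_node() locates the next cache level by
 * following the node's "l2-cache" (or "next-level-cache") phandle,
 * so the walk above ascends the hierarchy one level per iteration
 * until no further cache node is referenced.
 */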
static struct cache *cache_chain_instantiate(unsigned int cpu_id)
{
	struct device_node *cpu_node;
	struct cache *cpu_cache = NULL;
	int group_id;

	pr_debug("creating cache object(s) for CPU %i\n", cpu_id);

	cpu_node = of_get_cpu_node(cpu_id, NULL);
	WARN_ONCE(!cpu_node, "no OF node found for CPU %i\n", cpu_id);
	if (!cpu_node)
		goto out;

	group_id = get_group_id(cpu_id, 1);

	cpu_cache = cache_lookup_or_instantiate(cpu_node, group_id, 1);
	if (!cpu_cache)
		goto out;

	do_subsidiary_caches(cpu_cache, cpu_id);

	cache_cpu_set(cpu_cache, cpu_id);
out:
	of_node_put(cpu_node);

	return cpu_cache;
}

static struct cache_dir *cacheinfo_create_cache_dir(unsigned int cpu_id)
{
	struct cache_dir *cache_dir;
	struct device *dev;
	struct kobject *kobj = NULL;

	dev = get_cpu_device(cpu_id);
	WARN_ONCE(!dev, "no dev for CPU %i\n", cpu_id);
	if (!dev)
		goto err;

	kobj = kobject_create_and_add("cache", &dev->kobj);
	if (!kobj)
		goto err;

	cache_dir = kzalloc(sizeof(*cache_dir), GFP_KERNEL);
	if (!cache_dir)
		goto err;

	cache_dir->kobj = kobj;

	WARN_ON_ONCE(per_cpu(cache_dir_pcpu, cpu_id) != NULL);

	per_cpu(cache_dir_pcpu, cpu_id) = cache_dir;

	return cache_dir;
err:
	kobject_put(kobj);
	return NULL;
}

static void cache_index_release(struct kobject *kobj)
{
	struct cache_index_dir *index;

	index = kobj_to_cache_index_dir(kobj);

	pr_debug("freeing index directory for L%d %s cache\n",
		 index->cache->level, cache_type_string(index->cache));

	kfree(index);
}

static ssize_t cache_index_show(struct kobject *k, struct attribute *attr, char *buf)
{
	struct kobj_attribute *kobj_attr;

	kobj_attr = container_of(attr, struct kobj_attribute, attr);

	return kobj_attr->show(k, kobj_attr, buf);
}

static struct cache *index_kobj_to_cache(struct kobject *k)
{
	struct cache_index_dir *index;

	index = kobj_to_cache_index_dir(k);

	return index->cache;
}

static ssize_t size_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
{
	unsigned int size_kb;
	struct cache *cache;

	cache = index_kobj_to_cache(k);

	if (cache_size_kb(cache, &size_kb))
		return -ENODEV;

	return sprintf(buf, "%uK\n", size_kb);
}

static struct kobj_attribute cache_size_attr =
	__ATTR(size, 0444, size_show, NULL);


static ssize_t line_size_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
{
	unsigned int line_size;
	struct cache *cache;

	cache = index_kobj_to_cache(k);

	if (cache_get_line_size(cache, &line_size))
		return -ENODEV;

	return sprintf(buf, "%u\n", line_size);
}

static struct kobj_attribute cache_line_size_attr =
	__ATTR(coherency_line_size, 0444, line_size_show, NULL);

static ssize_t nr_sets_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
{
	unsigned int nr_sets;
	struct cache *cache;

	cache = index_kobj_to_cache(k);

	if (cache_nr_sets(cache, &nr_sets))
		return -ENODEV;

	return sprintf(buf, "%u\n", nr_sets);
}

static struct kobj_attribute cache_nr_sets_attr =
	__ATTR(number_of_sets, 0444, nr_sets_show, NULL);

static ssize_t associativity_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
{
	unsigned int associativity;
	struct cache *cache;

	cache = index_kobj_to_cache(k);

	if (cache_associativity(cache, &associativity))
		return -ENODEV;

	return sprintf(buf, "%u\n", associativity);
}

static struct kobj_attribute cache_assoc_attr =
	__ATTR(ways_of_associativity, 0444, associativity_show, NULL);

static ssize_t type_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
{
	struct cache *cache;

	cache = index_kobj_to_cache(k);

	return sprintf(buf, "%s\n", cache_type_string(cache));
}

static struct kobj_attribute cache_type_attr =
	__ATTR(type, 0444, type_show, NULL);

static ssize_t level_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
{
	struct cache_index_dir *index;
	struct cache *cache;

	index = kobj_to_cache_index_dir(k);
	cache = index->cache;

	return sprintf(buf, "%d\n", cache->level);
}

static struct kobj_attribute cache_level_attr =
	__ATTR(level, 0444, level_show, NULL);

static unsigned int index_dir_to_cpu(struct cache_index_dir *index)
{
	struct kobject *index_dir_kobj = &index->kobj;
	struct kobject *cache_dir_kobj = index_dir_kobj->parent;
	struct kobject *cpu_dev_kobj = cache_dir_kobj->parent;
	struct device *dev = kobj_to_dev(cpu_dev_kobj);

	return dev->id;
}
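/*
 * The kobject ancestry here mirrors the sysfs layout created by
 * cacheinfo_create_cache_dir(): index%d -> cache -> cpu%d, i.e.
 * /sys/devices/system/cpu/cpuN/cache/indexM. Walking two parents up
 * from an index directory therefore reaches the CPU device, whose id
 * identifies the logical CPU.
 */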

/*
 * On big-core systems, each core has two groups of CPUs each of which
 * has its own L1-cache. The thread-siblings which share l1-cache with
 * @cpu can be obtained via cpu_smallcore_mask().
 *
 * On some big-core systems, the L2 cache is shared only between some
 * groups of siblings. This is already parsed and encoded in
 * cpu_l2_cache_mask().
 *
 * TODO: cache_lookup_or_instantiate() needs to be made aware of the
 *       "ibm,thread-groups" property so that cache->shared_cpu_map
 *       reflects the correct siblings on platforms that have this
 *       device-tree property. This helper function is only a stop-gap
 *       solution so that we report the correct siblings to the
 *       userspace via sysfs.
 */
static const struct cpumask *get_shared_cpu_map(struct cache_index_dir *index, struct cache *cache)
{
	if (has_big_cores) {
		int cpu = index_dir_to_cpu(index);

		if (cache->level == 1)
			return cpu_smallcore_mask(cpu);
		if (cache->level == 2 && thread_group_shares_l2)
			return cpu_l2_cache_mask(cpu);
	}

	return &cache->shared_cpu_map;
}

static ssize_t
show_shared_cpumap(struct kobject *k, struct kobj_attribute *attr, char *buf, bool list)
{
	struct cache_index_dir *index;
	struct cache *cache;
	const struct cpumask *mask;

	index = kobj_to_cache_index_dir(k);
	cache = index->cache;

	mask = get_shared_cpu_map(index, cache);

	return cpumap_print_to_pagebuf(list, buf, mask);
}

static ssize_t shared_cpu_map_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
{
	return show_shared_cpumap(k, attr, buf, false);
}

static ssize_t shared_cpu_list_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
{
	return show_shared_cpumap(k, attr, buf, true);
}

static struct kobj_attribute cache_shared_cpu_map_attr =
	__ATTR(shared_cpu_map, 0444, shared_cpu_map_show, NULL);

static struct kobj_attribute cache_shared_cpu_list_attr =
	__ATTR(shared_cpu_list, 0444, shared_cpu_list_show, NULL);
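/*
 * Taken together, the attributes above populate each index directory
 * with (at most) these read-only files, e.g. for cpu0:
 *
 *	/sys/devices/system/cpu/cpu0/cache/index0/type
 *	/sys/devices/system/cpu/cpu0/cache/index0/level
 *	/sys/devices/system/cpu/cpu0/cache/index0/shared_cpu_map
 *	/sys/devices/system/cpu/cpu0/cache/index0/shared_cpu_list
 *	/sys/devices/system/cpu/cpu0/cache/index0/size
 *	/sys/devices/system/cpu/cpu0/cache/index0/coherency_line_size
 *	/sys/devices/system/cpu/cpu0/cache/index0/number_of_sets
 *	/sys/devices/system/cpu/cpu0/cache/index0/ways_of_associativity
 *
 * The first four are always created; the remainder depend on which
 * device-tree properties are present (see
 * cacheinfo_create_index_opt_attrs() below).
 */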

/* Attributes which should always be created -- the kobject/sysfs core
 * does this automatically via kobj_type->default_attrs. This is the
 * minimum data required to uniquely identify a cache.
 */
static struct attribute *cache_index_default_attrs[] = {
	&cache_type_attr.attr,
	&cache_level_attr.attr,
	&cache_shared_cpu_map_attr.attr,
	&cache_shared_cpu_list_attr.attr,
	NULL,
};

/* Attributes which should be created if the cache device node has the
 * right properties -- see cacheinfo_create_index_opt_attrs
 */
static struct kobj_attribute *cache_index_opt_attrs[] = {
	&cache_size_attr,
	&cache_line_size_attr,
	&cache_nr_sets_attr,
	&cache_assoc_attr,
};

static const struct sysfs_ops cache_index_ops = {
	.show = cache_index_show,
};

static struct kobj_type cache_index_type = {
	.release = cache_index_release,
	.sysfs_ops = &cache_index_ops,
	.default_attrs = cache_index_default_attrs,
};

static void cacheinfo_create_index_opt_attrs(struct cache_index_dir *dir)
{
	const char *cache_type;
	struct cache *cache;
	char *buf;
	int i;

	buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!buf)
		return;

	cache = dir->cache;
	cache_type = cache_type_string(cache);

	/* We don't want to create an attribute that can't provide a
	 * meaningful value. Check the return value of each optional
	 * attribute's ->show method before registering the
	 * attribute.
	 */
	for (i = 0; i < ARRAY_SIZE(cache_index_opt_attrs); i++) {
		struct kobj_attribute *attr;
		ssize_t rc;

		attr = cache_index_opt_attrs[i];

		rc = attr->show(&dir->kobj, attr, buf);
		if (rc <= 0) {
			pr_debug("not creating %s attribute for "
				 "%pOFP(%s) (rc = %zd)\n",
				 attr->attr.name, cache->ofnode,
				 cache_type, rc);
			continue;
		}
		if (sysfs_create_file(&dir->kobj, &attr->attr))
			pr_debug("could not create %s attribute for %pOFP(%s)\n",
				 attr->attr.name, cache->ofnode, cache_type);
	}

	kfree(buf);
}

static void cacheinfo_create_index_dir(struct cache *cache, int index,
				       struct cache_dir *cache_dir)
{
	struct cache_index_dir *index_dir;
	int rc;

	index_dir = kzalloc(sizeof(*index_dir), GFP_KERNEL);
	if (!index_dir)
		return;

	index_dir->cache = cache;

	rc = kobject_init_and_add(&index_dir->kobj, &cache_index_type,
				  cache_dir->kobj, "index%d", index);
	if (rc) {
		kobject_put(&index_dir->kobj);
		return;
	}

	index_dir->next = cache_dir->index;
	cache_dir->index = index_dir;

	cacheinfo_create_index_opt_attrs(index_dir);
}

static void cacheinfo_sysfs_populate(unsigned int cpu_id,
				     struct cache *cache_list)
{
	struct cache_dir *cache_dir;
	struct cache *cache;
	int index = 0;

	cache_dir = cacheinfo_create_cache_dir(cpu_id);
	if (!cache_dir)
		return;

	cache = cache_list;
	while (cache) {
		cacheinfo_create_index_dir(cache, index, cache_dir);
		index++;
		cache = cache->next_local;
	}
}

void cacheinfo_cpu_online(unsigned int cpu_id)
{
	struct cache *cache;

	cache = cache_chain_instantiate(cpu_id);
	if (!cache)
		return;

	cacheinfo_sysfs_populate(cpu_id, cache);
}

/* functions needed to remove cache entry for cpu offline or suspend/resume */

#if (defined(CONFIG_PPC_PSERIES) && defined(CONFIG_SUSPEND)) || \
	defined(CONFIG_HOTPLUG_CPU)

static struct cache *cache_lookup_by_cpu(unsigned int cpu_id)
{
	struct device_node *cpu_node;
	struct cache *cache;
	int group_id;

	cpu_node = of_get_cpu_node(cpu_id, NULL);
	WARN_ONCE(!cpu_node, "no OF node found for CPU %i\n", cpu_id);
	if (!cpu_node)
		return NULL;

	group_id = get_group_id(cpu_id, 1);
	cache = cache_lookup_by_node_group(cpu_node, group_id);
	of_node_put(cpu_node);

	return cache;
}

static void remove_index_dirs(struct cache_dir *cache_dir)
{
	struct cache_index_dir *index;

	index = cache_dir->index;

	while (index) {
		struct cache_index_dir *next;

		next = index->next;
		kobject_put(&index->kobj);
		index = next;
	}
}

static void remove_cache_dir(struct cache_dir *cache_dir)
{
	remove_index_dirs(cache_dir);

	/* Remove cache dir from sysfs */
	kobject_del(cache_dir->kobj);

	kobject_put(cache_dir->kobj);

	kfree(cache_dir);
}

static void cache_cpu_clear(struct cache *cache, int cpu)
{
	while (cache) {
		struct cache *next = cache->next_local;

		WARN_ONCE(!cpumask_test_cpu(cpu, &cache->shared_cpu_map),
			  "CPU %i not accounted in %pOFP(%s)\n",
			  cpu, cache->ofnode,
			  cache_type_string(cache));

		cpumask_clear_cpu(cpu, &cache->shared_cpu_map);

		/* Release the cache object if all the cpus using it
		 * are offline */
		if (cpumask_empty(&cache->shared_cpu_map))
			release_cache(cache);

		cache = next;
	}
}

void cacheinfo_cpu_offline(unsigned int cpu_id)
{
	struct cache_dir *cache_dir;
	struct cache *cache;

	/* Prevent userspace from seeing inconsistent state - remove
	 * the sysfs hierarchy first */
	cache_dir = per_cpu(cache_dir_pcpu, cpu_id);

	/* careful, sysfs population may have failed */
	if (cache_dir)
		remove_cache_dir(cache_dir);

	per_cpu(cache_dir_pcpu, cpu_id) = NULL;

	/* clear the CPU's bit in its cache chain, possibly freeing
	 * cache objects */
	cache = cache_lookup_by_cpu(cpu_id);
	if (cache)
		cache_cpu_clear(cache, cpu_id);
}
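/*
 * cacheinfo_teardown()/cacheinfo_rebuild() let a caller drop and
 * re-create the cache hierarchy for all online CPUs, e.g. around a
 * device-tree update such as a pseries partition migration. The
 * caller must hold the CPU hotplug lock, which
 * lockdep_assert_cpus_held() verifies below.
 */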

void cacheinfo_teardown(void)
{
	unsigned int cpu;

	lockdep_assert_cpus_held();

	for_each_online_cpu(cpu)
		cacheinfo_cpu_offline(cpu);
}

void cacheinfo_rebuild(void)
{
	unsigned int cpu;

	lockdep_assert_cpus_held();

	for_each_online_cpu(cpu)
		cacheinfo_cpu_online(cpu);
}

#endif /* (CONFIG_PPC_PSERIES && CONFIG_SUSPEND) || CONFIG_HOTPLUG_CPU */