// SPDX-License-Identifier: GPL-2.0
/*
 * cacheinfo support - processor cache information via sysfs
 *
 * Based on arch/x86/kernel/cpu/intel_cacheinfo.c
 * Author: Sudeep Holla <sudeep.holla@arm.com>
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/acpi.h>
#include <linux/bitops.h>
#include <linux/cacheinfo.h>
#include <linux/compiler.h>
#include <linux/cpu.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/of.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/sysfs.h>

/* pointer to per cpu cacheinfo */
static DEFINE_PER_CPU(struct cpu_cacheinfo, ci_cpu_cacheinfo);
#define ci_cacheinfo(cpu)	(&per_cpu(ci_cpu_cacheinfo, cpu))
#define cache_leaves(cpu)	(ci_cacheinfo(cpu)->num_leaves)
#define per_cpu_cacheinfo(cpu)	(ci_cacheinfo(cpu)->info_list)

struct cpu_cacheinfo *get_cpu_cacheinfo(unsigned int cpu)
{
	return ci_cacheinfo(cpu);
}

#ifdef CONFIG_OF
static inline bool cache_leaves_are_shared(struct cacheinfo *this_leaf,
					   struct cacheinfo *sib_leaf)
{
	return sib_leaf->fw_token == this_leaf->fw_token;
}

/* OF properties to query for a given cache type */
struct cache_type_info {
	const char *size_prop;
	const char *line_size_props[2];
	const char *nr_sets_prop;
};

static const struct cache_type_info cache_type_info[] = {
	{
		.size_prop       = "cache-size",
		.line_size_props = { "cache-line-size",
				     "cache-block-size", },
		.nr_sets_prop    = "cache-sets",
	}, {
		.size_prop       = "i-cache-size",
		.line_size_props = { "i-cache-line-size",
				     "i-cache-block-size", },
		.nr_sets_prop    = "i-cache-sets",
	}, {
		.size_prop       = "d-cache-size",
		.line_size_props = { "d-cache-line-size",
				     "d-cache-block-size", },
		.nr_sets_prop    = "d-cache-sets",
	},
};

static inline int get_cacheinfo_idx(enum cache_type type)
{
	if (type == CACHE_TYPE_UNIFIED)
		return 0;
	return type;
}

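/*
 * Note: get_cacheinfo_idx() relies on the enum cache_type encoding in
 * include/linux/cacheinfo.h (CACHE_TYPE_INST == 1, CACHE_TYPE_DATA == 2),
 * so instruction and data caches index straight into cache_type_info[]
 * while CACHE_TYPE_UNIFIED is remapped to slot 0.
 */
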
static void cache_size(struct cacheinfo *this_leaf, struct device_node *np)
{
	const char *propname;
	int ct_idx;

	ct_idx = get_cacheinfo_idx(this_leaf->type);
	propname = cache_type_info[ct_idx].size_prop;

	of_property_read_u32(np, propname, &this_leaf->size);
}

/* not cache_line_size() because that's a macro in include/linux/cache.h */
static void cache_get_line_size(struct cacheinfo *this_leaf,
				struct device_node *np)
{
	int i, lim, ct_idx;

	ct_idx = get_cacheinfo_idx(this_leaf->type);
	lim = ARRAY_SIZE(cache_type_info[ct_idx].line_size_props);

	for (i = 0; i < lim; i++) {
		int ret;
		u32 line_size;
		const char *propname;

		propname = cache_type_info[ct_idx].line_size_props[i];
		ret = of_property_read_u32(np, propname, &line_size);
		if (!ret) {
			this_leaf->coherency_line_size = line_size;
			break;
		}
	}
}

static void cache_nr_sets(struct cacheinfo *this_leaf, struct device_node *np)
{
	const char *propname;
	int ct_idx;

	ct_idx = get_cacheinfo_idx(this_leaf->type);
	propname = cache_type_info[ct_idx].nr_sets_prop;

	of_property_read_u32(np, propname, &this_leaf->number_of_sets);
}

static void cache_associativity(struct cacheinfo *this_leaf)
{
	unsigned int line_size = this_leaf->coherency_line_size;
	unsigned int nr_sets = this_leaf->number_of_sets;
	unsigned int size = this_leaf->size;

	/*
	 * If the cache is fully associative, there is no need to
	 * check the other properties.
	 */
	if (!(nr_sets == 1) && (nr_sets > 0 && size > 0 && line_size > 0))
		this_leaf->ways_of_associativity = (size / nr_sets) / line_size;
}

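/*
 * Worked example (illustrative numbers only): a 32 KiB cache with 64-byte
 * lines and 128 sets comes out as (32768 / 128) / 64 = 4 ways of
 * associativity.
 */
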
static bool cache_node_is_unified(struct cacheinfo *this_leaf,
				  struct device_node *np)
{
	return of_property_read_bool(np, "cache-unified");
}

static void cache_of_set_props(struct cacheinfo *this_leaf,
			       struct device_node *np)
{
	/*
	 * init_cache_level must setup the cache level correctly
	 * overriding the architecturally specified levels, so
	 * if type is NONE at this stage, it should be unified
	 */
	if (this_leaf->type == CACHE_TYPE_NOCACHE &&
	    cache_node_is_unified(this_leaf, np))
		this_leaf->type = CACHE_TYPE_UNIFIED;
	cache_size(this_leaf, np);
	cache_get_line_size(this_leaf, np);
	cache_nr_sets(this_leaf, np);
	cache_associativity(this_leaf);
}

static int cache_setup_of_node(unsigned int cpu)
{
	struct device_node *np;
	struct cacheinfo *this_leaf;
	struct device *cpu_dev = get_cpu_device(cpu);
	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
	unsigned int index = 0;

	/* skip if fw_token is already populated */
	if (this_cpu_ci->info_list->fw_token)
		return 0;

	if (!cpu_dev) {
		pr_err("No cpu device for CPU %d\n", cpu);
		return -ENODEV;
	}
	np = cpu_dev->of_node;
	if (!np) {
		pr_err("Failed to find cpu%d device node\n", cpu);
		return -ENOENT;
	}

	while (index < cache_leaves(cpu)) {
		this_leaf = this_cpu_ci->info_list + index;
		if (this_leaf->level != 1)
			np = of_find_next_cache_node(np);
		else
			np = of_node_get(np);	/* cpu node itself */
		if (!np)
			break;
		cache_of_set_props(this_leaf, np);
		this_leaf->fw_token = np;
		index++;
	}

	if (index != cache_leaves(cpu)) /* not all OF nodes populated */
		return -ENOENT;

	return 0;
}

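/*
 * Device-tree sketch (hypothetical node names): for level 1 the walk above
 * uses the CPU node itself, then follows "next-level-cache" phandles via
 * of_find_next_cache_node(), e.g.
 *
 *	cpu@0 {
 *		d-cache-size = <0x8000>;
 *		next-level-cache = <&L2_0>;
 *	};
 *	L2_0: l2-cache {
 *		compatible = "cache";
 *		cache-unified;
 *		cache-size = <0x80000>;
 *	};
 *
 * so each leaf's fw_token ends up pointing at the device_node for its level.
 */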
#else
static inline int cache_setup_of_node(unsigned int cpu) { return 0; }
static inline bool cache_leaves_are_shared(struct cacheinfo *this_leaf,
					   struct cacheinfo *sib_leaf)
{
	/*
	 * For non-DT/ACPI systems, assume unique level 1 caches, system-wide
	 * shared caches for all other levels. This will be used only if
	 * arch specific code has not populated shared_cpu_map
	 */
	return !(this_leaf->level == 1);
}
#endif

int __weak cache_setup_acpi(unsigned int cpu)
{
	return -ENOTSUPP;
}

unsigned int coherency_max_size;

static int cache_shared_cpu_map_setup(unsigned int cpu)
{
	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
	struct cacheinfo *this_leaf, *sib_leaf;
	unsigned int index;
	int ret = 0;

	if (this_cpu_ci->cpu_map_populated)
		return 0;

	if (of_have_populated_dt())
		ret = cache_setup_of_node(cpu);
	else if (!acpi_disabled)
		ret = cache_setup_acpi(cpu);

	if (ret)
		return ret;

	for (index = 0; index < cache_leaves(cpu); index++) {
		unsigned int i;

		this_leaf = this_cpu_ci->info_list + index;
		/* skip if shared_cpu_map is already populated */
		if (!cpumask_empty(&this_leaf->shared_cpu_map))
			continue;

		cpumask_set_cpu(cpu, &this_leaf->shared_cpu_map);
		for_each_online_cpu(i) {
			struct cpu_cacheinfo *sib_cpu_ci = get_cpu_cacheinfo(i);

			if (i == cpu || !sib_cpu_ci->info_list)
				continue; /* skip if itself or no cacheinfo */
			sib_leaf = sib_cpu_ci->info_list + index;
			if (cache_leaves_are_shared(this_leaf, sib_leaf)) {
				cpumask_set_cpu(cpu, &sib_leaf->shared_cpu_map);
				cpumask_set_cpu(i, &this_leaf->shared_cpu_map);
			}
		}
		/* record the maximum cache line size */
		if (this_leaf->coherency_line_size > coherency_max_size)
			coherency_max_size = this_leaf->coherency_line_size;
	}

	return 0;
}

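/*
 * Example of the resulting masks (hypothetical 4-CPU system where CPU0 and
 * CPU1 share their level-2 cache): once the loop above has run for both
 * CPUs, the level-2 leaf of each CPU has shared_cpu_map = 0x3, while the
 * private level-1 leaves keep only their own bit set.
 */
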
static void cache_shared_cpu_map_remove(unsigned int cpu)
{
	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
	struct cacheinfo *this_leaf, *sib_leaf;
	unsigned int sibling, index;

	for (index = 0; index < cache_leaves(cpu); index++) {
		this_leaf = this_cpu_ci->info_list + index;
		for_each_cpu(sibling, &this_leaf->shared_cpu_map) {
			struct cpu_cacheinfo *sib_cpu_ci;

			if (sibling == cpu) /* skip itself */
				continue;

			sib_cpu_ci = get_cpu_cacheinfo(sibling);
			if (!sib_cpu_ci->info_list)
				continue;

			sib_leaf = sib_cpu_ci->info_list + index;
			cpumask_clear_cpu(cpu, &sib_leaf->shared_cpu_map);
			cpumask_clear_cpu(sibling, &this_leaf->shared_cpu_map);
		}
		if (of_have_populated_dt())
			of_node_put(this_leaf->fw_token);
	}
}

static void free_cache_attributes(unsigned int cpu)
{
	if (!per_cpu_cacheinfo(cpu))
		return;

	cache_shared_cpu_map_remove(cpu);

	kfree(per_cpu_cacheinfo(cpu));
	per_cpu_cacheinfo(cpu) = NULL;
}

int __weak init_cache_level(unsigned int cpu)
{
	return -ENOENT;
}

int __weak populate_cache_leaves(unsigned int cpu)
{
	return -ENOENT;
}

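/*
 * Both weak hooks above are meant to be overridden by architecture code; as
 * a rough example, arm64 derives the number of levels and leaves from
 * CLIDR_EL1 in init_cache_level() and fills in the type and level of each
 * struct cacheinfo in populate_cache_leaves(), leaving sizes and geometry to
 * the DT/ACPI parsing in this file.
 */
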
static int detect_cache_attributes(unsigned int cpu)
{
	int ret;

	if (init_cache_level(cpu) || !cache_leaves(cpu))
		return -ENOENT;

	per_cpu_cacheinfo(cpu) = kcalloc(cache_leaves(cpu),
					 sizeof(struct cacheinfo), GFP_KERNEL);
	if (per_cpu_cacheinfo(cpu) == NULL)
		return -ENOMEM;

	/*
	 * populate_cache_leaves() may completely set up the cache leaves and
	 * shared_cpu_map, or it may leave them partially set up.
	 */
	ret = populate_cache_leaves(cpu);
	if (ret)
		goto free_ci;
	/*
	 * For systems using DT for cache hierarchy, fw_token
	 * and shared_cpu_map will be set up here only if they are
	 * not populated already
	 */
	ret = cache_shared_cpu_map_setup(cpu);
	if (ret) {
		pr_warn("Unable to detect cache hierarchy for CPU %d\n", cpu);
		goto free_ci;
	}

	return 0;

free_ci:
	free_cache_attributes(cpu);
	return ret;
}

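/*
 * The code below builds the sysfs view: a "cache" device under each CPU
 * device, with one "indexY" child device per detected leaf, e.g.
 * /sys/devices/system/cpu/cpu0/cache/index0, index1, index2, ...
 * Each indexY directory exposes the attributes defined further down.
 */
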
/* pointer to cpuX/cache device */
static DEFINE_PER_CPU(struct device *, ci_cache_dev);
#define per_cpu_cache_dev(cpu)	(per_cpu(ci_cache_dev, cpu))

static cpumask_t cache_dev_map;

/* pointer to array of devices for cpuX/cache/indexY */
static DEFINE_PER_CPU(struct device **, ci_index_dev);
#define per_cpu_index_dev(cpu)	(per_cpu(ci_index_dev, cpu))
#define per_cache_index_dev(cpu, idx)	((per_cpu_index_dev(cpu))[idx])

#define show_one(file_name, object)				\
static ssize_t file_name##_show(struct device *dev,		\
		struct device_attribute *attr, char *buf)	\
{								\
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);	\
	return sprintf(buf, "%u\n", this_leaf->object);		\
}

show_one(id, id);
show_one(level, level);
show_one(coherency_line_size, coherency_line_size);
show_one(number_of_sets, number_of_sets);
show_one(physical_line_partition, physical_line_partition);
show_one(ways_of_associativity, ways_of_associativity);

static ssize_t size_show(struct device *dev,
			 struct device_attribute *attr, char *buf)
{
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%uK\n", this_leaf->size >> 10);
}

static ssize_t shared_cpumap_show_func(struct device *dev, bool list, char *buf)
{
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);
	const struct cpumask *mask = &this_leaf->shared_cpu_map;

	return cpumap_print_to_pagebuf(list, buf, mask);
}

static ssize_t shared_cpu_map_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	return shared_cpumap_show_func(dev, false, buf);
}

static ssize_t shared_cpu_list_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	return shared_cpumap_show_func(dev, true, buf);
}

static ssize_t type_show(struct device *dev,
			 struct device_attribute *attr, char *buf)
{
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);

	switch (this_leaf->type) {
	case CACHE_TYPE_DATA:
		return sysfs_emit(buf, "Data\n");
	case CACHE_TYPE_INST:
		return sysfs_emit(buf, "Instruction\n");
	case CACHE_TYPE_UNIFIED:
		return sysfs_emit(buf, "Unified\n");
	default:
		return -EINVAL;
	}
}

static ssize_t allocation_policy_show(struct device *dev,
				      struct device_attribute *attr, char *buf)
{
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);
	unsigned int ci_attr = this_leaf->attributes;
	int n = 0;

	if ((ci_attr & CACHE_READ_ALLOCATE) && (ci_attr & CACHE_WRITE_ALLOCATE))
		n = sysfs_emit(buf, "ReadWriteAllocate\n");
	else if (ci_attr & CACHE_READ_ALLOCATE)
		n = sysfs_emit(buf, "ReadAllocate\n");
	else if (ci_attr & CACHE_WRITE_ALLOCATE)
		n = sysfs_emit(buf, "WriteAllocate\n");
	return n;
}

static ssize_t write_policy_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);
	unsigned int ci_attr = this_leaf->attributes;
	int n = 0;

	if (ci_attr & CACHE_WRITE_THROUGH)
		n = sysfs_emit(buf, "WriteThrough\n");
	else if (ci_attr & CACHE_WRITE_BACK)
		n = sysfs_emit(buf, "WriteBack\n");
	return n;
}

static DEVICE_ATTR_RO(id);
static DEVICE_ATTR_RO(level);
static DEVICE_ATTR_RO(type);
static DEVICE_ATTR_RO(coherency_line_size);
static DEVICE_ATTR_RO(ways_of_associativity);
static DEVICE_ATTR_RO(number_of_sets);
static DEVICE_ATTR_RO(size);
static DEVICE_ATTR_RO(allocation_policy);
static DEVICE_ATTR_RO(write_policy);
static DEVICE_ATTR_RO(shared_cpu_map);
static DEVICE_ATTR_RO(shared_cpu_list);
static DEVICE_ATTR_RO(physical_line_partition);

static struct attribute *cache_default_attrs[] = {
	&dev_attr_id.attr,
	&dev_attr_type.attr,
	&dev_attr_level.attr,
	&dev_attr_shared_cpu_map.attr,
	&dev_attr_shared_cpu_list.attr,
	&dev_attr_coherency_line_size.attr,
	&dev_attr_ways_of_associativity.attr,
	&dev_attr_number_of_sets.attr,
	&dev_attr_size.attr,
	&dev_attr_allocation_policy.attr,
	&dev_attr_write_policy.attr,
	&dev_attr_physical_line_partition.attr,
	NULL
};

static umode_t
cache_default_attrs_is_visible(struct kobject *kobj,
			       struct attribute *attr, int unused)
{
	struct device *dev = kobj_to_dev(kobj);
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);
	const struct cpumask *mask = &this_leaf->shared_cpu_map;
	umode_t mode = attr->mode;

	if ((attr == &dev_attr_id.attr) && (this_leaf->attributes & CACHE_ID))
		return mode;
	if ((attr == &dev_attr_type.attr) && this_leaf->type)
		return mode;
	if ((attr == &dev_attr_level.attr) && this_leaf->level)
		return mode;
	if ((attr == &dev_attr_shared_cpu_map.attr) && !cpumask_empty(mask))
		return mode;
	if ((attr == &dev_attr_shared_cpu_list.attr) && !cpumask_empty(mask))
		return mode;
	if ((attr == &dev_attr_coherency_line_size.attr) &&
	    this_leaf->coherency_line_size)
		return mode;
	if ((attr == &dev_attr_ways_of_associativity.attr) &&
	    this_leaf->size) /* allow 0 = full associativity */
		return mode;
	if ((attr == &dev_attr_number_of_sets.attr) &&
	    this_leaf->number_of_sets)
		return mode;
	if ((attr == &dev_attr_size.attr) && this_leaf->size)
		return mode;
	if ((attr == &dev_attr_write_policy.attr) &&
	    (this_leaf->attributes & CACHE_WRITE_POLICY_MASK))
		return mode;
	if ((attr == &dev_attr_allocation_policy.attr) &&
	    (this_leaf->attributes & CACHE_ALLOCATE_POLICY_MASK))
		return mode;
	if ((attr == &dev_attr_physical_line_partition.attr) &&
	    this_leaf->physical_line_partition)
		return mode;

	return 0;
}

static const struct attribute_group cache_default_group = {
	.attrs = cache_default_attrs,
	.is_visible = cache_default_attrs_is_visible,
};

static const struct attribute_group *cache_default_groups[] = {
	&cache_default_group,
	NULL,
};

static const struct attribute_group *cache_private_groups[] = {
	&cache_default_group,
	NULL, /* Place holder for private group */
	NULL,
};

const struct attribute_group *
__weak cache_get_priv_group(struct cacheinfo *this_leaf)
{
	return NULL;
}

static const struct attribute_group **
cache_get_attribute_groups(struct cacheinfo *this_leaf)
{
	const struct attribute_group *priv_group =
			cache_get_priv_group(this_leaf);

	if (!priv_group)
		return cache_default_groups;

	if (!cache_private_groups[1])
		cache_private_groups[1] = priv_group;

	return cache_private_groups;
}

/* Add/Remove cache interface for CPU device */
static void cpu_cache_sysfs_exit(unsigned int cpu)
{
	int i;
	struct device *ci_dev;

	if (per_cpu_index_dev(cpu)) {
		for (i = 0; i < cache_leaves(cpu); i++) {
			ci_dev = per_cache_index_dev(cpu, i);
			if (!ci_dev)
				continue;
			device_unregister(ci_dev);
		}
		kfree(per_cpu_index_dev(cpu));
		per_cpu_index_dev(cpu) = NULL;
	}
	device_unregister(per_cpu_cache_dev(cpu));
	per_cpu_cache_dev(cpu) = NULL;
}

static int cpu_cache_sysfs_init(unsigned int cpu)
{
	struct device *dev = get_cpu_device(cpu);

	if (per_cpu_cacheinfo(cpu) == NULL)
		return -ENOENT;

	per_cpu_cache_dev(cpu) = cpu_device_create(dev, NULL, NULL, "cache");
	if (IS_ERR(per_cpu_cache_dev(cpu)))
		return PTR_ERR(per_cpu_cache_dev(cpu));

	/* Allocate all required memory */
	per_cpu_index_dev(cpu) = kcalloc(cache_leaves(cpu),
					 sizeof(struct device *), GFP_KERNEL);
	if (unlikely(per_cpu_index_dev(cpu) == NULL))
		goto err_out;

	return 0;

err_out:
	cpu_cache_sysfs_exit(cpu);
	return -ENOMEM;
}

static int cache_add_dev(unsigned int cpu)
{
	unsigned int i;
	int rc;
	struct device *ci_dev, *parent;
	struct cacheinfo *this_leaf;
	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
	const struct attribute_group **cache_groups;

	rc = cpu_cache_sysfs_init(cpu);
	if (unlikely(rc < 0))
		return rc;

	parent = per_cpu_cache_dev(cpu);
	for (i = 0; i < cache_leaves(cpu); i++) {
		this_leaf = this_cpu_ci->info_list + i;
		if (this_leaf->disable_sysfs)
			continue;
		if (this_leaf->type == CACHE_TYPE_NOCACHE)
			break;
		cache_groups = cache_get_attribute_groups(this_leaf);
		ci_dev = cpu_device_create(parent, this_leaf, cache_groups,
					   "index%1u", i);
		if (IS_ERR(ci_dev)) {
			rc = PTR_ERR(ci_dev);
			goto err;
		}
		per_cache_index_dev(cpu, i) = ci_dev;
	}
	cpumask_set_cpu(cpu, &cache_dev_map);

	return 0;
err:
	cpu_cache_sysfs_exit(cpu);
	return rc;
}

static int cacheinfo_cpu_online(unsigned int cpu)
{
	int rc = detect_cache_attributes(cpu);

	if (rc)
		return rc;
	rc = cache_add_dev(cpu);
	if (rc)
		free_cache_attributes(cpu);
	return rc;
}

static int cacheinfo_cpu_pre_down(unsigned int cpu)
{
	if (cpumask_test_and_clear_cpu(cpu, &cache_dev_map))
		cpu_cache_sysfs_exit(cpu);

	free_cache_attributes(cpu);
	return 0;
}

static int __init cacheinfo_sysfs_init(void)
{
	return cpuhp_setup_state(CPUHP_AP_BASE_CACHEINFO_ONLINE,
				 "base/cacheinfo:online",
				 cacheinfo_cpu_online, cacheinfo_cpu_pre_down);
}
device_initcall(cacheinfo_sysfs_init);
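
/*
 * Usage sketch (values illustrative; the file layout is documented in
 * Documentation/ABI/testing/sysfs-devices-system-cpu):
 *
 *	$ cat /sys/devices/system/cpu/cpu0/cache/index2/type
 *	Unified
 *	$ cat /sys/devices/system/cpu/cpu0/cache/index2/size
 *	512K
 *	$ cat /sys/devices/system/cpu/cpu0/cache/index2/shared_cpu_list
 *	0-1
 */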