1989d42e8SGreg Kroah-Hartman // SPDX-License-Identifier: GPL-2.0 2246246cbSSudeep Holla /* 3246246cbSSudeep Holla * cacheinfo support - processor cache information via sysfs 4246246cbSSudeep Holla * 5246246cbSSudeep Holla * Based on arch/x86/kernel/cpu/intel_cacheinfo.c 6246246cbSSudeep Holla * Author: Sudeep Holla <sudeep.holla@arm.com> 7246246cbSSudeep Holla */ 88e1073b1SSudeep Holla #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 98e1073b1SSudeep Holla 1055877ef4SSudeep Holla #include <linux/acpi.h> 11246246cbSSudeep Holla #include <linux/bitops.h> 12246246cbSSudeep Holla #include <linux/cacheinfo.h> 13246246cbSSudeep Holla #include <linux/compiler.h> 14246246cbSSudeep Holla #include <linux/cpu.h> 15246246cbSSudeep Holla #include <linux/device.h> 16246246cbSSudeep Holla #include <linux/init.h> 17d4ec840bSSudeep Holla #include <linux/of_device.h> 18246246cbSSudeep Holla #include <linux/sched.h> 19246246cbSSudeep Holla #include <linux/slab.h> 20246246cbSSudeep Holla #include <linux/smp.h> 21246246cbSSudeep Holla #include <linux/sysfs.h> 22246246cbSSudeep Holla 23246246cbSSudeep Holla /* pointer to per cpu cacheinfo */ 24246246cbSSudeep Holla static DEFINE_PER_CPU(struct cpu_cacheinfo, ci_cpu_cacheinfo); 25246246cbSSudeep Holla #define ci_cacheinfo(cpu) (&per_cpu(ci_cpu_cacheinfo, cpu)) 26246246cbSSudeep Holla #define cache_leaves(cpu) (ci_cacheinfo(cpu)->num_leaves) 27246246cbSSudeep Holla #define per_cpu_cacheinfo(cpu) (ci_cacheinfo(cpu)->info_list) 28b14e8d21SSudeep Holla #define per_cpu_cacheinfo_idx(cpu, idx) \ 29b14e8d21SSudeep Holla (per_cpu_cacheinfo(cpu) + (idx)) 30246246cbSSudeep Holla 31246246cbSSudeep Holla struct cpu_cacheinfo *get_cpu_cacheinfo(unsigned int cpu) 32246246cbSSudeep Holla { 33246246cbSSudeep Holla return ci_cacheinfo(cpu); 34246246cbSSudeep Holla } 35246246cbSSudeep Holla 36246246cbSSudeep Holla static inline bool cache_leaves_are_shared(struct cacheinfo *this_leaf, 37246246cbSSudeep Holla struct cacheinfo *sib_leaf) 38246246cbSSudeep Holla { 399447eb0fSSudeep Holla /* 409447eb0fSSudeep Holla * For non DT/ACPI systems, assume unique level 1 caches, 419447eb0fSSudeep Holla * system-wide shared caches for all other levels. This will be used 429447eb0fSSudeep Holla * only if arch specific code has not populated shared_cpu_map 439447eb0fSSudeep Holla */ 449447eb0fSSudeep Holla if (!(IS_ENABLED(CONFIG_OF) || IS_ENABLED(CONFIG_ACPI))) 459447eb0fSSudeep Holla return !(this_leaf->level == 1); 469447eb0fSSudeep Holla 47f16d1becSSudeep Holla if ((sib_leaf->attributes & CACHE_ID) && 48f16d1becSSudeep Holla (this_leaf->attributes & CACHE_ID)) 49f16d1becSSudeep Holla return sib_leaf->id == this_leaf->id; 50f16d1becSSudeep Holla 519b97387cSJeremy Linton return sib_leaf->fw_token == this_leaf->fw_token; 52246246cbSSudeep Holla } 53dfea747dSSudeep Holla 54cc1cfc47SSudeep Holla bool last_level_cache_is_valid(unsigned int cpu) 55cc1cfc47SSudeep Holla { 56cc1cfc47SSudeep Holla struct cacheinfo *llc; 57cc1cfc47SSudeep Holla 58cc1cfc47SSudeep Holla if (!cache_leaves(cpu)) 59cc1cfc47SSudeep Holla return false; 60cc1cfc47SSudeep Holla 61cc1cfc47SSudeep Holla llc = per_cpu_cacheinfo_idx(cpu, cache_leaves(cpu) - 1); 62cc1cfc47SSudeep Holla 63f16d1becSSudeep Holla return (llc->attributes & CACHE_ID) || !!llc->fw_token; 64f16d1becSSudeep Holla 65cc1cfc47SSudeep Holla } 66cc1cfc47SSudeep Holla 67cc1cfc47SSudeep Holla bool last_level_cache_is_shared(unsigned int cpu_x, unsigned int cpu_y) 68cc1cfc47SSudeep Holla { 69cc1cfc47SSudeep Holla struct cacheinfo *llc_x, *llc_y; 70cc1cfc47SSudeep Holla 71cc1cfc47SSudeep Holla if (!last_level_cache_is_valid(cpu_x) || 72cc1cfc47SSudeep Holla !last_level_cache_is_valid(cpu_y)) 73cc1cfc47SSudeep Holla return false; 74cc1cfc47SSudeep Holla 75cc1cfc47SSudeep Holla llc_x = per_cpu_cacheinfo_idx(cpu_x, cache_leaves(cpu_x) - 1); 76cc1cfc47SSudeep Holla llc_y = per_cpu_cacheinfo_idx(cpu_y, cache_leaves(cpu_y) - 1); 77cc1cfc47SSudeep Holla 78cc1cfc47SSudeep Holla return cache_leaves_are_shared(llc_x, llc_y); 79cc1cfc47SSudeep Holla } 80cc1cfc47SSudeep Holla 819447eb0fSSudeep Holla #ifdef CONFIG_OF 82dfea747dSSudeep Holla /* OF properties to query for a given cache type */ 83dfea747dSSudeep Holla struct cache_type_info { 84dfea747dSSudeep Holla const char *size_prop; 85dfea747dSSudeep Holla const char *line_size_props[2]; 86dfea747dSSudeep Holla const char *nr_sets_prop; 87dfea747dSSudeep Holla }; 88dfea747dSSudeep Holla 89dfea747dSSudeep Holla static const struct cache_type_info cache_type_info[] = { 90dfea747dSSudeep Holla { 91dfea747dSSudeep Holla .size_prop = "cache-size", 92dfea747dSSudeep Holla .line_size_props = { "cache-line-size", 93dfea747dSSudeep Holla "cache-block-size", }, 94dfea747dSSudeep Holla .nr_sets_prop = "cache-sets", 95dfea747dSSudeep Holla }, { 96dfea747dSSudeep Holla .size_prop = "i-cache-size", 97dfea747dSSudeep Holla .line_size_props = { "i-cache-line-size", 98dfea747dSSudeep Holla "i-cache-block-size", }, 99dfea747dSSudeep Holla .nr_sets_prop = "i-cache-sets", 100dfea747dSSudeep Holla }, { 101dfea747dSSudeep Holla .size_prop = "d-cache-size", 102dfea747dSSudeep Holla .line_size_props = { "d-cache-line-size", 103dfea747dSSudeep Holla "d-cache-block-size", }, 104dfea747dSSudeep Holla .nr_sets_prop = "d-cache-sets", 105dfea747dSSudeep Holla }, 106dfea747dSSudeep Holla }; 107dfea747dSSudeep Holla 108dfea747dSSudeep Holla static inline int get_cacheinfo_idx(enum cache_type type) 109dfea747dSSudeep Holla { 110dfea747dSSudeep Holla if (type == CACHE_TYPE_UNIFIED) 111dfea747dSSudeep Holla return 0; 112dfea747dSSudeep Holla return type; 113dfea747dSSudeep Holla } 114dfea747dSSudeep Holla 1152ff075c7SJeremy Linton static void cache_size(struct cacheinfo *this_leaf, struct device_node *np) 116dfea747dSSudeep Holla { 117dfea747dSSudeep Holla const char *propname; 118dfea747dSSudeep Holla int ct_idx; 119dfea747dSSudeep Holla 120dfea747dSSudeep Holla ct_idx = get_cacheinfo_idx(this_leaf->type); 121dfea747dSSudeep Holla propname = cache_type_info[ct_idx].size_prop; 122dfea747dSSudeep Holla 1233a34c986SHuacai Chen of_property_read_u32(np, propname, &this_leaf->size); 124dfea747dSSudeep Holla } 125dfea747dSSudeep Holla 126dfea747dSSudeep Holla /* not cache_line_size() because that's a macro in include/linux/cache.h */ 1272ff075c7SJeremy Linton static void cache_get_line_size(struct cacheinfo *this_leaf, 1282ff075c7SJeremy Linton struct device_node *np) 129dfea747dSSudeep Holla { 130dfea747dSSudeep Holla int i, lim, ct_idx; 131dfea747dSSudeep Holla 132dfea747dSSudeep Holla ct_idx = get_cacheinfo_idx(this_leaf->type); 133dfea747dSSudeep Holla lim = ARRAY_SIZE(cache_type_info[ct_idx].line_size_props); 134dfea747dSSudeep Holla 135dfea747dSSudeep Holla for (i = 0; i < lim; i++) { 136448a5a55SSudeep Holla int ret; 137448a5a55SSudeep Holla u32 line_size; 138dfea747dSSudeep Holla const char *propname; 139dfea747dSSudeep Holla 140dfea747dSSudeep Holla propname = cache_type_info[ct_idx].line_size_props[i]; 141448a5a55SSudeep Holla ret = of_property_read_u32(np, propname, &line_size); 142448a5a55SSudeep Holla if (!ret) { 143448a5a55SSudeep Holla this_leaf->coherency_line_size = line_size; 144dfea747dSSudeep Holla break; 145dfea747dSSudeep Holla } 146448a5a55SSudeep Holla } 147dfea747dSSudeep Holla } 148dfea747dSSudeep Holla 1492ff075c7SJeremy Linton static void cache_nr_sets(struct cacheinfo *this_leaf, struct device_node *np) 150dfea747dSSudeep Holla { 151dfea747dSSudeep Holla const char *propname; 152dfea747dSSudeep Holla int ct_idx; 153dfea747dSSudeep Holla 154dfea747dSSudeep Holla ct_idx = get_cacheinfo_idx(this_leaf->type); 155dfea747dSSudeep Holla propname = cache_type_info[ct_idx].nr_sets_prop; 156dfea747dSSudeep Holla 1573a34c986SHuacai Chen of_property_read_u32(np, propname, &this_leaf->number_of_sets); 158dfea747dSSudeep Holla } 159dfea747dSSudeep Holla 160dfea747dSSudeep Holla static void cache_associativity(struct cacheinfo *this_leaf) 161dfea747dSSudeep Holla { 162dfea747dSSudeep Holla unsigned int line_size = this_leaf->coherency_line_size; 163dfea747dSSudeep Holla unsigned int nr_sets = this_leaf->number_of_sets; 164dfea747dSSudeep Holla unsigned int size = this_leaf->size; 165dfea747dSSudeep Holla 166dfea747dSSudeep Holla /* 167dfea747dSSudeep Holla * If the cache is fully associative, there is no need to 168dfea747dSSudeep Holla * check the other properties. 169dfea747dSSudeep Holla */ 170dfea747dSSudeep Holla if (!(nr_sets == 1) && (nr_sets > 0 && size > 0 && line_size > 0)) 171dfea747dSSudeep Holla this_leaf->ways_of_associativity = (size / nr_sets) / line_size; 172dfea747dSSudeep Holla } 173dfea747dSSudeep Holla 1742ff075c7SJeremy Linton static bool cache_node_is_unified(struct cacheinfo *this_leaf, 1752ff075c7SJeremy Linton struct device_node *np) 176f57ab9a0SSudeep Holla { 1772ff075c7SJeremy Linton return of_property_read_bool(np, "cache-unified"); 178f57ab9a0SSudeep Holla } 179f57ab9a0SSudeep Holla 1802ff075c7SJeremy Linton static void cache_of_set_props(struct cacheinfo *this_leaf, 1812ff075c7SJeremy Linton struct device_node *np) 182dfea747dSSudeep Holla { 183f57ab9a0SSudeep Holla /* 184f57ab9a0SSudeep Holla * init_cache_level must setup the cache level correctly 185f57ab9a0SSudeep Holla * overriding the architecturally specified levels, so 186f57ab9a0SSudeep Holla * if type is NONE at this stage, it should be unified 187f57ab9a0SSudeep Holla */ 188f57ab9a0SSudeep Holla if (this_leaf->type == CACHE_TYPE_NOCACHE && 1892ff075c7SJeremy Linton cache_node_is_unified(this_leaf, np)) 190f57ab9a0SSudeep Holla this_leaf->type = CACHE_TYPE_UNIFIED; 1912ff075c7SJeremy Linton cache_size(this_leaf, np); 1922ff075c7SJeremy Linton cache_get_line_size(this_leaf, np); 1932ff075c7SJeremy Linton cache_nr_sets(this_leaf, np); 194dfea747dSSudeep Holla cache_associativity(this_leaf); 195dfea747dSSudeep Holla } 196d529a18aSJeremy Linton 197d529a18aSJeremy Linton static int cache_setup_of_node(unsigned int cpu) 198d529a18aSJeremy Linton { 1993da72e18SPierre Gondois struct device_node *np, *prev; 200d529a18aSJeremy Linton struct cacheinfo *this_leaf; 201d529a18aSJeremy Linton unsigned int index = 0; 202d529a18aSJeremy Linton 203d4ec840bSSudeep Holla np = of_cpu_device_node_get(cpu); 204d529a18aSJeremy Linton if (!np) { 205d529a18aSJeremy Linton pr_err("Failed to find cpu%d device node\n", cpu); 206d529a18aSJeremy Linton return -ENOENT; 207d529a18aSJeremy Linton } 208d529a18aSJeremy Linton 2093da72e18SPierre Gondois prev = np; 2103da72e18SPierre Gondois 211d529a18aSJeremy Linton while (index < cache_leaves(cpu)) { 212b14e8d21SSudeep Holla this_leaf = per_cpu_cacheinfo_idx(cpu, index); 2133da72e18SPierre Gondois if (this_leaf->level != 1) { 214d529a18aSJeremy Linton np = of_find_next_cache_node(np); 2153da72e18SPierre Gondois of_node_put(prev); 2163da72e18SPierre Gondois prev = np; 217d529a18aSJeremy Linton if (!np) 218d529a18aSJeremy Linton break; 2193da72e18SPierre Gondois } 2202ff075c7SJeremy Linton cache_of_set_props(this_leaf, np); 2219b97387cSJeremy Linton this_leaf->fw_token = np; 222d529a18aSJeremy Linton index++; 223d529a18aSJeremy Linton } 224d529a18aSJeremy Linton 2253da72e18SPierre Gondois of_node_put(np); 2263da72e18SPierre Gondois 227d529a18aSJeremy Linton if (index != cache_leaves(cpu)) /* not all OF nodes populated */ 228d529a18aSJeremy Linton return -ENOENT; 229d529a18aSJeremy Linton 230d529a18aSJeremy Linton return 0; 231d529a18aSJeremy Linton } 232c3719bd9SPierre Gondois 233*de0df442SPierre Gondois static int of_count_cache_leaves(struct device_node *np) 234c3719bd9SPierre Gondois { 235*de0df442SPierre Gondois unsigned int leaves = 0; 236c3719bd9SPierre Gondois 237c3719bd9SPierre Gondois if (of_property_read_bool(np, "cache-size")) 238c3719bd9SPierre Gondois ++leaves; 239c3719bd9SPierre Gondois if (of_property_read_bool(np, "i-cache-size")) 240c3719bd9SPierre Gondois ++leaves; 241c3719bd9SPierre Gondois if (of_property_read_bool(np, "d-cache-size")) 242c3719bd9SPierre Gondois ++leaves; 243*de0df442SPierre Gondois 244*de0df442SPierre Gondois if (!leaves) { 245*de0df442SPierre Gondois /* The '[i-|d-|]cache-size' property is required, but 246*de0df442SPierre Gondois * if absent, fallback on the 'cache-unified' property. 247*de0df442SPierre Gondois */ 248*de0df442SPierre Gondois if (of_property_read_bool(np, "cache-unified")) 249*de0df442SPierre Gondois return 1; 250*de0df442SPierre Gondois else 251*de0df442SPierre Gondois return 2; 252*de0df442SPierre Gondois } 253*de0df442SPierre Gondois 254*de0df442SPierre Gondois return leaves; 255*de0df442SPierre Gondois } 256*de0df442SPierre Gondois 257*de0df442SPierre Gondois int init_of_cache_level(unsigned int cpu) 258*de0df442SPierre Gondois { 259*de0df442SPierre Gondois struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu); 260*de0df442SPierre Gondois struct device_node *np = of_cpu_device_node_get(cpu); 261*de0df442SPierre Gondois struct device_node *prev = NULL; 262*de0df442SPierre Gondois unsigned int levels = 0, leaves, level; 263*de0df442SPierre Gondois 264*de0df442SPierre Gondois leaves = of_count_cache_leaves(np); 265c3719bd9SPierre Gondois if (leaves > 0) 266c3719bd9SPierre Gondois levels = 1; 267c3719bd9SPierre Gondois 268c3719bd9SPierre Gondois prev = np; 269c3719bd9SPierre Gondois while ((np = of_find_next_cache_node(np))) { 270c3719bd9SPierre Gondois of_node_put(prev); 271c3719bd9SPierre Gondois prev = np; 272c3719bd9SPierre Gondois if (!of_device_is_compatible(np, "cache")) 2738844c3dfSPierre Gondois goto err_out; 274c3719bd9SPierre Gondois if (of_property_read_u32(np, "cache-level", &level)) 2758844c3dfSPierre Gondois goto err_out; 276c3719bd9SPierre Gondois if (level <= levels) 2778844c3dfSPierre Gondois goto err_out; 278*de0df442SPierre Gondois 279*de0df442SPierre Gondois leaves += of_count_cache_leaves(np); 280c3719bd9SPierre Gondois levels = level; 281c3719bd9SPierre Gondois } 282c3719bd9SPierre Gondois 283c3719bd9SPierre Gondois of_node_put(np); 284c3719bd9SPierre Gondois this_cpu_ci->num_levels = levels; 285c3719bd9SPierre Gondois this_cpu_ci->num_leaves = leaves; 286c3719bd9SPierre Gondois 287c3719bd9SPierre Gondois return 0; 2888844c3dfSPierre Gondois 2898844c3dfSPierre Gondois err_out: 2908844c3dfSPierre Gondois of_node_put(np); 2918844c3dfSPierre Gondois return -EINVAL; 292c3719bd9SPierre Gondois } 293c3719bd9SPierre Gondois 294246246cbSSudeep Holla #else 295246246cbSSudeep Holla static inline int cache_setup_of_node(unsigned int cpu) { return 0; } 296c3719bd9SPierre Gondois int init_of_cache_level(unsigned int cpu) { return 0; } 297246246cbSSudeep Holla #endif 298246246cbSSudeep Holla 299582b468bSJeremy Linton int __weak cache_setup_acpi(unsigned int cpu) 300582b468bSJeremy Linton { 301582b468bSJeremy Linton return -ENOTSUPP; 302582b468bSJeremy Linton } 303582b468bSJeremy Linton 3049a83c84cSShaokun Zhang unsigned int coherency_max_size; 3059a83c84cSShaokun Zhang 30636bbc5b4SSudeep Holla static int cache_setup_properties(unsigned int cpu) 30736bbc5b4SSudeep Holla { 30836bbc5b4SSudeep Holla int ret = 0; 30936bbc5b4SSudeep Holla 31036bbc5b4SSudeep Holla if (of_have_populated_dt()) 31136bbc5b4SSudeep Holla ret = cache_setup_of_node(cpu); 31236bbc5b4SSudeep Holla else if (!acpi_disabled) 31336bbc5b4SSudeep Holla ret = cache_setup_acpi(cpu); 31436bbc5b4SSudeep Holla 31536bbc5b4SSudeep Holla return ret; 31636bbc5b4SSudeep Holla } 31736bbc5b4SSudeep Holla 318246246cbSSudeep Holla static int cache_shared_cpu_map_setup(unsigned int cpu) 319246246cbSSudeep Holla { 320246246cbSSudeep Holla struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu); 321246246cbSSudeep Holla struct cacheinfo *this_leaf, *sib_leaf; 322246246cbSSudeep Holla unsigned int index; 32355877ef4SSudeep Holla int ret = 0; 324246246cbSSudeep Holla 325fac51482SSudeep Holla if (this_cpu_ci->cpu_map_populated) 326fac51482SSudeep Holla return 0; 327fac51482SSudeep Holla 32836bbc5b4SSudeep Holla /* 32936bbc5b4SSudeep Holla * skip setting up cache properties if LLC is valid, just need 33036bbc5b4SSudeep Holla * to update the shared cpu_map if the cache attributes were 33136bbc5b4SSudeep Holla * populated early before all the cpus are brought online 33236bbc5b4SSudeep Holla */ 33336bbc5b4SSudeep Holla if (!last_level_cache_is_valid(cpu)) { 33436bbc5b4SSudeep Holla ret = cache_setup_properties(cpu); 335246246cbSSudeep Holla if (ret) 336246246cbSSudeep Holla return ret; 33736bbc5b4SSudeep Holla } 338246246cbSSudeep Holla 339246246cbSSudeep Holla for (index = 0; index < cache_leaves(cpu); index++) { 340246246cbSSudeep Holla unsigned int i; 341246246cbSSudeep Holla 342b14e8d21SSudeep Holla this_leaf = per_cpu_cacheinfo_idx(cpu, index); 343246246cbSSudeep Holla 344246246cbSSudeep Holla cpumask_set_cpu(cpu, &this_leaf->shared_cpu_map); 345246246cbSSudeep Holla for_each_online_cpu(i) { 346246246cbSSudeep Holla struct cpu_cacheinfo *sib_cpu_ci = get_cpu_cacheinfo(i); 347246246cbSSudeep Holla 348246246cbSSudeep Holla if (i == cpu || !sib_cpu_ci->info_list) 349246246cbSSudeep Holla continue;/* skip if itself or no cacheinfo */ 35052110313SSudeep Holla 351b14e8d21SSudeep Holla sib_leaf = per_cpu_cacheinfo_idx(i, index); 352246246cbSSudeep Holla if (cache_leaves_are_shared(this_leaf, sib_leaf)) { 353246246cbSSudeep Holla cpumask_set_cpu(cpu, &sib_leaf->shared_cpu_map); 354246246cbSSudeep Holla cpumask_set_cpu(i, &this_leaf->shared_cpu_map); 355246246cbSSudeep Holla } 356246246cbSSudeep Holla } 3579a83c84cSShaokun Zhang /* record the maximum cache line size */ 3589a83c84cSShaokun Zhang if (this_leaf->coherency_line_size > coherency_max_size) 3599a83c84cSShaokun Zhang coherency_max_size = this_leaf->coherency_line_size; 360246246cbSSudeep Holla } 361246246cbSSudeep Holla 362246246cbSSudeep Holla return 0; 363246246cbSSudeep Holla } 364246246cbSSudeep Holla 365246246cbSSudeep Holla static void cache_shared_cpu_map_remove(unsigned int cpu) 366246246cbSSudeep Holla { 367246246cbSSudeep Holla struct cacheinfo *this_leaf, *sib_leaf; 368246246cbSSudeep Holla unsigned int sibling, index; 369246246cbSSudeep Holla 370246246cbSSudeep Holla for (index = 0; index < cache_leaves(cpu); index++) { 371b14e8d21SSudeep Holla this_leaf = per_cpu_cacheinfo_idx(cpu, index); 372246246cbSSudeep Holla for_each_cpu(sibling, &this_leaf->shared_cpu_map) { 37352110313SSudeep Holla struct cpu_cacheinfo *sib_cpu_ci = 37452110313SSudeep Holla get_cpu_cacheinfo(sibling); 375246246cbSSudeep Holla 37652110313SSudeep Holla if (sibling == cpu || !sib_cpu_ci->info_list) 37752110313SSudeep Holla continue;/* skip if itself or no cacheinfo */ 3782110d70cSBorislav Petkov 379b14e8d21SSudeep Holla sib_leaf = per_cpu_cacheinfo_idx(sibling, index); 380246246cbSSudeep Holla cpumask_clear_cpu(cpu, &sib_leaf->shared_cpu_map); 381246246cbSSudeep Holla cpumask_clear_cpu(sibling, &this_leaf->shared_cpu_map); 382246246cbSSudeep Holla } 383246246cbSSudeep Holla } 384246246cbSSudeep Holla } 385246246cbSSudeep Holla 386246246cbSSudeep Holla static void free_cache_attributes(unsigned int cpu) 387246246cbSSudeep Holla { 3882110d70cSBorislav Petkov if (!per_cpu_cacheinfo(cpu)) 3892110d70cSBorislav Petkov return; 3902110d70cSBorislav Petkov 391246246cbSSudeep Holla cache_shared_cpu_map_remove(cpu); 392246246cbSSudeep Holla 393246246cbSSudeep Holla kfree(per_cpu_cacheinfo(cpu)); 394246246cbSSudeep Holla per_cpu_cacheinfo(cpu) = NULL; 395e022eac8SXiongfeng Wang cache_leaves(cpu) = 0; 396246246cbSSudeep Holla } 397246246cbSSudeep Holla 398246246cbSSudeep Holla int __weak init_cache_level(unsigned int cpu) 399246246cbSSudeep Holla { 400246246cbSSudeep Holla return -ENOENT; 401246246cbSSudeep Holla } 402246246cbSSudeep Holla 403246246cbSSudeep Holla int __weak populate_cache_leaves(unsigned int cpu) 404246246cbSSudeep Holla { 405246246cbSSudeep Holla return -ENOENT; 406246246cbSSudeep Holla } 407246246cbSSudeep Holla 40836bbc5b4SSudeep Holla int detect_cache_attributes(unsigned int cpu) 409246246cbSSudeep Holla { 410246246cbSSudeep Holla int ret; 411246246cbSSudeep Holla 41236bbc5b4SSudeep Holla /* Since early detection of the cacheinfo is allowed via this 41336bbc5b4SSudeep Holla * function and this also gets called as CPU hotplug callbacks via 41436bbc5b4SSudeep Holla * cacheinfo_cpu_online, the initialisation can be skipped and only 41536bbc5b4SSudeep Holla * CPU maps can be updated as the CPU online status would be update 41636bbc5b4SSudeep Holla * if called via cacheinfo_cpu_online path. 41736bbc5b4SSudeep Holla */ 41836bbc5b4SSudeep Holla if (per_cpu_cacheinfo(cpu)) 41936bbc5b4SSudeep Holla goto update_cpu_map; 42036bbc5b4SSudeep Holla 4213370e13aSSudeep Holla if (init_cache_level(cpu) || !cache_leaves(cpu)) 422246246cbSSudeep Holla return -ENOENT; 423246246cbSSudeep Holla 424246246cbSSudeep Holla per_cpu_cacheinfo(cpu) = kcalloc(cache_leaves(cpu), 42511969d69SSudeep Holla sizeof(struct cacheinfo), GFP_ATOMIC); 42636bbc5b4SSudeep Holla if (per_cpu_cacheinfo(cpu) == NULL) { 42736bbc5b4SSudeep Holla cache_leaves(cpu) = 0; 428246246cbSSudeep Holla return -ENOMEM; 42936bbc5b4SSudeep Holla } 430246246cbSSudeep Holla 4312ff075c7SJeremy Linton /* 4322ff075c7SJeremy Linton * populate_cache_leaves() may completely setup the cache leaves and 4332ff075c7SJeremy Linton * shared_cpu_map or it may leave it partially setup. 4342ff075c7SJeremy Linton */ 435246246cbSSudeep Holla ret = populate_cache_leaves(cpu); 436246246cbSSudeep Holla if (ret) 437246246cbSSudeep Holla goto free_ci; 43836bbc5b4SSudeep Holla 43936bbc5b4SSudeep Holla update_cpu_map: 440246246cbSSudeep Holla /* 4419b97387cSJeremy Linton * For systems using DT for cache hierarchy, fw_token 4429b97387cSJeremy Linton * and shared_cpu_map will be set up here only if they are 4439b97387cSJeremy Linton * not populated already 444246246cbSSudeep Holla */ 445246246cbSSudeep Holla ret = cache_shared_cpu_map_setup(cpu); 4468a7d95f9SSudeep Holla if (ret) { 44755877ef4SSudeep Holla pr_warn("Unable to detect cache hierarchy for CPU %d\n", cpu); 448246246cbSSudeep Holla goto free_ci; 4498a7d95f9SSudeep Holla } 450dfea747dSSudeep Holla 451246246cbSSudeep Holla return 0; 452246246cbSSudeep Holla 453246246cbSSudeep Holla free_ci: 454246246cbSSudeep Holla free_cache_attributes(cpu); 455246246cbSSudeep Holla return ret; 456246246cbSSudeep Holla } 457246246cbSSudeep Holla 458246246cbSSudeep Holla /* pointer to cpuX/cache device */ 459246246cbSSudeep Holla static DEFINE_PER_CPU(struct device *, ci_cache_dev); 460246246cbSSudeep Holla #define per_cpu_cache_dev(cpu) (per_cpu(ci_cache_dev, cpu)) 461246246cbSSudeep Holla 462246246cbSSudeep Holla static cpumask_t cache_dev_map; 463246246cbSSudeep Holla 464246246cbSSudeep Holla /* pointer to array of devices for cpuX/cache/indexY */ 465246246cbSSudeep Holla static DEFINE_PER_CPU(struct device **, ci_index_dev); 466246246cbSSudeep Holla #define per_cpu_index_dev(cpu) (per_cpu(ci_index_dev, cpu)) 467246246cbSSudeep Holla #define per_cache_index_dev(cpu, idx) ((per_cpu_index_dev(cpu))[idx]) 468246246cbSSudeep Holla 469246246cbSSudeep Holla #define show_one(file_name, object) \ 470246246cbSSudeep Holla static ssize_t file_name##_show(struct device *dev, \ 471246246cbSSudeep Holla struct device_attribute *attr, char *buf) \ 472246246cbSSudeep Holla { \ 473246246cbSSudeep Holla struct cacheinfo *this_leaf = dev_get_drvdata(dev); \ 474948b3edbSJoe Perches return sysfs_emit(buf, "%u\n", this_leaf->object); \ 475246246cbSSudeep Holla } 476246246cbSSudeep Holla 477e9a2ea5aSFenghua Yu show_one(id, id); 478246246cbSSudeep Holla show_one(level, level); 479246246cbSSudeep Holla show_one(coherency_line_size, coherency_line_size); 480246246cbSSudeep Holla show_one(number_of_sets, number_of_sets); 481246246cbSSudeep Holla show_one(physical_line_partition, physical_line_partition); 482246246cbSSudeep Holla show_one(ways_of_associativity, ways_of_associativity); 483246246cbSSudeep Holla 484246246cbSSudeep Holla static ssize_t size_show(struct device *dev, 485246246cbSSudeep Holla struct device_attribute *attr, char *buf) 486246246cbSSudeep Holla { 487246246cbSSudeep Holla struct cacheinfo *this_leaf = dev_get_drvdata(dev); 488246246cbSSudeep Holla 489aa838896SJoe Perches return sysfs_emit(buf, "%uK\n", this_leaf->size >> 10); 490246246cbSSudeep Holla } 491246246cbSSudeep Holla 492e015e036SJoe Perches static ssize_t shared_cpu_map_show(struct device *dev, 493e015e036SJoe Perches struct device_attribute *attr, char *buf) 494246246cbSSudeep Holla { 495246246cbSSudeep Holla struct cacheinfo *this_leaf = dev_get_drvdata(dev); 496246246cbSSudeep Holla const struct cpumask *mask = &this_leaf->shared_cpu_map; 497246246cbSSudeep Holla 498e015e036SJoe Perches return sysfs_emit(buf, "%*pb\n", nr_cpu_ids, mask); 499246246cbSSudeep Holla } 500246246cbSSudeep Holla 501246246cbSSudeep Holla static ssize_t shared_cpu_list_show(struct device *dev, 502246246cbSSudeep Holla struct device_attribute *attr, char *buf) 503246246cbSSudeep Holla { 504e015e036SJoe Perches struct cacheinfo *this_leaf = dev_get_drvdata(dev); 505e015e036SJoe Perches const struct cpumask *mask = &this_leaf->shared_cpu_map; 506e015e036SJoe Perches 507e015e036SJoe Perches return sysfs_emit(buf, "%*pbl\n", nr_cpu_ids, mask); 508246246cbSSudeep Holla } 509246246cbSSudeep Holla 510246246cbSSudeep Holla static ssize_t type_show(struct device *dev, 511246246cbSSudeep Holla struct device_attribute *attr, char *buf) 512246246cbSSudeep Holla { 513246246cbSSudeep Holla struct cacheinfo *this_leaf = dev_get_drvdata(dev); 514973c3911SJoe Perches const char *output; 515246246cbSSudeep Holla 516246246cbSSudeep Holla switch (this_leaf->type) { 517246246cbSSudeep Holla case CACHE_TYPE_DATA: 518973c3911SJoe Perches output = "Data"; 519973c3911SJoe Perches break; 520246246cbSSudeep Holla case CACHE_TYPE_INST: 521973c3911SJoe Perches output = "Instruction"; 522973c3911SJoe Perches break; 523246246cbSSudeep Holla case CACHE_TYPE_UNIFIED: 524973c3911SJoe Perches output = "Unified"; 525973c3911SJoe Perches break; 526246246cbSSudeep Holla default: 527246246cbSSudeep Holla return -EINVAL; 528246246cbSSudeep Holla } 529973c3911SJoe Perches 530973c3911SJoe Perches return sysfs_emit(buf, "%s\n", output); 531246246cbSSudeep Holla } 532246246cbSSudeep Holla 533246246cbSSudeep Holla static ssize_t allocation_policy_show(struct device *dev, 534246246cbSSudeep Holla struct device_attribute *attr, char *buf) 535246246cbSSudeep Holla { 536246246cbSSudeep Holla struct cacheinfo *this_leaf = dev_get_drvdata(dev); 537246246cbSSudeep Holla unsigned int ci_attr = this_leaf->attributes; 538973c3911SJoe Perches const char *output; 539246246cbSSudeep Holla 540246246cbSSudeep Holla if ((ci_attr & CACHE_READ_ALLOCATE) && (ci_attr & CACHE_WRITE_ALLOCATE)) 541973c3911SJoe Perches output = "ReadWriteAllocate"; 542246246cbSSudeep Holla else if (ci_attr & CACHE_READ_ALLOCATE) 543973c3911SJoe Perches output = "ReadAllocate"; 544246246cbSSudeep Holla else if (ci_attr & CACHE_WRITE_ALLOCATE) 545973c3911SJoe Perches output = "WriteAllocate"; 546973c3911SJoe Perches else 547973c3911SJoe Perches return 0; 548973c3911SJoe Perches 549973c3911SJoe Perches return sysfs_emit(buf, "%s\n", output); 550246246cbSSudeep Holla } 551246246cbSSudeep Holla 552246246cbSSudeep Holla static ssize_t write_policy_show(struct device *dev, 553246246cbSSudeep Holla struct device_attribute *attr, char *buf) 554246246cbSSudeep Holla { 555246246cbSSudeep Holla struct cacheinfo *this_leaf = dev_get_drvdata(dev); 556246246cbSSudeep Holla unsigned int ci_attr = this_leaf->attributes; 557246246cbSSudeep Holla int n = 0; 558246246cbSSudeep Holla 559246246cbSSudeep Holla if (ci_attr & CACHE_WRITE_THROUGH) 560aa838896SJoe Perches n = sysfs_emit(buf, "WriteThrough\n"); 561246246cbSSudeep Holla else if (ci_attr & CACHE_WRITE_BACK) 562aa838896SJoe Perches n = sysfs_emit(buf, "WriteBack\n"); 563246246cbSSudeep Holla return n; 564246246cbSSudeep Holla } 565246246cbSSudeep Holla 566e9a2ea5aSFenghua Yu static DEVICE_ATTR_RO(id); 567246246cbSSudeep Holla static DEVICE_ATTR_RO(level); 568246246cbSSudeep Holla static DEVICE_ATTR_RO(type); 569246246cbSSudeep Holla static DEVICE_ATTR_RO(coherency_line_size); 570246246cbSSudeep Holla static DEVICE_ATTR_RO(ways_of_associativity); 571246246cbSSudeep Holla static DEVICE_ATTR_RO(number_of_sets); 572246246cbSSudeep Holla static DEVICE_ATTR_RO(size); 573246246cbSSudeep Holla static DEVICE_ATTR_RO(allocation_policy); 574246246cbSSudeep Holla static DEVICE_ATTR_RO(write_policy); 575246246cbSSudeep Holla static DEVICE_ATTR_RO(shared_cpu_map); 576246246cbSSudeep Holla static DEVICE_ATTR_RO(shared_cpu_list); 577246246cbSSudeep Holla static DEVICE_ATTR_RO(physical_line_partition); 578246246cbSSudeep Holla 579246246cbSSudeep Holla static struct attribute *cache_default_attrs[] = { 580e9a2ea5aSFenghua Yu &dev_attr_id.attr, 581246246cbSSudeep Holla &dev_attr_type.attr, 582246246cbSSudeep Holla &dev_attr_level.attr, 583246246cbSSudeep Holla &dev_attr_shared_cpu_map.attr, 584246246cbSSudeep Holla &dev_attr_shared_cpu_list.attr, 585246246cbSSudeep Holla &dev_attr_coherency_line_size.attr, 586246246cbSSudeep Holla &dev_attr_ways_of_associativity.attr, 587246246cbSSudeep Holla &dev_attr_number_of_sets.attr, 588246246cbSSudeep Holla &dev_attr_size.attr, 589246246cbSSudeep Holla &dev_attr_allocation_policy.attr, 590246246cbSSudeep Holla &dev_attr_write_policy.attr, 591246246cbSSudeep Holla &dev_attr_physical_line_partition.attr, 592246246cbSSudeep Holla NULL 593246246cbSSudeep Holla }; 594246246cbSSudeep Holla 595246246cbSSudeep Holla static umode_t 596246246cbSSudeep Holla cache_default_attrs_is_visible(struct kobject *kobj, 597246246cbSSudeep Holla struct attribute *attr, int unused) 598246246cbSSudeep Holla { 599246246cbSSudeep Holla struct device *dev = kobj_to_dev(kobj); 600246246cbSSudeep Holla struct cacheinfo *this_leaf = dev_get_drvdata(dev); 601246246cbSSudeep Holla const struct cpumask *mask = &this_leaf->shared_cpu_map; 602246246cbSSudeep Holla umode_t mode = attr->mode; 603246246cbSSudeep Holla 604e9a2ea5aSFenghua Yu if ((attr == &dev_attr_id.attr) && (this_leaf->attributes & CACHE_ID)) 605e9a2ea5aSFenghua Yu return mode; 606246246cbSSudeep Holla if ((attr == &dev_attr_type.attr) && this_leaf->type) 607246246cbSSudeep Holla return mode; 608246246cbSSudeep Holla if ((attr == &dev_attr_level.attr) && this_leaf->level) 609246246cbSSudeep Holla return mode; 610246246cbSSudeep Holla if ((attr == &dev_attr_shared_cpu_map.attr) && !cpumask_empty(mask)) 611246246cbSSudeep Holla return mode; 612246246cbSSudeep Holla if ((attr == &dev_attr_shared_cpu_list.attr) && !cpumask_empty(mask)) 613246246cbSSudeep Holla return mode; 614246246cbSSudeep Holla if ((attr == &dev_attr_coherency_line_size.attr) && 615246246cbSSudeep Holla this_leaf->coherency_line_size) 616246246cbSSudeep Holla return mode; 617246246cbSSudeep Holla if ((attr == &dev_attr_ways_of_associativity.attr) && 618246246cbSSudeep Holla this_leaf->size) /* allow 0 = full associativity */ 619246246cbSSudeep Holla return mode; 620246246cbSSudeep Holla if ((attr == &dev_attr_number_of_sets.attr) && 621246246cbSSudeep Holla this_leaf->number_of_sets) 622246246cbSSudeep Holla return mode; 623246246cbSSudeep Holla if ((attr == &dev_attr_size.attr) && this_leaf->size) 624246246cbSSudeep Holla return mode; 625246246cbSSudeep Holla if ((attr == &dev_attr_write_policy.attr) && 626246246cbSSudeep Holla (this_leaf->attributes & CACHE_WRITE_POLICY_MASK)) 627246246cbSSudeep Holla return mode; 628246246cbSSudeep Holla if ((attr == &dev_attr_allocation_policy.attr) && 629246246cbSSudeep Holla (this_leaf->attributes & CACHE_ALLOCATE_POLICY_MASK)) 630246246cbSSudeep Holla return mode; 631246246cbSSudeep Holla if ((attr == &dev_attr_physical_line_partition.attr) && 632246246cbSSudeep Holla this_leaf->physical_line_partition) 633246246cbSSudeep Holla return mode; 634246246cbSSudeep Holla 635246246cbSSudeep Holla return 0; 636246246cbSSudeep Holla } 637246246cbSSudeep Holla 638246246cbSSudeep Holla static const struct attribute_group cache_default_group = { 639246246cbSSudeep Holla .attrs = cache_default_attrs, 640246246cbSSudeep Holla .is_visible = cache_default_attrs_is_visible, 641246246cbSSudeep Holla }; 642246246cbSSudeep Holla 643246246cbSSudeep Holla static const struct attribute_group *cache_default_groups[] = { 644246246cbSSudeep Holla &cache_default_group, 645246246cbSSudeep Holla NULL, 646246246cbSSudeep Holla }; 647246246cbSSudeep Holla 648246246cbSSudeep Holla static const struct attribute_group *cache_private_groups[] = { 649246246cbSSudeep Holla &cache_default_group, 650246246cbSSudeep Holla NULL, /* Place holder for private group */ 651246246cbSSudeep Holla NULL, 652246246cbSSudeep Holla }; 653246246cbSSudeep Holla 654246246cbSSudeep Holla const struct attribute_group * 655246246cbSSudeep Holla __weak cache_get_priv_group(struct cacheinfo *this_leaf) 656246246cbSSudeep Holla { 657246246cbSSudeep Holla return NULL; 658246246cbSSudeep Holla } 659246246cbSSudeep Holla 660246246cbSSudeep Holla static const struct attribute_group ** 661246246cbSSudeep Holla cache_get_attribute_groups(struct cacheinfo *this_leaf) 662246246cbSSudeep Holla { 663246246cbSSudeep Holla const struct attribute_group *priv_group = 664246246cbSSudeep Holla cache_get_priv_group(this_leaf); 665246246cbSSudeep Holla 666246246cbSSudeep Holla if (!priv_group) 667246246cbSSudeep Holla return cache_default_groups; 668246246cbSSudeep Holla 669246246cbSSudeep Holla if (!cache_private_groups[1]) 670246246cbSSudeep Holla cache_private_groups[1] = priv_group; 671246246cbSSudeep Holla 672246246cbSSudeep Holla return cache_private_groups; 673246246cbSSudeep Holla } 674246246cbSSudeep Holla 675246246cbSSudeep Holla /* Add/Remove cache interface for CPU device */ 676246246cbSSudeep Holla static void cpu_cache_sysfs_exit(unsigned int cpu) 677246246cbSSudeep Holla { 678246246cbSSudeep Holla int i; 679246246cbSSudeep Holla struct device *ci_dev; 680246246cbSSudeep Holla 681246246cbSSudeep Holla if (per_cpu_index_dev(cpu)) { 682246246cbSSudeep Holla for (i = 0; i < cache_leaves(cpu); i++) { 683246246cbSSudeep Holla ci_dev = per_cache_index_dev(cpu, i); 684246246cbSSudeep Holla if (!ci_dev) 685246246cbSSudeep Holla continue; 686246246cbSSudeep Holla device_unregister(ci_dev); 687246246cbSSudeep Holla } 688246246cbSSudeep Holla kfree(per_cpu_index_dev(cpu)); 689246246cbSSudeep Holla per_cpu_index_dev(cpu) = NULL; 690246246cbSSudeep Holla } 691246246cbSSudeep Holla device_unregister(per_cpu_cache_dev(cpu)); 692246246cbSSudeep Holla per_cpu_cache_dev(cpu) = NULL; 693246246cbSSudeep Holla } 694246246cbSSudeep Holla 695246246cbSSudeep Holla static int cpu_cache_sysfs_init(unsigned int cpu) 696246246cbSSudeep Holla { 697246246cbSSudeep Holla struct device *dev = get_cpu_device(cpu); 698246246cbSSudeep Holla 699246246cbSSudeep Holla if (per_cpu_cacheinfo(cpu) == NULL) 700246246cbSSudeep Holla return -ENOENT; 701246246cbSSudeep Holla 702246246cbSSudeep Holla per_cpu_cache_dev(cpu) = cpu_device_create(dev, NULL, NULL, "cache"); 703246246cbSSudeep Holla if (IS_ERR(per_cpu_cache_dev(cpu))) 704246246cbSSudeep Holla return PTR_ERR(per_cpu_cache_dev(cpu)); 705246246cbSSudeep Holla 706246246cbSSudeep Holla /* Allocate all required memory */ 707246246cbSSudeep Holla per_cpu_index_dev(cpu) = kcalloc(cache_leaves(cpu), 708246246cbSSudeep Holla sizeof(struct device *), GFP_KERNEL); 709246246cbSSudeep Holla if (unlikely(per_cpu_index_dev(cpu) == NULL)) 710246246cbSSudeep Holla goto err_out; 711246246cbSSudeep Holla 712246246cbSSudeep Holla return 0; 713246246cbSSudeep Holla 714246246cbSSudeep Holla err_out: 715246246cbSSudeep Holla cpu_cache_sysfs_exit(cpu); 716246246cbSSudeep Holla return -ENOMEM; 717246246cbSSudeep Holla } 718246246cbSSudeep Holla 719246246cbSSudeep Holla static int cache_add_dev(unsigned int cpu) 720246246cbSSudeep Holla { 721246246cbSSudeep Holla unsigned int i; 722246246cbSSudeep Holla int rc; 723246246cbSSudeep Holla struct device *ci_dev, *parent; 724246246cbSSudeep Holla struct cacheinfo *this_leaf; 725246246cbSSudeep Holla const struct attribute_group **cache_groups; 726246246cbSSudeep Holla 727246246cbSSudeep Holla rc = cpu_cache_sysfs_init(cpu); 728246246cbSSudeep Holla if (unlikely(rc < 0)) 729246246cbSSudeep Holla return rc; 730246246cbSSudeep Holla 731246246cbSSudeep Holla parent = per_cpu_cache_dev(cpu); 732246246cbSSudeep Holla for (i = 0; i < cache_leaves(cpu); i++) { 733b14e8d21SSudeep Holla this_leaf = per_cpu_cacheinfo_idx(cpu, i); 734246246cbSSudeep Holla if (this_leaf->disable_sysfs) 735246246cbSSudeep Holla continue; 736ca388e43SJeffrey Hugo if (this_leaf->type == CACHE_TYPE_NOCACHE) 737ca388e43SJeffrey Hugo break; 738246246cbSSudeep Holla cache_groups = cache_get_attribute_groups(this_leaf); 739246246cbSSudeep Holla ci_dev = cpu_device_create(parent, this_leaf, cache_groups, 740246246cbSSudeep Holla "index%1u", i); 741246246cbSSudeep Holla if (IS_ERR(ci_dev)) { 742246246cbSSudeep Holla rc = PTR_ERR(ci_dev); 743246246cbSSudeep Holla goto err; 744246246cbSSudeep Holla } 745246246cbSSudeep Holla per_cache_index_dev(cpu, i) = ci_dev; 746246246cbSSudeep Holla } 747246246cbSSudeep Holla cpumask_set_cpu(cpu, &cache_dev_map); 748246246cbSSudeep Holla 749246246cbSSudeep Holla return 0; 750246246cbSSudeep Holla err: 751246246cbSSudeep Holla cpu_cache_sysfs_exit(cpu); 752246246cbSSudeep Holla return rc; 753246246cbSSudeep Holla } 754246246cbSSudeep Holla 7557cc277b4SSebastian Andrzej Siewior static int cacheinfo_cpu_online(unsigned int cpu) 756246246cbSSudeep Holla { 7577cc277b4SSebastian Andrzej Siewior int rc = detect_cache_attributes(cpu); 758246246cbSSudeep Holla 7597cc277b4SSebastian Andrzej Siewior if (rc) 7607cc277b4SSebastian Andrzej Siewior return rc; 761246246cbSSudeep Holla rc = cache_add_dev(cpu); 7627cc277b4SSebastian Andrzej Siewior if (rc) 763246246cbSSudeep Holla free_cache_attributes(cpu); 7647cc277b4SSebastian Andrzej Siewior return rc; 765246246cbSSudeep Holla } 7667cc277b4SSebastian Andrzej Siewior 7677cc277b4SSebastian Andrzej Siewior static int cacheinfo_cpu_pre_down(unsigned int cpu) 7687cc277b4SSebastian Andrzej Siewior { 7697cc277b4SSebastian Andrzej Siewior if (cpumask_test_and_clear_cpu(cpu, &cache_dev_map)) 7707cc277b4SSebastian Andrzej Siewior cpu_cache_sysfs_exit(cpu); 7717cc277b4SSebastian Andrzej Siewior 7727cc277b4SSebastian Andrzej Siewior free_cache_attributes(cpu); 7737cc277b4SSebastian Andrzej Siewior return 0; 774246246cbSSudeep Holla } 775246246cbSSudeep Holla 776246246cbSSudeep Holla static int __init cacheinfo_sysfs_init(void) 777246246cbSSudeep Holla { 77883b44fe3SJames Morse return cpuhp_setup_state(CPUHP_AP_BASE_CACHEINFO_ONLINE, 77983b44fe3SJames Morse "base/cacheinfo:online", 7807cc277b4SSebastian Andrzej Siewior cacheinfo_cpu_online, cacheinfo_cpu_pre_down); 781246246cbSSudeep Holla } 782246246cbSSudeep Holla device_initcall(cacheinfo_sysfs_init); 783