// SPDX-License-Identifier: GPL-2.0-only
/*
 * Processor cache information made available to userspace via sysfs;
 * intended to be compatible with x86 intel_cacheinfo implementation.
 *
 * Copyright 2008 IBM Corporation
 * Author: Nathan Lynch
 */

#define pr_fmt(fmt) "cacheinfo: " fmt

#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/kernel.h>
#include <linux/kobject.h>
#include <linux/list.h>
#include <linux/notifier.h>
#include <linux/of.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <asm/prom.h>
#include <asm/cputhreads.h>
#include <asm/smp.h>

#include "cacheinfo.h"

/* per-cpu object for tracking:
 * - a "cache" kobject for the top-level directory
 * - a list of "index" objects representing the cpu's local cache hierarchy
 */
struct cache_dir {
	struct kobject *kobj; /* bare (not embedded) kobject for cache
			       * directory */
	struct cache_index_dir *index; /* list of index objects */
};

/* "index" object: each cpu's cache directory has an index
 * subdirectory corresponding to a cache object associated with the
 * cpu.  This object's lifetime is managed via the embedded kobject.
 */
struct cache_index_dir {
	struct kobject kobj;
	struct cache_index_dir *next; /* next index in parent directory */
	struct cache *cache;
};

/* Template for determining which OF properties to query for a given
 * cache type */
struct cache_type_info {
	const char *name;
	const char *size_prop;

	/* Allow for both [di]-cache-line-size and
	 * [di]-cache-block-size properties.  According to the PowerPC
	 * Processor binding, -line-size should be provided if it
	 * differs from the cache block size (that which is operated
	 * on by cache instructions), so we look for -line-size first.
	 * See cache_get_line_size(). */

	const char *line_size_props[2];
	const char *nr_sets_prop;
};

/* These are used to index the cache_type_info array. */
#define CACHE_TYPE_UNIFIED     0 /* cache-size, cache-block-size, etc. */
#define CACHE_TYPE_UNIFIED_D   1 /* d-cache-size, d-cache-block-size, etc. */
#define CACHE_TYPE_INSTRUCTION 2
#define CACHE_TYPE_DATA        3

static const struct cache_type_info cache_type_info[] = {
	{
		/* Embedded systems that use cache-size, cache-block-size,
		 * etc. for the Unified (typically L2) cache. */
		.name            = "Unified",
		.size_prop       = "cache-size",
		.line_size_props = { "cache-line-size",
				     "cache-block-size", },
		.nr_sets_prop    = "cache-sets",
	},
	{
		/* PowerPC Processor binding says the [di]-cache-*
		 * must be equal on unified caches, so just use
		 * d-cache properties. */
		.name            = "Unified",
		.size_prop       = "d-cache-size",
		.line_size_props = { "d-cache-line-size",
				     "d-cache-block-size", },
		.nr_sets_prop    = "d-cache-sets",
	},
	{
		.name            = "Instruction",
		.size_prop       = "i-cache-size",
		.line_size_props = { "i-cache-line-size",
				     "i-cache-block-size", },
		.nr_sets_prop    = "i-cache-sets",
	},
	{
		.name            = "Data",
		.size_prop       = "d-cache-size",
		.line_size_props = { "d-cache-line-size",
				     "d-cache-block-size", },
		.nr_sets_prop    = "d-cache-sets",
	},
};
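/*
 * Illustrative only: a hypothetical device tree cpu node carrying the
 * split-L1 properties consumed via cache_type_info (property names match
 * the table above; the values are made up):
 *
 *	cpu@0 {
 *		device_type = "cpu";
 *		d-cache-size = <0x8000>;	// 32K data cache
 *		d-cache-block-size = <0x80>;	// 128-byte blocks
 *		d-cache-sets = <0x40>;
 *		i-cache-size = <0x8000>;
 *		i-cache-block-size = <0x80>;
 *		i-cache-sets = <0x40>;
 *	};
 *
 * A unified node (e.g. an L2) would instead carry "cache-unified" plus
 * either the cache-* or the d-cache-* variants of the same properties;
 * see cache_is_unified_d() below.
 */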
/* Cache object: each instance of this corresponds to a distinct cache
 * in the system.  There are separate objects for Harvard caches: one
 * each for instruction and data, and each refers to the same OF node.
 * The refcount of the OF node is elevated for the lifetime of the
 * cache object.  A cache object is released when its shared_cpu_map
 * is cleared (see cache_cpu_clear).
 *
 * A cache object is on two lists: an unsorted global list
 * (cache_list) of cache objects; and a singly-linked list
 * representing the local cache hierarchy, which is ordered by level
 * (e.g. L1d -> L1i -> L2 -> L3).
 */
struct cache {
	struct device_node *ofnode;    /* OF node for this cache, may be cpu */
	struct cpumask shared_cpu_map; /* online CPUs using this cache */
	int type;                      /* split cache disambiguation */
	int level;                     /* level not explicit in device tree */
	struct list_head list;         /* global list of cache objects */
	struct cache *next_local;      /* next cache of >= level */
};

static DEFINE_PER_CPU(struct cache_dir *, cache_dir_pcpu);

/* traversal/modification of this list occurs only at cpu hotplug time;
 * access is serialized by cpu hotplug locking
 */
static LIST_HEAD(cache_list);

static struct cache_index_dir *kobj_to_cache_index_dir(struct kobject *k)
{
	return container_of(k, struct cache_index_dir, kobj);
}

static const char *cache_type_string(const struct cache *cache)
{
	return cache_type_info[cache->type].name;
}

static void cache_init(struct cache *cache, int type, int level,
		       struct device_node *ofnode)
{
	cache->type = type;
	cache->level = level;
	cache->ofnode = of_node_get(ofnode);
	INIT_LIST_HEAD(&cache->list);
	list_add(&cache->list, &cache_list);
}

static struct cache *new_cache(int type, int level, struct device_node *ofnode)
{
	struct cache *cache;

	cache = kzalloc(sizeof(*cache), GFP_KERNEL);
	if (cache)
		cache_init(cache, type, level, ofnode);

	return cache;
}

static void release_cache_debugcheck(struct cache *cache)
{
	struct cache *iter;

	list_for_each_entry(iter, &cache_list, list)
		WARN_ONCE(iter->next_local == cache,
			  "cache for %pOFP(%s) refers to cache for %pOFP(%s)\n",
			  iter->ofnode,
			  cache_type_string(iter),
			  cache->ofnode,
			  cache_type_string(cache));
}

static void release_cache(struct cache *cache)
{
	if (!cache)
		return;

	pr_debug("freeing L%d %s cache for %pOFP\n", cache->level,
		 cache_type_string(cache), cache->ofnode);

	release_cache_debugcheck(cache);
	list_del(&cache->list);
	of_node_put(cache->ofnode);
	kfree(cache);
}

static void cache_cpu_set(struct cache *cache, int cpu)
{
	struct cache *next = cache;

	while (next) {
		WARN_ONCE(cpumask_test_cpu(cpu, &next->shared_cpu_map),
			  "CPU %i already accounted in %pOFP(%s)\n",
			  cpu, next->ofnode,
			  cache_type_string(next));
		cpumask_set_cpu(cpu, &next->shared_cpu_map);
		next = next->next_local;
	}
}

static int cache_size(const struct cache *cache, unsigned int *ret)
{
	const char *propname;
	const __be32 *cache_size;

	propname = cache_type_info[cache->type].size_prop;

	cache_size = of_get_property(cache->ofnode, propname, NULL);
	if (!cache_size)
		return -ENODEV;

	*ret = of_read_number(cache_size, 1);
	return 0;
}

static int cache_size_kb(const struct cache *cache, unsigned int *ret)
{
	unsigned int size;

	if (cache_size(cache, &size))
		return -ENODEV;

	*ret = size / 1024;
	return 0;
}
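/*
 * Worked example (values hypothetical): a node with
 * d-cache-size = <0x8000> makes cache_size() return 32768, so
 * cache_size_kb() reports 32 and the sysfs "size" attribute
 * (see size_show() below) prints "32K".
 */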
/* not cache_line_size() because that's a macro in include/linux/cache.h */
static int cache_get_line_size(const struct cache *cache, unsigned int *ret)
{
	const __be32 *line_size;
	int i, lim;

	lim = ARRAY_SIZE(cache_type_info[cache->type].line_size_props);

	for (i = 0; i < lim; i++) {
		const char *propname;

		propname = cache_type_info[cache->type].line_size_props[i];
		line_size = of_get_property(cache->ofnode, propname, NULL);
		if (line_size)
			break;
	}

	if (!line_size)
		return -ENODEV;

	*ret = of_read_number(line_size, 1);
	return 0;
}

static int cache_nr_sets(const struct cache *cache, unsigned int *ret)
{
	const char *propname;
	const __be32 *nr_sets;

	propname = cache_type_info[cache->type].nr_sets_prop;

	nr_sets = of_get_property(cache->ofnode, propname, NULL);
	if (!nr_sets)
		return -ENODEV;

	*ret = of_read_number(nr_sets, 1);
	return 0;
}

static int cache_associativity(const struct cache *cache, unsigned int *ret)
{
	unsigned int line_size;
	unsigned int nr_sets;
	unsigned int size;

	if (cache_nr_sets(cache, &nr_sets))
		goto err;

	/* If the cache is fully associative, there is no need to
	 * check the other properties.
	 */
	if (nr_sets == 1) {
		*ret = 0;
		return 0;
	}

	if (cache_get_line_size(cache, &line_size))
		goto err;
	if (cache_size(cache, &size))
		goto err;

	if (!(nr_sets > 0 && size > 0 && line_size > 0))
		goto err;

	*ret = (size / nr_sets) / line_size;
	return 0;
err:
	return -ENODEV;
}
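/*
 * Worked example for cache_associativity() (hypothetical numbers):
 * with size = 32768, nr_sets = 32 and line_size = 128, the number of
 * ways reported is (32768 / 32) / 128 = 8.  A fully associative cache
 * is expected to carry nr_sets == 1 and is reported as 0 ways.
 */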
/* helper for dealing with split caches */
static struct cache *cache_find_first_sibling(struct cache *cache)
{
	struct cache *iter;

	if (cache->type == CACHE_TYPE_UNIFIED ||
	    cache->type == CACHE_TYPE_UNIFIED_D)
		return cache;

	list_for_each_entry(iter, &cache_list, list)
		if (iter->ofnode == cache->ofnode && iter->next_local == cache)
			return iter;

	return cache;
}

/* return the first cache on a local list matching node */
static struct cache *cache_lookup_by_node(const struct device_node *node)
{
	struct cache *cache = NULL;
	struct cache *iter;

	list_for_each_entry(iter, &cache_list, list) {
		if (iter->ofnode != node)
			continue;
		cache = cache_find_first_sibling(iter);
		break;
	}

	return cache;
}

static bool cache_node_is_unified(const struct device_node *np)
{
	return of_get_property(np, "cache-unified", NULL);
}

/*
 * Unified caches can have two different sets of tags.  Most embedded
 * systems use cache-size, etc. for the unified cache size, but open
 * firmware systems use d-cache-size, etc.  Check on initialization for
 * which type we have, and return the appropriate structure type.  Assume
 * it's embedded if it isn't open firmware.  If it's yet a 3rd type, then
 * there will be missing entries in
 * /sys/devices/system/cpu/cpu0/cache/index2/, and this code will need
 * to be extended further.
 */
static int cache_is_unified_d(const struct device_node *np)
{
	return of_get_property(np,
		cache_type_info[CACHE_TYPE_UNIFIED_D].size_prop, NULL) ?
		CACHE_TYPE_UNIFIED_D : CACHE_TYPE_UNIFIED;
}

static struct cache *cache_do_one_devnode_unified(struct device_node *node, int level)
{
	pr_debug("creating L%d ucache for %pOFP\n", level, node);

	return new_cache(cache_is_unified_d(node), level, node);
}

static struct cache *cache_do_one_devnode_split(struct device_node *node,
						int level)
{
	struct cache *dcache, *icache;

	pr_debug("creating L%d dcache and icache for %pOFP\n", level,
		 node);

	dcache = new_cache(CACHE_TYPE_DATA, level, node);
	icache = new_cache(CACHE_TYPE_INSTRUCTION, level, node);

	if (!dcache || !icache)
		goto err;

	dcache->next_local = icache;

	return dcache;
err:
	release_cache(dcache);
	release_cache(icache);
	return NULL;
}

static struct cache *cache_do_one_devnode(struct device_node *node, int level)
{
	struct cache *cache;

	if (cache_node_is_unified(node))
		cache = cache_do_one_devnode_unified(node, level);
	else
		cache = cache_do_one_devnode_split(node, level);

	return cache;
}

static struct cache *cache_lookup_or_instantiate(struct device_node *node,
						 int level)
{
	struct cache *cache;

	cache = cache_lookup_by_node(node);

	WARN_ONCE(cache && cache->level != level,
		  "cache level mismatch on lookup (got %d, expected %d)\n",
		  cache->level, level);

	if (!cache)
		cache = cache_do_one_devnode(node, level);

	return cache;
}

static void link_cache_lists(struct cache *smaller, struct cache *bigger)
{
	while (smaller->next_local) {
		if (smaller->next_local == bigger)
			return; /* already linked */
		smaller = smaller->next_local;
	}

	smaller->next_local = bigger;

	/*
	 * The cache->next_local list sorts by level ascending:
	 * L1d -> L1i -> L2 -> L3 ...
	 */
	WARN_ONCE((smaller->level == 1 && bigger->level > 2) ||
		  (smaller->level > 1 && bigger->level != smaller->level + 1),
		  "linking L%i cache %pOFP to L%i cache %pOFP; skipped a level?\n",
		  smaller->level, smaller->ofnode, bigger->level, bigger->ofnode);
}
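/*
 * Illustrative only: for a CPU node with split L1 caches and unified L2
 * and L3 caches, cache_do_one_devnode_split() ties L1d to L1i, and each
 * call to link_cache_lists() from do_subsidiary_caches() below appends
 * the next level, giving a local chain of
 *
 *	L1d -> L1i -> L2 -> L3
 *
 * where L1d and L1i share the cpu node and the L2/L3 objects refer to
 * their own cache nodes (typically reached via an l2-cache or
 * next-level-cache phandle, which is what of_find_next_cache_node()
 * follows).
 */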
static void do_subsidiary_caches_debugcheck(struct cache *cache)
{
	WARN_ONCE(cache->level != 1,
		  "instantiating cache chain from L%d %s cache for "
		  "%pOFP instead of an L1\n", cache->level,
		  cache_type_string(cache), cache->ofnode);
	WARN_ONCE(!of_node_is_type(cache->ofnode, "cpu"),
		  "instantiating cache chain from node %pOFP of type '%s' "
		  "instead of a cpu node\n", cache->ofnode,
		  of_node_get_device_type(cache->ofnode));
}

static void do_subsidiary_caches(struct cache *cache)
{
	struct device_node *subcache_node;
	int level = cache->level;

	do_subsidiary_caches_debugcheck(cache);

	while ((subcache_node = of_find_next_cache_node(cache->ofnode))) {
		struct cache *subcache;

		level++;
		subcache = cache_lookup_or_instantiate(subcache_node, level);
		of_node_put(subcache_node);
		if (!subcache)
			break;

		link_cache_lists(cache, subcache);
		cache = subcache;
	}
}

static struct cache *cache_chain_instantiate(unsigned int cpu_id)
{
	struct device_node *cpu_node;
	struct cache *cpu_cache = NULL;

	pr_debug("creating cache object(s) for CPU %i\n", cpu_id);

	cpu_node = of_get_cpu_node(cpu_id, NULL);
	WARN_ONCE(!cpu_node, "no OF node found for CPU %i\n", cpu_id);
	if (!cpu_node)
		goto out;

	cpu_cache = cache_lookup_or_instantiate(cpu_node, 1);
	if (!cpu_cache)
		goto out;

	do_subsidiary_caches(cpu_cache);

	cache_cpu_set(cpu_cache, cpu_id);
out:
	of_node_put(cpu_node);

	return cpu_cache;
}

static struct cache_dir *cacheinfo_create_cache_dir(unsigned int cpu_id)
{
	struct cache_dir *cache_dir;
	struct device *dev;
	struct kobject *kobj = NULL;

	dev = get_cpu_device(cpu_id);
	WARN_ONCE(!dev, "no dev for CPU %i\n", cpu_id);
	if (!dev)
		goto err;

	kobj = kobject_create_and_add("cache", &dev->kobj);
	if (!kobj)
		goto err;

	cache_dir = kzalloc(sizeof(*cache_dir), GFP_KERNEL);
	if (!cache_dir)
		goto err;

	cache_dir->kobj = kobj;

	WARN_ON_ONCE(per_cpu(cache_dir_pcpu, cpu_id) != NULL);

	per_cpu(cache_dir_pcpu, cpu_id) = cache_dir;

	return cache_dir;
err:
	kobject_put(kobj);
	return NULL;
}

static void cache_index_release(struct kobject *kobj)
{
	struct cache_index_dir *index;

	index = kobj_to_cache_index_dir(kobj);

	pr_debug("freeing index directory for L%d %s cache\n",
		 index->cache->level, cache_type_string(index->cache));

	kfree(index);
}

static ssize_t cache_index_show(struct kobject *k, struct attribute *attr, char *buf)
{
	struct kobj_attribute *kobj_attr;

	kobj_attr = container_of(attr, struct kobj_attribute, attr);

	return kobj_attr->show(k, kobj_attr, buf);
}

static struct cache *index_kobj_to_cache(struct kobject *k)
{
	struct cache_index_dir *index;

	index = kobj_to_cache_index_dir(k);

	return index->cache;
}
static ssize_t size_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
{
	unsigned int size_kb;
	struct cache *cache;

	cache = index_kobj_to_cache(k);

	if (cache_size_kb(cache, &size_kb))
		return -ENODEV;

	return sprintf(buf, "%uK\n", size_kb);
}

static struct kobj_attribute cache_size_attr =
	__ATTR(size, 0444, size_show, NULL);

static ssize_t line_size_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
{
	unsigned int line_size;
	struct cache *cache;

	cache = index_kobj_to_cache(k);

	if (cache_get_line_size(cache, &line_size))
		return -ENODEV;

	return sprintf(buf, "%u\n", line_size);
}

static struct kobj_attribute cache_line_size_attr =
	__ATTR(coherency_line_size, 0444, line_size_show, NULL);

static ssize_t nr_sets_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
{
	unsigned int nr_sets;
	struct cache *cache;

	cache = index_kobj_to_cache(k);

	if (cache_nr_sets(cache, &nr_sets))
		return -ENODEV;

	return sprintf(buf, "%u\n", nr_sets);
}

static struct kobj_attribute cache_nr_sets_attr =
	__ATTR(number_of_sets, 0444, nr_sets_show, NULL);

static ssize_t associativity_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
{
	unsigned int associativity;
	struct cache *cache;

	cache = index_kobj_to_cache(k);

	if (cache_associativity(cache, &associativity))
		return -ENODEV;

	return sprintf(buf, "%u\n", associativity);
}

static struct kobj_attribute cache_assoc_attr =
	__ATTR(ways_of_associativity, 0444, associativity_show, NULL);

static ssize_t type_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
{
	struct cache *cache;

	cache = index_kobj_to_cache(k);

	return sprintf(buf, "%s\n", cache_type_string(cache));
}

static struct kobj_attribute cache_type_attr =
	__ATTR(type, 0444, type_show, NULL);

static ssize_t level_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
{
	struct cache_index_dir *index;
	struct cache *cache;

	index = kobj_to_cache_index_dir(k);
	cache = index->cache;

	return sprintf(buf, "%d\n", cache->level);
}

static struct kobj_attribute cache_level_attr =
	__ATTR(level, 0444, level_show, NULL);

static unsigned int index_dir_to_cpu(struct cache_index_dir *index)
{
	struct kobject *index_dir_kobj = &index->kobj;
	struct kobject *cache_dir_kobj = index_dir_kobj->parent;
	struct kobject *cpu_dev_kobj = cache_dir_kobj->parent;
	struct device *dev = kobj_to_dev(cpu_dev_kobj);

	return dev->id;
}

/*
 * On big-core systems, each core has two groups of CPUs each of which
 * has its own L1-cache.  The thread-siblings which share l1-cache with
 * @cpu can be obtained via cpu_smallcore_mask().
 *
 * On some big-core systems, the L2 cache is shared only between some
 * groups of siblings.  This is already parsed and encoded in
 * cpu_l2_cache_mask().
 *
 * TODO: cache_lookup_or_instantiate() needs to be made aware of the
 *       "ibm,thread-groups" property so that cache->shared_cpu_map
 *       reflects the correct siblings on platforms that have this
 *       device-tree property.  This helper function is only a stop-gap
 *       solution so that we report the correct siblings to the
 *       userspace via sysfs.
 */
static const struct cpumask *get_shared_cpu_map(struct cache_index_dir *index, struct cache *cache)
{
	if (has_big_cores) {
		int cpu = index_dir_to_cpu(index);
		if (cache->level == 1)
			return cpu_smallcore_mask(cpu);
		if (cache->level == 2 && thread_group_shares_l2)
			return cpu_l2_cache_mask(cpu);
	}

	return &cache->shared_cpu_map;
}
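/*
 * Example (hypothetical topology): on a big-core system where one SMT8
 * core is presented as two SMT4 "small cores", the L1 index directories
 * for CPU 0 report only the threads of CPU 0's small core
 * (cpu_smallcore_mask()), rather than every thread recorded in
 * cache->shared_cpu_map; likewise, with thread_group_shares_l2 set,
 * the L2 index reports cpu_l2_cache_mask().
 */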
static ssize_t
show_shared_cpumap(struct kobject *k, struct kobj_attribute *attr, char *buf, bool list)
{
	struct cache_index_dir *index;
	struct cache *cache;
	const struct cpumask *mask;

	index = kobj_to_cache_index_dir(k);
	cache = index->cache;

	mask = get_shared_cpu_map(index, cache);

	return cpumap_print_to_pagebuf(list, buf, mask);
}

static ssize_t shared_cpu_map_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
{
	return show_shared_cpumap(k, attr, buf, false);
}

static ssize_t shared_cpu_list_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
{
	return show_shared_cpumap(k, attr, buf, true);
}

static struct kobj_attribute cache_shared_cpu_map_attr =
	__ATTR(shared_cpu_map, 0444, shared_cpu_map_show, NULL);

static struct kobj_attribute cache_shared_cpu_list_attr =
	__ATTR(shared_cpu_list, 0444, shared_cpu_list_show, NULL);

/* Attributes which should always be created -- the kobject/sysfs core
 * does this automatically via kobj_type->default_attrs.  This is the
 * minimum data required to uniquely identify a cache.
 */
static struct attribute *cache_index_default_attrs[] = {
	&cache_type_attr.attr,
	&cache_level_attr.attr,
	&cache_shared_cpu_map_attr.attr,
	&cache_shared_cpu_list_attr.attr,
	NULL,
};

/* Attributes which should be created if the cache device node has the
 * right properties -- see cacheinfo_create_index_opt_attrs
 */
static struct kobj_attribute *cache_index_opt_attrs[] = {
	&cache_size_attr,
	&cache_line_size_attr,
	&cache_nr_sets_attr,
	&cache_assoc_attr,
};

static const struct sysfs_ops cache_index_ops = {
	.show = cache_index_show,
};

static struct kobj_type cache_index_type = {
	.release = cache_index_release,
	.sysfs_ops = &cache_index_ops,
	.default_attrs = cache_index_default_attrs,
};
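/*
 * Resulting sysfs layout, sketched for a hypothetical cpu0 with a split
 * L1 and a unified L2 (directory names come from
 * cacheinfo_create_cache_dir() above and cacheinfo_create_index_dir()
 * below; the optional files appear only when the corresponding device
 * tree properties exist):
 *
 *	/sys/devices/system/cpu/cpu0/cache/
 *		index0/		(L1 Data)
 *		index1/		(L1 Instruction)
 *		index2/		(L2 Unified)
 *			type level shared_cpu_map shared_cpu_list
 *			size coherency_line_size number_of_sets
 *			ways_of_associativity
 */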
static void cacheinfo_create_index_opt_attrs(struct cache_index_dir *dir)
{
	const char *cache_type;
	struct cache *cache;
	char *buf;
	int i;

	buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!buf)
		return;

	cache = dir->cache;
	cache_type = cache_type_string(cache);

	/* We don't want to create an attribute that can't provide a
	 * meaningful value.  Check the return value of each optional
	 * attribute's ->show method before registering the
	 * attribute.
	 */
	for (i = 0; i < ARRAY_SIZE(cache_index_opt_attrs); i++) {
		struct kobj_attribute *attr;
		ssize_t rc;

		attr = cache_index_opt_attrs[i];

		rc = attr->show(&dir->kobj, attr, buf);
		if (rc <= 0) {
			pr_debug("not creating %s attribute for "
				 "%pOFP(%s) (rc = %zd)\n",
				 attr->attr.name, cache->ofnode,
				 cache_type, rc);
			continue;
		}
		if (sysfs_create_file(&dir->kobj, &attr->attr))
			pr_debug("could not create %s attribute for %pOFP(%s)\n",
				 attr->attr.name, cache->ofnode, cache_type);
	}

	kfree(buf);
}

static void cacheinfo_create_index_dir(struct cache *cache, int index,
				       struct cache_dir *cache_dir)
{
	struct cache_index_dir *index_dir;
	int rc;

	index_dir = kzalloc(sizeof(*index_dir), GFP_KERNEL);
	if (!index_dir)
		return;

	index_dir->cache = cache;

	rc = kobject_init_and_add(&index_dir->kobj, &cache_index_type,
				  cache_dir->kobj, "index%d", index);
	if (rc) {
		kobject_put(&index_dir->kobj);
		return;
	}

	index_dir->next = cache_dir->index;
	cache_dir->index = index_dir;

	cacheinfo_create_index_opt_attrs(index_dir);
}

static void cacheinfo_sysfs_populate(unsigned int cpu_id,
				     struct cache *cache_list)
{
	struct cache_dir *cache_dir;
	struct cache *cache;
	int index = 0;

	cache_dir = cacheinfo_create_cache_dir(cpu_id);
	if (!cache_dir)
		return;

	cache = cache_list;
	while (cache) {
		cacheinfo_create_index_dir(cache, index, cache_dir);
		index++;
		cache = cache->next_local;
	}
}

void cacheinfo_cpu_online(unsigned int cpu_id)
{
	struct cache *cache;

	cache = cache_chain_instantiate(cpu_id);
	if (!cache)
		return;

	cacheinfo_sysfs_populate(cpu_id, cache);
}

/* functions needed to remove cache entry for cpu offline or suspend/resume */

#if (defined(CONFIG_PPC_PSERIES) && defined(CONFIG_SUSPEND)) || \
	defined(CONFIG_HOTPLUG_CPU)

static struct cache *cache_lookup_by_cpu(unsigned int cpu_id)
{
	struct device_node *cpu_node;
	struct cache *cache;

	cpu_node = of_get_cpu_node(cpu_id, NULL);
	WARN_ONCE(!cpu_node, "no OF node found for CPU %i\n", cpu_id);
	if (!cpu_node)
		return NULL;

	cache = cache_lookup_by_node(cpu_node);
	of_node_put(cpu_node);

	return cache;
}

static void remove_index_dirs(struct cache_dir *cache_dir)
{
	struct cache_index_dir *index;

	index = cache_dir->index;

	while (index) {
		struct cache_index_dir *next;

		next = index->next;
		kobject_put(&index->kobj);
		index = next;
	}
}

static void remove_cache_dir(struct cache_dir *cache_dir)
{
	remove_index_dirs(cache_dir);

	/* Remove cache dir from sysfs */
	kobject_del(cache_dir->kobj);

	kobject_put(cache_dir->kobj);

	kfree(cache_dir);
}

static void cache_cpu_clear(struct cache *cache, int cpu)
{
	while (cache) {
		struct cache *next = cache->next_local;

		WARN_ONCE(!cpumask_test_cpu(cpu, &cache->shared_cpu_map),
			  "CPU %i not accounted in %pOFP(%s)\n",
			  cpu, cache->ofnode,
			  cache_type_string(cache));

		cpumask_clear_cpu(cpu, &cache->shared_cpu_map);

		/* Release the cache object if all the cpus using it
		 * are offline */
		if (cpumask_empty(&cache->shared_cpu_map))
			release_cache(cache);

		cache = next;
	}
}
void cacheinfo_cpu_offline(unsigned int cpu_id)
{
	struct cache_dir *cache_dir;
	struct cache *cache;

	/* Prevent userspace from seeing inconsistent state - remove
	 * the sysfs hierarchy first */
	cache_dir = per_cpu(cache_dir_pcpu, cpu_id);

	/* careful, sysfs population may have failed */
	if (cache_dir)
		remove_cache_dir(cache_dir);

	per_cpu(cache_dir_pcpu, cpu_id) = NULL;

	/* clear the CPU's bit in its cache chain, possibly freeing
	 * cache objects */
	cache = cache_lookup_by_cpu(cpu_id);
	if (cache)
		cache_cpu_clear(cache, cpu_id);
}

void cacheinfo_teardown(void)
{
	unsigned int cpu;

	lockdep_assert_cpus_held();

	for_each_online_cpu(cpu)
		cacheinfo_cpu_offline(cpu);
}

void cacheinfo_rebuild(void)
{
	unsigned int cpu;

	lockdep_assert_cpus_held();

	for_each_online_cpu(cpu)
		cacheinfo_cpu_online(cpu);
}

#endif /* (CONFIG_PPC_PSERIES && CONFIG_SUSPEND) || CONFIG_HOTPLUG_CPU */