// SPDX-License-Identifier: GPL-2.0
/*
 * cacheinfo support - processor cache information via sysfs
 *
 * Based on arch/x86/kernel/cpu/intel_cacheinfo.c
 * Author: Sudeep Holla <sudeep.holla@arm.com>
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/acpi.h>
#include <linux/bitops.h>
#include <linux/cacheinfo.h>
#include <linux/compiler.h>
#include <linux/cpu.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/of.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/sysfs.h>

/* pointer to per cpu cacheinfo */
static DEFINE_PER_CPU(struct cpu_cacheinfo, ci_cpu_cacheinfo);
#define ci_cacheinfo(cpu)	(&per_cpu(ci_cpu_cacheinfo, cpu))
#define cache_leaves(cpu)	(ci_cacheinfo(cpu)->num_leaves)
#define per_cpu_cacheinfo(cpu)	(ci_cacheinfo(cpu)->info_list)

struct cpu_cacheinfo *get_cpu_cacheinfo(unsigned int cpu)
{
	return ci_cacheinfo(cpu);
}

#ifdef CONFIG_OF
static inline bool cache_leaves_are_shared(struct cacheinfo *this_leaf,
					   struct cacheinfo *sib_leaf)
{
	return sib_leaf->fw_token == this_leaf->fw_token;
}

/* OF properties to query for a given cache type */
struct cache_type_info {
	const char *size_prop;
	const char *line_size_props[2];
	const char *nr_sets_prop;
};

static const struct cache_type_info cache_type_info[] = {
	{
		.size_prop       = "cache-size",
		.line_size_props = { "cache-line-size",
				     "cache-block-size", },
		.nr_sets_prop    = "cache-sets",
	}, {
		.size_prop       = "i-cache-size",
		.line_size_props = { "i-cache-line-size",
				     "i-cache-block-size", },
		.nr_sets_prop    = "i-cache-sets",
	}, {
		.size_prop       = "d-cache-size",
		.line_size_props = { "d-cache-line-size",
				     "d-cache-block-size", },
		.nr_sets_prop    = "d-cache-sets",
	},
};

static inline int get_cacheinfo_idx(enum cache_type type)
{
	if (type == CACHE_TYPE_UNIFIED)
		return 0;
	return type;
}

static void cache_size(struct cacheinfo *this_leaf, struct device_node *np)
{
	const char *propname;
	int ct_idx;

	ct_idx = get_cacheinfo_idx(this_leaf->type);
	propname = cache_type_info[ct_idx].size_prop;

	if (of_property_read_u32(np, propname, &this_leaf->size))
		this_leaf->size = 0;
}

/* not cache_line_size() because that's a macro in include/linux/cache.h */
static void cache_get_line_size(struct cacheinfo *this_leaf,
				struct device_node *np)
{
	int i, lim, ct_idx;

	ct_idx = get_cacheinfo_idx(this_leaf->type);
	lim = ARRAY_SIZE(cache_type_info[ct_idx].line_size_props);

	for (i = 0; i < lim; i++) {
		int ret;
		u32 line_size;
		const char *propname;

		propname = cache_type_info[ct_idx].line_size_props[i];
		ret = of_property_read_u32(np, propname, &line_size);
		if (!ret) {
			this_leaf->coherency_line_size = line_size;
			break;
		}
	}
}

static void cache_nr_sets(struct cacheinfo *this_leaf, struct device_node *np)
{
	const char *propname;
	int ct_idx;

	ct_idx = get_cacheinfo_idx(this_leaf->type);
	propname = cache_type_info[ct_idx].nr_sets_prop;

	if (of_property_read_u32(np, propname, &this_leaf->number_of_sets))
		this_leaf->number_of_sets = 0;
}
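
/*
 * The helpers above parse the standard devicetree cache properties. As
 * a purely illustrative sketch (node name and values are made up), a
 * unified L2 cache node using those properties might look like:
 *
 *	l2: l2-cache {
 *		compatible = "cache";
 *		cache-unified;
 *		cache-size = <0x80000>;		// 512 KiB
 *		cache-line-size = <64>;		// bytes
 *		cache-sets = <512>;
 *	};
 *
 * With these numbers, cache_associativity() below computes
 * (0x80000 / 512) / 64 = 16 ways of associativity.
 */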
static void cache_associativity(struct cacheinfo *this_leaf)
{
	unsigned int line_size = this_leaf->coherency_line_size;
	unsigned int nr_sets = this_leaf->number_of_sets;
	unsigned int size = this_leaf->size;

	/*
	 * If the cache is fully associative, there is no need to
	 * check the other properties.
	 */
	if (nr_sets > 1 && size > 0 && line_size > 0)
		this_leaf->ways_of_associativity = (size / nr_sets) / line_size;
}

static bool cache_node_is_unified(struct cacheinfo *this_leaf,
				  struct device_node *np)
{
	return of_property_read_bool(np, "cache-unified");
}

static void cache_of_set_props(struct cacheinfo *this_leaf,
			       struct device_node *np)
{
	/*
	 * init_cache_level must setup the cache level correctly
	 * overriding the architecturally specified levels, so
	 * if type is NONE at this stage, it should be unified
	 */
	if (this_leaf->type == CACHE_TYPE_NOCACHE &&
	    cache_node_is_unified(this_leaf, np))
		this_leaf->type = CACHE_TYPE_UNIFIED;
	cache_size(this_leaf, np);
	cache_get_line_size(this_leaf, np);
	cache_nr_sets(this_leaf, np);
	cache_associativity(this_leaf);
}

static int cache_setup_of_node(unsigned int cpu)
{
	struct device_node *np;
	struct cacheinfo *this_leaf;
	struct device *cpu_dev = get_cpu_device(cpu);
	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
	unsigned int index = 0;

	/* skip if fw_token is already populated */
	if (this_cpu_ci->info_list->fw_token)
		return 0;

	if (!cpu_dev) {
		pr_err("No cpu device for CPU %d\n", cpu);
		return -ENODEV;
	}
	np = cpu_dev->of_node;
	if (!np) {
		pr_err("Failed to find cpu%d device node\n", cpu);
		return -ENOENT;
	}

	while (index < cache_leaves(cpu)) {
		this_leaf = this_cpu_ci->info_list + index;
		if (this_leaf->level != 1)
			np = of_find_next_cache_node(np);
		else
			np = of_node_get(np);	/* cpu node itself */
		if (!np)
			break;
		cache_of_set_props(this_leaf, np);
		this_leaf->fw_token = np;
		index++;
	}

	if (index != cache_leaves(cpu)) /* not all OF nodes populated */
		return -ENOENT;

	return 0;
}
#else
static inline int cache_setup_of_node(unsigned int cpu) { return 0; }
static inline bool cache_leaves_are_shared(struct cacheinfo *this_leaf,
					   struct cacheinfo *sib_leaf)
{
	/*
	 * For non-DT/ACPI systems, assume unique level 1 caches,
	 * system-wide shared caches for all other levels. This will
	 * be used only if arch specific code has not populated
	 * shared_cpu_map.
	 */
	return this_leaf->level != 1;
}
#endif
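
/*
 * Weak default for the ACPI path: architectures with ACPI cache
 * topology support are expected to provide a real implementation
 * (in current kernels this typically comes from the ACPI PPTT
 * parsing code). The -ENOTSUPP return propagates out of
 * cache_shared_cpu_map_setup() as an error.
 */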
int __weak cache_setup_acpi(unsigned int cpu)
{
	return -ENOTSUPP;
}

static int cache_shared_cpu_map_setup(unsigned int cpu)
{
	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
	struct cacheinfo *this_leaf, *sib_leaf;
	unsigned int index;
	int ret = 0;

	if (this_cpu_ci->cpu_map_populated)
		return 0;

	if (of_have_populated_dt())
		ret = cache_setup_of_node(cpu);
	else if (!acpi_disabled)
		ret = cache_setup_acpi(cpu);

	if (ret)
		return ret;

	for (index = 0; index < cache_leaves(cpu); index++) {
		unsigned int i;

		this_leaf = this_cpu_ci->info_list + index;
		/* skip if shared_cpu_map is already populated */
		if (!cpumask_empty(&this_leaf->shared_cpu_map))
			continue;

		cpumask_set_cpu(cpu, &this_leaf->shared_cpu_map);
		for_each_online_cpu(i) {
			struct cpu_cacheinfo *sib_cpu_ci = get_cpu_cacheinfo(i);

			if (i == cpu || !sib_cpu_ci->info_list)
				continue;	/* skip if itself or no cacheinfo */
			sib_leaf = sib_cpu_ci->info_list + index;
			if (cache_leaves_are_shared(this_leaf, sib_leaf)) {
				cpumask_set_cpu(cpu, &sib_leaf->shared_cpu_map);
				cpumask_set_cpu(i, &this_leaf->shared_cpu_map);
			}
		}
	}

	return 0;
}

static void cache_shared_cpu_map_remove(unsigned int cpu)
{
	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
	struct cacheinfo *this_leaf, *sib_leaf;
	unsigned int sibling, index;

	for (index = 0; index < cache_leaves(cpu); index++) {
		this_leaf = this_cpu_ci->info_list + index;
		for_each_cpu(sibling, &this_leaf->shared_cpu_map) {
			struct cpu_cacheinfo *sib_cpu_ci;

			if (sibling == cpu) /* skip itself */
				continue;

			sib_cpu_ci = get_cpu_cacheinfo(sibling);
			if (!sib_cpu_ci->info_list)
				continue;

			sib_leaf = sib_cpu_ci->info_list + index;
			cpumask_clear_cpu(cpu, &sib_leaf->shared_cpu_map);
			cpumask_clear_cpu(sibling, &this_leaf->shared_cpu_map);
		}
		if (of_have_populated_dt())
			of_node_put(this_leaf->fw_token);
	}
}

static void free_cache_attributes(unsigned int cpu)
{
	if (!per_cpu_cacheinfo(cpu))
		return;

	cache_shared_cpu_map_remove(cpu);

	kfree(per_cpu_cacheinfo(cpu));
	per_cpu_cacheinfo(cpu) = NULL;
}

int __weak init_cache_level(unsigned int cpu)
{
	return -ENOENT;
}

int __weak populate_cache_leaves(unsigned int cpu)
{
	return -ENOENT;
}
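
/*
 * The weak hooks above are the per-architecture entry points. As a
 * rough, hypothetical sketch (not any real architecture's code), an
 * init_cache_level() override could look like:
 *
 *	int init_cache_level(unsigned int cpu)
 *	{
 *		struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
 *
 *		this_cpu_ci->num_levels = 2;
 *		this_cpu_ci->num_leaves = 3;	// L1I + L1D + unified L2
 *		return 0;
 *	}
 *
 * populate_cache_leaves() would then fill each entry of info_list with
 * at least the level and type; the generic code below fills in the rest
 * from DT or ACPI where it can.
 */
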
static int detect_cache_attributes(unsigned int cpu)
{
	int ret;

	if (init_cache_level(cpu) || !cache_leaves(cpu))
		return -ENOENT;

	per_cpu_cacheinfo(cpu) = kcalloc(cache_leaves(cpu),
					 sizeof(struct cacheinfo), GFP_KERNEL);
	if (per_cpu_cacheinfo(cpu) == NULL)
		return -ENOMEM;

	/*
	 * populate_cache_leaves() may completely set up the cache leaves
	 * and shared_cpu_map, or it may leave them partially set up.
	 */
	ret = populate_cache_leaves(cpu);
	if (ret)
		goto free_ci;
	/*
	 * For systems using DT for cache hierarchy, fw_token
	 * and shared_cpu_map will be set up here only if they are
	 * not populated already
	 */
	ret = cache_shared_cpu_map_setup(cpu);
	if (ret) {
		pr_warn("Unable to detect cache hierarchy for CPU %d\n", cpu);
		goto free_ci;
	}

	return 0;

free_ci:
	free_cache_attributes(cpu);
	return ret;
}
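
/*
 * Everything below implements the sysfs interface. The resulting
 * layout (illustrative, for a CPU with split L1 caches plus a unified
 * L2) is:
 *
 *	/sys/devices/system/cpu/cpuX/cache/index0/	L1 data
 *	/sys/devices/system/cpu/cpuX/cache/index1/	L1 instruction
 *	/sys/devices/system/cpu/cpuX/cache/index2/	L2 unified
 *
 * where each indexN directory exposes the attributes defined below
 * (type, level, size, shared_cpu_map, ...), subject to the visibility
 * rules in cache_default_attrs_is_visible().
 */
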
/* pointer to cpuX/cache device */
static DEFINE_PER_CPU(struct device *, ci_cache_dev);
#define per_cpu_cache_dev(cpu)	(per_cpu(ci_cache_dev, cpu))

static cpumask_t cache_dev_map;

/* pointer to array of devices for cpuX/cache/indexY */
static DEFINE_PER_CPU(struct device **, ci_index_dev);
#define per_cpu_index_dev(cpu)	(per_cpu(ci_index_dev, cpu))
#define per_cache_index_dev(cpu, idx)	((per_cpu_index_dev(cpu))[idx])

#define show_one(file_name, object)				\
static ssize_t file_name##_show(struct device *dev,		\
		struct device_attribute *attr, char *buf)	\
{								\
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);	\
	return sprintf(buf, "%u\n", this_leaf->object);		\
}

show_one(id, id);
show_one(level, level);
show_one(coherency_line_size, coherency_line_size);
show_one(number_of_sets, number_of_sets);
show_one(physical_line_partition, physical_line_partition);
show_one(ways_of_associativity, ways_of_associativity);

static ssize_t size_show(struct device *dev,
			 struct device_attribute *attr, char *buf)
{
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);

	return sprintf(buf, "%uK\n", this_leaf->size >> 10);
}

static ssize_t shared_cpumap_show_func(struct device *dev, bool list, char *buf)
{
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);
	const struct cpumask *mask = &this_leaf->shared_cpu_map;

	return cpumap_print_to_pagebuf(list, buf, mask);
}

static ssize_t shared_cpu_map_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	return shared_cpumap_show_func(dev, false, buf);
}

static ssize_t shared_cpu_list_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	return shared_cpumap_show_func(dev, true, buf);
}

static ssize_t type_show(struct device *dev,
			 struct device_attribute *attr, char *buf)
{
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);

	switch (this_leaf->type) {
	case CACHE_TYPE_DATA:
		return sprintf(buf, "Data\n");
	case CACHE_TYPE_INST:
		return sprintf(buf, "Instruction\n");
	case CACHE_TYPE_UNIFIED:
		return sprintf(buf, "Unified\n");
	default:
		return -EINVAL;
	}
}

static ssize_t allocation_policy_show(struct device *dev,
				      struct device_attribute *attr, char *buf)
{
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);
	unsigned int ci_attr = this_leaf->attributes;
	int n = 0;

	if ((ci_attr & CACHE_READ_ALLOCATE) && (ci_attr & CACHE_WRITE_ALLOCATE))
		n = sprintf(buf, "ReadWriteAllocate\n");
	else if (ci_attr & CACHE_READ_ALLOCATE)
		n = sprintf(buf, "ReadAllocate\n");
	else if (ci_attr & CACHE_WRITE_ALLOCATE)
		n = sprintf(buf, "WriteAllocate\n");
	return n;
}

static ssize_t write_policy_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);
	unsigned int ci_attr = this_leaf->attributes;
	int n = 0;

	if (ci_attr & CACHE_WRITE_THROUGH)
		n = sprintf(buf, "WriteThrough\n");
	else if (ci_attr & CACHE_WRITE_BACK)
		n = sprintf(buf, "WriteBack\n");
	return n;
}

static DEVICE_ATTR_RO(id);
static DEVICE_ATTR_RO(level);
static DEVICE_ATTR_RO(type);
static DEVICE_ATTR_RO(coherency_line_size);
static DEVICE_ATTR_RO(ways_of_associativity);
static DEVICE_ATTR_RO(number_of_sets);
static DEVICE_ATTR_RO(size);
static DEVICE_ATTR_RO(allocation_policy);
static DEVICE_ATTR_RO(write_policy);
static DEVICE_ATTR_RO(shared_cpu_map);
static DEVICE_ATTR_RO(shared_cpu_list);
static DEVICE_ATTR_RO(physical_line_partition);

static struct attribute *cache_default_attrs[] = {
	&dev_attr_id.attr,
	&dev_attr_type.attr,
	&dev_attr_level.attr,
	&dev_attr_shared_cpu_map.attr,
	&dev_attr_shared_cpu_list.attr,
	&dev_attr_coherency_line_size.attr,
	&dev_attr_ways_of_associativity.attr,
	&dev_attr_number_of_sets.attr,
	&dev_attr_size.attr,
	&dev_attr_allocation_policy.attr,
	&dev_attr_write_policy.attr,
	&dev_attr_physical_line_partition.attr,
	NULL
};

static umode_t
cache_default_attrs_is_visible(struct kobject *kobj,
			       struct attribute *attr, int unused)
{
	struct device *dev = kobj_to_dev(kobj);
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);
	const struct cpumask *mask = &this_leaf->shared_cpu_map;
	umode_t mode = attr->mode;

	if ((attr == &dev_attr_id.attr) && (this_leaf->attributes & CACHE_ID))
		return mode;
	if ((attr == &dev_attr_type.attr) && this_leaf->type)
		return mode;
	if ((attr == &dev_attr_level.attr) && this_leaf->level)
		return mode;
	if ((attr == &dev_attr_shared_cpu_map.attr) && !cpumask_empty(mask))
		return mode;
	if ((attr == &dev_attr_shared_cpu_list.attr) && !cpumask_empty(mask))
		return mode;
	if ((attr == &dev_attr_coherency_line_size.attr) &&
	    this_leaf->coherency_line_size)
		return mode;
	if ((attr == &dev_attr_ways_of_associativity.attr) &&
	    this_leaf->size) /* allow 0 = full associativity */
		return mode;
	if ((attr == &dev_attr_number_of_sets.attr) &&
	    this_leaf->number_of_sets)
		return mode;
	if ((attr == &dev_attr_size.attr) && this_leaf->size)
		return mode;
	if ((attr == &dev_attr_write_policy.attr) &&
	    (this_leaf->attributes & CACHE_WRITE_POLICY_MASK))
		return mode;
	if ((attr == &dev_attr_allocation_policy.attr) &&
	    (this_leaf->attributes & CACHE_ALLOCATE_POLICY_MASK))
		return mode;
	if ((attr == &dev_attr_physical_line_partition.attr) &&
	    this_leaf->physical_line_partition)
		return mode;

	return 0;
}

static const struct attribute_group cache_default_group = {
	.attrs = cache_default_attrs,
	.is_visible = cache_default_attrs_is_visible,
};

static const struct attribute_group *cache_default_groups[] = {
	&cache_default_group,
	NULL,
};

static const struct attribute_group *cache_private_groups[] = {
	&cache_default_group,
	NULL, /* Place holder for private group */
	NULL,
};

const struct attribute_group *
__weak cache_get_priv_group(struct cacheinfo *this_leaf)
{
	return NULL;
}
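
/*
 * cache_get_priv_group() lets an architecture append one private
 * attribute group per leaf (x86, for example, uses this for
 * vendor-specific cache attributes); the weak default above opts out.
 * Note cache_private_groups[] latches the first private group it sees,
 * so this mechanism assumes all leaves share the same group pointer.
 */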
static const struct attribute_group **
cache_get_attribute_groups(struct cacheinfo *this_leaf)
{
	const struct attribute_group *priv_group =
			cache_get_priv_group(this_leaf);

	if (!priv_group)
		return cache_default_groups;

	if (!cache_private_groups[1])
		cache_private_groups[1] = priv_group;

	return cache_private_groups;
}

/* Add/Remove cache interface for CPU device */
static void cpu_cache_sysfs_exit(unsigned int cpu)
{
	int i;
	struct device *ci_dev;

	if (per_cpu_index_dev(cpu)) {
		for (i = 0; i < cache_leaves(cpu); i++) {
			ci_dev = per_cache_index_dev(cpu, i);
			if (!ci_dev)
				continue;
			device_unregister(ci_dev);
		}
		kfree(per_cpu_index_dev(cpu));
		per_cpu_index_dev(cpu) = NULL;
	}
	device_unregister(per_cpu_cache_dev(cpu));
	per_cpu_cache_dev(cpu) = NULL;
}

static int cpu_cache_sysfs_init(unsigned int cpu)
{
	struct device *dev = get_cpu_device(cpu);

	if (per_cpu_cacheinfo(cpu) == NULL)
		return -ENOENT;

	per_cpu_cache_dev(cpu) = cpu_device_create(dev, NULL, NULL, "cache");
	if (IS_ERR(per_cpu_cache_dev(cpu)))
		return PTR_ERR(per_cpu_cache_dev(cpu));

	/* Allocate all required memory */
	per_cpu_index_dev(cpu) = kcalloc(cache_leaves(cpu),
					 sizeof(struct device *), GFP_KERNEL);
	if (unlikely(per_cpu_index_dev(cpu) == NULL))
		goto err_out;

	return 0;

err_out:
	cpu_cache_sysfs_exit(cpu);
	return -ENOMEM;
}

static int cache_add_dev(unsigned int cpu)
{
	unsigned int i;
	int rc;
	struct device *ci_dev, *parent;
	struct cacheinfo *this_leaf;
	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
	const struct attribute_group **cache_groups;

	rc = cpu_cache_sysfs_init(cpu);
	if (unlikely(rc < 0))
		return rc;

	parent = per_cpu_cache_dev(cpu);
	for (i = 0; i < cache_leaves(cpu); i++) {
		this_leaf = this_cpu_ci->info_list + i;
		if (this_leaf->disable_sysfs)
			continue;
		if (this_leaf->type == CACHE_TYPE_NOCACHE)
			break;
		cache_groups = cache_get_attribute_groups(this_leaf);
		ci_dev = cpu_device_create(parent, this_leaf, cache_groups,
					   "index%1u", i);
		if (IS_ERR(ci_dev)) {
			rc = PTR_ERR(ci_dev);
			goto err;
		}
		per_cache_index_dev(cpu, i) = ci_dev;
	}
	cpumask_set_cpu(cpu, &cache_dev_map);

	return 0;
err:
	cpu_cache_sysfs_exit(cpu);
	return rc;
}

static int cacheinfo_cpu_online(unsigned int cpu)
{
	int rc = detect_cache_attributes(cpu);

	if (rc)
		return rc;
	rc = cache_add_dev(cpu);
	if (rc)
		free_cache_attributes(cpu);
	return rc;
}

static int cacheinfo_cpu_pre_down(unsigned int cpu)
{
	if (cpumask_test_and_clear_cpu(cpu, &cache_dev_map))
		cpu_cache_sysfs_exit(cpu);

	free_cache_attributes(cpu);
	return 0;
}

static int __init cacheinfo_sysfs_init(void)
{
	return cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "base/cacheinfo:online",
				 cacheinfo_cpu_online, cacheinfo_cpu_pre_down);
}
device_initcall(cacheinfo_sysfs_init);
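
/*
 * Note: CPUHP_AP_ONLINE_DYN allocates a dynamic hotplug state, and
 * cpuhp_setup_state() also invokes cacheinfo_cpu_online() for every CPU
 * already online at registration time, so the hierarchy is populated at
 * boot as well as on later hotplug events.
 */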