// SPDX-License-Identifier: GPL-2.0
/*
 * cacheinfo support - processor cache information via sysfs
 *
 * Based on arch/x86/kernel/cpu/intel_cacheinfo.c
 * Author: Sudeep Holla <sudeep.holla@arm.com>
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/acpi.h>
#include <linux/bitops.h>
#include <linux/cacheinfo.h>
#include <linux/compiler.h>
#include <linux/cpu.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/of.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/sysfs.h>

/* pointer to per cpu cacheinfo */
static DEFINE_PER_CPU(struct cpu_cacheinfo, ci_cpu_cacheinfo);
#define ci_cacheinfo(cpu)	(&per_cpu(ci_cpu_cacheinfo, cpu))
#define cache_leaves(cpu)	(ci_cacheinfo(cpu)->num_leaves)
#define per_cpu_cacheinfo(cpu)	(ci_cacheinfo(cpu)->info_list)

struct cpu_cacheinfo *get_cpu_cacheinfo(unsigned int cpu)
{
	return ci_cacheinfo(cpu);
}

#ifdef CONFIG_OF
static inline bool cache_leaves_are_shared(struct cacheinfo *this_leaf,
					   struct cacheinfo *sib_leaf)
{
	return sib_leaf->fw_token == this_leaf->fw_token;
}

/* OF properties to query for a given cache type */
struct cache_type_info {
	const char *size_prop;
	const char *line_size_props[2];
	const char *nr_sets_prop;
};

static const struct cache_type_info cache_type_info[] = {
	{
		.size_prop       = "cache-size",
		.line_size_props = { "cache-line-size",
				     "cache-block-size", },
		.nr_sets_prop    = "cache-sets",
	}, {
		.size_prop       = "i-cache-size",
		.line_size_props = { "i-cache-line-size",
				     "i-cache-block-size", },
		.nr_sets_prop    = "i-cache-sets",
	}, {
		.size_prop       = "d-cache-size",
		.line_size_props = { "d-cache-line-size",
				     "d-cache-block-size", },
		.nr_sets_prop    = "d-cache-sets",
	},
};

static inline int get_cacheinfo_idx(enum cache_type type)
{
	if (type == CACHE_TYPE_UNIFIED)
		return 0;
	return type;
}

static void cache_size(struct cacheinfo *this_leaf, struct device_node *np)
{
	const char *propname;
	int ct_idx;

	ct_idx = get_cacheinfo_idx(this_leaf->type);
	propname = cache_type_info[ct_idx].size_prop;

	of_property_read_u32(np, propname, &this_leaf->size);
}

/* not cache_line_size() because that's a macro in include/linux/cache.h */
static void cache_get_line_size(struct cacheinfo *this_leaf,
				struct device_node *np)
{
	int i, lim, ct_idx;

	ct_idx = get_cacheinfo_idx(this_leaf->type);
	lim = ARRAY_SIZE(cache_type_info[ct_idx].line_size_props);

	for (i = 0; i < lim; i++) {
		int ret;
		u32 line_size;
		const char *propname;

		propname = cache_type_info[ct_idx].line_size_props[i];
		ret = of_property_read_u32(np, propname, &line_size);
		if (!ret) {
			this_leaf->coherency_line_size = line_size;
			break;
		}
	}
}

static void cache_nr_sets(struct cacheinfo *this_leaf, struct device_node *np)
{
	const char *propname;
	int ct_idx;

	ct_idx = get_cacheinfo_idx(this_leaf->type);
	propname = cache_type_info[ct_idx].nr_sets_prop;

	of_property_read_u32(np, propname, &this_leaf->number_of_sets);
}
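/*
 * Illustrative only: a hypothetical DT cache node carrying the
 * properties parsed by the helpers above. The property names come from
 * cache_type_info[]; the node itself and its values are made up:
 *
 *	l2: l2-cache {
 *		compatible = "cache";
 *		cache-unified;
 *		cache-size = <0x80000>;
 *		cache-line-size = <64>;
 *		cache-sets = <2048>;
 *	};
 *
 * For such a node (512 KiB, 64-byte lines, 2048 sets),
 * cache_associativity() below derives
 * ways = (size / nr_sets) / line_size = (524288 / 2048) / 64 = 4.
 */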
static void cache_associativity(struct cacheinfo *this_leaf)
{
	unsigned int line_size = this_leaf->coherency_line_size;
	unsigned int nr_sets = this_leaf->number_of_sets;
	unsigned int size = this_leaf->size;

	/*
	 * If the cache is fully associative, there is no need to
	 * check the other properties.
	 */
	if (nr_sets != 1 && nr_sets > 0 && size > 0 && line_size > 0)
		this_leaf->ways_of_associativity = (size / nr_sets) / line_size;
}

static bool cache_node_is_unified(struct cacheinfo *this_leaf,
				  struct device_node *np)
{
	return of_property_read_bool(np, "cache-unified");
}

static void cache_of_set_props(struct cacheinfo *this_leaf,
			       struct device_node *np)
{
	/*
	 * init_cache_level() must set up the cache level correctly,
	 * overriding the architecturally specified levels, so if the
	 * type is still NONE at this stage, the cache is assumed to be
	 * unified.
	 */
	if (this_leaf->type == CACHE_TYPE_NOCACHE &&
	    cache_node_is_unified(this_leaf, np))
		this_leaf->type = CACHE_TYPE_UNIFIED;
	cache_size(this_leaf, np);
	cache_get_line_size(this_leaf, np);
	cache_nr_sets(this_leaf, np);
	cache_associativity(this_leaf);
}

static int cache_setup_of_node(unsigned int cpu)
{
	struct device_node *np;
	struct cacheinfo *this_leaf;
	struct device *cpu_dev = get_cpu_device(cpu);
	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
	unsigned int index = 0;

	/* skip if fw_token is already populated */
	if (this_cpu_ci->info_list->fw_token)
		return 0;

	if (!cpu_dev) {
		pr_err("No cpu device for CPU %d\n", cpu);
		return -ENODEV;
	}
	np = cpu_dev->of_node;
	if (!np) {
		pr_err("Failed to find cpu%d device node\n", cpu);
		return -ENOENT;
	}

	while (index < cache_leaves(cpu)) {
		this_leaf = this_cpu_ci->info_list + index;
		if (this_leaf->level != 1)
			np = of_find_next_cache_node(np);
		else
			np = of_node_get(np); /* cpu node itself */
		if (!np)
			break;
		cache_of_set_props(this_leaf, np);
		this_leaf->fw_token = np;
		index++;
	}

	if (index != cache_leaves(cpu)) /* not all OF nodes populated */
		return -ENOENT;

	return 0;
}
#else
static inline int cache_setup_of_node(unsigned int cpu) { return 0; }
static inline bool cache_leaves_are_shared(struct cacheinfo *this_leaf,
					   struct cacheinfo *sib_leaf)
{
	/*
	 * For non-DT/ACPI systems, assume unique level 1 caches and
	 * system-wide shared caches for all other levels. This will be
	 * used only if arch-specific code has not populated
	 * shared_cpu_map.
	 */
	return this_leaf->level != 1;
}
#endif
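/*
 * Sharing detection, as implemented below: cache leaves are compared
 * positionally, i.e. leaf "index" of this CPU against leaf "index" of
 * every other online CPU. Two leaves count as shared when
 * cache_leaves_are_shared() says so - with DT, when both fw_token
 * pointers reference the same cache node; without firmware data, when
 * the level is above 1.
 */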
int __weak cache_setup_acpi(unsigned int cpu)
{
	return -ENOTSUPP;
}

unsigned int coherency_max_size;

static int cache_shared_cpu_map_setup(unsigned int cpu)
{
	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
	struct cacheinfo *this_leaf, *sib_leaf;
	unsigned int index;
	int ret = 0;

	if (this_cpu_ci->cpu_map_populated)
		return 0;

	if (of_have_populated_dt())
		ret = cache_setup_of_node(cpu);
	else if (!acpi_disabled)
		ret = cache_setup_acpi(cpu);

	if (ret)
		return ret;

	for (index = 0; index < cache_leaves(cpu); index++) {
		unsigned int i;

		this_leaf = this_cpu_ci->info_list + index;
		/* skip if shared_cpu_map is already populated */
		if (!cpumask_empty(&this_leaf->shared_cpu_map))
			continue;

		cpumask_set_cpu(cpu, &this_leaf->shared_cpu_map);
		for_each_online_cpu(i) {
			struct cpu_cacheinfo *sib_cpu_ci = get_cpu_cacheinfo(i);

			if (i == cpu || !sib_cpu_ci->info_list)
				continue; /* skip if itself or no cacheinfo */
			sib_leaf = sib_cpu_ci->info_list + index;
			if (cache_leaves_are_shared(this_leaf, sib_leaf)) {
				cpumask_set_cpu(cpu, &sib_leaf->shared_cpu_map);
				cpumask_set_cpu(i, &this_leaf->shared_cpu_map);
			}
		}
		/* record the maximum cache line size */
		if (this_leaf->coherency_line_size > coherency_max_size)
			coherency_max_size = this_leaf->coherency_line_size;
	}

	return 0;
}

static void cache_shared_cpu_map_remove(unsigned int cpu)
{
	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
	struct cacheinfo *this_leaf, *sib_leaf;
	unsigned int sibling, index;

	for (index = 0; index < cache_leaves(cpu); index++) {
		this_leaf = this_cpu_ci->info_list + index;
		for_each_cpu(sibling, &this_leaf->shared_cpu_map) {
			struct cpu_cacheinfo *sib_cpu_ci;

			if (sibling == cpu) /* skip itself */
				continue;

			sib_cpu_ci = get_cpu_cacheinfo(sibling);
			if (!sib_cpu_ci->info_list)
				continue;

			sib_leaf = sib_cpu_ci->info_list + index;
			cpumask_clear_cpu(cpu, &sib_leaf->shared_cpu_map);
			cpumask_clear_cpu(sibling, &this_leaf->shared_cpu_map);
		}
		if (of_have_populated_dt())
			of_node_put(this_leaf->fw_token);
	}
}

static void free_cache_attributes(unsigned int cpu)
{
	if (!per_cpu_cacheinfo(cpu))
		return;

	cache_shared_cpu_map_remove(cpu);

	kfree(per_cpu_cacheinfo(cpu));
	per_cpu_cacheinfo(cpu) = NULL;
}

int __weak init_cache_level(unsigned int cpu)
{
	return -ENOENT;
}

int __weak populate_cache_leaves(unsigned int cpu)
{
	return -ENOENT;
}
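/*
 * Per-CPU setup sequence, implemented by detect_cache_attributes():
 *
 *  1. init_cache_level()       - arch hook; must leave cache_leaves(cpu)
 *                                non-zero
 *  2. kcalloc()                - one struct cacheinfo per leaf
 *  3. populate_cache_leaves()  - arch hook; may fill the leaves fully or
 *                                only partially
 *  4. cache_shared_cpu_map_setup() - completes fw_token and
 *                                shared_cpu_map from DT/ACPI where unset
 */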
static int detect_cache_attributes(unsigned int cpu)
{
	int ret;

	if (init_cache_level(cpu) || !cache_leaves(cpu))
		return -ENOENT;

	per_cpu_cacheinfo(cpu) = kcalloc(cache_leaves(cpu),
					 sizeof(struct cacheinfo), GFP_KERNEL);
	if (per_cpu_cacheinfo(cpu) == NULL)
		return -ENOMEM;

	/*
	 * populate_cache_leaves() may completely set up the cache leaves
	 * and shared_cpu_map, or it may leave them partially set up.
	 */
	ret = populate_cache_leaves(cpu);
	if (ret)
		goto free_ci;
	/*
	 * For systems using DT for the cache hierarchy, fw_token and
	 * shared_cpu_map will be set up here only if they are not
	 * populated already.
	 */
	ret = cache_shared_cpu_map_setup(cpu);
	if (ret) {
		pr_warn("Unable to detect cache hierarchy for CPU %d\n", cpu);
		goto free_ci;
	}

	return 0;

free_ci:
	free_cache_attributes(cpu);
	return ret;
}

/* pointer to cpuX/cache device */
static DEFINE_PER_CPU(struct device *, ci_cache_dev);
#define per_cpu_cache_dev(cpu)	(per_cpu(ci_cache_dev, cpu))

static cpumask_t cache_dev_map;

/* pointer to array of devices for cpuX/cache/indexY */
static DEFINE_PER_CPU(struct device **, ci_index_dev);
#define per_cpu_index_dev(cpu)	(per_cpu(ci_index_dev, cpu))
#define per_cache_index_dev(cpu, idx)	((per_cpu_index_dev(cpu))[idx])

#define show_one(file_name, object)				\
static ssize_t file_name##_show(struct device *dev,		\
		struct device_attribute *attr, char *buf)	\
{								\
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);	\
	return sysfs_emit(buf, "%u\n", this_leaf->object);	\
}

show_one(id, id);
show_one(level, level);
show_one(coherency_line_size, coherency_line_size);
show_one(number_of_sets, number_of_sets);
show_one(physical_line_partition, physical_line_partition);
show_one(ways_of_associativity, ways_of_associativity);

static ssize_t size_show(struct device *dev,
			 struct device_attribute *attr, char *buf)
{
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%uK\n", this_leaf->size >> 10);
}

static ssize_t shared_cpu_map_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);
	const struct cpumask *mask = &this_leaf->shared_cpu_map;

	return sysfs_emit(buf, "%*pb\n", nr_cpu_ids, mask);
}

static ssize_t shared_cpu_list_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);
	const struct cpumask *mask = &this_leaf->shared_cpu_map;

	return sysfs_emit(buf, "%*pbl\n", nr_cpu_ids, mask);
}

static ssize_t type_show(struct device *dev,
			 struct device_attribute *attr, char *buf)
{
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);
	const char *output;

	switch (this_leaf->type) {
	case CACHE_TYPE_DATA:
		output = "Data";
		break;
	case CACHE_TYPE_INST:
		output = "Instruction";
		break;
	case CACHE_TYPE_UNIFIED:
		output = "Unified";
		break;
	default:
		return -EINVAL;
	}

	return sysfs_emit(buf, "%s\n", output);
}

static ssize_t allocation_policy_show(struct device *dev,
				      struct device_attribute *attr, char *buf)
{
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);
	unsigned int ci_attr = this_leaf->attributes;
	const char *output;

	if ((ci_attr & CACHE_READ_ALLOCATE) && (ci_attr & CACHE_WRITE_ALLOCATE))
		output = "ReadWriteAllocate";
	else if (ci_attr & CACHE_READ_ALLOCATE)
		output = "ReadAllocate";
	else if (ci_attr & CACHE_WRITE_ALLOCATE)
		output = "WriteAllocate";
	else
		return 0;

	return sysfs_emit(buf, "%s\n", output);
}

static ssize_t write_policy_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);
	unsigned int ci_attr = this_leaf->attributes;
	int n = 0;

	if (ci_attr & CACHE_WRITE_THROUGH)
		n = sysfs_emit(buf, "WriteThrough\n");
	else if (ci_attr & CACHE_WRITE_BACK)
		n = sysfs_emit(buf, "WriteBack\n");
	return n;
}
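/*
 * Illustrative only - reading the attributes defined above for a
 * hypothetical unified L2 cache shared by CPUs 0-3 might look like:
 *
 *	$ cd /sys/devices/system/cpu/cpu0/cache/index2
 *	$ grep . type level size shared_cpu_map shared_cpu_list
 *	type:Unified
 *	level:2
 *	size:512K
 *	shared_cpu_map:f
 *	shared_cpu_list:0-3
 *
 * shared_cpu_map is the hex form of the mask ("%*pb"), shared_cpu_list
 * the range form ("%*pbl").
 */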
static DEVICE_ATTR_RO(id);
static DEVICE_ATTR_RO(level);
static DEVICE_ATTR_RO(type);
static DEVICE_ATTR_RO(coherency_line_size);
static DEVICE_ATTR_RO(ways_of_associativity);
static DEVICE_ATTR_RO(number_of_sets);
static DEVICE_ATTR_RO(size);
static DEVICE_ATTR_RO(allocation_policy);
static DEVICE_ATTR_RO(write_policy);
static DEVICE_ATTR_RO(shared_cpu_map);
static DEVICE_ATTR_RO(shared_cpu_list);
static DEVICE_ATTR_RO(physical_line_partition);

static struct attribute *cache_default_attrs[] = {
	&dev_attr_id.attr,
	&dev_attr_type.attr,
	&dev_attr_level.attr,
	&dev_attr_shared_cpu_map.attr,
	&dev_attr_shared_cpu_list.attr,
	&dev_attr_coherency_line_size.attr,
	&dev_attr_ways_of_associativity.attr,
	&dev_attr_number_of_sets.attr,
	&dev_attr_size.attr,
	&dev_attr_allocation_policy.attr,
	&dev_attr_write_policy.attr,
	&dev_attr_physical_line_partition.attr,
	NULL
};

static umode_t
cache_default_attrs_is_visible(struct kobject *kobj,
			       struct attribute *attr, int unused)
{
	struct device *dev = kobj_to_dev(kobj);
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);
	const struct cpumask *mask = &this_leaf->shared_cpu_map;
	umode_t mode = attr->mode;

	if ((attr == &dev_attr_id.attr) && (this_leaf->attributes & CACHE_ID))
		return mode;
	if ((attr == &dev_attr_type.attr) && this_leaf->type)
		return mode;
	if ((attr == &dev_attr_level.attr) && this_leaf->level)
		return mode;
	if ((attr == &dev_attr_shared_cpu_map.attr) && !cpumask_empty(mask))
		return mode;
	if ((attr == &dev_attr_shared_cpu_list.attr) && !cpumask_empty(mask))
		return mode;
	if ((attr == &dev_attr_coherency_line_size.attr) &&
	    this_leaf->coherency_line_size)
		return mode;
	if ((attr == &dev_attr_ways_of_associativity.attr) &&
	    this_leaf->size) /* allow 0 = full associativity */
		return mode;
	if ((attr == &dev_attr_number_of_sets.attr) &&
	    this_leaf->number_of_sets)
		return mode;
	if ((attr == &dev_attr_size.attr) && this_leaf->size)
		return mode;
	if ((attr == &dev_attr_write_policy.attr) &&
	    (this_leaf->attributes & CACHE_WRITE_POLICY_MASK))
		return mode;
	if ((attr == &dev_attr_allocation_policy.attr) &&
	    (this_leaf->attributes & CACHE_ALLOCATE_POLICY_MASK))
		return mode;
	if ((attr == &dev_attr_physical_line_partition.attr) &&
	    this_leaf->physical_line_partition)
		return mode;

	return 0;
}

static const struct attribute_group cache_default_group = {
	.attrs = cache_default_attrs,
	.is_visible = cache_default_attrs_is_visible,
};

static const struct attribute_group *cache_default_groups[] = {
	&cache_default_group,
	NULL,
};

static const struct attribute_group *cache_private_groups[] = {
	&cache_default_group,
	NULL, /* Placeholder for private group */
	NULL,
};

const struct attribute_group *
__weak cache_get_priv_group(struct cacheinfo *this_leaf)
{
	return NULL;
}

static const struct attribute_group **
cache_get_attribute_groups(struct cacheinfo *this_leaf)
{
	const struct attribute_group *priv_group =
					cache_get_priv_group(this_leaf);

	if (!priv_group)
		return cache_default_groups;

	if (!cache_private_groups[1])
		cache_private_groups[1] = priv_group;

	return cache_private_groups;
}

/* Add/Remove cache interface for CPU device */
static void cpu_cache_sysfs_exit(unsigned int cpu)
{
	int i;
	struct device *ci_dev;

	if (per_cpu_index_dev(cpu)) {
		for (i = 0; i < cache_leaves(cpu); i++) {
			ci_dev = per_cache_index_dev(cpu, i);
			if (!ci_dev)
				continue;
			device_unregister(ci_dev);
		}
		kfree(per_cpu_index_dev(cpu));
		per_cpu_index_dev(cpu) = NULL;
	}
	device_unregister(per_cpu_cache_dev(cpu));
	per_cpu_cache_dev(cpu) = NULL;
}

static int cpu_cache_sysfs_init(unsigned int cpu)
{
	struct device *dev = get_cpu_device(cpu);

	if (per_cpu_cacheinfo(cpu) == NULL)
		return -ENOENT;

	per_cpu_cache_dev(cpu) = cpu_device_create(dev, NULL, NULL, "cache");
	if (IS_ERR(per_cpu_cache_dev(cpu)))
		return PTR_ERR(per_cpu_cache_dev(cpu));

	/* Allocate all required memory */
	per_cpu_index_dev(cpu) = kcalloc(cache_leaves(cpu),
					 sizeof(struct device *), GFP_KERNEL);
	if (unlikely(per_cpu_index_dev(cpu) == NULL))
		goto err_out;

	return 0;

err_out:
	cpu_cache_sysfs_exit(cpu);
	return -ENOMEM;
}
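/*
 * cache_add_dev() below produces the following layout, e.g. for a CPU
 * with separate L1 caches plus a unified L2 (three leaves; the index
 * order is whatever populate_cache_leaves() chose):
 *
 *	/sys/devices/system/cpu/cpuX/cache/index0/	(e.g. L1 data)
 *	/sys/devices/system/cpu/cpuX/cache/index1/	(e.g. L1 instruction)
 *	/sys/devices/system/cpu/cpuX/cache/index2/	(e.g. L2 unified)
 *
 * Each indexY device carries the attribute group(s) returned by
 * cache_get_attribute_groups() for its leaf.
 */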
static int cache_add_dev(unsigned int cpu)
{
	unsigned int i;
	int rc;
	struct device *ci_dev, *parent;
	struct cacheinfo *this_leaf;
	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
	const struct attribute_group **cache_groups;

	rc = cpu_cache_sysfs_init(cpu);
	if (unlikely(rc < 0))
		return rc;

	parent = per_cpu_cache_dev(cpu);
	for (i = 0; i < cache_leaves(cpu); i++) {
		this_leaf = this_cpu_ci->info_list + i;
		if (this_leaf->disable_sysfs)
			continue;
		if (this_leaf->type == CACHE_TYPE_NOCACHE)
			break;
		cache_groups = cache_get_attribute_groups(this_leaf);
		ci_dev = cpu_device_create(parent, this_leaf, cache_groups,
					   "index%1u", i);
		if (IS_ERR(ci_dev)) {
			rc = PTR_ERR(ci_dev);
			goto err;
		}
		per_cache_index_dev(cpu, i) = ci_dev;
	}
	cpumask_set_cpu(cpu, &cache_dev_map);

	return 0;
err:
	cpu_cache_sysfs_exit(cpu);
	return rc;
}

/*
 * CPU hotplug callbacks: build the cacheinfo and its sysfs nodes when a
 * CPU comes online, and tear them down again before it goes offline.
 */
static int cacheinfo_cpu_online(unsigned int cpu)
{
	int rc = detect_cache_attributes(cpu);

	if (rc)
		return rc;
	rc = cache_add_dev(cpu);
	if (rc)
		free_cache_attributes(cpu);
	return rc;
}

static int cacheinfo_cpu_pre_down(unsigned int cpu)
{
	if (cpumask_test_and_clear_cpu(cpu, &cache_dev_map))
		cpu_cache_sysfs_exit(cpu);

	free_cache_attributes(cpu);
	return 0;
}

static int __init cacheinfo_sysfs_init(void)
{
	return cpuhp_setup_state(CPUHP_AP_BASE_CACHEINFO_ONLINE,
				 "base/cacheinfo:online",
				 cacheinfo_cpu_online, cacheinfo_cpu_pre_down);
}
device_initcall(cacheinfo_sysfs_init);