// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright IBM Corp. 2007, 2011
 * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
 */

#define KMSG_COMPONENT "cpu"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/workqueue.h>
#include <linux/memblock.h>
#include <linux/uaccess.h>
#include <linux/sysctl.h>
#include <linux/cpuset.h>
#include <linux/device.h>
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/sched/topology.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/mm.h>
#include <linux/nodemask.h>
#include <linux/node.h>
#include <asm/sysinfo.h>
#include <asm/numa.h>

#define PTF_HORIZONTAL	(0UL)
#define PTF_VERTICAL	(1UL)
#define PTF_CHECK	(2UL)

enum {
        TOPOLOGY_MODE_HW,
        TOPOLOGY_MODE_SINGLE,
        TOPOLOGY_MODE_PACKAGE,
        TOPOLOGY_MODE_UNINITIALIZED
};

struct mask_info {
        struct mask_info *next;
        unsigned char id;
        cpumask_t mask;
};

static int topology_mode = TOPOLOGY_MODE_UNINITIALIZED;
static void set_topology_timer(void);
static void topology_work_fn(struct work_struct *work);
static struct sysinfo_15_1_x *tl_info;

static DECLARE_WORK(topology_work, topology_work_fn);

/*
 * Socket/Book linked lists and cpu_topology updates are
 * protected by "sched_domains_mutex".
 */
static struct mask_info socket_info;
static struct mask_info book_info;
static struct mask_info drawer_info;

struct cpu_topology_s390 cpu_topology[NR_CPUS];
EXPORT_SYMBOL_GPL(cpu_topology);

cpumask_t cpus_with_topology;

static cpumask_t cpu_group_map(struct mask_info *info, unsigned int cpu)
{
        cpumask_t mask;

        cpumask_copy(&mask, cpumask_of(cpu));
        switch (topology_mode) {
        case TOPOLOGY_MODE_HW:
                while (info) {
                        if (cpumask_test_cpu(cpu, &info->mask)) {
                                mask = info->mask;
                                break;
                        }
                        info = info->next;
                }
                if (cpumask_empty(&mask))
                        cpumask_copy(&mask, cpumask_of(cpu));
                break;
        case TOPOLOGY_MODE_PACKAGE:
                cpumask_copy(&mask, cpu_present_mask);
                break;
        default:
                /* fallthrough */
        case TOPOLOGY_MODE_SINGLE:
                cpumask_copy(&mask, cpumask_of(cpu));
                break;
        }
        return mask;
}

static cpumask_t cpu_thread_map(unsigned int cpu)
{
        cpumask_t mask;
        int i;

        cpumask_copy(&mask, cpumask_of(cpu));
        if (topology_mode != TOPOLOGY_MODE_HW)
                return mask;
        cpu -= cpu % (smp_cpu_mtid + 1);
        for (i = 0; i <= smp_cpu_mtid; i++)
                if (cpu_present(cpu + i))
                        cpumask_set_cpu(cpu + i, &mask);
        return mask;
}

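/*
 * A core-type topology-list entry carries a 64-bit mask in which the most
 * significant bit stands for core number tl_core->origin. Since
 * for_each_set_bit() counts bits from the least significant end, the
 * architectural core number below is recovered as
 * rcore = TOPOLOGY_CORE_BITS - 1 - core + tl_core->origin. Shifting rcore
 * left by smp_cpu_mt_shift yields the first hardware thread of that core,
 * which smp_find_processor_id() maps to a logical CPU number.
 */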
#define TOPOLOGY_CORE_BITS	64

static void add_cpus_to_mask(struct topology_core *tl_core,
                             struct mask_info *drawer,
                             struct mask_info *book,
                             struct mask_info *socket)
{
        struct cpu_topology_s390 *topo;
        unsigned int core;

        for_each_set_bit(core, &tl_core->mask, TOPOLOGY_CORE_BITS) {
                unsigned int rcore;
                int lcpu, i;

                rcore = TOPOLOGY_CORE_BITS - 1 - core + tl_core->origin;
                lcpu = smp_find_processor_id(rcore << smp_cpu_mt_shift);
                if (lcpu < 0)
                        continue;
                for (i = 0; i <= smp_cpu_mtid; i++) {
                        topo = &cpu_topology[lcpu + i];
                        topo->drawer_id = drawer->id;
                        topo->book_id = book->id;
                        topo->socket_id = socket->id;
                        topo->core_id = rcore;
                        topo->thread_id = lcpu + i;
                        topo->dedicated = tl_core->d;
                        cpumask_set_cpu(lcpu + i, &drawer->mask);
                        cpumask_set_cpu(lcpu + i, &book->mask);
                        cpumask_set_cpu(lcpu + i, &socket->mask);
                        cpumask_set_cpu(lcpu + i, &cpus_with_topology);
                        smp_cpu_set_polarization(lcpu + i, tl_core->pp);
                }
        }
}

static void clear_masks(void)
{
        struct mask_info *info;

        info = &socket_info;
        while (info) {
                cpumask_clear(&info->mask);
                info = info->next;
        }
        info = &book_info;
        while (info) {
                cpumask_clear(&info->mask);
                info = info->next;
        }
        info = &drawer_info;
        while (info) {
                cpumask_clear(&info->mask);
                info = info->next;
        }
}

static union topology_entry *next_tle(union topology_entry *tle)
{
        if (!tle->nl)
                return (union topology_entry *)((struct topology_core *)tle + 1);
        return (union topology_entry *)((struct topology_container *)tle + 1);
}

static void tl_to_masks(struct sysinfo_15_1_x *info)
{
        struct mask_info *socket = &socket_info;
        struct mask_info *book = &book_info;
        struct mask_info *drawer = &drawer_info;
        union topology_entry *tle, *end;

        clear_masks();
        tle = info->tle;
        end = (union topology_entry *)((unsigned long)info + info->length);
        while (tle < end) {
                switch (tle->nl) {
                case 3:
                        drawer = drawer->next;
                        drawer->id = tle->container.id;
                        break;
                case 2:
                        book = book->next;
                        book->id = tle->container.id;
                        break;
                case 1:
                        socket = socket->next;
                        socket->id = tle->container.id;
                        break;
                case 0:
                        add_cpus_to_mask(&tle->cpu, drawer, book, socket);
                        break;
                default:
                        clear_masks();
                        return;
                }
                tle = next_tle(tle);
        }
}

static void topology_update_polarization_simple(void)
{
        int cpu;

        for_each_possible_cpu(cpu)
                smp_cpu_set_polarization(cpu, POLARIZATION_HRZ);
}

static int ptf(unsigned long fc)
{
        int rc;

        asm volatile(
                "	.insn	rre,0xb9a20000,%1,%1\n"
                "	ipm	%0\n"
                "	srl	%0,28\n"
                : "=d" (rc)
                : "d" (fc) : "cc");
        return rc;
}

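/*
 * ptf() above issues the PTF (Perform Topology Function) instruction,
 * emitted directly via .insn with its 0xb9a2 opcode. PTF_HORIZONTAL and
 * PTF_VERTICAL request a polarization change, while PTF_CHECK only asks
 * whether a topology-change report is pending. The condition code is
 * extracted with ipm/srl and returned: nonzero means the polarization
 * request was rejected, or, for PTF_CHECK, that a change report is pending.
 */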
int topology_set_cpu_management(int fc)
{
        int cpu, rc;

        if (!MACHINE_HAS_TOPOLOGY)
                return -EOPNOTSUPP;
        if (fc)
                rc = ptf(PTF_VERTICAL);
        else
                rc = ptf(PTF_HORIZONTAL);
        if (rc)
                return -EBUSY;
        for_each_possible_cpu(cpu)
                smp_cpu_set_polarization(cpu, POLARIZATION_UNKNOWN);
        return rc;
}

static void update_cpu_masks(void)
{
        struct cpu_topology_s390 *topo;
        int cpu, id;

        for_each_possible_cpu(cpu) {
                topo = &cpu_topology[cpu];
                topo->thread_mask = cpu_thread_map(cpu);
                topo->core_mask = cpu_group_map(&socket_info, cpu);
                topo->book_mask = cpu_group_map(&book_info, cpu);
                topo->drawer_mask = cpu_group_map(&drawer_info, cpu);
                if (topology_mode != TOPOLOGY_MODE_HW) {
                        id = topology_mode == TOPOLOGY_MODE_PACKAGE ? 0 : cpu;
                        topo->thread_id = cpu;
                        topo->core_id = cpu;
                        topo->socket_id = id;
                        topo->book_id = id;
                        topo->drawer_id = id;
                        if (cpu_present(cpu))
                                cpumask_set_cpu(cpu, &cpus_with_topology);
                }
        }
        numa_update_cpu_topology();
}

void store_topology(struct sysinfo_15_1_x *info)
{
        stsi(info, 15, 1, topology_mnest_limit());
}

static void __arch_update_dedicated_flag(void *arg)
{
        if (topology_cpu_dedicated(smp_processor_id()))
                set_cpu_flag(CIF_DEDICATED_CPU);
        else
                clear_cpu_flag(CIF_DEDICATED_CPU);
}

static int __arch_update_cpu_topology(void)
{
        struct sysinfo_15_1_x *info = tl_info;
        int rc = 0;

        mutex_lock(&smp_cpu_state_mutex);
        cpumask_clear(&cpus_with_topology);
        if (MACHINE_HAS_TOPOLOGY) {
                rc = 1;
                store_topology(info);
                tl_to_masks(info);
        }
        update_cpu_masks();
        if (!MACHINE_HAS_TOPOLOGY)
                topology_update_polarization_simple();
        mutex_unlock(&smp_cpu_state_mutex);
        return rc;
}

int arch_update_cpu_topology(void)
{
        struct device *dev;
        int cpu, rc;

        rc = __arch_update_cpu_topology();
        on_each_cpu(__arch_update_dedicated_flag, NULL, 0);
        for_each_online_cpu(cpu) {
                dev = get_cpu_device(cpu);
                kobject_uevent(&dev->kobj, KOBJ_CHANGE);
        }
        return rc;
}

static void topology_work_fn(struct work_struct *work)
{
        rebuild_sched_domains();
}

void topology_schedule_update(void)
{
        schedule_work(&topology_work);
}

static void topology_flush_work(void)
{
        flush_work(&topology_work);
}

static void topology_timer_fn(struct timer_list *unused)
{
        if (ptf(PTF_CHECK))
                topology_schedule_update();
        set_topology_timer();
}

static struct timer_list topology_timer;

static atomic_t topology_poll = ATOMIC_INIT(0);

static void set_topology_timer(void)
{
        if (atomic_add_unless(&topology_poll, -1, 0))
                mod_timer(&topology_timer, jiffies + HZ / 10);
        else
                mod_timer(&topology_timer, jiffies + HZ * 60);
}

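/*
 * topology_expect_change() charges topology_poll with up to 60 ticks.
 * While the counter is positive, set_topology_timer() decrements it and
 * re-arms the timer at HZ / 10, so a pending topology change is noticed
 * within roughly 100ms for about six seconds before polling falls back
 * to the slow once-a-minute rate.
 */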
void topology_expect_change(void)
{
        if (!MACHINE_HAS_TOPOLOGY)
                return;
        /* This is racy, but it doesn't matter since it is just a heuristic.
         * Worst case is that we poll in a higher frequency for a bit longer.
         */
        if (atomic_read(&topology_poll) > 60)
                return;
        atomic_add(60, &topology_poll);
        set_topology_timer();
}

static int cpu_management;

static ssize_t dispatching_show(struct device *dev,
                                struct device_attribute *attr,
                                char *buf)
{
        ssize_t count;

        mutex_lock(&smp_cpu_state_mutex);
        count = sprintf(buf, "%d\n", cpu_management);
        mutex_unlock(&smp_cpu_state_mutex);
        return count;
}

static ssize_t dispatching_store(struct device *dev,
                                 struct device_attribute *attr,
                                 const char *buf,
                                 size_t count)
{
        int val, rc;
        char delim;

        if (sscanf(buf, "%d %c", &val, &delim) != 1)
                return -EINVAL;
        if (val != 0 && val != 1)
                return -EINVAL;
        rc = 0;
        get_online_cpus();
        mutex_lock(&smp_cpu_state_mutex);
        if (cpu_management == val)
                goto out;
        rc = topology_set_cpu_management(val);
        if (rc)
                goto out;
        cpu_management = val;
        topology_expect_change();
out:
        mutex_unlock(&smp_cpu_state_mutex);
        put_online_cpus();
        return rc ? rc : count;
}
static DEVICE_ATTR_RW(dispatching);

static ssize_t cpu_polarization_show(struct device *dev,
                                     struct device_attribute *attr, char *buf)
{
        int cpu = dev->id;
        ssize_t count;

        mutex_lock(&smp_cpu_state_mutex);
        switch (smp_cpu_get_polarization(cpu)) {
        case POLARIZATION_HRZ:
                count = sprintf(buf, "horizontal\n");
                break;
        case POLARIZATION_VL:
                count = sprintf(buf, "vertical:low\n");
                break;
        case POLARIZATION_VM:
                count = sprintf(buf, "vertical:medium\n");
                break;
        case POLARIZATION_VH:
                count = sprintf(buf, "vertical:high\n");
                break;
        default:
                count = sprintf(buf, "unknown\n");
                break;
        }
        mutex_unlock(&smp_cpu_state_mutex);
        return count;
}
static DEVICE_ATTR(polarization, 0444, cpu_polarization_show, NULL);

static struct attribute *topology_cpu_attrs[] = {
        &dev_attr_polarization.attr,
        NULL,
};

static struct attribute_group topology_cpu_attr_group = {
        .attrs = topology_cpu_attrs,
};

static ssize_t cpu_dedicated_show(struct device *dev,
                                  struct device_attribute *attr, char *buf)
{
        int cpu = dev->id;
        ssize_t count;

        mutex_lock(&smp_cpu_state_mutex);
        count = sprintf(buf, "%d\n", topology_cpu_dedicated(cpu));
        mutex_unlock(&smp_cpu_state_mutex);
        return count;
}
static DEVICE_ATTR(dedicated, 0444, cpu_dedicated_show, NULL);

static struct attribute *topology_extra_cpu_attrs[] = {
        &dev_attr_dedicated.attr,
        NULL,
};

static struct attribute_group topology_extra_cpu_attr_group = {
        .attrs = topology_extra_cpu_attrs,
};

int topology_cpu_init(struct cpu *cpu)
{
        int rc;

        rc = sysfs_create_group(&cpu->dev.kobj, &topology_cpu_attr_group);
        if (rc || !MACHINE_HAS_TOPOLOGY)
                return rc;
        rc = sysfs_create_group(&cpu->dev.kobj, &topology_extra_cpu_attr_group);
        if (rc)
                sysfs_remove_group(&cpu->dev.kobj, &topology_cpu_attr_group);
        return rc;
}

static const struct cpumask *cpu_thread_mask(int cpu)
{
        return &cpu_topology[cpu].thread_mask;
}

const struct cpumask *cpu_coregroup_mask(int cpu)
{
        return &cpu_topology[cpu].core_mask;
}

static const struct cpumask *cpu_book_mask(int cpu)
{
        return &cpu_topology[cpu].book_mask;
}

static const struct cpumask *cpu_drawer_mask(int cpu)
{
        return &cpu_topology[cpu].drawer_mask;
}

static struct sched_domain_topology_level s390_topology[] = {
        { cpu_thread_mask, cpu_smt_flags, SD_INIT_NAME(SMT) },
        { cpu_coregroup_mask, cpu_core_flags, SD_INIT_NAME(MC) },
        { cpu_book_mask, SD_INIT_NAME(BOOK) },
        { cpu_drawer_mask, SD_INIT_NAME(DRAWER) },
        { cpu_cpu_mask, SD_INIT_NAME(DIE) },
        { NULL, },
};

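/*
 * The mag[] array of SYSINFO 15.1.x holds the maximum count of
 * topology-list entries nested at each level. alloc_masks() multiplies
 * the counts for all levels above the one selected by "offset"
 * (1 = socket, 2 = book, 3 = drawer) to get an upper bound on the number
 * of containers at that level, and allocates one mask_info per container.
 */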
static void __init alloc_masks(struct sysinfo_15_1_x *info,
                               struct mask_info *mask, int offset)
{
        int i, nr_masks;

        nr_masks = info->mag[TOPOLOGY_NR_MAG - offset];
        for (i = 0; i < info->mnest - offset; i++)
                nr_masks *= info->mag[TOPOLOGY_NR_MAG - offset - 1 - i];
        nr_masks = max(nr_masks, 1);
        for (i = 0; i < nr_masks; i++) {
                mask->next = memblock_alloc(sizeof(*mask->next), 8);
                mask = mask->next;
        }
}

void __init topology_init_early(void)
{
        struct sysinfo_15_1_x *info;

        set_sched_topology(s390_topology);
        if (topology_mode == TOPOLOGY_MODE_UNINITIALIZED) {
                if (MACHINE_HAS_TOPOLOGY)
                        topology_mode = TOPOLOGY_MODE_HW;
                else
                        topology_mode = TOPOLOGY_MODE_SINGLE;
        }
        if (!MACHINE_HAS_TOPOLOGY)
                goto out;
        tl_info = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
        info = tl_info;
        store_topology(info);
        pr_info("The CPU configuration topology of the machine is: %d %d %d %d %d %d / %d\n",
                info->mag[0], info->mag[1], info->mag[2], info->mag[3],
                info->mag[4], info->mag[5], info->mnest);
        alloc_masks(info, &socket_info, 1);
        alloc_masks(info, &book_info, 2);
        alloc_masks(info, &drawer_info, 3);
out:
        __arch_update_cpu_topology();
        __arch_update_dedicated_flag(NULL);
}

static inline int topology_get_mode(int enabled)
{
        if (!enabled)
                return TOPOLOGY_MODE_SINGLE;
        return MACHINE_HAS_TOPOLOGY ? TOPOLOGY_MODE_HW : TOPOLOGY_MODE_PACKAGE;
}

static inline int topology_is_enabled(void)
{
        return topology_mode != TOPOLOGY_MODE_SINGLE;
}

static int __init topology_setup(char *str)
{
        bool enabled;
        int rc;

        rc = kstrtobool(str, &enabled);
        if (rc)
                return rc;
        topology_mode = topology_get_mode(enabled);
        return 0;
}
early_param("topology", topology_setup);

static int topology_ctl_handler(struct ctl_table *ctl, int write,
                                void __user *buffer, size_t *lenp, loff_t *ppos)
{
        int enabled = topology_is_enabled();
        int new_mode;
        int zero = 0;
        int one = 1;
        int rc;
        struct ctl_table ctl_entry = {
                .procname = ctl->procname,
                .data = &enabled,
                .maxlen = sizeof(int),
                .extra1 = &zero,
                .extra2 = &one,
        };

        rc = proc_douintvec_minmax(&ctl_entry, write, buffer, lenp, ppos);
        if (rc < 0 || !write)
                return rc;

        mutex_lock(&smp_cpu_state_mutex);
        new_mode = topology_get_mode(enabled);
        if (topology_mode != new_mode) {
                topology_mode = new_mode;
                topology_schedule_update();
        }
        mutex_unlock(&smp_cpu_state_mutex);
        topology_flush_work();

        return rc;
}

static struct ctl_table topology_ctl_table[] = {
        {
                .procname = "topology",
                .mode = 0644,
                .proc_handler = topology_ctl_handler,
        },
        { },
};

static struct ctl_table topology_dir_table[] = {
        {
                .procname = "s390",
                .maxlen = 0,
                .mode = 0555,
                .child = topology_ctl_table,
        },
        { },
};

static int __init topology_init(void)
{
        timer_setup(&topology_timer, topology_timer_fn, TIMER_DEFERRABLE);
        if (MACHINE_HAS_TOPOLOGY)
                set_topology_timer();
        else
                topology_update_polarization_simple();
        register_sysctl_table(topology_dir_table);
        return device_create_file(cpu_subsys.dev_root, &dev_attr_dispatching);
}
device_initcall(topology_init);
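
/*
 * topology_init_early() must run while memblock is still available, since
 * it allocates tl_info and the per-level mask lists from memblock. The
 * device initcall above only arms the polling timer and registers the
 * user-visible interfaces: /proc/sys/s390/topology, the global
 * /sys/devices/system/cpu/dispatching attribute, and (via
 * topology_cpu_init()) the per-CPU "polarization" and "dedicated"
 * attributes.
 */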