// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * pseries CPU Hotplug infrastructure.
 *
 * Split out from arch/powerpc/platforms/pseries/setup.c
 * arch/powerpc/kernel/rtas.c, and arch/powerpc/platforms/pseries/smp.c
 *
 * Peter Bergner, IBM	March 2001.
 * Copyright (C) 2001 IBM.
 * Dave Engebretsen, Peter Bergner, and
 * Mike Corrigan {engebret|bergner|mikec}@us.ibm.com
 * Plus various changes from other IBM teams...
 *
 * Copyright (C) 2006 Michael Ellerman, IBM Corporation
 */

#define pr_fmt(fmt) "pseries-hotplug-cpu: " fmt

#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/sched.h>	/* for idle_task_exit */
#include <linux/sched/hotplug.h>
#include <linux/cpu.h>
#include <linux/of.h>
#include <linux/slab.h>
#include <asm/prom.h>
#include <asm/rtas.h>
#include <asm/firmware.h>
#include <asm/machdep.h>
#include <asm/vdso_datapage.h>
#include <asm/xics.h>
#include <asm/xive.h>
#include <asm/plpar_wrappers.h>
#include <asm/topology.h>

#include "pseries.h"

/* This version can't take the spinlock, because it never returns */
static int rtas_stop_self_token = RTAS_UNKNOWN_SERVICE;

/*
 * Record the CPU ids used on each node.
 * Protected by cpu_add_remove_lock.
 */
static cpumask_var_t node_recorded_ids_map[MAX_NUMNODES];

static void rtas_stop_self(void)
{
	static struct rtas_args args;

	local_irq_disable();

	BUG_ON(rtas_stop_self_token == RTAS_UNKNOWN_SERVICE);

	rtas_call_unlocked(&args, rtas_stop_self_token, 0, 1, NULL);

	panic("Alas, I survived.\n");
}

static void pseries_cpu_offline_self(void)
{
	unsigned int hwcpu = hard_smp_processor_id();

	local_irq_disable();
	idle_task_exit();
	if (xive_enabled())
		xive_teardown_cpu();
	else
		xics_teardown_cpu();

	unregister_slb_shadow(hwcpu);
	rtas_stop_self();

	/* Should never get here... */
	BUG();
	for (;;);
}

static int pseries_cpu_disable(void)
{
	int cpu = smp_processor_id();

	set_cpu_online(cpu, false);
	vdso_data->processorCount--;

	/* Fix boot_cpuid here. */
	if (cpu == boot_cpuid)
		boot_cpuid = cpumask_any(cpu_online_mask);

	/* FIXME: abstract this to not be platform specific later on */
	if (xive_enabled())
		xive_smp_disable_cpu();
	else
		xics_migrate_irqs_away();

	cleanup_cpu_mmu_context();

	return 0;
}
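
/*
 * Offline flow at a glance (illustrative sketch, not additional driver
 * code): the work is split across two contexts. The dying CPU runs
 * pseries_cpu_disable() and then pseries_cpu_offline_self(), which ends in
 * rtas_stop_self(); the thread driving the offline calls pseries_cpu_die()
 * below and polls smp_query_cpu_stopped() until firmware reports the
 * hardware thread as stopped:
 *
 *	dying CPU				controlling thread
 *	pseries_cpu_disable()
 *	pseries_cpu_offline_self()		pseries_cpu_die(cpu)
 *	  -> rtas_stop_self()			  -> smp_query_cpu_stopped(pcpu)
 */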

/*
 * pseries_cpu_die: Wait for the CPU to die.
 * @cpu: logical processor id of the CPU whose death we're awaiting.
 *
 * This function is called from the context of the thread which is performing
 * the cpu-offline. Here we wait for long enough to allow the cpu in question
 * to self-destruct so that the cpu-offline thread can send the CPU_DEAD
 * notifications.
 *
 * OTOH, pseries_cpu_offline_self() is called by the @cpu when it wants to
 * self-destruct.
 */
static void pseries_cpu_die(unsigned int cpu)
{
	int cpu_status = 1;
	unsigned int pcpu = get_hard_smp_processor_id(cpu);
	unsigned long timeout = jiffies + msecs_to_jiffies(120000);

	while (true) {
		cpu_status = smp_query_cpu_stopped(pcpu);
		if (cpu_status == QCSS_STOPPED ||
		    cpu_status == QCSS_HARDWARE_ERROR)
			break;

		if (time_after(jiffies, timeout)) {
			pr_warn("CPU %i (hwid %i) didn't die after 120 seconds\n",
				cpu, pcpu);
			timeout = jiffies + msecs_to_jiffies(120000);
		}

		cond_resched();
	}

	if (cpu_status == QCSS_HARDWARE_ERROR) {
		pr_warn("CPU %i (hwid %i) reported error while dying\n",
			cpu, pcpu);
	}

	paca_ptrs[cpu]->cpu_start = 0;
}

/**
 * find_cpu_id_range - find a linear range of @nthreads free CPU ids.
 * @nthreads : the number of threads (cpu ids)
 * @assigned_node : the node the ids should belong to, or NUMA_NO_NODE if
 *                  free ids from any node can be picked.
 * @cpu_mask: the returned CPU mask.
 *
 * Returns 0 on success.
 */
static int find_cpu_id_range(unsigned int nthreads, int assigned_node,
			     cpumask_var_t *cpu_mask)
{
	cpumask_var_t candidate_mask;
	unsigned int cpu, node;
	int rc = -ENOSPC;

	if (!zalloc_cpumask_var(&candidate_mask, GFP_KERNEL))
		return -ENOMEM;

	cpumask_clear(*cpu_mask);
	for (cpu = 0; cpu < nthreads; cpu++)
		cpumask_set_cpu(cpu, *cpu_mask);

	BUG_ON(!cpumask_subset(cpu_present_mask, cpu_possible_mask));

	/* Get a bitmap of unoccupied slots. */
	cpumask_xor(candidate_mask, cpu_possible_mask, cpu_present_mask);

	if (assigned_node != NUMA_NO_NODE) {
		/*
		 * Remove free ids previously assigned on the other nodes. We
		 * can walk only online nodes because once a node has come
		 * online, it is never taken offline again.
		 */
		for_each_online_node(node) {
			if (node == assigned_node)
				continue;
			cpumask_andnot(candidate_mask, candidate_mask,
				       node_recorded_ids_map[node]);
		}
	}

	if (cpumask_empty(candidate_mask))
		goto out;

	while (!cpumask_empty(*cpu_mask)) {
		if (cpumask_subset(*cpu_mask, candidate_mask))
			/* Found a range where we can insert the new cpu(s). */
			break;
		cpumask_shift_left(*cpu_mask, *cpu_mask, nthreads);
	}

	if (!cpumask_empty(*cpu_mask))
		rc = 0;

out:
	free_cpumask_var(candidate_mask);
	return rc;
}
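
/*
 * Worked example for find_cpu_id_range() (mask values are hypothetical):
 * with nthreads = 8, cpu_mask starts as ids 0-7. If any of 0-7 is already
 * present, or recorded against another node, the window is shifted to
 * 8-15, then 16-23, and so on, until it fits entirely inside
 * candidate_mask. Shifting past nr_cpu_ids leaves the mask empty and
 * -ENOSPC is returned.
 */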

/*
 * Update cpu_present_mask and paca(s) for a new cpu node. The wrinkle
 * here is that a cpu device node may represent multiple logical cpus
 * in the SMT case. We must honor the assumption in other code that
 * the logical ids for sibling SMT threads x and y are adjacent, such
 * that x^1 == y and y^1 == x.
 */
static int pseries_add_processor(struct device_node *np)
{
	int len, nthreads, node, cpu, assigned_node;
	int rc = 0;
	cpumask_var_t cpu_mask;
	const __be32 *intserv;

	intserv = of_get_property(np, "ibm,ppc-interrupt-server#s", &len);
	if (!intserv)
		return 0;

	nthreads = len / sizeof(u32);

	if (!alloc_cpumask_var(&cpu_mask, GFP_KERNEL))
		return -ENOMEM;

	/*
	 * Fetch from the DT nodes read by dlpar_configure_connector() the
	 * NUMA node id the added CPU belongs to.
	 */
	node = of_node_to_nid(np);
	if (node < 0 || !node_possible(node))
		node = first_online_node;

	BUG_ON(node == NUMA_NO_NODE);
	assigned_node = node;

	cpu_maps_update_begin();

	rc = find_cpu_id_range(nthreads, node, &cpu_mask);
	if (rc && nr_node_ids > 1) {
		/*
		 * Try again, considering the free CPU ids from the other
		 * nodes.
		 */
		node = NUMA_NO_NODE;
		rc = find_cpu_id_range(nthreads, NUMA_NO_NODE, &cpu_mask);
	}

	if (rc) {
		pr_err("Cannot add cpu %pOF; this system configuration supports %d logical cpus.\n",
		       np, num_possible_cpus());
		goto out;
	}

	for_each_cpu(cpu, cpu_mask) {
		BUG_ON(cpu_present(cpu));
		set_cpu_present(cpu, true);
		set_hard_smp_processor_id(cpu, be32_to_cpu(*intserv++));
	}

	/* Record the newly used CPU ids for the associated node. */
	cpumask_or(node_recorded_ids_map[assigned_node],
		   node_recorded_ids_map[assigned_node], cpu_mask);

	/*
	 * If node is set to NUMA_NO_NODE, CPU ids have been reused from
	 * another node; remove them from that node's mask.
	 */
	if (node == NUMA_NO_NODE) {
		cpu = cpumask_first(cpu_mask);
		pr_warn("Reusing free CPU ids %d-%d from another node\n",
			cpu, cpu + nthreads - 1);
		for_each_online_node(node) {
			if (node == assigned_node)
				continue;
			cpumask_andnot(node_recorded_ids_map[node],
				       node_recorded_ids_map[node],
				       cpu_mask);
		}
	}

out:
	cpu_maps_update_done();
	free_cpumask_var(cpu_mask);
	return rc;
}

/*
 * Update the present map for a cpu node which is going away, and set
 * the hard id in the paca(s) to -1 to be consistent with boot time
 * convention for non-present cpus.
 */
static void pseries_remove_processor(struct device_node *np)
{
	unsigned int cpu;
	int len, nthreads, i;
	const __be32 *intserv;
	u32 thread;

	intserv = of_get_property(np, "ibm,ppc-interrupt-server#s", &len);
	if (!intserv)
		return;

	nthreads = len / sizeof(u32);

	cpu_maps_update_begin();
	for (i = 0; i < nthreads; i++) {
		thread = be32_to_cpu(intserv[i]);
		for_each_present_cpu(cpu) {
			if (get_hard_smp_processor_id(cpu) != thread)
				continue;
			BUG_ON(cpu_online(cpu));
			set_cpu_present(cpu, false);
			set_hard_smp_processor_id(cpu, -1);
			update_numa_cpu_lookup_table(cpu, -1);
			break;
		}
		if (cpu >= nr_cpu_ids)
			pr_warn("Could not find cpu to remove with physical id 0x%x\n",
				thread);
	}
	cpu_maps_update_done();
}
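
/*
 * Example device tree property (the values are illustrative): an SMT4 core
 * exposes one "ibm,ppc-interrupt-server#s" entry per thread, e.g.
 *
 *	ibm,ppc-interrupt-server#s = <0x20 0x21 0x22 0x23>;
 *
 * so nthreads = len / sizeof(u32) = 4, and each big-endian entry is the
 * hardware thread id matched against get_hard_smp_processor_id() in the
 * helpers below.
 */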

static int dlpar_offline_cpu(struct device_node *dn)
{
	int rc = 0;
	unsigned int cpu;
	int len, nthreads, i;
	const __be32 *intserv;
	u32 thread;

	intserv = of_get_property(dn, "ibm,ppc-interrupt-server#s", &len);
	if (!intserv)
		return -EINVAL;

	nthreads = len / sizeof(u32);

	cpu_maps_update_begin();
	for (i = 0; i < nthreads; i++) {
		thread = be32_to_cpu(intserv[i]);
		for_each_present_cpu(cpu) {
			if (get_hard_smp_processor_id(cpu) != thread)
				continue;

			if (!cpu_online(cpu))
				break;

			/*
			 * device_offline() will return -EBUSY (via
			 * cpu_down()) if there is only one CPU left. Check
			 * it here to fail earlier and with a more
			 * informative error message, while also retaining
			 * the cpu_add_remove_lock to be sure that no CPUs
			 * are being onlined/offlined during this check.
			 */
			if (num_online_cpus() == 1) {
				pr_warn("Unable to remove last online CPU %pOFn\n", dn);
				rc = -EBUSY;
				goto out_unlock;
			}

			cpu_maps_update_done();
			rc = device_offline(get_cpu_device(cpu));
			if (rc)
				goto out;
			cpu_maps_update_begin();
			break;
		}
		if (cpu >= nr_cpu_ids) {
			pr_warn("Could not find cpu to offline with physical id 0x%x\n",
				thread);
		}
	}
out_unlock:
	cpu_maps_update_done();

out:
	return rc;
}

static int dlpar_online_cpu(struct device_node *dn)
{
	int rc = 0;
	unsigned int cpu;
	int len, nthreads, i;
	const __be32 *intserv;
	u32 thread;

	intserv = of_get_property(dn, "ibm,ppc-interrupt-server#s", &len);
	if (!intserv)
		return -EINVAL;

	nthreads = len / sizeof(u32);

	cpu_maps_update_begin();
	for (i = 0; i < nthreads; i++) {
		thread = be32_to_cpu(intserv[i]);
		for_each_present_cpu(cpu) {
			if (get_hard_smp_processor_id(cpu) != thread)
				continue;
			cpu_maps_update_done();
			find_and_update_cpu_nid(cpu);
			rc = device_online(get_cpu_device(cpu));
			if (rc) {
				dlpar_offline_cpu(dn);
				goto out;
			}
			cpu_maps_update_begin();
			break;
		}
		if (cpu >= nr_cpu_ids)
			pr_warn("Could not find cpu to online with physical id 0x%x\n",
				thread);
	}
	cpu_maps_update_done();

out:
	return rc;
}

static bool dlpar_cpu_exists(struct device_node *parent, u32 drc_index)
{
	struct device_node *child = NULL;
	u32 my_drc_index;
	bool found;
	int rc;

	/* Assume the cpu doesn't exist. */
	found = false;

	for_each_child_of_node(parent, child) {
		rc = of_property_read_u32(child, "ibm,my-drc-index",
					  &my_drc_index);
		if (rc)
			continue;

		if (my_drc_index == drc_index) {
			of_node_put(child);
			found = true;
			break;
		}
	}

	return found;
}

static bool drc_info_valid_index(struct device_node *parent, u32 drc_index)
{
	struct property *info;
	struct of_drc_info drc;
	const __be32 *value;
	u32 index;
	int count, i, j;

	info = of_find_property(parent, "ibm,drc-info", NULL);
	if (!info)
		return false;

	value = of_prop_next_u32(info, NULL, &count);

	/* The first value of ibm,drc-info is the number of drc-info records. */
	if (value)
		value++;
	else
		return false;

	for (i = 0; i < count; i++) {
		if (of_read_drc_info_cell(&info, &value, &drc))
			return false;

		if (strncmp(drc.drc_type, "CPU", 3))
			break;

		if (drc_index > drc.last_drc_index)
			continue;

		index = drc.drc_index_start;
		for (j = 0; j < drc.num_sequential_elems; j++) {
			if (drc_index == index)
				return true;

			index += drc.sequential_inc;
		}
	}

	return false;
}
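
/*
 * Sketch of the "ibm,drc-info" walk above (the record values are
 * hypothetical): a record with drc_type "CPU", drc_index_start 0x10000000,
 * sequential_inc 8 and num_sequential_elems 32 covers the valid indexes
 * 0x10000000, 0x10000008, ..., 0x100000f8; the inner loop steps through
 * exactly that sequence looking for a match.
 */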

static bool valid_cpu_drc_index(struct device_node *parent, u32 drc_index)
{
	bool found = false;
	int rc, index;

	if (of_find_property(parent, "ibm,drc-info", NULL))
		return drc_info_valid_index(parent, drc_index);

	/*
	 * Note that the format of the ibm,drc-indexes array is the number of
	 * entries in the array followed by the array of drc values, so we
	 * start looking at index = 1.
	 */
	index = 1;
	while (!found) {
		u32 drc;

		rc = of_property_read_u32_index(parent, "ibm,drc-indexes",
						index++, &drc);
		if (rc)
			break;

		if (drc == drc_index)
			found = true;
	}

	return found;
}

static int pseries_cpuhp_attach_nodes(struct device_node *dn)
{
	struct of_changeset cs;
	int ret;

	/*
	 * This device node is unattached but may have siblings; open-code the
	 * traversal.
	 */
	for (of_changeset_init(&cs); dn != NULL; dn = dn->sibling) {
		ret = of_changeset_attach_node(&cs, dn);
		if (ret)
			goto out;
	}

	ret = of_changeset_apply(&cs);
out:
	of_changeset_destroy(&cs);
	return ret;
}

static ssize_t dlpar_cpu_add(u32 drc_index)
{
	struct device_node *dn, *parent;
	int rc, saved_rc;

	pr_debug("Attempting to add CPU, drc index: %x\n", drc_index);

	parent = of_find_node_by_path("/cpus");
	if (!parent) {
		pr_warn("Failed to find CPU root node \"/cpus\"\n");
		return -ENODEV;
	}

	if (dlpar_cpu_exists(parent, drc_index)) {
		of_node_put(parent);
		pr_warn("CPU with drc index %x already exists\n", drc_index);
		return -EINVAL;
	}

	if (!valid_cpu_drc_index(parent, drc_index)) {
		of_node_put(parent);
		pr_warn("Cannot find CPU (drc index %x) to add.\n", drc_index);
		return -EINVAL;
	}

	rc = dlpar_acquire_drc(drc_index);
	if (rc) {
		pr_warn("Failed to acquire DRC, rc: %d, drc index: %x\n",
			rc, drc_index);
		of_node_put(parent);
		return -EINVAL;
	}

	dn = dlpar_configure_connector(cpu_to_be32(drc_index), parent);
	if (!dn) {
		pr_warn("Failed call to configure-connector, drc index: %x\n",
			drc_index);
		dlpar_release_drc(drc_index);
		of_node_put(parent);
		return -EINVAL;
	}

	rc = pseries_cpuhp_attach_nodes(dn);

	/* Regardless, we are done with parent now. */
	of_node_put(parent);

	if (rc) {
		saved_rc = rc;
		pr_warn("Failed to attach node %pOFn, rc: %d, drc index: %x\n",
			dn, rc, drc_index);

		rc = dlpar_release_drc(drc_index);
		if (!rc)
			dlpar_free_cc_nodes(dn);

		return saved_rc;
	}

	update_numa_distance(dn);

	rc = dlpar_online_cpu(dn);
	if (rc) {
		saved_rc = rc;
		pr_warn("Failed to online cpu %pOFn, rc: %d, drc index: %x\n",
			dn, rc, drc_index);

		rc = dlpar_detach_node(dn);
		if (!rc)
			dlpar_release_drc(drc_index);

		return saved_rc;
	}

	pr_debug("Successfully added CPU %pOFn, drc index: %x\n", dn,
		 drc_index);
	return rc;
}
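
/*
 * On removal, cache device nodes that only the departing CPU referenced
 * must be pruned as well. Hypothetical topology as an example: if two cpu
 * nodes point at a shared L2 node through their "l2-cache" phandles,
 * pseries_cpuhp_cache_use_count() below reports a use count of 2 and the
 * node is kept; a private L2 with a use count of 1 is detached together
 * with the cpu node by pseries_cpuhp_detach_nodes().
 */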

static unsigned int pseries_cpuhp_cache_use_count(const struct device_node *cachedn)
{
	unsigned int use_count = 0;
	struct device_node *dn;

	WARN_ON(!of_node_is_type(cachedn, "cache"));

	for_each_of_cpu_node(dn) {
		if (of_find_next_cache_node(dn) == cachedn)
			use_count++;
	}

	for_each_node_by_type(dn, "cache") {
		if (of_find_next_cache_node(dn) == cachedn)
			use_count++;
	}

	return use_count;
}

static int pseries_cpuhp_detach_nodes(struct device_node *cpudn)
{
	struct device_node *dn;
	struct of_changeset cs;
	int ret = 0;

	of_changeset_init(&cs);
	ret = of_changeset_detach_node(&cs, cpudn);
	if (ret)
		goto out;

	dn = cpudn;
	while ((dn = of_find_next_cache_node(dn))) {
		if (pseries_cpuhp_cache_use_count(dn) > 1)
			break;

		ret = of_changeset_detach_node(&cs, dn);
		if (ret)
			goto out;
	}

	ret = of_changeset_apply(&cs);
out:
	of_changeset_destroy(&cs);
	return ret;
}

static ssize_t dlpar_cpu_remove(struct device_node *dn, u32 drc_index)
{
	int rc;

	pr_debug("Attempting to remove CPU %pOFn, drc index: %x\n",
		 dn, drc_index);

	rc = dlpar_offline_cpu(dn);
	if (rc) {
		pr_warn("Failed to offline CPU %pOFn, rc: %d\n", dn, rc);
		return -EINVAL;
	}

	rc = dlpar_release_drc(drc_index);
	if (rc) {
		pr_warn("Failed to release drc (%x) for CPU %pOFn, rc: %d\n",
			drc_index, dn, rc);
		dlpar_online_cpu(dn);
		return rc;
	}

	rc = pseries_cpuhp_detach_nodes(dn);
	if (rc) {
		int saved_rc = rc;

		pr_warn("Failed to detach CPU %pOFn, rc: %d\n", dn, rc);

		rc = dlpar_acquire_drc(drc_index);
		if (!rc)
			dlpar_online_cpu(dn);

		return saved_rc;
	}

	pr_debug("Successfully removed CPU, drc index: %x\n", drc_index);
	return 0;
}

static struct device_node *cpu_drc_index_to_dn(u32 drc_index)
{
	struct device_node *dn;
	u32 my_index;
	int rc;

	for_each_node_by_type(dn, "cpu") {
		rc = of_property_read_u32(dn, "ibm,my-drc-index", &my_index);
		if (rc)
			continue;

		if (my_index == drc_index)
			break;
	}

	return dn;
}

static int dlpar_cpu_remove_by_index(u32 drc_index)
{
	struct device_node *dn;
	int rc;

	dn = cpu_drc_index_to_dn(drc_index);
	if (!dn) {
		pr_warn("Cannot find CPU (drc index %x) to remove\n",
			drc_index);
		return -ENODEV;
	}

	rc = dlpar_cpu_remove(dn, drc_index);
	of_node_put(dn);
	return rc;
}

int dlpar_cpu(struct pseries_hp_errorlog *hp_elog)
{
	u32 drc_index;
	int rc;

	drc_index = hp_elog->_drc_u.drc_index;

	lock_device_hotplug();

	switch (hp_elog->action) {
	case PSERIES_HP_ELOG_ACTION_REMOVE:
		if (hp_elog->id_type == PSERIES_HP_ELOG_ID_DRC_INDEX) {
			rc = dlpar_cpu_remove_by_index(drc_index);
			/*
			 * Setting the isolation state of an UNISOLATED/CONFIGURED
			 * device to UNISOLATE is a no-op, but the hypervisor can
			 * use it as a hint that the CPU removal failed.
			 */
			if (rc)
				dlpar_unisolate_drc(drc_index);
		} else {
			rc = -EINVAL;
		}
		break;
	case PSERIES_HP_ELOG_ACTION_ADD:
		if (hp_elog->id_type == PSERIES_HP_ELOG_ID_DRC_INDEX)
			rc = dlpar_cpu_add(drc_index);
		else
			rc = -EINVAL;
		break;
	default:
		pr_err("Invalid action (%d) specified\n", hp_elog->action);
		rc = -EINVAL;
		break;
	}

	unlock_device_hotplug();
	return rc;
}
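
/*
 * With CONFIG_ARCH_CPU_PROBE_RELEASE, the two handlers below are hooked up
 * to the generic /sys/devices/system/cpu/probe and .../release attributes
 * via ppc_md in pseries_cpu_hotplug_init(). Illustrative usage (the drc
 * index and node path are made-up values):
 *
 *	# echo 0x10000008 > /sys/devices/system/cpu/probe
 *	# echo /cpus/PowerPC,POWER9@8 > /sys/devices/system/cpu/release
 */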

#ifdef CONFIG_ARCH_CPU_PROBE_RELEASE

static ssize_t dlpar_cpu_probe(const char *buf, size_t count)
{
	u32 drc_index;
	int rc;

	rc = kstrtou32(buf, 0, &drc_index);
	if (rc)
		return -EINVAL;

	rc = dlpar_cpu_add(drc_index);

	return rc ? rc : count;
}

static ssize_t dlpar_cpu_release(const char *buf, size_t count)
{
	struct device_node *dn;
	u32 drc_index;
	int rc;

	dn = of_find_node_by_path(buf);
	if (!dn)
		return -EINVAL;

	rc = of_property_read_u32(dn, "ibm,my-drc-index", &drc_index);
	if (rc) {
		of_node_put(dn);
		return -EINVAL;
	}

	rc = dlpar_cpu_remove(dn, drc_index);
	of_node_put(dn);

	return rc ? rc : count;
}

#endif /* CONFIG_ARCH_CPU_PROBE_RELEASE */

static int pseries_smp_notifier(struct notifier_block *nb,
				unsigned long action, void *data)
{
	struct of_reconfig_data *rd = data;
	int err = 0;

	switch (action) {
	case OF_RECONFIG_ATTACH_NODE:
		err = pseries_add_processor(rd->dn);
		break;
	case OF_RECONFIG_DETACH_NODE:
		pseries_remove_processor(rd->dn);
		break;
	}
	return notifier_from_errno(err);
}

static struct notifier_block pseries_smp_nb = {
	.notifier_call = pseries_smp_notifier,
};

static int __init pseries_cpu_hotplug_init(void)
{
	int qcss_tok;
	unsigned int node;

#ifdef CONFIG_ARCH_CPU_PROBE_RELEASE
	ppc_md.cpu_probe = dlpar_cpu_probe;
	ppc_md.cpu_release = dlpar_cpu_release;
#endif /* CONFIG_ARCH_CPU_PROBE_RELEASE */

	rtas_stop_self_token = rtas_token("stop-self");
	qcss_tok = rtas_token("query-cpu-stopped-state");

	if (rtas_stop_self_token == RTAS_UNKNOWN_SERVICE ||
	    qcss_tok == RTAS_UNKNOWN_SERVICE) {
		pr_info("CPU Hotplug not supported by firmware - disabling.\n");
		return 0;
	}

	smp_ops->cpu_offline_self = pseries_cpu_offline_self;
	smp_ops->cpu_disable = pseries_cpu_disable;
	smp_ops->cpu_die = pseries_cpu_die;

	/* Processors can be added/removed only on LPAR. */
	if (firmware_has_feature(FW_FEATURE_LPAR)) {
		for_each_node(node) {
			if (!alloc_cpumask_var_node(&node_recorded_ids_map[node],
						    GFP_KERNEL, node))
				return -ENOMEM;

			/* Record the ids of CPUs added at boot time. */
			cpumask_copy(node_recorded_ids_map[node],
				     cpumask_of_node(node));
		}

		of_reconfig_notifier_register(&pseries_smp_nb);
	}

	return 0;
}
machine_arch_initcall(pseries, pseries_cpu_hotplug_init);