1 // SPDX-License-Identifier: GPL-2.0-or-later 2 /* 3 * pseries CPU Hotplug infrastructure. 4 * 5 * Split out from arch/powerpc/platforms/pseries/setup.c 6 * arch/powerpc/kernel/rtas.c, and arch/powerpc/platforms/pseries/smp.c 7 * 8 * Peter Bergner, IBM March 2001. 9 * Copyright (C) 2001 IBM. 10 * Dave Engebretsen, Peter Bergner, and 11 * Mike Corrigan {engebret|bergner|mikec}@us.ibm.com 12 * Plus various changes from other IBM teams... 13 * 14 * Copyright (C) 2006 Michael Ellerman, IBM Corporation 15 */ 16 17 #define pr_fmt(fmt) "pseries-hotplug-cpu: " fmt 18 19 #include <linux/kernel.h> 20 #include <linux/interrupt.h> 21 #include <linux/delay.h> 22 #include <linux/sched.h> /* for idle_task_exit */ 23 #include <linux/sched/hotplug.h> 24 #include <linux/cpu.h> 25 #include <linux/of.h> 26 #include <linux/slab.h> 27 #include <asm/prom.h> 28 #include <asm/rtas.h> 29 #include <asm/firmware.h> 30 #include <asm/machdep.h> 31 #include <asm/vdso_datapage.h> 32 #include <asm/xics.h> 33 #include <asm/xive.h> 34 #include <asm/plpar_wrappers.h> 35 #include <asm/topology.h> 36 37 #include "pseries.h" 38 39 /* This version can't take the spinlock, because it never returns */ 40 static int rtas_stop_self_token = RTAS_UNKNOWN_SERVICE; 41 42 static void rtas_stop_self(void) 43 { 44 static struct rtas_args args; 45 46 local_irq_disable(); 47 48 BUG_ON(rtas_stop_self_token == RTAS_UNKNOWN_SERVICE); 49 50 printk("cpu %u (hwid %u) Ready to die...\n", 51 smp_processor_id(), hard_smp_processor_id()); 52 53 rtas_call_unlocked(&args, rtas_stop_self_token, 0, 1, NULL); 54 55 panic("Alas, I survived.\n"); 56 } 57 58 static void pseries_cpu_offline_self(void) 59 { 60 unsigned int hwcpu = hard_smp_processor_id(); 61 62 local_irq_disable(); 63 idle_task_exit(); 64 if (xive_enabled()) 65 xive_teardown_cpu(); 66 else 67 xics_teardown_cpu(); 68 69 unregister_slb_shadow(hwcpu); 70 rtas_stop_self(); 71 72 /* Should never get here... */ 73 BUG(); 74 for(;;); 75 } 76 77 static int pseries_cpu_disable(void) 78 { 79 int cpu = smp_processor_id(); 80 81 set_cpu_online(cpu, false); 82 vdso_data->processorCount--; 83 84 /*fix boot_cpuid here*/ 85 if (cpu == boot_cpuid) 86 boot_cpuid = cpumask_any(cpu_online_mask); 87 88 /* FIXME: abstract this to not be platform specific later on */ 89 if (xive_enabled()) 90 xive_smp_disable_cpu(); 91 else 92 xics_migrate_irqs_away(); 93 94 cleanup_cpu_mmu_context(); 95 96 return 0; 97 } 98 99 /* 100 * pseries_cpu_die: Wait for the cpu to die. 101 * @cpu: logical processor id of the CPU whose death we're awaiting. 102 * 103 * This function is called from the context of the thread which is performing 104 * the cpu-offline. Here we wait for long enough to allow the cpu in question 105 * to self-destroy so that the cpu-offline thread can send the CPU_DEAD 106 * notifications. 107 * 108 * OTOH, pseries_cpu_offline_self() is called by the @cpu when it wants to 109 * self-destruct. 110 */ 111 static void pseries_cpu_die(unsigned int cpu) 112 { 113 int cpu_status = 1; 114 unsigned int pcpu = get_hard_smp_processor_id(cpu); 115 unsigned long timeout = jiffies + msecs_to_jiffies(120000); 116 117 while (true) { 118 cpu_status = smp_query_cpu_stopped(pcpu); 119 if (cpu_status == QCSS_STOPPED || 120 cpu_status == QCSS_HARDWARE_ERROR) 121 break; 122 123 if (time_after(jiffies, timeout)) { 124 pr_warn("CPU %i (hwid %i) didn't die after 120 seconds\n", 125 cpu, pcpu); 126 timeout = jiffies + msecs_to_jiffies(120000); 127 } 128 129 cond_resched(); 130 } 131 132 if (cpu_status == QCSS_HARDWARE_ERROR) { 133 pr_warn("CPU %i (hwid %i) reported error while dying\n", 134 cpu, pcpu); 135 } 136 137 /* Isolation and deallocation are definitely done by 138 * drslot_chrp_cpu. If they were not they would be 139 * done here. Change isolate state to Isolate and 140 * change allocation-state to Unusable. 141 */ 142 paca_ptrs[cpu]->cpu_start = 0; 143 } 144 145 /* 146 * Update cpu_present_mask and paca(s) for a new cpu node. The wrinkle 147 * here is that a cpu device node may represent up to two logical cpus 148 * in the SMT case. We must honor the assumption in other code that 149 * the logical ids for sibling SMT threads x and y are adjacent, such 150 * that x^1 == y and y^1 == x. 151 */ 152 static int pseries_add_processor(struct device_node *np) 153 { 154 unsigned int cpu; 155 cpumask_var_t candidate_mask, tmp; 156 int err = -ENOSPC, len, nthreads, i; 157 const __be32 *intserv; 158 159 intserv = of_get_property(np, "ibm,ppc-interrupt-server#s", &len); 160 if (!intserv) 161 return 0; 162 163 zalloc_cpumask_var(&candidate_mask, GFP_KERNEL); 164 zalloc_cpumask_var(&tmp, GFP_KERNEL); 165 166 nthreads = len / sizeof(u32); 167 for (i = 0; i < nthreads; i++) 168 cpumask_set_cpu(i, tmp); 169 170 cpu_maps_update_begin(); 171 172 BUG_ON(!cpumask_subset(cpu_present_mask, cpu_possible_mask)); 173 174 /* Get a bitmap of unoccupied slots. */ 175 cpumask_xor(candidate_mask, cpu_possible_mask, cpu_present_mask); 176 if (cpumask_empty(candidate_mask)) { 177 /* If we get here, it most likely means that NR_CPUS is 178 * less than the partition's max processors setting. 179 */ 180 printk(KERN_ERR "Cannot add cpu %pOF; this system configuration" 181 " supports %d logical cpus.\n", np, 182 num_possible_cpus()); 183 goto out_unlock; 184 } 185 186 while (!cpumask_empty(tmp)) 187 if (cpumask_subset(tmp, candidate_mask)) 188 /* Found a range where we can insert the new cpu(s) */ 189 break; 190 else 191 cpumask_shift_left(tmp, tmp, nthreads); 192 193 if (cpumask_empty(tmp)) { 194 printk(KERN_ERR "Unable to find space in cpu_present_mask for" 195 " processor %pOFn with %d thread(s)\n", np, 196 nthreads); 197 goto out_unlock; 198 } 199 200 for_each_cpu(cpu, tmp) { 201 BUG_ON(cpu_present(cpu)); 202 set_cpu_present(cpu, true); 203 set_hard_smp_processor_id(cpu, be32_to_cpu(*intserv++)); 204 } 205 err = 0; 206 out_unlock: 207 cpu_maps_update_done(); 208 free_cpumask_var(candidate_mask); 209 free_cpumask_var(tmp); 210 return err; 211 } 212 213 /* 214 * Update the present map for a cpu node which is going away, and set 215 * the hard id in the paca(s) to -1 to be consistent with boot time 216 * convention for non-present cpus. 217 */ 218 static void pseries_remove_processor(struct device_node *np) 219 { 220 unsigned int cpu; 221 int len, nthreads, i; 222 const __be32 *intserv; 223 u32 thread; 224 225 intserv = of_get_property(np, "ibm,ppc-interrupt-server#s", &len); 226 if (!intserv) 227 return; 228 229 nthreads = len / sizeof(u32); 230 231 cpu_maps_update_begin(); 232 for (i = 0; i < nthreads; i++) { 233 thread = be32_to_cpu(intserv[i]); 234 for_each_present_cpu(cpu) { 235 if (get_hard_smp_processor_id(cpu) != thread) 236 continue; 237 BUG_ON(cpu_online(cpu)); 238 set_cpu_present(cpu, false); 239 set_hard_smp_processor_id(cpu, -1); 240 update_numa_cpu_lookup_table(cpu, -1); 241 break; 242 } 243 if (cpu >= nr_cpu_ids) 244 printk(KERN_WARNING "Could not find cpu to remove " 245 "with physical id 0x%x\n", thread); 246 } 247 cpu_maps_update_done(); 248 } 249 250 static int dlpar_offline_cpu(struct device_node *dn) 251 { 252 int rc = 0; 253 unsigned int cpu; 254 int len, nthreads, i; 255 const __be32 *intserv; 256 u32 thread; 257 258 intserv = of_get_property(dn, "ibm,ppc-interrupt-server#s", &len); 259 if (!intserv) 260 return -EINVAL; 261 262 nthreads = len / sizeof(u32); 263 264 cpu_maps_update_begin(); 265 for (i = 0; i < nthreads; i++) { 266 thread = be32_to_cpu(intserv[i]); 267 for_each_present_cpu(cpu) { 268 if (get_hard_smp_processor_id(cpu) != thread) 269 continue; 270 271 if (!cpu_online(cpu)) 272 break; 273 274 cpu_maps_update_done(); 275 rc = device_offline(get_cpu_device(cpu)); 276 if (rc) 277 goto out; 278 cpu_maps_update_begin(); 279 break; 280 } 281 if (cpu == num_possible_cpus()) { 282 pr_warn("Could not find cpu to offline with physical id 0x%x\n", 283 thread); 284 } 285 } 286 cpu_maps_update_done(); 287 288 out: 289 return rc; 290 } 291 292 static int dlpar_online_cpu(struct device_node *dn) 293 { 294 int rc = 0; 295 unsigned int cpu; 296 int len, nthreads, i; 297 const __be32 *intserv; 298 u32 thread; 299 300 intserv = of_get_property(dn, "ibm,ppc-interrupt-server#s", &len); 301 if (!intserv) 302 return -EINVAL; 303 304 nthreads = len / sizeof(u32); 305 306 cpu_maps_update_begin(); 307 for (i = 0; i < nthreads; i++) { 308 thread = be32_to_cpu(intserv[i]); 309 for_each_present_cpu(cpu) { 310 if (get_hard_smp_processor_id(cpu) != thread) 311 continue; 312 cpu_maps_update_done(); 313 find_and_online_cpu_nid(cpu); 314 rc = device_online(get_cpu_device(cpu)); 315 if (rc) { 316 dlpar_offline_cpu(dn); 317 goto out; 318 } 319 cpu_maps_update_begin(); 320 321 break; 322 } 323 if (cpu == num_possible_cpus()) 324 printk(KERN_WARNING "Could not find cpu to online " 325 "with physical id 0x%x\n", thread); 326 } 327 cpu_maps_update_done(); 328 329 out: 330 return rc; 331 332 } 333 334 static bool dlpar_cpu_exists(struct device_node *parent, u32 drc_index) 335 { 336 struct device_node *child = NULL; 337 u32 my_drc_index; 338 bool found; 339 int rc; 340 341 /* Assume cpu doesn't exist */ 342 found = false; 343 344 for_each_child_of_node(parent, child) { 345 rc = of_property_read_u32(child, "ibm,my-drc-index", 346 &my_drc_index); 347 if (rc) 348 continue; 349 350 if (my_drc_index == drc_index) { 351 of_node_put(child); 352 found = true; 353 break; 354 } 355 } 356 357 return found; 358 } 359 360 static bool drc_info_valid_index(struct device_node *parent, u32 drc_index) 361 { 362 struct property *info; 363 struct of_drc_info drc; 364 const __be32 *value; 365 u32 index; 366 int count, i, j; 367 368 info = of_find_property(parent, "ibm,drc-info", NULL); 369 if (!info) 370 return false; 371 372 value = of_prop_next_u32(info, NULL, &count); 373 374 /* First value of ibm,drc-info is number of drc-info records */ 375 if (value) 376 value++; 377 else 378 return false; 379 380 for (i = 0; i < count; i++) { 381 if (of_read_drc_info_cell(&info, &value, &drc)) 382 return false; 383 384 if (strncmp(drc.drc_type, "CPU", 3)) 385 break; 386 387 if (drc_index > drc.last_drc_index) 388 continue; 389 390 index = drc.drc_index_start; 391 for (j = 0; j < drc.num_sequential_elems; j++) { 392 if (drc_index == index) 393 return true; 394 395 index += drc.sequential_inc; 396 } 397 } 398 399 return false; 400 } 401 402 static bool valid_cpu_drc_index(struct device_node *parent, u32 drc_index) 403 { 404 bool found = false; 405 int rc, index; 406 407 if (of_find_property(parent, "ibm,drc-info", NULL)) 408 return drc_info_valid_index(parent, drc_index); 409 410 /* Note that the format of the ibm,drc-indexes array is 411 * the number of entries in the array followed by the array 412 * of drc values so we start looking at index = 1. 413 */ 414 index = 1; 415 while (!found) { 416 u32 drc; 417 418 rc = of_property_read_u32_index(parent, "ibm,drc-indexes", 419 index++, &drc); 420 421 if (rc) 422 break; 423 424 if (drc == drc_index) 425 found = true; 426 } 427 428 return found; 429 } 430 431 static ssize_t dlpar_cpu_add(u32 drc_index) 432 { 433 struct device_node *dn, *parent; 434 int rc, saved_rc; 435 436 pr_debug("Attempting to add CPU, drc index: %x\n", drc_index); 437 438 parent = of_find_node_by_path("/cpus"); 439 if (!parent) { 440 pr_warn("Failed to find CPU root node \"/cpus\"\n"); 441 return -ENODEV; 442 } 443 444 if (dlpar_cpu_exists(parent, drc_index)) { 445 of_node_put(parent); 446 pr_warn("CPU with drc index %x already exists\n", drc_index); 447 return -EINVAL; 448 } 449 450 if (!valid_cpu_drc_index(parent, drc_index)) { 451 of_node_put(parent); 452 pr_warn("Cannot find CPU (drc index %x) to add.\n", drc_index); 453 return -EINVAL; 454 } 455 456 rc = dlpar_acquire_drc(drc_index); 457 if (rc) { 458 pr_warn("Failed to acquire DRC, rc: %d, drc index: %x\n", 459 rc, drc_index); 460 of_node_put(parent); 461 return -EINVAL; 462 } 463 464 dn = dlpar_configure_connector(cpu_to_be32(drc_index), parent); 465 if (!dn) { 466 pr_warn("Failed call to configure-connector, drc index: %x\n", 467 drc_index); 468 dlpar_release_drc(drc_index); 469 of_node_put(parent); 470 return -EINVAL; 471 } 472 473 rc = dlpar_attach_node(dn, parent); 474 475 /* Regardless we are done with parent now */ 476 of_node_put(parent); 477 478 if (rc) { 479 saved_rc = rc; 480 pr_warn("Failed to attach node %pOFn, rc: %d, drc index: %x\n", 481 dn, rc, drc_index); 482 483 rc = dlpar_release_drc(drc_index); 484 if (!rc) 485 dlpar_free_cc_nodes(dn); 486 487 return saved_rc; 488 } 489 490 rc = dlpar_online_cpu(dn); 491 if (rc) { 492 saved_rc = rc; 493 pr_warn("Failed to online cpu %pOFn, rc: %d, drc index: %x\n", 494 dn, rc, drc_index); 495 496 rc = dlpar_detach_node(dn); 497 if (!rc) 498 dlpar_release_drc(drc_index); 499 500 return saved_rc; 501 } 502 503 pr_debug("Successfully added CPU %pOFn, drc index: %x\n", dn, 504 drc_index); 505 return rc; 506 } 507 508 static ssize_t dlpar_cpu_remove(struct device_node *dn, u32 drc_index) 509 { 510 int rc; 511 512 pr_debug("Attempting to remove CPU %pOFn, drc index: %x\n", 513 dn, drc_index); 514 515 rc = dlpar_offline_cpu(dn); 516 if (rc) { 517 pr_warn("Failed to offline CPU %pOFn, rc: %d\n", dn, rc); 518 return -EINVAL; 519 } 520 521 rc = dlpar_release_drc(drc_index); 522 if (rc) { 523 pr_warn("Failed to release drc (%x) for CPU %pOFn, rc: %d\n", 524 drc_index, dn, rc); 525 dlpar_online_cpu(dn); 526 return rc; 527 } 528 529 rc = dlpar_detach_node(dn); 530 if (rc) { 531 int saved_rc = rc; 532 533 pr_warn("Failed to detach CPU %pOFn, rc: %d", dn, rc); 534 535 rc = dlpar_acquire_drc(drc_index); 536 if (!rc) 537 dlpar_online_cpu(dn); 538 539 return saved_rc; 540 } 541 542 pr_debug("Successfully removed CPU, drc index: %x\n", drc_index); 543 return 0; 544 } 545 546 static struct device_node *cpu_drc_index_to_dn(u32 drc_index) 547 { 548 struct device_node *dn; 549 u32 my_index; 550 int rc; 551 552 for_each_node_by_type(dn, "cpu") { 553 rc = of_property_read_u32(dn, "ibm,my-drc-index", &my_index); 554 if (rc) 555 continue; 556 557 if (my_index == drc_index) 558 break; 559 } 560 561 return dn; 562 } 563 564 static int dlpar_cpu_remove_by_index(u32 drc_index) 565 { 566 struct device_node *dn; 567 int rc; 568 569 dn = cpu_drc_index_to_dn(drc_index); 570 if (!dn) { 571 pr_warn("Cannot find CPU (drc index %x) to remove\n", 572 drc_index); 573 return -ENODEV; 574 } 575 576 rc = dlpar_cpu_remove(dn, drc_index); 577 of_node_put(dn); 578 return rc; 579 } 580 581 static int find_dlpar_cpus_to_remove(u32 *cpu_drcs, int cpus_to_remove) 582 { 583 struct device_node *dn; 584 int cpus_found = 0; 585 int rc; 586 587 /* We want to find cpus_to_remove + 1 CPUs to ensure we do not 588 * remove the last CPU. 589 */ 590 for_each_node_by_type(dn, "cpu") { 591 cpus_found++; 592 593 if (cpus_found > cpus_to_remove) { 594 of_node_put(dn); 595 break; 596 } 597 598 /* Note that cpus_found is always 1 ahead of the index 599 * into the cpu_drcs array, so we use cpus_found - 1 600 */ 601 rc = of_property_read_u32(dn, "ibm,my-drc-index", 602 &cpu_drcs[cpus_found - 1]); 603 if (rc) { 604 pr_warn("Error occurred getting drc-index for %pOFn\n", 605 dn); 606 of_node_put(dn); 607 return -1; 608 } 609 } 610 611 if (cpus_found < cpus_to_remove) { 612 pr_warn("Failed to find enough CPUs (%d of %d) to remove\n", 613 cpus_found, cpus_to_remove); 614 } else if (cpus_found == cpus_to_remove) { 615 pr_warn("Cannot remove all CPUs\n"); 616 } 617 618 return cpus_found; 619 } 620 621 static int dlpar_cpu_remove_by_count(u32 cpus_to_remove) 622 { 623 u32 *cpu_drcs; 624 int cpus_found; 625 int cpus_removed = 0; 626 int i, rc; 627 628 pr_debug("Attempting to hot-remove %d CPUs\n", cpus_to_remove); 629 630 cpu_drcs = kcalloc(cpus_to_remove, sizeof(*cpu_drcs), GFP_KERNEL); 631 if (!cpu_drcs) 632 return -EINVAL; 633 634 cpus_found = find_dlpar_cpus_to_remove(cpu_drcs, cpus_to_remove); 635 if (cpus_found <= cpus_to_remove) { 636 kfree(cpu_drcs); 637 return -EINVAL; 638 } 639 640 for (i = 0; i < cpus_to_remove; i++) { 641 rc = dlpar_cpu_remove_by_index(cpu_drcs[i]); 642 if (rc) 643 break; 644 645 cpus_removed++; 646 } 647 648 if (cpus_removed != cpus_to_remove) { 649 pr_warn("CPU hot-remove failed, adding back removed CPUs\n"); 650 651 for (i = 0; i < cpus_removed; i++) 652 dlpar_cpu_add(cpu_drcs[i]); 653 654 rc = -EINVAL; 655 } else { 656 rc = 0; 657 } 658 659 kfree(cpu_drcs); 660 return rc; 661 } 662 663 static int find_drc_info_cpus_to_add(struct device_node *cpus, 664 struct property *info, 665 u32 *cpu_drcs, u32 cpus_to_add) 666 { 667 struct of_drc_info drc; 668 const __be32 *value; 669 u32 count, drc_index; 670 int cpus_found = 0; 671 int i, j; 672 673 if (!info) 674 return -1; 675 676 value = of_prop_next_u32(info, NULL, &count); 677 if (value) 678 value++; 679 680 for (i = 0; i < count; i++) { 681 of_read_drc_info_cell(&info, &value, &drc); 682 if (strncmp(drc.drc_type, "CPU", 3)) 683 break; 684 685 drc_index = drc.drc_index_start; 686 for (j = 0; j < drc.num_sequential_elems; j++) { 687 if (dlpar_cpu_exists(cpus, drc_index)) 688 continue; 689 690 cpu_drcs[cpus_found++] = drc_index; 691 692 if (cpus_found == cpus_to_add) 693 return cpus_found; 694 695 drc_index += drc.sequential_inc; 696 } 697 } 698 699 return cpus_found; 700 } 701 702 static int find_drc_index_cpus_to_add(struct device_node *cpus, 703 u32 *cpu_drcs, u32 cpus_to_add) 704 { 705 int cpus_found = 0; 706 int index, rc; 707 u32 drc_index; 708 709 /* Search the ibm,drc-indexes array for possible CPU drcs to 710 * add. Note that the format of the ibm,drc-indexes array is 711 * the number of entries in the array followed by the array 712 * of drc values so we start looking at index = 1. 713 */ 714 index = 1; 715 while (cpus_found < cpus_to_add) { 716 rc = of_property_read_u32_index(cpus, "ibm,drc-indexes", 717 index++, &drc_index); 718 719 if (rc) 720 break; 721 722 if (dlpar_cpu_exists(cpus, drc_index)) 723 continue; 724 725 cpu_drcs[cpus_found++] = drc_index; 726 } 727 728 return cpus_found; 729 } 730 731 static int dlpar_cpu_add_by_count(u32 cpus_to_add) 732 { 733 struct device_node *parent; 734 struct property *info; 735 u32 *cpu_drcs; 736 int cpus_added = 0; 737 int cpus_found; 738 int i, rc; 739 740 pr_debug("Attempting to hot-add %d CPUs\n", cpus_to_add); 741 742 cpu_drcs = kcalloc(cpus_to_add, sizeof(*cpu_drcs), GFP_KERNEL); 743 if (!cpu_drcs) 744 return -EINVAL; 745 746 parent = of_find_node_by_path("/cpus"); 747 if (!parent) { 748 pr_warn("Could not find CPU root node in device tree\n"); 749 kfree(cpu_drcs); 750 return -1; 751 } 752 753 info = of_find_property(parent, "ibm,drc-info", NULL); 754 if (info) 755 cpus_found = find_drc_info_cpus_to_add(parent, info, cpu_drcs, cpus_to_add); 756 else 757 cpus_found = find_drc_index_cpus_to_add(parent, cpu_drcs, cpus_to_add); 758 759 of_node_put(parent); 760 761 if (cpus_found < cpus_to_add) { 762 pr_warn("Failed to find enough CPUs (%d of %d) to add\n", 763 cpus_found, cpus_to_add); 764 kfree(cpu_drcs); 765 return -EINVAL; 766 } 767 768 for (i = 0; i < cpus_to_add; i++) { 769 rc = dlpar_cpu_add(cpu_drcs[i]); 770 if (rc) 771 break; 772 773 cpus_added++; 774 } 775 776 if (cpus_added < cpus_to_add) { 777 pr_warn("CPU hot-add failed, removing any added CPUs\n"); 778 779 for (i = 0; i < cpus_added; i++) 780 dlpar_cpu_remove_by_index(cpu_drcs[i]); 781 782 rc = -EINVAL; 783 } else { 784 rc = 0; 785 } 786 787 kfree(cpu_drcs); 788 return rc; 789 } 790 791 int dlpar_cpu(struct pseries_hp_errorlog *hp_elog) 792 { 793 u32 count, drc_index; 794 int rc; 795 796 count = hp_elog->_drc_u.drc_count; 797 drc_index = hp_elog->_drc_u.drc_index; 798 799 lock_device_hotplug(); 800 801 switch (hp_elog->action) { 802 case PSERIES_HP_ELOG_ACTION_REMOVE: 803 if (hp_elog->id_type == PSERIES_HP_ELOG_ID_DRC_COUNT) 804 rc = dlpar_cpu_remove_by_count(count); 805 else if (hp_elog->id_type == PSERIES_HP_ELOG_ID_DRC_INDEX) 806 rc = dlpar_cpu_remove_by_index(drc_index); 807 else 808 rc = -EINVAL; 809 break; 810 case PSERIES_HP_ELOG_ACTION_ADD: 811 if (hp_elog->id_type == PSERIES_HP_ELOG_ID_DRC_COUNT) 812 rc = dlpar_cpu_add_by_count(count); 813 else if (hp_elog->id_type == PSERIES_HP_ELOG_ID_DRC_INDEX) 814 rc = dlpar_cpu_add(drc_index); 815 else 816 rc = -EINVAL; 817 break; 818 default: 819 pr_err("Invalid action (%d) specified\n", hp_elog->action); 820 rc = -EINVAL; 821 break; 822 } 823 824 unlock_device_hotplug(); 825 return rc; 826 } 827 828 #ifdef CONFIG_ARCH_CPU_PROBE_RELEASE 829 830 static ssize_t dlpar_cpu_probe(const char *buf, size_t count) 831 { 832 u32 drc_index; 833 int rc; 834 835 rc = kstrtou32(buf, 0, &drc_index); 836 if (rc) 837 return -EINVAL; 838 839 rc = dlpar_cpu_add(drc_index); 840 841 return rc ? rc : count; 842 } 843 844 static ssize_t dlpar_cpu_release(const char *buf, size_t count) 845 { 846 struct device_node *dn; 847 u32 drc_index; 848 int rc; 849 850 dn = of_find_node_by_path(buf); 851 if (!dn) 852 return -EINVAL; 853 854 rc = of_property_read_u32(dn, "ibm,my-drc-index", &drc_index); 855 if (rc) { 856 of_node_put(dn); 857 return -EINVAL; 858 } 859 860 rc = dlpar_cpu_remove(dn, drc_index); 861 of_node_put(dn); 862 863 return rc ? rc : count; 864 } 865 866 #endif /* CONFIG_ARCH_CPU_PROBE_RELEASE */ 867 868 static int pseries_smp_notifier(struct notifier_block *nb, 869 unsigned long action, void *data) 870 { 871 struct of_reconfig_data *rd = data; 872 int err = 0; 873 874 switch (action) { 875 case OF_RECONFIG_ATTACH_NODE: 876 err = pseries_add_processor(rd->dn); 877 break; 878 case OF_RECONFIG_DETACH_NODE: 879 pseries_remove_processor(rd->dn); 880 break; 881 } 882 return notifier_from_errno(err); 883 } 884 885 static struct notifier_block pseries_smp_nb = { 886 .notifier_call = pseries_smp_notifier, 887 }; 888 889 static int __init pseries_cpu_hotplug_init(void) 890 { 891 int qcss_tok; 892 893 #ifdef CONFIG_ARCH_CPU_PROBE_RELEASE 894 ppc_md.cpu_probe = dlpar_cpu_probe; 895 ppc_md.cpu_release = dlpar_cpu_release; 896 #endif /* CONFIG_ARCH_CPU_PROBE_RELEASE */ 897 898 rtas_stop_self_token = rtas_token("stop-self"); 899 qcss_tok = rtas_token("query-cpu-stopped-state"); 900 901 if (rtas_stop_self_token == RTAS_UNKNOWN_SERVICE || 902 qcss_tok == RTAS_UNKNOWN_SERVICE) { 903 printk(KERN_INFO "CPU Hotplug not supported by firmware " 904 "- disabling.\n"); 905 return 0; 906 } 907 908 smp_ops->cpu_offline_self = pseries_cpu_offline_self; 909 smp_ops->cpu_disable = pseries_cpu_disable; 910 smp_ops->cpu_die = pseries_cpu_die; 911 912 /* Processors can be added/removed only on LPAR */ 913 if (firmware_has_feature(FW_FEATURE_LPAR)) 914 of_reconfig_notifier_register(&pseries_smp_nb); 915 916 return 0; 917 } 918 machine_arch_initcall(pseries, pseries_cpu_hotplug_init); 919