// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * pseries CPU Hotplug infrastructure.
 *
 * Split out from arch/powerpc/platforms/pseries/setup.c
 *  arch/powerpc/kernel/rtas.c, and arch/powerpc/platforms/pseries/smp.c
 *
 * Peter Bergner, IBM	March 2001.
 * Copyright (C) 2001 IBM.
 * Dave Engebretsen, Peter Bergner, and
 * Mike Corrigan {engebret|bergner|mikec}@us.ibm.com
 * Plus various changes from other IBM teams...
 *
 * Copyright (C) 2006 Michael Ellerman, IBM Corporation
 */

#define pr_fmt(fmt) "pseries-hotplug-cpu: " fmt

#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/sched.h>	/* for idle_task_exit */
#include <linux/sched/hotplug.h>
#include <linux/cpu.h>
#include <linux/of.h>
#include <linux/slab.h>
#include <asm/prom.h>
#include <asm/rtas.h>
#include <asm/firmware.h>
#include <asm/machdep.h>
#include <asm/vdso_datapage.h>
#include <asm/xics.h>
#include <asm/xive.h>
#include <asm/plpar_wrappers.h>
#include <asm/topology.h>

#include "pseries.h"

/* This version can't take the spinlock, because it never returns */
static int rtas_stop_self_token = RTAS_UNKNOWN_SERVICE;

static void rtas_stop_self(void)
{
	static struct rtas_args args;

	local_irq_disable();

	BUG_ON(rtas_stop_self_token == RTAS_UNKNOWN_SERVICE);

	pr_info("cpu %u (hwid %u) Ready to die...\n",
		smp_processor_id(), hard_smp_processor_id());

	rtas_call_unlocked(&args, rtas_stop_self_token, 0, 1, NULL);

	panic("Alas, I survived.\n");
}

static void pseries_mach_cpu_die(void)
{
	unsigned int hwcpu = hard_smp_processor_id();

	local_irq_disable();
	idle_task_exit();
	if (xive_enabled())
		xive_teardown_cpu();
	else
		xics_teardown_cpu();

	unregister_slb_shadow(hwcpu);
	rtas_stop_self();

	/* Should never get here... */
	BUG();
	for (;;);
}

static int pseries_cpu_disable(void)
{
	int cpu = smp_processor_id();

	set_cpu_online(cpu, false);
	vdso_data->processorCount--;

	/* Pick a new boot cpu if the one going offline is it */
	if (cpu == boot_cpuid)
		boot_cpuid = cpumask_any(cpu_online_mask);

	/* FIXME: abstract this to not be platform specific later on */
	if (xive_enabled())
		xive_smp_disable_cpu();
	else
		xics_migrate_irqs_away();
	return 0;
}

/*
 * pseries_cpu_die: Wait for the cpu to die.
 * @cpu: logical processor id of the CPU whose death we're awaiting.
 *
 * This function is called from the context of the thread which is performing
 * the cpu-offline. Here we wait for long enough to allow the cpu in question
 * to self-destroy so that the cpu-offline thread can send the CPU_DEAD
 * notifications.
 *
 * OTOH, pseries_mach_cpu_die() is called by the @cpu when it wants to
 * self-destruct.
 */
static void pseries_cpu_die(unsigned int cpu)
{
	int tries;
	int cpu_status = 1;
	unsigned int pcpu = get_hard_smp_processor_id(cpu);

	for (tries = 0; tries < 25; tries++) {
		cpu_status = smp_query_cpu_stopped(pcpu);
		if (cpu_status == QCSS_STOPPED ||
		    cpu_status == QCSS_HARDWARE_ERROR)
			break;
		cpu_relax();
	}

	if (cpu_status != 0) {
		pr_info("Querying DEAD? cpu %i (%i) shows %i\n",
			cpu, pcpu, cpu_status);
	}

	/* Isolation and deallocation are definitely done by
	 * drslot_chrp_cpu. If they were not they would be
	 * done here. Change isolate state to Isolate and
	 * change allocation-state to Unusable.
	 */
	paca_ptrs[cpu]->cpu_start = 0;
}
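/*
 * Illustrative sketch of the offline handshake implemented above (an
 * editorial reading aid, assuming firmware provides the stop-self and
 * query-cpu-stopped-state RTAS services):
 *
 *	dying CPU				offlining thread
 *	---------				----------------
 *	pseries_mach_cpu_die()			pseries_cpu_die(cpu)
 *	  xive/xics teardown			  polls smp_query_cpu_stopped()
 *	  rtas_stop_self()	-------->	  sees QCSS_STOPPED and returns,
 *	    (never returns)			  allowing CPU_DEAD processing
 */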
/*
 * Update cpu_present_mask and paca(s) for a new cpu node.  The wrinkle
 * here is that a cpu device node may represent up to two logical cpus
 * in the SMT case.  We must honor the assumption in other code that
 * the logical ids for sibling SMT threads x and y are adjacent, such
 * that x^1 == y and y^1 == x.
 */
static int pseries_add_processor(struct device_node *np)
{
	unsigned int cpu;
	cpumask_var_t candidate_mask, tmp;
	int err = -ENOSPC, len, nthreads, i;
	const __be32 *intserv;

	intserv = of_get_property(np, "ibm,ppc-interrupt-server#s", &len);
	if (!intserv)
		return 0;

	if (!zalloc_cpumask_var(&candidate_mask, GFP_KERNEL))
		return -ENOMEM;
	if (!zalloc_cpumask_var(&tmp, GFP_KERNEL)) {
		free_cpumask_var(candidate_mask);
		return -ENOMEM;
	}

	nthreads = len / sizeof(u32);
	for (i = 0; i < nthreads; i++)
		cpumask_set_cpu(i, tmp);

	cpu_maps_update_begin();

	BUG_ON(!cpumask_subset(cpu_present_mask, cpu_possible_mask));

	/* Get a bitmap of unoccupied slots. */
	cpumask_xor(candidate_mask, cpu_possible_mask, cpu_present_mask);
	if (cpumask_empty(candidate_mask)) {
		/* If we get here, it most likely means that NR_CPUS is
		 * less than the partition's max processors setting.
		 */
		printk(KERN_ERR "Cannot add cpu %pOF; this system configuration"
		       " supports %d logical cpus.\n", np,
		       num_possible_cpus());
		goto out_unlock;
	}

	/* Slide the candidate window up by whole cores until it fits. */
	while (!cpumask_empty(tmp)) {
		if (cpumask_subset(tmp, candidate_mask))
			/* Found a range where we can insert the new cpu(s) */
			break;
		cpumask_shift_left(tmp, tmp, nthreads);
	}

	if (cpumask_empty(tmp)) {
		printk(KERN_ERR "Unable to find space in cpu_present_mask for"
		       " processor %pOFn with %d thread(s)\n", np,
		       nthreads);
		goto out_unlock;
	}

	for_each_cpu(cpu, tmp) {
		BUG_ON(cpu_present(cpu));
		set_cpu_present(cpu, true);
		set_hard_smp_processor_id(cpu, be32_to_cpu(*intserv++));
	}
	err = 0;
out_unlock:
	cpu_maps_update_done();
	free_cpumask_var(candidate_mask);
	free_cpumask_var(tmp);
	return err;
}
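/*
 * Worked example for the search in pseries_add_processor() (editorial note
 * with hypothetical numbers): with nthreads = 4 and logical cpus 0-7 already
 * present, tmp starts as {0-3} and collides with the present mask, shifts to
 * {4-7} and collides again, then {8-11} lies entirely within candidate_mask
 * and is used.  Shifting by whole cores keeps SMT siblings adjacent, which
 * preserves the x ^ 1 == y invariant described above.
 */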
/*
 * Update the present map for a cpu node which is going away, and set
 * the hard id in the paca(s) to -1 to be consistent with boot time
 * convention for non-present cpus.
 */
static void pseries_remove_processor(struct device_node *np)
{
	unsigned int cpu;
	int len, nthreads, i;
	const __be32 *intserv;
	u32 thread;

	intserv = of_get_property(np, "ibm,ppc-interrupt-server#s", &len);
	if (!intserv)
		return;

	nthreads = len / sizeof(u32);

	cpu_maps_update_begin();
	for (i = 0; i < nthreads; i++) {
		thread = be32_to_cpu(intserv[i]);
		for_each_present_cpu(cpu) {
			if (get_hard_smp_processor_id(cpu) != thread)
				continue;
			BUG_ON(cpu_online(cpu));
			set_cpu_present(cpu, false);
			set_hard_smp_processor_id(cpu, -1);
			update_numa_cpu_lookup_table(cpu, -1);
			break;
		}
		if (cpu >= nr_cpu_ids)
			printk(KERN_WARNING "Could not find cpu to remove "
			       "with physical id 0x%x\n", thread);
	}
	cpu_maps_update_done();
}

static int dlpar_offline_cpu(struct device_node *dn)
{
	int rc = 0;
	unsigned int cpu;
	int len, nthreads, i;
	const __be32 *intserv;
	u32 thread;

	intserv = of_get_property(dn, "ibm,ppc-interrupt-server#s", &len);
	if (!intserv)
		return -EINVAL;

	nthreads = len / sizeof(u32);

	cpu_maps_update_begin();
	for (i = 0; i < nthreads; i++) {
		thread = be32_to_cpu(intserv[i]);
		for_each_present_cpu(cpu) {
			if (get_hard_smp_processor_id(cpu) != thread)
				continue;

			if (!cpu_online(cpu))
				break;

			/* device_offline() takes the cpu maps lock
			 * itself via cpu_down(), so drop it around
			 * the call.
			 */
			cpu_maps_update_done();
			rc = device_offline(get_cpu_device(cpu));
			if (rc)
				goto out;
			cpu_maps_update_begin();
			break;
		}
		if (cpu >= nr_cpu_ids) {
			pr_warn("Could not find cpu to offline with physical id 0x%x\n",
				thread);
		}
	}
	cpu_maps_update_done();

out:
	return rc;
}

static int dlpar_online_cpu(struct device_node *dn)
{
	int rc = 0;
	unsigned int cpu;
	int len, nthreads, i;
	const __be32 *intserv;
	u32 thread;

	intserv = of_get_property(dn, "ibm,ppc-interrupt-server#s", &len);
	if (!intserv)
		return -EINVAL;

	nthreads = len / sizeof(u32);

	cpu_maps_update_begin();
	for (i = 0; i < nthreads; i++) {
		thread = be32_to_cpu(intserv[i]);
		for_each_present_cpu(cpu) {
			if (get_hard_smp_processor_id(cpu) != thread)
				continue;
			/* As above, device_online() takes the cpu maps
			 * lock itself, so drop it around the call.
			 */
			cpu_maps_update_done();
			find_and_online_cpu_nid(cpu);
			rc = device_online(get_cpu_device(cpu));
			if (rc) {
				dlpar_offline_cpu(dn);
				goto out;
			}
			cpu_maps_update_begin();
			break;
		}
		if (cpu >= nr_cpu_ids)
			printk(KERN_WARNING "Could not find cpu to online "
			       "with physical id 0x%x\n", thread);
	}
	cpu_maps_update_done();

out:
	return rc;
}

static bool dlpar_cpu_exists(struct device_node *parent, u32 drc_index)
{
	struct device_node *child = NULL;
	u32 my_drc_index;
	bool found;
	int rc;

	/* Assume cpu doesn't exist */
	found = false;

	for_each_child_of_node(parent, child) {
		rc = of_property_read_u32(child, "ibm,my-drc-index",
					  &my_drc_index);
		if (rc)
			continue;

		if (my_drc_index == drc_index) {
			of_node_put(child);
			found = true;
			break;
		}
	}

	return found;
}
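/*
 * The ibm,drc-info property encodes DRC ranges compactly as records rather
 * than one cell per index.  Hypothetical example (editorial, not from the
 * original file): a record with drc_type "CPU", drc_index_start 0x10000000,
 * num_sequential_elems 8 and sequential_inc 8 describes the drc indexes
 * 0x10000000, 0x10000008, ..., 0x10000038.  drc_info_valid_index() below
 * checks a candidate index against each such range.
 */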
static bool drc_info_valid_index(struct device_node *parent, u32 drc_index)
{
	struct property *info;
	struct of_drc_info drc;
	const __be32 *value;
	u32 index;
	int count, i, j;

	info = of_find_property(parent, "ibm,drc-info", NULL);
	if (!info)
		return false;

	value = of_prop_next_u32(info, NULL, &count);

	/* First value of ibm,drc-info is number of drc-info records */
	if (value)
		value++;
	else
		return false;

	for (i = 0; i < count; i++) {
		if (of_read_drc_info_cell(&info, &value, &drc))
			return false;

		if (strncmp(drc.drc_type, "CPU", 3))
			break;

		if (drc_index > drc.last_drc_index)
			continue;

		index = drc.drc_index_start;
		for (j = 0; j < drc.num_sequential_elems; j++) {
			if (drc_index == index)
				return true;

			index += drc.sequential_inc;
		}
	}

	return false;
}

static bool valid_cpu_drc_index(struct device_node *parent, u32 drc_index)
{
	bool found = false;
	int rc, index;

	if (of_find_property(parent, "ibm,drc-info", NULL))
		return drc_info_valid_index(parent, drc_index);

	/* Note that the format of the ibm,drc-indexes array is
	 * the number of entries in the array followed by the array
	 * of drc values so we start looking at index = 1.
	 */
	index = 1;
	while (!found) {
		u32 drc;

		rc = of_property_read_u32_index(parent, "ibm,drc-indexes",
						index++, &drc);
		if (rc)
			break;

		if (drc == drc_index)
			found = true;
	}

	return found;
}

static ssize_t dlpar_cpu_add(u32 drc_index)
{
	struct device_node *dn, *parent;
	int rc, saved_rc;

	pr_debug("Attempting to add CPU, drc index: %x\n", drc_index);

	parent = of_find_node_by_path("/cpus");
	if (!parent) {
		pr_warn("Failed to find CPU root node \"/cpus\"\n");
		return -ENODEV;
	}

	if (dlpar_cpu_exists(parent, drc_index)) {
		of_node_put(parent);
		pr_warn("CPU with drc index %x already exists\n", drc_index);
		return -EINVAL;
	}

	if (!valid_cpu_drc_index(parent, drc_index)) {
		of_node_put(parent);
		pr_warn("Cannot find CPU (drc index %x) to add.\n", drc_index);
		return -EINVAL;
	}

	rc = dlpar_acquire_drc(drc_index);
	if (rc) {
		pr_warn("Failed to acquire DRC, rc: %d, drc index: %x\n",
			rc, drc_index);
		of_node_put(parent);
		return -EINVAL;
	}

	dn = dlpar_configure_connector(cpu_to_be32(drc_index), parent);
	if (!dn) {
		pr_warn("Failed call to configure-connector, drc index: %x\n",
			drc_index);
		dlpar_release_drc(drc_index);
		of_node_put(parent);
		return -EINVAL;
	}

	rc = dlpar_attach_node(dn, parent);

	/* Regardless we are done with parent now */
	of_node_put(parent);

	if (rc) {
		saved_rc = rc;
		pr_warn("Failed to attach node %pOFn, rc: %d, drc index: %x\n",
			dn, rc, drc_index);

		rc = dlpar_release_drc(drc_index);
		if (!rc)
			dlpar_free_cc_nodes(dn);

		return saved_rc;
	}

	rc = dlpar_online_cpu(dn);
	if (rc) {
		saved_rc = rc;
		pr_warn("Failed to online cpu %pOFn, rc: %d, drc index: %x\n",
			dn, rc, drc_index);

		rc = dlpar_detach_node(dn);
		if (!rc)
			dlpar_release_drc(drc_index);

		return saved_rc;
	}

	pr_debug("Successfully added CPU %pOFn, drc index: %x\n", dn,
		 drc_index);
	return rc;
}
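/*
 * Sketch of the hot-add pipeline above and its inverse below (an editorial
 * summary, assuming the usual PAPR DLPAR flow): add is acquire DRC ->
 * configure-connector -> attach device node -> online; removal reverses it:
 * offline -> release DRC -> detach node.  Each step's failure path unwinds
 * the steps that came before it.
 */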
static ssize_t dlpar_cpu_remove(struct device_node *dn, u32 drc_index)
{
	int rc;

	pr_debug("Attempting to remove CPU %pOFn, drc index: %x\n",
		 dn, drc_index);

	rc = dlpar_offline_cpu(dn);
	if (rc) {
		pr_warn("Failed to offline CPU %pOFn, rc: %d\n", dn, rc);
		return -EINVAL;
	}

	rc = dlpar_release_drc(drc_index);
	if (rc) {
		pr_warn("Failed to release drc (%x) for CPU %pOFn, rc: %d\n",
			drc_index, dn, rc);
		dlpar_online_cpu(dn);
		return rc;
	}

	rc = dlpar_detach_node(dn);
	if (rc) {
		int saved_rc = rc;

		pr_warn("Failed to detach CPU %pOFn, rc: %d\n", dn, rc);

		rc = dlpar_acquire_drc(drc_index);
		if (!rc)
			dlpar_online_cpu(dn);

		return saved_rc;
	}

	pr_debug("Successfully removed CPU, drc index: %x\n", drc_index);
	return 0;
}

static struct device_node *cpu_drc_index_to_dn(u32 drc_index)
{
	struct device_node *dn;
	u32 my_index;
	int rc;

	for_each_node_by_type(dn, "cpu") {
		rc = of_property_read_u32(dn, "ibm,my-drc-index", &my_index);
		if (rc)
			continue;

		if (my_index == drc_index)
			break;
	}

	return dn;
}

static int dlpar_cpu_remove_by_index(u32 drc_index)
{
	struct device_node *dn;
	int rc;

	dn = cpu_drc_index_to_dn(drc_index);
	if (!dn) {
		pr_warn("Cannot find CPU (drc index %x) to remove\n",
			drc_index);
		return -ENODEV;
	}

	rc = dlpar_cpu_remove(dn, drc_index);
	of_node_put(dn);
	return rc;
}

static int find_dlpar_cpus_to_remove(u32 *cpu_drcs, int cpus_to_remove)
{
	struct device_node *dn;
	int cpus_found = 0;
	int rc;

	/* We want to find cpus_to_remove + 1 CPUs to ensure we do not
	 * remove the last CPU.
	 */
	for_each_node_by_type(dn, "cpu") {
		cpus_found++;

		if (cpus_found > cpus_to_remove) {
			of_node_put(dn);
			break;
		}

		/* Note that cpus_found is always 1 ahead of the index
		 * into the cpu_drcs array, so we use cpus_found - 1
		 */
		rc = of_property_read_u32(dn, "ibm,my-drc-index",
					  &cpu_drcs[cpus_found - 1]);
		if (rc) {
			pr_warn("Error occurred getting drc-index for %pOFn\n",
				dn);
			of_node_put(dn);
			return -1;
		}
	}

	if (cpus_found < cpus_to_remove) {
		pr_warn("Failed to find enough CPUs (%d of %d) to remove\n",
			cpus_found, cpus_to_remove);
	} else if (cpus_found == cpus_to_remove) {
		pr_warn("Cannot remove all CPUs\n");
	}

	return cpus_found;
}

static int dlpar_cpu_remove_by_count(u32 cpus_to_remove)
{
	u32 *cpu_drcs;
	int cpus_found;
	int cpus_removed = 0;
	int i, rc;

	pr_debug("Attempting to hot-remove %d CPUs\n", cpus_to_remove);

	cpu_drcs = kcalloc(cpus_to_remove, sizeof(*cpu_drcs), GFP_KERNEL);
	if (!cpu_drcs)
		return -EINVAL;

	cpus_found = find_dlpar_cpus_to_remove(cpu_drcs, cpus_to_remove);
	if (cpus_found <= cpus_to_remove) {
		kfree(cpu_drcs);
		return -EINVAL;
	}

	for (i = 0; i < cpus_to_remove; i++) {
		rc = dlpar_cpu_remove_by_index(cpu_drcs[i]);
		if (rc)
			break;

		cpus_removed++;
	}

	if (cpus_removed != cpus_to_remove) {
		pr_warn("CPU hot-remove failed, adding back removed CPUs\n");

		for (i = 0; i < cpus_removed; i++)
			dlpar_cpu_add(cpu_drcs[i]);

		rc = -EINVAL;
	} else {
		rc = 0;
	}

	kfree(cpu_drcs);
	return rc;
}
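/*
 * Worked example of the guard in find_dlpar_cpus_to_remove() above
 * (editorial, with hypothetical numbers): on a partition with four cpu
 * nodes, a request to remove four finds exactly four, so cpus_found ==
 * cpus_to_remove and the caller refuses rather than offlining the last
 * CPU; a request to remove three finds a fourth node, returns 4 > 3,
 * and the removal proceeds.
 */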
static int find_drc_info_cpus_to_add(struct device_node *cpus,
				     struct property *info,
				     u32 *cpu_drcs, u32 cpus_to_add)
{
	struct of_drc_info drc;
	const __be32 *value;
	u32 count, drc_index;
	int cpus_found = 0;
	int i, j;

	if (!info)
		return -1;

	/* First value of ibm,drc-info is number of drc-info records */
	value = of_prop_next_u32(info, NULL, &count);
	if (!value)
		return -1;
	value++;

	for (i = 0; i < count; i++) {
		if (of_read_drc_info_cell(&info, &value, &drc))
			return -1;

		if (strncmp(drc.drc_type, "CPU", 3))
			break;

		drc_index = drc.drc_index_start;
		for (j = 0; j < drc.num_sequential_elems;
		     j++, drc_index += drc.sequential_inc) {
			if (dlpar_cpu_exists(cpus, drc_index))
				continue;

			cpu_drcs[cpus_found++] = drc_index;

			if (cpus_found == cpus_to_add)
				return cpus_found;
		}
	}

	return cpus_found;
}

static int find_drc_index_cpus_to_add(struct device_node *cpus,
				      u32 *cpu_drcs, u32 cpus_to_add)
{
	int cpus_found = 0;
	int index, rc;
	u32 drc_index;

	/* Search the ibm,drc-indexes array for possible CPU drcs to
	 * add. Note that the format of the ibm,drc-indexes array is
	 * the number of entries in the array followed by the array
	 * of drc values so we start looking at index = 1.
	 */
	index = 1;
	while (cpus_found < cpus_to_add) {
		rc = of_property_read_u32_index(cpus, "ibm,drc-indexes",
						index++, &drc_index);
		if (rc)
			break;

		if (dlpar_cpu_exists(cpus, drc_index))
			continue;

		cpu_drcs[cpus_found++] = drc_index;
	}

	return cpus_found;
}

static int dlpar_cpu_add_by_count(u32 cpus_to_add)
{
	struct device_node *parent;
	struct property *info;
	u32 *cpu_drcs;
	int cpus_added = 0;
	int cpus_found;
	int i, rc;

	pr_debug("Attempting to hot-add %d CPUs\n", cpus_to_add);

	cpu_drcs = kcalloc(cpus_to_add, sizeof(*cpu_drcs), GFP_KERNEL);
	if (!cpu_drcs)
		return -EINVAL;

	parent = of_find_node_by_path("/cpus");
	if (!parent) {
		pr_warn("Could not find CPU root node in device tree\n");
		kfree(cpu_drcs);
		return -1;
	}

	info = of_find_property(parent, "ibm,drc-info", NULL);
	if (info)
		cpus_found = find_drc_info_cpus_to_add(parent, info, cpu_drcs, cpus_to_add);
	else
		cpus_found = find_drc_index_cpus_to_add(parent, cpu_drcs, cpus_to_add);

	of_node_put(parent);

	if (cpus_found < cpus_to_add) {
		pr_warn("Failed to find enough CPUs (%d of %d) to add\n",
			cpus_found, cpus_to_add);
		kfree(cpu_drcs);
		return -EINVAL;
	}

	for (i = 0; i < cpus_to_add; i++) {
		rc = dlpar_cpu_add(cpu_drcs[i]);
		if (rc)
			break;

		cpus_added++;
	}

	if (cpus_added < cpus_to_add) {
		pr_warn("CPU hot-add failed, removing any added CPUs\n");

		for (i = 0; i < cpus_added; i++)
			dlpar_cpu_remove_by_index(cpu_drcs[i]);

		rc = -EINVAL;
	} else {
		rc = 0;
	}

	kfree(cpu_drcs);
	return rc;
}

int dlpar_cpu(struct pseries_hp_errorlog *hp_elog)
{
	u32 count, drc_index;
	int rc;

	count = hp_elog->_drc_u.drc_count;
	drc_index = hp_elog->_drc_u.drc_index;

	lock_device_hotplug();

	switch (hp_elog->action) {
	case PSERIES_HP_ELOG_ACTION_REMOVE:
		if (hp_elog->id_type == PSERIES_HP_ELOG_ID_DRC_COUNT)
			rc = dlpar_cpu_remove_by_count(count);
		else if (hp_elog->id_type == PSERIES_HP_ELOG_ID_DRC_INDEX)
			rc = dlpar_cpu_remove_by_index(drc_index);
		else
			rc = -EINVAL;
		break;
	case PSERIES_HP_ELOG_ACTION_ADD:
		if (hp_elog->id_type == PSERIES_HP_ELOG_ID_DRC_COUNT)
			rc = dlpar_cpu_add_by_count(count);
		else if (hp_elog->id_type == PSERIES_HP_ELOG_ID_DRC_INDEX)
			rc = dlpar_cpu_add(drc_index);
		else
			rc = -EINVAL;
		break;
	default:
		pr_err("Invalid action (%d) specified\n", hp_elog->action);
		rc = -EINVAL;
		break;
	}

	unlock_device_hotplug();
	return rc;
}
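/*
 * Illustrative caller's view of dlpar_cpu() (editorial, hypothetical
 * request): a hotplug event asking for two CPUs to be added arrives as a
 * pseries_hp_errorlog with action = PSERIES_HP_ELOG_ACTION_ADD,
 * id_type = PSERIES_HP_ELOG_ID_DRC_COUNT and _drc_u.drc_count = 2, which
 * dispatches to dlpar_cpu_add_by_count(2) above.
 */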
#ifdef CONFIG_ARCH_CPU_PROBE_RELEASE

static ssize_t dlpar_cpu_probe(const char *buf, size_t count)
{
	u32 drc_index;
	int rc;

	rc = kstrtou32(buf, 0, &drc_index);
	if (rc)
		return -EINVAL;

	rc = dlpar_cpu_add(drc_index);

	return rc ? rc : count;
}

static ssize_t dlpar_cpu_release(const char *buf, size_t count)
{
	struct device_node *dn;
	u32 drc_index;
	int rc;

	dn = of_find_node_by_path(buf);
	if (!dn)
		return -EINVAL;

	rc = of_property_read_u32(dn, "ibm,my-drc-index", &drc_index);
	if (rc) {
		of_node_put(dn);
		return -EINVAL;
	}

	rc = dlpar_cpu_remove(dn, drc_index);
	of_node_put(dn);

	return rc ? rc : count;
}

#endif /* CONFIG_ARCH_CPU_PROBE_RELEASE */

static int pseries_smp_notifier(struct notifier_block *nb,
				unsigned long action, void *data)
{
	struct of_reconfig_data *rd = data;
	int err = 0;

	switch (action) {
	case OF_RECONFIG_ATTACH_NODE:
		err = pseries_add_processor(rd->dn);
		break;
	case OF_RECONFIG_DETACH_NODE:
		pseries_remove_processor(rd->dn);
		break;
	}
	return notifier_from_errno(err);
}

static struct notifier_block pseries_smp_nb = {
	.notifier_call = pseries_smp_notifier,
};

static int __init pseries_cpu_hotplug_init(void)
{
	int qcss_tok;

#ifdef CONFIG_ARCH_CPU_PROBE_RELEASE
	ppc_md.cpu_probe = dlpar_cpu_probe;
	ppc_md.cpu_release = dlpar_cpu_release;
#endif /* CONFIG_ARCH_CPU_PROBE_RELEASE */

	rtas_stop_self_token = rtas_token("stop-self");
	qcss_tok = rtas_token("query-cpu-stopped-state");

	if (rtas_stop_self_token == RTAS_UNKNOWN_SERVICE ||
	    qcss_tok == RTAS_UNKNOWN_SERVICE) {
		printk(KERN_INFO "CPU Hotplug not supported by firmware "
		       "- disabling.\n");
		return 0;
	}

	ppc_md.cpu_die = pseries_mach_cpu_die;
	smp_ops->cpu_disable = pseries_cpu_disable;
	smp_ops->cpu_die = pseries_cpu_die;

	/* Processors can be added/removed only on LPAR */
	if (firmware_has_feature(FW_FEATURE_LPAR))
		of_reconfig_notifier_register(&pseries_smp_nb);

	return 0;
}
machine_arch_initcall(pseries, pseries_cpu_hotplug_init);
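/*
 * Usage sketch (editorial note, assuming CONFIG_ARCH_CPU_PROBE_RELEASE and
 * DLPAR-capable firmware): the ppc_md hooks installed above are reached
 * through the generic cpu subsystem sysfs attributes, e.g. with
 * hypothetical values:
 *
 *	echo 0x10000008 > /sys/devices/system/cpu/probe
 *	echo /cpus/PowerPC,POWER9@8 > /sys/devices/system/cpu/release
 *
 * The probe path parses a drc index and calls dlpar_cpu_add(); the release
 * path takes a device tree path and calls dlpar_cpu_remove().
 */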