/*
 * pseries CPU Hotplug infrastructure.
 *
 * Split out from arch/powerpc/platforms/pseries/setup.c
 *  arch/powerpc/kernel/rtas.c, and arch/powerpc/platforms/pseries/smp.c
 *
 * Peter Bergner, IBM	March 2001.
 * Copyright (C) 2001 IBM.
 * Dave Engebretsen, Peter Bergner, and
 * Mike Corrigan {engebret|bergner|mikec}@us.ibm.com
 * Plus various changes from other IBM teams...
 *
 * Copyright (C) 2006 Michael Ellerman, IBM Corporation
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#define pr_fmt(fmt) "pseries-hotplug-cpu: " fmt

#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/sched.h>	/* for idle_task_exit */
#include <linux/sched/hotplug.h>
#include <linux/cpu.h>
#include <linux/of.h>
#include <linux/slab.h>
#include <asm/prom.h>
#include <asm/rtas.h>
#include <asm/firmware.h>
#include <asm/machdep.h>
#include <asm/vdso_datapage.h>
#include <asm/xics.h>
#include <asm/xive.h>
#include <asm/plpar_wrappers.h>

#include "pseries.h"
#include "offline_states.h"

/* This version can't take the spinlock, because it never returns */
static int rtas_stop_self_token = RTAS_UNKNOWN_SERVICE;

static DEFINE_PER_CPU(enum cpu_state_vals, preferred_offline_state) =
							CPU_STATE_OFFLINE;
static DEFINE_PER_CPU(enum cpu_state_vals, current_state) = CPU_STATE_OFFLINE;

static enum cpu_state_vals default_offline_state = CPU_STATE_OFFLINE;

static bool cede_offline_enabled __read_mostly = true;

/*
 * Enable/disable cede_offline when available.
 */
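/*
 * Usage sketch (editorial note, not part of the original file): the handler
 * below is registered with __setup(), so the setting is controlled from the
 * kernel command line. For example, booting with "cede_offline=off" keeps
 * offlined CPUs out of the extended-cede path and sends them straight to
 * the RTAS stop-self call instead.
 */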
static int __init setup_cede_offline(char *str)
{
	return (kstrtobool(str, &cede_offline_enabled) == 0);
}

__setup("cede_offline=", setup_cede_offline);

enum cpu_state_vals get_cpu_current_state(int cpu)
{
	return per_cpu(current_state, cpu);
}

void set_cpu_current_state(int cpu, enum cpu_state_vals state)
{
	per_cpu(current_state, cpu) = state;
}

enum cpu_state_vals get_preferred_offline_state(int cpu)
{
	return per_cpu(preferred_offline_state, cpu);
}

void set_preferred_offline_state(int cpu, enum cpu_state_vals state)
{
	per_cpu(preferred_offline_state, cpu) = state;
}

void set_default_offline_state(int cpu)
{
	per_cpu(preferred_offline_state, cpu) = default_offline_state;
}

static void rtas_stop_self(void)
{
	static struct rtas_args args;

	local_irq_disable();

	BUG_ON(rtas_stop_self_token == RTAS_UNKNOWN_SERVICE);

	printk("cpu %u (hwid %u) Ready to die...\n",
		smp_processor_id(), hard_smp_processor_id());

	rtas_call_unlocked(&args, rtas_stop_self_token, 0, 1, NULL);

	panic("Alas, I survived.\n");
}

static void pseries_mach_cpu_die(void)
{
	unsigned int cpu = smp_processor_id();
	unsigned int hwcpu = hard_smp_processor_id();
	u8 cede_latency_hint = 0;

	local_irq_disable();
	idle_task_exit();
	if (xive_enabled())
		xive_teardown_cpu();
	else
		xics_teardown_cpu();

	if (get_preferred_offline_state(cpu) == CPU_STATE_INACTIVE) {
		set_cpu_current_state(cpu, CPU_STATE_INACTIVE);
		if (ppc_md.suspend_disable_cpu)
			ppc_md.suspend_disable_cpu();

		cede_latency_hint = 2;

		get_lppaca()->idle = 1;
		if (!lppaca_shared_proc(get_lppaca()))
			get_lppaca()->donate_dedicated_cpu = 1;

		while (get_preferred_offline_state(cpu) == CPU_STATE_INACTIVE) {
			while (!prep_irq_for_idle()) {
				local_irq_enable();
				local_irq_disable();
			}

			extended_cede_processor(cede_latency_hint);
		}

		local_irq_disable();

		if (!lppaca_shared_proc(get_lppaca()))
			get_lppaca()->donate_dedicated_cpu = 0;
		get_lppaca()->idle = 0;

		if (get_preferred_offline_state(cpu) == CPU_STATE_ONLINE) {
			unregister_slb_shadow(hwcpu);

			hard_irq_disable();
			/*
			 * Call to start_secondary_resume() will not return.
			 * Kernel stack will be reset and start_secondary()
			 * will be called to continue the online operation.
			 */
			start_secondary_resume();
		}
	}

	/* Requested state is CPU_STATE_OFFLINE at this point */
	WARN_ON(get_preferred_offline_state(cpu) != CPU_STATE_OFFLINE);

	set_cpu_current_state(cpu, CPU_STATE_OFFLINE);
	unregister_slb_shadow(hwcpu);
	rtas_stop_self();

	/* Should never get here... */
	BUG();
	for(;;);
}

static int pseries_cpu_disable(void)
{
	int cpu = smp_processor_id();

	set_cpu_online(cpu, false);
	vdso_data->processorCount--;

	/* fix boot_cpuid here */
	if (cpu == boot_cpuid)
		boot_cpuid = cpumask_any(cpu_online_mask);

	/* FIXME: abstract this to not be platform specific later on */
	if (xive_enabled())
		xive_smp_disable_cpu();
	else
		xics_migrate_irqs_away();
	return 0;
}

/*
 * pseries_cpu_die: Wait for the cpu to die.
 * @cpu: logical processor id of the CPU whose death we're awaiting.
 *
 * This function is called from the context of the thread which is performing
 * the cpu-offline.
 * Here we wait for long enough to allow the cpu in question
 * to self-destruct so that the cpu-offline thread can send the CPU_DEAD
 * notifications.
 *
 * OTOH, pseries_mach_cpu_die() is called by the @cpu when it wants to
 * self-destruct.
 */
static void pseries_cpu_die(unsigned int cpu)
{
	int tries;
	int cpu_status = 1;
	unsigned int pcpu = get_hard_smp_processor_id(cpu);

	if (get_preferred_offline_state(cpu) == CPU_STATE_INACTIVE) {
		cpu_status = 1;
		for (tries = 0; tries < 5000; tries++) {
			if (get_cpu_current_state(cpu) == CPU_STATE_INACTIVE) {
				cpu_status = 0;
				break;
			}
			msleep(1);
		}
	} else if (get_preferred_offline_state(cpu) == CPU_STATE_OFFLINE) {

		for (tries = 0; tries < 25; tries++) {
			cpu_status = smp_query_cpu_stopped(pcpu);
			if (cpu_status == QCSS_STOPPED ||
			    cpu_status == QCSS_HARDWARE_ERROR)
				break;
			cpu_relax();
		}
	}

	if (cpu_status != 0) {
		printk("Querying DEAD? cpu %i (%i) shows %i\n",
		       cpu, pcpu, cpu_status);
	}

	/* Isolation and deallocation are definitely done by
	 * drslot_chrp_cpu. If they were not they would be
	 * done here. Change isolate state to Isolate and
	 * change allocation-state to Unusable.
	 */
	paca[cpu].cpu_start = 0;
}

/*
 * Update cpu_present_mask and paca(s) for a new cpu node. The wrinkle
 * here is that a cpu device node may represent up to two logical cpus
 * in the SMT case. We must honor the assumption in other code that
 * the logical ids for sibling SMT threads x and y are adjacent, such
 * that x^1 == y and y^1 == x.
 */
static int pseries_add_processor(struct device_node *np)
{
	unsigned int cpu;
	cpumask_var_t candidate_mask, tmp;
	int err = -ENOSPC, len, nthreads, i;
	const __be32 *intserv;

	intserv = of_get_property(np, "ibm,ppc-interrupt-server#s", &len);
	if (!intserv)
		return 0;

	zalloc_cpumask_var(&candidate_mask, GFP_KERNEL);
	zalloc_cpumask_var(&tmp, GFP_KERNEL);

	nthreads = len / sizeof(u32);
	for (i = 0; i < nthreads; i++)
		cpumask_set_cpu(i, tmp);

	cpu_maps_update_begin();

	BUG_ON(!cpumask_subset(cpu_present_mask, cpu_possible_mask));

	/* Get a bitmap of unoccupied slots. */
	cpumask_xor(candidate_mask, cpu_possible_mask, cpu_present_mask);
	if (cpumask_empty(candidate_mask)) {
		/* If we get here, it most likely means that NR_CPUS is
		 * less than the partition's max processors setting.
		 */
		printk(KERN_ERR "Cannot add cpu %pOF; this system configuration"
		       " supports %d logical cpus.\n", np,
		       num_possible_cpus());
		goto out_unlock;
	}

	while (!cpumask_empty(tmp))
		if (cpumask_subset(tmp, candidate_mask))
			/* Found a range where we can insert the new cpu(s) */
			break;
		else
			cpumask_shift_left(tmp, tmp, nthreads);

	if (cpumask_empty(tmp)) {
		printk(KERN_ERR "Unable to find space in cpu_present_mask for"
		       " processor %s with %d thread(s)\n", np->name,
		       nthreads);
		goto out_unlock;
	}

	for_each_cpu(cpu, tmp) {
		BUG_ON(cpu_present(cpu));
		set_cpu_present(cpu, true);
		set_hard_smp_processor_id(cpu, be32_to_cpu(*intserv++));
	}
	err = 0;
out_unlock:
	cpu_maps_update_done();
	free_cpumask_var(candidate_mask);
	free_cpumask_var(tmp);
	return err;
}

/*
 * Update the present map for a cpu node which is going away, and set
 * the hard id in the paca(s) to -1 to be consistent with boot time
 * convention for non-present cpus.
 */
static void pseries_remove_processor(struct device_node *np)
{
	unsigned int cpu;
	int len, nthreads, i;
	const __be32 *intserv;
	u32 thread;

	intserv = of_get_property(np, "ibm,ppc-interrupt-server#s", &len);
	if (!intserv)
		return;

	nthreads = len / sizeof(u32);

	cpu_maps_update_begin();
	for (i = 0; i < nthreads; i++) {
		thread = be32_to_cpu(intserv[i]);
		for_each_present_cpu(cpu) {
			if (get_hard_smp_processor_id(cpu) != thread)
				continue;
			BUG_ON(cpu_online(cpu));
			set_cpu_present(cpu, false);
			set_hard_smp_processor_id(cpu, -1);
			break;
		}
		if (cpu >= nr_cpu_ids)
			printk(KERN_WARNING "Could not find cpu to remove "
			       "with physical id 0x%x\n", thread);
	}
	cpu_maps_update_done();
}

static int dlpar_online_cpu(struct device_node *dn)
{
	int rc = 0;
	unsigned int cpu;
	int len, nthreads, i;
	const __be32 *intserv;
	u32 thread;

	intserv = of_get_property(dn, "ibm,ppc-interrupt-server#s", &len);
	if (!intserv)
		return -EINVAL;

	nthreads = len / sizeof(u32);

	cpu_maps_update_begin();
	for (i = 0; i < nthreads; i++) {
		thread = be32_to_cpu(intserv[i]);
		for_each_present_cpu(cpu) {
			if (get_hard_smp_processor_id(cpu) != thread)
				continue;
			BUG_ON(get_cpu_current_state(cpu)
					!= CPU_STATE_OFFLINE);
			cpu_maps_update_done();
			rc = device_online(get_cpu_device(cpu));
			if (rc)
				goto out;
			cpu_maps_update_begin();

			break;
		}
		if (cpu == num_possible_cpus())
			printk(KERN_WARNING "Could not find cpu to online "
			       "with physical id 0x%x\n", thread);
	}
	cpu_maps_update_done();

out:
	return rc;

}

static bool dlpar_cpu_exists(struct device_node *parent, u32 drc_index)
{
	struct device_node *child = NULL;
	u32 my_drc_index;
	bool found;
	int rc;

	/* Assume cpu doesn't exist */
	found = false;

	for_each_child_of_node(parent, child) {
		rc = of_property_read_u32(child, "ibm,my-drc-index",
					  &my_drc_index);
		if (rc)
			continue;

		if (my_drc_index == drc_index) {
			of_node_put(child);
			found = true;
			break;
		}
	}

	return found;
}

static bool valid_cpu_drc_index(struct device_node *parent, u32 drc_index)
{
	bool found = false;
	int rc, index;

	index = 0;
	while (!found) {
		u32 drc;

		rc = of_property_read_u32_index(parent, "ibm,drc-indexes",
						index++, &drc);
		if (rc)
			break;

		if (drc == drc_index)
			found = true;
	}

	return found;
}

static ssize_t dlpar_cpu_add(u32 drc_index)
{
	struct device_node *dn, *parent;
	int rc, saved_rc;

	pr_debug("Attempting to add CPU, drc index: %x\n", drc_index);

	parent = of_find_node_by_path("/cpus");
	if (!parent) {
		pr_warn("Failed to find CPU root node \"/cpus\"\n");
		return -ENODEV;
	}

	if (dlpar_cpu_exists(parent, drc_index)) {
		of_node_put(parent);
		pr_warn("CPU with drc index %x already exists\n", drc_index);
		return -EINVAL;
	}

	if (!valid_cpu_drc_index(parent, drc_index)) {
		of_node_put(parent);
		pr_warn("Cannot find CPU (drc index %x) to add.\n", drc_index);
		return -EINVAL;
	}

	rc = dlpar_acquire_drc(drc_index);
	if (rc) {
		pr_warn("Failed to acquire DRC, rc: %d, drc index: %x\n",
			rc, drc_index);
		of_node_put(parent);
		return -EINVAL;
	}

	dn = dlpar_configure_connector(cpu_to_be32(drc_index), parent);
	of_node_put(parent);
	if (!dn) {
		pr_warn("Failed call to configure-connector, drc index: %x\n",
			drc_index);
		dlpar_release_drc(drc_index);
		return -EINVAL;
	}

	rc = dlpar_attach_node(dn, parent);
	if (rc) {
		saved_rc = rc;
		pr_warn("Failed to attach node %s, rc: %d, drc index: %x\n",
			dn->name, rc, drc_index);

		rc = dlpar_release_drc(drc_index);
		if (!rc)
			dlpar_free_cc_nodes(dn);

		return saved_rc;
	}

	rc = dlpar_online_cpu(dn);
	if (rc) {
		saved_rc = rc;
		pr_warn("Failed to online cpu %s, rc: %d, drc index: %x\n",
			dn->name, rc, drc_index);

		rc = dlpar_detach_node(dn);
		if (!rc)
			dlpar_release_drc(drc_index);

		return saved_rc;
	}

	pr_debug("Successfully added CPU %s, drc index: %x\n", dn->name,
		 drc_index);
	return rc;
}

static int dlpar_offline_cpu(struct device_node *dn)
{
	int rc = 0;
	unsigned int cpu;
	int len, nthreads, i;
	const __be32 *intserv;
	u32 thread;

	intserv = of_get_property(dn, "ibm,ppc-interrupt-server#s", &len);
	if (!intserv)
		return -EINVAL;

	nthreads = len / sizeof(u32);

	cpu_maps_update_begin();
	for (i = 0; i < nthreads; i++) {
		thread = be32_to_cpu(intserv[i]);
		for_each_present_cpu(cpu) {
			if (get_hard_smp_processor_id(cpu) != thread)
				continue;

			if (get_cpu_current_state(cpu) == CPU_STATE_OFFLINE)
				break;

			if (get_cpu_current_state(cpu) == CPU_STATE_ONLINE) {
				set_preferred_offline_state(cpu,
							    CPU_STATE_OFFLINE);
				cpu_maps_update_done();
				rc = device_offline(get_cpu_device(cpu));
				if (rc)
					goto out;
				cpu_maps_update_begin();
				break;

			}

			/*
			 * The cpu is in CPU_STATE_INACTIVE.
			 * Upgrade its state to CPU_STATE_OFFLINE.
			 */
			set_preferred_offline_state(cpu, CPU_STATE_OFFLINE);
			BUG_ON(plpar_hcall_norets(H_PROD, thread)
								!= H_SUCCESS);
			__cpu_die(cpu);
			break;
		}
		if (cpu == num_possible_cpus())
			printk(KERN_WARNING "Could not find cpu to offline with physical id 0x%x\n", thread);
	}
	cpu_maps_update_done();

out:
	return rc;

}

static ssize_t dlpar_cpu_remove(struct device_node *dn, u32 drc_index)
{
	int rc;

	pr_debug("Attempting to remove CPU %s, drc index: %x\n",
		 dn->name, drc_index);

	rc = dlpar_offline_cpu(dn);
	if (rc) {
		pr_warn("Failed to offline CPU %s, rc: %d\n", dn->name, rc);
		return -EINVAL;
	}

	rc = dlpar_release_drc(drc_index);
	if (rc) {
		pr_warn("Failed to release drc (%x) for CPU %s, rc: %d\n",
			drc_index, dn->name, rc);
		dlpar_online_cpu(dn);
		return rc;
	}

	rc = dlpar_detach_node(dn);
	if (rc) {
		int saved_rc = rc;

		pr_warn("Failed to detach CPU %s, rc: %d", dn->name, rc);

		rc = dlpar_acquire_drc(drc_index);
		if (!rc)
			dlpar_online_cpu(dn);

		return saved_rc;
	}

	pr_debug("Successfully removed CPU, drc index: %x\n", drc_index);
	return 0;
}

static struct device_node *cpu_drc_index_to_dn(u32 drc_index)
{
	struct device_node *dn;
	u32 my_index;
	int rc;

	for_each_node_by_type(dn, "cpu") {
		rc = of_property_read_u32(dn, "ibm,my-drc-index", &my_index);
		if (rc)
			continue;

		if (my_index == drc_index)
			break;
	}

	return dn;
}

static int dlpar_cpu_remove_by_index(u32 drc_index)
{
	struct device_node *dn;
	int rc;

	dn = cpu_drc_index_to_dn(drc_index);
	if (!dn) {
		pr_warn("Cannot find CPU (drc index %x) to remove\n",
			drc_index);
		return -ENODEV;
	}

	rc = dlpar_cpu_remove(dn, drc_index);
	of_node_put(dn);
	return rc;
}

static int find_dlpar_cpus_to_remove(u32 *cpu_drcs, int cpus_to_remove)
{
	struct device_node *dn;
	int cpus_found = 0;
	int rc;

	/* We want to find cpus_to_remove + 1 CPUs to ensure we do not
	 * remove the last CPU.
	 */
	for_each_node_by_type(dn, "cpu") {
		cpus_found++;

		if (cpus_found > cpus_to_remove) {
			of_node_put(dn);
			break;
		}

		/* Note that cpus_found is always 1 ahead of the index
		 * into the cpu_drcs array, so we use cpus_found - 1
		 */
		rc = of_property_read_u32(dn, "ibm,my-drc-index",
					  &cpu_drcs[cpus_found - 1]);
		if (rc) {
			pr_warn("Error occurred getting drc-index for %s\n",
				dn->name);
			of_node_put(dn);
			return -1;
		}
	}

	if (cpus_found < cpus_to_remove) {
		pr_warn("Failed to find enough CPUs (%d of %d) to remove\n",
			cpus_found, cpus_to_remove);
	} else if (cpus_found == cpus_to_remove) {
		pr_warn("Cannot remove all CPUs\n");
	}

	return cpus_found;
}

static int dlpar_cpu_remove_by_count(u32 cpus_to_remove)
{
	u32 *cpu_drcs;
	int cpus_found;
	int cpus_removed = 0;
	int i, rc;

	pr_debug("Attempting to hot-remove %d CPUs\n", cpus_to_remove);

	cpu_drcs = kcalloc(cpus_to_remove, sizeof(*cpu_drcs), GFP_KERNEL);
	if (!cpu_drcs)
		return -EINVAL;

	cpus_found = find_dlpar_cpus_to_remove(cpu_drcs, cpus_to_remove);
	if (cpus_found <= cpus_to_remove) {
		kfree(cpu_drcs);
		return -EINVAL;
	}

	for (i = 0; i < cpus_to_remove; i++) {
		rc = dlpar_cpu_remove_by_index(cpu_drcs[i]);
		if (rc)
			break;

		cpus_removed++;
	}

	if (cpus_removed != cpus_to_remove) {
		pr_warn("CPU hot-remove failed, adding back removed CPUs\n");

		for (i = 0; i < cpus_removed; i++)
			dlpar_cpu_add(cpu_drcs[i]);

		rc = -EINVAL;
	} else {
		rc = 0;
	}

	kfree(cpu_drcs);
	return rc;
}

static int find_dlpar_cpus_to_add(u32 *cpu_drcs, u32 cpus_to_add)
{
	struct device_node *parent;
	int cpus_found = 0;
	int index, rc;

	parent = of_find_node_by_path("/cpus");
	if (!parent) {
		pr_warn("Could not find CPU root node in device tree\n");
		kfree(cpu_drcs);
		return -1;
	}

	/* Search the ibm,drc-indexes array for possible CPU drcs to
	 * add. Note that the format of the ibm,drc-indexes array is
	 * the number of entries in the array followed by the array
	 * of drc values so we start looking at index = 1.
	 */
	index = 1;
	while (cpus_found < cpus_to_add) {
		u32 drc;

		rc = of_property_read_u32_index(parent, "ibm,drc-indexes",
						index++, &drc);
		if (rc)
			break;

		if (dlpar_cpu_exists(parent, drc))
			continue;

		cpu_drcs[cpus_found++] = drc;
	}

	of_node_put(parent);
	return cpus_found;
}

static int dlpar_cpu_add_by_count(u32 cpus_to_add)
{
	u32 *cpu_drcs;
	int cpus_added = 0;
	int cpus_found;
	int i, rc;

	pr_debug("Attempting to hot-add %d CPUs\n", cpus_to_add);

	cpu_drcs = kcalloc(cpus_to_add, sizeof(*cpu_drcs), GFP_KERNEL);
	if (!cpu_drcs)
		return -EINVAL;

	cpus_found = find_dlpar_cpus_to_add(cpu_drcs, cpus_to_add);
	if (cpus_found < cpus_to_add) {
		pr_warn("Failed to find enough CPUs (%d of %d) to add\n",
			cpus_found, cpus_to_add);
		kfree(cpu_drcs);
		return -EINVAL;
	}

	for (i = 0; i < cpus_to_add; i++) {
		rc = dlpar_cpu_add(cpu_drcs[i]);
		if (rc)
			break;

		cpus_added++;
	}

	if (cpus_added < cpus_to_add) {
		pr_warn("CPU hot-add failed, removing any added CPUs\n");

		for (i = 0; i < cpus_added; i++)
			dlpar_cpu_remove_by_index(cpu_drcs[i]);

		rc = -EINVAL;
	} else {
		rc = 0;
	}

	kfree(cpu_drcs);
	return rc;
}

int dlpar_cpu(struct pseries_hp_errorlog *hp_elog)
{
	u32 count, drc_index;
	int rc;

	count = hp_elog->_drc_u.drc_count;
	drc_index = hp_elog->_drc_u.drc_index;

	lock_device_hotplug();

	switch (hp_elog->action) {
	case PSERIES_HP_ELOG_ACTION_REMOVE:
		if (hp_elog->id_type == PSERIES_HP_ELOG_ID_DRC_COUNT)
			rc = dlpar_cpu_remove_by_count(count);
		else if (hp_elog->id_type == PSERIES_HP_ELOG_ID_DRC_INDEX)
			rc = dlpar_cpu_remove_by_index(drc_index);
		else
			rc = -EINVAL;
		break;
	case PSERIES_HP_ELOG_ACTION_ADD:
		if (hp_elog->id_type == PSERIES_HP_ELOG_ID_DRC_COUNT)
			rc = dlpar_cpu_add_by_count(count);
		else if (hp_elog->id_type == PSERIES_HP_ELOG_ID_DRC_INDEX)
			rc = dlpar_cpu_add(drc_index);
		else
			rc = -EINVAL;
		break;
	default:
		pr_err("Invalid action (%d) specified\n", hp_elog->action);
		rc = -EINVAL;
		break;
	}

	unlock_device_hotplug();
	return rc;
}

#ifdef CONFIG_ARCH_CPU_PROBE_RELEASE

static ssize_t dlpar_cpu_probe(const char *buf, size_t count)
{
	u32 drc_index;
	int rc;

	rc = kstrtou32(buf, 0, &drc_index);
	if (rc)
		return -EINVAL;

	rc = dlpar_cpu_add(drc_index);

	return rc ? rc : count;
}

static ssize_t dlpar_cpu_release(const char *buf, size_t count)
{
	struct device_node *dn;
	u32 drc_index;
	int rc;

	dn = of_find_node_by_path(buf);
	if (!dn)
		return -EINVAL;

	rc = of_property_read_u32(dn, "ibm,my-drc-index", &drc_index);
	if (rc) {
		of_node_put(dn);
		return -EINVAL;
	}

	rc = dlpar_cpu_remove(dn, drc_index);
	of_node_put(dn);

	return rc ? rc : count;
}

#endif /* CONFIG_ARCH_CPU_PROBE_RELEASE */

static int pseries_smp_notifier(struct notifier_block *nb,
				unsigned long action, void *data)
{
	struct of_reconfig_data *rd = data;
	int err = 0;

	switch (action) {
	case OF_RECONFIG_ATTACH_NODE:
		err = pseries_add_processor(rd->dn);
		break;
	case OF_RECONFIG_DETACH_NODE:
		pseries_remove_processor(rd->dn);
		break;
	}
	return notifier_from_errno(err);
}

static struct notifier_block pseries_smp_nb = {
	.notifier_call = pseries_smp_notifier,
};

#define MAX_CEDE_LATENCY_LEVELS		4
#define CEDE_LATENCY_PARAM_LENGTH	10
#define CEDE_LATENCY_PARAM_MAX_LENGTH	\
	(MAX_CEDE_LATENCY_LEVELS * CEDE_LATENCY_PARAM_LENGTH * sizeof(char))
#define CEDE_LATENCY_TOKEN		45

static char cede_parameters[CEDE_LATENCY_PARAM_MAX_LENGTH];

static int parse_cede_parameters(void)
{
	memset(cede_parameters, 0, CEDE_LATENCY_PARAM_MAX_LENGTH);
	return rtas_call(rtas_token("ibm,get-system-parameter"), 3, 1,
			 NULL,
			 CEDE_LATENCY_TOKEN,
			 __pa(cede_parameters),
			 CEDE_LATENCY_PARAM_MAX_LENGTH);
}

static int __init pseries_cpu_hotplug_init(void)
{
	int cpu;
	int qcss_tok;

#ifdef CONFIG_ARCH_CPU_PROBE_RELEASE
	ppc_md.cpu_probe = dlpar_cpu_probe;
	ppc_md.cpu_release = dlpar_cpu_release;
#endif /* CONFIG_ARCH_CPU_PROBE_RELEASE */

	rtas_stop_self_token = rtas_token("stop-self");
	qcss_tok = rtas_token("query-cpu-stopped-state");

	if (rtas_stop_self_token == RTAS_UNKNOWN_SERVICE ||
			qcss_tok == RTAS_UNKNOWN_SERVICE) {
		printk(KERN_INFO "CPU Hotplug not supported by firmware "
				"- disabling.\n");
		return 0;
	}

	ppc_md.cpu_die = pseries_mach_cpu_die;
	smp_ops->cpu_disable = pseries_cpu_disable;
	smp_ops->cpu_die = pseries_cpu_die;

	/* Processors can be added/removed only on LPAR */
	if (firmware_has_feature(FW_FEATURE_LPAR)) {
		of_reconfig_notifier_register(&pseries_smp_nb);
		cpu_maps_update_begin();
		if (cede_offline_enabled && parse_cede_parameters() == 0) {
			default_offline_state = CPU_STATE_INACTIVE;
			for_each_online_cpu(cpu)
				set_default_offline_state(cpu);
		}
		cpu_maps_update_done();
	}

	return 0;
}
machine_arch_initcall(pseries, pseries_cpu_hotplug_init);
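/*
 * Usage sketch (editorial note, not part of the original file): when
 * CONFIG_ARCH_CPU_PROBE_RELEASE is enabled, the ppc_md.cpu_probe/cpu_release
 * hooks installed above back the generic /sys/devices/system/cpu/probe and
 * /sys/devices/system/cpu/release attributes, so a hot-add or hot-remove can
 * be driven from user space roughly as follows (the DRC index and node name
 * here are illustrative; they depend on the partition configuration):
 *
 *   # hot-add the CPU whose DRC index is 0x10000008
 *   echo 0x10000008 > /sys/devices/system/cpu/probe
 *
 *   # hot-remove a CPU, identified by its device-tree path
 *   echo -n /cpus/PowerPC,POWER8@8 > /sys/devices/system/cpu/release
 *
 * The dlpar_cpu() entry point above handles the same operations when they
 * arrive as PSERIES_HP_ELOG_ACTION_ADD/REMOVE hotplug error-log events.
 */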