/*
 * pseries CPU Hotplug infrastructure.
 *
 * Split out from arch/powerpc/platforms/pseries/setup.c
 *  arch/powerpc/kernel/rtas.c, and arch/powerpc/platforms/pseries/smp.c
 *
 * Peter Bergner, IBM	March 2001.
 * Copyright (C) 2001 IBM.
 * Dave Engebretsen, Peter Bergner, and
 * Mike Corrigan {engebret|bergner|mikec}@us.ibm.com
 * Plus various changes from other IBM teams...
 *
 * Copyright (C) 2006 Michael Ellerman, IBM Corporation
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#define pr_fmt(fmt) "pseries-hotplug-cpu: " fmt

#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/sched.h>	/* for idle_task_exit */
#include <linux/sched/hotplug.h>
#include <linux/cpu.h>
#include <linux/of.h>
#include <linux/slab.h>
#include <asm/prom.h>
#include <asm/rtas.h>
#include <asm/firmware.h>
#include <asm/machdep.h>
#include <asm/vdso_datapage.h>
#include <asm/xics.h>
#include <asm/xive.h>
#include <asm/plpar_wrappers.h>
#include <asm/topology.h>

#include "pseries.h"
#include "offline_states.h"

/* This version can't take the spinlock, because it never returns */
static int rtas_stop_self_token = RTAS_UNKNOWN_SERVICE;

static DEFINE_PER_CPU(enum cpu_state_vals, preferred_offline_state) =
							CPU_STATE_OFFLINE;
static DEFINE_PER_CPU(enum cpu_state_vals, current_state) = CPU_STATE_OFFLINE;

static enum cpu_state_vals default_offline_state = CPU_STATE_OFFLINE;

static bool cede_offline_enabled __read_mostly = true;

/*
 * Enable/disable cede_offline when available.
 */
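/*
 * "cede_offline=" takes any kstrtobool-style value; e.g. booting with
 * "cede_offline=off" leaves default_offline_state at CPU_STATE_OFFLINE,
 * so offlined CPUs are stopped via stop-self instead of being ceded to
 * the hypervisor (see pseries_cpu_hotplug_init() below).
 */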
static int __init setup_cede_offline(char *str)
{
	return (kstrtobool(str, &cede_offline_enabled) == 0);
}

__setup("cede_offline=", setup_cede_offline);

enum cpu_state_vals get_cpu_current_state(int cpu)
{
	return per_cpu(current_state, cpu);
}

void set_cpu_current_state(int cpu, enum cpu_state_vals state)
{
	per_cpu(current_state, cpu) = state;
}

enum cpu_state_vals get_preferred_offline_state(int cpu)
{
	return per_cpu(preferred_offline_state, cpu);
}

void set_preferred_offline_state(int cpu, enum cpu_state_vals state)
{
	per_cpu(preferred_offline_state, cpu) = state;
}

void set_default_offline_state(int cpu)
{
	per_cpu(preferred_offline_state, cpu) = default_offline_state;
}

static void rtas_stop_self(void)
{
	static struct rtas_args args;

	local_irq_disable();

	BUG_ON(rtas_stop_self_token == RTAS_UNKNOWN_SERVICE);

	printk("cpu %u (hwid %u) Ready to die...\n",
	       smp_processor_id(), hard_smp_processor_id());

	rtas_call_unlocked(&args, rtas_stop_self_token, 0, 1, NULL);

	panic("Alas, I survived.\n");
}
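/*
 * pseries_mach_cpu_die() runs on the dying CPU itself (it is installed
 * as ppc_md.cpu_die below).  If the preferred offline state is
 * CPU_STATE_INACTIVE the CPU repeatedly cedes itself to the hypervisor,
 * from where it can be prodded awake (H_PROD) and resumed via
 * start_secondary_resume(); otherwise it is stopped for good with the
 * stop-self RTAS call.
 */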
static void pseries_mach_cpu_die(void)
{
	unsigned int cpu = smp_processor_id();
	unsigned int hwcpu = hard_smp_processor_id();
	u8 cede_latency_hint = 0;

	local_irq_disable();
	idle_task_exit();
	if (xive_enabled())
		xive_teardown_cpu();
	else
		xics_teardown_cpu();

	if (get_preferred_offline_state(cpu) == CPU_STATE_INACTIVE) {
		set_cpu_current_state(cpu, CPU_STATE_INACTIVE);
		if (ppc_md.suspend_disable_cpu)
			ppc_md.suspend_disable_cpu();

		cede_latency_hint = 2;

		get_lppaca()->idle = 1;
		if (!lppaca_shared_proc(get_lppaca()))
			get_lppaca()->donate_dedicated_cpu = 1;

		while (get_preferred_offline_state(cpu) == CPU_STATE_INACTIVE) {
			while (!prep_irq_for_idle()) {
				local_irq_enable();
				local_irq_disable();
			}

			extended_cede_processor(cede_latency_hint);
		}

		local_irq_disable();

		if (!lppaca_shared_proc(get_lppaca()))
			get_lppaca()->donate_dedicated_cpu = 0;
		get_lppaca()->idle = 0;

		if (get_preferred_offline_state(cpu) == CPU_STATE_ONLINE) {
			unregister_slb_shadow(hwcpu);

			hard_irq_disable();
			/*
			 * Call to start_secondary_resume() will not return.
			 * Kernel stack will be reset and start_secondary()
			 * will be called to continue the online operation.
			 */
			start_secondary_resume();
		}
	}

	/* Requested state is CPU_STATE_OFFLINE at this point */
	WARN_ON(get_preferred_offline_state(cpu) != CPU_STATE_OFFLINE);

	set_cpu_current_state(cpu, CPU_STATE_OFFLINE);
	unregister_slb_shadow(hwcpu);
	rtas_stop_self();

	/* Should never get here... */
	BUG();
	for (;;)
		;
}

static int pseries_cpu_disable(void)
{
	int cpu = smp_processor_id();

	set_cpu_online(cpu, false);
	vdso_data->processorCount--;

	/* fix boot_cpuid here */
	if (cpu == boot_cpuid)
		boot_cpuid = cpumask_any(cpu_online_mask);

	/* FIXME: abstract this to not be platform specific later on */
	if (xive_enabled())
		xive_smp_disable_cpu();
	else
		xics_migrate_irqs_away();
	return 0;
}

/*
 * pseries_cpu_die: Wait for the cpu to die.
 * @cpu: logical processor id of the CPU whose death we're awaiting.
 *
 * This function is called from the context of the thread which is performing
 * the cpu-offline. Here we wait for long enough to allow the cpu in question
 * to self-destroy so that the cpu-offline thread can send the CPU_DEAD
 * notifications.
 *
 * OTOH, pseries_mach_cpu_die() is called by the @cpu when it wants to
 * self-destruct.
 */
static void pseries_cpu_die(unsigned int cpu)
{
	int tries;
	int cpu_status = 1;
	unsigned int pcpu = get_hard_smp_processor_id(cpu);

	if (get_preferred_offline_state(cpu) == CPU_STATE_INACTIVE) {
		cpu_status = 1;
		for (tries = 0; tries < 5000; tries++) {
			if (get_cpu_current_state(cpu) == CPU_STATE_INACTIVE) {
				cpu_status = 0;
				break;
			}
			msleep(1);
		}
	} else if (get_preferred_offline_state(cpu) == CPU_STATE_OFFLINE) {

		for (tries = 0; tries < 25; tries++) {
			cpu_status = smp_query_cpu_stopped(pcpu);
			if (cpu_status == QCSS_STOPPED ||
			    cpu_status == QCSS_HARDWARE_ERROR)
				break;
			cpu_relax();
		}
	}

	if (cpu_status != 0) {
		printk("Querying DEAD? cpu %i (%i) shows %i\n",
		       cpu, pcpu, cpu_status);
	}

	/* Isolation and deallocation are definitely done by
	 * drslot_chrp_cpu.  If they were not they would be
	 * done here.  Change isolate state to Isolate and
	 * change allocation-state to Unusable.
	 */
	paca_ptrs[cpu]->cpu_start = 0;
}

/*
 * Update cpu_present_mask and paca(s) for a new cpu node.  The wrinkle
 * here is that a cpu device node may represent up to two logical cpus
 * in the SMT case.  We must honor the assumption in other code that
 * the logical ids for sibling SMT threads x and y are adjacent, such
 * that x^1 == y and y^1 == x.
 */
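/*
 * For example, logical CPUs 4 and 5 are SMT siblings of one core since
 * 4 ^ 1 == 5 and 5 ^ 1 == 4, whereas 5 and 6 belong to different cores.
 */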
static int pseries_add_processor(struct device_node *np)
{
	unsigned int cpu;
	cpumask_var_t candidate_mask, tmp;
	int err = -ENOSPC, len, nthreads, i;
	const __be32 *intserv;

	intserv = of_get_property(np, "ibm,ppc-interrupt-server#s", &len);
	if (!intserv)
		return 0;

	zalloc_cpumask_var(&candidate_mask, GFP_KERNEL);
	zalloc_cpumask_var(&tmp, GFP_KERNEL);

	nthreads = len / sizeof(u32);
	for (i = 0; i < nthreads; i++)
		cpumask_set_cpu(i, tmp);

	cpu_maps_update_begin();

	BUG_ON(!cpumask_subset(cpu_present_mask, cpu_possible_mask));

	/* Get a bitmap of unoccupied slots. */
	cpumask_xor(candidate_mask, cpu_possible_mask, cpu_present_mask);
	if (cpumask_empty(candidate_mask)) {
		/* If we get here, it most likely means that NR_CPUS is
		 * less than the partition's max processors setting.
		 */
		printk(KERN_ERR "Cannot add cpu %pOF; this system configuration"
		       " supports %d logical cpus.\n", np,
		       num_possible_cpus());
		goto out_unlock;
	}

	while (!cpumask_empty(tmp))
		if (cpumask_subset(tmp, candidate_mask))
			/* Found a range where we can insert the new cpu(s) */
			break;
		else
			cpumask_shift_left(tmp, tmp, nthreads);

	if (cpumask_empty(tmp)) {
		printk(KERN_ERR "Unable to find space in cpu_present_mask for"
		       " processor %pOFn with %d thread(s)\n", np,
		       nthreads);
		goto out_unlock;
	}

	for_each_cpu(cpu, tmp) {
		BUG_ON(cpu_present(cpu));
		set_cpu_present(cpu, true);
		set_hard_smp_processor_id(cpu, be32_to_cpu(*intserv++));
	}
	err = 0;
out_unlock:
	cpu_maps_update_done();
	free_cpumask_var(candidate_mask);
	free_cpumask_var(tmp);
	return err;
}

/*
 * Update the present map for a cpu node which is going away, and set
 * the hard id in the paca(s) to -1 to be consistent with boot time
 * convention for non-present cpus.
 */
static void pseries_remove_processor(struct device_node *np)
{
	unsigned int cpu;
	int len, nthreads, i;
	const __be32 *intserv;
	u32 thread;

	intserv = of_get_property(np, "ibm,ppc-interrupt-server#s", &len);
	if (!intserv)
		return;

	nthreads = len / sizeof(u32);

	cpu_maps_update_begin();
	for (i = 0; i < nthreads; i++) {
		thread = be32_to_cpu(intserv[i]);
		for_each_present_cpu(cpu) {
			if (get_hard_smp_processor_id(cpu) != thread)
				continue;
			BUG_ON(cpu_online(cpu));
			set_cpu_present(cpu, false);
			set_hard_smp_processor_id(cpu, -1);
			update_numa_cpu_lookup_table(cpu, -1);
			break;
		}
		if (cpu >= nr_cpu_ids)
			printk(KERN_WARNING "Could not find cpu to remove "
			       "with physical id 0x%x\n", thread);
	}
	cpu_maps_update_done();
}

static int dlpar_online_cpu(struct device_node *dn)
{
	int rc = 0;
	unsigned int cpu;
	int len, nthreads, i;
	const __be32 *intserv;
	u32 thread;

	intserv = of_get_property(dn, "ibm,ppc-interrupt-server#s", &len);
	if (!intserv)
		return -EINVAL;

	nthreads = len / sizeof(u32);

	cpu_maps_update_begin();
	for (i = 0; i < nthreads; i++) {
		thread = be32_to_cpu(intserv[i]);
		for_each_present_cpu(cpu) {
			if (get_hard_smp_processor_id(cpu) != thread)
				continue;
			BUG_ON(get_cpu_current_state(cpu)
					!= CPU_STATE_OFFLINE);
			cpu_maps_update_done();
			timed_topology_update(1);
			find_and_online_cpu_nid(cpu);
			rc = device_online(get_cpu_device(cpu));
			if (rc)
				goto out;
			cpu_maps_update_begin();

			break;
		}
		if (cpu >= nr_cpu_ids)
			printk(KERN_WARNING "Could not find cpu to online "
			       "with physical id 0x%x\n", thread);
	}
	cpu_maps_update_done();

out:
	return rc;
}

static bool dlpar_cpu_exists(struct device_node *parent, u32 drc_index)
{
	struct device_node *child = NULL;
	u32 my_drc_index;
	bool found;
	int rc;

	/* Assume cpu doesn't exist */
	found = false;

	for_each_child_of_node(parent, child) {
		rc = of_property_read_u32(child, "ibm,my-drc-index",
					  &my_drc_index);
		if (rc)
			continue;

		if (my_drc_index == drc_index) {
			of_node_put(child);
			found = true;
			break;
		}
	}

	return found;
}
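/*
 * ibm,drc-indexes is a count cell followed by that many DRC index
 * values (see find_dlpar_cpus_to_add() below); walk the property until
 * drc_index is found or the entries run out.
 */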
static bool valid_cpu_drc_index(struct device_node *parent, u32 drc_index)
{
	bool found = false;
	int rc, index;

	index = 0;
	while (!found) {
		u32 drc;

		rc = of_property_read_u32_index(parent, "ibm,drc-indexes",
						index++, &drc);
		if (rc)
			break;

		if (drc == drc_index)
			found = true;
	}

	return found;
}

static ssize_t dlpar_cpu_add(u32 drc_index)
{
	struct device_node *dn, *parent;
	int rc, saved_rc;

	pr_debug("Attempting to add CPU, drc index: %x\n", drc_index);

	parent = of_find_node_by_path("/cpus");
	if (!parent) {
		pr_warn("Failed to find CPU root node \"/cpus\"\n");
		return -ENODEV;
	}

	if (dlpar_cpu_exists(parent, drc_index)) {
		of_node_put(parent);
		pr_warn("CPU with drc index %x already exists\n", drc_index);
		return -EINVAL;
	}

	if (!valid_cpu_drc_index(parent, drc_index)) {
		of_node_put(parent);
		pr_warn("Cannot find CPU (drc index %x) to add.\n", drc_index);
		return -EINVAL;
	}

	rc = dlpar_acquire_drc(drc_index);
	if (rc) {
		pr_warn("Failed to acquire DRC, rc: %d, drc index: %x\n",
			rc, drc_index);
		of_node_put(parent);
		return -EINVAL;
	}

	dn = dlpar_configure_connector(cpu_to_be32(drc_index), parent);
	if (!dn) {
		pr_warn("Failed call to configure-connector, drc index: %x\n",
			drc_index);
		dlpar_release_drc(drc_index);
		of_node_put(parent);
		return -EINVAL;
	}

	rc = dlpar_attach_node(dn, parent);

	/* Regardless we are done with parent now */
	of_node_put(parent);

	if (rc) {
		saved_rc = rc;
		pr_warn("Failed to attach node %pOFn, rc: %d, drc index: %x\n",
			dn, rc, drc_index);

		rc = dlpar_release_drc(drc_index);
		if (!rc)
			dlpar_free_cc_nodes(dn);

		return saved_rc;
	}

	rc = dlpar_online_cpu(dn);
	if (rc) {
		saved_rc = rc;
		pr_warn("Failed to online cpu %pOFn, rc: %d, drc index: %x\n",
			dn, rc, drc_index);

		rc = dlpar_detach_node(dn);
		if (!rc)
			dlpar_release_drc(drc_index);

		return saved_rc;
	}

	pr_debug("Successfully added CPU %pOFn, drc index: %x\n", dn,
		 drc_index);
	return rc;
}
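/*
 * Take each thread of the node offline.  A thread already in
 * CPU_STATE_OFFLINE is left alone; an online thread goes through
 * device_offline(); a ceded (CPU_STATE_INACTIVE) thread is prodded
 * awake with H_PROD so it can upgrade itself to CPU_STATE_OFFLINE
 * and stop.
 */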
static int dlpar_offline_cpu(struct device_node *dn)
{
	int rc = 0;
	unsigned int cpu;
	int len, nthreads, i;
	const __be32 *intserv;
	u32 thread;

	intserv = of_get_property(dn, "ibm,ppc-interrupt-server#s", &len);
	if (!intserv)
		return -EINVAL;

	nthreads = len / sizeof(u32);

	cpu_maps_update_begin();
	for (i = 0; i < nthreads; i++) {
		thread = be32_to_cpu(intserv[i]);
		for_each_present_cpu(cpu) {
			if (get_hard_smp_processor_id(cpu) != thread)
				continue;

			if (get_cpu_current_state(cpu) == CPU_STATE_OFFLINE)
				break;

			if (get_cpu_current_state(cpu) == CPU_STATE_ONLINE) {
				set_preferred_offline_state(cpu,
							    CPU_STATE_OFFLINE);
				cpu_maps_update_done();
				timed_topology_update(1);
				rc = device_offline(get_cpu_device(cpu));
				if (rc)
					goto out;
				cpu_maps_update_begin();
				break;
			}

			/*
			 * The cpu is in CPU_STATE_INACTIVE.
			 * Upgrade its state to CPU_STATE_OFFLINE.
			 */
			set_preferred_offline_state(cpu, CPU_STATE_OFFLINE);
			BUG_ON(plpar_hcall_norets(H_PROD, thread)
					!= H_SUCCESS);
			__cpu_die(cpu);
			break;
		}
		if (cpu >= nr_cpu_ids)
			printk(KERN_WARNING "Could not find cpu to offline with physical id 0x%x\n",
			       thread);
	}
	cpu_maps_update_done();

out:
	return rc;
}
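/*
 * Removal runs offline -> release DRC -> detach node, and each failing
 * step rolls the earlier ones back (re-onlining the CPU, and on a failed
 * detach re-acquiring the DRC) so the partition is left consistent.
 */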
static ssize_t dlpar_cpu_remove(struct device_node *dn, u32 drc_index)
{
	int rc;

	pr_debug("Attempting to remove CPU %pOFn, drc index: %x\n",
		 dn, drc_index);

	rc = dlpar_offline_cpu(dn);
	if (rc) {
		pr_warn("Failed to offline CPU %pOFn, rc: %d\n", dn, rc);
		return -EINVAL;
	}

	rc = dlpar_release_drc(drc_index);
	if (rc) {
		pr_warn("Failed to release drc (%x) for CPU %pOFn, rc: %d\n",
			drc_index, dn, rc);
		dlpar_online_cpu(dn);
		return rc;
	}

	rc = dlpar_detach_node(dn);
	if (rc) {
		int saved_rc = rc;

		pr_warn("Failed to detach CPU %pOFn, rc: %d\n", dn, rc);

		rc = dlpar_acquire_drc(drc_index);
		if (!rc)
			dlpar_online_cpu(dn);

		return saved_rc;
	}

	pr_debug("Successfully removed CPU, drc index: %x\n", drc_index);
	return 0;
}

static struct device_node *cpu_drc_index_to_dn(u32 drc_index)
{
	struct device_node *dn;
	u32 my_index;
	int rc;

	for_each_node_by_type(dn, "cpu") {
		rc = of_property_read_u32(dn, "ibm,my-drc-index", &my_index);
		if (rc)
			continue;

		if (my_index == drc_index)
			break;
	}

	return dn;
}

static int dlpar_cpu_remove_by_index(u32 drc_index)
{
	struct device_node *dn;
	int rc;

	dn = cpu_drc_index_to_dn(drc_index);
	if (!dn) {
		pr_warn("Cannot find CPU (drc index %x) to remove\n",
			drc_index);
		return -ENODEV;
	}

	rc = dlpar_cpu_remove(dn, drc_index);
	of_node_put(dn);
	return rc;
}

static int find_dlpar_cpus_to_remove(u32 *cpu_drcs, int cpus_to_remove)
{
	struct device_node *dn;
	int cpus_found = 0;
	int rc;

	/* We want to find cpus_to_remove + 1 CPUs to ensure we do not
	 * remove the last CPU.
	 */
	for_each_node_by_type(dn, "cpu") {
		cpus_found++;

		if (cpus_found > cpus_to_remove) {
			of_node_put(dn);
			break;
		}

		/* Note that cpus_found is always 1 ahead of the index
		 * into the cpu_drcs array, so we use cpus_found - 1
		 */
		rc = of_property_read_u32(dn, "ibm,my-drc-index",
					  &cpu_drcs[cpus_found - 1]);
		if (rc) {
			pr_warn("Error occurred getting drc-index for %pOFn\n",
				dn);
			of_node_put(dn);
			return -1;
		}
	}

	if (cpus_found < cpus_to_remove) {
		pr_warn("Failed to find enough CPUs (%d of %d) to remove\n",
			cpus_found, cpus_to_remove);
	} else if (cpus_found == cpus_to_remove) {
		pr_warn("Cannot remove all CPUs\n");
	}

	return cpus_found;
}

static int dlpar_cpu_remove_by_count(u32 cpus_to_remove)
{
	u32 *cpu_drcs;
	int cpus_found;
	int cpus_removed = 0;
	int i, rc;

	pr_debug("Attempting to hot-remove %d CPUs\n", cpus_to_remove);

	cpu_drcs = kcalloc(cpus_to_remove, sizeof(*cpu_drcs), GFP_KERNEL);
	if (!cpu_drcs)
		return -EINVAL;

	cpus_found = find_dlpar_cpus_to_remove(cpu_drcs, cpus_to_remove);
	if (cpus_found < 0 || cpus_found <= cpus_to_remove) {
		kfree(cpu_drcs);
		return -EINVAL;
	}

	for (i = 0; i < cpus_to_remove; i++) {
		rc = dlpar_cpu_remove_by_index(cpu_drcs[i]);
		if (rc)
			break;

		cpus_removed++;
	}

	if (cpus_removed != cpus_to_remove) {
		pr_warn("CPU hot-remove failed, adding back removed CPUs\n");

		for (i = 0; i < cpus_removed; i++)
			dlpar_cpu_add(cpu_drcs[i]);

		rc = -EINVAL;
	} else {
		rc = 0;
	}

	kfree(cpu_drcs);
	return rc;
}
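/*
 * Illustrative example: an ibm,drc-indexes property encoded as
 * { 3, 0x10000001, 0x10000002, 0x10000003 } describes three DRCs;
 * the leading count cell is why the scan below starts at index = 1.
 */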
static int find_dlpar_cpus_to_add(u32 *cpu_drcs, u32 cpus_to_add)
{
	struct device_node *parent;
	int cpus_found = 0;
	int index, rc;

	parent = of_find_node_by_path("/cpus");
	if (!parent) {
		pr_warn("Could not find CPU root node in device tree\n");
		return -1;
	}

	/* Search the ibm,drc-indexes array for possible CPU drcs to
	 * add. Note that the format of the ibm,drc-indexes array is
	 * the number of entries in the array followed by the array
	 * of drc values so we start looking at index = 1.
	 */
	index = 1;
	while (cpus_found < cpus_to_add) {
		u32 drc;

		rc = of_property_read_u32_index(parent, "ibm,drc-indexes",
						index++, &drc);
		if (rc)
			break;

		if (dlpar_cpu_exists(parent, drc))
			continue;

		cpu_drcs[cpus_found++] = drc;
	}

	of_node_put(parent);
	return cpus_found;
}

static int dlpar_cpu_add_by_count(u32 cpus_to_add)
{
	u32 *cpu_drcs;
	int cpus_added = 0;
	int cpus_found;
	int i, rc;

	pr_debug("Attempting to hot-add %d CPUs\n", cpus_to_add);

	cpu_drcs = kcalloc(cpus_to_add, sizeof(*cpu_drcs), GFP_KERNEL);
	if (!cpu_drcs)
		return -EINVAL;

	cpus_found = find_dlpar_cpus_to_add(cpu_drcs, cpus_to_add);
	if (cpus_found < 0 || cpus_found < cpus_to_add) {
		pr_warn("Failed to find enough CPUs (%d of %d) to add\n",
			cpus_found, cpus_to_add);
		kfree(cpu_drcs);
		return -EINVAL;
	}

	for (i = 0; i < cpus_to_add; i++) {
		rc = dlpar_cpu_add(cpu_drcs[i]);
		if (rc)
			break;

		cpus_added++;
	}

	if (cpus_added < cpus_to_add) {
		pr_warn("CPU hot-add failed, removing any added CPUs\n");

		for (i = 0; i < cpus_added; i++)
			dlpar_cpu_remove_by_index(cpu_drcs[i]);

		rc = -EINVAL;
	} else {
		rc = 0;
	}

	kfree(cpu_drcs);
	return rc;
}

int dlpar_cpu_readd(int cpu)
{
	struct device_node *dn;
	struct device *dev;
	u32 drc_index;
	int rc;

	dev = get_cpu_device(cpu);
	dn = dev->of_node;

	rc = of_property_read_u32(dn, "ibm,my-drc-index", &drc_index);
	if (rc)
		return rc;

	rc = dlpar_cpu_remove_by_index(drc_index);
	if (!rc)
		rc = dlpar_cpu_add(drc_index);

	return rc;
}

int dlpar_cpu(struct pseries_hp_errorlog *hp_elog)
{
	u32 count, drc_index;
	int rc;

	count = hp_elog->_drc_u.drc_count;
	drc_index = hp_elog->_drc_u.drc_index;

	lock_device_hotplug();

	switch (hp_elog->action) {
	case PSERIES_HP_ELOG_ACTION_REMOVE:
		if (hp_elog->id_type == PSERIES_HP_ELOG_ID_DRC_COUNT)
			rc = dlpar_cpu_remove_by_count(count);
		else if (hp_elog->id_type == PSERIES_HP_ELOG_ID_DRC_INDEX)
			rc = dlpar_cpu_remove_by_index(drc_index);
		else
			rc = -EINVAL;
		break;
	case PSERIES_HP_ELOG_ACTION_ADD:
		if (hp_elog->id_type == PSERIES_HP_ELOG_ID_DRC_COUNT)
			rc = dlpar_cpu_add_by_count(count);
		else if (hp_elog->id_type == PSERIES_HP_ELOG_ID_DRC_INDEX)
			rc = dlpar_cpu_add(drc_index);
		else
			rc = -EINVAL;
		break;
	default:
		pr_err("Invalid action (%d) specified\n", hp_elog->action);
		rc = -EINVAL;
		break;
	}

	unlock_device_hotplug();
	return rc;
}
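/*
 * When CONFIG_ARCH_CPU_PROBE_RELEASE is set, the handlers below back the
 * generic /sys/devices/system/cpu/probe and .../release files.  Probe
 * takes a DRC index, release a device tree path, e.g. (values are
 * illustrative only):
 *
 *	echo 0x10000001 > /sys/devices/system/cpu/probe
 *	echo /cpus/PowerPC,POWER8@10 > /sys/devices/system/cpu/release
 */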
#ifdef CONFIG_ARCH_CPU_PROBE_RELEASE

static ssize_t dlpar_cpu_probe(const char *buf, size_t count)
{
	u32 drc_index;
	int rc;

	rc = kstrtou32(buf, 0, &drc_index);
	if (rc)
		return -EINVAL;

	rc = dlpar_cpu_add(drc_index);

	return rc ? rc : count;
}

static ssize_t dlpar_cpu_release(const char *buf, size_t count)
{
	struct device_node *dn;
	u32 drc_index;
	int rc;

	dn = of_find_node_by_path(buf);
	if (!dn)
		return -EINVAL;

	rc = of_property_read_u32(dn, "ibm,my-drc-index", &drc_index);
	if (rc) {
		of_node_put(dn);
		return -EINVAL;
	}

	rc = dlpar_cpu_remove(dn, drc_index);
	of_node_put(dn);

	return rc ? rc : count;
}

#endif /* CONFIG_ARCH_CPU_PROBE_RELEASE */

static int pseries_smp_notifier(struct notifier_block *nb,
				unsigned long action, void *data)
{
	struct of_reconfig_data *rd = data;
	int err = 0;

	switch (action) {
	case OF_RECONFIG_ATTACH_NODE:
		err = pseries_add_processor(rd->dn);
		break;
	case OF_RECONFIG_DETACH_NODE:
		pseries_remove_processor(rd->dn);
		break;
	}
	return notifier_from_errno(err);
}

static struct notifier_block pseries_smp_nb = {
	.notifier_call = pseries_smp_notifier,
};

#define MAX_CEDE_LATENCY_LEVELS		4
#define CEDE_LATENCY_PARAM_LENGTH	10
#define CEDE_LATENCY_PARAM_MAX_LENGTH	\
	(MAX_CEDE_LATENCY_LEVELS * CEDE_LATENCY_PARAM_LENGTH * sizeof(char))
#define CEDE_LATENCY_TOKEN		45

static char cede_parameters[CEDE_LATENCY_PARAM_MAX_LENGTH];

static int parse_cede_parameters(void)
{
	memset(cede_parameters, 0, CEDE_LATENCY_PARAM_MAX_LENGTH);
	return rtas_call(rtas_token("ibm,get-system-parameter"), 3, 1,
			 NULL,
			 CEDE_LATENCY_TOKEN,
			 __pa(cede_parameters),
			 CEDE_LATENCY_PARAM_MAX_LENGTH);
}

static int __init pseries_cpu_hotplug_init(void)
{
	int cpu;
	int qcss_tok;

#ifdef CONFIG_ARCH_CPU_PROBE_RELEASE
	ppc_md.cpu_probe = dlpar_cpu_probe;
	ppc_md.cpu_release = dlpar_cpu_release;
#endif /* CONFIG_ARCH_CPU_PROBE_RELEASE */

	rtas_stop_self_token = rtas_token("stop-self");
	qcss_tok = rtas_token("query-cpu-stopped-state");

	if (rtas_stop_self_token == RTAS_UNKNOWN_SERVICE ||
	    qcss_tok == RTAS_UNKNOWN_SERVICE) {
		printk(KERN_INFO "CPU Hotplug not supported by firmware "
		       "- disabling.\n");
		return 0;
	}

	ppc_md.cpu_die = pseries_mach_cpu_die;
	smp_ops->cpu_disable = pseries_cpu_disable;
	smp_ops->cpu_die = pseries_cpu_die;

	/* Processors can be added/removed only on LPAR */
	if (firmware_has_feature(FW_FEATURE_LPAR)) {
		of_reconfig_notifier_register(&pseries_smp_nb);
		cpu_maps_update_begin();
		if (cede_offline_enabled && parse_cede_parameters() == 0) {
			default_offline_state = CPU_STATE_INACTIVE;
			for_each_online_cpu(cpu)
				set_default_offline_state(cpu);
		}
		cpu_maps_update_done();
	}

	return 0;
}
machine_arch_initcall(pseries, pseries_cpu_hotplug_init);