/*
 * SMP support for ppc.
 *
 * Written by Cort Dougan (cort@cs.nmt.edu) borrowing a great
 * deal of code from the sparc and intel versions.
 *
 * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
 *
 * PowerPC-64 Support added by Dave Engebretsen, Peter Bergner, and
 * Mike Corrigan {engebret|bergner|mikec}@us.ibm.com
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#undef DEBUG

#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/cache.h>
#include <linux/err.h>
#include <linux/device.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/topology.h>

#include <asm/ptrace.h>
#include <linux/atomic.h>
#include <asm/irq.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/prom.h>
#include <asm/smp.h>
#include <asm/time.h>
#include <asm/machdep.h>
#include <asm/cputhreads.h>
#include <asm/cputable.h>
#include <asm/mpic.h>
#include <asm/vdso_datapage.h>
#ifdef CONFIG_PPC64
#include <asm/paca.h>
#endif
#include <asm/vdso.h>
#include <asm/debug.h>

#ifdef DEBUG
#include <asm/udbg.h>
#define DBG(fmt...) udbg_printf(fmt)
#else
#define DBG(fmt...)
#endif

#ifdef CONFIG_HOTPLUG_CPU
/* State of each CPU during hotplug phases */
static DEFINE_PER_CPU(int, cpu_state) = { 0 };
#endif

struct thread_info *secondary_ti;

DEFINE_PER_CPU(cpumask_var_t, cpu_sibling_map);
DEFINE_PER_CPU(cpumask_var_t, cpu_core_map);

EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);
EXPORT_PER_CPU_SYMBOL(cpu_core_map);

/* SMP operations for this machine */
struct smp_ops_t *smp_ops;

/* Can't be static due to PowerMac hackery */
volatile unsigned int cpu_callin_map[NR_CPUS];

int smt_enabled_at_boot = 1;

static void (*crash_ipi_function_ptr)(struct pt_regs *) = NULL;

/*
 * Returns 1 if the specified cpu should be brought up during boot.
 * Used to inhibit booting threads if they've been disabled or
 * limited on the command line.
 */
int smp_generic_cpu_bootable(unsigned int nr)
{
	/* Special case - we inhibit secondary thread startup
	 * during boot if the user requests it.
	 */
	if (system_state == SYSTEM_BOOTING && cpu_has_feature(CPU_FTR_SMT)) {
		if (!smt_enabled_at_boot && cpu_thread_in_core(nr) != 0)
			return 0;
		if (smt_enabled_at_boot
		    && cpu_thread_in_core(nr) >= smt_enabled_at_boot)
			return 0;
	}

	return 1;
}
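
/*
 * Editor's illustration (a minimal sketch, not part of the original
 * file): with "smt-enabled=2" on the command line, smt_enabled_at_boot
 * is 2, so on a 4-thread core only threads 0 and 1 of each core pass
 * the checks above.  The hypothetical helper below restates just the
 * boot-time rule from smp_generic_cpu_bootable().
 */
static inline bool example_smt_bootable(unsigned int nr)
{
	if (!smt_enabled_at_boot)
		return cpu_thread_in_core(nr) == 0;
	return cpu_thread_in_core(nr) < smt_enabled_at_boot;
}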

#ifdef CONFIG_PPC64
int smp_generic_kick_cpu(int nr)
{
	BUG_ON(nr < 0 || nr >= NR_CPUS);

	/*
	 * The processor is currently spinning, waiting for the
	 * cpu_start field to become non-zero.  After we set cpu_start,
	 * the processor will continue on to secondary_start.
	 */
	if (!paca[nr].cpu_start) {
		paca[nr].cpu_start = 1;
		smp_mb();
		return 0;
	}

#ifdef CONFIG_HOTPLUG_CPU
	/*
	 * Ok, it's not there, so it might be soft-unplugged; let's
	 * try to bring it back.
	 */
	generic_set_cpu_up(nr);
	smp_wmb();
	smp_send_reschedule(nr);
#endif /* CONFIG_HOTPLUG_CPU */

	return 0;
}
#endif /* CONFIG_PPC64 */

static irqreturn_t call_function_action(int irq, void *data)
{
	generic_smp_call_function_interrupt();
	return IRQ_HANDLED;
}

static irqreturn_t reschedule_action(int irq, void *data)
{
	scheduler_ipi();
	return IRQ_HANDLED;
}

static irqreturn_t call_function_single_action(int irq, void *data)
{
	generic_smp_call_function_single_interrupt();
	return IRQ_HANDLED;
}

static irqreturn_t debug_ipi_action(int irq, void *data)
{
	if (crash_ipi_function_ptr) {
		crash_ipi_function_ptr(get_irq_regs());
		return IRQ_HANDLED;
	}

#ifdef CONFIG_DEBUGGER
	debugger_ipi(get_irq_regs());
#endif /* CONFIG_DEBUGGER */

	return IRQ_HANDLED;
}

static irq_handler_t smp_ipi_action[] = {
	[PPC_MSG_CALL_FUNCTION] = call_function_action,
	[PPC_MSG_RESCHEDULE] = reschedule_action,
	[PPC_MSG_CALL_FUNC_SINGLE] = call_function_single_action,
	[PPC_MSG_DEBUGGER_BREAK] = debug_ipi_action,
};

const char *smp_ipi_name[] = {
	[PPC_MSG_CALL_FUNCTION] = "ipi call function",
	[PPC_MSG_RESCHEDULE] = "ipi reschedule",
	[PPC_MSG_CALL_FUNC_SINGLE] = "ipi call function single",
	[PPC_MSG_DEBUGGER_BREAK] = "ipi debugger",
};

/* Optional function to request an IPI, for controllers with >= 4 IPIs */
int smp_request_message_ipi(int virq, int msg)
{
	int err;

	if (msg < 0 || msg > PPC_MSG_DEBUGGER_BREAK)
		return -EINVAL;
#if !defined(CONFIG_DEBUGGER) && !defined(CONFIG_KEXEC)
	if (msg == PPC_MSG_DEBUGGER_BREAK)
		return 1;
#endif
	err = request_irq(virq, smp_ipi_action[msg],
			  IRQF_PERCPU | IRQF_NO_THREAD | IRQF_NO_SUSPEND,
			  smp_ipi_name[msg], NULL);
	WARN(err < 0, "unable to request_irq %d for %s (rc %d)\n",
	     virq, smp_ipi_name[msg], err);

	return err;
}

#ifdef CONFIG_PPC_SMP_MUXED_IPI
struct cpu_messages {
	int messages;		/* current messages */
	unsigned long data;	/* data for cause ipi */
};
static DEFINE_PER_CPU_SHARED_ALIGNED(struct cpu_messages, ipi_message);

void smp_muxed_ipi_set_data(int cpu, unsigned long data)
{
	struct cpu_messages *info = &per_cpu(ipi_message, cpu);

	info->data = data;
}

void smp_muxed_ipi_message_pass(int cpu, int msg)
{
	struct cpu_messages *info = &per_cpu(ipi_message, cpu);
	char *message = (char *)&info->messages;

	/*
	 * Order previous accesses before accesses in the IPI handler.
	 */
	smp_mb();
	message[msg] = 1;
	/*
	 * cause_ipi functions are required to include a full barrier
	 * before doing whatever causes the IPI.
	 */
	smp_ops->cause_ipi(cpu, info->data);
}

#ifdef __BIG_ENDIAN__
#define IPI_MESSAGE(A) (1 << (24 - 8 * (A)))
#else
#define IPI_MESSAGE(A) (1 << (8 * (A)))
#endif

irqreturn_t smp_ipi_demux(void)
{
	struct cpu_messages *info = &__get_cpu_var(ipi_message);
	unsigned int all;

	mb();	/* order any irq clear */

	do {
		all = xchg(&info->messages, 0);
		if (all & IPI_MESSAGE(PPC_MSG_CALL_FUNCTION))
			generic_smp_call_function_interrupt();
		if (all & IPI_MESSAGE(PPC_MSG_RESCHEDULE))
			scheduler_ipi();
		if (all & IPI_MESSAGE(PPC_MSG_CALL_FUNC_SINGLE))
			generic_smp_call_function_single_interrupt();
		if (all & IPI_MESSAGE(PPC_MSG_DEBUGGER_BREAK))
			debug_ipi_action(0, NULL);
	} while (info->messages);

	return IRQ_HANDLED;
}
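
/*
 * Editor's illustration (a minimal sketch, not in the original file):
 * the four message slots share one int so that smp_ipi_demux() can
 * drain them with a single atomic xchg(), and IPI_MESSAGE() must pick
 * out exactly the byte that smp_muxed_ipi_message_pass() stores via
 * message[msg].  This hypothetical helper cross-checks one slot.
 */
static void __maybe_unused example_check_ipi_encoding(void)
{
	union {
		int word;
		char bytes[sizeof(int)];
	} u = { .word = 0 };

	/* Mimic the sender's byte store for PPC_MSG_RESCHEDULE... */
	u.bytes[PPC_MSG_RESCHEDULE] = 1;

	/* ...and verify the receiver's bitmask selects that byte. */
	BUG_ON(u.word != IPI_MESSAGE(PPC_MSG_RESCHEDULE));
}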
#endif /* CONFIG_PPC_SMP_MUXED_IPI */

static inline void do_message_pass(int cpu, int msg)
{
	if (smp_ops->message_pass)
		smp_ops->message_pass(cpu, msg);
#ifdef CONFIG_PPC_SMP_MUXED_IPI
	else
		smp_muxed_ipi_message_pass(cpu, msg);
#endif
}

void smp_send_reschedule(int cpu)
{
	if (likely(smp_ops))
		do_message_pass(cpu, PPC_MSG_RESCHEDULE);
}
EXPORT_SYMBOL_GPL(smp_send_reschedule);

void arch_send_call_function_single_ipi(int cpu)
{
	do_message_pass(cpu, PPC_MSG_CALL_FUNC_SINGLE);
}

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	unsigned int cpu;

	for_each_cpu(cpu, mask)
		do_message_pass(cpu, PPC_MSG_CALL_FUNCTION);
}

#if defined(CONFIG_DEBUGGER) || defined(CONFIG_KEXEC)
void smp_send_debugger_break(void)
{
	int cpu;
	int me = raw_smp_processor_id();

	if (unlikely(!smp_ops))
		return;

	for_each_online_cpu(cpu)
		if (cpu != me)
			do_message_pass(cpu, PPC_MSG_DEBUGGER_BREAK);
}
#endif

#ifdef CONFIG_KEXEC
void crash_send_ipi(void (*crash_ipi_callback)(struct pt_regs *))
{
	crash_ipi_function_ptr = crash_ipi_callback;
	if (crash_ipi_callback) {
		mb();
		smp_send_debugger_break();
	}
}
#endif

static void stop_this_cpu(void *dummy)
{
	/* Remove this CPU */
	set_cpu_online(smp_processor_id(), false);

	local_irq_disable();
	while (1)
		;
}

void smp_send_stop(void)
{
	smp_call_function(stop_this_cpu, NULL, 0);
}

struct thread_info *current_set[NR_CPUS];

static void smp_store_cpu_info(int id)
{
	per_cpu(cpu_pvr, id) = mfspr(SPRN_PVR);
#ifdef CONFIG_PPC_FSL_BOOK3E
	per_cpu(next_tlbcam_idx, id)
		= (mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY) - 1;
#endif
}

void __init smp_prepare_cpus(unsigned int max_cpus)
{
	unsigned int cpu;

	DBG("smp_prepare_cpus\n");

	/*
	 * setup_cpu may need to be called on the boot cpu. We haven't
	 * spun any cpus up but let's be paranoid.
	 */
	BUG_ON(boot_cpuid != smp_processor_id());

	/* Fixup boot cpu */
	smp_store_cpu_info(boot_cpuid);
	cpu_callin_map[boot_cpuid] = 1;

	for_each_possible_cpu(cpu) {
		zalloc_cpumask_var_node(&per_cpu(cpu_sibling_map, cpu),
					GFP_KERNEL, cpu_to_node(cpu));
		zalloc_cpumask_var_node(&per_cpu(cpu_core_map, cpu),
					GFP_KERNEL, cpu_to_node(cpu));
	}

	cpumask_set_cpu(boot_cpuid, cpu_sibling_mask(boot_cpuid));
	cpumask_set_cpu(boot_cpuid, cpu_core_mask(boot_cpuid));

	if (smp_ops) {
		if (smp_ops->probe)
			max_cpus = smp_ops->probe();
		else
			max_cpus = NR_CPUS;
	} else {
		max_cpus = 1;
	}
}
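
/*
 * Editor's sketch (hypothetical platform glue, not in the original
 * file): the smp_ops fields referenced in this file can be wired up
 * roughly like this from platform setup code.  smp_generic_kick_cpu()
 * is only built on CONFIG_PPC64, hence the guard.
 */
#ifdef CONFIG_PPC64
static struct smp_ops_t __maybe_unused example_platform_smp_ops = {
	.message_pass	= NULL,		/* NULL: fall back to muxed IPIs */
	.probe		= NULL,		/* NULL: max_cpus becomes NR_CPUS */
	.kick_cpu	= smp_generic_kick_cpu,
	.cpu_bootable	= smp_generic_cpu_bootable,
};
#endif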

void smp_prepare_boot_cpu(void)
{
	BUG_ON(smp_processor_id() != boot_cpuid);
#ifdef CONFIG_PPC64
	paca[boot_cpuid].__current = current;
#endif
	current_set[boot_cpuid] = task_thread_info(current);
}

#ifdef CONFIG_HOTPLUG_CPU

int generic_cpu_disable(void)
{
	unsigned int cpu = smp_processor_id();

	if (cpu == boot_cpuid)
		return -EBUSY;

	set_cpu_online(cpu, false);
#ifdef CONFIG_PPC64
	vdso_data->processorCount--;
#endif
	migrate_irqs();
	return 0;
}

void generic_cpu_die(unsigned int cpu)
{
	int i;

	for (i = 0; i < 100; i++) {
		smp_rmb();
		if (per_cpu(cpu_state, cpu) == CPU_DEAD)
			return;
		msleep(100);
	}
	printk(KERN_ERR "CPU%d didn't die...\n", cpu);
}

void generic_mach_cpu_die(void)
{
	unsigned int cpu;

	local_irq_disable();
	idle_task_exit();
	cpu = smp_processor_id();
	printk(KERN_DEBUG "CPU%d offline\n", cpu);
	__get_cpu_var(cpu_state) = CPU_DEAD;
	smp_wmb();
	while (__get_cpu_var(cpu_state) != CPU_UP_PREPARE)
		cpu_relax();
}

void generic_set_cpu_dead(unsigned int cpu)
{
	per_cpu(cpu_state, cpu) = CPU_DEAD;
}

/*
 * The cpu_state should be set to CPU_UP_PREPARE in kick_cpu(), otherwise
 * the cpu_state is always CPU_DEAD after calling generic_set_cpu_dead(),
 * which makes the delay in generic_cpu_die() not happen.
 */
void generic_set_cpu_up(unsigned int cpu)
{
	per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
}

int generic_check_cpu_restart(unsigned int cpu)
{
	return per_cpu(cpu_state, cpu) == CPU_UP_PREPARE;
}

static atomic_t secondary_inhibit_count;

/*
 * Don't allow secondary CPU threads to come online
 */
void inhibit_secondary_onlining(void)
{
	/*
	 * This makes secondary_inhibit_count stable during cpu
	 * online/offline operations.
	 */
	get_online_cpus();

	atomic_inc(&secondary_inhibit_count);
	put_online_cpus();
}
EXPORT_SYMBOL_GPL(inhibit_secondary_onlining);

/*
 * Allow secondary CPU threads to come online again
 */
void uninhibit_secondary_onlining(void)
{
	get_online_cpus();
	atomic_dec(&secondary_inhibit_count);
	put_online_cpus();
}
EXPORT_SYMBOL_GPL(uninhibit_secondary_onlining);
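
/*
 * Editor's usage sketch (hypothetical caller, not in the original
 * file): a subsystem that needs whole cores to itself (KVM HV is the
 * intended kind of user) brackets its critical region like this.
 */
static void __maybe_unused example_claim_whole_cores(void)
{
	inhibit_secondary_onlining();	/* secondary threads stay offline */
	/* ... run with only thread 0 of each core online ... */
	uninhibit_secondary_onlining();	/* onlining allowed again */
}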

static int secondaries_inhibited(void)
{
	return atomic_read(&secondary_inhibit_count);
}

#else /* HOTPLUG_CPU */

#define secondaries_inhibited()	0

#endif

static void cpu_idle_thread_init(unsigned int cpu, struct task_struct *idle)
{
	struct thread_info *ti = task_thread_info(idle);

#ifdef CONFIG_PPC64
	paca[cpu].__current = idle;
	paca[cpu].kstack = (unsigned long)ti + THREAD_SIZE - STACK_FRAME_OVERHEAD;
#endif
	ti->cpu = cpu;
	secondary_ti = current_set[cpu] = ti;
}

int __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
	int rc, c;

	/*
	 * Don't allow secondary threads to come online if inhibited.
	 */
	if (threads_per_core > 1 && secondaries_inhibited() &&
	    cpu % threads_per_core != 0)
		return -EBUSY;

	if (smp_ops == NULL ||
	    (smp_ops->cpu_bootable && !smp_ops->cpu_bootable(cpu)))
		return -EINVAL;

	cpu_idle_thread_init(cpu, tidle);

	/* Make sure the callin-map entry is 0 (can be left over from a
	 * CPU hotplug).
	 */
	cpu_callin_map[cpu] = 0;

	/* The information for processor bringup must
	 * be written out to main store before we release
	 * the processor.
	 */
	smp_mb();

	/* wake up cpus */
	DBG("smp: kicking cpu %d\n", cpu);
	rc = smp_ops->kick_cpu(cpu);
	if (rc) {
		pr_err("smp: failed starting cpu %d (rc %d)\n", cpu, rc);
		return rc;
	}

	/*
	 * Wait to see if the cpu made a callin (is actually up).
	 * The timeout below was found through experimentation.
	 * -- Cort
	 */
	if (system_state < SYSTEM_RUNNING)
		for (c = 50000; c && !cpu_callin_map[cpu]; c--)
			udelay(100);
#ifdef CONFIG_HOTPLUG_CPU
	else
		/*
		 * CPUs can take much longer to come up in the
		 * hotplug case.  Wait five seconds.
		 */
		for (c = 5000; c && !cpu_callin_map[cpu]; c--)
			msleep(1);
#endif

	if (!cpu_callin_map[cpu]) {
		printk(KERN_ERR "Processor %u is stuck.\n", cpu);
		return -ENOENT;
	}

	DBG("Processor %u found.\n", cpu);

	if (smp_ops->give_timebase)
		smp_ops->give_timebase();

	/* Wait until cpu puts itself in the online map */
	while (!cpu_online(cpu))
		cpu_relax();

	return 0;
}

/* Return the value of the reg property corresponding to the given
 * logical cpu.
 */
int cpu_to_core_id(int cpu)
{
	struct device_node *np;
	const int *reg;
	int id = -1;

	np = of_get_cpu_node(cpu, NULL);
	if (!np)
		goto out;

	reg = of_get_property(np, "reg", NULL);
	if (!reg)
		goto out;

	id = *reg;
out:
	of_node_put(np);
	return id;
}

/* Return the value of the chip-id property corresponding
 * to the given logical cpu.
 */
int cpu_to_chip_id(int cpu)
{
	struct device_node *np;
	int id;

	np = of_get_cpu_node(cpu, NULL);
	if (!np)
		return -1;

	/* Read the chip-id before dropping the node reference. */
	id = of_get_ibm_chip_id(np);
	of_node_put(np);
	return id;
}
EXPORT_SYMBOL(cpu_to_chip_id);
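
/*
 * Editor's illustration (hypothetical device tree fragment): given a
 * cpu node such as
 *
 *	cpu@20 {
 *		device_type = "cpu";
 *		reg = <0x20>;
 *		ibm,chip-id = <1>;
 *	};
 *
 * cpu_to_core_id() returns 0x20 and cpu_to_chip_id() returns 1 for the
 * logical cpu bound to that node.
 */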

/* Helper routines for cpu to core mapping */
int cpu_core_index_of_thread(int cpu)
{
	return cpu >> threads_shift;
}
EXPORT_SYMBOL_GPL(cpu_core_index_of_thread);

int cpu_first_thread_of_core(int core)
{
	return core << threads_shift;
}
EXPORT_SYMBOL_GPL(cpu_first_thread_of_core);
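
/*
 * Editor's worked example: with threads_per_core == 8 (threads_shift
 * == 3), logical cpu 13 is thread 5 of core 1, so
 * cpu_core_index_of_thread(13) returns 13 >> 3 == 1 and
 * cpu_first_thread_of_core(1) returns 1 << 3 == 8.
 */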

static void traverse_siblings_chip_id(int cpu, bool add, int chipid)
{
	const struct cpumask *mask;
	struct device_node *np;
	int i, plen;
	const __be32 *prop;

	mask = add ? cpu_online_mask : cpu_present_mask;
	for_each_cpu(i, mask) {
		np = of_get_cpu_node(i, NULL);
		if (!np)
			continue;
		prop = of_get_property(np, "ibm,chip-id", &plen);
		if (prop && plen == sizeof(int) &&
		    of_read_number(prop, 1) == chipid) {
			if (add) {
				cpumask_set_cpu(cpu, cpu_core_mask(i));
				cpumask_set_cpu(i, cpu_core_mask(cpu));
			} else {
				cpumask_clear_cpu(cpu, cpu_core_mask(i));
				cpumask_clear_cpu(i, cpu_core_mask(cpu));
			}
		}
		of_node_put(np);
	}
}

/* Must be called when no change can occur to cpu_present_mask,
 * i.e. during cpu online or offline.
 */
static struct device_node *cpu_to_l2cache(int cpu)
{
	struct device_node *np;
	struct device_node *cache;

	if (!cpu_present(cpu))
		return NULL;

	np = of_get_cpu_node(cpu, NULL);
	if (np == NULL)
		return NULL;

	cache = of_find_next_cache_node(np);

	of_node_put(np);

	return cache;
}

static void traverse_core_siblings(int cpu, bool add)
{
	struct device_node *l2_cache, *np;
	const struct cpumask *mask;
	int i, chip, plen;
	const __be32 *prop;

	/* First see if we have ibm,chip-id properties in cpu nodes */
	np = of_get_cpu_node(cpu, NULL);
	if (np) {
		chip = -1;
		prop = of_get_property(np, "ibm,chip-id", &plen);
		if (prop && plen == sizeof(int))
			chip = of_read_number(prop, 1);
		of_node_put(np);
		if (chip >= 0) {
			traverse_siblings_chip_id(cpu, add, chip);
			return;
		}
	}

	l2_cache = cpu_to_l2cache(cpu);
	mask = add ? cpu_online_mask : cpu_present_mask;
	for_each_cpu(i, mask) {
		np = cpu_to_l2cache(i);
		if (!np)
			continue;
		if (np == l2_cache) {
			if (add) {
				cpumask_set_cpu(cpu, cpu_core_mask(i));
				cpumask_set_cpu(i, cpu_core_mask(cpu));
			} else {
				cpumask_clear_cpu(cpu, cpu_core_mask(i));
				cpumask_clear_cpu(i, cpu_core_mask(cpu));
			}
		}
		of_node_put(np);
	}
	of_node_put(l2_cache);
}

/* Activate a secondary processor. */
void start_secondary(void *unused)
{
	unsigned int cpu = smp_processor_id();
	int i, base;

	atomic_inc(&init_mm.mm_count);
	current->active_mm = &init_mm;

	smp_store_cpu_info(cpu);
	set_dec(tb_ticks_per_jiffy);
	preempt_disable();
	cpu_callin_map[cpu] = 1;

	if (smp_ops->setup_cpu)
		smp_ops->setup_cpu(cpu);
	if (smp_ops->take_timebase)
		smp_ops->take_timebase();

	secondary_cpu_time_init();

#ifdef CONFIG_PPC64
	if (system_state == SYSTEM_RUNNING)
		vdso_data->processorCount++;

	vdso_getcpu_init();
#endif
	/* Update sibling maps */
	base = cpu_first_thread_sibling(cpu);
	for (i = 0; i < threads_per_core; i++) {
		if (cpu_is_offline(base + i) && (cpu != base + i))
			continue;
		cpumask_set_cpu(cpu, cpu_sibling_mask(base + i));
		cpumask_set_cpu(base + i, cpu_sibling_mask(cpu));

		/* cpu_core_map should be a superset of
		 * cpu_sibling_map even if we don't have cache
		 * information, so update the former here, too.
		 */
		cpumask_set_cpu(cpu, cpu_core_mask(base + i));
		cpumask_set_cpu(base + i, cpu_core_mask(cpu));
	}
	traverse_core_siblings(cpu, true);

	smp_wmb();
	notify_cpu_starting(cpu);
	set_cpu_online(cpu, true);

	local_irq_enable();

	cpu_startup_entry(CPUHP_ONLINE);

	BUG();
}
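
/*
 * Editor's sketch (illustrative, not in the original file): the sibling
 * update above keeps cpu_sibling_map a subset of cpu_core_map, so a
 * hypothetical sanity check could be written as:
 */
static inline bool example_masks_consistent(int cpu)
{
	return cpumask_subset(cpu_sibling_mask(cpu), cpu_core_mask(cpu));
}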

int setup_profiling_timer(unsigned int multiplier)
{
	return 0;
}

void __init smp_cpus_done(unsigned int max_cpus)
{
	cpumask_var_t old_mask;

	/* We want the setup_cpu() here to be called from CPU 0, but our
	 * init thread may have been "borrowed" by another CPU in the
	 * meantime, so we pin ourselves to CPU 0 for a short while.
	 */
	alloc_cpumask_var(&old_mask, GFP_NOWAIT);
	cpumask_copy(old_mask, tsk_cpus_allowed(current));
	set_cpus_allowed_ptr(current, cpumask_of(boot_cpuid));

	if (smp_ops && smp_ops->setup_cpu)
		smp_ops->setup_cpu(boot_cpuid);

	set_cpus_allowed_ptr(current, old_mask);

	free_cpumask_var(old_mask);

	if (smp_ops && smp_ops->bringup_done)
		smp_ops->bringup_done();

	dump_numa_cpu_topology();
}

int arch_sd_sibling_asym_packing(void)
{
	if (cpu_has_feature(CPU_FTR_ASYM_SMT)) {
		printk_once(KERN_INFO "Enabling Asymmetric SMT scheduling\n");
		return SD_ASYM_PACKING;
	}
	return 0;
}

#ifdef CONFIG_HOTPLUG_CPU
int __cpu_disable(void)
{
	int cpu = smp_processor_id();
	int base, i;
	int err;

	if (!smp_ops->cpu_disable)
		return -ENOSYS;

	err = smp_ops->cpu_disable();
	if (err)
		return err;

	/* Update sibling maps */
	base = cpu_first_thread_sibling(cpu);
	for (i = 0; i < threads_per_core; i++) {
		cpumask_clear_cpu(cpu, cpu_sibling_mask(base + i));
		cpumask_clear_cpu(base + i, cpu_sibling_mask(cpu));
		cpumask_clear_cpu(cpu, cpu_core_mask(base + i));
		cpumask_clear_cpu(base + i, cpu_core_mask(cpu));
	}
	traverse_core_siblings(cpu, false);

	return 0;
}

void __cpu_die(unsigned int cpu)
{
	if (smp_ops->cpu_die)
		smp_ops->cpu_die(cpu);
}

static DEFINE_MUTEX(powerpc_cpu_hotplug_driver_mutex);

void cpu_hotplug_driver_lock(void)
{
	mutex_lock(&powerpc_cpu_hotplug_driver_mutex);
}

void cpu_hotplug_driver_unlock(void)
{
	mutex_unlock(&powerpc_cpu_hotplug_driver_mutex);
}

void cpu_die(void)
{
	if (ppc_md.cpu_die)
		ppc_md.cpu_die();

	/* If we return, we re-enter start_secondary */
	start_secondary_resume();
}

#endif