/*
 * SMP support for ppc.
 *
 * Written by Cort Dougan (cort@cs.nmt.edu) borrowing a great
 * deal of code from the sparc and intel versions.
 *
 * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
 *
 * PowerPC-64 Support added by Dave Engebretsen, Peter Bergner, and
 * Mike Corrigan {engebret|bergner|mikec}@us.ibm.com
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#undef DEBUG

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/cache.h>
#include <linux/err.h>
#include <linux/sysdev.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/topology.h>

#include <asm/ptrace.h>
#include <linux/atomic.h>
#include <asm/irq.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/prom.h>
#include <asm/smp.h>
#include <asm/time.h>
#include <asm/machdep.h>
#include <asm/cputhreads.h>
#include <asm/cputable.h>
#include <asm/system.h>
#include <asm/mpic.h>
#include <asm/vdso_datapage.h>
#ifdef CONFIG_PPC64
#include <asm/paca.h>
#endif

#ifdef DEBUG
#include <asm/udbg.h>
#define DBG(fmt...) udbg_printf(fmt)
#else
#define DBG(fmt...)
#endif


/* Store all idle threads; these can be reused instead of creating
 * new ones. This also avoids complicated thread destroy functionality
 * for idle threads.
 */
#ifdef CONFIG_HOTPLUG_CPU
/*
 * Needed only for CONFIG_HOTPLUG_CPU because __cpuinitdata is
 * removed after init for !CONFIG_HOTPLUG_CPU.
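 *
 * The per-CPU copy is never discarded, so a CPU that is offlined and
 * later brought back online can reuse the idle thread it forked the
 * first time around (see create_idle() below).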
 */
static DEFINE_PER_CPU(struct task_struct *, idle_thread_array);
#define get_idle_for_cpu(x)	(per_cpu(idle_thread_array, x))
#define set_idle_for_cpu(x, p)	(per_cpu(idle_thread_array, x) = (p))
#else
static struct task_struct *idle_thread_array[NR_CPUS] __cpuinitdata;
#define get_idle_for_cpu(x)	(idle_thread_array[(x)])
#define set_idle_for_cpu(x, p)	(idle_thread_array[(x)] = (p))
#endif

struct thread_info *secondary_ti;

DEFINE_PER_CPU(cpumask_var_t, cpu_sibling_map);
DEFINE_PER_CPU(cpumask_var_t, cpu_core_map);

EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);
EXPORT_PER_CPU_SYMBOL(cpu_core_map);

/* SMP operations for this machine */
struct smp_ops_t *smp_ops;

/* Can't be static due to PowerMac hackery */
volatile unsigned int cpu_callin_map[NR_CPUS];

int smt_enabled_at_boot = 1;

static void (*crash_ipi_function_ptr)(struct pt_regs *) = NULL;

#ifdef CONFIG_PPC64
int __devinit smp_generic_kick_cpu(int nr)
{
	BUG_ON(nr < 0 || nr >= NR_CPUS);

	/*
	 * The processor is currently spinning, waiting for the
	 * cpu_start field to become non-zero. After we set cpu_start,
	 * the processor will continue on to secondary_start.
	 */
	paca[nr].cpu_start = 1;
	smp_mb();

	return 0;
}
#endif

static irqreturn_t call_function_action(int irq, void *data)
{
	generic_smp_call_function_interrupt();
	return IRQ_HANDLED;
}

static irqreturn_t reschedule_action(int irq, void *data)
{
	scheduler_ipi();
	return IRQ_HANDLED;
}

static irqreturn_t call_function_single_action(int irq, void *data)
{
	generic_smp_call_function_single_interrupt();
	return IRQ_HANDLED;
}

static irqreturn_t debug_ipi_action(int irq, void *data)
{
	if (crash_ipi_function_ptr) {
		crash_ipi_function_ptr(get_irq_regs());
		return IRQ_HANDLED;
	}

#ifdef CONFIG_DEBUGGER
	debugger_ipi(get_irq_regs());
#endif /* CONFIG_DEBUGGER */

	return IRQ_HANDLED;
}

static irq_handler_t smp_ipi_action[] = {
	[PPC_MSG_CALL_FUNCTION] = call_function_action,
	[PPC_MSG_RESCHEDULE] = reschedule_action,
	[PPC_MSG_CALL_FUNC_SINGLE] = call_function_single_action,
	[PPC_MSG_DEBUGGER_BREAK] = debug_ipi_action,
};

const char *smp_ipi_name[] = {
	[PPC_MSG_CALL_FUNCTION] = "ipi call function",
	[PPC_MSG_RESCHEDULE] = "ipi reschedule",
	[PPC_MSG_CALL_FUNC_SINGLE] = "ipi call function single",
	[PPC_MSG_DEBUGGER_BREAK] = "ipi debugger",
};

/* optional function to request ipi, for controllers with >= 4 ipis */
int smp_request_message_ipi(int virq, int msg)
{
	int err;

	if (msg < 0 || msg > PPC_MSG_DEBUGGER_BREAK) {
		return -EINVAL;
	}
#if !defined(CONFIG_DEBUGGER) && !defined(CONFIG_KEXEC)
	if (msg == PPC_MSG_DEBUGGER_BREAK) {
		return 1;
	}
#endif
	err = request_irq(virq, smp_ipi_action[msg], IRQF_DISABLED|IRQF_PERCPU,
			  smp_ipi_name[msg], 0);
	WARN(err < 0, "unable to request_irq %d for %s (rc %d)\n",
	     virq, smp_ipi_name[msg], err);

	return err;
}

#ifdef CONFIG_PPC_SMP_MUXED_IPI
struct cpu_messages {
	int messages;		/* current messages */
	unsigned long data;	/* data for cause ipi */
};
static DEFINE_PER_CPU_SHARED_ALIGNED(struct cpu_messages, ipi_message);

void smp_muxed_ipi_set_data(int cpu, unsigned long data)
{
	struct cpu_messages *info = &per_cpu(ipi_message, cpu);

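	/*
	 * The value stored here is handed back verbatim as the second
	 * argument to smp_ops->cause_ipi() by smp_muxed_ipi_message_pass()
	 * below, so the interrupt controller code can stash whatever
	 * per-CPU cookie it needs.
	 */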
	info->data = data;
}

void smp_muxed_ipi_message_pass(int cpu, int msg)
{
	struct cpu_messages *info = &per_cpu(ipi_message, cpu);
	char *message = (char *)&info->messages;

	message[msg] = 1;
	mb();
	smp_ops->cause_ipi(cpu, info->data);
}

irqreturn_t smp_ipi_demux(void)
{
	struct cpu_messages *info = &__get_cpu_var(ipi_message);
	unsigned int all;

	mb();	/* order any irq clear */

	do {
		all = xchg_local(&info->messages, 0);

#ifdef __BIG_ENDIAN
		if (all & (1 << (24 - 8 * PPC_MSG_CALL_FUNCTION)))
			generic_smp_call_function_interrupt();
		if (all & (1 << (24 - 8 * PPC_MSG_RESCHEDULE)))
			scheduler_ipi();
		if (all & (1 << (24 - 8 * PPC_MSG_CALL_FUNC_SINGLE)))
			generic_smp_call_function_single_interrupt();
		if (all & (1 << (24 - 8 * PPC_MSG_DEBUGGER_BREAK)))
			debug_ipi_action(0, NULL);
#else
#error Unsupported ENDIAN
#endif
	} while (info->messages);

	return IRQ_HANDLED;
}
#endif /* CONFIG_PPC_SMP_MUXED_IPI */

static inline void do_message_pass(int cpu, int msg)
{
	if (smp_ops->message_pass)
		smp_ops->message_pass(cpu, msg);
#ifdef CONFIG_PPC_SMP_MUXED_IPI
	else
		smp_muxed_ipi_message_pass(cpu, msg);
#endif
}

void smp_send_reschedule(int cpu)
{
	if (likely(smp_ops))
		do_message_pass(cpu, PPC_MSG_RESCHEDULE);
}
EXPORT_SYMBOL_GPL(smp_send_reschedule);

void arch_send_call_function_single_ipi(int cpu)
{
	do_message_pass(cpu, PPC_MSG_CALL_FUNC_SINGLE);
}

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	unsigned int cpu;

	for_each_cpu(cpu, mask)
		do_message_pass(cpu, PPC_MSG_CALL_FUNCTION);
}

#if defined(CONFIG_DEBUGGER) || defined(CONFIG_KEXEC)
void smp_send_debugger_break(void)
{
	int cpu;
	int me = raw_smp_processor_id();

	if (unlikely(!smp_ops))
		return;

	for_each_online_cpu(cpu)
		if (cpu != me)
			do_message_pass(cpu, PPC_MSG_DEBUGGER_BREAK);
}
#endif

#ifdef CONFIG_KEXEC
void crash_send_ipi(void (*crash_ipi_callback)(struct pt_regs *))
{
	crash_ipi_function_ptr = crash_ipi_callback;
	if (crash_ipi_callback) {
		mb();
		smp_send_debugger_break();
	}
}
#endif

static void stop_this_cpu(void *dummy)
{
	/* Remove this CPU */
	set_cpu_online(smp_processor_id(), false);

	local_irq_disable();
	while (1)
		;
}

void smp_send_stop(void)
{
	smp_call_function(stop_this_cpu, NULL, 0);
}

struct thread_info *current_set[NR_CPUS];

static void __devinit smp_store_cpu_info(int id)
{
	per_cpu(cpu_pvr, id) = mfspr(SPRN_PVR);
#ifdef CONFIG_PPC_FSL_BOOK3E
	per_cpu(next_tlbcam_idx, id)
		= (mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY) - 1;
#endif
}

void __init smp_prepare_cpus(unsigned int max_cpus)
{
	unsigned int cpu;

	DBG("smp_prepare_cpus\n");

	/*
	 * setup_cpu may need to be called on the boot cpu. We haven't
	 * spun any cpus up but let's be paranoid.
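	 * (The boot CPU gets its real setup_cpu() call later, from
	 * smp_cpus_done(), once the caller has pinned itself there.)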
324 */ 325 BUG_ON(boot_cpuid != smp_processor_id()); 326 327 /* Fixup boot cpu */ 328 smp_store_cpu_info(boot_cpuid); 329 cpu_callin_map[boot_cpuid] = 1; 330 331 for_each_possible_cpu(cpu) { 332 zalloc_cpumask_var_node(&per_cpu(cpu_sibling_map, cpu), 333 GFP_KERNEL, cpu_to_node(cpu)); 334 zalloc_cpumask_var_node(&per_cpu(cpu_core_map, cpu), 335 GFP_KERNEL, cpu_to_node(cpu)); 336 } 337 338 cpumask_set_cpu(boot_cpuid, cpu_sibling_mask(boot_cpuid)); 339 cpumask_set_cpu(boot_cpuid, cpu_core_mask(boot_cpuid)); 340 341 if (smp_ops) 342 if (smp_ops->probe) 343 max_cpus = smp_ops->probe(); 344 else 345 max_cpus = NR_CPUS; 346 else 347 max_cpus = 1; 348 } 349 350 void __devinit smp_prepare_boot_cpu(void) 351 { 352 BUG_ON(smp_processor_id() != boot_cpuid); 353 #ifdef CONFIG_PPC64 354 paca[boot_cpuid].__current = current; 355 #endif 356 current_set[boot_cpuid] = task_thread_info(current); 357 } 358 359 #ifdef CONFIG_HOTPLUG_CPU 360 /* State of each CPU during hotplug phases */ 361 static DEFINE_PER_CPU(int, cpu_state) = { 0 }; 362 363 int generic_cpu_disable(void) 364 { 365 unsigned int cpu = smp_processor_id(); 366 367 if (cpu == boot_cpuid) 368 return -EBUSY; 369 370 set_cpu_online(cpu, false); 371 #ifdef CONFIG_PPC64 372 vdso_data->processorCount--; 373 #endif 374 migrate_irqs(); 375 return 0; 376 } 377 378 void generic_cpu_die(unsigned int cpu) 379 { 380 int i; 381 382 for (i = 0; i < 100; i++) { 383 smp_rmb(); 384 if (per_cpu(cpu_state, cpu) == CPU_DEAD) 385 return; 386 msleep(100); 387 } 388 printk(KERN_ERR "CPU%d didn't die...\n", cpu); 389 } 390 391 void generic_mach_cpu_die(void) 392 { 393 unsigned int cpu; 394 395 local_irq_disable(); 396 idle_task_exit(); 397 cpu = smp_processor_id(); 398 printk(KERN_DEBUG "CPU%d offline\n", cpu); 399 __get_cpu_var(cpu_state) = CPU_DEAD; 400 smp_wmb(); 401 while (__get_cpu_var(cpu_state) != CPU_UP_PREPARE) 402 cpu_relax(); 403 } 404 405 void generic_set_cpu_dead(unsigned int cpu) 406 { 407 per_cpu(cpu_state, cpu) = CPU_DEAD; 408 } 409 #endif 410 411 struct create_idle { 412 struct work_struct work; 413 struct task_struct *idle; 414 struct completion done; 415 int cpu; 416 }; 417 418 static void __cpuinit do_fork_idle(struct work_struct *work) 419 { 420 struct create_idle *c_idle = 421 container_of(work, struct create_idle, work); 422 423 c_idle->idle = fork_idle(c_idle->cpu); 424 complete(&c_idle->done); 425 } 426 427 static int __cpuinit create_idle(unsigned int cpu) 428 { 429 struct thread_info *ti; 430 struct create_idle c_idle = { 431 .cpu = cpu, 432 .done = COMPLETION_INITIALIZER_ONSTACK(c_idle.done), 433 }; 434 INIT_WORK_ONSTACK(&c_idle.work, do_fork_idle); 435 436 c_idle.idle = get_idle_for_cpu(cpu); 437 438 /* We can't use kernel_thread since we must avoid to 439 * reschedule the child. We use a workqueue because 440 * we want to fork from a kernel thread, not whatever 441 * userspace process happens to be trying to online us. 
442 */ 443 if (!c_idle.idle) { 444 schedule_work(&c_idle.work); 445 wait_for_completion(&c_idle.done); 446 } else 447 init_idle(c_idle.idle, cpu); 448 if (IS_ERR(c_idle.idle)) { 449 pr_err("Failed fork for CPU %u: %li", cpu, PTR_ERR(c_idle.idle)); 450 return PTR_ERR(c_idle.idle); 451 } 452 ti = task_thread_info(c_idle.idle); 453 454 #ifdef CONFIG_PPC64 455 paca[cpu].__current = c_idle.idle; 456 paca[cpu].kstack = (unsigned long)ti + THREAD_SIZE - STACK_FRAME_OVERHEAD; 457 #endif 458 ti->cpu = cpu; 459 current_set[cpu] = ti; 460 461 return 0; 462 } 463 464 int __cpuinit __cpu_up(unsigned int cpu) 465 { 466 int rc, c; 467 468 if (smp_ops == NULL || 469 (smp_ops->cpu_bootable && !smp_ops->cpu_bootable(cpu))) 470 return -EINVAL; 471 472 /* Make sure we have an idle thread */ 473 rc = create_idle(cpu); 474 if (rc) 475 return rc; 476 477 secondary_ti = current_set[cpu]; 478 479 /* Make sure callin-map entry is 0 (can be leftover a CPU 480 * hotplug 481 */ 482 cpu_callin_map[cpu] = 0; 483 484 /* The information for processor bringup must 485 * be written out to main store before we release 486 * the processor. 487 */ 488 smp_mb(); 489 490 /* wake up cpus */ 491 DBG("smp: kicking cpu %d\n", cpu); 492 rc = smp_ops->kick_cpu(cpu); 493 if (rc) { 494 pr_err("smp: failed starting cpu %d (rc %d)\n", cpu, rc); 495 return rc; 496 } 497 498 /* 499 * wait to see if the cpu made a callin (is actually up). 500 * use this value that I found through experimentation. 501 * -- Cort 502 */ 503 if (system_state < SYSTEM_RUNNING) 504 for (c = 50000; c && !cpu_callin_map[cpu]; c--) 505 udelay(100); 506 #ifdef CONFIG_HOTPLUG_CPU 507 else 508 /* 509 * CPUs can take much longer to come up in the 510 * hotplug case. Wait five seconds. 511 */ 512 for (c = 5000; c && !cpu_callin_map[cpu]; c--) 513 msleep(1); 514 #endif 515 516 if (!cpu_callin_map[cpu]) { 517 printk(KERN_ERR "Processor %u is stuck.\n", cpu); 518 return -ENOENT; 519 } 520 521 DBG("Processor %u found.\n", cpu); 522 523 if (smp_ops->give_timebase) 524 smp_ops->give_timebase(); 525 526 /* Wait until cpu puts itself in the online map */ 527 while (!cpu_online(cpu)) 528 cpu_relax(); 529 530 return 0; 531 } 532 533 /* Return the value of the reg property corresponding to the given 534 * logical cpu. 535 */ 536 int cpu_to_core_id(int cpu) 537 { 538 struct device_node *np; 539 const int *reg; 540 int id = -1; 541 542 np = of_get_cpu_node(cpu, NULL); 543 if (!np) 544 goto out; 545 546 reg = of_get_property(np, "reg", NULL); 547 if (!reg) 548 goto out; 549 550 id = *reg; 551 out: 552 of_node_put(np); 553 return id; 554 } 555 556 /* Helper routines for cpu to core mapping */ 557 int cpu_core_index_of_thread(int cpu) 558 { 559 return cpu >> threads_shift; 560 } 561 EXPORT_SYMBOL_GPL(cpu_core_index_of_thread); 562 563 int cpu_first_thread_of_core(int core) 564 { 565 return core << threads_shift; 566 } 567 EXPORT_SYMBOL_GPL(cpu_first_thread_of_core); 568 569 /* Must be called when no change can occur to cpu_present_mask, 570 * i.e. during cpu online or offline. 571 */ 572 static struct device_node *cpu_to_l2cache(int cpu) 573 { 574 struct device_node *np; 575 struct device_node *cache; 576 577 if (!cpu_present(cpu)) 578 return NULL; 579 580 np = of_get_cpu_node(cpu, NULL); 581 if (np == NULL) 582 return NULL; 583 584 cache = of_find_next_cache_node(np); 585 586 of_node_put(np); 587 588 return cache; 589 } 590 591 /* Activate a secondary processor. 
 */
void __devinit start_secondary(void *unused)
{
	unsigned int cpu = smp_processor_id();
	struct device_node *l2_cache;
	int i, base;

	atomic_inc(&init_mm.mm_count);
	current->active_mm = &init_mm;

	smp_store_cpu_info(cpu);
	set_dec(tb_ticks_per_jiffy);
	preempt_disable();
	cpu_callin_map[cpu] = 1;

	if (smp_ops->setup_cpu)
		smp_ops->setup_cpu(cpu);
	if (smp_ops->take_timebase)
		smp_ops->take_timebase();

	secondary_cpu_time_init();

#ifdef CONFIG_PPC64
	if (system_state == SYSTEM_RUNNING)
		vdso_data->processorCount++;
#endif
	ipi_call_lock();
	notify_cpu_starting(cpu);
	set_cpu_online(cpu, true);
	/* Update sibling maps */
	base = cpu_first_thread_sibling(cpu);
	for (i = 0; i < threads_per_core; i++) {
		if (cpu_is_offline(base + i))
			continue;
		cpumask_set_cpu(cpu, cpu_sibling_mask(base + i));
		cpumask_set_cpu(base + i, cpu_sibling_mask(cpu));

		/* cpu_core_map should be a superset of
		 * cpu_sibling_map even if we don't have cache
		 * information, so update the former here, too.
		 */
		cpumask_set_cpu(cpu, cpu_core_mask(base + i));
		cpumask_set_cpu(base + i, cpu_core_mask(cpu));
	}
	l2_cache = cpu_to_l2cache(cpu);
	for_each_online_cpu(i) {
		struct device_node *np = cpu_to_l2cache(i);
		if (!np)
			continue;
		if (np == l2_cache) {
			cpumask_set_cpu(cpu, cpu_core_mask(i));
			cpumask_set_cpu(i, cpu_core_mask(cpu));
		}
		of_node_put(np);
	}
	of_node_put(l2_cache);
	ipi_call_unlock();

	local_irq_enable();

	cpu_idle();

	BUG();
}

int setup_profiling_timer(unsigned int multiplier)
{
	return 0;
}

void __init smp_cpus_done(unsigned int max_cpus)
{
	cpumask_var_t old_mask;

	/* We want the setup_cpu() here to be called from CPU 0, but our
	 * init thread may have been "borrowed" by another CPU in the
	 * meantime, so we pin ourselves to CPU 0 for a short while.
	 */
	alloc_cpumask_var(&old_mask, GFP_NOWAIT);
	cpumask_copy(old_mask, tsk_cpus_allowed(current));
	set_cpus_allowed_ptr(current, cpumask_of(boot_cpuid));

	if (smp_ops && smp_ops->setup_cpu)
		smp_ops->setup_cpu(boot_cpuid);

	set_cpus_allowed_ptr(current, old_mask);

	free_cpumask_var(old_mask);

	if (smp_ops && smp_ops->bringup_done)
		smp_ops->bringup_done();

	dump_numa_cpu_topology();
}

int arch_sd_sibling_asym_packing(void)
{
	if (cpu_has_feature(CPU_FTR_ASYM_SMT)) {
		printk_once(KERN_INFO "Enabling Asymmetric SMT scheduling\n");
		return SD_ASYM_PACKING;
	}
	return 0;
}

#ifdef CONFIG_HOTPLUG_CPU
int __cpu_disable(void)
{
	struct device_node *l2_cache;
	int cpu = smp_processor_id();
	int base, i;
	int err;

	if (!smp_ops->cpu_disable)
		return -ENOSYS;

	err = smp_ops->cpu_disable();
	if (err)
		return err;

	/* Update sibling maps */
	base = cpu_first_thread_sibling(cpu);
	for (i = 0; i < threads_per_core; i++) {
		cpumask_clear_cpu(cpu, cpu_sibling_mask(base + i));
		cpumask_clear_cpu(base + i, cpu_sibling_mask(cpu));
		cpumask_clear_cpu(cpu, cpu_core_mask(base + i));
		cpumask_clear_cpu(base + i, cpu_core_mask(cpu));
	}

	l2_cache = cpu_to_l2cache(cpu);
	for_each_present_cpu(i) {
		struct device_node *np = cpu_to_l2cache(i);
		if (!np)
			continue;
		if (np == l2_cache) {
			cpumask_clear_cpu(cpu, cpu_core_mask(i));
			cpumask_clear_cpu(i, cpu_core_mask(cpu));
		}
		of_node_put(np);
	}
	of_node_put(l2_cache);

	return 0;
}

void __cpu_die(unsigned int cpu)
{
	if (smp_ops->cpu_die)
		smp_ops->cpu_die(cpu);
}

static DEFINE_MUTEX(powerpc_cpu_hotplug_driver_mutex);

void cpu_hotplug_driver_lock(void)
{
	mutex_lock(&powerpc_cpu_hotplug_driver_mutex);
}

void cpu_hotplug_driver_unlock(void)
{
	mutex_unlock(&powerpc_cpu_hotplug_driver_mutex);
}

void cpu_die(void)
{
	if (ppc_md.cpu_die)
		ppc_md.cpu_die();

	/* If we return, we re-enter start_secondary */
	start_secondary_resume();
}

#endif