/*
 * SMP initialisation and IPI support
 * Based on arch/arm/kernel/smp.c
 *
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/acpi.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/cache.h>
#include <linux/profile.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/seq_file.h>
#include <linux/irq.h>
#include <linux/percpu.h>
#include <linux/clockchips.h>
#include <linux/completion.h>
#include <linux/of.h>
#include <linux/irq_work.h>

#include <asm/alternative.h>
#include <asm/atomic.h>
#include <asm/cacheflush.h>
#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/cpu_ops.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/processor.h>
#include <asm/smp_plat.h>
#include <asm/sections.h>
#include <asm/tlbflush.h>
#include <asm/ptrace.h>
#include <asm/virt.h>

#define CREATE_TRACE_POINTS
#include <trace/events/ipi.h>

/*
 * as from 2.5, kernels no longer have an init_tasks structure
 * so we need some other way of telling a new secondary core
 * where to place its SVC stack
 */
struct secondary_data secondary_data;
/* Number of CPUs which aren't online, but looping in kernel text. */
int cpus_stuck_in_kernel;

enum ipi_msg_type {
	IPI_RESCHEDULE,
	IPI_CALL_FUNC,
	IPI_CPU_STOP,
	IPI_TIMER,
	IPI_IRQ_WORK,
	IPI_WAKEUP
};

#ifdef CONFIG_HOTPLUG_CPU
static int op_cpu_kill(unsigned int cpu);
#else
static inline int op_cpu_kill(unsigned int cpu)
{
	return -ENOSYS;
}
#endif

/*
 * Boot a secondary CPU, and assign it the specified idle task.
 * This also gives us the initial stack to use for this CPU.
 */
static int boot_secondary(unsigned int cpu, struct task_struct *idle)
{
	if (cpu_ops[cpu]->cpu_boot)
		return cpu_ops[cpu]->cpu_boot(cpu);

	return -EOPNOTSUPP;
}
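
/*
 * Illustrative only: boot_secondary() dispatches through the per-cpu
 * cpu_operations table. An enable method (PSCI, spin-table, ...) would
 * register a structure along the lines of the hypothetical sketch below;
 * the field names follow <asm/cpu_ops.h>, while the "foo" identifiers do
 * not exist anywhere in the tree.
 *
 *	static const struct cpu_operations foo_cpu_ops = {
 *		.name		= "foo",
 *		.cpu_init	= foo_cpu_init,
 *		.cpu_prepare	= foo_cpu_prepare,
 *		.cpu_boot	= foo_cpu_boot,
 *	};
 */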

static DECLARE_COMPLETION(cpu_running);

int __cpu_up(unsigned int cpu, struct task_struct *idle)
{
	int ret;
	long status;

	/*
	 * We need to tell the secondary core where to find its stack and the
	 * page tables.
	 */
	secondary_data.stack = task_stack_page(idle) + THREAD_START_SP;
	update_cpu_boot_status(CPU_MMU_OFF);
	__flush_dcache_area(&secondary_data, sizeof(secondary_data));

	/*
	 * Now bring the CPU into our world.
	 */
	ret = boot_secondary(cpu, idle);
	if (ret == 0) {
		/*
		 * CPU was successfully started, wait for it to come online or
		 * time out.
		 */
		wait_for_completion_timeout(&cpu_running,
					    msecs_to_jiffies(1000));

		if (!cpu_online(cpu)) {
			pr_crit("CPU%u: failed to come online\n", cpu);
			ret = -EIO;
		}
	} else {
		pr_err("CPU%u: failed to boot: %d\n", cpu, ret);
	}

	secondary_data.stack = NULL;
	status = READ_ONCE(secondary_data.status);
	if (ret && status) {
		if (status == CPU_MMU_OFF)
			status = READ_ONCE(__early_cpu_boot_status);

		switch (status) {
		default:
			pr_err("CPU%u: failed in unknown state : 0x%lx\n",
			       cpu, status);
			break;
		case CPU_KILL_ME:
			if (!op_cpu_kill(cpu)) {
				pr_crit("CPU%u: died during early boot\n", cpu);
				break;
			}
			pr_crit("CPU%u: may not have shut down cleanly\n", cpu);
			/* Fall through */
		case CPU_STUCK_IN_KERNEL:
			pr_crit("CPU%u: is stuck in kernel\n", cpu);
			cpus_stuck_in_kernel++;
			break;
		case CPU_PANIC_KERNEL:
			panic("CPU%u detected unsupported configuration\n", cpu);
		}
	}

	return ret;
}

static void smp_store_cpu_info(unsigned int cpuid)
{
	store_cpu_topology(cpuid);
}

/*
 * This is the secondary CPU boot entry. We're using this CPU's
 * idle thread stack, but a set of temporary page tables.
 */
asmlinkage void secondary_start_kernel(void)
{
	struct mm_struct *mm = &init_mm;
	unsigned int cpu = smp_processor_id();

	/*
	 * All kernel threads share the same mm context; grab a
	 * reference and switch to it.
	 */
	atomic_inc(&mm->mm_count);
	current->active_mm = mm;

	set_my_cpu_offset(per_cpu_offset(smp_processor_id()));

	/*
	 * TTBR0 is only used for the identity mapping at this stage. Make it
	 * point to zero page to avoid speculatively fetching new entries.
	 */
	cpu_uninstall_idmap();

	preempt_disable();
	trace_hardirqs_off();

	/*
	 * If the system has established the capabilities, make sure
	 * this CPU ticks all of those. If it doesn't, the CPU will
	 * fail to come online.
	 */
	verify_local_cpu_capabilities();

	if (cpu_ops[cpu]->cpu_postboot)
		cpu_ops[cpu]->cpu_postboot();

	/*
	 * Log the CPU info before it is marked online and might get read.
	 */
	cpuinfo_store_cpu();

	/*
	 * Enable GIC and timers.
	 */
	notify_cpu_starting(cpu);

	smp_store_cpu_info(cpu);

	/*
	 * OK, now it's safe to let the boot CPU continue. Wait for
	 * the CPU migration code to notice that the CPU is online
	 * before we continue.
	 */
	pr_info("CPU%u: Booted secondary processor [%08x]\n",
		cpu, read_cpuid_id());
	update_cpu_boot_status(CPU_BOOT_SUCCESS);
	/* Make sure the status update is visible before we complete */
	smp_wmb();
	set_cpu_online(cpu, true);
	complete(&cpu_running);

	local_dbg_enable();
	local_irq_enable();
	local_async_enable();

	/*
	 * OK, it's off to the idle thread for us
	 */
	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
}
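
/*
 * A sketch of the bringup handshake implemented above (the low-level
 * secondary entry points live in arch/arm64/kernel/head.S):
 *
 *	boot CPU: __cpu_up()			new CPU
 *	  publish secondary_data.stack	--->	entered from head.S
 *	  boot_secondary() kicks core	--->	secondary_start_kernel()
 *	  wait_for_completion_timeout()	<---	set_cpu_online(); complete()
 *
 * On failure the new CPU instead reports CPU_KILL_ME, CPU_STUCK_IN_KERNEL
 * or CPU_PANIC_KERNEL through secondary_data.status, which __cpu_up()
 * decodes in its switch statement.
 */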

#ifdef CONFIG_HOTPLUG_CPU
static int op_cpu_disable(unsigned int cpu)
{
	/*
	 * If we don't have a cpu_die method, abort before we reach the point
	 * of no return. CPU0 may not have a cpu_ops, so test for it.
	 */
	if (!cpu_ops[cpu] || !cpu_ops[cpu]->cpu_die)
		return -EOPNOTSUPP;

	/*
	 * We may need to abort a hot unplug for some other mechanism-specific
	 * reason.
	 */
	if (cpu_ops[cpu]->cpu_disable)
		return cpu_ops[cpu]->cpu_disable(cpu);

	return 0;
}

/*
 * __cpu_disable runs on the processor to be shutdown.
 */
int __cpu_disable(void)
{
	unsigned int cpu = smp_processor_id();
	int ret;

	ret = op_cpu_disable(cpu);
	if (ret)
		return ret;

	/*
	 * Take this CPU offline. Once we clear this, we can't return,
	 * and we must not schedule until we're ready to give up the cpu.
	 */
	set_cpu_online(cpu, false);

	/*
	 * OK - migrate IRQs away from this CPU
	 */
	irq_migrate_all_off_this_cpu();

	return 0;
}

static int op_cpu_kill(unsigned int cpu)
{
	/*
	 * If we have no means of synchronising with the dying CPU, then assume
	 * that it is really dead. We can only wait for an arbitrary length of
	 * time and hope that it's dead, so let's skip the wait and just hope.
	 */
	if (!cpu_ops[cpu]->cpu_kill)
		return 0;

	return cpu_ops[cpu]->cpu_kill(cpu);
}

/*
 * called on the thread which is asking for a CPU to be shutdown -
 * waits until shutdown has completed, or it is timed out.
 */
void __cpu_die(unsigned int cpu)
{
	int err;

	if (!cpu_wait_death(cpu, 5)) {
		pr_crit("CPU%u: cpu didn't die\n", cpu);
		return;
	}
	pr_notice("CPU%u: shutdown\n", cpu);

	/*
	 * Now that the dying CPU is beyond the point of no return w.r.t.
	 * in-kernel synchronisation, try to get the firmware to help us to
	 * verify that it has really left the kernel before we consider
	 * clobbering anything it might still be using.
	 */
	err = op_cpu_kill(cpu);
	if (err)
		pr_warn("CPU%d may not have shut down cleanly: %d\n",
			cpu, err);
}

/*
 * Called from the idle thread for the CPU which has been shutdown.
 *
 * Note that we disable IRQs here, but do not re-enable them
 * before returning to the caller. This is also the behaviour
 * of the other hotplug-cpu capable cores, so presumably coming
 * out of idle fixes this.
 */
void cpu_die(void)
{
	unsigned int cpu = smp_processor_id();

	idle_task_exit();

	local_irq_disable();

	/* Tell __cpu_die() that this CPU is now safe to dispose of */
	(void)cpu_report_death();

	/*
	 * Actually shutdown the CPU. This must never fail. The specific hotplug
	 * mechanism must perform all required cache maintenance to ensure that
	 * no dirty lines are lost in the process of shutting down the CPU.
	 */
	cpu_ops[cpu]->cpu_die(cpu);

	BUG();
}
#endif
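
/*
 * Summary of the hot-unplug path above:
 *
 *	1. __cpu_disable()	on the dying CPU: enable-method veto check,
 *				mark the CPU offline, migrate its IRQs away.
 *	2. cpu_die()		from the dying CPU's idle thread: report
 *				death, then cpu_ops->cpu_die() takes the
 *				core down.
 *	3. __cpu_die()		on the requesting CPU: wait up to 5 seconds
 *				for the death report, then op_cpu_kill()
 *				asks the firmware to confirm the CPU has
 *				really left the kernel.
 */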

/*
 * Kill the calling secondary CPU, early in bringup before it is turned
 * online.
 */
void cpu_die_early(void)
{
	int cpu = smp_processor_id();

	pr_crit("CPU%d: will not boot\n", cpu);

	/* Mark this CPU absent */
	set_cpu_present(cpu, false);

#ifdef CONFIG_HOTPLUG_CPU
	update_cpu_boot_status(CPU_KILL_ME);
	/* Check if we can park ourselves */
	if (cpu_ops[cpu] && cpu_ops[cpu]->cpu_die)
		cpu_ops[cpu]->cpu_die(cpu);
#endif
	update_cpu_boot_status(CPU_STUCK_IN_KERNEL);

	cpu_park_loop();
}

static void __init hyp_mode_check(void)
{
	if (is_hyp_mode_available())
		pr_info("CPU: All CPU(s) started at EL2\n");
	else if (is_hyp_mode_mismatched())
		WARN_TAINT(1, TAINT_CPU_OUT_OF_SPEC,
			   "CPU: CPUs started in inconsistent modes");
	else
		pr_info("CPU: All CPU(s) started at EL1\n");
}

void __init smp_cpus_done(unsigned int max_cpus)
{
	pr_info("SMP: Total of %d processors activated.\n", num_online_cpus());
	setup_cpu_features();
	hyp_mode_check();
	apply_alternatives_all();
}

void __init smp_prepare_boot_cpu(void)
{
	cpuinfo_store_boot_cpu();
	set_my_cpu_offset(per_cpu_offset(smp_processor_id()));
}

static u64 __init of_get_cpu_mpidr(struct device_node *dn)
{
	const __be32 *cell;
	u64 hwid;

	/*
	 * A cpu node with missing "reg" property is
	 * considered invalid to build a cpu_logical_map
	 * entry.
	 */
	cell = of_get_property(dn, "reg", NULL);
	if (!cell) {
		pr_err("%s: missing reg property\n", dn->full_name);
		return INVALID_HWID;
	}

	hwid = of_read_number(cell, of_n_addr_cells(dn));
	/*
	 * Non affinity bits must be set to 0 in the DT
	 */
	if (hwid & ~MPIDR_HWID_BITMASK) {
		pr_err("%s: invalid reg property\n", dn->full_name);
		return INVALID_HWID;
	}
	return hwid;
}
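
/*
 * For reference, a hypothetical DT cpu node that of_get_cpu_mpidr() above
 * would accept; "reg" carries the MPIDR affinity fields (see
 * Documentation/devicetree/bindings/arm/cpus.txt for the binding):
 *
 *	cpu@100 {
 *		device_type = "cpu";
 *		compatible = "arm,cortex-a57";
 *		reg = <0x0 0x100>;
 *		enable-method = "psci";
 *	};
 */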

/*
 * Duplicate MPIDRs are a recipe for disaster. Scan all initialized
 * entries and check for duplicates. If any is found just ignore the
 * cpu. cpu_logical_map was initialized to INVALID_HWID to avoid
 * matching valid MPIDR values.
 */
static bool __init is_mpidr_duplicate(unsigned int cpu, u64 hwid)
{
	unsigned int i;

	for (i = 1; (i < cpu) && (i < NR_CPUS); i++)
		if (cpu_logical_map(i) == hwid)
			return true;
	return false;
}

/*
 * Initialize cpu operations for a logical cpu and
 * set it in the possible mask on success
 */
static int __init smp_cpu_setup(int cpu)
{
	if (cpu_read_ops(cpu))
		return -ENODEV;

	if (cpu_ops[cpu]->cpu_init(cpu))
		return -ENODEV;

	set_cpu_possible(cpu, true);

	return 0;
}
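
/*
 * Note on cpu_read_ops() above: it resolves the enable method for a cpu -
 * from the DT "enable-method" property (e.g. "psci" or "spin-table"), or
 * on ACPI systems from the MADT (PSCI, or the parking protocol mailboxes
 * recorded by acpi_set_mailbox_entry() below) - and fills in cpu_ops[cpu]
 * accordingly.
 */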

static bool bootcpu_valid __initdata;
static unsigned int cpu_count = 1;

#ifdef CONFIG_ACPI
/*
 * acpi_map_gic_cpu_interface - parse processor MADT entry
 *
 * Carry out sanity checks on MADT processor entry and initialize
 * cpu_logical_map on success
 */
static void __init
acpi_map_gic_cpu_interface(struct acpi_madt_generic_interrupt *processor)
{
	u64 hwid = processor->arm_mpidr;

	if (!(processor->flags & ACPI_MADT_ENABLED)) {
		pr_debug("skipping disabled CPU entry with 0x%llx MPIDR\n", hwid);
		return;
	}

	if (hwid & ~MPIDR_HWID_BITMASK || hwid == INVALID_HWID) {
		pr_err("skipping CPU entry with invalid MPIDR 0x%llx\n", hwid);
		return;
	}

	if (is_mpidr_duplicate(cpu_count, hwid)) {
		pr_err("duplicate CPU MPIDR 0x%llx in MADT\n", hwid);
		return;
	}

	/* Check if GICC structure of boot CPU is available in the MADT */
	if (cpu_logical_map(0) == hwid) {
		if (bootcpu_valid) {
			pr_err("duplicate boot CPU MPIDR: 0x%llx in MADT\n",
			       hwid);
			return;
		}
		bootcpu_valid = true;
		return;
	}

	if (cpu_count >= NR_CPUS)
		return;

	/* map the logical cpu id to cpu MPIDR */
	cpu_logical_map(cpu_count) = hwid;

	/*
	 * Set-up the ACPI parking protocol cpu entries
	 * while initializing the cpu_logical_map to
	 * avoid parsing MADT entries multiple times for
	 * nothing (ie a valid cpu_logical_map entry should
	 * contain a valid parking protocol data set to
	 * initialize the cpu if the parking protocol is
	 * the only available enable method).
	 */
	acpi_set_mailbox_entry(cpu_count, processor);

	cpu_count++;
}

static int __init
acpi_parse_gic_cpu_interface(struct acpi_subtable_header *header,
			     const unsigned long end)
{
	struct acpi_madt_generic_interrupt *processor;

	processor = (struct acpi_madt_generic_interrupt *)header;
	if (BAD_MADT_GICC_ENTRY(processor, end))
		return -EINVAL;

	acpi_table_print_madt_entry(header);

	acpi_map_gic_cpu_interface(processor);

	return 0;
}
#else
#define acpi_table_parse_madt(...)	do { } while (0)
#endif

/*
 * Enumerate the possible CPU set from the device tree and build the
 * cpu logical map array containing MPIDR values related to logical
 * cpus. Assumes that cpu_logical_map(0) has already been initialized.
 */
static void __init of_parse_and_init_cpus(void)
{
	struct device_node *dn = NULL;

	while ((dn = of_find_node_by_type(dn, "cpu"))) {
		u64 hwid = of_get_cpu_mpidr(dn);

		if (hwid == INVALID_HWID)
			goto next;

		if (is_mpidr_duplicate(cpu_count, hwid)) {
			pr_err("%s: duplicate cpu reg properties in the DT\n",
				dn->full_name);
			goto next;
		}

		/*
		 * The numbering scheme requires that the boot CPU
		 * must be assigned logical id 0. Record it so that
		 * the logical map built from DT is validated and can
		 * be used.
		 */
		if (hwid == cpu_logical_map(0)) {
			if (bootcpu_valid) {
				pr_err("%s: duplicate boot cpu reg property in DT\n",
					dn->full_name);
				goto next;
			}

			bootcpu_valid = true;

			/*
			 * cpu_logical_map has already been
			 * initialized and the boot cpu doesn't need
			 * the enable-method so continue without
			 * incrementing cpu.
			 */
			continue;
		}

		if (cpu_count >= NR_CPUS)
			goto next;

		pr_debug("cpu logical map 0x%llx\n", hwid);
		cpu_logical_map(cpu_count) = hwid;
next:
		cpu_count++;
	}
}

/*
 * Enumerate the possible CPU set from the device tree or ACPI and build the
 * cpu logical map array containing MPIDR values related to logical
 * cpus. Assumes that cpu_logical_map(0) has already been initialized.
 */
void __init smp_init_cpus(void)
{
	int i;

	if (acpi_disabled)
		of_parse_and_init_cpus();
	else
		/*
		 * do a walk of MADT to determine how many CPUs
		 * we have including disabled CPUs, and get information
		 * we need for SMP init
		 */
		acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_INTERRUPT,
				      acpi_parse_gic_cpu_interface, 0);

	if (cpu_count > NR_CPUS)
		pr_warn("no. of cores (%d) greater than configured maximum of %d - clipping\n",
			cpu_count, NR_CPUS);

	if (!bootcpu_valid) {
		pr_err("missing boot CPU MPIDR, not enabling secondaries\n");
		return;
	}

	/*
	 * We need to set the cpu_logical_map entries before enabling
	 * the cpus so that cpu processor description entries (DT cpu nodes
	 * and ACPI MADT entries) can be retrieved by matching the cpu hwid
	 * with entries in cpu_logical_map while initializing the cpus.
	 * If the cpu set-up fails, invalidate the cpu_logical_map entry.
	 */
	for (i = 1; i < NR_CPUS; i++) {
		if (cpu_logical_map(i) != INVALID_HWID) {
			if (smp_cpu_setup(i))
				cpu_logical_map(i) = INVALID_HWID;
		}
	}
}
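
/*
 * How the cpumasks are populated by this file:
 *
 *	possible - set in smp_cpu_setup() once an enable method is resolved
 *	present  - set in smp_prepare_cpus() below, after cpu_prepare()
 *	online   - set by each secondary in secondary_start_kernel()
 */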

void __init smp_prepare_cpus(unsigned int max_cpus)
{
	int err;
	unsigned int cpu, ncores = num_possible_cpus();

	init_cpu_topology();

	smp_store_cpu_info(smp_processor_id());

	/*
	 * are we trying to boot more cores than exist?
	 */
	if (max_cpus > ncores)
		max_cpus = ncores;

	/* Don't bother if we're effectively UP */
	if (max_cpus <= 1)
		return;

	/*
	 * Initialise the present map (which describes the set of CPUs
	 * actually populated at the present time) and release the
	 * secondaries from the bootloader.
	 *
	 * Make sure we online at most (max_cpus - 1) additional CPUs.
	 */
	max_cpus--;
	for_each_possible_cpu(cpu) {
		if (max_cpus == 0)
			break;

		if (cpu == smp_processor_id())
			continue;

		if (!cpu_ops[cpu])
			continue;

		err = cpu_ops[cpu]->cpu_prepare(cpu);
		if (err)
			continue;

		set_cpu_present(cpu, true);
		max_cpus--;
	}
}

void (*__smp_cross_call)(const struct cpumask *, unsigned int);

void __init set_smp_cross_call(void (*fn)(const struct cpumask *, unsigned int))
{
	__smp_cross_call = fn;
}

static const char *ipi_types[NR_IPI] __tracepoint_string = {
#define S(x,s)	[x] = s
	S(IPI_RESCHEDULE, "Rescheduling interrupts"),
	S(IPI_CALL_FUNC, "Function call interrupts"),
	S(IPI_CPU_STOP, "CPU stop interrupts"),
	S(IPI_TIMER, "Timer broadcast interrupts"),
	S(IPI_IRQ_WORK, "IRQ work interrupts"),
	S(IPI_WAKEUP, "CPU wake-up interrupts"),
};

static void smp_cross_call(const struct cpumask *target, unsigned int ipinr)
{
	trace_ipi_raise(target, ipi_types[ipinr]);
	__smp_cross_call(target, ipinr);
}

void show_ipi_list(struct seq_file *p, int prec)
{
	unsigned int cpu, i;

	for (i = 0; i < NR_IPI; i++) {
		seq_printf(p, "%*s%u:%s", prec - 1, "IPI", i,
			   prec >= 4 ? " " : "");
		for_each_online_cpu(cpu)
			seq_printf(p, "%10u ",
				   __get_irq_stat(cpu, ipi_irqs[i]));
		seq_printf(p, " %s\n", ipi_types[i]);
	}
}

u64 smp_irq_stat_cpu(unsigned int cpu)
{
	u64 sum = 0;
	int i;

	for (i = 0; i < NR_IPI; i++)
		sum += __get_irq_stat(cpu, ipi_irqs[i]);

	return sum;
}

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	smp_cross_call(mask, IPI_CALL_FUNC);
}

void arch_send_call_function_single_ipi(int cpu)
{
	smp_cross_call(cpumask_of(cpu), IPI_CALL_FUNC);
}

#ifdef CONFIG_ARM64_ACPI_PARKING_PROTOCOL
void arch_send_wakeup_ipi_mask(const struct cpumask *mask)
{
	smp_cross_call(mask, IPI_WAKEUP);
}
#endif

#ifdef CONFIG_IRQ_WORK
void arch_irq_work_raise(void)
{
	if (__smp_cross_call)
		smp_cross_call(cpumask_of(smp_processor_id()), IPI_IRQ_WORK);
}
#endif

static DEFINE_RAW_SPINLOCK(stop_lock);

/*
 * ipi_cpu_stop - handle IPI from smp_send_stop()
 */
static void ipi_cpu_stop(unsigned int cpu)
{
	if (system_state == SYSTEM_BOOTING ||
	    system_state == SYSTEM_RUNNING) {
		raw_spin_lock(&stop_lock);
		pr_crit("CPU%u: stopping\n", cpu);
		dump_stack();
		raw_spin_unlock(&stop_lock);
	}

	set_cpu_online(cpu, false);

	local_irq_disable();

	while (1)
		cpu_relax();
}

/*
 * Main handler for inter-processor interrupts
 */
void handle_IPI(int ipinr, struct pt_regs *regs)
{
	unsigned int cpu = smp_processor_id();
	struct pt_regs *old_regs = set_irq_regs(regs);

	if ((unsigned)ipinr < NR_IPI) {
		trace_ipi_entry_rcuidle(ipi_types[ipinr]);
		__inc_irq_stat(cpu, ipi_irqs[ipinr]);
	}

	switch (ipinr) {
	case IPI_RESCHEDULE:
		scheduler_ipi();
		break;

	case IPI_CALL_FUNC:
		irq_enter();
		generic_smp_call_function_interrupt();
		irq_exit();
		break;

	case IPI_CPU_STOP:
		irq_enter();
		ipi_cpu_stop(cpu);
		irq_exit();
		break;

#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
	case IPI_TIMER:
		irq_enter();
		tick_receive_broadcast();
		irq_exit();
		break;
#endif

#ifdef CONFIG_IRQ_WORK
	case IPI_IRQ_WORK:
		irq_enter();
		irq_work_run();
		irq_exit();
		break;
#endif

#ifdef CONFIG_ARM64_ACPI_PARKING_PROTOCOL
	case IPI_WAKEUP:
		WARN_ONCE(!acpi_parking_protocol_valid(cpu),
			  "CPU%u: Wake-up IPI outside the ACPI parking protocol\n",
			  cpu);
		break;
#endif

	default:
		pr_crit("CPU%u: Unknown IPI message 0x%x\n", cpu, ipinr);
		break;
	}

	if ((unsigned)ipinr < NR_IPI)
		trace_ipi_exit_rcuidle(ipi_types[ipinr]);
	set_irq_regs(old_regs);
}
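
/*
 * End-to-end IPI flow, as wired up in this file: a sender such as
 * smp_send_reschedule() below goes through smp_cross_call() into the
 * irqchip hook registered via set_smp_cross_call(); the target CPU's
 * interrupt entry then calls handle_IPI() above with the IPI number to
 * dispatch.
 */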

void smp_send_reschedule(int cpu)
{
	smp_cross_call(cpumask_of(cpu), IPI_RESCHEDULE);
}

#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
void tick_broadcast(const struct cpumask *mask)
{
	smp_cross_call(mask, IPI_TIMER);
}
#endif

void smp_send_stop(void)
{
	unsigned long timeout;

	if (num_online_cpus() > 1) {
		cpumask_t mask;

		cpumask_copy(&mask, cpu_online_mask);
		cpumask_clear_cpu(smp_processor_id(), &mask);

		smp_cross_call(&mask, IPI_CPU_STOP);
	}

	/* Wait up to one second for other CPUs to stop */
	timeout = USEC_PER_SEC;
	while (num_online_cpus() > 1 && timeout--)
		udelay(1);

	if (num_online_cpus() > 1)
		pr_warn("SMP: failed to stop secondary CPUs\n");
}

/*
 * not supported here
 */
int setup_profiling_timer(unsigned int multiplier)
{
	return -EINVAL;
}