/*
 * SMP initialisation and IPI support
 * Based on arch/arm/kernel/smp.c
 *
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/acpi.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/cache.h>
#include <linux/profile.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/seq_file.h>
#include <linux/irq.h>
#include <linux/percpu.h>
#include <linux/clockchips.h>
#include <linux/completion.h>
#include <linux/of.h>
#include <linux/irq_work.h>

#include <asm/alternative.h>
#include <asm/atomic.h>
#include <asm/cacheflush.h>
#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/cpu_ops.h>
#include <asm/mmu_context.h>
#include <asm/numa.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/processor.h>
#include <asm/smp_plat.h>
#include <asm/sections.h>
#include <asm/tlbflush.h>
#include <asm/ptrace.h>
#include <asm/virt.h>

#define CREATE_TRACE_POINTS
#include <trace/events/ipi.h>

/*
 * As of 2.5, kernels no longer have an init_tasks structure,
 * so we need some other way of telling a new secondary core
 * where to place its SVC stack.
 */
struct secondary_data secondary_data;
/* Number of CPUs which aren't online, but looping in kernel text. */
int cpus_stuck_in_kernel;

enum ipi_msg_type {
	IPI_RESCHEDULE,
	IPI_CALL_FUNC,
	IPI_CPU_STOP,
	IPI_TIMER,
	IPI_IRQ_WORK,
	IPI_WAKEUP
};

#ifdef CONFIG_ARM64_VHE

/* Whether the boot CPU is running in HYP mode or not */
static bool boot_cpu_hyp_mode;

static inline void save_boot_cpu_run_el(void)
{
	boot_cpu_hyp_mode = is_kernel_in_hyp_mode();
}

static inline bool is_boot_cpu_in_hyp_mode(void)
{
	return boot_cpu_hyp_mode;
}

/*
 * Verify that a secondary CPU is running the kernel at the same
 * EL as that of the boot CPU.
 */
void verify_cpu_run_el(void)
{
	bool in_el2 = is_kernel_in_hyp_mode();
	bool boot_cpu_el2 = is_boot_cpu_in_hyp_mode();

	if (in_el2 ^ boot_cpu_el2) {
		pr_crit("CPU%d: mismatched Exception Level(EL%d) with boot CPU(EL%d)\n",
					smp_processor_id(),
					in_el2 ? 2 : 1,
					boot_cpu_el2 ? 2 : 1);
		cpu_panic_kernel();
	}
}

#else
static inline void save_boot_cpu_run_el(void) {}
#endif

#ifdef CONFIG_HOTPLUG_CPU
static int op_cpu_kill(unsigned int cpu);
#else
static inline int op_cpu_kill(unsigned int cpu)
{
	return -ENOSYS;
}
#endif


/*
 * Boot a secondary CPU, and assign it the specified idle task.
 * This also gives us the initial stack to use for this CPU.
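 * The actual wake-up mechanism (e.g. PSCI or spin-table) is supplied by the
 * cpu_ops method registered for this CPU; boot_secondary() only dispatches
 * to it.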
 */
static int boot_secondary(unsigned int cpu, struct task_struct *idle)
{
	if (cpu_ops[cpu]->cpu_boot)
		return cpu_ops[cpu]->cpu_boot(cpu);

	return -EOPNOTSUPP;
}

static DECLARE_COMPLETION(cpu_running);

int __cpu_up(unsigned int cpu, struct task_struct *idle)
{
	int ret;
	long status;

	/*
	 * We need to tell the secondary core where to find its stack and the
	 * page tables.
	 */
	secondary_data.stack = task_stack_page(idle) + THREAD_START_SP;
	update_cpu_boot_status(CPU_MMU_OFF);
	__flush_dcache_area(&secondary_data, sizeof(secondary_data));

	/*
	 * Now bring the CPU into our world.
	 */
	ret = boot_secondary(cpu, idle);
	if (ret == 0) {
		/*
		 * CPU was successfully started, wait for it to come online or
		 * time out.
		 */
		wait_for_completion_timeout(&cpu_running,
					    msecs_to_jiffies(1000));

		if (!cpu_online(cpu)) {
			pr_crit("CPU%u: failed to come online\n", cpu);
			ret = -EIO;
		}
	} else {
		pr_err("CPU%u: failed to boot: %d\n", cpu, ret);
	}

	secondary_data.stack = NULL;
	status = READ_ONCE(secondary_data.status);
	if (ret && status) {

		if (status == CPU_MMU_OFF)
			status = READ_ONCE(__early_cpu_boot_status);

		switch (status) {
		default:
			pr_err("CPU%u: failed in unknown state : 0x%lx\n",
					cpu, status);
			break;
		case CPU_KILL_ME:
			if (!op_cpu_kill(cpu)) {
				pr_crit("CPU%u: died during early boot\n", cpu);
				break;
			}
			/* Fall through */
			pr_crit("CPU%u: may not have shut down cleanly\n", cpu);
		case CPU_STUCK_IN_KERNEL:
			pr_crit("CPU%u: is stuck in kernel\n", cpu);
			cpus_stuck_in_kernel++;
			break;
		case CPU_PANIC_KERNEL:
			panic("CPU%u detected unsupported configuration\n", cpu);
		}
	}

	return ret;
}

static void smp_store_cpu_info(unsigned int cpuid)
{
	store_cpu_topology(cpuid);
	numa_store_cpu_info(cpuid);
}

/*
 * This is the secondary CPU boot entry. We're using this CPU's
 * idle thread stack, but a set of temporary page tables.
 */
asmlinkage void secondary_start_kernel(void)
{
	struct mm_struct *mm = &init_mm;
	unsigned int cpu = smp_processor_id();

	/*
	 * All kernel threads share the same mm context; grab a
	 * reference and switch to it.
	 */
	atomic_inc(&mm->mm_count);
	current->active_mm = mm;

	set_my_cpu_offset(per_cpu_offset(smp_processor_id()));

	/*
	 * TTBR0 is only used for the identity mapping at this stage. Make it
	 * point to zero page to avoid speculatively fetching new entries.
	 */
	cpu_uninstall_idmap();

	preempt_disable();
	trace_hardirqs_off();

	/*
	 * If the system has established the capabilities, make sure
	 * this CPU ticks all of those. If it doesn't, the CPU will
	 * fail to come online.
	 */
	verify_local_cpu_capabilities();

	if (cpu_ops[cpu]->cpu_postboot)
		cpu_ops[cpu]->cpu_postboot();

	/*
	 * Log the CPU info before it is marked online and might get read.
	 */
	cpuinfo_store_cpu();

	/*
	 * Enable GIC and timers.
	 */
	notify_cpu_starting(cpu);

	smp_store_cpu_info(cpu);

	/*
	 * OK, now it's safe to let the boot CPU continue. Wait for
	 * the CPU migration code to notice that the CPU is online
	 * before we continue.
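	 * The complete() below pairs with the wait_for_completion_timeout()
	 * call in __cpu_up() on the booting CPU.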
	 */
	pr_info("CPU%u: Booted secondary processor [%08x]\n",
					 cpu, read_cpuid_id());
	update_cpu_boot_status(CPU_BOOT_SUCCESS);
	set_cpu_online(cpu, true);
	complete(&cpu_running);

	local_irq_enable();
	local_async_enable();

	/*
	 * OK, it's off to the idle thread for us
	 */
	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
}

#ifdef CONFIG_HOTPLUG_CPU
static int op_cpu_disable(unsigned int cpu)
{
	/*
	 * If we don't have a cpu_die method, abort before we reach the point
	 * of no return. CPU0 may not have a cpu_ops, so test for it.
	 */
	if (!cpu_ops[cpu] || !cpu_ops[cpu]->cpu_die)
		return -EOPNOTSUPP;

	/*
	 * We may need to abort a hot unplug for some other mechanism-specific
	 * reason.
	 */
	if (cpu_ops[cpu]->cpu_disable)
		return cpu_ops[cpu]->cpu_disable(cpu);

	return 0;
}

/*
 * __cpu_disable runs on the processor to be shutdown.
 */
int __cpu_disable(void)
{
	unsigned int cpu = smp_processor_id();
	int ret;

	ret = op_cpu_disable(cpu);
	if (ret)
		return ret;

	/*
	 * Take this CPU offline. Once we clear this, we can't return,
	 * and we must not schedule until we're ready to give up the cpu.
	 */
	set_cpu_online(cpu, false);

	/*
	 * OK - migrate IRQs away from this CPU
	 */
	irq_migrate_all_off_this_cpu();

	return 0;
}

static int op_cpu_kill(unsigned int cpu)
{
	/*
	 * If we have no means of synchronising with the dying CPU, then assume
	 * that it is really dead. We can only wait for an arbitrary length of
	 * time and hope that it's dead, so let's skip the wait and just hope.
	 */
	if (!cpu_ops[cpu]->cpu_kill)
		return 0;

	return cpu_ops[cpu]->cpu_kill(cpu);
}

/*
 * Called on the thread which is asking for a CPU to be shut down -
 * waits until shutdown has completed, or it times out.
 */
void __cpu_die(unsigned int cpu)
{
	int err;

	if (!cpu_wait_death(cpu, 5)) {
		pr_crit("CPU%u: cpu didn't die\n", cpu);
		return;
	}
	pr_notice("CPU%u: shutdown\n", cpu);

	/*
	 * Now that the dying CPU is beyond the point of no return w.r.t.
	 * in-kernel synchronisation, try to get the firmware to help us to
	 * verify that it has really left the kernel before we consider
	 * clobbering anything it might still be using.
	 */
	err = op_cpu_kill(cpu);
	if (err)
		pr_warn("CPU%d may not have shut down cleanly: %d\n",
			cpu, err);
}

/*
 * Called from the idle thread for the CPU which has been shutdown.
 *
 * Note that we disable IRQs here, but do not re-enable them
 * before returning to the caller. This is also the behaviour
 * of the other hotplug-cpu capable cores, so presumably coming
 * out of idle fixes this.
 */
void cpu_die(void)
{
	unsigned int cpu = smp_processor_id();

	idle_task_exit();

	local_irq_disable();

	/* Tell __cpu_die() that this CPU is now safe to dispose of */
	(void)cpu_report_death();

	/*
	 * Actually shutdown the CPU. This must never fail. The specific hotplug
	 * mechanism must perform all required cache maintenance to ensure that
	 * no dirty lines are lost in the process of shutting down the CPU.
	 */
	cpu_ops[cpu]->cpu_die(cpu);

	BUG();
}
#endif

/*
 * Kill the calling secondary CPU, early in bringup before it is turned
 * online.
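 * This path is used when a secondary fails an early sanity check, for
 * example the capability verification done in verify_local_cpu_capabilities().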
 */
void cpu_die_early(void)
{
	int cpu = smp_processor_id();

	pr_crit("CPU%d: will not boot\n", cpu);

	/* Mark this CPU absent */
	set_cpu_present(cpu, 0);

#ifdef CONFIG_HOTPLUG_CPU
	update_cpu_boot_status(CPU_KILL_ME);
	/* Check if we can park ourselves */
	if (cpu_ops[cpu] && cpu_ops[cpu]->cpu_die)
		cpu_ops[cpu]->cpu_die(cpu);
#endif
	update_cpu_boot_status(CPU_STUCK_IN_KERNEL);

	cpu_park_loop();
}

static void __init hyp_mode_check(void)
{
	if (is_hyp_mode_available())
		pr_info("CPU: All CPU(s) started at EL2\n");
	else if (is_hyp_mode_mismatched())
		WARN_TAINT(1, TAINT_CPU_OUT_OF_SPEC,
			   "CPU: CPUs started in inconsistent modes");
	else
		pr_info("CPU: All CPU(s) started at EL1\n");
}

void __init smp_cpus_done(unsigned int max_cpus)
{
	pr_info("SMP: Total of %d processors activated.\n", num_online_cpus());
	setup_cpu_features();
	hyp_mode_check();
	apply_alternatives_all();
}

void __init smp_prepare_boot_cpu(void)
{
	set_my_cpu_offset(per_cpu_offset(smp_processor_id()));
	cpuinfo_store_boot_cpu();
	save_boot_cpu_run_el();
}

static u64 __init of_get_cpu_mpidr(struct device_node *dn)
{
	const __be32 *cell;
	u64 hwid;

	/*
	 * A cpu node with a missing "reg" property is
	 * considered invalid to build a cpu_logical_map
	 * entry.
	 */
	cell = of_get_property(dn, "reg", NULL);
	if (!cell) {
		pr_err("%s: missing reg property\n", dn->full_name);
		return INVALID_HWID;
	}

	hwid = of_read_number(cell, of_n_addr_cells(dn));
	/*
	 * Non-affinity bits must be set to 0 in the DT
	 */
	if (hwid & ~MPIDR_HWID_BITMASK) {
		pr_err("%s: invalid reg property\n", dn->full_name);
		return INVALID_HWID;
	}
	return hwid;
}

/*
 * Duplicate MPIDRs are a recipe for disaster. Scan all initialized
 * entries and check for duplicates. If any is found, just ignore the
 * cpu. cpu_logical_map was initialized to INVALID_HWID to avoid
 * matching valid MPIDR values.
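 * Logical id 0 (the boot CPU) is skipped here; duplicates of the boot
 * CPU MPIDR are caught separately by the bootcpu_valid checks in the
 * callers.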
 */
static bool __init is_mpidr_duplicate(unsigned int cpu, u64 hwid)
{
	unsigned int i;

	for (i = 1; (i < cpu) && (i < NR_CPUS); i++)
		if (cpu_logical_map(i) == hwid)
			return true;
	return false;
}

/*
 * Initialize cpu operations for a logical cpu and
 * set it in the possible mask on success
 */
static int __init smp_cpu_setup(int cpu)
{
	if (cpu_read_ops(cpu))
		return -ENODEV;

	if (cpu_ops[cpu]->cpu_init(cpu))
		return -ENODEV;

	set_cpu_possible(cpu, true);

	return 0;
}

static bool bootcpu_valid __initdata;
static unsigned int cpu_count = 1;

#ifdef CONFIG_ACPI
/*
 * acpi_map_gic_cpu_interface - parse processor MADT entry
 *
 * Carry out sanity checks on MADT processor entry and initialize
 * cpu_logical_map on success
 */
static void __init
acpi_map_gic_cpu_interface(struct acpi_madt_generic_interrupt *processor)
{
	u64 hwid = processor->arm_mpidr;

	if (!(processor->flags & ACPI_MADT_ENABLED)) {
		pr_debug("skipping disabled CPU entry with 0x%llx MPIDR\n", hwid);
		return;
	}

	if (hwid & ~MPIDR_HWID_BITMASK || hwid == INVALID_HWID) {
		pr_err("skipping CPU entry with invalid MPIDR 0x%llx\n", hwid);
		return;
	}

	if (is_mpidr_duplicate(cpu_count, hwid)) {
		pr_err("duplicate CPU MPIDR 0x%llx in MADT\n", hwid);
		return;
	}

	/* Check if GICC structure of boot CPU is available in the MADT */
	if (cpu_logical_map(0) == hwid) {
		if (bootcpu_valid) {
			pr_err("duplicate boot CPU MPIDR: 0x%llx in MADT\n",
			       hwid);
			return;
		}
		bootcpu_valid = true;
		return;
	}

	if (cpu_count >= NR_CPUS)
		return;

	/* map the logical cpu id to cpu MPIDR */
	cpu_logical_map(cpu_count) = hwid;

	/*
	 * Set up the ACPI parking protocol cpu entries
	 * while initializing the cpu_logical_map to
	 * avoid parsing MADT entries multiple times for
	 * nothing (i.e. a valid cpu_logical_map entry should
	 * contain a valid parking protocol data set to
	 * initialize the cpu if the parking protocol is
	 * the only available enable method).
	 */
	acpi_set_mailbox_entry(cpu_count, processor);

	early_map_cpu_to_node(cpu_count, acpi_numa_get_nid(cpu_count, hwid));

	cpu_count++;
}

static int __init
acpi_parse_gic_cpu_interface(struct acpi_subtable_header *header,
			     const unsigned long end)
{
	struct acpi_madt_generic_interrupt *processor;

	processor = (struct acpi_madt_generic_interrupt *)header;
	if (BAD_MADT_GICC_ENTRY(processor, end))
		return -EINVAL;

	acpi_table_print_madt_entry(header);

	acpi_map_gic_cpu_interface(processor);

	return 0;
}
#else
#define acpi_table_parse_madt(...)	do { } while (0)
#endif

/*
 * Enumerate the possible CPU set from the device tree and build the
 * cpu logical map array containing MPIDR values related to logical
 * cpus. Assumes that cpu_logical_map(0) has already been initialized.
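 * (cpu_logical_map(0) is filled in from the boot CPU's MPIDR during early
 * setup, so only secondaries are assigned new slots here.)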
 */
static void __init of_parse_and_init_cpus(void)
{
	struct device_node *dn = NULL;

	while ((dn = of_find_node_by_type(dn, "cpu"))) {
		u64 hwid = of_get_cpu_mpidr(dn);

		if (hwid == INVALID_HWID)
			goto next;

		if (is_mpidr_duplicate(cpu_count, hwid)) {
			pr_err("%s: duplicate cpu reg properties in the DT\n",
				dn->full_name);
			goto next;
		}

		/*
		 * The numbering scheme requires that the boot CPU
		 * must be assigned logical id 0. Record it so that
		 * the logical map built from DT is validated and can
		 * be used.
		 */
		if (hwid == cpu_logical_map(0)) {
			if (bootcpu_valid) {
				pr_err("%s: duplicate boot cpu reg property in DT\n",
					dn->full_name);
				goto next;
			}

			bootcpu_valid = true;

			/*
			 * cpu_logical_map has already been
			 * initialized and the boot cpu doesn't need
			 * the enable-method so continue without
			 * incrementing cpu.
			 */
			continue;
		}

		if (cpu_count >= NR_CPUS)
			goto next;

		pr_debug("cpu logical map 0x%llx\n", hwid);
		cpu_logical_map(cpu_count) = hwid;

		early_map_cpu_to_node(cpu_count, of_node_to_nid(dn));
next:
		cpu_count++;
	}
}

/*
 * Enumerate the possible CPU set from the device tree or ACPI and build the
 * cpu logical map array containing MPIDR values related to logical
 * cpus. Assumes that cpu_logical_map(0) has already been initialized.
 */
void __init smp_init_cpus(void)
{
	int i;

	if (acpi_disabled)
		of_parse_and_init_cpus();
	else
		/*
		 * do a walk of MADT to determine how many CPUs
		 * we have including disabled CPUs, and get information
		 * we need for SMP init
		 */
		acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_INTERRUPT,
				      acpi_parse_gic_cpu_interface, 0);

	if (cpu_count > NR_CPUS)
		pr_warn("no. of cores (%d) greater than configured maximum of %d - clipping\n",
			cpu_count, NR_CPUS);

	if (!bootcpu_valid) {
		pr_err("missing boot CPU MPIDR, not enabling secondaries\n");
		return;
	}

	/*
	 * We need to set the cpu_logical_map entries before enabling
	 * the cpus so that cpu processor description entries (DT cpu nodes
	 * and ACPI MADT entries) can be retrieved by matching the cpu hwid
	 * with entries in cpu_logical_map while initializing the cpus.
	 * If the cpu set-up fails, invalidate the cpu_logical_map entry.
	 */
	for (i = 1; i < NR_CPUS; i++) {
		if (cpu_logical_map(i) != INVALID_HWID) {
			if (smp_cpu_setup(i))
				cpu_logical_map(i) = INVALID_HWID;
		}
	}
}

void __init smp_prepare_cpus(unsigned int max_cpus)
{
	int err;
	unsigned int cpu;

	init_cpu_topology();

	smp_store_cpu_info(smp_processor_id());

	/*
	 * If UP is mandated by "nosmp" (which implies "maxcpus=0"), don't set
	 * secondary CPUs present.
	 */
	if (max_cpus == 0)
		return;

	/*
	 * Initialise the present map (which describes the set of CPUs
	 * actually populated at the present time) and release the
	 * secondaries from the bootloader.
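	 * A CPU whose cpu_prepare() method fails is left out of the present
	 * mask and will never be brought online.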
	 */
	for_each_possible_cpu(cpu) {

		if (cpu == smp_processor_id())
			continue;

		if (!cpu_ops[cpu])
			continue;

		err = cpu_ops[cpu]->cpu_prepare(cpu);
		if (err)
			continue;

		set_cpu_present(cpu, true);
	}
}

void (*__smp_cross_call)(const struct cpumask *, unsigned int);

void __init set_smp_cross_call(void (*fn)(const struct cpumask *, unsigned int))
{
	__smp_cross_call = fn;
}

static const char *ipi_types[NR_IPI] __tracepoint_string = {
#define S(x,s)	[x] = s
	S(IPI_RESCHEDULE, "Rescheduling interrupts"),
	S(IPI_CALL_FUNC, "Function call interrupts"),
	S(IPI_CPU_STOP, "CPU stop interrupts"),
	S(IPI_TIMER, "Timer broadcast interrupts"),
	S(IPI_IRQ_WORK, "IRQ work interrupts"),
	S(IPI_WAKEUP, "CPU wake-up interrupts"),
};

static void smp_cross_call(const struct cpumask *target, unsigned int ipinr)
{
	trace_ipi_raise(target, ipi_types[ipinr]);
	__smp_cross_call(target, ipinr);
}

void show_ipi_list(struct seq_file *p, int prec)
{
	unsigned int cpu, i;

	for (i = 0; i < NR_IPI; i++) {
		seq_printf(p, "%*s%u:%s", prec - 1, "IPI", i,
			   prec >= 4 ? " " : "");
		for_each_online_cpu(cpu)
			seq_printf(p, "%10u ",
				   __get_irq_stat(cpu, ipi_irqs[i]));
		seq_printf(p, " %s\n", ipi_types[i]);
	}
}

u64 smp_irq_stat_cpu(unsigned int cpu)
{
	u64 sum = 0;
	int i;

	for (i = 0; i < NR_IPI; i++)
		sum += __get_irq_stat(cpu, ipi_irqs[i]);

	return sum;
}

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	smp_cross_call(mask, IPI_CALL_FUNC);
}

void arch_send_call_function_single_ipi(int cpu)
{
	smp_cross_call(cpumask_of(cpu), IPI_CALL_FUNC);
}

#ifdef CONFIG_ARM64_ACPI_PARKING_PROTOCOL
void arch_send_wakeup_ipi_mask(const struct cpumask *mask)
{
	smp_cross_call(mask, IPI_WAKEUP);
}
#endif

#ifdef CONFIG_IRQ_WORK
void arch_irq_work_raise(void)
{
	if (__smp_cross_call)
		smp_cross_call(cpumask_of(smp_processor_id()), IPI_IRQ_WORK);
}
#endif

/*
 * ipi_cpu_stop - handle IPI from smp_send_stop()
 */
static void ipi_cpu_stop(unsigned int cpu)
{
	set_cpu_online(cpu, false);

	local_irq_disable();

	while (1)
		cpu_relax();
}

/*
 * Main handler for inter-processor interrupts
 */
void handle_IPI(int ipinr, struct pt_regs *regs)
{
	unsigned int cpu = smp_processor_id();
	struct pt_regs *old_regs = set_irq_regs(regs);

	if ((unsigned)ipinr < NR_IPI) {
		trace_ipi_entry_rcuidle(ipi_types[ipinr]);
		__inc_irq_stat(cpu, ipi_irqs[ipinr]);
	}

	switch (ipinr) {
	case IPI_RESCHEDULE:
		scheduler_ipi();
		break;

	case IPI_CALL_FUNC:
		irq_enter();
		generic_smp_call_function_interrupt();
		irq_exit();
		break;

	case IPI_CPU_STOP:
		irq_enter();
		ipi_cpu_stop(cpu);
		irq_exit();
		break;

#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
	case IPI_TIMER:
		irq_enter();
		tick_receive_broadcast();
		irq_exit();
		break;
#endif

#ifdef CONFIG_IRQ_WORK
	case IPI_IRQ_WORK:
		irq_enter();
		irq_work_run();
		irq_exit();
		break;
#endif

#ifdef CONFIG_ARM64_ACPI_PARKING_PROTOCOL
	case IPI_WAKEUP:
		WARN_ONCE(!acpi_parking_protocol_valid(cpu),
			  "CPU%u: Wake-up IPI outside the ACPI parking protocol\n",
			  cpu);
		break;
#endif

	default:
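		/* Unknown IPI numbers indicate a bug in the sender. */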
		pr_crit("CPU%u: Unknown IPI message 0x%x\n", cpu, ipinr);
		break;
	}

	if ((unsigned)ipinr < NR_IPI)
		trace_ipi_exit_rcuidle(ipi_types[ipinr]);
	set_irq_regs(old_regs);
}

void smp_send_reschedule(int cpu)
{
	smp_cross_call(cpumask_of(cpu), IPI_RESCHEDULE);
}

#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
void tick_broadcast(const struct cpumask *mask)
{
	smp_cross_call(mask, IPI_TIMER);
}
#endif

void smp_send_stop(void)
{
	unsigned long timeout;

	if (num_online_cpus() > 1) {
		cpumask_t mask;

		cpumask_copy(&mask, cpu_online_mask);
		cpumask_clear_cpu(smp_processor_id(), &mask);

		if (system_state == SYSTEM_BOOTING ||
		    system_state == SYSTEM_RUNNING)
			pr_crit("SMP: stopping secondary CPUs\n");
		smp_cross_call(&mask, IPI_CPU_STOP);
	}

	/* Wait up to one second for other CPUs to stop */
	timeout = USEC_PER_SEC;
	while (num_online_cpus() > 1 && timeout--)
		udelay(1);

	if (num_online_cpus() > 1)
		pr_warning("SMP: failed to stop secondary CPUs %*pbl\n",
			   cpumask_pr_args(cpu_online_mask));
}

/*
 * not supported here
 */
int setup_profiling_timer(unsigned int multiplier)
{
	return -EINVAL;
}

static bool have_cpu_die(void)
{
#ifdef CONFIG_HOTPLUG_CPU
	int any_cpu = raw_smp_processor_id();

	if (cpu_ops[any_cpu]->cpu_die)
		return true;
#endif
	return false;
}

bool cpus_are_stuck_in_kernel(void)
{
	bool smp_spin_tables = (num_possible_cpus() > 1 && !have_cpu_die());

	return !!cpus_stuck_in_kernel || smp_spin_tables;
}