/*
 * SMP initialisation and IPI support
 * Based on arch/arm/kernel/smp.c
 *
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/acpi.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/cache.h>
#include <linux/profile.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/seq_file.h>
#include <linux/irq.h>
#include <linux/percpu.h>
#include <linux/clockchips.h>
#include <linux/completion.h>
#include <linux/of.h>
#include <linux/irq_work.h>

#include <asm/alternative.h>
#include <asm/atomic.h>
#include <asm/cacheflush.h>
#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/cpu_ops.h>
#include <asm/mmu_context.h>
#include <asm/numa.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/processor.h>
#include <asm/smp_plat.h>
#include <asm/sections.h>
#include <asm/tlbflush.h>
#include <asm/ptrace.h>
#include <asm/virt.h>

#define CREATE_TRACE_POINTS
#include <trace/events/ipi.h>

/*
 * as from 2.5, kernels no longer have an init_tasks structure
 * so we need some other way of telling a new secondary core
 * where to place its SVC stack
 */
struct secondary_data secondary_data;
/* Number of CPUs which aren't online, but looping in kernel text. */
int cpus_stuck_in_kernel;

enum ipi_msg_type {
	IPI_RESCHEDULE,
	IPI_CALL_FUNC,
	IPI_CPU_STOP,
	IPI_TIMER,
	IPI_IRQ_WORK,
	IPI_WAKEUP
};

#ifdef CONFIG_ARM64_VHE

/* Whether the boot CPU is running in HYP mode or not */
static bool boot_cpu_hyp_mode;

static inline void save_boot_cpu_run_el(void)
{
	boot_cpu_hyp_mode = is_kernel_in_hyp_mode();
}

static inline bool is_boot_cpu_in_hyp_mode(void)
{
	return boot_cpu_hyp_mode;
}

/*
 * Verify that a secondary CPU is running the kernel at the same
 * EL as that of the boot CPU.
 */
void verify_cpu_run_el(void)
{
	bool in_el2 = is_kernel_in_hyp_mode();
	bool boot_cpu_el2 = is_boot_cpu_in_hyp_mode();

	if (in_el2 ^ boot_cpu_el2) {
		pr_crit("CPU%d: mismatched Exception Level(EL%d) with boot CPU(EL%d)\n",
			smp_processor_id(),
			in_el2 ? 2 : 1,
			boot_cpu_el2 ? 2 : 1);
		cpu_panic_kernel();
	}
}

#else
static inline void save_boot_cpu_run_el(void) {}
#endif

#ifdef CONFIG_HOTPLUG_CPU
static int op_cpu_kill(unsigned int cpu);
#else
static inline int op_cpu_kill(unsigned int cpu)
{
	return -ENOSYS;
}
#endif


/*
 * Boot a secondary CPU, and assign it the specified idle task.
 * This also gives us the initial stack to use for this CPU.
 */
static int boot_secondary(unsigned int cpu, struct task_struct *idle)
{
	if (cpu_ops[cpu]->cpu_boot)
		return cpu_ops[cpu]->cpu_boot(cpu);

	return -EOPNOTSUPP;
}

static DECLARE_COMPLETION(cpu_running);

int __cpu_up(unsigned int cpu, struct task_struct *idle)
{
	int ret;
	long status;

	/*
	 * We need to tell the secondary core where to find its stack and the
	 * page tables.
	 */
	secondary_data.stack = task_stack_page(idle) + THREAD_START_SP;
	update_cpu_boot_status(CPU_MMU_OFF);
	__flush_dcache_area(&secondary_data, sizeof(secondary_data));

	/*
	 * Now bring the CPU into our world.
	 */
	ret = boot_secondary(cpu, idle);
	if (ret == 0) {
		/*
		 * CPU was successfully started, wait for it to come online or
		 * time out.
		 */
		wait_for_completion_timeout(&cpu_running,
					    msecs_to_jiffies(1000));

		if (!cpu_online(cpu)) {
			pr_crit("CPU%u: failed to come online\n", cpu);
			ret = -EIO;
		}
	} else {
		pr_err("CPU%u: failed to boot: %d\n", cpu, ret);
	}

	secondary_data.stack = NULL;
	status = READ_ONCE(secondary_data.status);
	if (ret && status) {

		if (status == CPU_MMU_OFF)
			status = READ_ONCE(__early_cpu_boot_status);

		switch (status) {
		default:
			pr_err("CPU%u: failed in unknown state : 0x%lx\n",
			       cpu, status);
			break;
		case CPU_KILL_ME:
			if (!op_cpu_kill(cpu)) {
				pr_crit("CPU%u: died during early boot\n", cpu);
				break;
			}
			pr_crit("CPU%u: may not have shut down cleanly\n", cpu);
			/* Fall through */
		case CPU_STUCK_IN_KERNEL:
			pr_crit("CPU%u: is stuck in kernel\n", cpu);
			cpus_stuck_in_kernel++;
			break;
		case CPU_PANIC_KERNEL:
			panic("CPU%u detected unsupported configuration\n", cpu);
		}
	}

	return ret;
}

/*
 * This is the secondary CPU boot entry. We're using this CPU's
 * idle thread stack, but a set of temporary page tables.
 */
asmlinkage void secondary_start_kernel(void)
{
	struct mm_struct *mm = &init_mm;
	unsigned int cpu = smp_processor_id();

	/*
	 * All kernel threads share the same mm context; grab a
	 * reference and switch to it.
	 */
	atomic_inc(&mm->mm_count);
	current->active_mm = mm;

	set_my_cpu_offset(per_cpu_offset(smp_processor_id()));

	/*
	 * TTBR0 is only used for the identity mapping at this stage. Make it
	 * point to the zero page to avoid speculatively fetching new entries.
	 */
	cpu_uninstall_idmap();

	preempt_disable();
	trace_hardirqs_off();

	/*
	 * If the system has established the capabilities, make sure
	 * this CPU ticks all of those. If it doesn't, the CPU will
	 * fail to come online.
	 */
	check_local_cpu_capabilities();

	if (cpu_ops[cpu]->cpu_postboot)
		cpu_ops[cpu]->cpu_postboot();

	/*
	 * Log the CPU info before it is marked online and might get read.
	 */
	cpuinfo_store_cpu();

	/*
	 * Enable GIC and timers.
	 */
	notify_cpu_starting(cpu);

	store_cpu_topology(cpu);

	/*
	 * OK, now it's safe to let the boot CPU continue. Wait for
	 * the CPU migration code to notice that the CPU is online
	 * before we continue.
	 */
	pr_info("CPU%u: Booted secondary processor [%08x]\n",
		cpu, read_cpuid_id());
	update_cpu_boot_status(CPU_BOOT_SUCCESS);
	set_cpu_online(cpu, true);
	complete(&cpu_running);

	local_irq_enable();
	local_async_enable();

	/*
	 * OK, it's off to the idle thread for us
	 */
	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
}

#ifdef CONFIG_HOTPLUG_CPU
static int op_cpu_disable(unsigned int cpu)
{
	/*
	 * If we don't have a cpu_die method, abort before we reach the point
	 * of no return. CPU0 may not have a cpu_ops, so test for it.
	 */
	if (!cpu_ops[cpu] || !cpu_ops[cpu]->cpu_die)
		return -EOPNOTSUPP;

	/*
	 * We may need to abort a hot unplug for some other mechanism-specific
	 * reason.
	 */
	if (cpu_ops[cpu]->cpu_disable)
		return cpu_ops[cpu]->cpu_disable(cpu);

	return 0;
}

/*
 * __cpu_disable runs on the processor to be shut down.
 */
int __cpu_disable(void)
{
	unsigned int cpu = smp_processor_id();
	int ret;

	ret = op_cpu_disable(cpu);
	if (ret)
		return ret;

	/*
	 * Take this CPU offline. Once we clear this, we can't return,
	 * and we must not schedule until we're ready to give up the cpu.
	 */
	set_cpu_online(cpu, false);

	/*
	 * OK - migrate IRQs away from this CPU
	 */
	irq_migrate_all_off_this_cpu();

	return 0;
}

static int op_cpu_kill(unsigned int cpu)
{
	/*
	 * If we have no means of synchronising with the dying CPU, then assume
	 * that it is really dead. We can only wait for an arbitrary length of
	 * time and hope that it's dead, so let's skip the wait and just hope.
	 */
	if (!cpu_ops[cpu]->cpu_kill)
		return 0;

	return cpu_ops[cpu]->cpu_kill(cpu);
}

/*
 * Called on the thread which is asking for a CPU to be shut down -
 * waits until shutdown has completed, or it times out.
 */
void __cpu_die(unsigned int cpu)
{
	int err;

	if (!cpu_wait_death(cpu, 5)) {
		pr_crit("CPU%u: cpu didn't die\n", cpu);
		return;
	}
	pr_notice("CPU%u: shutdown\n", cpu);

	/*
	 * Now that the dying CPU is beyond the point of no return w.r.t.
	 * in-kernel synchronisation, try to get the firmware to help us to
	 * verify that it has really left the kernel before we consider
	 * clobbering anything it might still be using.
	 */
	err = op_cpu_kill(cpu);
	if (err)
		pr_warn("CPU%d may not have shut down cleanly: %d\n",
			cpu, err);
}

/*
 * Called from the idle thread for the CPU which has been shut down.
 *
 * Note that we disable IRQs here, but do not re-enable them
 * before returning to the caller. This is also the behaviour
 * of the other hotplug-cpu capable cores, so presumably coming
 * out of idle fixes this.
 */
void cpu_die(void)
{
	unsigned int cpu = smp_processor_id();

	idle_task_exit();

	local_irq_disable();

	/* Tell __cpu_die() that this CPU is now safe to dispose of */
	(void)cpu_report_death();

	/*
	 * Actually shut down the CPU. This must never fail. The specific
	 * hotplug mechanism must perform all required cache maintenance to
	 * ensure that no dirty lines are lost in the process of shutting down
	 * the CPU.
	 */
	cpu_ops[cpu]->cpu_die(cpu);

	BUG();
}
#endif

/*
 * Kill the calling secondary CPU, early in bringup before it is turned
 * online.
 */
void cpu_die_early(void)
{
	int cpu = smp_processor_id();

	pr_crit("CPU%d: will not boot\n", cpu);

	/* Mark this CPU absent */
	set_cpu_present(cpu, 0);

#ifdef CONFIG_HOTPLUG_CPU
	update_cpu_boot_status(CPU_KILL_ME);
	/* Check if we can park ourselves */
	if (cpu_ops[cpu] && cpu_ops[cpu]->cpu_die)
		cpu_ops[cpu]->cpu_die(cpu);
#endif
	update_cpu_boot_status(CPU_STUCK_IN_KERNEL);

	cpu_park_loop();
}

static void __init hyp_mode_check(void)
{
	if (is_hyp_mode_available())
		pr_info("CPU: All CPU(s) started at EL2\n");
	else if (is_hyp_mode_mismatched())
		WARN_TAINT(1, TAINT_CPU_OUT_OF_SPEC,
			   "CPU: CPUs started in inconsistent modes");
	else
		pr_info("CPU: All CPU(s) started at EL1\n");
}

void __init smp_cpus_done(unsigned int max_cpus)
{
	pr_info("SMP: Total of %d processors activated.\n", num_online_cpus());
	setup_cpu_features();
	hyp_mode_check();
	apply_alternatives_all();
}

void __init smp_prepare_boot_cpu(void)
{
	set_my_cpu_offset(per_cpu_offset(smp_processor_id()));
	/*
	 * Initialise the static keys early as they may be enabled by the
	 * cpufeature code.
	 */
	jump_label_init();
	cpuinfo_store_boot_cpu();
	save_boot_cpu_run_el();
	/*
	 * Run the errata workaround checks on the boot CPU, once we have
	 * initialised the cpu feature infrastructure from
	 * cpuinfo_store_boot_cpu() above.
	 */
	update_cpu_errata_workarounds();
}

static u64 __init of_get_cpu_mpidr(struct device_node *dn)
{
	const __be32 *cell;
	u64 hwid;

	/*
	 * A cpu node with a missing "reg" property is
	 * considered invalid to build a cpu_logical_map
	 * entry.
	 */
	cell = of_get_property(dn, "reg", NULL);
	if (!cell) {
		pr_err("%s: missing reg property\n", dn->full_name);
		return INVALID_HWID;
	}

	hwid = of_read_number(cell, of_n_addr_cells(dn));
	/*
	 * Non-affinity bits must be set to 0 in the DT
	 */
	if (hwid & ~MPIDR_HWID_BITMASK) {
		pr_err("%s: invalid reg property\n", dn->full_name);
		return INVALID_HWID;
	}
	return hwid;
}

/*
 * Duplicate MPIDRs are a recipe for disaster. Scan all initialized
 * entries and check for duplicates. If any is found just ignore the
 * cpu. cpu_logical_map was initialized to INVALID_HWID to avoid
 * matching valid MPIDR values.
 */
static bool __init is_mpidr_duplicate(unsigned int cpu, u64 hwid)
{
	unsigned int i;

	for (i = 1; (i < cpu) && (i < NR_CPUS); i++)
		if (cpu_logical_map(i) == hwid)
			return true;
	return false;
}

/*
 * Initialize cpu operations for a logical cpu and
 * set it in the possible mask on success
 */
static int __init smp_cpu_setup(int cpu)
{
	if (cpu_read_ops(cpu))
		return -ENODEV;

	if (cpu_ops[cpu]->cpu_init(cpu))
		return -ENODEV;

	set_cpu_possible(cpu, true);

	return 0;
}

static bool bootcpu_valid __initdata;
static unsigned int cpu_count = 1;

#ifdef CONFIG_ACPI
/*
 * acpi_map_gic_cpu_interface - parse processor MADT entry
 *
 * Carry out sanity checks on MADT processor entry and initialize
 * cpu_logical_map on success
 */
static void __init
acpi_map_gic_cpu_interface(struct acpi_madt_generic_interrupt *processor)
{
	u64 hwid = processor->arm_mpidr;

	if (!(processor->flags & ACPI_MADT_ENABLED)) {
		pr_debug("skipping disabled CPU entry with 0x%llx MPIDR\n", hwid);
		return;
	}

	if (hwid & ~MPIDR_HWID_BITMASK || hwid == INVALID_HWID) {
		pr_err("skipping CPU entry with invalid MPIDR 0x%llx\n", hwid);
		return;
	}

	if (is_mpidr_duplicate(cpu_count, hwid)) {
		pr_err("duplicate CPU MPIDR 0x%llx in MADT\n", hwid);
		return;
	}

	/* Check if the GICC structure of the boot CPU is available in the MADT */
	if (cpu_logical_map(0) == hwid) {
		if (bootcpu_valid) {
			pr_err("duplicate boot CPU MPIDR: 0x%llx in MADT\n",
			       hwid);
			return;
		}
		bootcpu_valid = true;
		early_map_cpu_to_node(0, acpi_numa_get_nid(0, hwid));
		return;
	}

	if (cpu_count >= NR_CPUS)
		return;

	/* map the logical cpu id to cpu MPIDR */
	cpu_logical_map(cpu_count) = hwid;

	/*
	 * Set up the ACPI parking protocol cpu entries
	 * while initializing the cpu_logical_map to
	 * avoid parsing MADT entries multiple times for
	 * nothing (i.e. a valid cpu_logical_map entry should
	 * contain a valid parking protocol data set to
	 * initialize the cpu if the parking protocol is
	 * the only available enable method).
	 */
	acpi_set_mailbox_entry(cpu_count, processor);

	early_map_cpu_to_node(cpu_count, acpi_numa_get_nid(cpu_count, hwid));

	cpu_count++;
}

static int __init
acpi_parse_gic_cpu_interface(struct acpi_subtable_header *header,
			     const unsigned long end)
{
	struct acpi_madt_generic_interrupt *processor;

	processor = (struct acpi_madt_generic_interrupt *)header;
	if (BAD_MADT_GICC_ENTRY(processor, end))
		return -EINVAL;

	acpi_table_print_madt_entry(header);

	acpi_map_gic_cpu_interface(processor);

	return 0;
}
#else
#define acpi_table_parse_madt(...)	do { } while (0)
#endif

/*
 * Enumerate the possible CPU set from the device tree and build the
 * cpu logical map array containing MPIDR values related to logical
 * cpus. Assumes that cpu_logical_map(0) has already been initialized.
 */
static void __init of_parse_and_init_cpus(void)
{
	struct device_node *dn = NULL;

	while ((dn = of_find_node_by_type(dn, "cpu"))) {
		u64 hwid = of_get_cpu_mpidr(dn);

		if (hwid == INVALID_HWID)
			goto next;

		if (is_mpidr_duplicate(cpu_count, hwid)) {
			pr_err("%s: duplicate cpu reg properties in the DT\n",
			       dn->full_name);
			goto next;
		}

		/*
		 * The numbering scheme requires that the boot CPU
		 * must be assigned logical id 0. Record it so that
		 * the logical map built from DT is validated and can
		 * be used.
		 */
		if (hwid == cpu_logical_map(0)) {
			if (bootcpu_valid) {
				pr_err("%s: duplicate boot cpu reg property in DT\n",
				       dn->full_name);
				goto next;
			}

			bootcpu_valid = true;
			early_map_cpu_to_node(0, of_node_to_nid(dn));

			/*
			 * cpu_logical_map has already been
			 * initialized and the boot cpu doesn't need
			 * the enable-method so continue without
			 * incrementing cpu.
			 */
			continue;
		}

		if (cpu_count >= NR_CPUS)
			goto next;

		pr_debug("cpu logical map 0x%llx\n", hwid);
		cpu_logical_map(cpu_count) = hwid;

		early_map_cpu_to_node(cpu_count, of_node_to_nid(dn));
next:
		cpu_count++;
	}
}

/*
 * Enumerate the possible CPU set from the device tree or ACPI and build the
 * cpu logical map array containing MPIDR values related to logical
 * cpus. Assumes that cpu_logical_map(0) has already been initialized.
 */
void __init smp_init_cpus(void)
{
	int i;

	if (acpi_disabled)
		of_parse_and_init_cpus();
	else
		/*
		 * do a walk of MADT to determine how many CPUs
		 * we have including disabled CPUs, and get information
		 * we need for SMP init
		 */
		acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_INTERRUPT,
				      acpi_parse_gic_cpu_interface, 0);

	if (cpu_count > nr_cpu_ids)
		pr_warn("Number of cores (%d) exceeds configured maximum of %d - clipping\n",
			cpu_count, nr_cpu_ids);

	if (!bootcpu_valid) {
		pr_err("missing boot CPU MPIDR, not enabling secondaries\n");
		return;
	}

	/*
	 * We need to set the cpu_logical_map entries before enabling
	 * the cpus so that cpu processor description entries (DT cpu nodes
	 * and ACPI MADT entries) can be retrieved by matching the cpu hwid
	 * with entries in cpu_logical_map while initializing the cpus.
	 * If the cpu set-up fails, invalidate the cpu_logical_map entry.
	 */
	for (i = 1; i < nr_cpu_ids; i++) {
		if (cpu_logical_map(i) != INVALID_HWID) {
			if (smp_cpu_setup(i))
				cpu_logical_map(i) = INVALID_HWID;
		}
	}
}

void __init smp_prepare_cpus(unsigned int max_cpus)
{
	int err;
	unsigned int cpu;
	unsigned int this_cpu;

	init_cpu_topology();

	this_cpu = smp_processor_id();
	store_cpu_topology(this_cpu);
	numa_store_cpu_info(this_cpu);

	/*
	 * If UP is mandated by "nosmp" (which implies "maxcpus=0"), don't set
	 * secondary CPUs present.
	 */
	if (max_cpus == 0)
		return;

	/*
	 * Initialise the present map (which describes the set of CPUs
	 * actually populated at the present time) and release the
	 * secondaries from the bootloader.
	 */
	for_each_possible_cpu(cpu) {

		if (cpu == smp_processor_id())
			continue;

		if (!cpu_ops[cpu])
			continue;

		err = cpu_ops[cpu]->cpu_prepare(cpu);
		if (err)
			continue;

		set_cpu_present(cpu, true);
		numa_store_cpu_info(cpu);
	}
}

void (*__smp_cross_call)(const struct cpumask *, unsigned int);

void __init set_smp_cross_call(void (*fn)(const struct cpumask *, unsigned int))
{
	__smp_cross_call = fn;
}

static const char *ipi_types[NR_IPI] __tracepoint_string = {
#define S(x,s)	[x] = s
	S(IPI_RESCHEDULE, "Rescheduling interrupts"),
	S(IPI_CALL_FUNC, "Function call interrupts"),
	S(IPI_CPU_STOP, "CPU stop interrupts"),
	S(IPI_TIMER, "Timer broadcast interrupts"),
	S(IPI_IRQ_WORK, "IRQ work interrupts"),
	S(IPI_WAKEUP, "CPU wake-up interrupts"),
};

static void smp_cross_call(const struct cpumask *target, unsigned int ipinr)
{
	trace_ipi_raise(target, ipi_types[ipinr]);
	__smp_cross_call(target, ipinr);
}

void show_ipi_list(struct seq_file *p, int prec)
{
	unsigned int cpu, i;

	for (i = 0; i < NR_IPI; i++) {
		seq_printf(p, "%*s%u:%s", prec - 1, "IPI", i,
			   prec >= 4 ? " " : "");
		for_each_online_cpu(cpu)
			seq_printf(p, "%10u ",
				   __get_irq_stat(cpu, ipi_irqs[i]));
		seq_printf(p, " %s\n", ipi_types[i]);
	}
}

u64 smp_irq_stat_cpu(unsigned int cpu)
{
	u64 sum = 0;
	int i;

	for (i = 0; i < NR_IPI; i++)
		sum += __get_irq_stat(cpu, ipi_irqs[i]);

	return sum;
}

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	smp_cross_call(mask, IPI_CALL_FUNC);
}

void arch_send_call_function_single_ipi(int cpu)
{
	smp_cross_call(cpumask_of(cpu), IPI_CALL_FUNC);
}

#ifdef CONFIG_ARM64_ACPI_PARKING_PROTOCOL
void arch_send_wakeup_ipi_mask(const struct cpumask *mask)
{
	smp_cross_call(mask, IPI_WAKEUP);
}
#endif

#ifdef CONFIG_IRQ_WORK
void arch_irq_work_raise(void)
{
	if (__smp_cross_call)
		smp_cross_call(cpumask_of(smp_processor_id()), IPI_IRQ_WORK);
}
#endif

/*
 * ipi_cpu_stop - handle IPI from smp_send_stop()
 */
static void ipi_cpu_stop(unsigned int cpu)
{
	set_cpu_online(cpu, false);

	local_irq_disable();

	while (1)
		cpu_relax();
}

/*
 * Main handler for inter-processor interrupts
 */
void handle_IPI(int ipinr, struct pt_regs *regs)
{
	unsigned int cpu = smp_processor_id();
	struct pt_regs *old_regs = set_irq_regs(regs);

	if ((unsigned)ipinr < NR_IPI) {
		trace_ipi_entry_rcuidle(ipi_types[ipinr]);
		__inc_irq_stat(cpu, ipi_irqs[ipinr]);
	}

	switch (ipinr) {
	case IPI_RESCHEDULE:
		scheduler_ipi();
		break;

	case IPI_CALL_FUNC:
		irq_enter();
		generic_smp_call_function_interrupt();
		irq_exit();
		break;

	case IPI_CPU_STOP:
		irq_enter();
		ipi_cpu_stop(cpu);
		irq_exit();
		break;

#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
	case IPI_TIMER:
		irq_enter();
		tick_receive_broadcast();
		irq_exit();
		break;
#endif

#ifdef CONFIG_IRQ_WORK
	case IPI_IRQ_WORK:
		irq_enter();
		irq_work_run();
		irq_exit();
		break;
#endif

#ifdef CONFIG_ARM64_ACPI_PARKING_PROTOCOL
	case IPI_WAKEUP:
		WARN_ONCE(!acpi_parking_protocol_valid(cpu),
			  "CPU%u: Wake-up IPI outside the ACPI parking protocol\n",
			  cpu);
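		/*
		 * Nothing else to do here: the wake-up IPI only serves to
		 * bring a parked CPU out of the ACPI parking protocol wait
		 * state, so the sanity check above is all that is required.
		 */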
		break;
#endif

	default:
		pr_crit("CPU%u: Unknown IPI message 0x%x\n", cpu, ipinr);
		break;
	}

	if ((unsigned)ipinr < NR_IPI)
		trace_ipi_exit_rcuidle(ipi_types[ipinr]);
	set_irq_regs(old_regs);
}

void smp_send_reschedule(int cpu)
{
	smp_cross_call(cpumask_of(cpu), IPI_RESCHEDULE);
}

#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
void tick_broadcast(const struct cpumask *mask)
{
	smp_cross_call(mask, IPI_TIMER);
}
#endif

void smp_send_stop(void)
{
	unsigned long timeout;

	if (num_online_cpus() > 1) {
		cpumask_t mask;

		cpumask_copy(&mask, cpu_online_mask);
		cpumask_clear_cpu(smp_processor_id(), &mask);

		if (system_state == SYSTEM_BOOTING ||
		    system_state == SYSTEM_RUNNING)
			pr_crit("SMP: stopping secondary CPUs\n");
		smp_cross_call(&mask, IPI_CPU_STOP);
	}

	/* Wait up to one second for other CPUs to stop */
	timeout = USEC_PER_SEC;
	while (num_online_cpus() > 1 && timeout--)
		udelay(1);

	if (num_online_cpus() > 1)
		pr_warning("SMP: failed to stop secondary CPUs %*pbl\n",
			   cpumask_pr_args(cpu_online_mask));
}

/*
 * not supported here
 */
int setup_profiling_timer(unsigned int multiplier)
{
	return -EINVAL;
}

static bool have_cpu_die(void)
{
#ifdef CONFIG_HOTPLUG_CPU
	int any_cpu = raw_smp_processor_id();

	if (cpu_ops[any_cpu]->cpu_die)
		return true;
#endif
	return false;
}

bool cpus_are_stuck_in_kernel(void)
{
	bool smp_spin_tables = (num_possible_cpus() > 1 && !have_cpu_die());

	return !!cpus_stuck_in_kernel || smp_spin_tables;
}