--- smp.c (0ea8a56de21be24cb79abb03dee79aabcd60a316)
+++ smp.c (56afcd3dbd1995c526bfbd920cebde6158b22c4a)
 // SPDX-License-Identifier: GPL-2.0-only
 /*
  *  linux/arch/arm/kernel/smp.c
  *
  *  Copyright (C) 2002 ARM Limited, All Rights Reserved.
  */
 #include <linux/module.h>
 #include <linux/delay.h>

--- 55 unchanged lines hidden ---

         IPI_CALL_FUNC,
         IPI_CPU_STOP,
         IPI_IRQ_WORK,
         IPI_COMPLETION,
         /*
          * CPU_BACKTRACE is special and not included in NR_IPI
          * or tracable with trace_ipi_*
          */
-        IPI_CPU_BACKTRACE,
+        IPI_CPU_BACKTRACE = NR_IPI,
         /*
          * SGI8-15 can be reserved by secure firmware, and thus may
          * not be usable by the kernel. Please keep the above limited
          * to at most 8 entries.
          */
+        MAX_IPI
 };

+static int ipi_irq_base __read_mostly;
+static int nr_ipi __read_mostly = NR_IPI;
+static struct irq_desc *ipi_desc[MAX_IPI] __read_mostly;
+
+static void ipi_setup(int cpu);
+static void ipi_teardown(int cpu);
+
 static DECLARE_COMPLETION(cpu_running);

 static struct smp_operations smp_ops __ro_after_init;

 void __init smp_set_ops(const struct smp_operations *ops)
 {
         if (ops)
                 smp_ops = *ops;

--- 154 unchanged lines hidden ---

         remove_cpu_topology(cpu);
 #endif

         /*
          * Take this CPU offline. Once we clear this, we can't return,
          * and we must not schedule until we're ready to give up the cpu.
          */
         set_cpu_online(cpu, false);
+        ipi_teardown(cpu);

         /*
          * OK - migrate IRQs away from this CPU
          */
         irq_migrate_all_off_this_cpu();

         /*
          * Flush user cache and TLB mappings, and then remove this CPU

--- 159 unchanged lines hidden ---

         /*
          * Give the platform a chance to do its own initialisation.
          */
         if (smp_ops.smp_secondary_init)
                 smp_ops.smp_secondary_init(cpu);

         notify_cpu_starting(cpu);

+        ipi_setup(cpu);
+
         calibrate_delay();

         smp_store_cpu_info(cpu);

         /*
          * OK, now it's safe to let the boot CPU continue. Wait for
          * the CPU migration code to notice that the CPU is online
          * before we continue - which happens after __cpu_up returns.

--- 189 unchanged lines hidden ---

 /*
  * Main handler for inter-processor interrupts
  */
 asmlinkage void __exception_irq_entry do_IPI(int ipinr, struct pt_regs *regs)
 {
         handle_IPI(ipinr, regs);
 }

-void handle_IPI(int ipinr, struct pt_regs *regs)
+static void do_handle_IPI(int ipinr)
 {
         unsigned int cpu = smp_processor_id();
-        struct pt_regs *old_regs = set_irq_regs(regs);

         if ((unsigned)ipinr < NR_IPI) {
                 trace_ipi_entry_rcuidle(ipi_types[ipinr]);
                 __inc_irq_stat(cpu, ipi_irqs[ipinr]);
         }

         switch (ipinr) {
         case IPI_WAKEUP:
                 break;

 #ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
         case IPI_TIMER:
-                irq_enter();
                 tick_receive_broadcast();
-                irq_exit();
                 break;
 #endif

         case IPI_RESCHEDULE:
                 scheduler_ipi();
                 break;

         case IPI_CALL_FUNC:
-                irq_enter();
                 generic_smp_call_function_interrupt();
-                irq_exit();
                 break;

         case IPI_CPU_STOP:
-                irq_enter();
                 ipi_cpu_stop(cpu);
-                irq_exit();
                 break;

 #ifdef CONFIG_IRQ_WORK
         case IPI_IRQ_WORK:
-                irq_enter();
                 irq_work_run();
-                irq_exit();
                 break;
 #endif

         case IPI_COMPLETION:
-                irq_enter();
                 ipi_complete(cpu);
-                irq_exit();
                 break;

         case IPI_CPU_BACKTRACE:
                 printk_nmi_enter();
-                irq_enter();
-                nmi_cpu_backtrace(regs);
-                irq_exit();
+                nmi_cpu_backtrace(get_irq_regs());
                 printk_nmi_exit();
                 break;

         default:
                 pr_crit("CPU%u: Unknown IPI message 0x%x\n",
                         cpu, ipinr);
                 break;
         }

         if ((unsigned)ipinr < NR_IPI)
                 trace_ipi_exit_rcuidle(ipi_types[ipinr]);
+}
+
+/* Legacy version, should go away once all irqchips have been converted */
+void handle_IPI(int ipinr, struct pt_regs *regs)
+{
+        struct pt_regs *old_regs = set_irq_regs(regs);
+
+        irq_enter();
+        do_handle_IPI(ipinr);
+        irq_exit();
+
         set_irq_regs(old_regs);
 }

+static irqreturn_t ipi_handler(int irq, void *data)
+{
+        do_handle_IPI(irq - ipi_irq_base);
+        return IRQ_HANDLED;
+}
+
+static void ipi_send(const struct cpumask *target, unsigned int ipi)
+{
+        __ipi_send_mask(ipi_desc[ipi], target);
+}
+
+static void ipi_setup(int cpu)
+{
+        int i;
+
+        if (!ipi_irq_base)
+                return;
+
+        for (i = 0; i < nr_ipi; i++)
+                enable_percpu_irq(ipi_irq_base + i, 0);
+}
+
+static void ipi_teardown(int cpu)
+{
+        int i;
+
+        if (!ipi_irq_base)
+                return;
+
+        for (i = 0; i < nr_ipi; i++)
+                disable_percpu_irq(ipi_irq_base + i);
+}
+
+void __init set_smp_ipi_range(int ipi_base, int n)
+{
+        int i;
+
+        WARN_ON(n < MAX_IPI);
+        nr_ipi = min(n, MAX_IPI);
+
+        for (i = 0; i < nr_ipi; i++) {
+                int err;
+
+                err = request_percpu_irq(ipi_base + i, ipi_handler,
+                                         "IPI", &irq_stat);
+                WARN_ON(err);
+
+                ipi_desc[i] = irq_to_desc(ipi_base + i);
+                irq_set_status_flags(ipi_base + i, IRQ_HIDDEN);
+        }
+
+        ipi_irq_base = ipi_base;
+        set_smp_cross_call(ipi_send);
+
+        /* Setup the boot CPU immediately */
+        ipi_setup(smp_processor_id());
+}
+
 void smp_send_reschedule(int cpu)
 {
         smp_cross_call(cpumask_of(cpu), IPI_RESCHEDULE);
 }

 void smp_send_stop(void)
 {
         unsigned long timeout;

--- 106 unchanged lines hidden ---
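Note on usage: after this change the SMP core no longer owns the SGI plumbing. An interrupt controller driver is expected to map its SGIs as ordinary per-CPU interrupts and hand the range to the core through the new set_smp_ipi_range() entry point, which requests ipi_handler() for each of them and installs ipi_send() as the cross-call backend. Below is a minimal driver-side sketch; example_smp_init() and its parameters are illustrative assumptions, not code from this diff (the real conversions live in the irqchip drivers):

#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/smp.h>

/*
 * Sketch only (not part of the diff above): how an irqchip driver
 * might register its SGIs with the rewritten ARM SMP core. The
 * "example_" identifiers are hypothetical; the irq_domain calls and
 * set_smp_ipi_range() are the kernel APIs used by this series.
 */
static void __init example_smp_init(struct irq_domain *domain,
                                    struct fwnode_handle *fwnode)
{
        struct irq_fwspec sgi_fwspec = {
                .fwnode         = fwnode,
                .param_count    = 1,    /* param[0]: SGI number, set per IRQ */
        };
        int base_sgi;

        /* Map the eight non-secure SGIs to consecutive Linux IRQs */
        base_sgi = __irq_domain_alloc_irqs(domain, -1, 8, NUMA_NO_NODE,
                                           &sgi_fwspec, false, NULL);
        if (WARN_ON(base_sgi <= 0))
                return;

        /*
         * Hand the range to the SMP core: request_percpu_irq() runs
         * for each SGI, ipi_send() becomes the cross-call backend,
         * and the boot CPU's IPIs are enabled immediately.
         */
        set_smp_ipi_range(base_sgi, 8);
}

This is also what makes the IPI_CPU_BACKTRACE = NR_IPI move work: since nr_ipi is clamped to MAX_IPI, the backtrace SGI still gets a Linux IRQ requested for it, while the trace_ipi_* hooks and per-IPI statistics keep guarding on NR_IPI and thus continue to ignore it.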