xref: /openbmc/linux/arch/arc/kernel/smp.c (revision 4d369680)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * RajeshwarR: Dec 11, 2007
 *   -- Added support for Inter Processor Interrupts
 *
 * Vineetg: Nov 1st, 2007
 *    -- Initial Write (Borrowed heavily from ARM)
 */

#include <linux/spinlock.h>
#include <linux/sched/mm.h>
#include <linux/interrupt.h>
#include <linux/profile.h>
#include <linux/mm.h>
#include <linux/cpu.h>
#include <linux/irq.h>
#include <linux/atomic.h>
#include <linux/cpumask.h>
#include <linux/reboot.h>
#include <linux/irqdomain.h>
#include <linux/export.h>
#include <linux/of_fdt.h>

#include <asm/mach_desc.h>
#include <asm/setup.h>
#include <asm/smp.h>
#include <asm/processor.h>

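/*
 * Without LLSC (LLOCK/SCOND) instructions, atomic R-M-W ops are emulated by
 * disabling interrupts; on SMP that alone is not enough, so all cores also
 * serialize on this one global spinlock (likely taken via the
 * atomic_ops_lock() helpers in the asm headers).
 */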
#ifndef CONFIG_ARC_HAS_LLSC
arch_spinlock_t smp_atomic_ops_lock = __ARCH_SPIN_LOCK_UNLOCKED;

EXPORT_SYMBOL_GPL(smp_atomic_ops_lock);
#endif

struct plat_smp_ops __weak plat_smp_ops;

/* XXX: per cpu ? Only needed once in early secondary boot */
struct task_struct *secondary_idle_tsk;

/* Called from start_kernel */
void __init smp_prepare_boot_cpu(void)
{
}

static int __init arc_get_cpu_map(const char *name, struct cpumask *cpumask)
{
	unsigned long dt_root = of_get_flat_dt_root();
	const char *buf;

	buf = of_get_flat_dt_prop(dt_root, name, NULL);
	if (!buf)
		return -EINVAL;

	if (cpulist_parse(buf, cpumask))
		return -EINVAL;

	return 0;
}
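
/*
 * A sketch of the flat DT input arc_get_cpu_map() expects: a cpu-list string
 * property in the root node, in the format accepted by cpulist_parse().
 * Hypothetical dts fragment (for the "possible-cpus" property used below):
 *
 *	/ {
 *		possible-cpus = "0-1,3";
 *		...
 *	};
 */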

/*
 * Read from DeviceTree and set up the cpu possible mask. If there is no
 * "possible-cpus" property in DeviceTree, pretend all [0..NR_CPUS-1] cpus
 * exist.
 */
static void __init arc_init_cpu_possible(void)
{
	struct cpumask cpumask;

	if (arc_get_cpu_map("possible-cpus", &cpumask)) {
		pr_warn("Failed to get possible-cpus from dtb, pretending all %u cpus exist\n",
			NR_CPUS);

		cpumask_setall(&cpumask);
	}

	if (!cpumask_test_cpu(0, &cpumask))
		panic("Master cpu (cpu[0]) is missing from the cpu possible mask!");

	init_cpu_possible(&cpumask);
}

/*
 * Called from setup_arch() before calling setup_processor()
 *
 * - Initialise the CPU possible map early - this describes the CPUs
 *   which may be present or become present in the system.
 * - Call the early smp init hook. This can initialize a specific multi-core
 *   IP which is, say, common to several platforms (hence not part of the
 *   platform specific init_early() hook)
 */
void __init smp_init_cpus(void)
{
	arc_init_cpu_possible();

	if (plat_smp_ops.init_early_smp)
		plat_smp_ops.init_early_smp();
}

/* Called from init() => process 1 */
void __init smp_prepare_cpus(unsigned int max_cpus)
{
	/*
	 * If the platform didn't set the present map already, do it now.
	 * The boot cpu is set to present already by init/main.c
	 */
	if (num_present_cpus() <= 1)
		init_cpu_present(cpu_possible_mask);
}

void __init smp_cpus_done(unsigned int max_cpus)
{

}

/*
 * Default smp boot helper for the Run-on-reset case, where all cores start
 * off together. Non-masters need to wait for the Master to start running.
 * This is implemented using a flag in memory, which Non-masters spin-wait on.
 * Master sets it to the cpu-id of the core to "ungate" it.
 */
static volatile int wake_flag;

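/*
 * On ARCv2, wake_flag is accessed with uncached loads/stores: a secondary
 * polls it before its caches are set up (and likely before coherency is
 * enabled), so a regular cached write by the master might not be visible to
 * it. ARCompact lacks those accessors, hence the plain accesses there.
 */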
#ifdef CONFIG_ISA_ARCOMPACT

#define __boot_read(f)		f
#define __boot_write(f, v)	f = v

#else

#define __boot_read(f)		arc_read_uncached_32(&f)
#define __boot_write(f, v)	arc_write_uncached_32(&f, v)

#endif

static void arc_default_smp_cpu_kick(int cpu, unsigned long pc)
{
	BUG_ON(cpu == 0);

	__boot_write(wake_flag, cpu);
}

void arc_platform_smp_wait_to_boot(int cpu)
{
	/* for halt-on-reset, we've waited already */
	if (IS_ENABLED(CONFIG_ARC_SMP_HALT_ON_RESET))
		return;

	while (__boot_read(wake_flag) != cpu)
		;

	__boot_write(wake_flag, 0);
}

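/*
 * Platform-provided SMP h/w description string (e.g. naming the IPI
 * provider), used when the arch prints cpu info; empty if the platform
 * sets none.
 */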
const char *arc_platform_smp_cpuinfo(void)
{
	return plat_smp_ops.info ? : "";
}

/*
 * The very first "C" code executed by a secondary
 * Called from asm stub in head.S
 * "current"/R25 already set up by low level boot code
 */
void start_kernel_secondary(void)
{
	struct mm_struct *mm = &init_mm;
	unsigned int cpu = smp_processor_id();

	/* MMU, Caches, Vector Table, Interrupts etc */
	setup_processor();

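	/*
	 * Adopt init_mm as this cpu's active_mm: mmget() bumps the user-space
	 * reference (mm_users) and mmgrab() the lifetime reference (mm_count),
	 * so the mm cannot be torn down under this (lazy-TLB) idle context.
	 */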
	mmget(mm);
	mmgrab(mm);
	current->active_mm = mm;
	cpumask_set_cpu(cpu, mm_cpumask(mm));

	/* Some SMP H/w setup - for each cpu */
	if (plat_smp_ops.init_per_cpu)
		plat_smp_ops.init_per_cpu(cpu);

	if (machine_desc->init_per_cpu)
		machine_desc->init_per_cpu(cpu);

	notify_cpu_starting(cpu);
	set_cpu_online(cpu, true);

	pr_info("## CPU%u LIVE ##: Executing Code...\n", cpu);

	local_irq_enable();
	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
}

/*
 * Called from kernel_init() -> smp_init() - for each CPU
 *
 * At this point, the Secondary Processor is "HALT"ed:
 *  - either it booted, but was halted in head.S
 *  - or it was configured to halt-on-reset
 * So we need to wake it up.
 *
 * Essential requirements being where to run from (PC) and stack (SP)
 */
int __cpu_up(unsigned int cpu, struct task_struct *idle)
{
	unsigned long wait_till;

	secondary_idle_tsk = idle;

	pr_info("Idle Task [%d] %p\n", cpu, idle);
	pr_info("Trying to bring up CPU%u ...\n", cpu);

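	/*
	 * Kick the secondary, either via the platform hook (passing the entry
	 * stub first_lines_of_secondary from head.S, which picks up
	 * secondary_idle_tsk for its stack and falls into
	 * start_kernel_secondary()) or via the default wake_flag helper,
	 * which needs no pc since a run-on-reset core is already spinning in
	 * the boot stub.
	 */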
	if (plat_smp_ops.cpu_kick)
		plat_smp_ops.cpu_kick(cpu,
				(unsigned long)first_lines_of_secondary);
	else
		arc_default_smp_cpu_kick(cpu, (unsigned long)NULL);

	/* wait for 1 sec after kicking the secondary */
	wait_till = jiffies + HZ;
	while (time_before(jiffies, wait_till)) {
		if (cpu_online(cpu))
			break;
	}

	if (!cpu_online(cpu)) {
		pr_info("Timeout: CPU%u FAILED to come up !!!\n", cpu);
		return -1;
	}

	secondary_idle_tsk = NULL;

	return 0;
}

/*****************************************************************************/
/*              Inter Processor Interrupt Handling                           */
/*****************************************************************************/

enum ipi_msg_type {
	IPI_EMPTY = 0,
	IPI_RESCHEDULE = 1,
	IPI_CALL_FUNC,
	IPI_CPU_STOP,
};

/*
 * In arches with an IRQ for each msg type (above), the receiver can use the
 * IRQ-id to figure out what msg was sent. For those which don't (ARC has one
 * dedicated IPI IRQ), the msg-type needs to be conveyed via per-cpu data.
 */

static DEFINE_PER_CPU(unsigned long, ipi_data);
25441195d23SVineet Gupta 
ipi_send_msg_one(int cpu,enum ipi_msg_type msg)255ddf84433SVineet Gupta static void ipi_send_msg_one(int cpu, enum ipi_msg_type msg)
25641195d23SVineet Gupta {
257f2a4aa56SVineet Gupta 	unsigned long __percpu *ipi_data_ptr = per_cpu_ptr(&ipi_data, cpu);
258d8e8c7ddSVineet Gupta 	unsigned long old, new;
25941195d23SVineet Gupta 	unsigned long flags;
26041195d23SVineet Gupta 
261f2a4aa56SVineet Gupta 	pr_debug("%d Sending msg [%d] to %d\n", smp_processor_id(), msg, cpu);
262f2a4aa56SVineet Gupta 
26341195d23SVineet Gupta 	local_irq_save(flags);
26441195d23SVineet Gupta 
265d8e8c7ddSVineet Gupta 	/*
266d8e8c7ddSVineet Gupta 	 * Atomically write new msg bit (in case others are writing too),
267d8e8c7ddSVineet Gupta 	 * and read back old value
268d8e8c7ddSVineet Gupta 	 */
269d8e8c7ddSVineet Gupta 	do {
270c6ed4d84SBang Li 		new = old = *ipi_data_ptr;
271d8e8c7ddSVineet Gupta 		new |= 1U << msg;
272d8e8c7ddSVineet Gupta 	} while (cmpxchg(ipi_data_ptr, old, new) != old);
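
	/*
	 * e.g. (hypothetical): if IPI_RESCHEDULE (bit 1) was still pending,
	 * old = 0x2, and adding IPI_CALL_FUNC (bit 2) yields new = 0x6; since
	 * old != 0, the hardware kick below is elided and the receiver picks
	 * both msgs out of the same IPI.
	 */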

	/*
	 * Call the platform specific IPI kick function, but avoid it if
	 * possible: only do so if there's no pending msg from other concurrent
	 * sender(s). Otherwise, the receiver will see this msg as well when it
	 * takes the IPI corresponding to that msg. This is true even if it is
	 * already in the IPI handler, because !@old means it has not yet
	 * dequeued the msg(s), so the @new msg can be a free-loader.
	 */
	if (plat_smp_ops.ipi_send && !old)
		plat_smp_ops.ipi_send(cpu);

	local_irq_restore(flags);
}

static void ipi_send_msg(const struct cpumask *callmap, enum ipi_msg_type msg)
{
	unsigned int cpu;

	for_each_cpu(cpu, callmap)
		ipi_send_msg_one(cpu, msg);
}

void arch_smp_send_reschedule(int cpu)
{
	ipi_send_msg_one(cpu, IPI_RESCHEDULE);
}

void smp_send_stop(void)
{
	struct cpumask targets;

	cpumask_copy(&targets, cpu_online_mask);
	cpumask_clear_cpu(smp_processor_id(), &targets);
	ipi_send_msg(&targets, IPI_CPU_STOP);
}

void arch_send_call_function_single_ipi(int cpu)
{
	ipi_send_msg_one(cpu, IPI_CALL_FUNC);
}

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	ipi_send_msg(mask, IPI_CALL_FUNC);
}

/*
 * ipi_cpu_stop - handle IPI from smp_send_stop()
 */
static void ipi_cpu_stop(void)
{
	machine_halt();
}

static inline int __do_IPI(unsigned long msg)
{
	int rc = 0;

	switch (msg) {
	case IPI_RESCHEDULE:
		scheduler_ipi();
		break;

	case IPI_CALL_FUNC:
		generic_smp_call_function_interrupt();
		break;

	case IPI_CPU_STOP:
		ipi_cpu_stop();
		break;

	default:
		rc = 1;
	}

	return rc;
}

/*
 * arch-common ISR to handle inter-processor interrupts
 * Has hooks for platform specific IPI
 */
static irqreturn_t do_IPI(int irq, void *dev_id)
{
	unsigned long pending;
	unsigned long __maybe_unused copy;

	pr_debug("IPI [%ld] received on cpu %d\n",
		 *this_cpu_ptr(&ipi_data), smp_processor_id());

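	/*
	 * Ack the hw IPI first, so that a msg sent after the xchg below
	 * re-raises the interrupt rather than getting cleared along with it.
	 */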
	if (plat_smp_ops.ipi_clear)
		plat_smp_ops.ipi_clear(irq);

	/*
	 * "dequeue" the msg corresponding to this IPI (and possibly other
	 * piggybacked msg from elided IPIs: see ipi_send_msg_one() above)
	 */
	copy = pending = xchg(this_cpu_ptr(&ipi_data), 0);

	do {
		unsigned long msg = __ffs(pending);
		int rc;

		rc = __do_IPI(msg);
		if (rc)
			pr_info("IPI with bogus msg %ld in %ld\n", msg, copy);
		pending &= ~(1U << msg);
	} while (pending);

	return IRQ_HANDLED;
}

/*
 * API called by platform code to hook up the arch-common ISR to their IPI IRQ
 *
 * Note: If the IPI is provided by the platform (vs. say ARC MCIP), their intc
 * setup/map function needs to call irq_set_percpu_devid() for the IPI IRQ,
 * otherwise request_percpu_irq() below will fail
 */
static DEFINE_PER_CPU(int, ipi_dev);

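/*
 * Typically called once per cpu as it comes up, from the intc driver's
 * per-cpu init path (the ARC MCIP driver, for one, does this), passing the
 * hwirq of the IPI line in the root irq domain.
 */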
int smp_ipi_irq_setup(int cpu, irq_hw_number_t hwirq)
{
	int *dev = per_cpu_ptr(&ipi_dev, cpu);
	unsigned int virq = irq_find_mapping(NULL, hwirq);

	if (!virq)
		panic("Cannot find virq for root domain and hwirq=%lu", hwirq);

	/* Boot cpu calls request, all call enable */
	if (!cpu) {
		int rc;

		rc = request_percpu_irq(virq, do_IPI, "IPI Interrupt", dev);
		if (rc)
			panic("Percpu IRQ request failed for %u\n", virq);
	}

	enable_percpu_irq(virq, 0);

	return 0;
}