xref: /openbmc/linux/arch/arc/kernel/smp.c (revision 68e21be2)
/*
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * RajeshwarR: Dec 11, 2007
 *   -- Added support for Inter Processor Interrupts
 *
 * Vineetg: Nov 1st, 2007
 *    -- Initial Write (Borrowed heavily from ARM)
 */

#include <linux/spinlock.h>
#include <linux/sched/mm.h>
#include <linux/interrupt.h>
#include <linux/profile.h>
#include <linux/mm.h>
#include <linux/cpu.h>
#include <linux/irq.h>
#include <linux/atomic.h>
#include <linux/cpumask.h>
#include <linux/reboot.h>
#include <linux/irqdomain.h>
#include <asm/processor.h>
#include <asm/setup.h>
#include <asm/mach_desc.h>

#ifndef CONFIG_ARC_HAS_LLSC
arch_spinlock_t smp_atomic_ops_lock = __ARCH_SPIN_LOCK_UNLOCKED;
arch_spinlock_t smp_bitops_lock = __ARCH_SPIN_LOCK_UNLOCKED;
#endif

struct plat_smp_ops __weak plat_smp_ops;

/* XXX: per cpu? Only needed once in early secondary boot */
struct task_struct *secondary_idle_tsk;

/* Called from start_kernel */
void __init smp_prepare_boot_cpu(void)
{
}

/*
 * Called from setup_arch() before calling setup_processor()
 *
 * - Initialise the CPU possible map early - this describes the CPUs
 *   which may be present or become present in the system.
 * - Call the early SMP init hook. This can initialize a specific multi-core
 *   IP which is, say, common to several platforms (hence not part of the
 *   platform specific init_early() hook)
 */
void __init smp_init_cpus(void)
{
	unsigned int i;

	for (i = 0; i < NR_CPUS; i++)
		set_cpu_possible(i, true);

	if (plat_smp_ops.init_early_smp)
		plat_smp_ops.init_early_smp();
}
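
/*
 * Illustrative sketch (hypothetical foo_*() names): a shared multi-core IP
 * driver overrides the weak plat_smp_ops above with a strong definition,
 * much as the ARC MCIP driver does, so that hooks like init_early_smp()
 * called from smp_init_cpus() actually do something. Guarded out since it
 * is an example only.
 */
#if 0
struct plat_smp_ops plat_smp_ops = {
	.info		= "Foo MultiCore IP",
	.init_early_smp	= foo_probe_n_setup,	/* once, from smp_init_cpus() */
	.init_per_cpu	= foo_setup_per_cpu,	/* on each cpu coming up */
	.ipi_send	= foo_ipi_send,
	.ipi_clear	= foo_ipi_clear,
};
#endif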

/* Called from init() => process 1 */
void __init smp_prepare_cpus(unsigned int max_cpus)
{
	int i;

	/*
	 * If the platform didn't set the present map already, do it now.
	 * The boot cpu is already set present by init/main.c
	 */
	if (num_present_cpus() <= 1) {
		for (i = 0; i < max_cpus; i++)
			set_cpu_present(i, true);
	}
}

void __init smp_cpus_done(unsigned int max_cpus)
{

}

/*
 * Default SMP boot helper for the Run-on-reset case, where all cores start
 * off together. Non-masters need to wait for the master to start running.
 * This is implemented using a flag in memory, which non-masters spin-wait
 * on. The master sets it to the cpu-id of the core to "ungate" it.
 */
static volatile int wake_flag;

#ifdef CONFIG_ISA_ARCOMPACT

#define __boot_read(f)		f
#define __boot_write(f, v)	f = v

#else

#define __boot_read(f)		arc_read_uncached_32(&f)
#define __boot_write(f, v)	arc_write_uncached_32(&f, v)

#endif
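
/*
 * Note: on ARCv2 the wake flag is read/written via uncached accessors,
 * presumably so the master and a just-out-of-reset secondary agree on its
 * value without relying on cache coherency this early in boot.
 */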

static void arc_default_smp_cpu_kick(int cpu, unsigned long pc)
{
	BUG_ON(cpu == 0);

	__boot_write(wake_flag, cpu);
}

void arc_platform_smp_wait_to_boot(int cpu)
{
	/* for halt-on-reset, we've waited already */
	if (IS_ENABLED(CONFIG_ARC_SMP_HALT_ON_RESET))
		return;

	while (__boot_read(wake_flag) != cpu)
		;

	__boot_write(wake_flag, 0);
}
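
/*
 * A rough timeline of the default handshake for, say, cpu 1 (run-on-reset):
 *
 *   master: __cpu_up(1)              cpu 1: spinning in head.S
 *     arc_default_smp_cpu_kick(1)      arc_platform_smp_wait_to_boot(1)
 *       wake_flag = 1            --->    sees wake_flag == 1
 *                                        wake_flag = 0
 *                                        proceeds to first_lines_of_secondary
 */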

const char *arc_platform_smp_cpuinfo(void)
{
	return plat_smp_ops.info ? : "";
}

/*
 * The very first "C" code executed by a secondary
 * Called from the asm stub in head.S
 * "current"/R25 already set up by low level boot code
 */
void start_kernel_secondary(void)
{
	struct mm_struct *mm = &init_mm;
	unsigned int cpu = smp_processor_id();

	/* MMU, Caches, Vector Table, Interrupts etc */
	setup_processor();
14141195d23SVineet Gupta 
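	/*
	 * The secondary's idle task permanently borrows init_mm: grab both a
	 * user reference (mmget) and a lifetime reference (mmgrab) on it
	 * before adopting it as active_mm below.
	 */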
	mmget(mm);
	mmgrab(mm);
	current->active_mm = mm;
	cpumask_set_cpu(cpu, mm_cpumask(mm));

	/* Some SMP H/w setup - for each cpu */
	if (plat_smp_ops.init_per_cpu)
		plat_smp_ops.init_per_cpu(cpu);

	if (machine_desc->init_per_cpu)
		machine_desc->init_per_cpu(cpu);

	notify_cpu_starting(cpu);
	set_cpu_online(cpu, true);

	pr_info("## CPU%u LIVE ##: Executing Code...\n", cpu);

	local_irq_enable();
	preempt_disable();
	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
}

/*
 * Called from kernel_init() -> smp_init() - for each CPU
 *
 * At this point, the secondary processor is "HALT"ed:
 *  - It booted, but was halted in head.S, or
 *  - It was configured to halt-on-reset
 * So it needs to be woken up.
 *
 * The essential requirements are where to run from (PC) and the stack (SP)
 */
int __cpu_up(unsigned int cpu, struct task_struct *idle)
{
	unsigned long wait_till;

	secondary_idle_tsk = idle;

	pr_info("Idle Task [%d] %p\n", cpu, idle);
	pr_info("Trying to bring up CPU%u ...\n", cpu);

	if (plat_smp_ops.cpu_kick)
		plat_smp_ops.cpu_kick(cpu,
				(unsigned long)first_lines_of_secondary);
	else
		arc_default_smp_cpu_kick(cpu, (unsigned long)NULL);

	/* wait for 1 sec after kicking the secondary */
	wait_till = jiffies + HZ;
	while (time_before(jiffies, wait_till)) {
		if (cpu_online(cpu))
			break;
	}

	if (!cpu_online(cpu)) {
		pr_info("Timeout: CPU%u failed to come up !!!\n", cpu);
		return -1;
	}

	secondary_idle_tsk = NULL;

	return 0;
}

/*
 * not supported here
 */
int setup_profiling_timer(unsigned int multiplier)
{
	return -EINVAL;
}

/*****************************************************************************/
/*              Inter Processor Interrupt Handling                           */
/*****************************************************************************/

enum ipi_msg_type {
	IPI_EMPTY = 0,
	IPI_RESCHEDULE = 1,
	IPI_CALL_FUNC,
	IPI_CPU_STOP,
};

/*
 * In arches with an IRQ for each msg type (above), the receiver can use the
 * IRQ-id to figure out which msg was sent. For those which don't (ARC has a
 * dedicated IPI IRQ), the msg type needs to be conveyed via per-cpu data
 */

static DEFINE_PER_CPU(unsigned long, ipi_data);
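
/*
 * ipi_data is a bitmask of pending msgs: e.g. a pending reschedule with a
 * cross-call piggybacked would read as
 * (1UL << IPI_RESCHEDULE) | (1UL << IPI_CALL_FUNC).
 */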

static void ipi_send_msg_one(int cpu, enum ipi_msg_type msg)
{
	unsigned long __percpu *ipi_data_ptr = per_cpu_ptr(&ipi_data, cpu);
	unsigned long old, new;
	unsigned long flags;

	pr_debug("%d Sending msg [%d] to %d\n", smp_processor_id(), msg, cpu);

	local_irq_save(flags);

	/*
	 * Atomically write the new msg bit (in case others are writing too),
	 * and read back the old value
	 */
	do {
		new = old = ACCESS_ONCE(*ipi_data_ptr);
		new |= 1U << msg;
	} while (cmpxchg(ipi_data_ptr, old, new) != old);

	/*
	 * Call the platform specific IPI kick function, but avoid it if
	 * possible: only do so if there's no pending msg from other concurrent
	 * sender(s). Otherwise, the receiver will see this msg as well when it
	 * takes the IPI corresponding to that msg. This is true even if it is
	 * already in the IPI handler, because !@old means it has not yet
	 * dequeued the msg(s), so the @new msg can be a free-loader
	 */
	if (plat_smp_ops.ipi_send && !old)
		plat_smp_ops.ipi_send(cpu);

	local_irq_restore(flags);
}

static void ipi_send_msg(const struct cpumask *callmap, enum ipi_msg_type msg)
{
	unsigned int cpu;

	for_each_cpu(cpu, callmap)
		ipi_send_msg_one(cpu, msg);
}

void smp_send_reschedule(int cpu)
{
	ipi_send_msg_one(cpu, IPI_RESCHEDULE);
}

void smp_send_stop(void)
{
	struct cpumask targets;

	cpumask_copy(&targets, cpu_online_mask);
	cpumask_clear_cpu(smp_processor_id(), &targets);
	ipi_send_msg(&targets, IPI_CPU_STOP);
}

void arch_send_call_function_single_ipi(int cpu)
{
	ipi_send_msg_one(cpu, IPI_CALL_FUNC);
}

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	ipi_send_msg(mask, IPI_CALL_FUNC);
}

/*
 * ipi_cpu_stop - handle IPI from smp_send_stop()
 */
static void ipi_cpu_stop(void)
{
	machine_halt();
}

static inline int __do_IPI(unsigned long msg)
{
	int rc = 0;

	switch (msg) {
	case IPI_RESCHEDULE:
		scheduler_ipi();
		break;

	case IPI_CALL_FUNC:
		generic_smp_call_function_interrupt();
		break;

	case IPI_CPU_STOP:
		ipi_cpu_stop();
		break;

	default:
		rc = 1;
	}

	return rc;
}

/*
 * arch-common ISR to handle inter-processor interrupts
 * Has hooks for platform specific IPI
 */
irqreturn_t do_IPI(int irq, void *dev_id)
{
	unsigned long pending;
	unsigned long __maybe_unused copy;

	pr_debug("IPI [%ld] received on cpu %d\n",
		 *this_cpu_ptr(&ipi_data), smp_processor_id());

	if (plat_smp_ops.ipi_clear)
		plat_smp_ops.ipi_clear(irq);

	/*
	 * "dequeue" the msg corresponding to this IPI (and possibly other
	 * piggybacked msgs from elided IPIs: see ipi_send_msg_one() above)
	 */
	copy = pending = xchg(this_cpu_ptr(&ipi_data), 0);

	do {
		unsigned long msg = __ffs(pending);
		int rc;

		rc = __do_IPI(msg);
		if (rc)
			pr_info("IPI with bogus msg %ld in %ld\n", msg, copy);
		pending &= ~(1U << msg);
	} while (pending);

	return IRQ_HANDLED;
}

/*
 * API called by platform code to hook up the arch-common ISR to their IPI IRQ
 *
 * Note: If the IPI is provided by the platform (vs. say ARC MCIP), their intc
 * setup/map function needs to call irq_set_percpu_devid() for the IPI IRQ,
 * otherwise request_percpu_irq() below will fail
 */
static DEFINE_PER_CPU(int, ipi_dev);

int smp_ipi_irq_setup(int cpu, irq_hw_number_t hwirq)
{
	int *dev = per_cpu_ptr(&ipi_dev, cpu);
	unsigned int virq = irq_find_mapping(NULL, hwirq);

	if (!virq)
		panic("Cannot find virq for root domain and hwirq=%lu", hwirq);

	/* Boot cpu calls request, all call enable */
	if (!cpu) {
		int rc;

		rc = request_percpu_irq(virq, do_IPI, "IPI Interrupt", dev);
		if (rc)
			panic("Percpu IRQ request failed for %u\n", virq);
	}

	enable_percpu_irq(virq, 0);

	return 0;
}
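
/*
 * Illustrative sketch (hypothetical foo_*() names and FOO_IPI_HWIRQ): how a
 * platform intc might satisfy the irq_set_percpu_devid() requirement above
 * and then wire each cpu to the common IPI ISR. Guarded out, example only.
 */
#if 0
static int foo_intc_domain_map(struct irq_domain *d, unsigned int virq,
			       irq_hw_number_t hwirq)
{
	if (hwirq == FOO_IPI_HWIRQ) {
		irq_set_percpu_devid(virq);
		irq_set_chip_and_handler(virq, &foo_irq_chip, handle_percpu_irq);
	} else {
		irq_set_chip_and_handler(virq, &foo_irq_chip, handle_level_irq);
	}

	return 0;
}

/* e.g. from a plat_smp_ops.init_per_cpu hook, on every cpu coming online */
static void foo_setup_per_cpu(int cpu)
{
	smp_ipi_irq_setup(cpu, FOO_IPI_HWIRQ);
}
#endif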