xref: /openbmc/linux/kernel/irq/manage.c (revision 20dbad75)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
4  * Copyright (C) 2005-2006 Thomas Gleixner
5  *
6  * This file contains driver APIs to the irq subsystem.
7  */
8 
9 #define pr_fmt(fmt) "genirq: " fmt
10 
11 #include <linux/irq.h>
12 #include <linux/kthread.h>
13 #include <linux/module.h>
14 #include <linux/random.h>
15 #include <linux/interrupt.h>
16 #include <linux/irqdomain.h>
17 #include <linux/slab.h>
18 #include <linux/sched.h>
19 #include <linux/sched/rt.h>
20 #include <linux/sched/task.h>
21 #include <linux/sched/isolation.h>
22 #include <uapi/linux/sched/types.h>
23 #include <linux/task_work.h>
24 
25 #include "internals.h"
26 
27 #if defined(CONFIG_IRQ_FORCED_THREADING) && !defined(CONFIG_PREEMPT_RT)
28 DEFINE_STATIC_KEY_FALSE(force_irqthreads_key);
29 
30 static int __init setup_forced_irqthreads(char *arg)
31 {
32 	static_branch_enable(&force_irqthreads_key);
33 	return 0;
34 }
35 early_param("threadirqs", setup_forced_irqthreads);
36 #endif
37 
38 static void __synchronize_hardirq(struct irq_desc *desc, bool sync_chip)
39 {
40 	struct irq_data *irqd = irq_desc_get_irq_data(desc);
41 	bool inprogress;
42 
43 	do {
44 		unsigned long flags;
45 
46 		/*
47 		 * Wait until we're out of the critical section.  This might
48 		 * give the wrong answer due to the lack of memory barriers.
49 		 */
50 		while (irqd_irq_inprogress(&desc->irq_data))
51 			cpu_relax();
52 
53 		/* Ok, that indicated we're done: double-check carefully. */
54 		raw_spin_lock_irqsave(&desc->lock, flags);
55 		inprogress = irqd_irq_inprogress(&desc->irq_data);
56 
57 		/*
58 		 * If requested and supported, check at the chip whether it
59 		 * is in flight at the hardware level, i.e. already pending
60 		 * in a CPU and waiting for service and acknowledge.
61 		 */
62 		if (!inprogress && sync_chip) {
63 			/*
64 			 * Ignore the return code. inprogress is only updated
65 			 * when the chip supports it.
66 			 */
67 			__irq_get_irqchip_state(irqd, IRQCHIP_STATE_ACTIVE,
68 						&inprogress);
69 		}
70 		raw_spin_unlock_irqrestore(&desc->lock, flags);
71 
72 		/* Oops, that failed? */
73 	} while (inprogress);
74 }
75 
76 /**
77  *	synchronize_hardirq - wait for pending hard IRQ handlers (on other CPUs)
78  *	@irq: interrupt number to wait for
79  *
80  *	This function waits for any pending hard IRQ handlers for this
81  *	interrupt to complete before returning. If you use this
82  *	function while holding a resource the IRQ handler may need you
83  *	will deadlock. It does not take associated threaded handlers
84  *	into account.
85  *
86  *	Do not use this for shutdown scenarios where you must be sure
87  *	that all parts (hardirq and threaded handler) have completed.
88  *
89  *	Returns: false if a threaded handler is active.
90  *
91  *	This function may be called - with care - from IRQ context.
92  *
93  *	It does not check whether there is an interrupt in flight at the
94  *	hardware level, but not serviced yet, as this might deadlock when
95  *	called with interrupts disabled and the target CPU of the interrupt
96  *	is the current CPU.
97  */
98 bool synchronize_hardirq(unsigned int irq)
99 {
100 	struct irq_desc *desc = irq_to_desc(irq);
101 
102 	if (desc) {
103 		__synchronize_hardirq(desc, false);
104 		return !atomic_read(&desc->threads_active);
105 	}
106 
107 	return true;
108 }
109 EXPORT_SYMBOL(synchronize_hardirq);
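110 
/*
 * Usage sketch (illustrative only): from a context that may not sleep, a
 * caller can wait out the hard IRQ handler and use the return value to learn
 * whether a threaded handler still needs to be dealt with separately. The
 * helper name is hypothetical.
 */
static bool __maybe_unused example_quiesce_hardirq(unsigned int irq)
{
	/* false means a threaded handler is still active for this irq */
	return synchronize_hardirq(irq);
}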
110 
111 static void __synchronize_irq(struct irq_desc *desc)
112 {
113 	__synchronize_hardirq(desc, true);
114 	/*
115 	 * We made sure that no hardirq handler is running. Now verify that no
116 	 * threaded handlers are active.
117 	 */
118 	wait_event(desc->wait_for_threads, !atomic_read(&desc->threads_active));
119 }
120 
121 /**
122  *	synchronize_irq - wait for pending IRQ handlers (on other CPUs)
123  *	@irq: interrupt number to wait for
124  *
125  *	This function waits for any pending IRQ handlers for this interrupt
126  *	to complete before returning. If you use this function while
127  *	holding a resource the IRQ handler may need you will deadlock.
128  *
129  *	Can only be called from preemptible code as it might sleep when
130  *	an interrupt thread is associated to @irq.
131  *
132  *	It optionally makes sure (when the irq chip supports that method)
133  *	that the interrupt is not pending in any CPU and waiting for
134  *	service.
135  */
136 void synchronize_irq(unsigned int irq)
137 {
138 	struct irq_desc *desc = irq_to_desc(irq);
139 
140 	if (desc)
141 		__synchronize_irq(desc);
142 }
143 EXPORT_SYMBOL(synchronize_irq);
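144 
/*
 * Usage sketch (illustrative only): typical teardown ordering. The device is
 * told to stop raising interrupts first, then synchronize_irq() ensures that
 * no hard or threaded handler is still running before data the handler uses
 * is torn down. The helper name is hypothetical.
 */
static void __maybe_unused example_teardown_sync(unsigned int irq)
{
	/* ... tell the device to stop generating interrupts here ... */
	synchronize_irq(irq);	/* may sleep, preemptible context only */
	/* ... now it is safe to release data the handler dereferences ... */
}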
144 
145 #ifdef CONFIG_SMP
146 cpumask_var_t irq_default_affinity;
147 
148 static bool __irq_can_set_affinity(struct irq_desc *desc)
149 {
150 	if (!desc || !irqd_can_balance(&desc->irq_data) ||
151 	    !desc->irq_data.chip || !desc->irq_data.chip->irq_set_affinity)
152 		return false;
153 	return true;
154 }
155 
156 /**
157  *	irq_can_set_affinity - Check if the affinity of a given irq can be set
158  *	@irq:		Interrupt to check
159  *
160  */
161 int irq_can_set_affinity(unsigned int irq)
162 {
163 	return __irq_can_set_affinity(irq_to_desc(irq));
164 }
165 
166 /**
167  * irq_can_set_affinity_usr - Check if the affinity of an irq can be set from user space
168  * @irq:	Interrupt to check
169  *
170  * Like irq_can_set_affinity() above, but additionally checks for the
171  * AFFINITY_MANAGED flag.
172  */
173 bool irq_can_set_affinity_usr(unsigned int irq)
174 {
175 	struct irq_desc *desc = irq_to_desc(irq);
176 
177 	return __irq_can_set_affinity(desc) &&
178 		!irqd_affinity_is_managed(&desc->irq_data);
179 }
180 
181 /**
182  *	irq_set_thread_affinity - Notify irq threads to adjust affinity
183  *	@desc:		irq descriptor which has affinity changed
184  *
185  *	We just set IRQTF_AFFINITY and delegate the affinity setting
186  *	to the interrupt thread itself. We can not call
187  *	set_cpus_allowed_ptr() here as we hold desc->lock and this
188  *	code can be called from hard interrupt context.
189  */
190 void irq_set_thread_affinity(struct irq_desc *desc)
191 {
192 	struct irqaction *action;
193 
194 	for_each_action_of_desc(desc, action) {
195 		if (action->thread)
196 			set_bit(IRQTF_AFFINITY, &action->thread_flags);
197 		if (action->secondary && action->secondary->thread)
198 			set_bit(IRQTF_AFFINITY, &action->secondary->thread_flags);
199 	}
200 }
201 
202 #ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
203 static void irq_validate_effective_affinity(struct irq_data *data)
204 {
205 	const struct cpumask *m = irq_data_get_effective_affinity_mask(data);
206 	struct irq_chip *chip = irq_data_get_irq_chip(data);
207 
208 	if (!cpumask_empty(m))
209 		return;
210 	pr_warn_once("irq_chip %s did not update eff. affinity mask of irq %u\n",
211 		     chip->name, data->irq);
212 }
213 #else
214 static inline void irq_validate_effective_affinity(struct irq_data *data) { }
215 #endif
216 
217 int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask,
218 			bool force)
219 {
220 	struct irq_desc *desc = irq_data_to_desc(data);
221 	struct irq_chip *chip = irq_data_get_irq_chip(data);
222 	const struct cpumask  *prog_mask;
223 	int ret;
224 
225 	static DEFINE_RAW_SPINLOCK(tmp_mask_lock);
226 	static struct cpumask tmp_mask;
227 
228 	if (!chip || !chip->irq_set_affinity)
229 		return -EINVAL;
230 
231 	raw_spin_lock(&tmp_mask_lock);
232 	/*
233 	 * If this is a managed interrupt and housekeeping is enabled on
234 	 * it check whether the requested affinity mask intersects with
235 	 * a housekeeping CPU. If so, then remove the isolated CPUs from
236 	 * the mask and just keep the housekeeping CPU(s). This prevents
237 	 * the affinity setter from routing the interrupt to an isolated
238 	 * CPU to avoid that I/O submitted from a housekeeping CPU causes
239 	 * interrupts on an isolated one.
240 	 *
241 	 * If the masks do not intersect or include online CPU(s) then
242 	 * keep the requested mask. The isolated target CPUs are only
243 	 * receiving interrupts when the I/O operation was submitted
244 	 * directly from them.
245 	 *
246 	 * If all housekeeping CPUs in the affinity mask are offline, the
247 	 * interrupt will be migrated by the CPU hotplug code once a
248 	 * housekeeping CPU which belongs to the affinity mask comes
249 	 * online.
250 	 */
251 	if (irqd_affinity_is_managed(data) &&
252 	    housekeeping_enabled(HK_TYPE_MANAGED_IRQ)) {
253 		const struct cpumask *hk_mask;
254 
255 		hk_mask = housekeeping_cpumask(HK_TYPE_MANAGED_IRQ);
256 
257 		cpumask_and(&tmp_mask, mask, hk_mask);
258 		if (!cpumask_intersects(&tmp_mask, cpu_online_mask))
259 			prog_mask = mask;
260 		else
261 			prog_mask = &tmp_mask;
262 	} else {
263 		prog_mask = mask;
264 	}
265 
266 	/*
267 	 * Make sure we only provide online CPUs to the irqchip,
268 	 * unless we are being asked to force the affinity (in which
269 	 * case we do as we are told).
270 	 */
271 	cpumask_and(&tmp_mask, prog_mask, cpu_online_mask);
272 	if (!force && !cpumask_empty(&tmp_mask))
273 		ret = chip->irq_set_affinity(data, &tmp_mask, force);
274 	else if (force)
275 		ret = chip->irq_set_affinity(data, mask, force);
276 	else
277 		ret = -EINVAL;
278 
279 	raw_spin_unlock(&tmp_mask_lock);
280 
281 	switch (ret) {
282 	case IRQ_SET_MASK_OK:
283 	case IRQ_SET_MASK_OK_DONE:
284 		cpumask_copy(desc->irq_common_data.affinity, mask);
285 		fallthrough;
286 	case IRQ_SET_MASK_OK_NOCOPY:
287 		irq_validate_effective_affinity(data);
288 		irq_set_thread_affinity(desc);
289 		ret = 0;
290 	}
291 
292 	return ret;
293 }
294 
295 #ifdef CONFIG_GENERIC_PENDING_IRQ
296 static inline int irq_set_affinity_pending(struct irq_data *data,
297 					   const struct cpumask *dest)
298 {
299 	struct irq_desc *desc = irq_data_to_desc(data);
300 
301 	irqd_set_move_pending(data);
302 	irq_copy_pending(desc, dest);
303 	return 0;
304 }
305 #else
306 static inline int irq_set_affinity_pending(struct irq_data *data,
307 					   const struct cpumask *dest)
308 {
309 	return -EBUSY;
310 }
311 #endif
312 
313 static int irq_try_set_affinity(struct irq_data *data,
314 				const struct cpumask *dest, bool force)
315 {
316 	int ret = irq_do_set_affinity(data, dest, force);
317 
318 	/*
319 	 * In case that the underlying vector management is busy and the
320 	 * architecture supports the generic pending mechanism then utilize
321 	 * this to avoid returning an error to user space.
322 	 */
323 	if (ret == -EBUSY && !force)
324 		ret = irq_set_affinity_pending(data, dest);
325 	return ret;
326 }
327 
328 static bool irq_set_affinity_deactivated(struct irq_data *data,
329 					 const struct cpumask *mask)
330 {
331 	struct irq_desc *desc = irq_data_to_desc(data);
332 
333 	/*
334 	 * Handle irq chips which can handle affinity only in activated
335 	 * state correctly
336 	 *
337 	 * If the interrupt is not yet activated, just store the affinity
338 	 * mask and do not call the chip driver at all. On activation the
339 	 * driver has to make sure anyway that the interrupt is in a
340 	 * usable state so startup works.
341 	 */
342 	if (!IS_ENABLED(CONFIG_IRQ_DOMAIN_HIERARCHY) ||
343 	    irqd_is_activated(data) || !irqd_affinity_on_activate(data))
344 		return false;
345 
346 	cpumask_copy(desc->irq_common_data.affinity, mask);
347 	irq_data_update_effective_affinity(data, mask);
348 	irqd_set(data, IRQD_AFFINITY_SET);
349 	return true;
350 }
351 
352 int irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask,
353 			    bool force)
354 {
355 	struct irq_chip *chip = irq_data_get_irq_chip(data);
356 	struct irq_desc *desc = irq_data_to_desc(data);
357 	int ret = 0;
358 
359 	if (!chip || !chip->irq_set_affinity)
360 		return -EINVAL;
361 
362 	if (irq_set_affinity_deactivated(data, mask))
363 		return 0;
364 
365 	if (irq_can_move_pcntxt(data) && !irqd_is_setaffinity_pending(data)) {
366 		ret = irq_try_set_affinity(data, mask, force);
367 	} else {
368 		irqd_set_move_pending(data);
369 		irq_copy_pending(desc, mask);
370 	}
371 
372 	if (desc->affinity_notify) {
373 		kref_get(&desc->affinity_notify->kref);
374 		if (!schedule_work(&desc->affinity_notify->work)) {
375 			/* Work was already scheduled, drop our extra ref */
376 			kref_put(&desc->affinity_notify->kref,
377 				 desc->affinity_notify->release);
378 		}
379 	}
380 	irqd_set(data, IRQD_AFFINITY_SET);
381 
382 	return ret;
383 }
384 
385 /**
386  * irq_update_affinity_desc - Update affinity management for an interrupt
387  * @irq:	The interrupt number to update
388  * @affinity:	Pointer to the affinity descriptor
389  *
390  * This interface can be used to configure the affinity management of
391  * interrupts which have been allocated already.
392  *
393  * There are certain limitations on when it may be used - attempts to use it
394  * for when the kernel is configured for generic IRQ reservation mode (in
395  * config GENERIC_IRQ_RESERVATION_MODE) will fail, as it may conflict with
396  * managed/non-managed interrupt accounting. In addition, attempts to use it on
397  * an interrupt which is already started or which has already been configured
398  * as managed will also fail, as these mean invalid init state or double init.
399  */
400 int irq_update_affinity_desc(unsigned int irq,
401 			     struct irq_affinity_desc *affinity)
402 {
403 	struct irq_desc *desc;
404 	unsigned long flags;
405 	bool activated;
406 	int ret = 0;
407 
408 	/*
409 	 * Supporting this with the reservation scheme used by x86 needs
410 	 * some more thought. Fail it for now.
411 	 */
412 	if (IS_ENABLED(CONFIG_GENERIC_IRQ_RESERVATION_MODE))
413 		return -EOPNOTSUPP;
414 
415 	desc = irq_get_desc_buslock(irq, &flags, 0);
416 	if (!desc)
417 		return -EINVAL;
418 
419 	/* Requires the interrupt to be shut down */
420 	if (irqd_is_started(&desc->irq_data)) {
421 		ret = -EBUSY;
422 		goto out_unlock;
423 	}
424 
425 	/* Interrupts which are already managed cannot be modified */
426 	if (irqd_affinity_is_managed(&desc->irq_data)) {
427 		ret = -EBUSY;
428 		goto out_unlock;
429 	}
430 
431 	/*
432 	 * Deactivate the interrupt. That's required to undo
433 	 * anything an earlier activation has established.
434 	 */
435 	activated = irqd_is_activated(&desc->irq_data);
436 	if (activated)
437 		irq_domain_deactivate_irq(&desc->irq_data);
438 
439 	if (affinity->is_managed) {
440 		irqd_set(&desc->irq_data, IRQD_AFFINITY_MANAGED);
441 		irqd_set(&desc->irq_data, IRQD_MANAGED_SHUTDOWN);
442 	}
443 
444 	cpumask_copy(desc->irq_common_data.affinity, &affinity->mask);
445 
446 	/* Restore the activation state */
447 	if (activated)
448 		irq_domain_activate_irq(&desc->irq_data, false);
449 
450 out_unlock:
451 	irq_put_desc_busunlock(desc, flags);
452 	return ret;
453 }
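454 
/*
 * Usage sketch (illustrative only): converting a not-yet-started interrupt
 * into a managed one pinned to a single CPU. This fails on kernels using
 * GENERIC_IRQ_RESERVATION_MODE or once the interrupt has been started. The
 * helper name is hypothetical.
 */
static int __maybe_unused example_make_irq_managed(unsigned int irq, int cpu)
{
	struct irq_affinity_desc affd = { .is_managed = 1 };

	cpumask_copy(&affd.mask, cpumask_of(cpu));
	return irq_update_affinity_desc(irq, &affd);
}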
454 
455 static int __irq_set_affinity(unsigned int irq, const struct cpumask *mask,
456 			      bool force)
457 {
458 	struct irq_desc *desc = irq_to_desc(irq);
459 	unsigned long flags;
460 	int ret;
461 
462 	if (!desc)
463 		return -EINVAL;
464 
465 	raw_spin_lock_irqsave(&desc->lock, flags);
466 	ret = irq_set_affinity_locked(irq_desc_get_irq_data(desc), mask, force);
467 	raw_spin_unlock_irqrestore(&desc->lock, flags);
468 	return ret;
469 }
470 
471 /**
472  * irq_set_affinity - Set the irq affinity of a given irq
473  * @irq:	Interrupt to set affinity
474  * @cpumask:	cpumask
475  *
476  * Fails if cpumask does not contain an online CPU
477  */
478 int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask)
479 {
480 	return __irq_set_affinity(irq, cpumask, false);
481 }
482 EXPORT_SYMBOL_GPL(irq_set_affinity);
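483 
/*
 * Usage sketch (illustrative only): steering an interrupt to one online CPU.
 * irq_set_affinity() rejects masks which do not contain an online CPU, so the
 * caller simply propagates the error. The helper name is hypothetical.
 */
static int __maybe_unused example_pin_irq(unsigned int irq, unsigned int cpu)
{
	return irq_set_affinity(irq, cpumask_of(cpu));
}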
483 
484 /**
485  * irq_force_affinity - Force the irq affinity of a given irq
486  * @irq:	Interrupt to set affinity
487  * @cpumask:	cpumask
488  *
489  * Same as irq_set_affinity, but without checking the mask against
490  * online cpus.
491  *
492  * Solely for low level cpu hotplug code, where we need to make per
493  * cpu interrupts affine before the cpu becomes online.
494  */
495 int irq_force_affinity(unsigned int irq, const struct cpumask *cpumask)
496 {
497 	return __irq_set_affinity(irq, cpumask, true);
498 }
499 EXPORT_SYMBOL_GPL(irq_force_affinity);
500 
501 int __irq_apply_affinity_hint(unsigned int irq, const struct cpumask *m,
502 			      bool setaffinity)
503 {
504 	unsigned long flags;
505 	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
506 
507 	if (!desc)
508 		return -EINVAL;
509 	desc->affinity_hint = m;
510 	irq_put_desc_unlock(desc, flags);
511 	if (m && setaffinity)
512 		__irq_set_affinity(irq, m, false);
513 	return 0;
514 }
515 EXPORT_SYMBOL_GPL(__irq_apply_affinity_hint);
516 
517 static void irq_affinity_notify(struct work_struct *work)
518 {
519 	struct irq_affinity_notify *notify =
520 		container_of(work, struct irq_affinity_notify, work);
521 	struct irq_desc *desc = irq_to_desc(notify->irq);
522 	cpumask_var_t cpumask;
523 	unsigned long flags;
524 
525 	if (!desc || !alloc_cpumask_var(&cpumask, GFP_KERNEL))
526 		goto out;
527 
528 	raw_spin_lock_irqsave(&desc->lock, flags);
529 	if (irq_move_pending(&desc->irq_data))
530 		irq_get_pending(cpumask, desc);
531 	else
532 		cpumask_copy(cpumask, desc->irq_common_data.affinity);
533 	raw_spin_unlock_irqrestore(&desc->lock, flags);
534 
535 	notify->notify(notify, cpumask);
536 
537 	free_cpumask_var(cpumask);
538 out:
539 	kref_put(&notify->kref, notify->release);
540 }
541 
542 /**
543  *	irq_set_affinity_notifier - control notification of IRQ affinity changes
544  *	@irq:		Interrupt for which to enable/disable notification
545  *	@notify:	Context for notification, or %NULL to disable
546  *			notification.  Function pointers must be initialised;
547  *			the other fields will be initialised by this function.
548  *
549  *	Must be called in process context.  Notification may only be enabled
550  *	after the IRQ is allocated and must be disabled before the IRQ is
551  *	freed using free_irq().
552  */
553 int
554 irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify)
555 {
556 	struct irq_desc *desc = irq_to_desc(irq);
557 	struct irq_affinity_notify *old_notify;
558 	unsigned long flags;
559 
560 	/* The release function is promised process context */
561 	might_sleep();
562 
563 	if (!desc || desc->istate & IRQS_NMI)
564 		return -EINVAL;
565 
566 	/* Complete initialisation of *notify */
567 	if (notify) {
568 		notify->irq = irq;
569 		kref_init(&notify->kref);
570 		INIT_WORK(&notify->work, irq_affinity_notify);
571 	}
572 
573 	raw_spin_lock_irqsave(&desc->lock, flags);
574 	old_notify = desc->affinity_notify;
575 	desc->affinity_notify = notify;
576 	raw_spin_unlock_irqrestore(&desc->lock, flags);
577 
578 	if (old_notify) {
579 		if (cancel_work_sync(&old_notify->work)) {
580 			/* Pending work had a ref, put that one too */
581 			kref_put(&old_notify->kref, old_notify->release);
582 		}
583 		kref_put(&old_notify->kref, old_notify->release);
584 	}
585 
586 	return 0;
587 }
588 EXPORT_SYMBOL_GPL(irq_set_affinity_notifier);
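589 
/*
 * Usage sketch (illustrative only): a driver that wants to follow affinity
 * changes supplies the notify() and release() callbacks; irq, kref and the
 * work item are initialised by irq_set_affinity_notifier(). All example_*
 * names are hypothetical.
 */
static void example_affinity_notify(struct irq_affinity_notify *notify,
				    const cpumask_t *mask)
{
	/* Rebalance per-CPU resources to match @mask here. */
}

static void example_affinity_release(struct kref *ref)
{
	/* Drop whatever reference the embedding object holds, if any. */
}

static struct irq_affinity_notify example_affinity_notifier = {
	.notify		= example_affinity_notify,
	.release	= example_affinity_release,
};

static int __maybe_unused example_register_affinity_notifier(unsigned int irq)
{
	return irq_set_affinity_notifier(irq, &example_affinity_notifier);
}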
589 
590 #ifndef CONFIG_AUTO_IRQ_AFFINITY
591 /*
592  * Generic version of the affinity autoselector.
593  */
594 int irq_setup_affinity(struct irq_desc *desc)
595 {
596 	struct cpumask *set = irq_default_affinity;
597 	int ret, node = irq_desc_get_node(desc);
598 	static DEFINE_RAW_SPINLOCK(mask_lock);
599 	static struct cpumask mask;
600 
601 	/* Excludes PER_CPU and NO_BALANCE interrupts */
602 	if (!__irq_can_set_affinity(desc))
603 		return 0;
604 
605 	raw_spin_lock(&mask_lock);
606 	/*
607 	 * Preserve the managed affinity setting and a userspace affinity
608 	 * setup, but make sure that one of the targets is online.
609 	 */
610 	if (irqd_affinity_is_managed(&desc->irq_data) ||
611 	    irqd_has_set(&desc->irq_data, IRQD_AFFINITY_SET)) {
612 		if (cpumask_intersects(desc->irq_common_data.affinity,
613 				       cpu_online_mask))
614 			set = desc->irq_common_data.affinity;
615 		else
616 			irqd_clear(&desc->irq_data, IRQD_AFFINITY_SET);
617 	}
618 
619 	cpumask_and(&mask, cpu_online_mask, set);
620 	if (cpumask_empty(&mask))
621 		cpumask_copy(&mask, cpu_online_mask);
622 
623 	if (node != NUMA_NO_NODE) {
624 		const struct cpumask *nodemask = cpumask_of_node(node);
625 
626 		/* make sure at least one of the cpus in nodemask is online */
627 		if (cpumask_intersects(&mask, nodemask))
628 			cpumask_and(&mask, &mask, nodemask);
629 	}
630 	ret = irq_do_set_affinity(&desc->irq_data, &mask, false);
631 	raw_spin_unlock(&mask_lock);
632 	return ret;
633 }
634 #else
635 /* Wrapper for ALPHA specific affinity selector magic */
636 int irq_setup_affinity(struct irq_desc *desc)
637 {
638 	return irq_select_affinity(irq_desc_get_irq(desc));
639 }
640 #endif /* CONFIG_AUTO_IRQ_AFFINITY */
641 #endif /* CONFIG_SMP */
642 
643 
644 /**
645  *	irq_set_vcpu_affinity - Set vcpu affinity for the interrupt
646  *	@irq: interrupt number to set affinity
647  *	@vcpu_info: vCPU specific data or pointer to a percpu array of vCPU
648  *	            specific data for percpu_devid interrupts
649  *
650  *	This function uses the vCPU specific data to set the vCPU
651  *	affinity for an irq. The vCPU specific data is passed from
652  *	outside, such as KVM. One example code path is as below:
653  *	KVM -> IOMMU -> irq_set_vcpu_affinity().
654  */
655 int irq_set_vcpu_affinity(unsigned int irq, void *vcpu_info)
656 {
657 	unsigned long flags;
658 	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
659 	struct irq_data *data;
660 	struct irq_chip *chip;
661 	int ret = -ENOSYS;
662 
663 	if (!desc)
664 		return -EINVAL;
665 
666 	data = irq_desc_get_irq_data(desc);
667 	do {
668 		chip = irq_data_get_irq_chip(data);
669 		if (chip && chip->irq_set_vcpu_affinity)
670 			break;
671 #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
672 		data = data->parent_data;
673 #else
674 		data = NULL;
675 #endif
676 	} while (data);
677 
678 	if (data)
679 		ret = chip->irq_set_vcpu_affinity(data, vcpu_info);
680 	irq_put_desc_unlock(desc, flags);
681 
682 	return ret;
683 }
684 EXPORT_SYMBOL_GPL(irq_set_vcpu_affinity);
685 
686 void __disable_irq(struct irq_desc *desc)
687 {
688 	if (!desc->depth++)
689 		irq_disable(desc);
690 }
691 
692 static int __disable_irq_nosync(unsigned int irq)
693 {
694 	unsigned long flags;
695 	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
696 
697 	if (!desc)
698 		return -EINVAL;
699 	__disable_irq(desc);
700 	irq_put_desc_busunlock(desc, flags);
701 	return 0;
702 }
703 
704 /**
705  *	disable_irq_nosync - disable an irq without waiting
706  *	@irq: Interrupt to disable
707  *
708  *	Disable the selected interrupt line.  Disables and Enables are
709  *	nested.
710  *	Unlike disable_irq(), this function does not ensure existing
711  *	instances of the IRQ handler have completed before returning.
712  *
713  *	This function may be called from IRQ context.
714  */
715 void disable_irq_nosync(unsigned int irq)
716 {
717 	__disable_irq_nosync(irq);
718 }
719 EXPORT_SYMBOL(disable_irq_nosync);
720 
721 /**
722  *	disable_irq - disable an irq and wait for completion
723  *	@irq: Interrupt to disable
724  *
725  *	Disable the selected interrupt line.  Enables and Disables are
726  *	nested.
727  *	This function waits for any pending IRQ handlers for this interrupt
728  *	to complete before returning. If you use this function while
729  *	holding a resource the IRQ handler may need you will deadlock.
730  *
731  *	Can only be called from preemptible code as it might sleep when
732  *	an interrupt thread is associated to @irq.
733  *
734  */
735 void disable_irq(unsigned int irq)
736 {
737 	might_sleep();
738 	if (!__disable_irq_nosync(irq))
739 		synchronize_irq(irq);
740 }
741 EXPORT_SYMBOL(disable_irq);
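742 
/*
 * Usage sketch (illustrative only): reprogramming a device while its handlers
 * are guaranteed not to run. disable_irq() waits for running handlers,
 * enable_irq() restores delivery. The helper name is hypothetical.
 */
static void __maybe_unused example_reconfigure_quiesced(unsigned int irq)
{
	disable_irq(irq);	/* may sleep: waits for running handlers */
	/* ... reprogram the device registers here ... */
	enable_irq(irq);
}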
742 
743 /**
744  *	disable_hardirq - disables an irq and waits for hardirq completion
745  *	@irq: Interrupt to disable
746  *
747  *	Disable the selected interrupt line.  Enables and Disables are
748  *	nested.
749  *	This function waits for any pending hard IRQ handlers for this
750  *	interrupt to complete before returning. If you use this function while
751  *	holding a resource the hard IRQ handler may need you will deadlock.
752  *
753  *	When used to optimistically disable an interrupt from atomic context
754  *	the return value must be checked.
755  *
756  *	Returns: false if a threaded handler is active.
757  *
758  *	This function may be called - with care - from IRQ context.
759  */
760 bool disable_hardirq(unsigned int irq)
761 {
762 	if (!__disable_irq_nosync(irq))
763 		return synchronize_hardirq(irq);
764 
765 	return false;
766 }
767 EXPORT_SYMBOL_GPL(disable_hardirq);
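768 
/*
 * Usage sketch (illustrative only): optimistic disable from atomic context.
 * If a threaded handler is still running, this sketch undoes the disable so
 * the caller can retry from a sleepable context with disable_irq(). The
 * helper name is hypothetical.
 */
static bool __maybe_unused example_try_disable_atomic(unsigned int irq)
{
	if (disable_hardirq(irq))
		return true;

	enable_irq(irq);
	return false;
}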
768 
769 /**
770  *	disable_nmi_nosync - disable an nmi without waiting
771  *	@irq: Interrupt to disable
772  *
773  *	Disable the selected interrupt line. Disables and enables are
774  *	nested.
775  *	The interrupt to disable must have been requested through request_nmi.
776  *	Unlike disable_nmi(), this function does not ensure existing
777  *	instances of the IRQ handler have completed before returning.
778  */
779 void disable_nmi_nosync(unsigned int irq)
780 {
781 	disable_irq_nosync(irq);
782 }
783 
784 void __enable_irq(struct irq_desc *desc)
785 {
786 	switch (desc->depth) {
787 	case 0:
788  err_out:
789 		WARN(1, KERN_WARNING "Unbalanced enable for IRQ %d\n",
790 		     irq_desc_get_irq(desc));
791 		break;
792 	case 1: {
793 		if (desc->istate & IRQS_SUSPENDED)
794 			goto err_out;
795 		/* Prevent probing on this irq: */
796 		irq_settings_set_noprobe(desc);
797 		/*
798 		 * Call irq_startup() not irq_enable() here because the
799 		 * interrupt might be marked NOAUTOEN so irq_startup()
800 		 * needs to be invoked when it gets enabled the first time.
801 		 * This is also required when __enable_irq() is invoked for
802 		 * a managed and shutdown interrupt from the S3 resume
803 		 * path.
804 		 *
805 		 * If it was already started up, then irq_startup() will
806 		 * invoke irq_enable() under the hood.
807 		 */
808 		irq_startup(desc, IRQ_RESEND, IRQ_START_FORCE);
809 		break;
810 	}
811 	default:
812 		desc->depth--;
813 	}
814 }
815 
816 /**
817  *	enable_irq - enable handling of an irq
818  *	@irq: Interrupt to enable
819  *
820  *	Undoes the effect of one call to disable_irq().  If this
821  *	matches the last disable, processing of interrupts on this
822  *	IRQ line is re-enabled.
823  *
824  *	This function may be called from IRQ context only when
825  *	desc->irq_data.chip->bus_lock and desc->chip->bus_sync_unlock are NULL !
826  */
827 void enable_irq(unsigned int irq)
828 {
829 	unsigned long flags;
830 	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
831 
832 	if (!desc)
833 		return;
834 	if (WARN(!desc->irq_data.chip,
835 		 KERN_ERR "enable_irq before setup/request_irq: irq %u\n", irq))
836 		goto out;
837 
838 	__enable_irq(desc);
839 out:
840 	irq_put_desc_busunlock(desc, flags);
841 }
842 EXPORT_SYMBOL(enable_irq);
843 
844 /**
845  *	enable_nmi - enable handling of an nmi
846  *	@irq: Interrupt to enable
847  *
848  *	The interrupt to enable must have been requested through request_nmi.
849  *	Undoes the effect of one call to disable_nmi(). If this
850  *	matches the last disable, processing of interrupts on this
851  *	IRQ line is re-enabled.
852  */
853 void enable_nmi(unsigned int irq)
854 {
855 	enable_irq(irq);
856 }
857 
858 static int set_irq_wake_real(unsigned int irq, unsigned int on)
859 {
860 	struct irq_desc *desc = irq_to_desc(irq);
861 	int ret = -ENXIO;
862 
863 	if (irq_desc_get_chip(desc)->flags &  IRQCHIP_SKIP_SET_WAKE)
864 		return 0;
865 
866 	if (desc->irq_data.chip->irq_set_wake)
867 		ret = desc->irq_data.chip->irq_set_wake(&desc->irq_data, on);
868 
869 	return ret;
870 }
871 
872 /**
873  *	irq_set_irq_wake - control irq power management wakeup
874  *	@irq:	interrupt to control
875  *	@on:	enable/disable power management wakeup
876  *
877  *	Enable/disable power management wakeup mode, which is
878  *	disabled by default.  Enables and disables must match,
879  *	just as they match for non-wakeup mode support.
880  *
881  *	Wakeup mode lets this IRQ wake the system from sleep
882  *	states like "suspend to RAM".
883  *
884  *	Note: irq enable/disable state is completely orthogonal
885  *	to the enable/disable state of irq wake. An irq can be
886  *	disabled with disable_irq() and still wake the system as
887  *	long as the irq has wake enabled. If this does not hold,
888  *	then the underlying irq chip and the related driver need
889  *	to be investigated.
890  */
891 int irq_set_irq_wake(unsigned int irq, unsigned int on)
892 {
893 	unsigned long flags;
894 	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
895 	int ret = 0;
896 
897 	if (!desc)
898 		return -EINVAL;
899 
900 	/* Don't use NMIs as wake up interrupts please */
901 	if (desc->istate & IRQS_NMI) {
902 		ret = -EINVAL;
903 		goto out_unlock;
904 	}
905 
906 	/* wakeup-capable irqs can be shared between drivers that
907 	 * don't need to have the same sleep mode behaviors.
908 	 */
909 	if (on) {
910 		if (desc->wake_depth++ == 0) {
911 			ret = set_irq_wake_real(irq, on);
912 			if (ret)
913 				desc->wake_depth = 0;
914 			else
915 				irqd_set(&desc->irq_data, IRQD_WAKEUP_STATE);
916 		}
917 	} else {
918 		if (desc->wake_depth == 0) {
919 			WARN(1, "Unbalanced IRQ %d wake disable\n", irq);
920 		} else if (--desc->wake_depth == 0) {
921 			ret = set_irq_wake_real(irq, on);
922 			if (ret)
923 				desc->wake_depth = 1;
924 			else
925 				irqd_clear(&desc->irq_data, IRQD_WAKEUP_STATE);
926 		}
927 	}
928 
929 out_unlock:
930 	irq_put_desc_busunlock(desc, flags);
931 	return ret;
932 }
933 EXPORT_SYMBOL(irq_set_irq_wake);
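934 
/*
 * Usage sketch (illustrative only): arming an interrupt as a wakeup source
 * from a driver's suspend callback and disarming it on resume. Enables and
 * disables must be balanced. The helper name is hypothetical.
 */
static int __maybe_unused example_set_wakeup(unsigned int irq, bool suspend)
{
	return irq_set_irq_wake(irq, suspend ? 1 : 0);
}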
934 
935 /*
936  * Internal function that tells the architecture code whether a
937  * particular irq has been exclusively allocated or is available
938  * for driver use.
939  */
940 int can_request_irq(unsigned int irq, unsigned long irqflags)
941 {
942 	unsigned long flags;
943 	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
944 	int canrequest = 0;
945 
946 	if (!desc)
947 		return 0;
948 
949 	if (irq_settings_can_request(desc)) {
950 		if (!desc->action ||
951 		    irqflags & desc->action->flags & IRQF_SHARED)
952 			canrequest = 1;
953 	}
954 	irq_put_desc_unlock(desc, flags);
955 	return canrequest;
956 }
957 
958 int __irq_set_trigger(struct irq_desc *desc, unsigned long flags)
959 {
960 	struct irq_chip *chip = desc->irq_data.chip;
961 	int ret, unmask = 0;
962 
963 	if (!chip || !chip->irq_set_type) {
964 		/*
965 		 * IRQF_TRIGGER_* but the PIC does not support multiple
966 		 * flow-types?
967 		 */
968 		pr_debug("No set_type function for IRQ %d (%s)\n",
969 			 irq_desc_get_irq(desc),
970 			 chip ? (chip->name ? : "unknown") : "unknown");
971 		return 0;
972 	}
973 
974 	if (chip->flags & IRQCHIP_SET_TYPE_MASKED) {
975 		if (!irqd_irq_masked(&desc->irq_data))
976 			mask_irq(desc);
977 		if (!irqd_irq_disabled(&desc->irq_data))
978 			unmask = 1;
979 	}
980 
981 	/* Mask all flags except trigger mode */
982 	flags &= IRQ_TYPE_SENSE_MASK;
983 	ret = chip->irq_set_type(&desc->irq_data, flags);
984 
985 	switch (ret) {
986 	case IRQ_SET_MASK_OK:
987 	case IRQ_SET_MASK_OK_DONE:
988 		irqd_clear(&desc->irq_data, IRQD_TRIGGER_MASK);
989 		irqd_set(&desc->irq_data, flags);
990 		fallthrough;
991 
992 	case IRQ_SET_MASK_OK_NOCOPY:
993 		flags = irqd_get_trigger_type(&desc->irq_data);
994 		irq_settings_set_trigger_mask(desc, flags);
995 		irqd_clear(&desc->irq_data, IRQD_LEVEL);
996 		irq_settings_clr_level(desc);
997 		if (flags & IRQ_TYPE_LEVEL_MASK) {
998 			irq_settings_set_level(desc);
999 			irqd_set(&desc->irq_data, IRQD_LEVEL);
1000 		}
1001 
1002 		ret = 0;
1003 		break;
1004 	default:
1005 		pr_err("Setting trigger mode %lu for irq %u failed (%pS)\n",
1006 		       flags, irq_desc_get_irq(desc), chip->irq_set_type);
1007 	}
1008 	if (unmask)
1009 		unmask_irq(desc);
1010 	return ret;
1011 }
1012 
1013 #ifdef CONFIG_HARDIRQS_SW_RESEND
1014 int irq_set_parent(int irq, int parent_irq)
1015 {
1016 	unsigned long flags;
1017 	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
1018 
1019 	if (!desc)
1020 		return -EINVAL;
1021 
1022 	desc->parent_irq = parent_irq;
1023 
1024 	irq_put_desc_unlock(desc, flags);
1025 	return 0;
1026 }
1027 EXPORT_SYMBOL_GPL(irq_set_parent);
1028 #endif
1029 
1030 /*
1031  * Default primary interrupt handler for threaded interrupts. Is
1032  * assigned as primary handler when request_threaded_irq is called
1033  * with handler == NULL. Useful for oneshot interrupts.
1034  */
1035 static irqreturn_t irq_default_primary_handler(int irq, void *dev_id)
1036 {
1037 	return IRQ_WAKE_THREAD;
1038 }
1039 
1040 /*
1041  * Primary handler for nested threaded interrupts. Should never be
1042  * called.
1043  */
1044 static irqreturn_t irq_nested_primary_handler(int irq, void *dev_id)
1045 {
1046 	WARN(1, "Primary handler called for nested irq %d\n", irq);
1047 	return IRQ_NONE;
1048 }
1049 
1050 static irqreturn_t irq_forced_secondary_handler(int irq, void *dev_id)
1051 {
1052 	WARN(1, "Secondary action handler called for irq %d\n", irq);
1053 	return IRQ_NONE;
1054 }
1055 
1056 static int irq_wait_for_interrupt(struct irqaction *action)
1057 {
1058 	for (;;) {
1059 		set_current_state(TASK_INTERRUPTIBLE);
1060 
1061 		if (kthread_should_stop()) {
1062 			/* may need to run one last time */
1063 			if (test_and_clear_bit(IRQTF_RUNTHREAD,
1064 					       &action->thread_flags)) {
1065 				__set_current_state(TASK_RUNNING);
1066 				return 0;
1067 			}
1068 			__set_current_state(TASK_RUNNING);
1069 			return -1;
1070 		}
1071 
1072 		if (test_and_clear_bit(IRQTF_RUNTHREAD,
1073 				       &action->thread_flags)) {
1074 			__set_current_state(TASK_RUNNING);
1075 			return 0;
1076 		}
1077 		schedule();
1078 	}
1079 }
1080 
1081 /*
1082  * Oneshot interrupts keep the irq line masked until the threaded
1083  * handler has finished. Unmask if the interrupt has not been disabled and
1084  * is marked MASKED.
1085  */
1086 static void irq_finalize_oneshot(struct irq_desc *desc,
1087 				 struct irqaction *action)
1088 {
1089 	if (!(desc->istate & IRQS_ONESHOT) ||
1090 	    action->handler == irq_forced_secondary_handler)
1091 		return;
1092 again:
1093 	chip_bus_lock(desc);
1094 	raw_spin_lock_irq(&desc->lock);
1095 
1096 	/*
1097 	 * Implausible though it may be, we need to protect ourselves against
1098 	 * the following scenario:
1099 	 *
1100 	 * The thread can finish before the hard interrupt handler
1101 	 * on the other CPU has returned. If we unmask the irq line then the
1102 	 * interrupt can come in again, mask the line, and bail out due
1103 	 * to IRQS_INPROGRESS, leaving the irq line masked forever.
1104 	 *
1105 	 * This also serializes the state of shared oneshot handlers
1106 	 * versus "desc->threads_oneshot |= action->thread_mask;" in
1107 	 * irq_wake_thread(). See the comment there which explains the
1108 	 * serialization.
1109 	 */
1110 	if (unlikely(irqd_irq_inprogress(&desc->irq_data))) {
1111 		raw_spin_unlock_irq(&desc->lock);
1112 		chip_bus_sync_unlock(desc);
1113 		cpu_relax();
1114 		goto again;
1115 	}
1116 
1117 	/*
1118 	 * Now check again whether the thread should run. Otherwise
1119 	 * we would clear the threads_oneshot bit of this thread which
1120 	 * was just set.
1121 	 */
1122 	if (test_bit(IRQTF_RUNTHREAD, &action->thread_flags))
1123 		goto out_unlock;
1124 
1125 	desc->threads_oneshot &= ~action->thread_mask;
1126 
1127 	if (!desc->threads_oneshot && !irqd_irq_disabled(&desc->irq_data) &&
1128 	    irqd_irq_masked(&desc->irq_data))
1129 		unmask_threaded_irq(desc);
1130 
1131 out_unlock:
1132 	raw_spin_unlock_irq(&desc->lock);
1133 	chip_bus_sync_unlock(desc);
1134 }
1135 
1136 #ifdef CONFIG_SMP
1137 /*
1138  * Check whether we need to change the affinity of the interrupt thread.
1139  */
1140 static void
1141 irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action)
1142 {
1143 	cpumask_var_t mask;
1144 	bool valid = true;
1145 
1146 	if (!test_and_clear_bit(IRQTF_AFFINITY, &action->thread_flags))
1147 		return;
1148 
1149 	/*
1150 	 * In case we are out of memory we set IRQTF_AFFINITY again and
1151 	 * try again next time
1152 	 */
1153 	if (!alloc_cpumask_var(&mask, GFP_KERNEL)) {
1154 		set_bit(IRQTF_AFFINITY, &action->thread_flags);
1155 		return;
1156 	}
1157 
1158 	raw_spin_lock_irq(&desc->lock);
1159 	/*
1160 	 * This code is triggered unconditionally. Check the affinity
1161 	 * mask pointer. For CPU_MASK_OFFSTACK=n this is optimized out.
1162 	 */
1163 	if (cpumask_available(desc->irq_common_data.affinity)) {
1164 		const struct cpumask *m;
1165 
1166 		m = irq_data_get_effective_affinity_mask(&desc->irq_data);
1167 		cpumask_copy(mask, m);
1168 	} else {
1169 		valid = false;
1170 	}
1171 	raw_spin_unlock_irq(&desc->lock);
1172 
1173 	if (valid)
1174 		set_cpus_allowed_ptr(current, mask);
1175 	free_cpumask_var(mask);
1176 }
1177 #else
1178 static inline void
1179 irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action) { }
1180 #endif
1181 
1182 /*
1183  * Interrupts which are not explicitly requested as threaded
1184  * interrupts rely on the implicit bh/preempt disable of the hard irq
1185  * context. So we need to disable bh here to avoid deadlocks and other
1186  * side effects.
1187  */
1188 static irqreturn_t
1189 irq_forced_thread_fn(struct irq_desc *desc, struct irqaction *action)
1190 {
1191 	irqreturn_t ret;
1192 
1193 	local_bh_disable();
1194 	if (!IS_ENABLED(CONFIG_PREEMPT_RT))
1195 		local_irq_disable();
1196 	ret = action->thread_fn(action->irq, action->dev_id);
1197 	if (ret == IRQ_HANDLED)
1198 		atomic_inc(&desc->threads_handled);
1199 
1200 	irq_finalize_oneshot(desc, action);
1201 	if (!IS_ENABLED(CONFIG_PREEMPT_RT))
1202 		local_irq_enable();
1203 	local_bh_enable();
1204 	return ret;
1205 }
1206 
1207 /*
1208  * Interrupts explicitly requested as threaded interrupts want to be
1209  * preemptible - many of them need to sleep and wait for slow busses to
1210  * complete.
1211  */
1212 static irqreturn_t irq_thread_fn(struct irq_desc *desc,
1213 		struct irqaction *action)
1214 {
1215 	irqreturn_t ret;
1216 
1217 	ret = action->thread_fn(action->irq, action->dev_id);
1218 	if (ret == IRQ_HANDLED)
1219 		atomic_inc(&desc->threads_handled);
1220 
1221 	irq_finalize_oneshot(desc, action);
1222 	return ret;
1223 }
1224 
1225 void wake_threads_waitq(struct irq_desc *desc)
1226 {
1227 	if (atomic_dec_and_test(&desc->threads_active))
1228 		wake_up(&desc->wait_for_threads);
1229 }
1230 
1231 static void irq_thread_dtor(struct callback_head *unused)
1232 {
1233 	struct task_struct *tsk = current;
1234 	struct irq_desc *desc;
1235 	struct irqaction *action;
1236 
1237 	if (WARN_ON_ONCE(!(current->flags & PF_EXITING)))
1238 		return;
1239 
1240 	action = kthread_data(tsk);
1241 
1242 	pr_err("exiting task \"%s\" (%d) is an active IRQ thread (irq %d)\n",
1243 	       tsk->comm, tsk->pid, action->irq);
1244 
1245 
1246 	desc = irq_to_desc(action->irq);
1247 	/*
1248 	 * If IRQTF_RUNTHREAD is set, we need to decrement
1249 	 * desc->threads_active and wake possible waiters.
1250 	 */
1251 	if (test_and_clear_bit(IRQTF_RUNTHREAD, &action->thread_flags))
1252 		wake_threads_waitq(desc);
1253 
1254 	/* Prevent a stale desc->threads_oneshot */
1255 	irq_finalize_oneshot(desc, action);
1256 }
1257 
1258 static void irq_wake_secondary(struct irq_desc *desc, struct irqaction *action)
1259 {
1260 	struct irqaction *secondary = action->secondary;
1261 
1262 	if (WARN_ON_ONCE(!secondary))
1263 		return;
1264 
1265 	raw_spin_lock_irq(&desc->lock);
1266 	__irq_wake_thread(desc, secondary);
1267 	raw_spin_unlock_irq(&desc->lock);
1268 }
1269 
1270 /*
1271  * Internal function to notify that an interrupt thread is ready.
1272  */
1273 static void irq_thread_set_ready(struct irq_desc *desc,
1274 				 struct irqaction *action)
1275 {
1276 	set_bit(IRQTF_READY, &action->thread_flags);
1277 	wake_up(&desc->wait_for_threads);
1278 }
1279 
1280 /*
1281  * Internal function to wake up an interrupt thread and wait until it is
1282  * ready.
1283  */
1284 static void wake_up_and_wait_for_irq_thread_ready(struct irq_desc *desc,
1285 						  struct irqaction *action)
1286 {
1287 	if (!action || !action->thread)
1288 		return;
1289 
1290 	wake_up_process(action->thread);
1291 	wait_event(desc->wait_for_threads,
1292 		   test_bit(IRQTF_READY, &action->thread_flags));
1293 }
1294 
1295 /*
1296  * Interrupt handler thread
1297  */
1298 static int irq_thread(void *data)
1299 {
1300 	struct callback_head on_exit_work;
1301 	struct irqaction *action = data;
1302 	struct irq_desc *desc = irq_to_desc(action->irq);
1303 	irqreturn_t (*handler_fn)(struct irq_desc *desc,
1304 			struct irqaction *action);
1305 
1306 	irq_thread_set_ready(desc, action);
1307 
1308 	sched_set_fifo(current);
1309 
1310 	if (force_irqthreads() && test_bit(IRQTF_FORCED_THREAD,
1311 					   &action->thread_flags))
1312 		handler_fn = irq_forced_thread_fn;
1313 	else
1314 		handler_fn = irq_thread_fn;
1315 
1316 	init_task_work(&on_exit_work, irq_thread_dtor);
1317 	task_work_add(current, &on_exit_work, TWA_NONE);
1318 
1319 	irq_thread_check_affinity(desc, action);
1320 
1321 	while (!irq_wait_for_interrupt(action)) {
1322 		irqreturn_t action_ret;
1323 
1324 		irq_thread_check_affinity(desc, action);
1325 
1326 		action_ret = handler_fn(desc, action);
1327 		if (action_ret == IRQ_WAKE_THREAD)
1328 			irq_wake_secondary(desc, action);
1329 
1330 		wake_threads_waitq(desc);
1331 	}
1332 
1333 	/*
1334 	 * This is the regular exit path. __free_irq() is stopping the
1335 	 * thread via kthread_stop() after calling
1336 	 * synchronize_hardirq(). So neither IRQTF_RUNTHREAD nor the
1337 	 * oneshot mask bit can be set.
1338 	 */
1339 	task_work_cancel_func(current, irq_thread_dtor);
1340 	return 0;
1341 }
1342 
1343 /**
1344  *	irq_wake_thread - wake the irq thread for the action identified by dev_id
1345  *	@irq:		Interrupt line
1346  *	@dev_id:	Device identity for which the thread should be woken
1347  *
1348  */
1349 void irq_wake_thread(unsigned int irq, void *dev_id)
1350 {
1351 	struct irq_desc *desc = irq_to_desc(irq);
1352 	struct irqaction *action;
1353 	unsigned long flags;
1354 
1355 	if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc)))
1356 		return;
1357 
1358 	raw_spin_lock_irqsave(&desc->lock, flags);
1359 	for_each_action_of_desc(desc, action) {
1360 		if (action->dev_id == dev_id) {
1361 			if (action->thread)
1362 				__irq_wake_thread(desc, action);
1363 			break;
1364 		}
1365 	}
1366 	raw_spin_unlock_irqrestore(&desc->lock, flags);
1367 }
1368 EXPORT_SYMBOL_GPL(irq_wake_thread);
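1369 
/*
 * Usage sketch (illustrative only): kicking the threaded handler that was
 * registered with @dev_id, e.g. from a timer or another completion path which
 * noticed work the thread should pick up. The helper name is hypothetical.
 */
static void __maybe_unused example_kick_thread(unsigned int irq, void *dev_id)
{
	irq_wake_thread(irq, dev_id);
}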
1369 
1370 static int irq_setup_forced_threading(struct irqaction *new)
1371 {
1372 	if (!force_irqthreads())
1373 		return 0;
1374 	if (new->flags & (IRQF_NO_THREAD | IRQF_PERCPU | IRQF_ONESHOT))
1375 		return 0;
1376 
1377 	/*
1378 	 * No further action required for interrupts which are requested as
1379 	 * threaded interrupts already
1380 	 */
1381 	if (new->handler == irq_default_primary_handler)
1382 		return 0;
1383 
1384 	new->flags |= IRQF_ONESHOT;
1385 
1386 	/*
1387 	 * Handle the case where we have a real primary handler and a
1388 	 * thread handler. We force-thread them as well by creating a
1389 	 * secondary action.
1390 	 */
1391 	if (new->handler && new->thread_fn) {
1392 		/* Allocate the secondary action */
1393 		new->secondary = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
1394 		if (!new->secondary)
1395 			return -ENOMEM;
1396 		new->secondary->handler = irq_forced_secondary_handler;
1397 		new->secondary->thread_fn = new->thread_fn;
1398 		new->secondary->dev_id = new->dev_id;
1399 		new->secondary->irq = new->irq;
1400 		new->secondary->name = new->name;
1401 	}
1402 	/* Deal with the primary handler */
1403 	set_bit(IRQTF_FORCED_THREAD, &new->thread_flags);
1404 	new->thread_fn = new->handler;
1405 	new->handler = irq_default_primary_handler;
1406 	return 0;
1407 }
1408 
1409 static int irq_request_resources(struct irq_desc *desc)
1410 {
1411 	struct irq_data *d = &desc->irq_data;
1412 	struct irq_chip *c = d->chip;
1413 
1414 	return c->irq_request_resources ? c->irq_request_resources(d) : 0;
1415 }
1416 
1417 static void irq_release_resources(struct irq_desc *desc)
1418 {
1419 	struct irq_data *d = &desc->irq_data;
1420 	struct irq_chip *c = d->chip;
1421 
1422 	if (c->irq_release_resources)
1423 		c->irq_release_resources(d);
1424 }
1425 
1426 static bool irq_supports_nmi(struct irq_desc *desc)
1427 {
1428 	struct irq_data *d = irq_desc_get_irq_data(desc);
1429 
1430 #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
1431 	/* Only IRQs directly managed by the root irqchip can be set as NMI */
1432 	if (d->parent_data)
1433 		return false;
1434 #endif
1435 	/* Don't support NMIs for chips behind a slow bus */
1436 	if (d->chip->irq_bus_lock || d->chip->irq_bus_sync_unlock)
1437 		return false;
1438 
1439 	return d->chip->flags & IRQCHIP_SUPPORTS_NMI;
1440 }
1441 
1442 static int irq_nmi_setup(struct irq_desc *desc)
1443 {
1444 	struct irq_data *d = irq_desc_get_irq_data(desc);
1445 	struct irq_chip *c = d->chip;
1446 
1447 	return c->irq_nmi_setup ? c->irq_nmi_setup(d) : -EINVAL;
1448 }
1449 
1450 static void irq_nmi_teardown(struct irq_desc *desc)
1451 {
1452 	struct irq_data *d = irq_desc_get_irq_data(desc);
1453 	struct irq_chip *c = d->chip;
1454 
1455 	if (c->irq_nmi_teardown)
1456 		c->irq_nmi_teardown(d);
1457 }
1458 
1459 static int
1460 setup_irq_thread(struct irqaction *new, unsigned int irq, bool secondary)
1461 {
1462 	struct task_struct *t;
1463 
1464 	if (!secondary) {
1465 		t = kthread_create(irq_thread, new, "irq/%d-%s", irq,
1466 				   new->name);
1467 	} else {
1468 		t = kthread_create(irq_thread, new, "irq/%d-s-%s", irq,
1469 				   new->name);
1470 	}
1471 
1472 	if (IS_ERR(t))
1473 		return PTR_ERR(t);
1474 
1475 	/*
1476 	 * We keep the reference to the task struct even if
1477 	 * the thread dies to avoid that the interrupt code
1478 	 * references an already freed task_struct.
1479 	 */
1480 	new->thread = get_task_struct(t);
1481 	/*
1482 	 * Tell the thread to set its affinity. This is
1483 	 * important for shared interrupt handlers as we do
1484 	 * not invoke setup_affinity() for the secondary
1485 	 * handlers as everything is already set up. Even for
1486 	 * interrupts marked with IRQF_NO_BALANCE this is
1487 	 * correct as we want the thread to move to the cpu(s)
1488 	 * on which the requesting code placed the interrupt.
1489 	 */
1490 	set_bit(IRQTF_AFFINITY, &new->thread_flags);
1491 	return 0;
1492 }
1493 
1494 /*
1495  * Internal function to register an irqaction - typically used to
1496  * allocate special interrupts that are part of the architecture.
1497  *
1498  * Locking rules:
1499  *
1500  * desc->request_mutex	Provides serialization against a concurrent free_irq()
1501  *   chip_bus_lock	Provides serialization for slow bus operations
1502  *     desc->lock	Provides serialization against hard interrupts
1503  *
1504  * chip_bus_lock and desc->lock are sufficient for all other management and
1505  * interrupt related functions. desc->request_mutex solely serializes
1506  * request/free_irq().
1507  */
1508 static int
1509 __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
1510 {
1511 	struct irqaction *old, **old_ptr;
1512 	unsigned long flags, thread_mask = 0;
1513 	int ret, nested, shared = 0;
1514 
1515 	if (!desc)
1516 		return -EINVAL;
1517 
1518 	if (desc->irq_data.chip == &no_irq_chip)
1519 		return -ENOSYS;
1520 	if (!try_module_get(desc->owner))
1521 		return -ENODEV;
1522 
1523 	new->irq = irq;
1524 
1525 	/*
1526 	 * If the trigger type is not specified by the caller,
1527 	 * then use the default for this interrupt.
1528 	 */
1529 	if (!(new->flags & IRQF_TRIGGER_MASK))
1530 		new->flags |= irqd_get_trigger_type(&desc->irq_data);
1531 
1532 	/*
1533 	 * Check whether the interrupt nests into another interrupt
1534 	 * thread.
1535 	 */
1536 	nested = irq_settings_is_nested_thread(desc);
1537 	if (nested) {
1538 		if (!new->thread_fn) {
1539 			ret = -EINVAL;
1540 			goto out_mput;
1541 		}
1542 		/*
1543 		 * Replace the primary handler which was provided from
1544 		 * the driver for non nested interrupt handling by the
1545 		 * dummy function which warns when called.
1546 		 */
1547 		new->handler = irq_nested_primary_handler;
1548 	} else {
1549 		if (irq_settings_can_thread(desc)) {
1550 			ret = irq_setup_forced_threading(new);
1551 			if (ret)
1552 				goto out_mput;
1553 		}
1554 	}
1555 
1556 	/*
1557 	 * Create a handler thread when a thread function is supplied
1558 	 * and the interrupt does not nest into another interrupt
1559 	 * thread.
1560 	 */
1561 	if (new->thread_fn && !nested) {
1562 		ret = setup_irq_thread(new, irq, false);
1563 		if (ret)
1564 			goto out_mput;
1565 		if (new->secondary) {
1566 			ret = setup_irq_thread(new->secondary, irq, true);
1567 			if (ret)
1568 				goto out_thread;
1569 		}
1570 	}
1571 
1572 	/*
1573 	 * Drivers are often written to work w/o knowledge about the
1574 	 * underlying irq chip implementation, so a request for a
1575 	 * threaded irq without a primary hard irq context handler
1576 	 * requires the ONESHOT flag to be set. Some irq chips like
1577 	 * MSI based interrupts are per se one shot safe. Check the
1578 	 * chip flags, so we can avoid the unmask dance at the end of
1579 	 * the threaded handler for those.
1580 	 */
1581 	if (desc->irq_data.chip->flags & IRQCHIP_ONESHOT_SAFE)
1582 		new->flags &= ~IRQF_ONESHOT;
1583 
1584 	/*
1585 	 * Protects against a concurrent __free_irq() call which might wait
1586 	 * for synchronize_hardirq() to complete without holding the optional
1587 	 * chip bus lock and desc->lock. Also protects against handing out
1588 	 * a recycled oneshot thread_mask bit while it's still in use by
1589 	 * its previous owner.
1590 	 */
1591 	mutex_lock(&desc->request_mutex);
1592 
1593 	/*
1594 	 * Acquire bus lock as the irq_request_resources() callback below
1595 	 * might rely on the serialization or the magic power management
1596 	 * functions which are abusing the irq_bus_lock() callback,
1597 	 */
1598 	chip_bus_lock(desc);
1599 
1600 	/* First installed action requests resources. */
1601 	if (!desc->action) {
1602 		ret = irq_request_resources(desc);
1603 		if (ret) {
1604 			pr_err("Failed to request resources for %s (irq %d) on irqchip %s\n",
1605 			       new->name, irq, desc->irq_data.chip->name);
1606 			goto out_bus_unlock;
1607 		}
1608 	}
1609 
1610 	/*
1611 	 * The following block of code has to be executed atomically
1612 	 * protected against a concurrent interrupt and any of the other
1613 	 * management calls which are not serialized via
1614 	 * desc->request_mutex or the optional bus lock.
1615 	 */
1616 	raw_spin_lock_irqsave(&desc->lock, flags);
1617 	old_ptr = &desc->action;
1618 	old = *old_ptr;
1619 	if (old) {
1620 		/*
1621 		 * Can't share interrupts unless both agree to and are
1622 		 * the same type (level, edge, polarity). So both flag
1623 		 * fields must have IRQF_SHARED set and the bits which
1624 		 * set the trigger type must match. Also all must
1625 		 * agree on ONESHOT.
1626 		 * Interrupt lines used for NMIs cannot be shared.
1627 		 */
1628 		unsigned int oldtype;
1629 
1630 		if (desc->istate & IRQS_NMI) {
1631 			pr_err("Invalid attempt to share NMI for %s (irq %d) on irqchip %s.\n",
1632 				new->name, irq, desc->irq_data.chip->name);
1633 			ret = -EINVAL;
1634 			goto out_unlock;
1635 		}
1636 
1637 		/*
1638 		 * If nobody did set the configuration before, inherit
1639 		 * the one provided by the requester.
1640 		 */
1641 		if (irqd_trigger_type_was_set(&desc->irq_data)) {
1642 			oldtype = irqd_get_trigger_type(&desc->irq_data);
1643 		} else {
1644 			oldtype = new->flags & IRQF_TRIGGER_MASK;
1645 			irqd_set_trigger_type(&desc->irq_data, oldtype);
1646 		}
1647 
1648 		if (!((old->flags & new->flags) & IRQF_SHARED) ||
1649 		    (oldtype != (new->flags & IRQF_TRIGGER_MASK)) ||
1650 		    ((old->flags ^ new->flags) & IRQF_ONESHOT))
1651 			goto mismatch;
1652 
1653 		/* All handlers must agree on per-cpuness */
1654 		if ((old->flags & IRQF_PERCPU) !=
1655 		    (new->flags & IRQF_PERCPU))
1656 			goto mismatch;
1657 
1658 		/* add new interrupt at end of irq queue */
1659 		do {
1660 			/*
1661 			 * Or all existing action->thread_mask bits,
1662 			 * so we can find the next zero bit for this
1663 			 * new action.
1664 			 */
1665 			thread_mask |= old->thread_mask;
1666 			old_ptr = &old->next;
1667 			old = *old_ptr;
1668 		} while (old);
1669 		shared = 1;
1670 	}
1671 
1672 	/*
1673 	 * Setup the thread mask for this irqaction for ONESHOT. For
1674 	 * !ONESHOT irqs the thread mask is 0 so we can avoid a
1675 	 * conditional in irq_wake_thread().
1676 	 */
1677 	if (new->flags & IRQF_ONESHOT) {
1678 		/*
1679 		 * Unlikely to have 32 resp 64 irqs sharing one line,
1680 		 * but who knows.
1681 		 */
1682 		if (thread_mask == ~0UL) {
1683 			ret = -EBUSY;
1684 			goto out_unlock;
1685 		}
1686 		/*
1687 		 * The thread_mask for the action is or'ed to
1688 		 * desc->thread_active to indicate that the
1689 		 * IRQF_ONESHOT thread handler has been woken, but not
1690 		 * yet finished. The bit is cleared when a thread
1691 		 * completes. When all threads of a shared interrupt
1692 		 * line have completed, desc->threads_active becomes
1693 		 * zero and the interrupt line is unmasked. See
1694 		 * handle.c:irq_wake_thread() for further information.
1695 		 *
1696 		 * If no thread is woken by primary (hard irq context)
1697 		 * interrupt handlers, then desc->threads_active is
1698 		 * also checked for zero to unmask the irq line in the
1699 		 * affected hard irq flow handlers
1700 		 * (handle_[fasteoi|level]_irq).
1701 		 *
1702 		 * The new action gets the first zero bit of
1703 		 * thread_mask assigned. See the loop above which or's
1704 		 * all existing action->thread_mask bits.
1705 		 */
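		/*
		 * For example, if the already installed actions own bits
		 * 0, 1 and 3, thread_mask is 0xb, ffz() returns 2 and
		 * this action gets thread_mask = 0x4.
		 */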
1706 		new->thread_mask = 1UL << ffz(thread_mask);
1707 
1708 	} else if (new->handler == irq_default_primary_handler &&
1709 		   !(desc->irq_data.chip->flags & IRQCHIP_ONESHOT_SAFE)) {
1710 		/*
1711 		 * The interrupt was requested with handler = NULL, so
1712 		 * we use the default primary handler for it. But it
1713 		 * does not have the oneshot flag set. In combination
1714 		 * with level interrupts this is deadly, because the
1715 		 * default primary handler just wakes the thread, then
1716 		 * the irq line is re-enabled, but the device still
1717 		 * has the level irq asserted. Rinse and repeat....
1718 		 *
1719 		 * While this works for edge type interrupts, we play
1720 		 * it safe and reject unconditionally because we can't
1721 		 * say for sure which type this interrupt really
1722 		 * has. The type flags are unreliable as the
1723 		 * underlying chip implementation can override them.
1724 		 */
1725 		pr_err("Threaded irq requested with handler=NULL and !ONESHOT for %s (irq %d)\n",
1726 		       new->name, irq);
1727 		ret = -EINVAL;
1728 		goto out_unlock;
1729 	}
1730 
1731 	if (!shared) {
1732 		/* Set up the type (level, edge, polarity) if configured: */
1733 		if (new->flags & IRQF_TRIGGER_MASK) {
1734 			ret = __irq_set_trigger(desc,
1735 						new->flags & IRQF_TRIGGER_MASK);
1736 
1737 			if (ret)
1738 				goto out_unlock;
1739 		}
1740 
1741 		/*
1742 		 * Activate the interrupt. That activation must happen
1743 		 * independently of IRQ_NOAUTOEN. request_irq() can fail
1744 		 * and the callers are supposed to handle
1745 		 * that. enable_irq() of an interrupt requested with
1746 		 * IRQ_NOAUTOEN is not supposed to fail. The activation
1747 		 * keeps it in shutdown mode; it merely associates
1748 		 * resources if necessary and if that's not possible it
1749 		 * fails. Interrupts which are in managed shutdown mode
1750 		 * will simply ignore that activation request.
1751 		 */
1752 		ret = irq_activate(desc);
1753 		if (ret)
1754 			goto out_unlock;
1755 
1756 		desc->istate &= ~(IRQS_AUTODETECT | IRQS_SPURIOUS_DISABLED | \
1757 				  IRQS_ONESHOT | IRQS_WAITING);
1758 		irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);
1759 
1760 		if (new->flags & IRQF_PERCPU) {
1761 			irqd_set(&desc->irq_data, IRQD_PER_CPU);
1762 			irq_settings_set_per_cpu(desc);
1763 			if (new->flags & IRQF_NO_DEBUG)
1764 				irq_settings_set_no_debug(desc);
1765 		}
1766 
1767 		if (noirqdebug)
1768 			irq_settings_set_no_debug(desc);
1769 
1770 		if (new->flags & IRQF_ONESHOT)
1771 			desc->istate |= IRQS_ONESHOT;
1772 
1773 		/* Exclude IRQ from balancing if requested */
1774 		if (new->flags & IRQF_NOBALANCING) {
1775 			irq_settings_set_no_balancing(desc);
1776 			irqd_set(&desc->irq_data, IRQD_NO_BALANCING);
1777 		}
1778 
1779 		if (!(new->flags & IRQF_NO_AUTOEN) &&
1780 		    irq_settings_can_autoenable(desc)) {
1781 			irq_startup(desc, IRQ_RESEND, IRQ_START_COND);
1782 		} else {
1783 			/*
1784 			 * Shared interrupts do not go well with disabling
1785 			 * auto enable. The sharing interrupt might request
1786 			 * the line while it's still disabled and then wait for
1787 			 * interrupts forever.
1788 			 */
1789 			WARN_ON_ONCE(new->flags & IRQF_SHARED);
1790 			/* Undo nested disables: */
1791 			desc->depth = 1;
1792 		}
1793 
1794 	} else if (new->flags & IRQF_TRIGGER_MASK) {
1795 		unsigned int nmsk = new->flags & IRQF_TRIGGER_MASK;
1796 		unsigned int omsk = irqd_get_trigger_type(&desc->irq_data);
1797 
1798 		if (nmsk != omsk)
1799 			/* hope the handler works with the current trigger mode */
1800 			pr_warn("irq %d uses trigger mode %u; requested %u\n",
1801 				irq, omsk, nmsk);
1802 	}
1803 
1804 	*old_ptr = new;
1805 
1806 	irq_pm_install_action(desc, new);
1807 
1808 	/* Reset broken irq detection when installing new handler */
1809 	desc->irq_count = 0;
1810 	desc->irqs_unhandled = 0;
1811 
1812 	/*
1813 	 * Check whether we disabled the irq via the spurious handler
1814 	 * before. Reenable it and give it another chance.
1815 	 */
1816 	if (shared && (desc->istate & IRQS_SPURIOUS_DISABLED)) {
1817 		desc->istate &= ~IRQS_SPURIOUS_DISABLED;
1818 		__enable_irq(desc);
1819 	}
1820 
1821 	raw_spin_unlock_irqrestore(&desc->lock, flags);
1822 	chip_bus_sync_unlock(desc);
1823 	mutex_unlock(&desc->request_mutex);
1824 
1825 	irq_setup_timings(desc, new);
1826 
1827 	wake_up_and_wait_for_irq_thread_ready(desc, new);
1828 	wake_up_and_wait_for_irq_thread_ready(desc, new->secondary);
1829 
1830 	register_irq_proc(irq, desc);
1831 	new->dir = NULL;
1832 	register_handler_proc(irq, new);
1833 	return 0;
1834 
1835 mismatch:
1836 	if (!(new->flags & IRQF_PROBE_SHARED)) {
1837 		pr_err("Flags mismatch irq %d. %08x (%s) vs. %08x (%s)\n",
1838 		       irq, new->flags, new->name, old->flags, old->name);
1839 #ifdef CONFIG_DEBUG_SHIRQ
1840 		dump_stack();
1841 #endif
1842 	}
1843 	ret = -EBUSY;
1844 
1845 out_unlock:
1846 	raw_spin_unlock_irqrestore(&desc->lock, flags);
1847 
1848 	if (!desc->action)
1849 		irq_release_resources(desc);
1850 out_bus_unlock:
1851 	chip_bus_sync_unlock(desc);
1852 	mutex_unlock(&desc->request_mutex);
1853 
1854 out_thread:
1855 	if (new->thread) {
1856 		struct task_struct *t = new->thread;
1857 
1858 		new->thread = NULL;
1859 		kthread_stop_put(t);
1860 	}
1861 	if (new->secondary && new->secondary->thread) {
1862 		struct task_struct *t = new->secondary->thread;
1863 
1864 		new->secondary->thread = NULL;
1865 		kthread_stop_put(t);
1866 	}
1867 out_mput:
1868 	module_put(desc->owner);
1869 	return ret;
1870 }
1871 
1872 /*
1873  * Internal function to unregister an irqaction - used to free
1874  * regular and special interrupts that are part of the architecture.
1875  */
__free_irq(struct irq_desc * desc,void * dev_id)1876 static struct irqaction *__free_irq(struct irq_desc *desc, void *dev_id)
1877 {
1878 	unsigned irq = desc->irq_data.irq;
1879 	struct irqaction *action, **action_ptr;
1880 	unsigned long flags;
1881 
1882 	WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq);
1883 
1884 	mutex_lock(&desc->request_mutex);
1885 	chip_bus_lock(desc);
1886 	raw_spin_lock_irqsave(&desc->lock, flags);
1887 
1888 	/*
1889 	 * There can be multiple actions per IRQ descriptor, find the right
1890 	 * one based on the dev_id:
1891 	 */
1892 	action_ptr = &desc->action;
1893 	for (;;) {
1894 		action = *action_ptr;
1895 
1896 		if (!action) {
1897 			WARN(1, "Trying to free already-free IRQ %d\n", irq);
1898 			raw_spin_unlock_irqrestore(&desc->lock, flags);
1899 			chip_bus_sync_unlock(desc);
1900 			mutex_unlock(&desc->request_mutex);
1901 			return NULL;
1902 		}
1903 
1904 		if (action->dev_id == dev_id)
1905 			break;
1906 		action_ptr = &action->next;
1907 	}
1908 
1909 	/* Found it - now remove it from the list of entries: */
1910 	*action_ptr = action->next;
1911 
1912 	irq_pm_remove_action(desc, action);
1913 
1914 	/* If this was the last handler, shut down the IRQ line: */
1915 	if (!desc->action) {
1916 		irq_settings_clr_disable_unlazy(desc);
1917 		/* Only shutdown. Deactivate after synchronize_hardirq() */
1918 		irq_shutdown(desc);
1919 	}
1920 
1921 #ifdef CONFIG_SMP
1922 	/* make sure affinity_hint is cleaned up */
1923 	if (WARN_ON_ONCE(desc->affinity_hint))
1924 		desc->affinity_hint = NULL;
1925 #endif
1926 
1927 	raw_spin_unlock_irqrestore(&desc->lock, flags);
1928 	/*
1929 	 * Drop bus_lock here so the changes which were done in the chip
1930 	 * callbacks above are synced out to the irq chips which hang
1931 	 * behind a slow bus (I2C, SPI) before calling synchronize_hardirq().
1932 	 *
1933 	 * Aside of that the bus_lock can also be taken from the threaded
1934 	 * handler in irq_finalize_oneshot() which results in a deadlock
1935 	 * because kthread_stop() would wait forever for the thread to
1936 	 * complete, which is blocked on the bus lock.
1937 	 *
1938 	 * The still held desc->request_mutex protects against a
1939 	 * concurrent request_irq() of this irq so the release of resources
1940 	 * and timing data is properly serialized.
1941 	 */
1942 	chip_bus_sync_unlock(desc);
1943 
1944 	unregister_handler_proc(irq, action);
1945 
1946 	/*
1947 	 * Make sure it's not being used on another CPU and if the chip
1948 	 * supports it also make sure that there is no (not yet serviced)
1949 	 * interrupt in flight at the hardware level.
1950 	 */
1951 	__synchronize_irq(desc);
1952 
1953 #ifdef CONFIG_DEBUG_SHIRQ
1954 	/*
1955 	 * It's a shared IRQ -- the driver ought to be prepared for an IRQ
1956 	 * event to happen even now that it's being freed, so let's make sure that
1957 	 * is so by doing an extra call to the handler ....
1958 	 *
1959 	 * ( We do this after actually deregistering it, to make sure that a
1960 	 *   'real' IRQ doesn't run in parallel with our fake. )
1961 	 */
1962 	if (action->flags & IRQF_SHARED) {
1963 		local_irq_save(flags);
1964 		action->handler(irq, dev_id);
1965 		local_irq_restore(flags);
1966 	}
1967 #endif
1968 
1969 	/*
1970 	 * The action has already been removed above, but the thread writes
1971 	 * its oneshot mask bit when it completes. However, request_mutex is
1972 	 * held across this, which prevents __setup_irq() from handing out
1973 	 * the same bit to a newly requested action.
1974 	 */
1975 	if (action->thread) {
1976 		kthread_stop_put(action->thread);
1977 		if (action->secondary && action->secondary->thread)
1978 			kthread_stop_put(action->secondary->thread);
1979 	}
1980 
1981 	/* Last action releases resources */
1982 	if (!desc->action) {
1983 		/*
1984 		 * Reacquire bus lock as irq_release_resources() might
1985 		 * require it to deallocate resources over the slow bus.
1986 		 */
1987 		chip_bus_lock(desc);
1988 		/*
1989 		 * There is no interrupt on the fly anymore. Deactivate it
1990 		 * completely.
1991 		 */
1992 		raw_spin_lock_irqsave(&desc->lock, flags);
1993 		irq_domain_deactivate_irq(&desc->irq_data);
1994 		raw_spin_unlock_irqrestore(&desc->lock, flags);
1995 
1996 		irq_release_resources(desc);
1997 		chip_bus_sync_unlock(desc);
1998 		irq_remove_timings(desc);
1999 	}
2000 
2001 	mutex_unlock(&desc->request_mutex);
2002 
2003 	irq_chip_pm_put(&desc->irq_data);
2004 	module_put(desc->owner);
2005 	kfree(action->secondary);
2006 	return action;
2007 }
2008 
2009 /**
2010  *	free_irq - free an interrupt allocated with request_irq
2011  *	@irq: Interrupt line to free
2012  *	@dev_id: Device identity to free
2013  *
2014  *	Remove an interrupt handler. The handler is removed and if the
2015  *	interrupt line is no longer in use by any driver it is disabled.
2016  *	On a shared IRQ the caller must ensure the interrupt is disabled
2017  *	on the card it drives before calling this function. The function
2018  *	does not return until any executing interrupts for this IRQ
2019  *	have completed.
2020  *
2021  *	This function must not be called from interrupt context.
2022  *
2023  *	Returns the devname argument passed to request_irq.
2024  */
free_irq(unsigned int irq,void * dev_id)2025 const void *free_irq(unsigned int irq, void *dev_id)
2026 {
2027 	struct irq_desc *desc = irq_to_desc(irq);
2028 	struct irqaction *action;
2029 	const char *devname;
2030 
2031 	if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc)))
2032 		return NULL;
2033 
2034 #ifdef CONFIG_SMP
2035 	if (WARN_ON(desc->affinity_notify))
2036 		desc->affinity_notify = NULL;
2037 #endif
2038 
2039 	action = __free_irq(desc, dev_id);
2040 
2041 	if (!action)
2042 		return NULL;
2043 
2044 	devname = action->name;
2045 	kfree(action);
2046 	return devname;
2047 }
2048 EXPORT_SYMBOL(free_irq);
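
/*
 * Usage sketch for the above (the "foo" driver, its foo_hw_quiesce()
 * helper and the foo->irq field are illustrative assumptions, not part
 * of this file): the dev_id cookie must match the one given to
 * request_irq(), which is how the right handler is picked on a shared
 * line.
 *
 *	static void foo_remove(struct foo *foo)
 *	{
 *		foo_hw_quiesce(foo);		// stop the device raising irqs
 *		free_irq(foo->irq, foo);	// waits for running handlers
 *	}
 */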
2049 
2050 /* This function must be called with desc->lock held */
__cleanup_nmi(unsigned int irq,struct irq_desc * desc)2051 static const void *__cleanup_nmi(unsigned int irq, struct irq_desc *desc)
2052 {
2053 	const char *devname = NULL;
2054 
2055 	desc->istate &= ~IRQS_NMI;
2056 
2057 	if (!WARN_ON(desc->action == NULL)) {
2058 		irq_pm_remove_action(desc, desc->action);
2059 		devname = desc->action->name;
2060 		unregister_handler_proc(irq, desc->action);
2061 
2062 		kfree(desc->action);
2063 		desc->action = NULL;
2064 	}
2065 
2066 	irq_settings_clr_disable_unlazy(desc);
2067 	irq_shutdown_and_deactivate(desc);
2068 
2069 	irq_release_resources(desc);
2070 
2071 	irq_chip_pm_put(&desc->irq_data);
2072 	module_put(desc->owner);
2073 
2074 	return devname;
2075 }
2076 
free_nmi(unsigned int irq,void * dev_id)2077 const void *free_nmi(unsigned int irq, void *dev_id)
2078 {
2079 	struct irq_desc *desc = irq_to_desc(irq);
2080 	unsigned long flags;
2081 	const void *devname;
2082 
2083 	if (!desc || WARN_ON(!(desc->istate & IRQS_NMI)))
2084 		return NULL;
2085 
2086 	if (WARN_ON(irq_settings_is_per_cpu_devid(desc)))
2087 		return NULL;
2088 
2089 	/* NMI still enabled */
2090 	if (WARN_ON(desc->depth == 0))
2091 		disable_nmi_nosync(irq);
2092 
2093 	raw_spin_lock_irqsave(&desc->lock, flags);
2094 
2095 	irq_nmi_teardown(desc);
2096 	devname = __cleanup_nmi(irq, desc);
2097 
2098 	raw_spin_unlock_irqrestore(&desc->lock, flags);
2099 
2100 	return devname;
2101 }
2102 
2103 /**
2104  *	request_threaded_irq - allocate an interrupt line
2105  *	@irq: Interrupt line to allocate
2106  *	@handler: Function to be called when the IRQ occurs.
2107  *		  Primary handler for threaded interrupts.
2108  *		  If handler is NULL and thread_fn != NULL
2109  *		  the default primary handler is installed.
2110  *	@thread_fn: Function called from the irq handler thread
2111  *		    If NULL, no irq thread is created
2112  *	@irqflags: Interrupt type flags
2113  *	@devname: An ascii name for the claiming device
2114  *	@dev_id: A cookie passed back to the handler function
2115  *
2116  *	This call allocates interrupt resources and enables the
2117  *	interrupt line and IRQ handling. From the point this
2118  *	call is made your handler function may be invoked. Since
2119  *	your handler function must clear any interrupt the board
2120  *	raises, you must take care both to initialise your hardware
2121  *	and to set up the interrupt handler in the right order.
2122  *
2123  *	If you want to set up a threaded irq handler for your device
2124  *	then you need to supply @handler and @thread_fn. @handler is
2125  *	still called in hard interrupt context and has to check
2126  *	whether the interrupt originates from the device. If yes it
2127  *	needs to disable the interrupt on the device and return
2128  *	IRQ_WAKE_THREAD which will wake up the handler thread and run
2129  *	@thread_fn. This split handler design is necessary to support
2130  *	shared interrupts.
2131  *
2132  *	Dev_id must be globally unique. Normally the address of the
2133  *	device data structure is used as the cookie. Since the handler
2134  *	receives this value it makes sense to use it.
2135  *
2136  *	If your interrupt is shared you must pass a non NULL dev_id
2137  *	If your interrupt is shared you must pass a non-NULL dev_id
2138  *
2139  *	Flags:
2140  *
2141  *	IRQF_SHARED		Interrupt is shared
2142  *	IRQF_TRIGGER_*		Specify active edge(s) or level
2143  *	IRQF_ONESHOT		Run thread_fn with interrupt line masked
2144  */
request_threaded_irq(unsigned int irq,irq_handler_t handler,irq_handler_t thread_fn,unsigned long irqflags,const char * devname,void * dev_id)2145 int request_threaded_irq(unsigned int irq, irq_handler_t handler,
2146 			 irq_handler_t thread_fn, unsigned long irqflags,
2147 			 const char *devname, void *dev_id)
2148 {
2149 	struct irqaction *action;
2150 	struct irq_desc *desc;
2151 	int retval;
2152 
2153 	if (irq == IRQ_NOTCONNECTED)
2154 		return -ENOTCONN;
2155 
2156 	/*
2157 	 * Sanity-check: shared interrupts must pass in a real dev-ID,
2158 	 * otherwise we'll have trouble later trying to figure out
2159 	 * which interrupt is which (messes up the interrupt freeing
2160 	 * logic etc).
2161 	 *
2162 	 * Also shared interrupts do not go well with disabling auto enable.
2163 	 * The sharing interrupt might request it while it's still disabled
2164 	 * and then wait for interrupts forever.
2165 	 *
2166 	 * Also IRQF_COND_SUSPEND only makes sense for shared interrupts and
2167 	 * it cannot be set along with IRQF_NO_SUSPEND.
2168 	 */
2169 	if (((irqflags & IRQF_SHARED) && !dev_id) ||
2170 	    ((irqflags & IRQF_SHARED) && (irqflags & IRQF_NO_AUTOEN)) ||
2171 	    (!(irqflags & IRQF_SHARED) && (irqflags & IRQF_COND_SUSPEND)) ||
2172 	    ((irqflags & IRQF_NO_SUSPEND) && (irqflags & IRQF_COND_SUSPEND)))
2173 		return -EINVAL;
2174 
2175 	desc = irq_to_desc(irq);
2176 	if (!desc)
2177 		return -EINVAL;
2178 
2179 	if (!irq_settings_can_request(desc) ||
2180 	    WARN_ON(irq_settings_is_per_cpu_devid(desc)))
2181 		return -EINVAL;
2182 
2183 	if (!handler) {
2184 		if (!thread_fn)
2185 			return -EINVAL;
2186 		handler = irq_default_primary_handler;
2187 	}
2188 
2189 	action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
2190 	if (!action)
2191 		return -ENOMEM;
2192 
2193 	action->handler = handler;
2194 	action->thread_fn = thread_fn;
2195 	action->flags = irqflags;
2196 	action->name = devname;
2197 	action->dev_id = dev_id;
2198 
2199 	retval = irq_chip_pm_get(&desc->irq_data);
2200 	if (retval < 0) {
2201 		kfree(action);
2202 		return retval;
2203 	}
2204 
2205 	retval = __setup_irq(irq, desc, action);
2206 
2207 	if (retval) {
2208 		irq_chip_pm_put(&desc->irq_data);
2209 		kfree(action->secondary);
2210 		kfree(action);
2211 	}
2212 
2213 #ifdef CONFIG_DEBUG_SHIRQ_FIXME
2214 	if (!retval && (irqflags & IRQF_SHARED)) {
2215 		/*
2216 		 * It's a shared IRQ -- the driver ought to be prepared for it
2217 		 * to happen immediately, so let's make sure....
2218 		 * We disable the irq to make sure that a 'real' IRQ doesn't
2219 		 * run in parallel with our fake.
2220 		 */
2221 		unsigned long flags;
2222 
2223 		disable_irq(irq);
2224 		local_irq_save(flags);
2225 
2226 		handler(irq, dev_id);
2227 
2228 		local_irq_restore(flags);
2229 		enable_irq(irq);
2230 	}
2231 #endif
2232 	return retval;
2233 }
2234 EXPORT_SYMBOL(request_threaded_irq);
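
/*
 * Usage sketch of the split handler scheme described above (all foo_*
 * names are illustrative assumptions): the primary handler runs in hard
 * interrupt context, checks whether the device raised the interrupt,
 * silences it and returns IRQ_WAKE_THREAD; the sleepable work is then
 * done in the threaded handler.
 *
 *	static irqreturn_t foo_primary(int irq, void *dev_id)
 *	{
 *		struct foo *foo = dev_id;
 *
 *		if (!foo_hw_irq_pending(foo))
 *			return IRQ_NONE;	// not ours on a shared line
 *		foo_hw_mask_irq(foo);
 *		return IRQ_WAKE_THREAD;
 *	}
 *
 *	static irqreturn_t foo_thread(int irq, void *dev_id)
 *	{
 *		struct foo *foo = dev_id;
 *
 *		foo_process_events(foo);	// may sleep
 *		foo_hw_unmask_irq(foo);
 *		return IRQ_HANDLED;
 *	}
 *
 *	ret = request_threaded_irq(foo->irq, foo_primary, foo_thread,
 *				   IRQF_SHARED, "foo", foo);
 */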
2235 
2236 /**
2237  *	request_any_context_irq - allocate an interrupt line
2238  *	@irq: Interrupt line to allocate
2239  *	@handler: Function to be called when the IRQ occurs.
2240  *		  Threaded handler for threaded interrupts.
2241  *	@flags: Interrupt type flags
2242  *	@name: An ascii name for the claiming device
2243  *	@dev_id: A cookie passed back to the handler function
2244  *
2245  *	This call allocates interrupt resources and enables the
2246  *	interrupt line and IRQ handling. It selects either a
2247  *	hardirq or threaded handling method depending on the
2248  *	context.
2249  *
2250  *	On failure, it returns a negative value. On success,
2251  *	it returns either IRQC_IS_HARDIRQ or IRQC_IS_NESTED.
2252  */
request_any_context_irq(unsigned int irq,irq_handler_t handler,unsigned long flags,const char * name,void * dev_id)2253 int request_any_context_irq(unsigned int irq, irq_handler_t handler,
2254 			    unsigned long flags, const char *name, void *dev_id)
2255 {
2256 	struct irq_desc *desc;
2257 	int ret;
2258 
2259 	if (irq == IRQ_NOTCONNECTED)
2260 		return -ENOTCONN;
2261 
2262 	desc = irq_to_desc(irq);
2263 	if (!desc)
2264 		return -EINVAL;
2265 
2266 	if (irq_settings_is_nested_thread(desc)) {
2267 		ret = request_threaded_irq(irq, NULL, handler,
2268 					   flags, name, dev_id);
2269 		return !ret ? IRQC_IS_NESTED : ret;
2270 	}
2271 
2272 	ret = request_irq(irq, handler, flags, name, dev_id);
2273 	return !ret ? IRQC_IS_HARDIRQ : ret;
2274 }
2275 EXPORT_SYMBOL_GPL(request_any_context_irq);
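
/*
 * Usage sketch (illustrative): callers only need to distinguish failure
 * from success; the positive return value merely reports which handling
 * method was selected for the line.
 *
 *	ret = request_any_context_irq(irq, foo_handler, 0, "foo", foo);
 *	if (ret < 0)
 *		return ret;
 *	// ret is IRQC_IS_HARDIRQ or IRQC_IS_NESTED, both mean success
 */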
2276 
2277 /**
2278  *	request_nmi - allocate an interrupt line for NMI delivery
2279  *	@irq: Interrupt line to allocate
2280  *	@handler: Function to be called when the IRQ occurs.
2281  *		  Threaded handler for threaded interrupts.
2282  *	@irqflags: Interrupt type flags
2283  *	@name: An ascii name for the claiming device
2284  *	@dev_id: A cookie passed back to the handler function
2285  *
2286  *	This call allocates interrupt resources and enables the
2287  *	interrupt line and IRQ handling. It sets up the IRQ line
2288  *	to be handled as an NMI.
2289  *
2290  *	An interrupt line delivering NMIs cannot be shared and IRQ handling
2291  *	cannot be threaded.
2292  *
2293  *	Interrupt lines requested for NMI delivery must produce per-cpu
2294  *	interrupts and have the auto-enable setting disabled.
2295  *
2296  *	Dev_id must be globally unique. Normally the address of the
2297  *	device data structure is used as the cookie. Since the handler
2298  *	receives this value it makes sense to use it.
2299  *
2300  *	If the interrupt line cannot be used to deliver NMIs, the function
2301  *	will fail and return a negative value.
2302  */
request_nmi(unsigned int irq,irq_handler_t handler,unsigned long irqflags,const char * name,void * dev_id)2303 int request_nmi(unsigned int irq, irq_handler_t handler,
2304 		unsigned long irqflags, const char *name, void *dev_id)
2305 {
2306 	struct irqaction *action;
2307 	struct irq_desc *desc;
2308 	unsigned long flags;
2309 	int retval;
2310 
2311 	if (irq == IRQ_NOTCONNECTED)
2312 		return -ENOTCONN;
2313 
2314 	/* NMI cannot be shared, used for Polling */
2315 	/* NMI cannot be shared, nor be used for polling */
2316 		return -EINVAL;
2317 
2318 	if (!(irqflags & IRQF_PERCPU))
2319 		return -EINVAL;
2320 
2321 	if (!handler)
2322 		return -EINVAL;
2323 
2324 	desc = irq_to_desc(irq);
2325 
2326 	if (!desc || (irq_settings_can_autoenable(desc) &&
2327 	    !(irqflags & IRQF_NO_AUTOEN)) ||
2328 	    !irq_settings_can_request(desc) ||
2329 	    WARN_ON(irq_settings_is_per_cpu_devid(desc)) ||
2330 	    !irq_supports_nmi(desc))
2331 		return -EINVAL;
2332 
2333 	action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
2334 	if (!action)
2335 		return -ENOMEM;
2336 
2337 	action->handler = handler;
2338 	action->flags = irqflags | IRQF_NO_THREAD | IRQF_NOBALANCING;
2339 	action->name = name;
2340 	action->dev_id = dev_id;
2341 
2342 	retval = irq_chip_pm_get(&desc->irq_data);
2343 	if (retval < 0)
2344 		goto err_out;
2345 
2346 	retval = __setup_irq(irq, desc, action);
2347 	if (retval)
2348 		goto err_irq_setup;
2349 
2350 	raw_spin_lock_irqsave(&desc->lock, flags);
2351 
2352 	/* Setup NMI state */
2353 	desc->istate |= IRQS_NMI;
2354 	retval = irq_nmi_setup(desc);
2355 	if (retval) {
2356 		__cleanup_nmi(irq, desc);
2357 		raw_spin_unlock_irqrestore(&desc->lock, flags);
2358 		return -EINVAL;
2359 	}
2360 
2361 	raw_spin_unlock_irqrestore(&desc->lock, flags);
2362 
2363 	return 0;
2364 
2365 err_irq_setup:
2366 	irq_chip_pm_put(&desc->irq_data);
2367 err_out:
2368 	kfree(action);
2369 
2370 	return retval;
2371 }
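
/*
 * Usage sketch (illustrative; assumes the irqchip supports NMI delivery
 * on this line): the flags must include IRQF_PERCPU and the line must
 * not auto-enable, hence IRQF_NO_AUTOEN. Delivery is switched on
 * afterwards with enable_nmi().
 *
 *	ret = request_nmi(irq, foo_nmi_handler,
 *			  IRQF_PERCPU | IRQF_NO_AUTOEN, "foo-nmi", foo);
 *	if (!ret)
 *		enable_nmi(irq);
 */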
2372 
enable_percpu_irq(unsigned int irq,unsigned int type)2373 void enable_percpu_irq(unsigned int irq, unsigned int type)
2374 {
2375 	unsigned int cpu = smp_processor_id();
2376 	unsigned long flags;
2377 	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU);
2378 
2379 	if (!desc)
2380 		return;
2381 
2382 	/*
2383 	 * If the trigger type is not specified by the caller, then
2384 	 * use the default for this interrupt.
2385 	 */
2386 	type &= IRQ_TYPE_SENSE_MASK;
2387 	if (type == IRQ_TYPE_NONE)
2388 		type = irqd_get_trigger_type(&desc->irq_data);
2389 
2390 	if (type != IRQ_TYPE_NONE) {
2391 		int ret;
2392 
2393 		ret = __irq_set_trigger(desc, type);
2394 
2395 		if (ret) {
2396 			WARN(1, "failed to set type for IRQ%d\n", irq);
2397 			goto out;
2398 		}
2399 	}
2400 
2401 	irq_percpu_enable(desc, cpu);
2402 out:
2403 	irq_put_desc_unlock(desc, flags);
2404 }
2405 EXPORT_SYMBOL_GPL(enable_percpu_irq);
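
/*
 * Usage sketch (illustrative): enable the per-CPU interrupt on the local
 * CPU, typically from a CPU hotplug "starting" callback so every online
 * CPU runs it. Passing IRQ_TYPE_NONE keeps the trigger type that is
 * already configured for the line, as described above.
 *
 *	enable_percpu_irq(foo_irq, IRQ_TYPE_NONE);
 */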
2406 
enable_percpu_nmi(unsigned int irq,unsigned int type)2407 void enable_percpu_nmi(unsigned int irq, unsigned int type)
2408 {
2409 	enable_percpu_irq(irq, type);
2410 }
2411 
2412 /**
2413  * irq_percpu_is_enabled - Check whether the per cpu irq is enabled
2414  * @irq:	Linux irq number to check for
2415  *
2416  * Must be called from a non-migratable context. Returns the enable
2417  * state of a per-cpu interrupt on the current CPU.
2418  */
irq_percpu_is_enabled(unsigned int irq)2419 bool irq_percpu_is_enabled(unsigned int irq)
2420 {
2421 	unsigned int cpu = smp_processor_id();
2422 	struct irq_desc *desc;
2423 	unsigned long flags;
2424 	bool is_enabled;
2425 
2426 	desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU);
2427 	if (!desc)
2428 		return false;
2429 
2430 	is_enabled = cpumask_test_cpu(cpu, desc->percpu_enabled);
2431 	irq_put_desc_unlock(desc, flags);
2432 
2433 	return is_enabled;
2434 }
2435 EXPORT_SYMBOL_GPL(irq_percpu_is_enabled);
2436 
disable_percpu_irq(unsigned int irq)2437 void disable_percpu_irq(unsigned int irq)
2438 {
2439 	unsigned int cpu = smp_processor_id();
2440 	unsigned long flags;
2441 	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU);
2442 
2443 	if (!desc)
2444 		return;
2445 
2446 	irq_percpu_disable(desc, cpu);
2447 	irq_put_desc_unlock(desc, flags);
2448 }
2449 EXPORT_SYMBOL_GPL(disable_percpu_irq);
2450 
disable_percpu_nmi(unsigned int irq)2451 void disable_percpu_nmi(unsigned int irq)
2452 {
2453 	disable_percpu_irq(irq);
2454 }
2455 
2456 /*
2457  * Internal function to unregister a percpu irqaction.
2458  */
__free_percpu_irq(unsigned int irq,void __percpu * dev_id)2459 static struct irqaction *__free_percpu_irq(unsigned int irq, void __percpu *dev_id)
2460 {
2461 	struct irq_desc *desc = irq_to_desc(irq);
2462 	struct irqaction *action;
2463 	unsigned long flags;
2464 
2465 	WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq);
2466 
2467 	if (!desc)
2468 		return NULL;
2469 
2470 	raw_spin_lock_irqsave(&desc->lock, flags);
2471 
2472 	action = desc->action;
2473 	if (!action || action->percpu_dev_id != dev_id) {
2474 		WARN(1, "Trying to free already-free IRQ %d\n", irq);
2475 		goto bad;
2476 	}
2477 
2478 	if (!cpumask_empty(desc->percpu_enabled)) {
2479 		WARN(1, "percpu IRQ %d still enabled on CPU%d!\n",
2480 		     irq, cpumask_first(desc->percpu_enabled));
2481 		goto bad;
2482 	}
2483 
2484 	/* Found it - now remove it from the list of entries: */
2485 	desc->action = NULL;
2486 
2487 	desc->istate &= ~IRQS_NMI;
2488 
2489 	raw_spin_unlock_irqrestore(&desc->lock, flags);
2490 
2491 	unregister_handler_proc(irq, action);
2492 
2493 	irq_chip_pm_put(&desc->irq_data);
2494 	module_put(desc->owner);
2495 	return action;
2496 
2497 bad:
2498 	raw_spin_unlock_irqrestore(&desc->lock, flags);
2499 	return NULL;
2500 }
2501 
2502 /**
2503  *	remove_percpu_irq - free a per-cpu interrupt
2504  *	@irq: Interrupt line to free
2505  *	@act: irqaction for the interrupt
2506  *
2507  * Used to remove interrupts statically setup by the early boot process.
2508  * Used to remove interrupts statically set up by the early boot process.
remove_percpu_irq(unsigned int irq,struct irqaction * act)2509 void remove_percpu_irq(unsigned int irq, struct irqaction *act)
2510 {
2511 	struct irq_desc *desc = irq_to_desc(irq);
2512 
2513 	if (desc && irq_settings_is_per_cpu_devid(desc))
2514 	    __free_percpu_irq(irq, act->percpu_dev_id);
2515 }
2516 
2517 /**
2518  *	free_percpu_irq - free an interrupt allocated with request_percpu_irq
2519  *	@irq: Interrupt line to free
2520  *	@dev_id: Device identity to free
2521  *
2522  *	Remove a percpu interrupt handler. The handler is removed, but
2523  *	the interrupt line is not disabled. This must be done on each
2524  *	CPU before calling this function. The function does not return
2525  *	until any executing interrupts for this IRQ have completed.
2526  *
2527  *	This function must not be called from interrupt context.
2528  */
free_percpu_irq(unsigned int irq,void __percpu * dev_id)2529 void free_percpu_irq(unsigned int irq, void __percpu *dev_id)
2530 {
2531 	struct irq_desc *desc = irq_to_desc(irq);
2532 
2533 	if (!desc || !irq_settings_is_per_cpu_devid(desc))
2534 		return;
2535 
2536 	chip_bus_lock(desc);
2537 	kfree(__free_percpu_irq(irq, dev_id));
2538 	chip_bus_sync_unlock(desc);
2539 }
2540 EXPORT_SYMBOL_GPL(free_percpu_irq);
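
/*
 * Usage sketch (illustrative): tear-down mirrors the request side. Each
 * CPU disables its copy of the line first (freeing does not do that),
 * then the handler is removed once with the same percpu dev_id cookie.
 *
 *	// on every CPU, e.g. from a CPU hotplug "dying" callback:
 *	disable_percpu_irq(foo_irq);
 *
 *	// once, from task context:
 *	free_percpu_irq(foo_irq, &foo_pcpu);
 */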
2541 
free_percpu_nmi(unsigned int irq,void __percpu * dev_id)2542 void free_percpu_nmi(unsigned int irq, void __percpu *dev_id)
2543 {
2544 	struct irq_desc *desc = irq_to_desc(irq);
2545 
2546 	if (!desc || !irq_settings_is_per_cpu_devid(desc))
2547 		return;
2548 
2549 	if (WARN_ON(!(desc->istate & IRQS_NMI)))
2550 		return;
2551 
2552 	kfree(__free_percpu_irq(irq, dev_id));
2553 }
2554 
2555 /**
2556  *	setup_percpu_irq - setup a per-cpu interrupt
2557  *	@irq: Interrupt line to setup
2558  *	@act: irqaction for the interrupt
2559  *
2560  * Used to statically setup per-cpu interrupts in the early boot process.
2561  * Used to statically set up per-cpu interrupts in the early boot process.
setup_percpu_irq(unsigned int irq,struct irqaction * act)2562 int setup_percpu_irq(unsigned int irq, struct irqaction *act)
2563 {
2564 	struct irq_desc *desc = irq_to_desc(irq);
2565 	int retval;
2566 
2567 	if (!desc || !irq_settings_is_per_cpu_devid(desc))
2568 		return -EINVAL;
2569 
2570 	retval = irq_chip_pm_get(&desc->irq_data);
2571 	if (retval < 0)
2572 		return retval;
2573 
2574 	retval = __setup_irq(irq, desc, act);
2575 
2576 	if (retval)
2577 		irq_chip_pm_put(&desc->irq_data);
2578 
2579 	return retval;
2580 }
2581 
2582 /**
2583  *	__request_percpu_irq - allocate a percpu interrupt line
2584  *	@irq: Interrupt line to allocate
2585  *	@handler: Function to be called when the IRQ occurs.
2586  *	@flags: Interrupt type flags (IRQF_TIMER only)
2587  *	@devname: An ascii name for the claiming device
2588  *	@dev_id: A percpu cookie passed back to the handler function
2589  *
2590  *	This call allocates interrupt resources and enables the
2591  *	interrupt on the local CPU. If the interrupt is supposed to be
2592  *	enabled on other CPUs, it has to be done on each CPU using
2593  *	enable_percpu_irq().
2594  *
2595  *	Dev_id must be globally unique. It is a per-cpu variable, and
2596  *	the handler gets called with the interrupted CPU's instance of
2597  *	that variable.
2598  */
__request_percpu_irq(unsigned int irq,irq_handler_t handler,unsigned long flags,const char * devname,void __percpu * dev_id)2599 int __request_percpu_irq(unsigned int irq, irq_handler_t handler,
2600 			 unsigned long flags, const char *devname,
2601 			 void __percpu *dev_id)
2602 {
2603 	struct irqaction *action;
2604 	struct irq_desc *desc;
2605 	int retval;
2606 
2607 	if (!dev_id)
2608 		return -EINVAL;
2609 
2610 	desc = irq_to_desc(irq);
2611 	if (!desc || !irq_settings_can_request(desc) ||
2612 	    !irq_settings_is_per_cpu_devid(desc))
2613 		return -EINVAL;
2614 
2615 	if (flags && flags != IRQF_TIMER)
2616 		return -EINVAL;
2617 
2618 	action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
2619 	if (!action)
2620 		return -ENOMEM;
2621 
2622 	action->handler = handler;
2623 	action->flags = flags | IRQF_PERCPU | IRQF_NO_SUSPEND;
2624 	action->name = devname;
2625 	action->percpu_dev_id = dev_id;
2626 
2627 	retval = irq_chip_pm_get(&desc->irq_data);
2628 	if (retval < 0) {
2629 		kfree(action);
2630 		return retval;
2631 	}
2632 
2633 	retval = __setup_irq(irq, desc, action);
2634 
2635 	if (retval) {
2636 		irq_chip_pm_put(&desc->irq_data);
2637 		kfree(action);
2638 	}
2639 
2640 	return retval;
2641 }
2642 EXPORT_SYMBOL_GPL(__request_percpu_irq);
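
/*
 * Usage sketch (illustrative; the foo_pcpu per-cpu variable is an
 * assumption): drivers normally use the request_percpu_irq() wrapper,
 * which passes flags = 0, and then enable the line from each CPU that
 * should receive it, as noted above.
 *
 *	static DEFINE_PER_CPU(struct foo_pcpu, foo_pcpu);
 *
 *	ret = request_percpu_irq(irq, foo_percpu_handler, "foo", &foo_pcpu);
 *	if (ret)
 *		return ret;
 *	// then, on each CPU:
 *	enable_percpu_irq(irq, IRQ_TYPE_NONE);
 */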
2643 
2644 /**
2645  *	request_percpu_nmi - allocate a percpu interrupt line for NMI delivery
2646  *	@irq: Interrupt line to allocate
2647  *	@handler: Function to be called when the IRQ occurs.
2648  *	@name: An ascii name for the claiming device
2649  *	@dev_id: A percpu cookie passed back to the handler function
2650  *
2651  *	This call allocates interrupt resources for a per CPU NMI. Per CPU NMIs
2652  *	This call allocates interrupt resources for a per-CPU NMI. Per-CPU NMIs
2653  *	have to be set up on each CPU by calling prepare_percpu_nmi() before
2654  *
2655  *	Dev_id must be globally unique. It is a per-cpu variable, and
2656  *	the handler gets called with the interrupted CPU's instance of
2657  *	that variable.
2658  *
2659  *	Interrupt lines requested for NMI delivery should have the auto-enable
2660  *	setting disabled.
2661  *
2662  *	If the interrupt line cannot be used to deliver NMIs, the function
2663  *	will fail, returning a negative value.
2664  */
request_percpu_nmi(unsigned int irq,irq_handler_t handler,const char * name,void __percpu * dev_id)2665 int request_percpu_nmi(unsigned int irq, irq_handler_t handler,
2666 		       const char *name, void __percpu *dev_id)
2667 {
2668 	struct irqaction *action;
2669 	struct irq_desc *desc;
2670 	unsigned long flags;
2671 	int retval;
2672 
2673 	if (!handler)
2674 		return -EINVAL;
2675 
2676 	desc = irq_to_desc(irq);
2677 
2678 	if (!desc || !irq_settings_can_request(desc) ||
2679 	    !irq_settings_is_per_cpu_devid(desc) ||
2680 	    irq_settings_can_autoenable(desc) ||
2681 	    !irq_supports_nmi(desc))
2682 		return -EINVAL;
2683 
2684 	/* The line cannot already be NMI */
2685 	if (desc->istate & IRQS_NMI)
2686 		return -EINVAL;
2687 
2688 	action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
2689 	if (!action)
2690 		return -ENOMEM;
2691 
2692 	action->handler = handler;
2693 	action->flags = IRQF_PERCPU | IRQF_NO_SUSPEND | IRQF_NO_THREAD
2694 		| IRQF_NOBALANCING;
2695 	action->name = name;
2696 	action->percpu_dev_id = dev_id;
2697 
2698 	retval = irq_chip_pm_get(&desc->irq_data);
2699 	if (retval < 0)
2700 		goto err_out;
2701 
2702 	retval = __setup_irq(irq, desc, action);
2703 	if (retval)
2704 		goto err_irq_setup;
2705 
2706 	raw_spin_lock_irqsave(&desc->lock, flags);
2707 	desc->istate |= IRQS_NMI;
2708 	raw_spin_unlock_irqrestore(&desc->lock, flags);
2709 
2710 	return 0;
2711 
2712 err_irq_setup:
2713 	irq_chip_pm_put(&desc->irq_data);
2714 err_out:
2715 	kfree(action);
2716 
2717 	return retval;
2718 }
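
/*
 * Usage sketch (illustrative): per-CPU NMI bring-up. The line is
 * requested once; each CPU that should receive it then prepares and
 * enables it locally, from non-preemptible context.
 *
 *	ret = request_percpu_nmi(irq, foo_nmi_handler, "foo-nmi", &foo_pcpu);
 *	if (ret)
 *		return ret;
 *
 *	// on each CPU, e.g. from a CPU hotplug "starting" callback:
 *	ret = prepare_percpu_nmi(irq);
 *	if (!ret)
 *		enable_percpu_nmi(irq, IRQ_TYPE_NONE);
 */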
2719 
2720 /**
2721  *	prepare_percpu_nmi - performs CPU local setup for NMI delivery
2722  *	@irq: Interrupt line to prepare for NMI delivery
2723  *
2724  *	This call prepares an interrupt line to deliver NMI on the current CPU,
2725  *	before that interrupt line gets enabled with enable_percpu_nmi().
2726  *
2727  *	As a CPU local operation, this should be called from non-preemptible
2728  *	context.
2729  *
2730  *	If the interrupt line cannot be used to deliver NMIs, the function
2731  *	will fail, returning a negative value.
2732  */
prepare_percpu_nmi(unsigned int irq)2733 int prepare_percpu_nmi(unsigned int irq)
2734 {
2735 	unsigned long flags;
2736 	struct irq_desc *desc;
2737 	int ret = 0;
2738 
2739 	WARN_ON(preemptible());
2740 
2741 	desc = irq_get_desc_lock(irq, &flags,
2742 				 IRQ_GET_DESC_CHECK_PERCPU);
2743 	if (!desc)
2744 		return -EINVAL;
2745 
2746 	if (WARN(!(desc->istate & IRQS_NMI),
2747 		 KERN_ERR "prepare_percpu_nmi called for a non-NMI interrupt: irq %u\n",
2748 		 irq)) {
2749 		ret = -EINVAL;
2750 		goto out;
2751 	}
2752 
2753 	ret = irq_nmi_setup(desc);
2754 	if (ret) {
2755 		pr_err("Failed to setup NMI delivery: irq %u\n", irq);
2756 		goto out;
2757 	}
2758 
2759 out:
2760 	irq_put_desc_unlock(desc, flags);
2761 	return ret;
2762 }
2763 
2764 /**
2765  *	teardown_percpu_nmi - undoes NMI setup of IRQ line
2766  *	@irq: Interrupt line from which CPU local NMI configuration should be
2767  *	      removed
2768  *
2769  *	This call undoes the setup done by prepare_percpu_nmi().
2770  *
2771  *	IRQ line should not be enabled for the current CPU.
2772  *
2773  *	As a CPU local operation, this should be called from non-preemptible
2774  *	context.
2775  */
teardown_percpu_nmi(unsigned int irq)2776 void teardown_percpu_nmi(unsigned int irq)
2777 {
2778 	unsigned long flags;
2779 	struct irq_desc *desc;
2780 
2781 	WARN_ON(preemptible());
2782 
2783 	desc = irq_get_desc_lock(irq, &flags,
2784 				 IRQ_GET_DESC_CHECK_PERCPU);
2785 	if (!desc)
2786 		return;
2787 
2788 	if (WARN_ON(!(desc->istate & IRQS_NMI)))
2789 		goto out;
2790 
2791 	irq_nmi_teardown(desc);
2792 out:
2793 	irq_put_desc_unlock(desc, flags);
2794 }
2795 
__irq_get_irqchip_state(struct irq_data * data,enum irqchip_irq_state which,bool * state)2796 int __irq_get_irqchip_state(struct irq_data *data, enum irqchip_irq_state which,
2797 			    bool *state)
2798 {
2799 	struct irq_chip *chip;
2800 	int err = -EINVAL;
2801 
2802 	do {
2803 		chip = irq_data_get_irq_chip(data);
2804 		if (WARN_ON_ONCE(!chip))
2805 			return -ENODEV;
2806 		if (chip->irq_get_irqchip_state)
2807 			break;
2808 #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
2809 		data = data->parent_data;
2810 #else
2811 		data = NULL;
2812 #endif
2813 	} while (data);
2814 
2815 	if (data)
2816 		err = chip->irq_get_irqchip_state(data, which, state);
2817 	return err;
2818 }
2819 
2820 /**
2821  *	irq_get_irqchip_state - returns the irqchip state of an interrupt.
2822  *	@irq: Interrupt line that is forwarded to a VM
2823  *	@which: One of IRQCHIP_STATE_* the caller wants to know about
2824  *	@state: a pointer to a boolean where the state is to be stored
2825  *
2826  *	This call snapshots the internal irqchip state of an
2827  *	interrupt, returning into @state the bit corresponding to
2828  *	state @which.
2829  *
2830  *	This function should be called with preemption disabled if the
2831  *	interrupt controller has per-cpu registers.
2832  */
irq_get_irqchip_state(unsigned int irq,enum irqchip_irq_state which,bool * state)2833 int irq_get_irqchip_state(unsigned int irq, enum irqchip_irq_state which,
2834 			  bool *state)
2835 {
2836 	struct irq_desc *desc;
2837 	struct irq_data *data;
2838 	unsigned long flags;
2839 	int err = -EINVAL;
2840 
2841 	desc = irq_get_desc_buslock(irq, &flags, 0);
2842 	if (!desc)
2843 		return err;
2844 
2845 	data = irq_desc_get_irq_data(desc);
2846 
2847 	err = __irq_get_irqchip_state(data, which, state);
2848 
2849 	irq_put_desc_busunlock(desc, flags);
2850 	return err;
2851 }
2852 EXPORT_SYMBOL_GPL(irq_get_irqchip_state);
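
/*
 * Usage sketch (illustrative; foo_save_pending_state() is an assumed
 * helper): check whether a forwarded interrupt is still pending at the
 * irqchip, e.g. before saving its state for a VM.
 *
 *	bool pending;
 *
 *	ret = irq_get_irqchip_state(irq, IRQCHIP_STATE_PENDING, &pending);
 *	if (!ret && pending)
 *		foo_save_pending_state(irq);
 */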
2853 
2854 /**
2855  *	irq_set_irqchip_state - set the state of a forwarded interrupt.
2856  *	@irq: Interrupt line that is forwarded to a VM
2857  *	@which: State to be restored (one of IRQCHIP_STATE_*)
2858  *	@val: Value corresponding to @which
2859  *
2860  *	This call sets the internal irqchip state of an interrupt,
2861  *	depending on the value of @which.
2862  *
2863  *	This function should be called with migration disabled if the
2864  *	interrupt controller has per-cpu registers.
2865  */
irq_set_irqchip_state(unsigned int irq,enum irqchip_irq_state which,bool val)2866 int irq_set_irqchip_state(unsigned int irq, enum irqchip_irq_state which,
2867 			  bool val)
2868 {
2869 	struct irq_desc *desc;
2870 	struct irq_data *data;
2871 	struct irq_chip *chip;
2872 	unsigned long flags;
2873 	int err = -EINVAL;
2874 
2875 	desc = irq_get_desc_buslock(irq, &flags, 0);
2876 	if (!desc)
2877 		return err;
2878 
2879 	data = irq_desc_get_irq_data(desc);
2880 
2881 	do {
2882 		chip = irq_data_get_irq_chip(data);
2883 		if (WARN_ON_ONCE(!chip)) {
2884 			err = -ENODEV;
2885 			goto out_unlock;
2886 		}
2887 		if (chip->irq_set_irqchip_state)
2888 			break;
2889 #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
2890 		data = data->parent_data;
2891 #else
2892 		data = NULL;
2893 #endif
2894 	} while (data);
2895 
2896 	if (data)
2897 		err = chip->irq_set_irqchip_state(data, which, val);
2898 
2899 out_unlock:
2900 	irq_put_desc_busunlock(desc, flags);
2901 	return err;
2902 }
2903 EXPORT_SYMBOL_GPL(irq_set_irqchip_state);
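
/*
 * Usage sketch (illustrative): the counterpart of the getter above,
 * e.g. re-injecting a previously saved pending state when a forwarded
 * interrupt is restored.
 *
 *	ret = irq_set_irqchip_state(irq, IRQCHIP_STATE_PENDING, pending);
 */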
2904 
2905 /**
2906  * irq_has_action - Check whether an interrupt is requested
2907  * @irq:	The linux irq number
2908  *
2909  * Returns: A snapshot of the current state
2910  */
irq_has_action(unsigned int irq)2911 bool irq_has_action(unsigned int irq)
2912 {
2913 	bool res;
2914 
2915 	rcu_read_lock();
2916 	res = irq_desc_has_action(irq_to_desc(irq));
2917 	rcu_read_unlock();
2918 	return res;
2919 }
2920 EXPORT_SYMBOL_GPL(irq_has_action);
2921 
2922 /**
2923  * irq_check_status_bit - Check whether bits in the irq descriptor status are set
2924  * @irq:	The linux irq number
2925  * @bitmask:	The bitmask to evaluate
2926  *
2927  * Returns: True if one of the bits in @bitmask is set
2928  */
irq_check_status_bit(unsigned int irq,unsigned int bitmask)2929 bool irq_check_status_bit(unsigned int irq, unsigned int bitmask)
2930 {
2931 	struct irq_desc *desc;
2932 	bool res = false;
2933 
2934 	rcu_read_lock();
2935 	desc = irq_to_desc(irq);
2936 	if (desc)
2937 		res = !!(desc->status_use_accessors & bitmask);
2938 	rcu_read_unlock();
2939 	return res;
2940 }
2941 EXPORT_SYMBOL_GPL(irq_check_status_bit);
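
/*
 * Usage sketch (illustrative): both helpers above take only a Linux irq
 * number and rely on RCU internally, so no descriptor lock is needed at
 * the call site.
 *
 *	if (irq_has_action(irq) && irq_check_status_bit(irq, IRQ_PER_CPU))
 *		pr_info("irq %u is a requested per-CPU interrupt\n", irq);
 */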
2942