// SPDX-License-Identifier: GPL-2.0
/*
 * linux/kernel/irq/proc.c
 *
 * Copyright (C) 1992, 1998-2004 Linus Torvalds, Ingo Molnar
 *
 * This file contains the /proc/irq/ handling code.
 */

#include <linux/irq.h>
#include <linux/gfp.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/mutex.h>

#include "internals.h"

/*
 * Access rules:
 *
 * procfs protects read/write of /proc/irq/N/ files against a
 * concurrent free of the interrupt descriptor. remove_proc_entry()
 * immediately prevents new reads/writes from happening and waits for
 * already running read/write functions to complete.
 *
 * We remove the proc entries first and then delete the interrupt
 * descriptor from the radix tree and free it. So it is guaranteed
 * that irq_to_desc(N) is valid as long as the reads/writes are
 * permitted by procfs.
 *
 * The read from /proc/interrupts is a different problem because there
 * is no such protection. So the lookup and the access to irqdesc
 * information must be protected by sparse_irq_lock.
 */
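/* The /proc/irq directory entry; created in init_irq_proc() */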
static struct proc_dir_entry *root_irq_dir;

#ifdef CONFIG_SMP

enum {
	AFFINITY,
	AFFINITY_LIST,
	EFFECTIVE,
	EFFECTIVE_LIST,
};

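/*
 * Print the requested affinity information for the interrupt stashed in
 * m->private: the configured affinity mask (or the pending mask when a
 * set-affinity is still in flight and CONFIG_GENERIC_PENDING_IRQ is set),
 * or the effective affinity mask, formatted as a bitmask or a CPU list
 * depending on @type.
 */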
static int show_irq_affinity(int type, struct seq_file *m)
{
	struct irq_desc *desc = irq_to_desc((long)m->private);
	const struct cpumask *mask;

	switch (type) {
	case AFFINITY:
	case AFFINITY_LIST:
		mask = desc->irq_common_data.affinity;
#ifdef CONFIG_GENERIC_PENDING_IRQ
		if (irqd_is_setaffinity_pending(&desc->irq_data))
			mask = desc->pending_mask;
#endif
		break;
	case EFFECTIVE:
	case EFFECTIVE_LIST:
#ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
		mask = irq_data_get_effective_affinity_mask(&desc->irq_data);
		break;
#endif
	default:
		return -EINVAL;
	}

	switch (type) {
	case AFFINITY_LIST:
	case EFFECTIVE_LIST:
		seq_printf(m, "%*pbl\n", cpumask_pr_args(mask));
		break;
	case AFFINITY:
	case EFFECTIVE:
		seq_printf(m, "%*pb\n", cpumask_pr_args(mask));
		break;
	}
	return 0;
}

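/*
 * /proc/irq/<irq>/affinity_hint: print the affinity hint a driver may have
 * provided (e.g. via irq_set_affinity_hint()); an empty mask is shown when
 * no hint is set. The hint is copied under desc->lock into a temporary
 * cpumask so the descriptor is not kept locked while printing.
 */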
static int irq_affinity_hint_proc_show(struct seq_file *m, void *v)
{
	struct irq_desc *desc = irq_to_desc((long)m->private);
	unsigned long flags;
	cpumask_var_t mask;

	if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;

	raw_spin_lock_irqsave(&desc->lock, flags);
	if (desc->affinity_hint)
		cpumask_copy(mask, desc->affinity_hint);
	raw_spin_unlock_irqrestore(&desc->lock, flags);

	seq_printf(m, "%*pb\n", cpumask_pr_args(mask));
	free_cpumask_var(mask);

	return 0;
}

#ifndef is_affinity_mask_valid
#define is_affinity_mask_valid(val) 1
#endif

int no_irq_affinity;
static int irq_affinity_proc_show(struct seq_file *m, void *v)
{
	return show_irq_affinity(AFFINITY, m);
}

static int irq_affinity_list_proc_show(struct seq_file *m, void *v)
{
	return show_irq_affinity(AFFINITY_LIST, m);
}

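/*
 * Common write handler for /proc/irq/<irq>/smp_affinity (@type == 0, hex
 * bitmask, e.g. "echo f > /proc/irq/<irq>/smp_affinity") and
 * /proc/irq/<irq>/smp_affinity_list (@type == 1, CPU list, e.g.
 * "echo 0-3 > /proc/irq/<irq>/smp_affinity_list"). Writing a mask that
 * contains no online CPU lets the architecture pick a default affinity
 * instead of disabling the interrupt.
 */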
static ssize_t write_irq_affinity(int type, struct file *file,
		const char __user *buffer, size_t count, loff_t *pos)
{
	unsigned int irq = (int)(long)PDE_DATA(file_inode(file));
	cpumask_var_t new_value;
	int err;

	if (!irq_can_set_affinity_usr(irq) || no_irq_affinity)
		return -EIO;

	if (!alloc_cpumask_var(&new_value, GFP_KERNEL))
		return -ENOMEM;

	if (type)
		err = cpumask_parselist_user(buffer, count, new_value);
	else
		err = cpumask_parse_user(buffer, count, new_value);
	if (err)
		goto free_cpumask;

	if (!is_affinity_mask_valid(new_value)) {
		err = -EINVAL;
		goto free_cpumask;
	}

	/*
	 * Do not allow disabling IRQs completely - it's too easy a way
	 * to accidentally make the system unusable :-) At least one
	 * online CPU still has to be targeted.
	 */
	if (!cpumask_intersects(new_value, cpu_online_mask)) {
		/*
		 * Special case for empty set - allow the architecture code
		 * to set default SMP affinity.
		 */
		err = irq_select_affinity_usr(irq) ? -EINVAL : count;
	} else {
		err = irq_set_affinity(irq, new_value);
		if (!err)
			err = count;
	}

free_cpumask:
	free_cpumask_var(new_value);
	return err;
}

static ssize_t irq_affinity_proc_write(struct file *file,
		const char __user *buffer, size_t count, loff_t *pos)
{
	return write_irq_affinity(0, file, buffer, count, pos);
}

static ssize_t irq_affinity_list_proc_write(struct file *file,
		const char __user *buffer, size_t count, loff_t *pos)
{
	return write_irq_affinity(1, file, buffer, count, pos);
}

static int irq_affinity_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, irq_affinity_proc_show, PDE_DATA(inode));
}

static int irq_affinity_list_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, irq_affinity_list_proc_show, PDE_DATA(inode));
}

static int irq_affinity_hint_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, irq_affinity_hint_proc_show, PDE_DATA(inode));
}

static const struct file_operations irq_affinity_proc_fops = {
	.open		= irq_affinity_proc_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
	.write		= irq_affinity_proc_write,
};

static const struct file_operations irq_affinity_hint_proc_fops = {
	.open		= irq_affinity_hint_proc_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static const struct file_operations irq_affinity_list_proc_fops = {
	.open		= irq_affinity_list_proc_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
	.write		= irq_affinity_list_proc_write,
};

#ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
static int irq_effective_aff_proc_show(struct seq_file *m, void *v)
{
	return show_irq_affinity(EFFECTIVE, m);
}

static int irq_effective_aff_list_proc_show(struct seq_file *m, void *v)
{
	return show_irq_affinity(EFFECTIVE_LIST, m);
}

static int irq_effective_aff_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, irq_effective_aff_proc_show, PDE_DATA(inode));
}

static int irq_effective_aff_list_proc_open(struct inode *inode,
					    struct file *file)
{
	return single_open(file, irq_effective_aff_list_proc_show,
			   PDE_DATA(inode));
}

static const struct file_operations irq_effective_aff_proc_fops = {
	.open		= irq_effective_aff_proc_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static const struct file_operations irq_effective_aff_list_proc_fops = {
	.open		= irq_effective_aff_list_proc_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
#endif

static int default_affinity_show(struct seq_file *m, void *v)
{
	seq_printf(m, "%*pb\n", cpumask_pr_args(irq_default_affinity));
	return 0;
}

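/*
 * /proc/irq/default_smp_affinity: parse a hex cpumask from user space and
 * store it in irq_default_affinity, the mask used as the default affinity
 * for newly set up interrupts. A mask without any online CPU is rejected.
 */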
static ssize_t default_affinity_write(struct file *file,
		const char __user *buffer, size_t count, loff_t *ppos)
{
	cpumask_var_t new_value;
	int err;

	if (!alloc_cpumask_var(&new_value, GFP_KERNEL))
		return -ENOMEM;

	err = cpumask_parse_user(buffer, count, new_value);
	if (err)
		goto out;

	if (!is_affinity_mask_valid(new_value)) {
		err = -EINVAL;
		goto out;
	}

	/*
	 * Do not allow disabling IRQs completely - it's too easy a way
	 * to accidentally make the system unusable :-) At least one
	 * online CPU still has to be targeted.
	 */
	if (!cpumask_intersects(new_value, cpu_online_mask)) {
		err = -EINVAL;
		goto out;
	}

	cpumask_copy(irq_default_affinity, new_value);
	err = count;

out:
	free_cpumask_var(new_value);
	return err;
}

static int default_affinity_open(struct inode *inode, struct file *file)
{
	return single_open(file, default_affinity_show, PDE_DATA(inode));
}

static const struct file_operations default_affinity_proc_fops = {
	.open		= default_affinity_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
	.write		= default_affinity_write,
};

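/* /proc/irq/<irq>/node: print the NUMA node associated with the interrupt */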
static int irq_node_proc_show(struct seq_file *m, void *v)
{
	struct irq_desc *desc = irq_to_desc((long) m->private);

	seq_printf(m, "%d\n", irq_desc_get_node(desc));
	return 0;
}

static int irq_node_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, irq_node_proc_show, PDE_DATA(inode));
}

static const struct file_operations irq_node_proc_fops = {
	.open		= irq_node_proc_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
#endif

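/*
 * /proc/irq/<irq>/spurious: print the total number of occurrences, how many
 * of them went unhandled, and the timestamp of the last unhandled one
 * (desc->last_unhandled, converted from jiffies to milliseconds).
 */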
static int irq_spurious_proc_show(struct seq_file *m, void *v)
{
	struct irq_desc *desc = irq_to_desc((long) m->private);

	seq_printf(m, "count %u\n" "unhandled %u\n" "last_unhandled %u ms\n",
		   desc->irq_count, desc->irqs_unhandled,
		   jiffies_to_msecs(desc->last_unhandled));
	return 0;
}

static int irq_spurious_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, irq_spurious_proc_show, PDE_DATA(inode));
}

static const struct file_operations irq_spurious_proc_fops = {
	.open		= irq_spurious_proc_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

#define MAX_NAMELEN 128

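/*
 * Return 1 if no other action on this descriptor already uses the same
 * name, 0 otherwise. The action chain is walked under desc->lock.
 */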
static int name_unique(unsigned int irq, struct irqaction *new_action)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irqaction *action;
	unsigned long flags;
	int ret = 1;

	raw_spin_lock_irqsave(&desc->lock, flags);
	for_each_action_of_desc(desc, action) {
		if ((action != new_action) && action->name &&
				!strcmp(new_action->name, action->name)) {
			ret = 0;
			break;
		}
	}
	raw_spin_unlock_irqrestore(&desc->lock, flags);
	return ret;
}

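/*
 * Create the /proc/irq/<irq>/<name>/ directory for an irqaction, provided
 * the per-interrupt directory exists and the action name is unique.
 */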
void register_handler_proc(unsigned int irq, struct irqaction *action)
{
	char name [MAX_NAMELEN];
	struct irq_desc *desc = irq_to_desc(irq);

	if (!desc->dir || action->dir || !action->name ||
					!name_unique(irq, action))
		return;

	snprintf(name, MAX_NAMELEN, "%s", action->name);

	/* create /proc/irq/1234/handler/ */
	action->dir = proc_mkdir(name, desc->dir);
}

#undef MAX_NAMELEN

#define MAX_NAMELEN 10

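/*
 * Create /proc/irq/<irq>/ and populate it with the per-interrupt files
 * (smp_affinity, affinity_hint, node, spurious, ...). register_lock
 * serializes concurrent callers so the directory is only created once.
 */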
void register_irq_proc(unsigned int irq, struct irq_desc *desc)
{
	static DEFINE_MUTEX(register_lock);
	void __maybe_unused *irqp = (void *)(unsigned long) irq;
	char name [MAX_NAMELEN];

	if (!root_irq_dir || (desc->irq_data.chip == &no_irq_chip))
		return;

	/*
	 * irq directories are registered only when a handler is
	 * added, not when the descriptor is created, so multiple
	 * tasks might try to register at the same time.
	 */
	mutex_lock(&register_lock);

	if (desc->dir)
		goto out_unlock;

	sprintf(name, "%d", irq);

	/* create /proc/irq/1234 */
	desc->dir = proc_mkdir(name, root_irq_dir);
	if (!desc->dir)
		goto out_unlock;

#ifdef CONFIG_SMP
	/* create /proc/irq/<irq>/smp_affinity */
	proc_create_data("smp_affinity", 0644, desc->dir,
			 &irq_affinity_proc_fops, irqp);

	/* create /proc/irq/<irq>/affinity_hint */
	proc_create_data("affinity_hint", 0444, desc->dir,
			 &irq_affinity_hint_proc_fops, irqp);

	/* create /proc/irq/<irq>/smp_affinity_list */
	proc_create_data("smp_affinity_list", 0644, desc->dir,
			 &irq_affinity_list_proc_fops, irqp);

	proc_create_data("node", 0444, desc->dir,
			 &irq_node_proc_fops, irqp);
# ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
	proc_create_data("effective_affinity", 0444, desc->dir,
			 &irq_effective_aff_proc_fops, irqp);
	proc_create_data("effective_affinity_list", 0444, desc->dir,
			 &irq_effective_aff_list_proc_fops, irqp);
# endif
#endif
	proc_create_data("spurious", 0444, desc->dir,
			 &irq_spurious_proc_fops, (void *)(long)irq);

out_unlock:
	mutex_unlock(&register_lock);
}

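/*
 * Counterpart of register_irq_proc(): remove the per-interrupt files and
 * then the /proc/irq/<irq>/ directory itself.
 */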
void unregister_irq_proc(unsigned int irq, struct irq_desc *desc)
{
	char name [MAX_NAMELEN];

	if (!root_irq_dir || !desc->dir)
		return;
#ifdef CONFIG_SMP
	remove_proc_entry("smp_affinity", desc->dir);
	remove_proc_entry("affinity_hint", desc->dir);
	remove_proc_entry("smp_affinity_list", desc->dir);
	remove_proc_entry("node", desc->dir);
# ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
	remove_proc_entry("effective_affinity", desc->dir);
	remove_proc_entry("effective_affinity_list", desc->dir);
# endif
#endif
	remove_proc_entry("spurious", desc->dir);

	sprintf(name, "%u", irq);
	remove_proc_entry(name, root_irq_dir);
}

#undef MAX_NAMELEN

void unregister_handler_proc(unsigned int irq, struct irqaction *action)
{
	proc_remove(action->dir);
}

static void register_default_affinity_proc(void)
{
#ifdef CONFIG_SMP
	proc_create("irq/default_smp_affinity", 0644, NULL,
		    &default_affinity_proc_fops);
#endif
}

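/*
 * Create the /proc/irq/ hierarchy: the root directory, the
 * default_smp_affinity file, and an entry for every interrupt descriptor
 * that already exists.
 */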
void init_irq_proc(void)
{
	unsigned int irq;
	struct irq_desc *desc;

	/* create /proc/irq */
	root_irq_dir = proc_mkdir("irq", NULL);
	if (!root_irq_dir)
		return;

	register_default_affinity_proc();

	/*
	 * Create entries for all existing IRQs.
	 */
	for_each_irq_desc(irq, desc)
		register_irq_proc(irq, desc);
}

#ifdef CONFIG_GENERIC_IRQ_SHOW

int __weak arch_show_interrupts(struct seq_file *p, int prec)
{
	return 0;
}

#ifndef ACTUAL_NR_IRQS
# define ACTUAL_NR_IRQS nr_irqs
#endif

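/*
 * seq_file show callback backing /proc/interrupts: print the CPU header for
 * the first entry, then one line per populated interrupt with its per-CPU
 * counts, chip name, hwirq number, trigger type (when
 * CONFIG_GENERIC_IRQ_SHOW_LEVEL is set) and the names of all actions.
 * Interrupts with no counts and no regular (non-chained) handler are
 * skipped; arch_show_interrupts() appends architecture specific rows at
 * the end.
 */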
int show_interrupts(struct seq_file *p, void *v)
{
	static int prec;

	unsigned long flags, any_count = 0;
	int i = *(loff_t *) v, j;
	struct irqaction *action;
	struct irq_desc *desc;

	if (i > ACTUAL_NR_IRQS)
		return 0;

	if (i == ACTUAL_NR_IRQS)
		return arch_show_interrupts(p, prec);

	/* print header and calculate the width of the first column */
	if (i == 0) {
		for (prec = 3, j = 1000; prec < 10 && j <= nr_irqs; ++prec)
			j *= 10;

		seq_printf(p, "%*s", prec + 8, "");
		for_each_online_cpu(j)
			seq_printf(p, "CPU%-8d", j);
		seq_putc(p, '\n');
	}

	irq_lock_sparse();
	desc = irq_to_desc(i);
	if (!desc)
		goto outsparse;

	raw_spin_lock_irqsave(&desc->lock, flags);
	for_each_online_cpu(j)
		any_count |= kstat_irqs_cpu(i, j);
	action = desc->action;
	if ((!action || irq_desc_is_chained(desc)) && !any_count)
		goto out;

	seq_printf(p, "%*d: ", prec, i);
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", kstat_irqs_cpu(i, j));

	if (desc->irq_data.chip) {
		if (desc->irq_data.chip->irq_print_chip)
			desc->irq_data.chip->irq_print_chip(&desc->irq_data, p);
		else if (desc->irq_data.chip->name)
			seq_printf(p, " %8s", desc->irq_data.chip->name);
		else
			seq_printf(p, " %8s", "-");
	} else {
		seq_printf(p, " %8s", "None");
	}
	if (desc->irq_data.domain)
		seq_printf(p, " %*d", prec, (int) desc->irq_data.hwirq);
	else
		seq_printf(p, " %*s", prec, "");
#ifdef CONFIG_GENERIC_IRQ_SHOW_LEVEL
	seq_printf(p, " %-8s", irqd_is_level_type(&desc->irq_data) ? "Level" : "Edge");
#endif
	if (desc->name)
		seq_printf(p, "-%-8s", desc->name);

	if (action) {
		seq_printf(p, "  %s", action->name);
		while ((action = action->next) != NULL)
			seq_printf(p, ", %s", action->name);
	}

	seq_putc(p, '\n');
out:
	raw_spin_unlock_irqrestore(&desc->lock, flags);
outsparse:
	irq_unlock_sparse();
	return 0;
}
#endif