// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 1992, 1998-2004 Linus Torvalds, Ingo Molnar
 *
 * This file contains the /proc/irq/ handling code.
 */

#include <linux/irq.h>
#include <linux/gfp.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/mutex.h>

#include "internals.h"

/*
 * Access rules:
 *
 * procfs protects read/write of /proc/irq/N/ files against a
 * concurrent free of the interrupt descriptor. remove_proc_entry()
 * immediately prevents new read/writes to happen and waits for
 * already running read/write functions to complete.
 *
 * We remove the proc entries first and then delete the interrupt
 * descriptor from the radix tree and free it. So it is guaranteed
 * that irq_to_desc(N) is valid as long as the read/writes are
 * permitted by procfs.
 *
 * The read from /proc/interrupts is a different problem because there
 * is no protection. So the lookup and the access to irqdesc
 * information must be protected by sparse_irq_lock.
 */
static struct proc_dir_entry *root_irq_dir;

#ifdef CONFIG_SMP

enum {
	AFFINITY,
	AFFINITY_LIST,
	EFFECTIVE,
	EFFECTIVE_LIST,
};

static int show_irq_affinity(int type, struct seq_file *m)
{
	struct irq_desc *desc = irq_to_desc((long)m->private);
	const struct cpumask *mask;

	switch (type) {
	case AFFINITY:
	case AFFINITY_LIST:
		mask = desc->irq_common_data.affinity;
#ifdef CONFIG_GENERIC_PENDING_IRQ
		if (irqd_is_setaffinity_pending(&desc->irq_data))
			mask = desc->pending_mask;
#endif
		break;
	case EFFECTIVE:
	case EFFECTIVE_LIST:
#ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
		mask = irq_data_get_effective_affinity_mask(&desc->irq_data);
		break;
#endif
	default:
		return -EINVAL;
	}

	switch (type) {
	case AFFINITY_LIST:
	case EFFECTIVE_LIST:
		seq_printf(m, "%*pbl\n", cpumask_pr_args(mask));
		break;
	case AFFINITY:
	case EFFECTIVE:
		seq_printf(m, "%*pb\n", cpumask_pr_args(mask));
		break;
	}
	return 0;
}

static int irq_affinity_hint_proc_show(struct seq_file *m, void *v)
{
	struct irq_desc *desc = irq_to_desc((long)m->private);
	unsigned long flags;
	cpumask_var_t mask;

	if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;

	raw_spin_lock_irqsave(&desc->lock, flags);
	if (desc->affinity_hint)
		cpumask_copy(mask, desc->affinity_hint);
	raw_spin_unlock_irqrestore(&desc->lock, flags);

	seq_printf(m, "%*pb\n", cpumask_pr_args(mask));
	free_cpumask_var(mask);

	return 0;
}

int no_irq_affinity;
static int irq_affinity_proc_show(struct seq_file *m, void *v)
{
	return show_irq_affinity(AFFINITY, m);
}

static int irq_affinity_list_proc_show(struct seq_file *m, void *v)
{
	return show_irq_affinity(AFFINITY_LIST, m);
}

static ssize_t write_irq_affinity(int type, struct file *file,
		const char __user *buffer, size_t count, loff_t *pos)
{
	unsigned int irq = (int)(long)PDE_DATA(file_inode(file));
	cpumask_var_t new_value;
	int err;

	if (!irq_can_set_affinity_usr(irq) || no_irq_affinity)
		return -EIO;

	if (!alloc_cpumask_var(&new_value, GFP_KERNEL))
		return -ENOMEM;

	if (type)
		err = cpumask_parselist_user(buffer, count, new_value);
	else
		err = cpumask_parse_user(buffer, count, new_value);
	if (err)
		goto free_cpumask;

	/*
	 * Do not allow disabling IRQs completely - it's a too easy
	 * way to make the system unusable accidentally :-) At least
	 * one online CPU still has to be targeted.
	 */
	if (!cpumask_intersects(new_value, cpu_online_mask)) {
		/*
		 * Special case for empty set - allow the architecture code
		 * to set default SMP affinity.
		 */
		err = irq_select_affinity_usr(irq) ? -EINVAL : count;
	} else {
		err = irq_set_affinity(irq, new_value);
		if (!err)
			err = count;
	}

free_cpumask:
	free_cpumask_var(new_value);
	return err;
}
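/*
 * Example (illustrative only, not part of this file): the handler
 * above accepts either a hex bitmask (smp_affinity, parsed by
 * cpumask_parse_user()) or a CPU list (smp_affinity_list, parsed by
 * cpumask_parselist_user()). A minimal userspace sketch, assuming a
 * hypothetical IRQ number 30 and root privileges:
 *
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		// "f" = CPUs 0-3 as a hex bitmask
 *		FILE *f = fopen("/proc/irq/30/smp_affinity", "w");
 *
 *		if (!f || fprintf(f, "f\n") < 0 || fclose(f))
 *			return 1;
 *
 *		// "0-3" = the same set in list syntax
 *		f = fopen("/proc/irq/30/smp_affinity_list", "w");
 *		if (!f || fprintf(f, "0-3\n") < 0 || fclose(f))
 *			return 1;
 *		return 0;
 *	}
 *
 * A mask that targets no online CPU does not disable the IRQ: the
 * write falls back to irq_select_affinity_usr(), which either installs
 * a default affinity or fails with -EINVAL.
 */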
static ssize_t irq_affinity_proc_write(struct file *file,
		const char __user *buffer, size_t count, loff_t *pos)
{
	return write_irq_affinity(0, file, buffer, count, pos);
}

static ssize_t irq_affinity_list_proc_write(struct file *file,
		const char __user *buffer, size_t count, loff_t *pos)
{
	return write_irq_affinity(1, file, buffer, count, pos);
}

static int irq_affinity_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, irq_affinity_proc_show, PDE_DATA(inode));
}

static int irq_affinity_list_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, irq_affinity_list_proc_show, PDE_DATA(inode));
}

static const struct proc_ops irq_affinity_proc_ops = {
	.proc_open	= irq_affinity_proc_open,
	.proc_read	= seq_read,
	.proc_lseek	= seq_lseek,
	.proc_release	= single_release,
	.proc_write	= irq_affinity_proc_write,
};

static const struct proc_ops irq_affinity_list_proc_ops = {
	.proc_open	= irq_affinity_list_proc_open,
	.proc_read	= seq_read,
	.proc_lseek	= seq_lseek,
	.proc_release	= single_release,
	.proc_write	= irq_affinity_list_proc_write,
};

#ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
static int irq_effective_aff_proc_show(struct seq_file *m, void *v)
{
	return show_irq_affinity(EFFECTIVE, m);
}

static int irq_effective_aff_list_proc_show(struct seq_file *m, void *v)
{
	return show_irq_affinity(EFFECTIVE_LIST, m);
}
#endif

static int default_affinity_show(struct seq_file *m, void *v)
{
	seq_printf(m, "%*pb\n", cpumask_pr_args(irq_default_affinity));
	return 0;
}

static ssize_t default_affinity_write(struct file *file,
		const char __user *buffer, size_t count, loff_t *ppos)
{
	cpumask_var_t new_value;
	int err;

	if (!alloc_cpumask_var(&new_value, GFP_KERNEL))
		return -ENOMEM;

	err = cpumask_parse_user(buffer, count, new_value);
	if (err)
		goto out;

	/*
	 * Do not allow disabling IRQs completely - it's a too easy
	 * way to make the system unusable accidentally :-) At least
	 * one online CPU still has to be targeted.
	 */
	if (!cpumask_intersects(new_value, cpu_online_mask)) {
		err = -EINVAL;
		goto out;
	}

	cpumask_copy(irq_default_affinity, new_value);
	err = count;

out:
	free_cpumask_var(new_value);
	return err;
}

static int default_affinity_open(struct inode *inode, struct file *file)
{
	return single_open(file, default_affinity_show, PDE_DATA(inode));
}

static const struct proc_ops default_affinity_proc_ops = {
	.proc_open	= default_affinity_open,
	.proc_read	= seq_read,
	.proc_lseek	= seq_lseek,
	.proc_release	= single_release,
	.proc_write	= default_affinity_write,
};
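/*
 * Example (illustrative only): default_affinity_show() prints the mask
 * with the "%*pb" bitmap format, i.e. 32-bit hex words separated by
 * commas, most significant word first (e.g. "00000000,0000000f" for
 * CPUs 0-3 on a 64-CPU kernel). A userspace sketch that decodes it:
 *
 *	#include <stdio.h>
 *	#include <stdlib.h>
 *	#include <string.h>
 *
 *	int main(void)
 *	{
 *		char buf[256];
 *		FILE *f = fopen("/proc/irq/default_smp_affinity", "r");
 *
 *		if (!f || !fgets(buf, sizeof(buf), f))
 *			return 1;
 *		fclose(f);
 *
 *		// Walk the hex words right to left; the last word
 *		// holds CPUs 0-31, the one before it CPUs 32-63, ...
 *		int base = 0;
 *		for (char *word = strrchr(buf, ',');;
 *		     word = strrchr(buf, ',')) {
 *			unsigned long w = strtoul(word ? word + 1 : buf,
 *						  NULL, 16);
 *			for (int b = 0; b < 32; b++)
 *				if (w & (1UL << b))
 *					printf("CPU%d\n", base + b);
 *			if (!word)
 *				break;
 *			*word = '\0';
 *			base += 32;
 *		}
 *		return 0;
 *	}
 */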
static int irq_node_proc_show(struct seq_file *m, void *v)
{
	struct irq_desc *desc = irq_to_desc((long) m->private);

	seq_printf(m, "%d\n", irq_desc_get_node(desc));
	return 0;
}
#endif

static int irq_spurious_proc_show(struct seq_file *m, void *v)
{
	struct irq_desc *desc = irq_to_desc((long) m->private);

	seq_printf(m, "count %u\n" "unhandled %u\n" "last_unhandled %u ms\n",
		   desc->irq_count, desc->irqs_unhandled,
		   jiffies_to_msecs(desc->last_unhandled));
	return 0;
}

#define MAX_NAMELEN 128

static int name_unique(unsigned int irq, struct irqaction *new_action)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irqaction *action;
	unsigned long flags;
	int ret = 1;

	raw_spin_lock_irqsave(&desc->lock, flags);
	for_each_action_of_desc(desc, action) {
		if ((action != new_action) && action->name &&
		    !strcmp(new_action->name, action->name)) {
			ret = 0;
			break;
		}
	}
	raw_spin_unlock_irqrestore(&desc->lock, flags);
	return ret;
}

void register_handler_proc(unsigned int irq, struct irqaction *action)
{
	char name [MAX_NAMELEN];
	struct irq_desc *desc = irq_to_desc(irq);

	if (!desc->dir || action->dir || !action->name ||
	    !name_unique(irq, action))
		return;

	snprintf(name, MAX_NAMELEN, "%s", action->name);

	/* create /proc/irq/1234/handler/ */
	action->dir = proc_mkdir(name, desc->dir);
}

#undef MAX_NAMELEN
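/*
 * Example (illustrative only): register_handler_proc() runs on the
 * request_irq() path, so the directory name is whatever string the
 * driver passed as devname. A minimal sketch with hypothetical names
 * ("mydev", mydev_interrupt and mydev_cookie are not part of this
 * file):
 *
 *	#include <linux/interrupt.h>
 *
 *	static int mydev_cookie;
 *
 *	static irqreturn_t mydev_interrupt(int irq, void *dev_id)
 *	{
 *		return IRQ_HANDLED;
 *	}
 *
 *	// On success, /proc/irq/30/mydev/ appears, provided no other
 *	// action on IRQ 30 already uses that name (see name_unique()).
 *	static int mydev_setup(void)
 *	{
 *		return request_irq(30, mydev_interrupt, IRQF_SHARED,
 *				   "mydev", &mydev_cookie);
 *	}
 */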
#define MAX_NAMELEN 10

void register_irq_proc(unsigned int irq, struct irq_desc *desc)
{
	static DEFINE_MUTEX(register_lock);
	void __maybe_unused *irqp = (void *)(unsigned long) irq;
	char name [MAX_NAMELEN];

	if (!root_irq_dir || (desc->irq_data.chip == &no_irq_chip))
		return;

	/*
	 * irq directories are registered only when a handler is
	 * added, not when the descriptor is created, so multiple
	 * tasks might try to register at the same time.
	 */
	mutex_lock(&register_lock);

	if (desc->dir)
		goto out_unlock;

	sprintf(name, "%d", irq);

	/* create /proc/irq/1234 */
	desc->dir = proc_mkdir(name, root_irq_dir);
	if (!desc->dir)
		goto out_unlock;

#ifdef CONFIG_SMP
	/* create /proc/irq/<irq>/smp_affinity */
	proc_create_data("smp_affinity", 0644, desc->dir,
			 &irq_affinity_proc_ops, irqp);

	/* create /proc/irq/<irq>/affinity_hint */
	proc_create_single_data("affinity_hint", 0444, desc->dir,
				irq_affinity_hint_proc_show, irqp);

	/* create /proc/irq/<irq>/smp_affinity_list */
	proc_create_data("smp_affinity_list", 0644, desc->dir,
			 &irq_affinity_list_proc_ops, irqp);

	proc_create_single_data("node", 0444, desc->dir, irq_node_proc_show,
				irqp);
# ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
	proc_create_single_data("effective_affinity", 0444, desc->dir,
				irq_effective_aff_proc_show, irqp);
	proc_create_single_data("effective_affinity_list", 0444, desc->dir,
				irq_effective_aff_list_proc_show, irqp);
# endif
#endif
	proc_create_single_data("spurious", 0444, desc->dir,
				irq_spurious_proc_show, (void *)(long)irq);

out_unlock:
	mutex_unlock(&register_lock);
}

void unregister_irq_proc(unsigned int irq, struct irq_desc *desc)
{
	char name [MAX_NAMELEN];

	if (!root_irq_dir || !desc->dir)
		return;
#ifdef CONFIG_SMP
	remove_proc_entry("smp_affinity", desc->dir);
	remove_proc_entry("affinity_hint", desc->dir);
	remove_proc_entry("smp_affinity_list", desc->dir);
	remove_proc_entry("node", desc->dir);
# ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
	remove_proc_entry("effective_affinity", desc->dir);
	remove_proc_entry("effective_affinity_list", desc->dir);
# endif
#endif
	remove_proc_entry("spurious", desc->dir);

	sprintf(name, "%u", irq);
	remove_proc_entry(name, root_irq_dir);
}

#undef MAX_NAMELEN

void unregister_handler_proc(unsigned int irq, struct irqaction *action)
{
	proc_remove(action->dir);
}

static void register_default_affinity_proc(void)
{
#ifdef CONFIG_SMP
	proc_create("irq/default_smp_affinity", 0644, NULL,
		    &default_affinity_proc_ops);
#endif
}
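/*
 * Taken together, the functions above maintain a tree like the
 * following for a hypothetical IRQ 30 on an SMP kernel with
 * CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK enabled:
 *
 *	/proc/irq/default_smp_affinity
 *	/proc/irq/30/smp_affinity
 *	/proc/irq/30/smp_affinity_list
 *	/proc/irq/30/affinity_hint
 *	/proc/irq/30/node
 *	/proc/irq/30/effective_affinity
 *	/proc/irq/30/effective_affinity_list
 *	/proc/irq/30/spurious
 *	/proc/irq/30/<devname>/		(one directory per named action)
 */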
void init_irq_proc(void)
{
	unsigned int irq;
	struct irq_desc *desc;

	/* create /proc/irq */
	root_irq_dir = proc_mkdir("irq", NULL);
	if (!root_irq_dir)
		return;

	register_default_affinity_proc();

	/*
	 * Create entries for all existing IRQs.
	 */
	for_each_irq_desc(irq, desc)
		register_irq_proc(irq, desc);
}

#ifdef CONFIG_GENERIC_IRQ_SHOW

int __weak arch_show_interrupts(struct seq_file *p, int prec)
{
	return 0;
}

#ifndef ACTUAL_NR_IRQS
# define ACTUAL_NR_IRQS nr_irqs
#endif

int show_interrupts(struct seq_file *p, void *v)
{
	static int prec;

	unsigned long flags, any_count = 0;
	int i = *(loff_t *) v, j;
	struct irqaction *action;
	struct irq_desc *desc;

	if (i > ACTUAL_NR_IRQS)
		return 0;

	if (i == ACTUAL_NR_IRQS)
		return arch_show_interrupts(p, prec);

	/* print header and calculate the width of the first column */
	if (i == 0) {
		for (prec = 3, j = 1000; prec < 10 && j <= nr_irqs; ++prec)
			j *= 10;

		seq_printf(p, "%*s", prec + 8, "");
		for_each_online_cpu(j)
			seq_printf(p, "CPU%-8d", j);
		seq_putc(p, '\n');
	}

	rcu_read_lock();
	desc = irq_to_desc(i);
	if (!desc)
		goto outsparse;

	if (desc->kstat_irqs)
		for_each_online_cpu(j)
			any_count |= *per_cpu_ptr(desc->kstat_irqs, j);

	if ((!desc->action || irq_desc_is_chained(desc)) && !any_count)
		goto outsparse;

	seq_printf(p, "%*d: ", prec, i);
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", desc->kstat_irqs ?
					*per_cpu_ptr(desc->kstat_irqs, j) : 0);

	raw_spin_lock_irqsave(&desc->lock, flags);
	if (desc->irq_data.chip) {
		if (desc->irq_data.chip->irq_print_chip)
			desc->irq_data.chip->irq_print_chip(&desc->irq_data, p);
		else if (desc->irq_data.chip->name)
			seq_printf(p, " %8s", desc->irq_data.chip->name);
		else
			seq_printf(p, " %8s", "-");
	} else {
		seq_printf(p, " %8s", "None");
	}
	if (desc->irq_data.domain)
		seq_printf(p, " %*d", prec, (int) desc->irq_data.hwirq);
	else
		seq_printf(p, " %*s", prec, "");
#ifdef CONFIG_GENERIC_IRQ_SHOW_LEVEL
	seq_printf(p, " %-8s", irqd_is_level_type(&desc->irq_data) ? "Level" : "Edge");
#endif
	if (desc->name)
		seq_printf(p, "-%-8s", desc->name);

	action = desc->action;
	if (action) {
		seq_printf(p, " %s", action->name);
		while ((action = action->next) != NULL)
			seq_printf(p, ", %s", action->name);
	}

	seq_putc(p, '\n');
	raw_spin_unlock_irqrestore(&desc->lock, flags);
outsparse:
	rcu_read_unlock();
	return 0;
}
#endif
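/*
 * Example (illustrative only): the seq_printf() calls above produce
 * /proc/interrupts lines shaped like
 *
 *	           CPU0       CPU1
 *	 30:      12345          0    my_chip  27 Level     mydev
 *
 * i.e. the irq number, one counter per online CPU, the chip name (or
 * the chip's own irq_print_chip() output), the hwirq number when the
 * irq belongs to a domain, the trigger type when
 * CONFIG_GENERIC_IRQ_SHOW_LEVEL is set, and finally the action
 * name(s), comma separated. The sample values are hypothetical.
 */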