// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 1992, 1998-2004 Linus Torvalds, Ingo Molnar
 *
 * This file contains the /proc/irq/ handling code.
 */

#include <linux/irq.h>
#include <linux/gfp.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/mutex.h>

#include "internals.h"

/*
 * Access rules:
 *
 * procfs protects read/write of /proc/irq/N/ files against a
 * concurrent free of the interrupt descriptor. remove_proc_entry()
 * immediately prevents new read/writes to happen and waits for
 * already running read/write functions to complete.
 *
 * We remove the proc entries first and then delete the interrupt
 * descriptor from the radix tree and free it. So it is guaranteed
 * that irq_to_desc(N) is valid as long as the read/writes are
 * permitted by procfs.
 *
 * The read from /proc/interrupts is a different problem because there
 * is no protection. So the lookup and the access to irqdesc
 * information must be protected by sparse_irq_lock.
 */

/* /proc/irq - created once in init_irq_proc(), never removed */
static struct proc_dir_entry *root_irq_dir;

#ifdef CONFIG_SMP

/* Selector for show_irq_affinity(): which mask and which output format */
enum {
	AFFINITY,
	AFFINITY_LIST,
	EFFECTIVE,
	EFFECTIVE_LIST,
};

/*
 * Print the requested affinity mask of the irq stored in m->private.
 * The *_LIST variants use the cpulist format ("%*pbl"), the others the
 * bitmask format ("%*pb"). Returns 0 on success, -EINVAL when the
 * effective mask is requested but not supported by the configuration.
 */
static int show_irq_affinity(int type, struct seq_file *m)
{
	struct irq_desc *desc = irq_to_desc((long)m->private);
	const struct cpumask *mask;

	switch (type) {
	case AFFINITY:
	case AFFINITY_LIST:
		mask = desc->irq_common_data.affinity;
#ifdef CONFIG_GENERIC_PENDING_IRQ
		/*
		 * If an affinity change is still pending, show the target
		 * mask instead of the not-yet-updated current one.
		 */
		if (irqd_is_setaffinity_pending(&desc->irq_data))
			mask = desc->pending_mask;
#endif
		break;
	case EFFECTIVE:
	case EFFECTIVE_LIST:
#ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
		mask = irq_data_get_effective_affinity_mask(&desc->irq_data);
		break;
#endif
		/* Without the effective mask support this falls through */
	default:
		return -EINVAL;
	}

	switch (type) {
	case AFFINITY_LIST:
	case EFFECTIVE_LIST:
		seq_printf(m, "%*pbl\n", cpumask_pr_args(mask));
		break;
	case AFFINITY:
	case EFFECTIVE:
		seq_printf(m, "%*pb\n", cpumask_pr_args(mask));
		break;
	}
	return 0;
}

/*
 * Show /proc/irq/<irq>/affinity_hint. The hint is copied under
 * desc->lock into a local cpumask so the (driver-owned) hint pointer
 * cannot be yanked away while we print it. Prints the empty mask when
 * no hint is set.
 */
static int irq_affinity_hint_proc_show(struct seq_file *m, void *v)
{
	struct irq_desc *desc = irq_to_desc((long)m->private);
	unsigned long flags;
	cpumask_var_t mask;

	if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;

	raw_spin_lock_irqsave(&desc->lock, flags);
	if (desc->affinity_hint)
		cpumask_copy(mask, desc->affinity_hint);
	raw_spin_unlock_irqrestore(&desc->lock, flags);

	seq_printf(m, "%*pb\n", cpumask_pr_args(mask));
	free_cpumask_var(mask);

	return 0;
}

/* Architectures may veto user-supplied masks; default accepts anything */
#ifndef is_affinity_mask_valid
#define is_affinity_mask_valid(val) 1
#endif

/* Set non-zero (e.g. by "noirqbalance") to reject affinity writes */
int no_irq_affinity;
static int irq_affinity_proc_show(struct seq_file *m, void *v)
{
	return show_irq_affinity(AFFINITY, m);
}

static int irq_affinity_list_proc_show(struct seq_file *m, void *v)
{
	return show_irq_affinity(AFFINITY_LIST, m);
}


/*
 * Common handler for writes to /proc/irq/<irq>/smp_affinity{,_list}.
 * @type selects the input format: 0 = bitmask, non-zero = cpulist.
 * Returns @count on success or a negative errno.
 */
static ssize_t write_irq_affinity(int type, struct file *file,
		const char __user *buffer, size_t count, loff_t *pos)
{
	unsigned int irq = (int)(long)PDE_DATA(file_inode(file));
	cpumask_var_t new_value;
	int err;

	if (!irq_can_set_affinity_usr(irq) || no_irq_affinity)
		return -EIO;

	if (!alloc_cpumask_var(&new_value, GFP_KERNEL))
		return -ENOMEM;

	if (type)
		err = cpumask_parselist_user(buffer, count, new_value);
	else
		err = cpumask_parse_user(buffer, count, new_value);
	if (err)
		goto free_cpumask;

	if (!is_affinity_mask_valid(new_value)) {
		err = -EINVAL;
		goto free_cpumask;
	}

	/*
	 * Do not allow disabling IRQs completely - it's a too easy
	 * way to make the system unusable accidentally :-) At least
	 * one online CPU still has to be targeted.
	 */
	if (!cpumask_intersects(new_value, cpu_online_mask)) {
		/*
		 * Special case for empty set - allow the architecture code
		 * to set default SMP affinity.
		 */
		err = irq_select_affinity_usr(irq) ? -EINVAL : count;
	} else {
		err = irq_set_affinity(irq, new_value);
		if (!err)
			err = count;
	}

free_cpumask:
	free_cpumask_var(new_value);
	return err;
}

static ssize_t irq_affinity_proc_write(struct file *file,
		const char __user *buffer, size_t count, loff_t *pos)
{
	return write_irq_affinity(0, file, buffer, count, pos);
}

static ssize_t irq_affinity_list_proc_write(struct file *file,
		const char __user *buffer, size_t count, loff_t *pos)
{
	return write_irq_affinity(1, file, buffer, count, pos);
}

static int irq_affinity_proc_open(struct inode *inode, struct file *file)
{
	/* Hand the irq number (stored as PDE data) to the show function */
	return single_open(file, irq_affinity_proc_show, PDE_DATA(inode));
}

static int irq_affinity_list_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, irq_affinity_list_proc_show, PDE_DATA(inode));
}

static const struct file_operations irq_affinity_proc_fops = {
	.open		= irq_affinity_proc_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
	.write		= irq_affinity_proc_write,
};

static const struct file_operations irq_affinity_list_proc_fops = {
	.open		= irq_affinity_list_proc_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
	.write		= irq_affinity_list_proc_write,
};

#ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
static int irq_effective_aff_proc_show(struct seq_file *m, void *v)
{
	return show_irq_affinity(EFFECTIVE, m);
}

static int irq_effective_aff_list_proc_show(struct seq_file *m, void *v)
{
	return show_irq_affinity(EFFECTIVE_LIST, m);
}
#endif

/* Show /proc/irq/default_smp_affinity */
static int default_affinity_show(struct seq_file *m, void *v)
{
	seq_printf(m, "%*pb\n", cpumask_pr_args(irq_default_affinity));
	return 0;
}

/*
 * Write /proc/irq/default_smp_affinity: parse a bitmask from user space
 * and install it as the default affinity for newly set up interrupts.
 * Rejects masks with no online CPU. Returns @count or a negative errno.
 */
static ssize_t default_affinity_write(struct file *file,
		const char __user *buffer, size_t count, loff_t *ppos)
{
	cpumask_var_t new_value;
	int err;

	if (!alloc_cpumask_var(&new_value, GFP_KERNEL))
		return -ENOMEM;

	err = cpumask_parse_user(buffer, count, new_value);
	if (err)
		goto out;

	if (!is_affinity_mask_valid(new_value)) {
		err = -EINVAL;
		goto out;
	}

	/*
	 * Do not allow disabling IRQs completely - it's a too easy
	 * way to make the system unusable accidentally :-) At least
	 * one online CPU still has to be targeted.
	 */
	if (!cpumask_intersects(new_value, cpu_online_mask)) {
		err = -EINVAL;
		goto out;
	}

	cpumask_copy(irq_default_affinity, new_value);
	err = count;

out:
	free_cpumask_var(new_value);
	return err;
}

static int default_affinity_open(struct inode *inode, struct file *file)
{
	return single_open(file, default_affinity_show, PDE_DATA(inode));
}

static const struct file_operations default_affinity_proc_fops = {
	.open		= default_affinity_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
	.write		= default_affinity_write,
};

/* Show /proc/irq/<irq>/node: the NUMA node of the interrupt descriptor */
static int irq_node_proc_show(struct seq_file *m, void *v)
{
	struct irq_desc *desc = irq_to_desc((long) m->private);

	seq_printf(m, "%d\n", irq_desc_get_node(desc));
	return 0;
}
#endif

/* Show /proc/irq/<irq>/spurious: counters from the spurious-irq detector */
static int irq_spurious_proc_show(struct seq_file *m, void *v)
{
	struct irq_desc *desc = irq_to_desc((long) m->private);

	seq_printf(m, "count %u\n" "unhandled %u\n" "last_unhandled %u ms\n",
		   desc->irq_count, desc->irqs_unhandled,
		   jiffies_to_msecs(desc->last_unhandled));
	return 0;
}

#define MAX_NAMELEN 128

/*
 * Return 1 when @new_action's name does not collide with the name of
 * any other action on @irq, 0 otherwise. Used to avoid creating two
 * /proc/irq/<irq>/<name> directories with the same name for shared
 * interrupts. Walks the action chain under desc->lock.
 */
static int name_unique(unsigned int irq, struct irqaction *new_action)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irqaction *action;
	unsigned long flags;
	int ret = 1;

	raw_spin_lock_irqsave(&desc->lock, flags);
	for_each_action_of_desc(desc, action) {
		if ((action != new_action) && action->name &&
				!strcmp(new_action->name, action->name)) {
			ret = 0;
			break;
		}
	}
	raw_spin_unlock_irqrestore(&desc->lock, flags);
	return ret;
}

/*
 * Create the per-handler directory /proc/irq/<irq>/<name> for @action.
 * Silently does nothing when the irq directory does not exist yet, the
 * action already has a directory, has no name, or the name is not
 * unique among the actions of a shared interrupt.
 */
void register_handler_proc(unsigned int irq, struct irqaction *action)
{
	char name [MAX_NAMELEN];
	struct irq_desc *desc = irq_to_desc(irq);

	if (!desc->dir || action->dir || !action->name ||
					!name_unique(irq, action))
		return;

	snprintf(name, MAX_NAMELEN, "%s", action->name);

	/* create /proc/irq/1234/handler/ */
	action->dir = proc_mkdir(name, desc->dir);
}

#undef MAX_NAMELEN

#define MAX_NAMELEN 10

/*
 * Create /proc/irq/<irq> and its control files for @desc. Safe to call
 * multiple times and from concurrent tasks; the first caller wins and
 * later calls return early under register_lock.
 */
void register_irq_proc(unsigned int irq, struct irq_desc *desc)
{
	static DEFINE_MUTEX(register_lock);
	void __maybe_unused *irqp = (void *)(unsigned long) irq;
	char name [MAX_NAMELEN];

	/* No procfs entries for unrequestable (no_irq_chip) interrupts */
	if (!root_irq_dir || (desc->irq_data.chip == &no_irq_chip))
		return;

	/*
	 * irq directories are registered only when a handler is
	 * added, not when the descriptor is created, so multiple
	 * tasks might try to register at the same time.
	 */
	mutex_lock(&register_lock);

	if (desc->dir)
		goto out_unlock;

	sprintf(name, "%d", irq);

	/* create /proc/irq/1234 */
	desc->dir = proc_mkdir(name, root_irq_dir);
	if (!desc->dir)
		goto out_unlock;

#ifdef CONFIG_SMP
	/* create /proc/irq/<irq>/smp_affinity */
	proc_create_data("smp_affinity", 0644, desc->dir,
			 &irq_affinity_proc_fops, irqp);

	/* create /proc/irq/<irq>/affinity_hint */
	proc_create_single_data("affinity_hint", 0444, desc->dir,
			irq_affinity_hint_proc_show, irqp);

	/* create /proc/irq/<irq>/smp_affinity_list */
	proc_create_data("smp_affinity_list", 0644, desc->dir,
			 &irq_affinity_list_proc_fops, irqp);

	proc_create_single_data("node", 0444, desc->dir, irq_node_proc_show,
			irqp);
# ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
	proc_create_single_data("effective_affinity", 0444, desc->dir,
			irq_effective_aff_proc_show, irqp);
	proc_create_single_data("effective_affinity_list", 0444, desc->dir,
			irq_effective_aff_list_proc_show, irqp);
# endif
#endif
	proc_create_single_data("spurious", 0444, desc->dir,
			irq_spurious_proc_show, (void *)(long)irq);

out_unlock:
	mutex_unlock(&register_lock);
}

/*
 * Tear down /proc/irq/<irq>: remove every per-irq control file and then
 * the directory itself. Counterpart of register_irq_proc(); per the
 * access rules above this must happen before the descriptor is freed.
 */
void unregister_irq_proc(unsigned int irq, struct irq_desc *desc)
{
	char name [MAX_NAMELEN];

	if (!root_irq_dir || !desc->dir)
		return;
#ifdef CONFIG_SMP
	remove_proc_entry("smp_affinity", desc->dir);
	remove_proc_entry("affinity_hint", desc->dir);
	remove_proc_entry("smp_affinity_list", desc->dir);
	remove_proc_entry("node", desc->dir);
# ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
	remove_proc_entry("effective_affinity", desc->dir);
	remove_proc_entry("effective_affinity_list", desc->dir);
# endif
#endif
	remove_proc_entry("spurious", desc->dir);

	sprintf(name, "%u", irq);
	remove_proc_entry(name, root_irq_dir);
}

#undef MAX_NAMELEN

/* Remove the /proc/irq/<irq>/<name> directory created for @action */
void unregister_handler_proc(unsigned int irq, struct irqaction *action)
{
	proc_remove(action->dir);
}

/* Create /proc/irq/default_smp_affinity (SMP only) */
static void register_default_affinity_proc(void)
{
#ifdef CONFIG_SMP
	proc_create("irq/default_smp_affinity", 0644, NULL,
		    &default_affinity_proc_fops);
#endif
}

/*
 * Boot-time setup: create /proc/irq, the default affinity file and
 * the per-irq directories for all already allocated descriptors.
 */
void init_irq_proc(void)
{
	unsigned int irq;
	struct irq_desc *desc;

	/* create /proc/irq */
	root_irq_dir = proc_mkdir("irq", NULL);
	if (!root_irq_dir)
		return;

	register_default_affinity_proc();

	/*
	 * Create entries for all existing IRQs.
	 */
	for_each_irq_desc(irq, desc)
		register_irq_proc(irq, desc);
}

#ifdef CONFIG_GENERIC_IRQ_SHOW

/* Architectures override this to append arch-specific rows (NMI, ...) */
int __weak arch_show_interrupts(struct seq_file *p, int prec)
{
	return 0;
}

#ifndef ACTUAL_NR_IRQS
# define ACTUAL_NR_IRQS nr_irqs
#endif

/*
 * seq_file show method for /proc/interrupts. Called once per irq number
 * @v; emits the header row at i == 0 and delegates to
 * arch_show_interrupts() after the last irq. Descriptor lookup is done
 * under rcu_read_lock() because there is no procfs protection here (see
 * the access rules at the top of this file); the per-row fields are
 * then read under desc->lock.
 */
int show_interrupts(struct seq_file *p, void *v)
{
	/* Width of the irq-number column; computed once at i == 0 */
	static int prec;

	unsigned long flags, any_count = 0;
	int i = *(loff_t *) v, j;
	struct irqaction *action;
	struct irq_desc *desc;

	if (i > ACTUAL_NR_IRQS)
		return 0;

	if (i == ACTUAL_NR_IRQS)
		return arch_show_interrupts(p, prec);

	/* print header and calculate the width of the first column */
	if (i == 0) {
		for (prec = 3, j = 1000; prec < 10 && j <= nr_irqs; ++prec)
			j *= 10;

		seq_printf(p, "%*s", prec + 8, "");
		for_each_online_cpu(j)
			seq_printf(p, "CPU%-8d", j);
		seq_putc(p, '\n');
	}

	rcu_read_lock();
	desc = irq_to_desc(i);
	if (!desc)
		goto outsparse;

	if (desc->kstat_irqs)
		for_each_online_cpu(j)
			any_count |= *per_cpu_ptr(desc->kstat_irqs, j);

	/* Skip rows for unrequested/chained interrupts that never fired */
	if ((!desc->action || irq_desc_is_chained(desc)) && !any_count)
		goto outsparse;

	seq_printf(p, "%*d: ", prec, i);
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", desc->kstat_irqs ?
					*per_cpu_ptr(desc->kstat_irqs, j) : 0);

	raw_spin_lock_irqsave(&desc->lock, flags);
	if (desc->irq_data.chip) {
		if (desc->irq_data.chip->irq_print_chip)
			desc->irq_data.chip->irq_print_chip(&desc->irq_data, p);
		else if (desc->irq_data.chip->name)
			seq_printf(p, " %8s", desc->irq_data.chip->name);
		else
			seq_printf(p, " %8s", "-");
	} else {
		seq_printf(p, " %8s", "None");
	}
	if (desc->irq_data.domain)
		seq_printf(p, " %*d", prec, (int) desc->irq_data.hwirq);
	else
		seq_printf(p, " %*s", prec, "");
#ifdef CONFIG_GENERIC_IRQ_SHOW_LEVEL
	seq_printf(p, " %-8s", irqd_is_level_type(&desc->irq_data) ? "Level" : "Edge");
#endif
	if (desc->name)
		seq_printf(p, "-%-8s", desc->name);

	action = desc->action;
	if (action) {
		seq_printf(p, " %s", action->name);
		while ((action = action->next) != NULL)
			seq_printf(p, ", %s", action->name);
	}

	seq_putc(p, '\n');
	raw_spin_unlock_irqrestore(&desc->lock, flags);
outsparse:
	rcu_read_unlock();
	return 0;
}
#endif