irq.c: diff between commits 9d4d8572a539ef807e21c196f145aa365fd52f0e (old)
and c8177aba37cac6b6dd0e5511fde9fc2d9e7f2f38 (new). Removed lines are
prefixed with "-", added lines with "+"; unprefixed lines are unchanged
context.
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2017 - Cambridge Greys Ltd
 * Copyright (C) 2011 - 2014 Cisco Systems Inc
 * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 * Derived (i.e. mostly copied) from arch/i386/kernel/irq.c:
 * Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar
 */

--- 6 unchanged lines hidden ---

#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <as-layout.h>
#include <kern_util.h>
#include <os.h>
#include <irq_user.h>
#include <irq_kern.h>
-#include <as-layout.h>
+#include <linux/time-internal.h>


extern void free_irqs(void);

/* When epoll triggers, we do not know why it did so,
 * and we can also have different IRQs for read and write.
 * This is why we keep a small irq_reg array for each fd -
 * one entry per IRQ type.
 */
struct irq_reg {
        void *id;
        int irq;
        /* it's cheaper to store this than to query it */
        int events;
        bool active;
        bool pending;
        bool wakeup;
+#ifdef CONFIG_UML_TIME_TRAVEL_SUPPORT
+        bool pending_on_resume;
+        void (*timetravel_handler)(int, int, void *,
+                                   struct time_travel_event *);
+        struct time_travel_event event;
+#endif
};

struct irq_entry {
        struct list_head list;
        int fd;
        struct irq_reg reg[NUM_IRQ_TYPES];
        bool suspended;
        bool sigio_workaround;
};

static DEFINE_SPINLOCK(irq_lock);
static LIST_HEAD(active_fds);
static DECLARE_BITMAP(irqs_allocated, NR_IRQS);
+static bool irqs_suspended;
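
Each registered file descriptor owns one irq_entry on active_fds, and each
IRQ type owns one irq_reg slot inside it, so a single epoll wakeup can fan
out to separate read and write IRQs. A minimal sketch of that lookup,
assuming UML's enum um_irq_type { IRQ_READ, IRQ_WRITE, NUM_IRQ_TYPES } from
irq_kern.h; the helper name is hypothetical and not part of either revision:

    /* illustrative only: a slot is in use iff its event mask is non-zero */
    static struct irq_reg *find_active_reg(struct irq_entry *entry,
                                           enum um_irq_type t)
    {
            if (!entry->reg[t].events)
                    return NULL;
            return &entry->reg[t];
    }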

static void irq_io_loop(struct irq_reg *irq, struct uml_pt_regs *regs)
{
/*
 * irq->active guards against reentry
 * irq->pending accumulates pending requests
 * if pending is raised the irq_handler is re-run
 * until pending is cleared
 */

--- 7 unchanged lines hidden ---

                } while (irq->pending);

                irq->active = true;
        } else {
                irq->pending = true;
        }
}

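The active/pending pair above is a lock-free reentry guard: the first
invocation keeps looping while new events arrive, and a nested invocation
only sets pending, so nothing is lost. The same pattern, reduced to a
self-contained userspace toy (all names here are illustrative):

    #include <stdbool.h>
    #include <stdio.h>

    static bool active = true;   /* guards against reentry */
    static bool pending;         /* records requests seen while running */

    static void handle_event(void)
    {
            if (active) {
                    active = false;
                    do {
                            pending = false;
                            puts("handling one batch of events");
                    } while (pending);   /* re-run until nothing is pending */
                    active = true;
            } else {
                    pending = true;      /* defer to the outer loop */
            }
    }

    int main(void)
    {
            handle_event();
            return 0;
    }
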
-void sigio_handler_suspend(int sig, struct siginfo *unused_si, struct uml_pt_regs *regs)
-{
-        /* nothing */
-}
-
+#ifdef CONFIG_UML_TIME_TRAVEL_SUPPORT
+static void irq_event_handler(struct time_travel_event *ev)
+{
+        struct irq_reg *reg = container_of(ev, struct irq_reg, event);
+
+        /* do nothing if suspended - just to cause a wakeup */
+        if (irqs_suspended)
+                return;
+
+        generic_handle_irq(reg->irq);
+}
+
+static bool irq_do_timetravel_handler(struct irq_entry *entry,
+                                      enum um_irq_type t)
+{
+        struct irq_reg *reg = &entry->reg[t];
+
+        if (!reg->timetravel_handler)
+                return false;
+
+        /* prevent nesting - we'll get it again later when we SIGIO ourselves */
+        if (reg->pending_on_resume)
+                return true;
+
+        reg->timetravel_handler(reg->irq, entry->fd, reg->id, &reg->event);
+
+        if (!reg->event.pending)
+                return false;
+
+        if (irqs_suspended)
+                reg->pending_on_resume = true;
+        return true;
+}
+#else
+static bool irq_do_timetravel_handler(struct irq_entry *entry,
+                                      enum um_irq_type t)
+{
+        return false;
+}
+#endif
+
+static void sigio_reg_handler(int idx, struct irq_entry *entry, enum um_irq_type t,
+                              struct uml_pt_regs *regs)
+{
+        struct irq_reg *reg = &entry->reg[t];
+
+        if (!reg->events)
+                return;
+
+        if (os_epoll_triggered(idx, reg->events) <= 0)
+                return;
+
+        if (irq_do_timetravel_handler(entry, t))
+                return;
+
+        if (irqs_suspended)
+                return;
+
+        irq_io_loop(reg, regs);
+}
+
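sigio_reg_handler() fixes the dispatch order for each (fd, type) slot:
unused slot or no epoll trigger, then the time-travel handler, then the
suspended check, and only then the normal irq_io_loop() path. A hedged
restatement of that order as a classification helper (the enum and the
function are hypothetical, not part of the patch):

    enum sigio_disposition {
            SIGIO_IGNORED,          /* slot unused, or epoll did not fire */
            SIGIO_TIMETRAVEL,       /* consumed by the time-travel handler */
            SIGIO_SUPPRESSED,       /* IRQs suspended, nothing claimed it */
            SIGIO_DELIVERED,        /* handed to irq_io_loop()/do_IRQ() */
    };

    static enum sigio_disposition classify(struct irq_entry *entry,
                                           enum um_irq_type t, int idx)
    {
            struct irq_reg *reg = &entry->reg[t];

            if (!reg->events || os_epoll_triggered(idx, reg->events) <= 0)
                    return SIGIO_IGNORED;
            if (irq_do_timetravel_handler(entry, t))
                    return SIGIO_TIMETRAVEL;
            if (irqs_suspended)
                    return SIGIO_SUPPRESSED;
            return SIGIO_DELIVERED;
    }
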
void sigio_handler(int sig, struct siginfo *unused_si, struct uml_pt_regs *regs)
{
        struct irq_entry *irq_entry;
        int n, i;

+        if (irqs_suspended && !um_irq_timetravel_handler_used())
+                return;
+
        while (1) {
                /* This is now lockless - epoll keeps back-references to the
                 * irqs which have triggered it, so there is no need to walk
                 * the irq list and lock it every time. We avoid locking by
                 * turning off IO for a specific fd by executing
                 * os_del_epoll_fd(fd) before we do any changes to the actual
                 * data structures.
                 */
                n = os_waiting_for_events_epoll();

--- 5 unchanged lines hidden ---

                        break;
                }

                for (i = 0; i < n; i++) {
                        enum um_irq_type t;

                        irq_entry = os_epoll_get_data_pointer(i);

-                        for (t = 0; t < NUM_IRQ_TYPES; t++) {
-                                int events = irq_entry->reg[t].events;
-
-                                if (!events)
-                                        continue;
-
-                                if (os_epoll_triggered(i, events) > 0)
-                                        irq_io_loop(&irq_entry->reg[t], regs);
-                        }
+                        for (t = 0; t < NUM_IRQ_TYPES; t++)
+                                sigio_reg_handler(i, irq_entry, t, regs);
                }
        }

-        free_irqs();
+        if (!irqs_suspended)
+                free_irqs();
}

static struct irq_entry *get_irq_entry_by_fd(int fd)
{
        struct irq_entry *walk;

        lockdep_assert_held(&irq_lock);

--- 35 unchanged lines hidden ---

}

static void update_or_free_irq_entry(struct irq_entry *entry)
{
        if (!update_irq_entry(entry))
                free_irq_entry(entry, false);
}

-static int activate_fd(int irq, int fd, enum um_irq_type type, void *dev_id)
+static int activate_fd(int irq, int fd, enum um_irq_type type, void *dev_id,
+                       void (*timetravel_handler)(int, int, void *,
+                                                  struct time_travel_event *))
{
        struct irq_entry *irq_entry;
        int err, events = os_event_mask(type);
        unsigned long flags;

        err = os_set_fd_async(fd);
        if (err < 0)
                goto out;

--- 20 unchanged lines hidden ---

                maybe_sigio_broken(fd);
        }

        irq_entry->reg[type].id = dev_id;
        irq_entry->reg[type].irq = irq;
        irq_entry->reg[type].active = true;
        irq_entry->reg[type].events = events;

+#ifdef CONFIG_UML_TIME_TRAVEL_SUPPORT
+        if (um_irq_timetravel_handler_used()) {
+                irq_entry->reg[type].timetravel_handler = timetravel_handler;
+                irq_entry->reg[type].event.fn = irq_event_handler;
+        }
+#endif
+
        WARN_ON(!update_irq_entry(irq_entry));
        spin_unlock_irqrestore(&irq_lock, flags);

        return 0;
out_unlock:
        spin_unlock_irqrestore(&irq_lock, flags);
out:
        return err;

--- 117 unchanged lines hidden ---

                return;

        free_irq_by_irq_and_dev(irq, dev);
        free_irq(irq, dev);
        clear_bit(irq, irqs_allocated);
}
EXPORT_SYMBOL(um_free_irq);

-int um_request_irq(int irq, int fd, enum um_irq_type type,
-                   irq_handler_t handler, unsigned long irqflags,
-                   const char *devname, void *dev_id)
+static int
+_um_request_irq(int irq, int fd, enum um_irq_type type,
+                irq_handler_t handler, unsigned long irqflags,
+                const char *devname, void *dev_id,
+                void (*timetravel_handler)(int, int, void *,
+                                           struct time_travel_event *))
{
        int err;

        if (irq == UM_IRQ_ALLOC) {
                int i;

                for (i = UM_FIRST_DYN_IRQ; i < NR_IRQS; i++) {
                        if (!test_and_set_bit(i, irqs_allocated)) {
                                irq = i;
                                break;
                        }
                }
        }

        if (irq < 0)
                return -ENOSPC;

        if (fd != -1) {
-                err = activate_fd(irq, fd, type, dev_id);
+                err = activate_fd(irq, fd, type, dev_id, timetravel_handler);
                if (err)
                        goto error;
        }

        err = request_irq(irq, handler, irqflags, devname, dev_id);
        if (err < 0)
                goto error;

        return irq;
error:
        clear_bit(irq, irqs_allocated);
        return err;
}
+
+int um_request_irq(int irq, int fd, enum um_irq_type type,
+                   irq_handler_t handler, unsigned long irqflags,
+                   const char *devname, void *dev_id)
+{
+        return _um_request_irq(irq, fd, type, handler, irqflags,
+                               devname, dev_id, NULL);
+}
EXPORT_SYMBOL(um_request_irq);

+#ifdef CONFIG_UML_TIME_TRAVEL_SUPPORT
+int um_request_irq_tt(int irq, int fd, enum um_irq_type type,
+                      irq_handler_t handler, unsigned long irqflags,
+                      const char *devname, void *dev_id,
+                      void (*timetravel_handler)(int, int, void *,
+                                                 struct time_travel_event *))
+{
+        return _um_request_irq(irq, fd, type, handler, irqflags,
+                               devname, dev_id, timetravel_handler);
+}
+EXPORT_SYMBOL(um_request_irq_tt);
+#endif
+
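A driver opts in to time-travel aware delivery by registering through the
new wrapper. A sketch of such a registration; the "mydev" names are
hypothetical, IRQ_READ comes from UML's irq_kern.h, and
time_travel_add_irq_event() is assumed to be the event-scheduling helper
provided by <linux/time-internal.h>:

    static irqreturn_t mydev_isr(int irq, void *dev_id)
    {
            return IRQ_HANDLED;
    }

    /* Runs instead of the normal IRQ path while time travel is active;
     * scheduling the event makes the IRQ fire at a simulation-controlled
     * time, at which point irq_event_handler() calls generic_handle_irq().
     */
    static void mydev_tt_handler(int irq, int fd, void *data,
                                 struct time_travel_event *ev)
    {
            time_travel_add_irq_event(ev);
    }

    static int mydev_setup(int fd, void *priv)
    {
            return um_request_irq_tt(UM_IRQ_ALLOC, fd, IRQ_READ, mydev_isr,
                                     0, "mydev", priv, mydev_tt_handler);
    }
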
#ifdef CONFIG_PM_SLEEP
void um_irqs_suspend(void)
{
        struct irq_entry *entry;
        unsigned long flags;

-        sig_info[SIGIO] = sigio_handler_suspend;
+        irqs_suspended = true;

        spin_lock_irqsave(&irq_lock, flags);
        list_for_each_entry(entry, &active_fds, list) {
                enum um_irq_type t;
-                bool wake = false;
+                bool clear = true;

                for (t = 0; t < NUM_IRQ_TYPES; t++) {
                        if (!entry->reg[t].events)
                                continue;

                        /*
                         * For the SIGIO_WRITE_IRQ, which is used to handle the
                         * SIGIO workaround thread, we need special handling:
                         * enable wake for it itself, but below we tell it about
                         * any FDs that should be suspended.
                         */
                        if (entry->reg[t].wakeup ||
-                            entry->reg[t].irq == SIGIO_WRITE_IRQ) {
-                                wake = true;
+                            entry->reg[t].irq == SIGIO_WRITE_IRQ
+#ifdef CONFIG_UML_TIME_TRAVEL_SUPPORT
+                            || entry->reg[t].timetravel_handler
+#endif
+                           ) {
+                                clear = false;
                                break;
                        }
                }

-                if (!wake) {
+                if (clear) {
                        entry->suspended = true;
                        os_clear_fd_async(entry->fd);
                        entry->sigio_workaround =
                                !__ignore_sigio_fd(entry->fd);
                }
        }
        spin_unlock_irqrestore(&irq_lock, flags);
}

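The rename from "wake" to "clear" inverts the default: the fd's async IO is
cleared on suspend unless some slot still needs it, i.e. it is a wakeup
source, it belongs to the SIGIO workaround thread, or it now has a
time-travel handler. The same test, restated as a hypothetical predicate:

    /* true if this slot forces the fd to stay active across suspend */
    static bool reg_keeps_fd_active(const struct irq_reg *reg)
    {
            if (!reg->events)
                    return false;
            if (reg->wakeup || reg->irq == SIGIO_WRITE_IRQ)
                    return true;
    #ifdef CONFIG_UML_TIME_TRAVEL_SUPPORT
            if (reg->timetravel_handler)
                    return true;
    #endif
            return false;
    }
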
void um_irqs_resume(void)
{
        struct irq_entry *entry;
        unsigned long flags;

-        spin_lock_irqsave(&irq_lock, flags);
-        list_for_each_entry(entry, &active_fds, list) {
+
+        local_irq_save(flags);
+#ifdef CONFIG_UML_TIME_TRAVEL_SUPPORT
+        /*
+         * We don't need to lock anything here since we're in resume
+         * and nothing else is running, but have disabled IRQs so we
+         * don't try anything else with the interrupt list from there.
+         */
+        list_for_each_entry(entry, &active_fds, list) {
+                enum um_irq_type t;
+
+                for (t = 0; t < NUM_IRQ_TYPES; t++) {
+                        struct irq_reg *reg = &entry->reg[t];
+
+                        if (reg->pending_on_resume) {
+                                irq_enter();
+                                generic_handle_irq(reg->irq);
+                                irq_exit();
+                                reg->pending_on_resume = false;
+                        }
+                }
+        }
+#endif
+
+        spin_lock(&irq_lock);
+        list_for_each_entry(entry, &active_fds, list) {
                if (entry->suspended) {
                        int err = os_set_fd_async(entry->fd);

                        WARN(err < 0, "os_set_fd_async returned %d\n", err);
                        entry->suspended = false;

                        if (entry->sigio_workaround) {
                                err = __add_sigio_fd(entry->fd);
                                WARN(err < 0, "__add_sigio_fd returned %d\n", err);
                        }
                }
        }
        spin_unlock_irqrestore(&irq_lock, flags);

-        sig_info[SIGIO] = sigio_handler;
+        irqs_suspended = false;
        send_sigio_to_self();
}

static int normal_irq_set_wake(struct irq_data *d, unsigned int on)
{
        struct irq_entry *entry;
        unsigned long flags;

--- 173 unchanged lines hidden ---