xref: /openbmc/linux/arch/um/kernel/irq.c (revision e868d61272caa648214046a096e5a6bfc068dc8c)
/*
 * Copyright (C) 2000 Jeff Dike (jdike@karaya.com)
 * Licensed under the GPL
 * Derived (i.e. mostly copied) from arch/i386/kernel/irq.c:
 *	Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar
 */

#include "linux/kernel.h"
#include "linux/module.h"
#include "linux/smp.h"
#include "linux/kernel_stat.h"
#include "linux/interrupt.h"
#include "linux/random.h"
#include "linux/slab.h"
#include "linux/file.h"
#include "linux/proc_fs.h"
#include "linux/init.h"
#include "linux/seq_file.h"
#include "linux/profile.h"
#include "linux/hardirq.h"
#include "asm/irq.h"
#include "asm/hw_irq.h"
#include "asm/atomic.h"
#include "asm/signal.h"
#include "asm/system.h"
#include "asm/errno.h"
#include "asm/uaccess.h"
#include "kern_util.h"
#include "irq_user.h"
#include "irq_kern.h"
#include "os.h"
#include "sigio.h"
#include "um_malloc.h"
#include "misc_constants.h"
#include "as-layout.h"

/*
 * Generic, controller-independent functions:
 */

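/*
 * seq_file show callback behind /proc/interrupts: one row per IRQ
 * with per-CPU counts, the chip name, and the names of the chained
 * actions.
 */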
int show_interrupts(struct seq_file *p, void *v)
{
	int i = *(loff_t *) v, j;
	struct irqaction *action;
	unsigned long flags;

	if (i == 0) {
		seq_printf(p, "           ");
		for_each_online_cpu(j)
			seq_printf(p, "CPU%d       ", j);
		seq_putc(p, '\n');
	}

	if (i < NR_IRQS) {
		spin_lock_irqsave(&irq_desc[i].lock, flags);
		action = irq_desc[i].action;
		if (!action)
			goto skip;
		seq_printf(p, "%3d: ", i);
#ifndef CONFIG_SMP
		seq_printf(p, "%10u ", kstat_irqs(i));
#else
		for_each_online_cpu(j)
			seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
#endif
		seq_printf(p, " %14s", irq_desc[i].chip->typename);
		seq_printf(p, "  %s", action->name);

		for (action = action->next; action; action = action->next)
			seq_printf(p, ", %s", action->name);

		seq_putc(p, '\n');
skip:
		spin_unlock_irqrestore(&irq_desc[i].lock, flags);
	} else if (i == NR_IRQS) {
		seq_putc(p, '\n');
	}

	return 0;
}

/*
 * This list is accessed under irq_lock, except in sigio_handler,
 * where it is safe from being modified.  IRQ handlers won't change it -
 * if an IRQ source has vanished, it will be freed by free_irqs just
 * before sigio_handler returns.  free_irqs processes a separate list
 * of irqs to free, with its own locking, and comes back here, taking
 * irq_lock, to unlink the list elements.
 */
static struct irq_fd *active_fds = NULL;
static struct irq_fd **last_irq_ptr = &active_fds;

extern void free_irqs(void);

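/*
 * Entry point for SIGIO: poll the registered descriptors and hand
 * each one with pending events to do_IRQ, looping until
 * os_waiting_for_events has nothing left to report (and retrying on
 * -EINTR).
 */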
void sigio_handler(int sig, union uml_pt_regs *regs)
{
	struct irq_fd *irq_fd;
	int n;

	if (smp_sigio_handler())
		return;

	while (1) {
		n = os_waiting_for_events(active_fds);
		if (n <= 0) {
			if (n == -EINTR)
				continue;
			break;
		}

		for (irq_fd = active_fds; irq_fd != NULL; irq_fd = irq_fd->next) {
			if (irq_fd->current_events != 0) {
				irq_fd->current_events = 0;
				do_IRQ(irq_fd->irq, regs);
			}
		}
	}

	free_irqs();
}

static DEFINE_SPINLOCK(irq_lock);

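/*
 * Register a descriptor with the SIGIO machinery: switch the fd to
 * async mode, append a new irq_fd to active_fds, and grow the
 * os-level pollfds array to match.  Returns 0 or a negative errno.
 */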
int activate_fd(int irq, int fd, int type, void *dev_id)
{
	struct pollfd *tmp_pfd;
	struct irq_fd *new_fd, *irq_fd;
	unsigned long flags;
	int pid, events, err, n;

	pid = os_getpid();
	err = os_set_fd_async(fd, pid);
	if (err < 0)
		goto out;

	err = -ENOMEM;
	new_fd = kmalloc(sizeof(struct irq_fd), GFP_KERNEL);
	if (new_fd == NULL)
		goto out;

	if (type == IRQ_READ)
		events = UM_POLLIN | UM_POLLPRI;
	else
		events = UM_POLLOUT;
	*new_fd = ((struct irq_fd) { .next		= NULL,
				     .id		= dev_id,
				     .fd		= fd,
				     .type		= type,
				     .irq		= irq,
				     .pid		= pid,
				     .events		= events,
				     .current_events	= 0 });

	err = -EBUSY;
	spin_lock_irqsave(&irq_lock, flags);
	for (irq_fd = active_fds; irq_fd != NULL; irq_fd = irq_fd->next) {
		if ((irq_fd->fd == fd) && (irq_fd->type == type)) {
			printk("Registering fd %d twice\n", fd);
			printk("Irqs : %d, %d\n", irq_fd->irq, irq);
			printk("Ids : 0x%p, 0x%p\n", irq_fd->id, dev_id);
			goto out_unlock;
		}
	}

	if (type == IRQ_WRITE)
		fd = -1;

	tmp_pfd = NULL;
	n = 0;

	while (1) {
		n = os_create_pollfd(fd, events, tmp_pfd, n);
		if (n == 0)
			break;

		/*
		 * n > 0 means the new pollfd didn't fit into the current
		 * pollfds array, and tmp_pfd is NULL or too small to hold
		 * the new array; at least n bytes are needed.
		 *
		 * We have to drop the lock here in order to call kmalloc,
		 * which might sleep.  If something else came in and
		 * changed the pollfds array while the lock was dropped,
		 * so that the new pollfd still doesn't fit, we free
		 * tmp_pfd and try again.
		 */
		spin_unlock_irqrestore(&irq_lock, flags);
		kfree(tmp_pfd);

		tmp_pfd = kmalloc(n, GFP_KERNEL);
		if (tmp_pfd == NULL)
			goto out_kfree;

		spin_lock_irqsave(&irq_lock, flags);
	}

	*last_irq_ptr = new_fd;
	last_irq_ptr = &new_fd->next;

	spin_unlock_irqrestore(&irq_lock, flags);

	/*
	 * This calls activate_fd, so it has to be outside the critical
	 * section.
	 */
	maybe_sigio_broken(fd, (type == IRQ_READ));

	return 0;

 out_unlock:
	spin_unlock_irqrestore(&irq_lock, flags);
 out_kfree:
	kfree(new_fd);
 out:
	return err;
}

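/*
 * Unlink, under irq_lock, every entry of active_fds for which test()
 * returns true; the actual list surgery is delegated to the os layer.
 */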
static void free_irq_by_cb(int (*test)(struct irq_fd *, void *), void *arg)
{
	unsigned long flags;

	spin_lock_irqsave(&irq_lock, flags);
	os_free_irq_by_cb(test, arg, active_fds, &last_irq_ptr);
	spin_unlock_irqrestore(&irq_lock, flags);
}

struct irq_and_dev {
	int irq;
	void *dev;
};

static int same_irq_and_dev(struct irq_fd *irq, void *d)
{
	struct irq_and_dev *data = d;

	return ((irq->irq == data->irq) && (irq->id == data->dev));
}

void free_irq_by_irq_and_dev(unsigned int irq, void *dev)
{
	struct irq_and_dev data = ((struct irq_and_dev) { .irq  = irq,
							  .dev  = dev });

	free_irq_by_cb(same_irq_and_dev, &data);
}

static int same_fd(struct irq_fd *irq, void *fd)
{
	return (irq->fd == *((int *)fd));
}

void free_irq_by_fd(int fd)
{
	free_irq_by_cb(same_fd, &fd);
}

/* Must be called with irq_lock held */
static struct irq_fd *find_irq_by_fd(int fd, int irqnum, int *index_out)
{
	struct irq_fd *irq;
	int i = 0;
	int fdi;

	for (irq = active_fds; irq != NULL; irq = irq->next) {
		if ((irq->fd == fd) && (irq->irq == irqnum))
			break;
		i++;
	}
	if (irq == NULL) {
		printk("find_irq_by_fd doesn't have descriptor %d\n", fd);
		goto out;
	}
	fdi = os_get_pollfd(i);
	if ((fdi != -1) && (fdi != fd)) {
		printk("find_irq_by_fd - mismatch between active_fds and "
		       "pollfds, fd %d vs %d, need %d\n", irq->fd,
		       fdi, fd);
		irq = NULL;
		goto out;
	}
	*index_out = i;
 out:
	return irq;
}

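/*
 * Re-arm a descriptor after its interrupt has been handled: restore
 * its entry in the pollfds array and re-register it for SIGIO.
 */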
void reactivate_fd(int fd, int irqnum)
{
	struct irq_fd *irq;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&irq_lock, flags);
	irq = find_irq_by_fd(fd, irqnum, &i);
	if (irq == NULL) {
		spin_unlock_irqrestore(&irq_lock, flags);
		return;
	}
	os_set_pollfd(i, irq->fd);
	spin_unlock_irqrestore(&irq_lock, flags);

	add_sigio_fd(fd);
}

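/*
 * The inverse of reactivate_fd: mask the descriptor out of the
 * pollfds array and stop SIGIO delivery for it.
 */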
void deactivate_fd(int fd, int irqnum)
{
	struct irq_fd *irq;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&irq_lock, flags);
	irq = find_irq_by_fd(fd, irqnum, &i);
	if (irq == NULL) {
		spin_unlock_irqrestore(&irq_lock, flags);
		return;
	}

	os_set_pollfd(i, -1);
	spin_unlock_irqrestore(&irq_lock, flags);

	ignore_sigio_fd(fd);
}

/*
 * Called just before shutdown in order to provide a clean exec
 * environment in case the system is rebooting.  No locking because
 * that would cause a pointless shutdown hang if something hadn't
 * released the lock.
 */
int deactivate_all_fds(void)
{
	struct irq_fd *irq;
	int err;

	for (irq = active_fds; irq != NULL; irq = irq->next) {
		err = os_clear_fd_async(irq->fd);
		if (err)
			return err;
	}
	/* If there is a signal already queued, after unblocking ignore it */
	os_set_ioignore();

	return 0;
}

#ifdef CONFIG_MODE_TT
void forward_interrupts(int pid)
{
	struct irq_fd *irq;
	unsigned long flags;
	int err;

	spin_lock_irqsave(&irq_lock, flags);
	for (irq = active_fds; irq != NULL; irq = irq->next) {
		err = os_set_owner(irq->fd, pid);
		if (err < 0) {
			/*
			 * XXX Just remove the irq rather than print out
			 * an infinite stream of these
			 */
			printk("Failed to forward %d to pid %d, err = %d\n",
			       irq->fd, pid, -err);
		}

		irq->pid = pid;
	}
	spin_unlock_irqrestore(&irq_lock, flags);
}
#endif

/*
 * do_IRQ handles all normal device IRQs (the special SMP cross-CPU
 * interrupts have their own specific handlers).
 */
unsigned int do_IRQ(int irq, union uml_pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs((struct pt_regs *)regs);

	irq_enter();
	__do_IRQ(irq);
	irq_exit();
	set_irq_regs(old_regs);
	return 1;
}

int um_request_irq(unsigned int irq, int fd, int type,
		   irq_handler_t handler,
		   unsigned long irqflags, const char *devname,
		   void *dev_id)
{
	int err;

	err = request_irq(irq, handler, irqflags, devname, dev_id);
	if (err)
		return err;

	if (fd != -1)
		err = activate_fd(irq, fd, type, dev_id);
	return err;
}
EXPORT_SYMBOL(um_request_irq);
EXPORT_SYMBOL(reactivate_fd);

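/*
 * Illustrative sketch (not from the original source) of how a UML
 * driver might hook a host file descriptor up to an IRQ; the IRQ
 * number, handler, fd, and device name below are hypothetical:
 *
 *	static irqreturn_t my_intr(int irq, void *dev_id)
 *	{
 *		... drain whatever made my_fd readable ...
 *		reactivate_fd(my_fd, MY_IRQ);
 *		return IRQ_HANDLED;
 *	}
 *
 *	err = um_request_irq(MY_IRQ, my_fd, IRQ_READ, my_intr,
 *			     IRQF_DISABLED, "my_device", dev);
 *
 * The reactivate_fd call re-arms the descriptor in the pollfds array
 * after the handler has consumed the data that made it ready.
 */
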
/*
 * hw_interrupt_type must define (startup || enable) &&
 * (shutdown || disable) && end
 */
static void dummy(unsigned int irq)
{
}

/* This is used for everything other than the timer. */
static struct hw_interrupt_type normal_irq_type = {
	.typename = "SIGIO",
	.release = free_irq_by_irq_and_dev,
	.disable = dummy,
	.enable = dummy,
	.ack = dummy,
	.end = dummy
};

static struct hw_interrupt_type SIGVTALRM_irq_type = {
	.typename = "SIGVTALRM",
	.release = free_irq_by_irq_and_dev,
	.shutdown = dummy, /* never called */
	.disable = dummy,
	.enable = dummy,
	.ack = dummy,
	.end = dummy
};

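/*
 * Wire up the irq_desc table at boot: the timer IRQ gets the
 * SIGVTALRM chip, every other IRQ gets the generic SIGIO chip, and
 * each descriptor starts disabled with no action until enable_irq
 * brings it up.
 */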
void __init init_IRQ(void)
{
	int i;

	irq_desc[TIMER_IRQ].status = IRQ_DISABLED;
	irq_desc[TIMER_IRQ].action = NULL;
	irq_desc[TIMER_IRQ].depth = 1;
	irq_desc[TIMER_IRQ].chip = &SIGVTALRM_irq_type;
	enable_irq(TIMER_IRQ);
	for (i = 1; i < NR_IRQS; i++) {
		irq_desc[i].status = IRQ_DISABLED;
		irq_desc[i].action = NULL;
		irq_desc[i].depth = 1;
		irq_desc[i].chip = &normal_irq_type;
		enable_irq(i);
	}
}

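/*
 * Helper for AIO: create a pipe, register its read end as an
 * interrupt source on the given irq, and return the write end on
 * success (or a negative error), so that writing to the returned fd
 * raises the interrupt.
 */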
int init_aio_irq(int irq, char *name, irq_handler_t handler)
{
	int fds[2], err;

	err = os_pipe(fds, 1, 1);
	if (err) {
		printk("init_aio_irq - os_pipe failed, err = %d\n", -err);
		goto out;
	}

	err = um_request_irq(irq, fds[0], IRQ_READ, handler,
			     IRQF_DISABLED | IRQF_SAMPLE_RANDOM, name,
			     (void *) (long) fds[0]);
	if (err) {
		printk("init_aio_irq - um_request_irq failed, err = %d\n",
		       err);
		goto out_close;
	}

	err = fds[1];
	goto out;

 out_close:
	os_close_file(fds[0]);
	os_close_file(fds[1]);
 out:
	return err;
}

/*
 * IRQ stack entry and exit:
 *
 * Unlike i386, which receives IRQs on the normal kernel stack and
 * switches over to the IRQ stack after some preparation, UML uses
 * sigaltstack to receive signals on a separate stack from the start.
 * These two functions make sure the rest of the kernel won't be too
 * upset by being on a different stack.  The IRQ stack has a
 * thread_info structure at the bottom so that current et al continue
 * to work.
 *
 * to_irq_stack copies the current task's thread_info to the IRQ stack
 * thread_info and sets the task's stack to point to the IRQ stack.
 *
 * from_irq_stack copies the thread_info struct back (flags may have
 * been modified) and resets the task's stack pointer.
 *
 * Tricky bits -
 *
 * What happens when two signals race each other?  UML doesn't block
 * signals with sigprocmask, SA_NODEFER, or sa_mask, so a second signal
 * could arrive while a previous one is still setting up the
 * thread_info.
 *
 * There are three cases -
 *     The first interrupt on the stack - sets up the thread_info and
 * handles the interrupt
 *     A nested interrupt interrupting the copying of the thread_info -
 * can't handle the interrupt, as the stack is in an unknown state
 *     A nested interrupt not interrupting the copying of the
 * thread_info - doesn't do any setup, just handles the interrupt
 *
 * The first job is to figure out whether we interrupted stack setup.
 * This is done by xchging the signal bit into pending_mask.
 * If the value that comes back is zero, then there is no setup in
 * progress, and the interrupt can be handled.  If the value is
 * non-zero, then there is stack setup in progress.  In order to have
 * the interrupt handled, we leave our signal in the mask, and it will
 * be handled by the upper handler after it has set up the stack.
 *
 * Next is to figure out whether we are the outer handler or a nested
 * one.  As part of setting up the stack, thread_info->real_thread is
 * set to non-NULL (and is reset to NULL on exit).  This is the
 * nesting indicator.  If it is non-NULL, then the stack is already
 * set up and the handler can run.
 */

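/*
 * One bit here per signal that arrived while stack setup or teardown
 * was in progress.  Bit 0 is never set by a real signal;
 * from_irq_stack parks it here as a marker so that signals arriving
 * during teardown leave their bits to be collected as well.
 */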
static unsigned long pending_mask;

unsigned long to_irq_stack(int sig, unsigned long *mask_out)
{
	struct thread_info *ti;
	unsigned long mask, old;
	int nested;

	mask = xchg(&pending_mask, 1 << sig);
	if (mask != 0) {
		/*
		 * If any interrupts come in at this point, we want to
		 * make sure that their bits aren't lost by our
		 * putting our bit in.  So, this loop accumulates bits
		 * until xchg returns the same value that we put in.
		 * When that happens, there were no new interrupts,
		 * and pending_mask contains a bit for each interrupt
		 * that came in.
		 */
		old = 1 << sig;
		do {
			old |= mask;
			mask = xchg(&pending_mask, old);
		} while (mask != old);
		return 1;
	}

	ti = current_thread_info();
	nested = (ti->real_thread != NULL);
	if (!nested) {
		struct task_struct *task;
		struct thread_info *tti;

		task = cpu_tasks[ti->cpu].task;
		tti = task_thread_info(task);
		*ti = *tti;
		ti->real_thread = tti;
		task->stack = ti;
	}

	mask = xchg(&pending_mask, 0);
	*mask_out |= mask | nested;
	return 0;
}

unsigned long from_irq_stack(int nested)
{
	struct thread_info *ti, *to;
	unsigned long mask;

	ti = current_thread_info();

	pending_mask = 1;

	to = ti->real_thread;
	current->stack = to;
	ti->real_thread = NULL;
	*to = *ti;

	mask = xchg(&pending_mask, 0);
	return mask & ~1;
}