/*
 * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 * Licensed under the GPL
 * Derived (i.e. mostly copied) from arch/i386/kernel/irq.c:
 *	Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar
 */

#include "linux/cpumask.h"
#include "linux/hardirq.h"
#include "linux/interrupt.h"
#include "linux/kernel_stat.h"
#include "linux/module.h"
#include "linux/seq_file.h"
#include "as-layout.h"
#include "kern_util.h"
#include "os.h"

/*
 * Generic, controller-independent functions:
 */

int show_interrupts(struct seq_file *p, void *v)
{
	int i = *(loff_t *) v, j;
	struct irqaction * action;
	unsigned long flags;

	if (i == 0) {
		seq_printf(p, "           ");
		for_each_online_cpu(j)
			seq_printf(p, "CPU%d       ",j);
		seq_putc(p, '\n');
	}

	if (i < NR_IRQS) {
		spin_lock_irqsave(&irq_desc[i].lock, flags);
		action = irq_desc[i].action;
		if (!action)
			goto skip;
		seq_printf(p, "%3d: ",i);
#ifndef CONFIG_SMP
		seq_printf(p, "%10u ", kstat_irqs(i));
#else
		for_each_online_cpu(j)
			seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
#endif
		seq_printf(p, " %14s", irq_desc[i].chip->typename);
		seq_printf(p, "  %s", action->name);

		for (action=action->next; action; action = action->next)
			seq_printf(p, ", %s", action->name);

		seq_putc(p, '\n');
skip:
		spin_unlock_irqrestore(&irq_desc[i].lock, flags);
	} else if (i == NR_IRQS)
		seq_putc(p, '\n');

	return 0;
}

/*
 * This list is accessed under irq_lock, except in sigio_handler, where
 * it is safe from concurrent modification.  IRQ handlers won't change
 * it - if an IRQ source has vanished, it is freed by free_irqs just
 * before sigio_handler returns.  free_irqs processes a separate list
 * of IRQs to free, with its own locking, and comes back here to remove
 * the list elements, taking irq_lock to do so.
 */
static struct irq_fd *active_fds = NULL;
static struct irq_fd **last_irq_ptr = &active_fds;

extern void free_irqs(void);

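/*
 * Ask the host which descriptors in active_fds have pending events and
 * run do_IRQ for each of them, looping until os_waiting_for_events has
 * nothing left to deliver.
 */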
void sigio_handler(int sig, struct uml_pt_regs *regs)
{
	struct irq_fd *irq_fd;
	int n;

	if (smp_sigio_handler())
		return;

	while (1) {
		n = os_waiting_for_events(active_fds);
		if (n <= 0) {
			if (n == -EINTR)
				continue;
			else
				break;
		}

		for (irq_fd = active_fds; irq_fd != NULL;
		     irq_fd = irq_fd->next) {
			if (irq_fd->current_events != 0) {
				irq_fd->current_events = 0;
				do_IRQ(irq_fd->irq, regs);
			}
		}
	}

	free_irqs();
}

static DEFINE_SPINLOCK(irq_lock);

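/*
 * Attach a host file descriptor to an IRQ: set the fd up to deliver
 * SIGIO, queue a new irq_fd on active_fds, and grow the pollfds array
 * until the new entry fits, dropping irq_lock around each allocation
 * since kmalloc can sleep.
 */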
int activate_fd(int irq, int fd, int type, void *dev_id)
{
	struct pollfd *tmp_pfd;
	struct irq_fd *new_fd, *irq_fd;
	unsigned long flags;
	int events, err, n;

	err = os_set_fd_async(fd);
	if (err < 0)
		goto out;

	err = -ENOMEM;
	new_fd = kmalloc(sizeof(struct irq_fd), GFP_KERNEL);
	if (new_fd == NULL)
		goto out;

	if (type == IRQ_READ)
		events = UM_POLLIN | UM_POLLPRI;
	else
		events = UM_POLLOUT;
	*new_fd = ((struct irq_fd) { .next  		= NULL,
				     .id 		= dev_id,
				     .fd 		= fd,
				     .type 		= type,
				     .irq 		= irq,
				     .events 		= events,
				     .current_events 	= 0 } );

	err = -EBUSY;
	spin_lock_irqsave(&irq_lock, flags);
	for (irq_fd = active_fds; irq_fd != NULL; irq_fd = irq_fd->next) {
		if ((irq_fd->fd == fd) && (irq_fd->type == type)) {
			printk(KERN_ERR "Registering fd %d twice\n", fd);
			printk(KERN_ERR "Irqs : %d, %d\n", irq_fd->irq, irq);
			printk(KERN_ERR "Ids : 0x%p, 0x%p\n", irq_fd->id,
			       dev_id);
			goto out_unlock;
		}
	}

	if (type == IRQ_WRITE)
		fd = -1;

	tmp_pfd = NULL;
	n = 0;

	while (1) {
		n = os_create_pollfd(fd, events, tmp_pfd, n);
		if (n == 0)
			break;

		/*
		 * n > 0 means that the new pollfd couldn't be added to
		 * the current pollfds array, and that tmp_pfd is either
		 * NULL or too small for the new array.  At least n
		 * bytes are needed.
		 *
		 * We have to drop the lock here in order to call
		 * kmalloc, which might sleep.  If something else came
		 * in and changed the pollfds array while the lock was
		 * dropped, so that the new pollfd still doesn't fit, we
		 * free tmp_pfd and try again.
		 */
		spin_unlock_irqrestore(&irq_lock, flags);
		kfree(tmp_pfd);

		tmp_pfd = kmalloc(n, GFP_KERNEL);
		if (tmp_pfd == NULL)
			goto out_kfree;

		spin_lock_irqsave(&irq_lock, flags);
	}

	*last_irq_ptr = new_fd;
	last_irq_ptr = &new_fd->next;

	spin_unlock_irqrestore(&irq_lock, flags);

	/*
	 * This calls activate_fd, so it has to be outside the critical
	 * section.
	 */
	maybe_sigio_broken(fd, (type == IRQ_READ));

	return 0;

 out_unlock:
	spin_unlock_irqrestore(&irq_lock, flags);
 out_kfree:
	kfree(new_fd);
 out:
	return err;
}

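/*
 * Unlink every active_fds entry that the test callback matches;
 * os_free_irq_by_cb does the actual walk under irq_lock.
 */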
static void free_irq_by_cb(int (*test)(struct irq_fd *, void *), void *arg)
{
	unsigned long flags;

	spin_lock_irqsave(&irq_lock, flags);
	os_free_irq_by_cb(test, arg, active_fds, &last_irq_ptr);
	spin_unlock_irqrestore(&irq_lock, flags);
}

struct irq_and_dev {
	int irq;
	void *dev;
};

static int same_irq_and_dev(struct irq_fd *irq, void *d)
{
	struct irq_and_dev *data = d;

	return ((irq->irq == data->irq) && (irq->id == data->dev));
}

void free_irq_by_irq_and_dev(unsigned int irq, void *dev)
{
	struct irq_and_dev data = ((struct irq_and_dev) { .irq  = irq,
							  .dev  = dev });

	free_irq_by_cb(same_irq_and_dev, &data);
}

static int same_fd(struct irq_fd *irq, void *fd)
{
	return (irq->fd == *((int *)fd));
}

void free_irq_by_fd(int fd)
{
	free_irq_by_cb(same_fd, &fd);
}

/* Must be called with irq_lock held */
static struct irq_fd *find_irq_by_fd(int fd, int irqnum, int *index_out)
{
	struct irq_fd *irq;
	int i = 0;
	int fdi;

	for (irq = active_fds; irq != NULL; irq = irq->next) {
		if ((irq->fd == fd) && (irq->irq == irqnum))
			break;
		i++;
	}
	if (irq == NULL) {
		printk(KERN_ERR "find_irq_by_fd doesn't have descriptor %d\n",
		       fd);
		goto out;
	}
	fdi = os_get_pollfd(i);
	if ((fdi != -1) && (fdi != fd)) {
		printk(KERN_ERR "find_irq_by_fd - mismatch between active_fds "
		       "and pollfds, fd %d vs %d, need %d\n", irq->fd,
		       fdi, fd);
		irq = NULL;
		goto out;
	}
	*index_out = i;
 out:
	return irq;
}

void reactivate_fd(int fd, int irqnum)
{
	struct irq_fd *irq;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&irq_lock, flags);
	irq = find_irq_by_fd(fd, irqnum, &i);
	if (irq == NULL) {
		spin_unlock_irqrestore(&irq_lock, flags);
		return;
	}
	os_set_pollfd(i, irq->fd);
	spin_unlock_irqrestore(&irq_lock, flags);

	add_sigio_fd(fd);
}

void deactivate_fd(int fd, int irqnum)
{
	struct irq_fd *irq;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&irq_lock, flags);
	irq = find_irq_by_fd(fd, irqnum, &i);
	if (irq == NULL) {
		spin_unlock_irqrestore(&irq_lock, flags);
		return;
	}

	os_set_pollfd(i, -1);
	spin_unlock_irqrestore(&irq_lock, flags);

	ignore_sigio_fd(fd);
}

/*
 * Called just before shutdown in order to provide a clean exec
 * environment in case the system is rebooting.  No locking because
 * that would cause a pointless shutdown hang if something hadn't
 * released the lock.
 */
int deactivate_all_fds(void)
{
	struct irq_fd *irq;
	int err;

	for (irq = active_fds; irq != NULL; irq = irq->next) {
		err = os_clear_fd_async(irq->fd);
		if (err)
			return err;
	}
	/* If a signal is already queued, ignore it once signals are unblocked */
	os_set_ioignore();

	return 0;
}

/*
 * do_IRQ handles all normal device IRQs (the special
 * SMP cross-CPU interrupts have their own specific
 * handlers).
 */
unsigned int do_IRQ(int irq, struct uml_pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs((struct pt_regs *)regs);
	irq_enter();
	__do_IRQ(irq);
	irq_exit();
	set_irq_regs(old_regs);
	return 1;
}

int um_request_irq(unsigned int irq, int fd, int type,
		   irq_handler_t handler,
		   unsigned long irqflags, const char * devname,
		   void *dev_id)
{
	int err;

	if (fd != -1) {
		err = activate_fd(irq, fd, type, dev_id);
		if (err)
			return err;
	}

	return request_irq(irq, handler, irqflags, devname, dev_id);
}

EXPORT_SYMBOL(um_request_irq);
EXPORT_SYMBOL(reactivate_fd);
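
/*
 * A minimal usage sketch: a driver passes a host descriptor to
 * um_request_irq and re-arms polling from its handler with
 * reactivate_fd.  The example_* names and the IRQ number are
 * hypothetical, so the sketch is compiled out.
 */
#if 0
struct example_dev {
	int fd;
};

static irqreturn_t example_intr(int irq, void *dev_id)
{
	struct example_dev *dev = dev_id;

	/* Consume the data waiting on dev->fd here, then re-arm polling. */
	reactivate_fd(dev->fd, irq);
	return IRQ_HANDLED;
}

static int example_attach(int irq, struct example_dev *dev)
{
	/* IRQ_READ polls the descriptor for input (UM_POLLIN | UM_POLLPRI). */
	return um_request_irq(irq, dev->fd, IRQ_READ, example_intr,
			      IRQF_DISABLED | IRQF_SHARED, "example", dev);
}
#endif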

/*
 * hw_interrupt_type must define (startup || enable) &&
 * (shutdown || disable) && end
 */
static void dummy(unsigned int irq)
{
}

/* This is used for everything but the timer. */
static struct hw_interrupt_type normal_irq_type = {
	.typename = "SIGIO",
	.release = free_irq_by_irq_and_dev,
	.disable = dummy,
	.enable = dummy,
	.ack = dummy,
	.end = dummy
};

static struct hw_interrupt_type SIGVTALRM_irq_type = {
	.typename = "SIGVTALRM",
	.release = free_irq_by_irq_and_dev,
	.shutdown = dummy, /* never called */
	.disable = dummy,
	.enable = dummy,
	.ack = dummy,
	.end = dummy
};

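/*
 * Set up the irq_desc entries: the timer IRQ is driven by SIGVTALRM,
 * every other IRQ by SIGIO.
 */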
void __init init_IRQ(void)
{
	int i;

	irq_desc[TIMER_IRQ].status = IRQ_DISABLED;
	irq_desc[TIMER_IRQ].action = NULL;
	irq_desc[TIMER_IRQ].depth = 1;
	irq_desc[TIMER_IRQ].chip = &SIGVTALRM_irq_type;
	enable_irq(TIMER_IRQ);
	for (i = 1; i < NR_IRQS; i++) {
		irq_desc[i].status = IRQ_DISABLED;
		irq_desc[i].action = NULL;
		irq_desc[i].depth = 1;
		irq_desc[i].chip = &normal_irq_type;
		enable_irq(i);
	}
}

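/*
 * Set up a pipe whose read side generates irq.  Returns the write side
 * of the pipe on success, or a negative error code on failure.
 */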
int init_aio_irq(int irq, char *name, irq_handler_t handler)
{
	int fds[2], err;

	err = os_pipe(fds, 1, 1);
	if (err) {
		printk(KERN_ERR "init_aio_irq - os_pipe failed, err = %d\n",
		       -err);
		goto out;
	}

	err = um_request_irq(irq, fds[0], IRQ_READ, handler,
			     IRQF_DISABLED | IRQF_SAMPLE_RANDOM, name,
			     (void *) (long) fds[0]);
	if (err) {
		printk(KERN_ERR "init_aio_irq - um_request_irq failed, "
		       "err = %d\n", err);
		goto out_close;
	}

	err = fds[1];
	goto out;

 out_close:
	os_close_file(fds[0]);
	os_close_file(fds[1]);
 out:
	return err;
}

/*
 * IRQ stack entry and exit:
 *
 * Unlike i386, which takes IRQs on the normal kernel stack and
 * switches to the IRQ stack after some preparation, UML uses
 * sigaltstack to receive signals on a separate stack from the start.
 * These two functions make sure the rest of the kernel won't be too
 * upset by being on a different stack.  The IRQ stack has a
 * thread_info structure at the bottom so that current et al continue
 * to work.
 *
 * to_irq_stack copies the current task's thread_info to the IRQ stack
 * thread_info and sets the task's stack to point to the IRQ stack.
 *
 * from_irq_stack copies the thread_info struct back (flags may have
 * been modified) and resets the task's stack pointer.
 *
 * Tricky bits -
 *
 * What happens when two signals race each other?  UML doesn't block
 * signals with sigprocmask or sa_mask, and doesn't defer a signal
 * during its own handler, so a second signal can arrive while a
 * previous one is still setting up the thread_info.
 *
 * There are three cases -
 *     The first interrupt on the stack - sets up the thread_info and
 * handles the interrupt
 *     A nested interrupt interrupting the copying of the thread_info -
 * can't handle the interrupt, as the stack is in an unknown state
 *     A nested interrupt not interrupting the copying of the
 * thread_info - doesn't do any setup, just handles the interrupt
 *
 * The first job is to figure out whether we interrupted stack setup.
 * This is done by xchging the signal mask with pending_mask.  If the
 * value that comes back is zero, then there is no setup in progress,
 * and the interrupt can be handled.  If the value is non-zero, then
 * there is stack setup in progress.  In order to have the interrupt
 * handled, we leave our signal in the mask, and it will be handled by
 * the upper handler after it has set up the stack.
 *
 * Next is to figure out whether we are the outer handler or a nested
 * one.  As part of setting up the stack, thread_info->real_thread is
 * set to non-NULL (and is reset to NULL on exit).  This is the
 * nesting indicator.  If it is non-NULL, then the stack is already
 * set up and the handler can run.
 */
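/*
 * Concretely: the outer handler's first xchg publishes its signal bit
 * in pending_mask and gets back zero, so it goes on to copy the
 * thread_info.  A signal arriving during the copy gets back a non-zero
 * mask, merges its own bit into pending_mask, and returns without
 * running.  The outer handler's final xchg(&pending_mask, 0) collects
 * those bits, so the nested interrupts are handled once the stack is
 * set up.
 */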

static unsigned long pending_mask;

unsigned long to_irq_stack(unsigned long *mask_out)
{
	struct thread_info *ti;
	unsigned long mask, old;
	int nested;

	mask = xchg(&pending_mask, *mask_out);
	if (mask != 0) {
		/*
		 * If any interrupts come in at this point, we want to
		 * make sure that their bits aren't lost by our
		 * putting our bit in.  So, this loop accumulates bits
		 * until xchg returns the same value that we put in.
		 * When that happens, there were no new interrupts,
		 * and pending_mask contains a bit for each interrupt
		 * that came in.
		 */
		old = *mask_out;
		do {
			old |= mask;
			mask = xchg(&pending_mask, old);
		} while (mask != old);
		return 1;
	}

	ti = current_thread_info();
	nested = (ti->real_thread != NULL);
	if (!nested) {
		struct task_struct *task;
		struct thread_info *tti;

		task = cpu_tasks[ti->cpu].task;
		tti = task_thread_info(task);

		*ti = *tti;
		ti->real_thread = tti;
		task->stack = ti;
	}

	mask = xchg(&pending_mask, 0);
	*mask_out |= mask | nested;
	return 0;
}

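/*
 * Leave the IRQ stack: copy the thread_info back to the task's own
 * stack and return the mask of signals that arrived during the copy.
 * pending_mask is held at 1 while copying so that racing handlers
 * queue themselves; that sentinel bit is stripped from the result.
 */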
unsigned long from_irq_stack(int nested)
{
	struct thread_info *ti, *to;
	unsigned long mask;

	ti = current_thread_info();

	pending_mask = 1;

	to = ti->real_thread;
	current->stack = to;
	ti->real_thread = NULL;
	*to = *ti;

	mask = xchg(&pending_mask, 0);
	return mask & ~1;
}