xref: /openbmc/linux/arch/um/os-Linux/sigio.c (revision f42b3800)
/*
 * Copyright (C) 2002 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 * Licensed under the GPL
 */

#include <unistd.h>
#include <errno.h>
#include <fcntl.h>
#include <poll.h>
#include <pty.h>
#include <sched.h>
#include <signal.h>
#include <string.h>
#include "kern_constants.h"
#include "kern_util.h"
#include "init.h"
#include "os.h"
#include "sigio.h"
#include "um_malloc.h"
#include "user.h"

/*
 * Protected by sigio_lock(), also used by sigio_cleanup, which is an
 * exitcall.
 */
static int write_sigio_pid = -1;
static unsigned long write_sigio_stack;

/*
 * These arrays are initialized before the sigio thread is started, and
 * the descriptors closed after it is killed.  So, it can't see them change.
 * On the UML side, they are changed under the sigio_lock.
 */
#define SIGIO_FDS_INIT {-1, -1}

static int write_sigio_fds[2] = SIGIO_FDS_INIT;
static int sigio_private[2] = SIGIO_FDS_INIT;

struct pollfds {
	struct pollfd *poll;
	int size;
	int used;
};

/*
 * Protected by sigio_lock().  Used by the sigio thread, but the UML thread
 * synchronizes with it.
 */
static struct pollfds current_poll;
static struct pollfds next_poll;
static struct pollfds all_sigio_fds;

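/*
 * Body of the helper thread started by write_sigio_workaround().  It polls
 * the descriptors in current_poll forever.  Activity on sigio_private[1] is
 * a request from update_thread() on the UML side to swap current_poll and
 * next_poll; readiness on any other descriptor drops that descriptor from
 * the set and writes a byte to write_sigio_fds[1], whose read end was handed
 * to write_sigio_irq() at setup time.
 */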
static int write_sigio_thread(void *unused)
{
	struct pollfds *fds, tmp;
	struct pollfd *p;
	int i, n, respond_fd;
	char c;

	signal(SIGWINCH, SIG_IGN);
	fds = &current_poll;
	while (1) {
		n = poll(fds->poll, fds->used, -1);
		if (n < 0) {
			if (errno == EINTR)
				continue;
			printk(UM_KERN_ERR "write_sigio_thread : poll returned "
			       "%d, errno = %d\n", n, errno);
		}
		for (i = 0; i < fds->used; i++) {
			p = &fds->poll[i];
			if (p->revents == 0)
				continue;
			if (p->fd == sigio_private[1]) {
				CATCH_EINTR(n = read(sigio_private[1], &c,
						     sizeof(c)));
				if (n != sizeof(c))
					printk(UM_KERN_ERR
					       "write_sigio_thread : "
					       "read on socket failed, "
					       "err = %d\n", errno);
				tmp = current_poll;
				current_poll = next_poll;
				next_poll = tmp;
				respond_fd = sigio_private[1];
			} else {
				respond_fd = write_sigio_fds[1];
				fds->used--;
				memmove(&fds->poll[i], &fds->poll[i + 1],
					(fds->used - i) * sizeof(*fds->poll));
			}

			CATCH_EINTR(n = write(respond_fd, &c, sizeof(c)));
			if (n != sizeof(c))
				printk(UM_KERN_ERR "write_sigio_thread : "
				       "write on socket failed, err = %d\n",
				       errno);
		}
	}

	return 0;
}

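/*
 * Grow the pollfd array in *polls so it can hold at least n entries,
 * preserving the existing entries.  Returns 0 on success, -ENOMEM on
 * allocation failure.  All callers hold sigio_lock().
 */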
static int need_poll(struct pollfds *polls, int n)
{
	struct pollfd *new;

	if (n <= polls->size)
		return 0;

	new = kmalloc(n * sizeof(struct pollfd), UM_GFP_ATOMIC);
	if (new == NULL) {
		printk(UM_KERN_ERR "need_poll : failed to allocate new "
		       "pollfds\n");
		return -ENOMEM;
	}

	memcpy(new, polls->poll, polls->used * sizeof(struct pollfd));
	kfree(polls->poll);

	polls->poll = new;
	polls->size = n;
	return 0;
}

/*
 * Must be called with sigio_lock held, because it's needed by the marked
 * critical section.
 */
static void update_thread(void)
{
	unsigned long flags;
	int n;
	char c;

	flags = set_signals(0);
	CATCH_EINTR(n = write(sigio_private[0], &c, sizeof(c)));
	if (n != sizeof(c)) {
		printk(UM_KERN_ERR "update_thread : write failed, err = %d\n",
		       errno);
		goto fail;
	}

	CATCH_EINTR(n = read(sigio_private[0], &c, sizeof(c)));
	if (n != sizeof(c)) {
		printk(UM_KERN_ERR "update_thread : read failed, err = %d\n",
		       errno);
		goto fail;
	}

	set_signals(flags);
	return;
 fail:
	/* Critical section start */
	if (write_sigio_pid != -1) {
		os_kill_process(write_sigio_pid, 1);
		free_stack(write_sigio_stack, 0);
	}
	write_sigio_pid = -1;
	close(sigio_private[0]);
	close(sigio_private[1]);
	close(write_sigio_fds[0]);
	close(write_sigio_fds[1]);
	/* Critical section end */
	set_signals(flags);
}

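/*
 * Start polling a descriptor that is already recorded in all_sigio_fds:
 * build next_poll as the current set plus fd and hand it to the helper
 * thread through update_thread().  Quietly does nothing if the descriptor
 * is unknown or already being polled.
 */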
int add_sigio_fd(int fd)
{
	struct pollfd *p;
	int err = 0, i, n;

	sigio_lock();
	for (i = 0; i < all_sigio_fds.used; i++) {
		if (all_sigio_fds.poll[i].fd == fd)
			break;
	}
	if (i == all_sigio_fds.used)
		goto out;

	p = &all_sigio_fds.poll[i];

	for (i = 0; i < current_poll.used; i++) {
		if (current_poll.poll[i].fd == fd)
			goto out;
	}

	n = current_poll.used;
	err = need_poll(&next_poll, n + 1);
	if (err)
		goto out;

	memcpy(next_poll.poll, current_poll.poll,
	       current_poll.used * sizeof(struct pollfd));
	next_poll.poll[n] = *p;
	next_poll.used = n + 1;
	update_thread();
 out:
	sigio_unlock();
	return err;
}

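/*
 * Stop polling a descriptor: build next_poll as the current set minus fd
 * and hand it to the helper thread.  Returns -EIO if the thread has already
 * been cleaned up (see the exitcall comment below).
 */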
int ignore_sigio_fd(int fd)
{
	struct pollfd *p;
	int err = 0, i, n = 0;

	/*
	 * This is called from exitcalls elsewhere in UML - if
	 * sigio_cleanup has already run, then update_thread will hang
	 * or fail because the thread is no longer running.
	 */
	if (write_sigio_pid == -1)
		return -EIO;

	sigio_lock();
	for (i = 0; i < current_poll.used; i++) {
		if (current_poll.poll[i].fd == fd)
			break;
	}
	if (i == current_poll.used)
		goto out;

	err = need_poll(&next_poll, current_poll.used - 1);
	if (err)
		goto out;

	for (i = 0; i < current_poll.used; i++) {
		p = &current_poll.poll[i];
		if (p->fd != fd)
			next_poll.poll[n++] = *p;
	}
	next_poll.used = current_poll.used - 1;

	update_thread();
 out:
	sigio_unlock();
	return err;
}

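/*
 * Allocate the helper thread's initial, single-entry poll set, containing
 * only its end of the sigio_private pipe.
 */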
static struct pollfd *setup_initial_poll(int fd)
{
	struct pollfd *p;

	p = kmalloc(sizeof(struct pollfd), UM_GFP_KERNEL);
	if (p == NULL) {
		printk(UM_KERN_ERR "setup_initial_poll : failed to allocate "
		       "poll\n");
		return NULL;
	}
	*p = ((struct pollfd) { .fd		= fd,
				.events		= POLLIN,
				.revents	= 0 });
	return p;
}

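/*
 * Set up the SIGIO emulation machinery: create the write_sigio and
 * sigio_private pipe pairs, hand the read end of write_sigio_fds to
 * write_sigio_irq() (which presumably registers it as the SIGIO interrupt
 * source on the UML side), and start write_sigio_thread() on its own stack.
 * Safe to call repeatedly - it returns as soon as it sees that the thread
 * already exists.
 */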
static void write_sigio_workaround(void)
{
	struct pollfd *p;
	int err;
	int l_write_sigio_fds[2];
	int l_sigio_private[2];
	int l_write_sigio_pid;

	/* We call this *tons* of times - and most of them must just bail out. */
	sigio_lock();
	l_write_sigio_pid = write_sigio_pid;
	sigio_unlock();

	if (l_write_sigio_pid != -1)
		return;

	err = os_pipe(l_write_sigio_fds, 1, 1);
	if (err < 0) {
		printk(UM_KERN_ERR "write_sigio_workaround - os_pipe 1 failed, "
		       "err = %d\n", -err);
		return;
	}
	err = os_pipe(l_sigio_private, 1, 1);
	if (err < 0) {
		printk(UM_KERN_ERR "write_sigio_workaround - os_pipe 2 failed, "
		       "err = %d\n", -err);
		goto out_close1;
	}

	p = setup_initial_poll(l_sigio_private[1]);
	if (!p)
		goto out_close2;

	sigio_lock();

	/*
	 * Did we race?  Don't try to optimize this, please - it is unlikely
	 * to happen, and happens at most once, during boot.
	 */
	if (write_sigio_pid != -1)
		goto out_free;

	current_poll = ((struct pollfds) { .poll	= p,
					   .used	= 1,
					   .size	= 1 });

	if (write_sigio_irq(l_write_sigio_fds[0]))
		goto out_clear_poll;

	memcpy(write_sigio_fds, l_write_sigio_fds, sizeof(l_write_sigio_fds));
	memcpy(sigio_private, l_sigio_private, sizeof(l_sigio_private));

	write_sigio_pid = run_helper_thread(write_sigio_thread, NULL,
					    CLONE_FILES | CLONE_VM,
					    &write_sigio_stack);

	if (write_sigio_pid < 0)
		goto out_clear;

	sigio_unlock();
	return;

out_clear:
	write_sigio_pid = -1;
	write_sigio_fds[0] = -1;
	write_sigio_fds[1] = -1;
	sigio_private[0] = -1;
	sigio_private[1] = -1;
out_clear_poll:
	current_poll = ((struct pollfds) { .poll	= NULL,
					   .size	= 0,
					   .used	= 0 });
out_free:
	sigio_unlock();
	kfree(p);
out_close2:
	close(l_sigio_private[0]);
	close(l_sigio_private[1]);
out_close1:
	close(l_write_sigio_fds[0]);
	close(l_write_sigio_fds[1]);
}

/* Changed during early boot */
static int pty_output_sigio = 0;
static int pty_close_sigio = 0;

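/*
 * If fd is a tty and the boot-time checks found that the host's ptys don't
 * raise SIGIO for this kind of access (SIGIO on close is needed for read
 * descriptors, SIGIO on output for write descriptors), start the workaround
 * thread and record the descriptor in all_sigio_fds so that add_sigio_fd()
 * can later poll it.
 */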
void maybe_sigio_broken(int fd, int read)
{
	int err;

	if (!isatty(fd))
		return;

	if ((read || pty_output_sigio) && (!read || pty_close_sigio))
		return;

	write_sigio_workaround();

	sigio_lock();
	err = need_poll(&all_sigio_fds, all_sigio_fds.used + 1);
	if (err) {
		printk(UM_KERN_ERR "maybe_sigio_broken - failed to add pollfd "
		       "for descriptor %d\n", fd);
		goto out;
	}

	all_sigio_fds.poll[all_sigio_fds.used++] =
		((struct pollfd) { .fd		= fd,
				   .events	= read ? POLLIN : POLLOUT,
				   .revents	= 0 });
out:
	sigio_unlock();
}

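/*
 * Exitcall: kill the helper thread, free its stack, and mark it gone so
 * that later exitcalls (see ignore_sigio_fd()) know it is no longer there.
 */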
static void sigio_cleanup(void)
{
	if (write_sigio_pid == -1)
		return;

	os_kill_process(write_sigio_pid, 1);
	free_stack(write_sigio_stack, 0);
	write_sigio_pid = -1;
}

__uml_exitcall(sigio_cleanup);

/* Used as a flag during SIGIO testing early in boot */
static volatile int got_sigio = 0;

static void __init handler(int sig)
{
	got_sigio = 1;
}

struct openpty_arg {
	int master;
	int slave;
	int err;
};

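/* Run via initial_thread_cb() - opens a host pty pair for the checks below. */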
static void openpty_cb(void *arg)
{
	struct openpty_arg *info = arg;

	info->err = 0;
	if (openpty(&info->master, &info->slave, NULL, NULL, NULL))
		info->err = -errno;
}

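/*
 * Put the pty pair into the mode the checks need: the master non-blocking,
 * asynchronous, and owned by this process so SIGIO is delivered here; the
 * slave just non-blocking.
 */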
static int async_pty(int master, int slave)
{
	int flags;

	flags = fcntl(master, F_GETFL);
	if (flags < 0)
		return -errno;

	if ((fcntl(master, F_SETFL, flags | O_NONBLOCK | O_ASYNC) < 0) ||
	    (fcntl(master, F_SETOWN, os_getpid()) < 0))
		return -errno;

	if ((fcntl(slave, F_SETFL, flags | O_NONBLOCK) < 0))
		return -errno;

	return 0;
}

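/*
 * Common harness for the boot-time SIGIO checks: allocate a pty pair, put
 * it into raw, asynchronous mode, install a temporary SIGIO handler, run
 * the supplied check (tty_output or tty_close), then restore the old
 * handler.  The checks record their results in the pty_*_sigio flags.
 */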
static void __init check_one_sigio(void (*proc)(int, int))
{
	struct sigaction old, new;
	struct openpty_arg pty = { .master = -1, .slave = -1 };
	int master, slave, err;

	initial_thread_cb(openpty_cb, &pty);
	if (pty.err) {
		printk(UM_KERN_ERR "check_one_sigio failed, errno = %d\n",
		       -pty.err);
		return;
	}

	master = pty.master;
	slave = pty.slave;

	if ((master == -1) || (slave == -1)) {
		printk(UM_KERN_ERR "check_one_sigio failed to allocate a "
		       "pty\n");
		return;
	}

	/* Not now, but complain so we know where we failed. */
	err = raw(master);
	if (err < 0) {
		printk(UM_KERN_ERR "check_one_sigio : raw failed, errno = %d\n",
		       -err);
		return;
	}

	err = async_pty(master, slave);
	if (err < 0) {
		printk(UM_KERN_ERR "check_one_sigio : async_pty failed, "
		       "err = %d\n", -err);
		return;
	}

	if (sigaction(SIGIO, NULL, &old) < 0) {
		printk(UM_KERN_ERR "check_one_sigio : sigaction 1 failed, "
		       "errno = %d\n", errno);
		return;
	}

	new = old;
	new.sa_handler = handler;
	if (sigaction(SIGIO, &new, NULL) < 0) {
		printk(UM_KERN_ERR "check_one_sigio : sigaction 2 failed, "
		       "errno = %d\n", errno);
		return;
	}

	got_sigio = 0;
	(*proc)(master, slave);

	close(master);
	close(slave);

	if (sigaction(SIGIO, &old, NULL) < 0)
		printk(UM_KERN_ERR "check_one_sigio : sigaction 3 failed, "
		       "errno = %d\n", errno);
}

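/*
 * Check whether filling a pty's output buffer generates SIGIO: write to the
 * master until it would block, then drain it through the slave and see
 * whether the handler ran.  Sets pty_output_sigio on success.
 */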
static void tty_output(int master, int slave)
{
	int n;
	char buf[512];

	printk(UM_KERN_INFO "Checking that host ptys support output SIGIO...");

	memset(buf, 0, sizeof(buf));

	while (write(master, buf, sizeof(buf)) > 0)
		;
	if (errno != EAGAIN)
		printk(UM_KERN_ERR "tty_output : write failed, errno = %d\n",
		       errno);
	while (((n = read(slave, buf, sizeof(buf))) > 0) && !got_sigio)
		;

	if (got_sigio) {
		printk(UM_KERN_CONT "Yes\n");
		pty_output_sigio = 1;
	} else if ((n < 0) && (errno == EAGAIN)) {
		/* A drained non-blocking slave reads as -1/EAGAIN. */
		printk(UM_KERN_CONT "No, enabling workaround\n");
	} else {
		printk(UM_KERN_CONT "tty_output : read failed, errno = %d\n",
		       errno);
	}
}

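/*
 * Check whether closing the slave side of a pty raises SIGIO on the master.
 * Sets pty_close_sigio on success.
 */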
static void tty_close(int master, int slave)
{
	printk(UM_KERN_INFO "Checking that host ptys support SIGIO on "
	       "close...");

	close(slave);
	if (got_sigio) {
		printk(UM_KERN_CONT "Yes\n");
		pty_close_sigio = 1;
	} else
		printk(UM_KERN_CONT "No, enabling workaround\n");
}

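/*
 * Early-boot entry point: if the host has ptys at all, run the two checks
 * above so that maybe_sigio_broken() knows whether the workaround thread
 * will be needed.
 */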
void __init check_sigio(void)
{
	if ((access("/dev/ptmx", R_OK) < 0) &&
	    (access("/dev/ptyp0", R_OK) < 0)) {
		printk(UM_KERN_WARNING "No pseudo-terminals available - "
		       "skipping pty SIGIO check\n");
		return;
	}
	check_one_sigio(tty_output);
	check_one_sigio(tty_close);
}

/* Here because it only does the SIGIO testing for now */
void __init os_check_bugs(void)
{
	check_sigio();
}