/*
 * Copyright (C) 2002 Jeff Dike (jdike@karaya.com)
 * Licensed under the GPL
 */

#include <unistd.h>
#include <stdlib.h>
#include <termios.h>
#include <pty.h>
#include <signal.h>
#include <fcntl.h>
#include <errno.h>
#include <string.h>
#include <sched.h>
#include <sys/socket.h>
#include <sys/poll.h>
#include "init.h"
#include "user.h"
#include "kern_util.h"
#include "sigio.h"
#include "os.h"
#include "um_malloc.h"

/* Protected by sigio_lock(), also used by sigio_cleanup, which is an
 * exitcall.
 */
static int write_sigio_pid = -1;
static unsigned long write_sigio_stack;

/* These arrays are initialized before the sigio thread is started, and
 * the descriptors closed after it is killed. So, it can't see them change.
 * On the UML side, they are changed under the sigio_lock.
 */
#define SIGIO_FDS_INIT {-1, -1}

static int write_sigio_fds[2] = SIGIO_FDS_INIT;
static int sigio_private[2] = SIGIO_FDS_INIT;

struct pollfds {
	struct pollfd *poll;
	int size;
	int used;
};

/* Protected by sigio_lock().  Used by the sigio thread, but the UML thread
 * synchronizes with it.
 */
static struct pollfds current_poll;
static struct pollfds next_poll;
static struct pollfds all_sigio_fds;

static int write_sigio_thread(void *unused)
{
	struct pollfds *fds, tmp;
	struct pollfd *p;
	int i, n, respond_fd;
	char c;

	signal(SIGWINCH, SIG_IGN);
	fds = &current_poll;
	while(1){
		n = poll(fds->poll, fds->used, -1);
		if(n < 0){
			if(errno == EINTR) continue;
			printk("write_sigio_thread : poll returned %d, "
			       "errno = %d\n", n, errno);
		}
		for(i = 0; i < fds->used; i++){
			p = &fds->poll[i];
			if(p->revents == 0) continue;
			if(p->fd == sigio_private[1]){
				CATCH_EINTR(n = read(sigio_private[1], &c,
						     sizeof(c)));
				if(n != sizeof(c))
					printk("write_sigio_thread : "
					       "read on socket failed, "
					       "err = %d\n", errno);
				tmp = current_poll;
				current_poll = next_poll;
				next_poll = tmp;
				respond_fd = sigio_private[1];
			}
			else {
				respond_fd = write_sigio_fds[1];
				fds->used--;
				memmove(&fds->poll[i], &fds->poll[i + 1],
					(fds->used - i) * sizeof(*fds->poll));
			}

			CATCH_EINTR(n = write(respond_fd, &c, sizeof(c)));
			if(n != sizeof(c))
				printk("write_sigio_thread : write on socket "
				       "failed, err = %d\n", errno);
		}
	}

	return 0;
}

static int need_poll(struct pollfds *polls, int n)
{
	struct pollfd *new;

	if(n <= polls->size)
		return 0;

	new = um_kmalloc_atomic(n * sizeof(struct pollfd));
	if(new == NULL){
		printk("need_poll : failed to allocate new pollfds\n");
		return -ENOMEM;
	}

	memcpy(new, polls->poll, polls->used * sizeof(struct pollfd));
	kfree(polls->poll);

	polls->poll = new;
	polls->size = n;
	return 0;
}
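
/* A sketch of how the helper thread above and update_thread() below
 * cooperate:
 *
 *  - write_sigio_thread() polls current_poll.  When any descriptor other
 *    than sigio_private[1] becomes ready, it drops that descriptor from
 *    the set and writes a byte to write_sigio_fds[1].  The read side of
 *    that pair is registered with write_sigio_irq() below, which is
 *    expected to turn the event into the SIGIO the host pty failed to
 *    deliver.
 *
 *  - update_thread() (the UML side) builds the new descriptor set in
 *    next_poll, writes a byte to sigio_private[0] and waits for the echo.
 *    The thread answers activity on sigio_private[1] by swapping
 *    current_poll and next_poll and echoing the byte, so the swap always
 *    happens between two poll() calls rather than under the thread's feet.
 */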

/* Must be called with sigio_lock held, because it's needed by the marked
 * critical section.
 */
static void update_thread(void)
{
	unsigned long flags;
	int n;
	char c;

	flags = set_signals(0);
	n = write(sigio_private[0], &c, sizeof(c));
	if(n != sizeof(c)){
		printk("update_thread : write failed, err = %d\n", errno);
		goto fail;
	}

	CATCH_EINTR(n = read(sigio_private[0], &c, sizeof(c)));
	if(n != sizeof(c)){
		printk("update_thread : read failed, err = %d\n", errno);
		goto fail;
	}

	set_signals(flags);
	return;
fail:
	/* Critical section start */
	if (write_sigio_pid != -1) {
		os_kill_process(write_sigio_pid, 1);
		free_stack(write_sigio_stack, 0);
	}
	write_sigio_pid = -1;
	close(sigio_private[0]);
	close(sigio_private[1]);
	close(write_sigio_fds[0]);
	close(write_sigio_fds[1]);
	/* Critical section end */
	set_signals(flags);
}

int add_sigio_fd(int fd)
{
	struct pollfd *p;
	int err = 0, i, n;

	sigio_lock();
	for(i = 0; i < all_sigio_fds.used; i++){
		if(all_sigio_fds.poll[i].fd == fd)
			break;
	}
	if(i == all_sigio_fds.used)
		goto out;

	p = &all_sigio_fds.poll[i];

	for(i = 0; i < current_poll.used; i++){
		if(current_poll.poll[i].fd == fd)
			goto out;
	}

	n = current_poll.used;
	err = need_poll(&next_poll, n + 1);
	if(err)
		goto out;

	memcpy(next_poll.poll, current_poll.poll,
	       current_poll.used * sizeof(struct pollfd));
	next_poll.poll[n] = *p;
	next_poll.used = n + 1;
	update_thread();
out:
	sigio_unlock();
	return err;
}

int ignore_sigio_fd(int fd)
{
	struct pollfd *p;
	int err = 0, i, n = 0;

	/* This is called from exitcalls elsewhere in UML - if
	 * sigio_cleanup has already run, then update_thread will hang
	 * or fail because the thread is no longer running.
	 */
	if(write_sigio_pid == -1)
		return -EIO;

	sigio_lock();
	for(i = 0; i < current_poll.used; i++){
		if(current_poll.poll[i].fd == fd) break;
	}
	if(i == current_poll.used)
		goto out;

	err = need_poll(&next_poll, current_poll.used - 1);
	if(err)
		goto out;

	for(i = 0; i < current_poll.used; i++){
		p = &current_poll.poll[i];
		if(p->fd != fd)
			next_poll.poll[n++] = *p;
	}
	next_poll.used = current_poll.used - 1;

	update_thread();
out:
	sigio_unlock();
	return err;
}

static struct pollfd *setup_initial_poll(int fd)
{
	struct pollfd *p;

	p = um_kmalloc(sizeof(struct pollfd));
	if (p == NULL) {
		printk("setup_initial_poll : failed to allocate poll\n");
		return NULL;
	}
	*p = ((struct pollfd) { .fd		= fd,
				.events		= POLLIN,
				.revents	= 0 });
	return p;
}
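
/* write_sigio_workaround() below sets up the plumbing for the workaround:
 * two descriptor pairs from os_pipe() (one feeding the interrupt code via
 * write_sigio_irq(), one private to the handshake with update_thread())
 * and the helper thread itself.  run_helper_thread() and write_sigio_irq()
 * are defined elsewhere in UML; the CLONE_FILES | CLONE_VM flags mean the
 * thread shares our descriptor table and address space, so it can poll the
 * same fds and swap current_poll/next_poll directly.
 */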

static void write_sigio_workaround(void)
{
	struct pollfd *p;
	int err;
	int l_write_sigio_fds[2];
	int l_sigio_private[2];
	int l_write_sigio_pid;

	/* We call this *tons* of times - and most of the time we must just
	 * return early. */
	sigio_lock();
	l_write_sigio_pid = write_sigio_pid;
	sigio_unlock();

	if (l_write_sigio_pid != -1)
		return;

	err = os_pipe(l_write_sigio_fds, 1, 1);
	if(err < 0){
		printk("write_sigio_workaround - os_pipe 1 failed, "
		       "err = %d\n", -err);
		return;
	}
	err = os_pipe(l_sigio_private, 1, 1);
	if(err < 0){
		printk("write_sigio_workaround - os_pipe 2 failed, "
		       "err = %d\n", -err);
		goto out_close1;
	}

	p = setup_initial_poll(l_sigio_private[1]);
	if(!p)
		goto out_close2;

	sigio_lock();

	/* Did we race?  Don't try to optimize this, please, it's not
	 * likely to happen, and it happens at most once during boot. */
	if(write_sigio_pid != -1)
		goto out_free;

	current_poll = ((struct pollfds) { .poll	= p,
					   .used	= 1,
					   .size	= 1 });

	if (write_sigio_irq(l_write_sigio_fds[0]))
		goto out_clear_poll;

	memcpy(write_sigio_fds, l_write_sigio_fds, sizeof(l_write_sigio_fds));
	memcpy(sigio_private, l_sigio_private, sizeof(l_sigio_private));

	write_sigio_pid = run_helper_thread(write_sigio_thread, NULL,
					    CLONE_FILES | CLONE_VM,
					    &write_sigio_stack);

	if (write_sigio_pid < 0)
		goto out_clear;

	sigio_unlock();
	return;

out_clear:
	write_sigio_pid = -1;
	write_sigio_fds[0] = -1;
	write_sigio_fds[1] = -1;
	sigio_private[0] = -1;
	sigio_private[1] = -1;
out_clear_poll:
	current_poll = ((struct pollfds) { .poll	= NULL,
					   .size	= 0,
					   .used	= 0 });
out_free:
	sigio_unlock();
	kfree(p);
out_close2:
	close(l_sigio_private[0]);
	close(l_sigio_private[1]);
out_close1:
	close(l_write_sigio_fds[0]);
	close(l_write_sigio_fds[1]);
}

/* Changed during early boot */
static int pty_output_sigio = 0;
static int pty_close_sigio = 0;
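
/* maybe_sigio_broken() is the hook used when a tty descriptor is set up
 * for SIGIO (its callers live elsewhere in UML).  If the boot-time checks
 * below showed that the host's ptys deliver SIGIO for this direction
 * (output for writers, close for readers), there is nothing to do.
 * Otherwise the helper thread is started if it isn't running yet, and the
 * descriptor is remembered in all_sigio_fds so that a later add_sigio_fd()
 * can move it into current_poll.
 */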

void maybe_sigio_broken(int fd, int read)
{
	int err;

	if(!isatty(fd))
		return;

	if((read || pty_output_sigio) && (!read || pty_close_sigio))
		return;

	write_sigio_workaround();

	sigio_lock();
	err = need_poll(&all_sigio_fds, all_sigio_fds.used + 1);
	if(err){
		printk("maybe_sigio_broken - failed to add pollfd for "
		       "descriptor %d\n", fd);
		goto out;
	}

	all_sigio_fds.poll[all_sigio_fds.used++] =
		((struct pollfd) { .fd		= fd,
				   .events	= read ? POLLIN : POLLOUT,
				   .revents	= 0 });
out:
	sigio_unlock();
}

static void sigio_cleanup(void)
{
	if (write_sigio_pid == -1)
		return;

	os_kill_process(write_sigio_pid, 1);
	free_stack(write_sigio_stack, 0);
	write_sigio_pid = -1;
}

__uml_exitcall(sigio_cleanup);

/* Used as a flag during SIGIO testing early in boot */
static volatile int got_sigio = 0;

static void __init handler(int sig)
{
	got_sigio = 1;
}

struct openpty_arg {
	int master;
	int slave;
	int err;
};

static void openpty_cb(void *arg)
{
	struct openpty_arg *info = arg;

	info->err = 0;
	if(openpty(&info->master, &info->slave, NULL, NULL, NULL))
		info->err = -errno;
}

static int async_pty(int master, int slave)
{
	int flags;

	flags = fcntl(master, F_GETFL);
	if(flags < 0)
		return -errno;

	if((fcntl(master, F_SETFL, flags | O_NONBLOCK | O_ASYNC) < 0) ||
	   (fcntl(master, F_SETOWN, os_getpid()) < 0))
		return -errno;

	if((fcntl(slave, F_SETFL, flags | O_NONBLOCK) < 0))
		return -errno;

	return 0;
}

static void __init check_one_sigio(void (*proc)(int, int))
{
	struct sigaction old, new;
	struct openpty_arg pty = { .master = -1, .slave = -1 };
	int master, slave, err;

	initial_thread_cb(openpty_cb, &pty);
	if(pty.err){
		printk("openpty failed, errno = %d\n", -pty.err);
		return;
	}

	master = pty.master;
	slave = pty.slave;

	if((master == -1) || (slave == -1)){
		printk("openpty failed to allocate a pty\n");
		return;
	}

	/* Not fatal right now, but complain so we know where we failed. */
	err = raw(master);
	if (err < 0)
		panic("check_sigio : raw failed, errno = %d\n", -err);

	err = async_pty(master, slave);
	if(err < 0)
		panic("check_sigio : async_pty failed, err = %d\n", -err);

	if(sigaction(SIGIO, NULL, &old) < 0)
		panic("check_sigio : sigaction 1 failed, errno = %d\n", errno);
	new = old;
	new.sa_handler = handler;
	if(sigaction(SIGIO, &new, NULL) < 0)
		panic("check_sigio : sigaction 2 failed, errno = %d\n", errno);

	got_sigio = 0;
	(*proc)(master, slave);

	close(master);
	close(slave);

	if(sigaction(SIGIO, &old, NULL) < 0)
		panic("check_sigio : sigaction 3 failed, errno = %d\n", errno);
}

static void tty_output(int master, int slave)
{
	int n;
	char buf[512];

	printk("Checking that host ptys support output SIGIO...");

	memset(buf, 0, sizeof(buf));

	while(write(master, buf, sizeof(buf)) > 0) ;
	if(errno != EAGAIN)
		panic("tty_output : write failed, errno = %d\n", errno);
	while(((n = read(slave, buf, sizeof(buf))) > 0) && !got_sigio) ;

	if(got_sigio){
		printk("Yes\n");
		pty_output_sigio = 1;
	}
	else if((n < 0) && (errno == EAGAIN))
		printk("No, enabling workaround\n");
	else panic("tty_output : read failed, errno = %d\n", errno);
}

static void tty_close(int master, int slave)
{
	printk("Checking that host ptys support SIGIO on close...");

	close(slave);
	if(got_sigio){
		printk("Yes\n");
		pty_close_sigio = 1;
	}
	else printk("No, enabling workaround\n");
}
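
/* A summary of the probes above (describing this file's code, not host
 * behaviour in general):
 *
 *  - check_one_sigio() opens a pty pair, puts it into raw, non-blocking,
 *    SIGIO-enabled mode via raw() and async_pty(), and installs handler()
 *    as a temporary SIGIO handler around the probe.
 *
 *  - tty_output() fills the master until write() returns EAGAIN, then
 *    drains the slave; if the master becoming writable again raises
 *    SIGIO, pty_output_sigio is set and writers don't need the workaround.
 *
 *  - tty_close() closes the slave and checks whether that alone raises
 *    SIGIO on the master, setting pty_close_sigio accordingly.
 */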

void __init check_sigio(void)
{
	if((os_access("/dev/ptmx", OS_ACC_R_OK) < 0) &&
	   (os_access("/dev/ptyp0", OS_ACC_R_OK) < 0)){
		printk("No pseudo-terminals available - skipping pty SIGIO "
		       "check\n");
		return;
	}
	check_one_sigio(tty_output);
	check_one_sigio(tty_close);
}

/* Here because it only does the SIGIO testing for now */
void __init os_check_bugs(void)
{
	check_sigio();
}