/*
 *  linux/fs/fcntl.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */

#include <linux/syscalls.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/capability.h>
#include <linux/dnotify.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/security.h>
#include <linux/ptrace.h>
#include <linux/signal.h>
#include <linux/rcupdate.h>
#include <linux/pid_namespace.h>

#include <asm/poll.h>
#include <asm/siginfo.h>
#include <asm/uaccess.h>

void set_close_on_exec(unsigned int fd, int flag)
{
	struct files_struct *files = current->files;
	struct fdtable *fdt;
	spin_lock(&files->file_lock);
	fdt = files_fdtable(files);
	if (flag)
		FD_SET(fd, fdt->close_on_exec);
	else
		FD_CLR(fd, fdt->close_on_exec);
	spin_unlock(&files->file_lock);
}

static int get_close_on_exec(unsigned int fd)
{
	struct files_struct *files = current->files;
	struct fdtable *fdt;
	int res;
	rcu_read_lock();
	fdt = files_fdtable(files);
	res = FD_ISSET(fd, fdt->close_on_exec);
	rcu_read_unlock();
	return res;
}

/*
 * locate_fd finds a free file descriptor in the open_fds fdset,
 * expanding the fd arrays if necessary.  Must be called with the
 * file_lock held for write.
 */

static int locate_fd(unsigned int orig_start, int cloexec)
{
	struct files_struct *files = current->files;
	unsigned int newfd;
	unsigned int start;
	int error;
	struct fdtable *fdt;

	spin_lock(&files->file_lock);

	error = -EINVAL;
	if (orig_start >= current->signal->rlim[RLIMIT_NOFILE].rlim_cur)
		goto out;

repeat:
	fdt = files_fdtable(files);
	/*
	 * Someone might have closed fd's in the range
	 * orig_start..fdt->next_fd
	 */
	start = orig_start;
	if (start < files->next_fd)
		start = files->next_fd;

	newfd = start;
	if (start < fdt->max_fds)
		newfd = find_next_zero_bit(fdt->open_fds->fds_bits,
					   fdt->max_fds, start);

	error = -EMFILE;
	if (newfd >= current->signal->rlim[RLIMIT_NOFILE].rlim_cur)
		goto out;

	error = expand_files(files, newfd);
	if (error < 0)
		goto out;

	/*
	 * If we needed to expand the fd array we
	 * might have blocked - try again.
	 */
	if (error)
		goto repeat;

	if (start <= files->next_fd)
		files->next_fd = newfd + 1;

	FD_SET(newfd, fdt->open_fds);
	if (cloexec)
		FD_SET(newfd, fdt->close_on_exec);
	else
		FD_CLR(newfd, fdt->close_on_exec);
	error = newfd;

out:
	spin_unlock(&files->file_lock);
	return error;
}

static int dupfd(struct file *file, unsigned int start, int cloexec)
{
	int fd = locate_fd(start, cloexec);
	if (fd >= 0)
		fd_install(fd, file);
	else
		fput(file);

	return fd;
}
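/*
 * dupfd() backs dup(2) as well as the F_DUPFD and F_DUPFD_CLOEXEC fcntl
 * commands: it installs @file at the lowest free descriptor >= @start,
 * optionally marking it close-on-exec.  From userspace that corresponds
 * roughly to a sketch like
 *
 *	int a = dup(fd);			 == fcntl(fd, F_DUPFD, 0)
 *	int b = fcntl(fd, F_DUPFD, 10);		 lowest free fd >= 10
 *	int c = fcntl(fd, F_DUPFD_CLOEXEC, 10);	 same, plus FD_CLOEXEC
 */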
asmlinkage long sys_dup2(unsigned int oldfd, unsigned int newfd)
{
	int err = -EBADF;
	struct file * file, *tofree;
	struct files_struct * files = current->files;
	struct fdtable *fdt;

	spin_lock(&files->file_lock);
	if (!(file = fcheck(oldfd)))
		goto out_unlock;
	err = newfd;
	if (newfd == oldfd)
		goto out_unlock;
	err = -EBADF;
	if (newfd >= current->signal->rlim[RLIMIT_NOFILE].rlim_cur)
		goto out_unlock;
	get_file(file);			/* We are now finished with oldfd */

	err = expand_files(files, newfd);
	if (err < 0)
		goto out_fput;

	/* To avoid races with open() and dup(), we will mark the fd as
	 * in-use in the open-file bitmap throughout the entire dup2()
	 * process.  This is quite safe: do_close() uses the fd array
	 * entry, not the bitmap, to decide what work needs to be
	 * done.  --sct */
	/* Doesn't work. open() might be there first. --AV */

	/* Yes. It's a race. In user space. Nothing sane to do */
	err = -EBUSY;
	fdt = files_fdtable(files);
	tofree = fdt->fd[newfd];
	if (!tofree && FD_ISSET(newfd, fdt->open_fds))
		goto out_fput;

	rcu_assign_pointer(fdt->fd[newfd], file);
	FD_SET(newfd, fdt->open_fds);
	FD_CLR(newfd, fdt->close_on_exec);
	spin_unlock(&files->file_lock);

	if (tofree)
		filp_close(tofree, files);
	err = newfd;
out:
	return err;
out_unlock:
	spin_unlock(&files->file_lock);
	goto out;

out_fput:
	spin_unlock(&files->file_lock);
	fput(file);
	goto out;
}

asmlinkage long sys_dup(unsigned int fildes)
{
	int ret = -EBADF;
	struct file * file = fget(fildes);

	if (file)
		ret = dupfd(file, 0, 0);
	return ret;
}

#define SETFL_MASK (O_APPEND | O_NONBLOCK | O_NDELAY | FASYNC | O_DIRECT | O_NOATIME)

static int setfl(int fd, struct file * filp, unsigned long arg)
{
	struct inode * inode = filp->f_path.dentry->d_inode;
	int error = 0;

	/*
	 * O_APPEND cannot be cleared if the file is marked as append-only
	 * and the file is open for write.
	 */
	if (((arg ^ filp->f_flags) & O_APPEND) && IS_APPEND(inode))
		return -EPERM;

	/* O_NOATIME can only be set by the owner or superuser */
	if ((arg & O_NOATIME) && !(filp->f_flags & O_NOATIME))
		if (!is_owner_or_cap(inode))
			return -EPERM;

	/* required for strict SunOS emulation */
	if (O_NONBLOCK != O_NDELAY)
		if (arg & O_NDELAY)
			arg |= O_NONBLOCK;

	if (arg & O_DIRECT) {
		if (!filp->f_mapping || !filp->f_mapping->a_ops ||
		    !filp->f_mapping->a_ops->direct_IO)
			return -EINVAL;
	}

	if (filp->f_op && filp->f_op->check_flags)
		error = filp->f_op->check_flags(arg);
	if (error)
		return error;

	if ((arg ^ filp->f_flags) & FASYNC) {
		if (filp->f_op && filp->f_op->fasync) {
			error = filp->f_op->fasync(fd, filp, (arg & FASYNC) != 0);
			if (error < 0)
				goto out;
		}
	}

	filp->f_flags = (arg & SETFL_MASK) | (filp->f_flags & ~SETFL_MASK);
out:
	return error;
}
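/*
 * Only the bits in SETFL_MASK can be changed with F_SETFL; the rest of
 * the argument is ignored.  The usual userspace pattern is therefore a
 * read-modify-write, roughly:
 *
 *	int flags = fcntl(fd, F_GETFL, 0);
 *	if (flags >= 0)
 *		fcntl(fd, F_SETFL, flags | O_NONBLOCK);
 */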
static void f_modown(struct file *filp, struct pid *pid, enum pid_type type,
		     uid_t uid, uid_t euid, int force)
{
	write_lock_irq(&filp->f_owner.lock);
	if (force || !filp->f_owner.pid) {
		put_pid(filp->f_owner.pid);
		filp->f_owner.pid = get_pid(pid);
		filp->f_owner.pid_type = type;
		filp->f_owner.uid = uid;
		filp->f_owner.euid = euid;
	}
	write_unlock_irq(&filp->f_owner.lock);
}

int __f_setown(struct file *filp, struct pid *pid, enum pid_type type,
		int force)
{
	int err;

	err = security_file_set_fowner(filp);
	if (err)
		return err;

	f_modown(filp, pid, type, current->uid, current->euid, force);
	return 0;
}
EXPORT_SYMBOL(__f_setown);

int f_setown(struct file *filp, unsigned long arg, int force)
{
	enum pid_type type;
	struct pid *pid;
	int who = arg;
	int result;
	type = PIDTYPE_PID;
	if (who < 0) {
		type = PIDTYPE_PGID;
		who = -who;
	}
	rcu_read_lock();
	pid = find_vpid(who);
	result = __f_setown(filp, pid, type, force);
	rcu_read_unlock();
	return result;
}
EXPORT_SYMBOL(f_setown);

void f_delown(struct file *filp)
{
	f_modown(filp, NULL, PIDTYPE_PID, 0, 0, 1);
}

pid_t f_getown(struct file *filp)
{
	pid_t pid;
	read_lock(&filp->f_owner.lock);
	pid = pid_vnr(filp->f_owner.pid);
	if (filp->f_owner.pid_type == PIDTYPE_PGID)
		pid = -pid;
	read_unlock(&filp->f_owner.lock);
	return pid;
}
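/*
 * The owner recorded above is who receives SIGIO/SIGURG for this file.
 * To get SIGIO on readiness, userspace typically sets the owner and then
 * enables FASYNC (O_ASYNC), roughly:
 *
 *	fcntl(fd, F_SETOWN, getpid());
 *	int flags = fcntl(fd, F_GETFL, 0);
 *	fcntl(fd, F_SETFL, flags | O_ASYNC);
 *
 * A negative F_SETOWN argument names a process group rather than a pid.
 */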
static long do_fcntl(int fd, unsigned int cmd, unsigned long arg,
		struct file *filp)
{
	long err = -EINVAL;

	switch (cmd) {
	case F_DUPFD:
	case F_DUPFD_CLOEXEC:
		get_file(filp);
		err = dupfd(filp, arg, cmd == F_DUPFD_CLOEXEC);
		break;
	case F_GETFD:
		err = get_close_on_exec(fd) ? FD_CLOEXEC : 0;
		break;
	case F_SETFD:
		err = 0;
		set_close_on_exec(fd, arg & FD_CLOEXEC);
		break;
	case F_GETFL:
		err = filp->f_flags;
		break;
	case F_SETFL:
		err = setfl(fd, filp, arg);
		break;
	case F_GETLK:
		err = fcntl_getlk(filp, (struct flock __user *) arg);
		break;
	case F_SETLK:
	case F_SETLKW:
		err = fcntl_setlk(fd, filp, cmd, (struct flock __user *) arg);
		break;
	case F_GETOWN:
		/*
		 * XXX If f_owner is a process group, the
		 * negative return value will get converted
		 * into an error.  Oops.  If we keep the
		 * current syscall conventions, the only way
		 * to fix this will be in libc.
		 */
		err = f_getown(filp);
		force_successful_syscall_return();
		break;
	case F_SETOWN:
		err = f_setown(filp, arg, 1);
		break;
	case F_GETSIG:
		err = filp->f_owner.signum;
		break;
	case F_SETSIG:
		/* arg == 0 restores default behaviour. */
		if (!valid_signal(arg)) {
			break;
		}
		err = 0;
		filp->f_owner.signum = arg;
		break;
	case F_GETLEASE:
		err = fcntl_getlease(filp);
		break;
	case F_SETLEASE:
		err = fcntl_setlease(fd, filp, arg);
		break;
	case F_NOTIFY:
		err = fcntl_dirnotify(fd, filp, arg);
		break;
	default:
		break;
	}
	return err;
}

asmlinkage long sys_fcntl(unsigned int fd, unsigned int cmd, unsigned long arg)
{
	struct file *filp;
	long err = -EBADF;

	filp = fget(fd);
	if (!filp)
		goto out;

	err = security_file_fcntl(filp, cmd, arg);
	if (err) {
		fput(filp);
		return err;
	}

	err = do_fcntl(fd, cmd, arg, filp);

	fput(filp);
out:
	return err;
}

#if BITS_PER_LONG == 32
asmlinkage long sys_fcntl64(unsigned int fd, unsigned int cmd, unsigned long arg)
{
	struct file * filp;
	long err;

	err = -EBADF;
	filp = fget(fd);
	if (!filp)
		goto out;

	err = security_file_fcntl(filp, cmd, arg);
	if (err) {
		fput(filp);
		return err;
	}
	err = -EBADF;

	switch (cmd) {
	case F_GETLK64:
		err = fcntl_getlk64(filp, (struct flock64 __user *) arg);
		break;
	case F_SETLK64:
	case F_SETLKW64:
		err = fcntl_setlk64(fd, filp, cmd,
				(struct flock64 __user *) arg);
		break;
	default:
		err = do_fcntl(fd, cmd, arg, filp);
		break;
	}
	fput(filp);
out:
	return err;
}
#endif

/* Table to convert sigio signal codes into poll band bitmaps */

static const long band_table[NSIGPOLL] = {
	POLLIN | POLLRDNORM,			/* POLL_IN */
	POLLOUT | POLLWRNORM | POLLWRBAND,	/* POLL_OUT */
	POLLIN | POLLRDNORM | POLLMSG,		/* POLL_MSG */
	POLLERR,				/* POLL_ERR */
	POLLPRI | POLLRDBAND,			/* POLL_PRI */
	POLLHUP | POLLERR			/* POLL_HUP */
};
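/*
 * With F_SETSIG the owner can receive a queued real-time signal instead
 * of plain SIGIO; its siginfo then carries si_fd and si_band (the latter
 * mapped through band_table above).  A rough userspace sketch, assuming
 * O_ASYNC has already been enabled:
 *
 *	struct sigaction sa = { .sa_sigaction = handler,
 *				.sa_flags = SA_SIGINFO };
 *	sigaction(SIGRTMIN + 1, &sa, NULL);
 *	fcntl(fd, F_SETOWN, getpid());
 *	fcntl(fd, F_SETSIG, SIGRTMIN + 1);
 *
 * and the handler reads info->si_fd and info->si_band.
 */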
static inline int sigio_perm(struct task_struct *p,
			     struct fown_struct *fown, int sig)
{
	return (((fown->euid == 0) ||
		 (fown->euid == p->suid) || (fown->euid == p->uid) ||
		 (fown->uid == p->suid) || (fown->uid == p->uid)) &&
		!security_file_send_sigiotask(p, fown, sig));
}

static void send_sigio_to_task(struct task_struct *p,
			       struct fown_struct *fown,
			       int fd,
			       int reason)
{
	if (!sigio_perm(p, fown, fown->signum))
		return;

	switch (fown->signum) {
		siginfo_t si;
	default:
		/* Queue a rt signal with the appropriate fd as its
		   value.  We use SI_SIGIO as the source, not
		   SI_KERNEL, since kernel signals always get
		   delivered even if we can't queue.  Failure to
		   queue in this case _should_ be reported; we fall
		   back to SIGIO in that case. --sct */
		si.si_signo = fown->signum;
		si.si_errno = 0;
		si.si_code  = reason;
		/* Make sure we are called with one of the POLL_*
		   reasons, otherwise we could leak kernel stack into
		   userspace.  */
		BUG_ON((reason & __SI_MASK) != __SI_POLL);
		if (reason - POLL_IN >= NSIGPOLL)
			si.si_band = ~0L;
		else
			si.si_band = band_table[reason - POLL_IN];
		si.si_fd = fd;
		if (!group_send_sig_info(fown->signum, &si, p))
			break;
		/* fall-through: fall back on the old plain SIGIO signal */
	case 0:
		group_send_sig_info(SIGIO, SEND_SIG_PRIV, p);
	}
}

void send_sigio(struct fown_struct *fown, int fd, int band)
{
	struct task_struct *p;
	enum pid_type type;
	struct pid *pid;

	read_lock(&fown->lock);
	type = fown->pid_type;
	pid = fown->pid;
	if (!pid)
		goto out_unlock_fown;

	read_lock(&tasklist_lock);
	do_each_pid_task(pid, type, p) {
		send_sigio_to_task(p, fown, fd, band);
	} while_each_pid_task(pid, type, p);
	read_unlock(&tasklist_lock);
out_unlock_fown:
	read_unlock(&fown->lock);
}

static void send_sigurg_to_task(struct task_struct *p,
				struct fown_struct *fown)
{
	if (sigio_perm(p, fown, SIGURG))
		group_send_sig_info(SIGURG, SEND_SIG_PRIV, p);
}

int send_sigurg(struct fown_struct *fown)
{
	struct task_struct *p;
	enum pid_type type;
	struct pid *pid;
	int ret = 0;

	read_lock(&fown->lock);
	type = fown->pid_type;
	pid = fown->pid;
	if (!pid)
		goto out_unlock_fown;

	ret = 1;

	read_lock(&tasklist_lock);
	do_each_pid_task(pid, type, p) {
		send_sigurg_to_task(p, fown);
	} while_each_pid_task(pid, type, p);
	read_unlock(&tasklist_lock);
out_unlock_fown:
	read_unlock(&fown->lock);
	return ret;
}

static DEFINE_RWLOCK(fasync_lock);
static struct kmem_cache *fasync_cache __read_mostly;

/*
 * fasync_helper() is used by some character device drivers (mainly mice)
 * to set up the fasync queue.  It returns negative on error, 0 if it did
 * no changes and positive if it added/deleted the entry.
 */
int fasync_helper(int fd, struct file * filp, int on, struct fasync_struct **fapp)
{
	struct fasync_struct *fa, **fp;
	struct fasync_struct *new = NULL;
	int result = 0;

	if (on) {
		new = kmem_cache_alloc(fasync_cache, GFP_KERNEL);
		if (!new)
			return -ENOMEM;
	}
	write_lock_irq(&fasync_lock);
	for (fp = fapp; (fa = *fp) != NULL; fp = &fa->fa_next) {
		if (fa->fa_file == filp) {
			if (on) {
				fa->fa_fd = fd;
				kmem_cache_free(fasync_cache, new);
			} else {
				*fp = fa->fa_next;
				kmem_cache_free(fasync_cache, fa);
				result = 1;
			}
			goto out;
		}
	}

	if (on) {
		new->magic = FASYNC_MAGIC;
		new->fa_file = filp;
		new->fa_fd = fd;
		new->fa_next = *fapp;
		*fapp = new;
		result = 1;
	}
out:
	write_unlock_irq(&fasync_lock);
	return result;
}

EXPORT_SYMBOL(fasync_helper);
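/*
 * A driver's ->fasync file operation usually just forwards to
 * fasync_helper() with its own queue head, and its data-ready path then
 * calls kill_fasync() (below).  A minimal sketch, with "mydev" standing
 * in for a hypothetical driver structure:
 *
 *	static int mydev_fasync(int fd, struct file *filp, int on)
 *	{
 *		struct mydev *dev = filp->private_data;
 *
 *		return fasync_helper(fd, filp, on, &dev->async_queue);
 *	}
 *
 * and, when new data arrives:
 *
 *	kill_fasync(&dev->async_queue, SIGIO, POLL_IN);
 */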
void __kill_fasync(struct fasync_struct *fa, int sig, int band)
{
	while (fa) {
		struct fown_struct * fown;
		if (fa->magic != FASYNC_MAGIC) {
			printk(KERN_ERR "kill_fasync: bad magic number in "
			       "fasync_struct!\n");
			return;
		}
		fown = &fa->fa_file->f_owner;
		/* Don't send SIGURG to processes which have not set a
		   queued signum: SIGURG has its own default signalling
		   mechanism. */
		if (!(sig == SIGURG && fown->signum == 0))
			send_sigio(fown, fa->fa_fd, band);
		fa = fa->fa_next;
	}
}

EXPORT_SYMBOL(__kill_fasync);

void kill_fasync(struct fasync_struct **fp, int sig, int band)
{
	/* First a quick test without locking: usually
	 * the list is empty.
	 */
	if (*fp) {
		read_lock(&fasync_lock);
		/* reread *fp after obtaining the lock */
		__kill_fasync(*fp, sig, band);
		read_unlock(&fasync_lock);
	}
}
EXPORT_SYMBOL(kill_fasync);

static int __init fasync_init(void)
{
	fasync_cache = kmem_cache_create("fasync_cache",
		sizeof(struct fasync_struct), 0, SLAB_PANIC, NULL);
	return 0;
}

module_init(fasync_init)