/*
 *  linux/fs/locks.c
 *
 *  Provide support for fcntl()'s F_GETLK, F_SETLK, and F_SETLKW calls.
 *  Doug Evans (dje@spiff.uucp), August 07, 1992
 *
 *  Deadlock detection added.
 *  FIXME: one thing isn't handled yet:
 *	- mandatory locks (requires lots of changes elsewhere)
 *  Kelly Carmichael (kelly@[142.24.8.65]), September 17, 1994.
 *
 *  Miscellaneous edits, and a total rewrite of posix_lock_file() code.
 *  Kai Petzke (wpp@marie.physik.tu-berlin.de), 1994
 *
 *  Converted file_lock_table to a linked list from an array, which eliminates
 *  the limits on how many active file locks are open.
 *  Chad Page (pageone@netcom.com), November 27, 1994
 *
 *  Removed dependency on file descriptors. dup()'ed file descriptors now
 *  get the same locks as the original file descriptors, and a close() on
 *  any file descriptor removes ALL the locks on the file for the current
 *  process. Since locks still depend on the process id, locks are inherited
 *  after an exec() but not after a fork(). This agrees with POSIX, and both
 *  BSD and SVR4 practice.
 *  Andy Walker (andy@lysaker.kvaerner.no), February 14, 1995
 *
 *  Scrapped free list which is redundant now that we allocate locks
 *  dynamically with kmalloc()/kfree().
 *  Andy Walker (andy@lysaker.kvaerner.no), February 21, 1995
 *
 *  Implemented two lock personalities - FL_FLOCK and FL_POSIX.
 *
 *  FL_POSIX locks are created with calls to fcntl() and lockf() through the
 *  fcntl() system call. They have the semantics described above.
 *
 *  FL_FLOCK locks are created with calls to flock(), through the flock()
 *  system call, which is new. Old C libraries implement flock() via fcntl()
 *  and will continue to use the old, broken implementation.
 *
 *  FL_FLOCK locks follow the 4.4 BSD flock() semantics. They are associated
 *  with a file pointer (filp). As a result they can be shared by a parent
 *  process and its children after a fork(). They are removed when the last
 *  file descriptor referring to the file pointer is closed (unless explicitly
 *  unlocked).
 *
 *  FL_FLOCK locks never deadlock, an existing lock is always removed before
 *  upgrading from shared to exclusive (or vice versa). When this happens
 *  any processes blocked by the current lock are woken up and allowed to
 *  run before the new lock is applied.
 *  Andy Walker (andy@lysaker.kvaerner.no), June 09, 1995
 *
 *  Removed some race conditions in flock_lock_file(), marked other possible
 *  races. Just grep for FIXME to see them.
 *  Dmitry Gorodchanin (pgmdsg@ibi.com), February 09, 1996.
 *
 *  Addressed Dmitry's concerns. Deadlock checking no longer recursive.
 *  Lock allocation changed to GFP_ATOMIC as we can't afford to sleep
 *  once we've checked for blocking and deadlocking.
 *  Andy Walker (andy@lysaker.kvaerner.no), April 03, 1996.
 *
 *  Initial implementation of mandatory locks. SunOS turned out to be
 *  a rotten model, so I implemented the "obvious" semantics.
 *  See 'Documentation/mandatory.txt' for details.
 *  Andy Walker (andy@lysaker.kvaerner.no), April 06, 1996.
 *
 *  Don't allow mandatory locks on mmap()'ed files. Added simple functions to
 *  check if a file has mandatory locks, used by mmap(), open() and creat() to
 *  see if system call should be rejected. Ref. HP-UX/SunOS/Solaris Reference
 *  Manual, Section 2.
 *  Andy Walker (andy@lysaker.kvaerner.no), April 09, 1996.
 *
 *  Tidied up block list handling. Added '/proc/locks' interface.
 *  Andy Walker (andy@lysaker.kvaerner.no), April 24, 1996.
 *
 *  Fixed deadlock condition for pathological code that mixes calls to
 *  flock() and fcntl().
 *  Andy Walker (andy@lysaker.kvaerner.no), April 29, 1996.
 *
 *  Allow only one type of locking scheme (FL_POSIX or FL_FLOCK) to be in use
 *  for a given file at a time. Changed the CONFIG_LOCK_MANDATORY scheme to
 *  guarantee sensible behaviour in the case where file system modules might
 *  be compiled with different options than the kernel itself.
 *  Andy Walker (andy@lysaker.kvaerner.no), May 15, 1996.
 *
 *  Added a couple of missing wake_up() calls. Thanks to Thomas Meckel
 *  (Thomas.Meckel@mni.fh-giessen.de) for spotting this.
 *  Andy Walker (andy@lysaker.kvaerner.no), May 15, 1996.
 *
 *  Changed FL_POSIX locks to use the block list in the same way as FL_FLOCK
 *  locks. Changed process synchronisation to avoid dereferencing locks that
 *  have already been freed.
 *  Andy Walker (andy@lysaker.kvaerner.no), Sep 21, 1996.
 *
 *  Made the block list a circular list to minimise searching in the list.
 *  Andy Walker (andy@lysaker.kvaerner.no), Sep 25, 1996.
 *
 *  Made mandatory locking a mount option. Default is not to allow mandatory
 *  locking.
 *  Andy Walker (andy@lysaker.kvaerner.no), Oct 04, 1996.
 *
 *  Some adaptations for NFS support.
 *  Olaf Kirch (okir@monad.swb.de), Dec 1996,
 *
 *  Fixed /proc/locks interface so that we can't overrun the buffer we are handed.
 *  Andy Walker (andy@lysaker.kvaerner.no), May 12, 1997.
 *
 *  Use slab allocator instead of kmalloc/kfree.
 *  Use generic list implementation from <linux/list.h>.
 *  Sped up posix_locks_deadlock by only considering blocked locks.
 *  Matthew Wilcox <willy@debian.org>, March, 2000.
 *
 *  Leases and LOCK_MAND
 *  Matthew Wilcox <willy@debian.org>, June, 2000.
 *  Stephen Rothwell <sfr@canb.auug.org.au>, June, 2000.
 */
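
/*
 * Illustrative userspace sketch (hypothetical, not part of this file's
 * build): the two personalities described above are normally exercised
 * like this.
 *
 *	struct flock fl = { .l_type = F_WRLCK, .l_whence = SEEK_SET,
 *			    .l_start = 0, .l_len = 0 };
 *	fcntl(fd, F_SETLKW, &fl);	FL_POSIX: byte-range lock, owned by
 *					the process, dropped on any close()
 *	flock(fd, LOCK_EX);		FL_FLOCK: whole-file lock, attached
 *					to the open file and shared across
 *					fork() and dup()
 */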

#include <linux/capability.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/security.h>
#include <linux/slab.h>
#include <linux/smp_lock.h>
#include <linux/syscalls.h>
#include <linux/time.h>
#include <linux/rcupdate.h>

#include <asm/semaphore.h>
#include <asm/uaccess.h>

#define IS_POSIX(fl)	(fl->fl_flags & FL_POSIX)
#define IS_FLOCK(fl)	(fl->fl_flags & FL_FLOCK)
#define IS_LEASE(fl)	(fl->fl_flags & FL_LEASE)

int leases_enable = 1;
int lease_break_time = 45;

#define for_each_lock(inode, lockp) \
	for (lockp = &inode->i_flock; *lockp != NULL; lockp = &(*lockp)->fl_next)

LIST_HEAD(file_lock_list);

EXPORT_SYMBOL(file_lock_list);

static LIST_HEAD(blocked_list);

static kmem_cache_t *filelock_cache;

/* Allocate an empty lock structure. */
static struct file_lock *locks_alloc_lock(void)
{
	return kmem_cache_alloc(filelock_cache, SLAB_KERNEL);
}

/* Free a lock which is not in use. */
static void locks_free_lock(struct file_lock *fl)
{
	if (fl == NULL) {
		BUG();
		return;
	}
	if (waitqueue_active(&fl->fl_wait))
		panic("Attempting to free lock with active wait queue");

	if (!list_empty(&fl->fl_block))
		panic("Attempting to free lock with active block list");

	if (!list_empty(&fl->fl_link))
		panic("Attempting to free lock on active lock list");

	if (fl->fl_ops) {
		if (fl->fl_ops->fl_release_private)
			fl->fl_ops->fl_release_private(fl);
		fl->fl_ops = NULL;
	}

	if (fl->fl_lmops) {
		if (fl->fl_lmops->fl_release_private)
			fl->fl_lmops->fl_release_private(fl);
		fl->fl_lmops = NULL;
	}

	kmem_cache_free(filelock_cache, fl);
}

void locks_init_lock(struct file_lock *fl)
{
	INIT_LIST_HEAD(&fl->fl_link);
	INIT_LIST_HEAD(&fl->fl_block);
	init_waitqueue_head(&fl->fl_wait);
	fl->fl_next = NULL;
	fl->fl_fasync = NULL;
	fl->fl_owner = NULL;
	fl->fl_pid = 0;
	fl->fl_file = NULL;
	fl->fl_flags = 0;
	fl->fl_type = 0;
	fl->fl_start = fl->fl_end = 0;
	fl->fl_ops = NULL;
	fl->fl_lmops = NULL;
}

EXPORT_SYMBOL(locks_init_lock);

/*
 * Initialises the fields of the file lock which are invariant for
 * free file_locks.
 */
static void init_once(void *foo, kmem_cache_t *cache, unsigned long flags)
{
	struct file_lock *lock = (struct file_lock *) foo;

	if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) !=
					SLAB_CTOR_CONSTRUCTOR)
		return;

	locks_init_lock(lock);
}

/*
 * Initialize a new lock from an existing file_lock structure.
 */
void locks_copy_lock(struct file_lock *new, struct file_lock *fl)
{
	new->fl_owner = fl->fl_owner;
	new->fl_pid = fl->fl_pid;
	new->fl_file = fl->fl_file;
	new->fl_flags = fl->fl_flags;
	new->fl_type = fl->fl_type;
	new->fl_start = fl->fl_start;
	new->fl_end = fl->fl_end;
	new->fl_ops = fl->fl_ops;
	new->fl_lmops = fl->fl_lmops;
	if (fl->fl_ops && fl->fl_ops->fl_copy_lock)
		fl->fl_ops->fl_copy_lock(new, fl);
	if (fl->fl_lmops && fl->fl_lmops->fl_copy_lock)
		fl->fl_lmops->fl_copy_lock(new, fl);
}

EXPORT_SYMBOL(locks_copy_lock);

static inline int flock_translate_cmd(int cmd) {
	if (cmd & LOCK_MAND)
		return cmd & (LOCK_MAND | LOCK_RW);
	switch (cmd) {
	case LOCK_SH:
		return F_RDLCK;
	case LOCK_EX:
		return F_WRLCK;
	case LOCK_UN:
		return F_UNLCK;
	}
	return -EINVAL;
}

/* Fill in a file_lock structure with an appropriate FLOCK lock. */
static int flock_make_lock(struct file *filp, struct file_lock **lock,
		unsigned int cmd)
{
	struct file_lock *fl;
	int type = flock_translate_cmd(cmd);
	if (type < 0)
		return type;

	fl = locks_alloc_lock();
	if (fl == NULL)
		return -ENOMEM;

	fl->fl_file = filp;
	fl->fl_pid = current->tgid;
	fl->fl_flags = FL_FLOCK;
	fl->fl_type = type;
	fl->fl_end = OFFSET_MAX;

	*lock = fl;
	return 0;
}

static int assign_type(struct file_lock *fl, int type)
{
	switch (type) {
	case F_RDLCK:
	case F_WRLCK:
	case F_UNLCK:
		fl->fl_type = type;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

/* Verify a "struct flock" and copy it to a "struct file_lock" as a POSIX
 * style lock.
 */
static int flock_to_posix_lock(struct file *filp, struct file_lock *fl,
			       struct flock *l)
{
	off_t start, end;

	switch (l->l_whence) {
	case 0: /*SEEK_SET*/
		start = 0;
		break;
	case 1: /*SEEK_CUR*/
		start = filp->f_pos;
		break;
	case 2: /*SEEK_END*/
		start = i_size_read(filp->f_dentry->d_inode);
		break;
	default:
		return -EINVAL;
	}

	/* POSIX-1996 leaves the case l->l_len < 0 undefined;
	   POSIX-2001 defines it. */
	start += l->l_start;
	if (start < 0)
		return -EINVAL;
	fl->fl_end = OFFSET_MAX;
	if (l->l_len > 0) {
		end = start + l->l_len - 1;
		fl->fl_end = end;
	} else if (l->l_len < 0) {
		end = start - 1;
		fl->fl_end = end;
		start += l->l_len;
		if (start < 0)
			return -EINVAL;
	}
	fl->fl_start = start;	/* we record the absolute position */
	if (fl->fl_end < fl->fl_start)
		return -EOVERFLOW;

	fl->fl_owner = current->files;
	fl->fl_pid = current->tgid;
	fl->fl_file = filp;
	fl->fl_flags = FL_POSIX;
	fl->fl_ops = NULL;
	fl->fl_lmops = NULL;

	return assign_type(fl, l->l_type);
}

#if BITS_PER_LONG == 32
static int flock64_to_posix_lock(struct file *filp, struct file_lock *fl,
				 struct flock64 *l)
{
	loff_t start;

	switch (l->l_whence) {
	case 0: /*SEEK_SET*/
		start = 0;
		break;
	case 1: /*SEEK_CUR*/
		start = filp->f_pos;
		break;
	case 2: /*SEEK_END*/
		start = i_size_read(filp->f_dentry->d_inode);
		break;
	default:
		return -EINVAL;
	}

	start += l->l_start;
	if (start < 0)
		return -EINVAL;
	fl->fl_end = OFFSET_MAX;
	if (l->l_len > 0) {
		fl->fl_end = start + l->l_len - 1;
	} else if (l->l_len < 0) {
		fl->fl_end = start - 1;
		start += l->l_len;
		if (start < 0)
			return -EINVAL;
	}
	fl->fl_start = start;	/* we record the absolute position */
	if (fl->fl_end < fl->fl_start)
		return -EOVERFLOW;

	fl->fl_owner = current->files;
	fl->fl_pid = current->tgid;
	fl->fl_file = filp;
	fl->fl_flags = FL_POSIX;
	fl->fl_ops = NULL;
	fl->fl_lmops = NULL;

	switch (l->l_type) {
	case F_RDLCK:
	case F_WRLCK:
	case F_UNLCK:
		fl->fl_type = l->l_type;
		break;
	default:
		return -EINVAL;
	}

	return (0);
}
#endif

/* default lease lock manager operations */
static void lease_break_callback(struct file_lock *fl)
{
	kill_fasync(&fl->fl_fasync, SIGIO, POLL_MSG);
}

static void lease_release_private_callback(struct file_lock *fl)
{
	if (!fl->fl_file)
		return;

	f_delown(fl->fl_file);
	fl->fl_file->f_owner.signum = 0;
}

static int lease_mylease_callback(struct file_lock *fl, struct file_lock *try)
{
	return fl->fl_file == try->fl_file;
}

static struct lock_manager_operations lease_manager_ops = {
	.fl_break = lease_break_callback,
	.fl_release_private = lease_release_private_callback,
	.fl_mylease = lease_mylease_callback,
	.fl_change = lease_modify,
};

/*
 * Initialize a lease, use the default lock manager operations
 */
static int lease_init(struct file *filp, int type, struct file_lock *fl)
{
	fl->fl_owner = current->files;
	fl->fl_pid = current->tgid;

	fl->fl_file = filp;
	fl->fl_flags = FL_LEASE;
	if (assign_type(fl, type) != 0) {
		locks_free_lock(fl);
		return -EINVAL;
	}
	fl->fl_start = 0;
	fl->fl_end = OFFSET_MAX;
	fl->fl_ops = NULL;
	fl->fl_lmops = &lease_manager_ops;
	return 0;
}

/* Allocate a file_lock initialised to this type of lease */
static int lease_alloc(struct file *filp, int type, struct file_lock **flp)
{
	struct file_lock *fl = locks_alloc_lock();
	int error;

	if (fl == NULL)
		return -ENOMEM;

	error = lease_init(filp, type, fl);
	if (error)
		return error;
	*flp = fl;
	return 0;
}

/* Check if two locks overlap each other.
 */
static inline int locks_overlap(struct file_lock *fl1, struct file_lock *fl2)
{
	return ((fl1->fl_end >= fl2->fl_start) &&
		(fl2->fl_end >= fl1->fl_start));
}

/*
 * Check whether two locks have the same owner.
 */
static int posix_same_owner(struct file_lock *fl1, struct file_lock *fl2)
{
	if (fl1->fl_lmops && fl1->fl_lmops->fl_compare_owner)
		return fl2->fl_lmops == fl1->fl_lmops &&
			fl1->fl_lmops->fl_compare_owner(fl1, fl2);
	return fl1->fl_owner == fl2->fl_owner;
}

/* Remove waiter from blocker's block list.
 * When blocker ends up pointing to itself then the list is empty.
 */
static void __locks_delete_block(struct file_lock *waiter)
{
	list_del_init(&waiter->fl_block);
	list_del_init(&waiter->fl_link);
	waiter->fl_next = NULL;
}

/*
 */
static void locks_delete_block(struct file_lock *waiter)
{
	lock_kernel();
	__locks_delete_block(waiter);
	unlock_kernel();
}

/* Insert waiter into blocker's block list.
 * We use a circular list so that processes can be easily woken up in
 * the order they blocked. The documentation doesn't require this but
 * it seems like the reasonable thing to do.
 */
static void locks_insert_block(struct file_lock *blocker,
			       struct file_lock *waiter)
{
	if (!list_empty(&waiter->fl_block)) {
		printk(KERN_ERR "locks_insert_block: removing duplicated lock "
			"(pid=%d %Ld-%Ld type=%d)\n", waiter->fl_pid,
			waiter->fl_start, waiter->fl_end, waiter->fl_type);
		__locks_delete_block(waiter);
	}
	list_add_tail(&waiter->fl_block, &blocker->fl_block);
	waiter->fl_next = blocker;
	if (IS_POSIX(blocker))
		list_add(&waiter->fl_link, &blocked_list);
}

/* Wake up processes blocked waiting for blocker.
 * If told to wait then schedule the processes until the block list
 * is empty, otherwise empty the block list ourselves.
 */
static void locks_wake_up_blocks(struct file_lock *blocker)
{
	while (!list_empty(&blocker->fl_block)) {
		struct file_lock *waiter = list_entry(blocker->fl_block.next,
				struct file_lock, fl_block);
		__locks_delete_block(waiter);
		if (waiter->fl_lmops && waiter->fl_lmops->fl_notify)
			waiter->fl_lmops->fl_notify(waiter);
		else
			wake_up(&waiter->fl_wait);
	}
}

/* Insert file lock fl into an inode's lock list at the position indicated
 * by pos. At the same time add the lock to the global file lock list.
 */
static void locks_insert_lock(struct file_lock **pos, struct file_lock *fl)
{
	list_add(&fl->fl_link, &file_lock_list);

	/* insert into file's list */
	fl->fl_next = *pos;
	*pos = fl;

	if (fl->fl_ops && fl->fl_ops->fl_insert)
		fl->fl_ops->fl_insert(fl);
}

/*
 * Delete a lock and then free it.
 * Wake up processes that are blocked waiting for this lock,
 * notify the FS that the lock has been cleared and
 * finally free the lock.
 */
static void locks_delete_lock(struct file_lock **thisfl_p)
{
	struct file_lock *fl = *thisfl_p;

	*thisfl_p = fl->fl_next;
	fl->fl_next = NULL;
	list_del_init(&fl->fl_link);

	fasync_helper(0, fl->fl_file, 0, &fl->fl_fasync);
	if (fl->fl_fasync != NULL) {
		printk(KERN_ERR "locks_delete_lock: fasync == %p\n", fl->fl_fasync);
		fl->fl_fasync = NULL;
	}

	if (fl->fl_ops && fl->fl_ops->fl_remove)
		fl->fl_ops->fl_remove(fl);

	locks_wake_up_blocks(fl);
	locks_free_lock(fl);
}

/* Determine if lock sys_fl blocks lock caller_fl. Common functionality
 * checks for shared/exclusive status of overlapping locks.
 */
static int locks_conflict(struct file_lock *caller_fl, struct file_lock *sys_fl)
{
	if (sys_fl->fl_type == F_WRLCK)
		return 1;
	if (caller_fl->fl_type == F_WRLCK)
		return 1;
	return 0;
}

/* Determine if lock sys_fl blocks lock caller_fl. POSIX specific
 * checking before calling the locks_conflict().
 */
static int posix_locks_conflict(struct file_lock *caller_fl, struct file_lock *sys_fl)
{
	/* POSIX locks owned by the same process do not conflict with
	 * each other.
	 */
	if (!IS_POSIX(sys_fl) || posix_same_owner(caller_fl, sys_fl))
		return (0);

	/* Check whether they overlap */
	if (!locks_overlap(caller_fl, sys_fl))
		return 0;

	return (locks_conflict(caller_fl, sys_fl));
}

/* Determine if lock sys_fl blocks lock caller_fl. FLOCK specific
 * checking before calling the locks_conflict().
 */
static int flock_locks_conflict(struct file_lock *caller_fl, struct file_lock *sys_fl)
{
	/* FLOCK locks referring to the same filp do not conflict with
	 * each other.
	 */
	if (!IS_FLOCK(sys_fl) || (caller_fl->fl_file == sys_fl->fl_file))
		return (0);
	if ((caller_fl->fl_type & LOCK_MAND) || (sys_fl->fl_type & LOCK_MAND))
		return 0;

	return (locks_conflict(caller_fl, sys_fl));
}

static int interruptible_sleep_on_locked(wait_queue_head_t *fl_wait, int timeout)
{
	int result = 0;
	DECLARE_WAITQUEUE(wait, current);

	__set_current_state(TASK_INTERRUPTIBLE);
	add_wait_queue(fl_wait, &wait);
	if (timeout == 0)
		schedule();
	else
		result = schedule_timeout(timeout);
	if (signal_pending(current))
		result = -ERESTARTSYS;
	remove_wait_queue(fl_wait, &wait);
	__set_current_state(TASK_RUNNING);
	return result;
}

static int locks_block_on_timeout(struct file_lock *blocker, struct file_lock *waiter, int time)
{
	int result;
	locks_insert_block(blocker, waiter);
	result = interruptible_sleep_on_locked(&waiter->fl_wait, time);
	__locks_delete_block(waiter);
	return result;
}

struct file_lock *
posix_test_lock(struct file *filp, struct file_lock *fl)
{
	struct file_lock *cfl;

	lock_kernel();
	for (cfl = filp->f_dentry->d_inode->i_flock; cfl; cfl = cfl->fl_next) {
		if (!IS_POSIX(cfl))
			continue;
		if (posix_locks_conflict(cfl, fl))
			break;
	}
	unlock_kernel();

	return (cfl);
}

EXPORT_SYMBOL(posix_test_lock);

/* This function tests for deadlock condition before putting a process to
 * sleep. The detection scheme is no longer recursive. Recursive was neat,
 * but dangerous - we risked stack corruption if the lock data was bad, or
 * if the recursion was too deep for any other reason.
 *
 * We rely on the fact that a task can only be on one lock's wait queue
 * at a time. When we find blocked_task on a wait queue we can re-search
 * with blocked_task equal to that queue's owner, until either blocked_task
 * isn't found, or blocked_task is found on a queue owned by my_task.
 *
 * Note: the above assumption may not be true when handling lock requests
 * from a broken NFS client. But broken NFS clients have a lot more to
 * worry about than proper deadlock detection anyway... --okir
 */
int posix_locks_deadlock(struct file_lock *caller_fl,
				struct file_lock *block_fl)
{
	struct list_head *tmp;

next_task:
	if (posix_same_owner(caller_fl, block_fl))
		return 1;
	list_for_each(tmp, &blocked_list) {
		struct file_lock *fl = list_entry(tmp, struct file_lock, fl_link);
		if (posix_same_owner(fl, block_fl)) {
			fl = fl->fl_next;
			block_fl = fl;
			goto next_task;
		}
	}
	return 0;
}

EXPORT_SYMBOL(posix_locks_deadlock);

/* Try to create a FLOCK lock on filp. We always insert new FLOCK locks
 * at the head of the list, but that's secret knowledge known only to
 * flock_lock_file and posix_lock_file.
 */
static int flock_lock_file(struct file *filp, struct file_lock *new_fl)
{
	struct file_lock **before;
	struct inode * inode = filp->f_dentry->d_inode;
	int error = 0;
	int found = 0;

	lock_kernel();
	for_each_lock(inode, before) {
		struct file_lock *fl = *before;
		if (IS_POSIX(fl))
			break;
		if (IS_LEASE(fl))
			continue;
		if (filp != fl->fl_file)
			continue;
		if (new_fl->fl_type == fl->fl_type)
			goto out;
		found = 1;
		locks_delete_lock(before);
		break;
	}
	unlock_kernel();

	if (new_fl->fl_type == F_UNLCK)
		return 0;

	/*
	 * If a higher-priority process was blocked on the old file lock,
	 * give it the opportunity to lock the file.
	 */
	if (found)
		cond_resched();

	lock_kernel();
	for_each_lock(inode, before) {
		struct file_lock *fl = *before;
		if (IS_POSIX(fl))
			break;
		if (IS_LEASE(fl))
			continue;
		if (!flock_locks_conflict(new_fl, fl))
			continue;
		error = -EAGAIN;
		if (new_fl->fl_flags & FL_SLEEP) {
			locks_insert_block(fl, new_fl);
		}
		goto out;
	}
	locks_insert_lock(&inode->i_flock, new_fl);
	error = 0;

out:
	unlock_kernel();
	return error;
}

EXPORT_SYMBOL(posix_lock_file);

static int __posix_lock_file(struct inode *inode, struct file_lock *request)
{
	struct file_lock *fl;
	struct file_lock *new_fl, *new_fl2;
	struct file_lock *left = NULL;
	struct file_lock *right = NULL;
	struct file_lock **before;
	int error, added = 0;

	/*
	 * We may need two file_lock structures for this operation,
	 * so we get them in advance to avoid races.
	 */
	new_fl = locks_alloc_lock();
	new_fl2 = locks_alloc_lock();

	lock_kernel();
	if (request->fl_type != F_UNLCK) {
		for_each_lock(inode, before) {
			struct file_lock *fl = *before;
			if (!IS_POSIX(fl))
				continue;
			if (!posix_locks_conflict(request, fl))
				continue;
			error = -EAGAIN;
			if (!(request->fl_flags & FL_SLEEP))
				goto out;
			error = -EDEADLK;
			if (posix_locks_deadlock(request, fl))
				goto out;
			error = -EAGAIN;
			locks_insert_block(fl, request);
			goto out;
		}
	}

	/* If we're just looking for a conflict, we're done. */
	error = 0;
	if (request->fl_flags & FL_ACCESS)
		goto out;

	error = -ENOLCK; /* "no luck" */
	if (!(new_fl && new_fl2))
		goto out;

	/*
	 * We've allocated the new locks in advance, so there are no
	 * errors possible (and no blocking operations) from here on.
	 *
	 * Find the first old lock with the same owner as the new lock.
	 */

	before = &inode->i_flock;

	/* First skip locks owned by other processes. */
	while ((fl = *before) && (!IS_POSIX(fl) ||
				  !posix_same_owner(request, fl))) {
		before = &fl->fl_next;
	}

	/* Process locks with this owner. */
	while ((fl = *before) && posix_same_owner(request, fl)) {
		/* Detect adjacent or overlapping regions (if same lock type)
		 */
		if (request->fl_type == fl->fl_type) {
			/* In all comparisons of start vs end, use
			 * "start - 1" rather than "end + 1". If end
			 * is OFFSET_MAX, end + 1 will become negative.
			 */
			if (fl->fl_end < request->fl_start - 1)
				goto next_lock;
			/* If the next lock in the list has entirely bigger
			 * addresses than the new one, insert the lock here.
			 */
			if (fl->fl_start - 1 > request->fl_end)
				break;

			/* If we come here, the new and old lock are of the
			 * same type and adjacent or overlapping. Make one
			 * lock yielding from the lower start address of both
			 * locks to the higher end address.
			 */
			if (fl->fl_start > request->fl_start)
				fl->fl_start = request->fl_start;
			else
				request->fl_start = fl->fl_start;
			if (fl->fl_end < request->fl_end)
				fl->fl_end = request->fl_end;
			else
				request->fl_end = fl->fl_end;
			if (added) {
				locks_delete_lock(before);
				continue;
			}
			request = fl;
			added = 1;
		}
		else {
			/* Processing for different lock types is a bit
			 * more complex.
			 */
			if (fl->fl_end < request->fl_start)
				goto next_lock;
			if (fl->fl_start > request->fl_end)
				break;
			if (request->fl_type == F_UNLCK)
				added = 1;
			if (fl->fl_start < request->fl_start)
				left = fl;
			/* If the next lock in the list has a higher end
			 * address than the new one, insert the new one here.
			 */
			if (fl->fl_end > request->fl_end) {
				right = fl;
				break;
			}
			if (fl->fl_start >= request->fl_start) {
				/* The new lock completely replaces an old
				 * one (This may happen several times).
				 */
				if (added) {
					locks_delete_lock(before);
					continue;
				}
				/* Replace the old lock with the new one.
				 * Wake up anybody waiting for the old one,
				 * as the change in lock type might satisfy
				 * their needs.
				 */
				locks_wake_up_blocks(fl);
				fl->fl_start = request->fl_start;
				fl->fl_end = request->fl_end;
				fl->fl_type = request->fl_type;
				fl->fl_u = request->fl_u;
				request = fl;
				added = 1;
			}
		}
		/* Go on to next lock.
		 */
	next_lock:
		before = &fl->fl_next;
	}

	error = 0;
	if (!added) {
		if (request->fl_type == F_UNLCK)
			goto out;
		locks_copy_lock(new_fl, request);
		locks_insert_lock(before, new_fl);
		new_fl = NULL;
	}
	if (right) {
		if (left == right) {
			/* The new lock breaks the old one in two pieces,
			 * so we have to use the second new lock.
			 */
			left = new_fl2;
			new_fl2 = NULL;
			locks_copy_lock(left, right);
			locks_insert_lock(before, left);
		}
		right->fl_start = request->fl_end + 1;
		locks_wake_up_blocks(right);
	}
	if (left) {
		left->fl_end = request->fl_start - 1;
		locks_wake_up_blocks(left);
	}
out:
	unlock_kernel();
	/*
	 * Free any unused locks.
	 */
	if (new_fl)
		locks_free_lock(new_fl);
	if (new_fl2)
		locks_free_lock(new_fl2);
	return error;
}

/**
 * posix_lock_file - Apply a POSIX-style lock to a file
 * @filp: The file to apply the lock to
 * @fl: The lock to be applied
 *
 * Add a POSIX style lock to a file.
 * We merge adjacent & overlapping locks whenever possible.
 * POSIX locks are sorted by owner task, then by starting address
 */
int posix_lock_file(struct file *filp, struct file_lock *fl)
{
	return __posix_lock_file(filp->f_dentry->d_inode, fl);
}

/**
 * posix_lock_file_wait - Apply a POSIX-style lock to a file
 * @filp: The file to apply the lock to
 * @fl: The lock to be applied
 *
 * Add a POSIX style lock to a file.
 * We merge adjacent & overlapping locks whenever possible.
 * POSIX locks are sorted by owner task, then by starting address
 */
int posix_lock_file_wait(struct file *filp, struct file_lock *fl)
{
	int error;
	might_sleep ();
	for (;;) {
		error = __posix_lock_file(filp->f_dentry->d_inode, fl);
		if ((error != -EAGAIN) || !(fl->fl_flags & FL_SLEEP))
			break;
		error = wait_event_interruptible(fl->fl_wait, !fl->fl_next);
		if (!error)
			continue;

		locks_delete_block(fl);
		break;
	}
	return error;
}
EXPORT_SYMBOL(posix_lock_file_wait);

/**
 * locks_mandatory_locked - Check for an active lock
 * @inode: the file to check
 *
 * Searches the inode's list of locks to find any POSIX locks which conflict.
 * This function is called from locks_verify_locked() only.
 */
int locks_mandatory_locked(struct inode *inode)
{
	fl_owner_t owner = current->files;
	struct file_lock *fl;

	/*
	 * Search the lock list for this inode for any POSIX locks.
	 */
	lock_kernel();
	for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next) {
		if (!IS_POSIX(fl))
			continue;
		if (fl->fl_owner != owner)
			break;
	}
	unlock_kernel();
	return fl ? -EAGAIN : 0;
}

/**
 * locks_mandatory_area - Check for a conflicting lock
 * @read_write: %FLOCK_VERIFY_WRITE for exclusive access, %FLOCK_VERIFY_READ
 *		for shared
 * @inode: the file to check
 * @filp: how the file was opened (if it was)
 * @offset: start of area to check
 * @count: length of area to check
 *
 * Searches the inode's list of locks to find any POSIX locks which conflict.
 * This function is called from rw_verify_area() and
 * locks_verify_truncate().
 */
int locks_mandatory_area(int read_write, struct inode *inode,
			 struct file *filp, loff_t offset,
			 size_t count)
{
	struct file_lock fl;
	int error;

	locks_init_lock(&fl);
	fl.fl_owner = current->files;
	fl.fl_pid = current->tgid;
	fl.fl_file = filp;
	fl.fl_flags = FL_POSIX | FL_ACCESS;
	if (filp && !(filp->f_flags & O_NONBLOCK))
		fl.fl_flags |= FL_SLEEP;
	fl.fl_type = (read_write == FLOCK_VERIFY_WRITE) ? F_WRLCK : F_RDLCK;
	fl.fl_start = offset;
	fl.fl_end = offset + count - 1;

	for (;;) {
		error = __posix_lock_file(inode, &fl);
		if (error != -EAGAIN)
			break;
		if (!(fl.fl_flags & FL_SLEEP))
			break;
		error = wait_event_interruptible(fl.fl_wait, !fl.fl_next);
		if (!error) {
			/*
			 * If we've been sleeping someone might have
			 * changed the permissions behind our back.
			 */
			if ((inode->i_mode & (S_ISGID | S_IXGRP)) == S_ISGID)
				continue;
		}

		locks_delete_block(&fl);
		break;
	}

	return error;
}

EXPORT_SYMBOL(locks_mandatory_area);

/* We already had a lease on this file; just change its type */
int lease_modify(struct file_lock **before, int arg)
{
	struct file_lock *fl = *before;
	int error = assign_type(fl, arg);

	if (error)
		return error;
	locks_wake_up_blocks(fl);
	if (arg == F_UNLCK)
		locks_delete_lock(before);
	return 0;
}

EXPORT_SYMBOL(lease_modify);

static void time_out_leases(struct inode *inode)
{
	struct file_lock **before;
	struct file_lock *fl;

	before = &inode->i_flock;
	while ((fl = *before) && IS_LEASE(fl) && (fl->fl_type & F_INPROGRESS)) {
		if ((fl->fl_break_time == 0)
				|| time_before(jiffies, fl->fl_break_time)) {
			before = &fl->fl_next;
			continue;
		}
		lease_modify(before, fl->fl_type & ~F_INPROGRESS);
		if (fl == *before)	/* lease_modify may have freed fl */
			before = &fl->fl_next;
	}
}

/**
 * __break_lease - revoke all outstanding leases on file
 * @inode: the inode of the file to return
 * @mode: the open mode (read or write)
 *
 * break_lease (inlined for speed) has checked there already
 * is a lease on this file. Leases are broken on a call to open()
 * or truncate(). This function can sleep unless you
 * specified %O_NONBLOCK to your open().
 */
int __break_lease(struct inode *inode, unsigned int mode)
{
	int error = 0, future;
	struct file_lock *new_fl, *flock;
	struct file_lock *fl;
	int alloc_err;
	unsigned long break_time;
	int i_have_this_lease = 0;

	alloc_err = lease_alloc(NULL, mode & FMODE_WRITE ? F_WRLCK : F_RDLCK,
			&new_fl);

	lock_kernel();

	time_out_leases(inode);

	flock = inode->i_flock;
	if ((flock == NULL) || !IS_LEASE(flock))
		goto out;

	for (fl = flock; fl && IS_LEASE(fl); fl = fl->fl_next)
		if (fl->fl_owner == current->files)
			i_have_this_lease = 1;

	if (mode & FMODE_WRITE) {
		/* If we want write access, we have to revoke any lease. */
		future = F_UNLCK | F_INPROGRESS;
	} else if (flock->fl_type & F_INPROGRESS) {
		/* If the lease is already being broken, we just leave it */
		future = flock->fl_type;
	} else if (flock->fl_type & F_WRLCK) {
		/* Downgrade the exclusive lease to a read-only lease. */
		future = F_RDLCK | F_INPROGRESS;
	} else {
		/* the existing lease was read-only, so we can read too. */
		goto out;
	}

	if (alloc_err && !i_have_this_lease && ((mode & O_NONBLOCK) == 0)) {
		error = alloc_err;
		goto out;
	}

	break_time = 0;
	if (lease_break_time > 0) {
		break_time = jiffies + lease_break_time * HZ;
		if (break_time == 0)
			break_time++;	/* so that 0 means no break time */
	}

	for (fl = flock; fl && IS_LEASE(fl); fl = fl->fl_next) {
		if (fl->fl_type != future) {
			fl->fl_type = future;
			fl->fl_break_time = break_time;
			/* lease must have lmops break callback */
			fl->fl_lmops->fl_break(fl);
		}
	}

	if (i_have_this_lease || (mode & O_NONBLOCK)) {
		error = -EWOULDBLOCK;
		goto out;
	}

restart:
	break_time = flock->fl_break_time;
	if (break_time != 0) {
		break_time -= jiffies;
		if (break_time == 0)
			break_time++;
	}
	error = locks_block_on_timeout(flock, new_fl, break_time);
	if (error >= 0) {
		if (error == 0)
			time_out_leases(inode);
		/* Wait for the next lease that has not been broken yet */
		for (flock = inode->i_flock; flock && IS_LEASE(flock);
				flock = flock->fl_next) {
			if (flock->fl_type & F_INPROGRESS)
				goto restart;
		}
		error = 0;
	}

out:
	unlock_kernel();
	if (!alloc_err)
		locks_free_lock(new_fl);
	return error;
}

EXPORT_SYMBOL(__break_lease);

/**
 * lease_get_mtime
 * @inode: the inode
 * @time: pointer to a timespec which will contain the last modified time
 *
 * This is to force NFS clients to flush their caches for files with
 * exclusive leases. The justification is that if someone has an
 * exclusive lease, then they could be modifying it.
 */
void lease_get_mtime(struct inode *inode, struct timespec *time)
{
	struct file_lock *flock = inode->i_flock;
	if (flock && IS_LEASE(flock) && (flock->fl_type & F_WRLCK))
		*time = current_fs_time(inode->i_sb);
	else
		*time = inode->i_mtime;
}

EXPORT_SYMBOL(lease_get_mtime);

/**
 * fcntl_getlease - Enquire what lease is currently active
 * @filp: the file
 *
 * The value returned by this function will be one of
 * (if no lease break is pending):
 *
 * %F_RDLCK to indicate a shared lease is held.
 *
 * %F_WRLCK to indicate an exclusive lease is held.
 *
 * %F_UNLCK to indicate no lease is held.
 *
 * (if a lease break is pending):
 *
 * %F_RDLCK to indicate an exclusive lease needs to be
 * changed to a shared lease (or removed).
 *
 * %F_UNLCK to indicate the lease needs to be removed.
 *
 * XXX: sfr & willy disagree over whether F_INPROGRESS
 * should be returned to userspace.
 */
int fcntl_getlease(struct file *filp)
{
	struct file_lock *fl;
	int type = F_UNLCK;

	lock_kernel();
	time_out_leases(filp->f_dentry->d_inode);
	for (fl = filp->f_dentry->d_inode->i_flock; fl && IS_LEASE(fl);
			fl = fl->fl_next) {
		if (fl->fl_file == filp) {
			type = fl->fl_type & ~F_INPROGRESS;
			break;
		}
	}
	unlock_kernel();
	return type;
}

/**
 * __setlease - sets a lease on an open file
 * @filp: file pointer
 * @arg: type of lease to obtain
 * @flp: input - file_lock to use, output - file_lock inserted
 *
 * The (input) flp->fl_lmops->fl_break function is required
 * by break_lease().
 *
 * Called with kernel lock held.
 */
static int __setlease(struct file *filp, long arg, struct file_lock **flp)
{
	struct file_lock *fl, **before, **my_before = NULL, *lease;
	struct dentry *dentry = filp->f_dentry;
	struct inode *inode = dentry->d_inode;
	int error, rdlease_count = 0, wrlease_count = 0;

	time_out_leases(inode);

	error = -EINVAL;
	if (!flp || !(*flp) || !(*flp)->fl_lmops || !(*flp)->fl_lmops->fl_break)
		goto out;

	lease = *flp;

	error = -EAGAIN;
	if ((arg == F_RDLCK) && (atomic_read(&inode->i_writecount) > 0))
		goto out;
	if ((arg == F_WRLCK)
	    && ((atomic_read(&dentry->d_count) > 1)
		|| (atomic_read(&inode->i_count) > 1)))
		goto out;

	/*
	 * At this point, we know that if there is an exclusive
	 * lease on this file, then we hold it on this filp
	 * (otherwise our open of this file would have blocked).
	 * And if we are trying to acquire an exclusive lease,
	 * then the file is not open by anyone (including us)
	 * except for this filp.
	 */
	for (before = &inode->i_flock;
			((fl = *before) != NULL) && IS_LEASE(fl);
			before = &fl->fl_next) {
		if (lease->fl_lmops->fl_mylease(fl, lease))
			my_before = before;
		else if (fl->fl_type == (F_INPROGRESS | F_UNLCK))
			/*
			 * Someone is in the process of opening this
			 * file for writing so we may not take an
			 * exclusive lease on it.
			 */
			wrlease_count++;
		else
			rdlease_count++;
	}

	if ((arg == F_RDLCK && (wrlease_count > 0)) ||
	    (arg == F_WRLCK && ((rdlease_count + wrlease_count) > 0)))
		goto out;

	if (my_before != NULL) {
		error = lease->fl_lmops->fl_change(my_before, arg);
		goto out;
	}

	error = 0;
	if (arg == F_UNLCK)
		goto out;

	error = -EINVAL;
	if (!leases_enable)
		goto out;

	error = lease_alloc(filp, arg, &fl);
	if (error)
		goto out;

	locks_copy_lock(fl, lease);

	locks_insert_lock(before, fl);

	*flp = fl;
out:
	return error;
}

/**
 * setlease - sets a lease on an open file
 * @filp: file pointer
 * @arg: type of lease to obtain
 * @lease: file_lock to use
 *
 * Call this to establish a lease on the file.
 * The fl_lmops fl_break function is required by break_lease
 */

int setlease(struct file *filp, long arg, struct file_lock **lease)
{
	struct dentry *dentry = filp->f_dentry;
	struct inode *inode = dentry->d_inode;
	int error;

	if ((current->fsuid != inode->i_uid) && !capable(CAP_LEASE))
		return -EACCES;
	if (!S_ISREG(inode->i_mode))
		return -EINVAL;
	error = security_file_lock(filp, arg);
	if (error)
		return error;

	lock_kernel();
	error = __setlease(filp, arg, lease);
	unlock_kernel();

	return error;
}

EXPORT_SYMBOL(setlease);
1407 */ 1408 int fcntl_setlease(unsigned int fd, struct file *filp, long arg) 1409 { 1410 struct file_lock fl, *flp = &fl; 1411 struct dentry *dentry = filp->f_dentry; 1412 struct inode *inode = dentry->d_inode; 1413 int error; 1414 1415 if ((current->fsuid != inode->i_uid) && !capable(CAP_LEASE)) 1416 return -EACCES; 1417 if (!S_ISREG(inode->i_mode)) 1418 return -EINVAL; 1419 error = security_file_lock(filp, arg); 1420 if (error) 1421 return error; 1422 1423 locks_init_lock(&fl); 1424 error = lease_init(filp, arg, &fl); 1425 if (error) 1426 return error; 1427 1428 lock_kernel(); 1429 1430 error = __setlease(filp, arg, &flp); 1431 if (error || arg == F_UNLCK) 1432 goto out_unlock; 1433 1434 error = fasync_helper(fd, filp, 1, &flp->fl_fasync); 1435 if (error < 0) { 1436 /* remove lease just inserted by __setlease */ 1437 flp->fl_type = F_UNLCK | F_INPROGRESS; 1438 flp->fl_break_time = jiffies- 10; 1439 time_out_leases(inode); 1440 goto out_unlock; 1441 } 1442 1443 error = f_setown(filp, current->pid, 0); 1444 out_unlock: 1445 unlock_kernel(); 1446 return error; 1447 } 1448 1449 /** 1450 * flock_lock_file_wait - Apply a FLOCK-style lock to a file 1451 * @filp: The file to apply the lock to 1452 * @fl: The lock to be applied 1453 * 1454 * Add a FLOCK style lock to a file. 1455 */ 1456 int flock_lock_file_wait(struct file *filp, struct file_lock *fl) 1457 { 1458 int error; 1459 might_sleep(); 1460 for (;;) { 1461 error = flock_lock_file(filp, fl); 1462 if ((error != -EAGAIN) || !(fl->fl_flags & FL_SLEEP)) 1463 break; 1464 error = wait_event_interruptible(fl->fl_wait, !fl->fl_next); 1465 if (!error) 1466 continue; 1467 1468 locks_delete_block(fl); 1469 break; 1470 } 1471 return error; 1472 } 1473 1474 EXPORT_SYMBOL(flock_lock_file_wait); 1475 1476 /** 1477 * sys_flock: - flock() system call. 1478 * @fd: the file descriptor to lock. 1479 * @cmd: the type of lock to apply. 1480 * 1481 * Apply a %FL_FLOCK style lock to an open file descriptor. 1482 * The @cmd can be one of 1483 * 1484 * %LOCK_SH -- a shared lock. 1485 * 1486 * %LOCK_EX -- an exclusive lock. 1487 * 1488 * %LOCK_UN -- remove an existing lock. 1489 * 1490 * %LOCK_MAND -- a `mandatory' flock. This exists to emulate Windows Share Modes. 1491 * 1492 * %LOCK_MAND can be combined with %LOCK_READ or %LOCK_WRITE to allow other 1493 * processes read and write access respectively. 1494 */ 1495 asmlinkage long sys_flock(unsigned int fd, unsigned int cmd) 1496 { 1497 struct file *filp; 1498 struct file_lock *lock; 1499 int can_sleep, unlock; 1500 int error; 1501 1502 error = -EBADF; 1503 filp = fget(fd); 1504 if (!filp) 1505 goto out; 1506 1507 can_sleep = !(cmd & LOCK_NB); 1508 cmd &= ~LOCK_NB; 1509 unlock = (cmd == LOCK_UN); 1510 1511 if (!unlock && !(cmd & LOCK_MAND) && !(filp->f_mode & 3)) 1512 goto out_putf; 1513 1514 error = flock_make_lock(filp, &lock, cmd); 1515 if (error) 1516 goto out_putf; 1517 if (can_sleep) 1518 lock->fl_flags |= FL_SLEEP; 1519 1520 error = security_file_lock(filp, cmd); 1521 if (error) 1522 goto out_free; 1523 1524 if (filp->f_op && filp->f_op->flock) 1525 error = filp->f_op->flock(filp, 1526 (can_sleep) ? F_SETLKW : F_SETLK, 1527 lock); 1528 else 1529 error = flock_lock_file_wait(filp, lock); 1530 1531 out_free: 1532 if (list_empty(&lock->fl_link)) { 1533 locks_free_lock(lock); 1534 } 1535 1536 out_putf: 1537 fput(filp); 1538 out: 1539 return error; 1540 } 1541 1542 /* Report the first existing lock that would conflict with l. 1543 * This implements the F_GETLK command of fcntl(). 
1544 */ 1545 int fcntl_getlk(struct file *filp, struct flock __user *l) 1546 { 1547 struct file_lock *fl, file_lock; 1548 struct flock flock; 1549 int error; 1550 1551 error = -EFAULT; 1552 if (copy_from_user(&flock, l, sizeof(flock))) 1553 goto out; 1554 error = -EINVAL; 1555 if ((flock.l_type != F_RDLCK) && (flock.l_type != F_WRLCK)) 1556 goto out; 1557 1558 error = flock_to_posix_lock(filp, &file_lock, &flock); 1559 if (error) 1560 goto out; 1561 1562 if (filp->f_op && filp->f_op->lock) { 1563 error = filp->f_op->lock(filp, F_GETLK, &file_lock); 1564 if (file_lock.fl_ops && file_lock.fl_ops->fl_release_private) 1565 file_lock.fl_ops->fl_release_private(&file_lock); 1566 if (error < 0) 1567 goto out; 1568 else 1569 fl = (file_lock.fl_type == F_UNLCK ? NULL : &file_lock); 1570 } else { 1571 fl = posix_test_lock(filp, &file_lock); 1572 } 1573 1574 flock.l_type = F_UNLCK; 1575 if (fl != NULL) { 1576 flock.l_pid = fl->fl_pid; 1577 #if BITS_PER_LONG == 32 1578 /* 1579 * Make sure we can represent the posix lock via 1580 * legacy 32bit flock. 1581 */ 1582 error = -EOVERFLOW; 1583 if (fl->fl_start > OFFT_OFFSET_MAX) 1584 goto out; 1585 if ((fl->fl_end != OFFSET_MAX) 1586 && (fl->fl_end > OFFT_OFFSET_MAX)) 1587 goto out; 1588 #endif 1589 flock.l_start = fl->fl_start; 1590 flock.l_len = fl->fl_end == OFFSET_MAX ? 0 : 1591 fl->fl_end - fl->fl_start + 1; 1592 flock.l_whence = 0; 1593 flock.l_type = fl->fl_type; 1594 } 1595 error = -EFAULT; 1596 if (!copy_to_user(l, &flock, sizeof(flock))) 1597 error = 0; 1598 out: 1599 return error; 1600 } 1601 1602 /* Apply the lock described by l to an open file descriptor. 1603 * This implements both the F_SETLK and F_SETLKW commands of fcntl(). 1604 */ 1605 int fcntl_setlk(unsigned int fd, struct file *filp, unsigned int cmd, 1606 struct flock __user *l) 1607 { 1608 struct file_lock *file_lock = locks_alloc_lock(); 1609 struct flock flock; 1610 struct inode *inode; 1611 int error; 1612 1613 if (file_lock == NULL) 1614 return -ENOLCK; 1615 1616 /* 1617 * This might block, so we do it before checking the inode. 1618 */ 1619 error = -EFAULT; 1620 if (copy_from_user(&flock, l, sizeof(flock))) 1621 goto out; 1622 1623 inode = filp->f_dentry->d_inode; 1624 1625 /* Don't allow mandatory locks on files that may be memory mapped 1626 * and shared. 
1627 */ 1628 if (IS_MANDLOCK(inode) && 1629 (inode->i_mode & (S_ISGID | S_IXGRP)) == S_ISGID && 1630 mapping_writably_mapped(filp->f_mapping)) { 1631 error = -EAGAIN; 1632 goto out; 1633 } 1634 1635 again: 1636 error = flock_to_posix_lock(filp, file_lock, &flock); 1637 if (error) 1638 goto out; 1639 if (cmd == F_SETLKW) { 1640 file_lock->fl_flags |= FL_SLEEP; 1641 } 1642 1643 error = -EBADF; 1644 switch (flock.l_type) { 1645 case F_RDLCK: 1646 if (!(filp->f_mode & FMODE_READ)) 1647 goto out; 1648 break; 1649 case F_WRLCK: 1650 if (!(filp->f_mode & FMODE_WRITE)) 1651 goto out; 1652 break; 1653 case F_UNLCK: 1654 break; 1655 default: 1656 error = -EINVAL; 1657 goto out; 1658 } 1659 1660 error = security_file_lock(filp, file_lock->fl_type); 1661 if (error) 1662 goto out; 1663 1664 if (filp->f_op && filp->f_op->lock != NULL) 1665 error = filp->f_op->lock(filp, cmd, file_lock); 1666 else { 1667 for (;;) { 1668 error = __posix_lock_file(inode, file_lock); 1669 if ((error != -EAGAIN) || (cmd == F_SETLK)) 1670 break; 1671 error = wait_event_interruptible(file_lock->fl_wait, 1672 !file_lock->fl_next); 1673 if (!error) 1674 continue; 1675 1676 locks_delete_block(file_lock); 1677 break; 1678 } 1679 } 1680 1681 /* 1682 * Attempt to detect a close/fcntl race and recover by 1683 * releasing the lock that was just acquired. 1684 */ 1685 if (!error && fcheck(fd) != filp && flock.l_type != F_UNLCK) { 1686 flock.l_type = F_UNLCK; 1687 goto again; 1688 } 1689 1690 out: 1691 locks_free_lock(file_lock); 1692 return error; 1693 } 1694 1695 #if BITS_PER_LONG == 32 1696 /* Report the first existing lock that would conflict with l. 1697 * This implements the F_GETLK command of fcntl(). 1698 */ 1699 int fcntl_getlk64(struct file *filp, struct flock64 __user *l) 1700 { 1701 struct file_lock *fl, file_lock; 1702 struct flock64 flock; 1703 int error; 1704 1705 error = -EFAULT; 1706 if (copy_from_user(&flock, l, sizeof(flock))) 1707 goto out; 1708 error = -EINVAL; 1709 if ((flock.l_type != F_RDLCK) && (flock.l_type != F_WRLCK)) 1710 goto out; 1711 1712 error = flock64_to_posix_lock(filp, &file_lock, &flock); 1713 if (error) 1714 goto out; 1715 1716 if (filp->f_op && filp->f_op->lock) { 1717 error = filp->f_op->lock(filp, F_GETLK, &file_lock); 1718 if (file_lock.fl_ops && file_lock.fl_ops->fl_release_private) 1719 file_lock.fl_ops->fl_release_private(&file_lock); 1720 if (error < 0) 1721 goto out; 1722 else 1723 fl = (file_lock.fl_type == F_UNLCK ? NULL : &file_lock); 1724 } else { 1725 fl = posix_test_lock(filp, &file_lock); 1726 } 1727 1728 flock.l_type = F_UNLCK; 1729 if (fl != NULL) { 1730 flock.l_pid = fl->fl_pid; 1731 flock.l_start = fl->fl_start; 1732 flock.l_len = fl->fl_end == OFFSET_MAX ? 0 : 1733 fl->fl_end - fl->fl_start + 1; 1734 flock.l_whence = 0; 1735 flock.l_type = fl->fl_type; 1736 } 1737 error = -EFAULT; 1738 if (!copy_to_user(l, &flock, sizeof(flock))) 1739 error = 0; 1740 1741 out: 1742 return error; 1743 } 1744 1745 /* Apply the lock described by l to an open file descriptor. 1746 * This implements both the F_SETLK and F_SETLKW commands of fcntl(). 1747 */ 1748 int fcntl_setlk64(unsigned int fd, struct file *filp, unsigned int cmd, 1749 struct flock64 __user *l) 1750 { 1751 struct file_lock *file_lock = locks_alloc_lock(); 1752 struct flock64 flock; 1753 struct inode *inode; 1754 int error; 1755 1756 if (file_lock == NULL) 1757 return -ENOLCK; 1758 1759 /* 1760 * This might block, so we do it before checking the inode. 
1761 */ 1762 error = -EFAULT; 1763 if (copy_from_user(&flock, l, sizeof(flock))) 1764 goto out; 1765 1766 inode = filp->f_dentry->d_inode; 1767 1768 /* Don't allow mandatory locks on files that may be memory mapped 1769 * and shared. 1770 */ 1771 if (IS_MANDLOCK(inode) && 1772 (inode->i_mode & (S_ISGID | S_IXGRP)) == S_ISGID && 1773 mapping_writably_mapped(filp->f_mapping)) { 1774 error = -EAGAIN; 1775 goto out; 1776 } 1777 1778 again: 1779 error = flock64_to_posix_lock(filp, file_lock, &flock); 1780 if (error) 1781 goto out; 1782 if (cmd == F_SETLKW64) { 1783 file_lock->fl_flags |= FL_SLEEP; 1784 } 1785 1786 error = -EBADF; 1787 switch (flock.l_type) { 1788 case F_RDLCK: 1789 if (!(filp->f_mode & FMODE_READ)) 1790 goto out; 1791 break; 1792 case F_WRLCK: 1793 if (!(filp->f_mode & FMODE_WRITE)) 1794 goto out; 1795 break; 1796 case F_UNLCK: 1797 break; 1798 default: 1799 error = -EINVAL; 1800 goto out; 1801 } 1802 1803 error = security_file_lock(filp, file_lock->fl_type); 1804 if (error) 1805 goto out; 1806 1807 if (filp->f_op && filp->f_op->lock != NULL) 1808 error = filp->f_op->lock(filp, cmd, file_lock); 1809 else { 1810 for (;;) { 1811 error = __posix_lock_file(inode, file_lock); 1812 if ((error != -EAGAIN) || (cmd == F_SETLK64)) 1813 break; 1814 error = wait_event_interruptible(file_lock->fl_wait, 1815 !file_lock->fl_next); 1816 if (!error) 1817 continue; 1818 1819 locks_delete_block(file_lock); 1820 break; 1821 } 1822 } 1823 1824 /* 1825 * Attempt to detect a close/fcntl race and recover by 1826 * releasing the lock that was just acquired. 1827 */ 1828 if (!error && fcheck(fd) != filp && flock.l_type != F_UNLCK) { 1829 flock.l_type = F_UNLCK; 1830 goto again; 1831 } 1832 1833 out: 1834 locks_free_lock(file_lock); 1835 return error; 1836 } 1837 #endif /* BITS_PER_LONG == 32 */ 1838 1839 /* 1840 * This function is called when the file is being removed 1841 * from the task's fd array. POSIX locks belonging to this task 1842 * are deleted at this time. 1843 */ 1844 void locks_remove_posix(struct file *filp, fl_owner_t owner) 1845 { 1846 struct file_lock lock, **before; 1847 1848 /* 1849 * If there are no locks held on this file, we don't need to call 1850 * posix_lock_file(). Another process could be setting a lock on this 1851 * file at the same time, but we wouldn't remove that lock anyway. 1852 */ 1853 before = &filp->f_dentry->d_inode->i_flock; 1854 if (*before == NULL) 1855 return; 1856 1857 lock.fl_type = F_UNLCK; 1858 lock.fl_flags = FL_POSIX; 1859 lock.fl_start = 0; 1860 lock.fl_end = OFFSET_MAX; 1861 lock.fl_owner = owner; 1862 lock.fl_pid = current->tgid; 1863 lock.fl_file = filp; 1864 lock.fl_ops = NULL; 1865 lock.fl_lmops = NULL; 1866 1867 if (filp->f_op && filp->f_op->lock != NULL) { 1868 filp->f_op->lock(filp, F_SETLK, &lock); 1869 goto out; 1870 } 1871 1872 /* Can't use posix_lock_file here; we need to remove it no matter 1873 * which pid we have. 1874 */ 1875 lock_kernel(); 1876 while (*before != NULL) { 1877 struct file_lock *fl = *before; 1878 if (IS_POSIX(fl) && posix_same_owner(fl, &lock)) { 1879 locks_delete_lock(before); 1880 continue; 1881 } 1882 before = &fl->fl_next; 1883 } 1884 unlock_kernel(); 1885 out: 1886 if (lock.fl_ops && lock.fl_ops->fl_release_private) 1887 lock.fl_ops->fl_release_private(&lock); 1888 } 1889 1890 EXPORT_SYMBOL(locks_remove_posix); 1891 1892 /* 1893 * This function is called on the last close of an open file. 
1894 */ 1895 void locks_remove_flock(struct file *filp) 1896 { 1897 struct inode * inode = filp->f_dentry->d_inode; 1898 struct file_lock *fl; 1899 struct file_lock **before; 1900 1901 if (!inode->i_flock) 1902 return; 1903 1904 if (filp->f_op && filp->f_op->flock) { 1905 struct file_lock fl = { 1906 .fl_pid = current->tgid, 1907 .fl_file = filp, 1908 .fl_flags = FL_FLOCK, 1909 .fl_type = F_UNLCK, 1910 .fl_end = OFFSET_MAX, 1911 }; 1912 filp->f_op->flock(filp, F_SETLKW, &fl); 1913 if (fl.fl_ops && fl.fl_ops->fl_release_private) 1914 fl.fl_ops->fl_release_private(&fl); 1915 } 1916 1917 lock_kernel(); 1918 before = &inode->i_flock; 1919 1920 while ((fl = *before) != NULL) { 1921 if (fl->fl_file == filp) { 1922 if (IS_FLOCK(fl)) { 1923 locks_delete_lock(before); 1924 continue; 1925 } 1926 if (IS_LEASE(fl)) { 1927 lease_modify(before, F_UNLCK); 1928 continue; 1929 } 1930 /* What? */ 1931 BUG(); 1932 } 1933 before = &fl->fl_next; 1934 } 1935 unlock_kernel(); 1936 } 1937 1938 /** 1939 * posix_block_lock - blocks waiting for a file lock 1940 * @blocker: the lock which is blocking 1941 * @waiter: the lock which conflicts and has to wait 1942 * 1943 * lockd needs to block waiting for locks. 1944 */ 1945 void 1946 posix_block_lock(struct file_lock *blocker, struct file_lock *waiter) 1947 { 1948 locks_insert_block(blocker, waiter); 1949 } 1950 1951 EXPORT_SYMBOL(posix_block_lock); 1952 1953 /** 1954 * posix_unblock_lock - stop waiting for a file lock 1955 * @filp: how the file was opened 1956 * @waiter: the lock which was waiting 1957 * 1958 * lockd needs to block waiting for locks. 1959 */ 1960 int 1961 posix_unblock_lock(struct file *filp, struct file_lock *waiter) 1962 { 1963 int status = 0; 1964 1965 lock_kernel(); 1966 if (waiter->fl_next) 1967 __locks_delete_block(waiter); 1968 else 1969 status = -ENOENT; 1970 unlock_kernel(); 1971 return status; 1972 } 1973 1974 EXPORT_SYMBOL(posix_unblock_lock); 1975 1976 static void lock_get_status(char* out, struct file_lock *fl, int id, char *pfx) 1977 { 1978 struct inode *inode = NULL; 1979 1980 if (fl->fl_file != NULL) 1981 inode = fl->fl_file->f_dentry->d_inode; 1982 1983 out += sprintf(out, "%d:%s ", id, pfx); 1984 if (IS_POSIX(fl)) { 1985 out += sprintf(out, "%6s %s ", 1986 (fl->fl_flags & FL_ACCESS) ? "ACCESS" : "POSIX ", 1987 (inode == NULL) ? "*NOINODE*" : 1988 (IS_MANDLOCK(inode) && 1989 (inode->i_mode & (S_IXGRP | S_ISGID)) == S_ISGID) ? 1990 "MANDATORY" : "ADVISORY "); 1991 } else if (IS_FLOCK(fl)) { 1992 if (fl->fl_type & LOCK_MAND) { 1993 out += sprintf(out, "FLOCK MSNFS "); 1994 } else { 1995 out += sprintf(out, "FLOCK ADVISORY "); 1996 } 1997 } else if (IS_LEASE(fl)) { 1998 out += sprintf(out, "LEASE "); 1999 if (fl->fl_type & F_INPROGRESS) 2000 out += sprintf(out, "BREAKING "); 2001 else if (fl->fl_file) 2002 out += sprintf(out, "ACTIVE "); 2003 else 2004 out += sprintf(out, "BREAKER "); 2005 } else { 2006 out += sprintf(out, "UNKNOWN UNKNOWN "); 2007 } 2008 if (fl->fl_type & LOCK_MAND) { 2009 out += sprintf(out, "%s ", 2010 (fl->fl_type & LOCK_READ) 2011 ? (fl->fl_type & LOCK_WRITE) ? "RW " : "READ " 2012 : (fl->fl_type & LOCK_WRITE) ? "WRITE" : "NONE "); 2013 } else { 2014 out += sprintf(out, "%s ", 2015 (fl->fl_type & F_INPROGRESS) 2016 ? (fl->fl_type & F_UNLCK) ? "UNLCK" : "READ " 2017 : (fl->fl_type & F_WRLCK) ? 
"WRITE" : "READ "); 2018 } 2019 if (inode) { 2020 #ifdef WE_CAN_BREAK_LSLK_NOW 2021 out += sprintf(out, "%d %s:%ld ", fl->fl_pid, 2022 inode->i_sb->s_id, inode->i_ino); 2023 #else 2024 /* userspace relies on this representation of dev_t ;-( */ 2025 out += sprintf(out, "%d %02x:%02x:%ld ", fl->fl_pid, 2026 MAJOR(inode->i_sb->s_dev), 2027 MINOR(inode->i_sb->s_dev), inode->i_ino); 2028 #endif 2029 } else { 2030 out += sprintf(out, "%d <none>:0 ", fl->fl_pid); 2031 } 2032 if (IS_POSIX(fl)) { 2033 if (fl->fl_end == OFFSET_MAX) 2034 out += sprintf(out, "%Ld EOF\n", fl->fl_start); 2035 else 2036 out += sprintf(out, "%Ld %Ld\n", fl->fl_start, 2037 fl->fl_end); 2038 } else { 2039 out += sprintf(out, "0 EOF\n"); 2040 } 2041 } 2042 2043 static void move_lock_status(char **p, off_t* pos, off_t offset) 2044 { 2045 int len; 2046 len = strlen(*p); 2047 if(*pos >= offset) { 2048 /* the complete line is valid */ 2049 *p += len; 2050 *pos += len; 2051 return; 2052 } 2053 if(*pos+len > offset) { 2054 /* use the second part of the line */ 2055 int i = offset-*pos; 2056 memmove(*p,*p+i,len-i); 2057 *p += len-i; 2058 *pos += len; 2059 return; 2060 } 2061 /* discard the complete line */ 2062 *pos += len; 2063 } 2064 2065 /** 2066 * get_locks_status - reports lock usage in /proc/locks 2067 * @buffer: address in userspace to write into 2068 * @start: ? 2069 * @offset: how far we are through the buffer 2070 * @length: how much to read 2071 */ 2072 2073 int get_locks_status(char *buffer, char **start, off_t offset, int length) 2074 { 2075 struct list_head *tmp; 2076 char *q = buffer; 2077 off_t pos = 0; 2078 int i = 0; 2079 2080 lock_kernel(); 2081 list_for_each(tmp, &file_lock_list) { 2082 struct list_head *btmp; 2083 struct file_lock *fl = list_entry(tmp, struct file_lock, fl_link); 2084 lock_get_status(q, fl, ++i, ""); 2085 move_lock_status(&q, &pos, offset); 2086 2087 if(pos >= offset+length) 2088 goto done; 2089 2090 list_for_each(btmp, &fl->fl_block) { 2091 struct file_lock *bfl = list_entry(btmp, 2092 struct file_lock, fl_block); 2093 lock_get_status(q, bfl, i, " ->"); 2094 move_lock_status(&q, &pos, offset); 2095 2096 if(pos >= offset+length) 2097 goto done; 2098 } 2099 } 2100 done: 2101 unlock_kernel(); 2102 *start = buffer; 2103 if(q-buffer < length) 2104 return (q-buffer); 2105 return length; 2106 } 2107 2108 /** 2109 * lock_may_read - checks that the region is free of locks 2110 * @inode: the inode that is being read 2111 * @start: the first byte to read 2112 * @len: the number of bytes to read 2113 * 2114 * Emulates Windows locking requirements. Whole-file 2115 * mandatory locks (share modes) can prohibit a read and 2116 * byte-range POSIX locks can prohibit a read if they overlap. 2117 * 2118 * N.B. this function is only ever called 2119 * from knfsd and ownership of locks is never checked. 
2120 */ 2121 int lock_may_read(struct inode *inode, loff_t start, unsigned long len) 2122 { 2123 struct file_lock *fl; 2124 int result = 1; 2125 lock_kernel(); 2126 for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next) { 2127 if (IS_POSIX(fl)) { 2128 if (fl->fl_type == F_RDLCK) 2129 continue; 2130 if ((fl->fl_end < start) || (fl->fl_start > (start + len))) 2131 continue; 2132 } else if (IS_FLOCK(fl)) { 2133 if (!(fl->fl_type & LOCK_MAND)) 2134 continue; 2135 if (fl->fl_type & LOCK_READ) 2136 continue; 2137 } else 2138 continue; 2139 result = 0; 2140 break; 2141 } 2142 unlock_kernel(); 2143 return result; 2144 } 2145 2146 EXPORT_SYMBOL(lock_may_read); 2147 2148 /** 2149 * lock_may_write - checks that the region is free of locks 2150 * @inode: the inode that is being written 2151 * @start: the first byte to write 2152 * @len: the number of bytes to write 2153 * 2154 * Emulates Windows locking requirements. Whole-file 2155 * mandatory locks (share modes) can prohibit a write and 2156 * byte-range POSIX locks can prohibit a write if they overlap. 2157 * 2158 * N.B. this function is only ever called 2159 * from knfsd and ownership of locks is never checked. 2160 */ 2161 int lock_may_write(struct inode *inode, loff_t start, unsigned long len) 2162 { 2163 struct file_lock *fl; 2164 int result = 1; 2165 lock_kernel(); 2166 for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next) { 2167 if (IS_POSIX(fl)) { 2168 if ((fl->fl_end < start) || (fl->fl_start > (start + len))) 2169 continue; 2170 } else if (IS_FLOCK(fl)) { 2171 if (!(fl->fl_type & LOCK_MAND)) 2172 continue; 2173 if (fl->fl_type & LOCK_WRITE) 2174 continue; 2175 } else 2176 continue; 2177 result = 0; 2178 break; 2179 } 2180 unlock_kernel(); 2181 return result; 2182 } 2183 2184 EXPORT_SYMBOL(lock_may_write); 2185 2186 static inline void __steal_locks(struct file *file, fl_owner_t from) 2187 { 2188 struct inode *inode = file->f_dentry->d_inode; 2189 struct file_lock *fl = inode->i_flock; 2190 2191 while (fl) { 2192 if (fl->fl_file == file && fl->fl_owner == from) 2193 fl->fl_owner = current->files; 2194 fl = fl->fl_next; 2195 } 2196 } 2197 2198 /* When getting ready for executing a binary, we make sure that current 2199 * has a files_struct on its own. Before dropping the old files_struct, 2200 * we take over ownership of all locks for all file descriptors we own. 2201 * Note that we may accidentally steal a lock for a file that a sibling 2202 * has created since the unshare_files() call. 2203 */ 2204 void steal_locks(fl_owner_t from) 2205 { 2206 struct files_struct *files = current->files; 2207 int i, j; 2208 struct fdtable *fdt; 2209 2210 if (from == files) 2211 return; 2212 2213 lock_kernel(); 2214 j = 0; 2215 rcu_read_lock(); 2216 fdt = files_fdtable(files); 2217 for (;;) { 2218 unsigned long set; 2219 i = j * __NFDBITS; 2220 if (i >= fdt->max_fdset || i >= fdt->max_fds) 2221 break; 2222 set = fdt->open_fds->fds_bits[j++]; 2223 while (set) { 2224 if (set & 1) { 2225 struct file *file = fdt->fd[i]; 2226 if (file) 2227 __steal_locks(file, from); 2228 } 2229 i++; 2230 set >>= 1; 2231 } 2232 } 2233 rcu_read_unlock(); 2234 unlock_kernel(); 2235 } 2236 EXPORT_SYMBOL(steal_locks); 2237 2238 static int __init filelock_init(void) 2239 { 2240 filelock_cache = kmem_cache_create("file_lock_cache", 2241 sizeof(struct file_lock), 0, SLAB_PANIC, 2242 init_once, NULL); 2243 return 0; 2244 } 2245 2246 core_initcall(filelock_init); 2247