/*
 * linux/fs/locks.c
 *
 * Provide support for fcntl()'s F_GETLK, F_SETLK, and F_SETLKW calls.
 * Doug Evans (dje@spiff.uucp), August 07, 1992
 *
 * Deadlock detection added.
 * FIXME: one thing isn't handled yet:
 *	- mandatory locks (requires lots of changes elsewhere)
 * Kelly Carmichael (kelly@[142.24.8.65]), September 17, 1994.
 *
 * Miscellaneous edits, and a total rewrite of posix_lock_file() code.
 * Kai Petzke (wpp@marie.physik.tu-berlin.de), 1994
 *
 * Converted file_lock_table to a linked list from an array, which eliminates
 * the limits on how many active file locks are open.
 * Chad Page (pageone@netcom.com), November 27, 1994
 *
 * Removed dependency on file descriptors. dup()'ed file descriptors now
 * get the same locks as the original file descriptors, and a close() on
 * any file descriptor removes ALL the locks on the file for the current
 * process. Since locks still depend on the process id, locks are inherited
 * after an exec() but not after a fork(). This agrees with POSIX, and both
 * BSD and SVR4 practice.
 * Andy Walker (andy@lysaker.kvaerner.no), February 14, 1995
 *
 * Scrapped free list which is redundant now that we allocate locks
 * dynamically with kmalloc()/kfree().
 * Andy Walker (andy@lysaker.kvaerner.no), February 21, 1995
 *
 * Implemented two lock personalities - FL_FLOCK and FL_POSIX.
 *
 * FL_POSIX locks are created with calls to fcntl() and lockf() through the
 * fcntl() system call. They have the semantics described above.
 *
 * FL_FLOCK locks are created with calls to flock(), through the flock()
 * system call, which is new. Old C libraries implement flock() via fcntl()
 * and will continue to use the old, broken implementation.
 *
 * FL_FLOCK locks follow the 4.4 BSD flock() semantics. They are associated
 * with a file pointer (filp). As a result they can be shared by a parent
 * process and its children after a fork(). They are removed when the last
 * file descriptor referring to the file pointer is closed (unless explicitly
 * unlocked).
 *
 * FL_FLOCK locks never deadlock, an existing lock is always removed before
 * upgrading from shared to exclusive (or vice versa). When this happens
 * any processes blocked by the current lock are woken up and allowed to
 * run before the new lock is applied.
 * Andy Walker (andy@lysaker.kvaerner.no), June 09, 1995
 *
 * Removed some race conditions in flock_lock_file(), marked other possible
 * races. Just grep for FIXME to see them.
 * Dmitry Gorodchanin (pgmdsg@ibi.com), February 09, 1996.
 *
 * Addressed Dmitry's concerns. Deadlock checking no longer recursive.
 * Lock allocation changed to GFP_ATOMIC as we can't afford to sleep
 * once we've checked for blocking and deadlocking.
 * Andy Walker (andy@lysaker.kvaerner.no), April 03, 1996.
 *
 * Initial implementation of mandatory locks. SunOS turned out to be
 * a rotten model, so I implemented the "obvious" semantics.
 * See 'Documentation/mandatory.txt' for details.
 * Andy Walker (andy@lysaker.kvaerner.no), April 06, 1996.
 *
 * Don't allow mandatory locks on mmap()'ed files. Added simple functions to
 * check if a file has mandatory locks, used by mmap(), open() and creat() to
 * see if system call should be rejected. Ref. HP-UX/SunOS/Solaris Reference
 * Manual, Section 2.
 * Andy Walker (andy@lysaker.kvaerner.no), April 09, 1996.
 *
 * Tidied up block list handling. Added '/proc/locks' interface.
73 * Andy Walker (andy@lysaker.kvaerner.no), April 24, 1996. 74 * 75 * Fixed deadlock condition for pathological code that mixes calls to 76 * flock() and fcntl(). 77 * Andy Walker (andy@lysaker.kvaerner.no), April 29, 1996. 78 * 79 * Allow only one type of locking scheme (FL_POSIX or FL_FLOCK) to be in use 80 * for a given file at a time. Changed the CONFIG_LOCK_MANDATORY scheme to 81 * guarantee sensible behaviour in the case where file system modules might 82 * be compiled with different options than the kernel itself. 83 * Andy Walker (andy@lysaker.kvaerner.no), May 15, 1996. 84 * 85 * Added a couple of missing wake_up() calls. Thanks to Thomas Meckel 86 * (Thomas.Meckel@mni.fh-giessen.de) for spotting this. 87 * Andy Walker (andy@lysaker.kvaerner.no), May 15, 1996. 88 * 89 * Changed FL_POSIX locks to use the block list in the same way as FL_FLOCK 90 * locks. Changed process synchronisation to avoid dereferencing locks that 91 * have already been freed. 92 * Andy Walker (andy@lysaker.kvaerner.no), Sep 21, 1996. 93 * 94 * Made the block list a circular list to minimise searching in the list. 95 * Andy Walker (andy@lysaker.kvaerner.no), Sep 25, 1996. 96 * 97 * Made mandatory locking a mount option. Default is not to allow mandatory 98 * locking. 99 * Andy Walker (andy@lysaker.kvaerner.no), Oct 04, 1996. 100 * 101 * Some adaptations for NFS support. 102 * Olaf Kirch (okir@monad.swb.de), Dec 1996, 103 * 104 * Fixed /proc/locks interface so that we can't overrun the buffer we are handed. 105 * Andy Walker (andy@lysaker.kvaerner.no), May 12, 1997. 106 * 107 * Use slab allocator instead of kmalloc/kfree. 108 * Use generic list implementation from <linux/list.h>. 109 * Sped up posix_locks_deadlock by only considering blocked locks. 110 * Matthew Wilcox <willy@debian.org>, March, 2000. 111 * 112 * Leases and LOCK_MAND 113 * Matthew Wilcox <willy@debian.org>, June, 2000. 114 * Stephen Rothwell <sfr@canb.auug.org.au>, June, 2000. 115 */ 116 117 #include <linux/capability.h> 118 #include <linux/file.h> 119 #include <linux/fs.h> 120 #include <linux/init.h> 121 #include <linux/module.h> 122 #include <linux/security.h> 123 #include <linux/slab.h> 124 #include <linux/smp_lock.h> 125 #include <linux/syscalls.h> 126 #include <linux/time.h> 127 #include <linux/rcupdate.h> 128 #include <linux/pid_namespace.h> 129 130 #include <asm/uaccess.h> 131 132 #define IS_POSIX(fl) (fl->fl_flags & FL_POSIX) 133 #define IS_FLOCK(fl) (fl->fl_flags & FL_FLOCK) 134 #define IS_LEASE(fl) (fl->fl_flags & FL_LEASE) 135 136 int leases_enable = 1; 137 int lease_break_time = 45; 138 139 #define for_each_lock(inode, lockp) \ 140 for (lockp = &inode->i_flock; *lockp != NULL; lockp = &(*lockp)->fl_next) 141 142 static LIST_HEAD(file_lock_list); 143 static LIST_HEAD(blocked_list); 144 145 static struct kmem_cache *filelock_cache __read_mostly; 146 147 /* Allocate an empty lock structure. */ 148 static struct file_lock *locks_alloc_lock(void) 149 { 150 return kmem_cache_alloc(filelock_cache, GFP_KERNEL); 151 } 152 153 static void locks_release_private(struct file_lock *fl) 154 { 155 if (fl->fl_ops) { 156 if (fl->fl_ops->fl_release_private) 157 fl->fl_ops->fl_release_private(fl); 158 fl->fl_ops = NULL; 159 } 160 if (fl->fl_lmops) { 161 if (fl->fl_lmops->fl_release_private) 162 fl->fl_lmops->fl_release_private(fl); 163 fl->fl_lmops = NULL; 164 } 165 166 } 167 168 /* Free a lock which is not in use. 
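 * The BUG_ON() checks in locks_free_lock() below catch callers that free a
 * lock while tasks are still sleeping on its wait queue or while it is still
 * linked into a block list or the global lock list.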
*/ 169 static void locks_free_lock(struct file_lock *fl) 170 { 171 BUG_ON(waitqueue_active(&fl->fl_wait)); 172 BUG_ON(!list_empty(&fl->fl_block)); 173 BUG_ON(!list_empty(&fl->fl_link)); 174 175 locks_release_private(fl); 176 kmem_cache_free(filelock_cache, fl); 177 } 178 179 void locks_init_lock(struct file_lock *fl) 180 { 181 INIT_LIST_HEAD(&fl->fl_link); 182 INIT_LIST_HEAD(&fl->fl_block); 183 init_waitqueue_head(&fl->fl_wait); 184 fl->fl_next = NULL; 185 fl->fl_fasync = NULL; 186 fl->fl_owner = NULL; 187 fl->fl_pid = 0; 188 fl->fl_nspid = NULL; 189 fl->fl_file = NULL; 190 fl->fl_flags = 0; 191 fl->fl_type = 0; 192 fl->fl_start = fl->fl_end = 0; 193 fl->fl_ops = NULL; 194 fl->fl_lmops = NULL; 195 } 196 197 EXPORT_SYMBOL(locks_init_lock); 198 199 /* 200 * Initialises the fields of the file lock which are invariant for 201 * free file_locks. 202 */ 203 static void init_once(struct kmem_cache *cache, void *foo) 204 { 205 struct file_lock *lock = (struct file_lock *) foo; 206 207 locks_init_lock(lock); 208 } 209 210 static void locks_copy_private(struct file_lock *new, struct file_lock *fl) 211 { 212 if (fl->fl_ops) { 213 if (fl->fl_ops->fl_copy_lock) 214 fl->fl_ops->fl_copy_lock(new, fl); 215 new->fl_ops = fl->fl_ops; 216 } 217 if (fl->fl_lmops) { 218 if (fl->fl_lmops->fl_copy_lock) 219 fl->fl_lmops->fl_copy_lock(new, fl); 220 new->fl_lmops = fl->fl_lmops; 221 } 222 } 223 224 /* 225 * Initialize a new lock from an existing file_lock structure. 226 */ 227 static void __locks_copy_lock(struct file_lock *new, const struct file_lock *fl) 228 { 229 new->fl_owner = fl->fl_owner; 230 new->fl_pid = fl->fl_pid; 231 new->fl_file = NULL; 232 new->fl_flags = fl->fl_flags; 233 new->fl_type = fl->fl_type; 234 new->fl_start = fl->fl_start; 235 new->fl_end = fl->fl_end; 236 new->fl_ops = NULL; 237 new->fl_lmops = NULL; 238 } 239 240 void locks_copy_lock(struct file_lock *new, struct file_lock *fl) 241 { 242 locks_release_private(new); 243 244 __locks_copy_lock(new, fl); 245 new->fl_file = fl->fl_file; 246 new->fl_ops = fl->fl_ops; 247 new->fl_lmops = fl->fl_lmops; 248 249 locks_copy_private(new, fl); 250 } 251 252 EXPORT_SYMBOL(locks_copy_lock); 253 254 static inline int flock_translate_cmd(int cmd) { 255 if (cmd & LOCK_MAND) 256 return cmd & (LOCK_MAND | LOCK_RW); 257 switch (cmd) { 258 case LOCK_SH: 259 return F_RDLCK; 260 case LOCK_EX: 261 return F_WRLCK; 262 case LOCK_UN: 263 return F_UNLCK; 264 } 265 return -EINVAL; 266 } 267 268 /* Fill in a file_lock structure with an appropriate FLOCK lock. */ 269 static int flock_make_lock(struct file *filp, struct file_lock **lock, 270 unsigned int cmd) 271 { 272 struct file_lock *fl; 273 int type = flock_translate_cmd(cmd); 274 if (type < 0) 275 return type; 276 277 fl = locks_alloc_lock(); 278 if (fl == NULL) 279 return -ENOMEM; 280 281 fl->fl_file = filp; 282 fl->fl_pid = current->tgid; 283 fl->fl_flags = FL_FLOCK; 284 fl->fl_type = type; 285 fl->fl_end = OFFSET_MAX; 286 287 *lock = fl; 288 return 0; 289 } 290 291 static int assign_type(struct file_lock *fl, int type) 292 { 293 switch (type) { 294 case F_RDLCK: 295 case F_WRLCK: 296 case F_UNLCK: 297 fl->fl_type = type; 298 break; 299 default: 300 return -EINVAL; 301 } 302 return 0; 303 } 304 305 /* Verify a "struct flock" and copy it to a "struct file_lock" as a POSIX 306 * style lock. 
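 *
 * A few illustrative mappings, following the conversion rules implemented
 * below: with l_whence == SEEK_SET, l_start == 100 and l_len == 10 the lock
 * covers fl_start == 100 .. fl_end == 109; with l_len == -10 (the POSIX-2001
 * semantics) it covers 90 .. 99; and l_len == 0 means "to end of file",
 * i.e. fl_end == OFFSET_MAX.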
307 */ 308 static int flock_to_posix_lock(struct file *filp, struct file_lock *fl, 309 struct flock *l) 310 { 311 off_t start, end; 312 313 switch (l->l_whence) { 314 case SEEK_SET: 315 start = 0; 316 break; 317 case SEEK_CUR: 318 start = filp->f_pos; 319 break; 320 case SEEK_END: 321 start = i_size_read(filp->f_path.dentry->d_inode); 322 break; 323 default: 324 return -EINVAL; 325 } 326 327 /* POSIX-1996 leaves the case l->l_len < 0 undefined; 328 POSIX-2001 defines it. */ 329 start += l->l_start; 330 if (start < 0) 331 return -EINVAL; 332 fl->fl_end = OFFSET_MAX; 333 if (l->l_len > 0) { 334 end = start + l->l_len - 1; 335 fl->fl_end = end; 336 } else if (l->l_len < 0) { 337 end = start - 1; 338 fl->fl_end = end; 339 start += l->l_len; 340 if (start < 0) 341 return -EINVAL; 342 } 343 fl->fl_start = start; /* we record the absolute position */ 344 if (fl->fl_end < fl->fl_start) 345 return -EOVERFLOW; 346 347 fl->fl_owner = current->files; 348 fl->fl_pid = current->tgid; 349 fl->fl_file = filp; 350 fl->fl_flags = FL_POSIX; 351 fl->fl_ops = NULL; 352 fl->fl_lmops = NULL; 353 354 return assign_type(fl, l->l_type); 355 } 356 357 #if BITS_PER_LONG == 32 358 static int flock64_to_posix_lock(struct file *filp, struct file_lock *fl, 359 struct flock64 *l) 360 { 361 loff_t start; 362 363 switch (l->l_whence) { 364 case SEEK_SET: 365 start = 0; 366 break; 367 case SEEK_CUR: 368 start = filp->f_pos; 369 break; 370 case SEEK_END: 371 start = i_size_read(filp->f_path.dentry->d_inode); 372 break; 373 default: 374 return -EINVAL; 375 } 376 377 start += l->l_start; 378 if (start < 0) 379 return -EINVAL; 380 fl->fl_end = OFFSET_MAX; 381 if (l->l_len > 0) { 382 fl->fl_end = start + l->l_len - 1; 383 } else if (l->l_len < 0) { 384 fl->fl_end = start - 1; 385 start += l->l_len; 386 if (start < 0) 387 return -EINVAL; 388 } 389 fl->fl_start = start; /* we record the absolute position */ 390 if (fl->fl_end < fl->fl_start) 391 return -EOVERFLOW; 392 393 fl->fl_owner = current->files; 394 fl->fl_pid = current->tgid; 395 fl->fl_file = filp; 396 fl->fl_flags = FL_POSIX; 397 fl->fl_ops = NULL; 398 fl->fl_lmops = NULL; 399 400 switch (l->l_type) { 401 case F_RDLCK: 402 case F_WRLCK: 403 case F_UNLCK: 404 fl->fl_type = l->l_type; 405 break; 406 default: 407 return -EINVAL; 408 } 409 410 return (0); 411 } 412 #endif 413 414 /* default lease lock manager operations */ 415 static void lease_break_callback(struct file_lock *fl) 416 { 417 kill_fasync(&fl->fl_fasync, SIGIO, POLL_MSG); 418 } 419 420 static void lease_release_private_callback(struct file_lock *fl) 421 { 422 if (!fl->fl_file) 423 return; 424 425 f_delown(fl->fl_file); 426 fl->fl_file->f_owner.signum = 0; 427 } 428 429 static int lease_mylease_callback(struct file_lock *fl, struct file_lock *try) 430 { 431 return fl->fl_file == try->fl_file; 432 } 433 434 static struct lock_manager_operations lease_manager_ops = { 435 .fl_break = lease_break_callback, 436 .fl_release_private = lease_release_private_callback, 437 .fl_mylease = lease_mylease_callback, 438 .fl_change = lease_modify, 439 }; 440 441 /* 442 * Initialize a lease, use the default lock manager operations 443 */ 444 static int lease_init(struct file *filp, int type, struct file_lock *fl) 445 { 446 if (assign_type(fl, type) != 0) 447 return -EINVAL; 448 449 fl->fl_owner = current->files; 450 fl->fl_pid = current->tgid; 451 452 fl->fl_file = filp; 453 fl->fl_flags = FL_LEASE; 454 fl->fl_start = 0; 455 fl->fl_end = OFFSET_MAX; 456 fl->fl_ops = NULL; 457 fl->fl_lmops = &lease_manager_ops; 458 return 0; 459 } 
460 461 /* Allocate a file_lock initialised to this type of lease */ 462 static struct file_lock *lease_alloc(struct file *filp, int type) 463 { 464 struct file_lock *fl = locks_alloc_lock(); 465 int error = -ENOMEM; 466 467 if (fl == NULL) 468 return ERR_PTR(error); 469 470 error = lease_init(filp, type, fl); 471 if (error) { 472 locks_free_lock(fl); 473 return ERR_PTR(error); 474 } 475 return fl; 476 } 477 478 /* Check if two locks overlap each other. 479 */ 480 static inline int locks_overlap(struct file_lock *fl1, struct file_lock *fl2) 481 { 482 return ((fl1->fl_end >= fl2->fl_start) && 483 (fl2->fl_end >= fl1->fl_start)); 484 } 485 486 /* 487 * Check whether two locks have the same owner. 488 */ 489 static int posix_same_owner(struct file_lock *fl1, struct file_lock *fl2) 490 { 491 if (fl1->fl_lmops && fl1->fl_lmops->fl_compare_owner) 492 return fl2->fl_lmops == fl1->fl_lmops && 493 fl1->fl_lmops->fl_compare_owner(fl1, fl2); 494 return fl1->fl_owner == fl2->fl_owner; 495 } 496 497 /* Remove waiter from blocker's block list. 498 * When blocker ends up pointing to itself then the list is empty. 499 */ 500 static void __locks_delete_block(struct file_lock *waiter) 501 { 502 list_del_init(&waiter->fl_block); 503 list_del_init(&waiter->fl_link); 504 waiter->fl_next = NULL; 505 } 506 507 /* 508 */ 509 static void locks_delete_block(struct file_lock *waiter) 510 { 511 lock_kernel(); 512 __locks_delete_block(waiter); 513 unlock_kernel(); 514 } 515 516 /* Insert waiter into blocker's block list. 517 * We use a circular list so that processes can be easily woken up in 518 * the order they blocked. The documentation doesn't require this but 519 * it seems like the reasonable thing to do. 520 */ 521 static void locks_insert_block(struct file_lock *blocker, 522 struct file_lock *waiter) 523 { 524 BUG_ON(!list_empty(&waiter->fl_block)); 525 list_add_tail(&waiter->fl_block, &blocker->fl_block); 526 waiter->fl_next = blocker; 527 if (IS_POSIX(blocker)) 528 list_add(&waiter->fl_link, &blocked_list); 529 } 530 531 /* Wake up processes blocked waiting for blocker. 532 * If told to wait then schedule the processes until the block list 533 * is empty, otherwise empty the block list ourselves. 534 */ 535 static void locks_wake_up_blocks(struct file_lock *blocker) 536 { 537 while (!list_empty(&blocker->fl_block)) { 538 struct file_lock *waiter; 539 540 waiter = list_first_entry(&blocker->fl_block, 541 struct file_lock, fl_block); 542 __locks_delete_block(waiter); 543 if (waiter->fl_lmops && waiter->fl_lmops->fl_notify) 544 waiter->fl_lmops->fl_notify(waiter); 545 else 546 wake_up(&waiter->fl_wait); 547 } 548 } 549 550 /* Insert file lock fl into an inode's lock list at the position indicated 551 * by pos. At the same time add the lock to the global file lock list. 552 */ 553 static void locks_insert_lock(struct file_lock **pos, struct file_lock *fl) 554 { 555 list_add(&fl->fl_link, &file_lock_list); 556 557 fl->fl_nspid = get_pid(task_tgid(current)); 558 559 /* insert into file's list */ 560 fl->fl_next = *pos; 561 *pos = fl; 562 563 if (fl->fl_ops && fl->fl_ops->fl_insert) 564 fl->fl_ops->fl_insert(fl); 565 } 566 567 /* 568 * Delete a lock and then free it. 569 * Wake up processes that are blocked waiting for this lock, 570 * notify the FS that the lock has been cleared and 571 * finally free the lock. 
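 * Any FASYNC entry still attached to the lock is torn down here as well;
 * finding one still armed after fasync_helper() runs indicates a bug and
 * is reported via printk().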
572 */ 573 static void locks_delete_lock(struct file_lock **thisfl_p) 574 { 575 struct file_lock *fl = *thisfl_p; 576 577 *thisfl_p = fl->fl_next; 578 fl->fl_next = NULL; 579 list_del_init(&fl->fl_link); 580 581 fasync_helper(0, fl->fl_file, 0, &fl->fl_fasync); 582 if (fl->fl_fasync != NULL) { 583 printk(KERN_ERR "locks_delete_lock: fasync == %p\n", fl->fl_fasync); 584 fl->fl_fasync = NULL; 585 } 586 587 if (fl->fl_ops && fl->fl_ops->fl_remove) 588 fl->fl_ops->fl_remove(fl); 589 590 if (fl->fl_nspid) { 591 put_pid(fl->fl_nspid); 592 fl->fl_nspid = NULL; 593 } 594 595 locks_wake_up_blocks(fl); 596 locks_free_lock(fl); 597 } 598 599 /* Determine if lock sys_fl blocks lock caller_fl. Common functionality 600 * checks for shared/exclusive status of overlapping locks. 601 */ 602 static int locks_conflict(struct file_lock *caller_fl, struct file_lock *sys_fl) 603 { 604 if (sys_fl->fl_type == F_WRLCK) 605 return 1; 606 if (caller_fl->fl_type == F_WRLCK) 607 return 1; 608 return 0; 609 } 610 611 /* Determine if lock sys_fl blocks lock caller_fl. POSIX specific 612 * checking before calling the locks_conflict(). 613 */ 614 static int posix_locks_conflict(struct file_lock *caller_fl, struct file_lock *sys_fl) 615 { 616 /* POSIX locks owned by the same process do not conflict with 617 * each other. 618 */ 619 if (!IS_POSIX(sys_fl) || posix_same_owner(caller_fl, sys_fl)) 620 return (0); 621 622 /* Check whether they overlap */ 623 if (!locks_overlap(caller_fl, sys_fl)) 624 return 0; 625 626 return (locks_conflict(caller_fl, sys_fl)); 627 } 628 629 /* Determine if lock sys_fl blocks lock caller_fl. FLOCK specific 630 * checking before calling the locks_conflict(). 631 */ 632 static int flock_locks_conflict(struct file_lock *caller_fl, struct file_lock *sys_fl) 633 { 634 /* FLOCK locks referring to the same filp do not conflict with 635 * each other. 636 */ 637 if (!IS_FLOCK(sys_fl) || (caller_fl->fl_file == sys_fl->fl_file)) 638 return (0); 639 if ((caller_fl->fl_type & LOCK_MAND) || (sys_fl->fl_type & LOCK_MAND)) 640 return 0; 641 642 return (locks_conflict(caller_fl, sys_fl)); 643 } 644 645 void 646 posix_test_lock(struct file *filp, struct file_lock *fl) 647 { 648 struct file_lock *cfl; 649 650 lock_kernel(); 651 for (cfl = filp->f_path.dentry->d_inode->i_flock; cfl; cfl = cfl->fl_next) { 652 if (!IS_POSIX(cfl)) 653 continue; 654 if (posix_locks_conflict(fl, cfl)) 655 break; 656 } 657 if (cfl) { 658 __locks_copy_lock(fl, cfl); 659 if (cfl->fl_nspid) 660 fl->fl_pid = pid_vnr(cfl->fl_nspid); 661 } else 662 fl->fl_type = F_UNLCK; 663 unlock_kernel(); 664 return; 665 } 666 EXPORT_SYMBOL(posix_test_lock); 667 668 /* 669 * Deadlock detection: 670 * 671 * We attempt to detect deadlocks that are due purely to posix file 672 * locks. 673 * 674 * We assume that a task can be waiting for at most one lock at a time. 675 * So for any acquired lock, the process holding that lock may be 676 * waiting on at most one other lock. That lock in turns may be held by 677 * someone waiting for at most one other lock. Given a requested lock 678 * caller_fl which is about to wait for a conflicting lock block_fl, we 679 * follow this chain of waiters to ensure we are not about to create a 680 * cycle. 681 * 682 * Since we do this before we ever put a process to sleep on a lock, we 683 * are ensured that there is never a cycle; that is what guarantees that 684 * the while() loop in posix_locks_deadlock() eventually completes. 
685 * 686 * Note: the above assumption may not be true when handling lock 687 * requests from a broken NFS client. It may also fail in the presence 688 * of tasks (such as posix threads) sharing the same open file table. 689 * 690 * To handle those cases, we just bail out after a few iterations. 691 */ 692 693 #define MAX_DEADLK_ITERATIONS 10 694 695 /* Find a lock that the owner of the given block_fl is blocking on. */ 696 static struct file_lock *what_owner_is_waiting_for(struct file_lock *block_fl) 697 { 698 struct file_lock *fl; 699 700 list_for_each_entry(fl, &blocked_list, fl_link) { 701 if (posix_same_owner(fl, block_fl)) 702 return fl->fl_next; 703 } 704 return NULL; 705 } 706 707 static int posix_locks_deadlock(struct file_lock *caller_fl, 708 struct file_lock *block_fl) 709 { 710 int i = 0; 711 712 while ((block_fl = what_owner_is_waiting_for(block_fl))) { 713 if (i++ > MAX_DEADLK_ITERATIONS) 714 return 0; 715 if (posix_same_owner(caller_fl, block_fl)) 716 return 1; 717 } 718 return 0; 719 } 720 721 /* Try to create a FLOCK lock on filp. We always insert new FLOCK locks 722 * after any leases, but before any posix locks. 723 * 724 * Note that if called with an FL_EXISTS argument, the caller may determine 725 * whether or not a lock was successfully freed by testing the return 726 * value for -ENOENT. 727 */ 728 static int flock_lock_file(struct file *filp, struct file_lock *request) 729 { 730 struct file_lock *new_fl = NULL; 731 struct file_lock **before; 732 struct inode * inode = filp->f_path.dentry->d_inode; 733 int error = 0; 734 int found = 0; 735 736 lock_kernel(); 737 if (request->fl_flags & FL_ACCESS) 738 goto find_conflict; 739 740 if (request->fl_type != F_UNLCK) { 741 error = -ENOMEM; 742 new_fl = locks_alloc_lock(); 743 if (new_fl == NULL) 744 goto out; 745 error = 0; 746 } 747 748 for_each_lock(inode, before) { 749 struct file_lock *fl = *before; 750 if (IS_POSIX(fl)) 751 break; 752 if (IS_LEASE(fl)) 753 continue; 754 if (filp != fl->fl_file) 755 continue; 756 if (request->fl_type == fl->fl_type) 757 goto out; 758 found = 1; 759 locks_delete_lock(before); 760 break; 761 } 762 763 if (request->fl_type == F_UNLCK) { 764 if ((request->fl_flags & FL_EXISTS) && !found) 765 error = -ENOENT; 766 goto out; 767 } 768 769 /* 770 * If a higher-priority process was blocked on the old file lock, 771 * give it the opportunity to lock the file. 772 */ 773 if (found) 774 cond_resched(); 775 776 find_conflict: 777 for_each_lock(inode, before) { 778 struct file_lock *fl = *before; 779 if (IS_POSIX(fl)) 780 break; 781 if (IS_LEASE(fl)) 782 continue; 783 if (!flock_locks_conflict(request, fl)) 784 continue; 785 error = -EAGAIN; 786 if (request->fl_flags & FL_SLEEP) 787 locks_insert_block(fl, request); 788 goto out; 789 } 790 if (request->fl_flags & FL_ACCESS) 791 goto out; 792 locks_copy_lock(new_fl, request); 793 locks_insert_lock(before, new_fl); 794 new_fl = NULL; 795 error = 0; 796 797 out: 798 unlock_kernel(); 799 if (new_fl) 800 locks_free_lock(new_fl); 801 return error; 802 } 803 804 static int __posix_lock_file(struct inode *inode, struct file_lock *request, struct file_lock *conflock) 805 { 806 struct file_lock *fl; 807 struct file_lock *new_fl = NULL; 808 struct file_lock *new_fl2 = NULL; 809 struct file_lock *left = NULL; 810 struct file_lock *right = NULL; 811 struct file_lock **before; 812 int error, added = 0; 813 814 /* 815 * We may need two file_lock structures for this operation, 816 * so we get them in advance to avoid races. 
817 * 818 * In some cases we can be sure, that no new locks will be needed 819 */ 820 if (!(request->fl_flags & FL_ACCESS) && 821 (request->fl_type != F_UNLCK || 822 request->fl_start != 0 || request->fl_end != OFFSET_MAX)) { 823 new_fl = locks_alloc_lock(); 824 new_fl2 = locks_alloc_lock(); 825 } 826 827 lock_kernel(); 828 if (request->fl_type != F_UNLCK) { 829 for_each_lock(inode, before) { 830 fl = *before; 831 if (!IS_POSIX(fl)) 832 continue; 833 if (!posix_locks_conflict(request, fl)) 834 continue; 835 if (conflock) 836 locks_copy_lock(conflock, fl); 837 error = -EAGAIN; 838 if (!(request->fl_flags & FL_SLEEP)) 839 goto out; 840 error = -EDEADLK; 841 if (posix_locks_deadlock(request, fl)) 842 goto out; 843 error = -EAGAIN; 844 locks_insert_block(fl, request); 845 goto out; 846 } 847 } 848 849 /* If we're just looking for a conflict, we're done. */ 850 error = 0; 851 if (request->fl_flags & FL_ACCESS) 852 goto out; 853 854 /* 855 * Find the first old lock with the same owner as the new lock. 856 */ 857 858 before = &inode->i_flock; 859 860 /* First skip locks owned by other processes. */ 861 while ((fl = *before) && (!IS_POSIX(fl) || 862 !posix_same_owner(request, fl))) { 863 before = &fl->fl_next; 864 } 865 866 /* Process locks with this owner. */ 867 while ((fl = *before) && posix_same_owner(request, fl)) { 868 /* Detect adjacent or overlapping regions (if same lock type) 869 */ 870 if (request->fl_type == fl->fl_type) { 871 /* In all comparisons of start vs end, use 872 * "start - 1" rather than "end + 1". If end 873 * is OFFSET_MAX, end + 1 will become negative. 874 */ 875 if (fl->fl_end < request->fl_start - 1) 876 goto next_lock; 877 /* If the next lock in the list has entirely bigger 878 * addresses than the new one, insert the lock here. 879 */ 880 if (fl->fl_start - 1 > request->fl_end) 881 break; 882 883 /* If we come here, the new and old lock are of the 884 * same type and adjacent or overlapping. Make one 885 * lock yielding from the lower start address of both 886 * locks to the higher end address. 887 */ 888 if (fl->fl_start > request->fl_start) 889 fl->fl_start = request->fl_start; 890 else 891 request->fl_start = fl->fl_start; 892 if (fl->fl_end < request->fl_end) 893 fl->fl_end = request->fl_end; 894 else 895 request->fl_end = fl->fl_end; 896 if (added) { 897 locks_delete_lock(before); 898 continue; 899 } 900 request = fl; 901 added = 1; 902 } 903 else { 904 /* Processing for different lock types is a bit 905 * more complex. 906 */ 907 if (fl->fl_end < request->fl_start) 908 goto next_lock; 909 if (fl->fl_start > request->fl_end) 910 break; 911 if (request->fl_type == F_UNLCK) 912 added = 1; 913 if (fl->fl_start < request->fl_start) 914 left = fl; 915 /* If the next lock in the list has a higher end 916 * address than the new one, insert the new one here. 917 */ 918 if (fl->fl_end > request->fl_end) { 919 right = fl; 920 break; 921 } 922 if (fl->fl_start >= request->fl_start) { 923 /* The new lock completely replaces an old 924 * one (This may happen several times). 925 */ 926 if (added) { 927 locks_delete_lock(before); 928 continue; 929 } 930 /* Replace the old lock with the new one. 931 * Wake up anybody waiting for the old one, 932 * as the change in lock type might satisfy 933 * their needs. 
934 */ 935 locks_wake_up_blocks(fl); 936 fl->fl_start = request->fl_start; 937 fl->fl_end = request->fl_end; 938 fl->fl_type = request->fl_type; 939 locks_release_private(fl); 940 locks_copy_private(fl, request); 941 request = fl; 942 added = 1; 943 } 944 } 945 /* Go on to next lock. 946 */ 947 next_lock: 948 before = &fl->fl_next; 949 } 950 951 /* 952 * The above code only modifies existing locks in case of 953 * merging or replacing. If new lock(s) need to be inserted 954 * all modifications are done bellow this, so it's safe yet to 955 * bail out. 956 */ 957 error = -ENOLCK; /* "no luck" */ 958 if (right && left == right && !new_fl2) 959 goto out; 960 961 error = 0; 962 if (!added) { 963 if (request->fl_type == F_UNLCK) { 964 if (request->fl_flags & FL_EXISTS) 965 error = -ENOENT; 966 goto out; 967 } 968 969 if (!new_fl) { 970 error = -ENOLCK; 971 goto out; 972 } 973 locks_copy_lock(new_fl, request); 974 locks_insert_lock(before, new_fl); 975 new_fl = NULL; 976 } 977 if (right) { 978 if (left == right) { 979 /* The new lock breaks the old one in two pieces, 980 * so we have to use the second new lock. 981 */ 982 left = new_fl2; 983 new_fl2 = NULL; 984 locks_copy_lock(left, right); 985 locks_insert_lock(before, left); 986 } 987 right->fl_start = request->fl_end + 1; 988 locks_wake_up_blocks(right); 989 } 990 if (left) { 991 left->fl_end = request->fl_start - 1; 992 locks_wake_up_blocks(left); 993 } 994 out: 995 unlock_kernel(); 996 /* 997 * Free any unused locks. 998 */ 999 if (new_fl) 1000 locks_free_lock(new_fl); 1001 if (new_fl2) 1002 locks_free_lock(new_fl2); 1003 return error; 1004 } 1005 1006 /** 1007 * posix_lock_file - Apply a POSIX-style lock to a file 1008 * @filp: The file to apply the lock to 1009 * @fl: The lock to be applied 1010 * @conflock: Place to return a copy of the conflicting lock, if found. 1011 * 1012 * Add a POSIX style lock to a file. 1013 * We merge adjacent & overlapping locks whenever possible. 1014 * POSIX locks are sorted by owner task, then by starting address 1015 * 1016 * Note that if called with an FL_EXISTS argument, the caller may determine 1017 * whether or not a lock was successfully freed by testing the return 1018 * value for -ENOENT. 1019 */ 1020 int posix_lock_file(struct file *filp, struct file_lock *fl, 1021 struct file_lock *conflock) 1022 { 1023 return __posix_lock_file(filp->f_path.dentry->d_inode, fl, conflock); 1024 } 1025 EXPORT_SYMBOL(posix_lock_file); 1026 1027 /** 1028 * posix_lock_file_wait - Apply a POSIX-style lock to a file 1029 * @filp: The file to apply the lock to 1030 * @fl: The lock to be applied 1031 * 1032 * Add a POSIX style lock to a file. 1033 * We merge adjacent & overlapping locks whenever possible. 1034 * POSIX locks are sorted by owner task, then by starting address 1035 */ 1036 int posix_lock_file_wait(struct file *filp, struct file_lock *fl) 1037 { 1038 int error; 1039 might_sleep (); 1040 for (;;) { 1041 error = posix_lock_file(filp, fl, NULL); 1042 if ((error != -EAGAIN) || !(fl->fl_flags & FL_SLEEP)) 1043 break; 1044 error = wait_event_interruptible(fl->fl_wait, !fl->fl_next); 1045 if (!error) 1046 continue; 1047 1048 locks_delete_block(fl); 1049 break; 1050 } 1051 return error; 1052 } 1053 EXPORT_SYMBOL(posix_lock_file_wait); 1054 1055 /** 1056 * locks_mandatory_locked - Check for an active lock 1057 * @inode: the file to check 1058 * 1059 * Searches the inode's list of locks to find any POSIX locks which conflict. 1060 * This function is called from locks_verify_locked() only. 
1061 */ 1062 int locks_mandatory_locked(struct inode *inode) 1063 { 1064 fl_owner_t owner = current->files; 1065 struct file_lock *fl; 1066 1067 /* 1068 * Search the lock list for this inode for any POSIX locks. 1069 */ 1070 lock_kernel(); 1071 for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next) { 1072 if (!IS_POSIX(fl)) 1073 continue; 1074 if (fl->fl_owner != owner) 1075 break; 1076 } 1077 unlock_kernel(); 1078 return fl ? -EAGAIN : 0; 1079 } 1080 1081 /** 1082 * locks_mandatory_area - Check for a conflicting lock 1083 * @read_write: %FLOCK_VERIFY_WRITE for exclusive access, %FLOCK_VERIFY_READ 1084 * for shared 1085 * @inode: the file to check 1086 * @filp: how the file was opened (if it was) 1087 * @offset: start of area to check 1088 * @count: length of area to check 1089 * 1090 * Searches the inode's list of locks to find any POSIX locks which conflict. 1091 * This function is called from rw_verify_area() and 1092 * locks_verify_truncate(). 1093 */ 1094 int locks_mandatory_area(int read_write, struct inode *inode, 1095 struct file *filp, loff_t offset, 1096 size_t count) 1097 { 1098 struct file_lock fl; 1099 int error; 1100 1101 locks_init_lock(&fl); 1102 fl.fl_owner = current->files; 1103 fl.fl_pid = current->tgid; 1104 fl.fl_file = filp; 1105 fl.fl_flags = FL_POSIX | FL_ACCESS; 1106 if (filp && !(filp->f_flags & O_NONBLOCK)) 1107 fl.fl_flags |= FL_SLEEP; 1108 fl.fl_type = (read_write == FLOCK_VERIFY_WRITE) ? F_WRLCK : F_RDLCK; 1109 fl.fl_start = offset; 1110 fl.fl_end = offset + count - 1; 1111 1112 for (;;) { 1113 error = __posix_lock_file(inode, &fl, NULL); 1114 if (error != -EAGAIN) 1115 break; 1116 if (!(fl.fl_flags & FL_SLEEP)) 1117 break; 1118 error = wait_event_interruptible(fl.fl_wait, !fl.fl_next); 1119 if (!error) { 1120 /* 1121 * If we've been sleeping someone might have 1122 * changed the permissions behind our back. 1123 */ 1124 if (__mandatory_lock(inode)) 1125 continue; 1126 } 1127 1128 locks_delete_block(&fl); 1129 break; 1130 } 1131 1132 return error; 1133 } 1134 1135 EXPORT_SYMBOL(locks_mandatory_area); 1136 1137 /* We already had a lease on this file; just change its type */ 1138 int lease_modify(struct file_lock **before, int arg) 1139 { 1140 struct file_lock *fl = *before; 1141 int error = assign_type(fl, arg); 1142 1143 if (error) 1144 return error; 1145 locks_wake_up_blocks(fl); 1146 if (arg == F_UNLCK) 1147 locks_delete_lock(before); 1148 return 0; 1149 } 1150 1151 EXPORT_SYMBOL(lease_modify); 1152 1153 static void time_out_leases(struct inode *inode) 1154 { 1155 struct file_lock **before; 1156 struct file_lock *fl; 1157 1158 before = &inode->i_flock; 1159 while ((fl = *before) && IS_LEASE(fl) && (fl->fl_type & F_INPROGRESS)) { 1160 if ((fl->fl_break_time == 0) 1161 || time_before(jiffies, fl->fl_break_time)) { 1162 before = &fl->fl_next; 1163 continue; 1164 } 1165 lease_modify(before, fl->fl_type & ~F_INPROGRESS); 1166 if (fl == *before) /* lease_modify may have freed fl */ 1167 before = &fl->fl_next; 1168 } 1169 } 1170 1171 /** 1172 * __break_lease - revoke all outstanding leases on file 1173 * @inode: the inode of the file to return 1174 * @mode: the open mode (read or write) 1175 * 1176 * break_lease (inlined for speed) has checked there already is at least 1177 * some kind of lock (maybe a lease) on this file. Leases are broken on 1178 * a call to open() or truncate(). This function can sleep unless you 1179 * specified %O_NONBLOCK to your open(). 
1180 */ 1181 int __break_lease(struct inode *inode, unsigned int mode) 1182 { 1183 int error = 0, future; 1184 struct file_lock *new_fl, *flock; 1185 struct file_lock *fl; 1186 unsigned long break_time; 1187 int i_have_this_lease = 0; 1188 1189 new_fl = lease_alloc(NULL, mode & FMODE_WRITE ? F_WRLCK : F_RDLCK); 1190 1191 lock_kernel(); 1192 1193 time_out_leases(inode); 1194 1195 flock = inode->i_flock; 1196 if ((flock == NULL) || !IS_LEASE(flock)) 1197 goto out; 1198 1199 for (fl = flock; fl && IS_LEASE(fl); fl = fl->fl_next) 1200 if (fl->fl_owner == current->files) 1201 i_have_this_lease = 1; 1202 1203 if (mode & FMODE_WRITE) { 1204 /* If we want write access, we have to revoke any lease. */ 1205 future = F_UNLCK | F_INPROGRESS; 1206 } else if (flock->fl_type & F_INPROGRESS) { 1207 /* If the lease is already being broken, we just leave it */ 1208 future = flock->fl_type; 1209 } else if (flock->fl_type & F_WRLCK) { 1210 /* Downgrade the exclusive lease to a read-only lease. */ 1211 future = F_RDLCK | F_INPROGRESS; 1212 } else { 1213 /* the existing lease was read-only, so we can read too. */ 1214 goto out; 1215 } 1216 1217 if (IS_ERR(new_fl) && !i_have_this_lease 1218 && ((mode & O_NONBLOCK) == 0)) { 1219 error = PTR_ERR(new_fl); 1220 goto out; 1221 } 1222 1223 break_time = 0; 1224 if (lease_break_time > 0) { 1225 break_time = jiffies + lease_break_time * HZ; 1226 if (break_time == 0) 1227 break_time++; /* so that 0 means no break time */ 1228 } 1229 1230 for (fl = flock; fl && IS_LEASE(fl); fl = fl->fl_next) { 1231 if (fl->fl_type != future) { 1232 fl->fl_type = future; 1233 fl->fl_break_time = break_time; 1234 /* lease must have lmops break callback */ 1235 fl->fl_lmops->fl_break(fl); 1236 } 1237 } 1238 1239 if (i_have_this_lease || (mode & O_NONBLOCK)) { 1240 error = -EWOULDBLOCK; 1241 goto out; 1242 } 1243 1244 restart: 1245 break_time = flock->fl_break_time; 1246 if (break_time != 0) { 1247 break_time -= jiffies; 1248 if (break_time == 0) 1249 break_time++; 1250 } 1251 locks_insert_block(flock, new_fl); 1252 error = wait_event_interruptible_timeout(new_fl->fl_wait, 1253 !new_fl->fl_next, break_time); 1254 __locks_delete_block(new_fl); 1255 if (error >= 0) { 1256 if (error == 0) 1257 time_out_leases(inode); 1258 /* Wait for the next lease that has not been broken yet */ 1259 for (flock = inode->i_flock; flock && IS_LEASE(flock); 1260 flock = flock->fl_next) { 1261 if (flock->fl_type & F_INPROGRESS) 1262 goto restart; 1263 } 1264 error = 0; 1265 } 1266 1267 out: 1268 unlock_kernel(); 1269 if (!IS_ERR(new_fl)) 1270 locks_free_lock(new_fl); 1271 return error; 1272 } 1273 1274 EXPORT_SYMBOL(__break_lease); 1275 1276 /** 1277 * lease_get_mtime - get the last modified time of an inode 1278 * @inode: the inode 1279 * @time: pointer to a timespec which will contain the last modified time 1280 * 1281 * This is to force NFS clients to flush their caches for files with 1282 * exclusive leases. The justification is that if someone has an 1283 * exclusive lease, then they could be modifying it. 
1284 */ 1285 void lease_get_mtime(struct inode *inode, struct timespec *time) 1286 { 1287 struct file_lock *flock = inode->i_flock; 1288 if (flock && IS_LEASE(flock) && (flock->fl_type & F_WRLCK)) 1289 *time = current_fs_time(inode->i_sb); 1290 else 1291 *time = inode->i_mtime; 1292 } 1293 1294 EXPORT_SYMBOL(lease_get_mtime); 1295 1296 /** 1297 * fcntl_getlease - Enquire what lease is currently active 1298 * @filp: the file 1299 * 1300 * The value returned by this function will be one of 1301 * (if no lease break is pending): 1302 * 1303 * %F_RDLCK to indicate a shared lease is held. 1304 * 1305 * %F_WRLCK to indicate an exclusive lease is held. 1306 * 1307 * %F_UNLCK to indicate no lease is held. 1308 * 1309 * (if a lease break is pending): 1310 * 1311 * %F_RDLCK to indicate an exclusive lease needs to be 1312 * changed to a shared lease (or removed). 1313 * 1314 * %F_UNLCK to indicate the lease needs to be removed. 1315 * 1316 * XXX: sfr & willy disagree over whether F_INPROGRESS 1317 * should be returned to userspace. 1318 */ 1319 int fcntl_getlease(struct file *filp) 1320 { 1321 struct file_lock *fl; 1322 int type = F_UNLCK; 1323 1324 lock_kernel(); 1325 time_out_leases(filp->f_path.dentry->d_inode); 1326 for (fl = filp->f_path.dentry->d_inode->i_flock; fl && IS_LEASE(fl); 1327 fl = fl->fl_next) { 1328 if (fl->fl_file == filp) { 1329 type = fl->fl_type & ~F_INPROGRESS; 1330 break; 1331 } 1332 } 1333 unlock_kernel(); 1334 return type; 1335 } 1336 1337 /** 1338 * generic_setlease - sets a lease on an open file 1339 * @filp: file pointer 1340 * @arg: type of lease to obtain 1341 * @flp: input - file_lock to use, output - file_lock inserted 1342 * 1343 * The (input) flp->fl_lmops->fl_break function is required 1344 * by break_lease(). 1345 * 1346 * Called with kernel lock held. 1347 */ 1348 int generic_setlease(struct file *filp, long arg, struct file_lock **flp) 1349 { 1350 struct file_lock *fl, **before, **my_before = NULL, *lease; 1351 struct file_lock *new_fl = NULL; 1352 struct dentry *dentry = filp->f_path.dentry; 1353 struct inode *inode = dentry->d_inode; 1354 int error, rdlease_count = 0, wrlease_count = 0; 1355 1356 if ((current->fsuid != inode->i_uid) && !capable(CAP_LEASE)) 1357 return -EACCES; 1358 if (!S_ISREG(inode->i_mode)) 1359 return -EINVAL; 1360 error = security_file_lock(filp, arg); 1361 if (error) 1362 return error; 1363 1364 time_out_leases(inode); 1365 1366 BUG_ON(!(*flp)->fl_lmops->fl_break); 1367 1368 lease = *flp; 1369 1370 error = -EAGAIN; 1371 if ((arg == F_RDLCK) && (atomic_read(&inode->i_writecount) > 0)) 1372 goto out; 1373 if ((arg == F_WRLCK) 1374 && ((atomic_read(&dentry->d_count) > 1) 1375 || (atomic_read(&inode->i_count) > 1))) 1376 goto out; 1377 1378 error = -ENOMEM; 1379 new_fl = locks_alloc_lock(); 1380 if (new_fl == NULL) 1381 goto out; 1382 1383 /* 1384 * At this point, we know that if there is an exclusive 1385 * lease on this file, then we hold it on this filp 1386 * (otherwise our open of this file would have blocked). 1387 * And if we are trying to acquire an exclusive lease, 1388 * then the file is not open by anyone (including us) 1389 * except for this filp. 1390 */ 1391 for (before = &inode->i_flock; 1392 ((fl = *before) != NULL) && IS_LEASE(fl); 1393 before = &fl->fl_next) { 1394 if (lease->fl_lmops->fl_mylease(fl, lease)) 1395 my_before = before; 1396 else if (fl->fl_type == (F_INPROGRESS | F_UNLCK)) 1397 /* 1398 * Someone is in the process of opening this 1399 * file for writing so we may not take an 1400 * exclusive lease on it. 
1401 */ 1402 wrlease_count++; 1403 else 1404 rdlease_count++; 1405 } 1406 1407 if ((arg == F_RDLCK && (wrlease_count > 0)) || 1408 (arg == F_WRLCK && ((rdlease_count + wrlease_count) > 0))) 1409 goto out; 1410 1411 if (my_before != NULL) { 1412 *flp = *my_before; 1413 error = lease->fl_lmops->fl_change(my_before, arg); 1414 goto out; 1415 } 1416 1417 error = 0; 1418 if (arg == F_UNLCK) 1419 goto out; 1420 1421 error = -EINVAL; 1422 if (!leases_enable) 1423 goto out; 1424 1425 locks_copy_lock(new_fl, lease); 1426 locks_insert_lock(before, new_fl); 1427 1428 *flp = new_fl; 1429 return 0; 1430 1431 out: 1432 if (new_fl != NULL) 1433 locks_free_lock(new_fl); 1434 return error; 1435 } 1436 EXPORT_SYMBOL(generic_setlease); 1437 1438 /** 1439 * vfs_setlease - sets a lease on an open file 1440 * @filp: file pointer 1441 * @arg: type of lease to obtain 1442 * @lease: file_lock to use 1443 * 1444 * Call this to establish a lease on the file. 1445 * The (*lease)->fl_lmops->fl_break operation must be set; if not, 1446 * break_lease will oops! 1447 * 1448 * This will call the filesystem's setlease file method, if 1449 * defined. Note that there is no getlease method; instead, the 1450 * filesystem setlease method should call back to setlease() to 1451 * add a lease to the inode's lease list, where fcntl_getlease() can 1452 * find it. Since fcntl_getlease() only reports whether the current 1453 * task holds a lease, a cluster filesystem need only do this for 1454 * leases held by processes on this node. 1455 * 1456 * There is also no break_lease method; filesystems that 1457 * handle their own leases shoud break leases themselves from the 1458 * filesystem's open, create, and (on truncate) setattr methods. 1459 * 1460 * Warning: the only current setlease methods exist only to disable 1461 * leases in certain cases. More vfs changes may be required to 1462 * allow a full filesystem lease implementation. 1463 */ 1464 1465 int vfs_setlease(struct file *filp, long arg, struct file_lock **lease) 1466 { 1467 int error; 1468 1469 lock_kernel(); 1470 if (filp->f_op && filp->f_op->setlease) 1471 error = filp->f_op->setlease(filp, arg, lease); 1472 else 1473 error = generic_setlease(filp, arg, lease); 1474 unlock_kernel(); 1475 1476 return error; 1477 } 1478 EXPORT_SYMBOL_GPL(vfs_setlease); 1479 1480 /** 1481 * fcntl_setlease - sets a lease on an open file 1482 * @fd: open file descriptor 1483 * @filp: file pointer 1484 * @arg: type of lease to obtain 1485 * 1486 * Call this fcntl to establish a lease on the file. 1487 * Note that you also need to call %F_SETSIG to 1488 * receive a signal when the lease is broken. 
1489 */ 1490 int fcntl_setlease(unsigned int fd, struct file *filp, long arg) 1491 { 1492 struct file_lock fl, *flp = &fl; 1493 struct dentry *dentry = filp->f_path.dentry; 1494 struct inode *inode = dentry->d_inode; 1495 int error; 1496 1497 locks_init_lock(&fl); 1498 error = lease_init(filp, arg, &fl); 1499 if (error) 1500 return error; 1501 1502 lock_kernel(); 1503 1504 error = vfs_setlease(filp, arg, &flp); 1505 if (error || arg == F_UNLCK) 1506 goto out_unlock; 1507 1508 error = fasync_helper(fd, filp, 1, &flp->fl_fasync); 1509 if (error < 0) { 1510 /* remove lease just inserted by setlease */ 1511 flp->fl_type = F_UNLCK | F_INPROGRESS; 1512 flp->fl_break_time = jiffies - 10; 1513 time_out_leases(inode); 1514 goto out_unlock; 1515 } 1516 1517 error = __f_setown(filp, task_pid(current), PIDTYPE_PID, 0); 1518 out_unlock: 1519 unlock_kernel(); 1520 return error; 1521 } 1522 1523 /** 1524 * flock_lock_file_wait - Apply a FLOCK-style lock to a file 1525 * @filp: The file to apply the lock to 1526 * @fl: The lock to be applied 1527 * 1528 * Add a FLOCK style lock to a file. 1529 */ 1530 int flock_lock_file_wait(struct file *filp, struct file_lock *fl) 1531 { 1532 int error; 1533 might_sleep(); 1534 for (;;) { 1535 error = flock_lock_file(filp, fl); 1536 if ((error != -EAGAIN) || !(fl->fl_flags & FL_SLEEP)) 1537 break; 1538 error = wait_event_interruptible(fl->fl_wait, !fl->fl_next); 1539 if (!error) 1540 continue; 1541 1542 locks_delete_block(fl); 1543 break; 1544 } 1545 return error; 1546 } 1547 1548 EXPORT_SYMBOL(flock_lock_file_wait); 1549 1550 /** 1551 * sys_flock: - flock() system call. 1552 * @fd: the file descriptor to lock. 1553 * @cmd: the type of lock to apply. 1554 * 1555 * Apply a %FL_FLOCK style lock to an open file descriptor. 1556 * The @cmd can be one of 1557 * 1558 * %LOCK_SH -- a shared lock. 1559 * 1560 * %LOCK_EX -- an exclusive lock. 1561 * 1562 * %LOCK_UN -- remove an existing lock. 1563 * 1564 * %LOCK_MAND -- a `mandatory' flock. This exists to emulate Windows Share Modes. 1565 * 1566 * %LOCK_MAND can be combined with %LOCK_READ or %LOCK_WRITE to allow other 1567 * processes read and write access respectively. 1568 */ 1569 asmlinkage long sys_flock(unsigned int fd, unsigned int cmd) 1570 { 1571 struct file *filp; 1572 struct file_lock *lock; 1573 int can_sleep, unlock; 1574 int error; 1575 1576 error = -EBADF; 1577 filp = fget(fd); 1578 if (!filp) 1579 goto out; 1580 1581 can_sleep = !(cmd & LOCK_NB); 1582 cmd &= ~LOCK_NB; 1583 unlock = (cmd == LOCK_UN); 1584 1585 if (!unlock && !(cmd & LOCK_MAND) && !(filp->f_mode & 3)) 1586 goto out_putf; 1587 1588 error = flock_make_lock(filp, &lock, cmd); 1589 if (error) 1590 goto out_putf; 1591 if (can_sleep) 1592 lock->fl_flags |= FL_SLEEP; 1593 1594 error = security_file_lock(filp, cmd); 1595 if (error) 1596 goto out_free; 1597 1598 if (filp->f_op && filp->f_op->flock) 1599 error = filp->f_op->flock(filp, 1600 (can_sleep) ? F_SETLKW : F_SETLK, 1601 lock); 1602 else 1603 error = flock_lock_file_wait(filp, lock); 1604 1605 out_free: 1606 locks_free_lock(lock); 1607 1608 out_putf: 1609 fput(filp); 1610 out: 1611 return error; 1612 } 1613 1614 /** 1615 * vfs_test_lock - test file byte range lock 1616 * @filp: The file to test lock for 1617 * @fl: The lock to test; also used to hold result 1618 * 1619 * Returns -ERRNO on failure. Indicates presence of conflicting lock by 1620 * setting conf->fl_type to something other than F_UNLCK. 
1621 */ 1622 int vfs_test_lock(struct file *filp, struct file_lock *fl) 1623 { 1624 if (filp->f_op && filp->f_op->lock) 1625 return filp->f_op->lock(filp, F_GETLK, fl); 1626 posix_test_lock(filp, fl); 1627 return 0; 1628 } 1629 EXPORT_SYMBOL_GPL(vfs_test_lock); 1630 1631 static int posix_lock_to_flock(struct flock *flock, struct file_lock *fl) 1632 { 1633 flock->l_pid = fl->fl_pid; 1634 #if BITS_PER_LONG == 32 1635 /* 1636 * Make sure we can represent the posix lock via 1637 * legacy 32bit flock. 1638 */ 1639 if (fl->fl_start > OFFT_OFFSET_MAX) 1640 return -EOVERFLOW; 1641 if (fl->fl_end != OFFSET_MAX && fl->fl_end > OFFT_OFFSET_MAX) 1642 return -EOVERFLOW; 1643 #endif 1644 flock->l_start = fl->fl_start; 1645 flock->l_len = fl->fl_end == OFFSET_MAX ? 0 : 1646 fl->fl_end - fl->fl_start + 1; 1647 flock->l_whence = 0; 1648 flock->l_type = fl->fl_type; 1649 return 0; 1650 } 1651 1652 #if BITS_PER_LONG == 32 1653 static void posix_lock_to_flock64(struct flock64 *flock, struct file_lock *fl) 1654 { 1655 flock->l_pid = fl->fl_pid; 1656 flock->l_start = fl->fl_start; 1657 flock->l_len = fl->fl_end == OFFSET_MAX ? 0 : 1658 fl->fl_end - fl->fl_start + 1; 1659 flock->l_whence = 0; 1660 flock->l_type = fl->fl_type; 1661 } 1662 #endif 1663 1664 /* Report the first existing lock that would conflict with l. 1665 * This implements the F_GETLK command of fcntl(). 1666 */ 1667 int fcntl_getlk(struct file *filp, struct flock __user *l) 1668 { 1669 struct file_lock file_lock; 1670 struct flock flock; 1671 int error; 1672 1673 error = -EFAULT; 1674 if (copy_from_user(&flock, l, sizeof(flock))) 1675 goto out; 1676 error = -EINVAL; 1677 if ((flock.l_type != F_RDLCK) && (flock.l_type != F_WRLCK)) 1678 goto out; 1679 1680 error = flock_to_posix_lock(filp, &file_lock, &flock); 1681 if (error) 1682 goto out; 1683 1684 error = vfs_test_lock(filp, &file_lock); 1685 if (error) 1686 goto out; 1687 1688 flock.l_type = file_lock.fl_type; 1689 if (file_lock.fl_type != F_UNLCK) { 1690 error = posix_lock_to_flock(&flock, &file_lock); 1691 if (error) 1692 goto out; 1693 } 1694 error = -EFAULT; 1695 if (!copy_to_user(l, &flock, sizeof(flock))) 1696 error = 0; 1697 out: 1698 return error; 1699 } 1700 1701 /** 1702 * vfs_lock_file - file byte range lock 1703 * @filp: The file to apply the lock to 1704 * @cmd: type of locking operation (F_SETLK, F_GETLK, etc.) 1705 * @fl: The lock to be applied 1706 * @conf: Place to return a copy of the conflicting lock, if found. 1707 * 1708 * A caller that doesn't care about the conflicting lock may pass NULL 1709 * as the final argument. 1710 * 1711 * If the filesystem defines a private ->lock() method, then @conf will 1712 * be left unchanged; so a caller that cares should initialize it to 1713 * some acceptable default. 1714 * 1715 * To avoid blocking kernel daemons, such as lockd, that need to acquire POSIX 1716 * locks, the ->lock() interface may return asynchronously, before the lock has 1717 * been granted or denied by the underlying filesystem, if (and only if) 1718 * fl_grant is set. Callers expecting ->lock() to return asynchronously 1719 * will only use F_SETLK, not F_SETLKW; they will set FL_SLEEP if (and only if) 1720 * the request is for a blocking lock. When ->lock() does return asynchronously, 1721 * it must return -EINPROGRESS, and call ->fl_grant() when the lock 1722 * request completes. 1723 * If the request is for non-blocking lock the file system should return 1724 * -EINPROGRESS then try to get the lock and call the callback routine with 1725 * the result. 
If the request timed out the callback routine will return a 1726 * nonzero return code and the file system should release the lock. The file 1727 * system is also responsible to keep a corresponding posix lock when it 1728 * grants a lock so the VFS can find out which locks are locally held and do 1729 * the correct lock cleanup when required. 1730 * The underlying filesystem must not drop the kernel lock or call 1731 * ->fl_grant() before returning to the caller with a -EINPROGRESS 1732 * return code. 1733 */ 1734 int vfs_lock_file(struct file *filp, unsigned int cmd, struct file_lock *fl, struct file_lock *conf) 1735 { 1736 if (filp->f_op && filp->f_op->lock) 1737 return filp->f_op->lock(filp, cmd, fl); 1738 else 1739 return posix_lock_file(filp, fl, conf); 1740 } 1741 EXPORT_SYMBOL_GPL(vfs_lock_file); 1742 1743 /* Apply the lock described by l to an open file descriptor. 1744 * This implements both the F_SETLK and F_SETLKW commands of fcntl(). 1745 */ 1746 int fcntl_setlk(unsigned int fd, struct file *filp, unsigned int cmd, 1747 struct flock __user *l) 1748 { 1749 struct file_lock *file_lock = locks_alloc_lock(); 1750 struct flock flock; 1751 struct inode *inode; 1752 int error; 1753 1754 if (file_lock == NULL) 1755 return -ENOLCK; 1756 1757 /* 1758 * This might block, so we do it before checking the inode. 1759 */ 1760 error = -EFAULT; 1761 if (copy_from_user(&flock, l, sizeof(flock))) 1762 goto out; 1763 1764 inode = filp->f_path.dentry->d_inode; 1765 1766 /* Don't allow mandatory locks on files that may be memory mapped 1767 * and shared. 1768 */ 1769 if (mandatory_lock(inode) && mapping_writably_mapped(filp->f_mapping)) { 1770 error = -EAGAIN; 1771 goto out; 1772 } 1773 1774 again: 1775 error = flock_to_posix_lock(filp, file_lock, &flock); 1776 if (error) 1777 goto out; 1778 if (cmd == F_SETLKW) { 1779 file_lock->fl_flags |= FL_SLEEP; 1780 } 1781 1782 error = -EBADF; 1783 switch (flock.l_type) { 1784 case F_RDLCK: 1785 if (!(filp->f_mode & FMODE_READ)) 1786 goto out; 1787 break; 1788 case F_WRLCK: 1789 if (!(filp->f_mode & FMODE_WRITE)) 1790 goto out; 1791 break; 1792 case F_UNLCK: 1793 break; 1794 default: 1795 error = -EINVAL; 1796 goto out; 1797 } 1798 1799 error = security_file_lock(filp, file_lock->fl_type); 1800 if (error) 1801 goto out; 1802 1803 if (filp->f_op && filp->f_op->lock != NULL) 1804 error = filp->f_op->lock(filp, cmd, file_lock); 1805 else { 1806 for (;;) { 1807 error = posix_lock_file(filp, file_lock, NULL); 1808 if (error != -EAGAIN || cmd == F_SETLK) 1809 break; 1810 error = wait_event_interruptible(file_lock->fl_wait, 1811 !file_lock->fl_next); 1812 if (!error) 1813 continue; 1814 1815 locks_delete_block(file_lock); 1816 break; 1817 } 1818 } 1819 1820 /* 1821 * Attempt to detect a close/fcntl race and recover by 1822 * releasing the lock that was just acquired. 1823 */ 1824 if (!error && fcheck(fd) != filp && flock.l_type != F_UNLCK) { 1825 flock.l_type = F_UNLCK; 1826 goto again; 1827 } 1828 1829 out: 1830 locks_free_lock(file_lock); 1831 return error; 1832 } 1833 1834 #if BITS_PER_LONG == 32 1835 /* Report the first existing lock that would conflict with l. 1836 * This implements the F_GETLK command of fcntl(). 
1837 */ 1838 int fcntl_getlk64(struct file *filp, struct flock64 __user *l) 1839 { 1840 struct file_lock file_lock; 1841 struct flock64 flock; 1842 int error; 1843 1844 error = -EFAULT; 1845 if (copy_from_user(&flock, l, sizeof(flock))) 1846 goto out; 1847 error = -EINVAL; 1848 if ((flock.l_type != F_RDLCK) && (flock.l_type != F_WRLCK)) 1849 goto out; 1850 1851 error = flock64_to_posix_lock(filp, &file_lock, &flock); 1852 if (error) 1853 goto out; 1854 1855 error = vfs_test_lock(filp, &file_lock); 1856 if (error) 1857 goto out; 1858 1859 flock.l_type = file_lock.fl_type; 1860 if (file_lock.fl_type != F_UNLCK) 1861 posix_lock_to_flock64(&flock, &file_lock); 1862 1863 error = -EFAULT; 1864 if (!copy_to_user(l, &flock, sizeof(flock))) 1865 error = 0; 1866 1867 out: 1868 return error; 1869 } 1870 1871 /* Apply the lock described by l to an open file descriptor. 1872 * This implements both the F_SETLK and F_SETLKW commands of fcntl(). 1873 */ 1874 int fcntl_setlk64(unsigned int fd, struct file *filp, unsigned int cmd, 1875 struct flock64 __user *l) 1876 { 1877 struct file_lock *file_lock = locks_alloc_lock(); 1878 struct flock64 flock; 1879 struct inode *inode; 1880 int error; 1881 1882 if (file_lock == NULL) 1883 return -ENOLCK; 1884 1885 /* 1886 * This might block, so we do it before checking the inode. 1887 */ 1888 error = -EFAULT; 1889 if (copy_from_user(&flock, l, sizeof(flock))) 1890 goto out; 1891 1892 inode = filp->f_path.dentry->d_inode; 1893 1894 /* Don't allow mandatory locks on files that may be memory mapped 1895 * and shared. 1896 */ 1897 if (mandatory_lock(inode) && mapping_writably_mapped(filp->f_mapping)) { 1898 error = -EAGAIN; 1899 goto out; 1900 } 1901 1902 again: 1903 error = flock64_to_posix_lock(filp, file_lock, &flock); 1904 if (error) 1905 goto out; 1906 if (cmd == F_SETLKW64) { 1907 file_lock->fl_flags |= FL_SLEEP; 1908 } 1909 1910 error = -EBADF; 1911 switch (flock.l_type) { 1912 case F_RDLCK: 1913 if (!(filp->f_mode & FMODE_READ)) 1914 goto out; 1915 break; 1916 case F_WRLCK: 1917 if (!(filp->f_mode & FMODE_WRITE)) 1918 goto out; 1919 break; 1920 case F_UNLCK: 1921 break; 1922 default: 1923 error = -EINVAL; 1924 goto out; 1925 } 1926 1927 error = security_file_lock(filp, file_lock->fl_type); 1928 if (error) 1929 goto out; 1930 1931 if (filp->f_op && filp->f_op->lock != NULL) 1932 error = filp->f_op->lock(filp, cmd, file_lock); 1933 else { 1934 for (;;) { 1935 error = posix_lock_file(filp, file_lock, NULL); 1936 if (error != -EAGAIN || cmd == F_SETLK64) 1937 break; 1938 error = wait_event_interruptible(file_lock->fl_wait, 1939 !file_lock->fl_next); 1940 if (!error) 1941 continue; 1942 1943 locks_delete_block(file_lock); 1944 break; 1945 } 1946 } 1947 1948 /* 1949 * Attempt to detect a close/fcntl race and recover by 1950 * releasing the lock that was just acquired. 1951 */ 1952 if (!error && fcheck(fd) != filp && flock.l_type != F_UNLCK) { 1953 flock.l_type = F_UNLCK; 1954 goto again; 1955 } 1956 1957 out: 1958 locks_free_lock(file_lock); 1959 return error; 1960 } 1961 #endif /* BITS_PER_LONG == 32 */ 1962 1963 /* 1964 * This function is called when the file is being removed 1965 * from the task's fd array. POSIX locks belonging to this task 1966 * are deleted at this time. 1967 */ 1968 void locks_remove_posix(struct file *filp, fl_owner_t owner) 1969 { 1970 struct file_lock lock; 1971 1972 /* 1973 * If there are no locks held on this file, we don't need to call 1974 * posix_lock_file(). 

/*
 * This function is called when the file is being removed
 * from the task's fd array.  POSIX locks belonging to this task
 * are deleted at this time.
 */
void locks_remove_posix(struct file *filp, fl_owner_t owner)
{
        struct file_lock lock;

        /*
         * If there are no locks held on this file, we don't need to call
         * posix_lock_file().  Another process could be setting a lock on this
         * file at the same time, but we wouldn't remove that lock anyway.
         */
        if (!filp->f_path.dentry->d_inode->i_flock)
                return;

        lock.fl_type = F_UNLCK;
        lock.fl_flags = FL_POSIX | FL_CLOSE;
        lock.fl_start = 0;
        lock.fl_end = OFFSET_MAX;
        lock.fl_owner = owner;
        lock.fl_pid = current->tgid;
        lock.fl_file = filp;
        lock.fl_ops = NULL;
        lock.fl_lmops = NULL;

        vfs_lock_file(filp, F_SETLK, &lock, NULL);

        if (lock.fl_ops && lock.fl_ops->fl_release_private)
                lock.fl_ops->fl_release_private(&lock);
}

EXPORT_SYMBOL(locks_remove_posix);

/*
 * This function is called on the last close of an open file.
 */
void locks_remove_flock(struct file *filp)
{
        struct inode * inode = filp->f_path.dentry->d_inode;
        struct file_lock *fl;
        struct file_lock **before;

        if (!inode->i_flock)
                return;

        if (filp->f_op && filp->f_op->flock) {
                struct file_lock fl = {
                        .fl_pid = current->tgid,
                        .fl_file = filp,
                        .fl_flags = FL_FLOCK,
                        .fl_type = F_UNLCK,
                        .fl_end = OFFSET_MAX,
                };
                filp->f_op->flock(filp, F_SETLKW, &fl);
                if (fl.fl_ops && fl.fl_ops->fl_release_private)
                        fl.fl_ops->fl_release_private(&fl);
        }

        lock_kernel();
        before = &inode->i_flock;

        while ((fl = *before) != NULL) {
                if (fl->fl_file == filp) {
                        if (IS_FLOCK(fl)) {
                                locks_delete_lock(before);
                                continue;
                        }
                        if (IS_LEASE(fl)) {
                                lease_modify(before, F_UNLCK);
                                continue;
                        }
                        /* What? */
                        BUG();
                }
                before = &fl->fl_next;
        }
        unlock_kernel();
}

/**
 * posix_unblock_lock - stop waiting for a file lock
 * @filp: how the file was opened
 * @waiter: the lock which was waiting
 *
 * lockd needs to block waiting for locks.
 */
int
posix_unblock_lock(struct file *filp, struct file_lock *waiter)
{
        int status = 0;

        lock_kernel();
        if (waiter->fl_next)
                __locks_delete_block(waiter);
        else
                status = -ENOENT;
        unlock_kernel();
        return status;
}

EXPORT_SYMBOL(posix_unblock_lock);

/**
 * vfs_cancel_lock - file byte range unblock lock
 * @filp: The file to apply the unblock to
 * @fl: The lock to be unblocked
 *
 * Used by lock managers to cancel blocked requests
 */
int vfs_cancel_lock(struct file *filp, struct file_lock *fl)
{
        if (filp->f_op && filp->f_op->lock)
                return filp->f_op->lock(filp, F_CANCELLK, fl);
        return 0;
}

EXPORT_SYMBOL_GPL(vfs_cancel_lock);
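
/*
 * The close() semantics implemented by locks_remove_posix() above are easy
 * to trip over from userspace: closing *any* descriptor for a file drops
 * every POSIX lock this process holds on that file, even locks taken
 * through a different descriptor.  Illustration only; the path is made up.
 *
 *	int fd1 = open("/tmp/example", O_RDWR);
 *	int fd2 = open("/tmp/example", O_RDWR);	// second fd, same file
 *	struct flock fl = { .l_type = F_WRLCK, .l_whence = SEEK_SET };
 *
 *	fcntl(fd1, F_SETLK, &fl);		// lock taken through fd1
 *	close(fd2);				// ...yet this close releases it
 */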
"MANDATORY" : "ADVISORY "); 2106 } else if (IS_FLOCK(fl)) { 2107 if (fl->fl_type & LOCK_MAND) { 2108 seq_printf(f, "FLOCK MSNFS "); 2109 } else { 2110 seq_printf(f, "FLOCK ADVISORY "); 2111 } 2112 } else if (IS_LEASE(fl)) { 2113 seq_printf(f, "LEASE "); 2114 if (fl->fl_type & F_INPROGRESS) 2115 seq_printf(f, "BREAKING "); 2116 else if (fl->fl_file) 2117 seq_printf(f, "ACTIVE "); 2118 else 2119 seq_printf(f, "BREAKER "); 2120 } else { 2121 seq_printf(f, "UNKNOWN UNKNOWN "); 2122 } 2123 if (fl->fl_type & LOCK_MAND) { 2124 seq_printf(f, "%s ", 2125 (fl->fl_type & LOCK_READ) 2126 ? (fl->fl_type & LOCK_WRITE) ? "RW " : "READ " 2127 : (fl->fl_type & LOCK_WRITE) ? "WRITE" : "NONE "); 2128 } else { 2129 seq_printf(f, "%s ", 2130 (fl->fl_type & F_INPROGRESS) 2131 ? (fl->fl_type & F_UNLCK) ? "UNLCK" : "READ " 2132 : (fl->fl_type & F_WRLCK) ? "WRITE" : "READ "); 2133 } 2134 if (inode) { 2135 #ifdef WE_CAN_BREAK_LSLK_NOW 2136 seq_printf(f, "%d %s:%ld ", fl_pid, 2137 inode->i_sb->s_id, inode->i_ino); 2138 #else 2139 /* userspace relies on this representation of dev_t ;-( */ 2140 seq_printf(f, "%d %02x:%02x:%ld ", fl_pid, 2141 MAJOR(inode->i_sb->s_dev), 2142 MINOR(inode->i_sb->s_dev), inode->i_ino); 2143 #endif 2144 } else { 2145 seq_printf(f, "%d <none>:0 ", fl_pid); 2146 } 2147 if (IS_POSIX(fl)) { 2148 if (fl->fl_end == OFFSET_MAX) 2149 seq_printf(f, "%Ld EOF\n", fl->fl_start); 2150 else 2151 seq_printf(f, "%Ld %Ld\n", fl->fl_start, fl->fl_end); 2152 } else { 2153 seq_printf(f, "0 EOF\n"); 2154 } 2155 } 2156 2157 static int locks_show(struct seq_file *f, void *v) 2158 { 2159 struct file_lock *fl, *bfl; 2160 2161 fl = list_entry(v, struct file_lock, fl_link); 2162 2163 lock_get_status(f, fl, (long)f->private, ""); 2164 2165 list_for_each_entry(bfl, &fl->fl_block, fl_block) 2166 lock_get_status(f, bfl, (long)f->private, " ->"); 2167 2168 f->private++; 2169 return 0; 2170 } 2171 2172 static void *locks_start(struct seq_file *f, loff_t *pos) 2173 { 2174 lock_kernel(); 2175 f->private = (void *)1; 2176 return seq_list_start(&file_lock_list, *pos); 2177 } 2178 2179 static void *locks_next(struct seq_file *f, void *v, loff_t *pos) 2180 { 2181 return seq_list_next(v, &file_lock_list, pos); 2182 } 2183 2184 static void locks_stop(struct seq_file *f, void *v) 2185 { 2186 unlock_kernel(); 2187 } 2188 2189 struct seq_operations locks_seq_operations = { 2190 .start = locks_start, 2191 .next = locks_next, 2192 .stop = locks_stop, 2193 .show = locks_show, 2194 }; 2195 #endif 2196 2197 /** 2198 * lock_may_read - checks that the region is free of locks 2199 * @inode: the inode that is being read 2200 * @start: the first byte to read 2201 * @len: the number of bytes to read 2202 * 2203 * Emulates Windows locking requirements. Whole-file 2204 * mandatory locks (share modes) can prohibit a read and 2205 * byte-range POSIX locks can prohibit a read if they overlap. 2206 * 2207 * N.B. this function is only ever called 2208 * from knfsd and ownership of locks is never checked. 

/**
 * lock_may_read - checks that the region is free of locks
 * @inode: the inode that is being read
 * @start: the first byte to read
 * @len: the number of bytes to read
 *
 * Emulates Windows locking requirements.  Whole-file
 * mandatory locks (share modes) can prohibit a read and
 * byte-range POSIX locks can prohibit a read if they overlap.
 *
 * N.B. this function is only ever called
 * from knfsd and ownership of locks is never checked.
 */
int lock_may_read(struct inode *inode, loff_t start, unsigned long len)
{
        struct file_lock *fl;
        int result = 1;
        lock_kernel();
        for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next) {
                if (IS_POSIX(fl)) {
                        if (fl->fl_type == F_RDLCK)
                                continue;
                        if ((fl->fl_end < start) || (fl->fl_start > (start + len)))
                                continue;
                } else if (IS_FLOCK(fl)) {
                        if (!(fl->fl_type & LOCK_MAND))
                                continue;
                        if (fl->fl_type & LOCK_READ)
                                continue;
                } else
                        continue;
                result = 0;
                break;
        }
        unlock_kernel();
        return result;
}

EXPORT_SYMBOL(lock_may_read);

/**
 * lock_may_write - checks that the region is free of locks
 * @inode: the inode that is being written
 * @start: the first byte to write
 * @len: the number of bytes to write
 *
 * Emulates Windows locking requirements.  Whole-file
 * mandatory locks (share modes) can prohibit a write and
 * byte-range POSIX locks can prohibit a write if they overlap.
 *
 * N.B. this function is only ever called
 * from knfsd and ownership of locks is never checked.
 */
int lock_may_write(struct inode *inode, loff_t start, unsigned long len)
{
        struct file_lock *fl;
        int result = 1;
        lock_kernel();
        for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next) {
                if (IS_POSIX(fl)) {
                        if ((fl->fl_end < start) || (fl->fl_start > (start + len)))
                                continue;
                } else if (IS_FLOCK(fl)) {
                        if (!(fl->fl_type & LOCK_MAND))
                                continue;
                        if (fl->fl_type & LOCK_WRITE)
                                continue;
                } else
                        continue;
                result = 0;
                break;
        }
        unlock_kernel();
        return result;
}

EXPORT_SYMBOL(lock_may_write);

static int __init filelock_init(void)
{
        filelock_cache = kmem_cache_create("file_lock_cache",
                        sizeof(struct file_lock), 0, SLAB_PANIC,
                        init_once);
        return 0;
}

core_initcall(filelock_init);
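
/*
 * Worked example for the overlap test in lock_may_read()/lock_may_write()
 * above (numbers made up): with a posix write lock covering bytes 100-199,
 * lock_may_read(inode, 150, 10) returns 0 because the regions overlap,
 * while lock_may_read(inode, 300, 10) still returns 1.
 */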