/*
 *  linux/fs/locks.c
 *
 *  Provide support for fcntl()'s F_GETLK, F_SETLK, and F_SETLKW calls.
 *  Doug Evans (dje@spiff.uucp), August 07, 1992
 *
 *  Deadlock detection added.
 *  FIXME: one thing isn't handled yet:
 *	- mandatory locks (requires lots of changes elsewhere)
 *  Kelly Carmichael (kelly@[142.24.8.65]), September 17, 1994.
 *
 *  Miscellaneous edits, and a total rewrite of posix_lock_file() code.
 *  Kai Petzke (wpp@marie.physik.tu-berlin.de), 1994
 *
 *  Converted file_lock_table to a linked list from an array, which eliminates
 *  the limits on how many active file locks are open.
 *  Chad Page (pageone@netcom.com), November 27, 1994
 *
 *  Removed dependency on file descriptors. dup()'ed file descriptors now
 *  get the same locks as the original file descriptors, and a close() on
 *  any file descriptor removes ALL the locks on the file for the current
 *  process. Since locks still depend on the process id, locks are inherited
 *  after an exec() but not after a fork(). This agrees with POSIX, and both
 *  BSD and SVR4 practice.
 *  Andy Walker (andy@lysaker.kvaerner.no), February 14, 1995
 *
 *  Scrapped free list which is redundant now that we allocate locks
 *  dynamically with kmalloc()/kfree().
 *  Andy Walker (andy@lysaker.kvaerner.no), February 21, 1995
 *
 *  Implemented two lock personalities - FL_FLOCK and FL_POSIX.
 *
 *  FL_POSIX locks are created with calls to fcntl() and lockf() through the
 *  fcntl() system call. They have the semantics described above.
 *
 *  FL_FLOCK locks are created with calls to flock(), through the flock()
 *  system call, which is new. Old C libraries implement flock() via fcntl()
 *  and will continue to use the old, broken implementation.
 *
 *  FL_FLOCK locks follow the 4.4 BSD flock() semantics. They are associated
 *  with a file pointer (filp). As a result they can be shared by a parent
 *  process and its children after a fork(). They are removed when the last
 *  file descriptor referring to the file pointer is closed (unless explicitly
 *  unlocked).
 *
 *  FL_FLOCK locks never deadlock; an existing lock is always removed before
 *  upgrading from shared to exclusive (or vice versa). When this happens
 *  any processes blocked by the current lock are woken up and allowed to
 *  run before the new lock is applied.
 *  Andy Walker (andy@lysaker.kvaerner.no), June 09, 1995
 *
 *  Removed some race conditions in flock_lock_file(), marked other possible
 *  races. Just grep for FIXME to see them.
 *  Dmitry Gorodchanin (pgmdsg@ibi.com), February 09, 1996.
 *
 *  Addressed Dmitry's concerns. Deadlock checking no longer recursive.
 *  Lock allocation changed to GFP_ATOMIC as we can't afford to sleep
 *  once we've checked for blocking and deadlocking.
 *  Andy Walker (andy@lysaker.kvaerner.no), April 03, 1996.
 *
 *  Initial implementation of mandatory locks. SunOS turned out to be
 *  a rotten model, so I implemented the "obvious" semantics.
 *  See 'Documentation/filesystems/mandatory-locking.txt' for details.
 *  Andy Walker (andy@lysaker.kvaerner.no), April 06, 1996.
 *
 *  Don't allow mandatory locks on mmap()'ed files. Added simple functions to
 *  check if a file has mandatory locks, used by mmap(), open() and creat() to
 *  see if system call should be rejected. Ref. HP-UX/SunOS/Solaris Reference
 *  Manual, Section 2.
 *  Andy Walker (andy@lysaker.kvaerner.no), April 09, 1996.
 *
 *  Tidied up block list handling.
 *  Added '/proc/locks' interface.
 *  Andy Walker (andy@lysaker.kvaerner.no), April 24, 1996.
 *
 *  Fixed deadlock condition for pathological code that mixes calls to
 *  flock() and fcntl().
 *  Andy Walker (andy@lysaker.kvaerner.no), April 29, 1996.
 *
 *  Allow only one type of locking scheme (FL_POSIX or FL_FLOCK) to be in use
 *  for a given file at a time. Changed the CONFIG_LOCK_MANDATORY scheme to
 *  guarantee sensible behaviour in the case where file system modules might
 *  be compiled with different options than the kernel itself.
 *  Andy Walker (andy@lysaker.kvaerner.no), May 15, 1996.
 *
 *  Added a couple of missing wake_up() calls. Thanks to Thomas Meckel
 *  (Thomas.Meckel@mni.fh-giessen.de) for spotting this.
 *  Andy Walker (andy@lysaker.kvaerner.no), May 15, 1996.
 *
 *  Changed FL_POSIX locks to use the block list in the same way as FL_FLOCK
 *  locks. Changed process synchronisation to avoid dereferencing locks that
 *  have already been freed.
 *  Andy Walker (andy@lysaker.kvaerner.no), Sep 21, 1996.
 *
 *  Made the block list a circular list to minimise searching in the list.
 *  Andy Walker (andy@lysaker.kvaerner.no), Sep 25, 1996.
 *
 *  Made mandatory locking a mount option. Default is not to allow mandatory
 *  locking.
 *  Andy Walker (andy@lysaker.kvaerner.no), Oct 04, 1996.
 *
 *  Some adaptations for NFS support.
 *  Olaf Kirch (okir@monad.swb.de), Dec 1996,
 *
 *  Fixed /proc/locks interface so that we can't overrun the buffer we are handed.
 *  Andy Walker (andy@lysaker.kvaerner.no), May 12, 1997.
 *
 *  Use slab allocator instead of kmalloc/kfree.
 *  Use generic list implementation from <linux/list.h>.
 *  Sped up posix_locks_deadlock by only considering blocked locks.
 *  Matthew Wilcox <willy@debian.org>, March, 2000.
 *
 *  Leases and LOCK_MAND
 *  Matthew Wilcox <willy@debian.org>, June, 2000.
 *  Stephen Rothwell <sfr@canb.auug.org.au>, June, 2000.
 */

#include <linux/capability.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/security.h>
#include <linux/slab.h>
#include <linux/syscalls.h>
#include <linux/time.h>
#include <linux/rcupdate.h>
#include <linux/pid_namespace.h>
#include <linux/hashtable.h>
#include <linux/percpu.h>
#include <linux/lglock.h>

#define CREATE_TRACE_POINTS
#include <trace/events/filelock.h>

#include <asm/uaccess.h>

#define IS_POSIX(fl)	(fl->fl_flags & FL_POSIX)
#define IS_FLOCK(fl)	(fl->fl_flags & FL_FLOCK)
#define IS_LEASE(fl)	(fl->fl_flags & (FL_LEASE|FL_DELEG))
#define IS_OFDLCK(fl)	(fl->fl_flags & FL_OFDLCK)

static bool lease_breaking(struct file_lock *fl)
{
	return fl->fl_flags & (FL_UNLOCK_PENDING | FL_DOWNGRADE_PENDING);
}

static int target_leasetype(struct file_lock *fl)
{
	if (fl->fl_flags & FL_UNLOCK_PENDING)
		return F_UNLCK;
	if (fl->fl_flags & FL_DOWNGRADE_PENDING)
		return F_RDLCK;
	return fl->fl_type;
}

int leases_enable = 1;
int lease_break_time = 45;

#define for_each_lock(inode, lockp) \
	for (lockp = &inode->i_flock; *lockp != NULL; lockp = &(*lockp)->fl_next)

/*
 * The global file_lock_list is only used for displaying /proc/locks, so we
 * keep a list on each CPU, with each list protected by its own spinlock via
 * the file_lock_lglock.
 * Note that alterations to the list also require that the relevant
 * i_lock is held.
 */
DEFINE_STATIC_LGLOCK(file_lock_lglock);
static DEFINE_PER_CPU(struct hlist_head, file_lock_list);

/*
 * The blocked_hash is used to find POSIX lock loops for deadlock detection.
 * It is protected by blocked_lock_lock.
 *
 * We hash locks by lockowner in order to optimize searching for the lock a
 * particular lockowner is waiting on.
 *
 * FIXME: make this value scale via some heuristic? We generally will want more
 * buckets when we have more lockowners holding locks, but that's a little
 * difficult to determine without knowing what the workload will look like.
 */
#define BLOCKED_HASH_BITS	7
static DEFINE_HASHTABLE(blocked_hash, BLOCKED_HASH_BITS);

/*
 * This lock protects the blocked_hash. Generally, if you're accessing it, you
 * want to be holding this lock.
 *
 * In addition, it also protects the fl->fl_block list, and the fl->fl_next
 * pointer for file_lock structures that are acting as lock requests (in
 * contrast to those that are acting as records of acquired locks).
 *
 * Note that when we acquire this lock in order to change the above fields,
 * we often hold the i_lock as well. In certain cases, when reading the fields
 * protected by this lock, we can skip acquiring it iff we already hold the
 * i_lock.
 *
 * In particular, adding an entry to the fl_block list requires that you hold
 * both the i_lock and the blocked_lock_lock (acquired in that order). Deleting
 * an entry from the list however only requires the blocked_lock_lock.
 */
static DEFINE_SPINLOCK(blocked_lock_lock);

static struct kmem_cache *filelock_cache __read_mostly;

static void locks_init_lock_heads(struct file_lock *fl)
{
	INIT_HLIST_NODE(&fl->fl_link);
	INIT_LIST_HEAD(&fl->fl_block);
	init_waitqueue_head(&fl->fl_wait);
}

/* Allocate an empty lock structure. */
struct file_lock *locks_alloc_lock(void)
{
	struct file_lock *fl = kmem_cache_zalloc(filelock_cache, GFP_KERNEL);

	if (fl)
		locks_init_lock_heads(fl);

	return fl;
}
EXPORT_SYMBOL_GPL(locks_alloc_lock);

void locks_release_private(struct file_lock *fl)
{
	if (fl->fl_ops) {
		if (fl->fl_ops->fl_release_private)
			fl->fl_ops->fl_release_private(fl);
		fl->fl_ops = NULL;
	}
	fl->fl_lmops = NULL;
}
EXPORT_SYMBOL_GPL(locks_release_private);
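
/*
 * Example (illustrative sketch, not part of the build): the usual
 * allocate/use/free pattern for one of these structures, assuming
 * process context where a GFP_KERNEL allocation is allowed.
 *
 *	struct file_lock *fl = locks_alloc_lock();
 *
 *	if (!fl)
 *		return -ENOMEM;
 *	fl->fl_flags = FL_POSIX;
 *	...
 *	locks_free_lock(fl);
 */
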
/* Free a lock which is not in use. */
void locks_free_lock(struct file_lock *fl)
{
	BUG_ON(waitqueue_active(&fl->fl_wait));
	BUG_ON(!list_empty(&fl->fl_block));
	BUG_ON(!hlist_unhashed(&fl->fl_link));

	locks_release_private(fl);
	kmem_cache_free(filelock_cache, fl);
}
EXPORT_SYMBOL(locks_free_lock);

static void
locks_dispose_list(struct list_head *dispose)
{
	struct file_lock *fl;

	while (!list_empty(dispose)) {
		fl = list_first_entry(dispose, struct file_lock, fl_block);
		list_del_init(&fl->fl_block);
		locks_free_lock(fl);
	}
}

void locks_init_lock(struct file_lock *fl)
{
	memset(fl, 0, sizeof(struct file_lock));
	locks_init_lock_heads(fl);
}
EXPORT_SYMBOL(locks_init_lock);

static void locks_copy_private(struct file_lock *new, struct file_lock *fl)
{
	if (fl->fl_ops) {
		if (fl->fl_ops->fl_copy_lock)
			fl->fl_ops->fl_copy_lock(new, fl);
		new->fl_ops = fl->fl_ops;
	}
	if (fl->fl_lmops)
		new->fl_lmops = fl->fl_lmops;
}

/*
 * Initialize a new lock from an existing file_lock structure.
 */
void __locks_copy_lock(struct file_lock *new, const struct file_lock *fl)
{
	new->fl_owner = fl->fl_owner;
	new->fl_pid = fl->fl_pid;
	new->fl_file = NULL;
	new->fl_flags = fl->fl_flags;
	new->fl_type = fl->fl_type;
	new->fl_start = fl->fl_start;
	new->fl_end = fl->fl_end;
	new->fl_ops = NULL;
	new->fl_lmops = NULL;
}
EXPORT_SYMBOL(__locks_copy_lock);

void locks_copy_lock(struct file_lock *new, struct file_lock *fl)
{
	/* "new" must be a freshly-initialized lock */
	WARN_ON_ONCE(new->fl_ops);

	__locks_copy_lock(new, fl);
	new->fl_file = fl->fl_file;
	new->fl_ops = fl->fl_ops;
	new->fl_lmops = fl->fl_lmops;

	locks_copy_private(new, fl);
}
EXPORT_SYMBOL(locks_copy_lock);

static inline int flock_translate_cmd(int cmd)
{
	if (cmd & LOCK_MAND)
		return cmd & (LOCK_MAND | LOCK_RW);
	switch (cmd) {
	case LOCK_SH:
		return F_RDLCK;
	case LOCK_EX:
		return F_WRLCK;
	case LOCK_UN:
		return F_UNLCK;
	}
	return -EINVAL;
}

/* Fill in a file_lock structure with an appropriate FLOCK lock. */
static int flock_make_lock(struct file *filp, struct file_lock **lock,
		unsigned int cmd)
{
	struct file_lock *fl;
	int type = flock_translate_cmd(cmd);

	if (type < 0)
		return type;

	fl = locks_alloc_lock();
	if (fl == NULL)
		return -ENOMEM;

	fl->fl_file = filp;
	fl->fl_owner = filp;
	fl->fl_pid = current->tgid;
	fl->fl_flags = FL_FLOCK;
	fl->fl_type = type;
	fl->fl_end = OFFSET_MAX;

	*lock = fl;
	return 0;
}

static int assign_type(struct file_lock *fl, long type)
{
	switch (type) {
	case F_RDLCK:
	case F_WRLCK:
	case F_UNLCK:
		fl->fl_type = type;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

static int flock64_to_posix_lock(struct file *filp, struct file_lock *fl,
				 struct flock64 *l)
{
	switch (l->l_whence) {
	case SEEK_SET:
		fl->fl_start = 0;
		break;
	case SEEK_CUR:
		fl->fl_start = filp->f_pos;
		break;
	case SEEK_END:
		fl->fl_start = i_size_read(file_inode(filp));
		break;
	default:
		return -EINVAL;
	}
	if (l->l_start > OFFSET_MAX - fl->fl_start)
		return -EOVERFLOW;
	fl->fl_start += l->l_start;
	if (fl->fl_start < 0)
		return -EINVAL;

	/* POSIX-1996 leaves the case l->l_len < 0 undefined;
	   POSIX-2001 defines it. */
	if (l->l_len > 0) {
		if (l->l_len - 1 > OFFSET_MAX - fl->fl_start)
			return -EOVERFLOW;
		fl->fl_end = fl->fl_start + l->l_len - 1;
	} else if (l->l_len < 0) {
		if (fl->fl_start + l->l_len < 0)
			return -EINVAL;
		fl->fl_end = fl->fl_start - 1;
		fl->fl_start += l->l_len;
	} else
		fl->fl_end = OFFSET_MAX;

	fl->fl_owner = current->files;
	fl->fl_pid = current->tgid;
	fl->fl_file = filp;
	fl->fl_flags = FL_POSIX;
	fl->fl_ops = NULL;
	fl->fl_lmops = NULL;

	return assign_type(fl, l->l_type);
}
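
/*
 * Worked example for the conversion above (illustrative only): with
 * l_whence = SEEK_SET, l_start = 100 and l_len = 10, the lock covers
 * bytes [100,109], so fl_start = 100 and fl_end = 109. A negative
 * length counts backwards: l_len = -10 from the same l_start locks the
 * 10 bytes *before* offset 100, giving fl_start = 90 and fl_end = 99.
 * l_len = 0 means "to end of file" and is represented internally as
 * fl_end = OFFSET_MAX.
 */
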
/* Verify a "struct flock" and copy it to a "struct file_lock" as a POSIX
 * style lock.
 */
static int flock_to_posix_lock(struct file *filp, struct file_lock *fl,
			       struct flock *l)
{
	struct flock64 ll = {
		.l_type = l->l_type,
		.l_whence = l->l_whence,
		.l_start = l->l_start,
		.l_len = l->l_len,
	};

	return flock64_to_posix_lock(filp, fl, &ll);
}

/* default lease lock manager operations */
static void lease_break_callback(struct file_lock *fl)
{
	kill_fasync(&fl->fl_fasync, SIGIO, POLL_MSG);
}

static const struct lock_manager_operations lease_manager_ops = {
	.lm_break = lease_break_callback,
	.lm_change = lease_modify,
};

/*
 * Initialize a lease, use the default lock manager operations
 */
static int lease_init(struct file *filp, long type, struct file_lock *fl)
{
	if (assign_type(fl, type) != 0)
		return -EINVAL;

	fl->fl_owner = current->files;
	fl->fl_pid = current->tgid;

	fl->fl_file = filp;
	fl->fl_flags = FL_LEASE;
	fl->fl_start = 0;
	fl->fl_end = OFFSET_MAX;
	fl->fl_ops = NULL;
	fl->fl_lmops = &lease_manager_ops;
	return 0;
}

/* Allocate a file_lock initialised to this type of lease */
static struct file_lock *lease_alloc(struct file *filp, long type)
{
	struct file_lock *fl = locks_alloc_lock();
	int error = -ENOMEM;

	if (fl == NULL)
		return ERR_PTR(error);

	error = lease_init(filp, type, fl);
	if (error) {
		locks_free_lock(fl);
		return ERR_PTR(error);
	}
	return fl;
}

/* Check if two locks overlap each other. */
static inline int locks_overlap(struct file_lock *fl1, struct file_lock *fl2)
{
	return ((fl1->fl_end >= fl2->fl_start) &&
		(fl2->fl_end >= fl1->fl_start));
}

/*
 * Check whether two locks have the same owner.
 */
static int posix_same_owner(struct file_lock *fl1, struct file_lock *fl2)
{
	if (fl1->fl_lmops && fl1->fl_lmops->lm_compare_owner)
		return fl2->fl_lmops == fl1->fl_lmops &&
			fl1->fl_lmops->lm_compare_owner(fl1, fl2);
	return fl1->fl_owner == fl2->fl_owner;
}

/* Must be called with the i_lock held! */
static void locks_insert_global_locks(struct file_lock *fl)
{
	lg_local_lock(&file_lock_lglock);
	fl->fl_link_cpu = smp_processor_id();
	hlist_add_head(&fl->fl_link, this_cpu_ptr(&file_lock_list));
	lg_local_unlock(&file_lock_lglock);
}

/* Must be called with the i_lock held! */
static void locks_delete_global_locks(struct file_lock *fl)
{
	/*
	 * Avoid taking lock if already unhashed. This is safe since this check
	 * is done while holding the i_lock, and new insertions into the list
	 * also require that it be held.
	 */
	if (hlist_unhashed(&fl->fl_link))
		return;
	lg_local_lock_cpu(&file_lock_lglock, fl->fl_link_cpu);
	hlist_del_init(&fl->fl_link);
	lg_local_unlock_cpu(&file_lock_lglock, fl->fl_link_cpu);
}

static unsigned long
posix_owner_key(struct file_lock *fl)
{
	if (fl->fl_lmops && fl->fl_lmops->lm_owner_key)
		return fl->fl_lmops->lm_owner_key(fl);
	return (unsigned long)fl->fl_owner;
}

static void locks_insert_global_blocked(struct file_lock *waiter)
{
	hash_add(blocked_hash, &waiter->fl_link, posix_owner_key(waiter));
}

static void locks_delete_global_blocked(struct file_lock *waiter)
{
	hash_del(&waiter->fl_link);
}

/* Remove waiter from blocker's block list.
 * When blocker ends up pointing to itself then the list is empty.
 *
 * Must be called with blocked_lock_lock held.
 */
static void __locks_delete_block(struct file_lock *waiter)
{
	locks_delete_global_blocked(waiter);
	list_del_init(&waiter->fl_block);
	waiter->fl_next = NULL;
}

static void locks_delete_block(struct file_lock *waiter)
{
	spin_lock(&blocked_lock_lock);
	__locks_delete_block(waiter);
	spin_unlock(&blocked_lock_lock);
}

/* Insert waiter into blocker's block list.
 * We use a circular list so that processes can be easily woken up in
 * the order they blocked. The documentation doesn't require this but
 * it seems like the reasonable thing to do.
 *
 * Must be called with both the i_lock and blocked_lock_lock held. The fl_block
 * list itself is protected by the blocked_lock_lock, but by ensuring that the
 * i_lock is also held on insertions we can avoid taking the blocked_lock_lock
 * in some cases when we see that the fl_block list is empty.
 */
static void __locks_insert_block(struct file_lock *blocker,
				 struct file_lock *waiter)
{
	BUG_ON(!list_empty(&waiter->fl_block));
	waiter->fl_next = blocker;
	list_add_tail(&waiter->fl_block, &blocker->fl_block);
	if (IS_POSIX(blocker) && !IS_OFDLCK(blocker))
		locks_insert_global_blocked(waiter);
}

/* Must be called with i_lock held. */
static void locks_insert_block(struct file_lock *blocker,
			       struct file_lock *waiter)
{
	spin_lock(&blocked_lock_lock);
	__locks_insert_block(blocker, waiter);
	spin_unlock(&blocked_lock_lock);
}

/*
 * Wake up processes blocked waiting for blocker.
 *
 * Must be called with the inode->i_lock held!
 */
static void locks_wake_up_blocks(struct file_lock *blocker)
{
	/*
	 * Avoid taking global lock if list is empty. This is safe since new
	 * blocked requests are only added to the list under the i_lock, and
	 * the i_lock is always held here. Note that removal from the fl_block
	 * list does not require the i_lock, so we must recheck list_empty()
	 * after acquiring the blocked_lock_lock.
	 */
	if (list_empty(&blocker->fl_block))
		return;

	spin_lock(&blocked_lock_lock);
	while (!list_empty(&blocker->fl_block)) {
		struct file_lock *waiter;

		waiter = list_first_entry(&blocker->fl_block,
					  struct file_lock, fl_block);
		__locks_delete_block(waiter);
		if (waiter->fl_lmops && waiter->fl_lmops->lm_notify)
			waiter->fl_lmops->lm_notify(waiter);
		else
			wake_up(&waiter->fl_wait);
	}
	spin_unlock(&blocked_lock_lock);
}

/* Insert file lock fl into an inode's lock list at the position indicated
 * by pos. At the same time add the lock to the global file lock list.
 *
 * Must be called with the i_lock held!
 */
static void locks_insert_lock(struct file_lock **pos, struct file_lock *fl)
{
	fl->fl_nspid = get_pid(task_tgid(current));

	/* insert into file's list */
	fl->fl_next = *pos;
	*pos = fl;

	locks_insert_global_locks(fl);
}

/**
 * locks_unlink_lock - Unlink a lock from all lists
 * @thisfl_p: pointer that points to the fl_next field of the previous
 *	      inode->i_flock list entry
 *
 * Unlink a lock from all lists and free the namespace reference, but don't
 * free it yet. Wake up processes that are blocked waiting for this lock and
 * notify the FS that the lock has been cleared.
 *
 * Must be called with the i_lock held!
 */
static void locks_unlink_lock(struct file_lock **thisfl_p)
{
	struct file_lock *fl = *thisfl_p;

	locks_delete_global_locks(fl);

	*thisfl_p = fl->fl_next;
	fl->fl_next = NULL;

	if (fl->fl_nspid) {
		put_pid(fl->fl_nspid);
		fl->fl_nspid = NULL;
	}

	locks_wake_up_blocks(fl);
}

/*
 * Unlink a lock from all lists and free it.
 *
 * Must be called with i_lock held!
 */
static void locks_delete_lock(struct file_lock **thisfl_p,
			      struct list_head *dispose)
{
	struct file_lock *fl = *thisfl_p;

	locks_unlink_lock(thisfl_p);
	if (dispose)
		list_add(&fl->fl_block, dispose);
	else
		locks_free_lock(fl);
}

/* Determine if lock sys_fl blocks lock caller_fl. Common functionality
 * checks for shared/exclusive status of overlapping locks.
 */
static int locks_conflict(struct file_lock *caller_fl, struct file_lock *sys_fl)
{
	if (sys_fl->fl_type == F_WRLCK)
		return 1;
	if (caller_fl->fl_type == F_WRLCK)
		return 1;
	return 0;
}

/* Determine if lock sys_fl blocks lock caller_fl. POSIX specific
 * checking before calling the locks_conflict().
 */
static int posix_locks_conflict(struct file_lock *caller_fl, struct file_lock *sys_fl)
{
	/* POSIX locks owned by the same process do not conflict with
	 * each other.
	 */
	if (!IS_POSIX(sys_fl) || posix_same_owner(caller_fl, sys_fl))
		return 0;

	/* Check whether they overlap */
	if (!locks_overlap(caller_fl, sys_fl))
		return 0;

	return locks_conflict(caller_fl, sys_fl);
}

/* Determine if lock sys_fl blocks lock caller_fl. FLOCK specific
 * checking before calling the locks_conflict().
 */
static int flock_locks_conflict(struct file_lock *caller_fl, struct file_lock *sys_fl)
{
	/* FLOCK locks referring to the same filp do not conflict with
	 * each other.
	 */
	if (!IS_FLOCK(sys_fl) || (caller_fl->fl_file == sys_fl->fl_file))
		return 0;
	if ((caller_fl->fl_type & LOCK_MAND) || (sys_fl->fl_type & LOCK_MAND))
		return 0;

	return locks_conflict(caller_fl, sys_fl);
}

void
posix_test_lock(struct file *filp, struct file_lock *fl)
{
	struct file_lock *cfl;
	struct inode *inode = file_inode(filp);

	spin_lock(&inode->i_lock);
	for (cfl = file_inode(filp)->i_flock; cfl; cfl = cfl->fl_next) {
		if (!IS_POSIX(cfl))
			continue;
		if (posix_locks_conflict(fl, cfl))
			break;
	}
	if (cfl) {
		__locks_copy_lock(fl, cfl);
		if (cfl->fl_nspid)
			fl->fl_pid = pid_vnr(cfl->fl_nspid);
	} else
		fl->fl_type = F_UNLCK;
	spin_unlock(&inode->i_lock);
}
EXPORT_SYMBOL(posix_test_lock);
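
/*
 * Illustrative sketch (not part of the build): probing for a conflict
 * from kernel code. The same file_lock carries the query in and the
 * answer out; fl_type is set to F_UNLCK when nothing conflicts. The
 * values below are hypothetical.
 *
 *	struct file_lock fl;
 *
 *	locks_init_lock(&fl);
 *	fl.fl_owner = current->files;
 *	fl.fl_pid = current->tgid;
 *	fl.fl_flags = FL_POSIX;
 *	fl.fl_type = F_WRLCK;
 *	fl.fl_start = 0;
 *	fl.fl_end = OFFSET_MAX;
 *	posix_test_lock(filp, &fl);
 *	if (fl.fl_type != F_UNLCK)
 *		...  some other owner holds a conflicting lock  ...
 */
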
/*
 * Deadlock detection:
 *
 * We attempt to detect deadlocks that are due purely to posix file
 * locks.
 *
 * We assume that a task can be waiting for at most one lock at a time.
 * So for any acquired lock, the process holding that lock may be
 * waiting on at most one other lock. That lock in turn may be held by
 * someone waiting for at most one other lock. Given a requested lock
 * caller_fl which is about to wait for a conflicting lock block_fl, we
 * follow this chain of waiters to ensure we are not about to create a
 * cycle.
 *
 * Since we do this before we ever put a process to sleep on a lock, we
 * are ensured that there is never a cycle; that is what guarantees that
 * the while() loop in posix_locks_deadlock() eventually completes.
 *
 * Note: the above assumption may not be true when handling lock
 * requests from a broken NFS client. It may also fail in the presence
 * of tasks (such as posix threads) sharing the same open file table.
 * To handle those cases, we just bail out after a few iterations.
 *
 * For FL_OFDLCK locks, the owner is the filp, not the files_struct.
 * Because the owner is not even nominally tied to a thread of
 * execution, the deadlock detection below can't reasonably work well. Just
 * skip it for those.
 *
 * In principle, we could do a more limited deadlock detection on FL_OFDLCK
 * locks that just checks for the case where two tasks are attempting to
 * upgrade from read to write locks on the same inode.
 */

#define MAX_DEADLK_ITERATIONS 10

/* Find a lock that the owner of the given block_fl is blocking on. */
static struct file_lock *what_owner_is_waiting_for(struct file_lock *block_fl)
{
	struct file_lock *fl;

	hash_for_each_possible(blocked_hash, fl, fl_link, posix_owner_key(block_fl)) {
		if (posix_same_owner(fl, block_fl))
			return fl->fl_next;
	}
	return NULL;
}

/* Must be called with the blocked_lock_lock held! */
static int posix_locks_deadlock(struct file_lock *caller_fl,
				struct file_lock *block_fl)
{
	int i = 0;

	/*
	 * This deadlock detector can't reasonably detect deadlocks with
	 * FL_OFDLCK locks, since they aren't owned by a process, per-se.
	 */
	if (IS_OFDLCK(caller_fl))
		return 0;

	while ((block_fl = what_owner_is_waiting_for(block_fl))) {
		if (i++ > MAX_DEADLK_ITERATIONS)
			return 0;
		if (posix_same_owner(caller_fl, block_fl))
			return 1;
	}
	return 0;
}
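
/*
 * Worked example (illustrative only): process A holds a lock on byte 0
 * and is blocked waiting for byte 1, which process B holds. If B now
 * requests byte 0 with F_SETLKW, the walk above goes from A's held
 * lock to the lock A is waiting on, finds that its owner is B (the
 * caller itself), and returns 1, so B gets -EDEADLK instead of
 * sleeping forever.
 */
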
/* Try to create a FLOCK lock on filp. We always insert new FLOCK locks
 * after any leases, but before any posix locks.
 *
 * Note that if called with an FL_EXISTS argument, the caller may determine
 * whether or not a lock was successfully freed by testing the return
 * value for -ENOENT.
 */
static int flock_lock_file(struct file *filp, struct file_lock *request)
{
	struct file_lock *new_fl = NULL;
	struct file_lock **before;
	struct inode *inode = file_inode(filp);
	int error = 0;
	int found = 0;
	LIST_HEAD(dispose);

	if (!(request->fl_flags & FL_ACCESS) && (request->fl_type != F_UNLCK)) {
		new_fl = locks_alloc_lock();
		if (!new_fl)
			return -ENOMEM;
	}

	spin_lock(&inode->i_lock);
	if (request->fl_flags & FL_ACCESS)
		goto find_conflict;

	for_each_lock(inode, before) {
		struct file_lock *fl = *before;
		if (IS_POSIX(fl))
			break;
		if (IS_LEASE(fl))
			continue;
		if (filp != fl->fl_file)
			continue;
		if (request->fl_type == fl->fl_type)
			goto out;
		found = 1;
		locks_delete_lock(before, &dispose);
		break;
	}

	if (request->fl_type == F_UNLCK) {
		if ((request->fl_flags & FL_EXISTS) && !found)
			error = -ENOENT;
		goto out;
	}

	/*
	 * If a higher-priority process was blocked on the old file lock,
	 * give it the opportunity to lock the file.
	 */
	if (found) {
		spin_unlock(&inode->i_lock);
		cond_resched();
		spin_lock(&inode->i_lock);
	}

find_conflict:
	for_each_lock(inode, before) {
		struct file_lock *fl = *before;
		if (IS_POSIX(fl))
			break;
		if (IS_LEASE(fl))
			continue;
		if (!flock_locks_conflict(request, fl))
			continue;
		error = -EAGAIN;
		if (!(request->fl_flags & FL_SLEEP))
			goto out;
		error = FILE_LOCK_DEFERRED;
		locks_insert_block(fl, request);
		goto out;
	}
	if (request->fl_flags & FL_ACCESS)
		goto out;
	locks_copy_lock(new_fl, request);
	locks_insert_lock(before, new_fl);
	new_fl = NULL;
	error = 0;

out:
	spin_unlock(&inode->i_lock);
	if (new_fl)
		locks_free_lock(new_fl);
	locks_dispose_list(&dispose);
	return error;
}

static int __posix_lock_file(struct inode *inode, struct file_lock *request, struct file_lock *conflock)
{
	struct file_lock *fl;
	struct file_lock *new_fl = NULL;
	struct file_lock *new_fl2 = NULL;
	struct file_lock *left = NULL;
	struct file_lock *right = NULL;
	struct file_lock **before;
	int error;
	bool added = false;
	LIST_HEAD(dispose);

	/*
	 * We may need two file_lock structures for this operation,
	 * so we get them in advance to avoid races.
	 *
	 * In some cases we can be sure that no new locks will be needed.
	 */
	if (!(request->fl_flags & FL_ACCESS) &&
	    (request->fl_type != F_UNLCK ||
	     request->fl_start != 0 || request->fl_end != OFFSET_MAX)) {
		new_fl = locks_alloc_lock();
		new_fl2 = locks_alloc_lock();
	}

	spin_lock(&inode->i_lock);
	/*
	 * New lock request. Walk all POSIX locks and look for conflicts. If
	 * there are any, either return error or put the request on the
	 * blocker's list of waiters and the global blocked_hash.
	 */
	if (request->fl_type != F_UNLCK) {
		for_each_lock(inode, before) {
			fl = *before;
			if (!IS_POSIX(fl))
				continue;
			if (!posix_locks_conflict(request, fl))
				continue;
			if (conflock)
				__locks_copy_lock(conflock, fl);
			error = -EAGAIN;
			if (!(request->fl_flags & FL_SLEEP))
				goto out;
			/*
			 * Deadlock detection and insertion into the blocked
			 * locks list must be done while holding the same lock!
			 */
			error = -EDEADLK;
			spin_lock(&blocked_lock_lock);
			if (likely(!posix_locks_deadlock(request, fl))) {
				error = FILE_LOCK_DEFERRED;
				__locks_insert_block(fl, request);
			}
			spin_unlock(&blocked_lock_lock);
			goto out;
		}
	}

	/* If we're just looking for a conflict, we're done. */
	error = 0;
	if (request->fl_flags & FL_ACCESS)
		goto out;

	/*
	 * Find the first old lock with the same owner as the new lock.
	 */
	before = &inode->i_flock;

	/* First skip locks owned by other processes. */
	while ((fl = *before) && (!IS_POSIX(fl) ||
				  !posix_same_owner(request, fl))) {
		before = &fl->fl_next;
	}

	/* Process locks with this owner. */
	while ((fl = *before) && posix_same_owner(request, fl)) {
		/* Detect adjacent or overlapping regions (if same lock type) */
		if (request->fl_type == fl->fl_type) {
			/* In all comparisons of start vs end, use
			 * "start - 1" rather than "end + 1". If end
			 * is OFFSET_MAX, end + 1 will become negative.
			 */
			if (fl->fl_end < request->fl_start - 1)
				goto next_lock;
			/* If the next lock in the list has entirely bigger
			 * addresses than the new one, insert the lock here.
			 */
			if (fl->fl_start - 1 > request->fl_end)
				break;

			/* If we come here, the new and old lock are of the
			 * same type and adjacent or overlapping. Make one
			 * lock yielding from the lower start address of both
			 * locks to the higher end address.
			 */
			if (fl->fl_start > request->fl_start)
				fl->fl_start = request->fl_start;
			else
				request->fl_start = fl->fl_start;
			if (fl->fl_end < request->fl_end)
				fl->fl_end = request->fl_end;
			else
				request->fl_end = fl->fl_end;
			if (added) {
				locks_delete_lock(before, &dispose);
				continue;
			}
			request = fl;
			added = true;
		} else {
			/* Processing for different lock types is a bit
			 * more complex.
			 */
			if (fl->fl_end < request->fl_start)
				goto next_lock;
			if (fl->fl_start > request->fl_end)
				break;
			if (request->fl_type == F_UNLCK)
				added = true;
			if (fl->fl_start < request->fl_start)
				left = fl;
			/* If the next lock in the list has a higher end
			 * address than the new one, insert the new one here.
			 */
			if (fl->fl_end > request->fl_end) {
				right = fl;
				break;
			}
			if (fl->fl_start >= request->fl_start) {
				/* The new lock completely replaces an old
				 * one (This may happen several times).
				 */
				if (added) {
					locks_delete_lock(before, &dispose);
					continue;
				}
				/*
				 * Replace the old lock with new_fl, and
				 * remove the old one. It's safe to do the
				 * insert here since we know that we won't be
				 * using new_fl later, and that the lock is
				 * just replacing an existing lock.
				 */
				error = -ENOLCK;
				if (!new_fl)
					goto out;
				locks_copy_lock(new_fl, request);
				request = new_fl;
				new_fl = NULL;
				locks_delete_lock(before, &dispose);
				locks_insert_lock(before, request);
				added = true;
			}
		}
		/* Go on to next lock. */
next_lock:
		before = &fl->fl_next;
	}

	/*
	 * The above code only modifies existing locks in case of merging or
	 * replacing. If new lock(s) need to be inserted all modifications are
	 * done below this, so it's safe yet to bail out.
	 */
	error = -ENOLCK; /* "no luck" */
	if (right && left == right && !new_fl2)
		goto out;

	error = 0;
	if (!added) {
		if (request->fl_type == F_UNLCK) {
			if (request->fl_flags & FL_EXISTS)
				error = -ENOENT;
			goto out;
		}

		if (!new_fl) {
			error = -ENOLCK;
			goto out;
		}
		locks_copy_lock(new_fl, request);
		locks_insert_lock(before, new_fl);
		new_fl = NULL;
	}
	if (right) {
		if (left == right) {
			/* The new lock breaks the old one in two pieces,
			 * so we have to use the second new lock.
			 */
			left = new_fl2;
			new_fl2 = NULL;
			locks_copy_lock(left, right);
			locks_insert_lock(before, left);
		}
		right->fl_start = request->fl_end + 1;
		locks_wake_up_blocks(right);
	}
	if (left) {
		left->fl_end = request->fl_start - 1;
		locks_wake_up_blocks(left);
	}
out:
	spin_unlock(&inode->i_lock);
	/*
	 * Free any unused locks.
	 */
	if (new_fl)
		locks_free_lock(new_fl);
	if (new_fl2)
		locks_free_lock(new_fl2);
	locks_dispose_list(&dispose);
	return error;
}
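
/*
 * Worked example for the splitting logic above (illustrative only): if
 * an owner holds a write lock on bytes [0,99] and then unlocks [40,59],
 * no merge is possible. A copy of the old lock (taken from the
 * preallocated new_fl2) is inserted to keep the low piece [0,39], and
 * the original lock is trimmed to become the high piece [60,99]. This
 * is why a single request may need two spare file_lock structures, and
 * why the code above bails out with -ENOLCK when new_fl2 is needed but
 * unavailable.
 */
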
/**
 * posix_lock_file - Apply a POSIX-style lock to a file
 * @filp: The file to apply the lock to
 * @fl: The lock to be applied
 * @conflock: Place to return a copy of the conflicting lock, if found.
 *
 * Add a POSIX style lock to a file.
 * We merge adjacent & overlapping locks whenever possible.
 * POSIX locks are sorted by owner task, then by starting address
 *
 * Note that if called with an FL_EXISTS argument, the caller may determine
 * whether or not a lock was successfully freed by testing the return
 * value for -ENOENT.
 */
int posix_lock_file(struct file *filp, struct file_lock *fl,
		    struct file_lock *conflock)
{
	return __posix_lock_file(file_inode(filp), fl, conflock);
}
EXPORT_SYMBOL(posix_lock_file);

/**
 * posix_lock_file_wait - Apply a POSIX-style lock to a file
 * @filp: The file to apply the lock to
 * @fl: The lock to be applied
 *
 * Add a POSIX style lock to a file.
 * We merge adjacent & overlapping locks whenever possible.
 * POSIX locks are sorted by owner task, then by starting address
 */
int posix_lock_file_wait(struct file *filp, struct file_lock *fl)
{
	int error;

	might_sleep();
	for (;;) {
		error = posix_lock_file(filp, fl, NULL);
		if (error != FILE_LOCK_DEFERRED)
			break;
		error = wait_event_interruptible(fl->fl_wait, !fl->fl_next);
		if (!error)
			continue;

		locks_delete_block(fl);
		break;
	}
	return error;
}
EXPORT_SYMBOL(posix_lock_file_wait);

/**
 * locks_mandatory_locked - Check for an active lock
 * @file: the file to check
 *
 * Searches the inode's list of locks to find any POSIX locks which conflict.
 * This function is called from locks_verify_locked() only.
 */
int locks_mandatory_locked(struct file *file)
{
	struct inode *inode = file_inode(file);
	struct file_lock *fl;

	/*
	 * Search the lock list for this inode for any POSIX locks.
	 */
	spin_lock(&inode->i_lock);
	for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next) {
		if (!IS_POSIX(fl))
			continue;
		if (fl->fl_owner != current->files &&
		    fl->fl_owner != file)
			break;
	}
	spin_unlock(&inode->i_lock);
	return fl ? -EAGAIN : 0;
}

/**
 * locks_mandatory_area - Check for a conflicting lock
 * @read_write: %FLOCK_VERIFY_WRITE for exclusive access, %FLOCK_VERIFY_READ
 *		for shared
 * @inode:      the file to check
 * @filp:       how the file was opened (if it was)
 * @offset:     start of area to check
 * @count:      length of area to check
 *
 * Searches the inode's list of locks to find any POSIX locks which conflict.
 * This function is called from rw_verify_area() and
 * locks_verify_truncate().
 */
int locks_mandatory_area(int read_write, struct inode *inode,
			 struct file *filp, loff_t offset,
			 size_t count)
{
	struct file_lock fl;
	int error;
	bool sleep = false;

	locks_init_lock(&fl);
	fl.fl_pid = current->tgid;
	fl.fl_file = filp;
	fl.fl_flags = FL_POSIX | FL_ACCESS;
	if (filp && !(filp->f_flags & O_NONBLOCK))
		sleep = true;
	fl.fl_type = (read_write == FLOCK_VERIFY_WRITE) ? F_WRLCK : F_RDLCK;
	fl.fl_start = offset;
	fl.fl_end = offset + count - 1;

	for (;;) {
		if (filp) {
			fl.fl_owner = filp;
			fl.fl_flags &= ~FL_SLEEP;
			error = __posix_lock_file(inode, &fl, NULL);
			if (!error)
				break;
		}

		if (sleep)
			fl.fl_flags |= FL_SLEEP;
		fl.fl_owner = current->files;
		error = __posix_lock_file(inode, &fl, NULL);
		if (error != FILE_LOCK_DEFERRED)
			break;
		error = wait_event_interruptible(fl.fl_wait, !fl.fl_next);
		if (!error) {
			/*
			 * If we've been sleeping someone might have
			 * changed the permissions behind our back.
			 */
			if (__mandatory_lock(inode))
				continue;
		}

		locks_delete_block(&fl);
		break;
	}

	return error;
}
EXPORT_SYMBOL(locks_mandatory_area);

static void lease_clear_pending(struct file_lock *fl, int arg)
{
	switch (arg) {
	case F_UNLCK:
		fl->fl_flags &= ~FL_UNLOCK_PENDING;
		/* fall through */
	case F_RDLCK:
		fl->fl_flags &= ~FL_DOWNGRADE_PENDING;
	}
}

/* We already had a lease on this file; just change its type */
int lease_modify(struct file_lock **before, int arg)
{
	struct file_lock *fl = *before;
	int error = assign_type(fl, arg);

	if (error)
		return error;
	lease_clear_pending(fl, arg);
	locks_wake_up_blocks(fl);
	if (arg == F_UNLCK) {
		struct file *filp = fl->fl_file;

		f_delown(filp);
		filp->f_owner.signum = 0;
		fasync_helper(0, fl->fl_file, 0, &fl->fl_fasync);
		if (fl->fl_fasync != NULL) {
			printk(KERN_ERR "locks_delete_lock: fasync == %p\n", fl->fl_fasync);
			fl->fl_fasync = NULL;
		}
		locks_delete_lock(before, NULL);
	}
	return 0;
}
EXPORT_SYMBOL(lease_modify);

static bool past_time(unsigned long then)
{
	if (!then)
		/* 0 is a special value meaning "this never expires": */
		return false;
	return time_after(jiffies, then);
}

static void time_out_leases(struct inode *inode)
{
	struct file_lock **before;
	struct file_lock *fl;

	before = &inode->i_flock;
	while ((fl = *before) && IS_LEASE(fl) && lease_breaking(fl)) {
		trace_time_out_leases(inode, fl);
		if (past_time(fl->fl_downgrade_time))
			lease_modify(before, F_RDLCK);
		if (past_time(fl->fl_break_time))
			lease_modify(before, F_UNLCK);
		if (fl == *before)	/* lease_modify may have freed fl */
			before = &fl->fl_next;
	}
}

static bool leases_conflict(struct file_lock *lease, struct file_lock *breaker)
{
	if ((breaker->fl_flags & FL_DELEG) && (lease->fl_flags & FL_LEASE))
		return false;
	return locks_conflict(breaker, lease);
}

/**
 * __break_lease - revoke all outstanding leases on file
 * @inode: the inode of the file to return
 * @mode: O_RDONLY: break only write leases; O_WRONLY or O_RDWR:
 *	  break all leases
 * @type: FL_LEASE: break leases and delegations; FL_DELEG: break
 *	  only delegations
 *
 * break_lease (inlined for speed) has checked there already is at least
 * some kind of lock (maybe a lease) on this file. Leases are broken on
 * a call to open() or truncate(). This function can sleep unless you
 * specified %O_NONBLOCK to your open().
 */
int __break_lease(struct inode *inode, unsigned int mode, unsigned int type)
{
	int error = 0;
	struct file_lock *new_fl, *flock;
	struct file_lock *fl;
	unsigned long break_time;
	int i_have_this_lease = 0;
	bool lease_conflict = false;
	int want_write = (mode & O_ACCMODE) != O_RDONLY;

	new_fl = lease_alloc(NULL, want_write ? F_WRLCK : F_RDLCK);
	if (IS_ERR(new_fl))
		return PTR_ERR(new_fl);
	new_fl->fl_flags = type;

	spin_lock(&inode->i_lock);

	time_out_leases(inode);

	flock = inode->i_flock;
	if ((flock == NULL) || !IS_LEASE(flock))
		goto out;

	for (fl = flock; fl && IS_LEASE(fl); fl = fl->fl_next) {
		if (leases_conflict(fl, new_fl)) {
			lease_conflict = true;
			if (fl->fl_owner == current->files)
				i_have_this_lease = 1;
		}
	}
	if (!lease_conflict)
		goto out;

	break_time = 0;
	if (lease_break_time > 0) {
		break_time = jiffies + lease_break_time * HZ;
		if (break_time == 0)
			break_time++;	/* so that 0 means no break time */
	}

	for (fl = flock; fl && IS_LEASE(fl); fl = fl->fl_next) {
		if (!leases_conflict(fl, new_fl))
			continue;
		if (want_write) {
			if (fl->fl_flags & FL_UNLOCK_PENDING)
				continue;
			fl->fl_flags |= FL_UNLOCK_PENDING;
			fl->fl_break_time = break_time;
		} else {
			if (lease_breaking(flock))
				continue;
			fl->fl_flags |= FL_DOWNGRADE_PENDING;
			fl->fl_downgrade_time = break_time;
		}
		fl->fl_lmops->lm_break(fl);
	}

	if (i_have_this_lease || (mode & O_NONBLOCK)) {
		trace_break_lease_noblock(inode, new_fl);
		error = -EWOULDBLOCK;
		goto out;
	}

restart:
	break_time = flock->fl_break_time;
	if (break_time != 0)
		break_time -= jiffies;
	if (break_time == 0)
		break_time++;
	locks_insert_block(flock, new_fl);
	trace_break_lease_block(inode, new_fl);
	spin_unlock(&inode->i_lock);
	error = wait_event_interruptible_timeout(new_fl->fl_wait,
						 !new_fl->fl_next, break_time);
	spin_lock(&inode->i_lock);
	trace_break_lease_unblock(inode, new_fl);
	locks_delete_block(new_fl);
	if (error >= 0) {
		if (error == 0)
			time_out_leases(inode);
		/*
		 * Wait for the next conflicting lease that has not been
		 * broken yet
		 */
		for (flock = inode->i_flock; flock && IS_LEASE(flock);
		     flock = flock->fl_next) {
			if (leases_conflict(new_fl, flock))
				goto restart;
		}
		error = 0;
	}

out:
	spin_unlock(&inode->i_lock);
	locks_free_lock(new_fl);
	return error;
}
EXPORT_SYMBOL(__break_lease);
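
/*
 * Example timeline (illustrative only, with the default
 * lease_break_time of 45 seconds): process A holds an F_WRLCK lease on
 * a file and process B calls open() on it. break_lease() notices the
 * conflict and ends up in __break_lease() above: A is notified via
 * lm_break (a SIGIO by default) and has up to 45 seconds to flush its
 * cached state and release (or, for a read-only open, downgrade) the
 * lease. B's open() sleeps until the lease is gone or times out,
 * unless B passed O_NONBLOCK, in which case it fails immediately with
 * -EWOULDBLOCK.
 */
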
/**
 * lease_get_mtime - get the last modified time of an inode
 * @inode: the inode
 * @time:  pointer to a timespec which will contain the last modified time
 *
 * This is to force NFS clients to flush their caches for files with
 * exclusive leases. The justification is that if someone has an
 * exclusive lease, then they could be modifying it.
 */
void lease_get_mtime(struct inode *inode, struct timespec *time)
{
	struct file_lock *flock = inode->i_flock;

	if (flock && IS_LEASE(flock) && (flock->fl_type == F_WRLCK))
		*time = current_fs_time(inode->i_sb);
	else
		*time = inode->i_mtime;
}
EXPORT_SYMBOL(lease_get_mtime);

/**
 * fcntl_getlease - Enquire what lease is currently active
 * @filp: the file
 *
 * The value returned by this function will be one of
 * (if no lease break is pending):
 *
 * %F_RDLCK to indicate a shared lease is held.
 *
 * %F_WRLCK to indicate an exclusive lease is held.
 *
 * %F_UNLCK to indicate no lease is held.
 *
 * (if a lease break is pending):
 *
 * %F_RDLCK to indicate an exclusive lease needs to be
 * changed to a shared lease (or removed).
 *
 * %F_UNLCK to indicate the lease needs to be removed.
 *
 * XXX: sfr & willy disagree over whether F_INPROGRESS
 * should be returned to userspace.
 */
int fcntl_getlease(struct file *filp)
{
	struct file_lock *fl;
	struct inode *inode = file_inode(filp);
	int type = F_UNLCK;

	spin_lock(&inode->i_lock);
	time_out_leases(file_inode(filp));
	for (fl = file_inode(filp)->i_flock; fl && IS_LEASE(fl);
	     fl = fl->fl_next) {
		if (fl->fl_file == filp) {
			type = target_leasetype(fl);
			break;
		}
	}
	spin_unlock(&inode->i_lock);
	return type;
}

/**
 * check_conflicting_open - see if the given dentry points to a file that has
 *			    an existing open that would conflict with the
 *			    desired lease.
 * @dentry:	dentry to check
 * @arg:	type of lease that we're trying to acquire
 *
 * Check to see if there's an existing open fd on this file that would
 * conflict with the lease we're trying to set.
 */
static int
check_conflicting_open(const struct dentry *dentry, const long arg)
{
	int ret = 0;
	struct inode *inode = dentry->d_inode;

	if ((arg == F_RDLCK) && (atomic_read(&inode->i_writecount) > 0))
		return -EAGAIN;

	if ((arg == F_WRLCK) && ((d_count(dentry) > 1) ||
				 (atomic_read(&inode->i_count) > 1)))
		ret = -EAGAIN;

	return ret;
}

static int generic_add_lease(struct file *filp, long arg, struct file_lock **flp)
{
	struct file_lock *fl, **before, **my_before = NULL, *lease;
	struct dentry *dentry = filp->f_path.dentry;
	struct inode *inode = dentry->d_inode;
	bool is_deleg = (*flp)->fl_flags & FL_DELEG;
	int error;

	lease = *flp;
	trace_generic_add_lease(inode, lease);

	/*
	 * In the delegation case we need mutual exclusion with
	 * a number of operations that take the i_mutex. We trylock
	 * because delegations are an optional optimization, and if
	 * there's some chance of a conflict--we'd rather not
	 * bother, maybe that's a sign this just isn't a good file to
	 * hand out a delegation on.
	 */
	if (is_deleg && !mutex_trylock(&inode->i_mutex))
		return -EAGAIN;

	if (is_deleg && arg == F_WRLCK) {
		/* Write delegations are not currently supported: */
		mutex_unlock(&inode->i_mutex);
		WARN_ON_ONCE(1);
		return -EINVAL;
	}

	error = check_conflicting_open(dentry, arg);
	if (error)
		goto out;

	/*
	 * At this point, we know that if there is an exclusive
	 * lease on this file, then we hold it on this filp
	 * (otherwise our open of this file would have blocked).
	 * And if we are trying to acquire an exclusive lease,
	 * then the file is not open by anyone (including us)
	 * except for this filp.
	 */
	error = -EAGAIN;
	for (before = &inode->i_flock;
	     ((fl = *before) != NULL) && IS_LEASE(fl);
	     before = &fl->fl_next) {
		if (fl->fl_file == filp) {
			my_before = before;
			continue;
		}
		/*
		 * No exclusive leases if someone else has a lease on
		 * this file:
		 */
		if (arg == F_WRLCK)
			goto out;
		/*
		 * Modifying our existing lease is OK, but not getting a
		 * new lease if someone else is opening for write:
		 */
		if (fl->fl_flags & FL_UNLOCK_PENDING)
			goto out;
	}

	if (my_before != NULL) {
		error = lease->fl_lmops->lm_change(my_before, arg);
		if (!error)
			*flp = *my_before;
		goto out;
	}

	error = -EINVAL;
	if (!leases_enable)
		goto out;

	locks_insert_lock(before, lease);
	/*
	 * The check in break_lease() is lockless. It's possible for another
	 * open to race in after we did the earlier check for a conflicting
	 * open but before the lease was inserted. Check again for a
	 * conflicting open and cancel the lease if there is one.
	 *
	 * We also add a barrier here to ensure that the insertion of the lock
	 * precedes these checks.
	 */
	smp_mb();
	error = check_conflicting_open(dentry, arg);
	if (error)
		locks_unlink_lock(before);
out:
	if (is_deleg)
		mutex_unlock(&inode->i_mutex);
	return error;
}

static int generic_delete_lease(struct file *filp, struct file_lock **flp)
{
	struct file_lock *fl, **before;
	struct dentry *dentry = filp->f_path.dentry;
	struct inode *inode = dentry->d_inode;

	trace_generic_delete_lease(inode, *flp);

	for (before = &inode->i_flock;
	     ((fl = *before) != NULL) && IS_LEASE(fl);
	     before = &fl->fl_next) {
		if (fl->fl_file != filp)
			continue;
		return (*flp)->fl_lmops->lm_change(before, F_UNLCK);
	}
	return -EAGAIN;
}

/**
 * generic_setlease - sets a lease on an open file
 * @filp: file pointer
 * @arg: type of lease to obtain
 * @flp: input - file_lock to use, output - file_lock inserted
 *
 * The (input) flp->fl_lmops->lm_break function is required
 * by break_lease().
 *
 * Called with inode->i_lock held.
 */
int generic_setlease(struct file *filp, long arg, struct file_lock **flp)
{
	struct dentry *dentry = filp->f_path.dentry;
	struct inode *inode = dentry->d_inode;
	int error;

	if ((!uid_eq(current_fsuid(), inode->i_uid)) && !capable(CAP_LEASE))
		return -EACCES;
	if (!S_ISREG(inode->i_mode))
		return -EINVAL;
	error = security_file_lock(filp, arg);
	if (error)
		return error;

	time_out_leases(inode);

	BUG_ON(!(*flp)->fl_lmops->lm_break);

	switch (arg) {
	case F_UNLCK:
		return generic_delete_lease(filp, flp);
	case F_RDLCK:
	case F_WRLCK:
		return generic_add_lease(filp, arg, flp);
	default:
		return -EINVAL;
	}
}
EXPORT_SYMBOL(generic_setlease);

static int __vfs_setlease(struct file *filp, long arg, struct file_lock **lease)
{
	if (filp->f_op->setlease)
		return filp->f_op->setlease(filp, arg, lease);
	else
		return generic_setlease(filp, arg, lease);
}

/**
 * vfs_setlease - sets a lease on an open file
 * @filp: file pointer
 * @arg: type of lease to obtain
 * @lease: file_lock to use
 *
 * Call this to establish a lease on the file.
 * The (*lease)->fl_lmops->lm_break operation must be set; if not,
 * break_lease will oops!
 *
 * This will call the filesystem's setlease file method, if
 * defined. Note that there is no getlease method; instead, the
 * filesystem setlease method should call back to setlease() to
 * add a lease to the inode's lease list, where fcntl_getlease() can
 * find it. Since fcntl_getlease() only reports whether the current
 * task holds a lease, a cluster filesystem need only do this for
 * leases held by processes on this node.
 *
 * There is also no break_lease method; filesystems that
 * handle their own leases should break leases themselves from the
 * filesystem's open, create, and (on truncate) setattr methods.
 *
 * Warning: the only current setlease methods exist only to disable
 * leases in certain cases. More vfs changes may be required to
 * allow a full filesystem lease implementation.
 */
int vfs_setlease(struct file *filp, long arg, struct file_lock **lease)
{
	struct inode *inode = file_inode(filp);
	int error;

	spin_lock(&inode->i_lock);
	error = __vfs_setlease(filp, arg, lease);
	spin_unlock(&inode->i_lock);

	return error;
}
EXPORT_SYMBOL_GPL(vfs_setlease);

static int do_fcntl_delete_lease(struct file *filp)
{
	struct file_lock fl, *flp = &fl;

	lease_init(filp, F_UNLCK, flp);

	return vfs_setlease(filp, F_UNLCK, &flp);
}

static int do_fcntl_add_lease(unsigned int fd, struct file *filp, long arg)
{
	struct file_lock *fl, *ret;
	struct inode *inode = file_inode(filp);
	struct fasync_struct *new;
	int error;

	fl = lease_alloc(filp, arg);
	if (IS_ERR(fl))
		return PTR_ERR(fl);

	new = fasync_alloc();
	if (!new) {
		locks_free_lock(fl);
		return -ENOMEM;
	}
	ret = fl;
	spin_lock(&inode->i_lock);
	error = __vfs_setlease(filp, arg, &ret);
	if (error)
		goto out_unlock;
	if (ret == fl)
		fl = NULL;

	/*
	 * fasync_insert_entry() returns the old entry if any. If there was no
	 * old entry, then it used 'new' and inserted it into the fasync list.
	 * Clear new so that we don't release it here.
	 */
	if (!fasync_insert_entry(fd, filp, &ret->fl_fasync, new))
		new = NULL;

	error = __f_setown(filp, task_pid(current), PIDTYPE_PID, 0);
out_unlock:
	spin_unlock(&inode->i_lock);
	if (fl)
		locks_free_lock(fl);
	if (new)
		fasync_free(new);
	return error;
}

/**
 * fcntl_setlease - sets a lease on an open file
 * @fd: open file descriptor
 * @filp: file pointer
 * @arg: type of lease to obtain
 *
 * Call this fcntl to establish a lease on the file.
 * Note that you also need to call %F_SETSIG to
 * receive a signal when the lease is broken.
 */
int fcntl_setlease(unsigned int fd, struct file *filp, long arg)
{
	if (arg == F_UNLCK)
		return do_fcntl_delete_lease(filp);
	return do_fcntl_add_lease(fd, filp, arg);
}

/**
 * flock_lock_file_wait - Apply a FLOCK-style lock to a file
 * @filp: The file to apply the lock to
 * @fl: The lock to be applied
 *
 * Add a FLOCK style lock to a file.
 */
int flock_lock_file_wait(struct file *filp, struct file_lock *fl)
{
	int error;

	might_sleep();
	for (;;) {
		error = flock_lock_file(filp, fl);
		if (error != FILE_LOCK_DEFERRED)
			break;
		error = wait_event_interruptible(fl->fl_wait, !fl->fl_next);
		if (!error)
			continue;

		locks_delete_block(fl);
		break;
	}
	return error;
}
EXPORT_SYMBOL(flock_lock_file_wait);

/**
 * sys_flock - flock() system call.
 * @fd: the file descriptor to lock.
 * @cmd: the type of lock to apply.
 *
 * Apply a %FL_FLOCK style lock to an open file descriptor.
 * The @cmd can be one of:
 *
 * %LOCK_SH -- a shared lock.
 *
 * %LOCK_EX -- an exclusive lock.
 *
 * %LOCK_UN -- remove an existing lock.
 *
 * %LOCK_MAND -- a "mandatory" flock. This exists to emulate Windows Share Modes.
 *
 * %LOCK_MAND can be combined with %LOCK_READ or %LOCK_WRITE to allow other
 * processes read and write access respectively.
 */
SYSCALL_DEFINE2(flock, unsigned int, fd, unsigned int, cmd)
{
	struct fd f = fdget(fd);
	struct file_lock *lock;
	int can_sleep, unlock;
	int error;

	error = -EBADF;
	if (!f.file)
		goto out;

	can_sleep = !(cmd & LOCK_NB);
	cmd &= ~LOCK_NB;
	unlock = (cmd == LOCK_UN);

	if (!unlock && !(cmd & LOCK_MAND) &&
	    !(f.file->f_mode & (FMODE_READ|FMODE_WRITE)))
		goto out_putf;

	error = flock_make_lock(f.file, &lock, cmd);
	if (error)
		goto out_putf;
	if (can_sleep)
		lock->fl_flags |= FL_SLEEP;

	error = security_file_lock(f.file, lock->fl_type);
	if (error)
		goto out_free;

	if (f.file->f_op->flock)
		error = f.file->f_op->flock(f.file,
					    (can_sleep) ? F_SETLKW : F_SETLK,
					    lock);
	else
		error = flock_lock_file_wait(f.file, lock);

out_free:
	locks_free_lock(lock);

out_putf:
	fdput(f);
out:
	return error;
}
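
/*
 * Userspace example (illustrative only): taking and dropping an
 * exclusive flock() lock without blocking, on an already-open fd.
 *
 *	if (flock(fd, LOCK_EX | LOCK_NB) == -1) {
 *		if (errno == EWOULDBLOCK)
 *			...  someone else holds the lock  ...
 *	} else {
 *		...  critical section  ...
 *		flock(fd, LOCK_UN);
 *	}
 */
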
1896 /**
1897  * vfs_test_lock - test file byte range lock
1898  * @filp: The file to test lock for
1899  * @fl: The lock to test; also used to hold result
1900  *
1901  * Returns -ERRNO on failure.  Indicates presence of conflicting lock by
1902  * setting fl->fl_type to something other than F_UNLCK.
1903  */
1904 int vfs_test_lock(struct file *filp, struct file_lock *fl)
1905 {
1906 	if (filp->f_op->lock)
1907 		return filp->f_op->lock(filp, F_GETLK, fl);
1908 	posix_test_lock(filp, fl);
1909 	return 0;
1910 }
1911 EXPORT_SYMBOL_GPL(vfs_test_lock);
1912
1913 static int posix_lock_to_flock(struct flock *flock, struct file_lock *fl)
1914 {
1915 	flock->l_pid = IS_OFDLCK(fl) ? -1 : fl->fl_pid;
1916 #if BITS_PER_LONG == 32
1917 	/*
1918 	 * Make sure we can represent the posix lock via
1919 	 * legacy 32bit flock.
1920 	 */
1921 	if (fl->fl_start > OFFT_OFFSET_MAX)
1922 		return -EOVERFLOW;
1923 	if (fl->fl_end != OFFSET_MAX && fl->fl_end > OFFT_OFFSET_MAX)
1924 		return -EOVERFLOW;
1925 #endif
1926 	flock->l_start = fl->fl_start;
1927 	flock->l_len = fl->fl_end == OFFSET_MAX ? 0 :
1928 			fl->fl_end - fl->fl_start + 1;
1929 	flock->l_whence = 0;
1930 	flock->l_type = fl->fl_type;
1931 	return 0;
1932 }
1933
1934 #if BITS_PER_LONG == 32
1935 static void posix_lock_to_flock64(struct flock64 *flock, struct file_lock *fl)
1936 {
1937 	flock->l_pid = IS_OFDLCK(fl) ? -1 : fl->fl_pid;
1938 	flock->l_start = fl->fl_start;
1939 	flock->l_len = fl->fl_end == OFFSET_MAX ? 0 :
1940 			fl->fl_end - fl->fl_start + 1;
1941 	flock->l_whence = 0;
1942 	flock->l_type = fl->fl_type;
1943 }
1944 #endif
1945
1946 /* Report the first existing lock that would conflict with l.
1947  * This implements the F_GETLK command of fcntl().
1948  */
1949 int fcntl_getlk(struct file *filp, unsigned int cmd, struct flock __user *l)
1950 {
1951 	struct file_lock file_lock;
1952 	struct flock flock;
1953 	int error;
1954
1955 	error = -EFAULT;
1956 	if (copy_from_user(&flock, l, sizeof(flock)))
1957 		goto out;
1958 	error = -EINVAL;
1959 	if ((flock.l_type != F_RDLCK) && (flock.l_type != F_WRLCK))
1960 		goto out;
1961
1962 	error = flock_to_posix_lock(filp, &file_lock, &flock);
1963 	if (error)
1964 		goto out;
1965
1966 	if (cmd == F_OFD_GETLK) {
1967 		error = -EINVAL;
1968 		if (flock.l_pid != 0)
1969 			goto out;
1970
1971 		cmd = F_GETLK;
1972 		file_lock.fl_flags |= FL_OFDLCK;
1973 		file_lock.fl_owner = filp;
1974 	}
1975
1976 	error = vfs_test_lock(filp, &file_lock);
1977 	if (error)
1978 		goto out;
1979
1980 	flock.l_type = file_lock.fl_type;
1981 	if (file_lock.fl_type != F_UNLCK) {
1982 		error = posix_lock_to_flock(&flock, &file_lock);
1983 		if (error)
1984 			goto out;
1985 	}
1986 	error = -EFAULT;
1987 	if (!copy_to_user(l, &flock, sizeof(flock)))
1988 		error = 0;
1989 out:
1990 	return error;
1991 }
1992
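/*
 * For illustration only: probing for a conflict from userspace via the
 * fcntl_getlk() path above.  The descriptor fd is assumed to be open;
 * F_OFD_GETLK needs _GNU_SOURCE and, as enforced above, l_pid must be
 * zero.  l_len == 0 means "to end of file":
 *
 *	struct flock fl = {
 *		.l_type		= F_WRLCK,
 *		.l_whence	= SEEK_SET,
 *		.l_start	= 0,
 *		.l_len		= 0,
 *	};
 *	fcntl(fd, F_OFD_GETLK, &fl);
 *	if (fl.l_type == F_UNLCK)
 *		(no conflicting lock; otherwise fl now describes the holder)
 */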
1993 /**
1994  * vfs_lock_file - file byte range lock
1995  * @filp: The file to apply the lock to
1996  * @cmd: type of locking operation (F_SETLK, F_GETLK, etc.)
1997  * @fl: The lock to be applied
1998  * @conf: Place to return a copy of the conflicting lock, if found.
1999  *
2000  * A caller that doesn't care about the conflicting lock may pass NULL
2001  * as the final argument.
2002  *
2003  * If the filesystem defines a private ->lock() method, then @conf will
2004  * be left unchanged; so a caller that cares should initialize it to
2005  * some acceptable default.
2006  *
2007  * To avoid blocking kernel daemons, such as lockd, that need to acquire POSIX
2008  * locks, the ->lock() interface may return asynchronously, before the lock has
2009  * been granted or denied by the underlying filesystem, if (and only if)
2010  * lm_grant is set.  Callers expecting ->lock() to return asynchronously
2011  * will only use F_SETLK, not F_SETLKW; they will set FL_SLEEP if (and only if)
2012  * the request is for a blocking lock.  When ->lock() does return asynchronously,
2013  * it must return FILE_LOCK_DEFERRED, and call ->lm_grant() when the lock
2014  * request completes.
2015  * If the request is for a non-blocking lock, the filesystem should return
2016  * FILE_LOCK_DEFERRED, then try to get the lock and call the callback routine
2017  * with the result.  If the request times out, the callback routine will return
2018  * a nonzero return code and the filesystem should release the lock.  The
2019  * filesystem is also responsible for keeping a corresponding posix lock when it
2020  * grants a lock so the VFS can find out which locks are locally held and do
2021  * the correct lock cleanup when required.
2022  * The underlying filesystem must not drop the kernel lock or call
2023  * ->lm_grant() before returning to the caller with a FILE_LOCK_DEFERRED
2024  * return code.
2025  */
2026 int vfs_lock_file(struct file *filp, unsigned int cmd, struct file_lock *fl, struct file_lock *conf)
2027 {
2028 	if (filp->f_op->lock)
2029 		return filp->f_op->lock(filp, cmd, fl);
2030 	else
2031 		return posix_lock_file(filp, fl, conf);
2032 }
2033 EXPORT_SYMBOL_GPL(vfs_lock_file);
2034
2035 static int do_lock_file_wait(struct file *filp, unsigned int cmd,
2036 			     struct file_lock *fl)
2037 {
2038 	int error;
2039
2040 	error = security_file_lock(filp, fl->fl_type);
2041 	if (error)
2042 		return error;
2043
2044 	for (;;) {
2045 		error = vfs_lock_file(filp, cmd, fl, NULL);
2046 		if (error != FILE_LOCK_DEFERRED)
2047 			break;
2048 		error = wait_event_interruptible(fl->fl_wait, !fl->fl_next);
2049 		if (!error)
2050 			continue;
2051
2052 		locks_delete_block(fl);
2053 		break;
2054 	}
2055
2056 	return error;
2057 }
2058
2059 /* Ensure that fl->fl_file has compatible f_mode for F_SETLK calls */
2060 static int
2061 check_fmode_for_setlk(struct file_lock *fl)
2062 {
2063 	switch (fl->fl_type) {
2064 	case F_RDLCK:
2065 		if (!(fl->fl_file->f_mode & FMODE_READ))
2066 			return -EBADF;
2067 		break;
2068 	case F_WRLCK:
2069 		if (!(fl->fl_file->f_mode & FMODE_WRITE))
2070 			return -EBADF;
2071 	}
2072 	return 0;
2073 }
2074
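/*
 * For illustration only: the userspace side of the F_OFD_SETLK case
 * handled in fcntl_setlk() below.  fd is assumed open for writing.
 * Because the owner is overridden to the struct file, the lock belongs
 * to the open file description: it is shared across fork() with that
 * description and is only dropped when the last descriptor referring to
 * it is closed, not when some unrelated fd for the same file goes away:
 *
 *	struct flock fl = { .l_type = F_WRLCK, .l_whence = SEEK_SET };
 *	if (fcntl(fd, F_OFD_SETLK, &fl) == -1)
 *		(EAGAIN: somebody else holds a conflicting lock)
 */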
2075 /* Apply the lock described by l to an open file descriptor.
2076  * This implements both the F_SETLK and F_SETLKW commands of fcntl().
2077  */
2078 int fcntl_setlk(unsigned int fd, struct file *filp, unsigned int cmd,
2079 		struct flock __user *l)
2080 {
2081 	struct file_lock *file_lock = locks_alloc_lock();
2082 	struct flock flock;
2083 	struct inode *inode;
2084 	struct file *f;
2085 	int error;
2086
2087 	if (file_lock == NULL)
2088 		return -ENOLCK;
2089
2090 	/*
2091 	 * This might block, so we do it before checking the inode.
2092 	 */
2093 	error = -EFAULT;
2094 	if (copy_from_user(&flock, l, sizeof(flock)))
2095 		goto out;
2096
2097 	inode = file_inode(filp);
2098
2099 	/* Don't allow mandatory locks on files that may be memory mapped
2100 	 * and shared.
2101 	 */
2102 	if (mandatory_lock(inode) && mapping_writably_mapped(filp->f_mapping)) {
2103 		error = -EAGAIN;
2104 		goto out;
2105 	}
2106
2107 again:
2108 	error = flock_to_posix_lock(filp, file_lock, &flock);
2109 	if (error)
2110 		goto out;
2111
2112 	error = check_fmode_for_setlk(file_lock);
2113 	if (error)
2114 		goto out;
2115
2116 	/*
2117 	 * If the cmd is requesting file-private locks, then set the
2118 	 * FL_OFDLCK flag and override the owner.
2119 	 */
2120 	switch (cmd) {
2121 	case F_OFD_SETLK:
2122 		error = -EINVAL;
2123 		if (flock.l_pid != 0)
2124 			goto out;
2125
2126 		cmd = F_SETLK;
2127 		file_lock->fl_flags |= FL_OFDLCK;
2128 		file_lock->fl_owner = filp;
2129 		break;
2130 	case F_OFD_SETLKW:
2131 		error = -EINVAL;
2132 		if (flock.l_pid != 0)
2133 			goto out;
2134
2135 		cmd = F_SETLKW;
2136 		file_lock->fl_flags |= FL_OFDLCK;
2137 		file_lock->fl_owner = filp;
2138 		/* Fallthrough */
2139 	case F_SETLKW:
2140 		file_lock->fl_flags |= FL_SLEEP;
2141 	}
2142
2143 	error = do_lock_file_wait(filp, cmd, file_lock);
2144
2145 	/*
2146 	 * Attempt to detect a close/fcntl race and recover by
2147 	 * releasing the lock that was just acquired.
2148 	 */
2149 	/*
2150 	 * We need that spin_lock here - it prevents reordering between
2151 	 * the update of inode->i_flock and the check for it done in
2152 	 * close(); rcu_read_lock() wouldn't do.
2153 	 */
2154 	spin_lock(&current->files->file_lock);
2155 	f = fcheck(fd);
2156 	spin_unlock(&current->files->file_lock);
2157 	if (!error && f != filp && flock.l_type != F_UNLCK) {
2158 		flock.l_type = F_UNLCK;
2159 		goto again;
2160 	}
2161
2162 out:
2163 	locks_free_lock(file_lock);
2164 	return error;
2165 }
2166
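/*
 * For illustration only: the blocking variant from userspace.  F_SETLKW
 * sleeps in do_lock_file_wait() above via wait_event_interruptible(), so
 * a caught signal can make the call fail with EINTR, and a deadlocked
 * request fails with EDEADLK.  A common pattern is to retry on EINTR:
 *
 *	struct flock fl = { .l_type = F_WRLCK, .l_whence = SEEK_SET };
 *	while (fcntl(fd, F_SETLKW, &fl) == -1 && errno == EINTR)
 *		;				(retry after signals)
 */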
2167 #if BITS_PER_LONG == 32
2168 /* Report the first existing lock that would conflict with l.
2169  * This implements the F_GETLK command of fcntl().
2170  */
2171 int fcntl_getlk64(struct file *filp, unsigned int cmd, struct flock64 __user *l)
2172 {
2173 	struct file_lock file_lock;
2174 	struct flock64 flock;
2175 	int error;
2176
2177 	error = -EFAULT;
2178 	if (copy_from_user(&flock, l, sizeof(flock)))
2179 		goto out;
2180 	error = -EINVAL;
2181 	if ((flock.l_type != F_RDLCK) && (flock.l_type != F_WRLCK))
2182 		goto out;
2183
2184 	error = flock64_to_posix_lock(filp, &file_lock, &flock);
2185 	if (error)
2186 		goto out;
2187
2188 	if (cmd == F_OFD_GETLK) {
2189 		error = -EINVAL;
2190 		if (flock.l_pid != 0)
2191 			goto out;
2192
2193 		cmd = F_GETLK64;
2194 		file_lock.fl_flags |= FL_OFDLCK;
2195 		file_lock.fl_owner = filp;
2196 	}
2197
2198 	error = vfs_test_lock(filp, &file_lock);
2199 	if (error)
2200 		goto out;
2201
2202 	flock.l_type = file_lock.fl_type;
2203 	if (file_lock.fl_type != F_UNLCK)
2204 		posix_lock_to_flock64(&flock, &file_lock);
2205
2206 	error = -EFAULT;
2207 	if (!copy_to_user(l, &flock, sizeof(flock)))
2208 		error = 0;
2209
2210 out:
2211 	return error;
2212 }
2213
2214 /* Apply the lock described by l to an open file descriptor.
2215  * This implements both the F_SETLK and F_SETLKW commands of fcntl().
2216  */
2217 int fcntl_setlk64(unsigned int fd, struct file *filp, unsigned int cmd,
2218 		struct flock64 __user *l)
2219 {
2220 	struct file_lock *file_lock = locks_alloc_lock();
2221 	struct flock64 flock;
2222 	struct inode *inode;
2223 	struct file *f;
2224 	int error;
2225
2226 	if (file_lock == NULL)
2227 		return -ENOLCK;
2228
2229 	/*
2230 	 * This might block, so we do it before checking the inode.
2231 	 */
2232 	error = -EFAULT;
2233 	if (copy_from_user(&flock, l, sizeof(flock)))
2234 		goto out;
2235
2236 	inode = file_inode(filp);
2237
2238 	/* Don't allow mandatory locks on files that may be memory mapped
2239 	 * and shared.
2240 	 */
2241 	if (mandatory_lock(inode) && mapping_writably_mapped(filp->f_mapping)) {
2242 		error = -EAGAIN;
2243 		goto out;
2244 	}
2245
2246 again:
2247 	error = flock64_to_posix_lock(filp, file_lock, &flock);
2248 	if (error)
2249 		goto out;
2250
2251 	error = check_fmode_for_setlk(file_lock);
2252 	if (error)
2253 		goto out;
2254
2255 	/*
2256 	 * If the cmd is requesting file-private locks, then set the
2257 	 * FL_OFDLCK flag and override the owner.
2258 	 */
2259 	switch (cmd) {
2260 	case F_OFD_SETLK:
2261 		error = -EINVAL;
2262 		if (flock.l_pid != 0)
2263 			goto out;
2264
2265 		cmd = F_SETLK64;
2266 		file_lock->fl_flags |= FL_OFDLCK;
2267 		file_lock->fl_owner = filp;
2268 		break;
2269 	case F_OFD_SETLKW:
2270 		error = -EINVAL;
2271 		if (flock.l_pid != 0)
2272 			goto out;
2273
2274 		cmd = F_SETLKW64;
2275 		file_lock->fl_flags |= FL_OFDLCK;
2276 		file_lock->fl_owner = filp;
2277 		/* Fallthrough */
2278 	case F_SETLKW64:
2279 		file_lock->fl_flags |= FL_SLEEP;
2280 	}
2281
2282 	error = do_lock_file_wait(filp, cmd, file_lock);
2283
2284 	/*
2285 	 * Attempt to detect a close/fcntl race and recover by
2286 	 * releasing the lock that was just acquired.
2287 	 */
2288 	spin_lock(&current->files->file_lock);
2289 	f = fcheck(fd);
2290 	spin_unlock(&current->files->file_lock);
2291 	if (!error && f != filp && flock.l_type != F_UNLCK) {
2292 		flock.l_type = F_UNLCK;
2293 		goto again;
2294 	}
2295
2296 out:
2297 	locks_free_lock(file_lock);
2298 	return error;
2299 }
2300 #endif /* BITS_PER_LONG == 32 */
2301
2302 /*
2303  * This function is called when the file is being removed
2304  * from the task's fd array.  POSIX locks belonging to this task
2305  * are deleted at this time.
2306  */
2307 void locks_remove_posix(struct file *filp, fl_owner_t owner)
2308 {
2309 	struct file_lock lock;
2310
2311 	/*
2312 	 * If there are no locks held on this file, we don't need to call
2313 	 * posix_lock_file().  Another process could be setting a lock on this
2314 	 * file at the same time, but we wouldn't remove that lock anyway.
2315 	 */
2316 	if (!file_inode(filp)->i_flock)
2317 		return;
2318
2319 	lock.fl_type = F_UNLCK;
2320 	lock.fl_flags = FL_POSIX | FL_CLOSE;
2321 	lock.fl_start = 0;
2322 	lock.fl_end = OFFSET_MAX;
2323 	lock.fl_owner = owner;
2324 	lock.fl_pid = current->tgid;
2325 	lock.fl_file = filp;
2326 	lock.fl_ops = NULL;
2327 	lock.fl_lmops = NULL;
2328
2329 	vfs_lock_file(filp, F_SETLK, &lock, NULL);
2330
2331 	if (lock.fl_ops && lock.fl_ops->fl_release_private)
2332 		lock.fl_ops->fl_release_private(&lock);
2333 }
2334
2335 EXPORT_SYMBOL(locks_remove_posix);
2336
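/*
 * For illustration only: the userspace consequence of the unlock done in
 * locks_remove_posix() above.  Classic POSIX locks are owned by the
 * process, so closing *any* descriptor for a file drops all of the
 * process's locks on it, even those taken through a different, still
 * open descriptor (paths hypothetical):
 *
 *	int fd1 = open("/some/file", O_RDWR);
 *	int fd2 = open("/some/file", O_RDONLY);
 *	struct flock fl = { .l_type = F_WRLCK, .l_whence = SEEK_SET };
 *	fcntl(fd1, F_SETLK, &fl);
 *	close(fd2);			(the lock taken on fd1 is gone too)
 *
 * Open file description locks (F_OFD_SETLK above) avoid this pitfall.
 */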
2337 /*
2338  * This function is called on the last close of an open file.
2339  */
2340 void locks_remove_file(struct file *filp)
2341 {
2342 	struct inode *inode = file_inode(filp);
2343 	struct file_lock *fl;
2344 	struct file_lock **before;
2345 	LIST_HEAD(dispose);
2346
2347 	if (!inode->i_flock)
2348 		return;
2349
2350 	locks_remove_posix(filp, filp);
2351
2352 	if (filp->f_op->flock) {
2353 		struct file_lock fl = {
2354 			.fl_owner = filp,
2355 			.fl_pid = current->tgid,
2356 			.fl_file = filp,
2357 			.fl_flags = FL_FLOCK,
2358 			.fl_type = F_UNLCK,
2359 			.fl_end = OFFSET_MAX,
2360 		};
2361 		filp->f_op->flock(filp, F_SETLKW, &fl);
2362 		if (fl.fl_ops && fl.fl_ops->fl_release_private)
2363 			fl.fl_ops->fl_release_private(&fl);
2364 	}
2365
2366 	spin_lock(&inode->i_lock);
2367 	before = &inode->i_flock;
2368
2369 	while ((fl = *before) != NULL) {
2370 		if (fl->fl_file == filp) {
2371 			if (IS_LEASE(fl)) {
2372 				lease_modify(before, F_UNLCK);
2373 				continue;
2374 			}
2375
2376 			/*
2377 			 * There's a leftover lock on the list of a type that
2378 			 * we didn't expect to see. Most likely a classic
2379 			 * POSIX lock that ended up not getting released
2380 			 * properly, or that raced onto the list somehow. Log
2381 			 * some info about it and then just remove it from
2382 			 * the list.
2383 			 */
2384 			WARN(!IS_FLOCK(fl),
2385 				"leftover lock: dev=%u:%u ino=%lu type=%hhd flags=0x%x start=%lld end=%lld\n",
2386 				MAJOR(inode->i_sb->s_dev),
2387 				MINOR(inode->i_sb->s_dev), inode->i_ino,
2388 				fl->fl_type, fl->fl_flags,
2389 				fl->fl_start, fl->fl_end);
2390
2391 			locks_delete_lock(before, &dispose);
2392 			continue;
2393 		}
2394 		before = &fl->fl_next;
2395 	}
2396 	spin_unlock(&inode->i_lock);
2397 	locks_dispose_list(&dispose);
2398 }
2399
2400 /**
2401  * posix_unblock_lock - stop waiting for a file lock
2402  * @waiter: the lock which was waiting
2403  *
2404  * lockd needs to block waiting for locks.
2405  */
2406 int
2407 posix_unblock_lock(struct file_lock *waiter)
2408 {
2409 	int status = 0;
2410
2411 	spin_lock(&blocked_lock_lock);
2412 	if (waiter->fl_next)
2413 		__locks_delete_block(waiter);
2414 	else
2415 		status = -ENOENT;
2416 	spin_unlock(&blocked_lock_lock);
2417 	return status;
2418 }
2419 EXPORT_SYMBOL(posix_unblock_lock);
2420
2421 /**
2422  * vfs_cancel_lock - file byte range unblock lock
2423  * @filp: The file to apply the unblock to
2424  * @fl: The lock to be unblocked
2425  *
2426  * Used by lock managers to cancel blocked requests
2427  */
2428 int vfs_cancel_lock(struct file *filp, struct file_lock *fl)
2429 {
2430 	if (filp->f_op->lock)
2431 		return filp->f_op->lock(filp, F_CANCELLK, fl);
2432 	return 0;
2433 }
2434
2435 EXPORT_SYMBOL_GPL(vfs_cancel_lock);
2436
2437 #ifdef CONFIG_PROC_FS
2438 #include <linux/proc_fs.h>
2439 #include <linux/seq_file.h>
2440
2441 struct locks_iterator {
2442 	int	li_cpu;
2443 	loff_t	li_pos;
2444 };
2445
2446 static void lock_get_status(struct seq_file *f, struct file_lock *fl,
2447 			    loff_t id, char *pfx)
2448 {
2449 	struct inode *inode = NULL;
2450 	unsigned int fl_pid;
2451
2452 	if (fl->fl_nspid)
2453 		fl_pid = pid_vnr(fl->fl_nspid);
2454 	else
2455 		fl_pid = fl->fl_pid;
2456
2457 	if (fl->fl_file != NULL)
2458 		inode = file_inode(fl->fl_file);
2459
2460 	seq_printf(f, "%lld:%s ", id, pfx);
2461 	if (IS_POSIX(fl)) {
2462 		if (fl->fl_flags & FL_ACCESS)
2463 			seq_puts(f, "ACCESS");
2464 		else if (IS_OFDLCK(fl))
2465 			seq_puts(f, "OFDLCK");
2466 		else
2467 			seq_puts(f, "POSIX ");
2468
2469 		seq_printf(f, " %s ",
2470 			     (inode == NULL) ? "*NOINODE*" :
2471 			     mandatory_lock(inode) ? "MANDATORY" : "ADVISORY ");
2472 	} else if (IS_FLOCK(fl)) {
2473 		if (fl->fl_type & LOCK_MAND) {
2474 			seq_puts(f, "FLOCK  MSNFS     ");
2475 		} else {
2476 			seq_puts(f, "FLOCK  ADVISORY  ");
2477 		}
2478 	} else if (IS_LEASE(fl)) {
2479 		if (fl->fl_flags & FL_DELEG)
2480 			seq_puts(f, "DELEG  ");
2481 		else
2482 			seq_puts(f, "LEASE  ");
2483
2484 		if (lease_breaking(fl))
2485 			seq_puts(f, "BREAKING  ");
2486 		else if (fl->fl_file)
2487 			seq_puts(f, "ACTIVE    ");
2488 		else
2489 			seq_puts(f, "BREAKER   ");
2490 	} else {
2491 		seq_puts(f, "UNKNOWN UNKNOWN  ");
2492 	}
2493 	if (fl->fl_type & LOCK_MAND) {
2494 		seq_printf(f, "%s ",
2495 			       (fl->fl_type & LOCK_READ)
2496 			       ? (fl->fl_type & LOCK_WRITE) ? "RW   " : "READ "
2497 			       : (fl->fl_type & LOCK_WRITE) ? "WRITE" : "NONE ");
2498 	} else {
2499 		seq_printf(f, "%s ",
2500 			       (lease_breaking(fl))
2501 			       ? (fl->fl_type == F_UNLCK) ? "UNLCK" : "READ "
2502 			       : (fl->fl_type == F_WRLCK) ? "WRITE" : "READ ");
2503 	}
2504 	if (inode) {
2505 #ifdef WE_CAN_BREAK_LSLK_NOW
2506 		seq_printf(f, "%d %s:%ld ", fl_pid,
2507 				inode->i_sb->s_id, inode->i_ino);
2508 #else
2509 		/* userspace relies on this representation of dev_t ;-( */
2510 		seq_printf(f, "%d %02x:%02x:%ld ", fl_pid,
2511 				MAJOR(inode->i_sb->s_dev),
2512 				MINOR(inode->i_sb->s_dev), inode->i_ino);
2513 #endif
2514 	} else {
2515 		seq_printf(f, "%d <none>:0 ", fl_pid);
2516 	}
2517 	if (IS_POSIX(fl)) {
2518 		if (fl->fl_end == OFFSET_MAX)
2519 			seq_printf(f, "%Ld EOF\n", fl->fl_start);
2520 		else
2521 			seq_printf(f, "%Ld %Ld\n", fl->fl_start, fl->fl_end);
2522 	} else {
2523 		seq_puts(f, "0 EOF\n");
2524 	}
2525 }
2526
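/*
 * For illustration only: output in the format assembled by
 * lock_get_status() above, as seen in /proc/locks (values invented; a
 * " -> " entry is a blocked waiter emitted by locks_show() below):
 *
 *	1: POSIX  ADVISORY  WRITE 1193 08:01:273662 0 EOF
 *	1: -> POSIX  ADVISORY  WRITE 1194 08:01:273662 0 EOF
 *	2: FLOCK  ADVISORY  WRITE 1098 08:01:131090 0 EOF
 *	3: LEASE  ACTIVE    READ  1073 08:01:393220 0 EOF
 */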
"WRITE" : "READ "); 2503 } 2504 if (inode) { 2505 #ifdef WE_CAN_BREAK_LSLK_NOW 2506 seq_printf(f, "%d %s:%ld ", fl_pid, 2507 inode->i_sb->s_id, inode->i_ino); 2508 #else 2509 /* userspace relies on this representation of dev_t ;-( */ 2510 seq_printf(f, "%d %02x:%02x:%ld ", fl_pid, 2511 MAJOR(inode->i_sb->s_dev), 2512 MINOR(inode->i_sb->s_dev), inode->i_ino); 2513 #endif 2514 } else { 2515 seq_printf(f, "%d <none>:0 ", fl_pid); 2516 } 2517 if (IS_POSIX(fl)) { 2518 if (fl->fl_end == OFFSET_MAX) 2519 seq_printf(f, "%Ld EOF\n", fl->fl_start); 2520 else 2521 seq_printf(f, "%Ld %Ld\n", fl->fl_start, fl->fl_end); 2522 } else { 2523 seq_puts(f, "0 EOF\n"); 2524 } 2525 } 2526 2527 static int locks_show(struct seq_file *f, void *v) 2528 { 2529 struct locks_iterator *iter = f->private; 2530 struct file_lock *fl, *bfl; 2531 2532 fl = hlist_entry(v, struct file_lock, fl_link); 2533 2534 lock_get_status(f, fl, iter->li_pos, ""); 2535 2536 list_for_each_entry(bfl, &fl->fl_block, fl_block) 2537 lock_get_status(f, bfl, iter->li_pos, " ->"); 2538 2539 return 0; 2540 } 2541 2542 static void *locks_start(struct seq_file *f, loff_t *pos) 2543 __acquires(&blocked_lock_lock) 2544 { 2545 struct locks_iterator *iter = f->private; 2546 2547 iter->li_pos = *pos + 1; 2548 lg_global_lock(&file_lock_lglock); 2549 spin_lock(&blocked_lock_lock); 2550 return seq_hlist_start_percpu(&file_lock_list, &iter->li_cpu, *pos); 2551 } 2552 2553 static void *locks_next(struct seq_file *f, void *v, loff_t *pos) 2554 { 2555 struct locks_iterator *iter = f->private; 2556 2557 ++iter->li_pos; 2558 return seq_hlist_next_percpu(v, &file_lock_list, &iter->li_cpu, pos); 2559 } 2560 2561 static void locks_stop(struct seq_file *f, void *v) 2562 __releases(&blocked_lock_lock) 2563 { 2564 spin_unlock(&blocked_lock_lock); 2565 lg_global_unlock(&file_lock_lglock); 2566 } 2567 2568 static const struct seq_operations locks_seq_operations = { 2569 .start = locks_start, 2570 .next = locks_next, 2571 .stop = locks_stop, 2572 .show = locks_show, 2573 }; 2574 2575 static int locks_open(struct inode *inode, struct file *filp) 2576 { 2577 return seq_open_private(filp, &locks_seq_operations, 2578 sizeof(struct locks_iterator)); 2579 } 2580 2581 static const struct file_operations proc_locks_operations = { 2582 .open = locks_open, 2583 .read = seq_read, 2584 .llseek = seq_lseek, 2585 .release = seq_release_private, 2586 }; 2587 2588 static int __init proc_locks_init(void) 2589 { 2590 proc_create("locks", 0, NULL, &proc_locks_operations); 2591 return 0; 2592 } 2593 module_init(proc_locks_init); 2594 #endif 2595 2596 /** 2597 * lock_may_read - checks that the region is free of locks 2598 * @inode: the inode that is being read 2599 * @start: the first byte to read 2600 * @len: the number of bytes to read 2601 * 2602 * Emulates Windows locking requirements. Whole-file 2603 * mandatory locks (share modes) can prohibit a read and 2604 * byte-range POSIX locks can prohibit a read if they overlap. 2605 * 2606 * N.B. this function is only ever called 2607 * from knfsd and ownership of locks is never checked. 
2608  */
2609 int lock_may_read(struct inode *inode, loff_t start, unsigned long len)
2610 {
2611 	struct file_lock *fl;
2612 	int result = 1;
2613
2614 	spin_lock(&inode->i_lock);
2615 	for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next) {
2616 		if (IS_POSIX(fl)) {
2617 			if (fl->fl_type == F_RDLCK)
2618 				continue;
2619 			if ((fl->fl_end < start) || (fl->fl_start > (start + len)))
2620 				continue;
2621 		} else if (IS_FLOCK(fl)) {
2622 			if (!(fl->fl_type & LOCK_MAND))
2623 				continue;
2624 			if (fl->fl_type & LOCK_READ)
2625 				continue;
2626 		} else
2627 			continue;
2628 		result = 0;
2629 		break;
2630 	}
2631 	spin_unlock(&inode->i_lock);
2632 	return result;
2633 }
2634
2635 EXPORT_SYMBOL(lock_may_read);
2636
2637 /**
2638  * lock_may_write - checks that the region is free of locks
2639  * @inode: the inode that is being written
2640  * @start: the first byte to write
2641  * @len: the number of bytes to write
2642  *
2643  * Emulates Windows locking requirements.  Whole-file
2644  * mandatory locks (share modes) can prohibit a write and
2645  * byte-range POSIX locks can prohibit a write if they overlap.
2646  *
2647  * N.B. this function is only ever called
2648  * from knfsd and ownership of locks is never checked.
2649  */
2650 int lock_may_write(struct inode *inode, loff_t start, unsigned long len)
2651 {
2652 	struct file_lock *fl;
2653 	int result = 1;
2654
2655 	spin_lock(&inode->i_lock);
2656 	for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next) {
2657 		if (IS_POSIX(fl)) {
2658 			if ((fl->fl_end < start) || (fl->fl_start > (start + len)))
2659 				continue;
2660 		} else if (IS_FLOCK(fl)) {
2661 			if (!(fl->fl_type & LOCK_MAND))
2662 				continue;
2663 			if (fl->fl_type & LOCK_WRITE)
2664 				continue;
2665 		} else
2666 			continue;
2667 		result = 0;
2668 		break;
2669 	}
2670 	spin_unlock(&inode->i_lock);
2671 	return result;
2672 }
2673
2674 EXPORT_SYMBOL(lock_may_write);
2675
2676 static int __init filelock_init(void)
2677 {
2678 	int i;
2679
2680 	filelock_cache = kmem_cache_create("file_lock_cache",
2681 			sizeof(struct file_lock), 0, SLAB_PANIC, NULL);
2682
2683 	lg_lock_init(&file_lock_lglock, "file_lock_lglock");
2684
2685 	for_each_possible_cpu(i)
2686 		INIT_HLIST_HEAD(per_cpu_ptr(&file_lock_list, i));
2687
2688 	return 0;
2689 }
2690
2691 core_initcall(filelock_init);
2692
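/*
 * For illustration only: a hypothetical in-kernel caller of the two
 * helpers above, in the spirit of their one real user, knfsd.  A server
 * would refuse the I/O rather than block when a Windows-style share
 * mode or an overlapping mandatory POSIX lock forbids it:
 *
 *	static int my_read_allowed(struct inode *inode, loff_t pos,
 *				   unsigned long count)
 *	{
 *		return lock_may_read(inode, pos, count) ? 0 : -EACCES;
 *	}
 */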