// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/fs/locks.c
 *
 * We implement four types of file locks: BSD locks, posix locks, open
 * file description locks, and leases. For details about BSD locks,
 * see the flock(2) man page; for details about the other three, see
 * fcntl(2).
 *
 *
 * Locking conflicts and dependencies:
 * If multiple threads attempt to lock the same byte (or flock the same file)
 * only one can be granted the lock, and the others must wait their turn.
 * The first lock has been "applied" or "granted", the others are "waiting"
 * and are "blocked" by the "applied" lock.
 *
 * Waiting and applied locks are all kept in trees whose properties are:
 *
 *	- the root of a tree may be an applied or waiting lock.
 *	- every other node in the tree is a waiting lock that
 *	  conflicts with every ancestor of that node.
 *
 * Every such tree begins life as a waiting singleton which obviously
 * satisfies the above properties.
 *
 * The only ways we modify trees preserve these properties:
 *
 *	1. We may add a new leaf node, but only after first verifying that it
 *	   conflicts with all of its ancestors.
 *	2. We may remove the root of a tree, creating a new singleton
 *	   tree from the root and N new trees rooted in the immediate
 *	   children.
 *	3. If the root of a tree is not currently an applied lock, we may
 *	   apply it (if possible).
 *	4. We may upgrade the root of the tree (either extend its range,
 *	   or upgrade its entire range from read to write).
 *
 * When an applied lock is modified in a way that reduces or downgrades any
 * part of its range, we remove all its children (2 above). This particularly
 * happens when a lock is unlocked.
 *
 * For each of those child trees we "wake up" the thread which is
 * waiting for the lock so it can continue handling as follows: if the
 * root of the tree applies, we do so (3). If it doesn't, it must
 * conflict with some applied lock. We remove (wake up) all of its children
 * (2), and add it as a new leaf to the tree rooted in the applied
 * lock (1). We then repeat the process recursively with those
 * children.
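 *
 * An illustrative sketch (hypothetical locks, not taken from the text
 * above): an applied write lock W covers a range, and write requests
 * A, B and C arrive for the same range. A blocks on W; B and C each
 * conflict with A, so they become leaves beneath it:
 *
 *	W(applied)
 *	    |
 *	    A(waiting)
 *	   /	\
 *	  B	 C
 *
 * Unlocking W removes the root (2): A becomes a singleton tree and its
 * waiter is woken. If A can now be applied (3), B and C stay queued
 * beneath it; if A still conflicts with another applied lock, B and C
 * are woken and A is re-added as a leaf under that lock (1).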
 *
 */

#include <linux/capability.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/filelock.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/security.h>
#include <linux/slab.h>
#include <linux/syscalls.h>
#include <linux/time.h>
#include <linux/rcupdate.h>
#include <linux/pid_namespace.h>
#include <linux/hashtable.h>
#include <linux/percpu.h>
#include <linux/sysctl.h>

#define CREATE_TRACE_POINTS
#include <trace/events/filelock.h>

#include <linux/uaccess.h>

#define IS_POSIX(fl)	(fl->fl_flags & FL_POSIX)
#define IS_FLOCK(fl)	(fl->fl_flags & FL_FLOCK)
#define IS_LEASE(fl)	(fl->fl_flags & (FL_LEASE|FL_DELEG|FL_LAYOUT))
#define IS_OFDLCK(fl)	(fl->fl_flags & FL_OFDLCK)
#define IS_REMOTELCK(fl)	(fl->fl_pid <= 0)

static bool lease_breaking(struct file_lock *fl)
{
	return fl->fl_flags & (FL_UNLOCK_PENDING | FL_DOWNGRADE_PENDING);
}

static int target_leasetype(struct file_lock *fl)
{
	if (fl->fl_flags & FL_UNLOCK_PENDING)
		return F_UNLCK;
	if (fl->fl_flags & FL_DOWNGRADE_PENDING)
		return F_RDLCK;
	return fl->fl_type;
}

static int leases_enable = 1;
static int lease_break_time = 45;

#ifdef CONFIG_SYSCTL
static struct ctl_table locks_sysctls[] = {
	{
		.procname	= "leases-enable",
		.data		= &leases_enable,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
#ifdef CONFIG_MMU
	{
		.procname	= "lease-break-time",
		.data		= &lease_break_time,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
#endif /* CONFIG_MMU */
	{}
};

static int __init init_fs_locks_sysctls(void)
{
	register_sysctl_init("fs", locks_sysctls);
	return 0;
}
early_initcall(init_fs_locks_sysctls);
#endif /* CONFIG_SYSCTL */

/*
 * The global file_lock_list is only used for displaying /proc/locks, so we
 * keep a list on each CPU, with each list protected by its own spinlock.
 * Global serialization is done using file_rwsem.
 *
 * Note that alterations to the list also require that the relevant flc_lock is
 * held.
 */
struct file_lock_list_struct {
	spinlock_t		lock;
	struct hlist_head	hlist;
};
static DEFINE_PER_CPU(struct file_lock_list_struct, file_lock_list);
DEFINE_STATIC_PERCPU_RWSEM(file_rwsem);


/*
 * The blocked_hash is used to find POSIX lock loops for deadlock detection.
 * It is protected by blocked_lock_lock.
 *
 * We hash locks by lockowner in order to optimize searching for the lock a
 * particular lockowner is waiting on.
 *
 * FIXME: make this value scale via some heuristic? We generally will want more
 * buckets when we have more lockowners holding locks, but that's a little
 * difficult to determine without knowing what the workload will look like.
 */
#define BLOCKED_HASH_BITS	7
static DEFINE_HASHTABLE(blocked_hash, BLOCKED_HASH_BITS);

/*
 * This lock protects the blocked_hash. Generally, if you're accessing it, you
 * want to be holding this lock.
 *
 * In addition, it also protects the fl->fl_blocked_requests list, and the
 * fl->fl_blocker pointer for file_lock structures that are acting as lock
 * requests (in contrast to those that are acting as records of acquired locks).
 *
 * Note that when we acquire this lock in order to change the above fields,
 * we often hold the flc_lock as well. In certain cases, when reading the fields
 * protected by this lock, we can skip acquiring it iff we already hold the
 * flc_lock.
 */
static DEFINE_SPINLOCK(blocked_lock_lock);

static struct kmem_cache *flctx_cache __read_mostly;
static struct kmem_cache *filelock_cache __read_mostly;

static struct file_lock_context *
locks_get_lock_context(struct inode *inode, int type)
{
	struct file_lock_context *ctx;

	/* paired with cmpxchg() below */
	ctx = locks_inode_context(inode);
	if (likely(ctx) || type == F_UNLCK)
		goto out;

	ctx = kmem_cache_alloc(flctx_cache, GFP_KERNEL);
	if (!ctx)
		goto out;

	spin_lock_init(&ctx->flc_lock);
	INIT_LIST_HEAD(&ctx->flc_flock);
	INIT_LIST_HEAD(&ctx->flc_posix);
	INIT_LIST_HEAD(&ctx->flc_lease);

	/*
	 * Assign the pointer if it's not already assigned. If it is, then
	 * free the context we just allocated.
	 */
	if (cmpxchg(&inode->i_flctx, NULL, ctx)) {
		kmem_cache_free(flctx_cache, ctx);
		ctx = locks_inode_context(inode);
	}
out:
	trace_locks_get_lock_context(inode, type, ctx);
	return ctx;
}
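
/*
 * The function above uses the common lockless lazy-init idiom. A minimal
 * free-standing sketch of the same pattern, with hypothetical names that
 * are not part of this file:
 *
 *	struct foo *get_foo(struct bar *b)
 *	{
 *		struct foo *f = READ_ONCE(b->foo); // paired with cmpxchg()
 *
 *		if (f)
 *			return f;
 *		f = kzalloc(sizeof(*f), GFP_KERNEL);
 *		if (!f)
 *			return NULL;
 *		if (cmpxchg(&b->foo, NULL, f)) {
 *			// lost the race; someone else installed a context
 *			kfree(f);
 *			f = READ_ONCE(b->foo);
 *		}
 *		return f;
 *	}
 */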

static void
locks_dump_ctx_list(struct list_head *list, char *list_type)
{
	struct file_lock *fl;

	list_for_each_entry(fl, list, fl_list) {
		pr_warn("%s: fl_owner=%p fl_flags=0x%x fl_type=0x%x fl_pid=%u\n",
			list_type, fl->fl_owner, fl->fl_flags, fl->fl_type, fl->fl_pid);
	}
}

static void
locks_check_ctx_lists(struct inode *inode)
{
	struct file_lock_context *ctx = inode->i_flctx;

	if (unlikely(!list_empty(&ctx->flc_flock) ||
		     !list_empty(&ctx->flc_posix) ||
		     !list_empty(&ctx->flc_lease))) {
		pr_warn("Leaked locks on dev=0x%x:0x%x ino=0x%lx:\n",
			MAJOR(inode->i_sb->s_dev), MINOR(inode->i_sb->s_dev),
			inode->i_ino);
		locks_dump_ctx_list(&ctx->flc_flock, "FLOCK");
		locks_dump_ctx_list(&ctx->flc_posix, "POSIX");
		locks_dump_ctx_list(&ctx->flc_lease, "LEASE");
	}
}

static void
locks_check_ctx_file_list(struct file *filp, struct list_head *list,
			  char *list_type)
{
	struct file_lock *fl;
	struct inode *inode = file_inode(filp);

	list_for_each_entry(fl, list, fl_list)
		if (fl->fl_file == filp)
			pr_warn("Leaked %s lock on dev=0x%x:0x%x ino=0x%lx "
				" fl_owner=%p fl_flags=0x%x fl_type=0x%x fl_pid=%u\n",
				list_type, MAJOR(inode->i_sb->s_dev),
				MINOR(inode->i_sb->s_dev), inode->i_ino,
				fl->fl_owner, fl->fl_flags, fl->fl_type, fl->fl_pid);
}

void
locks_free_lock_context(struct inode *inode)
{
	struct file_lock_context *ctx = locks_inode_context(inode);

	if (unlikely(ctx)) {
		locks_check_ctx_lists(inode);
		kmem_cache_free(flctx_cache, ctx);
	}
}

static void locks_init_lock_heads(struct file_lock *fl)
{
	INIT_HLIST_NODE(&fl->fl_link);
	INIT_LIST_HEAD(&fl->fl_list);
	INIT_LIST_HEAD(&fl->fl_blocked_requests);
	INIT_LIST_HEAD(&fl->fl_blocked_member);
	init_waitqueue_head(&fl->fl_wait);
}

/* Allocate an empty lock structure. */
struct file_lock *locks_alloc_lock(void)
{
	struct file_lock *fl = kmem_cache_zalloc(filelock_cache, GFP_KERNEL);

	if (fl)
		locks_init_lock_heads(fl);

	return fl;
}
EXPORT_SYMBOL_GPL(locks_alloc_lock);

void locks_release_private(struct file_lock *fl)
{
	BUG_ON(waitqueue_active(&fl->fl_wait));
	BUG_ON(!list_empty(&fl->fl_list));
	BUG_ON(!list_empty(&fl->fl_blocked_requests));
	BUG_ON(!list_empty(&fl->fl_blocked_member));
	BUG_ON(!hlist_unhashed(&fl->fl_link));

	if (fl->fl_ops) {
		if (fl->fl_ops->fl_release_private)
			fl->fl_ops->fl_release_private(fl);
		fl->fl_ops = NULL;
	}

	if (fl->fl_lmops) {
		if (fl->fl_lmops->lm_put_owner) {
			fl->fl_lmops->lm_put_owner(fl->fl_owner);
			fl->fl_owner = NULL;
		}
		fl->fl_lmops = NULL;
	}
}
EXPORT_SYMBOL_GPL(locks_release_private);

/**
 * locks_owner_has_blockers - Check for blocking lock requests
 * @flctx: file lock context
 * @owner: lock owner
 *
 * Return values:
 *   %true: @owner has at least one blocker
 *   %false: @owner has no blockers
 */
bool locks_owner_has_blockers(struct file_lock_context *flctx,
			      fl_owner_t owner)
{
	struct file_lock *fl;

	spin_lock(&flctx->flc_lock);
	list_for_each_entry(fl, &flctx->flc_posix, fl_list) {
		if (fl->fl_owner != owner)
			continue;
		if (!list_empty(&fl->fl_blocked_requests)) {
			spin_unlock(&flctx->flc_lock);
			return true;
		}
	}
	spin_unlock(&flctx->flc_lock);
	return false;
}
EXPORT_SYMBOL_GPL(locks_owner_has_blockers);

/* Free a lock which is not in use. */
void locks_free_lock(struct file_lock *fl)
{
	locks_release_private(fl);
	kmem_cache_free(filelock_cache, fl);
}
EXPORT_SYMBOL(locks_free_lock);

static void
locks_dispose_list(struct list_head *dispose)
{
	struct file_lock *fl;

	while (!list_empty(dispose)) {
		fl = list_first_entry(dispose, struct file_lock, fl_list);
		list_del_init(&fl->fl_list);
		locks_free_lock(fl);
	}
}

void locks_init_lock(struct file_lock *fl)
{
	memset(fl, 0, sizeof(struct file_lock));
	locks_init_lock_heads(fl);
}
EXPORT_SYMBOL(locks_init_lock);

/*
 * Initialize a new lock from an existing file_lock structure.
 */
void locks_copy_conflock(struct file_lock *new, struct file_lock *fl)
{
	new->fl_owner = fl->fl_owner;
	new->fl_pid = fl->fl_pid;
	new->fl_file = NULL;
	new->fl_flags = fl->fl_flags;
	new->fl_type = fl->fl_type;
	new->fl_start = fl->fl_start;
	new->fl_end = fl->fl_end;
	new->fl_lmops = fl->fl_lmops;
	new->fl_ops = NULL;

	if (fl->fl_lmops) {
		if (fl->fl_lmops->lm_get_owner)
			fl->fl_lmops->lm_get_owner(fl->fl_owner);
	}
}
EXPORT_SYMBOL(locks_copy_conflock);

void locks_copy_lock(struct file_lock *new, struct file_lock *fl)
{
	/* "new" must be a freshly-initialized lock */
	WARN_ON_ONCE(new->fl_ops);

	locks_copy_conflock(new, fl);

	new->fl_file = fl->fl_file;
	new->fl_ops = fl->fl_ops;

	if (fl->fl_ops) {
		if (fl->fl_ops->fl_copy_lock)
			fl->fl_ops->fl_copy_lock(new, fl);
	}
}
EXPORT_SYMBOL(locks_copy_lock);

static void locks_move_blocks(struct file_lock *new, struct file_lock *fl)
{
	struct file_lock *f;

	/*
	 * As ctx->flc_lock is held, new requests cannot be added to
	 * ->fl_blocked_requests, so we don't need a lock to check if it
	 * is empty.
	 */
	if (list_empty(&fl->fl_blocked_requests))
		return;
	spin_lock(&blocked_lock_lock);
	list_splice_init(&fl->fl_blocked_requests, &new->fl_blocked_requests);
	list_for_each_entry(f, &new->fl_blocked_requests, fl_blocked_member)
		f->fl_blocker = new;
	spin_unlock(&blocked_lock_lock);
}

static inline int flock_translate_cmd(int cmd)
{
	switch (cmd) {
	case LOCK_SH:
		return F_RDLCK;
	case LOCK_EX:
		return F_WRLCK;
	case LOCK_UN:
		return F_UNLCK;
	}
	return -EINVAL;
}

/* Fill in a file_lock structure with an appropriate FLOCK lock. */
static void flock_make_lock(struct file *filp, struct file_lock *fl, int type)
{
	locks_init_lock(fl);

	fl->fl_file = filp;
	fl->fl_owner = filp;
	fl->fl_pid = current->tgid;
	fl->fl_flags = FL_FLOCK;
	fl->fl_type = type;
	fl->fl_end = OFFSET_MAX;
}

static int assign_type(struct file_lock *fl, long type)
{
	switch (type) {
	case F_RDLCK:
	case F_WRLCK:
	case F_UNLCK:
		fl->fl_type = type;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

static int flock64_to_posix_lock(struct file *filp, struct file_lock *fl,
				 struct flock64 *l)
{
	switch (l->l_whence) {
	case SEEK_SET:
		fl->fl_start = 0;
		break;
	case SEEK_CUR:
		fl->fl_start = filp->f_pos;
		break;
	case SEEK_END:
		fl->fl_start = i_size_read(file_inode(filp));
		break;
	default:
		return -EINVAL;
	}
	if (l->l_start > OFFSET_MAX - fl->fl_start)
		return -EOVERFLOW;
	fl->fl_start += l->l_start;
	if (fl->fl_start < 0)
		return -EINVAL;

	/* POSIX-1996 leaves the case l->l_len < 0 undefined;
	   POSIX-2001 defines it. */
	if (l->l_len > 0) {
		if (l->l_len - 1 > OFFSET_MAX - fl->fl_start)
			return -EOVERFLOW;
		fl->fl_end = fl->fl_start + (l->l_len - 1);

	} else if (l->l_len < 0) {
		if (fl->fl_start + l->l_len < 0)
			return -EINVAL;
		fl->fl_end = fl->fl_start - 1;
		fl->fl_start += l->l_len;
	} else
		fl->fl_end = OFFSET_MAX;

	fl->fl_owner = current->files;
	fl->fl_pid = current->tgid;
	fl->fl_file = filp;
	fl->fl_flags = FL_POSIX;
	fl->fl_ops = NULL;
	fl->fl_lmops = NULL;

	return assign_type(fl, l->l_type);
}

/* Verify a "struct flock" and copy it to a "struct file_lock" as a POSIX
 * style lock.
 */
static int flock_to_posix_lock(struct file *filp, struct file_lock *fl,
			       struct flock *l)
{
	struct flock64 ll = {
		.l_type = l->l_type,
		.l_whence = l->l_whence,
		.l_start = l->l_start,
		.l_len = l->l_len,
	};

	return flock64_to_posix_lock(filp, fl, &ll);
}
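
/*
 * For reference, a sketch of how userspace describes the ranges decoded
 * above (standard fcntl(2) API; illustrative values only):
 *
 *	struct flock fl = {
 *		.l_type   = F_WRLCK,
 *		.l_whence = SEEK_SET,
 *		.l_start  = 100,
 *		.l_len    = 10,		// locks bytes 100..109
 *	};
 *	fcntl(fd, F_SETLKW, &fl);
 *
 * l_len == 0 locks from l_start to EOF and beyond (fl_end = OFFSET_MAX);
 * a negative l_len (POSIX-2001) locks [l_start + l_len, l_start - 1].
 */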

/* default lease lock manager operations */
static bool
lease_break_callback(struct file_lock *fl)
{
	kill_fasync(&fl->fl_fasync, SIGIO, POLL_MSG);
	return false;
}

static void
lease_setup(struct file_lock *fl, void **priv)
{
	struct file *filp = fl->fl_file;
	struct fasync_struct *fa = *priv;

	/*
	 * fasync_insert_entry() returns the old entry if any. If there was no
	 * old entry, then it used "priv" and inserted it into the fasync list.
	 * Clear the pointer to indicate that it shouldn't be freed.
	 */
	if (!fasync_insert_entry(fa->fa_fd, filp, &fl->fl_fasync, fa))
		*priv = NULL;

	__f_setown(filp, task_pid(current), PIDTYPE_TGID, 0);
}

static const struct lock_manager_operations lease_manager_ops = {
	.lm_break = lease_break_callback,
	.lm_change = lease_modify,
	.lm_setup = lease_setup,
};

/*
 * Initialize a lease, use the default lock manager operations
 */
static int lease_init(struct file *filp, long type, struct file_lock *fl)
{
	if (assign_type(fl, type) != 0)
		return -EINVAL;

	fl->fl_owner = filp;
	fl->fl_pid = current->tgid;

	fl->fl_file = filp;
	fl->fl_flags = FL_LEASE;
	fl->fl_start = 0;
	fl->fl_end = OFFSET_MAX;
	fl->fl_ops = NULL;
	fl->fl_lmops = &lease_manager_ops;
	return 0;
}

/* Allocate a file_lock initialised to this type of lease */
static struct file_lock *lease_alloc(struct file *filp, long type)
{
	struct file_lock *fl = locks_alloc_lock();
	int error = -ENOMEM;

	if (fl == NULL)
		return ERR_PTR(error);

	error = lease_init(filp, type, fl);
	if (error) {
		locks_free_lock(fl);
		return ERR_PTR(error);
	}
	return fl;
}

/* Check if two locks overlap each other. */
static inline int locks_overlap(struct file_lock *fl1, struct file_lock *fl2)
{
	return ((fl1->fl_end >= fl2->fl_start) &&
		(fl2->fl_end >= fl1->fl_start));
}

/*
 * Check whether two locks have the same owner.
 */
static int posix_same_owner(struct file_lock *fl1, struct file_lock *fl2)
{
	return fl1->fl_owner == fl2->fl_owner;
}

/* Must be called with the flc_lock held! */
static void locks_insert_global_locks(struct file_lock *fl)
{
	struct file_lock_list_struct *fll = this_cpu_ptr(&file_lock_list);

	percpu_rwsem_assert_held(&file_rwsem);

	spin_lock(&fll->lock);
	fl->fl_link_cpu = smp_processor_id();
	hlist_add_head(&fl->fl_link, &fll->hlist);
	spin_unlock(&fll->lock);
}

/* Must be called with the flc_lock held! */
static void locks_delete_global_locks(struct file_lock *fl)
{
	struct file_lock_list_struct *fll;

	percpu_rwsem_assert_held(&file_rwsem);

	/*
	 * Avoid taking lock if already unhashed. This is safe since this check
	 * is done while holding the flc_lock, and new insertions into the list
	 * also require that it be held.
	 */
	if (hlist_unhashed(&fl->fl_link))
		return;

	fll = per_cpu_ptr(&file_lock_list, fl->fl_link_cpu);
	spin_lock(&fll->lock);
	hlist_del_init(&fl->fl_link);
	spin_unlock(&fll->lock);
}

static unsigned long
posix_owner_key(struct file_lock *fl)
{
	return (unsigned long)fl->fl_owner;
}

static void locks_insert_global_blocked(struct file_lock *waiter)
{
	lockdep_assert_held(&blocked_lock_lock);

	hash_add(blocked_hash, &waiter->fl_link, posix_owner_key(waiter));
}

static void locks_delete_global_blocked(struct file_lock *waiter)
{
	lockdep_assert_held(&blocked_lock_lock);

	hash_del(&waiter->fl_link);
}

/* Remove waiter from blocker's block list.
 * When blocker ends up pointing to itself then the list is empty.
 *
 * Must be called with blocked_lock_lock held.
 */
static void __locks_delete_block(struct file_lock *waiter)
{
	locks_delete_global_blocked(waiter);
	list_del_init(&waiter->fl_blocked_member);
}

static void __locks_wake_up_blocks(struct file_lock *blocker)
{
	while (!list_empty(&blocker->fl_blocked_requests)) {
		struct file_lock *waiter;

		waiter = list_first_entry(&blocker->fl_blocked_requests,
					  struct file_lock, fl_blocked_member);
		__locks_delete_block(waiter);
		if (waiter->fl_lmops && waiter->fl_lmops->lm_notify)
			waiter->fl_lmops->lm_notify(waiter);
		else
			wake_up(&waiter->fl_wait);

		/*
		 * The setting of fl_blocker to NULL marks the "done"
		 * point in deleting a block. Paired with acquire at the top
		 * of locks_delete_block().
		 */
		smp_store_release(&waiter->fl_blocker, NULL);
	}
}

/**
 * locks_delete_block - stop waiting for a file lock
 * @waiter: the lock which was waiting
 *
 * lockd/nfsd need to disconnect the lock while working on it.
 */
int locks_delete_block(struct file_lock *waiter)
{
	int status = -ENOENT;

	/*
	 * If fl_blocker is NULL, it won't be set again as this thread "owns"
	 * the lock and is the only one that might try to claim the lock.
	 *
	 * We use acquire/release to manage fl_blocker so that we can
	 * optimize away taking the blocked_lock_lock in many cases.
	 *
	 * The smp_load_acquire guarantees two things:
	 *
	 * 1/ that fl_blocked_requests can be tested locklessly. If something
	 * was recently added to that list it must have been in a locked region
	 * *before* the locked region when fl_blocker was set to NULL.
	 *
	 * 2/ that no other thread is accessing 'waiter', so it is safe to free
	 * it. __locks_wake_up_blocks is careful not to touch waiter after
	 * fl_blocker is released.
	 *
	 * If a lockless check of fl_blocker shows it to be NULL, we know that
	 * no new locks can be inserted into its fl_blocked_requests list, and
	 * can avoid doing anything further if the list is empty.
	 */
	if (!smp_load_acquire(&waiter->fl_blocker) &&
	    list_empty(&waiter->fl_blocked_requests))
		return status;

	spin_lock(&blocked_lock_lock);
	if (waiter->fl_blocker)
		status = 0;
	__locks_wake_up_blocks(waiter);
	__locks_delete_block(waiter);

	/*
	 * The setting of fl_blocker to NULL marks the "done" point in deleting
	 * a block. Paired with acquire at the top of this function.
	 */
	smp_store_release(&waiter->fl_blocker, NULL);
	spin_unlock(&blocked_lock_lock);
	return status;
}
EXPORT_SYMBOL(locks_delete_block);

/* Insert waiter into blocker's block list.
 * We use a circular list so that processes can be easily woken up in
 * the order they blocked. The documentation doesn't require this but
 * it seems like the reasonable thing to do.
 *
 * Must be called with both the flc_lock and blocked_lock_lock held. The
 * fl_blocked_requests list itself is protected by the blocked_lock_lock,
 * but by ensuring that the flc_lock is also held on insertions we can avoid
 * taking the blocked_lock_lock in some cases when we see that the
 * fl_blocked_requests list is empty.
 *
 * Rather than just adding to the list, we check for conflicts with any existing
 * waiters, and add beneath any waiter that blocks the new waiter.
 * Thus wakeups don't happen until needed.
 */
static void __locks_insert_block(struct file_lock *blocker,
				 struct file_lock *waiter,
				 bool conflict(struct file_lock *,
					       struct file_lock *))
{
	struct file_lock *fl;

	BUG_ON(!list_empty(&waiter->fl_blocked_member));

new_blocker:
	list_for_each_entry(fl, &blocker->fl_blocked_requests, fl_blocked_member)
		if (conflict(fl, waiter)) {
			blocker = fl;
			goto new_blocker;
		}
	waiter->fl_blocker = blocker;
	list_add_tail(&waiter->fl_blocked_member, &blocker->fl_blocked_requests);
	if (IS_POSIX(blocker) && !IS_OFDLCK(blocker))
		locks_insert_global_blocked(waiter);

	/* The requests in waiter->fl_blocked are known to conflict with
	 * waiter, but might not conflict with blocker, or the requests
	 * and lock which block it. So they all need to be woken.
	 */
	__locks_wake_up_blocks(waiter);
}

/* Must be called with flc_lock held. */
static void locks_insert_block(struct file_lock *blocker,
			       struct file_lock *waiter,
			       bool conflict(struct file_lock *,
					     struct file_lock *))
{
	spin_lock(&blocked_lock_lock);
	__locks_insert_block(blocker, waiter, conflict);
	spin_unlock(&blocked_lock_lock);
}

/*
 * Wake up processes blocked waiting for blocker.
 *
 * Must be called with the inode->flc_lock held!
 */
static void locks_wake_up_blocks(struct file_lock *blocker)
{
	/*
	 * Avoid taking global lock if list is empty. This is safe since new
	 * blocked requests are only added to the list under the flc_lock, and
	 * the flc_lock is always held here. Note that removal from the
	 * fl_blocked_requests list does not require the flc_lock, so we must
	 * recheck list_empty() after acquiring the blocked_lock_lock.
	 */
	if (list_empty(&blocker->fl_blocked_requests))
		return;

	spin_lock(&blocked_lock_lock);
	__locks_wake_up_blocks(blocker);
	spin_unlock(&blocked_lock_lock);
}

static void
locks_insert_lock_ctx(struct file_lock *fl, struct list_head *before)
{
	list_add_tail(&fl->fl_list, before);
	locks_insert_global_locks(fl);
}

static void
locks_unlink_lock_ctx(struct file_lock *fl)
{
	locks_delete_global_locks(fl);
	list_del_init(&fl->fl_list);
	locks_wake_up_blocks(fl);
}

static void
locks_delete_lock_ctx(struct file_lock *fl, struct list_head *dispose)
{
	locks_unlink_lock_ctx(fl);
	if (dispose)
		list_add(&fl->fl_list, dispose);
	else
		locks_free_lock(fl);
}

/* Determine if lock sys_fl blocks lock caller_fl. Common functionality
 * checks for shared/exclusive status of overlapping locks.
 */
static bool locks_conflict(struct file_lock *caller_fl,
			   struct file_lock *sys_fl)
{
	if (sys_fl->fl_type == F_WRLCK)
		return true;
	if (caller_fl->fl_type == F_WRLCK)
		return true;
	return false;
}

/* Determine if lock sys_fl blocks lock caller_fl. POSIX specific
 * checking before calling the locks_conflict().
 */
static bool posix_locks_conflict(struct file_lock *caller_fl,
				 struct file_lock *sys_fl)
{
	/* POSIX locks owned by the same process do not conflict with
	 * each other.
	 */
	if (posix_same_owner(caller_fl, sys_fl))
		return false;

	/* Check whether they overlap */
	if (!locks_overlap(caller_fl, sys_fl))
		return false;

	return locks_conflict(caller_fl, sys_fl);
}

/* Determine if lock sys_fl blocks lock caller_fl. FLOCK specific
 * checking before calling the locks_conflict().
 */
static bool flock_locks_conflict(struct file_lock *caller_fl,
				 struct file_lock *sys_fl)
{
	/* FLOCK locks referring to the same filp do not conflict with
	 * each other.
	 */
	if (caller_fl->fl_file == sys_fl->fl_file)
		return false;

	return locks_conflict(caller_fl, sys_fl);
}

void
posix_test_lock(struct file *filp, struct file_lock *fl)
{
	struct file_lock *cfl;
	struct file_lock_context *ctx;
	struct inode *inode = file_inode(filp);
	void *owner;
	void (*func)(void);

	ctx = locks_inode_context(inode);
	if (!ctx || list_empty_careful(&ctx->flc_posix)) {
		fl->fl_type = F_UNLCK;
		return;
	}

retry:
	spin_lock(&ctx->flc_lock);
	list_for_each_entry(cfl, &ctx->flc_posix, fl_list) {
		if (!posix_locks_conflict(fl, cfl))
			continue;
		if (cfl->fl_lmops && cfl->fl_lmops->lm_lock_expirable
			&& (*cfl->fl_lmops->lm_lock_expirable)(cfl)) {
			owner = cfl->fl_lmops->lm_mod_owner;
			func = cfl->fl_lmops->lm_expire_lock;
			__module_get(owner);
			spin_unlock(&ctx->flc_lock);
			(*func)();
			module_put(owner);
			goto retry;
		}
		locks_copy_conflock(fl, cfl);
		goto out;
	}
	fl->fl_type = F_UNLCK;
out:
	spin_unlock(&ctx->flc_lock);
	return;
}
EXPORT_SYMBOL(posix_test_lock);
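
/*
 * The userspace view of the test above (standard F_GETLK semantics,
 * illustrative only): on return l_type is either F_UNLCK, or it
 * describes one lock that would block the caller.
 *
 *	struct flock fl = {
 *		.l_type   = F_WRLCK,
 *		.l_whence = SEEK_SET,
 *		.l_start  = 0,
 *		.l_len    = 0,		// whole file
 *	};
 *	if (fcntl(fd, F_GETLK, &fl) == 0 && fl.l_type != F_UNLCK)
 *		printf("would conflict with pid %d\n", (int)fl.l_pid);
 */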

/*
 * Deadlock detection:
 *
 * We attempt to detect deadlocks that are due purely to posix file
 * locks.
 *
 * We assume that a task can be waiting for at most one lock at a time.
 * So for any acquired lock, the process holding that lock may be
 * waiting on at most one other lock. That lock in turn may be held by
 * someone waiting for at most one other lock. Given a requested lock
 * caller_fl which is about to wait for a conflicting lock block_fl, we
 * follow this chain of waiters to ensure we are not about to create a
 * cycle.
 *
 * Since we do this before we ever put a process to sleep on a lock, we
 * are ensured that there is never a cycle; that is what guarantees that
 * the while() loop in posix_locks_deadlock() eventually completes.
 *
 * Note: the above assumption may not be true when handling lock
 * requests from a broken NFS client. It may also fail in the presence
 * of tasks (such as posix threads) sharing the same open file table.
 * To handle those cases, we just bail out after a few iterations.
 *
 * For FL_OFDLCK locks, the owner is the filp, not the files_struct.
 * Because the owner is not even nominally tied to a thread of
 * execution, the deadlock detection below can't reasonably work well. Just
 * skip it for those.
 *
 * In principle, we could do a more limited deadlock detection on FL_OFDLCK
 * locks that just checks for the case where two tasks are attempting to
 * upgrade from read to write locks on the same inode.
 */

#define MAX_DEADLK_ITERATIONS 10

/* Find a lock that the owner of the given block_fl is blocking on. */
static struct file_lock *what_owner_is_waiting_for(struct file_lock *block_fl)
{
	struct file_lock *fl;

	hash_for_each_possible(blocked_hash, fl, fl_link, posix_owner_key(block_fl)) {
		if (posix_same_owner(fl, block_fl)) {
			while (fl->fl_blocker)
				fl = fl->fl_blocker;
			return fl;
		}
	}
	return NULL;
}

/* Must be called with the blocked_lock_lock held! */
static int posix_locks_deadlock(struct file_lock *caller_fl,
				struct file_lock *block_fl)
{
	int i = 0;

	lockdep_assert_held(&blocked_lock_lock);

	/*
	 * This deadlock detector can't reasonably detect deadlocks with
	 * FL_OFDLCK locks, since they aren't owned by a process, per-se.
	 */
	if (IS_OFDLCK(caller_fl))
		return 0;

	while ((block_fl = what_owner_is_waiting_for(block_fl))) {
		if (i++ > MAX_DEADLK_ITERATIONS)
			return 0;
		if (posix_same_owner(caller_fl, block_fl))
			return 1;
	}
	return 0;
}
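
/*
 * A hypothetical userspace reproduction of the cycle caught above (two
 * processes, blocking F_SETLKW requests on the same file; illustrative
 * only):
 *
 *	// process 1			// process 2
 *	lock bytes   0..99  - granted	lock bytes 100..199 - granted
 *	lock bytes 100..199 - blocks	lock bytes   0..99  - -EDEADLK
 *
 * Following the fl_blocker chain from process 2's blocker leads back to
 * an owner equal to process 2 itself, so the request fails instead of
 * sleeping forever.
 */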

/* Try to create a FLOCK lock on filp. We always insert new FLOCK locks
 * after any leases, but before any posix locks.
 *
 * Note that if called with an FL_EXISTS argument, the caller may determine
 * whether or not a lock was successfully freed by testing the return
 * value for -ENOENT.
 */
static int flock_lock_inode(struct inode *inode, struct file_lock *request)
{
	struct file_lock *new_fl = NULL;
	struct file_lock *fl;
	struct file_lock_context *ctx;
	int error = 0;
	bool found = false;
	LIST_HEAD(dispose);

	ctx = locks_get_lock_context(inode, request->fl_type);
	if (!ctx) {
		if (request->fl_type != F_UNLCK)
			return -ENOMEM;
		return (request->fl_flags & FL_EXISTS) ? -ENOENT : 0;
	}

	if (!(request->fl_flags & FL_ACCESS) && (request->fl_type != F_UNLCK)) {
		new_fl = locks_alloc_lock();
		if (!new_fl)
			return -ENOMEM;
	}

	percpu_down_read(&file_rwsem);
	spin_lock(&ctx->flc_lock);
	if (request->fl_flags & FL_ACCESS)
		goto find_conflict;

	list_for_each_entry(fl, &ctx->flc_flock, fl_list) {
		if (request->fl_file != fl->fl_file)
			continue;
		if (request->fl_type == fl->fl_type)
			goto out;
		found = true;
		locks_delete_lock_ctx(fl, &dispose);
		break;
	}

	if (request->fl_type == F_UNLCK) {
		if ((request->fl_flags & FL_EXISTS) && !found)
			error = -ENOENT;
		goto out;
	}

find_conflict:
	list_for_each_entry(fl, &ctx->flc_flock, fl_list) {
		if (!flock_locks_conflict(request, fl))
			continue;
		error = -EAGAIN;
		if (!(request->fl_flags & FL_SLEEP))
			goto out;
		error = FILE_LOCK_DEFERRED;
		locks_insert_block(fl, request, flock_locks_conflict);
		goto out;
	}
	if (request->fl_flags & FL_ACCESS)
		goto out;
	locks_copy_lock(new_fl, request);
	locks_move_blocks(new_fl, request);
	locks_insert_lock_ctx(new_fl, &ctx->flc_flock);
	new_fl = NULL;
	error = 0;

out:
	spin_unlock(&ctx->flc_lock);
	percpu_up_read(&file_rwsem);
	if (new_fl)
		locks_free_lock(new_fl);
	locks_dispose_list(&dispose);
	trace_flock_lock_inode(inode, request, error);
	return error;
}

static int posix_lock_inode(struct inode *inode, struct file_lock *request,
			    struct file_lock *conflock)
{
	struct file_lock *fl, *tmp;
	struct file_lock *new_fl = NULL;
	struct file_lock *new_fl2 = NULL;
	struct file_lock *left = NULL;
	struct file_lock *right = NULL;
	struct file_lock_context *ctx;
	int error;
	bool added = false;
	LIST_HEAD(dispose);
	void *owner;
	void (*func)(void);

	ctx = locks_get_lock_context(inode, request->fl_type);
	if (!ctx)
		return (request->fl_type == F_UNLCK) ? 0 : -ENOMEM;

	/*
	 * We may need two file_lock structures for this operation,
	 * so we get them in advance to avoid races.
	 *
	 * In some cases we can be sure that no new locks will be needed.
	 */
	if (!(request->fl_flags & FL_ACCESS) &&
	    (request->fl_type != F_UNLCK ||
	     request->fl_start != 0 || request->fl_end != OFFSET_MAX)) {
		new_fl = locks_alloc_lock();
		new_fl2 = locks_alloc_lock();
	}

retry:
	percpu_down_read(&file_rwsem);
	spin_lock(&ctx->flc_lock);
	/*
	 * New lock request. Walk all POSIX locks and look for conflicts. If
	 * there are any, either return error or put the request on the
	 * blocker's list of waiters and the global blocked_hash.
	 */
	if (request->fl_type != F_UNLCK) {
		list_for_each_entry(fl, &ctx->flc_posix, fl_list) {
			if (!posix_locks_conflict(request, fl))
				continue;
			if (fl->fl_lmops && fl->fl_lmops->lm_lock_expirable
				&& (*fl->fl_lmops->lm_lock_expirable)(fl)) {
				owner = fl->fl_lmops->lm_mod_owner;
				func = fl->fl_lmops->lm_expire_lock;
				__module_get(owner);
				spin_unlock(&ctx->flc_lock);
				percpu_up_read(&file_rwsem);
				(*func)();
				module_put(owner);
				goto retry;
			}
			if (conflock)
				locks_copy_conflock(conflock, fl);
			error = -EAGAIN;
			if (!(request->fl_flags & FL_SLEEP))
				goto out;
			/*
			 * Deadlock detection and insertion into the blocked
			 * locks list must be done while holding the same lock!
			 */
			error = -EDEADLK;
			spin_lock(&blocked_lock_lock);
			/*
			 * Ensure that we don't find any locks blocked on this
			 * request during deadlock detection.
			 */
			__locks_wake_up_blocks(request);
			if (likely(!posix_locks_deadlock(request, fl))) {
				error = FILE_LOCK_DEFERRED;
				__locks_insert_block(fl, request,
						     posix_locks_conflict);
			}
			spin_unlock(&blocked_lock_lock);
			goto out;
		}
	}

	/* If we're just looking for a conflict, we're done. */
	error = 0;
	if (request->fl_flags & FL_ACCESS)
		goto out;

	/* Find the first old lock with the same owner as the new lock */
	list_for_each_entry(fl, &ctx->flc_posix, fl_list) {
		if (posix_same_owner(request, fl))
			break;
	}

	/* Process locks with this owner. */
	list_for_each_entry_safe_from(fl, tmp, &ctx->flc_posix, fl_list) {
		if (!posix_same_owner(request, fl))
			break;

		/* Detect adjacent or overlapping regions (if same lock type) */
		if (request->fl_type == fl->fl_type) {
			/* In all comparisons of start vs end, use
			 * "start - 1" rather than "end + 1". If end
			 * is OFFSET_MAX, end + 1 will become negative.
			 */
			if (fl->fl_end < request->fl_start - 1)
				continue;
			/* If the next lock in the list has entirely bigger
			 * addresses than the new one, insert the lock here.
			 */
			if (fl->fl_start - 1 > request->fl_end)
				break;

			/* If we come here, the new and old lock are of the
			 * same type and adjacent or overlapping. Make one
			 * lock yielding from the lower start address of both
			 * locks to the higher end address.
			 */
			if (fl->fl_start > request->fl_start)
				fl->fl_start = request->fl_start;
			else
				request->fl_start = fl->fl_start;
			if (fl->fl_end < request->fl_end)
				fl->fl_end = request->fl_end;
			else
				request->fl_end = fl->fl_end;
			if (added) {
				locks_delete_lock_ctx(fl, &dispose);
				continue;
			}
			request = fl;
			added = true;
		} else {
			/* Processing for different lock types is a bit
			 * more complex.
			 */
			if (fl->fl_end < request->fl_start)
				continue;
			if (fl->fl_start > request->fl_end)
				break;
			if (request->fl_type == F_UNLCK)
				added = true;
			if (fl->fl_start < request->fl_start)
				left = fl;
			/* If the next lock in the list has a higher end
			 * address than the new one, insert the new one here.
			 */
			if (fl->fl_end > request->fl_end) {
				right = fl;
				break;
			}
			if (fl->fl_start >= request->fl_start) {
				/* The new lock completely replaces an old
				 * one (This may happen several times).
				 */
				if (added) {
					locks_delete_lock_ctx(fl, &dispose);
					continue;
				}
				/*
				 * Replace the old lock with new_fl, and
				 * remove the old one. It's safe to do the
				 * insert here since we know that we won't be
				 * using new_fl later, and that the lock is
				 * just replacing an existing lock.
				 */
				error = -ENOLCK;
				if (!new_fl)
					goto out;
				locks_copy_lock(new_fl, request);
				locks_move_blocks(new_fl, request);
				request = new_fl;
				new_fl = NULL;
				locks_insert_lock_ctx(request, &fl->fl_list);
				locks_delete_lock_ctx(fl, &dispose);
				added = true;
			}
		}
	}

	/*
	 * The above code only modifies existing locks in case of merging or
	 * replacing. If new lock(s) need to be inserted all modifications are
	 * done below this, so it's still safe to bail out.
	 */
	error = -ENOLCK; /* "no luck" */
	if (right && left == right && !new_fl2)
		goto out;

	error = 0;
	if (!added) {
		if (request->fl_type == F_UNLCK) {
			if (request->fl_flags & FL_EXISTS)
				error = -ENOENT;
			goto out;
		}

		if (!new_fl) {
			error = -ENOLCK;
			goto out;
		}
		locks_copy_lock(new_fl, request);
		locks_move_blocks(new_fl, request);
		locks_insert_lock_ctx(new_fl, &fl->fl_list);
		fl = new_fl;
		new_fl = NULL;
	}
	if (right) {
		if (left == right) {
			/* The new lock breaks the old one in two pieces,
			 * so we have to use the second new lock.
			 */
			left = new_fl2;
			new_fl2 = NULL;
			locks_copy_lock(left, right);
			locks_insert_lock_ctx(left, &fl->fl_list);
		}
		right->fl_start = request->fl_end + 1;
		locks_wake_up_blocks(right);
	}
	if (left) {
		left->fl_end = request->fl_start - 1;
		locks_wake_up_blocks(left);
	}
out:
	spin_unlock(&ctx->flc_lock);
	percpu_up_read(&file_rwsem);
	/*
	 * Free any unused locks.
	 */
	if (new_fl)
		locks_free_lock(new_fl);
	if (new_fl2)
		locks_free_lock(new_fl2);
	locks_dispose_list(&dispose);
	trace_posix_lock_inode(inode, request, error);

	return error;
}

/**
 * posix_lock_file - Apply a POSIX-style lock to a file
 * @filp: The file to apply the lock to
 * @fl: The lock to be applied
 * @conflock: Place to return a copy of the conflicting lock, if found.
 *
 * Add a POSIX style lock to a file.
 * We merge adjacent & overlapping locks whenever possible.
 * POSIX locks are sorted by owner task, then by starting address
 *
 * Note that if called with an FL_EXISTS argument, the caller may determine
 * whether or not a lock was successfully freed by testing the return
 * value for -ENOENT.
 */
int posix_lock_file(struct file *filp, struct file_lock *fl,
		    struct file_lock *conflock)
{
	return posix_lock_inode(file_inode(filp), fl, conflock);
}
EXPORT_SYMBOL(posix_lock_file);
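
/*
 * Sketch of the splitting behaviour implemented above (illustrative
 * values): if an owner holds a write lock on bytes 0..99 and unlocks
 * 40..59, the old lock becomes the "left" piece (0..39) and new_fl2
 * becomes the "right" piece (60..99):
 *
 *	struct flock fl = {
 *		.l_type   = F_UNLCK,
 *		.l_whence = SEEK_SET,
 *		.l_start  = 40,
 *		.l_len    = 20,
 *	};
 *	fcntl(fd, F_SETLK, &fl);	// may split the existing lock
 *
 * This is why an unlock in the middle of a held range can fail with
 * -ENOLCK: it needs a spare file_lock for the second piece.
 */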

/**
 * posix_lock_inode_wait - Apply a POSIX-style lock to a file
 * @inode: inode of file to which lock request should be applied
 * @fl: The lock to be applied
 *
 * Apply a POSIX style lock request to an inode.
 */
static int posix_lock_inode_wait(struct inode *inode, struct file_lock *fl)
{
	int error;

	might_sleep();
	for (;;) {
		error = posix_lock_inode(inode, fl, NULL);
		if (error != FILE_LOCK_DEFERRED)
			break;
		error = wait_event_interruptible(fl->fl_wait,
						 list_empty(&fl->fl_blocked_member));
		if (error)
			break;
	}
	locks_delete_block(fl);
	return error;
}

static void lease_clear_pending(struct file_lock *fl, int arg)
{
	switch (arg) {
	case F_UNLCK:
		fl->fl_flags &= ~FL_UNLOCK_PENDING;
		fallthrough;
	case F_RDLCK:
		fl->fl_flags &= ~FL_DOWNGRADE_PENDING;
	}
}

/* We already had a lease on this file; just change its type */
int lease_modify(struct file_lock *fl, int arg, struct list_head *dispose)
{
	int error = assign_type(fl, arg);

	if (error)
		return error;
	lease_clear_pending(fl, arg);
	locks_wake_up_blocks(fl);
	if (arg == F_UNLCK) {
		struct file *filp = fl->fl_file;

		f_delown(filp);
		filp->f_owner.signum = 0;
		fasync_helper(0, fl->fl_file, 0, &fl->fl_fasync);
		if (fl->fl_fasync != NULL) {
			printk(KERN_ERR "locks_delete_lock: fasync == %p\n", fl->fl_fasync);
			fl->fl_fasync = NULL;
		}
		locks_delete_lock_ctx(fl, dispose);
	}
	return 0;
}
EXPORT_SYMBOL(lease_modify);

static bool past_time(unsigned long then)
{
	if (!then)
		/* 0 is a special value meaning "this never expires": */
		return false;
	return time_after(jiffies, then);
}

static void time_out_leases(struct inode *inode, struct list_head *dispose)
{
	struct file_lock_context *ctx = inode->i_flctx;
	struct file_lock *fl, *tmp;

	lockdep_assert_held(&ctx->flc_lock);

	list_for_each_entry_safe(fl, tmp, &ctx->flc_lease, fl_list) {
		trace_time_out_leases(inode, fl);
		if (past_time(fl->fl_downgrade_time))
			lease_modify(fl, F_RDLCK, dispose);
		if (past_time(fl->fl_break_time))
			lease_modify(fl, F_UNLCK, dispose);
	}
}

static bool leases_conflict(struct file_lock *lease, struct file_lock *breaker)
{
	bool rc;

	if (lease->fl_lmops->lm_breaker_owns_lease
			&& lease->fl_lmops->lm_breaker_owns_lease(lease))
		return false;
	if ((breaker->fl_flags & FL_LAYOUT) != (lease->fl_flags & FL_LAYOUT)) {
		rc = false;
		goto trace;
	}
	if ((breaker->fl_flags & FL_DELEG) && (lease->fl_flags & FL_LEASE)) {
		rc = false;
		goto trace;
	}

	rc = locks_conflict(breaker, lease);
trace:
	trace_leases_conflict(rc, lease, breaker);
	return rc;
}

static bool
any_leases_conflict(struct inode *inode, struct file_lock *breaker)
{
	struct file_lock_context *ctx = inode->i_flctx;
	struct file_lock *fl;

	lockdep_assert_held(&ctx->flc_lock);

	list_for_each_entry(fl, &ctx->flc_lease, fl_list) {
		if (leases_conflict(fl, breaker))
			return true;
	}
	return false;
}

/**
 * __break_lease - revoke all outstanding leases on file
 * @inode: the inode of the file to return
 * @mode: O_RDONLY: break only write leases; O_WRONLY or O_RDWR:
 *	    break all leases
 * @type: FL_LEASE: break leases and delegations; FL_DELEG: break
 *	    only delegations
 *
 * break_lease (inlined for speed) has checked there already is at least
 * some kind of lock (maybe a lease) on this file. Leases are broken on
 * a call to open() or truncate(). This function can sleep unless you
 * specified %O_NONBLOCK to your open().
 */
int __break_lease(struct inode *inode, unsigned int mode, unsigned int type)
{
	int error = 0;
	struct file_lock_context *ctx;
	struct file_lock *new_fl, *fl, *tmp;
	unsigned long break_time;
	int want_write = (mode & O_ACCMODE) != O_RDONLY;
	LIST_HEAD(dispose);

	new_fl = lease_alloc(NULL, want_write ? F_WRLCK : F_RDLCK);
	if (IS_ERR(new_fl))
		return PTR_ERR(new_fl);
	new_fl->fl_flags = type;

	/* typically we will check that ctx is non-NULL before calling */
	ctx = locks_inode_context(inode);
	if (!ctx) {
		WARN_ON_ONCE(1);
		goto free_lock;
	}

	percpu_down_read(&file_rwsem);
	spin_lock(&ctx->flc_lock);

	time_out_leases(inode, &dispose);

	if (!any_leases_conflict(inode, new_fl))
		goto out;

	break_time = 0;
	if (lease_break_time > 0) {
		break_time = jiffies + lease_break_time * HZ;
		if (break_time == 0)
			break_time++;	/* so that 0 means no break time */
	}

	list_for_each_entry_safe(fl, tmp, &ctx->flc_lease, fl_list) {
		if (!leases_conflict(fl, new_fl))
			continue;
		if (want_write) {
			if (fl->fl_flags & FL_UNLOCK_PENDING)
				continue;
			fl->fl_flags |= FL_UNLOCK_PENDING;
			fl->fl_break_time = break_time;
		} else {
			if (lease_breaking(fl))
				continue;
			fl->fl_flags |= FL_DOWNGRADE_PENDING;
			fl->fl_downgrade_time = break_time;
		}
		if (fl->fl_lmops->lm_break(fl))
			locks_delete_lock_ctx(fl, &dispose);
	}

	if (list_empty(&ctx->flc_lease))
		goto out;

	if (mode & O_NONBLOCK) {
		trace_break_lease_noblock(inode, new_fl);
		error = -EWOULDBLOCK;
		goto out;
	}

restart:
	fl = list_first_entry(&ctx->flc_lease, struct file_lock, fl_list);
	break_time = fl->fl_break_time;
	if (break_time != 0)
		break_time -= jiffies;
	if (break_time == 0)
		break_time++;
	locks_insert_block(fl, new_fl, leases_conflict);
	trace_break_lease_block(inode, new_fl);
	spin_unlock(&ctx->flc_lock);
	percpu_up_read(&file_rwsem);

	locks_dispose_list(&dispose);
	error = wait_event_interruptible_timeout(new_fl->fl_wait,
						 list_empty(&new_fl->fl_blocked_member),
						 break_time);

	percpu_down_read(&file_rwsem);
	spin_lock(&ctx->flc_lock);
	trace_break_lease_unblock(inode, new_fl);
	locks_delete_block(new_fl);
	if (error >= 0) {
		/*
		 * Wait for the next conflicting lease that has not been
		 * broken yet
		 */
		if (error == 0)
			time_out_leases(inode, &dispose);
		if (any_leases_conflict(inode, new_fl))
			goto restart;
		error = 0;
	}
out:
	spin_unlock(&ctx->flc_lock);
	percpu_up_read(&file_rwsem);
	locks_dispose_list(&dispose);
free_lock:
	locks_free_lock(new_fl);
	return error;
}
EXPORT_SYMBOL(__break_lease);
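
/*
 * From the breaker's side (standard open(2) semantics, illustrative
 * only): an open that conflicts with a lease either sleeps in
 * __break_lease() until the holder releases or downgrades, or fails
 * fast when O_NONBLOCK was given:
 *
 *	int fd = open("somefile", O_WRONLY | O_NONBLOCK);
 *	// fd == -1 with errno == EWOULDBLOCK while a lease is being broken
 */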

/**
 * lease_get_mtime - update modified time of an inode with exclusive lease
 * @inode: the inode
 * @time:  pointer to a timespec which contains the last modified time
 *
 * This is to force NFS clients to flush their caches for files with
 * exclusive leases. The justification is that if someone has an
 * exclusive lease, then they could be modifying it.
 */
void lease_get_mtime(struct inode *inode, struct timespec64 *time)
{
	bool has_lease = false;
	struct file_lock_context *ctx;
	struct file_lock *fl;

	ctx = locks_inode_context(inode);
	if (ctx && !list_empty_careful(&ctx->flc_lease)) {
		spin_lock(&ctx->flc_lock);
		fl = list_first_entry_or_null(&ctx->flc_lease,
					      struct file_lock, fl_list);
		if (fl && (fl->fl_type == F_WRLCK))
			has_lease = true;
		spin_unlock(&ctx->flc_lock);
	}

	if (has_lease)
		*time = current_time(inode);
}
EXPORT_SYMBOL(lease_get_mtime);

/**
 * fcntl_getlease - Enquire what lease is currently active
 * @filp: the file
 *
 * The value returned by this function will be one of
 * (if no lease break is pending):
 *
 * %F_RDLCK to indicate a shared lease is held.
 *
 * %F_WRLCK to indicate an exclusive lease is held.
 *
 * %F_UNLCK to indicate no lease is held.
 *
 * (if a lease break is pending):
 *
 * %F_RDLCK to indicate an exclusive lease needs to be
 *	    changed to a shared lease (or removed).
 *
 * %F_UNLCK to indicate the lease needs to be removed.
 *
 * XXX: sfr & willy disagree over whether F_INPROGRESS
 * should be returned to userspace.
 */
int fcntl_getlease(struct file *filp)
{
	struct file_lock *fl;
	struct inode *inode = file_inode(filp);
	struct file_lock_context *ctx;
	int type = F_UNLCK;
	LIST_HEAD(dispose);

	ctx = locks_inode_context(inode);
	if (ctx && !list_empty_careful(&ctx->flc_lease)) {
		percpu_down_read(&file_rwsem);
		spin_lock(&ctx->flc_lock);
		time_out_leases(inode, &dispose);
		list_for_each_entry(fl, &ctx->flc_lease, fl_list) {
			if (fl->fl_file != filp)
				continue;
			type = target_leasetype(fl);
			break;
		}
		spin_unlock(&ctx->flc_lock);
		percpu_up_read(&file_rwsem);

		locks_dispose_list(&dispose);
	}
	return type;
}
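
/*
 * Userspace view of the above (standard F_GETLEASE fcntl, illustrative
 * only):
 *
 *	int type = fcntl(fd, F_GETLEASE);
 *	// F_RDLCK, F_WRLCK or F_UNLCK; while a break is pending this
 *	// reports the type the lease is being downgraded/removed to.
 */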

/**
 * check_conflicting_open - see if the given file points to an inode that has
 *			    an existing open that would conflict with the
 *			    desired lease.
 * @filp:	file to check
 * @arg:	type of lease that we're trying to acquire
 * @flags:	current lock flags
 *
 * Check to see if there's an existing open fd on this file that would
 * conflict with the lease we're trying to set.
 */
static int
check_conflicting_open(struct file *filp, const long arg, int flags)
{
	struct inode *inode = file_inode(filp);
	int self_wcount = 0, self_rcount = 0;

	if (flags & FL_LAYOUT)
		return 0;
	if (flags & FL_DELEG)
		/* We leave these checks to the caller */
		return 0;

	if (arg == F_RDLCK)
		return inode_is_open_for_write(inode) ? -EAGAIN : 0;
	else if (arg != F_WRLCK)
		return 0;

	/*
	 * Make sure that only read/write count is from lease requestor.
	 * Note that this will result in denying write leases when i_writecount
	 * is negative, which is what we want. (We shouldn't grant write leases
	 * on files open for execution.)
	 */
	if (filp->f_mode & FMODE_WRITE)
		self_wcount = 1;
	else if (filp->f_mode & FMODE_READ)
		self_rcount = 1;

	if (atomic_read(&inode->i_writecount) != self_wcount ||
	    atomic_read(&inode->i_readcount) != self_rcount)
		return -EAGAIN;

	return 0;
}

static int
generic_add_lease(struct file *filp, long arg, struct file_lock **flp, void **priv)
{
	struct file_lock *fl, *my_fl = NULL, *lease;
	struct inode *inode = file_inode(filp);
	struct file_lock_context *ctx;
	bool is_deleg = (*flp)->fl_flags & FL_DELEG;
	int error;
	LIST_HEAD(dispose);

	lease = *flp;
	trace_generic_add_lease(inode, lease);

	/* Note that arg is never F_UNLCK here */
	ctx = locks_get_lock_context(inode, arg);
	if (!ctx)
		return -ENOMEM;

	/*
	 * In the delegation case we need mutual exclusion with
	 * a number of operations that take the i_mutex. We trylock
	 * because delegations are an optional optimization, and if
	 * there's some chance of a conflict--we'd rather not
	 * bother, maybe that's a sign this just isn't a good file to
	 * hand out a delegation on.
	 */
	if (is_deleg && !inode_trylock(inode))
		return -EAGAIN;

	percpu_down_read(&file_rwsem);
	spin_lock(&ctx->flc_lock);
	time_out_leases(inode, &dispose);
	error = check_conflicting_open(filp, arg, lease->fl_flags);
	if (error)
		goto out;

	/*
	 * At this point, we know that if there is an exclusive
	 * lease on this file, then we hold it on this filp
	 * (otherwise our open of this file would have blocked).
	 * And if we are trying to acquire an exclusive lease,
	 * then the file is not open by anyone (including us)
	 * except for this filp.
	 */
	error = -EAGAIN;
	list_for_each_entry(fl, &ctx->flc_lease, fl_list) {
		if (fl->fl_file == filp &&
		    fl->fl_owner == lease->fl_owner) {
			my_fl = fl;
			continue;
		}

		/*
		 * No exclusive leases if someone else has a lease on
		 * this file:
		 */
		if (arg == F_WRLCK)
			goto out;
		/*
		 * Modifying our existing lease is OK, but not getting a
		 * new lease if someone else is opening for write:
		 */
		if (fl->fl_flags & FL_UNLOCK_PENDING)
			goto out;
	}

	if (my_fl != NULL) {
		lease = my_fl;
		error = lease->fl_lmops->lm_change(lease, arg, &dispose);
		if (error)
			goto out;
		goto out_setup;
	}

	error = -EINVAL;
	if (!leases_enable)
		goto out;

	locks_insert_lock_ctx(lease, &ctx->flc_lease);
	/*
	 * The check in break_lease() is lockless. It's possible for another
	 * open to race in after we did the earlier check for a conflicting
	 * open but before the lease was inserted. Check again for a
	 * conflicting open and cancel the lease if there is one.
	 *
	 * We also add a barrier here to ensure that the insertion of the lock
	 * precedes these checks.
	 */
	smp_mb();
	error = check_conflicting_open(filp, arg, lease->fl_flags);
	if (error) {
		locks_unlink_lock_ctx(lease);
		goto out;
	}

out_setup:
	if (lease->fl_lmops->lm_setup)
		lease->fl_lmops->lm_setup(lease, priv);
out:
	spin_unlock(&ctx->flc_lock);
	percpu_up_read(&file_rwsem);
	locks_dispose_list(&dispose);
	if (is_deleg)
		inode_unlock(inode);
	if (!error && !my_fl)
		*flp = NULL;
	return error;
}

static int generic_delete_lease(struct file *filp, void *owner)
{
	int error = -EAGAIN;
	struct file_lock *fl, *victim = NULL;
	struct inode *inode = file_inode(filp);
	struct file_lock_context *ctx;
	LIST_HEAD(dispose);

	ctx = locks_inode_context(inode);
	if (!ctx) {
		trace_generic_delete_lease(inode, NULL);
		return error;
	}

	percpu_down_read(&file_rwsem);
	spin_lock(&ctx->flc_lock);
	list_for_each_entry(fl, &ctx->flc_lease, fl_list) {
		if (fl->fl_file == filp &&
		    fl->fl_owner == owner) {
			victim = fl;
			break;
		}
	}
	trace_generic_delete_lease(inode, victim);
	if (victim)
		error = fl->fl_lmops->lm_change(victim, F_UNLCK, &dispose);
	spin_unlock(&ctx->flc_lock);
	percpu_up_read(&file_rwsem);
	locks_dispose_list(&dispose);
	return error;
}

/**
 * generic_setlease - sets a lease on an open file
 * @filp:	file pointer
 * @arg:	type of lease to obtain
 * @flp:	input - file_lock to use, output - file_lock inserted
 * @priv:	private data for lm_setup (may be NULL if lm_setup
 *		doesn't require it)
 *
 * The (input) flp->fl_lmops->lm_break function is required
 * by break_lease().
 */
int generic_setlease(struct file *filp, long arg, struct file_lock **flp,
		     void **priv)
{
	struct inode *inode = file_inode(filp);
	vfsuid_t vfsuid = i_uid_into_vfsuid(file_mnt_idmap(filp), inode);
	int error;

	if ((!vfsuid_eq_kuid(vfsuid, current_fsuid())) && !capable(CAP_LEASE))
		return -EACCES;
	if (!S_ISREG(inode->i_mode))
		return -EINVAL;
	error = security_file_lock(filp, arg);
	if (error)
		return error;

	switch (arg) {
	case F_UNLCK:
		return generic_delete_lease(filp, *priv);
	case F_RDLCK:
	case F_WRLCK:
		if (!(*flp)->fl_lmops->lm_break) {
			WARN_ON_ONCE(1);
			return -ENOLCK;
		}

		return generic_add_lease(filp, arg, flp, priv);
	default:
		return -EINVAL;
	}
}
EXPORT_SYMBOL(generic_setlease);

/*
 * Kernel subsystems can register to be notified on any attempt to set
 * a new lease with the lease_notifier_chain. This is used by (e.g.) nfsd
 * to close files that it may have cached when there is an attempt to set a
 * conflicting lease.
 */
static struct srcu_notifier_head lease_notifier_chain;

static inline void
lease_notifier_chain_init(void)
{
	srcu_init_notifier_head(&lease_notifier_chain);
}

static inline void
setlease_notifier(long arg, struct file_lock *lease)
{
	if (arg != F_UNLCK)
		srcu_notifier_call_chain(&lease_notifier_chain, arg, lease);
}

int lease_register_notifier(struct notifier_block *nb)
{
	return srcu_notifier_chain_register(&lease_notifier_chain, nb);
}
EXPORT_SYMBOL_GPL(lease_register_notifier);

void lease_unregister_notifier(struct notifier_block *nb)
{
	srcu_notifier_chain_unregister(&lease_notifier_chain, nb);
}
EXPORT_SYMBOL_GPL(lease_unregister_notifier);

/**
 * vfs_setlease - sets a lease on an open file
 * @filp:	file pointer
 * @arg:	type of lease to obtain
 * @lease:	file_lock to use when adding a lease
 * @priv:	private info for lm_setup when adding a lease (may be
 *		NULL if lm_setup doesn't require it)
 *
 * Call this to establish a lease on the file. The "lease" argument is not
 * used for F_UNLCK requests and may be NULL. For commands that set or alter
 * an existing lease, the ``(*lease)->fl_lmops->lm_break`` operation must be
 * set; if not, this function will return -ENOLCK (and generate a scary-looking
 * stack trace).
 *
 * The "priv" pointer is passed directly to the lm_setup function as-is. It
 * may be NULL if the lm_setup operation doesn't require it.
 */
int
vfs_setlease(struct file *filp, long arg, struct file_lock **lease, void **priv)
{
	if (lease)
		setlease_notifier(arg, *lease);
	if (filp->f_op->setlease)
		return filp->f_op->setlease(filp, arg, lease, priv);
	else
		return generic_setlease(filp, arg, lease, priv);
}
EXPORT_SYMBOL_GPL(vfs_setlease);

static int do_fcntl_add_lease(unsigned int fd, struct file *filp, long arg)
{
	struct file_lock *fl;
	struct fasync_struct *new;
	int error;

	fl = lease_alloc(filp, arg);
	if (IS_ERR(fl))
		return PTR_ERR(fl);

	new = fasync_alloc();
	if (!new) {
		locks_free_lock(fl);
		return -ENOMEM;
	}
	new->fa_fd = fd;

	error = vfs_setlease(filp, arg, &fl, (void **)&new);
	if (fl)
		locks_free_lock(fl);
	if (new)
		fasync_free(new);
	return error;
}

/**
 * fcntl_setlease - sets a lease on an open file
 * @fd: open file descriptor
 * @filp: file pointer
 * @arg: type of lease to obtain
 *
 * Call this fcntl to establish a lease on the file.
 * Note that you also need to call %F_SETSIG to
 * receive a signal when the lease is broken.
 */
int fcntl_setlease(unsigned int fd, struct file *filp, long arg)
{
	if (arg == F_UNLCK)
		return vfs_setlease(filp, F_UNLCK, NULL, (void **)&filp);
	return do_fcntl_add_lease(fd, filp, arg);
}
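
/*
 * A minimal holder-side sketch (standard F_SETLEASE/F_SETSIG usage,
 * illustrative only): take a read lease and learn about breaks via a
 * real-time signal instead of the default SIGIO.
 *
 *	signal(SIGRTMIN, on_break);	// on_break is a hypothetical handler
 *	fcntl(fd, F_SETSIG, SIGRTMIN);
 *	fcntl(fd, F_SETLEASE, F_RDLCK);
 *	// ... when the signal arrives, the holder has lease-break-time
 *	// seconds to finish up and then drop the lease:
 *	fcntl(fd, F_SETLEASE, F_UNLCK);
 */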
1997  */
1998 static int flock_lock_inode_wait(struct inode *inode, struct file_lock *fl)
1999 {
2000 	int error;
2001 	might_sleep();
2002 	for (;;) {
2003 		error = flock_lock_inode(inode, fl);
2004 		if (error != FILE_LOCK_DEFERRED)
2005 			break;
2006 		error = wait_event_interruptible(fl->fl_wait,
2007 				list_empty(&fl->fl_blocked_member));
2008 		if (error)
2009 			break;
2010 	}
2011 	locks_delete_block(fl);
2012 	return error;
2013 }
2014 
2015 /**
2016  * locks_lock_inode_wait - Apply a lock to an inode
2017  * @inode: inode of the file to apply to
2018  * @fl: The lock to be applied
2019  *
2020  * Apply a POSIX or FLOCK style lock request to an inode.
2021  */
2022 int locks_lock_inode_wait(struct inode *inode, struct file_lock *fl)
2023 {
2024 	int res = 0;
2025 	switch (fl->fl_flags & (FL_POSIX|FL_FLOCK)) {
2026 	case FL_POSIX:
2027 		res = posix_lock_inode_wait(inode, fl);
2028 		break;
2029 	case FL_FLOCK:
2030 		res = flock_lock_inode_wait(inode, fl);
2031 		break;
2032 	default:
2033 		BUG();
2034 	}
2035 	return res;
2036 }
2037 EXPORT_SYMBOL(locks_lock_inode_wait);
2038 
2039 /**
2040  * sys_flock - flock() system call.
2041  * @fd: the file descriptor to lock.
2042  * @cmd: the type of lock to apply.
2043  *
2044  * Apply a %FL_FLOCK style lock to an open file descriptor.
2045  * The @cmd can be one of:
2046  *
2047  * - %LOCK_SH -- a shared lock.
2048  * - %LOCK_EX -- an exclusive lock.
2049  * - %LOCK_UN -- remove an existing lock.
2050  * - %LOCK_MAND -- a 'mandatory' flock. (DEPRECATED)
2051  *
2052  * %LOCK_MAND support has been removed from the kernel.
2053  */
2054 SYSCALL_DEFINE2(flock, unsigned int, fd, unsigned int, cmd)
2055 {
2056 	int can_sleep, error, type;
2057 	struct file_lock fl;
2058 	struct fd f;
2059 
2060 	/*
2061 	 * LOCK_MAND locks were broken for a long time in that they never
2062 	 * conflicted with one another and didn't prevent any sort of open,
2063 	 * read or write activity.
2064 	 *
2065 	 * Just ignore these requests now, to preserve legacy behavior, but
2066 	 * throw a warning to let people know that they don't actually work.
2067 	 */
2068 	if (cmd & LOCK_MAND) {
2069 		pr_warn_once("%s(%d): Attempt to set a LOCK_MAND lock via flock(2). This support has been removed and the request ignored.\n", current->comm, current->pid);
2070 		return 0;
2071 	}
2072 
2073 	type = flock_translate_cmd(cmd & ~LOCK_NB);
2074 	if (type < 0)
2075 		return type;
2076 
2077 	error = -EBADF;
2078 	f = fdget(fd);
2079 	if (!f.file)
2080 		return error;
2081 
2082 	if (type != F_UNLCK && !(f.file->f_mode & (FMODE_READ | FMODE_WRITE)))
2083 		goto out_putf;
2084 
2085 	flock_make_lock(f.file, &fl, type);
2086 
2087 	error = security_file_lock(f.file, fl.fl_type);
2088 	if (error)
2089 		goto out_putf;
2090 
2091 	can_sleep = !(cmd & LOCK_NB);
2092 	if (can_sleep)
2093 		fl.fl_flags |= FL_SLEEP;
2094 
2095 	if (f.file->f_op->flock)
2096 		error = f.file->f_op->flock(f.file,
2097 					    (can_sleep) ? F_SETLKW : F_SETLK,
2098 					    &fl);
2099 	else
2100 		error = locks_lock_file_wait(f.file, &fl);
2101 
2102 	locks_release_private(&fl);
2103 out_putf:
2104 	fdput(f);
2105 
2106 	return error;
2107 }
2108 
2109 /**
2110  * vfs_test_lock - test file byte range lock
2111  * @filp: The file to test lock for
2112  * @fl: The lock to test; also used to hold result
2113  *
2114  * Returns -ERRNO on failure. Indicates presence of conflicting lock by
2115  * setting fl->fl_type to something other than F_UNLCK.
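 *
 * A minimal caller-side sketch, probing whether a write lock could be
 * taken (fl is assumed to describe an already-initialized byte range):
 *
 *	fl->fl_type = F_WRLCK;
 *	error = vfs_test_lock(filp, fl);
 *	if (!error && fl->fl_type != F_UNLCK)
 *		...a conflicting lock exists, and fl now describes it...
 *
 * This is exactly the pattern fcntl_getlk() below uses.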
2116  */
2117 int vfs_test_lock(struct file *filp, struct file_lock *fl)
2118 {
2119 	WARN_ON_ONCE(filp != fl->fl_file);
2120 	if (filp->f_op->lock)
2121 		return filp->f_op->lock(filp, F_GETLK, fl);
2122 	posix_test_lock(filp, fl);
2123 	return 0;
2124 }
2125 EXPORT_SYMBOL_GPL(vfs_test_lock);
2126 
2127 /**
2128  * locks_translate_pid - translate a file_lock's fl_pid number into a namespace
2129  * @fl: The file_lock whose fl_pid should be translated
2130  * @ns: The namespace into which the pid should be translated
2131  *
2132  * Used to translate a fl_pid into a namespace virtual pid number
2133  */
2134 static pid_t locks_translate_pid(struct file_lock *fl, struct pid_namespace *ns)
2135 {
2136 	pid_t vnr;
2137 	struct pid *pid;
2138 
2139 	if (IS_OFDLCK(fl))
2140 		return -1;
2141 	if (IS_REMOTELCK(fl))
2142 		return fl->fl_pid;
2143 	/*
2144 	 * If the flock owner process is dead and its pid has already been
2145 	 * freed, the translation below won't work, but we still want to show
2146 	 * the flock owner's pid number in the init pidns.
2147 	 */
2148 	if (ns == &init_pid_ns)
2149 		return (pid_t)fl->fl_pid;
2150 
2151 	rcu_read_lock();
2152 	pid = find_pid_ns(fl->fl_pid, &init_pid_ns);
2153 	vnr = pid_nr_ns(pid, ns);
2154 	rcu_read_unlock();
2155 	return vnr;
2156 }
2157 
2158 static int posix_lock_to_flock(struct flock *flock, struct file_lock *fl)
2159 {
2160 	flock->l_pid = locks_translate_pid(fl, task_active_pid_ns(current));
2161 #if BITS_PER_LONG == 32
2162 	/*
2163 	 * Make sure we can represent the posix lock via
2164 	 * legacy 32bit flock.
2165 	 */
2166 	if (fl->fl_start > OFFT_OFFSET_MAX)
2167 		return -EOVERFLOW;
2168 	if (fl->fl_end != OFFSET_MAX && fl->fl_end > OFFT_OFFSET_MAX)
2169 		return -EOVERFLOW;
2170 #endif
2171 	flock->l_start = fl->fl_start;
2172 	flock->l_len = fl->fl_end == OFFSET_MAX ? 0 :
2173 		fl->fl_end - fl->fl_start + 1;
2174 	flock->l_whence = 0;
2175 	flock->l_type = fl->fl_type;
2176 	return 0;
2177 }
2178 
2179 #if BITS_PER_LONG == 32
2180 static void posix_lock_to_flock64(struct flock64 *flock, struct file_lock *fl)
2181 {
2182 	flock->l_pid = locks_translate_pid(fl, task_active_pid_ns(current));
2183 	flock->l_start = fl->fl_start;
2184 	flock->l_len = fl->fl_end == OFFSET_MAX ? 0 :
2185 		fl->fl_end - fl->fl_start + 1;
2186 	flock->l_whence = 0;
2187 	flock->l_type = fl->fl_type;
2188 }
2189 #endif
2190 
2191 /* Report the first existing lock that would conflict with l.
2192  * This implements the F_GETLK command of fcntl().
2193  */
2194 int fcntl_getlk(struct file *filp, unsigned int cmd, struct flock *flock)
2195 {
2196 	struct file_lock *fl;
2197 	int error;
2198 
2199 	fl = locks_alloc_lock();
2200 	if (fl == NULL)
2201 		return -ENOMEM;
2202 	error = -EINVAL;
2203 	if (flock->l_type != F_RDLCK && flock->l_type != F_WRLCK)
2204 		goto out;
2205 
2206 	error = flock_to_posix_lock(filp, fl, flock);
2207 	if (error)
2208 		goto out;
2209 
2210 	if (cmd == F_OFD_GETLK) {
2211 		error = -EINVAL;
2212 		if (flock->l_pid != 0)
2213 			goto out;
2214 
2215 		fl->fl_flags |= FL_OFDLCK;
2216 		fl->fl_owner = filp;
2217 	}
2218 
2219 	error = vfs_test_lock(filp, fl);
2220 	if (error)
2221 		goto out;
2222 
2223 	flock->l_type = fl->fl_type;
2224 	if (fl->fl_type != F_UNLCK) {
2225 		error = posix_lock_to_flock(flock, fl);
2226 		if (error)
2227 			goto out;
2228 	}
2229 out:
2230 	locks_free_lock(fl);
2231 	return error;
2232 }
2233 
2234 /**
2235  * vfs_lock_file - file byte range lock
2236  * @filp: The file to apply the lock to
2237  * @cmd: type of locking operation (F_SETLK, F_GETLK, etc.)
2238  * @fl: The lock to be applied
2239  * @conf: Place to return a copy of the conflicting lock, if found.
2240  *
2241  * A caller that doesn't care about the conflicting lock may pass NULL
2242  * as the final argument.
2243  *
2244  * If the filesystem defines a private ->lock() method, then @conf will
2245  * be left unchanged; so a caller that cares should initialize it to
2246  * some acceptable default.
2247  *
2248  * To avoid blocking kernel daemons, such as lockd, that need to acquire POSIX
2249  * locks, the ->lock() interface may return asynchronously, before the lock has
2250  * been granted or denied by the underlying filesystem, if (and only if)
2251  * lm_grant is set. Callers expecting ->lock() to return asynchronously
2252  * will only use F_SETLK, not F_SETLKW; they will set FL_SLEEP if (and only if)
2253  * the request is for a blocking lock. When ->lock() does return asynchronously,
2254  * it must return FILE_LOCK_DEFERRED, and call ->lm_grant() when the lock
2255  * request completes.
2256  * If the request is for a non-blocking lock, the filesystem should return
2257  * FILE_LOCK_DEFERRED, then try to get the lock and call the callback routine
2258  * with the result. If the request timed out, the callback routine will return a
2259  * nonzero return code and the filesystem should release the lock. The
2260  * filesystem is also responsible for keeping a corresponding posix lock when it
2261  * grants a lock so the VFS can find out which locks are locally held and do
2262  * the correct lock cleanup when required.
2263  * The underlying filesystem must not drop the kernel lock or call
2264  * ->lm_grant() before returning to the caller with a FILE_LOCK_DEFERRED
2265  * return code.
2266  */
2267 int vfs_lock_file(struct file *filp, unsigned int cmd, struct file_lock *fl, struct file_lock *conf)
2268 {
2269 	WARN_ON_ONCE(filp != fl->fl_file);
2270 	if (filp->f_op->lock)
2271 		return filp->f_op->lock(filp, cmd, fl);
2272 	else
2273 		return posix_lock_file(filp, fl, conf);
2274 }
2275 EXPORT_SYMBOL_GPL(vfs_lock_file);
2276 
2277 static int do_lock_file_wait(struct file *filp, unsigned int cmd,
2278 			     struct file_lock *fl)
2279 {
2280 	int error;
2281 
2282 	error = security_file_lock(filp, fl->fl_type);
2283 	if (error)
2284 		return error;
2285 
2286 	for (;;) {
2287 		error = vfs_lock_file(filp, cmd, fl, NULL);
2288 		if (error != FILE_LOCK_DEFERRED)
2289 			break;
2290 		error = wait_event_interruptible(fl->fl_wait,
2291 					list_empty(&fl->fl_blocked_member));
2292 		if (error)
2293 			break;
2294 	}
2295 	locks_delete_block(fl);
2296 
2297 	return error;
2298 }
2299 
2300 /* Ensure that fl->fl_file has compatible f_mode for F_SETLK calls */
2301 static int
2302 check_fmode_for_setlk(struct file_lock *fl)
2303 {
2304 	switch (fl->fl_type) {
2305 	case F_RDLCK:
2306 		if (!(fl->fl_file->f_mode & FMODE_READ))
2307 			return -EBADF;
2308 		break;
2309 	case F_WRLCK:
2310 		if (!(fl->fl_file->f_mode & FMODE_WRITE))
2311 			return -EBADF;
2312 	}
2313 	return 0;
2314 }
2315 
2316 /* Apply the lock described by l to an open file descriptor.
2317  * This implements both the F_SETLK and F_SETLKW commands of fcntl().
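 *
 * For the OFD variants (F_OFD_SETLK/F_OFD_SETLKW) the caller must pass
 * l_pid == 0, and the resulting lock is owned by the open file description
 * rather than the process. A userspace sketch of taking a whole-file OFD
 * write lock:
 *
 *	struct flock fl = {
 *		.l_type   = F_WRLCK,
 *		.l_whence = SEEK_SET,
 *		.l_start  = 0,
 *		.l_len    = 0,		(0 means "to EOF")
 *		.l_pid    = 0,		(must be 0 for OFD commands)
 *	};
 *	fcntl(fd, F_OFD_SETLK, &fl);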
2318 */ 2319 int fcntl_setlk(unsigned int fd, struct file *filp, unsigned int cmd, 2320 struct flock *flock) 2321 { 2322 struct file_lock *file_lock = locks_alloc_lock(); 2323 struct inode *inode = file_inode(filp); 2324 struct file *f; 2325 int error; 2326 2327 if (file_lock == NULL) 2328 return -ENOLCK; 2329 2330 error = flock_to_posix_lock(filp, file_lock, flock); 2331 if (error) 2332 goto out; 2333 2334 error = check_fmode_for_setlk(file_lock); 2335 if (error) 2336 goto out; 2337 2338 /* 2339 * If the cmd is requesting file-private locks, then set the 2340 * FL_OFDLCK flag and override the owner. 2341 */ 2342 switch (cmd) { 2343 case F_OFD_SETLK: 2344 error = -EINVAL; 2345 if (flock->l_pid != 0) 2346 goto out; 2347 2348 cmd = F_SETLK; 2349 file_lock->fl_flags |= FL_OFDLCK; 2350 file_lock->fl_owner = filp; 2351 break; 2352 case F_OFD_SETLKW: 2353 error = -EINVAL; 2354 if (flock->l_pid != 0) 2355 goto out; 2356 2357 cmd = F_SETLKW; 2358 file_lock->fl_flags |= FL_OFDLCK; 2359 file_lock->fl_owner = filp; 2360 fallthrough; 2361 case F_SETLKW: 2362 file_lock->fl_flags |= FL_SLEEP; 2363 } 2364 2365 error = do_lock_file_wait(filp, cmd, file_lock); 2366 2367 /* 2368 * Attempt to detect a close/fcntl race and recover by releasing the 2369 * lock that was just acquired. There is no need to do that when we're 2370 * unlocking though, or for OFD locks. 2371 */ 2372 if (!error && file_lock->fl_type != F_UNLCK && 2373 !(file_lock->fl_flags & FL_OFDLCK)) { 2374 struct files_struct *files = current->files; 2375 /* 2376 * We need that spin_lock here - it prevents reordering between 2377 * update of i_flctx->flc_posix and check for it done in 2378 * close(). rcu_read_lock() wouldn't do. 2379 */ 2380 spin_lock(&files->file_lock); 2381 f = files_lookup_fd_locked(files, fd); 2382 spin_unlock(&files->file_lock); 2383 if (f != filp) { 2384 file_lock->fl_type = F_UNLCK; 2385 error = do_lock_file_wait(filp, cmd, file_lock); 2386 WARN_ON_ONCE(error); 2387 error = -EBADF; 2388 } 2389 } 2390 out: 2391 trace_fcntl_setlk(inode, file_lock, error); 2392 locks_free_lock(file_lock); 2393 return error; 2394 } 2395 2396 #if BITS_PER_LONG == 32 2397 /* Report the first existing lock that would conflict with l. 2398 * This implements the F_GETLK command of fcntl(). 2399 */ 2400 int fcntl_getlk64(struct file *filp, unsigned int cmd, struct flock64 *flock) 2401 { 2402 struct file_lock *fl; 2403 int error; 2404 2405 fl = locks_alloc_lock(); 2406 if (fl == NULL) 2407 return -ENOMEM; 2408 2409 error = -EINVAL; 2410 if (flock->l_type != F_RDLCK && flock->l_type != F_WRLCK) 2411 goto out; 2412 2413 error = flock64_to_posix_lock(filp, fl, flock); 2414 if (error) 2415 goto out; 2416 2417 if (cmd == F_OFD_GETLK) { 2418 error = -EINVAL; 2419 if (flock->l_pid != 0) 2420 goto out; 2421 2422 fl->fl_flags |= FL_OFDLCK; 2423 fl->fl_owner = filp; 2424 } 2425 2426 error = vfs_test_lock(filp, fl); 2427 if (error) 2428 goto out; 2429 2430 flock->l_type = fl->fl_type; 2431 if (fl->fl_type != F_UNLCK) 2432 posix_lock_to_flock64(flock, fl); 2433 2434 out: 2435 locks_free_lock(fl); 2436 return error; 2437 } 2438 2439 /* Apply the lock described by l to an open file descriptor. 2440 * This implements both the F_SETLK and F_SETLKW commands of fcntl(). 
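 *
 * Both this function and fcntl_setlk() guard against the same
 * close()/fcntl() race, which schematically looks like this (two threads
 * sharing one files_struct):
 *
 *	thread A			thread B
 *	fcntl(fd, F_SETLKW, ...)
 *	  blocks waiting for the lock
 *					close(fd)
 *					  (POSIX locks on this file for
 *					   this owner are torn down)
 *	  lock is finally granted
 *
 * leaving behind a lock that no later close() would remove. Hence, after a
 * successful non-unlock request, we re-check that fd still refers to the
 * same file and, if not, release the lock and return -EBADF.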
2441 */ 2442 int fcntl_setlk64(unsigned int fd, struct file *filp, unsigned int cmd, 2443 struct flock64 *flock) 2444 { 2445 struct file_lock *file_lock = locks_alloc_lock(); 2446 struct file *f; 2447 int error; 2448 2449 if (file_lock == NULL) 2450 return -ENOLCK; 2451 2452 error = flock64_to_posix_lock(filp, file_lock, flock); 2453 if (error) 2454 goto out; 2455 2456 error = check_fmode_for_setlk(file_lock); 2457 if (error) 2458 goto out; 2459 2460 /* 2461 * If the cmd is requesting file-private locks, then set the 2462 * FL_OFDLCK flag and override the owner. 2463 */ 2464 switch (cmd) { 2465 case F_OFD_SETLK: 2466 error = -EINVAL; 2467 if (flock->l_pid != 0) 2468 goto out; 2469 2470 cmd = F_SETLK64; 2471 file_lock->fl_flags |= FL_OFDLCK; 2472 file_lock->fl_owner = filp; 2473 break; 2474 case F_OFD_SETLKW: 2475 error = -EINVAL; 2476 if (flock->l_pid != 0) 2477 goto out; 2478 2479 cmd = F_SETLKW64; 2480 file_lock->fl_flags |= FL_OFDLCK; 2481 file_lock->fl_owner = filp; 2482 fallthrough; 2483 case F_SETLKW64: 2484 file_lock->fl_flags |= FL_SLEEP; 2485 } 2486 2487 error = do_lock_file_wait(filp, cmd, file_lock); 2488 2489 /* 2490 * Attempt to detect a close/fcntl race and recover by releasing the 2491 * lock that was just acquired. There is no need to do that when we're 2492 * unlocking though, or for OFD locks. 2493 */ 2494 if (!error && file_lock->fl_type != F_UNLCK && 2495 !(file_lock->fl_flags & FL_OFDLCK)) { 2496 struct files_struct *files = current->files; 2497 /* 2498 * We need that spin_lock here - it prevents reordering between 2499 * update of i_flctx->flc_posix and check for it done in 2500 * close(). rcu_read_lock() wouldn't do. 2501 */ 2502 spin_lock(&files->file_lock); 2503 f = files_lookup_fd_locked(files, fd); 2504 spin_unlock(&files->file_lock); 2505 if (f != filp) { 2506 file_lock->fl_type = F_UNLCK; 2507 error = do_lock_file_wait(filp, cmd, file_lock); 2508 WARN_ON_ONCE(error); 2509 error = -EBADF; 2510 } 2511 } 2512 out: 2513 locks_free_lock(file_lock); 2514 return error; 2515 } 2516 #endif /* BITS_PER_LONG == 32 */ 2517 2518 /* 2519 * This function is called when the file is being removed 2520 * from the task's fd array. POSIX locks belonging to this task 2521 * are deleted at this time. 2522 */ 2523 void locks_remove_posix(struct file *filp, fl_owner_t owner) 2524 { 2525 int error; 2526 struct inode *inode = file_inode(filp); 2527 struct file_lock lock; 2528 struct file_lock_context *ctx; 2529 2530 /* 2531 * If there are no locks held on this file, we don't need to call 2532 * posix_lock_file(). Another process could be setting a lock on this 2533 * file at the same time, but we wouldn't remove that lock anyway. 
2534 */ 2535 ctx = locks_inode_context(inode); 2536 if (!ctx || list_empty(&ctx->flc_posix)) 2537 return; 2538 2539 locks_init_lock(&lock); 2540 lock.fl_type = F_UNLCK; 2541 lock.fl_flags = FL_POSIX | FL_CLOSE; 2542 lock.fl_start = 0; 2543 lock.fl_end = OFFSET_MAX; 2544 lock.fl_owner = owner; 2545 lock.fl_pid = current->tgid; 2546 lock.fl_file = filp; 2547 lock.fl_ops = NULL; 2548 lock.fl_lmops = NULL; 2549 2550 error = vfs_lock_file(filp, F_SETLK, &lock, NULL); 2551 2552 if (lock.fl_ops && lock.fl_ops->fl_release_private) 2553 lock.fl_ops->fl_release_private(&lock); 2554 trace_locks_remove_posix(inode, &lock, error); 2555 } 2556 EXPORT_SYMBOL(locks_remove_posix); 2557 2558 /* The i_flctx must be valid when calling into here */ 2559 static void 2560 locks_remove_flock(struct file *filp, struct file_lock_context *flctx) 2561 { 2562 struct file_lock fl; 2563 struct inode *inode = file_inode(filp); 2564 2565 if (list_empty(&flctx->flc_flock)) 2566 return; 2567 2568 flock_make_lock(filp, &fl, F_UNLCK); 2569 fl.fl_flags |= FL_CLOSE; 2570 2571 if (filp->f_op->flock) 2572 filp->f_op->flock(filp, F_SETLKW, &fl); 2573 else 2574 flock_lock_inode(inode, &fl); 2575 2576 if (fl.fl_ops && fl.fl_ops->fl_release_private) 2577 fl.fl_ops->fl_release_private(&fl); 2578 } 2579 2580 /* The i_flctx must be valid when calling into here */ 2581 static void 2582 locks_remove_lease(struct file *filp, struct file_lock_context *ctx) 2583 { 2584 struct file_lock *fl, *tmp; 2585 LIST_HEAD(dispose); 2586 2587 if (list_empty(&ctx->flc_lease)) 2588 return; 2589 2590 percpu_down_read(&file_rwsem); 2591 spin_lock(&ctx->flc_lock); 2592 list_for_each_entry_safe(fl, tmp, &ctx->flc_lease, fl_list) 2593 if (filp == fl->fl_file) 2594 lease_modify(fl, F_UNLCK, &dispose); 2595 spin_unlock(&ctx->flc_lock); 2596 percpu_up_read(&file_rwsem); 2597 2598 locks_dispose_list(&dispose); 2599 } 2600 2601 /* 2602 * This function is called on the last close of an open file. 2603 */ 2604 void locks_remove_file(struct file *filp) 2605 { 2606 struct file_lock_context *ctx; 2607 2608 ctx = locks_inode_context(file_inode(filp)); 2609 if (!ctx) 2610 return; 2611 2612 /* remove any OFD locks */ 2613 locks_remove_posix(filp, filp); 2614 2615 /* remove flock locks */ 2616 locks_remove_flock(filp, ctx); 2617 2618 /* remove any leases */ 2619 locks_remove_lease(filp, ctx); 2620 2621 spin_lock(&ctx->flc_lock); 2622 locks_check_ctx_file_list(filp, &ctx->flc_posix, "POSIX"); 2623 locks_check_ctx_file_list(filp, &ctx->flc_flock, "FLOCK"); 2624 locks_check_ctx_file_list(filp, &ctx->flc_lease, "LEASE"); 2625 spin_unlock(&ctx->flc_lock); 2626 } 2627 2628 /** 2629 * vfs_cancel_lock - file byte range unblock lock 2630 * @filp: The file to apply the unblock to 2631 * @fl: The lock to be unblocked 2632 * 2633 * Used by lock managers to cancel blocked requests 2634 */ 2635 int vfs_cancel_lock(struct file *filp, struct file_lock *fl) 2636 { 2637 WARN_ON_ONCE(filp != fl->fl_file); 2638 if (filp->f_op->lock) 2639 return filp->f_op->lock(filp, F_CANCELLK, fl); 2640 return 0; 2641 } 2642 EXPORT_SYMBOL_GPL(vfs_cancel_lock); 2643 2644 /** 2645 * vfs_inode_has_locks - are any file locks held on @inode? 2646 * @inode: inode to check for locks 2647 * 2648 * Return true if there are any FL_POSIX or FL_FLOCK locks currently 2649 * set on @inode. 
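 *
 * A minimal sketch of the intended use (hypothetical caller, not taken
 * from this file), e.g. a filesystem refusing some operation while locks
 * are outstanding:
 *
 *	if (vfs_inode_has_locks(inode))
 *		return -EBUSY;
 *
 * The answer is only a snapshot: flc_lock is dropped before returning, so
 * a new lock may be set immediately afterwards.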
2650 */ 2651 bool vfs_inode_has_locks(struct inode *inode) 2652 { 2653 struct file_lock_context *ctx; 2654 bool ret; 2655 2656 ctx = locks_inode_context(inode); 2657 if (!ctx) 2658 return false; 2659 2660 spin_lock(&ctx->flc_lock); 2661 ret = !list_empty(&ctx->flc_posix) || !list_empty(&ctx->flc_flock); 2662 spin_unlock(&ctx->flc_lock); 2663 return ret; 2664 } 2665 EXPORT_SYMBOL_GPL(vfs_inode_has_locks); 2666 2667 #ifdef CONFIG_PROC_FS 2668 #include <linux/proc_fs.h> 2669 #include <linux/seq_file.h> 2670 2671 struct locks_iterator { 2672 int li_cpu; 2673 loff_t li_pos; 2674 }; 2675 2676 static void lock_get_status(struct seq_file *f, struct file_lock *fl, 2677 loff_t id, char *pfx, int repeat) 2678 { 2679 struct inode *inode = NULL; 2680 unsigned int fl_pid; 2681 struct pid_namespace *proc_pidns = proc_pid_ns(file_inode(f->file)->i_sb); 2682 int type; 2683 2684 fl_pid = locks_translate_pid(fl, proc_pidns); 2685 /* 2686 * If lock owner is dead (and pid is freed) or not visible in current 2687 * pidns, zero is shown as a pid value. Check lock info from 2688 * init_pid_ns to get saved lock pid value. 2689 */ 2690 2691 if (fl->fl_file != NULL) 2692 inode = file_inode(fl->fl_file); 2693 2694 seq_printf(f, "%lld: ", id); 2695 2696 if (repeat) 2697 seq_printf(f, "%*s", repeat - 1 + (int)strlen(pfx), pfx); 2698 2699 if (IS_POSIX(fl)) { 2700 if (fl->fl_flags & FL_ACCESS) 2701 seq_puts(f, "ACCESS"); 2702 else if (IS_OFDLCK(fl)) 2703 seq_puts(f, "OFDLCK"); 2704 else 2705 seq_puts(f, "POSIX "); 2706 2707 seq_printf(f, " %s ", 2708 (inode == NULL) ? "*NOINODE*" : "ADVISORY "); 2709 } else if (IS_FLOCK(fl)) { 2710 seq_puts(f, "FLOCK ADVISORY "); 2711 } else if (IS_LEASE(fl)) { 2712 if (fl->fl_flags & FL_DELEG) 2713 seq_puts(f, "DELEG "); 2714 else 2715 seq_puts(f, "LEASE "); 2716 2717 if (lease_breaking(fl)) 2718 seq_puts(f, "BREAKING "); 2719 else if (fl->fl_file) 2720 seq_puts(f, "ACTIVE "); 2721 else 2722 seq_puts(f, "BREAKER "); 2723 } else { 2724 seq_puts(f, "UNKNOWN UNKNOWN "); 2725 } 2726 type = IS_LEASE(fl) ? target_leasetype(fl) : fl->fl_type; 2727 2728 seq_printf(f, "%s ", (type == F_WRLCK) ? "WRITE" : 2729 (type == F_RDLCK) ? 
"READ" : "UNLCK"); 2730 if (inode) { 2731 /* userspace relies on this representation of dev_t */ 2732 seq_printf(f, "%d %02x:%02x:%lu ", fl_pid, 2733 MAJOR(inode->i_sb->s_dev), 2734 MINOR(inode->i_sb->s_dev), inode->i_ino); 2735 } else { 2736 seq_printf(f, "%d <none>:0 ", fl_pid); 2737 } 2738 if (IS_POSIX(fl)) { 2739 if (fl->fl_end == OFFSET_MAX) 2740 seq_printf(f, "%Ld EOF\n", fl->fl_start); 2741 else 2742 seq_printf(f, "%Ld %Ld\n", fl->fl_start, fl->fl_end); 2743 } else { 2744 seq_puts(f, "0 EOF\n"); 2745 } 2746 } 2747 2748 static struct file_lock *get_next_blocked_member(struct file_lock *node) 2749 { 2750 struct file_lock *tmp; 2751 2752 /* NULL node or root node */ 2753 if (node == NULL || node->fl_blocker == NULL) 2754 return NULL; 2755 2756 /* Next member in the linked list could be itself */ 2757 tmp = list_next_entry(node, fl_blocked_member); 2758 if (list_entry_is_head(tmp, &node->fl_blocker->fl_blocked_requests, fl_blocked_member) 2759 || tmp == node) { 2760 return NULL; 2761 } 2762 2763 return tmp; 2764 } 2765 2766 static int locks_show(struct seq_file *f, void *v) 2767 { 2768 struct locks_iterator *iter = f->private; 2769 struct file_lock *cur, *tmp; 2770 struct pid_namespace *proc_pidns = proc_pid_ns(file_inode(f->file)->i_sb); 2771 int level = 0; 2772 2773 cur = hlist_entry(v, struct file_lock, fl_link); 2774 2775 if (locks_translate_pid(cur, proc_pidns) == 0) 2776 return 0; 2777 2778 /* View this crossed linked list as a binary tree, the first member of fl_blocked_requests 2779 * is the left child of current node, the next silibing in fl_blocked_member is the 2780 * right child, we can alse get the parent of current node from fl_blocker, so this 2781 * question becomes traversal of a binary tree 2782 */ 2783 while (cur != NULL) { 2784 if (level) 2785 lock_get_status(f, cur, iter->li_pos, "-> ", level); 2786 else 2787 lock_get_status(f, cur, iter->li_pos, "", level); 2788 2789 if (!list_empty(&cur->fl_blocked_requests)) { 2790 /* Turn left */ 2791 cur = list_first_entry_or_null(&cur->fl_blocked_requests, 2792 struct file_lock, fl_blocked_member); 2793 level++; 2794 } else { 2795 /* Turn right */ 2796 tmp = get_next_blocked_member(cur); 2797 /* Fall back to parent node */ 2798 while (tmp == NULL && cur->fl_blocker != NULL) { 2799 cur = cur->fl_blocker; 2800 level--; 2801 tmp = get_next_blocked_member(cur); 2802 } 2803 cur = tmp; 2804 } 2805 } 2806 2807 return 0; 2808 } 2809 2810 static void __show_fd_locks(struct seq_file *f, 2811 struct list_head *head, int *id, 2812 struct file *filp, struct files_struct *files) 2813 { 2814 struct file_lock *fl; 2815 2816 list_for_each_entry(fl, head, fl_list) { 2817 2818 if (filp != fl->fl_file) 2819 continue; 2820 if (fl->fl_owner != files && 2821 fl->fl_owner != filp) 2822 continue; 2823 2824 (*id)++; 2825 seq_puts(f, "lock:\t"); 2826 lock_get_status(f, fl, *id, "", 0); 2827 } 2828 } 2829 2830 void show_fd_locks(struct seq_file *f, 2831 struct file *filp, struct files_struct *files) 2832 { 2833 struct inode *inode = file_inode(filp); 2834 struct file_lock_context *ctx; 2835 int id = 0; 2836 2837 ctx = locks_inode_context(inode); 2838 if (!ctx) 2839 return; 2840 2841 spin_lock(&ctx->flc_lock); 2842 __show_fd_locks(f, &ctx->flc_flock, &id, filp, files); 2843 __show_fd_locks(f, &ctx->flc_posix, &id, filp, files); 2844 __show_fd_locks(f, &ctx->flc_lease, &id, filp, files); 2845 spin_unlock(&ctx->flc_lock); 2846 } 2847 2848 static void *locks_start(struct seq_file *f, loff_t *pos) 2849 __acquires(&blocked_lock_lock) 2850 { 2851 struct 
locks_iterator *iter = f->private; 2852 2853 iter->li_pos = *pos + 1; 2854 percpu_down_write(&file_rwsem); 2855 spin_lock(&blocked_lock_lock); 2856 return seq_hlist_start_percpu(&file_lock_list.hlist, &iter->li_cpu, *pos); 2857 } 2858 2859 static void *locks_next(struct seq_file *f, void *v, loff_t *pos) 2860 { 2861 struct locks_iterator *iter = f->private; 2862 2863 ++iter->li_pos; 2864 return seq_hlist_next_percpu(v, &file_lock_list.hlist, &iter->li_cpu, pos); 2865 } 2866 2867 static void locks_stop(struct seq_file *f, void *v) 2868 __releases(&blocked_lock_lock) 2869 { 2870 spin_unlock(&blocked_lock_lock); 2871 percpu_up_write(&file_rwsem); 2872 } 2873 2874 static const struct seq_operations locks_seq_operations = { 2875 .start = locks_start, 2876 .next = locks_next, 2877 .stop = locks_stop, 2878 .show = locks_show, 2879 }; 2880 2881 static int __init proc_locks_init(void) 2882 { 2883 proc_create_seq_private("locks", 0, NULL, &locks_seq_operations, 2884 sizeof(struct locks_iterator), NULL); 2885 return 0; 2886 } 2887 fs_initcall(proc_locks_init); 2888 #endif 2889 2890 static int __init filelock_init(void) 2891 { 2892 int i; 2893 2894 flctx_cache = kmem_cache_create("file_lock_ctx", 2895 sizeof(struct file_lock_context), 0, SLAB_PANIC, NULL); 2896 2897 filelock_cache = kmem_cache_create("file_lock_cache", 2898 sizeof(struct file_lock), 0, SLAB_PANIC, NULL); 2899 2900 for_each_possible_cpu(i) { 2901 struct file_lock_list_struct *fll = per_cpu_ptr(&file_lock_list, i); 2902 2903 spin_lock_init(&fll->lock); 2904 INIT_HLIST_HEAD(&fll->hlist); 2905 } 2906 2907 lease_notifier_chain_init(); 2908 return 0; 2909 } 2910 core_initcall(filelock_init); 2911
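/*
 * For reference, lock_get_status() above produces /proc/locks lines of the
 * following shape (field values here are illustrative only):
 *
 *	1: POSIX  ADVISORY  WRITE 1234 08:01:263274 0 EOF
 *	2: FLOCK  ADVISORY  WRITE 5678 08:01:263275 0 EOF
 *	2: -> FLOCK  ADVISORY  WRITE 5679 08:01:263275 0 EOF
 *
 * i.e. ordinal, lock class (POSIX/FLOCK/OFDLCK/LEASE), ADVISORY, type, the
 * owner's pid (-1 for OFD locks), the major:minor:inode triple, and the
 * byte range, with "-> " marking requests still blocked behind the lock
 * they are listed under.
 */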