1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  *  linux/fs/locks.c
4  *
5  * We implement four types of file locks: BSD locks, posix locks, open
6  * file description locks, and leases.  For details about BSD locks,
7  * see the flock(2) man page; for details about the other three, see
8  * fcntl(2).
9  *
10  *
11  * Locking conflicts and dependencies:
12  * If multiple threads attempt to lock the same byte (or flock the same file)
13  * only one can be granted the lock, and the others must wait their turn.
14  * The first lock has been "applied" or "granted"; the others are "waiting"
15  * and are "blocked" by the "applied" lock.
16  *
17  * Waiting and applied locks are all kept in trees whose properties are:
18  *
19  *	- the root of a tree may be an applied or waiting lock.
20  *	- every other node in the tree is a waiting lock that
21  *	  conflicts with every ancestor of that node.
22  *
23  * Every such tree begins life as a waiting singleton which obviously
24  * satisfies the above properties.
25  *
26  * The only ways we modify trees preserve these properties:
27  *
28  *	1. We may add a new leaf node, but only after first verifying that it
29  *	   conflicts with all of its ancestors.
30  *	2. We may remove the root of a tree, creating a new singleton
31  *	   tree from the root and N new trees rooted in the immediate
32  *	   children.
33  *	3. If the root of a tree is not currently an applied lock, we may
34  *	   apply it (if possible).
35  *	4. We may upgrade the root of the tree (either extend its range,
36  *	   or upgrade its entire range from read to write).
37  *
38  * When an applied lock is modified in a way that reduces or downgrades any
39  * part of its range, we remove all its children (2 above).  This particularly
40  * happens when a lock is unlocked.
41  *
42  * For each of those child trees we "wake up" the thread which is
43  * waiting for the lock so it can continue as follows: if the
44  * root of the tree applies, we do so (3).  If it doesn't, it must
45  * conflict with some applied lock.  We remove (wake up) all of its children
46  * (2), and add it as a new leaf to the tree rooted in the applied
47  * lock (1).  We then repeat the process recursively with those
48  * children.
49  *
50  */
51 
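/*
 * A worked example of the rules above (purely illustrative): suppose an
 * applied write lock A covers bytes [0,99], and waiting write requests
 * B on [0,49] and C on [25,74] arrived in that order.  B blocks on A,
 * and C conflicts with both A and B, so it becomes a leaf under B:
 *
 *	A [0,99] (applied)
 *	`-- B [0,49] (waiting)
 *	    `-- C [25,74] (waiting)
 *
 * When A is unlocked we remove it (2), detaching the tree rooted at B.
 * B's owner is woken and, finding no remaining conflict, applies B (3);
 * C still conflicts with B and simply stays blocked beneath it.
 */
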
52 #include <linux/capability.h>
53 #include <linux/file.h>
54 #include <linux/fdtable.h>
55 #include <linux/fs.h>
56 #include <linux/init.h>
57 #include <linux/security.h>
58 #include <linux/slab.h>
59 #include <linux/syscalls.h>
60 #include <linux/time.h>
61 #include <linux/rcupdate.h>
62 #include <linux/pid_namespace.h>
63 #include <linux/hashtable.h>
64 #include <linux/percpu.h>
65 #include <linux/sysctl.h>
66 
67 #define CREATE_TRACE_POINTS
68 #include <trace/events/filelock.h>
69 
70 #include <linux/uaccess.h>
71 
72 #define IS_POSIX(fl)	(fl->fl_flags & FL_POSIX)
73 #define IS_FLOCK(fl)	(fl->fl_flags & FL_FLOCK)
74 #define IS_LEASE(fl)	(fl->fl_flags & (FL_LEASE|FL_DELEG|FL_LAYOUT))
75 #define IS_OFDLCK(fl)	(fl->fl_flags & FL_OFDLCK)
76 #define IS_REMOTELCK(fl)	(fl->fl_pid <= 0)
77 
78 static bool lease_breaking(struct file_lock *fl)
79 {
80 	return fl->fl_flags & (FL_UNLOCK_PENDING | FL_DOWNGRADE_PENDING);
81 }
82 
83 static int target_leasetype(struct file_lock *fl)
84 {
85 	if (fl->fl_flags & FL_UNLOCK_PENDING)
86 		return F_UNLCK;
87 	if (fl->fl_flags & FL_DOWNGRADE_PENDING)
88 		return F_RDLCK;
89 	return fl->fl_type;
90 }
91 
92 static int leases_enable = 1;
93 static int lease_break_time = 45;
94 
95 #ifdef CONFIG_SYSCTL
96 static struct ctl_table locks_sysctls[] = {
97 	{
98 		.procname	= "leases-enable",
99 		.data		= &leases_enable,
100 		.maxlen		= sizeof(int),
101 		.mode		= 0644,
102 		.proc_handler	= proc_dointvec,
103 	},
104 #ifdef CONFIG_MMU
105 	{
106 		.procname	= "lease-break-time",
107 		.data		= &lease_break_time,
108 		.maxlen		= sizeof(int),
109 		.mode		= 0644,
110 		.proc_handler	= proc_dointvec,
111 	},
112 #endif /* CONFIG_MMU */
113 	{}
114 };
115 
116 static int __init init_fs_locks_sysctls(void)
117 {
118 	register_sysctl_init("fs", locks_sysctls);
119 	return 0;
120 }
121 early_initcall(init_fs_locks_sysctls);
122 #endif /* CONFIG_SYSCTL */
123 
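/*
 * These knobs appear under /proc/sys/fs/.  Illustrative usage (shell,
 * not part of this file):
 *
 *	echo 0 > /proc/sys/fs/leases-enable	(refuse new leases)
 *	echo 10 > /proc/sys/fs/lease-break-time	(10s grace on lease break)
 */
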
124 /*
125  * The global file_lock_list is only used for displaying /proc/locks, so we
126  * keep a list on each CPU, with each list protected by its own spinlock.
127  * Global serialization is done using file_rwsem.
128  *
129  * Note that alterations to the list also require that the relevant flc_lock is
130  * held.
131  */
132 struct file_lock_list_struct {
133 	spinlock_t		lock;
134 	struct hlist_head	hlist;
135 };
136 static DEFINE_PER_CPU(struct file_lock_list_struct, file_lock_list);
137 DEFINE_STATIC_PERCPU_RWSEM(file_rwsem);
138 
139 
140 /*
141  * The blocked_hash is used to find POSIX lock loops for deadlock detection.
142  * It is protected by blocked_lock_lock.
143  *
144  * We hash locks by lockowner in order to optimize searching for the lock a
145  * particular lockowner is waiting on.
146  *
147  * FIXME: make this value scale via some heuristic? We generally will want more
148  * buckets when we have more lockowners holding locks, but that's a little
149  * difficult to determine without knowing what the workload will look like.
150  */
151 #define BLOCKED_HASH_BITS	7
152 static DEFINE_HASHTABLE(blocked_hash, BLOCKED_HASH_BITS);
153 
154 /*
155  * This lock protects the blocked_hash. Generally, if you're accessing it, you
156  * want to be holding this lock.
157  *
158  * In addition, it also protects the fl->fl_blocked_requests list, and the
159  * fl->fl_blocker pointer for file_lock structures that are acting as lock
160  * requests (in contrast to those that are acting as records of acquired locks).
161  *
162  * Note that when we acquire this lock in order to change the above fields,
163  * we often hold the flc_lock as well. In certain cases, when reading the fields
164  * protected by this lock, we can skip acquiring it iff we already hold the
165  * flc_lock.
166  */
167 static DEFINE_SPINLOCK(blocked_lock_lock);
168 
169 static struct kmem_cache *flctx_cache __read_mostly;
170 static struct kmem_cache *filelock_cache __read_mostly;
171 
172 static struct file_lock_context *
173 locks_get_lock_context(struct inode *inode, int type)
174 {
175 	struct file_lock_context *ctx;
176 
177 	/* paired with cmpxchg() below */
178 	ctx = smp_load_acquire(&inode->i_flctx);
179 	if (likely(ctx) || type == F_UNLCK)
180 		goto out;
181 
182 	ctx = kmem_cache_alloc(flctx_cache, GFP_KERNEL);
183 	if (!ctx)
184 		goto out;
185 
186 	spin_lock_init(&ctx->flc_lock);
187 	INIT_LIST_HEAD(&ctx->flc_flock);
188 	INIT_LIST_HEAD(&ctx->flc_posix);
189 	INIT_LIST_HEAD(&ctx->flc_lease);
190 
191 	/*
192 	 * Assign the pointer if it's not already assigned. If it is, then
193 	 * free the context we just allocated.
194 	 */
195 	if (cmpxchg(&inode->i_flctx, NULL, ctx)) {
196 		kmem_cache_free(flctx_cache, ctx);
197 		ctx = smp_load_acquire(&inode->i_flctx);
198 	}
199 out:
200 	trace_locks_get_lock_context(inode, type, ctx);
201 	return ctx;
202 }
203 
204 static void
205 locks_dump_ctx_list(struct list_head *list, char *list_type)
206 {
207 	struct file_lock *fl;
208 
209 	list_for_each_entry(fl, list, fl_list) {
210 		pr_warn("%s: fl_owner=%p fl_flags=0x%x fl_type=0x%x fl_pid=%u\n", list_type, fl->fl_owner, fl->fl_flags, fl->fl_type, fl->fl_pid);
211 	}
212 }
213 
214 static void
215 locks_check_ctx_lists(struct inode *inode)
216 {
217 	struct file_lock_context *ctx = inode->i_flctx;
218 
219 	if (unlikely(!list_empty(&ctx->flc_flock) ||
220 		     !list_empty(&ctx->flc_posix) ||
221 		     !list_empty(&ctx->flc_lease))) {
222 		pr_warn("Leaked locks on dev=0x%x:0x%x ino=0x%lx:\n",
223 			MAJOR(inode->i_sb->s_dev), MINOR(inode->i_sb->s_dev),
224 			inode->i_ino);
225 		locks_dump_ctx_list(&ctx->flc_flock, "FLOCK");
226 		locks_dump_ctx_list(&ctx->flc_posix, "POSIX");
227 		locks_dump_ctx_list(&ctx->flc_lease, "LEASE");
228 	}
229 }
230 
231 static void
232 locks_check_ctx_file_list(struct file *filp, struct list_head *list,
233 				char *list_type)
234 {
235 	struct file_lock *fl;
236 	struct inode *inode = locks_inode(filp);
237 
238 	list_for_each_entry(fl, list, fl_list)
239 		if (fl->fl_file == filp)
240 			pr_warn("Leaked %s lock on dev=0x%x:0x%x ino=0x%lx"
241 				" fl_owner=%p fl_flags=0x%x fl_type=0x%x fl_pid=%u\n",
242 				list_type, MAJOR(inode->i_sb->s_dev),
243 				MINOR(inode->i_sb->s_dev), inode->i_ino,
244 				fl->fl_owner, fl->fl_flags, fl->fl_type, fl->fl_pid);
245 }
246 
247 void
248 locks_free_lock_context(struct inode *inode)
249 {
250 	struct file_lock_context *ctx = inode->i_flctx;
251 
252 	if (unlikely(ctx)) {
253 		locks_check_ctx_lists(inode);
254 		kmem_cache_free(flctx_cache, ctx);
255 	}
256 }
257 
258 static void locks_init_lock_heads(struct file_lock *fl)
259 {
260 	INIT_HLIST_NODE(&fl->fl_link);
261 	INIT_LIST_HEAD(&fl->fl_list);
262 	INIT_LIST_HEAD(&fl->fl_blocked_requests);
263 	INIT_LIST_HEAD(&fl->fl_blocked_member);
264 	init_waitqueue_head(&fl->fl_wait);
265 }
266 
267 /* Allocate an empty lock structure. */
268 struct file_lock *locks_alloc_lock(void)
269 {
270 	struct file_lock *fl = kmem_cache_zalloc(filelock_cache, GFP_KERNEL);
271 
272 	if (fl)
273 		locks_init_lock_heads(fl);
274 
275 	return fl;
276 }
277 EXPORT_SYMBOL_GPL(locks_alloc_lock);
278 
279 void locks_release_private(struct file_lock *fl)
280 {
281 	BUG_ON(waitqueue_active(&fl->fl_wait));
282 	BUG_ON(!list_empty(&fl->fl_list));
283 	BUG_ON(!list_empty(&fl->fl_blocked_requests));
284 	BUG_ON(!list_empty(&fl->fl_blocked_member));
285 	BUG_ON(!hlist_unhashed(&fl->fl_link));
286 
287 	if (fl->fl_ops) {
288 		if (fl->fl_ops->fl_release_private)
289 			fl->fl_ops->fl_release_private(fl);
290 		fl->fl_ops = NULL;
291 	}
292 
293 	if (fl->fl_lmops) {
294 		if (fl->fl_lmops->lm_put_owner) {
295 			fl->fl_lmops->lm_put_owner(fl->fl_owner);
296 			fl->fl_owner = NULL;
297 		}
298 		fl->fl_lmops = NULL;
299 	}
300 }
301 EXPORT_SYMBOL_GPL(locks_release_private);
302 
303 /* Free a lock which is not in use. */
304 void locks_free_lock(struct file_lock *fl)
305 {
306 	locks_release_private(fl);
307 	kmem_cache_free(filelock_cache, fl);
308 }
309 EXPORT_SYMBOL(locks_free_lock);
310 
311 static void
312 locks_dispose_list(struct list_head *dispose)
313 {
314 	struct file_lock *fl;
315 
316 	while (!list_empty(dispose)) {
317 		fl = list_first_entry(dispose, struct file_lock, fl_list);
318 		list_del_init(&fl->fl_list);
319 		locks_free_lock(fl);
320 	}
321 }
322 
323 void locks_init_lock(struct file_lock *fl)
324 {
325 	memset(fl, 0, sizeof(struct file_lock));
326 	locks_init_lock_heads(fl);
327 }
328 EXPORT_SYMBOL(locks_init_lock);
329 
330 /*
331  * Initialize a new lock from an existing file_lock structure.
332  */
333 void locks_copy_conflock(struct file_lock *new, struct file_lock *fl)
334 {
335 	new->fl_owner = fl->fl_owner;
336 	new->fl_pid = fl->fl_pid;
337 	new->fl_file = NULL;
338 	new->fl_flags = fl->fl_flags;
339 	new->fl_type = fl->fl_type;
340 	new->fl_start = fl->fl_start;
341 	new->fl_end = fl->fl_end;
342 	new->fl_lmops = fl->fl_lmops;
343 	new->fl_ops = NULL;
344 
345 	if (fl->fl_lmops) {
346 		if (fl->fl_lmops->lm_get_owner)
347 			fl->fl_lmops->lm_get_owner(fl->fl_owner);
348 	}
349 }
350 EXPORT_SYMBOL(locks_copy_conflock);
351 
352 void locks_copy_lock(struct file_lock *new, struct file_lock *fl)
353 {
354 	/* "new" must be a freshly-initialized lock */
355 	WARN_ON_ONCE(new->fl_ops);
356 
357 	locks_copy_conflock(new, fl);
358 
359 	new->fl_file = fl->fl_file;
360 	new->fl_ops = fl->fl_ops;
361 
362 	if (fl->fl_ops) {
363 		if (fl->fl_ops->fl_copy_lock)
364 			fl->fl_ops->fl_copy_lock(new, fl);
365 	}
366 }
367 EXPORT_SYMBOL(locks_copy_lock);
368 
369 static void locks_move_blocks(struct file_lock *new, struct file_lock *fl)
370 {
371 	struct file_lock *f;
372 
373 	/*
374 	 * As ctx->flc_lock is held, new requests cannot be added to
375 	 * ->fl_blocked_requests, so we don't need a lock to check if it
376 	 * is empty.
377 	 */
378 	if (list_empty(&fl->fl_blocked_requests))
379 		return;
380 	spin_lock(&blocked_lock_lock);
381 	list_splice_init(&fl->fl_blocked_requests, &new->fl_blocked_requests);
382 	list_for_each_entry(f, &new->fl_blocked_requests, fl_blocked_member)
383 		f->fl_blocker = new;
384 	spin_unlock(&blocked_lock_lock);
385 }
386 
387 static inline int flock_translate_cmd(int cmd) {
388 	switch (cmd) {
389 	case LOCK_SH:
390 		return F_RDLCK;
391 	case LOCK_EX:
392 		return F_WRLCK;
393 	case LOCK_UN:
394 		return F_UNLCK;
395 	}
396 	return -EINVAL;
397 }
398 
399 /* Fill in a file_lock structure with an appropriate FLOCK lock. */
400 static struct file_lock *
401 flock_make_lock(struct file *filp, unsigned int cmd, struct file_lock *fl)
402 {
403 	int type = flock_translate_cmd(cmd);
404 
405 	if (type < 0)
406 		return ERR_PTR(type);
407 
408 	if (fl == NULL) {
409 		fl = locks_alloc_lock();
410 		if (fl == NULL)
411 			return ERR_PTR(-ENOMEM);
412 	} else {
413 		locks_init_lock(fl);
414 	}
415 
416 	fl->fl_file = filp;
417 	fl->fl_owner = filp;
418 	fl->fl_pid = current->tgid;
419 	fl->fl_flags = FL_FLOCK;
420 	fl->fl_type = type;
421 	fl->fl_end = OFFSET_MAX;
422 
423 	return fl;
424 }
425 
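/*
 * Userspace reaches flock_make_lock() via flock(2).  A minimal,
 * illustrative caller (the path is hypothetical):
 *
 *	#include <sys/file.h>
 *	#include <fcntl.h>
 *
 *	int fd = open("/tmp/lockfile", O_RDWR);
 *	flock(fd, LOCK_EX);	// becomes an F_WRLCK lock over [0,OFFSET_MAX]
 *	flock(fd, LOCK_UN);	// translated to an F_UNLCK request
 */
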
426 static int assign_type(struct file_lock *fl, long type)
427 {
428 	switch (type) {
429 	case F_RDLCK:
430 	case F_WRLCK:
431 	case F_UNLCK:
432 		fl->fl_type = type;
433 		break;
434 	default:
435 		return -EINVAL;
436 	}
437 	return 0;
438 }
439 
440 static int flock64_to_posix_lock(struct file *filp, struct file_lock *fl,
441 				 struct flock64 *l)
442 {
443 	switch (l->l_whence) {
444 	case SEEK_SET:
445 		fl->fl_start = 0;
446 		break;
447 	case SEEK_CUR:
448 		fl->fl_start = filp->f_pos;
449 		break;
450 	case SEEK_END:
451 		fl->fl_start = i_size_read(file_inode(filp));
452 		break;
453 	default:
454 		return -EINVAL;
455 	}
456 	if (l->l_start > OFFSET_MAX - fl->fl_start)
457 		return -EOVERFLOW;
458 	fl->fl_start += l->l_start;
459 	if (fl->fl_start < 0)
460 		return -EINVAL;
461 
462 	/* POSIX-1996 leaves the case l->l_len < 0 undefined;
463 	   POSIX-2001 defines it. */
464 	if (l->l_len > 0) {
465 		if (l->l_len - 1 > OFFSET_MAX - fl->fl_start)
466 			return -EOVERFLOW;
467 		fl->fl_end = fl->fl_start + (l->l_len - 1);
468 
469 	} else if (l->l_len < 0) {
470 		if (fl->fl_start + l->l_len < 0)
471 			return -EINVAL;
472 		fl->fl_end = fl->fl_start - 1;
473 		fl->fl_start += l->l_len;
474 	} else
475 		fl->fl_end = OFFSET_MAX;
476 
477 	fl->fl_owner = current->files;
478 	fl->fl_pid = current->tgid;
479 	fl->fl_file = filp;
480 	fl->fl_flags = FL_POSIX;
481 	fl->fl_ops = NULL;
482 	fl->fl_lmops = NULL;
483 
484 	return assign_type(fl, l->l_type);
485 }
486 
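/*
 * Worked examples of the conversion above, with illustrative values
 * l_whence = SEEK_SET and l_start = 100:
 *
 *	l_len =  50  ->  fl_start = 100, fl_end = 149
 *	l_len =   0  ->  fl_start = 100, fl_end = OFFSET_MAX
 *	l_len = -50  ->  fl_start =  50, fl_end =  99  (POSIX-2001 semantics)
 */
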
487 /* Verify a "struct flock" and copy it to a "struct file_lock" as a POSIX
488  * style lock.
489  */
490 static int flock_to_posix_lock(struct file *filp, struct file_lock *fl,
491 			       struct flock *l)
492 {
493 	struct flock64 ll = {
494 		.l_type = l->l_type,
495 		.l_whence = l->l_whence,
496 		.l_start = l->l_start,
497 		.l_len = l->l_len,
498 	};
499 
500 	return flock64_to_posix_lock(filp, fl, &ll);
501 }
502 
503 /* default lease lock manager operations */
504 static bool
505 lease_break_callback(struct file_lock *fl)
506 {
507 	kill_fasync(&fl->fl_fasync, SIGIO, POLL_MSG);
508 	return false;
509 }
510 
511 static void
512 lease_setup(struct file_lock *fl, void **priv)
513 {
514 	struct file *filp = fl->fl_file;
515 	struct fasync_struct *fa = *priv;
516 
517 	/*
518 	 * fasync_insert_entry() returns the old entry if any. If there was no
519 	 * old entry, then it used "priv" and inserted it into the fasync list.
520 	 * Clear the pointer to indicate that it shouldn't be freed.
521 	 */
522 	if (!fasync_insert_entry(fa->fa_fd, filp, &fl->fl_fasync, fa))
523 		*priv = NULL;
524 
525 	__f_setown(filp, task_pid(current), PIDTYPE_TGID, 0);
526 }
527 
528 static const struct lock_manager_operations lease_manager_ops = {
529 	.lm_break = lease_break_callback,
530 	.lm_change = lease_modify,
531 	.lm_setup = lease_setup,
532 };
533 
534 /*
535  * Initialize a lease, using the default lock manager operations.
536  */
537 static int lease_init(struct file *filp, long type, struct file_lock *fl)
538 {
539 	if (assign_type(fl, type) != 0)
540 		return -EINVAL;
541 
542 	fl->fl_owner = filp;
543 	fl->fl_pid = current->tgid;
544 
545 	fl->fl_file = filp;
546 	fl->fl_flags = FL_LEASE;
547 	fl->fl_start = 0;
548 	fl->fl_end = OFFSET_MAX;
549 	fl->fl_ops = NULL;
550 	fl->fl_lmops = &lease_manager_ops;
551 	return 0;
552 }
553 
554 /* Allocate a file_lock initialised to this type of lease */
555 static struct file_lock *lease_alloc(struct file *filp, long type)
556 {
557 	struct file_lock *fl = locks_alloc_lock();
558 	int error = -ENOMEM;
559 
560 	if (fl == NULL)
561 		return ERR_PTR(error);
562 
563 	error = lease_init(filp, type, fl);
564 	if (error) {
565 		locks_free_lock(fl);
566 		return ERR_PTR(error);
567 	}
568 	return fl;
569 }
570 
571 /* Check if two locks overlap each other.
572  */
573 static inline int locks_overlap(struct file_lock *fl1, struct file_lock *fl2)
574 {
575 	return ((fl1->fl_end >= fl2->fl_start) &&
576 		(fl2->fl_end >= fl1->fl_start));
577 }
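
/*
 * E.g. (illustrative): [0,99] and [50,150] overlap; [0,49] and [50,99]
 * do not, since the first lock ends before the second starts.
 */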
578 
579 /*
580  * Check whether two locks have the same owner.
581  */
582 static int posix_same_owner(struct file_lock *fl1, struct file_lock *fl2)
583 {
584 	return fl1->fl_owner == fl2->fl_owner;
585 }
586 
587 /* Must be called with the flc_lock held! */
588 static void locks_insert_global_locks(struct file_lock *fl)
589 {
590 	struct file_lock_list_struct *fll = this_cpu_ptr(&file_lock_list);
591 
592 	percpu_rwsem_assert_held(&file_rwsem);
593 
594 	spin_lock(&fll->lock);
595 	fl->fl_link_cpu = smp_processor_id();
596 	hlist_add_head(&fl->fl_link, &fll->hlist);
597 	spin_unlock(&fll->lock);
598 }
599 
600 /* Must be called with the flc_lock held! */
601 static void locks_delete_global_locks(struct file_lock *fl)
602 {
603 	struct file_lock_list_struct *fll;
604 
605 	percpu_rwsem_assert_held(&file_rwsem);
606 
607 	/*
608 	 * Avoid taking lock if already unhashed. This is safe since this check
609 	 * is done while holding the flc_lock, and new insertions into the list
610 	 * also require that it be held.
611 	 */
612 	if (hlist_unhashed(&fl->fl_link))
613 		return;
614 
615 	fll = per_cpu_ptr(&file_lock_list, fl->fl_link_cpu);
616 	spin_lock(&fll->lock);
617 	hlist_del_init(&fl->fl_link);
618 	spin_unlock(&fll->lock);
619 }
620 
621 static unsigned long
622 posix_owner_key(struct file_lock *fl)
623 {
624 	return (unsigned long)fl->fl_owner;
625 }
626 
627 static void locks_insert_global_blocked(struct file_lock *waiter)
628 {
629 	lockdep_assert_held(&blocked_lock_lock);
630 
631 	hash_add(blocked_hash, &waiter->fl_link, posix_owner_key(waiter));
632 }
633 
634 static void locks_delete_global_blocked(struct file_lock *waiter)
635 {
636 	lockdep_assert_held(&blocked_lock_lock);
637 
638 	hash_del(&waiter->fl_link);
639 }
640 
641 /* Remove waiter from blocker's block list.
642  * When blocker ends up pointing to itself then the list is empty.
643  *
644  * Must be called with blocked_lock_lock held.
645  */
646 static void __locks_delete_block(struct file_lock *waiter)
647 {
648 	locks_delete_global_blocked(waiter);
649 	list_del_init(&waiter->fl_blocked_member);
650 }
651 
652 static void __locks_wake_up_blocks(struct file_lock *blocker)
653 {
654 	while (!list_empty(&blocker->fl_blocked_requests)) {
655 		struct file_lock *waiter;
656 
657 		waiter = list_first_entry(&blocker->fl_blocked_requests,
658 					  struct file_lock, fl_blocked_member);
659 		__locks_delete_block(waiter);
660 		if (waiter->fl_lmops && waiter->fl_lmops->lm_notify)
661 			waiter->fl_lmops->lm_notify(waiter);
662 		else
663 			wake_up(&waiter->fl_wait);
664 
665 		/*
666 		 * The setting of fl_blocker to NULL marks the "done"
667 		 * point in deleting a block. Paired with acquire at the top
668 		 * of locks_delete_block().
669 		 */
670 		smp_store_release(&waiter->fl_blocker, NULL);
671 	}
672 }
673 
674 /**
675  *	locks_delete_block - stop waiting for a file lock
676  *	@waiter: the lock which was waiting
677  *
678  *	lockd/nfsd need to disconnect the lock while working on it.
679  */
680 int locks_delete_block(struct file_lock *waiter)
681 {
682 	int status = -ENOENT;
683 
684 	/*
685 	 * If fl_blocker is NULL, it won't be set again as this thread "owns"
686 	 * the lock and is the only one that might try to claim the lock.
687 	 *
688 	 * We use acquire/release to manage fl_blocker so that we can
689 	 * optimize away taking the blocked_lock_lock in many cases.
690 	 *
691 	 * The smp_load_acquire guarantees two things:
692 	 *
693 	 * 1/ that fl_blocked_requests can be tested locklessly. If something
694  * was recently added to that list it must have been added in a locked
695  * region *before* the locked region in which fl_blocker was set to NULL.
696 	 *
697 	 * 2/ that no other thread is accessing 'waiter', so it is safe to free
698 	 * it.  __locks_wake_up_blocks is careful not to touch waiter after
699 	 * fl_blocker is released.
700 	 *
701 	 * If a lockless check of fl_blocker shows it to be NULL, we know that
702 	 * no new locks can be inserted into its fl_blocked_requests list, and
703 	 * can avoid doing anything further if the list is empty.
704 	 */
705 	if (!smp_load_acquire(&waiter->fl_blocker) &&
706 	    list_empty(&waiter->fl_blocked_requests))
707 		return status;
708 
709 	spin_lock(&blocked_lock_lock);
710 	if (waiter->fl_blocker)
711 		status = 0;
712 	__locks_wake_up_blocks(waiter);
713 	__locks_delete_block(waiter);
714 
715 	/*
716 	 * The setting of fl_blocker to NULL marks the "done" point in deleting
717 	 * a block. Paired with acquire at the top of this function.
718 	 */
719 	smp_store_release(&waiter->fl_blocker, NULL);
720 	spin_unlock(&blocked_lock_lock);
721 	return status;
722 }
723 EXPORT_SYMBOL(locks_delete_block);
724 
725 /* Insert waiter into blocker's block list.
726  * We use a circular list so that processes can be easily woken up in
727  * the order they blocked. The documentation doesn't require this but
728  * it seems like the reasonable thing to do.
729  *
730  * Must be called with both the flc_lock and blocked_lock_lock held. The
731  * fl_blocked_requests list itself is protected by the blocked_lock_lock,
732  * but by ensuring that the flc_lock is also held on insertions we can avoid
733  * taking the blocked_lock_lock in some cases when we see that the
734  * fl_blocked_requests list is empty.
735  *
736  * Rather than just adding to the list, we check for conflicts with any existing
737  * waiters, and add beneath any waiter that blocks the new waiter.
738  * Thus wakeups don't happen until needed.
739  */
740 static void __locks_insert_block(struct file_lock *blocker,
741 				 struct file_lock *waiter,
742 				 bool conflict(struct file_lock *,
743 					       struct file_lock *))
744 {
745 	struct file_lock *fl;
746 	BUG_ON(!list_empty(&waiter->fl_blocked_member));
747 
748 new_blocker:
749 	list_for_each_entry(fl, &blocker->fl_blocked_requests, fl_blocked_member)
750 		if (conflict(fl, waiter)) {
751 			blocker = fl;
752 			goto new_blocker;
753 		}
754 	waiter->fl_blocker = blocker;
755 	list_add_tail(&waiter->fl_blocked_member, &blocker->fl_blocked_requests);
756 	if (IS_POSIX(blocker) && !IS_OFDLCK(blocker))
757 		locks_insert_global_blocked(waiter);
758 
759 	/* The requests in waiter->fl_blocked_requests are known to conflict
760 	 * with waiter, but might not conflict with blocker, or the requests
761 	 * and lock which block it.  So they all need to be woken.
762 	 */
763 	__locks_wake_up_blocks(waiter);
764 }
765 
766 /* Must be called with flc_lock held. */
767 static void locks_insert_block(struct file_lock *blocker,
768 			       struct file_lock *waiter,
769 			       bool conflict(struct file_lock *,
770 					     struct file_lock *))
771 {
772 	spin_lock(&blocked_lock_lock);
773 	__locks_insert_block(blocker, waiter, conflict);
774 	spin_unlock(&blocked_lock_lock);
775 }
776 
777 /*
778  * Wake up processes blocked waiting for blocker.
779  *
780  * Must be called with the inode->flc_lock held!
781  */
782 static void locks_wake_up_blocks(struct file_lock *blocker)
783 {
784 	/*
785 	 * Avoid taking global lock if list is empty. This is safe since new
786 	 * blocked requests are only added to the list under the flc_lock, and
787 	 * the flc_lock is always held here. Note that removal from the
788 	 * fl_blocked_requests list does not require the flc_lock, so we must
789 	 * recheck list_empty() after acquiring the blocked_lock_lock.
790 	 */
791 	if (list_empty(&blocker->fl_blocked_requests))
792 		return;
793 
794 	spin_lock(&blocked_lock_lock);
795 	__locks_wake_up_blocks(blocker);
796 	spin_unlock(&blocked_lock_lock);
797 }
798 
799 static void
800 locks_insert_lock_ctx(struct file_lock *fl, struct list_head *before)
801 {
802 	list_add_tail(&fl->fl_list, before);
803 	locks_insert_global_locks(fl);
804 }
805 
806 static void
807 locks_unlink_lock_ctx(struct file_lock *fl)
808 {
809 	locks_delete_global_locks(fl);
810 	list_del_init(&fl->fl_list);
811 	locks_wake_up_blocks(fl);
812 }
813 
814 static void
815 locks_delete_lock_ctx(struct file_lock *fl, struct list_head *dispose)
816 {
817 	locks_unlink_lock_ctx(fl);
818 	if (dispose)
819 		list_add(&fl->fl_list, dispose);
820 	else
821 		locks_free_lock(fl);
822 }
823 
824 /* Determine if lock sys_fl blocks lock caller_fl. Common functionality:
825  * check the shared/exclusive status of overlapping locks.
826  */
827 static bool locks_conflict(struct file_lock *caller_fl,
828 			   struct file_lock *sys_fl)
829 {
830 	if (sys_fl->fl_type == F_WRLCK)
831 		return true;
832 	if (caller_fl->fl_type == F_WRLCK)
833 		return true;
834 	return false;
835 }
836 
837 /* Determine if lock sys_fl blocks lock caller_fl. POSIX-specific
838  * checks are done before calling locks_conflict().
839  */
840 static bool posix_locks_conflict(struct file_lock *caller_fl,
841 				 struct file_lock *sys_fl)
842 {
843 	/* POSIX locks owned by the same process do not conflict with
844 	 * each other.
845 	 */
846 	if (posix_same_owner(caller_fl, sys_fl))
847 		return false;
848 
849 	/* Check whether they overlap */
850 	if (!locks_overlap(caller_fl, sys_fl))
851 		return false;
852 
853 	return locks_conflict(caller_fl, sys_fl);
854 }
855 
856 /* Determine if lock sys_fl blocks lock caller_fl. FLOCK-specific
857  * checks are done before calling locks_conflict().
858  */
859 static bool flock_locks_conflict(struct file_lock *caller_fl,
860 				 struct file_lock *sys_fl)
861 {
862 	/* FLOCK locks referring to the same filp do not conflict with
863 	 * each other.
864 	 */
865 	if (caller_fl->fl_file == sys_fl->fl_file)
866 		return false;
867 
868 	return locks_conflict(caller_fl, sys_fl);
869 }
870 
871 void
872 posix_test_lock(struct file *filp, struct file_lock *fl)
873 {
874 	struct file_lock *cfl;
875 	struct file_lock_context *ctx;
876 	struct inode *inode = locks_inode(filp);
877 
878 	ctx = smp_load_acquire(&inode->i_flctx);
879 	if (!ctx || list_empty_careful(&ctx->flc_posix)) {
880 		fl->fl_type = F_UNLCK;
881 		return;
882 	}
883 
884 	spin_lock(&ctx->flc_lock);
885 	list_for_each_entry(cfl, &ctx->flc_posix, fl_list) {
886 		if (posix_locks_conflict(fl, cfl)) {
887 			locks_copy_conflock(fl, cfl);
888 			goto out;
889 		}
890 	}
891 	fl->fl_type = F_UNLCK;
892 out:
893 	spin_unlock(&ctx->flc_lock);
894 	return;
895 }
896 EXPORT_SYMBOL(posix_test_lock);
897 
898 /*
899  * Deadlock detection:
900  *
901  * We attempt to detect deadlocks that are due purely to posix file
902  * locks.
903  *
904  * We assume that a task can be waiting for at most one lock at a time.
905  * So for any acquired lock, the process holding that lock may be
906  * waiting on at most one other lock.  That lock in turn may be held by
907  * someone waiting for at most one other lock.  Given a requested lock
908  * caller_fl which is about to wait for a conflicting lock block_fl, we
909  * follow this chain of waiters to ensure we are not about to create a
910  * cycle.
911  *
912  * Since we do this before we ever put a process to sleep on a lock, we
913  * are ensured that there is never a cycle; that is what guarantees that
914  * the while() loop in posix_locks_deadlock() eventually completes.
915  *
916  * Note: the above assumption may not be true when handling lock
917  * requests from a broken NFS client. It may also fail in the presence
918  * of tasks (such as posix threads) sharing the same open file table.
919  * To handle those cases, we just bail out after a few iterations.
920  *
921  * For FL_OFDLCK locks, the owner is the filp, not the files_struct.
922  * Because the owner is not even nominally tied to a thread of
923  * execution, the deadlock detection below can't reasonably work well. Just
924  * skip it for those.
925  *
926  * In principle, we could do a more limited deadlock detection on FL_OFDLCK
927  * locks that just checks for the case where two tasks are attempting to
928  * upgrade from read to write locks on the same inode.
929  */
930 
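/*
 * Illustrative scenario: task A holds a lock on [0,49] and is blocked
 * waiting for [50,99]; task B holds [50,99] and now requests [0,49].
 * Starting from B's conflicting lock (A's applied [0,49]), we find A's
 * pending request in the blocked_hash, chase fl_blocker to the lock it
 * waits behind (B's own [50,99]), and see that its owner matches the
 * caller: a cycle, so the request fails with -EDEADLK instead of sleeping.
 */
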
931 #define MAX_DEADLK_ITERATIONS 10
932 
933 /* Find a lock that the owner of the given block_fl is blocking on. */
934 static struct file_lock *what_owner_is_waiting_for(struct file_lock *block_fl)
935 {
936 	struct file_lock *fl;
937 
938 	hash_for_each_possible(blocked_hash, fl, fl_link, posix_owner_key(block_fl)) {
939 		if (posix_same_owner(fl, block_fl)) {
940 			while (fl->fl_blocker)
941 				fl = fl->fl_blocker;
942 			return fl;
943 		}
944 	}
945 	return NULL;
946 }
947 
948 /* Must be called with the blocked_lock_lock held! */
949 static int posix_locks_deadlock(struct file_lock *caller_fl,
950 				struct file_lock *block_fl)
951 {
952 	int i = 0;
953 
954 	lockdep_assert_held(&blocked_lock_lock);
955 
956 	/*
957 	 * This deadlock detector can't reasonably detect deadlocks with
958 	 * FL_OFDLCK locks, since they aren't owned by a process per se.
959 	 */
960 	if (IS_OFDLCK(caller_fl))
961 		return 0;
962 
963 	while ((block_fl = what_owner_is_waiting_for(block_fl))) {
964 		if (i++ > MAX_DEADLK_ITERATIONS)
965 			return 0;
966 		if (posix_same_owner(caller_fl, block_fl))
967 			return 1;
968 	}
969 	return 0;
970 }
971 
972 /* Try to create a FLOCK lock on filp. We always insert new FLOCK locks
973  * after any leases, but before any posix locks.
974  *
975  * Note that if called with an FL_EXISTS argument, the caller may determine
976  * whether or not a lock was successfully freed by testing the return
977  * value for -ENOENT.
978  */
979 static int flock_lock_inode(struct inode *inode, struct file_lock *request)
980 {
981 	struct file_lock *new_fl = NULL;
982 	struct file_lock *fl;
983 	struct file_lock_context *ctx;
984 	int error = 0;
985 	bool found = false;
986 	LIST_HEAD(dispose);
987 
988 	ctx = locks_get_lock_context(inode, request->fl_type);
989 	if (!ctx) {
990 		if (request->fl_type != F_UNLCK)
991 			return -ENOMEM;
992 		return (request->fl_flags & FL_EXISTS) ? -ENOENT : 0;
993 	}
994 
995 	if (!(request->fl_flags & FL_ACCESS) && (request->fl_type != F_UNLCK)) {
996 		new_fl = locks_alloc_lock();
997 		if (!new_fl)
998 			return -ENOMEM;
999 	}
1000 
1001 	percpu_down_read(&file_rwsem);
1002 	spin_lock(&ctx->flc_lock);
1003 	if (request->fl_flags & FL_ACCESS)
1004 		goto find_conflict;
1005 
1006 	list_for_each_entry(fl, &ctx->flc_flock, fl_list) {
1007 		if (request->fl_file != fl->fl_file)
1008 			continue;
1009 		if (request->fl_type == fl->fl_type)
1010 			goto out;
1011 		found = true;
1012 		locks_delete_lock_ctx(fl, &dispose);
1013 		break;
1014 	}
1015 
1016 	if (request->fl_type == F_UNLCK) {
1017 		if ((request->fl_flags & FL_EXISTS) && !found)
1018 			error = -ENOENT;
1019 		goto out;
1020 	}
1021 
1022 find_conflict:
1023 	list_for_each_entry(fl, &ctx->flc_flock, fl_list) {
1024 		if (!flock_locks_conflict(request, fl))
1025 			continue;
1026 		error = -EAGAIN;
1027 		if (!(request->fl_flags & FL_SLEEP))
1028 			goto out;
1029 		error = FILE_LOCK_DEFERRED;
1030 		locks_insert_block(fl, request, flock_locks_conflict);
1031 		goto out;
1032 	}
1033 	if (request->fl_flags & FL_ACCESS)
1034 		goto out;
1035 	locks_copy_lock(new_fl, request);
1036 	locks_move_blocks(new_fl, request);
1037 	locks_insert_lock_ctx(new_fl, &ctx->flc_flock);
1038 	new_fl = NULL;
1039 	error = 0;
1040 
1041 out:
1042 	spin_unlock(&ctx->flc_lock);
1043 	percpu_up_read(&file_rwsem);
1044 	if (new_fl)
1045 		locks_free_lock(new_fl);
1046 	locks_dispose_list(&dispose);
1047 	trace_flock_lock_inode(inode, request, error);
1048 	return error;
1049 }
1050 
1051 static int posix_lock_inode(struct inode *inode, struct file_lock *request,
1052 			    struct file_lock *conflock)
1053 {
1054 	struct file_lock *fl, *tmp;
1055 	struct file_lock *new_fl = NULL;
1056 	struct file_lock *new_fl2 = NULL;
1057 	struct file_lock *left = NULL;
1058 	struct file_lock *right = NULL;
1059 	struct file_lock_context *ctx;
1060 	int error;
1061 	bool added = false;
1062 	LIST_HEAD(dispose);
1063 
1064 	ctx = locks_get_lock_context(inode, request->fl_type);
1065 	if (!ctx)
1066 		return (request->fl_type == F_UNLCK) ? 0 : -ENOMEM;
1067 
1068 	/*
1069 	 * We may need two file_lock structures for this operation,
1070 	 * so we get them in advance to avoid races.
1071 	 *
1072 	 * In some cases we can be sure that no new locks will be needed.
1073 	 */
1074 	if (!(request->fl_flags & FL_ACCESS) &&
1075 	    (request->fl_type != F_UNLCK ||
1076 	     request->fl_start != 0 || request->fl_end != OFFSET_MAX)) {
1077 		new_fl = locks_alloc_lock();
1078 		new_fl2 = locks_alloc_lock();
1079 	}
1080 
1081 	percpu_down_read(&file_rwsem);
1082 	spin_lock(&ctx->flc_lock);
1083 	/*
1084 	 * New lock request. Walk all POSIX locks and look for conflicts. If
1085 	 * there are any, either return error or put the request on the
1086 	 * blocker's list of waiters and the global blocked_hash.
1087 	 */
1088 	if (request->fl_type != F_UNLCK) {
1089 		list_for_each_entry(fl, &ctx->flc_posix, fl_list) {
1090 			if (!posix_locks_conflict(request, fl))
1091 				continue;
1092 			if (conflock)
1093 				locks_copy_conflock(conflock, fl);
1094 			error = -EAGAIN;
1095 			if (!(request->fl_flags & FL_SLEEP))
1096 				goto out;
1097 			/*
1098 			 * Deadlock detection and insertion into the blocked
1099 			 * locks list must be done while holding the same lock!
1100 			 */
1101 			error = -EDEADLK;
1102 			spin_lock(&blocked_lock_lock);
1103 			/*
1104 			 * Ensure that we don't find any locks blocked on this
1105 			 * request during deadlock detection.
1106 			 */
1107 			__locks_wake_up_blocks(request);
1108 			if (likely(!posix_locks_deadlock(request, fl))) {
1109 				error = FILE_LOCK_DEFERRED;
1110 				__locks_insert_block(fl, request,
1111 						     posix_locks_conflict);
1112 			}
1113 			spin_unlock(&blocked_lock_lock);
1114 			goto out;
1115 		}
1116 	}
1117 
1118 	/* If we're just looking for a conflict, we're done. */
1119 	error = 0;
1120 	if (request->fl_flags & FL_ACCESS)
1121 		goto out;
1122 
1123 	/* Find the first old lock with the same owner as the new lock */
1124 	list_for_each_entry(fl, &ctx->flc_posix, fl_list) {
1125 		if (posix_same_owner(request, fl))
1126 			break;
1127 	}
1128 
1129 	/* Process locks with this owner. */
1130 	list_for_each_entry_safe_from(fl, tmp, &ctx->flc_posix, fl_list) {
1131 		if (!posix_same_owner(request, fl))
1132 			break;
1133 
1134 		/* Detect adjacent or overlapping regions (if same lock type) */
1135 		if (request->fl_type == fl->fl_type) {
1136 			/* In all comparisons of start vs end, use
1137 			 * "start - 1" rather than "end + 1". If end
1138 			 * is OFFSET_MAX, end + 1 will become negative.
1139 			 */
1140 			if (fl->fl_end < request->fl_start - 1)
1141 				continue;
1142 			/* If the next lock in the list lies entirely beyond
1143 			 * the new one, insert the new lock here.
1144 			 */
1145 			if (fl->fl_start - 1 > request->fl_end)
1146 				break;
1147 
1148 			/* If we come here, the new and old lock are of the
1149 			 * same type and adjacent or overlapping. Make one
1150 			 * lock spanning from the lower start address of the
1151 			 * two locks to the higher end address.
1152 			 */
1153 			if (fl->fl_start > request->fl_start)
1154 				fl->fl_start = request->fl_start;
1155 			else
1156 				request->fl_start = fl->fl_start;
1157 			if (fl->fl_end < request->fl_end)
1158 				fl->fl_end = request->fl_end;
1159 			else
1160 				request->fl_end = fl->fl_end;
1161 			if (added) {
1162 				locks_delete_lock_ctx(fl, &dispose);
1163 				continue;
1164 			}
1165 			request = fl;
1166 			added = true;
1167 		} else {
1168 			/* Processing for different lock types is a bit
1169 			 * more complex.
1170 			 */
1171 			if (fl->fl_end < request->fl_start)
1172 				continue;
1173 			if (fl->fl_start > request->fl_end)
1174 				break;
1175 			if (request->fl_type == F_UNLCK)
1176 				added = true;
1177 			if (fl->fl_start < request->fl_start)
1178 				left = fl;
1179 			/* If the next lock in the list has a higher end
1180 			 * address than the new one, insert the new one here.
1181 			 */
1182 			if (fl->fl_end > request->fl_end) {
1183 				right = fl;
1184 				break;
1185 			}
1186 			if (fl->fl_start >= request->fl_start) {
1187 				/* The new lock completely replaces an old
1188 				 * one (this may happen several times).
1189 				 */
1190 				if (added) {
1191 					locks_delete_lock_ctx(fl, &dispose);
1192 					continue;
1193 				}
1194 				/*
1195 				 * Replace the old lock with new_fl, and
1196 				 * remove the old one. It's safe to do the
1197 				 * insert here since we know that we won't be
1198 				 * using new_fl later, and that the lock is
1199 				 * just replacing an existing lock.
1200 				 */
1201 				error = -ENOLCK;
1202 				if (!new_fl)
1203 					goto out;
1204 				locks_copy_lock(new_fl, request);
1205 				locks_move_blocks(new_fl, request);
1206 				request = new_fl;
1207 				new_fl = NULL;
1208 				locks_insert_lock_ctx(request, &fl->fl_list);
1209 				locks_delete_lock_ctx(fl, &dispose);
1210 				added = true;
1211 			}
1212 		}
1213 	}
1214 
1215 	/*
1216 	 * The above code only modifies existing locks in case of merging or
1217 	 * replacing. If new lock(s) need to be inserted all modifications are
1218 	 * done below this, so it's still safe to bail out.
1219 	 */
1220 	error = -ENOLCK; /* "no luck" */
1221 	if (right && left == right && !new_fl2)
1222 		goto out;
1223 
1224 	error = 0;
1225 	if (!added) {
1226 		if (request->fl_type == F_UNLCK) {
1227 			if (request->fl_flags & FL_EXISTS)
1228 				error = -ENOENT;
1229 			goto out;
1230 		}
1231 
1232 		if (!new_fl) {
1233 			error = -ENOLCK;
1234 			goto out;
1235 		}
1236 		locks_copy_lock(new_fl, request);
1237 		locks_move_blocks(new_fl, request);
1238 		locks_insert_lock_ctx(new_fl, &fl->fl_list);
1239 		fl = new_fl;
1240 		new_fl = NULL;
1241 	}
1242 	if (right) {
1243 		if (left == right) {
1244 			/* The new lock breaks the old one in two pieces,
1245 			 * so we have to use the second new lock.
1246 			 */
1247 			left = new_fl2;
1248 			new_fl2 = NULL;
1249 			locks_copy_lock(left, right);
1250 			locks_insert_lock_ctx(left, &fl->fl_list);
1251 		}
1252 		right->fl_start = request->fl_end + 1;
1253 		locks_wake_up_blocks(right);
1254 	}
1255 	if (left) {
1256 		left->fl_end = request->fl_start - 1;
1257 		locks_wake_up_blocks(left);
1258 	}
1259  out:
1260 	spin_unlock(&ctx->flc_lock);
1261 	percpu_up_read(&file_rwsem);
1262 	/*
1263 	 * Free any unused locks.
1264 	 */
1265 	if (new_fl)
1266 		locks_free_lock(new_fl);
1267 	if (new_fl2)
1268 		locks_free_lock(new_fl2);
1269 	locks_dispose_list(&dispose);
1270 	trace_posix_lock_inode(inode, request, error);
1271 
1272 	return error;
1273 }
1274 
1275 /**
1276  * posix_lock_file - Apply a POSIX-style lock to a file
1277  * @filp: The file to apply the lock to
1278  * @fl: The lock to be applied
1279  * @conflock: Place to return a copy of the conflicting lock, if found.
1280  *
1281  * Add a POSIX style lock to a file.
1282  * We merge adjacent & overlapping locks whenever possible.
1283  * POSIX locks are sorted by owner task, then by starting address
1284  *
1285  * Note that if called with an FL_EXISTS argument, the caller may determine
1286  * whether or not a lock was successfully freed by testing the return
1287  * value for -ENOENT.
1288  */
1289 int posix_lock_file(struct file *filp, struct file_lock *fl,
1290 			struct file_lock *conflock)
1291 {
1292 	return posix_lock_inode(locks_inode(filp), fl, conflock);
1293 }
1294 EXPORT_SYMBOL(posix_lock_file);
1295 
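/*
 * Userspace typically arrives here through fcntl(2) with F_SETLK/F_SETLKW.
 * An illustrative caller (assumes "fd" is open for writing) showing the
 * merging described above:
 *
 *	#include <fcntl.h>
 *
 *	struct flock fl = {
 *		.l_type   = F_WRLCK,
 *		.l_whence = SEEK_SET,
 *		.l_start  = 0,
 *		.l_len    = 50,			// lock [0,49]
 *	};
 *	fcntl(fd, F_SETLK, &fl);
 *	fl.l_start = 50;			// adjacent range [50,99]
 *	fcntl(fd, F_SETLK, &fl);		// merged into one lock [0,99]
 */
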
1296 /**
1297  * posix_lock_inode_wait - Apply a POSIX-style lock to a file
1298  * @inode: inode of file to which lock request should be applied
1299  * @fl: The lock to be applied
1300  *
1301  * Apply a POSIX style lock request to an inode.
1302  */
1303 static int posix_lock_inode_wait(struct inode *inode, struct file_lock *fl)
1304 {
1305 	int error;
1306 	might_sleep();
1307 	for (;;) {
1308 		error = posix_lock_inode(inode, fl, NULL);
1309 		if (error != FILE_LOCK_DEFERRED)
1310 			break;
1311 		error = wait_event_interruptible(fl->fl_wait,
1312 					list_empty(&fl->fl_blocked_member));
1313 		if (error)
1314 			break;
1315 	}
1316 	locks_delete_block(fl);
1317 	return error;
1318 }
1319 
1320 static void lease_clear_pending(struct file_lock *fl, int arg)
1321 {
1322 	switch (arg) {
1323 	case F_UNLCK:
1324 		fl->fl_flags &= ~FL_UNLOCK_PENDING;
1325 		fallthrough;
1326 	case F_RDLCK:
1327 		fl->fl_flags &= ~FL_DOWNGRADE_PENDING;
1328 	}
1329 }
1330 
1331 /* We already had a lease on this file; just change its type */
1332 int lease_modify(struct file_lock *fl, int arg, struct list_head *dispose)
1333 {
1334 	int error = assign_type(fl, arg);
1335 
1336 	if (error)
1337 		return error;
1338 	lease_clear_pending(fl, arg);
1339 	locks_wake_up_blocks(fl);
1340 	if (arg == F_UNLCK) {
1341 		struct file *filp = fl->fl_file;
1342 
1343 		f_delown(filp);
1344 		filp->f_owner.signum = 0;
1345 		fasync_helper(0, fl->fl_file, 0, &fl->fl_fasync);
1346 		if (fl->fl_fasync != NULL) {
1347 			printk(KERN_ERR "locks_delete_lock: fasync == %p\n", fl->fl_fasync);
1348 			fl->fl_fasync = NULL;
1349 		}
1350 		locks_delete_lock_ctx(fl, dispose);
1351 	}
1352 	return 0;
1353 }
1354 EXPORT_SYMBOL(lease_modify);
1355 
1356 static bool past_time(unsigned long then)
1357 {
1358 	if (!then)
1359 		/* 0 is a special value meaning "this never expires": */
1360 		return false;
1361 	return time_after(jiffies, then);
1362 }
1363 
1364 static void time_out_leases(struct inode *inode, struct list_head *dispose)
1365 {
1366 	struct file_lock_context *ctx = inode->i_flctx;
1367 	struct file_lock *fl, *tmp;
1368 
1369 	lockdep_assert_held(&ctx->flc_lock);
1370 
1371 	list_for_each_entry_safe(fl, tmp, &ctx->flc_lease, fl_list) {
1372 		trace_time_out_leases(inode, fl);
1373 		if (past_time(fl->fl_downgrade_time))
1374 			lease_modify(fl, F_RDLCK, dispose);
1375 		if (past_time(fl->fl_break_time))
1376 			lease_modify(fl, F_UNLCK, dispose);
1377 	}
1378 }
1379 
1380 static bool leases_conflict(struct file_lock *lease, struct file_lock *breaker)
1381 {
1382 	bool rc;
1383 
1384 	if (lease->fl_lmops->lm_breaker_owns_lease
1385 			&& lease->fl_lmops->lm_breaker_owns_lease(lease))
1386 		return false;
1387 	if ((breaker->fl_flags & FL_LAYOUT) != (lease->fl_flags & FL_LAYOUT)) {
1388 		rc = false;
1389 		goto trace;
1390 	}
1391 	if ((breaker->fl_flags & FL_DELEG) && (lease->fl_flags & FL_LEASE)) {
1392 		rc = false;
1393 		goto trace;
1394 	}
1395 
1396 	rc = locks_conflict(breaker, lease);
1397 trace:
1398 	trace_leases_conflict(rc, lease, breaker);
1399 	return rc;
1400 }
1401 
1402 static bool
1403 any_leases_conflict(struct inode *inode, struct file_lock *breaker)
1404 {
1405 	struct file_lock_context *ctx = inode->i_flctx;
1406 	struct file_lock *fl;
1407 
1408 	lockdep_assert_held(&ctx->flc_lock);
1409 
1410 	list_for_each_entry(fl, &ctx->flc_lease, fl_list) {
1411 		if (leases_conflict(fl, breaker))
1412 			return true;
1413 	}
1414 	return false;
1415 }
1416 
1417 /**
1418  *	__break_lease	-	revoke all outstanding leases on file
1419  *	@inode: the inode of the file whose leases should be broken
1420  *	@mode: O_RDONLY: break only write leases; O_WRONLY or O_RDWR:
1421  *	    break all leases
1422  *	@type: FL_LEASE: break leases and delegations; FL_DELEG: break
1423  *	    only delegations
1424  *
1425  *	break_lease (inlined for speed) has checked there already is at least
1426  *	some kind of lock (maybe a lease) on this file.  Leases are broken on
1427  *	a call to open() or truncate().  This function can sleep unless you
1428  *	specified %O_NONBLOCK to your open().
1429  */
1430 int __break_lease(struct inode *inode, unsigned int mode, unsigned int type)
1431 {
1432 	int error = 0;
1433 	struct file_lock_context *ctx;
1434 	struct file_lock *new_fl, *fl, *tmp;
1435 	unsigned long break_time;
1436 	int want_write = (mode & O_ACCMODE) != O_RDONLY;
1437 	LIST_HEAD(dispose);
1438 
1439 	new_fl = lease_alloc(NULL, want_write ? F_WRLCK : F_RDLCK);
1440 	if (IS_ERR(new_fl))
1441 		return PTR_ERR(new_fl);
1442 	new_fl->fl_flags = type;
1443 
1444 	/* typically we will check that ctx is non-NULL before calling */
1445 	ctx = smp_load_acquire(&inode->i_flctx);
1446 	if (!ctx) {
1447 		WARN_ON_ONCE(1);
1448 		goto free_lock;
1449 	}
1450 
1451 	percpu_down_read(&file_rwsem);
1452 	spin_lock(&ctx->flc_lock);
1453 
1454 	time_out_leases(inode, &dispose);
1455 
1456 	if (!any_leases_conflict(inode, new_fl))
1457 		goto out;
1458 
1459 	break_time = 0;
1460 	if (lease_break_time > 0) {
1461 		break_time = jiffies + lease_break_time * HZ;
1462 		if (break_time == 0)
1463 			break_time++;	/* so that 0 means no break time */
1464 	}
1465 
1466 	list_for_each_entry_safe(fl, tmp, &ctx->flc_lease, fl_list) {
1467 		if (!leases_conflict(fl, new_fl))
1468 			continue;
1469 		if (want_write) {
1470 			if (fl->fl_flags & FL_UNLOCK_PENDING)
1471 				continue;
1472 			fl->fl_flags |= FL_UNLOCK_PENDING;
1473 			fl->fl_break_time = break_time;
1474 		} else {
1475 			if (lease_breaking(fl))
1476 				continue;
1477 			fl->fl_flags |= FL_DOWNGRADE_PENDING;
1478 			fl->fl_downgrade_time = break_time;
1479 		}
1480 		if (fl->fl_lmops->lm_break(fl))
1481 			locks_delete_lock_ctx(fl, &dispose);
1482 	}
1483 
1484 	if (list_empty(&ctx->flc_lease))
1485 		goto out;
1486 
1487 	if (mode & O_NONBLOCK) {
1488 		trace_break_lease_noblock(inode, new_fl);
1489 		error = -EWOULDBLOCK;
1490 		goto out;
1491 	}
1492 
1493 restart:
1494 	fl = list_first_entry(&ctx->flc_lease, struct file_lock, fl_list);
1495 	break_time = fl->fl_break_time;
1496 	if (break_time != 0)
1497 		break_time -= jiffies;
1498 	if (break_time == 0)
1499 		break_time++;
1500 	locks_insert_block(fl, new_fl, leases_conflict);
1501 	trace_break_lease_block(inode, new_fl);
1502 	spin_unlock(&ctx->flc_lock);
1503 	percpu_up_read(&file_rwsem);
1504 
1505 	locks_dispose_list(&dispose);
1506 	error = wait_event_interruptible_timeout(new_fl->fl_wait,
1507 					list_empty(&new_fl->fl_blocked_member),
1508 					break_time);
1509 
1510 	percpu_down_read(&file_rwsem);
1511 	spin_lock(&ctx->flc_lock);
1512 	trace_break_lease_unblock(inode, new_fl);
1513 	locks_delete_block(new_fl);
1514 	if (error >= 0) {
1515 		/*
1516 		 * Wait for the next conflicting lease that has not been
1517 		 * broken yet
1518 		 */
1519 		if (error == 0)
1520 			time_out_leases(inode, &dispose);
1521 		if (any_leases_conflict(inode, new_fl))
1522 			goto restart;
1523 		error = 0;
1524 	}
1525 out:
1526 	spin_unlock(&ctx->flc_lock);
1527 	percpu_up_read(&file_rwsem);
1528 	locks_dispose_list(&dispose);
1529 free_lock:
1530 	locks_free_lock(new_fl);
1531 	return error;
1532 }
1533 EXPORT_SYMBOL(__break_lease);
1534 
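/*
 * Illustrative sequence: process A holds an F_RDLCK lease on a file and
 * process B calls open(path, O_RDWR).  break_lease() lands here with
 * want_write set, A's lease is marked FL_UNLOCK_PENDING and A is notified
 * (SIGIO via lm_break), and B sleeps for up to lease_break_time seconds
 * while A finishes up.  With O_NONBLOCK, B gets -EWOULDBLOCK instead.
 */
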
1535 /**
1536  *	lease_get_mtime - update modified time of an inode with exclusive lease
1537  *	@inode: the inode
1538  *      @time:  pointer to a timespec which contains the last modified time
1539  *
1540  * This is to force NFS clients to flush their caches for files with
1541  * exclusive leases.  The justification is that if someone has an
1542  * exclusive lease, then they could be modifying it.
1543  */
1544 void lease_get_mtime(struct inode *inode, struct timespec64 *time)
1545 {
1546 	bool has_lease = false;
1547 	struct file_lock_context *ctx;
1548 	struct file_lock *fl;
1549 
1550 	ctx = smp_load_acquire(&inode->i_flctx);
1551 	if (ctx && !list_empty_careful(&ctx->flc_lease)) {
1552 		spin_lock(&ctx->flc_lock);
1553 		fl = list_first_entry_or_null(&ctx->flc_lease,
1554 					      struct file_lock, fl_list);
1555 		if (fl && (fl->fl_type == F_WRLCK))
1556 			has_lease = true;
1557 		spin_unlock(&ctx->flc_lock);
1558 	}
1559 
1560 	if (has_lease)
1561 		*time = current_time(inode);
1562 }
1563 EXPORT_SYMBOL(lease_get_mtime);
1564 
1565 /**
1566  *	fcntl_getlease - Enquire what lease is currently active
1567  *	@filp: the file
1568  *
1569  *	The value returned by this function will be one of
1570  *	(if no lease break is pending):
1571  *
1572  *	%F_RDLCK to indicate a shared lease is held.
1573  *
1574  *	%F_WRLCK to indicate an exclusive lease is held.
1575  *
1576  *	%F_UNLCK to indicate no lease is held.
1577  *
1578  *	(if a lease break is pending):
1579  *
1580  *	%F_RDLCK to indicate an exclusive lease needs to be
1581  *		changed to a shared lease (or removed).
1582  *
1583  *	%F_UNLCK to indicate the lease needs to be removed.
1584  *
1585  *	XXX: sfr & willy disagree over whether F_INPROGRESS
1586  *	should be returned to userspace.
1587  */
1588 int fcntl_getlease(struct file *filp)
1589 {
1590 	struct file_lock *fl;
1591 	struct inode *inode = locks_inode(filp);
1592 	struct file_lock_context *ctx;
1593 	int type = F_UNLCK;
1594 	LIST_HEAD(dispose);
1595 
1596 	ctx = smp_load_acquire(&inode->i_flctx);
1597 	if (ctx && !list_empty_careful(&ctx->flc_lease)) {
1598 		percpu_down_read(&file_rwsem);
1599 		spin_lock(&ctx->flc_lock);
1600 		time_out_leases(inode, &dispose);
1601 		list_for_each_entry(fl, &ctx->flc_lease, fl_list) {
1602 			if (fl->fl_file != filp)
1603 				continue;
1604 			type = target_leasetype(fl);
1605 			break;
1606 		}
1607 		spin_unlock(&ctx->flc_lock);
1608 		percpu_up_read(&file_rwsem);
1609 
1610 		locks_dispose_list(&dispose);
1611 	}
1612 	return type;
1613 }
1614 
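/*
 * From userspace this is fcntl(2) with F_GETLEASE (illustrative):
 *
 *	#define _GNU_SOURCE
 *	#include <fcntl.h>
 *
 *	int type = fcntl(fd, F_GETLEASE);	// F_RDLCK, F_WRLCK or F_UNLCK
 */
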
1615 /**
1616  * check_conflicting_open - see if the given file points to an inode that has
1617  *			    an existing open that would conflict with the
1618  *			    desired lease.
1619  * @filp:	file to check
1620  * @arg:	type of lease that we're trying to acquire
1621  * @flags:	current lock flags
1622  *
1623  * Check to see if there's an existing open fd on this file that would
1624  * conflict with the lease we're trying to set.
1625  */
1626 static int
1627 check_conflicting_open(struct file *filp, const long arg, int flags)
1628 {
1629 	struct inode *inode = locks_inode(filp);
1630 	int self_wcount = 0, self_rcount = 0;
1631 
1632 	if (flags & FL_LAYOUT)
1633 		return 0;
1634 	if (flags & FL_DELEG)
1635 		/* We leave these checks to the caller */
1636 		return 0;
1637 
1638 	if (arg == F_RDLCK)
1639 		return inode_is_open_for_write(inode) ? -EAGAIN : 0;
1640 	else if (arg != F_WRLCK)
1641 		return 0;
1642 
1643 	/*
1644 	 * Make sure that only read/write count is from lease requestor.
1645 	 * Note that this will result in denying write leases when i_writecount
1646 	 * is negative, which is what we want.  (We shouldn't grant write leases
1647 	 * on files open for execution.)
1648 	 */
1649 	if (filp->f_mode & FMODE_WRITE)
1650 		self_wcount = 1;
1651 	else if (filp->f_mode & FMODE_READ)
1652 		self_rcount = 1;
1653 
1654 	if (atomic_read(&inode->i_writecount) != self_wcount ||
1655 	    atomic_read(&inode->i_readcount) != self_rcount)
1656 		return -EAGAIN;
1657 
1658 	return 0;
1659 }
1660 
1661 static int
1662 generic_add_lease(struct file *filp, long arg, struct file_lock **flp, void **priv)
1663 {
1664 	struct file_lock *fl, *my_fl = NULL, *lease;
1665 	struct inode *inode = locks_inode(filp);
1666 	struct file_lock_context *ctx;
1667 	bool is_deleg = (*flp)->fl_flags & FL_DELEG;
1668 	int error;
1669 	LIST_HEAD(dispose);
1670 
1671 	lease = *flp;
1672 	trace_generic_add_lease(inode, lease);
1673 
1674 	/* Note that arg is never F_UNLCK here */
1675 	ctx = locks_get_lock_context(inode, arg);
1676 	if (!ctx)
1677 		return -ENOMEM;
1678 
1679 	/*
1680 	 * In the delegation case we need mutual exclusion with
1681 	 * a number of operations that take the i_mutex.  We trylock
1682 	 * because delegations are an optional optimization, and if
1683 	 * there's some chance of a conflict we'd rather not
1684 	 * bother; maybe that's a sign this just isn't a good file to
1685 	 * hand out a delegation on.
1686 	 */
1687 	if (is_deleg && !inode_trylock(inode))
1688 		return -EAGAIN;
1689 
1690 	if (is_deleg && arg == F_WRLCK) {
1691 		/* Write delegations are not currently supported: */
1692 		inode_unlock(inode);
1693 		WARN_ON_ONCE(1);
1694 		return -EINVAL;
1695 	}
1696 
1697 	percpu_down_read(&file_rwsem);
1698 	spin_lock(&ctx->flc_lock);
1699 	time_out_leases(inode, &dispose);
1700 	error = check_conflicting_open(filp, arg, lease->fl_flags);
1701 	if (error)
1702 		goto out;
1703 
1704 	/*
1705 	 * At this point, we know that if there is an exclusive
1706 	 * lease on this file, then we hold it on this filp
1707 	 * (otherwise our open of this file would have blocked).
1708 	 * And if we are trying to acquire an exclusive lease,
1709 	 * then the file is not open by anyone (including us)
1710 	 * except for this filp.
1711 	 */
1712 	error = -EAGAIN;
1713 	list_for_each_entry(fl, &ctx->flc_lease, fl_list) {
1714 		if (fl->fl_file == filp &&
1715 		    fl->fl_owner == lease->fl_owner) {
1716 			my_fl = fl;
1717 			continue;
1718 		}
1719 
1720 		/*
1721 		 * No exclusive leases if someone else has a lease on
1722 		 * this file:
1723 		 */
1724 		if (arg == F_WRLCK)
1725 			goto out;
1726 		/*
1727 		 * Modifying our existing lease is OK, but we cannot get a
1728 		 * new lease if someone else is opening for write:
1729 		 */
1730 		if (fl->fl_flags & FL_UNLOCK_PENDING)
1731 			goto out;
1732 	}
1733 
1734 	if (my_fl != NULL) {
1735 		lease = my_fl;
1736 		error = lease->fl_lmops->lm_change(lease, arg, &dispose);
1737 		if (error)
1738 			goto out;
1739 		goto out_setup;
1740 	}
1741 
1742 	error = -EINVAL;
1743 	if (!leases_enable)
1744 		goto out;
1745 
1746 	locks_insert_lock_ctx(lease, &ctx->flc_lease);
1747 	/*
1748 	 * The check in break_lease() is lockless. It's possible for another
1749 	 * open to race in after we did the earlier check for a conflicting
1750 	 * open but before the lease was inserted. Check again for a
1751 	 * conflicting open and cancel the lease if there is one.
1752 	 *
1753 	 * We also add a barrier here to ensure that the insertion of the lock
1754 	 * precedes these checks.
1755 	 */
1756 	smp_mb();
1757 	error = check_conflicting_open(filp, arg, lease->fl_flags);
1758 	if (error) {
1759 		locks_unlink_lock_ctx(lease);
1760 		goto out;
1761 	}
1762 
1763 out_setup:
1764 	if (lease->fl_lmops->lm_setup)
1765 		lease->fl_lmops->lm_setup(lease, priv);
1766 out:
1767 	spin_unlock(&ctx->flc_lock);
1768 	percpu_up_read(&file_rwsem);
1769 	locks_dispose_list(&dispose);
1770 	if (is_deleg)
1771 		inode_unlock(inode);
1772 	if (!error && !my_fl)
1773 		*flp = NULL;
1774 	return error;
1775 }
1776 
1777 static int generic_delete_lease(struct file *filp, void *owner)
1778 {
1779 	int error = -EAGAIN;
1780 	struct file_lock *fl, *victim = NULL;
1781 	struct inode *inode = locks_inode(filp);
1782 	struct file_lock_context *ctx;
1783 	LIST_HEAD(dispose);
1784 
1785 	ctx = smp_load_acquire(&inode->i_flctx);
1786 	if (!ctx) {
1787 		trace_generic_delete_lease(inode, NULL);
1788 		return error;
1789 	}
1790 
1791 	percpu_down_read(&file_rwsem);
1792 	spin_lock(&ctx->flc_lock);
1793 	list_for_each_entry(fl, &ctx->flc_lease, fl_list) {
1794 		if (fl->fl_file == filp &&
1795 		    fl->fl_owner == owner) {
1796 			victim = fl;
1797 			break;
1798 		}
1799 	}
1800 	trace_generic_delete_lease(inode, victim);
1801 	if (victim)
1802 		error = fl->fl_lmops->lm_change(victim, F_UNLCK, &dispose);
1803 	spin_unlock(&ctx->flc_lock);
1804 	percpu_up_read(&file_rwsem);
1805 	locks_dispose_list(&dispose);
1806 	return error;
1807 }
1808 
1809 /**
1810  *	generic_setlease	-	sets a lease on an open file
1811  *	@filp:	file pointer
1812  *	@arg:	type of lease to obtain
1813  *	@flp:	input - file_lock to use, output - file_lock inserted
1814  *	@priv:	private data for lm_setup (may be NULL if lm_setup
1815  *		doesn't require it)
1816  *
1817  *	The (input) flp->fl_lmops->lm_break function is required
1818  *	by break_lease().
1819  */
1820 int generic_setlease(struct file *filp, long arg, struct file_lock **flp,
1821 			void **priv)
1822 {
1823 	struct inode *inode = locks_inode(filp);
1824 	int error;
1825 
1826 	if ((!uid_eq(current_fsuid(), inode->i_uid)) && !capable(CAP_LEASE))
1827 		return -EACCES;
1828 	if (!S_ISREG(inode->i_mode))
1829 		return -EINVAL;
1830 	error = security_file_lock(filp, arg);
1831 	if (error)
1832 		return error;
1833 
1834 	switch (arg) {
1835 	case F_UNLCK:
1836 		return generic_delete_lease(filp, *priv);
1837 	case F_RDLCK:
1838 	case F_WRLCK:
1839 		if (!(*flp)->fl_lmops->lm_break) {
1840 			WARN_ON_ONCE(1);
1841 			return -ENOLCK;
1842 		}
1843 
1844 		return generic_add_lease(filp, arg, flp, priv);
1845 	default:
1846 		return -EINVAL;
1847 	}
1848 }
1849 EXPORT_SYMBOL(generic_setlease);
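/*
 * Example (sketch): a filesystem wanting the stock VFS lease behaviour
 * can wire this helper into its file_operations; myfs_file_ops is an
 * illustrative name (filesystems without a ->setlease get the same
 * behaviour via the vfs_setlease() fallback below).
 *
 *	const struct file_operations myfs_file_ops = {
 *		.read_iter	= generic_file_read_iter,
 *		.write_iter	= generic_file_write_iter,
 *		.setlease	= generic_setlease,
 *	};
 */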
1850 
1851 #if IS_ENABLED(CONFIG_SRCU)
1852 /*
1853  * Kernel subsystems can register to be notified on any attempt to set
1854  * a new lease with the lease_notifier_chain. This is used by (e.g.) nfsd
1855  * to close files that it may have cached when there is an attempt to set a
1856  * conflicting lease.
1857  */
1858 static struct srcu_notifier_head lease_notifier_chain;
1859 
1860 static inline void
1861 lease_notifier_chain_init(void)
1862 {
1863 	srcu_init_notifier_head(&lease_notifier_chain);
1864 }
1865 
1866 static inline void
1867 setlease_notifier(long arg, struct file_lock *lease)
1868 {
1869 	if (arg != F_UNLCK)
1870 		srcu_notifier_call_chain(&lease_notifier_chain, arg, lease);
1871 }
1872 
1873 int lease_register_notifier(struct notifier_block *nb)
1874 {
1875 	return srcu_notifier_chain_register(&lease_notifier_chain, nb);
1876 }
1877 EXPORT_SYMBOL_GPL(lease_register_notifier);
1878 
1879 void lease_unregister_notifier(struct notifier_block *nb)
1880 {
1881 	srcu_notifier_chain_unregister(&lease_notifier_chain, nb);
1882 }
1883 EXPORT_SYMBOL_GPL(lease_unregister_notifier);
1884 
1885 #else /* !IS_ENABLED(CONFIG_SRCU) */
1886 static inline void
1887 lease_notifier_chain_init(void)
1888 {
1889 }
1890 
1891 static inline void
1892 setlease_notifier(long arg, struct file_lock *lease)
1893 {
1894 }
1895 
1896 int lease_register_notifier(struct notifier_block *nb)
1897 {
1898 	return 0;
1899 }
1900 EXPORT_SYMBOL_GPL(lease_register_notifier);
1901 
1902 void lease_unregister_notifier(struct notifier_block *nb)
1903 {
1904 }
1905 EXPORT_SYMBOL_GPL(lease_unregister_notifier);
1906 
1907 #endif /* IS_ENABLED(CONFIG_SRCU) */
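/*
 * Example (sketch): a subsystem subscribing to lease attempts, in the
 * style nfsd uses to drop cached files; the my_* names are assumptions.
 *
 *	static int my_lease_event(struct notifier_block *nb,
 *				  unsigned long arg, void *data)
 *	{
 *		struct file_lock *lease = data;
 *
 *		my_flush_cached_file(locks_inode(lease->fl_file));	// assumed helper
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block my_lease_nb = {
 *		.notifier_call = my_lease_event,
 *	};
 *
 *	lease_register_notifier(&my_lease_nb);
 *	...
 *	lease_unregister_notifier(&my_lease_nb);
 */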
1908 
1909 /**
1910  * vfs_setlease        -       sets a lease on an open file
1911  * @filp:	file pointer
1912  * @arg:	type of lease to obtain
1913  * @lease:	file_lock to use when adding a lease
1914  * @priv:	private info for lm_setup when adding a lease (may be
1915  *		NULL if lm_setup doesn't require it)
1916  *
1917  * Call this to establish a lease on the file. The "lease" argument is not
1918  * used for F_UNLCK requests and may be NULL. For commands that set or alter
1919  * an existing lease, the ``(*lease)->fl_lmops->lm_break`` operation must be
1920  * set; if not, this function will return -ENOLCK (and generate a scary-looking
1921  * stack trace).
1922  *
1923  * The "priv" pointer is passed directly to the lm_setup function as-is. It
1924  * may be NULL if the lm_setup operation doesn't require it.
1925  */
1926 int
1927 vfs_setlease(struct file *filp, long arg, struct file_lock **lease, void **priv)
1928 {
1929 	if (lease)
1930 		setlease_notifier(arg, *lease);
1931 	if (filp->f_op->setlease)
1932 		return filp->f_op->setlease(filp, arg, lease, priv);
1933 	else
1934 		return generic_setlease(filp, arg, lease, priv);
1935 }
1936 EXPORT_SYMBOL_GPL(vfs_setlease);
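/*
 * Example (sketch, loosely modeled on nfsd's delegation setup): an
 * in-kernel caller installing a read lease. my_lease_ops is a
 * hypothetical lock_manager_operations providing at least lm_break,
 * and my_cookie is an assumed owner value.
 *
 *	struct file_lock *fl = locks_alloc_lock();
 *	void *priv = NULL;
 *	int error;
 *
 *	if (!fl)
 *		return -ENOMEM;
 *	fl->fl_flags = FL_LEASE;
 *	fl->fl_type = F_RDLCK;
 *	fl->fl_start = 0;
 *	fl->fl_end = OFFSET_MAX;
 *	fl->fl_owner = (fl_owner_t)my_cookie;
 *	fl->fl_pid = current->tgid;
 *	fl->fl_file = filp;
 *	fl->fl_lmops = &my_lease_ops;
 *	error = vfs_setlease(filp, F_RDLCK, &fl, &priv);
 *	if (fl)
 *		locks_free_lock(fl);	// left NULL on success, see above
 */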
1937 
1938 static int do_fcntl_add_lease(unsigned int fd, struct file *filp, long arg)
1939 {
1940 	struct file_lock *fl;
1941 	struct fasync_struct *new;
1942 	int error;
1943 
1944 	fl = lease_alloc(filp, arg);
1945 	if (IS_ERR(fl))
1946 		return PTR_ERR(fl);
1947 
1948 	new = fasync_alloc();
1949 	if (!new) {
1950 		locks_free_lock(fl);
1951 		return -ENOMEM;
1952 	}
1953 	new->fa_fd = fd;
1954 
1955 	error = vfs_setlease(filp, arg, &fl, (void **)&new);
1956 	if (fl)
1957 		locks_free_lock(fl);
1958 	if (new)
1959 		fasync_free(new);
1960 	return error;
1961 }
1962 
1963 /**
1964  *	fcntl_setlease	-	sets a lease on an open file
1965  *	@fd: open file descriptor
1966  *	@filp: file pointer
1967  *	@arg: type of lease to obtain
1968  *
1969  *	Call this fcntl to establish a lease on the file.
1970  *	Note that you also need to call %F_SETSIG to
1971  *	receive a signal when the lease is broken.
1972  */
1973 int fcntl_setlease(unsigned int fd, struct file *filp, long arg)
1974 {
1975 	if (arg == F_UNLCK)
1976 		return vfs_setlease(filp, F_UNLCK, NULL, (void **)&filp);
1977 	return do_fcntl_add_lease(fd, filp, arg);
1978 }
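/*
 * Example (userspace sketch, not kernel code): taking a read lease via
 * fcntl(2) and choosing a realtime signal for lease-break notification.
 *
 *	fcntl(fd, F_SETSIG, SIGRTMIN);		// signal raised on break
 *	if (fcntl(fd, F_SETLEASE, F_RDLCK) == -1)
 *		perror("F_SETLEASE");
 *	...
 *	fcntl(fd, F_SETLEASE, F_UNLCK);		// drop the lease
 */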
1979 
1980 /**
1981  * flock_lock_inode_wait - Apply a FLOCK-style lock to a file
1982  * @inode: inode of the file to apply to
1983  * @fl: The lock to be applied
1984  *
1985  * Apply a FLOCK style lock request to an inode.
1986  */
1987 static int flock_lock_inode_wait(struct inode *inode, struct file_lock *fl)
1988 {
1989 	int error;
1990 	might_sleep();
1991 	for (;;) {
1992 		error = flock_lock_inode(inode, fl);
1993 		if (error != FILE_LOCK_DEFERRED)
1994 			break;
1995 		error = wait_event_interruptible(fl->fl_wait,
1996 				list_empty(&fl->fl_blocked_member));
1997 		if (error)
1998 			break;
1999 	}
2000 	locks_delete_block(fl);
2001 	return error;
2002 }
2003 
2004 /**
2005  * locks_lock_inode_wait - Apply a lock to an inode
2006  * @inode: inode of the file to apply to
2007  * @fl: The lock to be applied
2008  *
2009  * Apply a POSIX or FLOCK style lock request to an inode.
2010  */
2011 int locks_lock_inode_wait(struct inode *inode, struct file_lock *fl)
2012 {
2013 	int res = 0;
2014 	switch (fl->fl_flags & (FL_POSIX|FL_FLOCK)) {
2015 		case FL_POSIX:
2016 			res = posix_lock_inode_wait(inode, fl);
2017 			break;
2018 		case FL_FLOCK:
2019 			res = flock_lock_inode_wait(inode, fl);
2020 			break;
2021 		default:
2022 			BUG();
2023 	}
2024 	return res;
2025 }
2026 EXPORT_SYMBOL(locks_lock_inode_wait);
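/*
 * Sketch (hypothetical network filesystem): once the server grants a
 * lock, record it locally with this helper so the VFS can report it and
 * clean it up on close. myfs_send_lock_to_server() is an assumed helper.
 *
 *	static int myfs_flock(struct file *filp, int cmd, struct file_lock *fl)
 *	{
 *		int error = myfs_send_lock_to_server(filp, fl);
 *
 *		if (!error)
 *			error = locks_lock_inode_wait(locks_inode(filp), fl);
 *		return error;
 *	}
 */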
2027 
2028 /**
2029  *	sys_flock - flock() system call.
2030  *	@fd: the file descriptor to lock.
2031  *	@cmd: the type of lock to apply.
2032  *
2033  *	Apply a %FL_FLOCK style lock to an open file descriptor.
2034  *	The @cmd can be one of:
2035  *
2036  *	- %LOCK_SH -- a shared lock.
2037  *	- %LOCK_EX -- an exclusive lock.
2038  *	- %LOCK_UN -- remove an existing lock.
2039  *	- %LOCK_MAND -- a 'mandatory' flock. (DEPRECATED)
2040  *
2041  *	%LOCK_MAND support has been removed from the kernel.
2042  */
2043 SYSCALL_DEFINE2(flock, unsigned int, fd, unsigned int, cmd)
2044 {
2045 	struct fd f = fdget(fd);
2046 	struct file_lock *lock;
2047 	int can_sleep, unlock;
2048 	int error;
2049 
2050 	error = -EBADF;
2051 	if (!f.file)
2052 		goto out;
2053 
2054 	can_sleep = !(cmd & LOCK_NB);
2055 	cmd &= ~LOCK_NB;
2056 	unlock = (cmd == LOCK_UN);
2057 
2058 	if (!unlock && !(f.file->f_mode & (FMODE_READ|FMODE_WRITE)))
2059 		goto out_putf;
2060 
2061 	/*
2062 	 * LOCK_MAND locks were broken for a long time in that they never
2063 	 * conflicted with one another and didn't prevent any sort of open,
2064 	 * read or write activity.
2065 	 *
2066 	 * Just ignore these requests now, to preserve legacy behavior, but
2067 	 * throw a warning to let people know that they don't actually work.
2068 	 */
2069 	if (cmd & LOCK_MAND) {
2070 		pr_warn_once("Attempt to set a LOCK_MAND lock via flock(2). This support has been removed and the request ignored.\n");
2071 		error = 0;
2072 		goto out_putf;
2073 	}
2074 
2075 	lock = flock_make_lock(f.file, cmd, NULL);
2076 	if (IS_ERR(lock)) {
2077 		error = PTR_ERR(lock);
2078 		goto out_putf;
2079 	}
2080 
2081 	if (can_sleep)
2082 		lock->fl_flags |= FL_SLEEP;
2083 
2084 	error = security_file_lock(f.file, lock->fl_type);
2085 	if (error)
2086 		goto out_free;
2087 
2088 	if (f.file->f_op->flock)
2089 		error = f.file->f_op->flock(f.file,
2090 					  (can_sleep) ? F_SETLKW : F_SETLK,
2091 					  lock);
2092 	else
2093 		error = locks_lock_file_wait(f.file, lock);
2094 
2095  out_free:
2096 	locks_free_lock(lock);
2097 
2098  out_putf:
2099 	fdput(f);
2100  out:
2101 	return error;
2102 }
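/*
 * Example (userspace sketch, not kernel code): serializing instances of
 * a program with flock(2); the path and helpers are illustrative.
 *
 *	int fd = open("/run/lock/mydaemon.lock", O_CREAT | O_RDWR, 0600);
 *
 *	if (flock(fd, LOCK_EX | LOCK_NB) == -1)	// fail rather than sleep
 *		exit(1);			// another instance holds it
 *	do_work();				// assumed application code
 *	flock(fd, LOCK_UN);
 */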
2103 
2104 /**
2105  * vfs_test_lock - test file byte range lock
2106  * @filp: The file to test lock for
2107  * @fl: The lock to test; also used to hold result
2108  *
2109  * Returns -ERRNO on failure.  Indicates presence of conflicting lock by
2110  * setting fl->fl_type to something other than F_UNLCK.
2111  */
2112 int vfs_test_lock(struct file *filp, struct file_lock *fl)
2113 {
2114 	if (filp->f_op->lock)
2115 		return filp->f_op->lock(filp, F_GETLK, fl);
2116 	posix_test_lock(filp, fl);
2117 	return 0;
2118 }
2119 EXPORT_SYMBOL_GPL(vfs_test_lock);
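/*
 * Example (sketch): asking whether a whole-file write lock could be
 * granted, roughly as a lock manager might; the owner value is purely
 * illustrative.
 *
 *	struct file_lock fl;
 *	int error;
 *
 *	locks_init_lock(&fl);
 *	fl.fl_flags = FL_POSIX;
 *	fl.fl_type = F_WRLCK;
 *	fl.fl_start = 0;
 *	fl.fl_end = OFFSET_MAX;
 *	fl.fl_owner = current->files;
 *	fl.fl_pid = current->tgid;
 *	error = vfs_test_lock(filp, &fl);
 *	if (!error && fl.fl_type != F_UNLCK)
 *		pr_info("conflict: %lld-%lld\n", fl.fl_start, fl.fl_end);
 */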
2120 
2121 /**
2122  * locks_translate_pid - translate a file_lock's fl_pid number into a namespace
2123  * @fl: The file_lock whose fl_pid should be translated
2124  * @ns: The namespace into which the pid should be translated
2125  *
2126  * Used to translate a fl_pid into a namespace virtual pid number
2127  */
2128 static pid_t locks_translate_pid(struct file_lock *fl, struct pid_namespace *ns)
2129 {
2130 	pid_t vnr;
2131 	struct pid *pid;
2132 
2133 	if (IS_OFDLCK(fl))
2134 		return -1;
2135 	if (IS_REMOTELCK(fl))
2136 		return fl->fl_pid;
2137 	/*
2138 	 * If the lock owner process is dead and its pid has already been
2139 	 * freed, the translation below won't work, but we still want to show
2140 	 * the lock owner's pid number in the init pidns.
2141 	 */
2142 	if (ns == &init_pid_ns)
2143 		return (pid_t)fl->fl_pid;
2144 
2145 	rcu_read_lock();
2146 	pid = find_pid_ns(fl->fl_pid, &init_pid_ns);
2147 	vnr = pid_nr_ns(pid, ns);
2148 	rcu_read_unlock();
2149 	return vnr;
2150 }
2151 
2152 static int posix_lock_to_flock(struct flock *flock, struct file_lock *fl)
2153 {
2154 	flock->l_pid = locks_translate_pid(fl, task_active_pid_ns(current));
2155 #if BITS_PER_LONG == 32
2156 	/*
2157 	 * Make sure we can represent the posix lock via
2158 	 * legacy 32bit flock.
2159 	 */
2160 	if (fl->fl_start > OFFT_OFFSET_MAX)
2161 		return -EOVERFLOW;
2162 	if (fl->fl_end != OFFSET_MAX && fl->fl_end > OFFT_OFFSET_MAX)
2163 		return -EOVERFLOW;
2164 #endif
2165 	flock->l_start = fl->fl_start;
2166 	flock->l_len = fl->fl_end == OFFSET_MAX ? 0 :
2167 		fl->fl_end - fl->fl_start + 1;
2168 	flock->l_whence = 0;
2169 	flock->l_type = fl->fl_type;
2170 	return 0;
2171 }
2172 
2173 #if BITS_PER_LONG == 32
2174 static void posix_lock_to_flock64(struct flock64 *flock, struct file_lock *fl)
2175 {
2176 	flock->l_pid = locks_translate_pid(fl, task_active_pid_ns(current));
2177 	flock->l_start = fl->fl_start;
2178 	flock->l_len = fl->fl_end == OFFSET_MAX ? 0 :
2179 		fl->fl_end - fl->fl_start + 1;
2180 	flock->l_whence = 0;
2181 	flock->l_type = fl->fl_type;
2182 }
2183 #endif
2184 
2185 /* Report the first existing lock that would conflict with the request described by flock.
2186  * This implements the F_GETLK command of fcntl().
2187  */
2188 int fcntl_getlk(struct file *filp, unsigned int cmd, struct flock *flock)
2189 {
2190 	struct file_lock *fl;
2191 	int error;
2192 
2193 	fl = locks_alloc_lock();
2194 	if (fl == NULL)
2195 		return -ENOMEM;
2196 	error = -EINVAL;
2197 	if (flock->l_type != F_RDLCK && flock->l_type != F_WRLCK)
2198 		goto out;
2199 
2200 	error = flock_to_posix_lock(filp, fl, flock);
2201 	if (error)
2202 		goto out;
2203 
2204 	if (cmd == F_OFD_GETLK) {
2205 		error = -EINVAL;
2206 		if (flock->l_pid != 0)
2207 			goto out;
2208 
2209 		fl->fl_flags |= FL_OFDLCK;
2210 		fl->fl_owner = filp;
2211 	}
2212 
2213 	error = vfs_test_lock(filp, fl);
2214 	if (error)
2215 		goto out;
2216 
2217 	flock->l_type = fl->fl_type;
2218 	if (fl->fl_type != F_UNLCK) {
2219 		error = posix_lock_to_flock(flock, fl);
2220 		if (error)
2221 			goto out;
2222 	}
2223 out:
2224 	locks_free_lock(fl);
2225 	return error;
2226 }
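/*
 * Example (userspace sketch): probing for a conflict with F_OFD_GETLK;
 * note that l_pid must be zero on input for the OFD commands.
 *
 *	struct flock fl = {
 *		.l_type = F_WRLCK,
 *		.l_whence = SEEK_SET,
 *		.l_start = 0,
 *		.l_len = 0,		// zero length means "to EOF"
 *	};
 *
 *	if (fcntl(fd, F_OFD_GETLK, &fl) == 0 && fl.l_type != F_UNLCK)
 *		printf("conflict, owner pid %d\n", fl.l_pid);	// -1 if OFD-owned
 */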
2227 
2228 /**
2229  * vfs_lock_file - file byte range lock
2230  * @filp: The file to apply the lock to
2231  * @cmd: type of locking operation (F_SETLK, F_GETLK, etc.)
2232  * @fl: The lock to be applied
2233  * @conf: Place to return a copy of the conflicting lock, if found.
2234  *
2235  * A caller that doesn't care about the conflicting lock may pass NULL
2236  * as the final argument.
2237  *
2238  * If the filesystem defines a private ->lock() method, then @conf will
2239  * be left unchanged; so a caller that cares should initialize it to
2240  * some acceptable default.
2241  *
2242  * To avoid blocking kernel daemons, such as lockd, that need to acquire POSIX
2243  * locks, the ->lock() interface may return asynchronously, before the lock has
2244  * been granted or denied by the underlying filesystem, if (and only if)
2245  * lm_grant is set. Callers expecting ->lock() to return asynchronously
2246  * will only use F_SETLK, not F_SETLKW; they will set FL_SLEEP if (and only if)
2247  * the request is for a blocking lock. When ->lock() does return asynchronously,
2248  * it must return FILE_LOCK_DEFERRED, and call ->lm_grant() when the lock
2249  * request completes.
2250  * If the request is for a non-blocking lock the file system should return
2251  * FILE_LOCK_DEFERRED, then try to get the lock and call the callback routine
2252  * with the result. If the request has timed out the callback routine will
2253  * return a nonzero return code and the file system should release the lock. The
2254  * file system is also responsible for keeping a corresponding posix lock when
2255  * it grants a lock so the VFS can find out which locks are locally held and do
2256  * the correct lock cleanup when required.
2257  * The underlying filesystem must not drop the kernel lock or call
2258  * ->lm_grant() before returning to the caller with a FILE_LOCK_DEFERRED
2259  * return code.
2260  */
2261 int vfs_lock_file(struct file *filp, unsigned int cmd, struct file_lock *fl, struct file_lock *conf)
2262 {
2263 	if (filp->f_op->lock)
2264 		return filp->f_op->lock(filp, cmd, fl);
2265 	else
2266 		return posix_lock_file(filp, fl, conf);
2267 }
2268 EXPORT_SYMBOL_GPL(vfs_lock_file);
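/*
 * Sketch (hypothetical filesystem): an asynchronous ->lock() honouring
 * the contract above. myfs_queue_lock() is an assumed helper that later
 * completes the request by calling fl->fl_lmops->lm_grant(fl, result)
 * with zero or an -ERRNO.
 *
 *	static int myfs_lock(struct file *filp, int cmd, struct file_lock *fl)
 *	{
 *		if (cmd == F_SETLK && fl->fl_lmops && fl->fl_lmops->lm_grant) {
 *			myfs_queue_lock(filp, fl);	// decide asynchronously
 *			return FILE_LOCK_DEFERRED;
 *		}
 *		return posix_lock_file(filp, fl, NULL);	// synchronous fallback
 *	}
 */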
2269 
2270 static int do_lock_file_wait(struct file *filp, unsigned int cmd,
2271 			     struct file_lock *fl)
2272 {
2273 	int error;
2274 
2275 	error = security_file_lock(filp, fl->fl_type);
2276 	if (error)
2277 		return error;
2278 
2279 	for (;;) {
2280 		error = vfs_lock_file(filp, cmd, fl, NULL);
2281 		if (error != FILE_LOCK_DEFERRED)
2282 			break;
2283 		error = wait_event_interruptible(fl->fl_wait,
2284 					list_empty(&fl->fl_blocked_member));
2285 		if (error)
2286 			break;
2287 	}
2288 	locks_delete_block(fl);
2289 
2290 	return error;
2291 }
2292 
2293 /* Ensure that fl->fl_file has compatible f_mode for F_SETLK calls */
2294 static int
2295 check_fmode_for_setlk(struct file_lock *fl)
2296 {
2297 	switch (fl->fl_type) {
2298 	case F_RDLCK:
2299 		if (!(fl->fl_file->f_mode & FMODE_READ))
2300 			return -EBADF;
2301 		break;
2302 	case F_WRLCK:
2303 		if (!(fl->fl_file->f_mode & FMODE_WRITE))
2304 			return -EBADF;
2305 	}
2306 	return 0;
2307 }
2308 
2309 /* Apply the lock described by flock to an open file descriptor.
2310  * This implements both the F_SETLK and F_SETLKW commands of fcntl().
2311  */
2312 int fcntl_setlk(unsigned int fd, struct file *filp, unsigned int cmd,
2313 		struct flock *flock)
2314 {
2315 	struct file_lock *file_lock = locks_alloc_lock();
2316 	struct inode *inode = locks_inode(filp);
2317 	struct file *f;
2318 	int error;
2319 
2320 	if (file_lock == NULL)
2321 		return -ENOLCK;
2322 
2323 	error = flock_to_posix_lock(filp, file_lock, flock);
2324 	if (error)
2325 		goto out;
2326 
2327 	error = check_fmode_for_setlk(file_lock);
2328 	if (error)
2329 		goto out;
2330 
2331 	/*
2332 	 * If the cmd is requesting file-private locks, then set the
2333 	 * FL_OFDLCK flag and override the owner.
2334 	 */
2335 	switch (cmd) {
2336 	case F_OFD_SETLK:
2337 		error = -EINVAL;
2338 		if (flock->l_pid != 0)
2339 			goto out;
2340 
2341 		cmd = F_SETLK;
2342 		file_lock->fl_flags |= FL_OFDLCK;
2343 		file_lock->fl_owner = filp;
2344 		break;
2345 	case F_OFD_SETLKW:
2346 		error = -EINVAL;
2347 		if (flock->l_pid != 0)
2348 			goto out;
2349 
2350 		cmd = F_SETLKW;
2351 		file_lock->fl_flags |= FL_OFDLCK;
2352 		file_lock->fl_owner = filp;
2353 		fallthrough;
2354 	case F_SETLKW:
2355 		file_lock->fl_flags |= FL_SLEEP;
2356 	}
2357 
2358 	error = do_lock_file_wait(filp, cmd, file_lock);
2359 
2360 	/*
2361 	 * Attempt to detect a close/fcntl race and recover by releasing the
2362 	 * lock that was just acquired. There is no need to do that when we're
2363 	 * unlocking though, or for OFD locks.
2364 	 */
2365 	if (!error && file_lock->fl_type != F_UNLCK &&
2366 	    !(file_lock->fl_flags & FL_OFDLCK)) {
2367 		struct files_struct *files = current->files;
2368 		/*
2369 		 * We need that spin_lock here - it prevents reordering between
2370 		 * update of i_flctx->flc_posix and check for it done in
2371 		 * close(). rcu_read_lock() wouldn't do.
2372 		 */
2373 		spin_lock(&files->file_lock);
2374 		f = files_lookup_fd_locked(files, fd);
2375 		spin_unlock(&files->file_lock);
2376 		if (f != filp) {
2377 			file_lock->fl_type = F_UNLCK;
2378 			error = do_lock_file_wait(filp, cmd, file_lock);
2379 			WARN_ON_ONCE(error);
2380 			error = -EBADF;
2381 		}
2382 	}
2383 out:
2384 	trace_fcntl_setlk(inode, file_lock, error);
2385 	locks_free_lock(file_lock);
2386 	return error;
2387 }
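/*
 * Example (userspace sketch): taking a whole-file OFD write lock, which
 * is owned by the open file description rather than by the process.
 *
 *	struct flock fl = {
 *		.l_type = F_WRLCK,
 *		.l_whence = SEEK_SET,
 *		.l_start = 0,
 *		.l_len = 0,		// zero length means "to EOF"
 *		.l_pid = 0,		// must be zero for OFD commands
 *	};
 *
 *	if (fcntl(fd, F_OFD_SETLKW, &fl) == -1)
 *		perror("F_OFD_SETLKW");
 */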
2388 
2389 #if BITS_PER_LONG == 32
2390 /* Report the first existing lock that would conflict with the request described by flock.
2391  * This implements the F_GETLK command of fcntl().
2392  */
2393 int fcntl_getlk64(struct file *filp, unsigned int cmd, struct flock64 *flock)
2394 {
2395 	struct file_lock *fl;
2396 	int error;
2397 
2398 	fl = locks_alloc_lock();
2399 	if (fl == NULL)
2400 		return -ENOMEM;
2401 
2402 	error = -EINVAL;
2403 	if (flock->l_type != F_RDLCK && flock->l_type != F_WRLCK)
2404 		goto out;
2405 
2406 	error = flock64_to_posix_lock(filp, fl, flock);
2407 	if (error)
2408 		goto out;
2409 
2410 	if (cmd == F_OFD_GETLK) {
2411 		error = -EINVAL;
2412 		if (flock->l_pid != 0)
2413 			goto out;
2414 
2415 		cmd = F_GETLK64;
2416 		fl->fl_flags |= FL_OFDLCK;
2417 		fl->fl_owner = filp;
2418 	}
2419 
2420 	error = vfs_test_lock(filp, fl);
2421 	if (error)
2422 		goto out;
2423 
2424 	flock->l_type = fl->fl_type;
2425 	if (fl->fl_type != F_UNLCK)
2426 		posix_lock_to_flock64(flock, fl);
2427 
2428 out:
2429 	locks_free_lock(fl);
2430 	return error;
2431 }
2432 
2433 /* Apply the lock described by flock to an open file descriptor.
2434  * This implements both the F_SETLK and F_SETLKW commands of fcntl().
2435  */
2436 int fcntl_setlk64(unsigned int fd, struct file *filp, unsigned int cmd,
2437 		struct flock64 *flock)
2438 {
2439 	struct file_lock *file_lock = locks_alloc_lock();
2440 	struct file *f;
2441 	int error;
2442 
2443 	if (file_lock == NULL)
2444 		return -ENOLCK;
2445 
2446 	error = flock64_to_posix_lock(filp, file_lock, flock);
2447 	if (error)
2448 		goto out;
2449 
2450 	error = check_fmode_for_setlk(file_lock);
2451 	if (error)
2452 		goto out;
2453 
2454 	/*
2455 	 * If the cmd is requesting file-private locks, then set the
2456 	 * FL_OFDLCK flag and override the owner.
2457 	 */
2458 	switch (cmd) {
2459 	case F_OFD_SETLK:
2460 		error = -EINVAL;
2461 		if (flock->l_pid != 0)
2462 			goto out;
2463 
2464 		cmd = F_SETLK64;
2465 		file_lock->fl_flags |= FL_OFDLCK;
2466 		file_lock->fl_owner = filp;
2467 		break;
2468 	case F_OFD_SETLKW:
2469 		error = -EINVAL;
2470 		if (flock->l_pid != 0)
2471 			goto out;
2472 
2473 		cmd = F_SETLKW64;
2474 		file_lock->fl_flags |= FL_OFDLCK;
2475 		file_lock->fl_owner = filp;
2476 		fallthrough;
2477 	case F_SETLKW64:
2478 		file_lock->fl_flags |= FL_SLEEP;
2479 	}
2480 
2481 	error = do_lock_file_wait(filp, cmd, file_lock);
2482 
2483 	/*
2484 	 * Attempt to detect a close/fcntl race and recover by releasing the
2485 	 * lock that was just acquired. There is no need to do that when we're
2486 	 * unlocking though, or for OFD locks.
2487 	 */
2488 	if (!error && file_lock->fl_type != F_UNLCK &&
2489 	    !(file_lock->fl_flags & FL_OFDLCK)) {
2490 		struct files_struct *files = current->files;
2491 		/*
2492 		 * We need that spin_lock here - it prevents reordering between
2493 		 * update of i_flctx->flc_posix and check for it done in
2494 		 * close(). rcu_read_lock() wouldn't do.
2495 		 */
2496 		spin_lock(&files->file_lock);
2497 		f = files_lookup_fd_locked(files, fd);
2498 		spin_unlock(&files->file_lock);
2499 		if (f != filp) {
2500 			file_lock->fl_type = F_UNLCK;
2501 			error = do_lock_file_wait(filp, cmd, file_lock);
2502 			WARN_ON_ONCE(error);
2503 			error = -EBADF;
2504 		}
2505 	}
2506 out:
2507 	locks_free_lock(file_lock);
2508 	return error;
2509 }
2510 #endif /* BITS_PER_LONG == 32 */
2511 
2512 /*
2513  * This function is called when the file is being removed
2514  * from the task's fd array.  POSIX locks belonging to this task
2515  * are deleted at this time.
2516  */
2517 void locks_remove_posix(struct file *filp, fl_owner_t owner)
2518 {
2519 	int error;
2520 	struct inode *inode = locks_inode(filp);
2521 	struct file_lock lock;
2522 	struct file_lock_context *ctx;
2523 
2524 	/*
2525 	 * If there are no locks held on this file, we don't need to call
2526 	 * posix_lock_file().  Another process could be setting a lock on this
2527 	 * file at the same time, but we wouldn't remove that lock anyway.
2528 	 */
2529 	ctx = smp_load_acquire(&inode->i_flctx);
2530 	if (!ctx || list_empty(&ctx->flc_posix))
2531 		return;
2532 
2533 	locks_init_lock(&lock);
2534 	lock.fl_type = F_UNLCK;
2535 	lock.fl_flags = FL_POSIX | FL_CLOSE;
2536 	lock.fl_start = 0;
2537 	lock.fl_end = OFFSET_MAX;
2538 	lock.fl_owner = owner;
2539 	lock.fl_pid = current->tgid;
2540 	lock.fl_file = filp;
2541 	lock.fl_ops = NULL;
2542 	lock.fl_lmops = NULL;
2543 
2544 	error = vfs_lock_file(filp, F_SETLK, &lock, NULL);
2545 
2546 	if (lock.fl_ops && lock.fl_ops->fl_release_private)
2547 		lock.fl_ops->fl_release_private(&lock);
2548 	trace_locks_remove_posix(inode, &lock, error);
2549 }
2550 EXPORT_SYMBOL(locks_remove_posix);
2551 
2552 /* The i_flctx must be valid when calling into here */
2553 static void
2554 locks_remove_flock(struct file *filp, struct file_lock_context *flctx)
2555 {
2556 	struct file_lock fl;
2557 	struct inode *inode = locks_inode(filp);
2558 
2559 	if (list_empty(&flctx->flc_flock))
2560 		return;
2561 
2562 	flock_make_lock(filp, LOCK_UN, &fl);
2563 	fl.fl_flags |= FL_CLOSE;
2564 
2565 	if (filp->f_op->flock)
2566 		filp->f_op->flock(filp, F_SETLKW, &fl);
2567 	else
2568 		flock_lock_inode(inode, &fl);
2569 
2570 	if (fl.fl_ops && fl.fl_ops->fl_release_private)
2571 		fl.fl_ops->fl_release_private(&fl);
2572 }
2573 
2574 /* The i_flctx must be valid when calling into here */
2575 static void
2576 locks_remove_lease(struct file *filp, struct file_lock_context *ctx)
2577 {
2578 	struct file_lock *fl, *tmp;
2579 	LIST_HEAD(dispose);
2580 
2581 	if (list_empty(&ctx->flc_lease))
2582 		return;
2583 
2584 	percpu_down_read(&file_rwsem);
2585 	spin_lock(&ctx->flc_lock);
2586 	list_for_each_entry_safe(fl, tmp, &ctx->flc_lease, fl_list)
2587 		if (filp == fl->fl_file)
2588 			lease_modify(fl, F_UNLCK, &dispose);
2589 	spin_unlock(&ctx->flc_lock);
2590 	percpu_up_read(&file_rwsem);
2591 
2592 	locks_dispose_list(&dispose);
2593 }
2594 
2595 /*
2596  * This function is called on the last close of an open file.
2597  */
2598 void locks_remove_file(struct file *filp)
2599 {
2600 	struct file_lock_context *ctx;
2601 
2602 	ctx = smp_load_acquire(&locks_inode(filp)->i_flctx);
2603 	if (!ctx)
2604 		return;
2605 
2606 	/* remove any OFD locks */
2607 	locks_remove_posix(filp, filp);
2608 
2609 	/* remove flock locks */
2610 	locks_remove_flock(filp, ctx);
2611 
2612 	/* remove any leases */
2613 	locks_remove_lease(filp, ctx);
2614 
2615 	spin_lock(&ctx->flc_lock);
2616 	locks_check_ctx_file_list(filp, &ctx->flc_posix, "POSIX");
2617 	locks_check_ctx_file_list(filp, &ctx->flc_flock, "FLOCK");
2618 	locks_check_ctx_file_list(filp, &ctx->flc_lease, "LEASE");
2619 	spin_unlock(&ctx->flc_lock);
2620 }
2621 
2622 /**
2623  * vfs_cancel_lock - file byte range unblock lock
2624  * @filp: The file to apply the unblock to
2625  * @fl: The lock to be unblocked
2626  *
2627  * Used by lock managers to cancel blocked requests
2628  */
2629 int vfs_cancel_lock(struct file *filp, struct file_lock *fl)
2630 {
2631 	if (filp->f_op->lock)
2632 		return filp->f_op->lock(filp, F_CANCELLK, fl);
2633 	return 0;
2634 }
2635 EXPORT_SYMBOL_GPL(vfs_cancel_lock);
2636 
2637 #ifdef CONFIG_PROC_FS
2638 #include <linux/proc_fs.h>
2639 #include <linux/seq_file.h>
2640 
2641 struct locks_iterator {
2642 	int	li_cpu;
2643 	loff_t	li_pos;
2644 };
2645 
2646 static void lock_get_status(struct seq_file *f, struct file_lock *fl,
2647 			    loff_t id, char *pfx, int repeat)
2648 {
2649 	struct inode *inode = NULL;
2650 	unsigned int fl_pid;
2651 	struct pid_namespace *proc_pidns = proc_pid_ns(file_inode(f->file)->i_sb);
2652 	int type;
2653 
2654 	fl_pid = locks_translate_pid(fl, proc_pidns);
2655 	/*
2656 	 * If the lock owner is dead (and its pid has been freed) or not
2657 	 * visible in the current pidns, zero is shown as the pid value. Check
2658 	 * the lock info in init_pid_ns to get the saved lock pid value.
2659 	 */
2660 
2661 	if (fl->fl_file != NULL)
2662 		inode = locks_inode(fl->fl_file);
2663 
2664 	seq_printf(f, "%lld: ", id);
2665 
2666 	if (repeat)
2667 		seq_printf(f, "%*s", repeat - 1 + (int)strlen(pfx), pfx);
2668 
2669 	if (IS_POSIX(fl)) {
2670 		if (fl->fl_flags & FL_ACCESS)
2671 			seq_puts(f, "ACCESS");
2672 		else if (IS_OFDLCK(fl))
2673 			seq_puts(f, "OFDLCK");
2674 		else
2675 			seq_puts(f, "POSIX ");
2676 
2677 		seq_printf(f, " %s ",
2678 			     (inode == NULL) ? "*NOINODE*" : "ADVISORY ");
2679 	} else if (IS_FLOCK(fl)) {
2680 		seq_puts(f, "FLOCK  ADVISORY  ");
2681 	} else if (IS_LEASE(fl)) {
2682 		if (fl->fl_flags & FL_DELEG)
2683 			seq_puts(f, "DELEG  ");
2684 		else
2685 			seq_puts(f, "LEASE  ");
2686 
2687 		if (lease_breaking(fl))
2688 			seq_puts(f, "BREAKING  ");
2689 		else if (fl->fl_file)
2690 			seq_puts(f, "ACTIVE    ");
2691 		else
2692 			seq_puts(f, "BREAKER   ");
2693 	} else {
2694 		seq_puts(f, "UNKNOWN UNKNOWN  ");
2695 	}
2696 	type = IS_LEASE(fl) ? target_leasetype(fl) : fl->fl_type;
2697 
2698 	seq_printf(f, "%s ", (type == F_WRLCK) ? "WRITE" :
2699 			     (type == F_RDLCK) ? "READ" : "UNLCK");
2700 	if (inode) {
2701 		/* userspace relies on this representation of dev_t */
2702 		seq_printf(f, "%d %02x:%02x:%lu ", fl_pid,
2703 				MAJOR(inode->i_sb->s_dev),
2704 				MINOR(inode->i_sb->s_dev), inode->i_ino);
2705 	} else {
2706 		seq_printf(f, "%d <none>:0 ", fl_pid);
2707 	}
2708 	if (IS_POSIX(fl)) {
2709 		if (fl->fl_end == OFFSET_MAX)
2710 			seq_printf(f, "%Ld EOF\n", fl->fl_start);
2711 		else
2712 			seq_printf(f, "%Ld %Ld\n", fl->fl_start, fl->fl_end);
2713 	} else {
2714 		seq_puts(f, "0 EOF\n");
2715 	}
2716 }
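/*
 * The resulting /proc/locks lines look like (values illustrative):
 *
 *	1: POSIX  ADVISORY  WRITE 1234 08:01:131090 0 EOF
 *	2: FLOCK  ADVISORY  WRITE 5678 08:01:131091 0 EOF
 *	2: -> FLOCK  ADVISORY  WRITE 5679 08:01:131091 0 EOF
 *
 * where a "-> " entry is a blocked waiter printed beneath the lock it
 * is waiting on.
 */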
2717 
2718 static struct file_lock *get_next_blocked_member(struct file_lock *node)
2719 {
2720 	struct file_lock *tmp;
2721 
2722 	/* NULL node or root node */
2723 	if (node == NULL || node->fl_blocker == NULL)
2724 		return NULL;
2725 
2726 	/* Next member in the linked list could be itself */
2727 	tmp = list_next_entry(node, fl_blocked_member);
2728 	if (list_entry_is_head(tmp, &node->fl_blocker->fl_blocked_requests, fl_blocked_member)
2729 		|| tmp == node) {
2730 		return NULL;
2731 	}
2732 
2733 	return tmp;
2734 }
2735 
2736 static int locks_show(struct seq_file *f, void *v)
2737 {
2738 	struct locks_iterator *iter = f->private;
2739 	struct file_lock *cur, *tmp;
2740 	struct pid_namespace *proc_pidns = proc_pid_ns(file_inode(f->file)->i_sb);
2741 	int level = 0;
2742 
2743 	cur = hlist_entry(v, struct file_lock, fl_link);
2744 
2745 	if (locks_translate_pid(cur, proc_pidns) == 0)
2746 		return 0;
2747 
2748 	/* View this crossed linked list as a binary tree: the first member of
2749 	 * fl_blocked_requests is the left child of the current node, and the
2750 	 * next sibling in fl_blocked_member is the right child; fl_blocker
2751 	 * gives the parent, so this reduces to a binary-tree traversal.
2752 	 */
2753 	while (cur != NULL) {
2754 		if (level)
2755 			lock_get_status(f, cur, iter->li_pos, "-> ", level);
2756 		else
2757 			lock_get_status(f, cur, iter->li_pos, "", level);
2758 
2759 		if (!list_empty(&cur->fl_blocked_requests)) {
2760 			/* Turn left */
2761 			cur = list_first_entry_or_null(&cur->fl_blocked_requests,
2762 				struct file_lock, fl_blocked_member);
2763 			level++;
2764 		} else {
2765 			/* Turn right */
2766 			tmp = get_next_blocked_member(cur);
2767 			/* Fall back to parent node */
2768 			while (tmp == NULL && cur->fl_blocker != NULL) {
2769 				cur = cur->fl_blocker;
2770 				level--;
2771 				tmp = get_next_blocked_member(cur);
2772 			}
2773 			cur = tmp;
2774 		}
2775 	}
2776 
2777 	return 0;
2778 }
2779 
2780 static void __show_fd_locks(struct seq_file *f,
2781 			struct list_head *head, int *id,
2782 			struct file *filp, struct files_struct *files)
2783 {
2784 	struct file_lock *fl;
2785 
2786 	list_for_each_entry(fl, head, fl_list) {
2787 
2788 		if (filp != fl->fl_file)
2789 			continue;
2790 		if (fl->fl_owner != files &&
2791 		    fl->fl_owner != filp)
2792 			continue;
2793 
2794 		(*id)++;
2795 		seq_puts(f, "lock:\t");
2796 		lock_get_status(f, fl, *id, "", 0);
2797 	}
2798 }
2799 
2800 void show_fd_locks(struct seq_file *f,
2801 		  struct file *filp, struct files_struct *files)
2802 {
2803 	struct inode *inode = locks_inode(filp);
2804 	struct file_lock_context *ctx;
2805 	int id = 0;
2806 
2807 	ctx = smp_load_acquire(&inode->i_flctx);
2808 	if (!ctx)
2809 		return;
2810 
2811 	spin_lock(&ctx->flc_lock);
2812 	__show_fd_locks(f, &ctx->flc_flock, &id, filp, files);
2813 	__show_fd_locks(f, &ctx->flc_posix, &id, filp, files);
2814 	__show_fd_locks(f, &ctx->flc_lease, &id, filp, files);
2815 	spin_unlock(&ctx->flc_lock);
2816 }
2817 
2818 static void *locks_start(struct seq_file *f, loff_t *pos)
2819 	__acquires(&blocked_lock_lock)
2820 {
2821 	struct locks_iterator *iter = f->private;
2822 
2823 	iter->li_pos = *pos + 1;
2824 	percpu_down_write(&file_rwsem);
2825 	spin_lock(&blocked_lock_lock);
2826 	return seq_hlist_start_percpu(&file_lock_list.hlist, &iter->li_cpu, *pos);
2827 }
2828 
2829 static void *locks_next(struct seq_file *f, void *v, loff_t *pos)
2830 {
2831 	struct locks_iterator *iter = f->private;
2832 
2833 	++iter->li_pos;
2834 	return seq_hlist_next_percpu(v, &file_lock_list.hlist, &iter->li_cpu, pos);
2835 }
2836 
2837 static void locks_stop(struct seq_file *f, void *v)
2838 	__releases(&blocked_lock_lock)
2839 {
2840 	spin_unlock(&blocked_lock_lock);
2841 	percpu_up_write(&file_rwsem);
2842 }
2843 
2844 static const struct seq_operations locks_seq_operations = {
2845 	.start	= locks_start,
2846 	.next	= locks_next,
2847 	.stop	= locks_stop,
2848 	.show	= locks_show,
2849 };
2850 
2851 static int __init proc_locks_init(void)
2852 {
2853 	proc_create_seq_private("locks", 0, NULL, &locks_seq_operations,
2854 			sizeof(struct locks_iterator), NULL);
2855 	return 0;
2856 }
2857 fs_initcall(proc_locks_init);
2858 #endif
2859 
2860 static int __init filelock_init(void)
2861 {
2862 	int i;
2863 
2864 	flctx_cache = kmem_cache_create("file_lock_ctx",
2865 			sizeof(struct file_lock_context), 0, SLAB_PANIC, NULL);
2866 
2867 	filelock_cache = kmem_cache_create("file_lock_cache",
2868 			sizeof(struct file_lock), 0, SLAB_PANIC, NULL);
2869 
2870 	for_each_possible_cpu(i) {
2871 		struct file_lock_list_struct *fll = per_cpu_ptr(&file_lock_list, i);
2872 
2873 		spin_lock_init(&fll->lock);
2874 		INIT_HLIST_HEAD(&fll->hlist);
2875 	}
2876 
2877 	lease_notifier_chain_init();
2878 	return 0;
2879 }
2880 core_initcall(filelock_init);
2881