// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/fs/locks.c
 *
 *  Provide support for fcntl()'s F_GETLK, F_SETLK, and F_SETLKW calls.
 *  Doug Evans (dje@spiff.uucp), August 07, 1992
 *
 *  Deadlock detection added.
 *  FIXME: one thing isn't handled yet:
 *	- mandatory locks (requires lots of changes elsewhere)
 *  Kelly Carmichael (kelly@[142.24.8.65]), September 17, 1994.
 *
 *  Miscellaneous edits, and a total rewrite of posix_lock_file() code.
 *  Kai Petzke (wpp@marie.physik.tu-berlin.de), 1994
 *
 *  Converted file_lock_table to a linked list from an array, which eliminates
 *  the limits on how many active file locks are open.
 *  Chad Page (pageone@netcom.com), November 27, 1994
 *
 *  Removed dependency on file descriptors. dup()'ed file descriptors now
 *  get the same locks as the original file descriptors, and a close() on
 *  any file descriptor removes ALL the locks on the file for the current
 *  process. Since locks still depend on the process id, locks are inherited
 *  after an exec() but not after a fork(). This agrees with POSIX, and both
 *  BSD and SVR4 practice.
 *  Andy Walker (andy@lysaker.kvaerner.no), February 14, 1995
 *
 *  Scrapped free list which is redundant now that we allocate locks
 *  dynamically with kmalloc()/kfree().
 *  Andy Walker (andy@lysaker.kvaerner.no), February 21, 1995
 *
 *  Implemented two lock personalities - FL_FLOCK and FL_POSIX.
 *
 *  FL_POSIX locks are created with calls to fcntl() and lockf() through the
 *  fcntl() system call. They have the semantics described above.
 *
 *  FL_FLOCK locks are created with calls to flock(), through the flock()
 *  system call, which is new. Old C libraries implement flock() via fcntl()
 *  and will continue to use the old, broken implementation.
 *
 *  FL_FLOCK locks follow the 4.4 BSD flock() semantics. They are associated
 *  with a file pointer (filp). As a result they can be shared by a parent
 *  process and its children after a fork(). They are removed when the last
 *  file descriptor referring to the file pointer is closed (unless explicitly
 *  unlocked).
 *
 *  FL_FLOCK locks never deadlock, an existing lock is always removed before
 *  upgrading from shared to exclusive (or vice versa). When this happens
 *  any processes blocked by the current lock are woken up and allowed to
 *  run before the new lock is applied.
 *  Andy Walker (andy@lysaker.kvaerner.no), June 09, 1995
 *
 *  Removed some race conditions in flock_lock_file(), marked other possible
 *  races. Just grep for FIXME to see them.
 *  Dmitry Gorodchanin (pgmdsg@ibi.com), February 09, 1996.
 *
 *  Addressed Dmitry's concerns. Deadlock checking no longer recursive.
 *  Lock allocation changed to GFP_ATOMIC as we can't afford to sleep
 *  once we've checked for blocking and deadlocking.
 *  Andy Walker (andy@lysaker.kvaerner.no), April 03, 1996.
 *
 *  Initial implementation of mandatory locks. SunOS turned out to be
 *  a rotten model, so I implemented the "obvious" semantics.
 *  See 'Documentation/filesystems/mandatory-locking.txt' for details.
 *  Andy Walker (andy@lysaker.kvaerner.no), April 06, 1996.
 *
 *  Don't allow mandatory locks on mmap()'ed files. Added simple functions to
 *  check if a file has mandatory locks, used by mmap(), open() and creat() to
 *  see if system call should be rejected. Ref. HP-UX/SunOS/Solaris Reference
 *  Manual, Section 2.
 *  Andy Walker (andy@lysaker.kvaerner.no), April 09, 1996.
 *
 *  Tidied up block list handling. Added '/proc/locks' interface.
 *  Andy Walker (andy@lysaker.kvaerner.no), April 24, 1996.
 *
 *  Fixed deadlock condition for pathological code that mixes calls to
 *  flock() and fcntl().
 *  Andy Walker (andy@lysaker.kvaerner.no), April 29, 1996.
 *
 *  Allow only one type of locking scheme (FL_POSIX or FL_FLOCK) to be in use
 *  for a given file at a time. Changed the CONFIG_LOCK_MANDATORY scheme to
 *  guarantee sensible behaviour in the case where file system modules might
 *  be compiled with different options than the kernel itself.
 *  Andy Walker (andy@lysaker.kvaerner.no), May 15, 1996.
 *
 *  Added a couple of missing wake_up() calls. Thanks to Thomas Meckel
 *  (Thomas.Meckel@mni.fh-giessen.de) for spotting this.
 *  Andy Walker (andy@lysaker.kvaerner.no), May 15, 1996.
 *
 *  Changed FL_POSIX locks to use the block list in the same way as FL_FLOCK
 *  locks. Changed process synchronisation to avoid dereferencing locks that
 *  have already been freed.
 *  Andy Walker (andy@lysaker.kvaerner.no), Sep 21, 1996.
 *
 *  Made the block list a circular list to minimise searching in the list.
 *  Andy Walker (andy@lysaker.kvaerner.no), Sep 25, 1996.
 *
 *  Made mandatory locking a mount option. Default is not to allow mandatory
 *  locking.
 *  Andy Walker (andy@lysaker.kvaerner.no), Oct 04, 1996.
 *
 *  Some adaptations for NFS support.
 *  Olaf Kirch (okir@monad.swb.de), Dec 1996,
 *
 *  Fixed /proc/locks interface so that we can't overrun the buffer we are handed.
 *  Andy Walker (andy@lysaker.kvaerner.no), May 12, 1997.
 *
 *  Use slab allocator instead of kmalloc/kfree.
 *  Use generic list implementation from <linux/list.h>.
 *  Sped up posix_locks_deadlock by only considering blocked locks.
 *  Matthew Wilcox <willy@debian.org>, March, 2000.
 *
 *  Leases and LOCK_MAND
 *  Matthew Wilcox <willy@debian.org>, June, 2000.
 *  Stephen Rothwell <sfr@canb.auug.org.au>, June, 2000.
 *
 * Locking conflicts and dependencies:
 * If multiple threads attempt to lock the same byte (or flock the same file)
 * only one can be granted the lock, and the others must wait their turn.
 * The first lock has been "applied" or "granted"; the others are "waiting"
 * and are "blocked" by the "applied" lock.
 *
 * Waiting and applied locks are all kept in trees whose properties are:
 *
 *	- the root of a tree may be an applied or waiting lock.
 *	- every other node in the tree is a waiting lock that
 *	  conflicts with every ancestor of that node.
 *
 * Every such tree begins life as a waiting singleton which obviously
 * satisfies the above properties.
 *
 * The only ways we modify trees preserve these properties:
 *
 *	1. We may add a new leaf node, but only after first verifying that it
 *	   conflicts with all of its ancestors.
 *	2. We may remove the root of a tree, creating a new singleton
 *	   tree from the root and N new trees rooted in the immediate
 *	   children.
 *	3. If the root of a tree is not currently an applied lock, we may
 *	   apply it (if possible).
 *	4. We may upgrade the root of the tree (either extend its range,
 *	   or upgrade its entire range from read to write).
 *
 * When an applied lock is modified in a way that reduces or downgrades any
 * part of its range, we remove all its children (2 above).  This particularly
 * happens when a lock is unlocked.
 *
 * For each of those child trees we "wake up" the thread which is
 * waiting for the lock so it can continue handling as follows: if the
 * root of the tree applies, we do so (3).  If it doesn't, it must
 * conflict with some applied lock.  We remove (wake up) all of its children
 * (2), and add it as a new leaf to the tree rooted in the applied
 * lock (1).  We then repeat the process recursively with those
 * children.
 *
 */
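
/*
 * A small worked example of the rules above: suppose an applied write
 * lock L covers bytes 0-9, a waiting write lock W1 on 0-9 sits beneath
 * it, and a waiting read lock W2 on 5-14 sits beneath W1 (W2 conflicts
 * with both of its ancestors, as required).  When L is unlocked we
 * remove the root (2): W1 becomes the root of its own tree, still with
 * W2 beneath it, and only W1's owner is woken.  W1 can then be applied
 * (3), and W2 correctly stays asleep, since it still conflicts with its
 * new root.
 */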

#include <linux/capability.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/security.h>
#include <linux/slab.h>
#include <linux/syscalls.h>
#include <linux/time.h>
#include <linux/rcupdate.h>
#include <linux/pid_namespace.h>
#include <linux/hashtable.h>
#include <linux/percpu.h>

#define CREATE_TRACE_POINTS
#include <trace/events/filelock.h>

#include <linux/uaccess.h>

#define IS_POSIX(fl)	(fl->fl_flags & FL_POSIX)
#define IS_FLOCK(fl)	(fl->fl_flags & FL_FLOCK)
#define IS_LEASE(fl)	(fl->fl_flags & (FL_LEASE|FL_DELEG|FL_LAYOUT))
#define IS_OFDLCK(fl)	(fl->fl_flags & FL_OFDLCK)
#define IS_REMOTELCK(fl)	(fl->fl_pid <= 0)

static bool lease_breaking(struct file_lock *fl)
{
	return fl->fl_flags & (FL_UNLOCK_PENDING | FL_DOWNGRADE_PENDING);
}

static int target_leasetype(struct file_lock *fl)
{
	if (fl->fl_flags & FL_UNLOCK_PENDING)
		return F_UNLCK;
	if (fl->fl_flags & FL_DOWNGRADE_PENDING)
		return F_RDLCK;
	return fl->fl_type;
}

int leases_enable = 1;
int lease_break_time = 45;

/*
 * The global file_lock_list is only used for displaying /proc/locks, so we
 * keep a list on each CPU, with each list protected by its own spinlock.
 * Global serialization is done using file_rwsem.
 *
 * Note that alterations to the list also require that the relevant flc_lock is
 * held.
 */
struct file_lock_list_struct {
	spinlock_t		lock;
	struct hlist_head	hlist;
};
static DEFINE_PER_CPU(struct file_lock_list_struct, file_lock_list);
DEFINE_STATIC_PERCPU_RWSEM(file_rwsem);
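
/*
 * An illustrative sketch of how these are meant to be used together
 * (simplified from the real accessors further down in this file; "fll"
 * is a file_lock_list_struct pointer as in those helpers):
 *
 *	adding/removing a lock (frequent, cheap):
 *		percpu_down_read(&file_rwsem);
 *		spin_lock(&fll->lock);		// this CPU's list only
 *		hlist_add_head(&fl->fl_link, &fll->hlist);
 *		spin_unlock(&fll->lock);
 *		percpu_up_read(&file_rwsem);
 *
 *	walking every list for /proc/locks (rare):
 *		percpu_down_write(&file_rwsem);	// excludes all modifiers
 *		for_each_possible_cpu(cpu)
 *			... walk per_cpu_ptr(&file_lock_list, cpu)->hlist ...
 *		percpu_up_write(&file_rwsem);
 *
 * Note the inversion: the frequent *writers* of the list take the read
 * side of the percpu rwsem (which scales), while the rare reader takes
 * the write side so it sees a stable view of all the per-CPU lists.
 */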

/*
 * The blocked_hash is used to find POSIX lock loops for deadlock detection.
 * It is protected by blocked_lock_lock.
 *
 * We hash locks by lockowner in order to optimize searching for the lock a
 * particular lockowner is waiting on.
 *
 * FIXME: make this value scale via some heuristic? We generally will want more
 * buckets when we have more lockowners holding locks, but that's a little
 * difficult to determine without knowing what the workload will look like.
 */
#define BLOCKED_HASH_BITS	7
static DEFINE_HASHTABLE(blocked_hash, BLOCKED_HASH_BITS);

/*
 * This lock protects the blocked_hash. Generally, if you're accessing it, you
 * want to be holding this lock.
 *
 * In addition, it also protects the fl->fl_blocked_requests list, and the
 * fl->fl_blocker pointer for file_lock structures that are acting as lock
 * requests (in contrast to those that are acting as records of acquired locks).
 *
 * Note that when we acquire this lock in order to change the above fields,
 * we often hold the flc_lock as well. In certain cases, when reading the fields
 * protected by this lock, we can skip acquiring it iff we already hold the
 * flc_lock.
 */
static DEFINE_SPINLOCK(blocked_lock_lock);

static struct kmem_cache *flctx_cache __read_mostly;
static struct kmem_cache *filelock_cache __read_mostly;

static struct file_lock_context *
locks_get_lock_context(struct inode *inode, int type)
{
	struct file_lock_context *ctx;

	/* paired with cmpxchg() below */
	ctx = smp_load_acquire(&inode->i_flctx);
	if (likely(ctx) || type == F_UNLCK)
		goto out;

	ctx = kmem_cache_alloc(flctx_cache, GFP_KERNEL);
	if (!ctx)
		goto out;

	spin_lock_init(&ctx->flc_lock);
	INIT_LIST_HEAD(&ctx->flc_flock);
	INIT_LIST_HEAD(&ctx->flc_posix);
	INIT_LIST_HEAD(&ctx->flc_lease);

	/*
	 * Assign the pointer if it's not already assigned. If it is, then
	 * free the context we just allocated.
	 */
	if (cmpxchg(&inode->i_flctx, NULL, ctx)) {
		kmem_cache_free(flctx_cache, ctx);
		ctx = smp_load_acquire(&inode->i_flctx);
	}
out:
	trace_locks_get_lock_context(inode, type, ctx);
	return ctx;
}
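
/*
 * The function above is an instance of the usual lockless lazy-init
 * pattern; a minimal sketch of the idea (obj, alloc_and_init() and
 * free() are hypothetical names used only for illustration):
 *
 *	p = smp_load_acquire(&obj->ptr);	// fast path: already set?
 *	if (!p) {
 *		p = alloc_and_init();
 *		if (cmpxchg(&obj->ptr, NULL, p)) {	// lost the race
 *			free(p);			// drop our copy
 *			p = smp_load_acquire(&obj->ptr); // use the winner's
 *		}
 *	}
 *
 * The acquire load pairs with the cmpxchg() so that a thread which sees
 * a non-NULL pointer is also guaranteed to see the initialised contents
 * it points to.
 */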

static void
locks_dump_ctx_list(struct list_head *list, char *list_type)
{
	struct file_lock *fl;

	list_for_each_entry(fl, list, fl_list) {
		pr_warn("%s: fl_owner=%p fl_flags=0x%x fl_type=0x%x fl_pid=%u\n", list_type, fl->fl_owner, fl->fl_flags, fl->fl_type, fl->fl_pid);
	}
}

static void
locks_check_ctx_lists(struct inode *inode)
{
	struct file_lock_context *ctx = inode->i_flctx;

	if (unlikely(!list_empty(&ctx->flc_flock) ||
		     !list_empty(&ctx->flc_posix) ||
		     !list_empty(&ctx->flc_lease))) {
		pr_warn("Leaked locks on dev=0x%x:0x%x ino=0x%lx:\n",
			MAJOR(inode->i_sb->s_dev), MINOR(inode->i_sb->s_dev),
			inode->i_ino);
		locks_dump_ctx_list(&ctx->flc_flock, "FLOCK");
		locks_dump_ctx_list(&ctx->flc_posix, "POSIX");
		locks_dump_ctx_list(&ctx->flc_lease, "LEASE");
	}
}

static void
locks_check_ctx_file_list(struct file *filp, struct list_head *list,
				char *list_type)
{
	struct file_lock *fl;
	struct inode *inode = locks_inode(filp);

	list_for_each_entry(fl, list, fl_list)
		if (fl->fl_file == filp)
			pr_warn("Leaked %s lock on dev=0x%x:0x%x ino=0x%lx "
				" fl_owner=%p fl_flags=0x%x fl_type=0x%x fl_pid=%u\n",
				list_type, MAJOR(inode->i_sb->s_dev),
				MINOR(inode->i_sb->s_dev), inode->i_ino,
				fl->fl_owner, fl->fl_flags, fl->fl_type, fl->fl_pid);
}

void
locks_free_lock_context(struct inode *inode)
{
	struct file_lock_context *ctx = inode->i_flctx;

	if (unlikely(ctx)) {
		locks_check_ctx_lists(inode);
		kmem_cache_free(flctx_cache, ctx);
	}
}

static void locks_init_lock_heads(struct file_lock *fl)
{
	INIT_HLIST_NODE(&fl->fl_link);
	INIT_LIST_HEAD(&fl->fl_list);
	INIT_LIST_HEAD(&fl->fl_blocked_requests);
	INIT_LIST_HEAD(&fl->fl_blocked_member);
	init_waitqueue_head(&fl->fl_wait);
}

/* Allocate an empty lock structure. */
struct file_lock *locks_alloc_lock(void)
{
	struct file_lock *fl = kmem_cache_zalloc(filelock_cache, GFP_KERNEL);

	if (fl)
		locks_init_lock_heads(fl);

	return fl;
}
EXPORT_SYMBOL_GPL(locks_alloc_lock);

void locks_release_private(struct file_lock *fl)
{
	BUG_ON(waitqueue_active(&fl->fl_wait));
	BUG_ON(!list_empty(&fl->fl_list));
	BUG_ON(!list_empty(&fl->fl_blocked_requests));
	BUG_ON(!list_empty(&fl->fl_blocked_member));
	BUG_ON(!hlist_unhashed(&fl->fl_link));

	if (fl->fl_ops) {
		if (fl->fl_ops->fl_release_private)
			fl->fl_ops->fl_release_private(fl);
		fl->fl_ops = NULL;
	}

	if (fl->fl_lmops) {
		if (fl->fl_lmops->lm_put_owner) {
			fl->fl_lmops->lm_put_owner(fl->fl_owner);
			fl->fl_owner = NULL;
		}
		fl->fl_lmops = NULL;
	}
}
EXPORT_SYMBOL_GPL(locks_release_private);

/* Free a lock which is not in use. */
void locks_free_lock(struct file_lock *fl)
{
	locks_release_private(fl);
	kmem_cache_free(filelock_cache, fl);
}
EXPORT_SYMBOL(locks_free_lock);

static void
locks_dispose_list(struct list_head *dispose)
{
	struct file_lock *fl;

	while (!list_empty(dispose)) {
		fl = list_first_entry(dispose, struct file_lock, fl_list);
		list_del_init(&fl->fl_list);
		locks_free_lock(fl);
	}
}

void locks_init_lock(struct file_lock *fl)
{
	memset(fl, 0, sizeof(struct file_lock));
	locks_init_lock_heads(fl);
}
EXPORT_SYMBOL(locks_init_lock);

/*
 * Initialize a new lock from an existing file_lock structure.
 */
void locks_copy_conflock(struct file_lock *new, struct file_lock *fl)
{
	new->fl_owner = fl->fl_owner;
	new->fl_pid = fl->fl_pid;
	new->fl_file = NULL;
	new->fl_flags = fl->fl_flags;
	new->fl_type = fl->fl_type;
	new->fl_start = fl->fl_start;
	new->fl_end = fl->fl_end;
	new->fl_lmops = fl->fl_lmops;
	new->fl_ops = NULL;

	if (fl->fl_lmops) {
		if (fl->fl_lmops->lm_get_owner)
			fl->fl_lmops->lm_get_owner(fl->fl_owner);
	}
}
EXPORT_SYMBOL(locks_copy_conflock);

void locks_copy_lock(struct file_lock *new, struct file_lock *fl)
{
	/* "new" must be a freshly-initialized lock */
	WARN_ON_ONCE(new->fl_ops);

	locks_copy_conflock(new, fl);

	new->fl_file = fl->fl_file;
	new->fl_ops = fl->fl_ops;

	if (fl->fl_ops) {
		if (fl->fl_ops->fl_copy_lock)
			fl->fl_ops->fl_copy_lock(new, fl);
	}
}
EXPORT_SYMBOL(locks_copy_lock);

static void locks_move_blocks(struct file_lock *new, struct file_lock *fl)
{
	struct file_lock *f;

	/*
	 * As ctx->flc_lock is held, new requests cannot be added to
	 * ->fl_blocked_requests, so we don't need a lock to check if it
	 * is empty.
	 */
	if (list_empty(&fl->fl_blocked_requests))
		return;
	spin_lock(&blocked_lock_lock);
	list_splice_init(&fl->fl_blocked_requests, &new->fl_blocked_requests);
	list_for_each_entry(f, &new->fl_blocked_requests, fl_blocked_member)
		f->fl_blocker = new;
	spin_unlock(&blocked_lock_lock);
}

static inline int flock_translate_cmd(int cmd)
{
	if (cmd & LOCK_MAND)
		return cmd & (LOCK_MAND | LOCK_RW);
	switch (cmd) {
	case LOCK_SH:
		return F_RDLCK;
	case LOCK_EX:
		return F_WRLCK;
	case LOCK_UN:
		return F_UNLCK;
	}
	return -EINVAL;
}

/* Fill in a file_lock structure with an appropriate FLOCK lock. */
static struct file_lock *
flock_make_lock(struct file *filp, unsigned int cmd, struct file_lock *fl)
{
	int type = flock_translate_cmd(cmd);

	if (type < 0)
		return ERR_PTR(type);

	if (fl == NULL) {
		fl = locks_alloc_lock();
		if (fl == NULL)
			return ERR_PTR(-ENOMEM);
	} else {
		locks_init_lock(fl);
	}

	fl->fl_file = filp;
	fl->fl_owner = filp;
	fl->fl_pid = current->tgid;
	fl->fl_flags = FL_FLOCK;
	fl->fl_type = type;
	fl->fl_end = OFFSET_MAX;

	return fl;
}

static int assign_type(struct file_lock *fl, long type)
{
	switch (type) {
	case F_RDLCK:
	case F_WRLCK:
	case F_UNLCK:
		fl->fl_type = type;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

static int flock64_to_posix_lock(struct file *filp, struct file_lock *fl,
				 struct flock64 *l)
{
	switch (l->l_whence) {
	case SEEK_SET:
		fl->fl_start = 0;
		break;
	case SEEK_CUR:
		fl->fl_start = filp->f_pos;
		break;
	case SEEK_END:
		fl->fl_start = i_size_read(file_inode(filp));
		break;
	default:
		return -EINVAL;
	}
	if (l->l_start > OFFSET_MAX - fl->fl_start)
		return -EOVERFLOW;
	fl->fl_start += l->l_start;
	if (fl->fl_start < 0)
		return -EINVAL;

	/* POSIX-1996 leaves the case l->l_len < 0 undefined;
	   POSIX-2001 defines it. */
	if (l->l_len > 0) {
		if (l->l_len - 1 > OFFSET_MAX - fl->fl_start)
			return -EOVERFLOW;
		fl->fl_end = fl->fl_start + l->l_len - 1;

	} else if (l->l_len < 0) {
		if (fl->fl_start + l->l_len < 0)
			return -EINVAL;
		fl->fl_end = fl->fl_start - 1;
		fl->fl_start += l->l_len;
	} else
		fl->fl_end = OFFSET_MAX;

	fl->fl_owner = current->files;
	fl->fl_pid = current->tgid;
	fl->fl_file = filp;
	fl->fl_flags = FL_POSIX;
	fl->fl_ops = NULL;
	fl->fl_lmops = NULL;

	return assign_type(fl, l->l_type);
}
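
/*
 * Worked examples of the decoding above (byte ranges are inclusive;
 * SEEK_SET is used so f_pos is irrelevant):
 *
 *	l_start=100, l_len=10   ->  fl_start=100, fl_end=109
 *	l_start=100, l_len=-10  ->  fl_start=90,  fl_end=99
 *	l_start=100, l_len=0    ->  fl_start=100, fl_end=OFFSET_MAX
 *
 * i.e. a negative l_len locks the |l_len| bytes ending just before the
 * computed start, and l_len == 0 means "to end of file, however far it
 * grows".
 */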

/* Verify a "struct flock" and copy it to a "struct file_lock" as a POSIX
 * style lock.
 */
static int flock_to_posix_lock(struct file *filp, struct file_lock *fl,
			       struct flock *l)
{
	struct flock64 ll = {
		.l_type = l->l_type,
		.l_whence = l->l_whence,
		.l_start = l->l_start,
		.l_len = l->l_len,
	};

	return flock64_to_posix_lock(filp, fl, &ll);
}

/* default lease lock manager operations */
static bool
lease_break_callback(struct file_lock *fl)
{
	kill_fasync(&fl->fl_fasync, SIGIO, POLL_MSG);
	return false;
}

static void
lease_setup(struct file_lock *fl, void **priv)
{
	struct file *filp = fl->fl_file;
	struct fasync_struct *fa = *priv;

	/*
	 * fasync_insert_entry() returns the old entry if any. If there was no
	 * old entry, then it used "priv" and inserted it into the fasync list.
	 * Clear the pointer to indicate that it shouldn't be freed.
	 */
	if (!fasync_insert_entry(fa->fa_fd, filp, &fl->fl_fasync, fa))
		*priv = NULL;

	__f_setown(filp, task_pid(current), PIDTYPE_TGID, 0);
}

static const struct lock_manager_operations lease_manager_ops = {
	.lm_break = lease_break_callback,
	.lm_change = lease_modify,
	.lm_setup = lease_setup,
};

/*
 * Initialize a lease, use the default lock manager operations
 */
static int lease_init(struct file *filp, long type, struct file_lock *fl)
{
	if (assign_type(fl, type) != 0)
		return -EINVAL;

	fl->fl_owner = filp;
	fl->fl_pid = current->tgid;

	fl->fl_file = filp;
	fl->fl_flags = FL_LEASE;
	fl->fl_start = 0;
	fl->fl_end = OFFSET_MAX;
	fl->fl_ops = NULL;
	fl->fl_lmops = &lease_manager_ops;
	return 0;
}

/* Allocate a file_lock initialised to this type of lease */
static struct file_lock *lease_alloc(struct file *filp, long type)
{
	struct file_lock *fl = locks_alloc_lock();
	int error = -ENOMEM;

	if (fl == NULL)
		return ERR_PTR(error);

	error = lease_init(filp, type, fl);
	if (error) {
		locks_free_lock(fl);
		return ERR_PTR(error);
	}
	return fl;
}

/* Check if two locks overlap each other.
 */
static inline int locks_overlap(struct file_lock *fl1, struct file_lock *fl2)
{
	return ((fl1->fl_end >= fl2->fl_start) &&
		(fl2->fl_end >= fl1->fl_start));
}
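
/*
 * Since fl_end is inclusive, [0,9] and [9,20] overlap (they share byte
 * 9), but [0,9] and [10,20] do not; mere adjacency is not overlap.
 */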

/*
 * Check whether two locks have the same owner.
 */
static int posix_same_owner(struct file_lock *fl1, struct file_lock *fl2)
{
	if (fl1->fl_lmops && fl1->fl_lmops->lm_compare_owner)
		return fl2->fl_lmops == fl1->fl_lmops &&
			fl1->fl_lmops->lm_compare_owner(fl1, fl2);
	return fl1->fl_owner == fl2->fl_owner;
}

/* Must be called with the flc_lock held! */
static void locks_insert_global_locks(struct file_lock *fl)
{
	struct file_lock_list_struct *fll = this_cpu_ptr(&file_lock_list);

	percpu_rwsem_assert_held(&file_rwsem);

	spin_lock(&fll->lock);
	fl->fl_link_cpu = smp_processor_id();
	hlist_add_head(&fl->fl_link, &fll->hlist);
	spin_unlock(&fll->lock);
}

/* Must be called with the flc_lock held! */
static void locks_delete_global_locks(struct file_lock *fl)
{
	struct file_lock_list_struct *fll;

	percpu_rwsem_assert_held(&file_rwsem);

	/*
	 * Avoid taking lock if already unhashed. This is safe since this check
	 * is done while holding the flc_lock, and new insertions into the list
	 * also require that it be held.
	 */
	if (hlist_unhashed(&fl->fl_link))
		return;

	fll = per_cpu_ptr(&file_lock_list, fl->fl_link_cpu);
	spin_lock(&fll->lock);
	hlist_del_init(&fl->fl_link);
	spin_unlock(&fll->lock);
}

static unsigned long
posix_owner_key(struct file_lock *fl)
{
	if (fl->fl_lmops && fl->fl_lmops->lm_owner_key)
		return fl->fl_lmops->lm_owner_key(fl);
	return (unsigned long)fl->fl_owner;
}

static void locks_insert_global_blocked(struct file_lock *waiter)
{
	lockdep_assert_held(&blocked_lock_lock);

	hash_add(blocked_hash, &waiter->fl_link, posix_owner_key(waiter));
}

static void locks_delete_global_blocked(struct file_lock *waiter)
{
	lockdep_assert_held(&blocked_lock_lock);

	hash_del(&waiter->fl_link);
}

/* Remove waiter from blocker's block list.
 * When blocker ends up pointing to itself then the list is empty.
 *
 * Must be called with blocked_lock_lock held.
 */
static void __locks_delete_block(struct file_lock *waiter)
{
	locks_delete_global_blocked(waiter);
	list_del_init(&waiter->fl_blocked_member);
	waiter->fl_blocker = NULL;
}

static void __locks_wake_up_blocks(struct file_lock *blocker)
{
	while (!list_empty(&blocker->fl_blocked_requests)) {
		struct file_lock *waiter;

		waiter = list_first_entry(&blocker->fl_blocked_requests,
					  struct file_lock, fl_blocked_member);
		__locks_delete_block(waiter);
		if (waiter->fl_lmops && waiter->fl_lmops->lm_notify)
			waiter->fl_lmops->lm_notify(waiter);
		else
			wake_up(&waiter->fl_wait);
	}
}
/**
 *	locks_delete_block - stop waiting for a file lock
 *	@waiter: the lock which was waiting
 *
 *	lockd/nfsd need to disconnect the lock while working on it.
 */
int locks_delete_block(struct file_lock *waiter)
{
	int status = -ENOENT;

	/*
	 * If fl_blocker is NULL, it won't be set again as this thread
	 * "owns" the lock and is the only one that might try to claim
	 * the lock.  So it is safe to test fl_blocker locklessly.
	 * Also if fl_blocker is NULL, this waiter is not listed on
	 * fl_blocked_requests for some lock, so no other request can
	 * be added to the list of fl_blocked_requests for this
	 * request.  So if fl_blocker is NULL, it is safe to
	 * locklessly check if fl_blocked_requests is empty.  If both
	 * of these checks succeed, there is no need to take the lock.
	 */
	if (waiter->fl_blocker == NULL &&
	    list_empty(&waiter->fl_blocked_requests))
		return status;
	spin_lock(&blocked_lock_lock);
	if (waiter->fl_blocker)
		status = 0;
	__locks_wake_up_blocks(waiter);
	__locks_delete_block(waiter);
	spin_unlock(&blocked_lock_lock);
	return status;
}
EXPORT_SYMBOL(locks_delete_block);

/* Insert waiter into blocker's block list.
 * We use a circular list so that processes can be easily woken up in
 * the order they blocked. The documentation doesn't require this but
 * it seems like the reasonable thing to do.
 *
 * Must be called with both the flc_lock and blocked_lock_lock held. The
 * fl_blocked_requests list itself is protected by the blocked_lock_lock,
 * but by ensuring that the flc_lock is also held on insertions we can avoid
 * taking the blocked_lock_lock in some cases when we see that the
 * fl_blocked_requests list is empty.
 *
 * Rather than just adding to the list, we check for conflicts with any existing
 * waiters, and add beneath any waiter that blocks the new waiter.
 * Thus wakeups don't happen until needed.
 */
static void __locks_insert_block(struct file_lock *blocker,
				 struct file_lock *waiter,
				 bool conflict(struct file_lock *,
					       struct file_lock *))
{
	struct file_lock *fl;
	BUG_ON(!list_empty(&waiter->fl_blocked_member));

new_blocker:
	list_for_each_entry(fl, &blocker->fl_blocked_requests, fl_blocked_member)
		if (conflict(fl, waiter)) {
			blocker = fl;
			goto new_blocker;
		}
	waiter->fl_blocker = blocker;
	list_add_tail(&waiter->fl_blocked_member, &blocker->fl_blocked_requests);
	if (IS_POSIX(blocker) && !IS_OFDLCK(blocker))
		locks_insert_global_blocked(waiter);

	/* The requests in waiter->fl_blocked_requests are known to conflict
	 * with waiter, but might not conflict with blocker, or the requests
	 * and lock which block it.  So they all need to be woken.
	 */
	__locks_wake_up_blocks(waiter);
}
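
/*
 * Illustration of the conflict walk above: if blocker is an applied
 * write lock on [0,9] with waiting children W1 (write [0,4]) and W2
 * (write [5,9]), a new waiter N requesting a read lock on [5,9] does
 * not conflict with W1 but does conflict with W2, so N is queued
 * beneath W2 rather than directly beneath blocker.  If blocker later
 * goes away, W1 and W2 are woken but N is not; N only needs another
 * look once W2 itself is resolved.
 */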

/* Must be called with flc_lock held. */
static void locks_insert_block(struct file_lock *blocker,
			       struct file_lock *waiter,
			       bool conflict(struct file_lock *,
					     struct file_lock *))
{
	spin_lock(&blocked_lock_lock);
	__locks_insert_block(blocker, waiter, conflict);
	spin_unlock(&blocked_lock_lock);
}

/*
 * Wake up processes blocked waiting for blocker.
 *
 * Must be called with the inode->flc_lock held!
 */
static void locks_wake_up_blocks(struct file_lock *blocker)
{
	/*
	 * Avoid taking global lock if list is empty. This is safe since new
	 * blocked requests are only added to the list under the flc_lock, and
	 * the flc_lock is always held here. Note that removal from the
	 * fl_blocked_requests list does not require the flc_lock, so we must
	 * recheck list_empty() after acquiring the blocked_lock_lock.
	 */
	if (list_empty(&blocker->fl_blocked_requests))
		return;

	spin_lock(&blocked_lock_lock);
	__locks_wake_up_blocks(blocker);
	spin_unlock(&blocked_lock_lock);
}

static void
locks_insert_lock_ctx(struct file_lock *fl, struct list_head *before)
{
	list_add_tail(&fl->fl_list, before);
	locks_insert_global_locks(fl);
}

static void
locks_unlink_lock_ctx(struct file_lock *fl)
{
	locks_delete_global_locks(fl);
	list_del_init(&fl->fl_list);
	locks_wake_up_blocks(fl);
}

static void
locks_delete_lock_ctx(struct file_lock *fl, struct list_head *dispose)
{
	locks_unlink_lock_ctx(fl);
	if (dispose)
		list_add(&fl->fl_list, dispose);
	else
		locks_free_lock(fl);
}

/* Determine if lock sys_fl blocks lock caller_fl. Common functionality
 * checks for shared/exclusive status of overlapping locks.
 */
static bool locks_conflict(struct file_lock *caller_fl,
			   struct file_lock *sys_fl)
{
	if (sys_fl->fl_type == F_WRLCK)
		return true;
	if (caller_fl->fl_type == F_WRLCK)
		return true;
	return false;
}

/* Determine if lock sys_fl blocks lock caller_fl. POSIX-specific
 * checking before calling locks_conflict().
 */
static bool posix_locks_conflict(struct file_lock *caller_fl,
				 struct file_lock *sys_fl)
{
	/* POSIX locks owned by the same process do not conflict with
	 * each other.
	 */
	if (posix_same_owner(caller_fl, sys_fl))
		return false;

	/* Check whether they overlap */
	if (!locks_overlap(caller_fl, sys_fl))
		return false;

	return locks_conflict(caller_fl, sys_fl);
}

/* Determine if lock sys_fl blocks lock caller_fl. FLOCK-specific
 * checking before calling locks_conflict().
 */
static bool flock_locks_conflict(struct file_lock *caller_fl,
				 struct file_lock *sys_fl)
{
	/* FLOCK locks referring to the same filp do not conflict with
	 * each other.
	 */
	if (caller_fl->fl_file == sys_fl->fl_file)
		return false;
	if ((caller_fl->fl_type & LOCK_MAND) || (sys_fl->fl_type & LOCK_MAND))
		return false;

	return locks_conflict(caller_fl, sys_fl);
}

void
posix_test_lock(struct file *filp, struct file_lock *fl)
{
	struct file_lock *cfl;
	struct file_lock_context *ctx;
	struct inode *inode = locks_inode(filp);

	ctx = smp_load_acquire(&inode->i_flctx);
	if (!ctx || list_empty_careful(&ctx->flc_posix)) {
		fl->fl_type = F_UNLCK;
		return;
	}

	spin_lock(&ctx->flc_lock);
	list_for_each_entry(cfl, &ctx->flc_posix, fl_list) {
		if (posix_locks_conflict(fl, cfl)) {
			locks_copy_conflock(fl, cfl);
			goto out;
		}
	}
	fl->fl_type = F_UNLCK;
out:
	spin_unlock(&ctx->flc_lock);
	return;
}
EXPORT_SYMBOL(posix_test_lock);

/*
 * Deadlock detection:
 *
 * We attempt to detect deadlocks that are due purely to posix file
 * locks.
 *
 * We assume that a task can be waiting for at most one lock at a time.
 * So for any acquired lock, the process holding that lock may be
 * waiting on at most one other lock.  That lock in turn may be held by
 * someone waiting for at most one other lock.  Given a requested lock
 * caller_fl which is about to wait for a conflicting lock block_fl, we
 * follow this chain of waiters to ensure we are not about to create a
 * cycle.
 *
 * Since we do this before we ever put a process to sleep on a lock, we
 * are ensured that there is never a cycle; that is what guarantees that
 * the while() loop in posix_locks_deadlock() eventually completes.
 *
 * Note: the above assumption may not be true when handling lock
 * requests from a broken NFS client. It may also fail in the presence
 * of tasks (such as posix threads) sharing the same open file table.
 * To handle those cases, we just bail out after a few iterations.
 *
 * For FL_OFDLCK locks, the owner is the filp, not the files_struct.
 * Because the owner is not even nominally tied to a thread of
 * execution, the deadlock detection below can't reasonably work well. Just
 * skip it for those.
 *
 * In principle, we could do a more limited deadlock detection on FL_OFDLCK
 * locks that just checks for the case where two tasks are attempting to
 * upgrade from read to write locks on the same inode.
 */
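
/*
 * Worked example: suppose task A holds a lock on [0,9] and is blocked
 * waiting for task B's lock on [10,19].  If B now requests [0,9], it
 * would have to wait on A; the walk below follows "B waits on A, A
 * waits on B" back to B itself, so the request fails with -EDEADLK
 * instead of sleeping forever.
 */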

#define MAX_DEADLK_ITERATIONS 10

/* Find a lock that the owner of the given block_fl is blocking on. */
static struct file_lock *what_owner_is_waiting_for(struct file_lock *block_fl)
{
	struct file_lock *fl;

	hash_for_each_possible(blocked_hash, fl, fl_link, posix_owner_key(block_fl)) {
		if (posix_same_owner(fl, block_fl)) {
			while (fl->fl_blocker)
				fl = fl->fl_blocker;
			return fl;
		}
	}
	return NULL;
}

/* Must be called with the blocked_lock_lock held! */
static int posix_locks_deadlock(struct file_lock *caller_fl,
				struct file_lock *block_fl)
{
	int i = 0;

	lockdep_assert_held(&blocked_lock_lock);

	/*
	 * This deadlock detector can't reasonably detect deadlocks with
	 * FL_OFDLCK locks, since they aren't owned by a process, per-se.
	 */
	if (IS_OFDLCK(caller_fl))
		return 0;

	while ((block_fl = what_owner_is_waiting_for(block_fl))) {
		if (i++ > MAX_DEADLK_ITERATIONS)
			return 0;
		if (posix_same_owner(caller_fl, block_fl))
			return 1;
	}
	return 0;
}

/* Try to create a FLOCK lock on filp. We always insert new FLOCK locks
 * after any leases, but before any posix locks.
 *
 * Note that if called with an FL_EXISTS argument, the caller may determine
 * whether or not a lock was successfully freed by testing the return
 * value for -ENOENT.
 */
static int flock_lock_inode(struct inode *inode, struct file_lock *request)
{
	struct file_lock *new_fl = NULL;
	struct file_lock *fl;
	struct file_lock_context *ctx;
	int error = 0;
	bool found = false;
	LIST_HEAD(dispose);

	ctx = locks_get_lock_context(inode, request->fl_type);
	if (!ctx) {
		if (request->fl_type != F_UNLCK)
			return -ENOMEM;
		return (request->fl_flags & FL_EXISTS) ? -ENOENT : 0;
	}

	if (!(request->fl_flags & FL_ACCESS) && (request->fl_type != F_UNLCK)) {
		new_fl = locks_alloc_lock();
		if (!new_fl)
			return -ENOMEM;
	}

	percpu_down_read(&file_rwsem);
	spin_lock(&ctx->flc_lock);
	if (request->fl_flags & FL_ACCESS)
		goto find_conflict;

	list_for_each_entry(fl, &ctx->flc_flock, fl_list) {
		if (request->fl_file != fl->fl_file)
			continue;
		if (request->fl_type == fl->fl_type)
			goto out;
		found = true;
		locks_delete_lock_ctx(fl, &dispose);
		break;
	}

	if (request->fl_type == F_UNLCK) {
		if ((request->fl_flags & FL_EXISTS) && !found)
			error = -ENOENT;
		goto out;
	}

find_conflict:
	list_for_each_entry(fl, &ctx->flc_flock, fl_list) {
		if (!flock_locks_conflict(request, fl))
			continue;
		error = -EAGAIN;
		if (!(request->fl_flags & FL_SLEEP))
			goto out;
		error = FILE_LOCK_DEFERRED;
		locks_insert_block(fl, request, flock_locks_conflict);
		goto out;
	}
	if (request->fl_flags & FL_ACCESS)
		goto out;
	locks_copy_lock(new_fl, request);
	locks_move_blocks(new_fl, request);
	locks_insert_lock_ctx(new_fl, &ctx->flc_flock);
	new_fl = NULL;
	error = 0;

out:
	spin_unlock(&ctx->flc_lock);
	percpu_up_read(&file_rwsem);
	if (new_fl)
		locks_free_lock(new_fl);
	locks_dispose_list(&dispose);
	trace_flock_lock_inode(inode, request, error);
	return error;
}
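
/*
 * The delete-then-reacquire behaviour above has a userspace-visible
 * consequence worth spelling out (fd1 and fd2 are separate opens of the
 * same file in this sketch):
 *
 *	P1: flock(fd1, LOCK_SH);	// granted
 *	P2: flock(fd2, LOCK_SH);	// granted, locks are shared
 *	P1: flock(fd1, LOCK_EX);	// P1's shared lock is dropped
 *					// *before* the upgrade, so P1 now
 *					// blocks on P2, and a third party
 *					// may slip in and take the lock
 *
 * This matches the 4.4 BSD semantics described at the top of this file:
 * upgrades and downgrades are not atomic.
 */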

static int posix_lock_inode(struct inode *inode, struct file_lock *request,
			    struct file_lock *conflock)
{
	struct file_lock *fl, *tmp;
	struct file_lock *new_fl = NULL;
	struct file_lock *new_fl2 = NULL;
	struct file_lock *left = NULL;
	struct file_lock *right = NULL;
	struct file_lock_context *ctx;
	int error;
	bool added = false;
	LIST_HEAD(dispose);

	ctx = locks_get_lock_context(inode, request->fl_type);
	if (!ctx)
		return (request->fl_type == F_UNLCK) ? 0 : -ENOMEM;

	/*
	 * We may need two file_lock structures for this operation,
	 * so we get them in advance to avoid races.
	 *
	 * In some cases we can be sure that no new locks will be needed.
	 */
	if (!(request->fl_flags & FL_ACCESS) &&
	    (request->fl_type != F_UNLCK ||
	     request->fl_start != 0 || request->fl_end != OFFSET_MAX)) {
		new_fl = locks_alloc_lock();
		new_fl2 = locks_alloc_lock();
	}

	percpu_down_read(&file_rwsem);
	spin_lock(&ctx->flc_lock);
	/*
	 * New lock request. Walk all POSIX locks and look for conflicts. If
	 * there are any, either return error or put the request on the
	 * blocker's list of waiters and the global blocked_hash.
	 */
	if (request->fl_type != F_UNLCK) {
		list_for_each_entry(fl, &ctx->flc_posix, fl_list) {
			if (!posix_locks_conflict(request, fl))
				continue;
			if (conflock)
				locks_copy_conflock(conflock, fl);
			error = -EAGAIN;
			if (!(request->fl_flags & FL_SLEEP))
				goto out;
			/*
			 * Deadlock detection and insertion into the blocked
			 * locks list must be done while holding the same lock!
			 */
			error = -EDEADLK;
			spin_lock(&blocked_lock_lock);
			/*
			 * Ensure that we don't find any locks blocked on this
			 * request during deadlock detection.
			 */
			__locks_wake_up_blocks(request);
			if (likely(!posix_locks_deadlock(request, fl))) {
				error = FILE_LOCK_DEFERRED;
				__locks_insert_block(fl, request,
						     posix_locks_conflict);
			}
			spin_unlock(&blocked_lock_lock);
			goto out;
		}
	}

	/* If we're just looking for a conflict, we're done. */
	error = 0;
	if (request->fl_flags & FL_ACCESS)
		goto out;

	/* Find the first old lock with the same owner as the new lock */
	list_for_each_entry(fl, &ctx->flc_posix, fl_list) {
		if (posix_same_owner(request, fl))
			break;
	}

	/* Process locks with this owner. */
	list_for_each_entry_safe_from(fl, tmp, &ctx->flc_posix, fl_list) {
		if (!posix_same_owner(request, fl))
			break;

		/* Detect adjacent or overlapping regions (if same lock type) */
		if (request->fl_type == fl->fl_type) {
			/* In all comparisons of start vs end, use
			 * "start - 1" rather than "end + 1". If end
			 * is OFFSET_MAX, end + 1 will become negative.
			 */
			if (fl->fl_end < request->fl_start - 1)
				continue;
			/* If the next lock in the list has entirely bigger
			 * addresses than the new one, insert the lock here.
			 */
			if (fl->fl_start - 1 > request->fl_end)
				break;

			/* If we come here, the new and old lock are of the
			 * same type and adjacent or overlapping. Make one
			 * lock yielding from the lower start address of both
			 * locks to the higher end address.
			 */
			if (fl->fl_start > request->fl_start)
				fl->fl_start = request->fl_start;
			else
				request->fl_start = fl->fl_start;
			if (fl->fl_end < request->fl_end)
				fl->fl_end = request->fl_end;
			else
				request->fl_end = fl->fl_end;
			if (added) {
				locks_delete_lock_ctx(fl, &dispose);
				continue;
			}
			request = fl;
			added = true;
		} else {
			/* Processing for different lock types is a bit
			 * more complex.
			 */
			if (fl->fl_end < request->fl_start)
				continue;
			if (fl->fl_start > request->fl_end)
				break;
			if (request->fl_type == F_UNLCK)
				added = true;
			if (fl->fl_start < request->fl_start)
				left = fl;
			/* If the next lock in the list has a higher end
			 * address than the new one, insert the new one here.
			 */
			if (fl->fl_end > request->fl_end) {
				right = fl;
				break;
			}
			if (fl->fl_start >= request->fl_start) {
				/* The new lock completely replaces an old
				 * one (This may happen several times).
				 */
				if (added) {
					locks_delete_lock_ctx(fl, &dispose);
					continue;
				}
				/*
				 * Replace the old lock with new_fl, and
				 * remove the old one. It's safe to do the
				 * insert here since we know that we won't be
				 * using new_fl later, and that the lock is
				 * just replacing an existing lock.
				 */
				error = -ENOLCK;
				if (!new_fl)
					goto out;
				locks_copy_lock(new_fl, request);
				request = new_fl;
				new_fl = NULL;
				locks_insert_lock_ctx(request, &fl->fl_list);
				locks_delete_lock_ctx(fl, &dispose);
				added = true;
			}
		}
	}

	/*
	 * The above code only modifies existing locks in case of merging or
	 * replacing. If new lock(s) need to be inserted all modifications are
	 * done below this, so it's safe yet to bail out.
	 */
	error = -ENOLCK; /* "no luck" */
	if (right && left == right && !new_fl2)
		goto out;

	error = 0;
	if (!added) {
		if (request->fl_type == F_UNLCK) {
			if (request->fl_flags & FL_EXISTS)
				error = -ENOENT;
			goto out;
		}

		if (!new_fl) {
			error = -ENOLCK;
			goto out;
		}
		locks_copy_lock(new_fl, request);
		locks_move_blocks(new_fl, request);
		locks_insert_lock_ctx(new_fl, &fl->fl_list);
		fl = new_fl;
		new_fl = NULL;
	}
	if (right) {
		if (left == right) {
			/* The new lock breaks the old one in two pieces,
			 * so we have to use the second new lock.
			 */
			left = new_fl2;
			new_fl2 = NULL;
			locks_copy_lock(left, right);
			locks_insert_lock_ctx(left, &fl->fl_list);
		}
		right->fl_start = request->fl_end + 1;
		locks_wake_up_blocks(right);
	}
	if (left) {
		left->fl_end = request->fl_start - 1;
		locks_wake_up_blocks(left);
	}
 out:
	spin_unlock(&ctx->flc_lock);
	percpu_up_read(&file_rwsem);
	/*
	 * Free any unused locks.
	 */
	if (new_fl)
		locks_free_lock(new_fl);
	if (new_fl2)
		locks_free_lock(new_fl2);
	locks_dispose_list(&dispose);
	trace_posix_lock_inode(inode, request, error);

	return error;
}
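
/*
 * Two worked examples of the range surgery above, both with the same
 * owner throughout:
 *
 *	existing write lock [100,199], request: write lock [150,249]
 *		-> merged into a single write lock [100,249].
 *
 *	existing write lock [100,199], request: read lock [120,129]
 *		-> three locks: write [100,119], read [120,129],
 *		   write [130,199].  This is the "left == right" case,
 *		   which is why new_fl2 is allocated up front: the old
 *		   lock must be split in two, and no sleeping is allowed
 *		   while the lists are being edited.
 */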

/**
 * posix_lock_file - Apply a POSIX-style lock to a file
 * @filp: The file to apply the lock to
 * @fl: The lock to be applied
 * @conflock: Place to return a copy of the conflicting lock, if found.
 *
 * Add a POSIX style lock to a file.
 * We merge adjacent & overlapping locks whenever possible.
 * POSIX locks are sorted by owner task, then by starting address.
 *
 * Note that if called with an FL_EXISTS argument, the caller may determine
 * whether or not a lock was successfully freed by testing the return
 * value for -ENOENT.
 */
int posix_lock_file(struct file *filp, struct file_lock *fl,
			struct file_lock *conflock)
{
	return posix_lock_inode(locks_inode(filp), fl, conflock);
}
EXPORT_SYMBOL(posix_lock_file);

/**
 * posix_lock_inode_wait - Apply a POSIX-style lock to a file
 * @inode: inode of file to which lock request should be applied
 * @fl: The lock to be applied
 *
 * Apply a POSIX style lock request to an inode.
 */
static int posix_lock_inode_wait(struct inode *inode, struct file_lock *fl)
{
	int error;
	might_sleep();
	for (;;) {
		error = posix_lock_inode(inode, fl, NULL);
		if (error != FILE_LOCK_DEFERRED)
			break;
		error = wait_event_interruptible(fl->fl_wait, !fl->fl_blocker);
		if (error)
			break;
	}
	locks_delete_block(fl);
	return error;
}

#ifdef CONFIG_MANDATORY_FILE_LOCKING
/**
 * locks_mandatory_locked - Check for an active lock
 * @file: the file to check
 *
 * Searches the inode's list of locks to find any POSIX locks which conflict.
 * This function is called from locks_verify_locked() only.
 */
int locks_mandatory_locked(struct file *file)
{
	int ret;
	struct inode *inode = locks_inode(file);
	struct file_lock_context *ctx;
	struct file_lock *fl;

	ctx = smp_load_acquire(&inode->i_flctx);
	if (!ctx || list_empty_careful(&ctx->flc_posix))
		return 0;

	/*
	 * Search the lock list for this inode for any POSIX locks.
	 */
	spin_lock(&ctx->flc_lock);
	ret = 0;
	list_for_each_entry(fl, &ctx->flc_posix, fl_list) {
		if (fl->fl_owner != current->files &&
		    fl->fl_owner != file) {
			ret = -EAGAIN;
			break;
		}
	}
	spin_unlock(&ctx->flc_lock);
	return ret;
}

/**
 * locks_mandatory_area - Check for a conflicting lock
 * @inode:	the file to check
 * @filp:       how the file was opened (if it was)
 * @start:	first byte in the file to check
 * @end:	last byte in the file to check
 * @type:	%F_WRLCK for a write lock, else %F_RDLCK
 *
 * Searches the inode's list of locks to find any POSIX locks which conflict.
 */
int locks_mandatory_area(struct inode *inode, struct file *filp, loff_t start,
			 loff_t end, unsigned char type)
{
	struct file_lock fl;
	int error;
	bool sleep = false;

	locks_init_lock(&fl);
	fl.fl_pid = current->tgid;
	fl.fl_file = filp;
	fl.fl_flags = FL_POSIX | FL_ACCESS;
	if (filp && !(filp->f_flags & O_NONBLOCK))
		sleep = true;
	fl.fl_type = type;
	fl.fl_start = start;
	fl.fl_end = end;

	for (;;) {
		if (filp) {
			fl.fl_owner = filp;
			fl.fl_flags &= ~FL_SLEEP;
			error = posix_lock_inode(inode, &fl, NULL);
			if (!error)
				break;
		}

		if (sleep)
			fl.fl_flags |= FL_SLEEP;
		fl.fl_owner = current->files;
		error = posix_lock_inode(inode, &fl, NULL);
		if (error != FILE_LOCK_DEFERRED)
			break;
		error = wait_event_interruptible(fl.fl_wait, !fl.fl_blocker);
		if (!error) {
			/*
			 * If we've been sleeping someone might have
			 * changed the permissions behind our back.
			 */
			if (__mandatory_lock(inode))
				continue;
		}

		break;
	}
	locks_delete_block(&fl);

	return error;
}
EXPORT_SYMBOL(locks_mandatory_area);
#endif /* CONFIG_MANDATORY_FILE_LOCKING */

static void lease_clear_pending(struct file_lock *fl, int arg)
{
	switch (arg) {
	case F_UNLCK:
		fl->fl_flags &= ~FL_UNLOCK_PENDING;
		/* fall through */
	case F_RDLCK:
		fl->fl_flags &= ~FL_DOWNGRADE_PENDING;
	}
}

/* We already had a lease on this file; just change its type */
int lease_modify(struct file_lock *fl, int arg, struct list_head *dispose)
{
	int error = assign_type(fl, arg);

	if (error)
		return error;
	lease_clear_pending(fl, arg);
	locks_wake_up_blocks(fl);
	if (arg == F_UNLCK) {
		struct file *filp = fl->fl_file;

		f_delown(filp);
		filp->f_owner.signum = 0;
		fasync_helper(0, fl->fl_file, 0, &fl->fl_fasync);
		if (fl->fl_fasync != NULL) {
			printk(KERN_ERR "locks_delete_lock: fasync == %p\n", fl->fl_fasync);
			fl->fl_fasync = NULL;
		}
		locks_delete_lock_ctx(fl, dispose);
	}
	return 0;
}
EXPORT_SYMBOL(lease_modify);

static bool past_time(unsigned long then)
{
	if (!then)
		/* 0 is a special value meaning "this never expires": */
		return false;
	return time_after(jiffies, then);
}

static void time_out_leases(struct inode *inode, struct list_head *dispose)
{
	struct file_lock_context *ctx = inode->i_flctx;
	struct file_lock *fl, *tmp;

	lockdep_assert_held(&ctx->flc_lock);

	list_for_each_entry_safe(fl, tmp, &ctx->flc_lease, fl_list) {
		trace_time_out_leases(inode, fl);
		if (past_time(fl->fl_downgrade_time))
			lease_modify(fl, F_RDLCK, dispose);
		if (past_time(fl->fl_break_time))
			lease_modify(fl, F_UNLCK, dispose);
	}
}

static bool leases_conflict(struct file_lock *lease, struct file_lock *breaker)
{
	if ((breaker->fl_flags & FL_LAYOUT) != (lease->fl_flags & FL_LAYOUT))
		return false;
	if ((breaker->fl_flags & FL_DELEG) && (lease->fl_flags & FL_LEASE))
		return false;
	return locks_conflict(breaker, lease);
}

static bool
any_leases_conflict(struct inode *inode, struct file_lock *breaker)
{
	struct file_lock_context *ctx = inode->i_flctx;
	struct file_lock *fl;

	lockdep_assert_held(&ctx->flc_lock);

	list_for_each_entry(fl, &ctx->flc_lease, fl_list) {
		if (leases_conflict(fl, breaker))
			return true;
	}
	return false;
}

/**
 *	__break_lease	-	revoke all outstanding leases on file
 *	@inode: the inode of the file to return
 *	@mode: O_RDONLY: break only write leases; O_WRONLY or O_RDWR:
 *	    break all leases
 *	@type: FL_LEASE: break leases and delegations; FL_DELEG: break
 *	    only delegations
 *
 *	break_lease (inlined for speed) has checked there already is at least
 *	some kind of lock (maybe a lease) on this file.  Leases are broken on
 *	a call to open() or truncate().  This function can sleep unless you
 *	specified %O_NONBLOCK to your open().
 */
int __break_lease(struct inode *inode, unsigned int mode, unsigned int type)
{
	int error = 0;
	struct file_lock_context *ctx;
	struct file_lock *new_fl, *fl, *tmp;
	unsigned long break_time;
	int want_write = (mode & O_ACCMODE) != O_RDONLY;
	LIST_HEAD(dispose);

	new_fl = lease_alloc(NULL, want_write ? F_WRLCK : F_RDLCK);
	if (IS_ERR(new_fl))
		return PTR_ERR(new_fl);
	new_fl->fl_flags = type;

	/* typically we will check that ctx is non-NULL before calling */
	ctx = smp_load_acquire(&inode->i_flctx);
	if (!ctx) {
		WARN_ON_ONCE(1);
		return error;
	}

	percpu_down_read(&file_rwsem);
	spin_lock(&ctx->flc_lock);

	time_out_leases(inode, &dispose);

	if (!any_leases_conflict(inode, new_fl))
		goto out;

	break_time = 0;
	if (lease_break_time > 0) {
		break_time = jiffies + lease_break_time * HZ;
		if (break_time == 0)
			break_time++;	/* so that 0 means no break time */
	}

	list_for_each_entry_safe(fl, tmp, &ctx->flc_lease, fl_list) {
		if (!leases_conflict(fl, new_fl))
			continue;
		if (want_write) {
			if (fl->fl_flags & FL_UNLOCK_PENDING)
				continue;
			fl->fl_flags |= FL_UNLOCK_PENDING;
			fl->fl_break_time = break_time;
		} else {
			if (lease_breaking(fl))
				continue;
			fl->fl_flags |= FL_DOWNGRADE_PENDING;
			fl->fl_downgrade_time = break_time;
		}
		if (fl->fl_lmops->lm_break(fl))
			locks_delete_lock_ctx(fl, &dispose);
	}

	if (list_empty(&ctx->flc_lease))
		goto out;

	if (mode & O_NONBLOCK) {
		trace_break_lease_noblock(inode, new_fl);
		error = -EWOULDBLOCK;
		goto out;
	}

restart:
	fl = list_first_entry(&ctx->flc_lease, struct file_lock, fl_list);
	break_time = fl->fl_break_time;
	if (break_time != 0)
		break_time -= jiffies;
	if (break_time == 0)
		break_time++;
	locks_insert_block(fl, new_fl, leases_conflict);
	trace_break_lease_block(inode, new_fl);
	spin_unlock(&ctx->flc_lock);
	percpu_up_read(&file_rwsem);

	locks_dispose_list(&dispose);
	error = wait_event_interruptible_timeout(new_fl->fl_wait,
						!new_fl->fl_blocker, break_time);

	percpu_down_read(&file_rwsem);
	spin_lock(&ctx->flc_lock);
	trace_break_lease_unblock(inode, new_fl);
	locks_delete_block(new_fl);
	if (error >= 0) {
		/*
		 * Wait for the next conflicting lease that has not been
		 * broken yet
		 */
		if (error == 0)
			time_out_leases(inode, &dispose);
		if (any_leases_conflict(inode, new_fl))
			goto restart;
		error = 0;
	}
out:
	spin_unlock(&ctx->flc_lock);
	percpu_up_read(&file_rwsem);
	locks_dispose_list(&dispose);
	locks_free_lock(new_fl);
	return error;
}
EXPORT_SYMBOL(__break_lease);
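
/*
 * Example of the lease-break dance above, with the default
 * lease_break_time of 45 seconds: process A holds an F_WRLCK lease and
 * process B calls open(O_RDONLY) on the file.  want_write is false, so
 * A's lease is marked FL_DOWNGRADE_PENDING, A is signalled via
 * lm_break() (SIGIO for the default lease_manager_ops), and B sleeps.
 * B's open proceeds as soon as A downgrades or releases the lease, or
 * after the 45 second timeout expires.  Had B opened with O_NONBLOCK,
 * it would have got -EWOULDBLOCK immediately instead of sleeping.
 */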

/**
 *	lease_get_mtime - update modified time of an inode with exclusive lease
 *	@inode: the inode
 *      @time:  pointer to a timespec which contains the last modified time
 *
 * This is to force NFS clients to flush their caches for files with
 * exclusive leases.  The justification is that if someone has an
 * exclusive lease, then they could be modifying it.
 */
void lease_get_mtime(struct inode *inode, struct timespec64 *time)
{
	bool has_lease = false;
	struct file_lock_context *ctx;
	struct file_lock *fl;

	ctx = smp_load_acquire(&inode->i_flctx);
	if (ctx && !list_empty_careful(&ctx->flc_lease)) {
		spin_lock(&ctx->flc_lock);
		fl = list_first_entry_or_null(&ctx->flc_lease,
					      struct file_lock, fl_list);
		if (fl && (fl->fl_type == F_WRLCK))
			has_lease = true;
		spin_unlock(&ctx->flc_lock);
	}

	if (has_lease)
		*time = current_time(inode);
}
EXPORT_SYMBOL(lease_get_mtime);

/**
 *	fcntl_getlease - Enquire what lease is currently active
 *	@filp: the file
 *
 *	The value returned by this function will be one of
 *	(if no lease break is pending):
 *
 *	%F_RDLCK to indicate a shared lease is held.
 *
 *	%F_WRLCK to indicate an exclusive lease is held.
 *
 *	%F_UNLCK to indicate no lease is held.
 *
 *	(if a lease break is pending):
 *
 *	%F_RDLCK to indicate an exclusive lease needs to be
 *		changed to a shared lease (or removed).
 *
 *	%F_UNLCK to indicate the lease needs to be removed.
 *
 *	XXX: sfr & willy disagree over whether F_INPROGRESS
 *	should be returned to userspace.
 */
int fcntl_getlease(struct file *filp)
{
	struct file_lock *fl;
	struct inode *inode = locks_inode(filp);
	struct file_lock_context *ctx;
	int type = F_UNLCK;
	LIST_HEAD(dispose);

	ctx = smp_load_acquire(&inode->i_flctx);
	if (ctx && !list_empty_careful(&ctx->flc_lease)) {
		percpu_down_read(&file_rwsem);
		spin_lock(&ctx->flc_lock);
		time_out_leases(inode, &dispose);
		list_for_each_entry(fl, &ctx->flc_lease, fl_list) {
			if (fl->fl_file != filp)
				continue;
			type = target_leasetype(fl);
			break;
		}
		spin_unlock(&ctx->flc_lock);
		percpu_up_read(&file_rwsem);

		locks_dispose_list(&dispose);
	}
	return type;
}
1754 
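/*
 * Example (userspace, illustrative): fcntl_getlease() backs the
 * F_GETLEASE command.  A minimal caller, assuming "fd" is an open
 * descriptor and _GNU_SOURCE was defined before including <fcntl.h>:
 *
 *	int type = fcntl(fd, F_GETLEASE);
 *
 *	if (type == F_WRLCK)
 *		printf("exclusive lease held\n");
 *	else if (type == F_RDLCK)
 *		printf("shared lease held\n");
 *	else if (type == F_UNLCK)
 *		printf("no lease held\n");
 *	else
 *		perror("F_GETLEASE");
 */
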
1755 /**
1756  * check_conflicting_open - see if the given dentry points to a file that has
1757  *			    an existing open that would conflict with the
1758  *			    desired lease.
1759  * @dentry:	dentry to check
1760  * @arg:	type of lease that we're trying to acquire
1761  * @flags:	current lock flags
1762  *
1763  * Check to see if there's an existing open fd on this file that would
1764  * conflict with the lease we're trying to set.
1765  */
1766 static int
1767 check_conflicting_open(const struct dentry *dentry, const long arg, int flags)
1768 {
1769 	int ret = 0;
1770 	struct inode *inode = dentry->d_inode;
1771 
1772 	if (flags & FL_LAYOUT)
1773 		return 0;
1774 
1775 	if ((arg == F_RDLCK) && inode_is_open_for_write(inode))
1776 		return -EAGAIN;
1777 
1778 	if ((arg == F_WRLCK) && ((d_count(dentry) > 1) ||
1779 	    (atomic_read(&inode->i_count) > 1)))
1780 		ret = -EAGAIN;
1781 
1782 	return ret;
1783 }
1784 
1785 static int
1786 generic_add_lease(struct file *filp, long arg, struct file_lock **flp, void **priv)
1787 {
1788 	struct file_lock *fl, *my_fl = NULL, *lease;
1789 	struct dentry *dentry = filp->f_path.dentry;
1790 	struct inode *inode = dentry->d_inode;
1791 	struct file_lock_context *ctx;
1792 	bool is_deleg = (*flp)->fl_flags & FL_DELEG;
1793 	int error;
1794 	LIST_HEAD(dispose);
1795 
1796 	lease = *flp;
1797 	trace_generic_add_lease(inode, lease);
1798 
1799 	/* Note that arg is never F_UNLCK here */
1800 	ctx = locks_get_lock_context(inode, arg);
1801 	if (!ctx)
1802 		return -ENOMEM;
1803 
1804 	/*
1805 	 * In the delegation case we need mutual exclusion with
1806 	 * a number of operations that take the i_mutex.  We trylock
1807	 * because delegations are an optional optimization; if there is
1808	 * any chance of a conflict we would rather not bother, since that
1809	 * may be a sign this just isn't a good file to hand out a
1810	 * delegation on.
1811 	 */
1812 	if (is_deleg && !inode_trylock(inode))
1813 		return -EAGAIN;
1814 
1815 	if (is_deleg && arg == F_WRLCK) {
1816 		/* Write delegations are not currently supported: */
1817 		inode_unlock(inode);
1818 		WARN_ON_ONCE(1);
1819 		return -EINVAL;
1820 	}
1821 
1822 	percpu_down_read(&file_rwsem);
1823 	spin_lock(&ctx->flc_lock);
1824 	time_out_leases(inode, &dispose);
1825 	error = check_conflicting_open(dentry, arg, lease->fl_flags);
1826 	if (error)
1827 		goto out;
1828 
1829 	/*
1830 	 * At this point, we know that if there is an exclusive
1831 	 * lease on this file, then we hold it on this filp
1832 	 * (otherwise our open of this file would have blocked).
1833 	 * And if we are trying to acquire an exclusive lease,
1834 	 * then the file is not open by anyone (including us)
1835 	 * except for this filp.
1836 	 */
1837 	error = -EAGAIN;
1838 	list_for_each_entry(fl, &ctx->flc_lease, fl_list) {
1839 		if (fl->fl_file == filp &&
1840 		    fl->fl_owner == lease->fl_owner) {
1841 			my_fl = fl;
1842 			continue;
1843 		}
1844 
1845 		/*
1846 		 * No exclusive leases if someone else has a lease on
1847 		 * this file:
1848 		 */
1849 		if (arg == F_WRLCK)
1850 			goto out;
1851 		/*
1852		 * Modifying our existing lease is OK, but we may not get a
1853		 * new lease if someone else is opening for write:
1854 		 */
1855 		if (fl->fl_flags & FL_UNLOCK_PENDING)
1856 			goto out;
1857 	}
1858 
1859 	if (my_fl != NULL) {
1860 		lease = my_fl;
1861 		error = lease->fl_lmops->lm_change(lease, arg, &dispose);
1862 		if (error)
1863 			goto out;
1864 		goto out_setup;
1865 	}
1866 
1867 	error = -EINVAL;
1868 	if (!leases_enable)
1869 		goto out;
1870 
1871 	locks_insert_lock_ctx(lease, &ctx->flc_lease);
1872 	/*
1873 	 * The check in break_lease() is lockless. It's possible for another
1874 	 * open to race in after we did the earlier check for a conflicting
1875 	 * open but before the lease was inserted. Check again for a
1876 	 * conflicting open and cancel the lease if there is one.
1877 	 *
1878 	 * We also add a barrier here to ensure that the insertion of the lock
1879 	 * precedes these checks.
1880 	 */
1881 	smp_mb();
1882 	error = check_conflicting_open(dentry, arg, lease->fl_flags);
1883 	if (error) {
1884 		locks_unlink_lock_ctx(lease);
1885 		goto out;
1886 	}
1887 
1888 out_setup:
1889 	if (lease->fl_lmops->lm_setup)
1890 		lease->fl_lmops->lm_setup(lease, priv);
1891 out:
1892 	spin_unlock(&ctx->flc_lock);
1893 	percpu_up_read(&file_rwsem);
1894 	locks_dispose_list(&dispose);
1895 	if (is_deleg)
1896 		inode_unlock(inode);
1897 	if (!error && !my_fl)
1898 		*flp = NULL;
1899 	return error;
1900 }
1901 
1902 static int generic_delete_lease(struct file *filp, void *owner)
1903 {
1904 	int error = -EAGAIN;
1905 	struct file_lock *fl, *victim = NULL;
1906 	struct inode *inode = locks_inode(filp);
1907 	struct file_lock_context *ctx;
1908 	LIST_HEAD(dispose);
1909 
1910 	ctx = smp_load_acquire(&inode->i_flctx);
1911 	if (!ctx) {
1912 		trace_generic_delete_lease(inode, NULL);
1913 		return error;
1914 	}
1915 
1916 	percpu_down_read(&file_rwsem);
1917 	spin_lock(&ctx->flc_lock);
1918 	list_for_each_entry(fl, &ctx->flc_lease, fl_list) {
1919 		if (fl->fl_file == filp &&
1920 		    fl->fl_owner == owner) {
1921 			victim = fl;
1922 			break;
1923 		}
1924 	}
1925 	trace_generic_delete_lease(inode, victim);
1926 	if (victim)
1927		error = victim->fl_lmops->lm_change(victim, F_UNLCK, &dispose);
1928 	spin_unlock(&ctx->flc_lock);
1929 	percpu_up_read(&file_rwsem);
1930 	locks_dispose_list(&dispose);
1931 	return error;
1932 }
1933 
1934 /**
1935  *	generic_setlease	-	sets a lease on an open file
1936  *	@filp:	file pointer
1937  *	@arg:	type of lease to obtain
1938  *	@flp:	input - file_lock to use, output - file_lock inserted
1939  *	@priv:	private data for lm_setup (may be NULL if lm_setup
1940  *		doesn't require it)
1941  *
1942  *	The (input) flp->fl_lmops->lm_break function is required
1943  *	by break_lease().
1944  */
1945 int generic_setlease(struct file *filp, long arg, struct file_lock **flp,
1946 			void **priv)
1947 {
1948 	struct inode *inode = locks_inode(filp);
1949 	int error;
1950 
1951 	if ((!uid_eq(current_fsuid(), inode->i_uid)) && !capable(CAP_LEASE))
1952 		return -EACCES;
1953 	if (!S_ISREG(inode->i_mode))
1954 		return -EINVAL;
1955 	error = security_file_lock(filp, arg);
1956 	if (error)
1957 		return error;
1958 
1959 	switch (arg) {
1960 	case F_UNLCK:
1961 		return generic_delete_lease(filp, *priv);
1962 	case F_RDLCK:
1963 	case F_WRLCK:
1964 		if (!(*flp)->fl_lmops->lm_break) {
1965 			WARN_ON_ONCE(1);
1966 			return -ENOLCK;
1967 		}
1968 
1969 		return generic_add_lease(filp, arg, flp, priv);
1970 	default:
1971 		return -EINVAL;
1972 	}
1973 }
1974 EXPORT_SYMBOL(generic_setlease);
1975 
1976 /**
1977  * vfs_setlease        -       sets a lease on an open file
1978  * @filp:	file pointer
1979  * @arg:	type of lease to obtain
1980  * @lease:	file_lock to use when adding a lease
1981  * @priv:	private info for lm_setup when adding a lease (may be
1982  *		NULL if lm_setup doesn't require it)
1983  *
1984  * Call this to establish a lease on the file. The "lease" argument is not
1985  * used for F_UNLCK requests and may be NULL. For commands that set or alter
1986  * an existing lease, the ``(*lease)->fl_lmops->lm_break`` operation must be
1987  * set; if not, this function will return -ENOLCK (and generate a scary-looking
1988  * stack trace).
1989  *
1990  * The "priv" pointer is passed directly to the lm_setup function as-is. It
1991  * may be NULL if the lm_setup operation doesn't require it.
1992  */
1993 int
1994 vfs_setlease(struct file *filp, long arg, struct file_lock **lease, void **priv)
1995 {
1996 	if (filp->f_op->setlease)
1997 		return filp->f_op->setlease(filp, arg, lease, priv);
1998 	else
1999 		return generic_setlease(filp, arg, lease, priv);
2000 }
2001 EXPORT_SYMBOL_GPL(vfs_setlease);
2002 
2003 static int do_fcntl_add_lease(unsigned int fd, struct file *filp, long arg)
2004 {
2005 	struct file_lock *fl;
2006 	struct fasync_struct *new;
2007 	int error;
2008 
2009 	fl = lease_alloc(filp, arg);
2010 	if (IS_ERR(fl))
2011 		return PTR_ERR(fl);
2012 
2013 	new = fasync_alloc();
2014 	if (!new) {
2015 		locks_free_lock(fl);
2016 		return -ENOMEM;
2017 	}
2018 	new->fa_fd = fd;
2019 
2020 	error = vfs_setlease(filp, arg, &fl, (void **)&new);
2021 	if (fl)
2022 		locks_free_lock(fl);
2023 	if (new)
2024 		fasync_free(new);
2025 	return error;
2026 }
2027 
2028 /**
2029  *	fcntl_setlease	-	sets a lease on an open file
2030  *	@fd: open file descriptor
2031  *	@filp: file pointer
2032  *	@arg: type of lease to obtain
2033  *
2034  *	Call this fcntl to establish a lease on the file.
2035  *	Note that the lease break is signalled with %SIGIO by default;
2036  *	call %F_SETSIG to select a different signal.
2037  */
2038 int fcntl_setlease(unsigned int fd, struct file *filp, long arg)
2039 {
2040 	if (arg == F_UNLCK)
2041 		return vfs_setlease(filp, F_UNLCK, NULL, (void **)&filp);
2042 	return do_fcntl_add_lease(fd, filp, arg);
2043 }
2044 
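/*
 * Example (userspace, illustrative): taking a read lease and releasing
 * it when the kernel signals a break.  SIGIO is the default break
 * signal.  Assumes "fd" refers to a regular file opened O_RDONLY by
 * its owner (or by a task with CAP_LEASE), and that _GNU_SOURCE was
 * defined before including <fcntl.h> and <signal.h>:
 *
 *	static volatile sig_atomic_t broken;
 *	static void on_sigio(int sig) { broken = 1; }
 *
 *	signal(SIGIO, on_sigio);
 *	if (fcntl(fd, F_SETLEASE, F_RDLCK) == -1)
 *		perror("F_SETLEASE");	<- EAGAIN: conflicting open exists
 *	while (!broken)
 *		pause();		<- serve cached data until the break
 *	fcntl(fd, F_SETLEASE, F_UNLCK);
 */
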
2045 /**
2046  * flock_lock_inode_wait - Apply a FLOCK-style lock to a file
2047  * @inode: inode of the file to apply to
2048  * @fl: The lock to be applied
2049  *
2050  * Apply a FLOCK style lock request to an inode.
2051  */
2052 static int flock_lock_inode_wait(struct inode *inode, struct file_lock *fl)
2053 {
2054 	int error;
2055 	might_sleep();
2056 	for (;;) {
2057 		error = flock_lock_inode(inode, fl);
2058 		if (error != FILE_LOCK_DEFERRED)
2059 			break;
2060 		error = wait_event_interruptible(fl->fl_wait, !fl->fl_blocker);
2061 		if (error)
2062 			break;
2063 	}
2064 	locks_delete_block(fl);
2065 	return error;
2066 }
2067 
2068 /**
2069  * locks_lock_inode_wait - Apply a lock to an inode
2070  * @inode: inode of the file to apply to
2071  * @fl: The lock to be applied
2072  *
2073  * Apply a POSIX or FLOCK style lock request to an inode.
2074  */
2075 int locks_lock_inode_wait(struct inode *inode, struct file_lock *fl)
2076 {
2077 	int res = 0;
2078 	switch (fl->fl_flags & (FL_POSIX|FL_FLOCK)) {
2079 		case FL_POSIX:
2080 			res = posix_lock_inode_wait(inode, fl);
2081 			break;
2082 		case FL_FLOCK:
2083 			res = flock_lock_inode_wait(inode, fl);
2084 			break;
2085 		default:
2086 			BUG();
2087 	}
2088 	return res;
2089 }
2090 EXPORT_SYMBOL(locks_lock_inode_wait);
2091 
2092 /**
2093  *	sys_flock - flock() system call.
2094  *	@fd: the file descriptor to lock.
2095  *	@cmd: the type of lock to apply.
2096  *
2097  *	Apply a %FL_FLOCK style lock to an open file descriptor.
2098  *	The @cmd can be one of:
2099  *
2100  *	- %LOCK_SH -- a shared lock.
2101  *	- %LOCK_EX -- an exclusive lock.
2102  *	- %LOCK_UN -- remove an existing lock.
2103  *	- %LOCK_MAND -- a 'mandatory' flock.
2104  *	  This exists to emulate Windows Share Modes.
2105  *
2106  *	%LOCK_MAND can be combined with %LOCK_READ or %LOCK_WRITE to allow other
2107  *	processes read and write access respectively.
2108  */
2109 SYSCALL_DEFINE2(flock, unsigned int, fd, unsigned int, cmd)
2110 {
2111 	struct fd f = fdget(fd);
2112 	struct file_lock *lock;
2113 	int can_sleep, unlock;
2114 	int error;
2115 
2116 	error = -EBADF;
2117 	if (!f.file)
2118 		goto out;
2119 
2120 	can_sleep = !(cmd & LOCK_NB);
2121 	cmd &= ~LOCK_NB;
2122 	unlock = (cmd == LOCK_UN);
2123 
2124 	if (!unlock && !(cmd & LOCK_MAND) &&
2125 	    !(f.file->f_mode & (FMODE_READ|FMODE_WRITE)))
2126 		goto out_putf;
2127 
2128 	lock = flock_make_lock(f.file, cmd, NULL);
2129 	if (IS_ERR(lock)) {
2130 		error = PTR_ERR(lock);
2131 		goto out_putf;
2132 	}
2133 
2134 	if (can_sleep)
2135 		lock->fl_flags |= FL_SLEEP;
2136 
2137 	error = security_file_lock(f.file, lock->fl_type);
2138 	if (error)
2139 		goto out_free;
2140 
2141 	if (f.file->f_op->flock)
2142 		error = f.file->f_op->flock(f.file,
2143 					  (can_sleep) ? F_SETLKW : F_SETLK,
2144 					  lock);
2145 	else
2146 		error = locks_lock_file_wait(f.file, lock);
2147 
2148  out_free:
2149 	locks_free_lock(lock);
2150 
2151  out_putf:
2152 	fdput(f);
2153  out:
2154 	return error;
2155 }
2156 
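/*
 * Example (userspace, illustrative): typical use of the flock() system
 * call implemented above.  LOCK_NB makes the request non-blocking, so
 * EWOULDBLOCK is reported instead of sleeping.  Needs <sys/file.h> and
 * <errno.h>:
 *
 *	if (flock(fd, LOCK_EX | LOCK_NB) == -1) {
 *		if (errno == EWOULDBLOCK)
 *			return;		<- someone else holds the lock
 *	}
 *	do_exclusive_work();		<- hypothetical critical section
 *	flock(fd, LOCK_UN);
 */
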
2157 /**
2158  * vfs_test_lock - test file byte range lock
2159  * @filp: The file to test lock for
2160  * @fl: The lock to test; also used to hold result
2161  *
2162  * Returns -ERRNO on failure.  Indicates presence of conflicting lock by
2163  * setting fl->fl_type to something other than F_UNLCK.
2164  */
2165 int vfs_test_lock(struct file *filp, struct file_lock *fl)
2166 {
2167 	if (filp->f_op->lock)
2168 		return filp->f_op->lock(filp, F_GETLK, fl);
2169 	posix_test_lock(filp, fl);
2170 	return 0;
2171 }
2172 EXPORT_SYMBOL_GPL(vfs_test_lock);
2173 
2174 /**
2175  * locks_translate_pid - translate a file_lock's fl_pid number into a namespace
2176  * @fl: The file_lock whose fl_pid should be translated
2177  * @ns: The namespace into which the pid should be translated
2178  *
2179  * Used to translate a fl_pid into a namespace virtual pid number
2180  */
2181 static pid_t locks_translate_pid(struct file_lock *fl, struct pid_namespace *ns)
2182 {
2183 	pid_t vnr;
2184 	struct pid *pid;
2185 
2186 	if (IS_OFDLCK(fl))
2187 		return -1;
2188 	if (IS_REMOTELCK(fl))
2189 		return fl->fl_pid;
2190 	/*
2191	 * If the flock owner process is dead and its pid has already been
2192	 * freed, the translation below won't work, but we still want to show
2193	 * the flock owner's pid number in the init pidns.
2194 	 */
2195 	if (ns == &init_pid_ns)
2196 		return (pid_t)fl->fl_pid;
2197 
2198 	rcu_read_lock();
2199 	pid = find_pid_ns(fl->fl_pid, &init_pid_ns);
2200 	vnr = pid_nr_ns(pid, ns);
2201 	rcu_read_unlock();
2202 	return vnr;
2203 }
2204 
2205 static int posix_lock_to_flock(struct flock *flock, struct file_lock *fl)
2206 {
2207 	flock->l_pid = locks_translate_pid(fl, task_active_pid_ns(current));
2208 #if BITS_PER_LONG == 32
2209 	/*
2210 	 * Make sure we can represent the posix lock via
2211 	 * legacy 32bit flock.
2212 	 */
2213 	if (fl->fl_start > OFFT_OFFSET_MAX)
2214 		return -EOVERFLOW;
2215 	if (fl->fl_end != OFFSET_MAX && fl->fl_end > OFFT_OFFSET_MAX)
2216 		return -EOVERFLOW;
2217 #endif
2218 	flock->l_start = fl->fl_start;
2219 	flock->l_len = fl->fl_end == OFFSET_MAX ? 0 :
2220 		fl->fl_end - fl->fl_start + 1;
2221 	flock->l_whence = 0;
2222 	flock->l_type = fl->fl_type;
2223 	return 0;
2224 }
2225 
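/*
 * Worked example of the conversion above: an internal lock covering
 * bytes 100..199 (fl_start == 100, fl_end == 199) is reported to
 * userspace as l_start == 100, l_len == 100, while a lock whose
 * fl_end is OFFSET_MAX is reported with l_len == 0, meaning "to end
 * of file".
 */
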
2226 #if BITS_PER_LONG == 32
2227 static void posix_lock_to_flock64(struct flock64 *flock, struct file_lock *fl)
2228 {
2229 	flock->l_pid = locks_translate_pid(fl, task_active_pid_ns(current));
2230 	flock->l_start = fl->fl_start;
2231 	flock->l_len = fl->fl_end == OFFSET_MAX ? 0 :
2232 		fl->fl_end - fl->fl_start + 1;
2233 	flock->l_whence = 0;
2234 	flock->l_type = fl->fl_type;
2235 }
2236 #endif
2237 
2238 /* Report the first existing lock that would conflict with flock.
2239  * This implements the F_GETLK command of fcntl().
2240  */
2241 int fcntl_getlk(struct file *filp, unsigned int cmd, struct flock *flock)
2242 {
2243 	struct file_lock *fl;
2244 	int error;
2245 
2246 	fl = locks_alloc_lock();
2247 	if (fl == NULL)
2248 		return -ENOMEM;
2249 	error = -EINVAL;
2250 	if (flock->l_type != F_RDLCK && flock->l_type != F_WRLCK)
2251 		goto out;
2252 
2253 	error = flock_to_posix_lock(filp, fl, flock);
2254 	if (error)
2255 		goto out;
2256 
2257 	if (cmd == F_OFD_GETLK) {
2258 		error = -EINVAL;
2259 		if (flock->l_pid != 0)
2260 			goto out;
2261 
2262 		cmd = F_GETLK;
2263 		fl->fl_flags |= FL_OFDLCK;
2264 		fl->fl_owner = filp;
2265 	}
2266 
2267 	error = vfs_test_lock(filp, fl);
2268 	if (error)
2269 		goto out;
2270 
2271 	flock->l_type = fl->fl_type;
2272 	if (fl->fl_type != F_UNLCK) {
2273 		error = posix_lock_to_flock(flock, fl);
2274 		if (error)
2275 			goto out;
2276 	}
2277 out:
2278 	locks_free_lock(fl);
2279 	return error;
2280 }
2281 
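/*
 * Example (userspace, illustrative): probing for a conflicting lock
 * with the OFD variant handled above.  l_pid must be zero on input for
 * F_OFD_GETLK; on output it is -1 when the conflicting lock is itself
 * an OFD lock.  Assumes _GNU_SOURCE was defined before <fcntl.h>:
 *
 *	struct flock fl = {
 *		.l_type   = F_WRLCK,
 *		.l_whence = SEEK_SET,
 *		.l_start  = 0,
 *		.l_len    = 0,		<- zero means "to end of file"
 *		.l_pid    = 0,		<- required for OFD commands
 *	};
 *
 *	if (fcntl(fd, F_OFD_GETLK, &fl) == 0 && fl.l_type != F_UNLCK)
 *		printf("conflict from pid %d\n", (int)fl.l_pid);
 */
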
2282 /**
2283  * vfs_lock_file - file byte range lock
2284  * @filp: The file to apply the lock to
2285  * @cmd: type of locking operation (F_SETLK, F_GETLK, etc.)
2286  * @fl: The lock to be applied
2287  * @conf: Place to return a copy of the conflicting lock, if found.
2288  *
2289  * A caller that doesn't care about the conflicting lock may pass NULL
2290  * as the final argument.
2291  *
2292  * If the filesystem defines a private ->lock() method, then @conf will
2293  * be left unchanged; so a caller that cares should initialize it to
2294  * some acceptable default.
2295  *
2296  * To avoid blocking kernel daemons, such as lockd, that need to acquire POSIX
2297  * locks, the ->lock() interface may return asynchronously, before the lock has
2298  * been granted or denied by the underlying filesystem, if (and only if)
2299  * lm_grant is set. Callers expecting ->lock() to return asynchronously
2300  * will only use F_SETLK, not F_SETLKW; they will set FL_SLEEP if (and only if)
2301  * the request is for a blocking lock. When ->lock() does return asynchronously,
2302  * it must return FILE_LOCK_DEFERRED, and call ->lm_grant() when the lock
2303  * request completes.
2304  * If the request is for a non-blocking lock the file system should return
2305  * FILE_LOCK_DEFERRED, then try to get the lock and call the callback routine
2306  * with the result. If the request times out the callback routine will return
2307  * a nonzero return code and the file system should release the lock. The file
2308  * system is also responsible for keeping a corresponding posix lock when it
2309  * grants a lock, so the VFS can find out which locks are locally held and do
2310  * the correct lock cleanup when required.
2311  * The underlying filesystem must not drop the kernel lock or call
2312  * ->lm_grant() before returning to the caller with a FILE_LOCK_DEFERRED
2313  * return code.
2314  */
2315 int vfs_lock_file(struct file *filp, unsigned int cmd, struct file_lock *fl, struct file_lock *conf)
2316 {
2317 	if (filp->f_op->lock)
2318 		return filp->f_op->lock(filp, cmd, fl);
2319 	else
2320 		return posix_lock_file(filp, fl, conf);
2321 }
2322 EXPORT_SYMBOL_GPL(vfs_lock_file);
2323 
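/*
 * Sketch (editorial, hypothetical filesystem): the shape of an
 * asynchronous ->lock() method as described above.  myfs_queue_lock()
 * is a made-up helper that forwards the request to a lock server and
 * arranges for fl->fl_lmops->lm_grant() to be called with the result
 * once it is known:
 *
 *	static int myfs_lock(struct file *filp, int cmd, struct file_lock *fl)
 *	{
 *		if (fl->fl_lmops && fl->fl_lmops->lm_grant) {
 *			myfs_queue_lock(filp, cmd, fl);
 *			return FILE_LOCK_DEFERRED;
 *		}
 *		return posix_lock_file(filp, fl, NULL);
 *	}
 */
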
2324 static int do_lock_file_wait(struct file *filp, unsigned int cmd,
2325 			     struct file_lock *fl)
2326 {
2327 	int error;
2328 
2329 	error = security_file_lock(filp, fl->fl_type);
2330 	if (error)
2331 		return error;
2332 
2333 	for (;;) {
2334 		error = vfs_lock_file(filp, cmd, fl, NULL);
2335 		if (error != FILE_LOCK_DEFERRED)
2336 			break;
2337 		error = wait_event_interruptible(fl->fl_wait, !fl->fl_blocker);
2338 		if (error)
2339 			break;
2340 	}
2341 	locks_delete_block(fl);
2342 
2343 	return error;
2344 }
2345 
2346 /* Ensure that fl->fl_file has compatible f_mode for F_SETLK calls */
2347 static int
2348 check_fmode_for_setlk(struct file_lock *fl)
2349 {
2350 	switch (fl->fl_type) {
2351 	case F_RDLCK:
2352 		if (!(fl->fl_file->f_mode & FMODE_READ))
2353 			return -EBADF;
2354 		break;
2355 	case F_WRLCK:
2356 		if (!(fl->fl_file->f_mode & FMODE_WRITE))
2357 			return -EBADF;
2358 	}
2359 	return 0;
2360 }
2361 
2362 /* Apply the lock described by flock to an open file descriptor.
2363  * This implements both the F_SETLK and F_SETLKW commands of fcntl().
2364  */
2365 int fcntl_setlk(unsigned int fd, struct file *filp, unsigned int cmd,
2366 		struct flock *flock)
2367 {
2368 	struct file_lock *file_lock = locks_alloc_lock();
2369 	struct inode *inode = locks_inode(filp);
2370 	struct file *f;
2371 	int error;
2372 
2373 	if (file_lock == NULL)
2374 		return -ENOLCK;
2375 
2376 	/* Don't allow mandatory locks on files that may be memory mapped
2377 	 * and shared.
2378 	 */
2379 	if (mandatory_lock(inode) && mapping_writably_mapped(filp->f_mapping)) {
2380 		error = -EAGAIN;
2381 		goto out;
2382 	}
2383 
2384 	error = flock_to_posix_lock(filp, file_lock, flock);
2385 	if (error)
2386 		goto out;
2387 
2388 	error = check_fmode_for_setlk(file_lock);
2389 	if (error)
2390 		goto out;
2391 
2392 	/*
2393 	 * If the cmd is requesting file-private locks, then set the
2394 	 * FL_OFDLCK flag and override the owner.
2395 	 */
2396 	switch (cmd) {
2397 	case F_OFD_SETLK:
2398 		error = -EINVAL;
2399 		if (flock->l_pid != 0)
2400 			goto out;
2401 
2402 		cmd = F_SETLK;
2403 		file_lock->fl_flags |= FL_OFDLCK;
2404 		file_lock->fl_owner = filp;
2405 		break;
2406 	case F_OFD_SETLKW:
2407 		error = -EINVAL;
2408 		if (flock->l_pid != 0)
2409 			goto out;
2410 
2411 		cmd = F_SETLKW;
2412 		file_lock->fl_flags |= FL_OFDLCK;
2413 		file_lock->fl_owner = filp;
2414 		/* Fallthrough */
2415 	case F_SETLKW:
2416 		file_lock->fl_flags |= FL_SLEEP;
2417 	}
2418 
2419 	error = do_lock_file_wait(filp, cmd, file_lock);
2420 
2421 	/*
2422 	 * Attempt to detect a close/fcntl race and recover by releasing the
2423 	 * lock that was just acquired. There is no need to do that when we're
2424 	 * unlocking though, or for OFD locks.
2425 	 */
2426 	if (!error && file_lock->fl_type != F_UNLCK &&
2427 	    !(file_lock->fl_flags & FL_OFDLCK)) {
2428 		/*
2429 		 * We need that spin_lock here - it prevents reordering between
2430 		 * update of i_flctx->flc_posix and check for it done in
2431 		 * close(). rcu_read_lock() wouldn't do.
2432 		 */
2433 		spin_lock(&current->files->file_lock);
2434 		f = fcheck(fd);
2435 		spin_unlock(&current->files->file_lock);
2436 		if (f != filp) {
2437 			file_lock->fl_type = F_UNLCK;
2438 			error = do_lock_file_wait(filp, cmd, file_lock);
2439 			WARN_ON_ONCE(error);
2440 			error = -EBADF;
2441 		}
2442 	}
2443 out:
2444 	trace_fcntl_setlk(inode, file_lock, error);
2445 	locks_free_lock(file_lock);
2446 	return error;
2447 }
2448 
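/*
 * Example (userspace, illustrative): setting an OFD lock through the
 * path above.  OFD locks belong to the open file description rather
 * than the process, so they are shared by dup()ed descriptors and are
 * released only when the last descriptor for that description is
 * closed.  Assumes _GNU_SOURCE was defined before <fcntl.h>:
 *
 *	struct flock fl = {
 *		.l_type   = F_WRLCK,
 *		.l_whence = SEEK_SET,
 *		.l_start  = 0,
 *		.l_len    = 0,
 *		.l_pid    = 0,		<- must be zero, see above
 *	};
 *
 *	if (fcntl(fd, F_OFD_SETLKW, &fl) == -1)	<- blocking variant
 *		perror("F_OFD_SETLKW");
 */
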
2449 #if BITS_PER_LONG == 32
2450 /* Report the first existing lock that would conflict with flock.
2451  * This implements the F_GETLK64 command of fcntl().
2452  */
2453 int fcntl_getlk64(struct file *filp, unsigned int cmd, struct flock64 *flock)
2454 {
2455 	struct file_lock *fl;
2456 	int error;
2457 
2458 	fl = locks_alloc_lock();
2459 	if (fl == NULL)
2460 		return -ENOMEM;
2461 
2462 	error = -EINVAL;
2463 	if (flock->l_type != F_RDLCK && flock->l_type != F_WRLCK)
2464 		goto out;
2465 
2466 	error = flock64_to_posix_lock(filp, fl, flock);
2467 	if (error)
2468 		goto out;
2469 
2470 	if (cmd == F_OFD_GETLK) {
2471 		error = -EINVAL;
2472 		if (flock->l_pid != 0)
2473 			goto out;
2474 
2475 		cmd = F_GETLK64;
2476 		fl->fl_flags |= FL_OFDLCK;
2477 		fl->fl_owner = filp;
2478 	}
2479 
2480 	error = vfs_test_lock(filp, fl);
2481 	if (error)
2482 		goto out;
2483 
2484 	flock->l_type = fl->fl_type;
2485 	if (fl->fl_type != F_UNLCK)
2486 		posix_lock_to_flock64(flock, fl);
2487 
2488 out:
2489 	locks_free_lock(fl);
2490 	return error;
2491 }
2492 
2493 /* Apply the lock described by flock to an open file descriptor.
2494  * This implements both the F_SETLK64 and F_SETLKW64 commands of fcntl().
2495  */
2496 int fcntl_setlk64(unsigned int fd, struct file *filp, unsigned int cmd,
2497 		struct flock64 *flock)
2498 {
2499 	struct file_lock *file_lock = locks_alloc_lock();
2500 	struct inode *inode = locks_inode(filp);
2501 	struct file *f;
2502 	int error;
2503 
2504 	if (file_lock == NULL)
2505 		return -ENOLCK;
2506 
2507 	/* Don't allow mandatory locks on files that may be memory mapped
2508 	 * and shared.
2509 	 */
2510 	if (mandatory_lock(inode) && mapping_writably_mapped(filp->f_mapping)) {
2511 		error = -EAGAIN;
2512 		goto out;
2513 	}
2514 
2515 	error = flock64_to_posix_lock(filp, file_lock, flock);
2516 	if (error)
2517 		goto out;
2518 
2519 	error = check_fmode_for_setlk(file_lock);
2520 	if (error)
2521 		goto out;
2522 
2523 	/*
2524 	 * If the cmd is requesting file-private locks, then set the
2525 	 * FL_OFDLCK flag and override the owner.
2526 	 */
2527 	switch (cmd) {
2528 	case F_OFD_SETLK:
2529 		error = -EINVAL;
2530 		if (flock->l_pid != 0)
2531 			goto out;
2532 
2533 		cmd = F_SETLK64;
2534 		file_lock->fl_flags |= FL_OFDLCK;
2535 		file_lock->fl_owner = filp;
2536 		break;
2537 	case F_OFD_SETLKW:
2538 		error = -EINVAL;
2539 		if (flock->l_pid != 0)
2540 			goto out;
2541 
2542 		cmd = F_SETLKW64;
2543 		file_lock->fl_flags |= FL_OFDLCK;
2544 		file_lock->fl_owner = filp;
2545 		/* Fallthrough */
2546 	case F_SETLKW64:
2547 		file_lock->fl_flags |= FL_SLEEP;
2548 	}
2549 
2550 	error = do_lock_file_wait(filp, cmd, file_lock);
2551 
2552 	/*
2553 	 * Attempt to detect a close/fcntl race and recover by releasing the
2554 	 * lock that was just acquired. There is no need to do that when we're
2555 	 * unlocking though, or for OFD locks.
2556 	 */
2557 	if (!error && file_lock->fl_type != F_UNLCK &&
2558 	    !(file_lock->fl_flags & FL_OFDLCK)) {
2559 		/*
2560 		 * We need that spin_lock here - it prevents reordering between
2561 		 * update of i_flctx->flc_posix and check for it done in
2562 		 * close(). rcu_read_lock() wouldn't do.
2563 		 */
2564 		spin_lock(&current->files->file_lock);
2565 		f = fcheck(fd);
2566 		spin_unlock(&current->files->file_lock);
2567 		if (f != filp) {
2568 			file_lock->fl_type = F_UNLCK;
2569 			error = do_lock_file_wait(filp, cmd, file_lock);
2570 			WARN_ON_ONCE(error);
2571 			error = -EBADF;
2572 		}
2573 	}
2574 out:
2575 	locks_free_lock(file_lock);
2576 	return error;
2577 }
2578 #endif /* BITS_PER_LONG == 32 */
2579 
2580 /*
2581  * This function is called when the file is being removed
2582  * from the task's fd array.  POSIX locks belonging to this task
2583  * are deleted at this time.
2584  */
2585 void locks_remove_posix(struct file *filp, fl_owner_t owner)
2586 {
2587 	int error;
2588 	struct inode *inode = locks_inode(filp);
2589 	struct file_lock lock;
2590 	struct file_lock_context *ctx;
2591 
2592 	/*
2593 	 * If there are no locks held on this file, we don't need to call
2594 	 * posix_lock_file().  Another process could be setting a lock on this
2595 	 * file at the same time, but we wouldn't remove that lock anyway.
2596 	 */
2597	ctx = smp_load_acquire(&inode->i_flctx);
2598 	if (!ctx || list_empty(&ctx->flc_posix))
2599 		return;
2600 
2601 	locks_init_lock(&lock);
2602 	lock.fl_type = F_UNLCK;
2603 	lock.fl_flags = FL_POSIX | FL_CLOSE;
2604 	lock.fl_start = 0;
2605 	lock.fl_end = OFFSET_MAX;
2606 	lock.fl_owner = owner;
2607 	lock.fl_pid = current->tgid;
2608 	lock.fl_file = filp;
2609 	lock.fl_ops = NULL;
2610 	lock.fl_lmops = NULL;
2611 
2612 	error = vfs_lock_file(filp, F_SETLK, &lock, NULL);
2613 
2614 	if (lock.fl_ops && lock.fl_ops->fl_release_private)
2615 		lock.fl_ops->fl_release_private(&lock);
2616 	trace_locks_remove_posix(inode, &lock, error);
2617 }
2618 EXPORT_SYMBOL(locks_remove_posix);
2619 
2620 /* The i_flctx must be valid when calling into here */
2621 static void
2622 locks_remove_flock(struct file *filp, struct file_lock_context *flctx)
2623 {
2624 	struct file_lock fl;
2625 	struct inode *inode = locks_inode(filp);
2626 
2627 	if (list_empty(&flctx->flc_flock))
2628 		return;
2629 
2630 	flock_make_lock(filp, LOCK_UN, &fl);
2631 	fl.fl_flags |= FL_CLOSE;
2632 
2633 	if (filp->f_op->flock)
2634 		filp->f_op->flock(filp, F_SETLKW, &fl);
2635 	else
2636 		flock_lock_inode(inode, &fl);
2637 
2638 	if (fl.fl_ops && fl.fl_ops->fl_release_private)
2639 		fl.fl_ops->fl_release_private(&fl);
2640 }
2641 
2642 /* The i_flctx must be valid when calling into here */
2643 static void
2644 locks_remove_lease(struct file *filp, struct file_lock_context *ctx)
2645 {
2646 	struct file_lock *fl, *tmp;
2647 	LIST_HEAD(dispose);
2648 
2649 	if (list_empty(&ctx->flc_lease))
2650 		return;
2651 
2652 	percpu_down_read(&file_rwsem);
2653 	spin_lock(&ctx->flc_lock);
2654 	list_for_each_entry_safe(fl, tmp, &ctx->flc_lease, fl_list)
2655 		if (filp == fl->fl_file)
2656 			lease_modify(fl, F_UNLCK, &dispose);
2657 	spin_unlock(&ctx->flc_lock);
2658 	percpu_up_read(&file_rwsem);
2659 
2660 	locks_dispose_list(&dispose);
2661 }
2662 
2663 /*
2664  * This function is called on the last close of an open file.
2665  */
2666 void locks_remove_file(struct file *filp)
2667 {
2668 	struct file_lock_context *ctx;
2669 
2670 	ctx = smp_load_acquire(&locks_inode(filp)->i_flctx);
2671 	if (!ctx)
2672 		return;
2673 
2674 	/* remove any OFD locks */
2675 	locks_remove_posix(filp, filp);
2676 
2677 	/* remove flock locks */
2678 	locks_remove_flock(filp, ctx);
2679 
2680 	/* remove any leases */
2681 	locks_remove_lease(filp, ctx);
2682 
2683 	spin_lock(&ctx->flc_lock);
2684 	locks_check_ctx_file_list(filp, &ctx->flc_posix, "POSIX");
2685 	locks_check_ctx_file_list(filp, &ctx->flc_flock, "FLOCK");
2686 	locks_check_ctx_file_list(filp, &ctx->flc_lease, "LEASE");
2687 	spin_unlock(&ctx->flc_lock);
2688 }
2689 
2690 /**
2691  * vfs_cancel_lock - file byte range unblock lock
2692  * @filp: The file to apply the unblock to
2693  * @fl: The lock to be unblocked
2694  *
2695  * Used by lock managers to cancel blocked requests
2696  */
2697 int vfs_cancel_lock(struct file *filp, struct file_lock *fl)
2698 {
2699 	if (filp->f_op->lock)
2700 		return filp->f_op->lock(filp, F_CANCELLK, fl);
2701 	return 0;
2702 }
2703 EXPORT_SYMBOL_GPL(vfs_cancel_lock);
2704 
2705 #ifdef CONFIG_PROC_FS
2706 #include <linux/proc_fs.h>
2707 #include <linux/seq_file.h>
2708 
2709 struct locks_iterator {
2710 	int	li_cpu;
2711 	loff_t	li_pos;
2712 };
2713 
2714 static void lock_get_status(struct seq_file *f, struct file_lock *fl,
2715 			    loff_t id, char *pfx)
2716 {
2717 	struct inode *inode = NULL;
2718 	unsigned int fl_pid;
2719 	struct pid_namespace *proc_pidns = file_inode(f->file)->i_sb->s_fs_info;
2720 
2721 	fl_pid = locks_translate_pid(fl, proc_pidns);
2722 	/*
2723	 * If the lock owner is dead (and its pid has been freed) or not
2724	 * visible in the current pidns, zero is shown as the pid value. Check
2725	 * the lock info from init_pid_ns to get the saved lock pid value.
2726 	 */
2727 
2728 	if (fl->fl_file != NULL)
2729 		inode = locks_inode(fl->fl_file);
2730 
2731 	seq_printf(f, "%lld:%s ", id, pfx);
2732 	if (IS_POSIX(fl)) {
2733 		if (fl->fl_flags & FL_ACCESS)
2734 			seq_puts(f, "ACCESS");
2735 		else if (IS_OFDLCK(fl))
2736 			seq_puts(f, "OFDLCK");
2737 		else
2738 			seq_puts(f, "POSIX ");
2739 
2740 		seq_printf(f, " %s ",
2741 			     (inode == NULL) ? "*NOINODE*" :
2742 			     mandatory_lock(inode) ? "MANDATORY" : "ADVISORY ");
2743 	} else if (IS_FLOCK(fl)) {
2744 		if (fl->fl_type & LOCK_MAND) {
2745 			seq_puts(f, "FLOCK  MSNFS     ");
2746 		} else {
2747 			seq_puts(f, "FLOCK  ADVISORY  ");
2748 		}
2749 	} else if (IS_LEASE(fl)) {
2750 		if (fl->fl_flags & FL_DELEG)
2751 			seq_puts(f, "DELEG  ");
2752 		else
2753 			seq_puts(f, "LEASE  ");
2754 
2755 		if (lease_breaking(fl))
2756 			seq_puts(f, "BREAKING  ");
2757 		else if (fl->fl_file)
2758 			seq_puts(f, "ACTIVE    ");
2759 		else
2760 			seq_puts(f, "BREAKER   ");
2761 	} else {
2762 		seq_puts(f, "UNKNOWN UNKNOWN  ");
2763 	}
2764 	if (fl->fl_type & LOCK_MAND) {
2765 		seq_printf(f, "%s ",
2766 			       (fl->fl_type & LOCK_READ)
2767 			       ? (fl->fl_type & LOCK_WRITE) ? "RW   " : "READ "
2768 			       : (fl->fl_type & LOCK_WRITE) ? "WRITE" : "NONE ");
2769 	} else {
2770 		seq_printf(f, "%s ",
2771 			       (lease_breaking(fl))
2772 			       ? (fl->fl_type == F_UNLCK) ? "UNLCK" : "READ "
2773 			       : (fl->fl_type == F_WRLCK) ? "WRITE" : "READ ");
2774 	}
2775 	if (inode) {
2776 		/* userspace relies on this representation of dev_t */
2777 		seq_printf(f, "%d %02x:%02x:%ld ", fl_pid,
2778 				MAJOR(inode->i_sb->s_dev),
2779 				MINOR(inode->i_sb->s_dev), inode->i_ino);
2780 	} else {
2781 		seq_printf(f, "%d <none>:0 ", fl_pid);
2782 	}
2783 	if (IS_POSIX(fl)) {
2784 		if (fl->fl_end == OFFSET_MAX)
2785 			seq_printf(f, "%Ld EOF\n", fl->fl_start);
2786 		else
2787 			seq_printf(f, "%Ld %Ld\n", fl->fl_start, fl->fl_end);
2788 	} else {
2789 		seq_puts(f, "0 EOF\n");
2790 	}
2791 }
2792 
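/*
 * Illustrative /proc/locks lines in the format produced above (the
 * pid, device and inode values are invented for the example):
 *
 *	1: POSIX  ADVISORY  WRITE 1234 08:01:278912 0 EOF
 *	2: FLOCK  ADVISORY  WRITE 1235 08:01:278913 0 EOF
 *	2: -> FLOCK  ADVISORY  WRITE 1236 08:01:278913 0 EOF
 *	3: LEASE  ACTIVE    READ  1237 08:01:278914 0 EOF
 */
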
2793 static int locks_show(struct seq_file *f, void *v)
2794 {
2795 	struct locks_iterator *iter = f->private;
2796 	struct file_lock *fl, *bfl;
2797 	struct pid_namespace *proc_pidns = file_inode(f->file)->i_sb->s_fs_info;
2798 
2799 	fl = hlist_entry(v, struct file_lock, fl_link);
2800 
2801 	if (locks_translate_pid(fl, proc_pidns) == 0)
2802 		return 0;
2803 
2804 	lock_get_status(f, fl, iter->li_pos, "");
2805 
2806 	list_for_each_entry(bfl, &fl->fl_blocked_requests, fl_blocked_member)
2807 		lock_get_status(f, bfl, iter->li_pos, " ->");
2808 
2809 	return 0;
2810 }
2811 
2812 static void __show_fd_locks(struct seq_file *f,
2813 			struct list_head *head, int *id,
2814 			struct file *filp, struct files_struct *files)
2815 {
2816 	struct file_lock *fl;
2817 
2818 	list_for_each_entry(fl, head, fl_list) {
2819 
2820 		if (filp != fl->fl_file)
2821 			continue;
2822 		if (fl->fl_owner != files &&
2823 		    fl->fl_owner != filp)
2824 			continue;
2825 
2826 		(*id)++;
2827 		seq_puts(f, "lock:\t");
2828 		lock_get_status(f, fl, *id, "");
2829 	}
2830 }
2831 
2832 void show_fd_locks(struct seq_file *f,
2833 		  struct file *filp, struct files_struct *files)
2834 {
2835 	struct inode *inode = locks_inode(filp);
2836 	struct file_lock_context *ctx;
2837 	int id = 0;
2838 
2839 	ctx = smp_load_acquire(&inode->i_flctx);
2840 	if (!ctx)
2841 		return;
2842 
2843 	spin_lock(&ctx->flc_lock);
2844 	__show_fd_locks(f, &ctx->flc_flock, &id, filp, files);
2845 	__show_fd_locks(f, &ctx->flc_posix, &id, filp, files);
2846 	__show_fd_locks(f, &ctx->flc_lease, &id, filp, files);
2847 	spin_unlock(&ctx->flc_lock);
2848 }
2849 
2850 static void *locks_start(struct seq_file *f, loff_t *pos)
2851 	__acquires(&blocked_lock_lock)
2852 {
2853 	struct locks_iterator *iter = f->private;
2854 
2855 	iter->li_pos = *pos + 1;
2856 	percpu_down_write(&file_rwsem);
2857 	spin_lock(&blocked_lock_lock);
2858 	return seq_hlist_start_percpu(&file_lock_list.hlist, &iter->li_cpu, *pos);
2859 }
2860 
2861 static void *locks_next(struct seq_file *f, void *v, loff_t *pos)
2862 {
2863 	struct locks_iterator *iter = f->private;
2864 
2865 	++iter->li_pos;
2866 	return seq_hlist_next_percpu(v, &file_lock_list.hlist, &iter->li_cpu, pos);
2867 }
2868 
2869 static void locks_stop(struct seq_file *f, void *v)
2870 	__releases(&blocked_lock_lock)
2871 {
2872 	spin_unlock(&blocked_lock_lock);
2873 	percpu_up_write(&file_rwsem);
2874 }
2875 
2876 static const struct seq_operations locks_seq_operations = {
2877 	.start	= locks_start,
2878 	.next	= locks_next,
2879 	.stop	= locks_stop,
2880 	.show	= locks_show,
2881 };
2882 
2883 static int __init proc_locks_init(void)
2884 {
2885 	proc_create_seq_private("locks", 0, NULL, &locks_seq_operations,
2886 			sizeof(struct locks_iterator), NULL);
2887 	return 0;
2888 }
2889 fs_initcall(proc_locks_init);
2890 #endif
2891 
2892 static int __init filelock_init(void)
2893 {
2894 	int i;
2895 
2896 	flctx_cache = kmem_cache_create("file_lock_ctx",
2897 			sizeof(struct file_lock_context), 0, SLAB_PANIC, NULL);
2898 
2899 	filelock_cache = kmem_cache_create("file_lock_cache",
2900 			sizeof(struct file_lock), 0, SLAB_PANIC, NULL);
2901 
2902 	for_each_possible_cpu(i) {
2903 		struct file_lock_list_struct *fll = per_cpu_ptr(&file_lock_list, i);
2904 
2905 		spin_lock_init(&fll->lock);
2906 		INIT_HLIST_HEAD(&fll->hlist);
2907 	}
2908 
2909 	return 0;
2910 }
2911 core_initcall(filelock_init);
2912