xref: /openbmc/linux/fs/locks.c (revision 4d765e48c5edb2090b82e97680b2d1ddf6d18c31)
1  /*
2   *  linux/fs/locks.c
3   *
4   *  Provide support for fcntl()'s F_GETLK, F_SETLK, and F_SETLKW calls.
5   *  Doug Evans (dje@spiff.uucp), August 07, 1992
6   *
7   *  Deadlock detection added.
8   *  FIXME: one thing isn't handled yet:
9   *	- mandatory locks (requires lots of changes elsewhere)
10   *  Kelly Carmichael (kelly@[142.24.8.65]), September 17, 1994.
11   *
12   *  Miscellaneous edits, and a total rewrite of posix_lock_file() code.
13   *  Kai Petzke (wpp@marie.physik.tu-berlin.de), 1994
14   *
15   *  Converted file_lock_table to a linked list from an array, which eliminates
16   *  the limits on how many active file locks are open.
17   *  Chad Page (pageone@netcom.com), November 27, 1994
18   *
19   *  Removed dependency on file descriptors. dup()'ed file descriptors now
20   *  get the same locks as the original file descriptors, and a close() on
21   *  any file descriptor removes ALL the locks on the file for the current
22   *  process. Since locks still depend on the process id, locks are inherited
23   *  after an exec() but not after a fork(). This agrees with POSIX, and both
24   *  BSD and SVR4 practice.
25   *  Andy Walker (andy@lysaker.kvaerner.no), February 14, 1995
26   *
27   *  Scrapped free list which is redundant now that we allocate locks
28   *  dynamically with kmalloc()/kfree().
29   *  Andy Walker (andy@lysaker.kvaerner.no), February 21, 1995
30   *
31   *  Implemented two lock personalities - FL_FLOCK and FL_POSIX.
32   *
33   *  FL_POSIX locks are created with calls to fcntl() and lockf() through the
34   *  fcntl() system call. They have the semantics described above.
35   *
36   *  FL_FLOCK locks are created with calls to flock(), through the flock()
37   *  system call, which is new. Old C libraries implement flock() via fcntl()
38   *  and will continue to use the old, broken implementation.
39   *
40   *  FL_FLOCK locks follow the 4.4 BSD flock() semantics. They are associated
41   *  with a file pointer (filp). As a result they can be shared by a parent
42   *  process and its children after a fork(). They are removed when the last
43   *  file descriptor referring to the file pointer is closed (unless explicitly
44   *  unlocked).
45   *
46   *  FL_FLOCK locks never deadlock; an existing lock is always removed before
47   *  upgrading from shared to exclusive (or vice versa). When this happens
48   *  any processes blocked by the current lock are woken up and allowed to
49   *  run before the new lock is applied.
50   *  Andy Walker (andy@lysaker.kvaerner.no), June 09, 1995
51   *
52   *  Removed some race conditions in flock_lock_file(), marked other possible
53   *  races. Just grep for FIXME to see them.
54   *  Dmitry Gorodchanin (pgmdsg@ibi.com), February 09, 1996.
55   *
56   *  Addressed Dmitry's concerns. Deadlock checking no longer recursive.
57   *  Lock allocation changed to GFP_ATOMIC as we can't afford to sleep
58   *  once we've checked for blocking and deadlocking.
59   *  Andy Walker (andy@lysaker.kvaerner.no), April 03, 1996.
60   *
61   *  Initial implementation of mandatory locks. SunOS turned out to be
62   *  a rotten model, so I implemented the "obvious" semantics.
63   *  See 'Documentation/filesystems/mandatory-locking.txt' for details.
64   *  Andy Walker (andy@lysaker.kvaerner.no), April 06, 1996.
65   *
66   *  Don't allow mandatory locks on mmap()'ed files. Added simple functions to
67   *  check if a file has mandatory locks, used by mmap(), open() and creat() to
68   *  see if system call should be rejected. Ref. HP-UX/SunOS/Solaris Reference
69   *  Manual, Section 2.
70   *  Andy Walker (andy@lysaker.kvaerner.no), April 09, 1996.
71   *
72   *  Tidied up block list handling. Added '/proc/locks' interface.
73   *  Andy Walker (andy@lysaker.kvaerner.no), April 24, 1996.
74   *
75   *  Fixed deadlock condition for pathological code that mixes calls to
76   *  flock() and fcntl().
77   *  Andy Walker (andy@lysaker.kvaerner.no), April 29, 1996.
78   *
79   *  Allow only one type of locking scheme (FL_POSIX or FL_FLOCK) to be in use
80   *  for a given file at a time. Changed the CONFIG_LOCK_MANDATORY scheme to
81   *  guarantee sensible behaviour in the case where file system modules might
82   *  be compiled with different options than the kernel itself.
83   *  Andy Walker (andy@lysaker.kvaerner.no), May 15, 1996.
84   *
85   *  Added a couple of missing wake_up() calls. Thanks to Thomas Meckel
86   *  (Thomas.Meckel@mni.fh-giessen.de) for spotting this.
87   *  Andy Walker (andy@lysaker.kvaerner.no), May 15, 1996.
88   *
89   *  Changed FL_POSIX locks to use the block list in the same way as FL_FLOCK
90   *  locks. Changed process synchronisation to avoid dereferencing locks that
91   *  have already been freed.
92   *  Andy Walker (andy@lysaker.kvaerner.no), Sep 21, 1996.
93   *
94   *  Made the block list a circular list to minimise searching in the list.
95   *  Andy Walker (andy@lysaker.kvaerner.no), Sep 25, 1996.
96   *
97   *  Made mandatory locking a mount option. Default is not to allow mandatory
98   *  locking.
99   *  Andy Walker (andy@lysaker.kvaerner.no), Oct 04, 1996.
100   *
101   *  Some adaptations for NFS support.
102   *  Olaf Kirch (okir@monad.swb.de), Dec 1996,
103   *
104   *  Fixed /proc/locks interface so that we can't overrun the buffer we are handed.
105   *  Andy Walker (andy@lysaker.kvaerner.no), May 12, 1997.
106   *
107   *  Use slab allocator instead of kmalloc/kfree.
108   *  Use generic list implementation from <linux/list.h>.
109   *  Sped up posix_locks_deadlock by only considering blocked locks.
110   *  Matthew Wilcox <willy@debian.org>, March, 2000.
111   *
112   *  Leases and LOCK_MAND
113   *  Matthew Wilcox <willy@debian.org>, June, 2000.
114   *  Stephen Rothwell <sfr@canb.auug.org.au>, June, 2000.
115   */
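
/*
 * A minimal userspace sketch (illustrative only; assumes a scratch file and
 * omits error handling) of how the two lock flavours described above are
 * requested:
 *
 *	#include <fcntl.h>
 *	#include <sys/file.h>
 *	#include <unistd.h>
 *
 *	int fd = open("/tmp/example", O_RDWR);
 *
 *	// FL_FLOCK: whole-file lock tied to the open file description
 *	flock(fd, LOCK_EX);
 *
 *	// FL_POSIX: byte-range record lock owned by the process
 *	struct flock fl = {
 *		.l_type   = F_WRLCK,
 *		.l_whence = SEEK_SET,
 *		.l_start  = 0,
 *		.l_len    = 0,		// 0 means "to end of file"
 *	};
 *	fcntl(fd, F_SETLKW, &fl);
 */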
116  
117  #include <linux/capability.h>
118  #include <linux/file.h>
119  #include <linux/fdtable.h>
120  #include <linux/fs.h>
121  #include <linux/init.h>
122  #include <linux/module.h>
123  #include <linux/security.h>
124  #include <linux/slab.h>
125  #include <linux/syscalls.h>
126  #include <linux/time.h>
127  #include <linux/rcupdate.h>
128  #include <linux/pid_namespace.h>
129  #include <linux/hashtable.h>
130  #include <linux/percpu.h>
131  #include <linux/lglock.h>
132  
133  #define CREATE_TRACE_POINTS
134  #include <trace/events/filelock.h>
135  
136  #include <asm/uaccess.h>
137  
138  #define IS_POSIX(fl)	(fl->fl_flags & FL_POSIX)
139  #define IS_FLOCK(fl)	(fl->fl_flags & FL_FLOCK)
140  #define IS_LEASE(fl)	(fl->fl_flags & (FL_LEASE|FL_DELEG))
141  #define IS_OFDLCK(fl)	(fl->fl_flags & FL_OFDLCK)
142  
143  static bool lease_breaking(struct file_lock *fl)
144  {
145  	return fl->fl_flags & (FL_UNLOCK_PENDING | FL_DOWNGRADE_PENDING);
146  }
147  
148  static int target_leasetype(struct file_lock *fl)
149  {
150  	if (fl->fl_flags & FL_UNLOCK_PENDING)
151  		return F_UNLCK;
152  	if (fl->fl_flags & FL_DOWNGRADE_PENDING)
153  		return F_RDLCK;
154  	return fl->fl_type;
155  }
156  
157  int leases_enable = 1;
158  int lease_break_time = 45;
159  
160  #define for_each_lock(inode, lockp) \
161  	for (lockp = &inode->i_flock; *lockp != NULL; lockp = &(*lockp)->fl_next)
162  
163  /*
164   * The global file_lock_list is only used for displaying /proc/locks, so we
165   * keep a list on each CPU, with each list protected by its own spinlock via
166   * the file_lock_lglock. Note that alterations to the list also require that
167   * the relevant i_lock is held.
168   */
169  DEFINE_STATIC_LGLOCK(file_lock_lglock);
170  static DEFINE_PER_CPU(struct hlist_head, file_lock_list);
171  
172  /*
173   * The blocked_hash is used to find POSIX lock loops for deadlock detection.
174   * It is protected by blocked_lock_lock.
175   *
176   * We hash locks by lockowner in order to optimize searching for the lock a
177   * particular lockowner is waiting on.
178   *
179   * FIXME: make this value scale via some heuristic? We generally will want more
180   * buckets when we have more lockowners holding locks, but that's a little
181   * difficult to determine without knowing what the workload will look like.
182   */
183  #define BLOCKED_HASH_BITS	7
184  static DEFINE_HASHTABLE(blocked_hash, BLOCKED_HASH_BITS);
185  
186  /*
187   * This lock protects the blocked_hash. Generally, if you're accessing it, you
188   * want to be holding this lock.
189   *
190   * In addition, it also protects the fl->fl_block list, and the fl->fl_next
191   * pointer for file_lock structures that are acting as lock requests (in
192   * contrast to those that are acting as records of acquired locks).
193   *
194   * Note that when we acquire this lock in order to change the above fields,
195   * we often hold the i_lock as well. In certain cases, when reading the fields
196   * protected by this lock, we can skip acquiring it iff we already hold the
197   * i_lock.
198   *
199   * In particular, adding an entry to the fl_block list requires that you hold
200   * both the i_lock and the blocked_lock_lock (acquired in that order). Deleting
201   * an entry from the list however only requires the blocked_lock_lock.
202   */
203  static DEFINE_SPINLOCK(blocked_lock_lock);
204  
205  static struct kmem_cache *filelock_cache __read_mostly;
206  
207  static void locks_init_lock_heads(struct file_lock *fl)
208  {
209  	INIT_HLIST_NODE(&fl->fl_link);
210  	INIT_LIST_HEAD(&fl->fl_block);
211  	init_waitqueue_head(&fl->fl_wait);
212  }
213  
214  /* Allocate an empty lock structure. */
215  struct file_lock *locks_alloc_lock(void)
216  {
217  	struct file_lock *fl = kmem_cache_zalloc(filelock_cache, GFP_KERNEL);
218  
219  	if (fl)
220  		locks_init_lock_heads(fl);
221  
222  	return fl;
223  }
224  EXPORT_SYMBOL_GPL(locks_alloc_lock);
225  
226  void locks_release_private(struct file_lock *fl)
227  {
228  	if (fl->fl_ops) {
229  		if (fl->fl_ops->fl_release_private)
230  			fl->fl_ops->fl_release_private(fl);
231  		fl->fl_ops = NULL;
232  	}
233  
234  	if (fl->fl_lmops) {
235  		if (fl->fl_lmops->lm_put_owner)
236  			fl->fl_lmops->lm_put_owner(fl);
237  		fl->fl_lmops = NULL;
238  	}
239  }
240  EXPORT_SYMBOL_GPL(locks_release_private);
241  
242  /* Free a lock which is not in use. */
243  void locks_free_lock(struct file_lock *fl)
244  {
245  	BUG_ON(waitqueue_active(&fl->fl_wait));
246  	BUG_ON(!list_empty(&fl->fl_block));
247  	BUG_ON(!hlist_unhashed(&fl->fl_link));
248  
249  	locks_release_private(fl);
250  	kmem_cache_free(filelock_cache, fl);
251  }
252  EXPORT_SYMBOL(locks_free_lock);
253  
254  static void
255  locks_dispose_list(struct list_head *dispose)
256  {
257  	struct file_lock *fl;
258  
259  	while (!list_empty(dispose)) {
260  		fl = list_first_entry(dispose, struct file_lock, fl_block);
261  		list_del_init(&fl->fl_block);
262  		locks_free_lock(fl);
263  	}
264  }
265  
266  void locks_init_lock(struct file_lock *fl)
267  {
268  	memset(fl, 0, sizeof(struct file_lock));
269  	locks_init_lock_heads(fl);
270  }
271  
272  EXPORT_SYMBOL(locks_init_lock);
273  
274  /*
275   * Initialize a new lock from an existing file_lock structure.
276   */
277  void locks_copy_conflock(struct file_lock *new, struct file_lock *fl)
278  {
279  	new->fl_owner = fl->fl_owner;
280  	new->fl_pid = fl->fl_pid;
281  	new->fl_file = NULL;
282  	new->fl_flags = fl->fl_flags;
283  	new->fl_type = fl->fl_type;
284  	new->fl_start = fl->fl_start;
285  	new->fl_end = fl->fl_end;
286  	new->fl_lmops = fl->fl_lmops;
287  	new->fl_ops = NULL;
288  
289  	if (fl->fl_lmops) {
290  		if (fl->fl_lmops->lm_get_owner)
291  			fl->fl_lmops->lm_get_owner(new, fl);
292  	}
293  }
294  EXPORT_SYMBOL(locks_copy_conflock);
295  
296  void locks_copy_lock(struct file_lock *new, struct file_lock *fl)
297  {
298  	/* "new" must be a freshly-initialized lock */
299  	WARN_ON_ONCE(new->fl_ops);
300  
301  	locks_copy_conflock(new, fl);
302  
303  	new->fl_file = fl->fl_file;
304  	new->fl_ops = fl->fl_ops;
305  
306  	if (fl->fl_ops) {
307  		if (fl->fl_ops->fl_copy_lock)
308  			fl->fl_ops->fl_copy_lock(new, fl);
309  	}
310  }
311  
312  EXPORT_SYMBOL(locks_copy_lock);
313  
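/*
 * Translate a flock() command into an internal lock type: LOCK_SH maps to
 * F_RDLCK, LOCK_EX to F_WRLCK and LOCK_UN to F_UNLCK. LOCK_MAND requests
 * are passed through with their LOCK_RW bits intact; anything else is
 * rejected with -EINVAL.
 */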
314  static inline int flock_translate_cmd(int cmd) {
315  	if (cmd & LOCK_MAND)
316  		return cmd & (LOCK_MAND | LOCK_RW);
317  	switch (cmd) {
318  	case LOCK_SH:
319  		return F_RDLCK;
320  	case LOCK_EX:
321  		return F_WRLCK;
322  	case LOCK_UN:
323  		return F_UNLCK;
324  	}
325  	return -EINVAL;
326  }
327  
328  /* Fill in a file_lock structure with an appropriate FLOCK lock. */
329  static struct file_lock *
330  flock_make_lock(struct file *filp, unsigned int cmd)
331  {
332  	struct file_lock *fl;
333  	int type = flock_translate_cmd(cmd);
334  
335  	if (type < 0)
336  		return ERR_PTR(type);
337  
338  	fl = locks_alloc_lock();
339  	if (fl == NULL)
340  		return ERR_PTR(-ENOMEM);
341  
342  	fl->fl_file = filp;
343  	fl->fl_owner = filp;
344  	fl->fl_pid = current->tgid;
345  	fl->fl_flags = FL_FLOCK;
346  	fl->fl_type = type;
347  	fl->fl_end = OFFSET_MAX;
348  
349  	return fl;
350  }
351  
352  static int assign_type(struct file_lock *fl, long type)
353  {
354  	switch (type) {
355  	case F_RDLCK:
356  	case F_WRLCK:
357  	case F_UNLCK:
358  		fl->fl_type = type;
359  		break;
360  	default:
361  		return -EINVAL;
362  	}
363  	return 0;
364  }
365  
366  static int flock64_to_posix_lock(struct file *filp, struct file_lock *fl,
367  				 struct flock64 *l)
368  {
369  	switch (l->l_whence) {
370  	case SEEK_SET:
371  		fl->fl_start = 0;
372  		break;
373  	case SEEK_CUR:
374  		fl->fl_start = filp->f_pos;
375  		break;
376  	case SEEK_END:
377  		fl->fl_start = i_size_read(file_inode(filp));
378  		break;
379  	default:
380  		return -EINVAL;
381  	}
382  	if (l->l_start > OFFSET_MAX - fl->fl_start)
383  		return -EOVERFLOW;
384  	fl->fl_start += l->l_start;
385  	if (fl->fl_start < 0)
386  		return -EINVAL;
387  
388  	/* POSIX-1996 leaves the case l->l_len < 0 undefined;
389  	   POSIX-2001 defines it. */
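	/*
	 * Worked example of the negative-length case below: with a computed
	 * fl_start of 100 and l_len == -10, the locked range becomes bytes
	 * 90..99, i.e. fl_end = 99 and fl_start is pulled back to 90.
	 */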
390  	if (l->l_len > 0) {
391  		if (l->l_len - 1 > OFFSET_MAX - fl->fl_start)
392  			return -EOVERFLOW;
393  		fl->fl_end = fl->fl_start + l->l_len - 1;
394  
395  	} else if (l->l_len < 0) {
396  		if (fl->fl_start + l->l_len < 0)
397  			return -EINVAL;
398  		fl->fl_end = fl->fl_start - 1;
399  		fl->fl_start += l->l_len;
400  	} else
401  		fl->fl_end = OFFSET_MAX;
402  
403  	fl->fl_owner = current->files;
404  	fl->fl_pid = current->tgid;
405  	fl->fl_file = filp;
406  	fl->fl_flags = FL_POSIX;
407  	fl->fl_ops = NULL;
408  	fl->fl_lmops = NULL;
409  
410  	return assign_type(fl, l->l_type);
411  }
412  
413  /* Verify a "struct flock" and copy it to a "struct file_lock" as a POSIX
414   * style lock.
415   */
416  static int flock_to_posix_lock(struct file *filp, struct file_lock *fl,
417  			       struct flock *l)
418  {
419  	struct flock64 ll = {
420  		.l_type = l->l_type,
421  		.l_whence = l->l_whence,
422  		.l_start = l->l_start,
423  		.l_len = l->l_len,
424  	};
425  
426  	return flock64_to_posix_lock(filp, fl, &ll);
427  }
428  
429  /* default lease lock manager operations */
430  static bool
431  lease_break_callback(struct file_lock *fl)
432  {
433  	kill_fasync(&fl->fl_fasync, SIGIO, POLL_MSG);
434  	return false;
435  }
436  
437  static void
438  lease_setup(struct file_lock *fl, void **priv)
439  {
440  	struct file *filp = fl->fl_file;
441  	struct fasync_struct *fa = *priv;
442  
443  	/*
444  	 * fasync_insert_entry() returns the old entry if any. If there was no
445  	 * old entry, then it used "priv" and inserted it into the fasync list.
446  	 * Clear the pointer to indicate that it shouldn't be freed.
447  	 */
448  	if (!fasync_insert_entry(fa->fa_fd, filp, &fl->fl_fasync, fa))
449  		*priv = NULL;
450  
451  	__f_setown(filp, task_pid(current), PIDTYPE_PID, 0);
452  }
453  
454  static const struct lock_manager_operations lease_manager_ops = {
455  	.lm_break = lease_break_callback,
456  	.lm_change = lease_modify,
457  	.lm_setup = lease_setup,
458  };
459  
460  /*
461   * Initialize a lease, use the default lock manager operations
462   */
463  static int lease_init(struct file *filp, long type, struct file_lock *fl)
464  {
465  	if (assign_type(fl, type) != 0)
466  		return -EINVAL;
467  
468  	fl->fl_owner = filp;
469  	fl->fl_pid = current->tgid;
470  
471  	fl->fl_file = filp;
472  	fl->fl_flags = FL_LEASE;
473  	fl->fl_start = 0;
474  	fl->fl_end = OFFSET_MAX;
475  	fl->fl_ops = NULL;
476  	fl->fl_lmops = &lease_manager_ops;
477  	return 0;
478  }
479  
480  /* Allocate a file_lock initialised to this type of lease */
481  static struct file_lock *lease_alloc(struct file *filp, long type)
482  {
483  	struct file_lock *fl = locks_alloc_lock();
484  	int error = -ENOMEM;
485  
486  	if (fl == NULL)
487  		return ERR_PTR(error);
488  
489  	error = lease_init(filp, type, fl);
490  	if (error) {
491  		locks_free_lock(fl);
492  		return ERR_PTR(error);
493  	}
494  	return fl;
495  }
496  
497  /* Check if two locks overlap each other.
498   */
499  static inline int locks_overlap(struct file_lock *fl1, struct file_lock *fl2)
500  {
501  	return ((fl1->fl_end >= fl2->fl_start) &&
502  		(fl2->fl_end >= fl1->fl_start));
503  }
504  
505  /*
506   * Check whether two locks have the same owner.
507   */
508  static int posix_same_owner(struct file_lock *fl1, struct file_lock *fl2)
509  {
510  	if (fl1->fl_lmops && fl1->fl_lmops->lm_compare_owner)
511  		return fl2->fl_lmops == fl1->fl_lmops &&
512  			fl1->fl_lmops->lm_compare_owner(fl1, fl2);
513  	return fl1->fl_owner == fl2->fl_owner;
514  }
515  
516  /* Must be called with the i_lock held! */
517  static void locks_insert_global_locks(struct file_lock *fl)
518  {
519  	lg_local_lock(&file_lock_lglock);
520  	fl->fl_link_cpu = smp_processor_id();
521  	hlist_add_head(&fl->fl_link, this_cpu_ptr(&file_lock_list));
522  	lg_local_unlock(&file_lock_lglock);
523  }
524  
525  /* Must be called with the i_lock held! */
526  static void locks_delete_global_locks(struct file_lock *fl)
527  {
528  	/*
529  	 * Avoid taking lock if already unhashed. This is safe since this check
530  	 * is done while holding the i_lock, and new insertions into the list
531  	 * also require that it be held.
532  	 */
533  	if (hlist_unhashed(&fl->fl_link))
534  		return;
535  	lg_local_lock_cpu(&file_lock_lglock, fl->fl_link_cpu);
536  	hlist_del_init(&fl->fl_link);
537  	lg_local_unlock_cpu(&file_lock_lglock, fl->fl_link_cpu);
538  }
539  
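/*
 * Key used to index the blocked_hash: lock managers that provide
 * lm_owner_key() supply their own value, otherwise the fl_owner pointer
 * itself is used.
 */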
540  static unsigned long
541  posix_owner_key(struct file_lock *fl)
542  {
543  	if (fl->fl_lmops && fl->fl_lmops->lm_owner_key)
544  		return fl->fl_lmops->lm_owner_key(fl);
545  	return (unsigned long)fl->fl_owner;
546  }
547  
548  static void locks_insert_global_blocked(struct file_lock *waiter)
549  {
550  	hash_add(blocked_hash, &waiter->fl_link, posix_owner_key(waiter));
551  }
552  
553  static void locks_delete_global_blocked(struct file_lock *waiter)
554  {
555  	hash_del(&waiter->fl_link);
556  }
557  
558  /* Remove waiter from blocker's block list.
559   * When blocker ends up pointing to itself then the list is empty.
560   *
561   * Must be called with blocked_lock_lock held.
562   */
563  static void __locks_delete_block(struct file_lock *waiter)
564  {
565  	locks_delete_global_blocked(waiter);
566  	list_del_init(&waiter->fl_block);
567  	waiter->fl_next = NULL;
568  }
569  
570  static void locks_delete_block(struct file_lock *waiter)
571  {
572  	spin_lock(&blocked_lock_lock);
573  	__locks_delete_block(waiter);
574  	spin_unlock(&blocked_lock_lock);
575  }
576  
577  /* Insert waiter into blocker's block list.
578   * We use a circular list so that processes can be easily woken up in
579   * the order they blocked. The documentation doesn't require this but
580   * it seems like the reasonable thing to do.
581   *
582   * Must be called with both the i_lock and blocked_lock_lock held. The fl_block
583   * list itself is protected by the blocked_lock_lock, but by ensuring that the
584   * i_lock is also held on insertions we can avoid taking the blocked_lock_lock
585   * in some cases when we see that the fl_block list is empty.
586   */
587  static void __locks_insert_block(struct file_lock *blocker,
588  					struct file_lock *waiter)
589  {
590  	BUG_ON(!list_empty(&waiter->fl_block));
591  	waiter->fl_next = blocker;
592  	list_add_tail(&waiter->fl_block, &blocker->fl_block);
593  	if (IS_POSIX(blocker) && !IS_OFDLCK(blocker))
594  		locks_insert_global_blocked(waiter);
595  }
596  
597  /* Must be called with i_lock held. */
598  static void locks_insert_block(struct file_lock *blocker,
599  					struct file_lock *waiter)
600  {
601  	spin_lock(&blocked_lock_lock);
602  	__locks_insert_block(blocker, waiter);
603  	spin_unlock(&blocked_lock_lock);
604  }
605  
606  /*
607   * Wake up processes blocked waiting for blocker.
608   *
609   * Must be called with the inode->i_lock held!
610   */
611  static void locks_wake_up_blocks(struct file_lock *blocker)
612  {
613  	/*
614  	 * Avoid taking global lock if list is empty. This is safe since new
615  	 * blocked requests are only added to the list under the i_lock, and
616  	 * the i_lock is always held here. Note that removal from the fl_block
617  	 * list does not require the i_lock, so we must recheck list_empty()
618  	 * after acquiring the blocked_lock_lock.
619  	 */
620  	if (list_empty(&blocker->fl_block))
621  		return;
622  
623  	spin_lock(&blocked_lock_lock);
624  	while (!list_empty(&blocker->fl_block)) {
625  		struct file_lock *waiter;
626  
627  		waiter = list_first_entry(&blocker->fl_block,
628  				struct file_lock, fl_block);
629  		__locks_delete_block(waiter);
630  		if (waiter->fl_lmops && waiter->fl_lmops->lm_notify)
631  			waiter->fl_lmops->lm_notify(waiter);
632  		else
633  			wake_up(&waiter->fl_wait);
634  	}
635  	spin_unlock(&blocked_lock_lock);
636  }
637  
638  /* Insert file lock fl into an inode's lock list at the position indicated
639   * by pos. At the same time add the lock to the global file lock list.
640   *
641   * Must be called with the i_lock held!
642   */
643  static void locks_insert_lock(struct file_lock **pos, struct file_lock *fl)
644  {
645  	fl->fl_nspid = get_pid(task_tgid(current));
646  
647  	/* insert into file's list */
648  	fl->fl_next = *pos;
649  	*pos = fl;
650  
651  	locks_insert_global_locks(fl);
652  }
653  
654  /**
655   * locks_unlink_lock - Unlink a lock from all lists without freeing it
656   * @thisfl_p: pointer that points to the fl_next field of the previous
657   * 	      inode->i_flock list entry
658   *
659   * Unlink a lock from all lists and free the namespace reference, but don't
660   * free it yet. Wake up processes that are blocked waiting for this lock and
661   * notify the FS that the lock has been cleared.
662   *
663   * Must be called with the i_lock held!
664   */
665  static void locks_unlink_lock(struct file_lock **thisfl_p)
666  {
667  	struct file_lock *fl = *thisfl_p;
668  
669  	locks_delete_global_locks(fl);
670  
671  	*thisfl_p = fl->fl_next;
672  	fl->fl_next = NULL;
673  
674  	if (fl->fl_nspid) {
675  		put_pid(fl->fl_nspid);
676  		fl->fl_nspid = NULL;
677  	}
678  
679  	locks_wake_up_blocks(fl);
680  }
681  
682  /*
683   * Unlink a lock from all lists and free it.
684   *
685   * Must be called with i_lock held!
686   */
687  static void locks_delete_lock(struct file_lock **thisfl_p,
688  			      struct list_head *dispose)
689  {
690  	struct file_lock *fl = *thisfl_p;
691  
692  	locks_unlink_lock(thisfl_p);
693  	if (dispose)
694  		list_add(&fl->fl_block, dispose);
695  	else
696  		locks_free_lock(fl);
697  }
698  
699  /* Determine if lock sys_fl blocks lock caller_fl. Common functionality
700   * checks for shared/exclusive status of overlapping locks.
701   */
702  static int locks_conflict(struct file_lock *caller_fl, struct file_lock *sys_fl)
703  {
704  	if (sys_fl->fl_type == F_WRLCK)
705  		return 1;
706  	if (caller_fl->fl_type == F_WRLCK)
707  		return 1;
708  	return 0;
709  }
710  
711  /* Determine if lock sys_fl blocks lock caller_fl. POSIX-specific
712   * checking before calling locks_conflict().
713   */
714  static int posix_locks_conflict(struct file_lock *caller_fl, struct file_lock *sys_fl)
715  {
716  	/* POSIX locks owned by the same process do not conflict with
717  	 * each other.
718  	 */
719  	if (!IS_POSIX(sys_fl) || posix_same_owner(caller_fl, sys_fl))
720  		return (0);
721  
722  	/* Check whether they overlap */
723  	if (!locks_overlap(caller_fl, sys_fl))
724  		return 0;
725  
726  	return (locks_conflict(caller_fl, sys_fl));
727  }
728  
729  /* Determine if lock sys_fl blocks lock caller_fl. FLOCK-specific
730   * checking before calling locks_conflict().
731   */
732  static int flock_locks_conflict(struct file_lock *caller_fl, struct file_lock *sys_fl)
733  {
734  	/* FLOCK locks referring to the same filp do not conflict with
735  	 * each other.
736  	 */
737  	if (!IS_FLOCK(sys_fl) || (caller_fl->fl_file == sys_fl->fl_file))
738  		return (0);
739  	if ((caller_fl->fl_type & LOCK_MAND) || (sys_fl->fl_type & LOCK_MAND))
740  		return 0;
741  
742  	return (locks_conflict(caller_fl, sys_fl));
743  }
744  
745  void
746  posix_test_lock(struct file *filp, struct file_lock *fl)
747  {
748  	struct file_lock *cfl;
749  	struct inode *inode = file_inode(filp);
750  
751  	spin_lock(&inode->i_lock);
752  	for (cfl = file_inode(filp)->i_flock; cfl; cfl = cfl->fl_next) {
753  		if (!IS_POSIX(cfl))
754  			continue;
755  		if (posix_locks_conflict(fl, cfl))
756  			break;
757  	}
758  	if (cfl) {
759  		locks_copy_conflock(fl, cfl);
760  		if (cfl->fl_nspid)
761  			fl->fl_pid = pid_vnr(cfl->fl_nspid);
762  	} else
763  		fl->fl_type = F_UNLCK;
764  	spin_unlock(&inode->i_lock);
765  	return;
766  }
767  EXPORT_SYMBOL(posix_test_lock);
768  
769  /*
770   * Deadlock detection:
771   *
772   * We attempt to detect deadlocks that are due purely to posix file
773   * locks.
774   *
775   * We assume that a task can be waiting for at most one lock at a time.
776   * So for any acquired lock, the process holding that lock may be
777   * waiting on at most one other lock.  That lock in turns may be held by
778   * someone waiting for at most one other lock.  Given a requested lock
779   * caller_fl which is about to wait for a conflicting lock block_fl, we
780   * follow this chain of waiters to ensure we are not about to create a
781   * cycle.
782   *
783   * Since we do this before we ever put a process to sleep on a lock, we
784   * are ensured that there is never a cycle; that is what guarantees that
785   * the while() loop in posix_locks_deadlock() eventually completes.
786   *
787   * Note: the above assumption may not be true when handling lock
788   * requests from a broken NFS client. It may also fail in the presence
789   * of tasks (such as posix threads) sharing the same open file table.
790   * To handle those cases, we just bail out after a few iterations.
791   *
792   * For FL_OFDLCK locks, the owner is the filp, not the files_struct.
793   * Because the owner is not even nominally tied to a thread of
794   * execution, the deadlock detection below can't reasonably work well. Just
795   * skip it for those.
796   *
797   * In principle, we could do a more limited deadlock detection on FL_OFDLCK
798   * locks that just checks for the case where two tasks are attempting to
799   * upgrade from read to write locks on the same inode.
800   */
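
/*
 * Concrete example of the cycle the walk below catches: task A holds a lock
 * on range R1 and is sleeping on a request for range R2, while task B holds
 * R2 and now requests R1. Following B's prospective blocker (A) to the lock
 * that A is itself waiting on leads back to a lock owned by B, so the
 * request fails with -EDEADLK instead of being queued.
 */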
801  
802  #define MAX_DEADLK_ITERATIONS 10
803  
804  /* Find a lock that the owner of the given block_fl is blocking on. */
805  static struct file_lock *what_owner_is_waiting_for(struct file_lock *block_fl)
806  {
807  	struct file_lock *fl;
808  
809  	hash_for_each_possible(blocked_hash, fl, fl_link, posix_owner_key(block_fl)) {
810  		if (posix_same_owner(fl, block_fl))
811  			return fl->fl_next;
812  	}
813  	return NULL;
814  }
815  
816  /* Must be called with the blocked_lock_lock held! */
817  static int posix_locks_deadlock(struct file_lock *caller_fl,
818  				struct file_lock *block_fl)
819  {
820  	int i = 0;
821  
822  	/*
823  	 * This deadlock detector can't reasonably detect deadlocks with
824  	 * FL_OFDLCK locks, since they aren't owned by a process, per se.
825  	 */
826  	if (IS_OFDLCK(caller_fl))
827  		return 0;
828  
829  	while ((block_fl = what_owner_is_waiting_for(block_fl))) {
830  		if (i++ > MAX_DEADLK_ITERATIONS)
831  			return 0;
832  		if (posix_same_owner(caller_fl, block_fl))
833  			return 1;
834  	}
835  	return 0;
836  }
837  
838  /* Try to create a FLOCK lock on filp. We always insert new FLOCK locks
839   * after any leases, but before any posix locks.
840   *
841   * Note that if called with an FL_EXISTS argument, the caller may determine
842   * whether or not a lock was successfully freed by testing the return
843   * value for -ENOENT.
844   */
845  static int flock_lock_file(struct file *filp, struct file_lock *request)
846  {
847  	struct file_lock *new_fl = NULL;
848  	struct file_lock **before;
849  	struct inode * inode = file_inode(filp);
850  	int error = 0;
851  	int found = 0;
852  	LIST_HEAD(dispose);
853  
854  	if (!(request->fl_flags & FL_ACCESS) && (request->fl_type != F_UNLCK)) {
855  		new_fl = locks_alloc_lock();
856  		if (!new_fl)
857  			return -ENOMEM;
858  	}
859  
860  	spin_lock(&inode->i_lock);
861  	if (request->fl_flags & FL_ACCESS)
862  		goto find_conflict;
863  
864  	for_each_lock(inode, before) {
865  		struct file_lock *fl = *before;
866  		if (IS_POSIX(fl))
867  			break;
868  		if (IS_LEASE(fl))
869  			continue;
870  		if (filp != fl->fl_file)
871  			continue;
872  		if (request->fl_type == fl->fl_type)
873  			goto out;
874  		found = 1;
875  		locks_delete_lock(before, &dispose);
876  		break;
877  	}
878  
879  	if (request->fl_type == F_UNLCK) {
880  		if ((request->fl_flags & FL_EXISTS) && !found)
881  			error = -ENOENT;
882  		goto out;
883  	}
884  
885  	/*
886  	 * If a higher-priority process was blocked on the old file lock,
887  	 * give it the opportunity to lock the file.
888  	 */
889  	if (found) {
890  		spin_unlock(&inode->i_lock);
891  		cond_resched();
892  		spin_lock(&inode->i_lock);
893  	}
894  
895  find_conflict:
896  	for_each_lock(inode, before) {
897  		struct file_lock *fl = *before;
898  		if (IS_POSIX(fl))
899  			break;
900  		if (IS_LEASE(fl))
901  			continue;
902  		if (!flock_locks_conflict(request, fl))
903  			continue;
904  		error = -EAGAIN;
905  		if (!(request->fl_flags & FL_SLEEP))
906  			goto out;
907  		error = FILE_LOCK_DEFERRED;
908  		locks_insert_block(fl, request);
909  		goto out;
910  	}
911  	if (request->fl_flags & FL_ACCESS)
912  		goto out;
913  	locks_copy_lock(new_fl, request);
914  	locks_insert_lock(before, new_fl);
915  	new_fl = NULL;
916  	error = 0;
917  
918  out:
919  	spin_unlock(&inode->i_lock);
920  	if (new_fl)
921  		locks_free_lock(new_fl);
922  	locks_dispose_list(&dispose);
923  	return error;
924  }
925  
926  static int __posix_lock_file(struct inode *inode, struct file_lock *request, struct file_lock *conflock)
927  {
928  	struct file_lock *fl;
929  	struct file_lock *new_fl = NULL;
930  	struct file_lock *new_fl2 = NULL;
931  	struct file_lock *left = NULL;
932  	struct file_lock *right = NULL;
933  	struct file_lock **before;
934  	int error;
935  	bool added = false;
936  	LIST_HEAD(dispose);
937  
938  	/*
939  	 * We may need two file_lock structures for this operation,
940  	 * so we get them in advance to avoid races.
941  	 *
942  	 * In some cases we can be sure that no new locks will be needed.
943  	 */
944  	if (!(request->fl_flags & FL_ACCESS) &&
945  	    (request->fl_type != F_UNLCK ||
946  	     request->fl_start != 0 || request->fl_end != OFFSET_MAX)) {
947  		new_fl = locks_alloc_lock();
948  		new_fl2 = locks_alloc_lock();
949  	}
950  
951  	spin_lock(&inode->i_lock);
952  	/*
953  	 * New lock request. Walk all POSIX locks and look for conflicts. If
954  	 * there are any, either return error or put the request on the
955  	 * blocker's list of waiters and the global blocked_hash.
956  	 */
957  	if (request->fl_type != F_UNLCK) {
958  		for_each_lock(inode, before) {
959  			fl = *before;
960  			if (!IS_POSIX(fl))
961  				continue;
962  			if (!posix_locks_conflict(request, fl))
963  				continue;
964  			if (conflock)
965  				locks_copy_conflock(conflock, fl);
966  			error = -EAGAIN;
967  			if (!(request->fl_flags & FL_SLEEP))
968  				goto out;
969  			/*
970  			 * Deadlock detection and insertion into the blocked
971  			 * locks list must be done while holding the same lock!
972  			 */
973  			error = -EDEADLK;
974  			spin_lock(&blocked_lock_lock);
975  			if (likely(!posix_locks_deadlock(request, fl))) {
976  				error = FILE_LOCK_DEFERRED;
977  				__locks_insert_block(fl, request);
978  			}
979  			spin_unlock(&blocked_lock_lock);
980  			goto out;
981  		}
982  	}
983  
984  	/* If we're just looking for a conflict, we're done. */
985  	error = 0;
986  	if (request->fl_flags & FL_ACCESS)
987  		goto out;
988  
989  	/*
990  	 * Find the first old lock with the same owner as the new lock.
991  	 */
992  
993  	before = &inode->i_flock;
994  
995  	/* First skip locks owned by other processes.  */
996  	while ((fl = *before) && (!IS_POSIX(fl) ||
997  				  !posix_same_owner(request, fl))) {
998  		before = &fl->fl_next;
999  	}
1000  
1001  	/* Process locks with this owner. */
1002  	while ((fl = *before) && posix_same_owner(request, fl)) {
1003  		/* Detect adjacent or overlapping regions (if same lock type)
1004  		 */
1005  		if (request->fl_type == fl->fl_type) {
1006  			/* In all comparisons of start vs end, use
1007  			 * "start - 1" rather than "end + 1". If end
1008  			 * is OFFSET_MAX, end + 1 will become negative.
1009  			 */
1010  			if (fl->fl_end < request->fl_start - 1)
1011  				goto next_lock;
1012  			/* If the next lock in the list has entirely bigger
1013  			 * addresses than the new one, insert the lock here.
1014  			 */
1015  			if (fl->fl_start - 1 > request->fl_end)
1016  				break;
1017  
1018  			/* If we come here, the new and old lock are of the
1019  			 * same type and adjacent or overlapping. Make one
1020  			 * lock yielding from the lower start address of both
1021  			 * locks to the higher end address.
1022  			 */
1023  			if (fl->fl_start > request->fl_start)
1024  				fl->fl_start = request->fl_start;
1025  			else
1026  				request->fl_start = fl->fl_start;
1027  			if (fl->fl_end < request->fl_end)
1028  				fl->fl_end = request->fl_end;
1029  			else
1030  				request->fl_end = fl->fl_end;
1031  			if (added) {
1032  				locks_delete_lock(before, &dispose);
1033  				continue;
1034  			}
1035  			request = fl;
1036  			added = true;
1037  		}
1038  		else {
1039  			/* Processing for different lock types is a bit
1040  			 * more complex.
1041  			 */
1042  			if (fl->fl_end < request->fl_start)
1043  				goto next_lock;
1044  			if (fl->fl_start > request->fl_end)
1045  				break;
1046  			if (request->fl_type == F_UNLCK)
1047  				added = true;
1048  			if (fl->fl_start < request->fl_start)
1049  				left = fl;
1050  			/* If the next lock in the list has a higher end
1051  			 * address than the new one, insert the new one here.
1052  			 */
1053  			if (fl->fl_end > request->fl_end) {
1054  				right = fl;
1055  				break;
1056  			}
1057  			if (fl->fl_start >= request->fl_start) {
1058  				/* The new lock completely replaces an old
1059  				 * one (This may happen several times).
1060  				 */
1061  				if (added) {
1062  					locks_delete_lock(before, &dispose);
1063  					continue;
1064  				}
1065  				/*
1066  				 * Replace the old lock with new_fl, and
1067  				 * remove the old one. It's safe to do the
1068  				 * insert here since we know that we won't be
1069  				 * using new_fl later, and that the lock is
1070  				 * just replacing an existing lock.
1071  				 */
1072  				error = -ENOLCK;
1073  				if (!new_fl)
1074  					goto out;
1075  				locks_copy_lock(new_fl, request);
1076  				request = new_fl;
1077  				new_fl = NULL;
1078  				locks_delete_lock(before, &dispose);
1079  				locks_insert_lock(before, request);
1080  				added = true;
1081  			}
1082  		}
1083  		/* Go on to next lock.
1084  		 */
1085  	next_lock:
1086  		before = &fl->fl_next;
1087  	}
1088  
1089  	/*
1090  	 * The above code only modifies existing locks in case of merging or
1091  	 * replacing. If new lock(s) need to be inserted all modifications are
1092  	 * done below this, so it's still safe to bail out here.
1093  	 */
1094  	error = -ENOLCK; /* "no luck" */
1095  	if (right && left == right && !new_fl2)
1096  		goto out;
1097  
1098  	error = 0;
1099  	if (!added) {
1100  		if (request->fl_type == F_UNLCK) {
1101  			if (request->fl_flags & FL_EXISTS)
1102  				error = -ENOENT;
1103  			goto out;
1104  		}
1105  
1106  		if (!new_fl) {
1107  			error = -ENOLCK;
1108  			goto out;
1109  		}
1110  		locks_copy_lock(new_fl, request);
1111  		locks_insert_lock(before, new_fl);
1112  		new_fl = NULL;
1113  	}
1114  	if (right) {
1115  		if (left == right) {
1116  			/* The new lock breaks the old one in two pieces,
1117  			 * so we have to use the second new lock.
1118  			 */
1119  			left = new_fl2;
1120  			new_fl2 = NULL;
1121  			locks_copy_lock(left, right);
1122  			locks_insert_lock(before, left);
1123  		}
1124  		right->fl_start = request->fl_end + 1;
1125  		locks_wake_up_blocks(right);
1126  	}
1127  	if (left) {
1128  		left->fl_end = request->fl_start - 1;
1129  		locks_wake_up_blocks(left);
1130  	}
1131   out:
1132  	spin_unlock(&inode->i_lock);
1133  	/*
1134  	 * Free any unused locks.
1135  	 */
1136  	if (new_fl)
1137  		locks_free_lock(new_fl);
1138  	if (new_fl2)
1139  		locks_free_lock(new_fl2);
1140  	locks_dispose_list(&dispose);
1141  	return error;
1142  }
1143  
1144  /**
1145   * posix_lock_file - Apply a POSIX-style lock to a file
1146   * @filp: The file to apply the lock to
1147   * @fl: The lock to be applied
1148   * @conflock: Place to return a copy of the conflicting lock, if found.
1149   *
1150   * Add a POSIX style lock to a file.
1151   * We merge adjacent & overlapping locks whenever possible.
1152   * POSIX locks are sorted by owner task, then by starting address
1153   *
1154   * Note that if called with an FL_EXISTS argument, the caller may determine
1155   * whether or not a lock was successfully freed by testing the return
1156   * value for -ENOENT.
1157   */
1158  int posix_lock_file(struct file *filp, struct file_lock *fl,
1159  			struct file_lock *conflock)
1160  {
1161  	return __posix_lock_file(file_inode(filp), fl, conflock);
1162  }
1163  EXPORT_SYMBOL(posix_lock_file);
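
/*
 * Worked example of the splitting behaviour in __posix_lock_file(): if a
 * process holds a write lock on bytes 0..99 and then unlocks bytes 40..59,
 * the pre-allocated new_fl2 is inserted to cover the left fragment 0..39,
 * the existing lock is trimmed to the right fragment 60..99, and any
 * waiters blocked on either fragment are woken up.
 */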
1164  
1165  /**
1166   * posix_lock_file_wait - Apply a POSIX-style lock to a file
1167   * @filp: The file to apply the lock to
1168   * @fl: The lock to be applied
1169   *
1170   * Add a POSIX style lock to a file.
1171   * We merge adjacent & overlapping locks whenever possible.
1172   * POSIX locks are sorted by owner task, then by starting address
1173   */
1174  int posix_lock_file_wait(struct file *filp, struct file_lock *fl)
1175  {
1176  	int error;
1177  	might_sleep ();
1178  	for (;;) {
1179  		error = posix_lock_file(filp, fl, NULL);
1180  		if (error != FILE_LOCK_DEFERRED)
1181  			break;
1182  		error = wait_event_interruptible(fl->fl_wait, !fl->fl_next);
1183  		if (!error)
1184  			continue;
1185  
1186  		locks_delete_block(fl);
1187  		break;
1188  	}
1189  	return error;
1190  }
1191  EXPORT_SYMBOL(posix_lock_file_wait);
1192  
1193  /**
1194   * locks_mandatory_locked - Check for an active lock
1195   * @file: the file to check
1196   *
1197   * Searches the inode's list of locks to find any POSIX locks which conflict.
1198   * This function is called from locks_verify_locked() only.
1199   */
1200  int locks_mandatory_locked(struct file *file)
1201  {
1202  	struct inode *inode = file_inode(file);
1203  	struct file_lock *fl;
1204  
1205  	/*
1206  	 * Search the lock list for this inode for any POSIX locks.
1207  	 */
1208  	spin_lock(&inode->i_lock);
1209  	for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next) {
1210  		if (!IS_POSIX(fl))
1211  			continue;
1212  		if (fl->fl_owner != current->files &&
1213  		    fl->fl_owner != file)
1214  			break;
1215  	}
1216  	spin_unlock(&inode->i_lock);
1217  	return fl ? -EAGAIN : 0;
1218  }
1219  
1220  /**
1221   * locks_mandatory_area - Check for a conflicting lock
1222   * @read_write: %FLOCK_VERIFY_WRITE for exclusive access, %FLOCK_VERIFY_READ
1223   *		for shared
1224   * @inode:      the file to check
1225   * @filp:       how the file was opened (if it was)
1226   * @offset:     start of area to check
1227   * @count:      length of area to check
1228   *
1229   * Searches the inode's list of locks to find any POSIX locks which conflict.
1230   * This function is called from rw_verify_area() and
1231   * locks_verify_truncate().
1232   */
1233  int locks_mandatory_area(int read_write, struct inode *inode,
1234  			 struct file *filp, loff_t offset,
1235  			 size_t count)
1236  {
1237  	struct file_lock fl;
1238  	int error;
1239  	bool sleep = false;
1240  
1241  	locks_init_lock(&fl);
1242  	fl.fl_pid = current->tgid;
1243  	fl.fl_file = filp;
1244  	fl.fl_flags = FL_POSIX | FL_ACCESS;
1245  	if (filp && !(filp->f_flags & O_NONBLOCK))
1246  		sleep = true;
1247  	fl.fl_type = (read_write == FLOCK_VERIFY_WRITE) ? F_WRLCK : F_RDLCK;
1248  	fl.fl_start = offset;
1249  	fl.fl_end = offset + count - 1;
1250  
1251  	for (;;) {
1252  		if (filp) {
1253  			fl.fl_owner = filp;
1254  			fl.fl_flags &= ~FL_SLEEP;
1255  			error = __posix_lock_file(inode, &fl, NULL);
1256  			if (!error)
1257  				break;
1258  		}
1259  
1260  		if (sleep)
1261  			fl.fl_flags |= FL_SLEEP;
1262  		fl.fl_owner = current->files;
1263  		error = __posix_lock_file(inode, &fl, NULL);
1264  		if (error != FILE_LOCK_DEFERRED)
1265  			break;
1266  		error = wait_event_interruptible(fl.fl_wait, !fl.fl_next);
1267  		if (!error) {
1268  			/*
1269  			 * If we've been sleeping someone might have
1270  			 * changed the permissions behind our back.
1271  			 */
1272  			if (__mandatory_lock(inode))
1273  				continue;
1274  		}
1275  
1276  		locks_delete_block(&fl);
1277  		break;
1278  	}
1279  
1280  	return error;
1281  }
1282  
1283  EXPORT_SYMBOL(locks_mandatory_area);
1284  
1285  static void lease_clear_pending(struct file_lock *fl, int arg)
1286  {
1287  	switch (arg) {
1288  	case F_UNLCK:
1289  		fl->fl_flags &= ~FL_UNLOCK_PENDING;
1290  		/* fall through: */
1291  	case F_RDLCK:
1292  		fl->fl_flags &= ~FL_DOWNGRADE_PENDING;
1293  	}
1294  }
1295  
1296  /* We already had a lease on this file; just change its type */
1297  int lease_modify(struct file_lock **before, int arg, struct list_head *dispose)
1298  {
1299  	struct file_lock *fl = *before;
1300  	int error = assign_type(fl, arg);
1301  
1302  	if (error)
1303  		return error;
1304  	lease_clear_pending(fl, arg);
1305  	locks_wake_up_blocks(fl);
1306  	if (arg == F_UNLCK) {
1307  		struct file *filp = fl->fl_file;
1308  
1309  		f_delown(filp);
1310  		filp->f_owner.signum = 0;
1311  		fasync_helper(0, fl->fl_file, 0, &fl->fl_fasync);
1312  		if (fl->fl_fasync != NULL) {
1313  			printk(KERN_ERR "locks_delete_lock: fasync == %p\n", fl->fl_fasync);
1314  			fl->fl_fasync = NULL;
1315  		}
1316  		locks_delete_lock(before, dispose);
1317  	}
1318  	return 0;
1319  }
1320  EXPORT_SYMBOL(lease_modify);
1321  
1322  static bool past_time(unsigned long then)
1323  {
1324  	if (!then)
1325  		/* 0 is a special value meaning "this never expires": */
1326  		return false;
1327  	return time_after(jiffies, then);
1328  }
1329  
1330  static void time_out_leases(struct inode *inode, struct list_head *dispose)
1331  {
1332  	struct file_lock **before;
1333  	struct file_lock *fl;
1334  
1335  	lockdep_assert_held(&inode->i_lock);
1336  
1337  	before = &inode->i_flock;
1338  	while ((fl = *before) && IS_LEASE(fl) && lease_breaking(fl)) {
1339  		trace_time_out_leases(inode, fl);
1340  		if (past_time(fl->fl_downgrade_time))
1341  			lease_modify(before, F_RDLCK, dispose);
1342  		if (past_time(fl->fl_break_time))
1343  			lease_modify(before, F_UNLCK, dispose);
1344  		if (fl == *before)	/* lease_modify may have freed fl */
1345  			before = &fl->fl_next;
1346  	}
1347  }
1348  
1349  static bool leases_conflict(struct file_lock *lease, struct file_lock *breaker)
1350  {
1351  	if ((breaker->fl_flags & FL_DELEG) && (lease->fl_flags & FL_LEASE))
1352  		return false;
1353  	return locks_conflict(breaker, lease);
1354  }
1355  
1356  static bool
1357  any_leases_conflict(struct inode *inode, struct file_lock *breaker)
1358  {
1359  	struct file_lock *fl;
1360  
1361  	lockdep_assert_held(&inode->i_lock);
1362  
1363  	for (fl = inode->i_flock ; fl && IS_LEASE(fl); fl = fl->fl_next) {
1364  		if (leases_conflict(fl, breaker))
1365  			return true;
1366  	}
1367  	return false;
1368  }
1369  
1370  /**
1371   *	__break_lease	-	revoke all outstanding leases on file
1372   *	@inode: the inode of the file to return
1373   *	@mode: O_RDONLY: break only write leases; O_WRONLY or O_RDWR:
1374   *	    break all leases
1375   *	@type: FL_LEASE: break leases and delegations; FL_DELEG: break
1376   *	    only delegations
1377   *
1378   *	break_lease (inlined for speed) has checked there already is at least
1379   *	some kind of lock (maybe a lease) on this file.  Leases are broken on
1380   *	a call to open() or truncate().  This function can sleep unless you
1381   *	specified %O_NONBLOCK to your open().
1382   */
1383  int __break_lease(struct inode *inode, unsigned int mode, unsigned int type)
1384  {
1385  	int error = 0;
1386  	struct file_lock *new_fl;
1387  	struct file_lock *fl, **before;
1388  	unsigned long break_time;
1389  	int want_write = (mode & O_ACCMODE) != O_RDONLY;
1390  	LIST_HEAD(dispose);
1391  
1392  	new_fl = lease_alloc(NULL, want_write ? F_WRLCK : F_RDLCK);
1393  	if (IS_ERR(new_fl))
1394  		return PTR_ERR(new_fl);
1395  	new_fl->fl_flags = type;
1396  
1397  	spin_lock(&inode->i_lock);
1398  
1399  	time_out_leases(inode, &dispose);
1400  
1401  	if (!any_leases_conflict(inode, new_fl))
1402  		goto out;
1403  
1404  	break_time = 0;
1405  	if (lease_break_time > 0) {
1406  		break_time = jiffies + lease_break_time * HZ;
1407  		if (break_time == 0)
1408  			break_time++;	/* so that 0 means no break time */
1409  	}
1410  
1411  	for (before = &inode->i_flock;
1412  			((fl = *before) != NULL) && IS_LEASE(fl);
1413  			before = &fl->fl_next) {
1414  		if (!leases_conflict(fl, new_fl))
1415  			continue;
1416  		if (want_write) {
1417  			if (fl->fl_flags & FL_UNLOCK_PENDING)
1418  				continue;
1419  			fl->fl_flags |= FL_UNLOCK_PENDING;
1420  			fl->fl_break_time = break_time;
1421  		} else {
1422  			if (lease_breaking(inode->i_flock))
1423  				continue;
1424  			fl->fl_flags |= FL_DOWNGRADE_PENDING;
1425  			fl->fl_downgrade_time = break_time;
1426  		}
1427  		if (fl->fl_lmops->lm_break(fl))
1428  			locks_delete_lock(before, &dispose);
1429  	}
1430  
1431  	fl = inode->i_flock;
1432  	if (!fl || !IS_LEASE(fl))
1433  		goto out;
1434  
1435  	if (mode & O_NONBLOCK) {
1436  		trace_break_lease_noblock(inode, new_fl);
1437  		error = -EWOULDBLOCK;
1438  		goto out;
1439  	}
1440  
1441  restart:
1442  	break_time = inode->i_flock->fl_break_time;
1443  	if (break_time != 0)
1444  		break_time -= jiffies;
1445  	if (break_time == 0)
1446  		break_time++;
1447  	locks_insert_block(inode->i_flock, new_fl);
1448  	trace_break_lease_block(inode, new_fl);
1449  	spin_unlock(&inode->i_lock);
1450  	locks_dispose_list(&dispose);
1451  	error = wait_event_interruptible_timeout(new_fl->fl_wait,
1452  						!new_fl->fl_next, break_time);
1453  	spin_lock(&inode->i_lock);
1454  	trace_break_lease_unblock(inode, new_fl);
1455  	locks_delete_block(new_fl);
1456  	if (error >= 0) {
1457  		/*
1458  		 * Wait for the next conflicting lease that has not been
1459  		 * broken yet
1460  		 */
1461  		if (error == 0)
1462  			time_out_leases(inode, &dispose);
1463  		if (any_leases_conflict(inode, new_fl))
1464  			goto restart;
1465  
1466  		error = 0;
1467  	}
1468  
1469  out:
1470  	spin_unlock(&inode->i_lock);
1471  	locks_dispose_list(&dispose);
1472  	locks_free_lock(new_fl);
1473  	return error;
1474  }
1475  
1476  EXPORT_SYMBOL(__break_lease);
1477  
1478  /**
1479   *	lease_get_mtime - get the last modified time of an inode
1480   *	@inode: the inode
1481   *      @time:  pointer to a timespec which will contain the last modified time
1482   *
1483   * This is to force NFS clients to flush their caches for files with
1484   * exclusive leases.  The justification is that if someone has an
1485   * exclusive lease, then they could be modifying it.
1486   */
1487  void lease_get_mtime(struct inode *inode, struct timespec *time)
1488  {
1489  	bool has_lease = false;
1490  	struct file_lock *flock;
1491  
1492  	if (inode->i_flock) {
1493  		spin_lock(&inode->i_lock);
1494  		flock = inode->i_flock;
1495  		if (flock && IS_LEASE(flock) && (flock->fl_type == F_WRLCK))
1496  			has_lease = true;
1497  		spin_unlock(&inode->i_lock);
1498  	}
1499  
1500  	if (has_lease)
1501  		*time = current_fs_time(inode->i_sb);
1502  	else
1503  		*time = inode->i_mtime;
1504  }
1505  
1506  EXPORT_SYMBOL(lease_get_mtime);
1507  
1508  /**
1509   *	fcntl_getlease - Enquire what lease is currently active
1510   *	@filp: the file
1511   *
1512   *	The value returned by this function will be one of
1513   *	(if no lease break is pending):
1514   *
1515   *	%F_RDLCK to indicate a shared lease is held.
1516   *
1517   *	%F_WRLCK to indicate an exclusive lease is held.
1518   *
1519   *	%F_UNLCK to indicate no lease is held.
1520   *
1521   *	(if a lease break is pending):
1522   *
1523   *	%F_RDLCK to indicate an exclusive lease needs to be
1524   *		changed to a shared lease (or removed).
1525   *
1526   *	%F_UNLCK to indicate the lease needs to be removed.
1527   *
1528   *	XXX: sfr & willy disagree over whether F_INPROGRESS
1529   *	should be returned to userspace.
1530   */
1531  int fcntl_getlease(struct file *filp)
1532  {
1533  	struct file_lock *fl;
1534  	struct inode *inode = file_inode(filp);
1535  	int type = F_UNLCK;
1536  	LIST_HEAD(dispose);
1537  
1538  	spin_lock(&inode->i_lock);
1539  	time_out_leases(file_inode(filp), &dispose);
1540  	for (fl = file_inode(filp)->i_flock; fl && IS_LEASE(fl);
1541  			fl = fl->fl_next) {
1542  		if (fl->fl_file == filp) {
1543  			type = target_leasetype(fl);
1544  			break;
1545  		}
1546  	}
1547  	spin_unlock(&inode->i_lock);
1548  	locks_dispose_list(&dispose);
1549  	return type;
1550  }
1551  
1552  /**
1553   * check_conflicting_open - see if the given dentry points to a file that has
1554   * 			    an existing open that would conflict with the
1555   * 			    desired lease.
1556   * @dentry:	dentry to check
1557   * @arg:	type of lease that we're trying to acquire
1558   *
1559   * Check to see if there's an existing open fd on this file that would
1560   * conflict with the lease we're trying to set.
1561   */
1562  static int
1563  check_conflicting_open(const struct dentry *dentry, const long arg)
1564  {
1565  	int ret = 0;
1566  	struct inode *inode = dentry->d_inode;
1567  
1568  	if ((arg == F_RDLCK) && (atomic_read(&inode->i_writecount) > 0))
1569  		return -EAGAIN;
1570  
1571  	if ((arg == F_WRLCK) && ((d_count(dentry) > 1) ||
1572  	    (atomic_read(&inode->i_count) > 1)))
1573  		ret = -EAGAIN;
1574  
1575  	return ret;
1576  }
1577  
1578  static int
1579  generic_add_lease(struct file *filp, long arg, struct file_lock **flp, void **priv)
1580  {
1581  	struct file_lock *fl, **before, **my_before = NULL, *lease;
1582  	struct dentry *dentry = filp->f_path.dentry;
1583  	struct inode *inode = dentry->d_inode;
1584  	bool is_deleg = (*flp)->fl_flags & FL_DELEG;
1585  	int error;
1586  	LIST_HEAD(dispose);
1587  
1588  	lease = *flp;
1589  	trace_generic_add_lease(inode, lease);
1590  
1591  	/*
1592  	 * In the delegation case we need mutual exclusion with
1593  	 * a number of operations that take the i_mutex.  We trylock
1594  	 * because delegations are an optional optimization, and if
1595  	 * there's some chance of a conflict--we'd rather not
1596  	 * bother, maybe that's a sign this just isn't a good file to
1597  	 * hand out a delegation on.
1598  	 */
1599  	if (is_deleg && !mutex_trylock(&inode->i_mutex))
1600  		return -EAGAIN;
1601  
1602  	if (is_deleg && arg == F_WRLCK) {
1603  		/* Write delegations are not currently supported: */
1604  		mutex_unlock(&inode->i_mutex);
1605  		WARN_ON_ONCE(1);
1606  		return -EINVAL;
1607  	}
1608  
1609  	spin_lock(&inode->i_lock);
1610  	time_out_leases(inode, &dispose);
1611  	error = check_conflicting_open(dentry, arg);
1612  	if (error)
1613  		goto out;
1614  
1615  	/*
1616  	 * At this point, we know that if there is an exclusive
1617  	 * lease on this file, then we hold it on this filp
1618  	 * (otherwise our open of this file would have blocked).
1619  	 * And if we are trying to acquire an exclusive lease,
1620  	 * then the file is not open by anyone (including us)
1621  	 * except for this filp.
1622  	 */
1623  	error = -EAGAIN;
1624  	for (before = &inode->i_flock;
1625  			((fl = *before) != NULL) && IS_LEASE(fl);
1626  			before = &fl->fl_next) {
1627  		if (fl->fl_file == filp) {
1628  			my_before = before;
1629  			continue;
1630  		}
1631  		/*
1632  		 * No exclusive leases if someone else has a lease on
1633  		 * this file:
1634  		 */
1635  		if (arg == F_WRLCK)
1636  			goto out;
1637  		/*
1638  		 * Modifying our existing lease is OK, but not getting a
1639  		 * new lease if someone else is opening for write:
1640  		 */
1641  		if (fl->fl_flags & FL_UNLOCK_PENDING)
1642  			goto out;
1643  	}
1644  
1645  	if (my_before != NULL) {
1646  		lease = *my_before;
1647  		error = lease->fl_lmops->lm_change(my_before, arg, &dispose);
1648  		if (error)
1649  			goto out;
1650  		goto out_setup;
1651  	}
1652  
1653  	error = -EINVAL;
1654  	if (!leases_enable)
1655  		goto out;
1656  
1657  	locks_insert_lock(before, lease);
1658  	/*
1659  	 * The check in break_lease() is lockless. It's possible for another
1660  	 * open to race in after we did the earlier check for a conflicting
1661  	 * open but before the lease was inserted. Check again for a
1662  	 * conflicting open and cancel the lease if there is one.
1663  	 *
1664  	 * We also add a barrier here to ensure that the insertion of the lock
1665  	 * precedes these checks.
1666  	 */
1667  	smp_mb();
1668  	error = check_conflicting_open(dentry, arg);
1669  	if (error)
1670  		goto out_unlink;
1671  
1672  out_setup:
1673  	if (lease->fl_lmops->lm_setup)
1674  		lease->fl_lmops->lm_setup(lease, priv);
1675  out:
1676  	spin_unlock(&inode->i_lock);
1677  	locks_dispose_list(&dispose);
1678  	if (is_deleg)
1679  		mutex_unlock(&inode->i_mutex);
1680  	if (!error && !my_before)
1681  		*flp = NULL;
1682  	return error;
1683  out_unlink:
1684  	locks_unlink_lock(before);
1685  	goto out;
1686  }
1687  
1688  static int generic_delete_lease(struct file *filp)
1689  {
1690  	int error = -EAGAIN;
1691  	struct file_lock *fl, **before;
1692  	struct dentry *dentry = filp->f_path.dentry;
1693  	struct inode *inode = dentry->d_inode;
1694  	LIST_HEAD(dispose);
1695  
1696  	spin_lock(&inode->i_lock);
1697  	time_out_leases(inode, &dispose);
1698  	for (before = &inode->i_flock;
1699  			((fl = *before) != NULL) && IS_LEASE(fl);
1700  			before = &fl->fl_next) {
1701  		if (fl->fl_file == filp)
1702  			break;
1703  	}
1704  	trace_generic_delete_lease(inode, fl);
1705  	if (fl)
1706  		error = fl->fl_lmops->lm_change(before, F_UNLCK, &dispose);
1707  	spin_unlock(&inode->i_lock);
1708  	locks_dispose_list(&dispose);
1709  	return error;
1710  }
1711  
1712  /**
1713   *	generic_setlease	-	sets a lease on an open file
1714   *	@filp:	file pointer
1715   *	@arg:	type of lease to obtain
1716   *	@flp:	input - file_lock to use, output - file_lock inserted
1717   *	@priv:	private data for lm_setup (may be NULL if lm_setup
1718   *		doesn't require it)
1719   *
1720   *	The (input) flp->fl_lmops->lm_break function is required
1721   *	by break_lease().
1722   */
1723  int generic_setlease(struct file *filp, long arg, struct file_lock **flp,
1724  			void **priv)
1725  {
1726  	struct dentry *dentry = filp->f_path.dentry;
1727  	struct inode *inode = dentry->d_inode;
1728  	int error;
1729  
1730  	if ((!uid_eq(current_fsuid(), inode->i_uid)) && !capable(CAP_LEASE))
1731  		return -EACCES;
1732  	if (!S_ISREG(inode->i_mode))
1733  		return -EINVAL;
1734  	error = security_file_lock(filp, arg);
1735  	if (error)
1736  		return error;
1737  
1738  	switch (arg) {
1739  	case F_UNLCK:
1740  		return generic_delete_lease(filp);
1741  	case F_RDLCK:
1742  	case F_WRLCK:
1743  		if (!(*flp)->fl_lmops->lm_break) {
1744  			WARN_ON_ONCE(1);
1745  			return -ENOLCK;
1746  		}
1747  		return generic_add_lease(filp, arg, flp, priv);
1748  	default:
1749  		return -EINVAL;
1750  	}
1751  }
1752  EXPORT_SYMBOL(generic_setlease);
1753  
1754  /**
1755   * vfs_setlease        -       sets a lease on an open file
1756   * @filp:	file pointer
1757   * @arg:	type of lease to obtain
1758   * @lease:	file_lock to use when adding a lease
1759   * @priv:	private info for lm_setup when adding a lease (may be
1760   * 		NULL if lm_setup doesn't require it)
1761   *
1762   * Call this to establish a lease on the file. The "lease" argument is not
1763   * used for F_UNLCK requests and may be NULL. For commands that set or alter
1764   * an existing lease, the (*lease)->fl_lmops->lm_break operation must be set;
1765   * if not, this function will return -ENOLCK (and generate a scary-looking
1766   * stack trace).
1767   *
1768   * The "priv" pointer is passed directly to the lm_setup function as-is. It
1769   * may be NULL if the lm_setup operation doesn't require it.
1770   */
1771  int
1772  vfs_setlease(struct file *filp, long arg, struct file_lock **lease, void **priv)
1773  {
1774  	if (filp->f_op->setlease)
1775  		return filp->f_op->setlease(filp, arg, lease, priv);
1776  	else
1777  		return generic_setlease(filp, arg, lease, priv);
1778  }
1779  EXPORT_SYMBOL_GPL(vfs_setlease);
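
/*
 * A filesystem that is happy with the generic lease behaviour can point its
 * ->setlease hook straight at generic_setlease(), or wrap it to add its own
 * checks first. A minimal sketch with hypothetical myfs_* names:
 *
 *	static int myfs_setlease(struct file *filp, long arg,
 *				 struct file_lock **flp, void **priv)
 *	{
 *		if (myfs_leases_unsupported(filp))	// hypothetical fs check
 *			return -EINVAL;
 *		return generic_setlease(filp, arg, flp, priv);
 *	}
 *
 *	static const struct file_operations myfs_file_ops = {
 *		// other methods omitted
 *		.setlease	= myfs_setlease,
 *	};
 */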
1780  
1781  static int do_fcntl_add_lease(unsigned int fd, struct file *filp, long arg)
1782  {
1783  	struct file_lock *fl;
1784  	struct fasync_struct *new;
1785  	int error;
1786  
1787  	fl = lease_alloc(filp, arg);
1788  	if (IS_ERR(fl))
1789  		return PTR_ERR(fl);
1790  
1791  	new = fasync_alloc();
1792  	if (!new) {
1793  		locks_free_lock(fl);
1794  		return -ENOMEM;
1795  	}
1796  	new->fa_fd = fd;
1797  
1798  	error = vfs_setlease(filp, arg, &fl, (void **)&new);
1799  	if (fl)
1800  		locks_free_lock(fl);
1801  	if (new)
1802  		fasync_free(new);
1803  	return error;
1804  }
1805  
1806  /**
1807   *	fcntl_setlease	-	sets a lease on an open file
1808   *	@fd: open file descriptor
1809   *	@filp: file pointer
1810   *	@arg: type of lease to obtain
1811   *
1812   *	Call this fcntl to establish a lease on the file.
1813   *	Note that you also need to call %F_SETSIG to
1814   *	receive a signal when the lease is broken.
1815   */
1816  int fcntl_setlease(unsigned int fd, struct file *filp, long arg)
1817  {
1818  	if (arg == F_UNLCK)
1819  		return vfs_setlease(filp, F_UNLCK, NULL, NULL);
1820  	return do_fcntl_add_lease(fd, filp, arg);
1821  }
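
/*
 * The userspace side of the above looks roughly like this (a minimal sketch,
 * error handling omitted; the path and handler name are hypothetical):
 *
 *	#define _GNU_SOURCE
 *	#include <fcntl.h>
 *	#include <signal.h>
 *
 *	int fd = open("/some/file", O_RDONLY);
 *	signal(SIGRTMIN, lease_break_handler);	// hypothetical handler
 *	fcntl(fd, F_SETSIG, SIGRTMIN);		// get SIGRTMIN instead of SIGIO
 *	fcntl(fd, F_SETLEASE, F_RDLCK);		// take a read lease
 *
 * When another process opens the file for writing, the holder receives the
 * signal and is expected to release (or downgrade) the lease with
 * fcntl(fd, F_SETLEASE, F_UNLCK) before /proc/sys/fs/lease-break-time
 * seconds elapse, after which the kernel breaks the lease itself.
 */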
1822  
1823  /**
1824   * flock_lock_file_wait - Apply a FLOCK-style lock to a file
1825   * @filp: The file to apply the lock to
1826   * @fl: The lock to be applied
1827   *
1828   * Add a FLOCK style lock to a file.
1829   */
1830  int flock_lock_file_wait(struct file *filp, struct file_lock *fl)
1831  {
1832  	int error;
1833  	might_sleep();
1834  	for (;;) {
1835  		error = flock_lock_file(filp, fl);
1836  		if (error != FILE_LOCK_DEFERRED)
1837  			break;
1838  		error = wait_event_interruptible(fl->fl_wait, !fl->fl_next);
1839  		if (!error)
1840  			continue;
1841  
1842  		locks_delete_block(fl);
1843  		break;
1844  	}
1845  	return error;
1846  }
1847  
1848  EXPORT_SYMBOL(flock_lock_file_wait);
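
/*
 * A filesystem whose ->flock hook only needs local BSD-flock semantics can
 * simply fall through to this helper. A minimal sketch, with hypothetical
 * myfs_* names:
 *
 *	static int myfs_flock(struct file *filp, int cmd, struct file_lock *fl)
 *	{
 *		// any cluster/remote coordination would go here first
 *		return flock_lock_file_wait(filp, fl);
 *	}
 */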
1849  
1850  /**
1851   *	sys_flock - flock() system call.
1852   *	@fd: the file descriptor to lock.
1853   *	@cmd: the type of lock to apply.
1854   *
1855   *	Apply a %FL_FLOCK style lock to an open file descriptor.
1856   *	The @cmd can be one of
1857   *
1858   *	%LOCK_SH -- a shared lock.
1859   *
1860   *	%LOCK_EX -- an exclusive lock.
1861   *
1862   *	%LOCK_UN -- remove an existing lock.
1863   *
1864   *	%LOCK_MAND -- a `mandatory' flock.  This exists to emulate Windows Share Modes.
1865   *
1866   *	%LOCK_MAND can be combined with %LOCK_READ or %LOCK_WRITE to allow other
1867   *	processes read and write access respectively.
1868   */
1869  SYSCALL_DEFINE2(flock, unsigned int, fd, unsigned int, cmd)
1870  {
1871  	struct fd f = fdget(fd);
1872  	struct file_lock *lock;
1873  	int can_sleep, unlock;
1874  	int error;
1875  
1876  	error = -EBADF;
1877  	if (!f.file)
1878  		goto out;
1879  
1880  	can_sleep = !(cmd & LOCK_NB);
1881  	cmd &= ~LOCK_NB;
1882  	unlock = (cmd == LOCK_UN);
1883  
1884  	if (!unlock && !(cmd & LOCK_MAND) &&
1885  	    !(f.file->f_mode & (FMODE_READ|FMODE_WRITE)))
1886  		goto out_putf;
1887  
1888  	lock = flock_make_lock(f.file, cmd);
1889  	if (IS_ERR(lock)) {
1890  		error = PTR_ERR(lock);
1891  		goto out_putf;
1892  	}
1893  
1894  	if (can_sleep)
1895  		lock->fl_flags |= FL_SLEEP;
1896  
1897  	error = security_file_lock(f.file, lock->fl_type);
1898  	if (error)
1899  		goto out_free;
1900  
1901  	if (f.file->f_op->flock)
1902  		error = f.file->f_op->flock(f.file,
1903  					  (can_sleep) ? F_SETLKW : F_SETLK,
1904  					  lock);
1905  	else
1906  		error = flock_lock_file_wait(f.file, lock);
1907  
1908   out_free:
1909  	locks_free_lock(lock);
1910  
1911   out_putf:
1912  	fdput(f);
1913   out:
1914  	return error;
1915  }
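
/*
 * From userspace this is reached through flock(2). A minimal sketch
 * (error handling mostly omitted; the path is hypothetical):
 *
 *	#include <sys/file.h>
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *
 *	int fd = open("/some/file", O_RDONLY);
 *	if (flock(fd, LOCK_EX | LOCK_NB) == -1)
 *		perror("flock");	// EWOULDBLOCK if held elsewhere
 *	// ... exclusive access ...
 *	flock(fd, LOCK_UN);
 */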
1916  
1917  /**
1918   * vfs_test_lock - test file byte range lock
1919   * @filp: The file to test lock for
1920   * @fl: The lock to test; also used to hold result
1921   *
1922   * Returns -ERRNO on failure.  Indicates presence of conflicting lock by
1923   * setting fl->fl_type to something other than F_UNLCK.
1924   */
1925  int vfs_test_lock(struct file *filp, struct file_lock *fl)
1926  {
1927  	if (filp->f_op->lock)
1928  		return filp->f_op->lock(filp, F_GETLK, fl);
1929  	posix_test_lock(filp, fl);
1930  	return 0;
1931  }
1932  EXPORT_SYMBOL_GPL(vfs_test_lock);
1933  
1934  static int posix_lock_to_flock(struct flock *flock, struct file_lock *fl)
1935  {
1936  	flock->l_pid = IS_OFDLCK(fl) ? -1 : fl->fl_pid;
1937  #if BITS_PER_LONG == 32
1938  	/*
1939  	 * Make sure we can represent the posix lock via
1940  	 * legacy 32bit flock.
1941  	 */
1942  	if (fl->fl_start > OFFT_OFFSET_MAX)
1943  		return -EOVERFLOW;
1944  	if (fl->fl_end != OFFSET_MAX && fl->fl_end > OFFT_OFFSET_MAX)
1945  		return -EOVERFLOW;
1946  #endif
1947  	flock->l_start = fl->fl_start;
1948  	flock->l_len = fl->fl_end == OFFSET_MAX ? 0 :
1949  		fl->fl_end - fl->fl_start + 1;
1950  	flock->l_whence = 0;
1951  	flock->l_type = fl->fl_type;
1952  	return 0;
1953  }
1954  
1955  #if BITS_PER_LONG == 32
1956  static void posix_lock_to_flock64(struct flock64 *flock, struct file_lock *fl)
1957  {
1958  	flock->l_pid = IS_OFDLCK(fl) ? -1 : fl->fl_pid;
1959  	flock->l_start = fl->fl_start;
1960  	flock->l_len = fl->fl_end == OFFSET_MAX ? 0 :
1961  		fl->fl_end - fl->fl_start + 1;
1962  	flock->l_whence = 0;
1963  	flock->l_type = fl->fl_type;
1964  }
1965  #endif
1966  
1967  /* Report the first existing lock that would conflict with l.
1968   * This implements the F_GETLK command of fcntl().
1969   */
1970  int fcntl_getlk(struct file *filp, unsigned int cmd, struct flock __user *l)
1971  {
1972  	struct file_lock file_lock;
1973  	struct flock flock;
1974  	int error;
1975  
1976  	error = -EFAULT;
1977  	if (copy_from_user(&flock, l, sizeof(flock)))
1978  		goto out;
1979  	error = -EINVAL;
1980  	if ((flock.l_type != F_RDLCK) && (flock.l_type != F_WRLCK))
1981  		goto out;
1982  
1983  	error = flock_to_posix_lock(filp, &file_lock, &flock);
1984  	if (error)
1985  		goto out;
1986  
1987  	if (cmd == F_OFD_GETLK) {
1988  		error = -EINVAL;
1989  		if (flock.l_pid != 0)
1990  			goto out;
1991  
1992  		cmd = F_GETLK;
1993  		file_lock.fl_flags |= FL_OFDLCK;
1994  		file_lock.fl_owner = filp;
1995  	}
1996  
1997  	error = vfs_test_lock(filp, &file_lock);
1998  	if (error)
1999  		goto out;
2000  
2001  	flock.l_type = file_lock.fl_type;
2002  	if (file_lock.fl_type != F_UNLCK) {
2003  		error = posix_lock_to_flock(&flock, &file_lock);
2004  		if (error)
2005  			goto rel_priv;
2006  	}
2007  	error = -EFAULT;
2008  	if (!copy_to_user(l, &flock, sizeof(flock)))
2009  		error = 0;
2010  rel_priv:
2011  	locks_release_private(&file_lock);
2012  out:
2013  	return error;
2014  }
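
/*
 * The corresponding userspace query, roughly (a minimal sketch; for
 * F_OFD_GETLK the only difference is that l_pid must be zero on input):
 *
 *	struct flock fl = {
 *		.l_type		= F_WRLCK,	// "would a write lock succeed?"
 *		.l_whence	= SEEK_SET,
 *		.l_start	= 0,
 *		.l_len		= 0,		// to EOF
 *	};
 *	fcntl(fd, F_GETLK, &fl);
 *	if (fl.l_type == F_UNLCK)
 *		printf("no conflicting lock\n");
 *	else
 *		printf("conflict held by pid %d\n", (int)fl.l_pid);
 */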
2015  
2016  /**
2017   * vfs_lock_file - file byte range lock
2018   * @filp: The file to apply the lock to
2019   * @cmd: type of locking operation (F_SETLK, F_GETLK, etc.)
2020   * @fl: The lock to be applied
2021   * @conf: Place to return a copy of the conflicting lock, if found.
2022   *
2023   * A caller that doesn't care about the conflicting lock may pass NULL
2024   * as the final argument.
2025   *
2026   * If the filesystem defines a private ->lock() method, then @conf will
2027   * be left unchanged; so a caller that cares should initialize it to
2028   * some acceptable default.
2029   *
2030   * To avoid blocking kernel daemons, such as lockd, that need to acquire POSIX
2031   * locks, the ->lock() interface may return asynchronously, before the lock has
2032   * been granted or denied by the underlying filesystem, if (and only if)
2033   * lm_grant is set. Callers expecting ->lock() to return asynchronously
2034   * will only use F_SETLK, not F_SETLKW; they will set FL_SLEEP if (and only if)
2035   * the request is for a blocking lock. When ->lock() does return asynchronously,
2036   * it must return FILE_LOCK_DEFERRED, and call ->lm_grant() when the lock
2037   * request completes.
2038   * If the request is for a non-blocking lock the filesystem should return
2039   * FILE_LOCK_DEFERRED, then attempt to get the lock and call the callback
2040   * routine with the result. If the request timed out, the callback routine
2041   * will return a nonzero return code and the filesystem should release the
2042   * lock. The filesystem is also responsible for keeping a corresponding
2043   * posix lock when it grants a lock, so the VFS can find out which locks
2044   * are locally held and do the correct lock cleanup when required.
2045   * The underlying filesystem must not drop the kernel lock or call
2046   * ->lm_grant() before returning to the caller with a FILE_LOCK_DEFERRED
2047   * return code.
2048   */
2049  int vfs_lock_file(struct file *filp, unsigned int cmd, struct file_lock *fl, struct file_lock *conf)
2050  {
2051  	if (filp->f_op->lock)
2052  		return filp->f_op->lock(filp, cmd, fl);
2053  	else
2054  		return posix_lock_file(filp, fl, conf);
2055  }
2056  EXPORT_SYMBOL_GPL(vfs_lock_file);
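
/*
 * A rough sketch of the asynchronous ->lock() convention described above,
 * using hypothetical myfs_* helpers (the queueing and reply plumbing are
 * filesystem-specific) and assuming the lm_grant(fl, result) callback form:
 *
 *	static int myfs_lock(struct file *filp, int cmd, struct file_lock *fl)
 *	{
 *		if (!fl->fl_lmops || !fl->fl_lmops->lm_grant)
 *			return myfs_lock_sync(filp, cmd, fl);	// hypothetical
 *		myfs_queue_lock_request(filp, fl);		// hypothetical
 *		return FILE_LOCK_DEFERRED;
 *	}
 *
 *	// called when the (remote) lock request finally completes:
 *	static void myfs_lock_done(struct file_lock *fl, int result)
 *	{
 *		if (fl->fl_lmops->lm_grant(fl, result))
 *			myfs_release_lock(fl);	// hypothetical: caller gave up
 *	}
 */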
2057  
2058  static int do_lock_file_wait(struct file *filp, unsigned int cmd,
2059  			     struct file_lock *fl)
2060  {
2061  	int error;
2062  
2063  	error = security_file_lock(filp, fl->fl_type);
2064  	if (error)
2065  		return error;
2066  
2067  	for (;;) {
2068  		error = vfs_lock_file(filp, cmd, fl, NULL);
2069  		if (error != FILE_LOCK_DEFERRED)
2070  			break;
2071  		error = wait_event_interruptible(fl->fl_wait, !fl->fl_next);
2072  		if (!error)
2073  			continue;
2074  
2075  		locks_delete_block(fl);
2076  		break;
2077  	}
2078  
2079  	return error;
2080  }
2081  
2082  /* Ensure that fl->fl_file has a compatible f_mode for F_SETLK calls */
2083  static int
2084  check_fmode_for_setlk(struct file_lock *fl)
2085  {
2086  	switch (fl->fl_type) {
2087  	case F_RDLCK:
2088  		if (!(fl->fl_file->f_mode & FMODE_READ))
2089  			return -EBADF;
2090  		break;
2091  	case F_WRLCK:
2092  		if (!(fl->fl_file->f_mode & FMODE_WRITE))
2093  			return -EBADF;
2094  	}
2095  	return 0;
2096  }
2097  
2098  /* Apply the lock described by l to an open file descriptor.
2099   * This implements both the F_SETLK and F_SETLKW commands of fcntl().
2100   */
2101  int fcntl_setlk(unsigned int fd, struct file *filp, unsigned int cmd,
2102  		struct flock __user *l)
2103  {
2104  	struct file_lock *file_lock = locks_alloc_lock();
2105  	struct flock flock;
2106  	struct inode *inode;
2107  	struct file *f;
2108  	int error;
2109  
2110  	if (file_lock == NULL)
2111  		return -ENOLCK;
2112  
2113  	/*
2114  	 * This might block, so we do it before checking the inode.
2115  	 */
2116  	error = -EFAULT;
2117  	if (copy_from_user(&flock, l, sizeof(flock)))
2118  		goto out;
2119  
2120  	inode = file_inode(filp);
2121  
2122  	/* Don't allow mandatory locks on files that may be memory mapped
2123  	 * and shared.
2124  	 */
2125  	if (mandatory_lock(inode) && mapping_writably_mapped(filp->f_mapping)) {
2126  		error = -EAGAIN;
2127  		goto out;
2128  	}
2129  
2130  again:
2131  	error = flock_to_posix_lock(filp, file_lock, &flock);
2132  	if (error)
2133  		goto out;
2134  
2135  	error = check_fmode_for_setlk(file_lock);
2136  	if (error)
2137  		goto out;
2138  
2139  	/*
2140  	 * If the cmd is requesting open file description (OFD) locks, then
2141  	 * set the FL_OFDLCK flag and override the owner.
2142  	 */
2143  	switch (cmd) {
2144  	case F_OFD_SETLK:
2145  		error = -EINVAL;
2146  		if (flock.l_pid != 0)
2147  			goto out;
2148  
2149  		cmd = F_SETLK;
2150  		file_lock->fl_flags |= FL_OFDLCK;
2151  		file_lock->fl_owner = filp;
2152  		break;
2153  	case F_OFD_SETLKW:
2154  		error = -EINVAL;
2155  		if (flock.l_pid != 0)
2156  			goto out;
2157  
2158  		cmd = F_SETLKW;
2159  		file_lock->fl_flags |= FL_OFDLCK;
2160  		file_lock->fl_owner = filp;
2161  		/* Fallthrough */
2162  	case F_SETLKW:
2163  		file_lock->fl_flags |= FL_SLEEP;
2164  	}
2165  
2166  	error = do_lock_file_wait(filp, cmd, file_lock);
2167  
2168  	/*
2169  	 * Attempt to detect a close/fcntl race and recover by
2170  	 * releasing the lock that was just acquired.
2171  	 */
2172  	/*
2173  	 * We need the spin_lock here: it prevents reordering between the
2174  	 * update of inode->i_flock and the check for it done in close().
2175  	 * rcu_read_lock() wouldn't suffice.
2176  	 */
2177  	spin_lock(&current->files->file_lock);
2178  	f = fcheck(fd);
2179  	spin_unlock(&current->files->file_lock);
2180  	if (!error && f != filp && flock.l_type != F_UNLCK) {
2181  		flock.l_type = F_UNLCK;
2182  		goto again;
2183  	}
2184  
2185  out:
2186  	locks_free_lock(file_lock);
2187  	return error;
2188  }
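
/*
 * The OFD variants from userspace look the same as F_SETLK/F_SETLKW except
 * that the lock is owned by the open file description and l_pid must be
 * zero on input. A minimal sketch:
 *
 *	struct flock fl = {
 *		.l_type		= F_WRLCK,
 *		.l_whence	= SEEK_SET,
 *		.l_start	= 0,
 *		.l_len		= 0,		// whole file
 *		.l_pid		= 0,		// required for F_OFD_* commands
 *	};
 *	if (fcntl(fd, F_OFD_SETLK, &fl) == -1)
 *		perror("F_OFD_SETLK");
 *
 * Such a lock is released on the last close of the open file description,
 * not when the owning process closes some other descriptor for the file.
 */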
2189  
2190  #if BITS_PER_LONG == 32
2191  /* Report the first existing lock that would conflict with l.
2192   * This implements the F_GETLK64 command of fcntl().
2193   */
2194  int fcntl_getlk64(struct file *filp, unsigned int cmd, struct flock64 __user *l)
2195  {
2196  	struct file_lock file_lock;
2197  	struct flock64 flock;
2198  	int error;
2199  
2200  	error = -EFAULT;
2201  	if (copy_from_user(&flock, l, sizeof(flock)))
2202  		goto out;
2203  	error = -EINVAL;
2204  	if ((flock.l_type != F_RDLCK) && (flock.l_type != F_WRLCK))
2205  		goto out;
2206  
2207  	error = flock64_to_posix_lock(filp, &file_lock, &flock);
2208  	if (error)
2209  		goto out;
2210  
2211  	if (cmd == F_OFD_GETLK) {
2212  		error = -EINVAL;
2213  		if (flock.l_pid != 0)
2214  			goto out;
2215  
2216  		cmd = F_GETLK64;
2217  		file_lock.fl_flags |= FL_OFDLCK;
2218  		file_lock.fl_owner = filp;
2219  	}
2220  
2221  	error = vfs_test_lock(filp, &file_lock);
2222  	if (error)
2223  		goto out;
2224  
2225  	flock.l_type = file_lock.fl_type;
2226  	if (file_lock.fl_type != F_UNLCK)
2227  		posix_lock_to_flock64(&flock, &file_lock);
2228  
2229  	error = -EFAULT;
2230  	if (!copy_to_user(l, &flock, sizeof(flock)))
2231  		error = 0;
2232  
2233  	locks_release_private(&file_lock);
2234  out:
2235  	return error;
2236  }
2237  
2238  /* Apply the lock described by l to an open file descriptor.
2239   * This implements both the F_SETLK64 and F_SETLKW64 commands of fcntl().
2240   */
2241  int fcntl_setlk64(unsigned int fd, struct file *filp, unsigned int cmd,
2242  		struct flock64 __user *l)
2243  {
2244  	struct file_lock *file_lock = locks_alloc_lock();
2245  	struct flock64 flock;
2246  	struct inode *inode;
2247  	struct file *f;
2248  	int error;
2249  
2250  	if (file_lock == NULL)
2251  		return -ENOLCK;
2252  
2253  	/*
2254  	 * This might block, so we do it before checking the inode.
2255  	 */
2256  	error = -EFAULT;
2257  	if (copy_from_user(&flock, l, sizeof(flock)))
2258  		goto out;
2259  
2260  	inode = file_inode(filp);
2261  
2262  	/* Don't allow mandatory locks on files that may be memory mapped
2263  	 * and shared.
2264  	 */
2265  	if (mandatory_lock(inode) && mapping_writably_mapped(filp->f_mapping)) {
2266  		error = -EAGAIN;
2267  		goto out;
2268  	}
2269  
2270  again:
2271  	error = flock64_to_posix_lock(filp, file_lock, &flock);
2272  	if (error)
2273  		goto out;
2274  
2275  	error = check_fmode_for_setlk(file_lock);
2276  	if (error)
2277  		goto out;
2278  
2279  	/*
2280  	 * If the cmd is requesting open file description (OFD) locks, then
2281  	 * set the FL_OFDLCK flag and override the owner.
2282  	 */
2283  	switch (cmd) {
2284  	case F_OFD_SETLK:
2285  		error = -EINVAL;
2286  		if (flock.l_pid != 0)
2287  			goto out;
2288  
2289  		cmd = F_SETLK64;
2290  		file_lock->fl_flags |= FL_OFDLCK;
2291  		file_lock->fl_owner = filp;
2292  		break;
2293  	case F_OFD_SETLKW:
2294  		error = -EINVAL;
2295  		if (flock.l_pid != 0)
2296  			goto out;
2297  
2298  		cmd = F_SETLKW64;
2299  		file_lock->fl_flags |= FL_OFDLCK;
2300  		file_lock->fl_owner = filp;
2301  		/* Fallthrough */
2302  	case F_SETLKW64:
2303  		file_lock->fl_flags |= FL_SLEEP;
2304  	}
2305  
2306  	error = do_lock_file_wait(filp, cmd, file_lock);
2307  
2308  	/*
2309  	 * Attempt to detect a close/fcntl race and recover by
2310  	 * releasing the lock that was just acquired.
2311  	 */
2312  	spin_lock(&current->files->file_lock);
2313  	f = fcheck(fd);
2314  	spin_unlock(&current->files->file_lock);
2315  	if (!error && f != filp && flock.l_type != F_UNLCK) {
2316  		flock.l_type = F_UNLCK;
2317  		goto again;
2318  	}
2319  
2320  out:
2321  	locks_free_lock(file_lock);
2322  	return error;
2323  }
2324  #endif /* BITS_PER_LONG == 32 */
2325  
2326  /*
2327   * This function is called when the file is being removed
2328   * from the task's fd array.  POSIX locks belonging to this task
2329   * are deleted at this time.
2330   */
2331  void locks_remove_posix(struct file *filp, fl_owner_t owner)
2332  {
2333  	struct file_lock lock;
2334  
2335  	/*
2336  	 * If there are no locks held on this file, we don't need to call
2337  	 * posix_lock_file().  Another process could be setting a lock on this
2338  	 * file at the same time, but we wouldn't remove that lock anyway.
2339  	 */
2340  	if (!file_inode(filp)->i_flock)
2341  		return;
2342  
2343  	lock.fl_type = F_UNLCK;
2344  	lock.fl_flags = FL_POSIX | FL_CLOSE;
2345  	lock.fl_start = 0;
2346  	lock.fl_end = OFFSET_MAX;
2347  	lock.fl_owner = owner;
2348  	lock.fl_pid = current->tgid;
2349  	lock.fl_file = filp;
2350  	lock.fl_ops = NULL;
2351  	lock.fl_lmops = NULL;
2352  
2353  	vfs_lock_file(filp, F_SETLK, &lock, NULL);
2354  
2355  	if (lock.fl_ops && lock.fl_ops->fl_release_private)
2356  		lock.fl_ops->fl_release_private(&lock);
2357  }
2358  
2359  EXPORT_SYMBOL(locks_remove_posix);
2360  
2361  /*
2362   * This function is called on the last close of an open file.
2363   */
2364  void locks_remove_file(struct file *filp)
2365  {
2366  	struct inode * inode = file_inode(filp);
2367  	struct file_lock *fl;
2368  	struct file_lock **before;
2369  	LIST_HEAD(dispose);
2370  
2371  	if (!inode->i_flock)
2372  		return;
2373  
2374  	locks_remove_posix(filp, filp);
2375  
2376  	if (filp->f_op->flock) {
2377  		struct file_lock fl = {
2378  			.fl_owner = filp,
2379  			.fl_pid = current->tgid,
2380  			.fl_file = filp,
2381  			.fl_flags = FL_FLOCK,
2382  			.fl_type = F_UNLCK,
2383  			.fl_end = OFFSET_MAX,
2384  		};
2385  		filp->f_op->flock(filp, F_SETLKW, &fl);
2386  		if (fl.fl_ops && fl.fl_ops->fl_release_private)
2387  			fl.fl_ops->fl_release_private(&fl);
2388  	}
2389  
2390  	spin_lock(&inode->i_lock);
2391  	before = &inode->i_flock;
2392  
2393  	while ((fl = *before) != NULL) {
2394  		if (fl->fl_file == filp) {
2395  			if (IS_LEASE(fl)) {
2396  				lease_modify(before, F_UNLCK, &dispose);
2397  				continue;
2398  			}
2399  
2400  			/*
2401  			 * There's a leftover lock on the list of a type that
2402  			 * we didn't expect to see. Most likely a classic
2403  			 * POSIX lock that ended up not getting released
2404  			 * properly, or that raced onto the list somehow. Log
2405  			 * some info about it and then just remove it from
2406  			 * the list.
2407  			 */
2408  			WARN(!IS_FLOCK(fl),
2409  				"leftover lock: dev=%u:%u ino=%lu type=%hhd flags=0x%x start=%lld end=%lld\n",
2410  				MAJOR(inode->i_sb->s_dev),
2411  				MINOR(inode->i_sb->s_dev), inode->i_ino,
2412  				fl->fl_type, fl->fl_flags,
2413  				fl->fl_start, fl->fl_end);
2414  
2415  			locks_delete_lock(before, &dispose);
2416  			continue;
2417  		}
2418  		before = &fl->fl_next;
2419  	}
2420  	spin_unlock(&inode->i_lock);
2421  	locks_dispose_list(&dispose);
2422  }
2423  
2424  /**
2425   *	posix_unblock_lock - stop waiting for a file lock
2426   *	@waiter: the lock which was waiting
2427   *
2428   *	lockd needs to block waiting for locks, and sometimes to stop waiting.
2429   */
2430  int
2431  posix_unblock_lock(struct file_lock *waiter)
2432  {
2433  	int status = 0;
2434  
2435  	spin_lock(&blocked_lock_lock);
2436  	if (waiter->fl_next)
2437  		__locks_delete_block(waiter);
2438  	else
2439  		status = -ENOENT;
2440  	spin_unlock(&blocked_lock_lock);
2441  	return status;
2442  }
2443  EXPORT_SYMBOL(posix_unblock_lock);
2444  
2445  /**
2446   * vfs_cancel_lock - file byte range unblock lock
2447   * @filp: The file to apply the unblock to
2448   * @fl: The lock to be unblocked
2449   *
2450   * Used by lock managers to cancel blocked requests
2451   */
2452  int vfs_cancel_lock(struct file *filp, struct file_lock *fl)
2453  {
2454  	if (filp->f_op->lock)
2455  		return filp->f_op->lock(filp, F_CANCELLK, fl);
2456  	return 0;
2457  }
2458  
2459  EXPORT_SYMBOL_GPL(vfs_cancel_lock);
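
/*
 * A lock manager that used the asynchronous ->lock() path typically pairs
 * these two calls when a blocked request is abandoned: cancel it in the
 * filesystem, then unlink the local waiter. A rough sketch with a
 * hypothetical per-request structure:
 *
 *	static void myd_cancel_blocked(struct my_block *block)
 *	{
 *		vfs_cancel_lock(block->b_file, &block->b_fl);
 *		posix_unblock_lock(&block->b_fl);
 *	}
 */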
2460  
2461  #ifdef CONFIG_PROC_FS
2462  #include <linux/proc_fs.h>
2463  #include <linux/seq_file.h>
2464  
2465  struct locks_iterator {
2466  	int	li_cpu;
2467  	loff_t	li_pos;
2468  };
2469  
2470  static void lock_get_status(struct seq_file *f, struct file_lock *fl,
2471  			    loff_t id, char *pfx)
2472  {
2473  	struct inode *inode = NULL;
2474  	unsigned int fl_pid;
2475  
2476  	if (fl->fl_nspid)
2477  		fl_pid = pid_vnr(fl->fl_nspid);
2478  	else
2479  		fl_pid = fl->fl_pid;
2480  
2481  	if (fl->fl_file != NULL)
2482  		inode = file_inode(fl->fl_file);
2483  
2484  	seq_printf(f, "%lld:%s ", id, pfx);
2485  	if (IS_POSIX(fl)) {
2486  		if (fl->fl_flags & FL_ACCESS)
2487  			seq_puts(f, "ACCESS");
2488  		else if (IS_OFDLCK(fl))
2489  			seq_puts(f, "OFDLCK");
2490  		else
2491  			seq_puts(f, "POSIX ");
2492  
2493  		seq_printf(f, " %s ",
2494  			     (inode == NULL) ? "*NOINODE*" :
2495  			     mandatory_lock(inode) ? "MANDATORY" : "ADVISORY ");
2496  	} else if (IS_FLOCK(fl)) {
2497  		if (fl->fl_type & LOCK_MAND) {
2498  			seq_puts(f, "FLOCK  MSNFS     ");
2499  		} else {
2500  			seq_puts(f, "FLOCK  ADVISORY  ");
2501  		}
2502  	} else if (IS_LEASE(fl)) {
2503  		if (fl->fl_flags & FL_DELEG)
2504  			seq_puts(f, "DELEG  ");
2505  		else
2506  			seq_puts(f, "LEASE  ");
2507  
2508  		if (lease_breaking(fl))
2509  			seq_puts(f, "BREAKING  ");
2510  		else if (fl->fl_file)
2511  			seq_puts(f, "ACTIVE    ");
2512  		else
2513  			seq_puts(f, "BREAKER   ");
2514  	} else {
2515  		seq_puts(f, "UNKNOWN UNKNOWN  ");
2516  	}
2517  	if (fl->fl_type & LOCK_MAND) {
2518  		seq_printf(f, "%s ",
2519  			       (fl->fl_type & LOCK_READ)
2520  			       ? (fl->fl_type & LOCK_WRITE) ? "RW   " : "READ "
2521  			       : (fl->fl_type & LOCK_WRITE) ? "WRITE" : "NONE ");
2522  	} else {
2523  		seq_printf(f, "%s ",
2524  			       (lease_breaking(fl))
2525  			       ? (fl->fl_type == F_UNLCK) ? "UNLCK" : "READ "
2526  			       : (fl->fl_type == F_WRLCK) ? "WRITE" : "READ ");
2527  	}
2528  	if (inode) {
2529  #ifdef WE_CAN_BREAK_LSLK_NOW
2530  		seq_printf(f, "%d %s:%ld ", fl_pid,
2531  				inode->i_sb->s_id, inode->i_ino);
2532  #else
2533  		/* userspace relies on this representation of dev_t ;-( */
2534  		seq_printf(f, "%d %02x:%02x:%ld ", fl_pid,
2535  				MAJOR(inode->i_sb->s_dev),
2536  				MINOR(inode->i_sb->s_dev), inode->i_ino);
2537  #endif
2538  	} else {
2539  		seq_printf(f, "%d <none>:0 ", fl_pid);
2540  	}
2541  	if (IS_POSIX(fl)) {
2542  		if (fl->fl_end == OFFSET_MAX)
2543  			seq_printf(f, "%Ld EOF\n", fl->fl_start);
2544  		else
2545  			seq_printf(f, "%Ld %Ld\n", fl->fl_start, fl->fl_end);
2546  	} else {
2547  		seq_puts(f, "0 EOF\n");
2548  	}
2549  }
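
/*
 * The resulting /proc/locks lines look roughly like this (fields as emitted
 * above: id, type, mand/adv (or lease state), access, pid, maj:min:ino,
 * byte range):
 *
 *	1: POSIX  ADVISORY  WRITE 1193 08:01:273142 0 EOF
 *	2: FLOCK  ADVISORY  WRITE 1028 08:01:273141 0 EOF
 *	2: -> FLOCK  ADVISORY  WRITE 1029 08:01:273141 0 EOF
 *	3: LEASE  ACTIVE    READ  1320 08:01:273140 0 EOF
 *
 * The "->" prefix marks a waiter blocked on the lock above it; see
 * locks_show() below.
 */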
2550  
2551  static int locks_show(struct seq_file *f, void *v)
2552  {
2553  	struct locks_iterator *iter = f->private;
2554  	struct file_lock *fl, *bfl;
2555  
2556  	fl = hlist_entry(v, struct file_lock, fl_link);
2557  
2558  	lock_get_status(f, fl, iter->li_pos, "");
2559  
2560  	list_for_each_entry(bfl, &fl->fl_block, fl_block)
2561  		lock_get_status(f, bfl, iter->li_pos, " ->");
2562  
2563  	return 0;
2564  }
2565  
2566  static void *locks_start(struct seq_file *f, loff_t *pos)
2567  	__acquires(&blocked_lock_lock)
2568  {
2569  	struct locks_iterator *iter = f->private;
2570  
2571  	iter->li_pos = *pos + 1;
2572  	lg_global_lock(&file_lock_lglock);
2573  	spin_lock(&blocked_lock_lock);
2574  	return seq_hlist_start_percpu(&file_lock_list, &iter->li_cpu, *pos);
2575  }
2576  
2577  static void *locks_next(struct seq_file *f, void *v, loff_t *pos)
2578  {
2579  	struct locks_iterator *iter = f->private;
2580  
2581  	++iter->li_pos;
2582  	return seq_hlist_next_percpu(v, &file_lock_list, &iter->li_cpu, pos);
2583  }
2584  
2585  static void locks_stop(struct seq_file *f, void *v)
2586  	__releases(&blocked_lock_lock)
2587  {
2588  	spin_unlock(&blocked_lock_lock);
2589  	lg_global_unlock(&file_lock_lglock);
2590  }
2591  
2592  static const struct seq_operations locks_seq_operations = {
2593  	.start	= locks_start,
2594  	.next	= locks_next,
2595  	.stop	= locks_stop,
2596  	.show	= locks_show,
2597  };
2598  
2599  static int locks_open(struct inode *inode, struct file *filp)
2600  {
2601  	return seq_open_private(filp, &locks_seq_operations,
2602  					sizeof(struct locks_iterator));
2603  }
2604  
2605  static const struct file_operations proc_locks_operations = {
2606  	.open		= locks_open,
2607  	.read		= seq_read,
2608  	.llseek		= seq_lseek,
2609  	.release	= seq_release_private,
2610  };
2611  
2612  static int __init proc_locks_init(void)
2613  {
2614  	proc_create("locks", 0, NULL, &proc_locks_operations);
2615  	return 0;
2616  }
2617  module_init(proc_locks_init);
2618  #endif
2619  
2620  static int __init filelock_init(void)
2621  {
2622  	int i;
2623  
2624  	filelock_cache = kmem_cache_create("file_lock_cache",
2625  			sizeof(struct file_lock), 0, SLAB_PANIC, NULL);
2626  
2627  	lg_lock_init(&file_lock_lglock, "file_lock_lglock");
2628  
2629  	for_each_possible_cpu(i)
2630  		INIT_HLIST_HEAD(per_cpu_ptr(&file_lock_list, i));
2631  
2632  	return 0;
2633  }
2634  
2635  core_initcall(filelock_init);
2636