xref: /openbmc/linux/fs/locks.c (revision 4800cd83)
1 /*
2  *  linux/fs/locks.c
3  *
4  *  Provide support for fcntl()'s F_GETLK, F_SETLK, and F_SETLKW calls.
5  *  Doug Evans (dje@spiff.uucp), August 07, 1992
6  *
7  *  Deadlock detection added.
8  *  FIXME: one thing isn't handled yet:
9  *	- mandatory locks (requires lots of changes elsewhere)
10  *  Kelly Carmichael (kelly@[142.24.8.65]), September 17, 1994.
11  *
12  *  Miscellaneous edits, and a total rewrite of posix_lock_file() code.
13  *  Kai Petzke (wpp@marie.physik.tu-berlin.de), 1994
14  *
15  *  Converted file_lock_table to a linked list from an array, which eliminates
16  *  the limits on how many active file locks are open.
17  *  Chad Page (pageone@netcom.com), November 27, 1994
18  *
19  *  Removed dependency on file descriptors. dup()'ed file descriptors now
20  *  get the same locks as the original file descriptors, and a close() on
21  *  any file descriptor removes ALL the locks on the file for the current
22  *  process. Since locks still depend on the process id, locks are inherited
23  *  after an exec() but not after a fork(). This agrees with POSIX, and both
24  *  BSD and SVR4 practice.
25  *  Andy Walker (andy@lysaker.kvaerner.no), February 14, 1995
26  *
27  *  Scrapped free list which is redundant now that we allocate locks
28  *  dynamically with kmalloc()/kfree().
29  *  Andy Walker (andy@lysaker.kvaerner.no), February 21, 1995
30  *
31  *  Implemented two lock personalities - FL_FLOCK and FL_POSIX.
32  *
33  *  FL_POSIX locks are created with calls to fcntl() and lockf() through the
34  *  fcntl() system call. They have the semantics described above.
35  *
36  *  FL_FLOCK locks are created with calls to flock(), through the flock()
37  *  system call, which is new. Old C libraries implement flock() via fcntl()
38  *  and will continue to use the old, broken implementation.
39  *
40  *  FL_FLOCK locks follow the 4.4 BSD flock() semantics. They are associated
41  *  with a file pointer (filp). As a result they can be shared by a parent
42  *  process and its children after a fork(). They are removed when the last
43  *  file descriptor referring to the file pointer is closed (unless explicitly
44  *  unlocked).
45  *
46  *  FL_FLOCK locks never deadlock; an existing lock is always removed before
47  *  upgrading from shared to exclusive (or vice versa). When this happens
48  *  any processes blocked by the current lock are woken up and allowed to
49  *  run before the new lock is applied.
50  *  Andy Walker (andy@lysaker.kvaerner.no), June 09, 1995
51  *
52  *  Removed some race conditions in flock_lock_file(), marked other possible
53  *  races. Just grep for FIXME to see them.
54  *  Dmitry Gorodchanin (pgmdsg@ibi.com), February 09, 1996.
55  *
56  *  Addressed Dmitry's concerns. Deadlock checking no longer recursive.
57  *  Lock allocation changed to GFP_ATOMIC as we can't afford to sleep
58  *  once we've checked for blocking and deadlocking.
59  *  Andy Walker (andy@lysaker.kvaerner.no), April 03, 1996.
60  *
61  *  Initial implementation of mandatory locks. SunOS turned out to be
62  *  a rotten model, so I implemented the "obvious" semantics.
63  *  See 'Documentation/mandatory.txt' for details.
64  *  Andy Walker (andy@lysaker.kvaerner.no), April 06, 1996.
65  *
66  *  Don't allow mandatory locks on mmap()'ed files. Added simple functions to
67  *  check if a file has mandatory locks, used by mmap(), open() and creat() to
68  *  see if system call should be rejected. Ref. HP-UX/SunOS/Solaris Reference
69  *  Manual, Section 2.
70  *  Andy Walker (andy@lysaker.kvaerner.no), April 09, 1996.
71  *
72  *  Tidied up block list handling. Added '/proc/locks' interface.
73  *  Andy Walker (andy@lysaker.kvaerner.no), April 24, 1996.
74  *
75  *  Fixed deadlock condition for pathological code that mixes calls to
76  *  flock() and fcntl().
77  *  Andy Walker (andy@lysaker.kvaerner.no), April 29, 1996.
78  *
79  *  Allow only one type of locking scheme (FL_POSIX or FL_FLOCK) to be in use
80  *  for a given file at a time. Changed the CONFIG_LOCK_MANDATORY scheme to
81  *  guarantee sensible behaviour in the case where file system modules might
82  *  be compiled with different options than the kernel itself.
83  *  Andy Walker (andy@lysaker.kvaerner.no), May 15, 1996.
84  *
85  *  Added a couple of missing wake_up() calls. Thanks to Thomas Meckel
86  *  (Thomas.Meckel@mni.fh-giessen.de) for spotting this.
87  *  Andy Walker (andy@lysaker.kvaerner.no), May 15, 1996.
88  *
89  *  Changed FL_POSIX locks to use the block list in the same way as FL_FLOCK
90  *  locks. Changed process synchronisation to avoid dereferencing locks that
91  *  have already been freed.
92  *  Andy Walker (andy@lysaker.kvaerner.no), Sep 21, 1996.
93  *
94  *  Made the block list a circular list to minimise searching in the list.
95  *  Andy Walker (andy@lysaker.kvaerner.no), Sep 25, 1996.
96  *
97  *  Made mandatory locking a mount option. Default is not to allow mandatory
98  *  locking.
99  *  Andy Walker (andy@lysaker.kvaerner.no), Oct 04, 1996.
100  *
101  *  Some adaptations for NFS support.
102  *  Olaf Kirch (okir@monad.swb.de), Dec 1996.
103  *
104  *  Fixed /proc/locks interface so that we can't overrun the buffer we are handed.
105  *  Andy Walker (andy@lysaker.kvaerner.no), May 12, 1997.
106  *
107  *  Use slab allocator instead of kmalloc/kfree.
108  *  Use generic list implementation from <linux/list.h>.
109  *  Sped up posix_locks_deadlock by only considering blocked locks.
110  *  Matthew Wilcox <willy@debian.org>, March, 2000.
111  *
112  *  Leases and LOCK_MAND
113  *  Matthew Wilcox <willy@debian.org>, June, 2000.
114  *  Stephen Rothwell <sfr@canb.auug.org.au>, June, 2000.
115  */
116 
117 #include <linux/capability.h>
118 #include <linux/file.h>
119 #include <linux/fdtable.h>
120 #include <linux/fs.h>
121 #include <linux/init.h>
122 #include <linux/module.h>
123 #include <linux/security.h>
124 #include <linux/slab.h>
125 #include <linux/syscalls.h>
126 #include <linux/time.h>
127 #include <linux/rcupdate.h>
128 #include <linux/pid_namespace.h>
129 
130 #include <asm/uaccess.h>
131 
132 #define IS_POSIX(fl)	(fl->fl_flags & FL_POSIX)
133 #define IS_FLOCK(fl)	(fl->fl_flags & FL_FLOCK)
134 #define IS_LEASE(fl)	(fl->fl_flags & FL_LEASE)
135 
136 int leases_enable = 1;
137 int lease_break_time = 45;
138 
139 #define for_each_lock(inode, lockp) \
140 	for (lockp = &inode->i_flock; *lockp != NULL; lockp = &(*lockp)->fl_next)
141 
142 static LIST_HEAD(file_lock_list);
143 static LIST_HEAD(blocked_list);
144 static DEFINE_SPINLOCK(file_lock_lock);
145 
146 /*
147  * file_lock_lock protects the two list heads above, plus every
148  * inode->i_flock list.
149  */
150 void lock_flocks(void)
151 {
152 	spin_lock(&file_lock_lock);
153 }
154 EXPORT_SYMBOL_GPL(lock_flocks);
155 
156 void unlock_flocks(void)
157 {
158 	spin_unlock(&file_lock_lock);
159 }
160 EXPORT_SYMBOL_GPL(unlock_flocks);
161 
162 static struct kmem_cache *filelock_cache __read_mostly;
163 
164 /* Allocate an empty lock structure. */
165 struct file_lock *locks_alloc_lock(void)
166 {
167 	return kmem_cache_alloc(filelock_cache, GFP_KERNEL);
168 }
169 EXPORT_SYMBOL_GPL(locks_alloc_lock);
170 
171 void locks_release_private(struct file_lock *fl)
172 {
173 	if (fl->fl_ops) {
174 		if (fl->fl_ops->fl_release_private)
175 			fl->fl_ops->fl_release_private(fl);
176 		fl->fl_ops = NULL;
177 	}
178 	if (fl->fl_lmops) {
179 		if (fl->fl_lmops->fl_release_private)
180 			fl->fl_lmops->fl_release_private(fl);
181 		fl->fl_lmops = NULL;
182 	}
183 
184 }
185 EXPORT_SYMBOL_GPL(locks_release_private);
186 
187 /* Free a lock which is not in use. */
188 void locks_free_lock(struct file_lock *fl)
189 {
190 	BUG_ON(waitqueue_active(&fl->fl_wait));
191 	BUG_ON(!list_empty(&fl->fl_block));
192 	BUG_ON(!list_empty(&fl->fl_link));
193 
194 	locks_release_private(fl);
195 	kmem_cache_free(filelock_cache, fl);
196 }
197 EXPORT_SYMBOL(locks_free_lock);
198 
199 void locks_init_lock(struct file_lock *fl)
200 {
201 	INIT_LIST_HEAD(&fl->fl_link);
202 	INIT_LIST_HEAD(&fl->fl_block);
203 	init_waitqueue_head(&fl->fl_wait);
204 	fl->fl_next = NULL;
205 	fl->fl_fasync = NULL;
206 	fl->fl_owner = NULL;
207 	fl->fl_pid = 0;
208 	fl->fl_nspid = NULL;
209 	fl->fl_file = NULL;
210 	fl->fl_flags = 0;
211 	fl->fl_type = 0;
212 	fl->fl_start = fl->fl_end = 0;
213 	fl->fl_ops = NULL;
214 	fl->fl_lmops = NULL;
215 }
216 
217 EXPORT_SYMBOL(locks_init_lock);
218 
219 /*
220  * Initialises the fields of the file lock which are invariant for
221  * free file_locks.
222  */
223 static void init_once(void *foo)
224 {
225 	struct file_lock *lock = (struct file_lock *) foo;
226 
227 	locks_init_lock(lock);
228 }
229 
230 static void locks_copy_private(struct file_lock *new, struct file_lock *fl)
231 {
232 	if (fl->fl_ops) {
233 		if (fl->fl_ops->fl_copy_lock)
234 			fl->fl_ops->fl_copy_lock(new, fl);
235 		new->fl_ops = fl->fl_ops;
236 	}
237 	if (fl->fl_lmops)
238 		new->fl_lmops = fl->fl_lmops;
239 }
240 
241 /*
242  * Initialize a new lock from an existing file_lock structure.
243  */
244 void __locks_copy_lock(struct file_lock *new, const struct file_lock *fl)
245 {
246 	new->fl_owner = fl->fl_owner;
247 	new->fl_pid = fl->fl_pid;
248 	new->fl_file = NULL;
249 	new->fl_flags = fl->fl_flags;
250 	new->fl_type = fl->fl_type;
251 	new->fl_start = fl->fl_start;
252 	new->fl_end = fl->fl_end;
253 	new->fl_ops = NULL;
254 	new->fl_lmops = NULL;
255 }
256 EXPORT_SYMBOL(__locks_copy_lock);
257 
258 void locks_copy_lock(struct file_lock *new, struct file_lock *fl)
259 {
260 	locks_release_private(new);
261 
262 	__locks_copy_lock(new, fl);
263 	new->fl_file = fl->fl_file;
264 	new->fl_ops = fl->fl_ops;
265 	new->fl_lmops = fl->fl_lmops;
266 
267 	locks_copy_private(new, fl);
268 }
269 
270 EXPORT_SYMBOL(locks_copy_lock);
271 
272 static inline int flock_translate_cmd(int cmd) {
273 	if (cmd & LOCK_MAND)
274 		return cmd & (LOCK_MAND | LOCK_RW);
275 	switch (cmd) {
276 	case LOCK_SH:
277 		return F_RDLCK;
278 	case LOCK_EX:
279 		return F_WRLCK;
280 	case LOCK_UN:
281 		return F_UNLCK;
282 	}
283 	return -EINVAL;
284 }
285 
286 /* Fill in a file_lock structure with an appropriate FLOCK lock. */
287 static int flock_make_lock(struct file *filp, struct file_lock **lock,
288 		unsigned int cmd)
289 {
290 	struct file_lock *fl;
291 	int type = flock_translate_cmd(cmd);
292 	if (type < 0)
293 		return type;
294 
295 	fl = locks_alloc_lock();
296 	if (fl == NULL)
297 		return -ENOMEM;
298 
299 	fl->fl_file = filp;
300 	fl->fl_pid = current->tgid;
301 	fl->fl_flags = FL_FLOCK;
302 	fl->fl_type = type;
303 	fl->fl_end = OFFSET_MAX;
304 
305 	*lock = fl;
306 	return 0;
307 }
308 
309 static int assign_type(struct file_lock *fl, int type)
310 {
311 	switch (type) {
312 	case F_RDLCK:
313 	case F_WRLCK:
314 	case F_UNLCK:
315 		fl->fl_type = type;
316 		break;
317 	default:
318 		return -EINVAL;
319 	}
320 	return 0;
321 }
322 
323 /* Verify a "struct flock" and copy it to a "struct file_lock" as a POSIX
324  * style lock.
325  */
326 static int flock_to_posix_lock(struct file *filp, struct file_lock *fl,
327 			       struct flock *l)
328 {
329 	off_t start, end;
330 
331 	switch (l->l_whence) {
332 	case SEEK_SET:
333 		start = 0;
334 		break;
335 	case SEEK_CUR:
336 		start = filp->f_pos;
337 		break;
338 	case SEEK_END:
339 		start = i_size_read(filp->f_path.dentry->d_inode);
340 		break;
341 	default:
342 		return -EINVAL;
343 	}
344 
345 	/* POSIX-1996 leaves the case l->l_len < 0 undefined;
346 	   POSIX-2001 defines it. */
347 	start += l->l_start;
348 	if (start < 0)
349 		return -EINVAL;
350 	fl->fl_end = OFFSET_MAX;
351 	if (l->l_len > 0) {
352 		end = start + l->l_len - 1;
353 		fl->fl_end = end;
354 	} else if (l->l_len < 0) {
355 		end = start - 1;
356 		fl->fl_end = end;
357 		start += l->l_len;
358 		if (start < 0)
359 			return -EINVAL;
360 	}
361 	fl->fl_start = start;	/* we record the absolute position */
362 	if (fl->fl_end < fl->fl_start)
363 		return -EOVERFLOW;
364 
365 	fl->fl_owner = current->files;
366 	fl->fl_pid = current->tgid;
367 	fl->fl_file = filp;
368 	fl->fl_flags = FL_POSIX;
369 	fl->fl_ops = NULL;
370 	fl->fl_lmops = NULL;
371 
372 	return assign_type(fl, l->l_type);
373 }
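
/*
 * Example (userspace sketch, illustrative only; fd is an already-open
 * descriptor): under the POSIX-2001 semantics handled above, a negative
 * l_len locks the |l_len| bytes that end just before l_start.
 * flock_to_posix_lock() turns the request below into the byte range [96, 99]:
 *
 *	struct flock fl = {
 *		.l_type   = F_WRLCK,
 *		.l_whence = SEEK_SET,
 *		.l_start  = 100,
 *		.l_len    = -4,
 *	};
 *	fcntl(fd, F_SETLK, &fl);
 */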
374 
375 #if BITS_PER_LONG == 32
376 static int flock64_to_posix_lock(struct file *filp, struct file_lock *fl,
377 				 struct flock64 *l)
378 {
379 	loff_t start;
380 
381 	switch (l->l_whence) {
382 	case SEEK_SET:
383 		start = 0;
384 		break;
385 	case SEEK_CUR:
386 		start = filp->f_pos;
387 		break;
388 	case SEEK_END:
389 		start = i_size_read(filp->f_path.dentry->d_inode);
390 		break;
391 	default:
392 		return -EINVAL;
393 	}
394 
395 	start += l->l_start;
396 	if (start < 0)
397 		return -EINVAL;
398 	fl->fl_end = OFFSET_MAX;
399 	if (l->l_len > 0) {
400 		fl->fl_end = start + l->l_len - 1;
401 	} else if (l->l_len < 0) {
402 		fl->fl_end = start - 1;
403 		start += l->l_len;
404 		if (start < 0)
405 			return -EINVAL;
406 	}
407 	fl->fl_start = start;	/* we record the absolute position */
408 	if (fl->fl_end < fl->fl_start)
409 		return -EOVERFLOW;
410 
411 	fl->fl_owner = current->files;
412 	fl->fl_pid = current->tgid;
413 	fl->fl_file = filp;
414 	fl->fl_flags = FL_POSIX;
415 	fl->fl_ops = NULL;
416 	fl->fl_lmops = NULL;
417 
418 	switch (l->l_type) {
419 	case F_RDLCK:
420 	case F_WRLCK:
421 	case F_UNLCK:
422 		fl->fl_type = l->l_type;
423 		break;
424 	default:
425 		return -EINVAL;
426 	}
427 
428 	return (0);
429 }
430 #endif
431 
432 /* default lease lock manager operations */
433 static void lease_break_callback(struct file_lock *fl)
434 {
435 	kill_fasync(&fl->fl_fasync, SIGIO, POLL_MSG);
436 }
437 
438 static void lease_release_private_callback(struct file_lock *fl)
439 {
440 	if (!fl->fl_file)
441 		return;
442 
443 	f_delown(fl->fl_file);
444 	fl->fl_file->f_owner.signum = 0;
445 }
446 
447 static const struct lock_manager_operations lease_manager_ops = {
448 	.fl_break = lease_break_callback,
449 	.fl_release_private = lease_release_private_callback,
450 	.fl_change = lease_modify,
451 };
452 
453 /*
454  * Initialize a lease, use the default lock manager operations
455  */
456 static int lease_init(struct file *filp, int type, struct file_lock *fl)
457  {
458 	if (assign_type(fl, type) != 0)
459 		return -EINVAL;
460 
461 	fl->fl_owner = current->files;
462 	fl->fl_pid = current->tgid;
463 
464 	fl->fl_file = filp;
465 	fl->fl_flags = FL_LEASE;
466 	fl->fl_start = 0;
467 	fl->fl_end = OFFSET_MAX;
468 	fl->fl_ops = NULL;
469 	fl->fl_lmops = &lease_manager_ops;
470 	return 0;
471 }
472 
473 /* Allocate a file_lock initialised to this type of lease */
474 static struct file_lock *lease_alloc(struct file *filp, int type)
475 {
476 	struct file_lock *fl = locks_alloc_lock();
477 	int error = -ENOMEM;
478 
479 	if (fl == NULL)
480 		return ERR_PTR(error);
481 
482 	error = lease_init(filp, type, fl);
483 	if (error) {
484 		locks_free_lock(fl);
485 		return ERR_PTR(error);
486 	}
487 	return fl;
488 }
489 
490 /* Check if two locks overlap each other.
491  */
492 static inline int locks_overlap(struct file_lock *fl1, struct file_lock *fl2)
493 {
494 	return ((fl1->fl_end >= fl2->fl_start) &&
495 		(fl2->fl_end >= fl1->fl_start));
496 }
497 
498 /*
499  * Check whether two locks have the same owner.
500  */
501 static int posix_same_owner(struct file_lock *fl1, struct file_lock *fl2)
502 {
503 	if (fl1->fl_lmops && fl1->fl_lmops->fl_compare_owner)
504 		return fl2->fl_lmops == fl1->fl_lmops &&
505 			fl1->fl_lmops->fl_compare_owner(fl1, fl2);
506 	return fl1->fl_owner == fl2->fl_owner;
507 }
508 
509 /* Remove waiter from blocker's block list.
510  * When blocker ends up pointing to itself then the list is empty.
511  */
512 static void __locks_delete_block(struct file_lock *waiter)
513 {
514 	list_del_init(&waiter->fl_block);
515 	list_del_init(&waiter->fl_link);
516 	waiter->fl_next = NULL;
517 }
518 
519 /* Like __locks_delete_block(), but takes the lock via lock_flocks(). */
521 static void locks_delete_block(struct file_lock *waiter)
522 {
523 	lock_flocks();
524 	__locks_delete_block(waiter);
525 	unlock_flocks();
526 }
527 
528 /* Insert waiter into blocker's block list.
529  * We use a circular list so that processes can be easily woken up in
530  * the order they blocked. The documentation doesn't require this but
531  * it seems like the reasonable thing to do.
532  */
533 static void locks_insert_block(struct file_lock *blocker,
534 			       struct file_lock *waiter)
535 {
536 	BUG_ON(!list_empty(&waiter->fl_block));
537 	list_add_tail(&waiter->fl_block, &blocker->fl_block);
538 	waiter->fl_next = blocker;
539 	if (IS_POSIX(blocker))
540 		list_add(&waiter->fl_link, &blocked_list);
541 }
542 
543 /* Wake up processes blocked waiting for blocker.
544  * Removes each waiter from blocker's block list, then either calls the
545  * waiter's fl_notify() callback or wakes the sleeping process directly.
546  */
547 static void locks_wake_up_blocks(struct file_lock *blocker)
548 {
549 	while (!list_empty(&blocker->fl_block)) {
550 		struct file_lock *waiter;
551 
552 		waiter = list_first_entry(&blocker->fl_block,
553 				struct file_lock, fl_block);
554 		__locks_delete_block(waiter);
555 		if (waiter->fl_lmops && waiter->fl_lmops->fl_notify)
556 			waiter->fl_lmops->fl_notify(waiter);
557 		else
558 			wake_up(&waiter->fl_wait);
559 	}
560 }
561 
562 /* Insert file lock fl into an inode's lock list at the position indicated
563  * by pos. At the same time add the lock to the global file lock list.
564  */
565 static void locks_insert_lock(struct file_lock **pos, struct file_lock *fl)
566 {
567 	list_add(&fl->fl_link, &file_lock_list);
568 
569 	fl->fl_nspid = get_pid(task_tgid(current));
570 
571 	/* insert into file's list */
572 	fl->fl_next = *pos;
573 	*pos = fl;
574 }
575 
576 /*
577  * Delete a lock and then free it.
578  * Wake up processes that are blocked waiting for this lock,
579  * notify the FS that the lock has been cleared and
580  * finally free the lock.
581  */
582 static void locks_delete_lock(struct file_lock **thisfl_p)
583 {
584 	struct file_lock *fl = *thisfl_p;
585 
586 	*thisfl_p = fl->fl_next;
587 	fl->fl_next = NULL;
588 	list_del_init(&fl->fl_link);
589 
590 	fasync_helper(0, fl->fl_file, 0, &fl->fl_fasync);
591 	if (fl->fl_fasync != NULL) {
592 		printk(KERN_ERR "locks_delete_lock: fasync == %p\n", fl->fl_fasync);
593 		fl->fl_fasync = NULL;
594 	}
595 
596 	if (fl->fl_nspid) {
597 		put_pid(fl->fl_nspid);
598 		fl->fl_nspid = NULL;
599 	}
600 
601 	locks_wake_up_blocks(fl);
602 	locks_free_lock(fl);
603 }
604 
605 /* Determine if lock sys_fl blocks lock caller_fl. Common functionality
606  * checks for shared/exclusive status of overlapping locks.
607  */
608 static int locks_conflict(struct file_lock *caller_fl, struct file_lock *sys_fl)
609 {
610 	if (sys_fl->fl_type == F_WRLCK)
611 		return 1;
612 	if (caller_fl->fl_type == F_WRLCK)
613 		return 1;
614 	return 0;
615 }
616 
617 /* Determine if lock sys_fl blocks lock caller_fl. POSIX specific
618  * checking before calling the locks_conflict().
619  */
620 static int posix_locks_conflict(struct file_lock *caller_fl, struct file_lock *sys_fl)
621 {
622 	/* POSIX locks owned by the same process do not conflict with
623 	 * each other.
624 	 */
625 	if (!IS_POSIX(sys_fl) || posix_same_owner(caller_fl, sys_fl))
626 		return (0);
627 
628 	/* Check whether they overlap */
629 	if (!locks_overlap(caller_fl, sys_fl))
630 		return 0;
631 
632 	return (locks_conflict(caller_fl, sys_fl));
633 }
634 
635 /* Determine if lock sys_fl blocks lock caller_fl. FLOCK specific
636  * checking before calling the locks_conflict().
637  */
638 static int flock_locks_conflict(struct file_lock *caller_fl, struct file_lock *sys_fl)
639 {
640 	/* FLOCK locks referring to the same filp do not conflict with
641 	 * each other.
642 	 */
643 	if (!IS_FLOCK(sys_fl) || (caller_fl->fl_file == sys_fl->fl_file))
644 		return (0);
645 	if ((caller_fl->fl_type & LOCK_MAND) || (sys_fl->fl_type & LOCK_MAND))
646 		return 0;
647 
648 	return (locks_conflict(caller_fl, sys_fl));
649 }
650 
651 void
652 posix_test_lock(struct file *filp, struct file_lock *fl)
653 {
654 	struct file_lock *cfl;
655 
656 	lock_flocks();
657 	for (cfl = filp->f_path.dentry->d_inode->i_flock; cfl; cfl = cfl->fl_next) {
658 		if (!IS_POSIX(cfl))
659 			continue;
660 		if (posix_locks_conflict(fl, cfl))
661 			break;
662 	}
663 	if (cfl) {
664 		__locks_copy_lock(fl, cfl);
665 		if (cfl->fl_nspid)
666 			fl->fl_pid = pid_vnr(cfl->fl_nspid);
667 	} else
668 		fl->fl_type = F_UNLCK;
669 	unlock_flocks();
670 	return;
671 }
672 EXPORT_SYMBOL(posix_test_lock);
673 
674 /*
675  * Deadlock detection:
676  *
677  * We attempt to detect deadlocks that are due purely to posix file
678  * locks.
679  *
680  * We assume that a task can be waiting for at most one lock at a time.
681  * So for any acquired lock, the process holding that lock may be
682  * waiting on at most one other lock.  That lock in turns may be held by
683  * someone waiting for at most one other lock.  Given a requested lock
684  * caller_fl which is about to wait for a conflicting lock block_fl, we
685  * follow this chain of waiters to ensure we are not about to create a
686  * cycle.
687  *
688  * Since we do this before we ever put a process to sleep on a lock, we
689  * are ensured that there is never a cycle; that is what guarantees that
690  * the while() loop in posix_locks_deadlock() eventually completes.
691  *
692  * Note: the above assumption may not be true when handling lock
693  * requests from a broken NFS client. It may also fail in the presence
694  * of tasks (such as posix threads) sharing the same open file table.
695  *
696  * To handle those cases, we just bail out after a few iterations.
697  */
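
/*
 * Worked example (hypothetical, userspace view of the algorithm above):
 *
 *	process A:  write-locks bytes  0-9
 *	process B:  write-locks bytes 10-19
 *	process A:  requests 10-19 with F_SETLKW, blocks on B
 *	process B:  requests  0-9  with F_SETLKW, fails with -EDEADLK
 *
 * For B's last request the conflicting lock is A's 0-9; following the
 * blocked_list shows that A is itself waiting on B's 10-19, whose owner
 * is B, so posix_locks_deadlock() reports a cycle.
 */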
698 
699 #define MAX_DEADLK_ITERATIONS 10
700 
701 /* Find a lock that the owner of the given block_fl is blocking on. */
702 static struct file_lock *what_owner_is_waiting_for(struct file_lock *block_fl)
703 {
704 	struct file_lock *fl;
705 
706 	list_for_each_entry(fl, &blocked_list, fl_link) {
707 		if (posix_same_owner(fl, block_fl))
708 			return fl->fl_next;
709 	}
710 	return NULL;
711 }
712 
713 static int posix_locks_deadlock(struct file_lock *caller_fl,
714 				struct file_lock *block_fl)
715 {
716 	int i = 0;
717 
718 	while ((block_fl = what_owner_is_waiting_for(block_fl))) {
719 		if (i++ > MAX_DEADLK_ITERATIONS)
720 			return 0;
721 		if (posix_same_owner(caller_fl, block_fl))
722 			return 1;
723 	}
724 	return 0;
725 }
726 
727 /* Try to create a FLOCK lock on filp. We always insert new FLOCK locks
728  * after any leases, but before any posix locks.
729  *
730  * Note that if called with an FL_EXISTS argument, the caller may determine
731  * whether or not a lock was successfully freed by testing the return
732  * value for -ENOENT.
733  */
734 static int flock_lock_file(struct file *filp, struct file_lock *request)
735 {
736 	struct file_lock *new_fl = NULL;
737 	struct file_lock **before;
738 	struct inode * inode = filp->f_path.dentry->d_inode;
739 	int error = 0;
740 	int found = 0;
741 
742 	if (!(request->fl_flags & FL_ACCESS) && (request->fl_type != F_UNLCK)) {
743 		new_fl = locks_alloc_lock();
744 		if (!new_fl)
745 			return -ENOMEM;
746 	}
747 
748 	lock_flocks();
749 	if (request->fl_flags & FL_ACCESS)
750 		goto find_conflict;
751 
752 	for_each_lock(inode, before) {
753 		struct file_lock *fl = *before;
754 		if (IS_POSIX(fl))
755 			break;
756 		if (IS_LEASE(fl))
757 			continue;
758 		if (filp != fl->fl_file)
759 			continue;
760 		if (request->fl_type == fl->fl_type)
761 			goto out;
762 		found = 1;
763 		locks_delete_lock(before);
764 		break;
765 	}
766 
767 	if (request->fl_type == F_UNLCK) {
768 		if ((request->fl_flags & FL_EXISTS) && !found)
769 			error = -ENOENT;
770 		goto out;
771 	}
772 
773 	/*
774 	 * If a higher-priority process was blocked on the old file lock,
775 	 * give it the opportunity to lock the file.
776 	 */
777 	if (found) {
778 		unlock_flocks();
779 		cond_resched();
780 		lock_flocks();
781 	}
782 
783 find_conflict:
784 	for_each_lock(inode, before) {
785 		struct file_lock *fl = *before;
786 		if (IS_POSIX(fl))
787 			break;
788 		if (IS_LEASE(fl))
789 			continue;
790 		if (!flock_locks_conflict(request, fl))
791 			continue;
792 		error = -EAGAIN;
793 		if (!(request->fl_flags & FL_SLEEP))
794 			goto out;
795 		error = FILE_LOCK_DEFERRED;
796 		locks_insert_block(fl, request);
797 		goto out;
798 	}
799 	if (request->fl_flags & FL_ACCESS)
800 		goto out;
801 	locks_copy_lock(new_fl, request);
802 	locks_insert_lock(before, new_fl);
803 	new_fl = NULL;
804 	error = 0;
805 
806 out:
807 	unlock_flocks();
808 	if (new_fl)
809 		locks_free_lock(new_fl);
810 	return error;
811 }
812 
813 static int __posix_lock_file(struct inode *inode, struct file_lock *request, struct file_lock *conflock)
814 {
815 	struct file_lock *fl;
816 	struct file_lock *new_fl = NULL;
817 	struct file_lock *new_fl2 = NULL;
818 	struct file_lock *left = NULL;
819 	struct file_lock *right = NULL;
820 	struct file_lock **before;
821 	int error, added = 0;
822 
823 	/*
824 	 * We may need two file_lock structures for this operation,
825 	 * so we get them in advance to avoid races.
826 	 *
827 	 * In some cases we can be sure, that no new locks will be needed
828 	 */
829 	if (!(request->fl_flags & FL_ACCESS) &&
830 	    (request->fl_type != F_UNLCK ||
831 	     request->fl_start != 0 || request->fl_end != OFFSET_MAX)) {
832 		new_fl = locks_alloc_lock();
833 		new_fl2 = locks_alloc_lock();
834 	}
835 
836 	lock_flocks();
837 	if (request->fl_type != F_UNLCK) {
838 		for_each_lock(inode, before) {
839 			fl = *before;
840 			if (!IS_POSIX(fl))
841 				continue;
842 			if (!posix_locks_conflict(request, fl))
843 				continue;
844 			if (conflock)
845 				__locks_copy_lock(conflock, fl);
846 			error = -EAGAIN;
847 			if (!(request->fl_flags & FL_SLEEP))
848 				goto out;
849 			error = -EDEADLK;
850 			if (posix_locks_deadlock(request, fl))
851 				goto out;
852 			error = FILE_LOCK_DEFERRED;
853 			locks_insert_block(fl, request);
854 			goto out;
855   		}
856   	}
857 
858 	/* If we're just looking for a conflict, we're done. */
859 	error = 0;
860 	if (request->fl_flags & FL_ACCESS)
861 		goto out;
862 
863 	/*
864 	 * Find the first old lock with the same owner as the new lock.
865 	 */
866 
867 	before = &inode->i_flock;
868 
869 	/* First skip locks owned by other processes.  */
870 	while ((fl = *before) && (!IS_POSIX(fl) ||
871 				  !posix_same_owner(request, fl))) {
872 		before = &fl->fl_next;
873 	}
874 
875 	/* Process locks with this owner.  */
876 	while ((fl = *before) && posix_same_owner(request, fl)) {
877 		/* Detect adjacent or overlapping regions (if same lock type)
878 		 */
879 		if (request->fl_type == fl->fl_type) {
880 			/* In all comparisons of start vs end, use
881 			 * "start - 1" rather than "end + 1". If end
882 			 * is OFFSET_MAX, end + 1 will become negative.
883 			 */
884 			if (fl->fl_end < request->fl_start - 1)
885 				goto next_lock;
886 			/* If the next lock in the list has entirely bigger
887 			 * addresses than the new one, insert the lock here.
888 			 */
889 			if (fl->fl_start - 1 > request->fl_end)
890 				break;
891 
892 			/* If we come here, the new and old lock are of the
893 			 * same type and adjacent or overlapping. Make one
894 			 * lock yielding from the lower start address of both
895 			 * locks to the higher end address.
896 			 */
897 			if (fl->fl_start > request->fl_start)
898 				fl->fl_start = request->fl_start;
899 			else
900 				request->fl_start = fl->fl_start;
901 			if (fl->fl_end < request->fl_end)
902 				fl->fl_end = request->fl_end;
903 			else
904 				request->fl_end = fl->fl_end;
905 			if (added) {
906 				locks_delete_lock(before);
907 				continue;
908 			}
909 			request = fl;
910 			added = 1;
911 		}
912 		else {
913 			/* Processing for different lock types is a bit
914 			 * more complex.
915 			 */
916 			if (fl->fl_end < request->fl_start)
917 				goto next_lock;
918 			if (fl->fl_start > request->fl_end)
919 				break;
920 			if (request->fl_type == F_UNLCK)
921 				added = 1;
922 			if (fl->fl_start < request->fl_start)
923 				left = fl;
924 			/* If the next lock in the list has a higher end
925 			 * address than the new one, insert the new one here.
926 			 */
927 			if (fl->fl_end > request->fl_end) {
928 				right = fl;
929 				break;
930 			}
931 			if (fl->fl_start >= request->fl_start) {
932 				/* The new lock completely replaces an old
933 				 * one (This may happen several times).
934 				 */
935 				if (added) {
936 					locks_delete_lock(before);
937 					continue;
938 				}
939 				/* Replace the old lock with the new one.
940 				 * Wake up anybody waiting for the old one,
941 				 * as the change in lock type might satisfy
942 				 * their needs.
943 				 */
944 				locks_wake_up_blocks(fl);
945 				fl->fl_start = request->fl_start;
946 				fl->fl_end = request->fl_end;
947 				fl->fl_type = request->fl_type;
948 				locks_release_private(fl);
949 				locks_copy_private(fl, request);
950 				request = fl;
951 				added = 1;
952 			}
953 		}
954 		/* Go on to next lock.
955 		 */
956 	next_lock:
957 		before = &fl->fl_next;
958 	}
959 
960 	/*
961 	 * The above code only modifies existing locks in case of
962 	 * merging or replacing.  If new lock(s) need to be inserted,
963 	 * all modifications are done below this point, so it is still
964 	 * safe to bail out.
965 	 */
966 	error = -ENOLCK; /* "no luck" */
967 	if (right && left == right && !new_fl2)
968 		goto out;
969 
970 	error = 0;
971 	if (!added) {
972 		if (request->fl_type == F_UNLCK) {
973 			if (request->fl_flags & FL_EXISTS)
974 				error = -ENOENT;
975 			goto out;
976 		}
977 
978 		if (!new_fl) {
979 			error = -ENOLCK;
980 			goto out;
981 		}
982 		locks_copy_lock(new_fl, request);
983 		locks_insert_lock(before, new_fl);
984 		new_fl = NULL;
985 	}
986 	if (right) {
987 		if (left == right) {
988 			/* The new lock breaks the old one in two pieces,
989 			 * so we have to use the second new lock.
990 			 */
991 			left = new_fl2;
992 			new_fl2 = NULL;
993 			locks_copy_lock(left, right);
994 			locks_insert_lock(before, left);
995 		}
996 		right->fl_start = request->fl_end + 1;
997 		locks_wake_up_blocks(right);
998 	}
999 	if (left) {
1000 		left->fl_end = request->fl_start - 1;
1001 		locks_wake_up_blocks(left);
1002 	}
1003  out:
1004 	unlock_flocks();
1005 	/*
1006 	 * Free any unused locks.
1007 	 */
1008 	if (new_fl)
1009 		locks_free_lock(new_fl);
1010 	if (new_fl2)
1011 		locks_free_lock(new_fl2);
1012 	return error;
1013 }
1014 
1015 /**
1016  * posix_lock_file - Apply a POSIX-style lock to a file
1017  * @filp: The file to apply the lock to
1018  * @fl: The lock to be applied
1019  * @conflock: Place to return a copy of the conflicting lock, if found.
1020  *
1021  * Add a POSIX style lock to a file.
1022  * We merge adjacent & overlapping locks whenever possible.
1023  * POSIX locks are sorted by owner task, then by starting address
1024  *
1025  * Note that if called with an FL_EXISTS argument, the caller may determine
1026  * whether or not a lock was successfully freed by testing the return
1027  * value for -ENOENT.
1028  */
1029 int posix_lock_file(struct file *filp, struct file_lock *fl,
1030 			struct file_lock *conflock)
1031 {
1032 	return __posix_lock_file(filp->f_path.dentry->d_inode, fl, conflock);
1033 }
1034 EXPORT_SYMBOL(posix_lock_file);
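
/*
 * Illustration of the merge/split behaviour (hypothetical sequence by a
 * single owner): write-locking bytes 0-99 and then 50-199 leaves one lock
 * covering 0-199; a later F_UNLCK of 80-119 splits it into 0-79 and
 * 120-199, which is the case the second pre-allocated lock (new_fl2 in
 * __posix_lock_file()) exists for.
 */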
1035 
1036 /**
1037  * posix_lock_file_wait - Apply a POSIX-style lock to a file
1038  * @filp: The file to apply the lock to
1039  * @fl: The lock to be applied
1040  *
1041  * Add a POSIX style lock to a file.
1042  * We merge adjacent & overlapping locks whenever possible.
1043  * POSIX locks are sorted by owner task, then by starting address
1044  */
1045 int posix_lock_file_wait(struct file *filp, struct file_lock *fl)
1046 {
1047 	int error;
1048 	might_sleep();
1049 	for (;;) {
1050 		error = posix_lock_file(filp, fl, NULL);
1051 		if (error != FILE_LOCK_DEFERRED)
1052 			break;
1053 		error = wait_event_interruptible(fl->fl_wait, !fl->fl_next);
1054 		if (!error)
1055 			continue;
1056 
1057 		locks_delete_block(fl);
1058 		break;
1059 	}
1060 	return error;
1061 }
1062 EXPORT_SYMBOL(posix_lock_file_wait);
1063 
1064 /**
1065  * locks_mandatory_locked - Check for an active lock
1066  * @inode: the file to check
1067  *
1068  * Searches the inode's list of locks to find any POSIX locks which conflict.
1069  * This function is called from locks_verify_locked() only.
1070  */
1071 int locks_mandatory_locked(struct inode *inode)
1072 {
1073 	fl_owner_t owner = current->files;
1074 	struct file_lock *fl;
1075 
1076 	/*
1077 	 * Search the lock list for this inode for any POSIX locks.
1078 	 */
1079 	lock_flocks();
1080 	for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next) {
1081 		if (!IS_POSIX(fl))
1082 			continue;
1083 		if (fl->fl_owner != owner)
1084 			break;
1085 	}
1086 	unlock_flocks();
1087 	return fl ? -EAGAIN : 0;
1088 }
1089 
1090 /**
1091  * locks_mandatory_area - Check for a conflicting lock
1092  * @read_write: %FLOCK_VERIFY_WRITE for exclusive access, %FLOCK_VERIFY_READ
1093  *		for shared
1094  * @inode:      the file to check
1095  * @filp:       how the file was opened (if it was)
1096  * @offset:     start of area to check
1097  * @count:      length of area to check
1098  *
1099  * Searches the inode's list of locks to find any POSIX locks which conflict.
1100  * This function is called from rw_verify_area() and
1101  * locks_verify_truncate().
1102  */
1103 int locks_mandatory_area(int read_write, struct inode *inode,
1104 			 struct file *filp, loff_t offset,
1105 			 size_t count)
1106 {
1107 	struct file_lock fl;
1108 	int error;
1109 
1110 	locks_init_lock(&fl);
1111 	fl.fl_owner = current->files;
1112 	fl.fl_pid = current->tgid;
1113 	fl.fl_file = filp;
1114 	fl.fl_flags = FL_POSIX | FL_ACCESS;
1115 	if (filp && !(filp->f_flags & O_NONBLOCK))
1116 		fl.fl_flags |= FL_SLEEP;
1117 	fl.fl_type = (read_write == FLOCK_VERIFY_WRITE) ? F_WRLCK : F_RDLCK;
1118 	fl.fl_start = offset;
1119 	fl.fl_end = offset + count - 1;
1120 
1121 	for (;;) {
1122 		error = __posix_lock_file(inode, &fl, NULL);
1123 		if (error != FILE_LOCK_DEFERRED)
1124 			break;
1125 		error = wait_event_interruptible(fl.fl_wait, !fl.fl_next);
1126 		if (!error) {
1127 			/*
1128 			 * If we've been sleeping someone might have
1129 			 * changed the permissions behind our back.
1130 			 */
1131 			if (__mandatory_lock(inode))
1132 				continue;
1133 		}
1134 
1135 		locks_delete_block(&fl);
1136 		break;
1137 	}
1138 
1139 	return error;
1140 }
1141 
1142 EXPORT_SYMBOL(locks_mandatory_area);
1143 
1144 /* We already had a lease on this file; just change its type */
1145 int lease_modify(struct file_lock **before, int arg)
1146 {
1147 	struct file_lock *fl = *before;
1148 	int error = assign_type(fl, arg);
1149 
1150 	if (error)
1151 		return error;
1152 	locks_wake_up_blocks(fl);
1153 	if (arg == F_UNLCK)
1154 		locks_delete_lock(before);
1155 	return 0;
1156 }
1157 
1158 EXPORT_SYMBOL(lease_modify);
1159 
1160 static void time_out_leases(struct inode *inode)
1161 {
1162 	struct file_lock **before;
1163 	struct file_lock *fl;
1164 
1165 	before = &inode->i_flock;
1166 	while ((fl = *before) && IS_LEASE(fl) && (fl->fl_type & F_INPROGRESS)) {
1167 		if ((fl->fl_break_time == 0)
1168 				|| time_before(jiffies, fl->fl_break_time)) {
1169 			before = &fl->fl_next;
1170 			continue;
1171 		}
1172 		lease_modify(before, fl->fl_type & ~F_INPROGRESS);
1173 		if (fl == *before)	/* lease_modify may have freed fl */
1174 			before = &fl->fl_next;
1175 	}
1176 }
1177 
1178 /**
1179  *	__break_lease	-	revoke all outstanding leases on file
1180  *	@inode: the inode of the file to return
1181  *	@mode: the open mode (read or write)
1182  *
1183  *	break_lease (inlined for speed) has checked there already is at least
1184  *	some kind of lock (maybe a lease) on this file.  Leases are broken on
1185  *	a call to open() or truncate().  This function can sleep unless you
1186  *	specified %O_NONBLOCK to your open().
1187  */
1188 int __break_lease(struct inode *inode, unsigned int mode)
1189 {
1190 	int error = 0, future;
1191 	struct file_lock *new_fl, *flock;
1192 	struct file_lock *fl;
1193 	unsigned long break_time;
1194 	int i_have_this_lease = 0;
1195 	int want_write = (mode & O_ACCMODE) != O_RDONLY;
1196 
1197 	new_fl = lease_alloc(NULL, want_write ? F_WRLCK : F_RDLCK);
1198 
1199 	lock_flocks();
1200 
1201 	time_out_leases(inode);
1202 
1203 	flock = inode->i_flock;
1204 	if ((flock == NULL) || !IS_LEASE(flock))
1205 		goto out;
1206 
1207 	for (fl = flock; fl && IS_LEASE(fl); fl = fl->fl_next)
1208 		if (fl->fl_owner == current->files)
1209 			i_have_this_lease = 1;
1210 
1211 	if (want_write) {
1212 		/* If we want write access, we have to revoke any lease. */
1213 		future = F_UNLCK | F_INPROGRESS;
1214 	} else if (flock->fl_type & F_INPROGRESS) {
1215 		/* If the lease is already being broken, we just leave it */
1216 		future = flock->fl_type;
1217 	} else if (flock->fl_type & F_WRLCK) {
1218 		/* Downgrade the exclusive lease to a read-only lease. */
1219 		future = F_RDLCK | F_INPROGRESS;
1220 	} else {
1221 		/* the existing lease was read-only, so we can read too. */
1222 		goto out;
1223 	}
1224 
1225 	if (IS_ERR(new_fl) && !i_have_this_lease
1226 			&& ((mode & O_NONBLOCK) == 0)) {
1227 		error = PTR_ERR(new_fl);
1228 		goto out;
1229 	}
1230 
1231 	break_time = 0;
1232 	if (lease_break_time > 0) {
1233 		break_time = jiffies + lease_break_time * HZ;
1234 		if (break_time == 0)
1235 			break_time++;	/* so that 0 means no break time */
1236 	}
1237 
1238 	for (fl = flock; fl && IS_LEASE(fl); fl = fl->fl_next) {
1239 		if (fl->fl_type != future) {
1240 			fl->fl_type = future;
1241 			fl->fl_break_time = break_time;
1242 			/* lease must have lmops break callback */
1243 			fl->fl_lmops->fl_break(fl);
1244 		}
1245 	}
1246 
1247 	if (i_have_this_lease || (mode & O_NONBLOCK)) {
1248 		error = -EWOULDBLOCK;
1249 		goto out;
1250 	}
1251 
1252 restart:
1253 	break_time = flock->fl_break_time;
1254 	if (break_time != 0) {
1255 		break_time -= jiffies;
1256 		if (break_time == 0)
1257 			break_time++;
1258 	}
1259 	locks_insert_block(flock, new_fl);
1260 	unlock_flocks();
1261 	error = wait_event_interruptible_timeout(new_fl->fl_wait,
1262 						!new_fl->fl_next, break_time);
1263 	lock_flocks();
1264 	__locks_delete_block(new_fl);
1265 	if (error >= 0) {
1266 		if (error == 0)
1267 			time_out_leases(inode);
1268 		/* Wait for the next lease that has not been broken yet */
1269 		for (flock = inode->i_flock; flock && IS_LEASE(flock);
1270 				flock = flock->fl_next) {
1271 			if (flock->fl_type & F_INPROGRESS)
1272 				goto restart;
1273 		}
1274 		error = 0;
1275 	}
1276 
1277 out:
1278 	unlock_flocks();
1279 	if (!IS_ERR(new_fl))
1280 		locks_free_lock(new_fl);
1281 	return error;
1282 }
1283 
1284 EXPORT_SYMBOL(__break_lease);
1285 
1286 /**
1287  *	lease_get_mtime - get the last modified time of an inode
1288  *	@inode: the inode
1289  *      @time:  pointer to a timespec which will contain the last modified time
1290  *
1291  * This is to force NFS clients to flush their caches for files with
1292  * exclusive leases.  The justification is that if someone has an
1293  * exclusive lease, then they could be modifying it.
1294  */
1295 void lease_get_mtime(struct inode *inode, struct timespec *time)
1296 {
1297 	struct file_lock *flock = inode->i_flock;
1298 	if (flock && IS_LEASE(flock) && (flock->fl_type & F_WRLCK))
1299 		*time = current_fs_time(inode->i_sb);
1300 	else
1301 		*time = inode->i_mtime;
1302 }
1303 
1304 EXPORT_SYMBOL(lease_get_mtime);
1305 
1306 /**
1307  *	fcntl_getlease - Enquire what lease is currently active
1308  *	@filp: the file
1309  *
1310  *	The value returned by this function will be one of
1311  *	(if no lease break is pending):
1312  *
1313  *	%F_RDLCK to indicate a shared lease is held.
1314  *
1315  *	%F_WRLCK to indicate an exclusive lease is held.
1316  *
1317  *	%F_UNLCK to indicate no lease is held.
1318  *
1319  *	(if a lease break is pending):
1320  *
1321  *	%F_RDLCK to indicate an exclusive lease needs to be
1322  *		changed to a shared lease (or removed).
1323  *
1324  *	%F_UNLCK to indicate the lease needs to be removed.
1325  *
1326  *	XXX: sfr & willy disagree over whether F_INPROGRESS
1327  *	should be returned to userspace.
1328  */
1329 int fcntl_getlease(struct file *filp)
1330 {
1331 	struct file_lock *fl;
1332 	int type = F_UNLCK;
1333 
1334 	lock_flocks();
1335 	time_out_leases(filp->f_path.dentry->d_inode);
1336 	for (fl = filp->f_path.dentry->d_inode->i_flock; fl && IS_LEASE(fl);
1337 			fl = fl->fl_next) {
1338 		if (fl->fl_file == filp) {
1339 			type = fl->fl_type & ~F_INPROGRESS;
1340 			break;
1341 		}
1342 	}
1343 	unlock_flocks();
1344 	return type;
1345 }
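
/*
 * Userspace sketch (illustrative only; fd is an already-open descriptor):
 * the lease currently held on an open file can be queried with
 *
 *	int type = fcntl(fd, F_GETLEASE);
 *
 * which returns F_RDLCK, F_WRLCK or F_UNLCK as described above.
 */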
1346 
1347 /**
1348  *	generic_setlease	-	sets a lease on an open file
1349  *	@filp: file pointer
1350  *	@arg: type of lease to obtain
1351  *	@flp: input - file_lock to use, output - file_lock inserted
1352  *
1353  *	The (input) flp->fl_lmops->fl_break function is required
1354  *	by break_lease().
1355  *
1356  *	Called with file_lock_lock held.
1357  */
1358 int generic_setlease(struct file *filp, long arg, struct file_lock **flp)
1359 {
1360 	struct file_lock *fl, **before, **my_before = NULL, *lease;
1361 	struct dentry *dentry = filp->f_path.dentry;
1362 	struct inode *inode = dentry->d_inode;
1363 	int error, rdlease_count = 0, wrlease_count = 0;
1364 
1365 	lease = *flp;
1366 
1367 	error = -EACCES;
1368 	if ((current_fsuid() != inode->i_uid) && !capable(CAP_LEASE))
1369 		goto out;
1370 	error = -EINVAL;
1371 	if (!S_ISREG(inode->i_mode))
1372 		goto out;
1373 	error = security_file_lock(filp, arg);
1374 	if (error)
1375 		goto out;
1376 
1377 	time_out_leases(inode);
1378 
1379 	BUG_ON(!(*flp)->fl_lmops->fl_break);
1380 
1381 	if (arg != F_UNLCK) {
1382 		error = -EAGAIN;
1383 		if ((arg == F_RDLCK) && (atomic_read(&inode->i_writecount) > 0))
1384 			goto out;
1385 		if ((arg == F_WRLCK)
1386 		    && ((dentry->d_count > 1)
1387 			|| (atomic_read(&inode->i_count) > 1)))
1388 			goto out;
1389 	}
1390 
1391 	/*
1392 	 * At this point, we know that if there is an exclusive
1393 	 * lease on this file, then we hold it on this filp
1394 	 * (otherwise our open of this file would have blocked).
1395 	 * And if we are trying to acquire an exclusive lease,
1396 	 * then the file is not open by anyone (including us)
1397 	 * except for this filp.
1398 	 */
1399 	for (before = &inode->i_flock;
1400 			((fl = *before) != NULL) && IS_LEASE(fl);
1401 			before = &fl->fl_next) {
1402 		if (fl->fl_file == filp)
1403 			my_before = before;
1404 		else if (fl->fl_type == (F_INPROGRESS | F_UNLCK))
1405 			/*
1406 			 * Someone is in the process of opening this
1407 			 * file for writing so we may not take an
1408 			 * exclusive lease on it.
1409 			 */
1410 			wrlease_count++;
1411 		else
1412 			rdlease_count++;
1413 	}
1414 
1415 	error = -EAGAIN;
1416 	if ((arg == F_RDLCK && (wrlease_count > 0)) ||
1417 	    (arg == F_WRLCK && ((rdlease_count + wrlease_count) > 0)))
1418 		goto out;
1419 
1420 	if (my_before != NULL) {
1421 		error = lease->fl_lmops->fl_change(my_before, arg);
1422 		if (!error)
1423 			*flp = *my_before;
1424 		goto out;
1425 	}
1426 
1427 	if (arg == F_UNLCK)
1428 		goto out;
1429 
1430 	error = -EINVAL;
1431 	if (!leases_enable)
1432 		goto out;
1433 
1434 	locks_insert_lock(before, lease);
1435 	return 0;
1436 
1437 out:
1438 	return error;
1439 }
1440 EXPORT_SYMBOL(generic_setlease);
1441 
1442 static int __vfs_setlease(struct file *filp, long arg, struct file_lock **lease)
1443 {
1444 	if (filp->f_op && filp->f_op->setlease)
1445 		return filp->f_op->setlease(filp, arg, lease);
1446 	else
1447 		return generic_setlease(filp, arg, lease);
1448 }
1449 
1450 /**
1451  *	vfs_setlease        -       sets a lease on an open file
1452  *	@filp: file pointer
1453  *	@arg: type of lease to obtain
1454  *	@lease: file_lock to use
1455  *
1456  *	Call this to establish a lease on the file.
1457  *	The (*lease)->fl_lmops->fl_break operation must be set; if not,
1458  *	break_lease will oops!
1459  *
1460  *	This will call the filesystem's setlease file method, if
1461  *	defined.  Note that there is no getlease method; instead, the
1462  *	filesystem setlease method should call back to setlease() to
1463  *	add a lease to the inode's lease list, where fcntl_getlease() can
1464  *	find it.  Since fcntl_getlease() only reports whether the current
1465  *	task holds a lease, a cluster filesystem need only do this for
1466  *	leases held by processes on this node.
1467  *
1468  *	There is also no break_lease method; filesystems that
1469  *	handle their own leases should break leases themselves from the
1470  *	filesystem's open, create, and (on truncate) setattr methods.
1471  *
1472  *	Warning: the only current setlease methods exist only to disable
1473  *	leases in certain cases.  More vfs changes may be required to
1474  *	allow a full filesystem lease implementation.
1475  */
1476 
1477 int vfs_setlease(struct file *filp, long arg, struct file_lock **lease)
1478 {
1479 	int error;
1480 
1481 	lock_flocks();
1482 	error = __vfs_setlease(filp, arg, lease);
1483 	unlock_flocks();
1484 
1485 	return error;
1486 }
1487 EXPORT_SYMBOL_GPL(vfs_setlease);
1488 
1489 static int do_fcntl_delete_lease(struct file *filp)
1490 {
1491 	struct file_lock fl, *flp = &fl;
1492 
1493 	lease_init(filp, F_UNLCK, flp);
1494 
1495 	return vfs_setlease(filp, F_UNLCK, &flp);
1496 }
1497 
1498 static int do_fcntl_add_lease(unsigned int fd, struct file *filp, long arg)
1499 {
1500 	struct file_lock *fl, *ret;
1501 	struct fasync_struct *new;
1502 	int error;
1503 
1504 	fl = lease_alloc(filp, arg);
1505 	if (IS_ERR(fl))
1506 		return PTR_ERR(fl);
1507 
1508 	new = fasync_alloc();
1509 	if (!new) {
1510 		locks_free_lock(fl);
1511 		return -ENOMEM;
1512 	}
1513 	ret = fl;
1514 	lock_flocks();
1515 	error = __vfs_setlease(filp, arg, &ret);
1516 	if (error) {
1517 		unlock_flocks();
1518 		locks_free_lock(fl);
1519 		goto out_free_fasync;
1520 	}
1521 	if (ret != fl)
1522 		locks_free_lock(fl);
1523 
1524 	/*
1525 	 * fasync_insert_entry() returns the old entry if any.
1526 	 * If there was no old entry, then it used 'new' and
1527 	 * inserted it into the fasync list. Clear new so that
1528 	 * we don't release it here.
1529 	 */
1530 	if (!fasync_insert_entry(fd, filp, &ret->fl_fasync, new))
1531 		new = NULL;
1532 
1533 	error = __f_setown(filp, task_pid(current), PIDTYPE_PID, 0);
1534 	unlock_flocks();
1535 
1536 out_free_fasync:
1537 	if (new)
1538 		fasync_free(new);
1539 	return error;
1540 }
1541 
1542 /**
1543  *	fcntl_setlease	-	sets a lease on an open file
1544  *	@fd: open file descriptor
1545  *	@filp: file pointer
1546  *	@arg: type of lease to obtain
1547  *
1548  *	Call this fcntl to establish a lease on the file.
1549  *	Note that you also need to call %F_SETSIG to
1550  *	receive a signal when the lease is broken.
1551  */
1552 int fcntl_setlease(unsigned int fd, struct file *filp, long arg)
1553 {
1554 	if (arg == F_UNLCK)
1555 		return do_fcntl_delete_lease(filp);
1556 	return do_fcntl_add_lease(fd, filp, arg);
1557 }
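
/*
 * Userspace sketch (illustrative, not part of this file): taking a read
 * lease and arranging for a real-time signal when it is broken, as the
 * comment above suggests:
 *
 *	fcntl(fd, F_SETSIG, SIGRTMIN);
 *	if (fcntl(fd, F_SETLEASE, F_RDLCK) == -1)
 *		perror("F_SETLEASE");
 *	... on delivery of SIGRTMIN: stop using the file, then ...
 *	fcntl(fd, F_SETLEASE, F_UNLCK);
 */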
1558 
1559 /**
1560  * flock_lock_file_wait - Apply a FLOCK-style lock to a file
1561  * @filp: The file to apply the lock to
1562  * @fl: The lock to be applied
1563  *
1564  * Add a FLOCK style lock to a file.
1565  */
1566 int flock_lock_file_wait(struct file *filp, struct file_lock *fl)
1567 {
1568 	int error;
1569 	might_sleep();
1570 	for (;;) {
1571 		error = flock_lock_file(filp, fl);
1572 		if (error != FILE_LOCK_DEFERRED)
1573 			break;
1574 		error = wait_event_interruptible(fl->fl_wait, !fl->fl_next);
1575 		if (!error)
1576 			continue;
1577 
1578 		locks_delete_block(fl);
1579 		break;
1580 	}
1581 	return error;
1582 }
1583 
1584 EXPORT_SYMBOL(flock_lock_file_wait);
1585 
1586 /**
1587  *	sys_flock: - flock() system call.
1588  *	@fd: the file descriptor to lock.
1589  *	@cmd: the type of lock to apply.
1590  *
1591  *	Apply a %FL_FLOCK style lock to an open file descriptor.
1592  *	The @cmd can be one of
1593  *
1594  *	%LOCK_SH -- a shared lock.
1595  *
1596  *	%LOCK_EX -- an exclusive lock.
1597  *
1598  *	%LOCK_UN -- remove an existing lock.
1599  *
1600  *	%LOCK_MAND -- a `mandatory' flock.  This exists to emulate Windows Share Modes.
1601  *
1602  *	%LOCK_MAND can be combined with %LOCK_READ or %LOCK_WRITE to allow other
1603  *	processes read and write access respectively.
1604  */
1605 SYSCALL_DEFINE2(flock, unsigned int, fd, unsigned int, cmd)
1606 {
1607 	struct file *filp;
1608 	struct file_lock *lock;
1609 	int can_sleep, unlock;
1610 	int error;
1611 
1612 	error = -EBADF;
1613 	filp = fget(fd);
1614 	if (!filp)
1615 		goto out;
1616 
1617 	can_sleep = !(cmd & LOCK_NB);
1618 	cmd &= ~LOCK_NB;
1619 	unlock = (cmd == LOCK_UN);
1620 
1621 	if (!unlock && !(cmd & LOCK_MAND) &&
1622 	    !(filp->f_mode & (FMODE_READ|FMODE_WRITE)))
1623 		goto out_putf;
1624 
1625 	error = flock_make_lock(filp, &lock, cmd);
1626 	if (error)
1627 		goto out_putf;
1628 	if (can_sleep)
1629 		lock->fl_flags |= FL_SLEEP;
1630 
1631 	error = security_file_lock(filp, lock->fl_type);
1632 	if (error)
1633 		goto out_free;
1634 
1635 	if (filp->f_op && filp->f_op->flock)
1636 		error = filp->f_op->flock(filp,
1637 					  (can_sleep) ? F_SETLKW : F_SETLK,
1638 					  lock);
1639 	else
1640 		error = flock_lock_file_wait(filp, lock);
1641 
1642  out_free:
1643 	locks_free_lock(lock);
1644 
1645  out_putf:
1646 	fput(filp);
1647  out:
1648 	return error;
1649 }
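
/*
 * Userspace sketch (illustrative only): a non-blocking exclusive flock,
 * matching the LOCK_NB handling above:
 *
 *	if (flock(fd, LOCK_EX | LOCK_NB) == -1 && errno == EWOULDBLOCK)
 *		fprintf(stderr, "file is locked by someone else\n");
 *	...
 *	flock(fd, LOCK_UN);
 */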
1650 
1651 /**
1652  * vfs_test_lock - test file byte range lock
1653  * @filp: The file to test lock for
1654  * @fl: The lock to test; also used to hold result
1655  *
1656  * Returns -ERRNO on failure.  Indicates presence of conflicting lock by
1657  * setting conf->fl_type to something other than F_UNLCK.
1658  */
1659 int vfs_test_lock(struct file *filp, struct file_lock *fl)
1660 {
1661 	if (filp->f_op && filp->f_op->lock)
1662 		return filp->f_op->lock(filp, F_GETLK, fl);
1663 	posix_test_lock(filp, fl);
1664 	return 0;
1665 }
1666 EXPORT_SYMBOL_GPL(vfs_test_lock);
1667 
1668 static int posix_lock_to_flock(struct flock *flock, struct file_lock *fl)
1669 {
1670 	flock->l_pid = fl->fl_pid;
1671 #if BITS_PER_LONG == 32
1672 	/*
1673 	 * Make sure we can represent the posix lock via
1674 	 * legacy 32bit flock.
1675 	 */
1676 	if (fl->fl_start > OFFT_OFFSET_MAX)
1677 		return -EOVERFLOW;
1678 	if (fl->fl_end != OFFSET_MAX && fl->fl_end > OFFT_OFFSET_MAX)
1679 		return -EOVERFLOW;
1680 #endif
1681 	flock->l_start = fl->fl_start;
1682 	flock->l_len = fl->fl_end == OFFSET_MAX ? 0 :
1683 		fl->fl_end - fl->fl_start + 1;
1684 	flock->l_whence = 0;
1685 	flock->l_type = fl->fl_type;
1686 	return 0;
1687 }
1688 
1689 #if BITS_PER_LONG == 32
1690 static void posix_lock_to_flock64(struct flock64 *flock, struct file_lock *fl)
1691 {
1692 	flock->l_pid = fl->fl_pid;
1693 	flock->l_start = fl->fl_start;
1694 	flock->l_len = fl->fl_end == OFFSET_MAX ? 0 :
1695 		fl->fl_end - fl->fl_start + 1;
1696 	flock->l_whence = 0;
1697 	flock->l_type = fl->fl_type;
1698 }
1699 #endif
1700 
1701 /* Report the first existing lock that would conflict with l.
1702  * This implements the F_GETLK command of fcntl().
1703  */
1704 int fcntl_getlk(struct file *filp, struct flock __user *l)
1705 {
1706 	struct file_lock file_lock;
1707 	struct flock flock;
1708 	int error;
1709 
1710 	error = -EFAULT;
1711 	if (copy_from_user(&flock, l, sizeof(flock)))
1712 		goto out;
1713 	error = -EINVAL;
1714 	if ((flock.l_type != F_RDLCK) && (flock.l_type != F_WRLCK))
1715 		goto out;
1716 
1717 	error = flock_to_posix_lock(filp, &file_lock, &flock);
1718 	if (error)
1719 		goto out;
1720 
1721 	error = vfs_test_lock(filp, &file_lock);
1722 	if (error)
1723 		goto out;
1724 
1725 	flock.l_type = file_lock.fl_type;
1726 	if (file_lock.fl_type != F_UNLCK) {
1727 		error = posix_lock_to_flock(&flock, &file_lock);
1728 		if (error)
1729 			goto out;
1730 	}
1731 	error = -EFAULT;
1732 	if (!copy_to_user(l, &flock, sizeof(flock)))
1733 		error = 0;
1734 out:
1735 	return error;
1736 }
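
/*
 * Userspace sketch (illustrative only): F_GETLK never takes a lock; it
 * only reports the first conflicting one by rewriting the struct flock:
 *
 *	struct flock fl = { .l_type = F_WRLCK, .l_whence = SEEK_SET,
 *			    .l_start = 0, .l_len = 0 };
 *	fcntl(fd, F_GETLK, &fl);
 *	if (fl.l_type == F_UNLCK)
 *		printf("whole file is lockable\n");
 *	else
 *		printf("conflicting lock held by pid %d\n", (int)fl.l_pid);
 */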
1737 
1738 /**
1739  * vfs_lock_file - file byte range lock
1740  * @filp: The file to apply the lock to
1741  * @cmd: type of locking operation (F_SETLK, F_GETLK, etc.)
1742  * @fl: The lock to be applied
1743  * @conf: Place to return a copy of the conflicting lock, if found.
1744  *
1745  * A caller that doesn't care about the conflicting lock may pass NULL
1746  * as the final argument.
1747  *
1748  * If the filesystem defines a private ->lock() method, then @conf will
1749  * be left unchanged; so a caller that cares should initialize it to
1750  * some acceptable default.
1751  *
1752  * To avoid blocking kernel daemons, such as lockd, that need to acquire POSIX
1753  * locks, the ->lock() interface may return asynchronously, before the lock has
1754  * been granted or denied by the underlying filesystem, if (and only if)
1755  * fl_grant is set. Callers expecting ->lock() to return asynchronously
1756  * will only use F_SETLK, not F_SETLKW; they will set FL_SLEEP if (and only if)
1757  * the request is for a blocking lock. When ->lock() does return asynchronously,
1758  * it must return FILE_LOCK_DEFERRED, and call ->fl_grant() when the lock
1759  * request completes.
1760  * If the request is for a non-blocking lock the file system should return
1761  * FILE_LOCK_DEFERRED, then try to get the lock and call the callback routine
1762  * with the result. If the request timed out, the callback routine will return
1763  * a nonzero return code and the file system should release the lock. The file
1764  * system is also responsible for keeping a corresponding posix lock when it
1765  * grants a lock, so the VFS can find out which locks are locally held and do
1766  * the correct lock cleanup when required.
1767  * The underlying filesystem must not drop the kernel lock or call
1768  * ->fl_grant() before returning to the caller with a FILE_LOCK_DEFERRED
1769  * return code.
1770  */
1771 int vfs_lock_file(struct file *filp, unsigned int cmd, struct file_lock *fl, struct file_lock *conf)
1772 {
1773 	if (filp->f_op && filp->f_op->lock)
1774 		return filp->f_op->lock(filp, cmd, fl);
1775 	else
1776 		return posix_lock_file(filp, fl, conf);
1777 }
1778 EXPORT_SYMBOL_GPL(vfs_lock_file);
1779 
1780 static int do_lock_file_wait(struct file *filp, unsigned int cmd,
1781 			     struct file_lock *fl)
1782 {
1783 	int error;
1784 
1785 	error = security_file_lock(filp, fl->fl_type);
1786 	if (error)
1787 		return error;
1788 
1789 	for (;;) {
1790 		error = vfs_lock_file(filp, cmd, fl, NULL);
1791 		if (error != FILE_LOCK_DEFERRED)
1792 			break;
1793 		error = wait_event_interruptible(fl->fl_wait, !fl->fl_next);
1794 		if (!error)
1795 			continue;
1796 
1797 		locks_delete_block(fl);
1798 		break;
1799 	}
1800 
1801 	return error;
1802 }
1803 
1804 /* Apply the lock described by l to an open file descriptor.
1805  * This implements both the F_SETLK and F_SETLKW commands of fcntl().
1806  */
1807 int fcntl_setlk(unsigned int fd, struct file *filp, unsigned int cmd,
1808 		struct flock __user *l)
1809 {
1810 	struct file_lock *file_lock = locks_alloc_lock();
1811 	struct flock flock;
1812 	struct inode *inode;
1813 	struct file *f;
1814 	int error;
1815 
1816 	if (file_lock == NULL)
1817 		return -ENOLCK;
1818 
1819 	/*
1820 	 * This might block, so we do it before checking the inode.
1821 	 */
1822 	error = -EFAULT;
1823 	if (copy_from_user(&flock, l, sizeof(flock)))
1824 		goto out;
1825 
1826 	inode = filp->f_path.dentry->d_inode;
1827 
1828 	/* Don't allow mandatory locks on files that may be memory mapped
1829 	 * and shared.
1830 	 */
1831 	if (mandatory_lock(inode) && mapping_writably_mapped(filp->f_mapping)) {
1832 		error = -EAGAIN;
1833 		goto out;
1834 	}
1835 
1836 again:
1837 	error = flock_to_posix_lock(filp, file_lock, &flock);
1838 	if (error)
1839 		goto out;
1840 	if (cmd == F_SETLKW) {
1841 		file_lock->fl_flags |= FL_SLEEP;
1842 	}
1843 
1844 	error = -EBADF;
1845 	switch (flock.l_type) {
1846 	case F_RDLCK:
1847 		if (!(filp->f_mode & FMODE_READ))
1848 			goto out;
1849 		break;
1850 	case F_WRLCK:
1851 		if (!(filp->f_mode & FMODE_WRITE))
1852 			goto out;
1853 		break;
1854 	case F_UNLCK:
1855 		break;
1856 	default:
1857 		error = -EINVAL;
1858 		goto out;
1859 	}
1860 
1861 	error = do_lock_file_wait(filp, cmd, file_lock);
1862 
1863 	/*
1864 	 * Attempt to detect a close/fcntl race and recover by
1865 	 * releasing the lock that was just acquired.
1866 	 */
1867 	/*
1868 	 * we need that spin_lock here - it prevents reordering between
1869 	 * update of inode->i_flock and check for it done in close().
1870 	 * rcu_read_lock() wouldn't do.
1871 	 */
1872 	spin_lock(&current->files->file_lock);
1873 	f = fcheck(fd);
1874 	spin_unlock(&current->files->file_lock);
1875 	if (!error && f != filp && flock.l_type != F_UNLCK) {
1876 		flock.l_type = F_UNLCK;
1877 		goto again;
1878 	}
1879 
1880 out:
1881 	locks_free_lock(file_lock);
1882 	return error;
1883 }
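
/*
 * For reference (userspace example, not kernel code): the request that ends
 * up in fcntl_setlk() is the ordinary POSIX record-locking API, roughly:
 *
 *	struct flock fl = {
 *		.l_type   = F_WRLCK,
 *		.l_whence = SEEK_SET,
 *		.l_start  = 0,
 *		.l_len    = 0,		(0 means "through EOF")
 *	};
 *	if (fcntl(fd, F_SETLKW, &fl) == -1)
 *		perror("fcntl");
 *
 * F_SETLKW blocks until the lock can be granted; with F_SETLK a conflicting
 * lock makes the call fail immediately (EACCES or EAGAIN) instead.
 */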
1884 
1885 #if BITS_PER_LONG == 32
1886 /* Report the first existing lock that would conflict with l.
1887  * This implements the F_GETLK64 command of fcntl().
1888  */
1889 int fcntl_getlk64(struct file *filp, struct flock64 __user *l)
1890 {
1891 	struct file_lock file_lock;
1892 	struct flock64 flock;
1893 	int error;
1894 
1895 	error = -EFAULT;
1896 	if (copy_from_user(&flock, l, sizeof(flock)))
1897 		goto out;
1898 	error = -EINVAL;
1899 	if ((flock.l_type != F_RDLCK) && (flock.l_type != F_WRLCK))
1900 		goto out;
1901 
1902 	error = flock64_to_posix_lock(filp, &file_lock, &flock);
1903 	if (error)
1904 		goto out;
1905 
1906 	error = vfs_test_lock(filp, &file_lock);
1907 	if (error)
1908 		goto out;
1909 
1910 	flock.l_type = file_lock.fl_type;
1911 	if (file_lock.fl_type != F_UNLCK)
1912 		posix_lock_to_flock64(&flock, &file_lock);
1913 
1914 	error = -EFAULT;
1915 	if (!copy_to_user(l, &flock, sizeof(flock)))
1916 		error = 0;
1917 
1918 out:
1919 	return error;
1920 }
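
/*
 * Userspace view of the same operation (example only): F_GETLK64 fills the
 * flock64 structure with the first conflicting lock, or sets l_type to
 * F_UNLCK when the requested range could have been locked:
 *
 *	struct flock64 fl = { .l_type = F_WRLCK, .l_whence = SEEK_SET };
 *	fcntl(fd, F_GETLK64, &fl);
 *	if (fl.l_type == F_UNLCK)
 *		... no conflict, the lock could have been placed ...
 *	else
 *		... fl.l_pid identifies the owner of a conflicting lock ...
 */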
1921 
1922 /* Apply the lock described by l to an open file descriptor.
1923  * This implements both the F_SETLK64 and F_SETLKW64 commands of fcntl().
1924  */
1925 int fcntl_setlk64(unsigned int fd, struct file *filp, unsigned int cmd,
1926 		struct flock64 __user *l)
1927 {
1928 	struct file_lock *file_lock = locks_alloc_lock();
1929 	struct flock64 flock;
1930 	struct inode *inode;
1931 	struct file *f;
1932 	int error;
1933 
1934 	if (file_lock == NULL)
1935 		return -ENOLCK;
1936 
1937 	/*
1938 	 * This might block, so we do it before checking the inode.
1939 	 */
1940 	error = -EFAULT;
1941 	if (copy_from_user(&flock, l, sizeof(flock)))
1942 		goto out;
1943 
1944 	inode = filp->f_path.dentry->d_inode;
1945 
1946 	/* Don't allow mandatory locks on files that may be memory mapped
1947 	 * and shared.
1948 	 */
1949 	if (mandatory_lock(inode) && mapping_writably_mapped(filp->f_mapping)) {
1950 		error = -EAGAIN;
1951 		goto out;
1952 	}
1953 
1954 again:
1955 	error = flock64_to_posix_lock(filp, file_lock, &flock);
1956 	if (error)
1957 		goto out;
1958 	if (cmd == F_SETLKW64) {
1959 		file_lock->fl_flags |= FL_SLEEP;
1960 	}
1961 
1962 	error = -EBADF;
1963 	switch (flock.l_type) {
1964 	case F_RDLCK:
1965 		if (!(filp->f_mode & FMODE_READ))
1966 			goto out;
1967 		break;
1968 	case F_WRLCK:
1969 		if (!(filp->f_mode & FMODE_WRITE))
1970 			goto out;
1971 		break;
1972 	case F_UNLCK:
1973 		break;
1974 	default:
1975 		error = -EINVAL;
1976 		goto out;
1977 	}
1978 
1979 	error = do_lock_file_wait(filp, cmd, file_lock);
1980 
1981 	/*
1982 	 * Attempt to detect a close/fcntl race and recover by
1983 	 * releasing the lock that was just acquired.
1984 	 */
1985 	spin_lock(&current->files->file_lock);
1986 	f = fcheck(fd);
1987 	spin_unlock(&current->files->file_lock);
1988 	if (!error && f != filp && flock.l_type != F_UNLCK) {
1989 		flock.l_type = F_UNLCK;
1990 		goto again;
1991 	}
1992 
1993 out:
1994 	locks_free_lock(file_lock);
1995 	return error;
1996 }
1997 #endif /* BITS_PER_LONG == 32 */
1998 
1999 /*
2000  * This function is called when the file is being removed
2001  * from the task's fd array.  POSIX locks belonging to this task
2002  * are deleted at this time.
2003  */
2004 void locks_remove_posix(struct file *filp, fl_owner_t owner)
2005 {
2006 	struct file_lock lock;
2007 
2008 	/*
2009 	 * If there are no locks held on this file, we don't need to call
2010 	 * posix_lock_file().  Another process could be setting a lock on this
2011 	 * file at the same time, but we wouldn't remove that lock anyway.
2012 	 */
2013 	if (!filp->f_path.dentry->d_inode->i_flock)
2014 		return;
2015 
2016 	lock.fl_type = F_UNLCK;
2017 	lock.fl_flags = FL_POSIX | FL_CLOSE;
2018 	lock.fl_start = 0;
2019 	lock.fl_end = OFFSET_MAX;
2020 	lock.fl_owner = owner;
2021 	lock.fl_pid = current->tgid;
2022 	lock.fl_file = filp;
2023 	lock.fl_ops = NULL;
2024 	lock.fl_lmops = NULL;
2025 
2026 	vfs_lock_file(filp, F_SETLK, &lock, NULL);
2027 
2028 	if (lock.fl_ops && lock.fl_ops->fl_release_private)
2029 		lock.fl_ops->fl_release_private(&lock);
2030 }
2031 
2032 EXPORT_SYMBOL(locks_remove_posix);
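
/*
 * Sketch of the usual call site (for orientation only): filp_close() passes
 * the files_struct pointer it was given as the POSIX lock owner, so the
 * effect at close(2) time is roughly:
 *
 *	locks_remove_posix(filp, (fl_owner_t)current->files);
 *
 * which matches the owner that flock_to_posix_lock() records when the lock
 * is first taken.
 */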
2033 
2034 /*
2035  * This function is called on the last close of an open file.
2036  */
2037 void locks_remove_flock(struct file *filp)
2038 {
2039 	struct inode * inode = filp->f_path.dentry->d_inode;
2040 	struct file_lock *fl;
2041 	struct file_lock **before;
2042 
2043 	if (!inode->i_flock)
2044 		return;
2045 
2046 	if (filp->f_op && filp->f_op->flock) {
2047 		struct file_lock fl = {
2048 			.fl_pid = current->tgid,
2049 			.fl_file = filp,
2050 			.fl_flags = FL_FLOCK,
2051 			.fl_type = F_UNLCK,
2052 			.fl_end = OFFSET_MAX,
2053 		};
2054 		filp->f_op->flock(filp, F_SETLKW, &fl);
2055 		if (fl.fl_ops && fl.fl_ops->fl_release_private)
2056 			fl.fl_ops->fl_release_private(&fl);
2057 	}
2058 
2059 	lock_flocks();
2060 	before = &inode->i_flock;
2061 
2062 	while ((fl = *before) != NULL) {
2063 		if (fl->fl_file == filp) {
2064 			if (IS_FLOCK(fl)) {
2065 				locks_delete_lock(before);
2066 				continue;
2067 			}
2068 			if (IS_LEASE(fl)) {
2069 				lease_modify(before, F_UNLCK);
2070 				continue;
2071 			}
2072			/* Any other lock type left here indicates a bug. */
2073			BUG();
2074		}
2075 		before = &fl->fl_next;
2076 	}
2077 	unlock_flocks();
2078 }
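
/*
 * Call-site note (sketch): __fput() invokes this when the final reference to
 * the struct file is dropped, so flock locks and leases tied to the filp go
 * away with the file itself rather than with any particular descriptor.
 */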
2079 
2080 /**
2081  *	posix_unblock_lock - stop waiting for a file lock
2082  *	@filp: how the file was opened
2083  *	@waiter: the lock which was waiting
2084  *
2085  *	lockd blocks waiting for locks; this lets it stop waiting by removing @waiter from the blocked list.
2086  */
2087 int
2088 posix_unblock_lock(struct file *filp, struct file_lock *waiter)
2089 {
2090 	int status = 0;
2091 
2092 	lock_flocks();
2093 	if (waiter->fl_next)
2094 		__locks_delete_block(waiter);
2095 	else
2096 		status = -ENOENT;
2097 	unlock_flocks();
2098 	return status;
2099 }
2100 
2101 EXPORT_SYMBOL(posix_unblock_lock);
2102 
2103 /**
2104  * vfs_cancel_lock - cancel a blocked byte-range lock request
2105  * @filp: The file to apply the unblock to
2106  * @fl: The lock to be unblocked
2107  *
2108  * Used by lock managers to cancel blocked requests
2109  */
2110 int vfs_cancel_lock(struct file *filp, struct file_lock *fl)
2111 {
2112 	if (filp->f_op && filp->f_op->lock)
2113 		return filp->f_op->lock(filp, F_CANCELLK, fl);
2114 	return 0;
2115 }
2116 
2117 EXPORT_SYMBOL_GPL(vfs_cancel_lock);
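
/*
 * Illustrative sketch (not lockd's exact code): a lock manager that gives up
 * on a deferred request typically asks the filesystem to cancel it and then
 * removes the local waiter from the blocked list:
 *
 *	vfs_cancel_lock(filp, fl);
 *	posix_unblock_lock(filp, fl);
 *
 * A filesystem without a ->lock() method has nothing to cancel, which is why
 * vfs_cancel_lock() simply returns 0 in that case.
 */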
2118 
2119 #ifdef CONFIG_PROC_FS
2120 #include <linux/proc_fs.h>
2121 #include <linux/seq_file.h>
2122 
2123 static void lock_get_status(struct seq_file *f, struct file_lock *fl,
2124 			    loff_t id, char *pfx)
2125 {
2126 	struct inode *inode = NULL;
2127 	unsigned int fl_pid;
2128 
2129 	if (fl->fl_nspid)
2130 		fl_pid = pid_vnr(fl->fl_nspid);
2131 	else
2132 		fl_pid = fl->fl_pid;
2133 
2134 	if (fl->fl_file != NULL)
2135 		inode = fl->fl_file->f_path.dentry->d_inode;
2136 
2137 	seq_printf(f, "%lld:%s ", id, pfx);
2138 	if (IS_POSIX(fl)) {
2139 		seq_printf(f, "%6s %s ",
2140 			     (fl->fl_flags & FL_ACCESS) ? "ACCESS" : "POSIX ",
2141 			     (inode == NULL) ? "*NOINODE*" :
2142 			     mandatory_lock(inode) ? "MANDATORY" : "ADVISORY ");
2143 	} else if (IS_FLOCK(fl)) {
2144 		if (fl->fl_type & LOCK_MAND) {
2145 			seq_printf(f, "FLOCK  MSNFS     ");
2146 		} else {
2147 			seq_printf(f, "FLOCK  ADVISORY  ");
2148 		}
2149 	} else if (IS_LEASE(fl)) {
2150 		seq_printf(f, "LEASE  ");
2151 		if (fl->fl_type & F_INPROGRESS)
2152 			seq_printf(f, "BREAKING  ");
2153 		else if (fl->fl_file)
2154 			seq_printf(f, "ACTIVE    ");
2155 		else
2156 			seq_printf(f, "BREAKER   ");
2157 	} else {
2158 		seq_printf(f, "UNKNOWN UNKNOWN  ");
2159 	}
2160 	if (fl->fl_type & LOCK_MAND) {
2161 		seq_printf(f, "%s ",
2162 			       (fl->fl_type & LOCK_READ)
2163 			       ? (fl->fl_type & LOCK_WRITE) ? "RW   " : "READ "
2164 			       : (fl->fl_type & LOCK_WRITE) ? "WRITE" : "NONE ");
2165 	} else {
2166 		seq_printf(f, "%s ",
2167 			       (fl->fl_type & F_INPROGRESS)
2168 			       ? (fl->fl_type & F_UNLCK) ? "UNLCK" : "READ "
2169 			       : (fl->fl_type & F_WRLCK) ? "WRITE" : "READ ");
2170 	}
2171 	if (inode) {
2172 #ifdef WE_CAN_BREAK_LSLK_NOW
2173 		seq_printf(f, "%d %s:%ld ", fl_pid,
2174 				inode->i_sb->s_id, inode->i_ino);
2175 #else
2176 		/* userspace relies on this representation of dev_t ;-( */
2177 		seq_printf(f, "%d %02x:%02x:%ld ", fl_pid,
2178 				MAJOR(inode->i_sb->s_dev),
2179 				MINOR(inode->i_sb->s_dev), inode->i_ino);
2180 #endif
2181 	} else {
2182 		seq_printf(f, "%d <none>:0 ", fl_pid);
2183 	}
2184 	if (IS_POSIX(fl)) {
2185 		if (fl->fl_end == OFFSET_MAX)
2186 			seq_printf(f, "%Ld EOF\n", fl->fl_start);
2187 		else
2188 			seq_printf(f, "%Ld %Ld\n", fl->fl_start, fl->fl_end);
2189 	} else {
2190 		seq_printf(f, "0 EOF\n");
2191 	}
2192 }
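
/*
 * Example of the /proc/locks lines built above (illustrative values only).
 * The fields are: ordinal, lock class, mandatory/advisory, access, pid,
 * major:minor:inode, start, end; a "->" prefix marks a blocked waiter
 * printed by locks_show() below:
 *
 *	1: POSIX  ADVISORY  WRITE 1234 08:01:5678 0 EOF
 *	2: FLOCK  ADVISORY  WRITE 1235 08:01:9012 0 EOF
 *	2: -> FLOCK  ADVISORY  WRITE 1236 08:01:9012 0 EOF
 */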
2193 
2194 static int locks_show(struct seq_file *f, void *v)
2195 {
2196 	struct file_lock *fl, *bfl;
2197 
2198 	fl = list_entry(v, struct file_lock, fl_link);
2199 
2200 	lock_get_status(f, fl, *((loff_t *)f->private), "");
2201 
2202 	list_for_each_entry(bfl, &fl->fl_block, fl_block)
2203 		lock_get_status(f, bfl, *((loff_t *)f->private), " ->");
2204 
2205 	return 0;
2206 }
2207 
2208 static void *locks_start(struct seq_file *f, loff_t *pos)
2209 {
2210 	loff_t *p = f->private;
2211 
2212 	lock_flocks();
2213 	*p = (*pos + 1);
2214 	return seq_list_start(&file_lock_list, *pos);
2215 }
2216 
2217 static void *locks_next(struct seq_file *f, void *v, loff_t *pos)
2218 {
2219 	loff_t *p = f->private;
2220 	++*p;
2221 	return seq_list_next(v, &file_lock_list, pos);
2222 }
2223 
2224 static void locks_stop(struct seq_file *f, void *v)
2225 {
2226 	unlock_flocks();
2227 }
2228 
2229 static const struct seq_operations locks_seq_operations = {
2230 	.start	= locks_start,
2231 	.next	= locks_next,
2232 	.stop	= locks_stop,
2233 	.show	= locks_show,
2234 };
2235 
2236 static int locks_open(struct inode *inode, struct file *filp)
2237 {
2238 	return seq_open_private(filp, &locks_seq_operations, sizeof(loff_t));
2239 }
2240 
2241 static const struct file_operations proc_locks_operations = {
2242 	.open		= locks_open,
2243 	.read		= seq_read,
2244 	.llseek		= seq_lseek,
2245 	.release	= seq_release_private,
2246 };
2247 
2248 static int __init proc_locks_init(void)
2249 {
2250 	proc_create("locks", 0, NULL, &proc_locks_operations);
2251 	return 0;
2252 }
2253 module_init(proc_locks_init);
2254 #endif
2255 
2256 /**
2257  *	lock_may_read - checks that the region is free of locks
2258  *	@inode: the inode that is being read
2259  *	@start: the first byte to read
2260  *	@len: the number of bytes to read
2261  *
2262  *	Emulates Windows locking requirements.  Whole-file
2263  *	mandatory locks (share modes) can prohibit a read and
2264  *	byte-range POSIX locks can prohibit a read if they overlap.
2265  *
2266  *	N.B. this function is only ever called
2267  *	from knfsd and ownership of locks is never checked.
2268  */
2269 int lock_may_read(struct inode *inode, loff_t start, unsigned long len)
2270 {
2271 	struct file_lock *fl;
2272 	int result = 1;
2273 	lock_flocks();
2274 	for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next) {
2275 		if (IS_POSIX(fl)) {
2276 			if (fl->fl_type == F_RDLCK)
2277 				continue;
2278 			if ((fl->fl_end < start) || (fl->fl_start > (start + len)))
2279 				continue;
2280 		} else if (IS_FLOCK(fl)) {
2281 			if (!(fl->fl_type & LOCK_MAND))
2282 				continue;
2283 			if (fl->fl_type & LOCK_READ)
2284 				continue;
2285 		} else
2286 			continue;
2287 		result = 0;
2288 		break;
2289 	}
2290 	unlock_flocks();
2291 	return result;
2292 }
2293 
2294 EXPORT_SYMBOL(lock_may_read);
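
/*
 * Sketch of the intended use (simplified, not nfsd's actual code): before
 * servicing a read, knfsd can check the byte range and refuse the request
 * if a share mode or an overlapping write lock would forbid it:
 *
 *	if (!lock_may_read(inode, offset, count))
 *		return some_locked_error;	(hypothetical error mapping)
 */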
2295 
2296 /**
2297  *	lock_may_write - checks that the region is free of locks
2298  *	@inode: the inode that is being written
2299  *	@start: the first byte to write
2300  *	@len: the number of bytes to write
2301  *
2302  *	Emulates Windows locking requirements.  Whole-file
2303  *	mandatory locks (share modes) can prohibit a write and
2304  *	byte-range POSIX locks can prohibit a write if they overlap.
2305  *
2306  *	N.B. this function is only ever called
2307  *	from knfsd and ownership of locks is never checked.
2308  */
2309 int lock_may_write(struct inode *inode, loff_t start, unsigned long len)
2310 {
2311 	struct file_lock *fl;
2312 	int result = 1;
2313 	lock_flocks();
2314 	for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next) {
2315 		if (IS_POSIX(fl)) {
2316 			if ((fl->fl_end < start) || (fl->fl_start > (start + len)))
2317 				continue;
2318 		} else if (IS_FLOCK(fl)) {
2319 			if (!(fl->fl_type & LOCK_MAND))
2320 				continue;
2321 			if (fl->fl_type & LOCK_WRITE)
2322 				continue;
2323 		} else
2324 			continue;
2325 		result = 0;
2326 		break;
2327 	}
2328 	unlock_flocks();
2329 	return result;
2330 }
2331 
2332 EXPORT_SYMBOL(lock_may_write);
2333 
2334 static int __init filelock_init(void)
2335 {
2336 	filelock_cache = kmem_cache_create("file_lock_cache",
2337 			sizeof(struct file_lock), 0, SLAB_PANIC,
2338 			init_once);
2339 	return 0;
2340 }
2341 
2342 core_initcall(filelock_init);
2343