xref: /openbmc/linux/fs/locks.c (revision 81d67439)
1 /*
2  *  linux/fs/locks.c
3  *
4  *  Provide support for fcntl()'s F_GETLK, F_SETLK, and F_SETLKW calls.
5  *  Doug Evans (dje@spiff.uucp), August 07, 1992
6  *
7  *  Deadlock detection added.
8  *  FIXME: one thing isn't handled yet:
9  *	- mandatory locks (requires lots of changes elsewhere)
10  *  Kelly Carmichael (kelly@[142.24.8.65]), September 17, 1994.
11  *
12  *  Miscellaneous edits, and a total rewrite of posix_lock_file() code.
13  *  Kai Petzke (wpp@marie.physik.tu-berlin.de), 1994
14  *
15  *  Converted file_lock_table to a linked list from an array, which eliminates
16  *  the limits on how many active file locks are open.
17  *  Chad Page (pageone@netcom.com), November 27, 1994
18  *
19  *  Removed dependency on file descriptors. dup()'ed file descriptors now
20  *  get the same locks as the original file descriptors, and a close() on
21  *  any file descriptor removes ALL the locks on the file for the current
22  *  process. Since locks still depend on the process id, locks are inherited
23  *  after an exec() but not after a fork(). This agrees with POSIX, and both
24  *  BSD and SVR4 practice.
25  *  Andy Walker (andy@lysaker.kvaerner.no), February 14, 1995
26  *
27  *  Scrapped free list which is redundant now that we allocate locks
28  *  dynamically with kmalloc()/kfree().
29  *  Andy Walker (andy@lysaker.kvaerner.no), February 21, 1995
30  *
31  *  Implemented two lock personalities - FL_FLOCK and FL_POSIX.
32  *
33  *  FL_POSIX locks are created with calls to fcntl() and lockf() through the
34  *  fcntl() system call. They have the semantics described above.
35  *
36  *  FL_FLOCK locks are created with calls to flock(), through the flock()
37  *  system call, which is new. Old C libraries implement flock() via fcntl()
38  *  and will continue to use the old, broken implementation.
39  *
40  *  FL_FLOCK locks follow the 4.4 BSD flock() semantics. They are associated
41  *  with a file pointer (filp). As a result they can be shared by a parent
42  *  process and its children after a fork(). They are removed when the last
43  *  file descriptor referring to the file pointer is closed (unless explicitly
44  *  unlocked).
45  *
46  *  FL_FLOCK locks never deadlock, an existing lock is always removed before
47  *  upgrading from shared to exclusive (or vice versa). When this happens
48  *  any processes blocked by the current lock are woken up and allowed to
49  *  run before the new lock is applied.
50  *  Andy Walker (andy@lysaker.kvaerner.no), June 09, 1995
51  *
52  *  Removed some race conditions in flock_lock_file(), marked other possible
53  *  races. Just grep for FIXME to see them.
54  *  Dmitry Gorodchanin (pgmdsg@ibi.com), February 09, 1996.
55  *
56  *  Addressed Dmitry's concerns. Deadlock checking no longer recursive.
57  *  Lock allocation changed to GFP_ATOMIC as we can't afford to sleep
58  *  once we've checked for blocking and deadlocking.
59  *  Andy Walker (andy@lysaker.kvaerner.no), April 03, 1996.
60  *
61  *  Initial implementation of mandatory locks. SunOS turned out to be
62  *  a rotten model, so I implemented the "obvious" semantics.
63  *  See 'Documentation/mandatory.txt' for details.
64  *  Andy Walker (andy@lysaker.kvaerner.no), April 06, 1996.
65  *
66  *  Don't allow mandatory locks on mmap()'ed files. Added simple functions to
67  *  check if a file has mandatory locks, used by mmap(), open() and creat() to
68  *  see if system call should be rejected. Ref. HP-UX/SunOS/Solaris Reference
69  *  Manual, Section 2.
70  *  Andy Walker (andy@lysaker.kvaerner.no), April 09, 1996.
71  *
72  *  Tidied up block list handling. Added '/proc/locks' interface.
73  *  Andy Walker (andy@lysaker.kvaerner.no), April 24, 1996.
74  *
75  *  Fixed deadlock condition for pathological code that mixes calls to
76  *  flock() and fcntl().
77  *  Andy Walker (andy@lysaker.kvaerner.no), April 29, 1996.
78  *
79  *  Allow only one type of locking scheme (FL_POSIX or FL_FLOCK) to be in use
80  *  for a given file at a time. Changed the CONFIG_LOCK_MANDATORY scheme to
81  *  guarantee sensible behaviour in the case where file system modules might
82  *  be compiled with different options than the kernel itself.
83  *  Andy Walker (andy@lysaker.kvaerner.no), May 15, 1996.
84  *
85  *  Added a couple of missing wake_up() calls. Thanks to Thomas Meckel
86  *  (Thomas.Meckel@mni.fh-giessen.de) for spotting this.
87  *  Andy Walker (andy@lysaker.kvaerner.no), May 15, 1996.
88  *
89  *  Changed FL_POSIX locks to use the block list in the same way as FL_FLOCK
90  *  locks. Changed process synchronisation to avoid dereferencing locks that
91  *  have already been freed.
92  *  Andy Walker (andy@lysaker.kvaerner.no), Sep 21, 1996.
93  *
94  *  Made the block list a circular list to minimise searching in the list.
95  *  Andy Walker (andy@lysaker.kvaerner.no), Sep 25, 1996.
96  *
97  *  Made mandatory locking a mount option. Default is not to allow mandatory
98  *  locking.
99  *  Andy Walker (andy@lysaker.kvaerner.no), Oct 04, 1996.
100  *
101  *  Some adaptations for NFS support.
102  *  Olaf Kirch (okir@monad.swb.de), Dec 1996.
103  *
104  *  Fixed /proc/locks interface so that we can't overrun the buffer we are handed.
105  *  Andy Walker (andy@lysaker.kvaerner.no), May 12, 1997.
106  *
107  *  Use slab allocator instead of kmalloc/kfree.
108  *  Use generic list implementation from <linux/list.h>.
109  *  Sped up posix_locks_deadlock by only considering blocked locks.
110  *  Matthew Wilcox <willy@debian.org>, March, 2000.
111  *
112  *  Leases and LOCK_MAND
113  *  Matthew Wilcox <willy@debian.org>, June, 2000.
114  *  Stephen Rothwell <sfr@canb.auug.org.au>, June, 2000.
115  */
116 
117 #include <linux/capability.h>
118 #include <linux/file.h>
119 #include <linux/fdtable.h>
120 #include <linux/fs.h>
121 #include <linux/init.h>
122 #include <linux/module.h>
123 #include <linux/security.h>
124 #include <linux/slab.h>
125 #include <linux/syscalls.h>
126 #include <linux/time.h>
127 #include <linux/rcupdate.h>
128 #include <linux/pid_namespace.h>
129 
130 #include <asm/uaccess.h>
131 
132 #define IS_POSIX(fl)	(fl->fl_flags & FL_POSIX)
133 #define IS_FLOCK(fl)	(fl->fl_flags & FL_FLOCK)
134 #define IS_LEASE(fl)	(fl->fl_flags & FL_LEASE)
135 
136 int leases_enable = 1;
137 int lease_break_time = 45;
138 
139 #define for_each_lock(inode, lockp) \
140 	for (lockp = &inode->i_flock; *lockp != NULL; lockp = &(*lockp)->fl_next)
141 
142 static LIST_HEAD(file_lock_list);
143 static LIST_HEAD(blocked_list);
144 static DEFINE_SPINLOCK(file_lock_lock);
145 
146 /*
147  * Protects the two list heads above, plus the inode->i_flock list
148  */
149 void lock_flocks(void)
150 {
151 	spin_lock(&file_lock_lock);
152 }
153 EXPORT_SYMBOL_GPL(lock_flocks);
154 
155 void unlock_flocks(void)
156 {
157 	spin_unlock(&file_lock_lock);
158 }
159 EXPORT_SYMBOL_GPL(unlock_flocks);
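/*
 * Illustrative sketch (not part of the original source): any walk of
 * inode->i_flock must be bracketed by these helpers, and since this is
 * a spinlock, the body must not sleep:
 *
 *	struct file_lock *fl;
 *
 *	lock_flocks();
 *	for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next) {
 *		examine fl here; no blocking operations allowed
 *	}
 *	unlock_flocks();
 */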
160 
161 static struct kmem_cache *filelock_cache __read_mostly;
162 
163 static void locks_init_lock_always(struct file_lock *fl)
164 {
165 	fl->fl_next = NULL;
166 	fl->fl_fasync = NULL;
167 	fl->fl_owner = NULL;
168 	fl->fl_pid = 0;
169 	fl->fl_nspid = NULL;
170 	fl->fl_file = NULL;
171 	fl->fl_flags = 0;
172 	fl->fl_type = 0;
173 	fl->fl_start = fl->fl_end = 0;
174 }
175 
176 /* Allocate an empty lock structure. */
177 struct file_lock *locks_alloc_lock(void)
178 {
179 	struct file_lock *fl = kmem_cache_alloc(filelock_cache, GFP_KERNEL);
180 
181 	if (fl)
182 		locks_init_lock_always(fl);
183 
184 	return fl;
185 }
186 EXPORT_SYMBOL_GPL(locks_alloc_lock);
187 
188 void locks_release_private(struct file_lock *fl)
189 {
190 	if (fl->fl_ops) {
191 		if (fl->fl_ops->fl_release_private)
192 			fl->fl_ops->fl_release_private(fl);
193 		fl->fl_ops = NULL;
194 	}
195 	if (fl->fl_lmops) {
196 		if (fl->fl_lmops->fl_release_private)
197 			fl->fl_lmops->fl_release_private(fl);
198 		fl->fl_lmops = NULL;
199 	}
200 
201 }
202 EXPORT_SYMBOL_GPL(locks_release_private);
203 
204 /* Free a lock which is not in use. */
205 void locks_free_lock(struct file_lock *fl)
206 {
207 	BUG_ON(waitqueue_active(&fl->fl_wait));
208 	BUG_ON(!list_empty(&fl->fl_block));
209 	BUG_ON(!list_empty(&fl->fl_link));
210 
211 	locks_release_private(fl);
212 	kmem_cache_free(filelock_cache, fl);
213 }
214 EXPORT_SYMBOL(locks_free_lock);
215 
216 void locks_init_lock(struct file_lock *fl)
217 {
218 	INIT_LIST_HEAD(&fl->fl_link);
219 	INIT_LIST_HEAD(&fl->fl_block);
220 	init_waitqueue_head(&fl->fl_wait);
221 	fl->fl_ops = NULL;
222 	fl->fl_lmops = NULL;
223 	locks_init_lock_always(fl);
224 }
225 
226 EXPORT_SYMBOL(locks_init_lock);
227 
228 /*
229  * Initialises the fields of the file lock which are invariant for
230  * free file_locks.
231  */
232 static void init_once(void *foo)
233 {
234 	struct file_lock *lock = (struct file_lock *) foo;
235 
236 	locks_init_lock(lock);
237 }
238 
239 static void locks_copy_private(struct file_lock *new, struct file_lock *fl)
240 {
241 	if (fl->fl_ops) {
242 		if (fl->fl_ops->fl_copy_lock)
243 			fl->fl_ops->fl_copy_lock(new, fl);
244 		new->fl_ops = fl->fl_ops;
245 	}
246 	if (fl->fl_lmops)
247 		new->fl_lmops = fl->fl_lmops;
248 }
249 
250 /*
251  * Initialize a new lock from an existing file_lock structure.
252  */
253 void __locks_copy_lock(struct file_lock *new, const struct file_lock *fl)
254 {
255 	new->fl_owner = fl->fl_owner;
256 	new->fl_pid = fl->fl_pid;
257 	new->fl_file = NULL;
258 	new->fl_flags = fl->fl_flags;
259 	new->fl_type = fl->fl_type;
260 	new->fl_start = fl->fl_start;
261 	new->fl_end = fl->fl_end;
262 	new->fl_ops = NULL;
263 	new->fl_lmops = NULL;
264 }
265 EXPORT_SYMBOL(__locks_copy_lock);
266 
267 void locks_copy_lock(struct file_lock *new, struct file_lock *fl)
268 {
269 	locks_release_private(new);
270 
271 	__locks_copy_lock(new, fl);
272 	new->fl_file = fl->fl_file;
273 	new->fl_ops = fl->fl_ops;
274 	new->fl_lmops = fl->fl_lmops;
275 
276 	locks_copy_private(new, fl);
277 }
278 
279 EXPORT_SYMBOL(locks_copy_lock);
280 
281 static inline int flock_translate_cmd(int cmd) {
282 	if (cmd & LOCK_MAND)
283 		return cmd & (LOCK_MAND | LOCK_RW);
284 	switch (cmd) {
285 	case LOCK_SH:
286 		return F_RDLCK;
287 	case LOCK_EX:
288 		return F_WRLCK;
289 	case LOCK_UN:
290 		return F_UNLCK;
291 	}
292 	return -EINVAL;
293 }
294 
295 /* Fill in a file_lock structure with an appropriate FLOCK lock. */
296 static int flock_make_lock(struct file *filp, struct file_lock **lock,
297 		unsigned int cmd)
298 {
299 	struct file_lock *fl;
300 	int type = flock_translate_cmd(cmd);
301 	if (type < 0)
302 		return type;
303 
304 	fl = locks_alloc_lock();
305 	if (fl == NULL)
306 		return -ENOMEM;
307 
308 	fl->fl_file = filp;
309 	fl->fl_pid = current->tgid;
310 	fl->fl_flags = FL_FLOCK;
311 	fl->fl_type = type;
312 	fl->fl_end = OFFSET_MAX;
313 
314 	*lock = fl;
315 	return 0;
316 }
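/*
 * Illustrative example: sys_flock() strips LOCK_NB before calling
 * flock_make_lock(), so a userspace call such as
 *
 *	flock(fd, LOCK_SH | LOCK_NB);
 *
 * arrives here with cmd == LOCK_SH and yields a file_lock with
 * fl_type == F_RDLCK, fl_flags == FL_FLOCK and a whole-file range
 * (fl_start == 0 from locks_init_lock_always(), fl_end == OFFSET_MAX).
 */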
317 
318 static int assign_type(struct file_lock *fl, int type)
319 {
320 	switch (type) {
321 	case F_RDLCK:
322 	case F_WRLCK:
323 	case F_UNLCK:
324 		fl->fl_type = type;
325 		break;
326 	default:
327 		return -EINVAL;
328 	}
329 	return 0;
330 }
331 
332 /* Verify a "struct flock" and copy it to a "struct file_lock" as a POSIX
333  * style lock.
334  */
335 static int flock_to_posix_lock(struct file *filp, struct file_lock *fl,
336 			       struct flock *l)
337 {
338 	off_t start, end;
339 
340 	switch (l->l_whence) {
341 	case SEEK_SET:
342 		start = 0;
343 		break;
344 	case SEEK_CUR:
345 		start = filp->f_pos;
346 		break;
347 	case SEEK_END:
348 		start = i_size_read(filp->f_path.dentry->d_inode);
349 		break;
350 	default:
351 		return -EINVAL;
352 	}
353 
354 	/* POSIX-1996 leaves the case l->l_len < 0 undefined;
355 	   POSIX-2001 defines it. */
356 	start += l->l_start;
357 	if (start < 0)
358 		return -EINVAL;
359 	fl->fl_end = OFFSET_MAX;
360 	if (l->l_len > 0) {
361 		end = start + l->l_len - 1;
362 		fl->fl_end = end;
363 	} else if (l->l_len < 0) {
364 		end = start - 1;
365 		fl->fl_end = end;
366 		start += l->l_len;
367 		if (start < 0)
368 			return -EINVAL;
369 	}
370 	fl->fl_start = start;	/* we record the absolute position */
371 	if (fl->fl_end < fl->fl_start)
372 		return -EOVERFLOW;
373 
374 	fl->fl_owner = current->files;
375 	fl->fl_pid = current->tgid;
376 	fl->fl_file = filp;
377 	fl->fl_flags = FL_POSIX;
378 	fl->fl_ops = NULL;
379 	fl->fl_lmops = NULL;
380 
381 	return assign_type(fl, l->l_type);
382 }
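/*
 * Worked example of the range arithmetic above, assuming
 * l_whence == SEEK_SET and l_start == 100:
 *
 *	l_len ==  10   locks bytes [100, 109]
 *	l_len ==   0   locks bytes [100, OFFSET_MAX] (to end of file)
 *	l_len == -10   locks bytes [90, 99] (POSIX-2001 semantics)
 */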
383 
384 #if BITS_PER_LONG == 32
385 static int flock64_to_posix_lock(struct file *filp, struct file_lock *fl,
386 				 struct flock64 *l)
387 {
388 	loff_t start;
389 
390 	switch (l->l_whence) {
391 	case SEEK_SET:
392 		start = 0;
393 		break;
394 	case SEEK_CUR:
395 		start = filp->f_pos;
396 		break;
397 	case SEEK_END:
398 		start = i_size_read(filp->f_path.dentry->d_inode);
399 		break;
400 	default:
401 		return -EINVAL;
402 	}
403 
404 	start += l->l_start;
405 	if (start < 0)
406 		return -EINVAL;
407 	fl->fl_end = OFFSET_MAX;
408 	if (l->l_len > 0) {
409 		fl->fl_end = start + l->l_len - 1;
410 	} else if (l->l_len < 0) {
411 		fl->fl_end = start - 1;
412 		start += l->l_len;
413 		if (start < 0)
414 			return -EINVAL;
415 	}
416 	fl->fl_start = start;	/* we record the absolute position */
417 	if (fl->fl_end < fl->fl_start)
418 		return -EOVERFLOW;
419 
420 	fl->fl_owner = current->files;
421 	fl->fl_pid = current->tgid;
422 	fl->fl_file = filp;
423 	fl->fl_flags = FL_POSIX;
424 	fl->fl_ops = NULL;
425 	fl->fl_lmops = NULL;
426 
427 	return assign_type(fl, l->l_type);
428 }
429 #endif
430 
431 /* default lease lock manager operations */
432 static void lease_break_callback(struct file_lock *fl)
433 {
434 	kill_fasync(&fl->fl_fasync, SIGIO, POLL_MSG);
435 }
436 
437 static void lease_release_private_callback(struct file_lock *fl)
438 {
439 	if (!fl->fl_file)
440 		return;
441 
442 	f_delown(fl->fl_file);
443 	fl->fl_file->f_owner.signum = 0;
444 }
445 
446 static const struct lock_manager_operations lease_manager_ops = {
447 	.fl_break = lease_break_callback,
448 	.fl_release_private = lease_release_private_callback,
449 	.fl_change = lease_modify,
450 };
451 
452 /*
453  * Initialize a lease, use the default lock manager operations
454  */
455 static int lease_init(struct file *filp, int type, struct file_lock *fl)
456  {
457 	if (assign_type(fl, type) != 0)
458 		return -EINVAL;
459 
460 	fl->fl_owner = current->files;
461 	fl->fl_pid = current->tgid;
462 
463 	fl->fl_file = filp;
464 	fl->fl_flags = FL_LEASE;
465 	fl->fl_start = 0;
466 	fl->fl_end = OFFSET_MAX;
467 	fl->fl_ops = NULL;
468 	fl->fl_lmops = &lease_manager_ops;
469 	return 0;
470 }
471 
472 /* Allocate a file_lock initialised to this type of lease */
473 static struct file_lock *lease_alloc(struct file *filp, int type)
474 {
475 	struct file_lock *fl = locks_alloc_lock();
476 	int error = -ENOMEM;
477 
478 	if (fl == NULL)
479 		return ERR_PTR(error);
480 
481 	error = lease_init(filp, type, fl);
482 	if (error) {
483 		locks_free_lock(fl);
484 		return ERR_PTR(error);
485 	}
486 	return fl;
487 }
488 
489 /* Check if two locks overlap each other.
490  */
491 static inline int locks_overlap(struct file_lock *fl1, struct file_lock *fl2)
492 {
493 	return ((fl1->fl_end >= fl2->fl_start) &&
494 		(fl2->fl_end >= fl1->fl_start));
495 }
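/*
 * Example: [0, 9] and [5, 14] overlap (9 >= 5 and 14 >= 0), while
 * [0, 9] and [10, 19] do not (9 < 10).
 */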
496 
497 /*
498  * Check whether two locks have the same owner.
499  */
500 static int posix_same_owner(struct file_lock *fl1, struct file_lock *fl2)
501 {
502 	if (fl1->fl_lmops && fl1->fl_lmops->fl_compare_owner)
503 		return fl2->fl_lmops == fl1->fl_lmops &&
504 			fl1->fl_lmops->fl_compare_owner(fl1, fl2);
505 	return fl1->fl_owner == fl2->fl_owner;
506 }
507 
508 /* Remove waiter from blocker's block list.
509  * When blocker ends up pointing to itself then the list is empty.
510  */
511 static void __locks_delete_block(struct file_lock *waiter)
512 {
513 	list_del_init(&waiter->fl_block);
514 	list_del_init(&waiter->fl_link);
515 	waiter->fl_next = NULL;
516 }
517 
518 /* Remove waiter from its blocker's block list while holding the
519  * global file_lock_lock. */
520 static void locks_delete_block(struct file_lock *waiter)
521 {
522 	lock_flocks();
523 	__locks_delete_block(waiter);
524 	unlock_flocks();
525 }
526 
527 /* Insert waiter into blocker's block list.
528  * We use a circular list so that processes can be easily woken up in
529  * the order they blocked. The documentation doesn't require this but
530  * it seems like the reasonable thing to do.
531  */
532 static void locks_insert_block(struct file_lock *blocker,
533 			       struct file_lock *waiter)
534 {
535 	BUG_ON(!list_empty(&waiter->fl_block));
536 	list_add_tail(&waiter->fl_block, &blocker->fl_block);
537 	waiter->fl_next = blocker;
538 	if (IS_POSIX(blocker))
539 		list_add(&waiter->fl_link, &blocked_list);
540 }
541 
542 /* Wake up processes blocked waiting for blocker.
543  * Each waiter is removed from the block list and then either notified
544  * via its fl_notify callback or woken directly through fl_wait.
545  */
546 static void locks_wake_up_blocks(struct file_lock *blocker)
547 {
548 	while (!list_empty(&blocker->fl_block)) {
549 		struct file_lock *waiter;
550 
551 		waiter = list_first_entry(&blocker->fl_block,
552 				struct file_lock, fl_block);
553 		__locks_delete_block(waiter);
554 		if (waiter->fl_lmops && waiter->fl_lmops->fl_notify)
555 			waiter->fl_lmops->fl_notify(waiter);
556 		else
557 			wake_up(&waiter->fl_wait);
558 	}
559 }
560 
561 /* Insert file lock fl into an inode's lock list at the position indicated
562  * by pos. At the same time add the lock to the global file lock list.
563  */
564 static void locks_insert_lock(struct file_lock **pos, struct file_lock *fl)
565 {
566 	list_add(&fl->fl_link, &file_lock_list);
567 
568 	fl->fl_nspid = get_pid(task_tgid(current));
569 
570 	/* insert into file's list */
571 	fl->fl_next = *pos;
572 	*pos = fl;
573 }
574 
575 /*
576  * Delete a lock and then free it.
577  * Wake up processes that are blocked waiting for this lock,
578  * notify the FS that the lock has been cleared and
579  * finally free the lock.
580  */
581 static void locks_delete_lock(struct file_lock **thisfl_p)
582 {
583 	struct file_lock *fl = *thisfl_p;
584 
585 	*thisfl_p = fl->fl_next;
586 	fl->fl_next = NULL;
587 	list_del_init(&fl->fl_link);
588 
589 	fasync_helper(0, fl->fl_file, 0, &fl->fl_fasync);
590 	if (fl->fl_fasync != NULL) {
591 		printk(KERN_ERR "locks_delete_lock: fasync == %p\n", fl->fl_fasync);
592 		fl->fl_fasync = NULL;
593 	}
594 
595 	if (fl->fl_nspid) {
596 		put_pid(fl->fl_nspid);
597 		fl->fl_nspid = NULL;
598 	}
599 
600 	locks_wake_up_blocks(fl);
601 	locks_free_lock(fl);
602 }
603 
604 /* Determine if lock sys_fl blocks lock caller_fl. Common functionality
605  * shared by posix and flock: locks conflict unless both are read locks.
606  */
607 static int locks_conflict(struct file_lock *caller_fl, struct file_lock *sys_fl)
608 {
609 	if (sys_fl->fl_type == F_WRLCK)
610 		return 1;
611 	if (caller_fl->fl_type == F_WRLCK)
612 		return 1;
613 	return 0;
614 }
615 
616 /* Determine if lock sys_fl blocks lock caller_fl. POSIX specific
617  * checking before calling locks_conflict().
618  */
619 static int posix_locks_conflict(struct file_lock *caller_fl, struct file_lock *sys_fl)
620 {
621 	/* POSIX locks owned by the same process do not conflict with
622 	 * each other.
623 	 */
624 	if (!IS_POSIX(sys_fl) || posix_same_owner(caller_fl, sys_fl))
625 		return (0);
626 
627 	/* Check whether they overlap */
628 	if (!locks_overlap(caller_fl, sys_fl))
629 		return 0;
630 
631 	return (locks_conflict(caller_fl, sys_fl));
632 }
633 
634 /* Determine if lock sys_fl blocks lock caller_fl. FLOCK specific
635  * checking before calling the locks_conflict().
636  */
637 static int flock_locks_conflict(struct file_lock *caller_fl, struct file_lock *sys_fl)
638 {
639 	/* FLOCK locks referring to the same filp do not conflict with
640 	 * each other.
641 	 */
642 	if (!IS_FLOCK(sys_fl) || (caller_fl->fl_file == sys_fl->fl_file))
643 		return (0);
644 	if ((caller_fl->fl_type & LOCK_MAND) || (sys_fl->fl_type & LOCK_MAND))
645 		return 0;
646 
647 	return (locks_conflict(caller_fl, sys_fl));
648 }
649 
650 void
651 posix_test_lock(struct file *filp, struct file_lock *fl)
652 {
653 	struct file_lock *cfl;
654 
655 	lock_flocks();
656 	for (cfl = filp->f_path.dentry->d_inode->i_flock; cfl; cfl = cfl->fl_next) {
657 		if (!IS_POSIX(cfl))
658 			continue;
659 		if (posix_locks_conflict(fl, cfl))
660 			break;
661 	}
662 	if (cfl) {
663 		__locks_copy_lock(fl, cfl);
664 		if (cfl->fl_nspid)
665 			fl->fl_pid = pid_vnr(cfl->fl_nspid);
666 	} else
667 		fl->fl_type = F_UNLCK;
668 	unlock_flocks();
669 	return;
670 }
671 EXPORT_SYMBOL(posix_test_lock);
672 
673 /*
674  * Deadlock detection:
675  *
676  * We attempt to detect deadlocks that are due purely to posix file
677  * locks.
678  *
679  * We assume that a task can be waiting for at most one lock at a time.
680  * So for any acquired lock, the process holding that lock may be
681  * waiting on at most one other lock.  That lock in turns may be held by
682  * someone waiting for at most one other lock.  Given a requested lock
683  * caller_fl which is about to wait for a conflicting lock block_fl, we
684  * follow this chain of waiters to ensure we are not about to create a
685  * cycle.
686  *
687  * Since we do this before we ever put a process to sleep on a lock, we
688  * are ensured that there is never a cycle; that is what guarantees that
689  * the while() loop in posix_locks_deadlock() eventually completes.
690  *
691  * Note: the above assumption may not be true when handling lock
692  * requests from a broken NFS client. It may also fail in the presence
693  * of tasks (such as posix threads) sharing the same open file table.
694  *
695  * To handle those cases, we just bail out after a few iterations.
696  */
697 
698 #define MAX_DEADLK_ITERATIONS 10
699 
700 /* Find a lock that the owner of the given block_fl is blocking on. */
701 static struct file_lock *what_owner_is_waiting_for(struct file_lock *block_fl)
702 {
703 	struct file_lock *fl;
704 
705 	list_for_each_entry(fl, &blocked_list, fl_link) {
706 		if (posix_same_owner(fl, block_fl))
707 			return fl->fl_next;
708 	}
709 	return NULL;
710 }
711 
712 static int posix_locks_deadlock(struct file_lock *caller_fl,
713 				struct file_lock *block_fl)
714 {
715 	int i = 0;
716 
717 	while ((block_fl = what_owner_is_waiting_for(block_fl))) {
718 		if (i++ > MAX_DEADLK_ITERATIONS)
719 			return 0;
720 		if (posix_same_owner(caller_fl, block_fl))
721 			return 1;
722 	}
723 	return 0;
724 }
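/*
 * Illustrative scenario: task A holds a lock on byte 0 and is blocked
 * waiting for byte 1; task B holds byte 1 and now requests byte 0 with
 * F_SETLKW.  Starting from the conflicting lock (A's), the loop above
 * finds A waiting on B's lock, whose owner matches the caller, so the
 * request fails with -EDEADLK instead of sleeping forever.
 */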
725 
726 /* Try to create a FLOCK lock on filp. We always insert new FLOCK locks
727  * after any leases, but before any posix locks.
728  *
729  * Note that if called with an FL_EXISTS argument, the caller may determine
730  * whether or not a lock was successfully freed by testing the return
731  * value for -ENOENT.
732  */
733 static int flock_lock_file(struct file *filp, struct file_lock *request)
734 {
735 	struct file_lock *new_fl = NULL;
736 	struct file_lock **before;
737 	struct inode * inode = filp->f_path.dentry->d_inode;
738 	int error = 0;
739 	int found = 0;
740 
741 	if (!(request->fl_flags & FL_ACCESS) && (request->fl_type != F_UNLCK)) {
742 		new_fl = locks_alloc_lock();
743 		if (!new_fl)
744 			return -ENOMEM;
745 	}
746 
747 	lock_flocks();
748 	if (request->fl_flags & FL_ACCESS)
749 		goto find_conflict;
750 
751 	for_each_lock(inode, before) {
752 		struct file_lock *fl = *before;
753 		if (IS_POSIX(fl))
754 			break;
755 		if (IS_LEASE(fl))
756 			continue;
757 		if (filp != fl->fl_file)
758 			continue;
759 		if (request->fl_type == fl->fl_type)
760 			goto out;
761 		found = 1;
762 		locks_delete_lock(before);
763 		break;
764 	}
765 
766 	if (request->fl_type == F_UNLCK) {
767 		if ((request->fl_flags & FL_EXISTS) && !found)
768 			error = -ENOENT;
769 		goto out;
770 	}
771 
772 	/*
773 	 * If a higher-priority process was blocked on the old file lock,
774 	 * give it the opportunity to lock the file.
775 	 */
776 	if (found) {
777 		unlock_flocks();
778 		cond_resched();
779 		lock_flocks();
780 	}
781 
782 find_conflict:
783 	for_each_lock(inode, before) {
784 		struct file_lock *fl = *before;
785 		if (IS_POSIX(fl))
786 			break;
787 		if (IS_LEASE(fl))
788 			continue;
789 		if (!flock_locks_conflict(request, fl))
790 			continue;
791 		error = -EAGAIN;
792 		if (!(request->fl_flags & FL_SLEEP))
793 			goto out;
794 		error = FILE_LOCK_DEFERRED;
795 		locks_insert_block(fl, request);
796 		goto out;
797 	}
798 	if (request->fl_flags & FL_ACCESS)
799 		goto out;
800 	locks_copy_lock(new_fl, request);
801 	locks_insert_lock(before, new_fl);
802 	new_fl = NULL;
803 	error = 0;
804 
805 out:
806 	unlock_flocks();
807 	if (new_fl)
808 		locks_free_lock(new_fl);
809 	return error;
810 }
811 
812 static int __posix_lock_file(struct inode *inode, struct file_lock *request, struct file_lock *conflock)
813 {
814 	struct file_lock *fl;
815 	struct file_lock *new_fl = NULL;
816 	struct file_lock *new_fl2 = NULL;
817 	struct file_lock *left = NULL;
818 	struct file_lock *right = NULL;
819 	struct file_lock **before;
820 	int error, added = 0;
821 
822 	/*
823 	 * We may need two file_lock structures for this operation,
824 	 * so we get them in advance to avoid races.
825 	 *
826 	 * In some cases we can be sure, that no new locks will be needed
827 	 */
828 	if (!(request->fl_flags & FL_ACCESS) &&
829 	    (request->fl_type != F_UNLCK ||
830 	     request->fl_start != 0 || request->fl_end != OFFSET_MAX)) {
831 		new_fl = locks_alloc_lock();
832 		new_fl2 = locks_alloc_lock();
833 	}
834 
835 	lock_flocks();
836 	if (request->fl_type != F_UNLCK) {
837 		for_each_lock(inode, before) {
838 			fl = *before;
839 			if (!IS_POSIX(fl))
840 				continue;
841 			if (!posix_locks_conflict(request, fl))
842 				continue;
843 			if (conflock)
844 				__locks_copy_lock(conflock, fl);
845 			error = -EAGAIN;
846 			if (!(request->fl_flags & FL_SLEEP))
847 				goto out;
848 			error = -EDEADLK;
849 			if (posix_locks_deadlock(request, fl))
850 				goto out;
851 			error = FILE_LOCK_DEFERRED;
852 			locks_insert_block(fl, request);
853 			goto out;
854 		}
855 	}
856 
857 	/* If we're just looking for a conflict, we're done. */
858 	error = 0;
859 	if (request->fl_flags & FL_ACCESS)
860 		goto out;
861 
862 	/*
863 	 * Find the first old lock with the same owner as the new lock.
864 	 */
865 
866 	before = &inode->i_flock;
867 
868 	/* First skip locks owned by other processes.  */
869 	while ((fl = *before) && (!IS_POSIX(fl) ||
870 				  !posix_same_owner(request, fl))) {
871 		before = &fl->fl_next;
872 	}
873 
874 	/* Process locks with this owner.  */
875 	while ((fl = *before) && posix_same_owner(request, fl)) {
876 		/* Detect adjacent or overlapping regions (if same lock type)
877 		 */
878 		if (request->fl_type == fl->fl_type) {
879 			/* In all comparisons of start vs end, use
880 			 * "start - 1" rather than "end + 1". If end
881 			 * is OFFSET_MAX, end + 1 will become negative.
882 			 */
883 			if (fl->fl_end < request->fl_start - 1)
884 				goto next_lock;
885 			/* If the next lock in the list has entirely bigger
886 			 * addresses than the new one, insert the lock here.
887 			 */
888 			if (fl->fl_start - 1 > request->fl_end)
889 				break;
890 
891 			/* If we come here, the new and old lock are of the
892 			 * same type and adjacent or overlapping. Make one
893 			 * lock yielding from the lower start address of both
894 			 * locks to the higher end address.
895 			 */
896 			if (fl->fl_start > request->fl_start)
897 				fl->fl_start = request->fl_start;
898 			else
899 				request->fl_start = fl->fl_start;
900 			if (fl->fl_end < request->fl_end)
901 				fl->fl_end = request->fl_end;
902 			else
903 				request->fl_end = fl->fl_end;
904 			if (added) {
905 				locks_delete_lock(before);
906 				continue;
907 			}
908 			request = fl;
909 			added = 1;
910 		}
911 		else {
912 			/* Processing for different lock types is a bit
913 			 * more complex.
914 			 */
915 			if (fl->fl_end < request->fl_start)
916 				goto next_lock;
917 			if (fl->fl_start > request->fl_end)
918 				break;
919 			if (request->fl_type == F_UNLCK)
920 				added = 1;
921 			if (fl->fl_start < request->fl_start)
922 				left = fl;
923 			/* If the next lock in the list has a higher end
924 			 * address than the new one, insert the new one here.
925 			 */
926 			if (fl->fl_end > request->fl_end) {
927 				right = fl;
928 				break;
929 			}
930 			if (fl->fl_start >= request->fl_start) {
931 				/* The new lock completely replaces an old
932 				 * one (This may happen several times).
933 				 */
934 				if (added) {
935 					locks_delete_lock(before);
936 					continue;
937 				}
938 				/* Replace the old lock with the new one.
939 				 * Wake up anybody waiting for the old one,
940 				 * as the change in lock type might satisfy
941 				 * their needs.
942 				 */
943 				locks_wake_up_blocks(fl);
944 				fl->fl_start = request->fl_start;
945 				fl->fl_end = request->fl_end;
946 				fl->fl_type = request->fl_type;
947 				locks_release_private(fl);
948 				locks_copy_private(fl, request);
949 				request = fl;
950 				added = 1;
951 			}
952 		}
953 		/* Go on to next lock.
954 		 */
955 	next_lock:
956 		before = &fl->fl_next;
957 	}
958 
959 	/*
960 	 * The above code only modifies existing locks in case of
961 	 * merging or replacing.  If new lock(s) need to be inserted,
962 	 * all modifications are done below this point, so it is still
963 	 * safe to bail out.
964 	 */
965 	error = -ENOLCK; /* "no luck" */
966 	if (right && left == right && !new_fl2)
967 		goto out;
968 
969 	error = 0;
970 	if (!added) {
971 		if (request->fl_type == F_UNLCK) {
972 			if (request->fl_flags & FL_EXISTS)
973 				error = -ENOENT;
974 			goto out;
975 		}
976 
977 		if (!new_fl) {
978 			error = -ENOLCK;
979 			goto out;
980 		}
981 		locks_copy_lock(new_fl, request);
982 		locks_insert_lock(before, new_fl);
983 		new_fl = NULL;
984 	}
985 	if (right) {
986 		if (left == right) {
987 			/* The new lock breaks the old one in two pieces,
988 			 * so we have to use the second new lock.
989 			 */
990 			left = new_fl2;
991 			new_fl2 = NULL;
992 			locks_copy_lock(left, right);
993 			locks_insert_lock(before, left);
994 		}
995 		right->fl_start = request->fl_end + 1;
996 		locks_wake_up_blocks(right);
997 	}
998 	if (left) {
999 		left->fl_end = request->fl_start - 1;
1000 		locks_wake_up_blocks(left);
1001 	}
1002  out:
1003 	unlock_flocks();
1004 	/*
1005 	 * Free any unused locks.
1006 	 */
1007 	if (new_fl)
1008 		locks_free_lock(new_fl);
1009 	if (new_fl2)
1010 		locks_free_lock(new_fl2);
1011 	return error;
1012 }
1013 
1014 /**
1015  * posix_lock_file - Apply a POSIX-style lock to a file
1016  * @filp: The file to apply the lock to
1017  * @fl: The lock to be applied
1018  * @conflock: Place to return a copy of the conflicting lock, if found.
1019  *
1020  * Add a POSIX style lock to a file.
1021  * We merge adjacent & overlapping locks whenever possible.
1022  * POSIX locks are sorted by owner task, then by starting address
1023  *
1024  * Note that if called with an FL_EXISTS argument, the caller may determine
1025  * whether or not a lock was successfully freed by testing the return
1026  * value for -ENOENT.
1027  */
1028 int posix_lock_file(struct file *filp, struct file_lock *fl,
1029 			struct file_lock *conflock)
1030 {
1031 	return __posix_lock_file(filp->f_path.dentry->d_inode, fl, conflock);
1032 }
1033 EXPORT_SYMBOL(posix_lock_file);
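/*
 * Illustrative example of the merge/split behaviour implemented in
 * __posix_lock_file(): if an owner holds F_RDLCK on [0, 9] and requests
 * F_RDLCK on [10, 19], the ranges are adjacent and of equal type, so
 * the existing lock is extended to [0, 19].  Conversely, unlocking
 * [3, 6] out of a held [0, 9] leaves two pieces, [0, 2] and [7, 9],
 * which is the case that consumes the second spare file_lock allocated
 * up front.
 */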
1034 
1035 /**
1036  * posix_lock_file_wait - Apply a POSIX-style lock to a file
1037  * @filp: The file to apply the lock to
1038  * @fl: The lock to be applied
1039  *
1040  * Add a POSIX style lock to a file.
1041  * We merge adjacent & overlapping locks whenever possible.
1042  * POSIX locks are sorted by owner task, then by starting address
1043  */
1044 int posix_lock_file_wait(struct file *filp, struct file_lock *fl)
1045 {
1046 	int error;
1047 	might_sleep ();
1048 	for (;;) {
1049 		error = posix_lock_file(filp, fl, NULL);
1050 		if (error != FILE_LOCK_DEFERRED)
1051 			break;
1052 		error = wait_event_interruptible(fl->fl_wait, !fl->fl_next);
1053 		if (!error)
1054 			continue;
1055 
1056 		locks_delete_block(fl);
1057 		break;
1058 	}
1059 	return error;
1060 }
1061 EXPORT_SYMBOL(posix_lock_file_wait);
1062 
1063 /**
1064  * locks_mandatory_locked - Check for an active lock
1065  * @inode: the file to check
1066  *
1067  * Searches the inode's list of locks to find any POSIX locks which conflict.
1068  * This function is called from locks_verify_locked() only.
1069  */
1070 int locks_mandatory_locked(struct inode *inode)
1071 {
1072 	fl_owner_t owner = current->files;
1073 	struct file_lock *fl;
1074 
1075 	/*
1076 	 * Search the lock list for this inode for any POSIX locks.
1077 	 */
1078 	lock_flocks();
1079 	for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next) {
1080 		if (!IS_POSIX(fl))
1081 			continue;
1082 		if (fl->fl_owner != owner)
1083 			break;
1084 	}
1085 	unlock_flocks();
1086 	return fl ? -EAGAIN : 0;
1087 }
1088 
1089 /**
1090  * locks_mandatory_area - Check for a conflicting lock
1091  * @read_write: %FLOCK_VERIFY_WRITE for exclusive access, %FLOCK_VERIFY_READ
1092  *		for shared
1093  * @inode:      the file to check
1094  * @filp:       how the file was opened (if it was)
1095  * @offset:     start of area to check
1096  * @count:      length of area to check
1097  *
1098  * Searches the inode's list of locks to find any POSIX locks which conflict.
1099  * This function is called from rw_verify_area() and
1100  * locks_verify_truncate().
1101  */
1102 int locks_mandatory_area(int read_write, struct inode *inode,
1103 			 struct file *filp, loff_t offset,
1104 			 size_t count)
1105 {
1106 	struct file_lock fl;
1107 	int error;
1108 
1109 	locks_init_lock(&fl);
1110 	fl.fl_owner = current->files;
1111 	fl.fl_pid = current->tgid;
1112 	fl.fl_file = filp;
1113 	fl.fl_flags = FL_POSIX | FL_ACCESS;
1114 	if (filp && !(filp->f_flags & O_NONBLOCK))
1115 		fl.fl_flags |= FL_SLEEP;
1116 	fl.fl_type = (read_write == FLOCK_VERIFY_WRITE) ? F_WRLCK : F_RDLCK;
1117 	fl.fl_start = offset;
1118 	fl.fl_end = offset + count - 1;
1119 
1120 	for (;;) {
1121 		error = __posix_lock_file(inode, &fl, NULL);
1122 		if (error != FILE_LOCK_DEFERRED)
1123 			break;
1124 		error = wait_event_interruptible(fl.fl_wait, !fl.fl_next);
1125 		if (!error) {
1126 			/*
1127 			 * If we've been sleeping someone might have
1128 			 * changed the permissions behind our back.
1129 			 */
1130 			if (__mandatory_lock(inode))
1131 				continue;
1132 		}
1133 
1134 		locks_delete_block(&fl);
1135 		break;
1136 	}
1137 
1138 	return error;
1139 }
1140 
1141 EXPORT_SYMBOL(locks_mandatory_area);
1142 
1143 /* We already had a lease on this file; just change its type */
1144 int lease_modify(struct file_lock **before, int arg)
1145 {
1146 	struct file_lock *fl = *before;
1147 	int error = assign_type(fl, arg);
1148 
1149 	if (error)
1150 		return error;
1151 	locks_wake_up_blocks(fl);
1152 	if (arg == F_UNLCK)
1153 		locks_delete_lock(before);
1154 	return 0;
1155 }
1156 
1157 EXPORT_SYMBOL(lease_modify);
1158 
1159 static void time_out_leases(struct inode *inode)
1160 {
1161 	struct file_lock **before;
1162 	struct file_lock *fl;
1163 
1164 	before = &inode->i_flock;
1165 	while ((fl = *before) && IS_LEASE(fl) && (fl->fl_type & F_INPROGRESS)) {
1166 		if ((fl->fl_break_time == 0)
1167 				|| time_before(jiffies, fl->fl_break_time)) {
1168 			before = &fl->fl_next;
1169 			continue;
1170 		}
1171 		lease_modify(before, fl->fl_type & ~F_INPROGRESS);
1172 		if (fl == *before)	/* lease_modify may have freed fl */
1173 			before = &fl->fl_next;
1174 	}
1175 }
1176 
1177 /**
1178  *	__break_lease	-	revoke all outstanding leases on file
1179  *	@inode: the inode of the file to return
1180  *	@mode: the open mode (read or write)
1181  *
1182  *	break_lease (inlined for speed) has checked there already is at least
1183  *	some kind of lock (maybe a lease) on this file.  Leases are broken on
1184  *	a call to open() or truncate().  This function can sleep unless you
1185  *	specified %O_NONBLOCK to your open().
1186  */
1187 int __break_lease(struct inode *inode, unsigned int mode)
1188 {
1189 	int error = 0, future;
1190 	struct file_lock *new_fl, *flock;
1191 	struct file_lock *fl;
1192 	unsigned long break_time;
1193 	int i_have_this_lease = 0;
1194 	int want_write = (mode & O_ACCMODE) != O_RDONLY;
1195 
1196 	new_fl = lease_alloc(NULL, want_write ? F_WRLCK : F_RDLCK);
1197 
1198 	lock_flocks();
1199 
1200 	time_out_leases(inode);
1201 
1202 	flock = inode->i_flock;
1203 	if ((flock == NULL) || !IS_LEASE(flock))
1204 		goto out;
1205 
1206 	for (fl = flock; fl && IS_LEASE(fl); fl = fl->fl_next)
1207 		if (fl->fl_owner == current->files)
1208 			i_have_this_lease = 1;
1209 
1210 	if (want_write) {
1211 		/* If we want write access, we have to revoke any lease. */
1212 		future = F_UNLCK | F_INPROGRESS;
1213 	} else if (flock->fl_type & F_INPROGRESS) {
1214 		/* If the lease is already being broken, we just leave it */
1215 		future = flock->fl_type;
1216 	} else if (flock->fl_type & F_WRLCK) {
1217 		/* Downgrade the exclusive lease to a read-only lease. */
1218 		future = F_RDLCK | F_INPROGRESS;
1219 	} else {
1220 		/* the existing lease was read-only, so we can read too. */
1221 		goto out;
1222 	}
1223 
1224 	if (IS_ERR(new_fl) && !i_have_this_lease
1225 			&& ((mode & O_NONBLOCK) == 0)) {
1226 		error = PTR_ERR(new_fl);
1227 		goto out;
1228 	}
1229 
1230 	break_time = 0;
1231 	if (lease_break_time > 0) {
1232 		break_time = jiffies + lease_break_time * HZ;
1233 		if (break_time == 0)
1234 			break_time++;	/* so that 0 means no break time */
1235 	}
1236 
1237 	for (fl = flock; fl && IS_LEASE(fl); fl = fl->fl_next) {
1238 		if (fl->fl_type != future) {
1239 			fl->fl_type = future;
1240 			fl->fl_break_time = break_time;
1241 			/* lease must have lmops break callback */
1242 			fl->fl_lmops->fl_break(fl);
1243 		}
1244 	}
1245 
1246 	if (i_have_this_lease || (mode & O_NONBLOCK)) {
1247 		error = -EWOULDBLOCK;
1248 		goto out;
1249 	}
1250 
1251 restart:
1252 	break_time = flock->fl_break_time;
1253 	if (break_time != 0) {
1254 		break_time -= jiffies;
1255 		if (break_time == 0)
1256 			break_time++;
1257 	}
1258 	locks_insert_block(flock, new_fl);
1259 	unlock_flocks();
1260 	error = wait_event_interruptible_timeout(new_fl->fl_wait,
1261 						!new_fl->fl_next, break_time);
1262 	lock_flocks();
1263 	__locks_delete_block(new_fl);
1264 	if (error >= 0) {
1265 		if (error == 0)
1266 			time_out_leases(inode);
1267 		/* Wait for the next lease that has not been broken yet */
1268 		for (flock = inode->i_flock; flock && IS_LEASE(flock);
1269 				flock = flock->fl_next) {
1270 			if (flock->fl_type & F_INPROGRESS)
1271 				goto restart;
1272 		}
1273 		error = 0;
1274 	}
1275 
1276 out:
1277 	unlock_flocks();
1278 	if (!IS_ERR(new_fl))
1279 		locks_free_lock(new_fl);
1280 	return error;
1281 }
1282 
1283 EXPORT_SYMBOL(__break_lease);
1284 
1285 /**
1286  *	lease_get_mtime - get the last modified time of an inode
1287  *	@inode: the inode
1288  *      @time:  pointer to a timespec which will contain the last modified time
1289  *
1290  * This is to force NFS clients to flush their caches for files with
1291  * exclusive leases.  The justification is that if someone has an
1292  * exclusive lease, then they could be modifying it.
1293  */
1294 void lease_get_mtime(struct inode *inode, struct timespec *time)
1295 {
1296 	struct file_lock *flock = inode->i_flock;
1297 	if (flock && IS_LEASE(flock) && (flock->fl_type & F_WRLCK))
1298 		*time = current_fs_time(inode->i_sb);
1299 	else
1300 		*time = inode->i_mtime;
1301 }
1302 
1303 EXPORT_SYMBOL(lease_get_mtime);
1304 
1305 /**
1306  *	fcntl_getlease - Enquire what lease is currently active
1307  *	@filp: the file
1308  *
1309  *	The value returned by this function will be one of
1310  *	(if no lease break is pending):
1311  *
1312  *	%F_RDLCK to indicate a shared lease is held.
1313  *
1314  *	%F_WRLCK to indicate an exclusive lease is held.
1315  *
1316  *	%F_UNLCK to indicate no lease is held.
1317  *
1318  *	(if a lease break is pending):
1319  *
1320  *	%F_RDLCK to indicate an exclusive lease needs to be
1321  *		changed to a shared lease (or removed).
1322  *
1323  *	%F_UNLCK to indicate the lease needs to be removed.
1324  *
1325  *	XXX: sfr & willy disagree over whether F_INPROGRESS
1326  *	should be returned to userspace.
1327  */
1328 int fcntl_getlease(struct file *filp)
1329 {
1330 	struct file_lock *fl;
1331 	int type = F_UNLCK;
1332 
1333 	lock_flocks();
1334 	time_out_leases(filp->f_path.dentry->d_inode);
1335 	for (fl = filp->f_path.dentry->d_inode->i_flock; fl && IS_LEASE(fl);
1336 			fl = fl->fl_next) {
1337 		if (fl->fl_file == filp) {
1338 			type = fl->fl_type & ~F_INPROGRESS;
1339 			break;
1340 		}
1341 	}
1342 	unlock_flocks();
1343 	return type;
1344 }
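/*
 * Illustrative userspace usage:
 *
 *	int type = fcntl(fd, F_GETLEASE);
 *
 * type is F_RDLCK, F_WRLCK or F_UNLCK as documented above; while a
 * break is pending it reports the type the lease must be downgraded to.
 */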
1345 
1346 /**
1347  *	generic_setlease	-	sets a lease on an open file
1348  *	@filp: file pointer
1349  *	@arg: type of lease to obtain
1350  *	@flp: input - file_lock to use, output - file_lock inserted
1351  *
1352  *	The (input) flp->fl_lmops->fl_break function is required
1353  *	by break_lease().
1354  *
1355  *	Called with file_lock_lock held.
1356  */
1357 int generic_setlease(struct file *filp, long arg, struct file_lock **flp)
1358 {
1359 	struct file_lock *fl, **before, **my_before = NULL, *lease;
1360 	struct dentry *dentry = filp->f_path.dentry;
1361 	struct inode *inode = dentry->d_inode;
1362 	int error, rdlease_count = 0, wrlease_count = 0;
1363 
1364 	lease = *flp;
1365 
1366 	error = -EACCES;
1367 	if ((current_fsuid() != inode->i_uid) && !capable(CAP_LEASE))
1368 		goto out;
1369 	error = -EINVAL;
1370 	if (!S_ISREG(inode->i_mode))
1371 		goto out;
1372 	error = security_file_lock(filp, arg);
1373 	if (error)
1374 		goto out;
1375 
1376 	time_out_leases(inode);
1377 
1378 	BUG_ON(!(*flp)->fl_lmops->fl_break);
1379 
1380 	if (arg != F_UNLCK) {
1381 		error = -EAGAIN;
1382 		if ((arg == F_RDLCK) && (atomic_read(&inode->i_writecount) > 0))
1383 			goto out;
1384 		if ((arg == F_WRLCK)
1385 		    && ((dentry->d_count > 1)
1386 			|| (atomic_read(&inode->i_count) > 1)))
1387 			goto out;
1388 	}
1389 
1390 	/*
1391 	 * At this point, we know that if there is an exclusive
1392 	 * lease on this file, then we hold it on this filp
1393 	 * (otherwise our open of this file would have blocked).
1394 	 * And if we are trying to acquire an exclusive lease,
1395 	 * then the file is not open by anyone (including us)
1396 	 * except for this filp.
1397 	 */
1398 	for (before = &inode->i_flock;
1399 			((fl = *before) != NULL) && IS_LEASE(fl);
1400 			before = &fl->fl_next) {
1401 		if (fl->fl_file == filp)
1402 			my_before = before;
1403 		else if (fl->fl_type == (F_INPROGRESS | F_UNLCK))
1404 			/*
1405 			 * Someone is in the process of opening this
1406 			 * file for writing so we may not take an
1407 			 * exclusive lease on it.
1408 			 */
1409 			wrlease_count++;
1410 		else
1411 			rdlease_count++;
1412 	}
1413 
1414 	error = -EAGAIN;
1415 	if ((arg == F_RDLCK && (wrlease_count > 0)) ||
1416 	    (arg == F_WRLCK && ((rdlease_count + wrlease_count) > 0)))
1417 		goto out;
1418 
1419 	if (my_before != NULL) {
1420 		error = lease->fl_lmops->fl_change(my_before, arg);
1421 		if (!error)
1422 			*flp = *my_before;
1423 		goto out;
1424 	}
1425 
1426 	if (arg == F_UNLCK)
1427 		goto out;
1428 
1429 	error = -EINVAL;
1430 	if (!leases_enable)
1431 		goto out;
1432 
1433 	locks_insert_lock(before, lease);
1434 	return 0;
1435 
1436 out:
1437 	return error;
1438 }
1439 EXPORT_SYMBOL(generic_setlease);
1440 
1441 static int __vfs_setlease(struct file *filp, long arg, struct file_lock **lease)
1442 {
1443 	if (filp->f_op && filp->f_op->setlease)
1444 		return filp->f_op->setlease(filp, arg, lease);
1445 	else
1446 		return generic_setlease(filp, arg, lease);
1447 }
1448 
1449 /**
1450  *	vfs_setlease        -       sets a lease on an open file
1451  *	@filp: file pointer
1452  *	@arg: type of lease to obtain
1453  *	@lease: file_lock to use
1454  *
1455  *	Call this to establish a lease on the file.
1456  *	The (*lease)->fl_lmops->fl_break operation must be set; if not,
1457  *	break_lease will oops!
1458  *
1459  *	This will call the filesystem's setlease file method, if
1460  *	defined.  Note that there is no getlease method; instead, the
1461  *	filesystem setlease method should call back to setlease() to
1462  *	add a lease to the inode's lease list, where fcntl_getlease() can
1463  *	find it.  Since fcntl_getlease() only reports whether the current
1464  *	task holds a lease, a cluster filesystem need only do this for
1465  *	leases held by processes on this node.
1466  *
1467  *	There is also no break_lease method; filesystems that
1468  *	handle their own leases should break leases themselves from the
1469  *	filesystem's open, create, and (on truncate) setattr methods.
1470  *
1471  *	Warning: the only current setlease methods exist only to disable
1472  *	leases in certain cases.  More vfs changes may be required to
1473  *	allow a full filesystem lease implementation.
1474  */
1475 
1476 int vfs_setlease(struct file *filp, long arg, struct file_lock **lease)
1477 {
1478 	int error;
1479 
1480 	lock_flocks();
1481 	error = __vfs_setlease(filp, arg, lease);
1482 	unlock_flocks();
1483 
1484 	return error;
1485 }
1486 EXPORT_SYMBOL_GPL(vfs_setlease);
1487 
1488 static int do_fcntl_delete_lease(struct file *filp)
1489 {
1490 	struct file_lock fl, *flp = &fl;
1491 
1492 	lease_init(filp, F_UNLCK, flp);
1493 
1494 	return vfs_setlease(filp, F_UNLCK, &flp);
1495 }
1496 
1497 static int do_fcntl_add_lease(unsigned int fd, struct file *filp, long arg)
1498 {
1499 	struct file_lock *fl, *ret;
1500 	struct fasync_struct *new;
1501 	int error;
1502 
1503 	fl = lease_alloc(filp, arg);
1504 	if (IS_ERR(fl))
1505 		return PTR_ERR(fl);
1506 
1507 	new = fasync_alloc();
1508 	if (!new) {
1509 		locks_free_lock(fl);
1510 		return -ENOMEM;
1511 	}
1512 	ret = fl;
1513 	lock_flocks();
1514 	error = __vfs_setlease(filp, arg, &ret);
1515 	if (error) {
1516 		unlock_flocks();
1517 		locks_free_lock(fl);
1518 		goto out_free_fasync;
1519 	}
1520 	if (ret != fl)
1521 		locks_free_lock(fl);
1522 
1523 	/*
1524 	 * fasync_insert_entry() returns the old entry if any.
1525 	 * If there was no old entry, then it used 'new' and
1526 	 * inserted it into the fasync list. Clear new so that
1527 	 * we don't release it here.
1528 	 */
1529 	if (!fasync_insert_entry(fd, filp, &ret->fl_fasync, new))
1530 		new = NULL;
1531 
1532 	error = __f_setown(filp, task_pid(current), PIDTYPE_PID, 0);
1533 	unlock_flocks();
1534 
1535 out_free_fasync:
1536 	if (new)
1537 		fasync_free(new);
1538 	return error;
1539 }
1540 
1541 /**
1542  *	fcntl_setlease	-	sets a lease on an open file
1543  *	@fd: open file descriptor
1544  *	@filp: file pointer
1545  *	@arg: type of lease to obtain
1546  *
1547  *	Call this fcntl to establish a lease on the file.
1548  *	Note that you also need to call %F_SETSIG to
1549  *	receive a signal when the lease is broken.
1550  */
1551 int fcntl_setlease(unsigned int fd, struct file *filp, long arg)
1552 {
1553 	if (arg == F_UNLCK)
1554 		return do_fcntl_delete_lease(filp);
1555 	return do_fcntl_add_lease(fd, filp, arg);
1556 }
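/*
 * Illustrative userspace sketch (error handling abbreviated): take a
 * read lease and ask for a real-time signal instead of the default
 * SIGIO when it is broken.  The open must pass the checks in
 * generic_setlease() (e.g. a read lease fails with EAGAIN while the
 * file is open for writing elsewhere):
 *
 *	fcntl(fd, F_SETSIG, SIGRTMIN);
 *	if (fcntl(fd, F_SETLEASE, F_RDLCK) == -1)
 *		perror("F_SETLEASE");
 *	... on SIGRTMIN, finish up, then release the lease ...
 *	fcntl(fd, F_SETLEASE, F_UNLCK);
 */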
1557 
1558 /**
1559  * flock_lock_file_wait - Apply a FLOCK-style lock to a file
1560  * @filp: The file to apply the lock to
1561  * @fl: The lock to be applied
1562  *
1563  * Add a FLOCK style lock to a file.
1564  */
1565 int flock_lock_file_wait(struct file *filp, struct file_lock *fl)
1566 {
1567 	int error;
1568 	might_sleep();
1569 	for (;;) {
1570 		error = flock_lock_file(filp, fl);
1571 		if (error != FILE_LOCK_DEFERRED)
1572 			break;
1573 		error = wait_event_interruptible(fl->fl_wait, !fl->fl_next);
1574 		if (!error)
1575 			continue;
1576 
1577 		locks_delete_block(fl);
1578 		break;
1579 	}
1580 	return error;
1581 }
1582 
1583 EXPORT_SYMBOL(flock_lock_file_wait);
1584 
1585 /**
1586  *	sys_flock - flock() system call.
1587  *	@fd: the file descriptor to lock.
1588  *	@cmd: the type of lock to apply.
1589  *
1590  *	Apply a %FL_FLOCK style lock to an open file descriptor.
1591  *	The @cmd can be one of
1592  *
1593  *	%LOCK_SH -- a shared lock.
1594  *
1595  *	%LOCK_EX -- an exclusive lock.
1596  *
1597  *	%LOCK_UN -- remove an existing lock.
1598  *
1599  *	%LOCK_MAND -- a `mandatory' flock.  This exists to emulate Windows Share Modes.
1600  *
1601  *	%LOCK_MAND can be combined with %LOCK_READ or %LOCK_WRITE to allow other
1602  *	processes read and write access respectively.
1603  */
1604 SYSCALL_DEFINE2(flock, unsigned int, fd, unsigned int, cmd)
1605 {
1606 	struct file *filp;
1607 	struct file_lock *lock;
1608 	int can_sleep, unlock;
1609 	int error;
1610 
1611 	error = -EBADF;
1612 	filp = fget(fd);
1613 	if (!filp)
1614 		goto out;
1615 
1616 	can_sleep = !(cmd & LOCK_NB);
1617 	cmd &= ~LOCK_NB;
1618 	unlock = (cmd == LOCK_UN);
1619 
1620 	if (!unlock && !(cmd & LOCK_MAND) &&
1621 	    !(filp->f_mode & (FMODE_READ|FMODE_WRITE)))
1622 		goto out_putf;
1623 
1624 	error = flock_make_lock(filp, &lock, cmd);
1625 	if (error)
1626 		goto out_putf;
1627 	if (can_sleep)
1628 		lock->fl_flags |= FL_SLEEP;
1629 
1630 	error = security_file_lock(filp, lock->fl_type);
1631 	if (error)
1632 		goto out_free;
1633 
1634 	if (filp->f_op && filp->f_op->flock)
1635 		error = filp->f_op->flock(filp,
1636 					  (can_sleep) ? F_SETLKW : F_SETLK,
1637 					  lock);
1638 	else
1639 		error = flock_lock_file_wait(filp, lock);
1640 
1641  out_free:
1642 	locks_free_lock(lock);
1643 
1644  out_putf:
1645 	fput(filp);
1646  out:
1647 	return error;
1648 }
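/*
 * Illustrative userspace usage of the syscall above (the path is a
 * made-up example): serialise access to a shared resource without
 * blocking if another holder exists:
 *
 *	int fd = open("/tmp/demo.lock", O_RDWR | O_CREAT, 0644);
 *
 *	if (flock(fd, LOCK_EX | LOCK_NB) == -1)
 *		handle EWOULDBLOCK: someone else holds the lock
 *	... critical section ...
 *	flock(fd, LOCK_UN);
 *
 * Closing the last descriptor referring to the open file description
 * drops the lock implicitly, per the FL_FLOCK rules in the header
 * comment.
 */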
1649 
1650 /**
1651  * vfs_test_lock - test file byte range lock
1652  * @filp: The file to test lock for
1653  * @fl: The lock to test; also used to hold result
1654  *
1655  * Returns -ERRNO on failure.  Indicates presence of conflicting lock by
1656  * setting fl->fl_type to something other than F_UNLCK.
1657  */
1658 int vfs_test_lock(struct file *filp, struct file_lock *fl)
1659 {
1660 	if (filp->f_op && filp->f_op->lock)
1661 		return filp->f_op->lock(filp, F_GETLK, fl);
1662 	posix_test_lock(filp, fl);
1663 	return 0;
1664 }
1665 EXPORT_SYMBOL_GPL(vfs_test_lock);
1666 
1667 static int posix_lock_to_flock(struct flock *flock, struct file_lock *fl)
1668 {
1669 	flock->l_pid = fl->fl_pid;
1670 #if BITS_PER_LONG == 32
1671 	/*
1672 	 * Make sure we can represent the posix lock via
1673 	 * legacy 32bit flock.
1674 	 */
1675 	if (fl->fl_start > OFFT_OFFSET_MAX)
1676 		return -EOVERFLOW;
1677 	if (fl->fl_end != OFFSET_MAX && fl->fl_end > OFFT_OFFSET_MAX)
1678 		return -EOVERFLOW;
1679 #endif
1680 	flock->l_start = fl->fl_start;
1681 	flock->l_len = fl->fl_end == OFFSET_MAX ? 0 :
1682 		fl->fl_end - fl->fl_start + 1;
1683 	flock->l_whence = 0;
1684 	flock->l_type = fl->fl_type;
1685 	return 0;
1686 }
1687 
1688 #if BITS_PER_LONG == 32
1689 static void posix_lock_to_flock64(struct flock64 *flock, struct file_lock *fl)
1690 {
1691 	flock->l_pid = fl->fl_pid;
1692 	flock->l_start = fl->fl_start;
1693 	flock->l_len = fl->fl_end == OFFSET_MAX ? 0 :
1694 		fl->fl_end - fl->fl_start + 1;
1695 	flock->l_whence = 0;
1696 	flock->l_type = fl->fl_type;
1697 }
1698 #endif
1699 
1700 /* Report the first existing lock that would conflict with l.
1701  * This implements the F_GETLK command of fcntl().
1702  */
1703 int fcntl_getlk(struct file *filp, struct flock __user *l)
1704 {
1705 	struct file_lock file_lock;
1706 	struct flock flock;
1707 	int error;
1708 
1709 	error = -EFAULT;
1710 	if (copy_from_user(&flock, l, sizeof(flock)))
1711 		goto out;
1712 	error = -EINVAL;
1713 	if ((flock.l_type != F_RDLCK) && (flock.l_type != F_WRLCK))
1714 		goto out;
1715 
1716 	error = flock_to_posix_lock(filp, &file_lock, &flock);
1717 	if (error)
1718 		goto out;
1719 
1720 	error = vfs_test_lock(filp, &file_lock);
1721 	if (error)
1722 		goto out;
1723 
1724 	flock.l_type = file_lock.fl_type;
1725 	if (file_lock.fl_type != F_UNLCK) {
1726 		error = posix_lock_to_flock(&flock, &file_lock);
1727 		if (error)
1728 			goto out;
1729 	}
1730 	error = -EFAULT;
1731 	if (!copy_to_user(l, &flock, sizeof(flock)))
1732 		error = 0;
1733 out:
1734 	return error;
1735 }
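/*
 * Illustrative userspace usage: probe for a conflict before trying to
 * write-lock the first 100 bytes:
 *
 *	struct flock fl = {
 *		.l_type   = F_WRLCK,
 *		.l_whence = SEEK_SET,
 *		.l_start  = 0,
 *		.l_len    = 100,
 *	};
 *
 *	if (fcntl(fd, F_GETLK, &fl) == 0 && fl.l_type != F_UNLCK)
 *		printf("pid %d holds a conflicting lock\n", (int)fl.l_pid);
 *
 * On return fl describes the first conflicting lock found, or has
 * l_type set to F_UNLCK if the region could have been locked.
 */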
1736 
1737 /**
1738  * vfs_lock_file - file byte range lock
1739  * @filp: The file to apply the lock to
1740  * @cmd: type of locking operation (F_SETLK, F_GETLK, etc.)
1741  * @fl: The lock to be applied
1742  * @conf: Place to return a copy of the conflicting lock, if found.
1743  *
1744  * A caller that doesn't care about the conflicting lock may pass NULL
1745  * as the final argument.
1746  *
1747  * If the filesystem defines a private ->lock() method, then @conf will
1748  * be left unchanged; so a caller that cares should initialize it to
1749  * some acceptable default.
1750  *
1751  * To avoid blocking kernel daemons, such as lockd, that need to acquire POSIX
1752  * locks, the ->lock() interface may return asynchronously, before the lock has
1753  * been granted or denied by the underlying filesystem, if (and only if)
1754  * fl_grant is set. Callers expecting ->lock() to return asynchronously
1755  * will only use F_SETLK, not F_SETLKW; they will set FL_SLEEP if (and only if)
1756  * the request is for a blocking lock. When ->lock() does return asynchronously,
1757  * it must return FILE_LOCK_DEFERRED, and call ->fl_grant() when the lock
1758  * request completes.
1759  * If the request is for a non-blocking lock, the file system should return
1760  * FILE_LOCK_DEFERRED, then try to get the lock and call the callback routine
1761  * with the result. If the request timed out, the callback routine will return a
1762  * nonzero return code and the file system should release the lock. The file
1763  * system is also responsible for keeping a corresponding posix lock when it
1764  * grants a lock, so the VFS can find out which locks are locally held and do
1765  * the correct lock cleanup when required.
1766  * The underlying filesystem must not drop the kernel lock or call
1767  * ->fl_grant() before returning to the caller with a FILE_LOCK_DEFERRED
1768  * return code.
1769  */
1770 int vfs_lock_file(struct file *filp, unsigned int cmd, struct file_lock *fl, struct file_lock *conf)
1771 {
1772 	if (filp->f_op && filp->f_op->lock)
1773 		return filp->f_op->lock(filp, cmd, fl);
1774 	else
1775 		return posix_lock_file(filp, fl, conf);
1776 }
1777 EXPORT_SYMBOL_GPL(vfs_lock_file);
1778 
1779 static int do_lock_file_wait(struct file *filp, unsigned int cmd,
1780 			     struct file_lock *fl)
1781 {
1782 	int error;
1783 
1784 	error = security_file_lock(filp, fl->fl_type);
1785 	if (error)
1786 		return error;
1787 
1788 	for (;;) {
1789 		error = vfs_lock_file(filp, cmd, fl, NULL);
1790 		if (error != FILE_LOCK_DEFERRED)
1791 			break;
1792 		error = wait_event_interruptible(fl->fl_wait, !fl->fl_next);
1793 		if (!error)
1794 			continue;
1795 
1796 		locks_delete_block(fl);
1797 		break;
1798 	}
1799 
1800 	return error;
1801 }
1802 
1803 /* Apply the lock described by l to an open file descriptor.
1804  * This implements both the F_SETLK and F_SETLKW commands of fcntl().
1805  */
1806 int fcntl_setlk(unsigned int fd, struct file *filp, unsigned int cmd,
1807 		struct flock __user *l)
1808 {
1809 	struct file_lock *file_lock = locks_alloc_lock();
1810 	struct flock flock;
1811 	struct inode *inode;
1812 	struct file *f;
1813 	int error;
1814 
1815 	if (file_lock == NULL)
1816 		return -ENOLCK;
1817 
1818 	/*
1819 	 * This might block, so we do it before checking the inode.
1820 	 */
1821 	error = -EFAULT;
1822 	if (copy_from_user(&flock, l, sizeof(flock)))
1823 		goto out;
1824 
1825 	inode = filp->f_path.dentry->d_inode;
1826 
1827 	/* Don't allow mandatory locks on files that may be memory mapped
1828 	 * and shared.
1829 	 */
1830 	if (mandatory_lock(inode) && mapping_writably_mapped(filp->f_mapping)) {
1831 		error = -EAGAIN;
1832 		goto out;
1833 	}
1834 
1835 again:
1836 	error = flock_to_posix_lock(filp, file_lock, &flock);
1837 	if (error)
1838 		goto out;
1839 	if (cmd == F_SETLKW) {
1840 		file_lock->fl_flags |= FL_SLEEP;
1841 	}
1842 
1843 	error = -EBADF;
1844 	switch (flock.l_type) {
1845 	case F_RDLCK:
1846 		if (!(filp->f_mode & FMODE_READ))
1847 			goto out;
1848 		break;
1849 	case F_WRLCK:
1850 		if (!(filp->f_mode & FMODE_WRITE))
1851 			goto out;
1852 		break;
1853 	case F_UNLCK:
1854 		break;
1855 	default:
1856 		error = -EINVAL;
1857 		goto out;
1858 	}
1859 
1860 	error = do_lock_file_wait(filp, cmd, file_lock);
1861 
1862 	/*
1863 	 * Attempt to detect a close/fcntl race and recover by
1864 	 * releasing the lock that was just acquired.
1865 	 */
1866 	/*
1867 	 * We need the spin_lock here - it prevents reordering between the
1868 	 * update of inode->i_flock and the check for it done in close().
1869 	 * rcu_read_lock() wouldn't do.
1870 	 */
1871 	spin_lock(&current->files->file_lock);
1872 	f = fcheck(fd);
1873 	spin_unlock(&current->files->file_lock);
1874 	if (!error && f != filp && flock.l_type != F_UNLCK) {
1875 		flock.l_type = F_UNLCK;
1876 		goto again;
1877 	}
1878 
1879 out:
1880 	locks_free_lock(file_lock);
1881 	return error;
1882 }
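/*
 * Illustrative sketch, not part of the original file: the userspace
 * view of the F_SETLK/F_SETLKW commands handled above.
 */
#if 0	/* example only, never compiled */
#include <fcntl.h>

static int example_lock_first_100_bytes(int fd, int wait)
{
	struct flock fl = {
		.l_type   = F_WRLCK,	/* needs an FMODE_WRITE descriptor */
		.l_whence = SEEK_SET,
		.l_start  = 0,
		.l_len    = 100,
	};

	/*
	 * F_SETLKW sets FL_SLEEP and blocks on a conflict; F_SETLK
	 * fails immediately with EAGAIN or EACCES instead.
	 */
	return fcntl(fd, wait ? F_SETLKW : F_SETLK, &fl);
}
#endif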
1883 
1884 #if BITS_PER_LONG == 32
1885 /* Report the first existing lock that would conflict with l.
1886  * This implements the F_GETLK64 command of fcntl().
1887  */
1888 int fcntl_getlk64(struct file *filp, struct flock64 __user *l)
1889 {
1890 	struct file_lock file_lock;
1891 	struct flock64 flock;
1892 	int error;
1893 
1894 	error = -EFAULT;
1895 	if (copy_from_user(&flock, l, sizeof(flock)))
1896 		goto out;
1897 	error = -EINVAL;
1898 	if ((flock.l_type != F_RDLCK) && (flock.l_type != F_WRLCK))
1899 		goto out;
1900 
1901 	error = flock64_to_posix_lock(filp, &file_lock, &flock);
1902 	if (error)
1903 		goto out;
1904 
1905 	error = vfs_test_lock(filp, &file_lock);
1906 	if (error)
1907 		goto out;
1908 
1909 	flock.l_type = file_lock.fl_type;
1910 	if (file_lock.fl_type != F_UNLCK)
1911 		posix_lock_to_flock64(&flock, &file_lock);
1912 
1913 	error = -EFAULT;
1914 	if (!copy_to_user(l, &flock, sizeof(flock)))
1915 		error = 0;
1916 
1917 out:
1918 	return error;
1919 }
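/*
 * Illustrative sketch, not part of the original file: probing for a
 * conflicting lock from userspace.  On 32-bit systems, glibc maps this
 * to F_GETLK64 when building with 64-bit file offsets.
 */
#if 0	/* example only, never compiled */
#include <fcntl.h>

static pid_t example_find_blocker(int fd)
{
	struct flock fl = {
		.l_type   = F_WRLCK,
		.l_whence = SEEK_SET,
		.l_start  = 0,
		.l_len    = 0,		/* zero length means "to EOF" */
	};

	if (fcntl(fd, F_GETLK, &fl) == -1)
		return -1;
	/* l_type is rewritten to F_UNLCK if the lock could be placed. */
	return fl.l_type == F_UNLCK ? 0 : fl.l_pid;
}
#endif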
1920 
1921 /* Apply the lock described by l to an open file descriptor.
1922  * This implements both the F_SETLK and F_SETLKW commands of fcntl().
1923  */
1924 int fcntl_setlk64(unsigned int fd, struct file *filp, unsigned int cmd,
1925 		struct flock64 __user *l)
1926 {
1927 	struct file_lock *file_lock = locks_alloc_lock();
1928 	struct flock64 flock;
1929 	struct inode *inode;
1930 	struct file *f;
1931 	int error;
1932 
1933 	if (file_lock == NULL)
1934 		return -ENOLCK;
1935 
1936 	/*
1937 	 * This might block, so we do it before checking the inode.
1938 	 */
1939 	error = -EFAULT;
1940 	if (copy_from_user(&flock, l, sizeof(flock)))
1941 		goto out;
1942 
1943 	inode = filp->f_path.dentry->d_inode;
1944 
1945 	/* Don't allow mandatory locks on files that may be memory mapped
1946 	 * and shared.
1947 	 */
1948 	if (mandatory_lock(inode) && mapping_writably_mapped(filp->f_mapping)) {
1949 		error = -EAGAIN;
1950 		goto out;
1951 	}
1952 
1953 again:
1954 	error = flock64_to_posix_lock(filp, file_lock, &flock);
1955 	if (error)
1956 		goto out;
1957 	if (cmd == F_SETLKW64) {
1958 		file_lock->fl_flags |= FL_SLEEP;
1959 	}
1960 
1961 	error = -EBADF;
1962 	switch (flock.l_type) {
1963 	case F_RDLCK:
1964 		if (!(filp->f_mode & FMODE_READ))
1965 			goto out;
1966 		break;
1967 	case F_WRLCK:
1968 		if (!(filp->f_mode & FMODE_WRITE))
1969 			goto out;
1970 		break;
1971 	case F_UNLCK:
1972 		break;
1973 	default:
1974 		error = -EINVAL;
1975 		goto out;
1976 	}
1977 
1978 	error = do_lock_file_wait(filp, cmd, file_lock);
1979 
1980 	/*
1981 	 * Attempt to detect a close/fcntl race and recover by
1982 	 * releasing the lock that was just acquired.
1983 	 */
1984 	spin_lock(&current->files->file_lock);
1985 	f = fcheck(fd);
1986 	spin_unlock(&current->files->file_lock);
1987 	if (!error && f != filp && flock.l_type != F_UNLCK) {
1988 		flock.l_type = F_UNLCK;
1989 		goto again;
1990 	}
1991 
1992 out:
1993 	locks_free_lock(file_lock);
1994 	return error;
1995 }
1996 #endif /* BITS_PER_LONG == 32 */
1997 
1998 /*
1999  * This function is called when the file is being removed
2000  * from the task's fd array.  POSIX locks belonging to this task
2001  * are deleted at this time.
2002  */
2003 void locks_remove_posix(struct file *filp, fl_owner_t owner)
2004 {
2005 	struct file_lock lock;
2006 
2007 	/*
2008 	 * If there are no locks held on this file, we don't need to call
2009 	 * posix_lock_file().  Another process could be setting a lock on this
2010 	 * file at the same time, but we wouldn't remove that lock anyway.
2011 	 */
2012 	if (!filp->f_path.dentry->d_inode->i_flock)
2013 		return;
2014 
2015 	lock.fl_type = F_UNLCK;
2016 	lock.fl_flags = FL_POSIX | FL_CLOSE;
2017 	lock.fl_start = 0;
2018 	lock.fl_end = OFFSET_MAX;
2019 	lock.fl_owner = owner;
2020 	lock.fl_pid = current->tgid;
2021 	lock.fl_file = filp;
2022 	lock.fl_ops = NULL;
2023 	lock.fl_lmops = NULL;
2024 
2025 	vfs_lock_file(filp, F_SETLK, &lock, NULL);
2026 
2027 	if (lock.fl_ops && lock.fl_ops->fl_release_private)
2028 		lock.fl_ops->fl_release_private(&lock);
2029 }
2030 
2031 EXPORT_SYMBOL(locks_remove_posix);
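/*
 * Illustrative sketch, not part of the original file: roughly how the
 * close() path (filp_close() in fs/open.c) invokes this, passing the
 * task's files_struct as the owner, since POSIX locks are owned
 * per-process rather than per-descriptor.  Simplified; details are
 * assumptions.
 */
#if 0	/* example only, never compiled */
int example_filp_close(struct file *filp, fl_owner_t id)
{
	locks_remove_posix(filp, id);	/* id is typically current->files */
	fput(filp);
	return 0;
}
#endif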
2032 
2033 /*
2034  * This function is called on the last close of an open file.
2035  */
2036 void locks_remove_flock(struct file *filp)
2037 {
2038 	struct inode *inode = filp->f_path.dentry->d_inode;
2039 	struct file_lock *fl;
2040 	struct file_lock **before;
2041 
2042 	if (!inode->i_flock)
2043 		return;
2044 
2045 	if (filp->f_op && filp->f_op->flock) {
2046 		struct file_lock fl = {
2047 			.fl_pid = current->tgid,
2048 			.fl_file = filp,
2049 			.fl_flags = FL_FLOCK,
2050 			.fl_type = F_UNLCK,
2051 			.fl_end = OFFSET_MAX,
2052 		};
2053 		filp->f_op->flock(filp, F_SETLKW, &fl);
2054 		if (fl.fl_ops && fl.fl_ops->fl_release_private)
2055 			fl.fl_ops->fl_release_private(&fl);
2056 	}
2057 
2058 	lock_flocks();
2059 	before = &inode->i_flock;
2060 
2061 	while ((fl = *before) != NULL) {
2062 		if (fl->fl_file == filp) {
2063 			if (IS_FLOCK(fl)) {
2064 				locks_delete_lock(before);
2065 				continue;
2066 			}
2067 			if (IS_LEASE(fl)) {
2068 				lease_modify(before, F_UNLCK);
2069 				continue;
2070 			}
2071 			/* Neither FLOCK nor LEASE - should not be possible */
2072 			BUG();
2073 		}
2074 		before = &fl->fl_next;
2075 	}
2076 	unlock_flocks();
2077 }
2078 
2079 /**
2080  *	posix_unblock_lock - stop waiting for a file lock
2081  *	@filp: how the file was opened
2082  *	@waiter: the lock which was waiting
2083  *
2084  *	Used by lock managers such as lockd to stop waiting for a blocked lock.
2085  */
2086 int
2087 posix_unblock_lock(struct file *filp, struct file_lock *waiter)
2088 {
2089 	int status = 0;
2090 
2091 	lock_flocks();
2092 	if (waiter->fl_next)
2093 		__locks_delete_block(waiter);
2094 	else
2095 		status = -ENOENT;
2096 	unlock_flocks();
2097 	return status;
2098 }
2099 
2100 EXPORT_SYMBOL(posix_unblock_lock);
2101 
2102 /**
2103  * vfs_cancel_lock - cancel a pending byte-range lock request
2104  * @filp: the file the lock request was made against
2105  * @fl: the blocked lock request to cancel
2106  *
2107  * Used by lock managers to cancel blocked requests.
2108  */
2109 int vfs_cancel_lock(struct file *filp, struct file_lock *fl)
2110 {
2111 	if (filp->f_op && filp->f_op->lock)
2112 		return filp->f_op->lock(filp, F_CANCELLK, fl);
2113 	return 0;
2114 }
2115 
2116 EXPORT_SYMBOL_GPL(vfs_cancel_lock);
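/*
 * Illustrative sketch, not part of the original file: how a lock
 * manager such as lockd might back out of a blocked request, first
 * asking the filesystem to cancel any deferred operation and then
 * removing the waiter from the blocked list.  The ordering and error
 * handling here are simplified assumptions.
 */
#if 0	/* example only, never compiled */
static void example_cancel_blocked(struct file *filp, struct file_lock *fl)
{
	vfs_cancel_lock(filp, fl);
	posix_unblock_lock(filp, fl);
}
#endif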
2117 
2118 #ifdef CONFIG_PROC_FS
2119 #include <linux/proc_fs.h>
2120 #include <linux/seq_file.h>
2121 
2122 static void lock_get_status(struct seq_file *f, struct file_lock *fl,
2123 			    loff_t id, char *pfx)
2124 {
2125 	struct inode *inode = NULL;
2126 	unsigned int fl_pid;
2127 
2128 	if (fl->fl_nspid)
2129 		fl_pid = pid_vnr(fl->fl_nspid);
2130 	else
2131 		fl_pid = fl->fl_pid;
2132 
2133 	if (fl->fl_file != NULL)
2134 		inode = fl->fl_file->f_path.dentry->d_inode;
2135 
2136 	seq_printf(f, "%lld:%s ", id, pfx);
2137 	if (IS_POSIX(fl)) {
2138 		seq_printf(f, "%6s %s ",
2139 			     (fl->fl_flags & FL_ACCESS) ? "ACCESS" : "POSIX ",
2140 			     (inode == NULL) ? "*NOINODE*" :
2141 			     mandatory_lock(inode) ? "MANDATORY" : "ADVISORY ");
2142 	} else if (IS_FLOCK(fl)) {
2143 		if (fl->fl_type & LOCK_MAND) {
2144 			seq_printf(f, "FLOCK  MSNFS     ");
2145 		} else {
2146 			seq_printf(f, "FLOCK  ADVISORY  ");
2147 		}
2148 	} else if (IS_LEASE(fl)) {
2149 		seq_printf(f, "LEASE  ");
2150 		if (fl->fl_type & F_INPROGRESS)
2151 			seq_printf(f, "BREAKING  ");
2152 		else if (fl->fl_file)
2153 			seq_printf(f, "ACTIVE    ");
2154 		else
2155 			seq_printf(f, "BREAKER   ");
2156 	} else {
2157 		seq_printf(f, "UNKNOWN UNKNOWN  ");
2158 	}
2159 	if (fl->fl_type & LOCK_MAND) {
2160 		seq_printf(f, "%s ",
2161 			       (fl->fl_type & LOCK_READ)
2162 			       ? (fl->fl_type & LOCK_WRITE) ? "RW   " : "READ "
2163 			       : (fl->fl_type & LOCK_WRITE) ? "WRITE" : "NONE ");
2164 	} else {
2165 		seq_printf(f, "%s ",
2166 			       (fl->fl_type & F_INPROGRESS)
2167 			       ? (fl->fl_type & F_UNLCK) ? "UNLCK" : "READ "
2168 			       : (fl->fl_type & F_WRLCK) ? "WRITE" : "READ ");
2169 	}
2170 	if (inode) {
2171 #ifdef WE_CAN_BREAK_LSLK_NOW
2172 		seq_printf(f, "%d %s:%ld ", fl_pid,
2173 				inode->i_sb->s_id, inode->i_ino);
2174 #else
2175 		/* userspace relies on this representation of dev_t ;-( */
2176 		seq_printf(f, "%d %02x:%02x:%ld ", fl_pid,
2177 				MAJOR(inode->i_sb->s_dev),
2178 				MINOR(inode->i_sb->s_dev), inode->i_ino);
2179 #endif
2180 	} else {
2181 		seq_printf(f, "%d <none>:0 ", fl_pid);
2182 	}
2183 	if (IS_POSIX(fl)) {
2184 		if (fl->fl_end == OFFSET_MAX)
2185 			seq_printf(f, "%Ld EOF\n", fl->fl_start);
2186 		else
2187 			seq_printf(f, "%Ld %Ld\n", fl->fl_start, fl->fl_end);
2188 	} else {
2189 		seq_printf(f, "0 EOF\n");
2190 	}
2191 }
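/*
 * For illustration (not in the original file): lock_get_status()
 * produces /proc/locks lines such as
 *
 *	1: POSIX  ADVISORY  WRITE 1193 08:01:394835 0 EOF
 *	2: FLOCK  ADVISORY  WRITE 1232 08:02:16651 0 EOF
 *	2: -> FLOCK  ADVISORY  WRITE 1233 08:02:16651 0 EOF
 *
 * i.e. id, class, mandatory/advisory, type, pid, major:minor:inode and
 * the byte range; the " ->" prefix marks a waiter blocked on the lock
 * above it.  The sample values are made up.
 */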
2192 
2193 static int locks_show(struct seq_file *f, void *v)
2194 {
2195 	struct file_lock *fl, *bfl;
2196 
2197 	fl = list_entry(v, struct file_lock, fl_link);
2198 
2199 	lock_get_status(f, fl, *((loff_t *)f->private), "");
2200 
2201 	list_for_each_entry(bfl, &fl->fl_block, fl_block)
2202 		lock_get_status(f, bfl, *((loff_t *)f->private), " ->");
2203 
2204 	return 0;
2205 }
2206 
2207 static void *locks_start(struct seq_file *f, loff_t *pos)
2208 {
2209 	loff_t *p = f->private;
2210 
2211 	lock_flocks();
2212 	*p = (*pos + 1);
2213 	return seq_list_start(&file_lock_list, *pos);
2214 }
2215 
2216 static void *locks_next(struct seq_file *f, void *v, loff_t *pos)
2217 {
2218 	loff_t *p = f->private;
2219 	++*p;
2220 	return seq_list_next(v, &file_lock_list, pos);
2221 }
2222 
2223 static void locks_stop(struct seq_file *f, void *v)
2224 {
2225 	unlock_flocks();
2226 }
2227 
2228 static const struct seq_operations locks_seq_operations = {
2229 	.start	= locks_start,
2230 	.next	= locks_next,
2231 	.stop	= locks_stop,
2232 	.show	= locks_show,
2233 };
2234 
2235 static int locks_open(struct inode *inode, struct file *filp)
2236 {
2237 	return seq_open_private(filp, &locks_seq_operations, sizeof(loff_t));
2238 }
2239 
2240 static const struct file_operations proc_locks_operations = {
2241 	.open		= locks_open,
2242 	.read		= seq_read,
2243 	.llseek		= seq_lseek,
2244 	.release	= seq_release_private,
2245 };
2246 
2247 static int __init proc_locks_init(void)
2248 {
2249 	proc_create("locks", 0, NULL, &proc_locks_operations);
2250 	return 0;
2251 }
2252 module_init(proc_locks_init);
2253 #endif
2254 
2255 /**
2256  *	lock_may_read - checks that the region is free of locks
2257  *	@inode: the inode that is being read
2258  *	@start: the first byte to read
2259  *	@len: the number of bytes to read
2260  *
2261  *	Emulates Windows locking requirements.  Whole-file
2262  *	mandatory locks (share modes) can prohibit a read, and
2263  *	byte-range POSIX locks can prohibit a read if they overlap.
2264  *
2265  *	N.B. this function is only ever called
2266  *	from knfsd and ownership of locks is never checked.
2267  */
2268 int lock_may_read(struct inode *inode, loff_t start, unsigned long len)
2269 {
2270 	struct file_lock *fl;
2271 	int result = 1;
2272 	lock_flocks();
2273 	for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next) {
2274 		if (IS_POSIX(fl)) {
2275 			if (fl->fl_type == F_RDLCK)
2276 				continue;
2277 			if ((fl->fl_end < start) || (fl->fl_start > (start + len)))
2278 				continue;
2279 		} else if (IS_FLOCK(fl)) {
2280 			if (!(fl->fl_type & LOCK_MAND))
2281 				continue;
2282 			if (fl->fl_type & LOCK_READ)
2283 				continue;
2284 		} else
2285 			continue;
2286 		result = 0;
2287 		break;
2288 	}
2289 	unlock_flocks();
2290 	return result;
2291 }
2292 
2293 EXPORT_SYMBOL(lock_may_read);
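/*
 * Worked example (illustration only): with a POSIX F_WRLCK covering
 * bytes 140-200 of the inode, lock_may_read(inode, 100, 50) finds that
 * neither skip condition holds (200 >= 100 and 140 <= 150), so the
 * ranges are treated as overlapping and the read is denied (returns 0).
 */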
2294 
2295 /**
2296  *	lock_may_write - checks that the region is free of locks
2297  *	@inode: the inode that is being written
2298  *	@start: the first byte to write
2299  *	@len: the number of bytes to write
2300  *
2301  *	Emulates Windows locking requirements.  Whole-file
2302  *	mandatory locks (share modes) can prohibit a write, and
2303  *	byte-range POSIX locks can prohibit a write if they overlap.
2304  *
2305  *	N.B. this function is only ever called
2306  *	from knfsd and ownership of locks is never checked.
2307  */
2308 int lock_may_write(struct inode *inode, loff_t start, unsigned long len)
2309 {
2310 	struct file_lock *fl;
2311 	int result = 1;
2312 	lock_flocks();
2313 	for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next) {
2314 		if (IS_POSIX(fl)) {
2315 			if ((fl->fl_end < start) || (fl->fl_start > (start + len)))
2316 				continue;
2317 		} else if (IS_FLOCK(fl)) {
2318 			if (!(fl->fl_type & LOCK_MAND))
2319 				continue;
2320 			if (fl->fl_type & LOCK_WRITE)
2321 				continue;
2322 		} else
2323 			continue;
2324 		result = 0;
2325 		break;
2326 	}
2327 	unlock_flocks();
2328 	return result;
2329 }
2330 
2331 EXPORT_SYMBOL(lock_may_write);
2332 
2333 static int __init filelock_init(void)
2334 {
2335 	filelock_cache = kmem_cache_create("file_lock_cache",
2336 			sizeof(struct file_lock), 0, SLAB_PANIC,
2337 			init_once);
2338 	return 0;
2339 }
2340 
2341 core_initcall(filelock_init);
2342