xref: /openbmc/linux/fs/locks.c (revision 615c36f5)
1 /*
2  *  linux/fs/locks.c
3  *
4  *  Provide support for fcntl()'s F_GETLK, F_SETLK, and F_SETLKW calls.
5  *  Doug Evans (dje@spiff.uucp), August 07, 1992
6  *
7  *  Deadlock detection added.
8  *  FIXME: one thing isn't handled yet:
9  *	- mandatory locks (requires lots of changes elsewhere)
10  *  Kelly Carmichael (kelly@[142.24.8.65]), September 17, 1994.
11  *
12  *  Miscellaneous edits, and a total rewrite of posix_lock_file() code.
13  *  Kai Petzke (wpp@marie.physik.tu-berlin.de), 1994
14  *
15  *  Converted file_lock_table to a linked list from an array, which removes
16  *  the limit on the number of active file locks.
17  *  Chad Page (pageone@netcom.com), November 27, 1994
18  *
19  *  Removed dependency on file descriptors. dup()'ed file descriptors now
20  *  get the same locks as the original file descriptors, and a close() on
21  *  any file descriptor removes ALL the locks on the file for the current
22  *  process. Since locks still depend on the process id, locks are inherited
23  *  after an exec() but not after a fork(). This agrees with POSIX, and both
24  *  BSD and SVR4 practice.
25  *  Andy Walker (andy@lysaker.kvaerner.no), February 14, 1995
26  *
27  *  Scrapped free list which is redundant now that we allocate locks
28  *  dynamically with kmalloc()/kfree().
29  *  Andy Walker (andy@lysaker.kvaerner.no), February 21, 1995
30  *
31  *  Implemented two lock personalities - FL_FLOCK and FL_POSIX.
32  *
33  *  FL_POSIX locks are created with calls to fcntl() and lockf() through the
34  *  fcntl() system call. They have the semantics described above.
35  *
36  *  FL_FLOCK locks are created with calls to flock(), through the flock()
37  *  system call, which is new. Old C libraries implement flock() via fcntl()
38  *  and will continue to use the old, broken implementation.
39  *
40  *  FL_FLOCK locks follow the 4.4 BSD flock() semantics. They are associated
41  *  with a file pointer (filp). As a result they can be shared by a parent
42  *  process and its children after a fork(). They are removed when the last
43  *  file descriptor referring to the file pointer is closed (unless explicitly
44  *  unlocked).
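 *
 *  For example, an illustrative userspace sketch of the shared-across-fork()
 *  behaviour described above (not part of this file; error handling omitted):
 *
 *	int fd = open("somefile", O_RDWR);
 *	flock(fd, LOCK_EX);
 *	if (fork() == 0)
 *		flock(fd, LOCK_UN);	<- the child shares the parent's
 *					   filp, so it may drop the lock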
45  *
46  *  FL_FLOCK locks never deadlock; an existing lock is always removed before
47  *  upgrading from shared to exclusive (or vice versa). When this happens
48  *  any processes blocked by the current lock are woken up and allowed to
49  *  run before the new lock is applied.
50  *  Andy Walker (andy@lysaker.kvaerner.no), June 09, 1995
51  *
52  *  Removed some race conditions in flock_lock_file(), marked other possible
53  *  races. Just grep for FIXME to see them.
54  *  Dmitry Gorodchanin (pgmdsg@ibi.com), February 09, 1996.
55  *
56  *  Addressed Dmitry's concerns. Deadlock checking no longer recursive.
57  *  Lock allocation changed to GFP_ATOMIC as we can't afford to sleep
58  *  once we've checked for blocking and deadlocking.
59  *  Andy Walker (andy@lysaker.kvaerner.no), April 03, 1996.
60  *
61  *  Initial implementation of mandatory locks. SunOS turned out to be
62  *  a rotten model, so I implemented the "obvious" semantics.
63  *  See 'Documentation/filesystems/mandatory-locking.txt' for details.
64  *  Andy Walker (andy@lysaker.kvaerner.no), April 06, 1996.
65  *
66  *  Don't allow mandatory locks on mmap()'ed files. Added simple functions to
67  *  check if a file has mandatory locks, used by mmap(), open() and creat() to
68  *  see if system call should be rejected. Ref. HP-UX/SunOS/Solaris Reference
69  *  Manual, Section 2.
70  *  Andy Walker (andy@lysaker.kvaerner.no), April 09, 1996.
71  *
72  *  Tidied up block list handling. Added '/proc/locks' interface.
73  *  Andy Walker (andy@lysaker.kvaerner.no), April 24, 1996.
74  *
75  *  Fixed deadlock condition for pathological code that mixes calls to
76  *  flock() and fcntl().
77  *  Andy Walker (andy@lysaker.kvaerner.no), April 29, 1996.
78  *
79  *  Allow only one type of locking scheme (FL_POSIX or FL_FLOCK) to be in use
80  *  for a given file at a time. Changed the CONFIG_LOCK_MANDATORY scheme to
81  *  guarantee sensible behaviour in the case where file system modules might
82  *  be compiled with different options than the kernel itself.
83  *  Andy Walker (andy@lysaker.kvaerner.no), May 15, 1996.
84  *
85  *  Added a couple of missing wake_up() calls. Thanks to Thomas Meckel
86  *  (Thomas.Meckel@mni.fh-giessen.de) for spotting this.
87  *  Andy Walker (andy@lysaker.kvaerner.no), May 15, 1996.
88  *
89  *  Changed FL_POSIX locks to use the block list in the same way as FL_FLOCK
90  *  locks. Changed process synchronisation to avoid dereferencing locks that
91  *  have already been freed.
92  *  Andy Walker (andy@lysaker.kvaerner.no), Sep 21, 1996.
93  *
94  *  Made the block list a circular list to minimise searching in the list.
95  *  Andy Walker (andy@lysaker.kvaerner.no), Sep 25, 1996.
96  *
97  *  Made mandatory locking a mount option. Default is not to allow mandatory
98  *  locking.
99  *  Andy Walker (andy@lysaker.kvaerner.no), Oct 04, 1996.
100  *
101  *  Some adaptations for NFS support.
102  *  Olaf Kirch (okir@monad.swb.de), Dec 1996,
103  *
104  *  Fixed /proc/locks interface so that we can't overrun the buffer we are handed.
105  *  Andy Walker (andy@lysaker.kvaerner.no), May 12, 1997.
106  *
107  *  Use slab allocator instead of kmalloc/kfree.
108  *  Use generic list implementation from <linux/list.h>.
109  *  Sped up posix_locks_deadlock by only considering blocked locks.
110  *  Matthew Wilcox <willy@debian.org>, March, 2000.
111  *
112  *  Leases and LOCK_MAND
113  *  Matthew Wilcox <willy@debian.org>, June, 2000.
114  *  Stephen Rothwell <sfr@canb.auug.org.au>, June, 2000.
115  */
116 
117 #include <linux/capability.h>
118 #include <linux/file.h>
119 #include <linux/fdtable.h>
120 #include <linux/fs.h>
121 #include <linux/init.h>
122 #include <linux/module.h>
123 #include <linux/security.h>
124 #include <linux/slab.h>
125 #include <linux/syscalls.h>
126 #include <linux/time.h>
127 #include <linux/rcupdate.h>
128 #include <linux/pid_namespace.h>
129 
130 #include <asm/uaccess.h>
131 
132 #define IS_POSIX(fl)	(fl->fl_flags & FL_POSIX)
133 #define IS_FLOCK(fl)	(fl->fl_flags & FL_FLOCK)
134 #define IS_LEASE(fl)	(fl->fl_flags & FL_LEASE)
135 
136 static bool lease_breaking(struct file_lock *fl)
137 {
138 	return fl->fl_flags & (FL_UNLOCK_PENDING | FL_DOWNGRADE_PENDING);
139 }
140 
141 static int target_leasetype(struct file_lock *fl)
142 {
143 	if (fl->fl_flags & FL_UNLOCK_PENDING)
144 		return F_UNLCK;
145 	if (fl->fl_flags & FL_DOWNGRADE_PENDING)
146 		return F_RDLCK;
147 	return fl->fl_type;
148 }
149 
150 int leases_enable = 1;
151 int lease_break_time = 45;
152 
153 #define for_each_lock(inode, lockp) \
154 	for (lockp = &inode->i_flock; *lockp != NULL; lockp = &(*lockp)->fl_next)
155 
156 static LIST_HEAD(file_lock_list);
157 static LIST_HEAD(blocked_list);
158 static DEFINE_SPINLOCK(file_lock_lock);
159 
160 /*
161  * Protects the two list heads above, plus the inode->i_flock list
162  */
163 void lock_flocks(void)
164 {
165 	spin_lock(&file_lock_lock);
166 }
167 EXPORT_SYMBOL_GPL(lock_flocks);
168 
169 void unlock_flocks(void)
170 {
171 	spin_unlock(&file_lock_lock);
172 }
173 EXPORT_SYMBOL_GPL(unlock_flocks);
174 
175 static struct kmem_cache *filelock_cache __read_mostly;
176 
177 static void locks_init_lock_heads(struct file_lock *fl)
178 {
179 	INIT_LIST_HEAD(&fl->fl_link);
180 	INIT_LIST_HEAD(&fl->fl_block);
181 	init_waitqueue_head(&fl->fl_wait);
182 }
183 
184 /* Allocate an empty lock structure. */
185 struct file_lock *locks_alloc_lock(void)
186 {
187 	struct file_lock *fl = kmem_cache_zalloc(filelock_cache, GFP_KERNEL);
188 
189 	if (fl)
190 		locks_init_lock_heads(fl);
191 
192 	return fl;
193 }
194 EXPORT_SYMBOL_GPL(locks_alloc_lock);
195 
196 void locks_release_private(struct file_lock *fl)
197 {
198 	if (fl->fl_ops) {
199 		if (fl->fl_ops->fl_release_private)
200 			fl->fl_ops->fl_release_private(fl);
201 		fl->fl_ops = NULL;
202 	}
203 	if (fl->fl_lmops) {
204 		if (fl->fl_lmops->lm_release_private)
205 			fl->fl_lmops->lm_release_private(fl);
206 		fl->fl_lmops = NULL;
207 	}
208 
209 }
210 EXPORT_SYMBOL_GPL(locks_release_private);
211 
212 /* Free a lock which is not in use. */
213 void locks_free_lock(struct file_lock *fl)
214 {
215 	BUG_ON(waitqueue_active(&fl->fl_wait));
216 	BUG_ON(!list_empty(&fl->fl_block));
217 	BUG_ON(!list_empty(&fl->fl_link));
218 
219 	locks_release_private(fl);
220 	kmem_cache_free(filelock_cache, fl);
221 }
222 EXPORT_SYMBOL(locks_free_lock);
223 
224 void locks_init_lock(struct file_lock *fl)
225 {
226 	memset(fl, 0, sizeof(struct file_lock));
227 	locks_init_lock_heads(fl);
228 }
229 
230 EXPORT_SYMBOL(locks_init_lock);
231 
232 static void locks_copy_private(struct file_lock *new, struct file_lock *fl)
233 {
234 	if (fl->fl_ops) {
235 		if (fl->fl_ops->fl_copy_lock)
236 			fl->fl_ops->fl_copy_lock(new, fl);
237 		new->fl_ops = fl->fl_ops;
238 	}
239 	if (fl->fl_lmops)
240 		new->fl_lmops = fl->fl_lmops;
241 }
242 
243 /*
244  * Initialize a new lock from an existing file_lock structure.
245  */
246 void __locks_copy_lock(struct file_lock *new, const struct file_lock *fl)
247 {
248 	new->fl_owner = fl->fl_owner;
249 	new->fl_pid = fl->fl_pid;
250 	new->fl_file = NULL;
251 	new->fl_flags = fl->fl_flags;
252 	new->fl_type = fl->fl_type;
253 	new->fl_start = fl->fl_start;
254 	new->fl_end = fl->fl_end;
255 	new->fl_ops = NULL;
256 	new->fl_lmops = NULL;
257 }
258 EXPORT_SYMBOL(__locks_copy_lock);
259 
260 void locks_copy_lock(struct file_lock *new, struct file_lock *fl)
261 {
262 	locks_release_private(new);
263 
264 	__locks_copy_lock(new, fl);
265 	new->fl_file = fl->fl_file;
266 	new->fl_ops = fl->fl_ops;
267 	new->fl_lmops = fl->fl_lmops;
268 
269 	locks_copy_private(new, fl);
270 }
271 
272 EXPORT_SYMBOL(locks_copy_lock);
273 
274 static inline int flock_translate_cmd(int cmd) {
275 	if (cmd & LOCK_MAND)
276 		return cmd & (LOCK_MAND | LOCK_RW);
277 	switch (cmd) {
278 	case LOCK_SH:
279 		return F_RDLCK;
280 	case LOCK_EX:
281 		return F_WRLCK;
282 	case LOCK_UN:
283 		return F_UNLCK;
284 	}
285 	return -EINVAL;
286 }
287 
288 /* Fill in a file_lock structure with an appropriate FLOCK lock. */
289 static int flock_make_lock(struct file *filp, struct file_lock **lock,
290 		unsigned int cmd)
291 {
292 	struct file_lock *fl;
293 	int type = flock_translate_cmd(cmd);
294 	if (type < 0)
295 		return type;
296 
297 	fl = locks_alloc_lock();
298 	if (fl == NULL)
299 		return -ENOMEM;
300 
301 	fl->fl_file = filp;
302 	fl->fl_pid = current->tgid;
303 	fl->fl_flags = FL_FLOCK;
304 	fl->fl_type = type;
305 	fl->fl_end = OFFSET_MAX;
306 
307 	*lock = fl;
308 	return 0;
309 }
310 
311 static int assign_type(struct file_lock *fl, int type)
312 {
313 	switch (type) {
314 	case F_RDLCK:
315 	case F_WRLCK:
316 	case F_UNLCK:
317 		fl->fl_type = type;
318 		break;
319 	default:
320 		return -EINVAL;
321 	}
322 	return 0;
323 }
324 
325 /* Verify a "struct flock" and copy it to a "struct file_lock" as a POSIX
326  * style lock.
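 *
 * A worked example of the mapping implemented below: with l_whence == SEEK_SET,
 * l_start == 100 and l_len == 10 we get fl_start == 100 and fl_end == 109.
 * l_len == 0 means "lock to end of file" (fl_end == OFFSET_MAX), and a
 * negative l_len (defined by POSIX-2001) locks the l_len bytes *before*
 * l_start, so l_start == 100, l_len == -10 covers [90, 99].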
327  */
328 static int flock_to_posix_lock(struct file *filp, struct file_lock *fl,
329 			       struct flock *l)
330 {
331 	off_t start, end;
332 
333 	switch (l->l_whence) {
334 	case SEEK_SET:
335 		start = 0;
336 		break;
337 	case SEEK_CUR:
338 		start = filp->f_pos;
339 		break;
340 	case SEEK_END:
341 		start = i_size_read(filp->f_path.dentry->d_inode);
342 		break;
343 	default:
344 		return -EINVAL;
345 	}
346 
347 	/* POSIX-1996 leaves the case l->l_len < 0 undefined;
348 	   POSIX-2001 defines it. */
349 	start += l->l_start;
350 	if (start < 0)
351 		return -EINVAL;
352 	fl->fl_end = OFFSET_MAX;
353 	if (l->l_len > 0) {
354 		end = start + l->l_len - 1;
355 		fl->fl_end = end;
356 	} else if (l->l_len < 0) {
357 		end = start - 1;
358 		fl->fl_end = end;
359 		start += l->l_len;
360 		if (start < 0)
361 			return -EINVAL;
362 	}
363 	fl->fl_start = start;	/* we record the absolute position */
364 	if (fl->fl_end < fl->fl_start)
365 		return -EOVERFLOW;
366 
367 	fl->fl_owner = current->files;
368 	fl->fl_pid = current->tgid;
369 	fl->fl_file = filp;
370 	fl->fl_flags = FL_POSIX;
371 	fl->fl_ops = NULL;
372 	fl->fl_lmops = NULL;
373 
374 	return assign_type(fl, l->l_type);
375 }
376 
377 #if BITS_PER_LONG == 32
378 static int flock64_to_posix_lock(struct file *filp, struct file_lock *fl,
379 				 struct flock64 *l)
380 {
381 	loff_t start;
382 
383 	switch (l->l_whence) {
384 	case SEEK_SET:
385 		start = 0;
386 		break;
387 	case SEEK_CUR:
388 		start = filp->f_pos;
389 		break;
390 	case SEEK_END:
391 		start = i_size_read(filp->f_path.dentry->d_inode);
392 		break;
393 	default:
394 		return -EINVAL;
395 	}
396 
397 	start += l->l_start;
398 	if (start < 0)
399 		return -EINVAL;
400 	fl->fl_end = OFFSET_MAX;
401 	if (l->l_len > 0) {
402 		fl->fl_end = start + l->l_len - 1;
403 	} else if (l->l_len < 0) {
404 		fl->fl_end = start - 1;
405 		start += l->l_len;
406 		if (start < 0)
407 			return -EINVAL;
408 	}
409 	fl->fl_start = start;	/* we record the absolute position */
410 	if (fl->fl_end < fl->fl_start)
411 		return -EOVERFLOW;
412 
413 	fl->fl_owner = current->files;
414 	fl->fl_pid = current->tgid;
415 	fl->fl_file = filp;
416 	fl->fl_flags = FL_POSIX;
417 	fl->fl_ops = NULL;
418 	fl->fl_lmops = NULL;
419 
420 	return assign_type(fl, l->l_type);
421 }
422 #endif
423 
424 /* default lease lock manager operations */
425 static void lease_break_callback(struct file_lock *fl)
426 {
427 	kill_fasync(&fl->fl_fasync, SIGIO, POLL_MSG);
428 }
429 
430 static void lease_release_private_callback(struct file_lock *fl)
431 {
432 	if (!fl->fl_file)
433 		return;
434 
435 	f_delown(fl->fl_file);
436 	fl->fl_file->f_owner.signum = 0;
437 }
438 
439 static const struct lock_manager_operations lease_manager_ops = {
440 	.lm_break = lease_break_callback,
441 	.lm_release_private = lease_release_private_callback,
442 	.lm_change = lease_modify,
443 };
444 
445 /*
446  * Initialize a lease, use the default lock manager operations
447  */
448 static int lease_init(struct file *filp, int type, struct file_lock *fl)
449 {
450 	if (assign_type(fl, type) != 0)
451 		return -EINVAL;
452 
453 	fl->fl_owner = current->files;
454 	fl->fl_pid = current->tgid;
455 
456 	fl->fl_file = filp;
457 	fl->fl_flags = FL_LEASE;
458 	fl->fl_start = 0;
459 	fl->fl_end = OFFSET_MAX;
460 	fl->fl_ops = NULL;
461 	fl->fl_lmops = &lease_manager_ops;
462 	return 0;
463 }
464 
465 /* Allocate a file_lock initialised to this type of lease */
466 static struct file_lock *lease_alloc(struct file *filp, int type)
467 {
468 	struct file_lock *fl = locks_alloc_lock();
469 	int error = -ENOMEM;
470 
471 	if (fl == NULL)
472 		return ERR_PTR(error);
473 
474 	error = lease_init(filp, type, fl);
475 	if (error) {
476 		locks_free_lock(fl);
477 		return ERR_PTR(error);
478 	}
479 	return fl;
480 }
481 
482 /* Check if two locks overlap each other.
483  */
484 static inline int locks_overlap(struct file_lock *fl1, struct file_lock *fl2)
485 {
486 	return ((fl1->fl_end >= fl2->fl_start) &&
487 		(fl2->fl_end >= fl1->fl_start));
488 }
489 
490 /*
491  * Check whether two locks have the same owner.
492  */
493 static int posix_same_owner(struct file_lock *fl1, struct file_lock *fl2)
494 {
495 	if (fl1->fl_lmops && fl1->fl_lmops->lm_compare_owner)
496 		return fl2->fl_lmops == fl1->fl_lmops &&
497 			fl1->fl_lmops->lm_compare_owner(fl1, fl2);
498 	return fl1->fl_owner == fl2->fl_owner;
499 }
500 
501 /* Remove waiter from blocker's block list.
502  * When the blocker's block list points back to itself, the list is empty.
503  */
504 static void __locks_delete_block(struct file_lock *waiter)
505 {
506 	list_del_init(&waiter->fl_block);
507 	list_del_init(&waiter->fl_link);
508 	waiter->fl_next = NULL;
509 }
510 
511 /* Remove waiter from blocker's block list, taking the
512  * file_lock_lock around the removal. */
513 static void locks_delete_block(struct file_lock *waiter)
514 {
515 	lock_flocks();
516 	__locks_delete_block(waiter);
517 	unlock_flocks();
518 }
519 
520 /* Insert waiter into blocker's block list.
521  * We use a circular list so that processes can be easily woken up in
522  * the order they blocked. The documentation doesn't require this but
523  * it seems like the reasonable thing to do.
524  */
525 static void locks_insert_block(struct file_lock *blocker,
526 			       struct file_lock *waiter)
527 {
528 	BUG_ON(!list_empty(&waiter->fl_block));
529 	list_add_tail(&waiter->fl_block, &blocker->fl_block);
530 	waiter->fl_next = blocker;
531 	if (IS_POSIX(blocker))
532 		list_add(&waiter->fl_link, &blocked_list);
533 }
534 
535 /* Wake up processes blocked waiting for blocker.
536  * Each waiter is removed from the block list and then either notified
537  * via its lm_notify() callback or woken up through its wait queue.
538  */
539 static void locks_wake_up_blocks(struct file_lock *blocker)
540 {
541 	while (!list_empty(&blocker->fl_block)) {
542 		struct file_lock *waiter;
543 
544 		waiter = list_first_entry(&blocker->fl_block,
545 				struct file_lock, fl_block);
546 		__locks_delete_block(waiter);
547 		if (waiter->fl_lmops && waiter->fl_lmops->lm_notify)
548 			waiter->fl_lmops->lm_notify(waiter);
549 		else
550 			wake_up(&waiter->fl_wait);
551 	}
552 }
553 
554 /* Insert file lock fl into an inode's lock list at the position indicated
555  * by pos. At the same time add the lock to the global file lock list.
556  */
557 static void locks_insert_lock(struct file_lock **pos, struct file_lock *fl)
558 {
559 	list_add(&fl->fl_link, &file_lock_list);
560 
561 	fl->fl_nspid = get_pid(task_tgid(current));
562 
563 	/* insert into file's list */
564 	fl->fl_next = *pos;
565 	*pos = fl;
566 }
567 
568 /*
569  * Delete a lock and then free it.
570  * Wake up processes that are blocked waiting for this lock,
571  * notify the FS that the lock has been cleared and
572  * finally free the lock.
573  */
574 static void locks_delete_lock(struct file_lock **thisfl_p)
575 {
576 	struct file_lock *fl = *thisfl_p;
577 
578 	*thisfl_p = fl->fl_next;
579 	fl->fl_next = NULL;
580 	list_del_init(&fl->fl_link);
581 
582 	fasync_helper(0, fl->fl_file, 0, &fl->fl_fasync);
583 	if (fl->fl_fasync != NULL) {
584 		printk(KERN_ERR "locks_delete_lock: fasync == %p\n", fl->fl_fasync);
585 		fl->fl_fasync = NULL;
586 	}
587 
588 	if (fl->fl_nspid) {
589 		put_pid(fl->fl_nspid);
590 		fl->fl_nspid = NULL;
591 	}
592 
593 	locks_wake_up_blocks(fl);
594 	locks_free_lock(fl);
595 }
596 
597 /* Determine if lock sys_fl blocks lock caller_fl. Common functionality
598  * checks for shared/exclusive status of overlapping locks.
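 * The resulting table: a read lock (F_RDLCK) never blocks another read
 * lock; any pairing that involves a write lock (F_WRLCK) conflicts.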
599  */
600 static int locks_conflict(struct file_lock *caller_fl, struct file_lock *sys_fl)
601 {
602 	if (sys_fl->fl_type == F_WRLCK)
603 		return 1;
604 	if (caller_fl->fl_type == F_WRLCK)
605 		return 1;
606 	return 0;
607 }
608 
609 /* Determine if lock sys_fl blocks lock caller_fl. POSIX specific
610  * checking before calling the locks_conflict().
611  */
612 static int posix_locks_conflict(struct file_lock *caller_fl, struct file_lock *sys_fl)
613 {
614 	/* POSIX locks owned by the same process do not conflict with
615 	 * each other.
616 	 */
617 	if (!IS_POSIX(sys_fl) || posix_same_owner(caller_fl, sys_fl))
618 		return (0);
619 
620 	/* Check whether they overlap */
621 	if (!locks_overlap(caller_fl, sys_fl))
622 		return 0;
623 
624 	return (locks_conflict(caller_fl, sys_fl));
625 }
626 
627 /* Determine if lock sys_fl blocks lock caller_fl. FLOCK specific
628  * checking before calling the locks_conflict().
629  */
630 static int flock_locks_conflict(struct file_lock *caller_fl, struct file_lock *sys_fl)
631 {
632 	/* FLOCK locks referring to the same filp do not conflict with
633 	 * each other.
634 	 */
635 	if (!IS_FLOCK(sys_fl) || (caller_fl->fl_file == sys_fl->fl_file))
636 		return (0);
637 	if ((caller_fl->fl_type & LOCK_MAND) || (sys_fl->fl_type & LOCK_MAND))
638 		return 0;
639 
640 	return (locks_conflict(caller_fl, sys_fl));
641 }
642 
643 void
644 posix_test_lock(struct file *filp, struct file_lock *fl)
645 {
646 	struct file_lock *cfl;
647 
648 	lock_flocks();
649 	for (cfl = filp->f_path.dentry->d_inode->i_flock; cfl; cfl = cfl->fl_next) {
650 		if (!IS_POSIX(cfl))
651 			continue;
652 		if (posix_locks_conflict(fl, cfl))
653 			break;
654 	}
655 	if (cfl) {
656 		__locks_copy_lock(fl, cfl);
657 		if (cfl->fl_nspid)
658 			fl->fl_pid = pid_vnr(cfl->fl_nspid);
659 	} else
660 		fl->fl_type = F_UNLCK;
661 	unlock_flocks();
662 	return;
663 }
664 EXPORT_SYMBOL(posix_test_lock);
665 
666 /*
667  * Deadlock detection:
668  *
669  * We attempt to detect deadlocks that are due purely to posix file
670  * locks.
671  *
672  * We assume that a task can be waiting for at most one lock at a time.
673  * So for any acquired lock, the process holding that lock may be
674  * waiting on at most one other lock.  That lock in turns may be held by
675  * someone waiting for at most one other lock.  Given a requested lock
676  * caller_fl which is about to wait for a conflicting lock block_fl, we
677  * follow this chain of waiters to ensure we are not about to create a
678  * cycle.
679  *
680  * Since we do this before we ever put a process to sleep on a lock, we
681  * are ensured that there is never a cycle; that is what guarantees that
682  * the while() loop in posix_locks_deadlock() eventually completes.
683  *
684  * Note: the above assumption may not be true when handling lock
685  * requests from a broken NFS client. It may also fail in the presence
686  * of tasks (such as posix threads) sharing the same open file table.
687  *
688  * To handle those cases, we just bail out after a few iterations.
689  */
690 
691 #define MAX_DEADLK_ITERATIONS 10
692 
693 /* Find a lock that the owner of the given block_fl is blocking on. */
694 static struct file_lock *what_owner_is_waiting_for(struct file_lock *block_fl)
695 {
696 	struct file_lock *fl;
697 
698 	list_for_each_entry(fl, &blocked_list, fl_link) {
699 		if (posix_same_owner(fl, block_fl))
700 			return fl->fl_next;
701 	}
702 	return NULL;
703 }
704 
705 static int posix_locks_deadlock(struct file_lock *caller_fl,
706 				struct file_lock *block_fl)
707 {
708 	int i = 0;
709 
710 	while ((block_fl = what_owner_is_waiting_for(block_fl))) {
711 		if (i++ > MAX_DEADLK_ITERATIONS)
712 			return 0;
713 		if (posix_same_owner(caller_fl, block_fl))
714 			return 1;
715 	}
716 	return 0;
717 }
718 
719 /* Try to create a FLOCK lock on filp. We always insert new FLOCK locks
720  * after any leases, but before any posix locks.
721  *
722  * Note that if called with an FL_EXISTS argument, the caller may determine
723  * whether or not a lock was successfully freed by testing the return
724  * value for -ENOENT.
725  */
726 static int flock_lock_file(struct file *filp, struct file_lock *request)
727 {
728 	struct file_lock *new_fl = NULL;
729 	struct file_lock **before;
730 	struct inode *inode = filp->f_path.dentry->d_inode;
731 	int error = 0;
732 	int found = 0;
733 
734 	if (!(request->fl_flags & FL_ACCESS) && (request->fl_type != F_UNLCK)) {
735 		new_fl = locks_alloc_lock();
736 		if (!new_fl)
737 			return -ENOMEM;
738 	}
739 
740 	lock_flocks();
741 	if (request->fl_flags & FL_ACCESS)
742 		goto find_conflict;
743 
744 	for_each_lock(inode, before) {
745 		struct file_lock *fl = *before;
746 		if (IS_POSIX(fl))
747 			break;
748 		if (IS_LEASE(fl))
749 			continue;
750 		if (filp != fl->fl_file)
751 			continue;
752 		if (request->fl_type == fl->fl_type)
753 			goto out;
754 		found = 1;
755 		locks_delete_lock(before);
756 		break;
757 	}
758 
759 	if (request->fl_type == F_UNLCK) {
760 		if ((request->fl_flags & FL_EXISTS) && !found)
761 			error = -ENOENT;
762 		goto out;
763 	}
764 
765 	/*
766 	 * If a higher-priority process was blocked on the old file lock,
767 	 * give it the opportunity to lock the file.
768 	 */
769 	if (found) {
770 		unlock_flocks();
771 		cond_resched();
772 		lock_flocks();
773 	}
774 
775 find_conflict:
776 	for_each_lock(inode, before) {
777 		struct file_lock *fl = *before;
778 		if (IS_POSIX(fl))
779 			break;
780 		if (IS_LEASE(fl))
781 			continue;
782 		if (!flock_locks_conflict(request, fl))
783 			continue;
784 		error = -EAGAIN;
785 		if (!(request->fl_flags & FL_SLEEP))
786 			goto out;
787 		error = FILE_LOCK_DEFERRED;
788 		locks_insert_block(fl, request);
789 		goto out;
790 	}
791 	if (request->fl_flags & FL_ACCESS)
792 		goto out;
793 	locks_copy_lock(new_fl, request);
794 	locks_insert_lock(before, new_fl);
795 	new_fl = NULL;
796 	error = 0;
797 
798 out:
799 	unlock_flocks();
800 	if (new_fl)
801 		locks_free_lock(new_fl);
802 	return error;
803 }
804 
805 static int __posix_lock_file(struct inode *inode, struct file_lock *request, struct file_lock *conflock)
806 {
807 	struct file_lock *fl;
808 	struct file_lock *new_fl = NULL;
809 	struct file_lock *new_fl2 = NULL;
810 	struct file_lock *left = NULL;
811 	struct file_lock *right = NULL;
812 	struct file_lock **before;
813 	int error, added = 0;
814 
815 	/*
816 	 * We may need two file_lock structures for this operation,
817 	 * so we get them in advance to avoid races.
818 	 *
819 	 * In some cases we can be sure that no new locks will be needed.
820 	 */
821 	if (!(request->fl_flags & FL_ACCESS) &&
822 	    (request->fl_type != F_UNLCK ||
823 	     request->fl_start != 0 || request->fl_end != OFFSET_MAX)) {
824 		new_fl = locks_alloc_lock();
825 		new_fl2 = locks_alloc_lock();
826 	}
827 
828 	lock_flocks();
829 	if (request->fl_type != F_UNLCK) {
830 		for_each_lock(inode, before) {
831 			fl = *before;
832 			if (!IS_POSIX(fl))
833 				continue;
834 			if (!posix_locks_conflict(request, fl))
835 				continue;
836 			if (conflock)
837 				__locks_copy_lock(conflock, fl);
838 			error = -EAGAIN;
839 			if (!(request->fl_flags & FL_SLEEP))
840 				goto out;
841 			error = -EDEADLK;
842 			if (posix_locks_deadlock(request, fl))
843 				goto out;
844 			error = FILE_LOCK_DEFERRED;
845 			locks_insert_block(fl, request);
846 			goto out;
847 		}
848 	}
849 
850 	/* If we're just looking for a conflict, we're done. */
851 	error = 0;
852 	if (request->fl_flags & FL_ACCESS)
853 		goto out;
854 
855 	/*
856 	 * Find the first old lock with the same owner as the new lock.
857 	 */
858 
859 	before = &inode->i_flock;
860 
861 	/* First skip locks owned by other processes.  */
862 	while ((fl = *before) && (!IS_POSIX(fl) ||
863 				  !posix_same_owner(request, fl))) {
864 		before = &fl->fl_next;
865 	}
866 
867 	/* Process locks with this owner.  */
868 	while ((fl = *before) && posix_same_owner(request, fl)) {
869 		/* Detect adjacent or overlapping regions (if same lock type)
870 		 */
871 		if (request->fl_type == fl->fl_type) {
872 			/* In all comparisons of start vs end, use
873 			 * "start - 1" rather than "end + 1". If end
874 			 * is OFFSET_MAX, end + 1 will become negative.
875 			 */
876 			if (fl->fl_end < request->fl_start - 1)
877 				goto next_lock;
878 			/* If the next lock in the list has entirely bigger
879 			 * addresses than the new one, insert the lock here.
880 			 */
881 			if (fl->fl_start - 1 > request->fl_end)
882 				break;
883 
884 			/* If we come here, the new and old lock are of the
885 			 * same type and adjacent or overlapping. Make one
886 			 * lock yielding from the lower start address of both
887 			 * locks to the higher end address.
888 			 */
889 			if (fl->fl_start > request->fl_start)
890 				fl->fl_start = request->fl_start;
891 			else
892 				request->fl_start = fl->fl_start;
893 			if (fl->fl_end < request->fl_end)
894 				fl->fl_end = request->fl_end;
895 			else
896 				request->fl_end = fl->fl_end;
897 			if (added) {
898 				locks_delete_lock(before);
899 				continue;
900 			}
901 			request = fl;
902 			added = 1;
903 		}
904 		else {
905 			/* Processing for different lock types is a bit
906 			 * more complex.
907 			 */
908 			if (fl->fl_end < request->fl_start)
909 				goto next_lock;
910 			if (fl->fl_start > request->fl_end)
911 				break;
912 			if (request->fl_type == F_UNLCK)
913 				added = 1;
914 			if (fl->fl_start < request->fl_start)
915 				left = fl;
916 			/* If the next lock in the list has a higher end
917 			 * address than the new one, insert the new one here.
918 			 */
919 			if (fl->fl_end > request->fl_end) {
920 				right = fl;
921 				break;
922 			}
923 			if (fl->fl_start >= request->fl_start) {
924 				/* The new lock completely replaces an old
925 				 * one (This may happen several times).
926 				 */
927 				if (added) {
928 					locks_delete_lock(before);
929 					continue;
930 				}
931 				/* Replace the old lock with the new one.
932 				 * Wake up anybody waiting for the old one,
933 				 * as the change in lock type might satisfy
934 				 * their needs.
935 				 */
936 				locks_wake_up_blocks(fl);
937 				fl->fl_start = request->fl_start;
938 				fl->fl_end = request->fl_end;
939 				fl->fl_type = request->fl_type;
940 				locks_release_private(fl);
941 				locks_copy_private(fl, request);
942 				request = fl;
943 				added = 1;
944 			}
945 		}
946 		/* Go on to next lock.
947 		 */
948 	next_lock:
949 		before = &fl->fl_next;
950 	}
951 
952 	/*
953 	 * The above code only modifies existing locks in case of
954 	 * merging or replacing.  If new lock(s) need to be inserted
955 	 * all modifications are done below this point, so it is still
956 	 * safe to bail out.
957 	 */
958 	error = -ENOLCK; /* "no luck" */
959 	if (right && left == right && !new_fl2)
960 		goto out;
961 
962 	error = 0;
963 	if (!added) {
964 		if (request->fl_type == F_UNLCK) {
965 			if (request->fl_flags & FL_EXISTS)
966 				error = -ENOENT;
967 			goto out;
968 		}
969 
970 		if (!new_fl) {
971 			error = -ENOLCK;
972 			goto out;
973 		}
974 		locks_copy_lock(new_fl, request);
975 		locks_insert_lock(before, new_fl);
976 		new_fl = NULL;
977 	}
978 	if (right) {
979 		if (left == right) {
980 			/* The new lock breaks the old one in two pieces,
981 			 * so we have to use the second new lock.
982 			 */
983 			left = new_fl2;
984 			new_fl2 = NULL;
985 			locks_copy_lock(left, right);
986 			locks_insert_lock(before, left);
987 		}
988 		right->fl_start = request->fl_end + 1;
989 		locks_wake_up_blocks(right);
990 	}
991 	if (left) {
992 		left->fl_end = request->fl_start - 1;
993 		locks_wake_up_blocks(left);
994 	}
995  out:
996 	unlock_flocks();
997 	/*
998 	 * Free any unused locks.
999 	 */
1000 	if (new_fl)
1001 		locks_free_lock(new_fl);
1002 	if (new_fl2)
1003 		locks_free_lock(new_fl2);
1004 	return error;
1005 }
1006 
1007 /**
1008  * posix_lock_file - Apply a POSIX-style lock to a file
1009  * @filp: The file to apply the lock to
1010  * @fl: The lock to be applied
1011  * @conflock: Place to return a copy of the conflicting lock, if found.
1012  *
1013  * Add a POSIX style lock to a file.
1014  * We merge adjacent & overlapping locks whenever possible.
1015  * POSIX locks are sorted by owner task, then by starting address
1016  *
1017  * Note that if called with an FL_EXISTS argument, the caller may determine
1018  * whether or not a lock was successfully freed by testing the return
1019  * value for -ENOENT.
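 *
 * For example, with no other locks present, write-locking [0, 9] and then
 * [10, 19] for the same owner leaves one lock covering [0, 19]: the code
 * below coalesces same-type locks that are adjacent or overlapping.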
1020  */
1021 int posix_lock_file(struct file *filp, struct file_lock *fl,
1022 			struct file_lock *conflock)
1023 {
1024 	return __posix_lock_file(filp->f_path.dentry->d_inode, fl, conflock);
1025 }
1026 EXPORT_SYMBOL(posix_lock_file);
1027 
1028 /**
1029  * posix_lock_file_wait - Apply a POSIX-style lock to a file
1030  * @filp: The file to apply the lock to
1031  * @fl: The lock to be applied
1032  *
1033  * Add a POSIX style lock to a file.
1034  * We merge adjacent & overlapping locks whenever possible.
1035  * POSIX locks are sorted by owner task, then by starting address
1036  */
1037 int posix_lock_file_wait(struct file *filp, struct file_lock *fl)
1038 {
1039 	int error;
1040 	might_sleep();
1041 	for (;;) {
1042 		error = posix_lock_file(filp, fl, NULL);
1043 		if (error != FILE_LOCK_DEFERRED)
1044 			break;
1045 		error = wait_event_interruptible(fl->fl_wait, !fl->fl_next);
1046 		if (!error)
1047 			continue;
1048 
1049 		locks_delete_block(fl);
1050 		break;
1051 	}
1052 	return error;
1053 }
1054 EXPORT_SYMBOL(posix_lock_file_wait);
1055 
1056 /**
1057  * locks_mandatory_locked - Check for an active lock
1058  * @inode: the file to check
1059  *
1060  * Searches the inode's list of locks to find any POSIX locks which conflict.
1061  * This function is called from locks_verify_locked() only.
1062  */
1063 int locks_mandatory_locked(struct inode *inode)
1064 {
1065 	fl_owner_t owner = current->files;
1066 	struct file_lock *fl;
1067 
1068 	/*
1069 	 * Search the lock list for this inode for any POSIX locks.
1070 	 */
1071 	lock_flocks();
1072 	for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next) {
1073 		if (!IS_POSIX(fl))
1074 			continue;
1075 		if (fl->fl_owner != owner)
1076 			break;
1077 	}
1078 	unlock_flocks();
1079 	return fl ? -EAGAIN : 0;
1080 }
1081 
1082 /**
1083  * locks_mandatory_area - Check for a conflicting lock
1084  * @read_write: %FLOCK_VERIFY_WRITE for exclusive access, %FLOCK_VERIFY_READ
1085  *		for shared
1086  * @inode:      the file to check
1087  * @filp:       how the file was opened (if it was)
1088  * @offset:     start of area to check
1089  * @count:      length of area to check
1090  *
1091  * Searches the inode's list of locks to find any POSIX locks which conflict.
1092  * This function is called from rw_verify_area() and
1093  * locks_verify_truncate().
1094  */
1095 int locks_mandatory_area(int read_write, struct inode *inode,
1096 			 struct file *filp, loff_t offset,
1097 			 size_t count)
1098 {
1099 	struct file_lock fl;
1100 	int error;
1101 
1102 	locks_init_lock(&fl);
1103 	fl.fl_owner = current->files;
1104 	fl.fl_pid = current->tgid;
1105 	fl.fl_file = filp;
1106 	fl.fl_flags = FL_POSIX | FL_ACCESS;
1107 	if (filp && !(filp->f_flags & O_NONBLOCK))
1108 		fl.fl_flags |= FL_SLEEP;
1109 	fl.fl_type = (read_write == FLOCK_VERIFY_WRITE) ? F_WRLCK : F_RDLCK;
1110 	fl.fl_start = offset;
1111 	fl.fl_end = offset + count - 1;
1112 
1113 	for (;;) {
1114 		error = __posix_lock_file(inode, &fl, NULL);
1115 		if (error != FILE_LOCK_DEFERRED)
1116 			break;
1117 		error = wait_event_interruptible(fl.fl_wait, !fl.fl_next);
1118 		if (!error) {
1119 			/*
1120 			 * If we've been sleeping, someone might have
1121 			 * changed the permissions behind our back.
1122 			 */
1123 			if (__mandatory_lock(inode))
1124 				continue;
1125 		}
1126 
1127 		locks_delete_block(&fl);
1128 		break;
1129 	}
1130 
1131 	return error;
1132 }
1133 
1134 EXPORT_SYMBOL(locks_mandatory_area);
1135 
1136 static void lease_clear_pending(struct file_lock *fl, int arg)
1137 {
1138 	switch (arg) {
1139 	case F_UNLCK:
1140 		fl->fl_flags &= ~FL_UNLOCK_PENDING;
1141 		/* fall through: */
1142 	case F_RDLCK:
1143 		fl->fl_flags &= ~FL_DOWNGRADE_PENDING;
1144 	}
1145 }
1146 
1147 /* We already had a lease on this file; just change its type */
1148 int lease_modify(struct file_lock **before, int arg)
1149 {
1150 	struct file_lock *fl = *before;
1151 	int error = assign_type(fl, arg);
1152 
1153 	if (error)
1154 		return error;
1155 	lease_clear_pending(fl, arg);
1156 	locks_wake_up_blocks(fl);
1157 	if (arg == F_UNLCK)
1158 		locks_delete_lock(before);
1159 	return 0;
1160 }
1161 
1162 EXPORT_SYMBOL(lease_modify);
1163 
1164 static bool past_time(unsigned long then)
1165 {
1166 	if (!then)
1167 		/* 0 is a special value meaning "this never expires": */
1168 		return false;
1169 	return time_after(jiffies, then);
1170 }
1171 
1172 static void time_out_leases(struct inode *inode)
1173 {
1174 	struct file_lock **before;
1175 	struct file_lock *fl;
1176 
1177 	before = &inode->i_flock;
1178 	while ((fl = *before) && IS_LEASE(fl) && lease_breaking(fl)) {
1179 		if (past_time(fl->fl_downgrade_time))
1180 			lease_modify(before, F_RDLCK);
1181 		if (past_time(fl->fl_break_time))
1182 			lease_modify(before, F_UNLCK);
1183 		if (fl == *before)	/* lease_modify may have freed fl */
1184 			before = &fl->fl_next;
1185 	}
1186 }
1187 
1188 /**
1189  *	__break_lease	-	revoke all outstanding leases on file
1190  *	@inode: the inode of the file whose leases are to be broken
1191  *	@mode: the open mode (read or write)
1192  *
1193  *	break_lease (inlined for speed) has checked there already is at least
1194  *	some kind of lock (maybe a lease) on this file.  Leases are broken on
1195  *	a call to open() or truncate().  This function can sleep unless you
1196  *	specified %O_NONBLOCK to your open().
1197  */
1198 int __break_lease(struct inode *inode, unsigned int mode)
1199 {
1200 	int error = 0;
1201 	struct file_lock *new_fl, *flock;
1202 	struct file_lock *fl;
1203 	unsigned long break_time;
1204 	int i_have_this_lease = 0;
1205 	int want_write = (mode & O_ACCMODE) != O_RDONLY;
1206 
1207 	new_fl = lease_alloc(NULL, want_write ? F_WRLCK : F_RDLCK);
1208 
1209 	lock_flocks();
1210 
1211 	time_out_leases(inode);
1212 
1213 	flock = inode->i_flock;
1214 	if ((flock == NULL) || !IS_LEASE(flock))
1215 		goto out;
1216 
1217 	if (!locks_conflict(flock, new_fl))
1218 		goto out;
1219 
1220 	for (fl = flock; fl && IS_LEASE(fl); fl = fl->fl_next)
1221 		if (fl->fl_owner == current->files)
1222 			i_have_this_lease = 1;
1223 
1224 	if (IS_ERR(new_fl) && !i_have_this_lease
1225 			&& ((mode & O_NONBLOCK) == 0)) {
1226 		error = PTR_ERR(new_fl);
1227 		goto out;
1228 	}
1229 
1230 	break_time = 0;
1231 	if (lease_break_time > 0) {
1232 		break_time = jiffies + lease_break_time * HZ;
1233 		if (break_time == 0)
1234 			break_time++;	/* so that 0 means no break time */
1235 	}
1236 
1237 	for (fl = flock; fl && IS_LEASE(fl); fl = fl->fl_next) {
1238 		if (want_write) {
1239 			if (fl->fl_flags & FL_UNLOCK_PENDING)
1240 				continue;
1241 			fl->fl_flags |= FL_UNLOCK_PENDING;
1242 			fl->fl_break_time = break_time;
1243 		} else {
1244 			if (lease_breaking(fl))
1245 				continue;
1246 			fl->fl_flags |= FL_DOWNGRADE_PENDING;
1247 			fl->fl_downgrade_time = break_time;
1248 		}
1249 		fl->fl_lmops->lm_break(fl);
1250 	}
1251 
1252 	if (i_have_this_lease || (mode & O_NONBLOCK)) {
1253 		error = -EWOULDBLOCK;
1254 		goto out;
1255 	}
1256 
1257 restart:
1258 	break_time = flock->fl_break_time;
1259 	if (break_time != 0) {
1260 		break_time -= jiffies;
1261 		if (break_time == 0)
1262 			break_time++;
1263 	}
1264 	locks_insert_block(flock, new_fl);
1265 	unlock_flocks();
1266 	error = wait_event_interruptible_timeout(new_fl->fl_wait,
1267 						!new_fl->fl_next, break_time);
1268 	lock_flocks();
1269 	__locks_delete_block(new_fl);
1270 	if (error >= 0) {
1271 		if (error == 0)
1272 			time_out_leases(inode);
1273 		/*
1274 		 * Wait for the next conflicting lease that has not been
1275 		 * broken yet
1276 		 */
1277 		for (flock = inode->i_flock; flock && IS_LEASE(flock);
1278 				flock = flock->fl_next) {
1279 			if (locks_conflict(new_fl, flock))
1280 				goto restart;
1281 		}
1282 		error = 0;
1283 	}
1284 
1285 out:
1286 	unlock_flocks();
1287 	if (!IS_ERR(new_fl))
1288 		locks_free_lock(new_fl);
1289 	return error;
1290 }
1291 
1292 EXPORT_SYMBOL(__break_lease);
1293 
1294 /**
1295  *	lease_get_mtime - get the last modified time of an inode
1296  *	@inode: the inode
1297  *      @time:  pointer to a timespec which will contain the last modified time
1298  *
1299  * This is to force NFS clients to flush their caches for files with
1300  * exclusive leases.  The justification is that if someone has an
1301  * exclusive lease, then they could be modifying it.
1302  */
1303 void lease_get_mtime(struct inode *inode, struct timespec *time)
1304 {
1305 	struct file_lock *flock = inode->i_flock;
1306 	if (flock && IS_LEASE(flock) && (flock->fl_type & F_WRLCK))
1307 		*time = current_fs_time(inode->i_sb);
1308 	else
1309 		*time = inode->i_mtime;
1310 }
1311 
1312 EXPORT_SYMBOL(lease_get_mtime);
1313 
1314 /**
1315  *	fcntl_getlease - Enquire what lease is currently active
1316  *	@filp: the file
1317  *
1318  *	The value returned by this function will be one of
1319  *	(if no lease break is pending):
1320  *
1321  *	%F_RDLCK to indicate a shared lease is held.
1322  *
1323  *	%F_WRLCK to indicate an exclusive lease is held.
1324  *
1325  *	%F_UNLCK to indicate no lease is held.
1326  *
1327  *	(if a lease break is pending):
1328  *
1329  *	%F_RDLCK to indicate an exclusive lease needs to be
1330  *		changed to a shared lease (or removed).
1331  *
1332  *	%F_UNLCK to indicate the lease needs to be removed.
1333  *
1334  *	XXX: sfr & willy disagree over whether F_INPROGRESS
1335  *	should be returned to userspace.
1336  */
1337 int fcntl_getlease(struct file *filp)
1338 {
1339 	struct file_lock *fl;
1340 	int type = F_UNLCK;
1341 
1342 	lock_flocks();
1343 	time_out_leases(filp->f_path.dentry->d_inode);
1344 	for (fl = filp->f_path.dentry->d_inode->i_flock; fl && IS_LEASE(fl);
1345 			fl = fl->fl_next) {
1346 		if (fl->fl_file == filp) {
1347 			type = target_leasetype(fl);
1348 			break;
1349 		}
1350 	}
1351 	unlock_flocks();
1352 	return type;
1353 }
1354 
1355 int generic_add_lease(struct file *filp, long arg, struct file_lock **flp)
1356 {
1357 	struct file_lock *fl, **before, **my_before = NULL, *lease;
1358 	struct dentry *dentry = filp->f_path.dentry;
1359 	struct inode *inode = dentry->d_inode;
1360 	int error;
1361 
1362 	lease = *flp;
1363 
1364 	error = -EAGAIN;
1365 	if ((arg == F_RDLCK) && (atomic_read(&inode->i_writecount) > 0))
1366 		goto out;
1367 	if ((arg == F_WRLCK)
1368 	    && ((dentry->d_count > 1)
1369 		|| (atomic_read(&inode->i_count) > 1)))
1370 		goto out;
1371 
1372 	/*
1373 	 * At this point, we know that if there is an exclusive
1374 	 * lease on this file, then we hold it on this filp
1375 	 * (otherwise our open of this file would have blocked).
1376 	 * And if we are trying to acquire an exclusive lease,
1377 	 * then the file is not open by anyone (including us)
1378 	 * except for this filp.
1379 	 */
1380 	error = -EAGAIN;
1381 	for (before = &inode->i_flock;
1382 			((fl = *before) != NULL) && IS_LEASE(fl);
1383 			before = &fl->fl_next) {
1384 		if (fl->fl_file == filp) {
1385 			my_before = before;
1386 			continue;
1387 		}
1388 		/*
1389 		 * No exclusive leases if someone else has a lease on
1390 		 * this file:
1391 		 */
1392 		if (arg == F_WRLCK)
1393 			goto out;
1394 		/*
1395 		 * Modifying our existing lease is OK, but no getting a
1396 		 * new lease if someone else is opening for write:
1397 		 */
1398 		if (fl->fl_flags & FL_UNLOCK_PENDING)
1399 			goto out;
1400 	}
1401 
1402 	if (my_before != NULL) {
1403 		error = lease->fl_lmops->lm_change(my_before, arg);
1404 		if (!error)
1405 			*flp = *my_before;
1406 		goto out;
1407 	}
1408 
1409 	error = -EINVAL;
1410 	if (!leases_enable)
1411 		goto out;
1412 
1413 	locks_insert_lock(before, lease);
1414 	return 0;
1415 
1416 out:
1417 	return error;
1418 }
1419 
1420 int generic_delete_lease(struct file *filp, struct file_lock **flp)
1421 {
1422 	struct file_lock *fl, **before;
1423 	struct dentry *dentry = filp->f_path.dentry;
1424 	struct inode *inode = dentry->d_inode;
1425 
1426 	for (before = &inode->i_flock;
1427 			((fl = *before) != NULL) && IS_LEASE(fl);
1428 			before = &fl->fl_next) {
1429 		if (fl->fl_file != filp)
1430 			continue;
1431 		return (*flp)->fl_lmops->lm_change(before, F_UNLCK);
1432 	}
1433 	return -EAGAIN;
1434 }
1435 
1436 /**
1437  *	generic_setlease	-	sets a lease on an open file
1438  *	@filp: file pointer
1439  *	@arg: type of lease to obtain
1440  *	@flp: input - file_lock to use, output - file_lock inserted
1441  *
1442  *	The (input) flp->fl_lmops->lm_break function is required
1443  *	by break_lease().
1444  *
1445  *	Called with file_lock_lock held.
1446  */
1447 int generic_setlease(struct file *filp, long arg, struct file_lock **flp)
1448 {
1449 	struct dentry *dentry = filp->f_path.dentry;
1450 	struct inode *inode = dentry->d_inode;
1451 	int error;
1452 
1453 	if ((current_fsuid() != inode->i_uid) && !capable(CAP_LEASE))
1454 		return -EACCES;
1455 	if (!S_ISREG(inode->i_mode))
1456 		return -EINVAL;
1457 	error = security_file_lock(filp, arg);
1458 	if (error)
1459 		return error;
1460 
1461 	time_out_leases(inode);
1462 
1463 	BUG_ON(!(*flp)->fl_lmops->lm_break);
1464 
1465 	switch (arg) {
1466 	case F_UNLCK:
1467 		return generic_delete_lease(filp, flp);
1468 	case F_RDLCK:
1469 	case F_WRLCK:
1470 		return generic_add_lease(filp, arg, flp);
1471 	default:
1472 		BUG();
1473 	}
1474 }
1475 EXPORT_SYMBOL(generic_setlease);
1476 
1477 static int __vfs_setlease(struct file *filp, long arg, struct file_lock **lease)
1478 {
1479 	if (filp->f_op && filp->f_op->setlease)
1480 		return filp->f_op->setlease(filp, arg, lease);
1481 	else
1482 		return generic_setlease(filp, arg, lease);
1483 }
1484 
1485 /**
1486  *	vfs_setlease        -       sets a lease on an open file
1487  *	@filp: file pointer
1488  *	@arg: type of lease to obtain
1489  *	@lease: file_lock to use
1490  *
1491  *	Call this to establish a lease on the file.
1492  *	The (*lease)->fl_lmops->lm_break operation must be set; if not,
1493  *	break_lease will oops!
1494  *
1495  *	This will call the filesystem's setlease file method, if
1496  *	defined.  Note that there is no getlease method; instead, the
1497  *	filesystem setlease method should call back to setlease() to
1498  *	add a lease to the inode's lease list, where fcntl_getlease() can
1499  *	find it.  Since fcntl_getlease() only reports whether the current
1500  *	task holds a lease, a cluster filesystem need only do this for
1501  *	leases held by processes on this node.
1502  *
1503  *	There is also no break_lease method; filesystems that
1504  *	handle their own leases should break leases themselves from the
1505  *	filesystem's open, create, and (on truncate) setattr methods.
1506  *
1507  *	Warning: the only current setlease methods exist only to disable
1508  *	leases in certain cases.  More vfs changes may be required to
1509  *	allow a full filesystem lease implementation.
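 *
 *	For instance, a filesystem that cannot support leases may supply a
 *	stub that simply refuses them (a hypothetical sketch of the
 *	"disable leases" pattern mentioned above):
 *
 *		static int nolease_setlease(struct file *filp, long arg,
 *					    struct file_lock **lease)
 *		{
 *			return -EINVAL;
 *		}
 *
 *	wired up through the file_operations ->setlease method.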
1510  */
1511 
1512 int vfs_setlease(struct file *filp, long arg, struct file_lock **lease)
1513 {
1514 	int error;
1515 
1516 	lock_flocks();
1517 	error = __vfs_setlease(filp, arg, lease);
1518 	unlock_flocks();
1519 
1520 	return error;
1521 }
1522 EXPORT_SYMBOL_GPL(vfs_setlease);
1523 
1524 static int do_fcntl_delete_lease(struct file *filp)
1525 {
1526 	struct file_lock fl, *flp = &fl;
1527 
1528 	lease_init(filp, F_UNLCK, flp);
1529 
1530 	return vfs_setlease(filp, F_UNLCK, &flp);
1531 }
1532 
1533 static int do_fcntl_add_lease(unsigned int fd, struct file *filp, long arg)
1534 {
1535 	struct file_lock *fl, *ret;
1536 	struct fasync_struct *new;
1537 	int error;
1538 
1539 	fl = lease_alloc(filp, arg);
1540 	if (IS_ERR(fl))
1541 		return PTR_ERR(fl);
1542 
1543 	new = fasync_alloc();
1544 	if (!new) {
1545 		locks_free_lock(fl);
1546 		return -ENOMEM;
1547 	}
1548 	ret = fl;
1549 	lock_flocks();
1550 	error = __vfs_setlease(filp, arg, &ret);
1551 	if (error) {
1552 		unlock_flocks();
1553 		locks_free_lock(fl);
1554 		goto out_free_fasync;
1555 	}
1556 	if (ret != fl)
1557 		locks_free_lock(fl);
1558 
1559 	/*
1560 	 * fasync_insert_entry() returns the old entry if any.
1561 	 * If there was no old entry, then it used 'new' and
1562 	 * inserted it into the fasync list. Clear new so that
1563 	 * we don't release it here.
1564 	 */
1565 	if (!fasync_insert_entry(fd, filp, &ret->fl_fasync, new))
1566 		new = NULL;
1567 
1568 	error = __f_setown(filp, task_pid(current), PIDTYPE_PID, 0);
1569 	unlock_flocks();
1570 
1571 out_free_fasync:
1572 	if (new)
1573 		fasync_free(new);
1574 	return error;
1575 }
1576 
1577 /**
1578  *	fcntl_setlease	-	sets a lease on an open file
1579  *	@fd: open file descriptor
1580  *	@filp: file pointer
1581  *	@arg: type of lease to obtain
1582  *
1583  *	Call this fcntl to establish a lease on the file.
1584  *	Note that you also need to call %F_SETSIG to
1585  *	receive a signal when the lease is broken.
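 *
 *	A minimal userspace sketch (illustrative only; error handling and
 *	the signal handler body omitted):
 *
 *		signal(SIGIO, on_lease_break);
 *		fcntl(fd, F_SETSIG, SIGIO);
 *		fcntl(fd, F_SETLEASE, F_RDLCK);
 *
 *	When another process opens the file for writing, the holder gets
 *	SIGIO and has lease_break_time seconds to release the lease with
 *	fcntl(fd, F_SETLEASE, F_UNLCK) before the kernel revokes it.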
1586  */
1587 int fcntl_setlease(unsigned int fd, struct file *filp, long arg)
1588 {
1589 	if (arg == F_UNLCK)
1590 		return do_fcntl_delete_lease(filp);
1591 	return do_fcntl_add_lease(fd, filp, arg);
1592 }
1593 
1594 /**
1595  * flock_lock_file_wait - Apply a FLOCK-style lock to a file
1596  * @filp: The file to apply the lock to
1597  * @fl: The lock to be applied
1598  *
1599  * Add a FLOCK style lock to a file.
1600  */
1601 int flock_lock_file_wait(struct file *filp, struct file_lock *fl)
1602 {
1603 	int error;
1604 	might_sleep();
1605 	for (;;) {
1606 		error = flock_lock_file(filp, fl);
1607 		if (error != FILE_LOCK_DEFERRED)
1608 			break;
1609 		error = wait_event_interruptible(fl->fl_wait, !fl->fl_next);
1610 		if (!error)
1611 			continue;
1612 
1613 		locks_delete_block(fl);
1614 		break;
1615 	}
1616 	return error;
1617 }
1618 
1619 EXPORT_SYMBOL(flock_lock_file_wait);
1620 
1621 /**
1622  *	sys_flock: - flock() system call.
1623  *	@fd: the file descriptor to lock.
1624  *	@cmd: the type of lock to apply.
1625  *
1626  *	Apply a %FL_FLOCK style lock to an open file descriptor.
1627  *	The @cmd can be one of
1628  *
1629  *	%LOCK_SH -- a shared lock.
1630  *
1631  *	%LOCK_EX -- an exclusive lock.
1632  *
1633  *	%LOCK_UN -- remove an existing lock.
1634  *
1635  *	%LOCK_MAND -- a `mandatory' flock.  This exists to emulate Windows Share Modes.
1636  *
1637  *	%LOCK_MAND can be combined with %LOCK_READ or %LOCK_WRITE to allow other
1638  *	processes read and write access respectively.
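 *
 *	Illustrative userspace usage (not part of this file):
 *
 *		flock(fd, LOCK_EX | LOCK_NB);	try-lock: fails with
 *						EWOULDBLOCK if contended
 *		flock(fd, LOCK_UN);		drop the lock again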
1639  */
1640 SYSCALL_DEFINE2(flock, unsigned int, fd, unsigned int, cmd)
1641 {
1642 	struct file *filp;
1643 	struct file_lock *lock;
1644 	int can_sleep, unlock;
1645 	int error;
1646 
1647 	error = -EBADF;
1648 	filp = fget(fd);
1649 	if (!filp)
1650 		goto out;
1651 
1652 	can_sleep = !(cmd & LOCK_NB);
1653 	cmd &= ~LOCK_NB;
1654 	unlock = (cmd == LOCK_UN);
1655 
1656 	if (!unlock && !(cmd & LOCK_MAND) &&
1657 	    !(filp->f_mode & (FMODE_READ|FMODE_WRITE)))
1658 		goto out_putf;
1659 
1660 	error = flock_make_lock(filp, &lock, cmd);
1661 	if (error)
1662 		goto out_putf;
1663 	if (can_sleep)
1664 		lock->fl_flags |= FL_SLEEP;
1665 
1666 	error = security_file_lock(filp, lock->fl_type);
1667 	if (error)
1668 		goto out_free;
1669 
1670 	if (filp->f_op && filp->f_op->flock)
1671 		error = filp->f_op->flock(filp,
1672 					  (can_sleep) ? F_SETLKW : F_SETLK,
1673 					  lock);
1674 	else
1675 		error = flock_lock_file_wait(filp, lock);
1676 
1677  out_free:
1678 	locks_free_lock(lock);
1679 
1680  out_putf:
1681 	fput(filp);
1682  out:
1683 	return error;
1684 }
1685 
1686 /**
1687  * vfs_test_lock - test file byte range lock
1688  * @filp: The file to test lock for
1689  * @fl: The lock to test; also used to hold result
1690  *
1691  * Returns -ERRNO on failure.  Indicates presence of conflicting lock by
1692  * setting fl->fl_type to something other than F_UNLCK.
1693  */
1694 int vfs_test_lock(struct file *filp, struct file_lock *fl)
1695 {
1696 	if (filp->f_op && filp->f_op->lock)
1697 		return filp->f_op->lock(filp, F_GETLK, fl);
1698 	posix_test_lock(filp, fl);
1699 	return 0;
1700 }
1701 EXPORT_SYMBOL_GPL(vfs_test_lock);
1702 
1703 static int posix_lock_to_flock(struct flock *flock, struct file_lock *fl)
1704 {
1705 	flock->l_pid = fl->fl_pid;
1706 #if BITS_PER_LONG == 32
1707 	/*
1708 	 * Make sure we can represent the posix lock via
1709 	 * legacy 32bit flock.
1710 	 */
1711 	if (fl->fl_start > OFFT_OFFSET_MAX)
1712 		return -EOVERFLOW;
1713 	if (fl->fl_end != OFFSET_MAX && fl->fl_end > OFFT_OFFSET_MAX)
1714 		return -EOVERFLOW;
1715 #endif
1716 	flock->l_start = fl->fl_start;
1717 	flock->l_len = fl->fl_end == OFFSET_MAX ? 0 :
1718 		fl->fl_end - fl->fl_start + 1;
1719 	flock->l_whence = 0;
1720 	flock->l_type = fl->fl_type;
1721 	return 0;
1722 }
1723 
1724 #if BITS_PER_LONG == 32
1725 static void posix_lock_to_flock64(struct flock64 *flock, struct file_lock *fl)
1726 {
1727 	flock->l_pid = fl->fl_pid;
1728 	flock->l_start = fl->fl_start;
1729 	flock->l_len = fl->fl_end == OFFSET_MAX ? 0 :
1730 		fl->fl_end - fl->fl_start + 1;
1731 	flock->l_whence = 0;
1732 	flock->l_type = fl->fl_type;
1733 }
1734 #endif
1735 
1736 /* Report the first existing lock that would conflict with l.
1737  * This implements the F_GETLK command of fcntl().
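 *
 * An illustrative userspace probe (error handling omitted): set l_type to
 * the access you intend; on return l_type is either F_UNLCK or describes
 * the first conflicting lock:
 *
 *	struct flock fl = { .l_type = F_WRLCK, .l_whence = SEEK_SET };
 *	fcntl(fd, F_GETLK, &fl);
 *	if (fl.l_type == F_UNLCK)
 *		... the whole file (l_len == 0) was free to write-lock ...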
1738  */
1739 int fcntl_getlk(struct file *filp, struct flock __user *l)
1740 {
1741 	struct file_lock file_lock;
1742 	struct flock flock;
1743 	int error;
1744 
1745 	error = -EFAULT;
1746 	if (copy_from_user(&flock, l, sizeof(flock)))
1747 		goto out;
1748 	error = -EINVAL;
1749 	if ((flock.l_type != F_RDLCK) && (flock.l_type != F_WRLCK))
1750 		goto out;
1751 
1752 	error = flock_to_posix_lock(filp, &file_lock, &flock);
1753 	if (error)
1754 		goto out;
1755 
1756 	error = vfs_test_lock(filp, &file_lock);
1757 	if (error)
1758 		goto out;
1759 
1760 	flock.l_type = file_lock.fl_type;
1761 	if (file_lock.fl_type != F_UNLCK) {
1762 		error = posix_lock_to_flock(&flock, &file_lock);
1763 		if (error)
1764 			goto out;
1765 	}
1766 	error = -EFAULT;
1767 	if (!copy_to_user(l, &flock, sizeof(flock)))
1768 		error = 0;
1769 out:
1770 	return error;
1771 }
1772 
1773 /**
1774  * vfs_lock_file - file byte range lock
1775  * @filp: The file to apply the lock to
1776  * @cmd: type of locking operation (F_SETLK, F_GETLK, etc.)
1777  * @fl: The lock to be applied
1778  * @conf: Place to return a copy of the conflicting lock, if found.
1779  *
1780  * A caller that doesn't care about the conflicting lock may pass NULL
1781  * as the final argument.
1782  *
1783  * If the filesystem defines a private ->lock() method, then @conf will
1784  * be left unchanged; so a caller that cares should initialize it to
1785  * some acceptable default.
1786  *
1787  * To avoid blocking kernel daemons, such as lockd, that need to acquire POSIX
1788  * locks, the ->lock() interface may return asynchronously, before the lock has
1789  * been granted or denied by the underlying filesystem, if (and only if)
1790  * lm_grant is set. Callers expecting ->lock() to return asynchronously
1791  * will only use F_SETLK, not F_SETLKW; they will set FL_SLEEP if (and only if)
1792  * the request is for a blocking lock. When ->lock() does return asynchronously,
1793  * it must return FILE_LOCK_DEFERRED, and call ->lm_grant() when the lock
1794  * request completes.
1795  * If the request is for a non-blocking lock, the file system should return
1796  * FILE_LOCK_DEFERRED, then try to get the lock and call the callback routine
1797  * with the result. If the request timed out, the callback routine will return
1798  * a nonzero return code and the file system should release the lock. The file
1799  * system is also responsible for keeping a corresponding posix lock when it
1800  * grants a lock, so the VFS can find out which locks are locally held and do
1801  * the correct lock cleanup when required.
1802  * The underlying filesystem must not drop the kernel lock or call
1803  * ->lm_grant() before returning to the caller with a FILE_LOCK_DEFERRED
1804  * return code.
1805  */
1806 int vfs_lock_file(struct file *filp, unsigned int cmd, struct file_lock *fl, struct file_lock *conf)
1807 {
1808 	if (filp->f_op && filp->f_op->lock)
1809 		return filp->f_op->lock(filp, cmd, fl);
1810 	else
1811 		return posix_lock_file(filp, fl, conf);
1812 }
1813 EXPORT_SYMBOL_GPL(vfs_lock_file);
1814 
1815 static int do_lock_file_wait(struct file *filp, unsigned int cmd,
1816 			     struct file_lock *fl)
1817 {
1818 	int error;
1819 
1820 	error = security_file_lock(filp, fl->fl_type);
1821 	if (error)
1822 		return error;
1823 
1824 	for (;;) {
1825 		error = vfs_lock_file(filp, cmd, fl, NULL);
1826 		if (error != FILE_LOCK_DEFERRED)
1827 			break;
1828 		error = wait_event_interruptible(fl->fl_wait, !fl->fl_next);
1829 		if (!error)
1830 			continue;
1831 
1832 		locks_delete_block(fl);
1833 		break;
1834 	}
1835 
1836 	return error;
1837 }
1838 
1839 /* Apply the lock described by l to an open file descriptor.
1840  * This implements both the F_SETLK and F_SETLKW commands of fcntl().
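 *
 * Illustrative userspace counterpart (error handling omitted):
 *
 *	struct flock fl = { .l_type = F_WRLCK, .l_whence = SEEK_SET,
 *			    .l_start = 0, .l_len = 10 };
 *	fcntl(fd, F_SETLK, &fl);	fails with EAGAIN on conflict
 *	fcntl(fd, F_SETLKW, &fl);	blocks until the range is free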
1841  */
1842 int fcntl_setlk(unsigned int fd, struct file *filp, unsigned int cmd,
1843 		struct flock __user *l)
1844 {
1845 	struct file_lock *file_lock = locks_alloc_lock();
1846 	struct flock flock;
1847 	struct inode *inode;
1848 	struct file *f;
1849 	int error;
1850 
1851 	if (file_lock == NULL)
1852 		return -ENOLCK;
1853 
1854 	/*
1855 	 * This might block, so we do it before checking the inode.
1856 	 */
1857 	error = -EFAULT;
1858 	if (copy_from_user(&flock, l, sizeof(flock)))
1859 		goto out;
1860 
1861 	inode = filp->f_path.dentry->d_inode;
1862 
1863 	/* Don't allow mandatory locks on files that may be memory mapped
1864 	 * and shared.
1865 	 */
1866 	if (mandatory_lock(inode) && mapping_writably_mapped(filp->f_mapping)) {
1867 		error = -EAGAIN;
1868 		goto out;
1869 	}
1870 
1871 again:
1872 	error = flock_to_posix_lock(filp, file_lock, &flock);
1873 	if (error)
1874 		goto out;
1875 	if (cmd == F_SETLKW) {
1876 		file_lock->fl_flags |= FL_SLEEP;
1877 	}
1878 
1879 	error = -EBADF;
1880 	switch (flock.l_type) {
1881 	case F_RDLCK:
1882 		if (!(filp->f_mode & FMODE_READ))
1883 			goto out;
1884 		break;
1885 	case F_WRLCK:
1886 		if (!(filp->f_mode & FMODE_WRITE))
1887 			goto out;
1888 		break;
1889 	case F_UNLCK:
1890 		break;
1891 	default:
1892 		error = -EINVAL;
1893 		goto out;
1894 	}
1895 
1896 	error = do_lock_file_wait(filp, cmd, file_lock);
1897 
1898 	/*
1899 	 * Attempt to detect a close/fcntl race and recover by
1900 	 * releasing the lock that was just acquired.
1901 	 */
1902 	/*
1903 	 * We need that spin_lock here - it prevents reordering between
1904 	 * the update of inode->i_flock and the check for it done in
1905 	 * close(); rcu_read_lock() wouldn't do.
1906 	 */
1907 	spin_lock(&current->files->file_lock);
1908 	f = fcheck(fd);
1909 	spin_unlock(&current->files->file_lock);
1910 	if (!error && f != filp && flock.l_type != F_UNLCK) {
1911 		flock.l_type = F_UNLCK;
1912 		goto again;
1913 	}
1914 
1915 out:
1916 	locks_free_lock(file_lock);
1917 	return error;
1918 }
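
/*
 * Userspace view of the above (illustrative, not part of this file): a
 * minimal caller exercising the F_SETLK/F_SETLKW path with the struct
 * flock that fcntl_setlk() copies in.
 */
#if 0
#include <fcntl.h>

static int lock_whole_file(int fd, int blocking)
{
	struct flock fl = {
		.l_type   = F_WRLCK,	/* needs the fd open for writing */
		.l_whence = SEEK_SET,
		.l_start  = 0,
		.l_len    = 0,		/* 0 means "through EOF" */
	};

	/* F_SETLK fails with EAGAIN/EACCES on conflict; F_SETLKW blocks. */
	return fcntl(fd, blocking ? F_SETLKW : F_SETLK, &fl);
}
#endif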
1919 
1920 #if BITS_PER_LONG == 32
1921 /* Report the first existing lock that would conflict with l.
1922  * This implements the F_GETLK command of fcntl().
1923  */
1924 int fcntl_getlk64(struct file *filp, struct flock64 __user *l)
1925 {
1926 	struct file_lock file_lock;
1927 	struct flock64 flock;
1928 	int error;
1929 
1930 	error = -EFAULT;
1931 	if (copy_from_user(&flock, l, sizeof(flock)))
1932 		goto out;
1933 	error = -EINVAL;
1934 	if ((flock.l_type != F_RDLCK) && (flock.l_type != F_WRLCK))
1935 		goto out;
1936 
1937 	error = flock64_to_posix_lock(filp, &file_lock, &flock);
1938 	if (error)
1939 		goto out;
1940 
1941 	error = vfs_test_lock(filp, &file_lock);
1942 	if (error)
1943 		goto out;
1944 
1945 	flock.l_type = file_lock.fl_type;
1946 	if (file_lock.fl_type != F_UNLCK)
1947 		posix_lock_to_flock64(&flock, &file_lock);
1948 
1949 	error = -EFAULT;
1950 	if (!copy_to_user(l, &flock, sizeof(flock)))
1951 		error = 0;
1952 
1953 out:
1954 	return error;
1955 }
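
/*
 * Illustrative sketch, not part of this file: how userspace typically
 * probes for a conflict via F_GETLK.  On return, l_type is F_UNLCK if
 * the probe would succeed; otherwise the struct describes the first
 * conflicting lock found.
 */
#if 0
#include <fcntl.h>

static int region_is_locked(int fd, off_t start, off_t len)
{
	struct flock fl = {
		.l_type   = F_WRLCK,	/* probe as if taking a write lock */
		.l_whence = SEEK_SET,
		.l_start  = start,
		.l_len    = len,
	};

	if (fcntl(fd, F_GETLK, &fl) == -1)
		return -1;
	return fl.l_type != F_UNLCK;	/* fl now describes the conflict */
}
#endif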
1956 
1957 /* Apply the lock described by l to an open file descriptor.
1958  * This implements both the F_SETLK and F_SETLKW commands of fcntl().
1959  */
1960 int fcntl_setlk64(unsigned int fd, struct file *filp, unsigned int cmd,
1961 		struct flock64 __user *l)
1962 {
1963 	struct file_lock *file_lock = locks_alloc_lock();
1964 	struct flock64 flock;
1965 	struct inode *inode;
1966 	struct file *f;
1967 	int error;
1968 
1969 	if (file_lock == NULL)
1970 		return -ENOLCK;
1971 
1972 	/*
1973 	 * This might block, so we do it before checking the inode.
1974 	 */
1975 	error = -EFAULT;
1976 	if (copy_from_user(&flock, l, sizeof(flock)))
1977 		goto out;
1978 
1979 	inode = filp->f_path.dentry->d_inode;
1980 
1981 	/* Don't allow mandatory locks on files that may be memory mapped
1982 	 * and shared.
1983 	 */
1984 	if (mandatory_lock(inode) && mapping_writably_mapped(filp->f_mapping)) {
1985 		error = -EAGAIN;
1986 		goto out;
1987 	}
1988 
1989 again:
1990 	error = flock64_to_posix_lock(filp, file_lock, &flock);
1991 	if (error)
1992 		goto out;
1993 	if (cmd == F_SETLKW64) {
1994 		file_lock->fl_flags |= FL_SLEEP;
1995 	}
1996 
1997 	error = -EBADF;
1998 	switch (flock.l_type) {
1999 	case F_RDLCK:
2000 		if (!(filp->f_mode & FMODE_READ))
2001 			goto out;
2002 		break;
2003 	case F_WRLCK:
2004 		if (!(filp->f_mode & FMODE_WRITE))
2005 			goto out;
2006 		break;
2007 	case F_UNLCK:
2008 		break;
2009 	default:
2010 		error = -EINVAL;
2011 		goto out;
2012 	}
2013 
2014 	error = do_lock_file_wait(filp, cmd, file_lock);
2015 
2016 	/*
2017 	 * Attempt to detect a close/fcntl race and recover by
2018 	 * releasing the lock that was just acquired.
2019 	 */
2020 	spin_lock(&current->files->file_lock);
2021 	f = fcheck(fd);
2022 	spin_unlock(&current->files->file_lock);
2023 	if (!error && f != filp && flock.l_type != F_UNLCK) {
2024 		flock.l_type = F_UNLCK;
2025 		goto again;
2026 	}
2027 
2028 out:
2029 	locks_free_lock(file_lock);
2030 	return error;
2031 }
2032 #endif /* BITS_PER_LONG == 32 */
2033 
2034 /*
2035  * This function is called when the file is being removed
2036  * from the task's fd array.  POSIX locks belonging to this task
2037  * are deleted at this time.
2038  */
2039 void locks_remove_posix(struct file *filp, fl_owner_t owner)
2040 {
2041 	struct file_lock lock;
2042 
2043 	/*
2044 	 * If there are no locks held on this file, we don't need to call
2045 	 * posix_lock_file().  Another process could be setting a lock on this
2046 	 * file at the same time, but we wouldn't remove that lock anyway.
2047 	 */
2048 	if (!filp->f_path.dentry->d_inode->i_flock)
2049 		return;
2050 
2051 	lock.fl_type = F_UNLCK;
2052 	lock.fl_flags = FL_POSIX | FL_CLOSE;
2053 	lock.fl_start = 0;
2054 	lock.fl_end = OFFSET_MAX;
2055 	lock.fl_owner = owner;
2056 	lock.fl_pid = current->tgid;
2057 	lock.fl_file = filp;
2058 	lock.fl_ops = NULL;
2059 	lock.fl_lmops = NULL;
2060 
2061 	vfs_lock_file(filp, F_SETLK, &lock, NULL);
2062 
2063 	if (lock.fl_ops && lock.fl_ops->fl_release_private)
2064 		lock.fl_ops->fl_release_private(&lock);
2065 }
2066 
2067 EXPORT_SYMBOL(locks_remove_posix);
2068 
2069 /*
2070  * This function is called on the last close of an open file.
2071  */
2072 void locks_remove_flock(struct file *filp)
2073 {
2074 	struct inode *inode = filp->f_path.dentry->d_inode;
2075 	struct file_lock *fl;
2076 	struct file_lock **before;
2077 
2078 	if (!inode->i_flock)
2079 		return;
2080 
2081 	if (filp->f_op && filp->f_op->flock) {
2082 		struct file_lock fl = {
2083 			.fl_pid = current->tgid,
2084 			.fl_file = filp,
2085 			.fl_flags = FL_FLOCK,
2086 			.fl_type = F_UNLCK,
2087 			.fl_end = OFFSET_MAX,
2088 		};
2089 		filp->f_op->flock(filp, F_SETLKW, &fl);
2090 		if (fl.fl_ops && fl.fl_ops->fl_release_private)
2091 			fl.fl_ops->fl_release_private(&fl);
2092 	}
2093 
2094 	lock_flocks();
2095 	before = &inode->i_flock;
2096 
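	/*
	 * Walk the inode's lock list and strip every flock-style lock and
	 * lease still attached to this struct file.  Any POSIX locks set
	 * through this file should already have been removed at close()
	 * time by locks_remove_posix(), hence the BUG() below.
	 */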
2097 	while ((fl = *before) != NULL) {
2098 		if (fl->fl_file == filp) {
2099 			if (IS_FLOCK(fl)) {
2100 				locks_delete_lock(before);
2101 				continue;
2102 			}
2103 			if (IS_LEASE(fl)) {
2104 				lease_modify(before, F_UNLCK);
2105 				continue;
2106 			}
2107 			/* What? */
2108 			BUG();
2109 		}
2110 		before = &fl->fl_next;
2111 	}
2112 	unlock_flocks();
2113 }
2114 
2115 /**
2116  *	posix_unblock_lock - stop waiting for a file lock
2117  *	@filp:   how the file was opened
2118  *	@waiter: the lock which was waiting
2119  *
2120  *	lockd needs to block waiting for locks.
2121  */
2122 int
2123 posix_unblock_lock(struct file *filp, struct file_lock *waiter)
2124 {
2125 	int status = 0;
2126 
2127 	lock_flocks();
2128 	if (waiter->fl_next)
2129 		__locks_delete_block(waiter);
2130 	else
2131 		status = -ENOENT;
2132 	unlock_flocks();
2133 	return status;
2134 }
2135 
2136 EXPORT_SYMBOL(posix_unblock_lock);
2137 
2138 /**
2139  * vfs_cancel_lock - file byte range unblock lock
2140  * @filp: The file to apply the unblock to
2141  * @fl: The lock to be unblocked
2142  *
2143  * Used by lock managers to cancel blocked requests
2144  */
2145 int vfs_cancel_lock(struct file *filp, struct file_lock *fl)
2146 {
2147 	if (filp->f_op && filp->f_op->lock)
2148 		return filp->f_op->lock(filp, F_CANCELLK, fl);
2149 	return 0;
2150 }
2151 
2152 EXPORT_SYMBOL_GPL(vfs_cancel_lock);
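
/*
 * Illustrative sketch, not part of this file: a lock manager such as
 * lockd cancelling a request it queued earlier.  It first asks the
 * filesystem to cancel (F_CANCELLK is a no-op when no ->lock() method is
 * defined), then drops the waiter from the VFS blocked list.  The helper
 * name is hypothetical and error handling is elided.
 */
#if 0
static void cancel_blocked_request(struct file *filp, struct file_lock *fl)
{
	vfs_cancel_lock(filp, fl);	/* best effort */
	posix_unblock_lock(filp, fl);	/* -ENOENT if no longer blocked */
}
#endif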
2153 
2154 #ifdef CONFIG_PROC_FS
2155 #include <linux/proc_fs.h>
2156 #include <linux/seq_file.h>
2157 
2158 static void lock_get_status(struct seq_file *f, struct file_lock *fl,
2159 			    loff_t id, char *pfx)
2160 {
2161 	struct inode *inode = NULL;
2162 	unsigned int fl_pid;
2163 
2164 	if (fl->fl_nspid)
2165 		fl_pid = pid_vnr(fl->fl_nspid);
2166 	else
2167 		fl_pid = fl->fl_pid;
2168 
2169 	if (fl->fl_file != NULL)
2170 		inode = fl->fl_file->f_path.dentry->d_inode;
2171 
2172 	seq_printf(f, "%lld:%s ", id, pfx);
2173 	if (IS_POSIX(fl)) {
2174 		seq_printf(f, "%6s %s ",
2175 			     (fl->fl_flags & FL_ACCESS) ? "ACCESS" : "POSIX ",
2176 			     (inode == NULL) ? "*NOINODE*" :
2177 			     mandatory_lock(inode) ? "MANDATORY" : "ADVISORY ");
2178 	} else if (IS_FLOCK(fl)) {
2179 		if (fl->fl_type & LOCK_MAND) {
2180 			seq_printf(f, "FLOCK  MSNFS     ");
2181 		} else {
2182 			seq_printf(f, "FLOCK  ADVISORY  ");
2183 		}
2184 	} else if (IS_LEASE(fl)) {
2185 		seq_printf(f, "LEASE  ");
2186 		if (lease_breaking(fl))
2187 			seq_printf(f, "BREAKING  ");
2188 		else if (fl->fl_file)
2189 			seq_printf(f, "ACTIVE    ");
2190 		else
2191 			seq_printf(f, "BREAKER   ");
2192 	} else {
2193 		seq_printf(f, "UNKNOWN UNKNOWN  ");
2194 	}
2195 	if (fl->fl_type & LOCK_MAND) {
2196 		seq_printf(f, "%s ",
2197 			       (fl->fl_type & LOCK_READ)
2198 			       ? (fl->fl_type & LOCK_WRITE) ? "RW   " : "READ "
2199 			       : (fl->fl_type & LOCK_WRITE) ? "WRITE" : "NONE ");
2200 	} else {
2201 		seq_printf(f, "%s ",
2202 			       (lease_breaking(fl))
2203 			       ? (fl->fl_type & F_UNLCK) ? "UNLCK" : "READ "
2204 			       : (fl->fl_type & F_WRLCK) ? "WRITE" : "READ ");
2205 	}
2206 	if (inode) {
2207 #ifdef WE_CAN_BREAK_LSLK_NOW
2208 		seq_printf(f, "%d %s:%ld ", fl_pid,
2209 				inode->i_sb->s_id, inode->i_ino);
2210 #else
2211 		/* userspace relies on this representation of dev_t ;-( */
2212 		seq_printf(f, "%d %02x:%02x:%ld ", fl_pid,
2213 				MAJOR(inode->i_sb->s_dev),
2214 				MINOR(inode->i_sb->s_dev), inode->i_ino);
2215 #endif
2216 	} else {
2217 		seq_printf(f, "%d <none>:0 ", fl_pid);
2218 	}
2219 	if (IS_POSIX(fl)) {
2220 		if (fl->fl_end == OFFSET_MAX)
2221 			seq_printf(f, "%Ld EOF\n", fl->fl_start);
2222 		else
2223 			seq_printf(f, "%Ld %Ld\n", fl->fl_start, fl->fl_end);
2224 	} else {
2225 		seq_printf(f, "0 EOF\n");
2226 	}
2227 }
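
/*
 * For reference, sample /proc/locks lines in the format produced above
 * (the values are made up): a held POSIX write lock, a waiter blocked on
 * it (the " ->" prefix), and a flock-style lock:
 *
 *	1: POSIX  ADVISORY  WRITE 1962 08:01:135202 0 EOF
 *	1: -> POSIX  ADVISORY  WRITE 1963 08:01:135202 0 EOF
 *	2: FLOCK  ADVISORY  WRITE 1839 08:01:135323 0 EOF
 *
 * Fields: ordinal, class, mand/advisory, type, pid, maj:min:ino, range.
 */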
2228 
2229 static int locks_show(struct seq_file *f, void *v)
2230 {
2231 	struct file_lock *fl, *bfl;
2232 
2233 	fl = list_entry(v, struct file_lock, fl_link);
2234 
2235 	lock_get_status(f, fl, *((loff_t *)f->private), "");
2236 
2237 	list_for_each_entry(bfl, &fl->fl_block, fl_block)
2238 		lock_get_status(f, bfl, *((loff_t *)f->private), " ->");
2239 
2240 	return 0;
2241 }
2242 
2243 static void *locks_start(struct seq_file *f, loff_t *pos)
2244 {
2245 	loff_t *p = f->private;
2246 
2247 	lock_flocks();
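	/* f->private holds the 1-based ordinal that locks_show() prints as "N:" */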
2248 	*p = (*pos + 1);
2249 	return seq_list_start(&file_lock_list, *pos);
2250 }
2251 
2252 static void *locks_next(struct seq_file *f, void *v, loff_t *pos)
2253 {
2254 	loff_t *p = f->private;
2255 	++*p;
2256 	return seq_list_next(v, &file_lock_list, pos);
2257 }
2258 
2259 static void locks_stop(struct seq_file *f, void *v)
2260 {
2261 	unlock_flocks();
2262 }
2263 
2264 static const struct seq_operations locks_seq_operations = {
2265 	.start	= locks_start,
2266 	.next	= locks_next,
2267 	.stop	= locks_stop,
2268 	.show	= locks_show,
2269 };
2270 
2271 static int locks_open(struct inode *inode, struct file *filp)
2272 {
2273 	return seq_open_private(filp, &locks_seq_operations, sizeof(loff_t));
2274 }
2275 
2276 static const struct file_operations proc_locks_operations = {
2277 	.open		= locks_open,
2278 	.read		= seq_read,
2279 	.llseek		= seq_lseek,
2280 	.release	= seq_release_private,
2281 };
2282 
2283 static int __init proc_locks_init(void)
2284 {
2285 	proc_create("locks", 0, NULL, &proc_locks_operations);
2286 	return 0;
2287 }
2288 module_init(proc_locks_init);
2289 #endif
2290 
2291 /**
2292  *	lock_may_read - checks that the region is free of locks
2293  *	@inode: the inode that is being read
2294  *	@start: the first byte to read
2295  *	@len: the number of bytes to read
2296  *
2297  *	Emulates Windows locking requirements.  Whole-file
2298  *	mandatory locks (share modes) can prohibit a read and
2299  *	byte-range POSIX locks can prohibit a read if they overlap.
2300  *
2301  *	N.B. this function is only ever called
2302  *	from knfsd and ownership of locks is never checked.
2303  */
2304 int lock_may_read(struct inode *inode, loff_t start, unsigned long len)
2305 {
2306 	struct file_lock *fl;
2307 	int result = 1;
2308 	lock_flocks();
2309 	for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next) {
2310 		if (IS_POSIX(fl)) {
2311 			if (fl->fl_type == F_RDLCK)
2312 				continue;
2313 			if ((fl->fl_end < start) || (fl->fl_start > (start + len)))
2314 				continue;
2315 		} else if (IS_FLOCK(fl)) {
2316 			if (!(fl->fl_type & LOCK_MAND))
2317 				continue;
2318 			if (fl->fl_type & LOCK_READ)
2319 				continue;
2320 		} else
2321 			continue;
2322 		result = 0;
2323 		break;
2324 	}
2325 	unlock_flocks();
2326 	return result;
2327 }
2328 
2329 EXPORT_SYMBOL(lock_may_read);
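
/*
 * Illustrative use, not part of this file: a server-side read path (e.g.
 * in knfsd) could gate I/O on the check above.  The wrapper below is
 * hypothetical.
 */
#if 0
static int may_read_region(struct inode *inode, loff_t start, unsigned long len)
{
	return lock_may_read(inode, start, len) ? 0 : -EACCES;
}
#endif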
2330 
2331 /**
2332  *	lock_may_write - checks that the region is free of locks
2333  *	@inode: the inode that is being written
2334  *	@start: the first byte to write
2335  *	@len: the number of bytes to write
2336  *
2337  *	Emulates Windows locking requirements.  Whole-file
2338  *	mandatory locks (share modes) can prohibit a write and
2339  *	byte-range POSIX locks can prohibit a write if they overlap.
2340  *
2341  *	N.B. this function is only ever called
2342  *	from knfsd and ownership of locks is never checked.
2343  */
2344 int lock_may_write(struct inode *inode, loff_t start, unsigned long len)
2345 {
2346 	struct file_lock *fl;
2347 	int result = 1;
2348 	lock_flocks();
2349 	for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next) {
2350 		if (IS_POSIX(fl)) {
2351 			if ((fl->fl_end < start) || (fl->fl_start > (start + len)))
2352 				continue;
2353 		} else if (IS_FLOCK(fl)) {
2354 			if (!(fl->fl_type & LOCK_MAND))
2355 				continue;
2356 			if (fl->fl_type & LOCK_WRITE)
2357 				continue;
2358 		} else
2359 			continue;
2360 		result = 0;
2361 		break;
2362 	}
2363 	unlock_flocks();
2364 	return result;
2365 }
2366 
2367 EXPORT_SYMBOL(lock_may_write);
2368 
2369 static int __init filelock_init(void)
2370 {
2371 	filelock_cache = kmem_cache_create("file_lock_cache",
2372 			sizeof(struct file_lock), 0, SLAB_PANIC, NULL);
2373 
2374 	return 0;
2375 }
2376 
2377 core_initcall(filelock_init);
2378