/*
 *  linux/fs/locks.c
 *
 *  Provide support for fcntl()'s F_GETLK, F_SETLK, and F_SETLKW calls.
 *  Doug Evans (dje@spiff.uucp), August 07, 1992
 *
 *  Deadlock detection added.
 *  FIXME: one thing isn't handled yet:
 *	- mandatory locks (requires lots of changes elsewhere)
 *  Kelly Carmichael (kelly@[142.24.8.65]), September 17, 1994.
 *
 *  Miscellaneous edits, and a total rewrite of posix_lock_file() code.
 *  Kai Petzke (wpp@marie.physik.tu-berlin.de), 1994
 *
 *  Converted file_lock_table to a linked list from an array, which eliminates
 *  the limits on how many active file locks are open.
 *  Chad Page (pageone@netcom.com), November 27, 1994
 *
 *  Removed dependency on file descriptors. dup()'ed file descriptors now
 *  get the same locks as the original file descriptors, and a close() on
 *  any file descriptor removes ALL the locks on the file for the current
 *  process. Since locks still depend on the process id, locks are inherited
 *  after an exec() but not after a fork(). This agrees with POSIX, and both
 *  BSD and SVR4 practice.
 *  Andy Walker (andy@lysaker.kvaerner.no), February 14, 1995
 *
 *  Scrapped free list which is redundant now that we allocate locks
 *  dynamically with kmalloc()/kfree().
 *  Andy Walker (andy@lysaker.kvaerner.no), February 21, 1995
 *
 *  Implemented two lock personalities - FL_FLOCK and FL_POSIX.
 *
 *  FL_POSIX locks are created with calls to fcntl() and lockf() through the
 *  fcntl() system call. They have the semantics described above.
 *
 *  FL_FLOCK locks are created with calls to flock(), through the flock()
 *  system call, which is new. Old C libraries implement flock() via fcntl()
 *  and will continue to use the old, broken implementation.
 *
 *  FL_FLOCK locks follow the 4.4 BSD flock() semantics. They are associated
 *  with a file pointer (filp). As a result they can be shared by a parent
 *  process and its children after a fork(). They are removed when the last
 *  file descriptor referring to the file pointer is closed (unless explicitly
 *  unlocked).
 *
 *  FL_FLOCK locks never deadlock; an existing lock is always removed before
 *  upgrading from shared to exclusive (or vice versa). When this happens
 *  any processes blocked by the current lock are woken up and allowed to
 *  run before the new lock is applied.
 *  Andy Walker (andy@lysaker.kvaerner.no), June 09, 1995
 *
 *  Removed some race conditions in flock_lock_file(), marked other possible
 *  races. Just grep for FIXME to see them.
 *  Dmitry Gorodchanin (pgmdsg@ibi.com), February 09, 1996.
 *
 *  Addressed Dmitry's concerns. Deadlock checking no longer recursive.
 *  Lock allocation changed to GFP_ATOMIC as we can't afford to sleep
 *  once we've checked for blocking and deadlocking.
 *  Andy Walker (andy@lysaker.kvaerner.no), April 03, 1996.
 *
 *  Initial implementation of mandatory locks. SunOS turned out to be
 *  a rotten model, so I implemented the "obvious" semantics.
 *  See 'Documentation/filesystems/mandatory-locking.txt' for details.
 *  Andy Walker (andy@lysaker.kvaerner.no), April 06, 1996.
 *
 *  Don't allow mandatory locks on mmap()'ed files. Added simple functions to
 *  check if a file has mandatory locks, used by mmap(), open() and creat() to
 *  see if system call should be rejected. Ref. HP-UX/SunOS/Solaris Reference
 *  Manual, Section 2.
 *  Andy Walker (andy@lysaker.kvaerner.no), April 09, 1996.
 *
 *  Tidied up block list handling. Added '/proc/locks' interface.
 *  Andy Walker (andy@lysaker.kvaerner.no), April 24, 1996.
 *
 *  Fixed deadlock condition for pathological code that mixes calls to
 *  flock() and fcntl().
 *  Andy Walker (andy@lysaker.kvaerner.no), April 29, 1996.
 *
 *  Allow only one type of locking scheme (FL_POSIX or FL_FLOCK) to be in use
 *  for a given file at a time. Changed the CONFIG_LOCK_MANDATORY scheme to
 *  guarantee sensible behaviour in the case where file system modules might
 *  be compiled with different options than the kernel itself.
 *  Andy Walker (andy@lysaker.kvaerner.no), May 15, 1996.
 *
 *  Added a couple of missing wake_up() calls. Thanks to Thomas Meckel
 *  (Thomas.Meckel@mni.fh-giessen.de) for spotting this.
 *  Andy Walker (andy@lysaker.kvaerner.no), May 15, 1996.
 *
 *  Changed FL_POSIX locks to use the block list in the same way as FL_FLOCK
 *  locks. Changed process synchronisation to avoid dereferencing locks that
 *  have already been freed.
 *  Andy Walker (andy@lysaker.kvaerner.no), Sep 21, 1996.
 *
 *  Made the block list a circular list to minimise searching in the list.
 *  Andy Walker (andy@lysaker.kvaerner.no), Sep 25, 1996.
 *
 *  Made mandatory locking a mount option. Default is not to allow mandatory
 *  locking.
 *  Andy Walker (andy@lysaker.kvaerner.no), Oct 04, 1996.
 *
 *  Some adaptations for NFS support.
 *  Olaf Kirch (okir@monad.swb.de), Dec 1996,
 *
 *  Fixed /proc/locks interface so that we can't overrun the buffer we are handed.
 *  Andy Walker (andy@lysaker.kvaerner.no), May 12, 1997.
 *
 *  Use slab allocator instead of kmalloc/kfree.
 *  Use generic list implementation from <linux/list.h>.
 *  Sped up posix_locks_deadlock by only considering blocked locks.
 *  Matthew Wilcox <willy@debian.org>, March, 2000.
 *
 *  Leases and LOCK_MAND
 *  Matthew Wilcox <willy@debian.org>, June, 2000.
 *  Stephen Rothwell <sfr@canb.auug.org.au>, June, 2000.
 */
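
/*
 * Editor's illustration (not part of the original source): the two lock
 * personalities above map to different userspace calls.  A minimal sketch,
 * assuming an already-open descriptor fd:
 *
 *	flock(fd, LOCK_EX);	- whole-file FL_FLOCK lock, shared by every
 *				  descriptor referring to the same open file
 *				  (e.g. across dup() and fork())
 *
 *	struct flock fl = { .l_type = F_WRLCK, .l_whence = SEEK_SET,
 *			    .l_start = 0, .l_len = 0 };
 *	fcntl(fd, F_SETLKW, &fl);	- FL_POSIX byte-range lock owned by
 *					  the process (l_len == 0 means
 *					  "to end of file")
 */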

#include <linux/capability.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/security.h>
#include <linux/slab.h>
#include <linux/syscalls.h>
#include <linux/time.h>
#include <linux/rcupdate.h>
#include <linux/pid_namespace.h>

#include <asm/uaccess.h>

#define IS_POSIX(fl)	(fl->fl_flags & FL_POSIX)
#define IS_FLOCK(fl)	(fl->fl_flags & FL_FLOCK)
#define IS_LEASE(fl)	(fl->fl_flags & FL_LEASE)

static bool lease_breaking(struct file_lock *fl)
{
	return fl->fl_flags & (FL_UNLOCK_PENDING | FL_DOWNGRADE_PENDING);
}

static int target_leasetype(struct file_lock *fl)
{
	if (fl->fl_flags & FL_UNLOCK_PENDING)
		return F_UNLCK;
	if (fl->fl_flags & FL_DOWNGRADE_PENDING)
		return F_RDLCK;
	return fl->fl_type;
}

int leases_enable = 1;
int lease_break_time = 45;

#define for_each_lock(inode, lockp) \
	for (lockp = &inode->i_flock; *lockp != NULL; lockp = &(*lockp)->fl_next)

static LIST_HEAD(file_lock_list);
static LIST_HEAD(blocked_list);
static DEFINE_SPINLOCK(file_lock_lock);

/*
 * Protects the two list heads above, plus the inode->i_flock list
 */
void lock_flocks(void)
{
	spin_lock(&file_lock_lock);
}
EXPORT_SYMBOL_GPL(lock_flocks);

void unlock_flocks(void)
{
	spin_unlock(&file_lock_lock);
}
EXPORT_SYMBOL_GPL(unlock_flocks);

static struct kmem_cache *filelock_cache __read_mostly;

static void locks_init_lock_heads(struct file_lock *fl)
{
	INIT_LIST_HEAD(&fl->fl_link);
	INIT_LIST_HEAD(&fl->fl_block);
	init_waitqueue_head(&fl->fl_wait);
}

/* Allocate an empty lock structure. */
struct file_lock *locks_alloc_lock(void)
{
	struct file_lock *fl = kmem_cache_zalloc(filelock_cache, GFP_KERNEL);

	if (fl)
		locks_init_lock_heads(fl);

	return fl;
}
EXPORT_SYMBOL_GPL(locks_alloc_lock);

void locks_release_private(struct file_lock *fl)
{
	if (fl->fl_ops) {
		if (fl->fl_ops->fl_release_private)
			fl->fl_ops->fl_release_private(fl);
		fl->fl_ops = NULL;
	}
	if (fl->fl_lmops) {
		if (fl->fl_lmops->lm_release_private)
			fl->fl_lmops->lm_release_private(fl);
		fl->fl_lmops = NULL;
	}
}
EXPORT_SYMBOL_GPL(locks_release_private);

/* Free a lock which is not in use. */
void locks_free_lock(struct file_lock *fl)
{
	BUG_ON(waitqueue_active(&fl->fl_wait));
	BUG_ON(!list_empty(&fl->fl_block));
	BUG_ON(!list_empty(&fl->fl_link));

	locks_release_private(fl);
	kmem_cache_free(filelock_cache, fl);
}
EXPORT_SYMBOL(locks_free_lock);

void locks_init_lock(struct file_lock *fl)
{
	memset(fl, 0, sizeof(struct file_lock));
	locks_init_lock_heads(fl);
}

EXPORT_SYMBOL(locks_init_lock);

static void locks_copy_private(struct file_lock *new, struct file_lock *fl)
{
	if (fl->fl_ops) {
		if (fl->fl_ops->fl_copy_lock)
			fl->fl_ops->fl_copy_lock(new, fl);
		new->fl_ops = fl->fl_ops;
	}
	if (fl->fl_lmops)
		new->fl_lmops = fl->fl_lmops;
}

/*
 * Initialize a new lock from an existing file_lock structure.
 */
void __locks_copy_lock(struct file_lock *new, const struct file_lock *fl)
{
	new->fl_owner = fl->fl_owner;
	new->fl_pid = fl->fl_pid;
	new->fl_file = NULL;
	new->fl_flags = fl->fl_flags;
	new->fl_type = fl->fl_type;
	new->fl_start = fl->fl_start;
	new->fl_end = fl->fl_end;
	new->fl_ops = NULL;
	new->fl_lmops = NULL;
}
EXPORT_SYMBOL(__locks_copy_lock);

void locks_copy_lock(struct file_lock *new, struct file_lock *fl)
{
	locks_release_private(new);

	__locks_copy_lock(new, fl);
	new->fl_file = fl->fl_file;
	new->fl_ops = fl->fl_ops;
	new->fl_lmops = fl->fl_lmops;

	locks_copy_private(new, fl);
}

EXPORT_SYMBOL(locks_copy_lock);

static inline int flock_translate_cmd(int cmd)
{
	if (cmd & LOCK_MAND)
		return cmd & (LOCK_MAND | LOCK_RW);
	switch (cmd) {
	case LOCK_SH:
		return F_RDLCK;
	case LOCK_EX:
		return F_WRLCK;
	case LOCK_UN:
		return F_UNLCK;
	}
	return -EINVAL;
}

/* Fill in a file_lock structure with an appropriate FLOCK lock. */
static int flock_make_lock(struct file *filp, struct file_lock **lock,
		unsigned int cmd)
{
	struct file_lock *fl;
	int type = flock_translate_cmd(cmd);
	if (type < 0)
		return type;

	fl = locks_alloc_lock();
	if (fl == NULL)
		return -ENOMEM;

	fl->fl_file = filp;
	fl->fl_pid = current->tgid;
	fl->fl_flags = FL_FLOCK;
	fl->fl_type = type;
	fl->fl_end = OFFSET_MAX;

	*lock = fl;
	return 0;
}

static int assign_type(struct file_lock *fl, int type)
{
	switch (type) {
	case F_RDLCK:
	case F_WRLCK:
	case F_UNLCK:
		fl->fl_type = type;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

/* Verify a "struct flock" and copy it to a "struct file_lock" as a POSIX
 * style lock.
 */
static int flock_to_posix_lock(struct file *filp, struct file_lock *fl,
			       struct flock *l)
{
	off_t start, end;

	switch (l->l_whence) {
	case SEEK_SET:
		start = 0;
		break;
	case SEEK_CUR:
		start = filp->f_pos;
		break;
	case SEEK_END:
		start = i_size_read(filp->f_path.dentry->d_inode);
		break;
	default:
		return -EINVAL;
	}

	/* POSIX-1996 leaves the case l->l_len < 0 undefined;
	   POSIX-2001 defines it. */
	start += l->l_start;
	if (start < 0)
		return -EINVAL;
	fl->fl_end = OFFSET_MAX;
	if (l->l_len > 0) {
		end = start + l->l_len - 1;
		fl->fl_end = end;
	} else if (l->l_len < 0) {
		end = start - 1;
		fl->fl_end = end;
		start += l->l_len;
		if (start < 0)
			return -EINVAL;
	}
	fl->fl_start = start;	/* we record the absolute position */
	if (fl->fl_end < fl->fl_start)
		return -EOVERFLOW;

	fl->fl_owner = current->files;
	fl->fl_pid = current->tgid;
	fl->fl_file = filp;
	fl->fl_flags = FL_POSIX;
	fl->fl_ops = NULL;
	fl->fl_lmops = NULL;

	return assign_type(fl, l->l_type);
}
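
/*
 * Worked example (editor's note): with l_whence == SEEK_SET, l_start == 100
 * and l_len == 10, the conversion above yields fl_start == 100 and
 * fl_end == 109.  A negative length locks the range *before* l_start:
 * l_start == 100, l_len == -10 yields fl_start == 90, fl_end == 99.
 * l_len == 0 locks from l_start all the way to OFFSET_MAX (end of file).
 */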

#if BITS_PER_LONG == 32
static int flock64_to_posix_lock(struct file *filp, struct file_lock *fl,
				 struct flock64 *l)
{
	loff_t start;

	switch (l->l_whence) {
	case SEEK_SET:
		start = 0;
		break;
	case SEEK_CUR:
		start = filp->f_pos;
		break;
	case SEEK_END:
		start = i_size_read(filp->f_path.dentry->d_inode);
		break;
	default:
		return -EINVAL;
	}

	start += l->l_start;
	if (start < 0)
		return -EINVAL;
	fl->fl_end = OFFSET_MAX;
	if (l->l_len > 0) {
		fl->fl_end = start + l->l_len - 1;
	} else if (l->l_len < 0) {
		fl->fl_end = start - 1;
		start += l->l_len;
		if (start < 0)
			return -EINVAL;
	}
	fl->fl_start = start;	/* we record the absolute position */
	if (fl->fl_end < fl->fl_start)
		return -EOVERFLOW;

	fl->fl_owner = current->files;
	fl->fl_pid = current->tgid;
	fl->fl_file = filp;
	fl->fl_flags = FL_POSIX;
	fl->fl_ops = NULL;
	fl->fl_lmops = NULL;

	return assign_type(fl, l->l_type);
}
#endif

/* default lease lock manager operations */
static void lease_break_callback(struct file_lock *fl)
{
	kill_fasync(&fl->fl_fasync, SIGIO, POLL_MSG);
}

static void lease_release_private_callback(struct file_lock *fl)
{
	if (!fl->fl_file)
		return;

	f_delown(fl->fl_file);
	fl->fl_file->f_owner.signum = 0;
}

static const struct lock_manager_operations lease_manager_ops = {
	.lm_break = lease_break_callback,
	.lm_release_private = lease_release_private_callback,
	.lm_change = lease_modify,
};

/*
 * Initialize a lease, use the default lock manager operations
 */
static int lease_init(struct file *filp, int type, struct file_lock *fl)
{
	if (assign_type(fl, type) != 0)
		return -EINVAL;

	fl->fl_owner = current->files;
	fl->fl_pid = current->tgid;

	fl->fl_file = filp;
	fl->fl_flags = FL_LEASE;
	fl->fl_start = 0;
	fl->fl_end = OFFSET_MAX;
	fl->fl_ops = NULL;
	fl->fl_lmops = &lease_manager_ops;
	return 0;
}

/* Allocate a file_lock initialised to this type of lease */
static struct file_lock *lease_alloc(struct file *filp, int type)
{
	struct file_lock *fl = locks_alloc_lock();
	int error = -ENOMEM;

	if (fl == NULL)
		return ERR_PTR(error);

	error = lease_init(filp, type, fl);
	if (error) {
		locks_free_lock(fl);
		return ERR_PTR(error);
	}
	return fl;
}

/* Check if two locks overlap each other.
 */
static inline int locks_overlap(struct file_lock *fl1, struct file_lock *fl2)
{
	return ((fl1->fl_end >= fl2->fl_start) &&
		(fl2->fl_end >= fl1->fl_start));
}
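
/*
 * For example (editor's note), [5,15] and [10,20] overlap
 * (15 >= 10 && 20 >= 5), while [0,4] and [5,9] do not (4 < 5); merely
 * adjacent ranges are not considered overlapping here.
 */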

/*
 * Check whether two locks have the same owner.
 */
static int posix_same_owner(struct file_lock *fl1, struct file_lock *fl2)
{
	if (fl1->fl_lmops && fl1->fl_lmops->lm_compare_owner)
		return fl2->fl_lmops == fl1->fl_lmops &&
			fl1->fl_lmops->lm_compare_owner(fl1, fl2);
	return fl1->fl_owner == fl2->fl_owner;
}

/* Remove waiter from blocker's block list.
 * When blocker ends up pointing to itself then the list is empty.
 */
static void __locks_delete_block(struct file_lock *waiter)
{
	list_del_init(&waiter->fl_block);
	list_del_init(&waiter->fl_link);
	waiter->fl_next = NULL;
}

/*
 * Locked version of __locks_delete_block(); takes and releases
 * file_lock_lock around the unlink for callers that don't hold it.
 */
static void locks_delete_block(struct file_lock *waiter)
{
	lock_flocks();
	__locks_delete_block(waiter);
	unlock_flocks();
}

/* Insert waiter into blocker's block list.
 * We use a circular list so that processes can be easily woken up in
 * the order they blocked. The documentation doesn't require this but
 * it seems like the reasonable thing to do.
 */
static void locks_insert_block(struct file_lock *blocker,
			       struct file_lock *waiter)
{
	BUG_ON(!list_empty(&waiter->fl_block));
	list_add_tail(&waiter->fl_block, &blocker->fl_block);
	waiter->fl_next = blocker;
	if (IS_POSIX(blocker))
		list_add(&waiter->fl_link, &blocked_list);
}

/* Wake up processes blocked waiting for blocker.
 * Walk the blocker's block list, removing each waiter and waking it up,
 * either through the lock manager's lm_notify callback or directly via
 * its wait queue.
 */
static void locks_wake_up_blocks(struct file_lock *blocker)
{
	while (!list_empty(&blocker->fl_block)) {
		struct file_lock *waiter;

		waiter = list_first_entry(&blocker->fl_block,
				struct file_lock, fl_block);
		__locks_delete_block(waiter);
		if (waiter->fl_lmops && waiter->fl_lmops->lm_notify)
			waiter->fl_lmops->lm_notify(waiter);
		else
			wake_up(&waiter->fl_wait);
	}
}

/* Insert file lock fl into an inode's lock list at the position indicated
 * by pos. At the same time add the lock to the global file lock list.
 */
static void locks_insert_lock(struct file_lock **pos, struct file_lock *fl)
{
	list_add(&fl->fl_link, &file_lock_list);

	fl->fl_nspid = get_pid(task_tgid(current));

	/* insert into file's list */
	fl->fl_next = *pos;
	*pos = fl;
}

/*
 * Delete a lock and then free it.
 * Wake up processes that are blocked waiting for this lock,
 * notify the FS that the lock has been cleared and
 * finally free the lock.
 */
static void locks_delete_lock(struct file_lock **thisfl_p)
{
	struct file_lock *fl = *thisfl_p;

	*thisfl_p = fl->fl_next;
	fl->fl_next = NULL;
	list_del_init(&fl->fl_link);

	fasync_helper(0, fl->fl_file, 0, &fl->fl_fasync);
	if (fl->fl_fasync != NULL) {
		printk(KERN_ERR "locks_delete_lock: fasync == %p\n", fl->fl_fasync);
		fl->fl_fasync = NULL;
	}

	if (fl->fl_nspid) {
		put_pid(fl->fl_nspid);
		fl->fl_nspid = NULL;
	}

	locks_wake_up_blocks(fl);
	locks_free_lock(fl);
}

/* Determine if lock sys_fl blocks lock caller_fl. Common functionality
 * checks for shared/exclusive status of overlapping locks.
 */
static int locks_conflict(struct file_lock *caller_fl, struct file_lock *sys_fl)
{
	if (sys_fl->fl_type == F_WRLCK)
		return 1;
	if (caller_fl->fl_type == F_WRLCK)
		return 1;
	return 0;
}

/* Determine if lock sys_fl blocks lock caller_fl. POSIX-specific
 * checking before calling locks_conflict().
 */
static int posix_locks_conflict(struct file_lock *caller_fl, struct file_lock *sys_fl)
{
	/* POSIX locks owned by the same process do not conflict with
	 * each other.
	 */
	if (!IS_POSIX(sys_fl) || posix_same_owner(caller_fl, sys_fl))
		return (0);

	/* Check whether they overlap */
	if (!locks_overlap(caller_fl, sys_fl))
		return 0;

	return (locks_conflict(caller_fl, sys_fl));
}

/* Determine if lock sys_fl blocks lock caller_fl. FLOCK-specific
 * checking before calling locks_conflict().
 */
static int flock_locks_conflict(struct file_lock *caller_fl, struct file_lock *sys_fl)
{
	/* FLOCK locks referring to the same filp do not conflict with
	 * each other.
	 */
	if (!IS_FLOCK(sys_fl) || (caller_fl->fl_file == sys_fl->fl_file))
		return (0);
	if ((caller_fl->fl_type & LOCK_MAND) || (sys_fl->fl_type & LOCK_MAND))
		return 0;

	return (locks_conflict(caller_fl, sys_fl));
}

void
posix_test_lock(struct file *filp, struct file_lock *fl)
{
	struct file_lock *cfl;

	lock_flocks();
	for (cfl = filp->f_path.dentry->d_inode->i_flock; cfl; cfl = cfl->fl_next) {
		if (!IS_POSIX(cfl))
			continue;
		if (posix_locks_conflict(fl, cfl))
			break;
	}
	if (cfl) {
		__locks_copy_lock(fl, cfl);
		if (cfl->fl_nspid)
			fl->fl_pid = pid_vnr(cfl->fl_nspid);
	} else
		fl->fl_type = F_UNLCK;
	unlock_flocks();
	return;
}
EXPORT_SYMBOL(posix_test_lock);

/*
 * Deadlock detection:
 *
 * We attempt to detect deadlocks that are due purely to posix file
 * locks.
 *
 * We assume that a task can be waiting for at most one lock at a time.
 * So for any acquired lock, the process holding that lock may be
 * waiting on at most one other lock.  That lock in turn may be held by
 * someone waiting for at most one other lock.  Given a requested lock
 * caller_fl which is about to wait for a conflicting lock block_fl, we
 * follow this chain of waiters to ensure we are not about to create a
 * cycle.
 *
 * Since we do this before we ever put a process to sleep on a lock, we
 * are assured that there is never a cycle; that is what guarantees that
 * the while() loop in posix_locks_deadlock() eventually completes.
 *
 * Note: the above assumption may not be true when handling lock
 * requests from a broken NFS client. It may also fail in the presence
 * of tasks (such as posix threads) sharing the same open file table.
 *
 * To handle those cases, we just bail out after a few iterations.
 */

#define MAX_DEADLK_ITERATIONS 10

/* Find a lock that the owner of the given block_fl is blocking on. */
static struct file_lock *what_owner_is_waiting_for(struct file_lock *block_fl)
{
	struct file_lock *fl;

	list_for_each_entry(fl, &blocked_list, fl_link) {
		if (posix_same_owner(fl, block_fl))
			return fl->fl_next;
	}
	return NULL;
}

static int posix_locks_deadlock(struct file_lock *caller_fl,
				struct file_lock *block_fl)
{
	int i = 0;

	while ((block_fl = what_owner_is_waiting_for(block_fl))) {
		if (i++ > MAX_DEADLK_ITERATIONS)
			return 0;
		if (posix_same_owner(caller_fl, block_fl))
			return 1;
	}
	return 0;
}
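
/*
 * Concrete scenario (editor's illustration): task A holds a lock L1 and is
 * blocked waiting for L2, which task B holds.  If B now requests a range
 * conflicting with L1, posix_locks_deadlock(B's request, L1) asks
 * what_owner_is_waiting_for(L1): the blocked_list shows L1's owner (A)
 * waiting on L2.  Since L2 is owned by B, the caller itself,
 * posix_same_owner() matches and the function returns 1, so the request
 * fails with -EDEADLK instead of sleeping forever.
 */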

/* Try to create a FLOCK lock on filp. We always insert new FLOCK locks
 * after any leases, but before any posix locks.
 *
 * Note that if called with an FL_EXISTS argument, the caller may determine
 * whether or not a lock was successfully freed by testing the return
 * value for -ENOENT.
 */
static int flock_lock_file(struct file *filp, struct file_lock *request)
{
	struct file_lock *new_fl = NULL;
	struct file_lock **before;
	struct inode * inode = filp->f_path.dentry->d_inode;
	int error = 0;
	int found = 0;

	if (!(request->fl_flags & FL_ACCESS) && (request->fl_type != F_UNLCK)) {
		new_fl = locks_alloc_lock();
		if (!new_fl)
			return -ENOMEM;
	}

	lock_flocks();
	if (request->fl_flags & FL_ACCESS)
		goto find_conflict;

	for_each_lock(inode, before) {
		struct file_lock *fl = *before;
		if (IS_POSIX(fl))
			break;
		if (IS_LEASE(fl))
			continue;
		if (filp != fl->fl_file)
			continue;
		if (request->fl_type == fl->fl_type)
			goto out;
		found = 1;
		locks_delete_lock(before);
		break;
	}

	if (request->fl_type == F_UNLCK) {
		if ((request->fl_flags & FL_EXISTS) && !found)
			error = -ENOENT;
		goto out;
	}

	/*
	 * If a higher-priority process was blocked on the old file lock,
	 * give it the opportunity to lock the file.
	 */
	if (found) {
		unlock_flocks();
		cond_resched();
		lock_flocks();
	}

find_conflict:
	for_each_lock(inode, before) {
		struct file_lock *fl = *before;
		if (IS_POSIX(fl))
			break;
		if (IS_LEASE(fl))
			continue;
		if (!flock_locks_conflict(request, fl))
			continue;
		error = -EAGAIN;
		if (!(request->fl_flags & FL_SLEEP))
			goto out;
		error = FILE_LOCK_DEFERRED;
		locks_insert_block(fl, request);
		goto out;
	}
	if (request->fl_flags & FL_ACCESS)
		goto out;
	locks_copy_lock(new_fl, request);
	locks_insert_lock(before, new_fl);
	new_fl = NULL;
	error = 0;

out:
	unlock_flocks();
	if (new_fl)
		locks_free_lock(new_fl);
	return error;
}

static int __posix_lock_file(struct inode *inode, struct file_lock *request, struct file_lock *conflock)
{
	struct file_lock *fl;
	struct file_lock *new_fl = NULL;
	struct file_lock *new_fl2 = NULL;
	struct file_lock *left = NULL;
	struct file_lock *right = NULL;
	struct file_lock **before;
	int error, added = 0;

	/*
	 * We may need two file_lock structures for this operation,
	 * so we get them in advance to avoid races.
	 *
	 * In some cases we can be sure that no new locks will be needed.
	 */
	if (!(request->fl_flags & FL_ACCESS) &&
	    (request->fl_type != F_UNLCK ||
	     request->fl_start != 0 || request->fl_end != OFFSET_MAX)) {
		new_fl = locks_alloc_lock();
		new_fl2 = locks_alloc_lock();
	}

	lock_flocks();
	if (request->fl_type != F_UNLCK) {
		for_each_lock(inode, before) {
			fl = *before;
			if (!IS_POSIX(fl))
				continue;
			if (!posix_locks_conflict(request, fl))
				continue;
			if (conflock)
				__locks_copy_lock(conflock, fl);
			error = -EAGAIN;
			if (!(request->fl_flags & FL_SLEEP))
				goto out;
			error = -EDEADLK;
			if (posix_locks_deadlock(request, fl))
				goto out;
			error = FILE_LOCK_DEFERRED;
			locks_insert_block(fl, request);
			goto out;
		}
	}

	/* If we're just looking for a conflict, we're done. */
	error = 0;
	if (request->fl_flags & FL_ACCESS)
		goto out;

	/*
	 * Find the first old lock with the same owner as the new lock.
	 */

	before = &inode->i_flock;

	/* First skip locks owned by other processes.  */
	while ((fl = *before) && (!IS_POSIX(fl) ||
				  !posix_same_owner(request, fl))) {
		before = &fl->fl_next;
	}

	/* Process locks with this owner.  */
	while ((fl = *before) && posix_same_owner(request, fl)) {
		/* Detect adjacent or overlapping regions (if same lock type)
		 */
		if (request->fl_type == fl->fl_type) {
			/* In all comparisons of start vs end, use
			 * "start - 1" rather than "end + 1". If end
			 * is OFFSET_MAX, end + 1 will become negative.
			 */
			if (fl->fl_end < request->fl_start - 1)
				goto next_lock;
			/* If the next lock in the list has entirely bigger
			 * addresses than the new one, insert the lock here.
			 */
			if (fl->fl_start - 1 > request->fl_end)
				break;

			/* If we come here, the new and old lock are of the
			 * same type and adjacent or overlapping. Make one
			 * lock yielding from the lower start address of both
			 * locks to the higher end address.
			 */
			if (fl->fl_start > request->fl_start)
				fl->fl_start = request->fl_start;
			else
				request->fl_start = fl->fl_start;
			if (fl->fl_end < request->fl_end)
				fl->fl_end = request->fl_end;
			else
				request->fl_end = fl->fl_end;
			if (added) {
				locks_delete_lock(before);
				continue;
			}
			request = fl;
			added = 1;
		} else {
			/* Processing for different lock types is a bit
			 * more complex.
			 */
			if (fl->fl_end < request->fl_start)
				goto next_lock;
			if (fl->fl_start > request->fl_end)
				break;
			if (request->fl_type == F_UNLCK)
				added = 1;
			if (fl->fl_start < request->fl_start)
				left = fl;
			/* If the next lock in the list has a higher end
			 * address than the new one, insert the new one here.
			 */
			if (fl->fl_end > request->fl_end) {
				right = fl;
				break;
			}
			if (fl->fl_start >= request->fl_start) {
				/* The new lock completely replaces an old
				 * one (This may happen several times).
				 */
				if (added) {
					locks_delete_lock(before);
					continue;
				}
				/* Replace the old lock with the new one.
				 * Wake up anybody waiting for the old one,
				 * as the change in lock type might satisfy
				 * their needs.
				 */
				locks_wake_up_blocks(fl);
				fl->fl_start = request->fl_start;
				fl->fl_end = request->fl_end;
				fl->fl_type = request->fl_type;
				locks_release_private(fl);
				locks_copy_private(fl, request);
				request = fl;
				added = 1;
			}
		}
		/* Go on to next lock.
		 */
	next_lock:
		before = &fl->fl_next;
	}

	/*
	 * The above code only modifies existing locks in case of
	 * merging or replacing.  If new lock(s) need to be inserted
	 * all modifications are done below this point, so it's still
	 * safe to bail out.
	 */
	error = -ENOLCK; /* "no luck" */
	if (right && left == right && !new_fl2)
		goto out;

	error = 0;
	if (!added) {
		if (request->fl_type == F_UNLCK) {
			if (request->fl_flags & FL_EXISTS)
				error = -ENOENT;
			goto out;
		}

		if (!new_fl) {
			error = -ENOLCK;
			goto out;
		}
		locks_copy_lock(new_fl, request);
		locks_insert_lock(before, new_fl);
		new_fl = NULL;
	}
	if (right) {
		if (left == right) {
			/* The new lock breaks the old one in two pieces,
			 * so we have to use the second new lock.
			 */
			left = new_fl2;
			new_fl2 = NULL;
			locks_copy_lock(left, right);
			locks_insert_lock(before, left);
		}
		right->fl_start = request->fl_end + 1;
		locks_wake_up_blocks(right);
	}
	if (left) {
		left->fl_end = request->fl_start - 1;
		locks_wake_up_blocks(left);
	}
 out:
	unlock_flocks();
	/*
	 * Free any unused locks.
	 */
	if (new_fl)
		locks_free_lock(new_fl);
	if (new_fl2)
		locks_free_lock(new_fl2);
	return error;
}

/**
 * posix_lock_file - Apply a POSIX-style lock to a file
 * @filp: The file to apply the lock to
 * @fl: The lock to be applied
 * @conflock: Place to return a copy of the conflicting lock, if found.
 *
 * Add a POSIX style lock to a file.
 * We merge adjacent & overlapping locks whenever possible.
 * POSIX locks are sorted by owner task, then by starting address
 *
 * Note that if called with an FL_EXISTS argument, the caller may determine
 * whether or not a lock was successfully freed by testing the return
 * value for -ENOENT.
 */
int posix_lock_file(struct file *filp, struct file_lock *fl,
			struct file_lock *conflock)
{
	return __posix_lock_file(filp->f_path.dentry->d_inode, fl, conflock);
}
EXPORT_SYMBOL(posix_lock_file);
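
/*
 * Merge/split example (editor's note): if a process write-locks [0,9] and
 * then write-locks [10,19], the regions are adjacent and of equal type, so
 * the code above coalesces them into one lock on [0,19].  Conversely,
 * unlocking [5,14] out of a held [0,19] lock leaves [0,4] and [15,19],
 * which is why __posix_lock_file() reserves a second file_lock (new_fl2)
 * up front for the split case.
 */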

/**
 * posix_lock_file_wait - Apply a POSIX-style lock to a file
 * @filp: The file to apply the lock to
 * @fl: The lock to be applied
 *
 * Add a POSIX style lock to a file.
 * We merge adjacent & overlapping locks whenever possible.
 * POSIX locks are sorted by owner task, then by starting address
 */
int posix_lock_file_wait(struct file *filp, struct file_lock *fl)
{
	int error;
	might_sleep();
	for (;;) {
		error = posix_lock_file(filp, fl, NULL);
		if (error != FILE_LOCK_DEFERRED)
			break;
		error = wait_event_interruptible(fl->fl_wait, !fl->fl_next);
		if (!error)
			continue;

		locks_delete_block(fl);
		break;
	}
	return error;
}
EXPORT_SYMBOL(posix_lock_file_wait);

/**
 * locks_mandatory_locked - Check for an active lock
 * @inode: the file to check
 *
 * Searches the inode's list of locks to find any POSIX locks which conflict.
 * This function is called from locks_verify_locked() only.
 */
int locks_mandatory_locked(struct inode *inode)
{
	fl_owner_t owner = current->files;
	struct file_lock *fl;

	/*
	 * Search the lock list for this inode for any POSIX locks.
	 */
	lock_flocks();
	for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next) {
		if (!IS_POSIX(fl))
			continue;
		if (fl->fl_owner != owner)
			break;
	}
	unlock_flocks();
	return fl ? -EAGAIN : 0;
}

/**
 * locks_mandatory_area - Check for a conflicting lock
 * @read_write: %FLOCK_VERIFY_WRITE for exclusive access, %FLOCK_VERIFY_READ
 *		for shared
 * @inode:      the file to check
 * @filp:       how the file was opened (if it was)
 * @offset:     start of area to check
 * @count:      length of area to check
 *
 * Searches the inode's list of locks to find any POSIX locks which conflict.
 * This function is called from rw_verify_area() and
 * locks_verify_truncate().
 */
int locks_mandatory_area(int read_write, struct inode *inode,
			 struct file *filp, loff_t offset,
			 size_t count)
{
	struct file_lock fl;
	int error;

	locks_init_lock(&fl);
	fl.fl_owner = current->files;
	fl.fl_pid = current->tgid;
	fl.fl_file = filp;
	fl.fl_flags = FL_POSIX | FL_ACCESS;
	if (filp && !(filp->f_flags & O_NONBLOCK))
		fl.fl_flags |= FL_SLEEP;
	fl.fl_type = (read_write == FLOCK_VERIFY_WRITE) ? F_WRLCK : F_RDLCK;
	fl.fl_start = offset;
	fl.fl_end = offset + count - 1;

	for (;;) {
		error = __posix_lock_file(inode, &fl, NULL);
		if (error != FILE_LOCK_DEFERRED)
			break;
		error = wait_event_interruptible(fl.fl_wait, !fl.fl_next);
		if (!error) {
			/*
			 * If we've been sleeping someone might have
			 * changed the permissions behind our back.
			 */
			if (__mandatory_lock(inode))
				continue;
		}

		locks_delete_block(&fl);
		break;
	}

	return error;
}

EXPORT_SYMBOL(locks_mandatory_area);

static void lease_clear_pending(struct file_lock *fl, int arg)
{
	switch (arg) {
	case F_UNLCK:
		fl->fl_flags &= ~FL_UNLOCK_PENDING;
		/* fall through: */
	case F_RDLCK:
		fl->fl_flags &= ~FL_DOWNGRADE_PENDING;
	}
}

/* We already had a lease on this file; just change its type */
int lease_modify(struct file_lock **before, int arg)
{
	struct file_lock *fl = *before;
	int error = assign_type(fl, arg);

	if (error)
		return error;
	lease_clear_pending(fl, arg);
	locks_wake_up_blocks(fl);
	if (arg == F_UNLCK)
		locks_delete_lock(before);
	return 0;
}

EXPORT_SYMBOL(lease_modify);

static bool past_time(unsigned long then)
{
	if (!then)
		/* 0 is a special value meaning "this never expires": */
		return false;
	return time_after(jiffies, then);
}

static void time_out_leases(struct inode *inode)
{
	struct file_lock **before;
	struct file_lock *fl;

	before = &inode->i_flock;
	while ((fl = *before) && IS_LEASE(fl) && lease_breaking(fl)) {
		if (past_time(fl->fl_downgrade_time))
			lease_modify(before, F_RDLCK);
		if (past_time(fl->fl_break_time))
			lease_modify(before, F_UNLCK);
		if (fl == *before)	/* lease_modify may have freed fl */
			before = &fl->fl_next;
	}
}

/**
 *	__break_lease	-	revoke all outstanding leases on file
 *	@inode: the inode of the file to return
 *	@mode: the open mode (read or write)
 *
 *	break_lease (inlined for speed) has checked there already is at least
 *	some kind of lock (maybe a lease) on this file.  Leases are broken on
 *	a call to open() or truncate().  This function can sleep unless you
 *	specified %O_NONBLOCK to your open().
 */
int __break_lease(struct inode *inode, unsigned int mode)
{
	int error = 0;
	struct file_lock *new_fl, *flock;
	struct file_lock *fl;
	unsigned long break_time;
	int i_have_this_lease = 0;
	int want_write = (mode & O_ACCMODE) != O_RDONLY;

	new_fl = lease_alloc(NULL, want_write ? F_WRLCK : F_RDLCK);
	if (IS_ERR(new_fl))
		return PTR_ERR(new_fl);

	lock_flocks();

	time_out_leases(inode);

	flock = inode->i_flock;
	if ((flock == NULL) || !IS_LEASE(flock))
		goto out;

	if (!locks_conflict(flock, new_fl))
		goto out;

	for (fl = flock; fl && IS_LEASE(fl); fl = fl->fl_next)
		if (fl->fl_owner == current->files)
			i_have_this_lease = 1;

	break_time = 0;
	if (lease_break_time > 0) {
		break_time = jiffies + lease_break_time * HZ;
		if (break_time == 0)
			break_time++;	/* so that 0 means no break time */
	}

	for (fl = flock; fl && IS_LEASE(fl); fl = fl->fl_next) {
		if (want_write) {
			if (fl->fl_flags & FL_UNLOCK_PENDING)
				continue;
			fl->fl_flags |= FL_UNLOCK_PENDING;
			fl->fl_break_time = break_time;
		} else {
			if (lease_breaking(fl))
				continue;
			fl->fl_flags |= FL_DOWNGRADE_PENDING;
			fl->fl_downgrade_time = break_time;
		}
		fl->fl_lmops->lm_break(fl);
	}

	if (i_have_this_lease || (mode & O_NONBLOCK)) {
		error = -EWOULDBLOCK;
		goto out;
	}

restart:
	break_time = flock->fl_break_time;
	if (break_time != 0) {
		break_time -= jiffies;
		if (break_time == 0)
			break_time++;
	}
	locks_insert_block(flock, new_fl);
	unlock_flocks();
	error = wait_event_interruptible_timeout(new_fl->fl_wait,
						!new_fl->fl_next, break_time);
	lock_flocks();
	__locks_delete_block(new_fl);
	if (error >= 0) {
		if (error == 0)
			time_out_leases(inode);
		/*
		 * Wait for the next conflicting lease that has not been
		 * broken yet
		 */
		for (flock = inode->i_flock; flock && IS_LEASE(flock);
				flock = flock->fl_next) {
			if (locks_conflict(new_fl, flock))
				goto restart;
		}
		error = 0;
	}

out:
	unlock_flocks();
	locks_free_lock(new_fl);
	return error;
}

EXPORT_SYMBOL(__break_lease);
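
/*
 * Timeline sketch (editor's illustration): with the default
 * lease_break_time of 45 seconds, an open() for write on a leased file
 * marks each conflicting lease FL_UNLOCK_PENDING, signals the holder via
 * lm_break() (SIGIO for fcntl leases), and blocks the opener for up to 45
 * seconds.  If the holder has not released the lease by then,
 * time_out_leases() revokes it and the open proceeds; a read-only open
 * only demands a downgrade to F_RDLCK (FL_DOWNGRADE_PENDING).
 */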

/**
 *	lease_get_mtime - get the last modified time of an inode
 *	@inode: the inode
 *      @time:  pointer to a timespec which will contain the last modified time
 *
 * This is to force NFS clients to flush their caches for files with
 * exclusive leases.  The justification is that if someone has an
 * exclusive lease, then they could be modifying it.
 */
void lease_get_mtime(struct inode *inode, struct timespec *time)
{
	struct file_lock *flock = inode->i_flock;
	if (flock && IS_LEASE(flock) && (flock->fl_type & F_WRLCK))
		*time = current_fs_time(inode->i_sb);
	else
		*time = inode->i_mtime;
}

EXPORT_SYMBOL(lease_get_mtime);

/**
 *	fcntl_getlease - Enquire what lease is currently active
 *	@filp: the file
 *
 *	The value returned by this function will be one of
 *	(if no lease break is pending):
 *
 *	%F_RDLCK to indicate a shared lease is held.
 *
 *	%F_WRLCK to indicate an exclusive lease is held.
 *
 *	%F_UNLCK to indicate no lease is held.
 *
 *	(if a lease break is pending):
 *
 *	%F_RDLCK to indicate an exclusive lease needs to be
 *		changed to a shared lease (or removed).
 *
 *	%F_UNLCK to indicate the lease needs to be removed.
 *
 *	XXX: sfr & willy disagree over whether F_INPROGRESS
 *	should be returned to userspace.
 */
int fcntl_getlease(struct file *filp)
{
	struct file_lock *fl;
	int type = F_UNLCK;

	lock_flocks();
	time_out_leases(filp->f_path.dentry->d_inode);
	for (fl = filp->f_path.dentry->d_inode->i_flock; fl && IS_LEASE(fl);
			fl = fl->fl_next) {
		if (fl->fl_file == filp) {
			type = target_leasetype(fl);
			break;
		}
	}
	unlock_flocks();
	return type;
}

int generic_add_lease(struct file *filp, long arg, struct file_lock **flp)
{
	struct file_lock *fl, **before, **my_before = NULL, *lease;
	struct dentry *dentry = filp->f_path.dentry;
	struct inode *inode = dentry->d_inode;
	int error;

	lease = *flp;

	error = -EAGAIN;
	if ((arg == F_RDLCK) && (atomic_read(&inode->i_writecount) > 0))
		goto out;
	if ((arg == F_WRLCK)
	    && ((dentry->d_count > 1)
		|| (atomic_read(&inode->i_count) > 1)))
		goto out;

	/*
	 * At this point, we know that if there is an exclusive
	 * lease on this file, then we hold it on this filp
	 * (otherwise our open of this file would have blocked).
	 * And if we are trying to acquire an exclusive lease,
	 * then the file is not open by anyone (including us)
	 * except for this filp.
	 */
	error = -EAGAIN;
	for (before = &inode->i_flock;
			((fl = *before) != NULL) && IS_LEASE(fl);
			before = &fl->fl_next) {
		if (fl->fl_file == filp) {
			my_before = before;
			continue;
		}
		/*
		 * No exclusive leases if someone else has a lease on
		 * this file:
		 */
		if (arg == F_WRLCK)
			goto out;
		/*
		 * Modifying our existing lease is OK, but no getting a
		 * new lease if someone else is opening for write:
		 */
		if (fl->fl_flags & FL_UNLOCK_PENDING)
			goto out;
	}

	if (my_before != NULL) {
		error = lease->fl_lmops->lm_change(my_before, arg);
		if (!error)
			*flp = *my_before;
		goto out;
	}

	error = -EINVAL;
	if (!leases_enable)
		goto out;

	locks_insert_lock(before, lease);
	return 0;

out:
	return error;
}

int generic_delete_lease(struct file *filp, struct file_lock **flp)
{
	struct file_lock *fl, **before;
	struct dentry *dentry = filp->f_path.dentry;
	struct inode *inode = dentry->d_inode;

	for (before = &inode->i_flock;
			((fl = *before) != NULL) && IS_LEASE(fl);
			before = &fl->fl_next) {
		if (fl->fl_file != filp)
			continue;
		return (*flp)->fl_lmops->lm_change(before, F_UNLCK);
	}
	return -EAGAIN;
}

/**
 *	generic_setlease	-	sets a lease on an open file
 *	@filp: file pointer
 *	@arg: type of lease to obtain
 *	@flp: input - file_lock to use, output - file_lock inserted
 *
 *	The (input) flp->fl_lmops->lm_break function is required
 *	by break_lease().
 *
 *	Called with file_lock_lock held.
 */
int generic_setlease(struct file *filp, long arg, struct file_lock **flp)
{
	struct dentry *dentry = filp->f_path.dentry;
	struct inode *inode = dentry->d_inode;
	int error;

	if ((current_fsuid() != inode->i_uid) && !capable(CAP_LEASE))
		return -EACCES;
	if (!S_ISREG(inode->i_mode))
		return -EINVAL;
	error = security_file_lock(filp, arg);
	if (error)
		return error;

	time_out_leases(inode);

	BUG_ON(!(*flp)->fl_lmops->lm_break);

	switch (arg) {
	case F_UNLCK:
		return generic_delete_lease(filp, flp);
	case F_RDLCK:
	case F_WRLCK:
		return generic_add_lease(filp, arg, flp);
	default:
		BUG();
	}
}
EXPORT_SYMBOL(generic_setlease);

static int __vfs_setlease(struct file *filp, long arg, struct file_lock **lease)
{
	if (filp->f_op && filp->f_op->setlease)
		return filp->f_op->setlease(filp, arg, lease);
	else
		return generic_setlease(filp, arg, lease);
}

/**
 *	vfs_setlease        -       sets a lease on an open file
 *	@filp: file pointer
 *	@arg: type of lease to obtain
 *	@lease: file_lock to use
 *
 *	Call this to establish a lease on the file.
 *	The (*lease)->fl_lmops->lm_break operation must be set; if not,
 *	break_lease will oops!
 *
 *	This will call the filesystem's setlease file method, if
 *	defined.  Note that there is no getlease method; instead, the
 *	filesystem setlease method should call back to setlease() to
 *	add a lease to the inode's lease list, where fcntl_getlease() can
 *	find it.  Since fcntl_getlease() only reports whether the current
 *	task holds a lease, a cluster filesystem need only do this for
 *	leases held by processes on this node.
 *
 *	There is also no break_lease method; filesystems that
 *	handle their own leases should break leases themselves from the
 *	filesystem's open, create, and (on truncate) setattr methods.
 *
 *	Warning: the only current setlease methods exist only to disable
 *	leases in certain cases.  More vfs changes may be required to
 *	allow a full filesystem lease implementation.
 */

int vfs_setlease(struct file *filp, long arg, struct file_lock **lease)
{
	int error;

	lock_flocks();
	error = __vfs_setlease(filp, arg, lease);
	unlock_flocks();

	return error;
}
EXPORT_SYMBOL_GPL(vfs_setlease);

static int do_fcntl_delete_lease(struct file *filp)
{
	struct file_lock fl, *flp = &fl;

	lease_init(filp, F_UNLCK, flp);

	return vfs_setlease(filp, F_UNLCK, &flp);
}

static int do_fcntl_add_lease(unsigned int fd, struct file *filp, long arg)
{
	struct file_lock *fl, *ret;
	struct fasync_struct *new;
	int error;

	fl = lease_alloc(filp, arg);
	if (IS_ERR(fl))
		return PTR_ERR(fl);

	new = fasync_alloc();
	if (!new) {
		locks_free_lock(fl);
		return -ENOMEM;
	}
	ret = fl;
	lock_flocks();
	error = __vfs_setlease(filp, arg, &ret);
	if (error) {
		unlock_flocks();
		locks_free_lock(fl);
		goto out_free_fasync;
	}
	if (ret != fl)
		locks_free_lock(fl);

	/*
	 * fasync_insert_entry() returns the old entry if any.
	 * If there was no old entry, then it used 'new' and
	 * inserted it into the fasync list. Clear new so that
	 * we don't release it here.
	 */
	if (!fasync_insert_entry(fd, filp, &ret->fl_fasync, new))
		new = NULL;

	error = __f_setown(filp, task_pid(current), PIDTYPE_PID, 0);
	unlock_flocks();

out_free_fasync:
	if (new)
		fasync_free(new);
	return error;
}

/**
 *	fcntl_setlease	-	sets a lease on an open file
 *	@fd: open file descriptor
 *	@filp: file pointer
 *	@arg: type of lease to obtain
 *
 *	Call this fcntl to establish a lease on the file.
 *	Note that you also need to call %F_SETSIG to
 *	receive a signal when the lease is broken.
 */
int fcntl_setlease(unsigned int fd, struct file *filp, long arg)
{
	if (arg == F_UNLCK)
		return do_fcntl_delete_lease(filp);
	return do_fcntl_add_lease(fd, filp, arg);
}
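
/*
 * Userspace sketch (editor's illustration, error handling omitted): taking
 * a read lease and arranging a real-time signal for the break notification,
 * assuming fd is open read-only on a regular file:
 *
 *	fcntl(fd, F_SETSIG, SIGRTMIN);
 *	fcntl(fd, F_SETLEASE, F_RDLCK);
 *	... on SIGRTMIN, flush cached state, then release:
 *	fcntl(fd, F_SETLEASE, F_UNLCK);
 */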

/**
 * flock_lock_file_wait - Apply a FLOCK-style lock to a file
 * @filp: The file to apply the lock to
 * @fl: The lock to be applied
 *
 * Add a FLOCK style lock to a file.
 */
int flock_lock_file_wait(struct file *filp, struct file_lock *fl)
{
	int error;
	might_sleep();
	for (;;) {
		error = flock_lock_file(filp, fl);
		if (error != FILE_LOCK_DEFERRED)
			break;
		error = wait_event_interruptible(fl->fl_wait, !fl->fl_next);
		if (!error)
			continue;

		locks_delete_block(fl);
		break;
	}
	return error;
}

EXPORT_SYMBOL(flock_lock_file_wait);

/**
 *	sys_flock: - flock() system call.
 *	@fd: the file descriptor to lock.
 *	@cmd: the type of lock to apply.
 *
 *	Apply a %FL_FLOCK style lock to an open file descriptor.
 *	The @cmd can be one of
 *
 *	%LOCK_SH -- a shared lock.
 *
 *	%LOCK_EX -- an exclusive lock.
 *
 *	%LOCK_UN -- remove an existing lock.
 *
 *	%LOCK_MAND -- a `mandatory' flock.  This exists to emulate Windows Share Modes.
 *
 *	%LOCK_MAND can be combined with %LOCK_READ or %LOCK_WRITE to allow other
 *	processes read and write access respectively.
 */
SYSCALL_DEFINE2(flock, unsigned int, fd, unsigned int, cmd)
{
	struct file *filp;
	struct file_lock *lock;
	int can_sleep, unlock;
	int error;

	error = -EBADF;
	filp = fget(fd);
	if (!filp)
		goto out;

	can_sleep = !(cmd & LOCK_NB);
	cmd &= ~LOCK_NB;
	unlock = (cmd == LOCK_UN);

	if (!unlock && !(cmd & LOCK_MAND) &&
	    !(filp->f_mode & (FMODE_READ|FMODE_WRITE)))
		goto out_putf;

	error = flock_make_lock(filp, &lock, cmd);
	if (error)
		goto out_putf;
	if (can_sleep)
		lock->fl_flags |= FL_SLEEP;

	error = security_file_lock(filp, lock->fl_type);
	if (error)
		goto out_free;

	if (filp->f_op && filp->f_op->flock)
		error = filp->f_op->flock(filp,
					  (can_sleep) ? F_SETLKW : F_SETLK,
					  lock);
	else
		error = flock_lock_file_wait(filp, lock);

 out_free:
	locks_free_lock(lock);

 out_putf:
	fput(filp);
 out:
	return error;
}
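
/*
 * Usage sketch (editor's illustration): a non-blocking exclusive request
 * that backs off when the lock is contended; handle_contention() is a
 * hypothetical caller-supplied helper:
 *
 *	if (flock(fd, LOCK_EX | LOCK_NB) == -1 && errno == EWOULDBLOCK)
 *		handle_contention();
 */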

/**
 * vfs_test_lock - test file byte range lock
 * @filp: The file to test lock for
 * @fl: The lock to test; also used to hold result
 *
 * Returns -ERRNO on failure.  Indicates presence of conflicting lock by
 * setting conf->fl_type to something other than F_UNLCK.
 */
int vfs_test_lock(struct file *filp, struct file_lock *fl)
{
	if (filp->f_op && filp->f_op->lock)
		return filp->f_op->lock(filp, F_GETLK, fl);
	posix_test_lock(filp, fl);
	return 0;
}
EXPORT_SYMBOL_GPL(vfs_test_lock);

static int posix_lock_to_flock(struct flock *flock, struct file_lock *fl)
{
	flock->l_pid = fl->fl_pid;
#if BITS_PER_LONG == 32
	/*
	 * Make sure we can represent the posix lock via
	 * legacy 32bit flock.
	 */
	if (fl->fl_start > OFFT_OFFSET_MAX)
		return -EOVERFLOW;
	if (fl->fl_end != OFFSET_MAX && fl->fl_end > OFFT_OFFSET_MAX)
		return -EOVERFLOW;
#endif
	flock->l_start = fl->fl_start;
	flock->l_len = fl->fl_end == OFFSET_MAX ? 0 :
		fl->fl_end - fl->fl_start + 1;
	flock->l_whence = 0;
	flock->l_type = fl->fl_type;
	return 0;
}

#if BITS_PER_LONG == 32
static void posix_lock_to_flock64(struct flock64 *flock, struct file_lock *fl)
{
	flock->l_pid = fl->fl_pid;
	flock->l_start = fl->fl_start;
	flock->l_len = fl->fl_end == OFFSET_MAX ? 0 :
		fl->fl_end - fl->fl_start + 1;
	flock->l_whence = 0;
	flock->l_type = fl->fl_type;
}
#endif

/* Report the first existing lock that would conflict with l.
 * This implements the F_GETLK command of fcntl().
 */
int fcntl_getlk(struct file *filp, struct flock __user *l)
{
	struct file_lock file_lock;
	struct flock flock;
	int error;

	error = -EFAULT;
	if (copy_from_user(&flock, l, sizeof(flock)))
		goto out;
	error = -EINVAL;
	if ((flock.l_type != F_RDLCK) && (flock.l_type != F_WRLCK))
		goto out;

	error = flock_to_posix_lock(filp, &file_lock, &flock);
	if (error)
		goto out;

	error = vfs_test_lock(filp, &file_lock);
	if (error)
		goto out;

	flock.l_type = file_lock.fl_type;
	if (file_lock.fl_type != F_UNLCK) {
		error = posix_lock_to_flock(&flock, &file_lock);
		if (error)
			goto out;
	}
	error = -EFAULT;
	if (!copy_to_user(l, &flock, sizeof(flock)))
		error = 0;
out:
	return error;
}

/**
 * vfs_lock_file - file byte range lock
 * @filp: The file to apply the lock to
 * @cmd: type of locking operation (F_SETLK, F_GETLK, etc.)
 * @fl: The lock to be applied
 * @conf: Place to return a copy of the conflicting lock, if found.
 *
 * A caller that doesn't care about the conflicting lock may pass NULL
 * as the final argument.
 *
 * If the filesystem defines a private ->lock() method, then @conf will
 * be left unchanged; so a caller that cares should initialize it to
 * some acceptable default.
 *
 * To avoid blocking kernel daemons, such as lockd, that need to acquire POSIX
 * locks, the ->lock() interface may return asynchronously, before the lock has
 * been granted or denied by the underlying filesystem, if (and only if)
 * lm_grant is set. Callers expecting ->lock() to return asynchronously
 * will only use F_SETLK, not F_SETLKW; they will set FL_SLEEP if (and only if)
 * the request is for a blocking lock. When ->lock() does return asynchronously,
 * it must return FILE_LOCK_DEFERRED, and call ->lm_grant() when the lock
 * request completes.
 * If the request is for a non-blocking lock, the filesystem should return
 * FILE_LOCK_DEFERRED, then try to get the lock and call the callback routine
 * with the result. If the request timed out, the callback routine will return
 * a nonzero return code and the filesystem should release the lock. The
 * filesystem is also responsible for keeping a corresponding posix lock when
 * it grants a lock, so the VFS can find out which locks are locally held and
 * do the correct lock cleanup when required.
 * The underlying filesystem must not drop the kernel lock or call
 * ->lm_grant() before returning to the caller with a FILE_LOCK_DEFERRED
 * return code.
 */
int vfs_lock_file(struct file *filp, unsigned int cmd, struct file_lock *fl, struct file_lock *conf)
{
	if (filp->f_op && filp->f_op->lock)
		return filp->f_op->lock(filp, cmd, fl);
	else
		return posix_lock_file(filp, fl, conf);
}
EXPORT_SYMBOL_GPL(vfs_lock_file);
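
/*
 * Deferred-lock sketch (editor's illustration): a filesystem ->lock()
 * method that grants locks asynchronously follows roughly this shape;
 * myfs_queue_lock() and myfs_test_lock() are hypothetical helpers:
 *
 *	static int myfs_lock(struct file *filp, int cmd, struct file_lock *fl)
 *	{
 *		if (IS_GETLK(cmd))
 *			return myfs_test_lock(filp, fl);
 *		myfs_queue_lock(filp, fl);
 *		return FILE_LOCK_DEFERRED;
 *	}
 *
 * When the remote result arrives, the filesystem reports the outcome
 * through fl->fl_lmops->lm_grant(), per the rules documented above.
 */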
1809 
1810 static int do_lock_file_wait(struct file *filp, unsigned int cmd,
1811 			     struct file_lock *fl)
1812 {
1813 	int error;
1814 
1815 	error = security_file_lock(filp, fl->fl_type);
1816 	if (error)
1817 		return error;
1818 
1819 	for (;;) {
1820 		error = vfs_lock_file(filp, cmd, fl, NULL);
1821 		if (error != FILE_LOCK_DEFERRED)
1822 			break;
1823 		error = wait_event_interruptible(fl->fl_wait, !fl->fl_next);
1824 		if (!error)
1825 			continue;
1826 
1827 		locks_delete_block(fl);
1828 		break;
1829 	}
1830 
1831 	return error;
1832 }
1833 
1834 /* Apply the lock described by l to an open file descriptor.
1835  * This implements both the F_SETLK and F_SETLKW commands of fcntl().
1836  */
1837 int fcntl_setlk(unsigned int fd, struct file *filp, unsigned int cmd,
1838 		struct flock __user *l)
1839 {
1840 	struct file_lock *file_lock = locks_alloc_lock();
1841 	struct flock flock;
1842 	struct inode *inode;
1843 	struct file *f;
1844 	int error;
1845 
1846 	if (file_lock == NULL)
1847 		return -ENOLCK;
1848 
1849 	/*
1850 	 * This might block, so we do it before checking the inode.
1851 	 */
1852 	error = -EFAULT;
1853 	if (copy_from_user(&flock, l, sizeof(flock)))
1854 		goto out;
1855 
1856 	inode = filp->f_path.dentry->d_inode;
1857 
1858 	/* Don't allow mandatory locks on files that may be memory mapped
1859 	 * and shared.
1860 	 */
1861 	if (mandatory_lock(inode) && mapping_writably_mapped(filp->f_mapping)) {
1862 		error = -EAGAIN;
1863 		goto out;
1864 	}
1865 
1866 again:
1867 	error = flock_to_posix_lock(filp, file_lock, &flock);
1868 	if (error)
1869 		goto out;
1870 	if (cmd == F_SETLKW) {
1871 		file_lock->fl_flags |= FL_SLEEP;
1872 	}
1873 
1874 	error = -EBADF;
1875 	switch (flock.l_type) {
1876 	case F_RDLCK:
1877 		if (!(filp->f_mode & FMODE_READ))
1878 			goto out;
1879 		break;
1880 	case F_WRLCK:
1881 		if (!(filp->f_mode & FMODE_WRITE))
1882 			goto out;
1883 		break;
1884 	case F_UNLCK:
1885 		break;
1886 	default:
1887 		error = -EINVAL;
1888 		goto out;
1889 	}
1890 
1891 	error = do_lock_file_wait(filp, cmd, file_lock);
1892 
1893 	/*
1894 	 * Attempt to detect a close/fcntl race and recover by
1895 	 * releasing the lock that was just acquired.
1896 	 */
1897 	/*
1898 	 * We need the spin_lock here - it prevents reordering between the
1899 	 * update of inode->i_flock and the check for it done in close().
1900 	 * rcu_read_lock() wouldn't do.
1901 	 */
1902 	spin_lock(&current->files->file_lock);
1903 	f = fcheck(fd);
1904 	spin_unlock(&current->files->file_lock);
1905 	if (!error && f != filp && flock.l_type != F_UNLCK) {
1906 		flock.l_type = F_UNLCK;
1907 		goto again;
1908 	}
1909 
1910 out:
1911 	locks_free_lock(file_lock);
1912 	return error;
1913 }
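
/*
 * Illustrative userspace sketch (not kernel code): driving the
 * F_SETLK/F_SETLKW path above.  Whole-file write lock; error handling
 * trimmed.
 */
#if 0
#include <fcntl.h>

static int lock_whole_file(int fd, int blocking)
{
	struct flock fl = {
		.l_type   = F_WRLCK,	/* needs the fd open for writing */
		.l_whence = SEEK_SET,
		.l_start  = 0,
		.l_len    = 0,		/* to EOF, like OFFSET_MAX above */
	};

	/* F_SETLK fails on conflict; F_SETLKW sleeps via FL_SLEEP above. */
	return fcntl(fd, blocking ? F_SETLKW : F_SETLK, &fl);
}
#endif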
1914 
1915 #if BITS_PER_LONG == 32
1916 /* Report the first existing lock that would conflict with l.
1917  * This implements the F_GETLK64 command of fcntl().
1918  */
1919 int fcntl_getlk64(struct file *filp, struct flock64 __user *l)
1920 {
1921 	struct file_lock file_lock;
1922 	struct flock64 flock;
1923 	int error;
1924 
1925 	error = -EFAULT;
1926 	if (copy_from_user(&flock, l, sizeof(flock)))
1927 		goto out;
1928 	error = -EINVAL;
1929 	if ((flock.l_type != F_RDLCK) && (flock.l_type != F_WRLCK))
1930 		goto out;
1931 
1932 	error = flock64_to_posix_lock(filp, &file_lock, &flock);
1933 	if (error)
1934 		goto out;
1935 
1936 	error = vfs_test_lock(filp, &file_lock);
1937 	if (error)
1938 		goto out;
1939 
1940 	flock.l_type = file_lock.fl_type;
1941 	if (file_lock.fl_type != F_UNLCK)
1942 		posix_lock_to_flock64(&flock, &file_lock);
1943 
1944 	error = -EFAULT;
1945 	if (!copy_to_user(l, &flock, sizeof(flock)))
1946 		error = 0;
1947 
1948 out:
1949 	return error;
1950 }
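
/*
 * Illustrative userspace sketch (not kernel code): probing for a
 * conflict via F_GETLK (large-file builds reach fcntl_getlk64() above
 * through F_GETLK64).  Returns 0 if the range is free, the pid of one
 * conflicting lock holder otherwise, or -1 on error.
 */
#if 0
#include <fcntl.h>

static pid_t who_blocks_write(int fd)
{
	struct flock fl = {
		.l_type   = F_WRLCK,	/* "would a write lock fit?" */
		.l_whence = SEEK_SET,
		.l_start  = 0,
		.l_len    = 0,
	};

	if (fcntl(fd, F_GETLK, &fl) == -1)
		return -1;
	return (fl.l_type == F_UNLCK) ? 0 : fl.l_pid;
}
#endif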
1951 
1952 /* Apply the lock described by l to an open file descriptor.
1953  * This implements both the F_SETLK64 and F_SETLKW64 commands of fcntl().
1954  */
1955 int fcntl_setlk64(unsigned int fd, struct file *filp, unsigned int cmd,
1956 		struct flock64 __user *l)
1957 {
1958 	struct file_lock *file_lock = locks_alloc_lock();
1959 	struct flock64 flock;
1960 	struct inode *inode;
1961 	struct file *f;
1962 	int error;
1963 
1964 	if (file_lock == NULL)
1965 		return -ENOLCK;
1966 
1967 	/*
1968 	 * This might block, so we do it before checking the inode.
1969 	 */
1970 	error = -EFAULT;
1971 	if (copy_from_user(&flock, l, sizeof(flock)))
1972 		goto out;
1973 
1974 	inode = filp->f_path.dentry->d_inode;
1975 
1976 	/* Don't allow mandatory locks on files that may be memory mapped
1977 	 * and shared.
1978 	 */
1979 	if (mandatory_lock(inode) && mapping_writably_mapped(filp->f_mapping)) {
1980 		error = -EAGAIN;
1981 		goto out;
1982 	}
1983 
1984 again:
1985 	error = flock64_to_posix_lock(filp, file_lock, &flock);
1986 	if (error)
1987 		goto out;
1988 	if (cmd == F_SETLKW64) {
1989 		file_lock->fl_flags |= FL_SLEEP;
1990 	}
1991 
1992 	error = -EBADF;
1993 	switch (flock.l_type) {
1994 	case F_RDLCK:
1995 		if (!(filp->f_mode & FMODE_READ))
1996 			goto out;
1997 		break;
1998 	case F_WRLCK:
1999 		if (!(filp->f_mode & FMODE_WRITE))
2000 			goto out;
2001 		break;
2002 	case F_UNLCK:
2003 		break;
2004 	default:
2005 		error = -EINVAL;
2006 		goto out;
2007 	}
2008 
2009 	error = do_lock_file_wait(filp, cmd, file_lock);
2010 
2011 	/*
2012 	 * Attempt to detect a close/fcntl race and recover by
2013 	 * releasing the lock that was just acquired.
2014 	 */
2015 	spin_lock(&current->files->file_lock);
2016 	f = fcheck(fd);
2017 	spin_unlock(&current->files->file_lock);
2018 	if (!error && f != filp && flock.l_type != F_UNLCK) {
2019 		flock.l_type = F_UNLCK;
2020 		goto again;
2021 	}
2022 
2023 out:
2024 	locks_free_lock(file_lock);
2025 	return error;
2026 }
2027 #endif /* BITS_PER_LONG == 32 */
2028 
2029 /*
2030  * This function is called when the file is being removed
2031  * from the task's fd array.  POSIX locks belonging to this task
2032  * are deleted at this time.
2033  */
2034 void locks_remove_posix(struct file *filp, fl_owner_t owner)
2035 {
2036 	struct file_lock lock;
2037 
2038 	/*
2039 	 * If there are no locks held on this file, we don't need to call
2040 	 * posix_lock_file().  Another process could be setting a lock on this
2041 	 * file at the same time, but we wouldn't remove that lock anyway.
2042 	 */
2043 	if (!filp->f_path.dentry->d_inode->i_flock)
2044 		return;
2045 
2046 	lock.fl_type = F_UNLCK;
2047 	lock.fl_flags = FL_POSIX | FL_CLOSE;
2048 	lock.fl_start = 0;
2049 	lock.fl_end = OFFSET_MAX;
2050 	lock.fl_owner = owner;
2051 	lock.fl_pid = current->tgid;
2052 	lock.fl_file = filp;
2053 	lock.fl_ops = NULL;
2054 	lock.fl_lmops = NULL;
2055 
2056 	vfs_lock_file(filp, F_SETLK, &lock, NULL);
2057 
2058 	if (lock.fl_ops && lock.fl_ops->fl_release_private)
2059 		lock.fl_ops->fl_release_private(&lock);
2060 }
2061 
2062 EXPORT_SYMBOL(locks_remove_posix);
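
/*
 * Illustrative userspace sketch (not kernel code): because the cleanup
 * above runs whenever a file is removed from the task's fd array,
 * closing *any* descriptor for a file drops all of the process's POSIX
 * locks on it, even those set through another fd.
 */
#if 0
#include <fcntl.h>
#include <unistd.h>

static void posix_close_gotcha(const char *path)
{
	int fd1 = open(path, O_RDWR);
	int fd2 = open(path, O_RDWR);		/* same file, second fd */
	struct flock fl = { .l_type = F_WRLCK, .l_whence = SEEK_SET };

	fcntl(fd1, F_SETLK, &fl);	/* lock taken through fd1... */
	close(fd2);			/* ...is already gone here */
}
#endif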
2063 
2064 /*
2065  * This function is called on the last close of an open file.
2066  */
2067 void locks_remove_flock(struct file *filp)
2068 {
2069 	struct inode * inode = filp->f_path.dentry->d_inode;
2070 	struct file_lock *fl;
2071 	struct file_lock **before;
2072 
2073 	if (!inode->i_flock)
2074 		return;
2075 
2076 	if (filp->f_op && filp->f_op->flock) {
2077 		struct file_lock fl = {
2078 			.fl_pid = current->tgid,
2079 			.fl_file = filp,
2080 			.fl_flags = FL_FLOCK,
2081 			.fl_type = F_UNLCK,
2082 			.fl_end = OFFSET_MAX,
2083 		};
2084 		filp->f_op->flock(filp, F_SETLKW, &fl);
2085 		if (fl.fl_ops && fl.fl_ops->fl_release_private)
2086 			fl.fl_ops->fl_release_private(&fl);
2087 	}
2088 
2089 	lock_flocks();
2090 	before = &inode->i_flock;
2091 
2092 	while ((fl = *before) != NULL) {
2093 		if (fl->fl_file == filp) {
2094 			if (IS_FLOCK(fl)) {
2095 				locks_delete_lock(before);
2096 				continue;
2097 			}
2098 			if (IS_LEASE(fl)) {
2099 				lease_modify(before, F_UNLCK);
2100 				continue;
2101 			}
2102 			/* What? */
2103 			BUG();
2104 		}
2105 		before = &fl->fl_next;
2106 	}
2107 	unlock_flocks();
2108 }
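
/*
 * Illustrative userspace sketch (not kernel code): flock() locks, by
 * contrast, live with the open file description and only disappear at
 * the last close, which is when locks_remove_flock() runs.
 */
#if 0
#include <sys/file.h>
#include <unistd.h>

static void flock_example(int fd)
{
	flock(fd, LOCK_EX);	/* exclusive; blocks until granted */
	/* ... critical section ... */
	close(fd);	/* released here only if this is the last close */
}
#endif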
2109 
2110 /**
2111  *	posix_unblock_lock - stop waiting for a file lock
2112  *	@filp:   how the file was opened
2113  *	@waiter: the lock which was waiting
2114  *
2115  *	lockd needs to block waiting for locks, and sometimes to stop waiting.
2116  */
2117 int
2118 posix_unblock_lock(struct file *filp, struct file_lock *waiter)
2119 {
2120 	int status = 0;
2121 
2122 	lock_flocks();
2123 	if (waiter->fl_next)
2124 		__locks_delete_block(waiter);
2125 	else
2126 		status = -ENOENT;
2127 	unlock_flocks();
2128 	return status;
2129 }
2130 
2131 EXPORT_SYMBOL(posix_unblock_lock);
2132 
2133 /**
2134  * vfs_cancel_lock - file byte range unblock lock
2135  * @filp: The file to apply the unblock to
2136  * @fl: The lock to be unblocked
2137  *
2138  * Used by lock managers to cancel blocked requests
2139  */
2140 int vfs_cancel_lock(struct file *filp, struct file_lock *fl)
2141 {
2142 	if (filp->f_op && filp->f_op->lock)
2143 		return filp->f_op->lock(filp, F_CANCELLK, fl);
2144 	return 0;
2145 }
2146 
2147 EXPORT_SYMBOL_GPL(vfs_cancel_lock);
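
/*
 * Illustrative sketch (hypothetical caller, modelled on a lock manager
 * such as lockd): tearing down a blocking request.  vfs_cancel_lock()
 * asks the filesystem to abort a deferred ->lock(); posix_unblock_lock()
 * removes a waiter that is still queued locally.
 */
#if 0
static void example_cancel_blocked(struct file *filp, struct file_lock *waiter)
{
	vfs_cancel_lock(filp, waiter);
	/* -ENOENT means the wait had already completed */
	if (posix_unblock_lock(filp, waiter) == -ENOENT)
		example_handle_granted(waiter);	/* hypothetical */
}
#endif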
2148 
2149 #ifdef CONFIG_PROC_FS
2150 #include <linux/proc_fs.h>
2151 #include <linux/seq_file.h>
2152 
2153 static void lock_get_status(struct seq_file *f, struct file_lock *fl,
2154 			    loff_t id, char *pfx)
2155 {
2156 	struct inode *inode = NULL;
2157 	unsigned int fl_pid;
2158 
2159 	if (fl->fl_nspid)
2160 		fl_pid = pid_vnr(fl->fl_nspid);
2161 	else
2162 		fl_pid = fl->fl_pid;
2163 
2164 	if (fl->fl_file != NULL)
2165 		inode = fl->fl_file->f_path.dentry->d_inode;
2166 
2167 	seq_printf(f, "%lld:%s ", id, pfx);
2168 	if (IS_POSIX(fl)) {
2169 		seq_printf(f, "%6s %s ",
2170 			     (fl->fl_flags & FL_ACCESS) ? "ACCESS" : "POSIX ",
2171 			     (inode == NULL) ? "*NOINODE*" :
2172 			     mandatory_lock(inode) ? "MANDATORY" : "ADVISORY ");
2173 	} else if (IS_FLOCK(fl)) {
2174 		if (fl->fl_type & LOCK_MAND) {
2175 			seq_printf(f, "FLOCK  MSNFS     ");
2176 		} else {
2177 			seq_printf(f, "FLOCK  ADVISORY  ");
2178 		}
2179 	} else if (IS_LEASE(fl)) {
2180 		seq_printf(f, "LEASE  ");
2181 		if (lease_breaking(fl))
2182 			seq_printf(f, "BREAKING  ");
2183 		else if (fl->fl_file)
2184 			seq_printf(f, "ACTIVE    ");
2185 		else
2186 			seq_printf(f, "BREAKER   ");
2187 	} else {
2188 		seq_printf(f, "UNKNOWN UNKNOWN  ");
2189 	}
2190 	if (fl->fl_type & LOCK_MAND) {
2191 		seq_printf(f, "%s ",
2192 			       (fl->fl_type & LOCK_READ)
2193 			       ? (fl->fl_type & LOCK_WRITE) ? "RW   " : "READ "
2194 			       : (fl->fl_type & LOCK_WRITE) ? "WRITE" : "NONE ");
2195 	} else {
2196 		seq_printf(f, "%s ",
2197 			       (lease_breaking(fl))
2198 			       ? (fl->fl_type & F_UNLCK) ? "UNLCK" : "READ "
2199 			       : (fl->fl_type & F_WRLCK) ? "WRITE" : "READ ");
2200 	}
2201 	if (inode) {
2202 #ifdef WE_CAN_BREAK_LSLK_NOW
2203 		seq_printf(f, "%d %s:%ld ", fl_pid,
2204 				inode->i_sb->s_id, inode->i_ino);
2205 #else
2206 		/* userspace relies on this representation of dev_t ;-( */
2207 		seq_printf(f, "%d %02x:%02x:%ld ", fl_pid,
2208 				MAJOR(inode->i_sb->s_dev),
2209 				MINOR(inode->i_sb->s_dev), inode->i_ino);
2210 #endif
2211 	} else {
2212 		seq_printf(f, "%d <none>:0 ", fl_pid);
2213 	}
2214 	if (IS_POSIX(fl)) {
2215 		if (fl->fl_end == OFFSET_MAX)
2216 			seq_printf(f, "%Ld EOF\n", fl->fl_start);
2217 		else
2218 			seq_printf(f, "%Ld %Ld\n", fl->fl_start, fl->fl_end);
2219 	} else {
2220 		seq_printf(f, "0 EOF\n");
2221 	}
2222 }
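
/*
 * Example of the /proc/locks output produced above (values invented;
 * fields are ordinal, class, property, access, pid, maj:min:ino,
 * start, end; " ->" marks a blocked waiter):
 *
 *	1: POSIX  ADVISORY  WRITE 1234 08:01:54321 0 EOF
 *	2: FLOCK  ADVISORY  WRITE 1235 08:01:54322 0 EOF
 *	2: -> FLOCK  ADVISORY  WRITE 1236 08:01:54322 0 EOF
 */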
2223 
2224 static int locks_show(struct seq_file *f, void *v)
2225 {
2226 	struct file_lock *fl, *bfl;
2227 
2228 	fl = list_entry(v, struct file_lock, fl_link);
2229 
2230 	lock_get_status(f, fl, *((loff_t *)f->private), "");
2231 
2232 	list_for_each_entry(bfl, &fl->fl_block, fl_block)
2233 		lock_get_status(f, bfl, *((loff_t *)f->private), " ->");
2234 
2235 	return 0;
2236 }
2237 
2238 static void *locks_start(struct seq_file *f, loff_t *pos)
2239 {
2240 	loff_t *p = f->private;
2241 
2242 	lock_flocks();
2243 	*p = (*pos + 1);
2244 	return seq_list_start(&file_lock_list, *pos);
2245 }
2246 
2247 static void *locks_next(struct seq_file *f, void *v, loff_t *pos)
2248 {
2249 	loff_t *p = f->private;
2250 	++*p;
2251 	return seq_list_next(v, &file_lock_list, pos);
2252 }
2253 
2254 static void locks_stop(struct seq_file *f, void *v)
2255 {
2256 	unlock_flocks();
2257 }
2258 
2259 static const struct seq_operations locks_seq_operations = {
2260 	.start	= locks_start,
2261 	.next	= locks_next,
2262 	.stop	= locks_stop,
2263 	.show	= locks_show,
2264 };
2265 
2266 static int locks_open(struct inode *inode, struct file *filp)
2267 {
2268 	return seq_open_private(filp, &locks_seq_operations, sizeof(loff_t));
2269 }
2270 
2271 static const struct file_operations proc_locks_operations = {
2272 	.open		= locks_open,
2273 	.read		= seq_read,
2274 	.llseek		= seq_lseek,
2275 	.release	= seq_release_private,
2276 };
2277 
2278 static int __init proc_locks_init(void)
2279 {
2280 	proc_create("locks", 0, NULL, &proc_locks_operations);
2281 	return 0;
2282 }
2283 module_init(proc_locks_init);
2284 #endif
2285 
2286 /**
2287  *	lock_may_read - checks that no lock prohibits a read of the region
2288  *	@inode: the inode that is being read
2289  *	@start: the first byte to read
2290  *	@len: the number of bytes to read
2291  *
2292  *	Emulates Windows locking requirements.  Whole-file
2293  *	mandatory locks (share modes) can prohibit a read and
2294  *	byte-range POSIX locks can prohibit a read if they overlap.
2295  *
2296  *	N.B. this function is only ever called
2297  *	from knfsd and ownership of locks is never checked.
2298  */
2299 int lock_may_read(struct inode *inode, loff_t start, unsigned long len)
2300 {
2301 	struct file_lock *fl;
2302 	int result = 1;
2303 	lock_flocks();
2304 	for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next) {
2305 		if (IS_POSIX(fl)) {
2306 			if (fl->fl_type == F_RDLCK)
2307 				continue;
2308 			if ((fl->fl_end < start) || (fl->fl_start > (start + len)))
2309 				continue;
2310 		} else if (IS_FLOCK(fl)) {
2311 			if (!(fl->fl_type & LOCK_MAND))
2312 				continue;
2313 			if (fl->fl_type & LOCK_READ)
2314 				continue;
2315 		} else
2316 			continue;
2317 		result = 0;
2318 		break;
2319 	}
2320 	unlock_flocks();
2321 	return result;
2322 }
2323 
2324 EXPORT_SYMBOL(lock_may_read);
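
/*
 * Illustrative userspace sketch: the Windows-style share modes these
 * checks emulate are taken with flock(LOCK_MAND | ...).  A holder of
 * LOCK_MAND | LOCK_READ lets lock_may_read() succeed but makes
 * lock_may_write() fail.  Note that some C libraries do not expose
 * LOCK_MAND; it may need to be defined from the kernel headers.
 */
#if 0
#include <sys/file.h>

static int take_share_mode_read(int fd)
{
	/* mandatory share mode: others may read, nobody else may write */
	return flock(fd, LOCK_MAND | LOCK_READ);
}
#endif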
2325 
2326 /**
2327  *	lock_may_write - checks that no lock prohibits a write to the region
2328  *	@inode: the inode that is being written
2329  *	@start: the first byte to write
2330  *	@len: the number of bytes to write
2331  *
2332  *	Emulates Windows locking requirements.  Whole-file
2333  *	mandatory locks (share modes) can prohibit a write and
2334  *	byte-range POSIX locks can prohibit a write if they overlap.
2335  *
2336  *	N.B. this function is only ever called
2337  *	from knfsd and ownership of locks is never checked.
2338  */
2339 int lock_may_write(struct inode *inode, loff_t start, unsigned long len)
2340 {
2341 	struct file_lock *fl;
2342 	int result = 1;
2343 	lock_flocks();
2344 	for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next) {
2345 		if (IS_POSIX(fl)) {
2346 			if ((fl->fl_end < start) || (fl->fl_start > (start + len)))
2347 				continue;
2348 		} else if (IS_FLOCK(fl)) {
2349 			if (!(fl->fl_type & LOCK_MAND))
2350 				continue;
2351 			if (fl->fl_type & LOCK_WRITE)
2352 				continue;
2353 		} else
2354 			continue;
2355 		result = 0;
2356 		break;
2357 	}
2358 	unlock_flocks();
2359 	return result;
2360 }
2361 
2362 EXPORT_SYMBOL(lock_may_write);
2363 
2364 static int __init filelock_init(void)
2365 {
2366 	filelock_cache = kmem_cache_create("file_lock_cache",
2367 			sizeof(struct file_lock), 0, SLAB_PANIC, NULL);
2368 
2369 	return 0;
2370 }
2371 
2372 core_initcall(filelock_init);
2373