1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * linux/fs/lockd/svclock.c
4  *
5  * Handling of server-side locks, mostly of the blocked variety.
6  * This is the ugliest part of lockd because we tread on very thin ice.
7  * GRANT and CANCEL calls may get stuck, meet in mid-flight, etc.
8  * IMNSHO introducing the grant callback into the NLM protocol was one
9  * of the worst ideas Sun ever had. Except maybe for the idea of doing
10  * NFS file locking at all.
11  *
12  * I'm trying hard to avoid race conditions by protecting most accesses
13  * to a file's list of blocked locks through the per-file f_mutex. The global
14  * list of blocked locks is not protected in this fashion however.
15  * Therefore, some functions (such as the RPC callback for the async grant
16  * call) move blocked locks towards the head of the list *while some other
17  * process might be traversing it*. This should not be a problem in
18  * practice, because this will only cause functions traversing the list
19  * to visit some blocks twice.
20  *
21  * Copyright (C) 1996, Olaf Kirch <okir@monad.swb.de>
22  */
23 
24 #include <linux/types.h>
25 #include <linux/slab.h>
26 #include <linux/errno.h>
27 #include <linux/kernel.h>
28 #include <linux/sched.h>
29 #include <linux/sunrpc/clnt.h>
30 #include <linux/sunrpc/svc_xprt.h>
31 #include <linux/lockd/nlm.h>
32 #include <linux/lockd/lockd.h>
33 #include <linux/kthread.h>
34 #include <linux/exportfs.h>
35 
36 #define NLMDBG_FACILITY		NLMDBG_SVCLOCK
37 
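/* Pre-v4 NLM has no distinct DEADLCK status; without NLMv4 support, a
 * detected deadlock is reported as a plain "denied" result instead. */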
38 #ifdef CONFIG_LOCKD_V4
39 #define nlm_deadlock	nlm4_deadlock
40 #else
41 #define nlm_deadlock	nlm_lck_denied
42 #endif
43 
44 static void nlmsvc_release_block(struct nlm_block *block);
45 static void	nlmsvc_insert_block(struct nlm_block *block, unsigned long);
46 static void	nlmsvc_remove_block(struct nlm_block *block);
47 
48 static int nlmsvc_setgrantargs(struct nlm_rqst *call, struct nlm_lock *lock);
49 static void nlmsvc_freegrantargs(struct nlm_rqst *call);
50 static const struct rpc_call_ops nlmsvc_grant_ops;
51 
52 /*
53  * The list of blocked locks to retry
54  */
55 static LIST_HEAD(nlm_blocked);
56 static DEFINE_SPINLOCK(nlm_blocked_lock);
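/*
 * nlm_blocked is kept sorted by b_when, the jiffies value at which a block
 * should next be retried; blocks with b_when == NLM_NEVER sort to the tail
 * and are only revisited once something (a VFS unblock notification, a
 * deferred-lock callback or a GRANT_MSG completion) moves them forward again.
 * All insertions into and removals from the list happen under
 * nlm_blocked_lock.
 */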
57 
58 #if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
59 static const char *nlmdbg_cookie2a(const struct nlm_cookie *cookie)
60 {
61 	/*
62 	 * We can get away with a static buffer because this is only called
63 	 * from lockd, which is single-threaded.
64 	 */
65 	static char buf[2*NLM_MAXCOOKIELEN+1];
66 	unsigned int i, len = sizeof(buf);
67 	char *p = buf;
68 
69 	len--;	/* allow for trailing \0 */
70 	if (len < 3)
71 		return "???";
72 	for (i = 0 ; i < cookie->len ; i++) {
73 		if (len < 2) {
74 			strcpy(p-3, "...");
75 			break;
76 		}
77 		sprintf(p, "%02x", cookie->data[i]);
78 		p += 2;
79 		len -= 2;
80 	}
81 	*p = '\0';
82 
83 	return buf;
84 }
85 #endif
86 
87 /*
88  * Insert a blocked lock into the global list
89  */
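/* Must be called with nlm_blocked_lock held. */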
90 static void
91 nlmsvc_insert_block_locked(struct nlm_block *block, unsigned long when)
92 {
93 	struct nlm_block *b;
94 	struct list_head *pos;
95 
96 	dprintk("lockd: nlmsvc_insert_block(%p, %ld)\n", block, when);
97 	if (list_empty(&block->b_list)) {
98 		kref_get(&block->b_count);
99 	} else {
100 		list_del_init(&block->b_list);
101 	}
102 
103 	pos = &nlm_blocked;
104 	if (when != NLM_NEVER) {
105 		if ((when += jiffies) == NLM_NEVER)
106 			when++;
107 		list_for_each(pos, &nlm_blocked) {
108 			b = list_entry(pos, struct nlm_block, b_list);
109 			if (time_after(b->b_when, when) || b->b_when == NLM_NEVER)
110 				break;
111 		}
112 		/* On normal exit from the loop, pos == &nlm_blocked,
113 		 * so we will be adding to the end of the list - good
114 		 */
115 	}
116 
117 	list_add_tail(&block->b_list, pos);
118 	block->b_when = when;
119 }
120 
121 static void nlmsvc_insert_block(struct nlm_block *block, unsigned long when)
122 {
123 	spin_lock(&nlm_blocked_lock);
124 	nlmsvc_insert_block_locked(block, when);
125 	spin_unlock(&nlm_blocked_lock);
126 }
127 
128 /*
129  * Remove a block from the global list
130  */
131 static inline void
132 nlmsvc_remove_block(struct nlm_block *block)
133 {
134 	if (!list_empty(&block->b_list)) {
135 		spin_lock(&nlm_blocked_lock);
136 		list_del_init(&block->b_list);
137 		spin_unlock(&nlm_blocked_lock);
138 		nlmsvc_release_block(block);
139 	}
140 }
141 
142 /*
143  * Find a block for a given lock
144  */
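/*
 * The block returned (if any) carries an extra reference that the caller
 * must drop with nlmsvc_release_block().
 */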
145 static struct nlm_block *
146 nlmsvc_lookup_block(struct nlm_file *file, struct nlm_lock *lock)
147 {
148 	struct nlm_block	*block;
149 	struct file_lock	*fl;
150 
151 	dprintk("lockd: nlmsvc_lookup_block f=%p pd=%d %Ld-%Ld ty=%d\n",
152 				file, lock->fl.fl_pid,
153 				(long long)lock->fl.fl_start,
154 				(long long)lock->fl.fl_end, lock->fl.fl_type);
155 	list_for_each_entry(block, &nlm_blocked, b_list) {
156 		fl = &block->b_call->a_args.lock.fl;
157 		dprintk("lockd: check f=%p pd=%d %Ld-%Ld ty=%d cookie=%s\n",
158 				block->b_file, fl->fl_pid,
159 				(long long)fl->fl_start,
160 				(long long)fl->fl_end, fl->fl_type,
161 				nlmdbg_cookie2a(&block->b_call->a_args.cookie));
162 		if (block->b_file == file && nlm_compare_locks(fl, &lock->fl)) {
163 			kref_get(&block->b_count);
164 			return block;
165 		}
166 	}
167 
168 	return NULL;
169 }
170 
171 static inline int nlm_cookie_match(struct nlm_cookie *a, struct nlm_cookie *b)
172 {
173 	if (a->len != b->len)
174 		return 0;
175 	if (memcmp(a->data, b->data, a->len))
176 		return 0;
177 	return 1;
178 }
179 
180 /*
181  * Find a block with a given NLM cookie.
182  */
183 static inline struct nlm_block *
184 nlmsvc_find_block(struct nlm_cookie *cookie)
185 {
186 	struct nlm_block *block;
187 
188 	list_for_each_entry(block, &nlm_blocked, b_list) {
189 		if (nlm_cookie_match(&block->b_call->a_args.cookie, cookie))
190 			goto found;
191 	}
192 
193 	return NULL;
194 
195 found:
196 	dprintk("nlmsvc_find_block(%s): block=%p\n", nlmdbg_cookie2a(cookie), block);
197 	kref_get(&block->b_count);
198 	return block;
199 }
200 
201 /*
202  * Create a block and initialize it.
203  *
204  * Note: we explicitly set the cookie of the grant reply to that of
205  * the blocked lock request. The spec explicitly mentions that the client
206  * should _not_ rely on the callback containing the same cookie as the
207  * request, but (as I found out later) that's because some implementations
208  * do just this. Never mind the standards committees, they support our
209  * logging industries.
210  *
211  * 10 years later: I hope we can safely ignore these old and broken
212  * clients by now. Let's fix this so we can uniquely identify an incoming
213  * GRANTED_RES message by cookie, without having to rely on the client's IP
214  * address. --okir
215  */
216 static struct nlm_block *
217 nlmsvc_create_block(struct svc_rqst *rqstp, struct nlm_host *host,
218 		    struct nlm_file *file, struct nlm_lock *lock,
219 		    struct nlm_cookie *cookie)
220 {
221 	struct nlm_block	*block;
222 	struct nlm_rqst		*call = NULL;
223 
224 	call = nlm_alloc_call(host);
225 	if (call == NULL)
226 		return NULL;
227 
228 	/* Allocate memory for block, and initialize arguments */
229 	block = kzalloc(sizeof(*block), GFP_KERNEL);
230 	if (block == NULL)
231 		goto failed;
232 	kref_init(&block->b_count);
233 	INIT_LIST_HEAD(&block->b_list);
234 	INIT_LIST_HEAD(&block->b_flist);
235 
236 	if (!nlmsvc_setgrantargs(call, lock))
237 		goto failed_free;
238 
239 	/* Set notifier function for VFS, and init args */
240 	call->a_args.lock.fl.fl_flags |= FL_SLEEP;
241 	call->a_args.lock.fl.fl_lmops = &nlmsvc_lock_operations;
242 	nlmclnt_next_cookie(&call->a_args.cookie);
243 
244 	dprintk("lockd: created block %p...\n", block);
245 
246 	/* Create and initialize the block */
247 	block->b_daemon = rqstp->rq_server;
248 	block->b_host   = host;
249 	block->b_file   = file;
250 	file->f_count++;
251 
252 	/* Add to file's list of blocks */
253 	list_add(&block->b_flist, &file->f_blocks);
254 
255 	/* Set up RPC arguments for callback */
256 	block->b_call = call;
257 	call->a_flags   = RPC_TASK_ASYNC;
258 	call->a_block = block;
259 
260 	return block;
261 
262 failed_free:
263 	kfree(block);
264 failed:
265 	nlmsvc_release_call(call);
266 	return NULL;
267 }
268 
269 /*
270  * Delete a block.
271  * It is the caller's responsibility to check whether the file
272  * can be closed hereafter.
273  */
274 static int nlmsvc_unlink_block(struct nlm_block *block)
275 {
276 	int status;
277 	dprintk("lockd: unlinking block %p...\n", block);
278 
279 	/* Remove block from list */
280 	status = locks_delete_block(&block->b_call->a_args.lock.fl);
281 	nlmsvc_remove_block(block);
282 	return status;
283 }
284 
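/*
 * Final teardown of a block: called via kref_put_mutex() with the file's
 * f_mutex already held, so the block can be unlinked from the file's
 * f_blocks list before the mutex is dropped again below.
 */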
285 static void nlmsvc_free_block(struct kref *kref)
286 {
287 	struct nlm_block *block = container_of(kref, struct nlm_block, b_count);
288 	struct nlm_file		*file = block->b_file;
289 
290 	dprintk("lockd: freeing block %p...\n", block);
291 
292 	/* Remove block from file's list of blocks */
293 	list_del_init(&block->b_flist);
294 	mutex_unlock(&file->f_mutex);
295 
296 	nlmsvc_freegrantargs(block->b_call);
297 	nlmsvc_release_call(block->b_call);
298 	nlm_release_file(block->b_file);
299 	kfree(block);
300 }
301 
302 static void nlmsvc_release_block(struct nlm_block *block)
303 {
304 	if (block != NULL)
305 		kref_put_mutex(&block->b_count, nlmsvc_free_block, &block->b_file->f_mutex);
306 }
307 
308 /*
309  * Loop over all blocks and delete blocks held by
310  * a matching host.
311  */
312 void nlmsvc_traverse_blocks(struct nlm_host *host,
313 			struct nlm_file *file,
314 			nlm_host_match_fn_t match)
315 {
316 	struct nlm_block *block, *next;
317 
318 restart:
319 	mutex_lock(&file->f_mutex);
320 	list_for_each_entry_safe(block, next, &file->f_blocks, b_flist) {
321 		if (!match(block->b_host, host))
322 			continue;
323 		/* Do not destroy blocks that are not on
324 		 * the global retry list - why? */
325 		if (list_empty(&block->b_list))
326 			continue;
327 		kref_get(&block->b_count);
328 		mutex_unlock(&file->f_mutex);
329 		nlmsvc_unlink_block(block);
330 		nlmsvc_release_block(block);
331 		goto restart;
332 	}
333 	mutex_unlock(&file->f_mutex);
334 }
335 
336 static struct nlm_lockowner *
337 nlmsvc_get_lockowner(struct nlm_lockowner *lockowner)
338 {
339 	refcount_inc(&lockowner->count);
340 	return lockowner;
341 }
342 
343 void nlmsvc_put_lockowner(struct nlm_lockowner *lockowner)
344 {
345 	if (!refcount_dec_and_lock(&lockowner->count, &lockowner->host->h_lock))
346 		return;
347 	list_del(&lockowner->list);
348 	spin_unlock(&lockowner->host->h_lock);
349 	nlmsvc_release_host(lockowner->host);
350 	kfree(lockowner);
351 }
352 
353 static struct nlm_lockowner *__nlmsvc_find_lockowner(struct nlm_host *host, pid_t pid)
354 {
355 	struct nlm_lockowner *lockowner;
356 	list_for_each_entry(lockowner, &host->h_lockowners, list) {
357 		if (lockowner->pid != pid)
358 			continue;
359 		return nlmsvc_get_lockowner(lockowner);
360 	}
361 	return NULL;
362 }
363 
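/*
 * Find the lockowner for (host, pid), creating one if it does not exist yet.
 * The allocation is done with h_lock dropped, so the list is searched again
 * afterwards in case another task raced in and added the same owner first.
 */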
364 static struct nlm_lockowner *nlmsvc_find_lockowner(struct nlm_host *host, pid_t pid)
365 {
366 	struct nlm_lockowner *res, *new = NULL;
367 
368 	spin_lock(&host->h_lock);
369 	res = __nlmsvc_find_lockowner(host, pid);
370 
371 	if (res == NULL) {
372 		spin_unlock(&host->h_lock);
373 		new = kmalloc(sizeof(*res), GFP_KERNEL);
374 		spin_lock(&host->h_lock);
375 		res = __nlmsvc_find_lockowner(host, pid);
376 		if (res == NULL && new != NULL) {
377 			res = new;
378 			/* fs/locks.c will manage the refcount through lock_ops */
379 			refcount_set(&new->count, 1);
380 			new->pid = pid;
381 			new->host = nlm_get_host(host);
382 			list_add(&new->list, &host->h_lockowners);
383 			new = NULL;
384 		}
385 	}
386 
387 	spin_unlock(&host->h_lock);
388 	kfree(new);
389 	return res;
390 }
391 
392 void
393 nlmsvc_release_lockowner(struct nlm_lock *lock)
394 {
395 	if (lock->fl.fl_owner)
396 		nlmsvc_put_lockowner(lock->fl.fl_owner);
397 }
398 
399 void nlmsvc_locks_init_private(struct file_lock *fl, struct nlm_host *host,
400 						pid_t pid)
401 {
402 	fl->fl_owner = nlmsvc_find_lockowner(host, pid);
403 }
404 
405 /*
406  * Initialize arguments for GRANTED call. The nlm_rqst structure
407  * has been cleared already.
408  */
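/*
 * The owner handle is copied into the fixed-size a_owner buffer when it fits
 * (oh.len <= NLMCLNT_OHSIZE); longer handles get a separate kmalloc'd buffer,
 * which nlmsvc_freegrantargs() releases again.
 */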
409 static int nlmsvc_setgrantargs(struct nlm_rqst *call, struct nlm_lock *lock)
410 {
411 	locks_copy_lock(&call->a_args.lock.fl, &lock->fl);
412 	memcpy(&call->a_args.lock.fh, &lock->fh, sizeof(call->a_args.lock.fh));
413 	call->a_args.lock.caller = utsname()->nodename;
414 	call->a_args.lock.oh.len = lock->oh.len;
415 
416 	/* set default data area */
417 	call->a_args.lock.oh.data = call->a_owner;
418 	call->a_args.lock.svid = ((struct nlm_lockowner *)lock->fl.fl_owner)->pid;
419 
420 	if (lock->oh.len > NLMCLNT_OHSIZE) {
421 		void *data = kmalloc(lock->oh.len, GFP_KERNEL);
422 		if (!data)
423 			return 0;
424 		call->a_args.lock.oh.data = (u8 *) data;
425 	}
426 
427 	memcpy(call->a_args.lock.oh.data, lock->oh.data, lock->oh.len);
428 	return 1;
429 }
430 
431 static void nlmsvc_freegrantargs(struct nlm_rqst *call)
432 {
433 	if (call->a_args.lock.oh.data != call->a_owner)
434 		kfree(call->a_args.lock.oh.data);
435 
436 	locks_release_private(&call->a_args.lock.fl);
437 }
438 
439 /*
440  * Deferred lock request handling for non-blocking lock
441  */
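/*
 * The block is marked B_QUEUED and parked on nlm_blocked with an NLM_TIMEOUT
 * expiry; if rqstp->rq_chandle.defer() succeeds, nlm_drop_reply is returned
 * and the request is revisited from nlmsvc_retry_blocked() once the lm_grant
 * callback fires or the timeout expires.
 */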
442 static __be32
443 nlmsvc_defer_lock_rqst(struct svc_rqst *rqstp, struct nlm_block *block)
444 {
445 	__be32 status = nlm_lck_denied_nolocks;
446 
447 	block->b_flags |= B_QUEUED;
448 
449 	nlmsvc_insert_block(block, NLM_TIMEOUT);
450 
451 	block->b_cache_req = &rqstp->rq_chandle;
452 	if (rqstp->rq_chandle.defer) {
453 		block->b_deferred_req =
454 			rqstp->rq_chandle.defer(block->b_cache_req);
455 		if (block->b_deferred_req != NULL)
456 			status = nlm_drop_reply;
457 	}
458 	dprintk("lockd: nlmsvc_defer_lock_rqst block %p flags %d status %d\n",
459 		block, block->b_flags, ntohl(status));
460 
461 	return status;
462 }
463 
464 /*
465  * Attempt to establish a lock, and if it can't be granted, block it
466  * if required.
467  */
468 __be32
469 nlmsvc_lock(struct svc_rqst *rqstp, struct nlm_file *file,
470 	    struct nlm_host *host, struct nlm_lock *lock, int wait,
471 	    struct nlm_cookie *cookie, int reclaim)
472 {
473 #if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
474 	struct inode		*inode = nlmsvc_file_inode(file);
475 #endif
476 	struct nlm_block	*block = NULL;
477 	int			error;
478 	int			mode;
479 	int			async_block = 0;
480 	__be32			ret;
481 
482 	dprintk("lockd: nlmsvc_lock(%s/%ld, ty=%d, pi=%d, %Ld-%Ld, bl=%d)\n",
483 				inode->i_sb->s_id, inode->i_ino,
484 				lock->fl.fl_type, lock->fl.fl_pid,
485 				(long long)lock->fl.fl_start,
486 				(long long)lock->fl.fl_end,
487 				wait);
488 
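	/* Filesystems that define their own ->lock method (a cluster or
	 * re-exported filesystem, for example) handle blocking themselves,
	 * so never ask the VFS to sleep here; remember whether the client
	 * wanted to block so that -EAGAIN can be reported as "blocked"
	 * rather than "denied" below. */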
489 	if (nlmsvc_file_file(file)->f_op->lock) {
490 		async_block = wait;
491 		wait = 0;
492 	}
493 
494 	/* Lock file against concurrent access */
495 	mutex_lock(&file->f_mutex);
496 	/* Get existing block (in case client is busy-waiting)
497 	 * or create new block
498 	 */
499 	block = nlmsvc_lookup_block(file, lock);
500 	if (block == NULL) {
501 		block = nlmsvc_create_block(rqstp, host, file, lock, cookie);
502 		ret = nlm_lck_denied_nolocks;
503 		if (block == NULL)
504 			goto out;
505 		lock = &block->b_call->a_args.lock;
506 	} else
507 		lock->fl.fl_flags &= ~FL_SLEEP;
508 
509 	if (block->b_flags & B_QUEUED) {
510 		dprintk("lockd: nlmsvc_lock deferred block %p flags %d\n",
511 							block, block->b_flags);
512 		if (block->b_granted) {
513 			nlmsvc_unlink_block(block);
514 			ret = nlm_granted;
515 			goto out;
516 		}
517 		if (block->b_flags & B_TIMED_OUT) {
518 			nlmsvc_unlink_block(block);
519 			ret = nlm_lck_denied;
520 			goto out;
521 		}
522 		ret = nlm_drop_reply;
523 		goto out;
524 	}
525 
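	/* During the grace period only reclaim requests may be granted;
	 * conversely, reclaims are refused once the grace period is over. */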
526 	if (locks_in_grace(SVC_NET(rqstp)) && !reclaim) {
527 		ret = nlm_lck_denied_grace_period;
528 		goto out;
529 	}
530 	if (reclaim && !locks_in_grace(SVC_NET(rqstp))) {
531 		ret = nlm_lck_denied_grace_period;
532 		goto out;
533 	}
534 
535 	if (!wait)
536 		lock->fl.fl_flags &= ~FL_SLEEP;
537 	mode = lock_to_openmode(&lock->fl);
538 	error = vfs_lock_file(file->f_file[mode], F_SETLK, &lock->fl, NULL);
539 	lock->fl.fl_flags &= ~FL_SLEEP;
540 
541 	dprintk("lockd: vfs_lock_file returned %d\n", error);
542 	switch (error) {
543 		case 0:
544 			ret = nlm_granted;
545 			goto out;
546 		case -EAGAIN:
547 			/*
548 			 * If this is a blocking request for an
549 			 * already pending lock request then we need
550 			 * to put it back on lockd's block list
551 			 */
552 			if (wait)
553 				break;
554 			ret = async_block ? nlm_lck_blocked : nlm_lck_denied;
555 			goto out;
556 		case FILE_LOCK_DEFERRED:
557 			if (wait)
558 				break;
559 			/* Filesystem lock operation is in progress.
560 			 * Add it to the queue waiting for callback. */
561 			ret = nlmsvc_defer_lock_rqst(rqstp, block);
562 			goto out;
563 		case -EDEADLK:
564 			ret = nlm_deadlock;
565 			goto out;
566 		default:			/* includes ENOLCK */
567 			ret = nlm_lck_denied_nolocks;
568 			goto out;
569 	}
570 
571 	ret = nlm_lck_blocked;
572 
573 	/* Append to list of blocked */
574 	nlmsvc_insert_block(block, NLM_NEVER);
575 out:
576 	mutex_unlock(&file->f_mutex);
577 	nlmsvc_release_block(block);
578 	dprintk("lockd: nlmsvc_lock returned %u\n", ret);
579 	return ret;
580 }
581 
582 /*
583  * Test for presence of a conflicting lock.
584  */
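/*
 * If a conflicting lock is found, its owner, type and range are copied into
 * @conflock so they can be reported back in the NLM_TEST reply.
 */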
585 __be32
586 nlmsvc_testlock(struct svc_rqst *rqstp, struct nlm_file *file,
587 		struct nlm_host *host, struct nlm_lock *lock,
588 		struct nlm_lock *conflock, struct nlm_cookie *cookie)
589 {
590 	int			error;
591 	int			mode;
592 	__be32			ret;
593 
594 	dprintk("lockd: nlmsvc_testlock(%s/%ld, ty=%d, %Ld-%Ld)\n",
595 				nlmsvc_file_inode(file)->i_sb->s_id,
596 				nlmsvc_file_inode(file)->i_ino,
597 				lock->fl.fl_type,
598 				(long long)lock->fl.fl_start,
599 				(long long)lock->fl.fl_end);
600 
601 	if (locks_in_grace(SVC_NET(rqstp))) {
602 		ret = nlm_lck_denied_grace_period;
603 		goto out;
604 	}
605 
606 	mode = lock_to_openmode(&lock->fl);
607 	error = vfs_test_lock(file->f_file[mode], &lock->fl);
608 	if (error) {
609 		/* We can't currently deal with deferred test requests */
610 		if (error == FILE_LOCK_DEFERRED)
611 			WARN_ON_ONCE(1);
612 
613 		ret = nlm_lck_denied_nolocks;
614 		goto out;
615 	}
616 
617 	if (lock->fl.fl_type == F_UNLCK) {
618 		ret = nlm_granted;
619 		goto out;
620 	}
621 
622 	dprintk("lockd: conflicting lock(ty=%d, %Ld-%Ld)\n",
623 		lock->fl.fl_type, (long long)lock->fl.fl_start,
624 		(long long)lock->fl.fl_end);
625 	conflock->caller = "somehost";	/* FIXME */
626 	conflock->len = strlen(conflock->caller);
627 	conflock->oh.len = 0;		/* don't return OH info */
628 	conflock->svid = lock->fl.fl_pid;
629 	conflock->fl.fl_type = lock->fl.fl_type;
630 	conflock->fl.fl_start = lock->fl.fl_start;
631 	conflock->fl.fl_end = lock->fl.fl_end;
632 	locks_release_private(&lock->fl);
633 
634 	ret = nlm_lck_denied;
635 out:
636 	return ret;
637 }
638 
639 /*
640  * Remove a lock.
641  * This also implies a CANCEL call: we may have sent a GRANT_MSG whose
642  * GRANT_RES reply from the client got lost, with the client calling
643  * UNLOCK immediately afterwards. In that case the block will still be
644  * there, and hence must be removed.
645  */
646 __be32
647 nlmsvc_unlock(struct net *net, struct nlm_file *file, struct nlm_lock *lock)
648 {
649 	int	error = 0;
650 
651 	dprintk("lockd: nlmsvc_unlock(%s/%ld, pi=%d, %Ld-%Ld)\n",
652 				nlmsvc_file_inode(file)->i_sb->s_id,
653 				nlmsvc_file_inode(file)->i_ino,
654 				lock->fl.fl_pid,
655 				(long long)lock->fl.fl_start,
656 				(long long)lock->fl.fl_end);
657 
658 	/* First, cancel any lock that might be there */
659 	nlmsvc_cancel_blocked(net, file, lock);
660 
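	/* The lock may have been taken through either the read-only or the
	 * write-only struct file, so issue the unlock against both. */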
661 	lock->fl.fl_type = F_UNLCK;
662 	if (file->f_file[O_RDONLY])
663 		error = vfs_lock_file(file->f_file[O_RDONLY], F_SETLK,
664 					&lock->fl, NULL);
665 	if (file->f_file[O_WRONLY])
666 		error = vfs_lock_file(file->f_file[O_WRONLY], F_SETLK,
667 					&lock->fl, NULL);
668 
669 	return (error < 0)? nlm_lck_denied_nolocks : nlm_granted;
670 }
671 
672 /*
673  * Cancel a previously blocked request.
674  *
675  * A cancel request always overrides any grant that may currently
676  * be in progress.
677  * The calling procedure must check whether the file can be closed.
678  */
679 __be32
680 nlmsvc_cancel_blocked(struct net *net, struct nlm_file *file, struct nlm_lock *lock)
681 {
682 	struct nlm_block	*block;
683 	int status = 0;
684 	int mode;
685 
686 	dprintk("lockd: nlmsvc_cancel(%s/%ld, pi=%d, %Ld-%Ld)\n",
687 				nlmsvc_file_inode(file)->i_sb->s_id,
688 				nlmsvc_file_inode(file)->i_ino,
689 				lock->fl.fl_pid,
690 				(long long)lock->fl.fl_start,
691 				(long long)lock->fl.fl_end);
692 
693 	if (locks_in_grace(net))
694 		return nlm_lck_denied_grace_period;
695 
696 	mutex_lock(&file->f_mutex);
697 	block = nlmsvc_lookup_block(file, lock);
698 	mutex_unlock(&file->f_mutex);
699 	if (block != NULL) {
700 		mode = lock_to_openmode(&lock->fl);
701 		vfs_cancel_lock(block->b_file->f_file[mode],
702 				&block->b_call->a_args.lock.fl);
703 		status = nlmsvc_unlink_block(block);
704 		nlmsvc_release_block(block);
705 	}
706 	return status ? nlm_lck_denied : nlm_granted;
707 }
708 
709 /*
710  * This is a callback from the filesystem for VFS file lock requests.
711  * It will be used if lm_grant is defined and the filesystem can not
712  * respond to the request immediately.
713  * For SETLK or SETLKW request it will get the local posix lock.
714  * In all cases it will move the block to the head of nlm_blocked q where
715  * nlmsvc_retry_blocked() can send back a reply for SETLKW or revisit the
716  * deferred rpc for GETLK and SETLK.
717  */
718 static void
719 nlmsvc_update_deferred_block(struct nlm_block *block, int result)
720 {
721 	block->b_flags |= B_GOT_CALLBACK;
722 	if (result == 0)
723 		block->b_granted = 1;
724 	else
725 		block->b_flags |= B_TIMED_OUT;
726 }
727 
728 static int nlmsvc_grant_deferred(struct file_lock *fl, int result)
729 {
730 	struct nlm_block *block;
731 	int rc = -ENOENT;
732 
733 	spin_lock(&nlm_blocked_lock);
734 	list_for_each_entry(block, &nlm_blocked, b_list) {
735 		if (nlm_compare_locks(&block->b_call->a_args.lock.fl, fl)) {
736 			dprintk("lockd: nlmsvc_notify_blocked block %p flags %d\n",
737 							block, block->b_flags);
738 			if (block->b_flags & B_QUEUED) {
739 				if (block->b_flags & B_TIMED_OUT) {
740 					rc = -ENOLCK;
741 					break;
742 				}
743 				nlmsvc_update_deferred_block(block, result);
744 			} else if (result == 0)
745 				block->b_granted = 1;
746 
747 			nlmsvc_insert_block_locked(block, 0);
748 			svc_wake_up(block->b_daemon);
749 			rc = 0;
750 			break;
751 		}
752 	}
753 	spin_unlock(&nlm_blocked_lock);
754 	if (rc == -ENOENT)
755 		printk(KERN_WARNING "lockd: grant for unknown block\n");
756 	return rc;
757 }
758 
759 /*
760  * Unblock a blocked lock request. This is a callback invoked from the
761  * VFS layer when a lock on which we blocked is removed.
762  *
763  * This function doesn't grant the blocked lock instantly, but rather moves
764  * the block to the head of nlm_blocked where it can be picked up by lockd.
765  */
766 static void
767 nlmsvc_notify_blocked(struct file_lock *fl)
768 {
769 	struct nlm_block	*block;
770 
771 	dprintk("lockd: VFS unblock notification for block %p\n", fl);
772 	spin_lock(&nlm_blocked_lock);
773 	list_for_each_entry(block, &nlm_blocked, b_list) {
774 		if (nlm_compare_locks(&block->b_call->a_args.lock.fl, fl)) {
775 			nlmsvc_insert_block_locked(block, 0);
776 			spin_unlock(&nlm_blocked_lock);
777 			svc_wake_up(block->b_daemon);
778 			return;
779 		}
780 	}
781 	spin_unlock(&nlm_blocked_lock);
782 	printk(KERN_WARNING "lockd: notification for unknown block!\n");
783 }
784 
785 static fl_owner_t nlmsvc_get_owner(fl_owner_t owner)
786 {
787 	return nlmsvc_get_lockowner(owner);
788 }
789 
790 static void nlmsvc_put_owner(fl_owner_t owner)
791 {
792 	nlmsvc_put_lockowner(owner);
793 }
794 
795 const struct lock_manager_operations nlmsvc_lock_operations = {
796 	.lm_notify = nlmsvc_notify_blocked,
797 	.lm_grant = nlmsvc_grant_deferred,
798 	.lm_get_owner = nlmsvc_get_owner,
799 	.lm_put_owner = nlmsvc_put_owner,
800 };
801 
802 /*
803  * Try to claim a lock that was previously blocked.
804  *
805  * Note that we use both the RPC_GRANTED_MSG call _and_ an async
806  * RPC thread when notifying the client. This seems like overkill...
807  * Here's why:
808  *  -	we don't want to use a synchronous RPC thread, otherwise
809  *	we might find ourselves hanging on a dead portmapper.
810  *  -	Some lockd implementations (e.g. HP) don't react to
811  *	RPC_GRANTED calls; they seem to insist on RPC_GRANTED_MSG calls.
812  */
813 static void
814 nlmsvc_grant_blocked(struct nlm_block *block)
815 {
816 	struct nlm_file		*file = block->b_file;
817 	struct nlm_lock		*lock = &block->b_call->a_args.lock;
818 	int			mode;
819 	int			error;
820 	loff_t			fl_start, fl_end;
821 
822 	dprintk("lockd: grant blocked lock %p\n", block);
823 
824 	kref_get(&block->b_count);
825 
826 	/* Unlink block request from list */
827 	nlmsvc_unlink_block(block);
828 
829 	/* If b_granted is true this means we've been here before.
830 	 * Just retry the grant callback, possibly refreshing the RPC
831 	 * binding */
832 	if (block->b_granted) {
833 		nlm_rebind_host(block->b_host);
834 		goto callback;
835 	}
836 
837 	/* Try the lock operation again */
838 	/* vfs_lock_file() can mangle fl_start and fl_end, but we need
839 	 * them unchanged for the GRANT_MSG
840 	 */
841 	lock->fl.fl_flags |= FL_SLEEP;
842 	fl_start = lock->fl.fl_start;
843 	fl_end = lock->fl.fl_end;
844 	mode = lock_to_openmode(&lock->fl);
845 	error = vfs_lock_file(file->f_file[mode], F_SETLK, &lock->fl, NULL);
846 	lock->fl.fl_flags &= ~FL_SLEEP;
847 	lock->fl.fl_start = fl_start;
848 	lock->fl.fl_end = fl_end;
849 
850 	switch (error) {
851 	case 0:
852 		break;
853 	case FILE_LOCK_DEFERRED:
854 		dprintk("lockd: lock still blocked error %d\n", error);
855 		nlmsvc_insert_block(block, NLM_NEVER);
856 		nlmsvc_release_block(block);
857 		return;
858 	default:
859 		printk(KERN_WARNING "lockd: unexpected error %d in %s!\n",
860 				-error, __func__);
861 		nlmsvc_insert_block(block, 10 * HZ);
862 		nlmsvc_release_block(block);
863 		return;
864 	}
865 
866 callback:
867 	/* Lock was granted by VFS. */
868 	dprintk("lockd: GRANTing blocked lock.\n");
869 	block->b_granted = 1;
870 
871 	/* keep block on the list, but don't reattempt until the RPC
872 	 * completes or the submission fails
873 	 */
874 	nlmsvc_insert_block(block, NLM_NEVER);
875 
876 	/* Call the client -- use a soft RPC task since nlmsvc_retry_blocked
877 	 * will queue up a new one if this one times out
878 	 */
879 	error = nlm_async_call(block->b_call, NLMPROC_GRANTED_MSG,
880 				&nlmsvc_grant_ops);
881 
882 	/* RPC submission failed, wait a bit and retry */
883 	if (error < 0)
884 		nlmsvc_insert_block(block, 10 * HZ);
885 }
886 
887 /*
888  * This is the callback from the RPC layer when the NLM_GRANTED_MSG
889  * RPC call has succeeded or timed out.
890  * Like all RPC callbacks, it is invoked by the rpciod process, so it
891  * better not sleep. Therefore, we put the blocked lock on the nlm_blocked
892  * chain once more in order to have it removed by lockd itself (which can
893  * then sleep on the file's f_mutex without disrupting e.g. the nfs client).
894  */
895 static void nlmsvc_grant_callback(struct rpc_task *task, void *data)
896 {
897 	struct nlm_rqst		*call = data;
898 	struct nlm_block	*block = call->a_block;
899 	unsigned long		timeout;
900 
901 	dprintk("lockd: GRANT_MSG RPC callback\n");
902 
903 	spin_lock(&nlm_blocked_lock);
904 	/* if the block is not on a list at this point then it has
905 	 * been invalidated. Don't try to requeue it.
906 	 *
907 	 * FIXME: it's possible that the block is removed from the list
908 	 * after this check but before the nlmsvc_insert_block. In that
909 	 * case it will be added back. Perhaps we need better locking
910 	 * for nlm_blocked?
911 	 */
912 	if (list_empty(&block->b_list))
913 		goto out;
914 
915 	/* Technically, we should take the file's f_mutex here. Since we
916 	 * move the block towards the head of the queue only, no harm
917 	 * can be done, though. */
918 	if (task->tk_status < 0) {
919 		/* RPC error: Re-insert for retransmission */
920 		timeout = 10 * HZ;
921 	} else {
922 		/* Call was successful, now wait for client callback */
923 		timeout = 60 * HZ;
924 	}
925 	nlmsvc_insert_block_locked(block, timeout);
926 	svc_wake_up(block->b_daemon);
927 out:
928 	spin_unlock(&nlm_blocked_lock);
929 }
930 
931 /*
932  * FIXME: nlmsvc_release_block() grabs a mutex.  This is not allowed for an
933  * .rpc_release rpc_call_op
934  */
935 static void nlmsvc_grant_release(void *data)
936 {
937 	struct nlm_rqst		*call = data;
938 	nlmsvc_release_block(call->a_block);
939 }
940 
941 static const struct rpc_call_ops nlmsvc_grant_ops = {
942 	.rpc_call_done = nlmsvc_grant_callback,
943 	.rpc_release = nlmsvc_grant_release,
944 };
945 
946 /*
947  * We received a GRANT_RES callback. Try to find the corresponding
948  * block.
949  */
950 void
951 nlmsvc_grant_reply(struct nlm_cookie *cookie, __be32 status)
952 {
953 	struct nlm_block	*block;
954 
955 	dprintk("grant_reply: looking for cookie %x, s=%d \n",
956 		*(unsigned int *)(cookie->data), status);
957 	if (!(block = nlmsvc_find_block(cookie)))
958 		return;
959 
960 	if (status == nlm_lck_denied_grace_period) {
961 		/* Try again in a couple of seconds */
962 		nlmsvc_insert_block(block, 10 * HZ);
963 	} else {
964 		/*
965 		 * Lock is now held by client, or has been rejected.
966 		 * In both cases, the block should be removed.
967 		 */
968 		nlmsvc_unlink_block(block);
969 	}
970 	nlmsvc_release_block(block);
971 }
972 
973 /* Helper function to handle retry of a deferred block.
974  * If it is a blocking lock, call grant_blocked.
975  * For a non-blocking lock or test lock, revisit the request.
976  */
977 static void
978 retry_deferred_block(struct nlm_block *block)
979 {
980 	if (!(block->b_flags & B_GOT_CALLBACK))
981 		block->b_flags |= B_TIMED_OUT;
982 	nlmsvc_insert_block(block, NLM_TIMEOUT);
983 	dprintk("revisit block %p flags %d\n", block, block->b_flags);
984 	if (block->b_deferred_req) {
985 		block->b_deferred_req->revisit(block->b_deferred_req, 0);
986 		block->b_deferred_req = NULL;
987 	}
988 }
989 
990 /*
991  * Retry all blocked locks that have been notified. This is where lockd
992  * picks up locks that can be granted, or grant notifications that must
993  * be retransmitted.
994  */
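/*
 * Returns the number of jiffies until the next scheduled retry, or
 * MAX_SCHEDULE_TIMEOUT when nothing on the list needs a timed retry; the
 * lockd thread uses this value as the timeout for its next wait.
 */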
995 unsigned long
996 nlmsvc_retry_blocked(void)
997 {
998 	unsigned long	timeout = MAX_SCHEDULE_TIMEOUT;
999 	struct nlm_block *block;
1000 
1001 	spin_lock(&nlm_blocked_lock);
1002 	while (!list_empty(&nlm_blocked) && !kthread_should_stop()) {
1003 		block = list_entry(nlm_blocked.next, struct nlm_block, b_list);
1004 
1005 		if (block->b_when == NLM_NEVER)
1006 			break;
1007 		if (time_after(block->b_when, jiffies)) {
1008 			timeout = block->b_when - jiffies;
1009 			break;
1010 		}
1011 		spin_unlock(&nlm_blocked_lock);
1012 
1013 		dprintk("nlmsvc_retry_blocked(%p, when=%ld)\n",
1014 			block, block->b_when);
1015 		if (block->b_flags & B_QUEUED) {
1016 			dprintk("nlmsvc_retry_blocked delete block (%p, granted=%d, flags=%d)\n",
1017 				block, block->b_granted, block->b_flags);
1018 			retry_deferred_block(block);
1019 		} else
1020 			nlmsvc_grant_blocked(block);
1021 		spin_lock(&nlm_blocked_lock);
1022 	}
1023 	spin_unlock(&nlm_blocked_lock);
1024 
1025 	return timeout;
1026 }
1027