// SPDX-License-Identifier: GPL-2.0
/*
 * linux/fs/lockd/svclock.c
 *
 * Handling of server-side locks, mostly of the blocked variety.
 * This is the ugliest part of lockd because we tread on very thin ice.
 * GRANT and CANCEL calls may get stuck, meet in mid-flight, etc.
 * IMNSHO introducing the grant callback into the NLM protocol was one
 * of the worst ideas Sun ever had. Except maybe for the idea of doing
 * NFS file locking at all.
 *
 * I'm trying hard to avoid race conditions by protecting most accesses
 * to a file's list of blocked locks through a semaphore. The global
 * list of blocked locks is not protected in this fashion, however.
 * Therefore, some functions (such as the RPC callback for the async grant
 * call) move blocked locks towards the head of the list *while some other
 * process might be traversing it*. This should not be a problem in
 * practice, because this will only cause functions traversing the list
 * to visit some blocks twice.
 *
 * Copyright (C) 1996, Olaf Kirch <okir@monad.swb.de>
 */

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/svc_xprt.h>
#include <linux/lockd/nlm.h>
#include <linux/lockd/lockd.h>
#include <linux/kthread.h>

#define NLMDBG_FACILITY		NLMDBG_SVCLOCK

#ifdef CONFIG_LOCKD_V4
#define nlm_deadlock	nlm4_deadlock
#else
#define nlm_deadlock	nlm_lck_denied
#endif

static void	nlmsvc_release_block(struct nlm_block *block);
static void	nlmsvc_insert_block(struct nlm_block *block, unsigned long when);
static void	nlmsvc_remove_block(struct nlm_block *block);

static int nlmsvc_setgrantargs(struct nlm_rqst *call, struct nlm_lock *lock);
static void nlmsvc_freegrantargs(struct nlm_rqst *call);
static const struct rpc_call_ops nlmsvc_grant_ops;

/*
 * The list of blocked locks to retry
 */
static LIST_HEAD(nlm_blocked);
static DEFINE_SPINLOCK(nlm_blocked_lock);

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
static const char *nlmdbg_cookie2a(const struct nlm_cookie *cookie)
{
	/*
	 * We can get away with a static buffer because this is only called
	 * from lockd, which is single-threaded.
	 */
	static char buf[2*NLM_MAXCOOKIELEN+1];
	unsigned int i, len = sizeof(buf);
	char *p = buf;

	len--;	/* allow for trailing \0 */
	if (len < 3)
		return "???";
	for (i = 0 ; i < cookie->len ; i++) {
		if (len < 2) {
			strcpy(p-3, "...");
			break;
		}
		sprintf(p, "%02x", cookie->data[i]);
		p += 2;
		len -= 2;
	}
	*p = '\0';

	return buf;
}
#endif
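
/*
 * Illustrative note (editorial, not from the original source): a
 * four-byte cookie { 0xde, 0xad, 0xbe, 0xef } is rendered by
 * nlmdbg_cookie2a() as the string "deadbeef"; should a cookie ever
 * exceed the buffer, the output is truncated with a trailing "...".
 */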

/*
 * Insert a blocked lock into the global list
 */
static void
nlmsvc_insert_block_locked(struct nlm_block *block, unsigned long when)
{
	struct nlm_block *b;
	struct list_head *pos;

	dprintk("lockd: nlmsvc_insert_block(%p, %ld)\n", block, when);
	if (list_empty(&block->b_list)) {
		kref_get(&block->b_count);
	} else {
		list_del_init(&block->b_list);
	}

	pos = &nlm_blocked;
	if (when != NLM_NEVER) {
		if ((when += jiffies) == NLM_NEVER)
			when++;
		list_for_each(pos, &nlm_blocked) {
			b = list_entry(pos, struct nlm_block, b_list);
			if (time_after(b->b_when, when) || b->b_when == NLM_NEVER)
				break;
		}
		/* On normal exit from the loop, pos == &nlm_blocked,
		 * so we will be adding to the end of the list - good
		 */
	}

	list_add_tail(&block->b_list, pos);
	block->b_when = when;
}

static void nlmsvc_insert_block(struct nlm_block *block, unsigned long when)
{
	spin_lock(&nlm_blocked_lock);
	nlmsvc_insert_block_locked(block, when);
	spin_unlock(&nlm_blocked_lock);
}
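
/*
 * Note (editorial): the insertion above keeps nlm_blocked sorted by
 * expiry time (b_when), with NLM_NEVER entries collected at the tail,
 * so nlmsvc_retry_blocked() only ever needs to examine the list head.
 */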

/*
 * Remove a block from the global list
 */
static inline void
nlmsvc_remove_block(struct nlm_block *block)
{
	if (!list_empty(&block->b_list)) {
		spin_lock(&nlm_blocked_lock);
		list_del_init(&block->b_list);
		spin_unlock(&nlm_blocked_lock);
		nlmsvc_release_block(block);
	}
}

/*
 * Find a block for a given lock
 */
static struct nlm_block *
nlmsvc_lookup_block(struct nlm_file *file, struct nlm_lock *lock)
{
	struct nlm_block	*block;
	struct file_lock	*fl;

	dprintk("lockd: nlmsvc_lookup_block f=%p pd=%d %Ld-%Ld ty=%d\n",
				file, lock->fl.fl_pid,
				(long long)lock->fl.fl_start,
				(long long)lock->fl.fl_end, lock->fl.fl_type);
	list_for_each_entry(block, &nlm_blocked, b_list) {
		fl = &block->b_call->a_args.lock.fl;
		dprintk("lockd: check f=%p pd=%d %Ld-%Ld ty=%d cookie=%s\n",
				block->b_file, fl->fl_pid,
				(long long)fl->fl_start,
				(long long)fl->fl_end, fl->fl_type,
				nlmdbg_cookie2a(&block->b_call->a_args.cookie));
		if (block->b_file == file && nlm_compare_locks(fl, &lock->fl)) {
			kref_get(&block->b_count);
			return block;
		}
	}

	return NULL;
}

static inline int nlm_cookie_match(struct nlm_cookie *a, struct nlm_cookie *b)
{
	if (a->len != b->len)
		return 0;
	if (memcmp(a->data, b->data, a->len))
		return 0;
	return 1;
}

/*
 * Find a block with a given NLM cookie.
 */
static inline struct nlm_block *
nlmsvc_find_block(struct nlm_cookie *cookie)
{
	struct nlm_block *block;

	list_for_each_entry(block, &nlm_blocked, b_list) {
		if (nlm_cookie_match(&block->b_call->a_args.cookie, cookie))
			goto found;
	}

	return NULL;

found:
	dprintk("nlmsvc_find_block(%s): block=%p\n", nlmdbg_cookie2a(cookie), block);
	kref_get(&block->b_count);
	return block;
}

/*
 * Create a block and initialize it.
 *
 * Note: we explicitly set the cookie of the grant reply to that of
 * the blocked lock request. The spec explicitly mentions that the client
 * should _not_ rely on the callback containing the same cookie as the
 * request, but (as I found out later) that's because some implementations
 * do just this. Never mind the standards committees, they support our
 * logging industries.
 *
 * 10 years later: I hope we can safely ignore these old and broken
 * clients by now. Let's fix this so we can uniquely identify an incoming
 * GRANTED_RES message by cookie, without having to rely on the client's IP
 * address. --okir
 */
static struct nlm_block *
nlmsvc_create_block(struct svc_rqst *rqstp, struct nlm_host *host,
		    struct nlm_file *file, struct nlm_lock *lock,
		    struct nlm_cookie *cookie)
{
	struct nlm_block	*block;
	struct nlm_rqst		*call = NULL;

	call = nlm_alloc_call(host);
	if (call == NULL)
		return NULL;

	/* Allocate memory for block, and initialize arguments */
	block = kzalloc(sizeof(*block), GFP_KERNEL);
	if (block == NULL)
		goto failed;
	kref_init(&block->b_count);
	INIT_LIST_HEAD(&block->b_list);
	INIT_LIST_HEAD(&block->b_flist);

	if (!nlmsvc_setgrantargs(call, lock))
		goto failed_free;

	/* Set notifier function for VFS, and init args */
	call->a_args.lock.fl.fl_flags |= FL_SLEEP;
	call->a_args.lock.fl.fl_lmops = &nlmsvc_lock_operations;
	nlmclnt_next_cookie(&call->a_args.cookie);

	dprintk("lockd: created block %p...\n", block);

	/* Create and initialize the block */
	block->b_daemon = rqstp->rq_server;
	block->b_host   = host;
	block->b_file   = file;
	file->f_count++;

	/* Add to file's list of blocks */
	list_add(&block->b_flist, &file->f_blocks);

	/* Set up RPC arguments for callback */
	block->b_call = call;
	call->a_flags   = RPC_TASK_ASYNC;
	call->a_block = block;

	return block;

failed_free:
	kfree(block);
failed:
	nlmsvc_release_call(call);
	return NULL;
}
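
/*
 * Reference-counting sketch (editorial note): a new block starts with
 * one reference for the caller (kref_init), pins its file through
 * file->f_count, and relies on the host reference that nlm_alloc_call()
 * stored in call->a_host for the lifetime of the GRANT callback.
 */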

/*
 * Delete a block.
 * It is the caller's responsibility to check whether the file
 * can be closed hereafter.
 */
static int nlmsvc_unlink_block(struct nlm_block *block)
{
	int status;

	dprintk("lockd: unlinking block %p...\n", block);

	/* Remove block from list */
	status = locks_delete_block(&block->b_call->a_args.lock.fl);
	nlmsvc_remove_block(block);
	return status;
}

static void nlmsvc_free_block(struct kref *kref)
{
	struct nlm_block *block = container_of(kref, struct nlm_block, b_count);
	struct nlm_file		*file = block->b_file;

	dprintk("lockd: freeing block %p...\n", block);

	/* Remove block from file's list of blocks */
	list_del_init(&block->b_flist);
	mutex_unlock(&file->f_mutex);

	nlmsvc_freegrantargs(block->b_call);
	nlmsvc_release_call(block->b_call);
	nlm_release_file(block->b_file);
	kfree(block);
}

static void nlmsvc_release_block(struct nlm_block *block)
{
	if (block != NULL)
		kref_put_mutex(&block->b_count, nlmsvc_free_block,
				&block->b_file->f_mutex);
}

/*
 * Loop over all blocks and delete blocks held by
 * a matching host.
 */
void nlmsvc_traverse_blocks(struct nlm_host *host,
			struct nlm_file *file,
			nlm_host_match_fn_t match)
{
	struct nlm_block *block, *next;

restart:
	mutex_lock(&file->f_mutex);
	list_for_each_entry_safe(block, next, &file->f_blocks, b_flist) {
		if (!match(block->b_host, host))
			continue;
		/* Do not destroy blocks that are not on
		 * the global retry list - why? */
		if (list_empty(&block->b_list))
			continue;
		kref_get(&block->b_count);
		mutex_unlock(&file->f_mutex);
		nlmsvc_unlink_block(block);
		nlmsvc_release_block(block);
		goto restart;
	}
	mutex_unlock(&file->f_mutex);
}
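
/*
 * Editorial note on the restart pattern above: f_mutex is dropped
 * before unlinking because nlmsvc_release_block() may need to retake
 * it (via kref_put_mutex) to free the block; since the list can change
 * while the mutex is unlocked, the walk restarts from the head after
 * each block is destroyed.
 */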

static struct nlm_lockowner *
nlmsvc_get_lockowner(struct nlm_lockowner *lockowner)
{
	refcount_inc(&lockowner->count);
	return lockowner;
}

static void nlmsvc_put_lockowner(struct nlm_lockowner *lockowner)
{
	if (!refcount_dec_and_lock(&lockowner->count, &lockowner->host->h_lock))
		return;
	list_del(&lockowner->list);
	spin_unlock(&lockowner->host->h_lock);
	nlmsvc_release_host(lockowner->host);
	kfree(lockowner);
}

static struct nlm_lockowner *__nlmsvc_find_lockowner(struct nlm_host *host, pid_t pid)
{
	struct nlm_lockowner *lockowner;

	list_for_each_entry(lockowner, &host->h_lockowners, list) {
		if (lockowner->pid != pid)
			continue;
		return nlmsvc_get_lockowner(lockowner);
	}
	return NULL;
}

static struct nlm_lockowner *nlmsvc_find_lockowner(struct nlm_host *host, pid_t pid)
{
	struct nlm_lockowner *res, *new = NULL;

	spin_lock(&host->h_lock);
	res = __nlmsvc_find_lockowner(host, pid);

	if (res == NULL) {
		spin_unlock(&host->h_lock);
		new = kmalloc(sizeof(*res), GFP_KERNEL);
		spin_lock(&host->h_lock);
		res = __nlmsvc_find_lockowner(host, pid);
		if (res == NULL && new != NULL) {
			res = new;
			/* fs/locks.c will manage the refcount through lock_ops */
			refcount_set(&new->count, 1);
			new->pid = pid;
			new->host = nlm_get_host(host);
			list_add(&new->list, &host->h_lockowners);
			new = NULL;
		}
	}

	spin_unlock(&host->h_lock);
	kfree(new);
	return res;
}
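
/*
 * Editorial note: the allocate-then-recheck dance above exists because
 * kmalloc(GFP_KERNEL) may sleep and so cannot run under h_lock; if
 * another thread raced in and created the same lockowner while the
 * lock was dropped, the spare allocation is simply freed.
 */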

void
nlmsvc_release_lockowner(struct nlm_lock *lock)
{
	if (lock->fl.fl_owner)
		nlmsvc_put_lockowner(lock->fl.fl_owner);
}

void nlmsvc_locks_init_private(struct file_lock *fl, struct nlm_host *host,
						pid_t pid)
{
	fl->fl_owner = nlmsvc_find_lockowner(host, pid);
}

/*
 * Initialize arguments for GRANTED call. The nlm_rqst structure
 * has been cleared already.
 */
static int nlmsvc_setgrantargs(struct nlm_rqst *call, struct nlm_lock *lock)
{
	locks_copy_lock(&call->a_args.lock.fl, &lock->fl);
	memcpy(&call->a_args.lock.fh, &lock->fh, sizeof(call->a_args.lock.fh));
	call->a_args.lock.caller = utsname()->nodename;
	call->a_args.lock.oh.len = lock->oh.len;

	/* set default data area */
	call->a_args.lock.oh.data = call->a_owner;
	call->a_args.lock.svid = ((struct nlm_lockowner *)lock->fl.fl_owner)->pid;

	if (lock->oh.len > NLMCLNT_OHSIZE) {
		void *data = kmalloc(lock->oh.len, GFP_KERNEL);

		if (!data)
			return 0;
		call->a_args.lock.oh.data = (u8 *) data;
	}

	memcpy(call->a_args.lock.oh.data, lock->oh.data, lock->oh.len);
	return 1;
}

static void nlmsvc_freegrantargs(struct nlm_rqst *call)
{
	if (call->a_args.lock.oh.data != call->a_owner)
		kfree(call->a_args.lock.oh.data);

	locks_release_private(&call->a_args.lock.fl);
}
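
/*
 * Ownership note (editorial): oh.data points either at the embedded
 * a_owner buffer or at a kmalloc()ed copy for owner handles larger
 * than NLMCLNT_OHSIZE; nlmsvc_freegrantargs() frees only the latter.
 */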

/*
 * Deferred lock request handling for non-blocking lock
 */
static __be32
nlmsvc_defer_lock_rqst(struct svc_rqst *rqstp, struct nlm_block *block)
{
	__be32 status = nlm_lck_denied_nolocks;

	block->b_flags |= B_QUEUED;

	nlmsvc_insert_block(block, NLM_TIMEOUT);

	block->b_cache_req = &rqstp->rq_chandle;
	if (rqstp->rq_chandle.defer) {
		block->b_deferred_req =
			rqstp->rq_chandle.defer(block->b_cache_req);
		if (block->b_deferred_req != NULL)
			status = nlm_drop_reply;
	}
	dprintk("lockd: nlmsvc_defer_lock_rqst block %p flags %d status %d\n",
		block, block->b_flags, ntohl(status));

	return status;
}
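
/*
 * Flow sketch (editorial, assuming the transport supports deferral):
 * the request handle is parked via ->defer(), nlm_drop_reply tells the
 * RPC layer to send no reply yet, and the request is replayed either
 * when the filesystem's lm_grant callback fires or when
 * retry_deferred_block() times the block out after NLM_TIMEOUT.
 */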

/*
 * Attempt to establish a lock, and if it can't be granted, block it
 * if required.
 */
__be32
nlmsvc_lock(struct svc_rqst *rqstp, struct nlm_file *file,
	    struct nlm_host *host, struct nlm_lock *lock, int wait,
	    struct nlm_cookie *cookie, int reclaim)
{
	struct nlm_block	*block = NULL;
	int			error;
	int			mode;
	__be32			ret;

	dprintk("lockd: nlmsvc_lock(%s/%ld, ty=%d, pi=%d, %Ld-%Ld, bl=%d)\n",
				nlmsvc_file_inode(file)->i_sb->s_id,
				nlmsvc_file_inode(file)->i_ino,
				lock->fl.fl_type, lock->fl.fl_pid,
				(long long)lock->fl.fl_start,
				(long long)lock->fl.fl_end,
				wait);

	/* Lock file against concurrent access */
	mutex_lock(&file->f_mutex);
	/* Get existing block (in case client is busy-waiting)
	 * or create new block
	 */
	block = nlmsvc_lookup_block(file, lock);
	if (block == NULL) {
		block = nlmsvc_create_block(rqstp, host, file, lock, cookie);
		ret = nlm_lck_denied_nolocks;
		if (block == NULL)
			goto out;
		lock = &block->b_call->a_args.lock;
	} else
		lock->fl.fl_flags &= ~FL_SLEEP;

	if (block->b_flags & B_QUEUED) {
		dprintk("lockd: nlmsvc_lock deferred block %p flags %d\n",
							block, block->b_flags);
		if (block->b_granted) {
			nlmsvc_unlink_block(block);
			ret = nlm_granted;
			goto out;
		}
		if (block->b_flags & B_TIMED_OUT) {
			nlmsvc_unlink_block(block);
			ret = nlm_lck_denied;
			goto out;
		}
		ret = nlm_drop_reply;
		goto out;
	}
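	/*
	 * Grace-period gating (editorial note): while the server is in
	 * its grace period only lock reclaims may be granted, and a
	 * reclaim arriving outside the grace period must be refused.
	 */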
	if (locks_in_grace(SVC_NET(rqstp)) && !reclaim) {
		ret = nlm_lck_denied_grace_period;
		goto out;
	}
	if (reclaim && !locks_in_grace(SVC_NET(rqstp))) {
		ret = nlm_lck_denied_grace_period;
		goto out;
	}

	if (!wait)
		lock->fl.fl_flags &= ~FL_SLEEP;
	mode = lock_to_openmode(&lock->fl);
	error = vfs_lock_file(file->f_file[mode], F_SETLK, &lock->fl, NULL);
	lock->fl.fl_flags &= ~FL_SLEEP;

	dprintk("lockd: vfs_lock_file returned %d\n", error);
	switch (error) {
	case 0:
		ret = nlm_granted;
		goto out;
	case -EAGAIN:
		/*
		 * If this is a blocking request for an
		 * already pending lock request then we need
		 * to put it back on lockd's block list
		 */
		if (wait)
			break;
		ret = nlm_lck_denied;
		goto out;
	case FILE_LOCK_DEFERRED:
		if (wait)
			break;
		/* Filesystem lock operation is in progress.
		 * Add it to the queue waiting for the callback. */
		ret = nlmsvc_defer_lock_rqst(rqstp, block);
		goto out;
	case -EDEADLK:
		ret = nlm_deadlock;
		goto out;
	default:			/* includes ENOLCK */
		ret = nlm_lck_denied_nolocks;
		goto out;
	}

	ret = nlm_lck_blocked;

	/* Append to list of blocked */
	nlmsvc_insert_block(block, NLM_NEVER);
out:
	mutex_unlock(&file->f_mutex);
	nlmsvc_release_block(block);
	dprintk("lockd: nlmsvc_lock returned %u\n", ret);
	return ret;
}

/*
 * Test for presence of a conflicting lock.
 */
__be32
nlmsvc_testlock(struct svc_rqst *rqstp, struct nlm_file *file,
		struct nlm_host *host, struct nlm_lock *lock,
		struct nlm_lock *conflock, struct nlm_cookie *cookie)
{
	int			error;
	int			mode;
	__be32			ret;
	struct nlm_lockowner	*test_owner;

	dprintk("lockd: nlmsvc_testlock(%s/%ld, ty=%d, %Ld-%Ld)\n",
				nlmsvc_file_inode(file)->i_sb->s_id,
				nlmsvc_file_inode(file)->i_ino,
				lock->fl.fl_type,
				(long long)lock->fl.fl_start,
				(long long)lock->fl.fl_end);

	if (locks_in_grace(SVC_NET(rqstp))) {
		ret = nlm_lck_denied_grace_period;
		goto out;
	}

	/* If there's a conflicting lock, remember to clean up the test lock */
	test_owner = (struct nlm_lockowner *)lock->fl.fl_owner;

	mode = lock_to_openmode(&lock->fl);
	error = vfs_test_lock(file->f_file[mode], &lock->fl);
	if (error) {
		/* We can't currently deal with deferred test requests */
		if (error == FILE_LOCK_DEFERRED)
			WARN_ON_ONCE(1);

		ret = nlm_lck_denied_nolocks;
		goto out;
	}

	if (lock->fl.fl_type == F_UNLCK) {
		ret = nlm_granted;
		goto out;
	}

	dprintk("lockd: conflicting lock(ty=%d, %Ld-%Ld)\n",
		lock->fl.fl_type, (long long)lock->fl.fl_start,
		(long long)lock->fl.fl_end);
	conflock->caller = "somehost";	/* FIXME */
	conflock->len = strlen(conflock->caller);
	conflock->oh.len = 0;		/* don't return OH info */
	conflock->svid = lock->fl.fl_pid;
	conflock->fl.fl_type = lock->fl.fl_type;
	conflock->fl.fl_start = lock->fl.fl_start;
	conflock->fl.fl_end = lock->fl.fl_end;
	locks_release_private(&lock->fl);

	/* Clean up the test lock */
	lock->fl.fl_owner = NULL;
	nlmsvc_put_lockowner(test_owner);

	ret = nlm_lck_denied;
out:
	return ret;
}

/*
 * Remove a lock.
 * This implies a CANCEL call: We send a GRANT_MSG, the client replies
 * with a GRANT_RES call which gets lost, and calls UNLOCK immediately
 * afterwards. In this case the block will still be there, and hence
 * must be removed.
 */
__be32
nlmsvc_unlock(struct net *net, struct nlm_file *file, struct nlm_lock *lock)
{
	int	error = 0;

	dprintk("lockd: nlmsvc_unlock(%s/%ld, pi=%d, %Ld-%Ld)\n",
				nlmsvc_file_inode(file)->i_sb->s_id,
				nlmsvc_file_inode(file)->i_ino,
				lock->fl.fl_pid,
				(long long)lock->fl.fl_start,
				(long long)lock->fl.fl_end);

	/* First, cancel any lock that might be there */
	nlmsvc_cancel_blocked(net, file, lock);

	lock->fl.fl_type = F_UNLCK;
	if (file->f_file[O_RDONLY])
		error = vfs_lock_file(file->f_file[O_RDONLY], F_SETLK,
					&lock->fl, NULL);
	if (file->f_file[O_WRONLY])
		error = vfs_lock_file(file->f_file[O_WRONLY], F_SETLK,
					&lock->fl, NULL);

	return (error < 0) ? nlm_lck_denied_nolocks : nlm_granted;
}
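
/*
 * Editorial note: the unlock is applied to both the read-open and the
 * write-open struct file (when present) because the original lock may
 * have been taken through either, depending on the lock type.
 */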

/*
 * Cancel a previously blocked request.
 *
 * A cancel request always overrides any grant that may currently
 * be in progress.
 * The calling procedure must check whether the file can be closed.
 */
__be32
nlmsvc_cancel_blocked(struct net *net, struct nlm_file *file, struct nlm_lock *lock)
{
	struct nlm_block	*block;
	int status = 0;
	int mode;

	dprintk("lockd: nlmsvc_cancel(%s/%ld, pi=%d, %Ld-%Ld)\n",
				nlmsvc_file_inode(file)->i_sb->s_id,
				nlmsvc_file_inode(file)->i_ino,
				lock->fl.fl_pid,
				(long long)lock->fl.fl_start,
				(long long)lock->fl.fl_end);

	if (locks_in_grace(net))
		return nlm_lck_denied_grace_period;

	mutex_lock(&file->f_mutex);
	block = nlmsvc_lookup_block(file, lock);
	mutex_unlock(&file->f_mutex);
	if (block != NULL) {
		mode = lock_to_openmode(&lock->fl);
		vfs_cancel_lock(block->b_file->f_file[mode],
				&block->b_call->a_args.lock.fl);
		status = nlmsvc_unlink_block(block);
		nlmsvc_release_block(block);
	}
	return status ? nlm_lck_denied : nlm_granted;
}

/*
 * This is a callback from the filesystem for VFS file lock requests.
 * It will be used if lm_grant is defined and the filesystem cannot
 * respond to the request immediately.
 * For a SETLK or SETLKW request it will get the local posix lock.
 * In all cases it will move the block to the head of the nlm_blocked
 * queue, where nlmsvc_retry_blocked() can send back a reply for SETLKW
 * or revisit the deferred RPC for GETLK and SETLK.
 */
static void
nlmsvc_update_deferred_block(struct nlm_block *block, int result)
{
	block->b_flags |= B_GOT_CALLBACK;
	if (result == 0)
		block->b_granted = 1;
	else
		block->b_flags |= B_TIMED_OUT;
}

static int nlmsvc_grant_deferred(struct file_lock *fl, int result)
{
	struct nlm_block *block;
	int rc = -ENOENT;

	spin_lock(&nlm_blocked_lock);
	list_for_each_entry(block, &nlm_blocked, b_list) {
		if (nlm_compare_locks(&block->b_call->a_args.lock.fl, fl)) {
			dprintk("lockd: nlmsvc_notify_blocked block %p flags %d\n",
							block, block->b_flags);
			if (block->b_flags & B_QUEUED) {
				if (block->b_flags & B_TIMED_OUT) {
					rc = -ENOLCK;
					break;
				}
				nlmsvc_update_deferred_block(block, result);
			} else if (result == 0)
				block->b_granted = 1;

			nlmsvc_insert_block_locked(block, 0);
			svc_wake_up(block->b_daemon);
			rc = 0;
			break;
		}
	}
	spin_unlock(&nlm_blocked_lock);
	if (rc == -ENOENT)
		printk(KERN_WARNING "lockd: grant for unknown block\n");
	return rc;
}

/*
 * Unblock a blocked lock request. This is a callback invoked from the
 * VFS layer when a lock on which we blocked is removed.
 *
 * This function doesn't grant the blocked lock instantly, but rather moves
 * the block to the head of nlm_blocked where it can be picked up by lockd.
 */
static void
nlmsvc_notify_blocked(struct file_lock *fl)
{
	struct nlm_block	*block;

	dprintk("lockd: VFS unblock notification for block %p\n", fl);
	spin_lock(&nlm_blocked_lock);
	list_for_each_entry(block, &nlm_blocked, b_list) {
		if (nlm_compare_locks(&block->b_call->a_args.lock.fl, fl)) {
			nlmsvc_insert_block_locked(block, 0);
			spin_unlock(&nlm_blocked_lock);
			svc_wake_up(block->b_daemon);
			return;
		}
	}
	spin_unlock(&nlm_blocked_lock);
	printk(KERN_WARNING "lockd: notification for unknown block!\n");
}

static fl_owner_t nlmsvc_get_owner(fl_owner_t owner)
{
	return nlmsvc_get_lockowner(owner);
}

static void nlmsvc_put_owner(fl_owner_t owner)
{
	nlmsvc_put_lockowner(owner);
}

const struct lock_manager_operations nlmsvc_lock_operations = {
	.lm_notify = nlmsvc_notify_blocked,
	.lm_grant = nlmsvc_grant_deferred,
	.lm_get_owner = nlmsvc_get_owner,
	.lm_put_owner = nlmsvc_put_owner,
};
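
/*
 * Informational (editorial summary of how fs/locks.c uses these hooks):
 * ->lm_notify() fires when a lock we blocked on is released,
 * ->lm_grant() when a filesystem completes a deferred lock request,
 * and ->lm_get_owner()/->lm_put_owner() when the VFS copies lock state
 * (e.g. when reporting a conflicting lock).
 */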

/*
 * Try to claim a lock that was previously blocked.
 *
 * Note that we use both the RPC_GRANTED_MSG call _and_ an async
 * RPC thread when notifying the client. This seems like overkill...
 * Here's why:
 *  -	we don't want to use a synchronous RPC thread, otherwise
 *	we might find ourselves hanging on a dead portmapper.
 *  -	Some lockd implementations (e.g. HP) don't react to
 *	RPC_GRANTED calls; they seem to insist on RPC_GRANTED_MSG calls.
 */
static void
nlmsvc_grant_blocked(struct nlm_block *block)
{
	struct nlm_file		*file = block->b_file;
	struct nlm_lock		*lock = &block->b_call->a_args.lock;
	int			mode;
	int			error;
	loff_t			fl_start, fl_end;

	dprintk("lockd: grant blocked lock %p\n", block);

	kref_get(&block->b_count);

	/* Unlink block request from list */
	nlmsvc_unlink_block(block);

	/* If b_granted is true this means we've been here before.
	 * Just retry the grant callback, possibly refreshing the RPC
	 * binding */
	if (block->b_granted) {
		nlm_rebind_host(block->b_host);
		goto callback;
	}

	/* Try the lock operation again */
	/* vfs_lock_file() can mangle fl_start and fl_end, but we need
	 * them unchanged for the GRANT_MSG
	 */
	lock->fl.fl_flags |= FL_SLEEP;
	fl_start = lock->fl.fl_start;
	fl_end = lock->fl.fl_end;
	mode = lock_to_openmode(&lock->fl);
	error = vfs_lock_file(file->f_file[mode], F_SETLK, &lock->fl, NULL);
	lock->fl.fl_flags &= ~FL_SLEEP;
	lock->fl.fl_start = fl_start;
	lock->fl.fl_end = fl_end;

	switch (error) {
	case 0:
		break;
	case FILE_LOCK_DEFERRED:
		dprintk("lockd: lock still blocked error %d\n", error);
		nlmsvc_insert_block(block, NLM_NEVER);
		nlmsvc_release_block(block);
		return;
	default:
		printk(KERN_WARNING "lockd: unexpected error %d in %s!\n",
				-error, __func__);
		nlmsvc_insert_block(block, 10 * HZ);
		nlmsvc_release_block(block);
		return;
	}

callback:
	/* Lock was granted by VFS. */
	dprintk("lockd: GRANTing blocked lock.\n");
	block->b_granted = 1;

	/* keep block on the list, but don't reattempt until the RPC
	 * completes or the submission fails
	 */
	nlmsvc_insert_block(block, NLM_NEVER);

	/* Call the client -- use a soft RPC task since nlmsvc_retry_blocked
	 * will queue up a new one if this one times out
	 */
	error = nlm_async_call(block->b_call, NLMPROC_GRANTED_MSG,
				&nlmsvc_grant_ops);

	/* RPC submission failed, wait a bit and retry */
	if (error < 0)
		nlmsvc_insert_block(block, 10 * HZ);
}

/*
 * This is the callback from the RPC layer when the NLM_GRANTED_MSG
 * RPC call has succeeded or timed out.
 * Like all RPC callbacks, it is invoked by the rpciod process, so it
 * had better not sleep. Therefore, we put the blocked lock on the
 * nlm_blocked chain once more in order to have it removed by lockd
 * itself (which can then sleep on the file semaphore without disrupting
 * e.g. the nfs client).
 */
static void nlmsvc_grant_callback(struct rpc_task *task, void *data)
{
	struct nlm_rqst		*call = data;
	struct nlm_block	*block = call->a_block;
	unsigned long		timeout;

	dprintk("lockd: GRANT_MSG RPC callback\n");

	spin_lock(&nlm_blocked_lock);
	/* if the block is not on a list at this point then it has
	 * been invalidated. Don't try to requeue it.
	 *
	 * FIXME: it's possible that the block is removed from the list
	 * after this check but before the nlmsvc_insert_block. In that
	 * case it will be added back. Perhaps we need better locking
	 * for nlm_blocked?
	 */
	if (list_empty(&block->b_list))
		goto out;

	/* Technically, we should down the file semaphore here. Since we
	 * move the block towards the head of the queue only, no harm
	 * can be done, though. */
	if (task->tk_status < 0) {
		/* RPC error: Re-insert for retransmission */
		timeout = 10 * HZ;
	} else {
		/* Call was successful, now wait for client callback */
		timeout = 60 * HZ;
	}
	nlmsvc_insert_block_locked(block, timeout);
	svc_wake_up(block->b_daemon);
out:
	spin_unlock(&nlm_blocked_lock);
}

/*
 * FIXME: nlmsvc_release_block() grabs a mutex. This is not allowed for an
 * .rpc_release rpc_call_op
 */
static void nlmsvc_grant_release(void *data)
{
	struct nlm_rqst		*call = data;

	nlmsvc_release_block(call->a_block);
}

static const struct rpc_call_ops nlmsvc_grant_ops = {
	.rpc_call_done = nlmsvc_grant_callback,
	.rpc_release = nlmsvc_grant_release,
};

/*
 * We received a GRANT_RES callback. Try to find the corresponding
 * block.
 */
void
nlmsvc_grant_reply(struct nlm_cookie *cookie, __be32 status)
{
	struct nlm_block	*block;

	dprintk("grant_reply: looking for cookie %x, s=%d\n",
		*(unsigned int *)(cookie->data), status);
	block = nlmsvc_find_block(cookie);
	if (block == NULL)
		return;

	if (status == nlm_lck_denied_grace_period) {
		/* Try again in a couple of seconds */
		nlmsvc_insert_block(block, 10 * HZ);
	} else {
		/*
		 * Lock is now held by client, or has been rejected.
		 * In both cases, the block should be removed.
		 */
		nlmsvc_unlink_block(block);
	}
	nlmsvc_release_block(block);
}

/* Helper function to handle retry of a deferred block.
 * If it is a blocking lock, call grant_blocked.
 * For a non-blocking lock or test lock, revisit the request.
 */
static void
retry_deferred_block(struct nlm_block *block)
{
	if (!(block->b_flags & B_GOT_CALLBACK))
		block->b_flags |= B_TIMED_OUT;
	nlmsvc_insert_block(block, NLM_TIMEOUT);
	dprintk("revisit block %p flags %d\n", block, block->b_flags);
	if (block->b_deferred_req) {
		block->b_deferred_req->revisit(block->b_deferred_req, 0);
		block->b_deferred_req = NULL;
	}
}

/*
 * Retry all blocked locks that have been notified. This is where lockd
 * picks up locks that can be granted, or grant notifications that must
 * be retransmitted.
 */
unsigned long
nlmsvc_retry_blocked(void)
{
	unsigned long	timeout = MAX_SCHEDULE_TIMEOUT;
	struct nlm_block *block;

	spin_lock(&nlm_blocked_lock);
	while (!list_empty(&nlm_blocked) && !kthread_should_stop()) {
		block = list_entry(nlm_blocked.next, struct nlm_block, b_list);

		if (block->b_when == NLM_NEVER)
			break;
		if (time_after(block->b_when, jiffies)) {
			timeout = block->b_when - jiffies;
			break;
		}
		spin_unlock(&nlm_blocked_lock);

		dprintk("nlmsvc_retry_blocked(%p, when=%ld)\n",
			block, block->b_when);
		if (block->b_flags & B_QUEUED) {
			dprintk("nlmsvc_retry_blocked delete block (%p, granted=%d, flags=%d)\n",
				block, block->b_granted, block->b_flags);
			retry_deferred_block(block);
		} else
			nlmsvc_grant_blocked(block);
		spin_lock(&nlm_blocked_lock);
	}
	spin_unlock(&nlm_blocked_lock);

	return timeout;
}
1026