xref: /openbmc/linux/fs/lockd/clntproc.c (revision d5cb9783536a41df9f9cba5b0a1d78047ed787f7)
/*
 * linux/fs/lockd/clntproc.c
 *
 * RPC procedures for the client side NLM implementation
 *
 * Copyright (C) 1996, Olaf Kirch <okir@monad.swb.de>
 */

#include <linux/config.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/nfs_fs.h>
#include <linux/utsname.h>
#include <linux/smp_lock.h>
#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/svc.h>
#include <linux/lockd/lockd.h>
#include <linux/lockd/sm_inter.h>

#define NLMDBG_FACILITY		NLMDBG_CLIENT
#define NLMCLNT_GRACE_WAIT	(5*HZ)
#define NLMCLNT_POLL_TIMEOUT	(30*HZ)

static int	nlmclnt_test(struct nlm_rqst *, struct file_lock *);
static int	nlmclnt_lock(struct nlm_rqst *, struct file_lock *);
static int	nlmclnt_unlock(struct nlm_rqst *, struct file_lock *);
static void	nlmclnt_unlock_callback(struct rpc_task *);
static void	nlmclnt_cancel_callback(struct rpc_task *);
static int	nlm_stat_to_errno(u32 stat);
static void	nlmclnt_locks_init_private(struct file_lock *fl, struct nlm_host *host);

/*
 * Cookie counter for NLM requests
 */
static u32	nlm_cookie = 0x1234;

static inline void nlmclnt_next_cookie(struct nlm_cookie *c)
{
	memcpy(c->data, &nlm_cookie, 4);
	memset(c->data+4, 0, 4);
	c->len = 4;
	nlm_cookie++;
}

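/*
 * Note: the counter above is bumped without any locking, so two
 * concurrent requests could in principle pick the same cookie.  A
 * cookie only has to match a reply to its request, so this is assumed
 * to be harmless in practice.
 */
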
static struct nlm_lockowner *nlm_get_lockowner(struct nlm_lockowner *lockowner)
{
	atomic_inc(&lockowner->count);
	return lockowner;
}

static void nlm_put_lockowner(struct nlm_lockowner *lockowner)
{
	if (!atomic_dec_and_lock(&lockowner->count, &lockowner->host->h_lock))
		return;
	list_del(&lockowner->list);
	spin_unlock(&lockowner->host->h_lock);
	nlm_release_host(lockowner->host);
	kfree(lockowner);
}

static inline int nlm_pidbusy(struct nlm_host *host, uint32_t pid)
{
	struct nlm_lockowner *lockowner;
	list_for_each_entry(lockowner, &host->h_lockowners, list) {
		if (lockowner->pid == pid)
			return -EBUSY;
	}
	return 0;
}

static inline uint32_t __nlm_alloc_pid(struct nlm_host *host)
{
	uint32_t res;
	do {
		res = host->h_pidcount++;
	} while (nlm_pidbusy(host, res) < 0);
	return res;
}

static struct nlm_lockowner *__nlm_find_lockowner(struct nlm_host *host, fl_owner_t owner)
{
	struct nlm_lockowner *lockowner;
	list_for_each_entry(lockowner, &host->h_lockowners, list) {
		if (lockowner->owner != owner)
			continue;
		return nlm_get_lockowner(lockowner);
	}
	return NULL;
}

static struct nlm_lockowner *nlm_find_lockowner(struct nlm_host *host, fl_owner_t owner)
{
	struct nlm_lockowner *res, *new = NULL;

	spin_lock(&host->h_lock);
	res = __nlm_find_lockowner(host, owner);
	if (res == NULL) {
		spin_unlock(&host->h_lock);
		new = kmalloc(sizeof(*new), GFP_KERNEL);
		spin_lock(&host->h_lock);
		res = __nlm_find_lockowner(host, owner);
		if (res == NULL && new != NULL) {
			res = new;
			atomic_set(&new->count, 1);
			new->owner = owner;
			new->pid = __nlm_alloc_pid(host);
			new->host = nlm_get_host(host);
			list_add(&new->list, &host->h_lockowners);
			new = NULL;
		}
	}
	spin_unlock(&host->h_lock);
	kfree(new);	/* kfree(NULL) is a no-op */
	return res;
}

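/*
 * Note on the retry dance above: h_lock must be dropped around the
 * GFP_KERNEL allocation because kmalloc() may sleep, and sleeping with
 * a spinlock held is forbidden.  Since another task may slip in and
 * create the owner while the lock is dropped, the list is searched a
 * second time before the new entry is committed, and a now-unneeded
 * allocation is simply freed.
 */
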
/*
 * Initialize arguments for TEST/LOCK/UNLOCK/CANCEL calls
 */
static void nlmclnt_setlockargs(struct nlm_rqst *req, struct file_lock *fl)
{
	struct nlm_args	*argp = &req->a_args;
	struct nlm_lock	*lock = &argp->lock;

	nlmclnt_next_cookie(&argp->cookie);
	argp->state   = nsm_local_state;
	memcpy(&lock->fh, NFS_FH(fl->fl_file->f_dentry->d_inode), sizeof(struct nfs_fh));
	lock->caller  = system_utsname.nodename;
	lock->oh.data = req->a_owner;
	lock->oh.len  = sprintf(req->a_owner, "%d@%s",
				current->pid, system_utsname.nodename);
	locks_copy_lock(&lock->fl, fl);
}

static void nlmclnt_release_lockargs(struct nlm_rqst *req)
{
	struct file_lock *fl = &req->a_args.lock.fl;

	if (fl->fl_ops && fl->fl_ops->fl_release_private)
		fl->fl_ops->fl_release_private(fl);
}

/*
 * Initialize arguments for GRANTED call. The nlm_rqst structure
 * has been cleared already.
 */
int
nlmclnt_setgrantargs(struct nlm_rqst *call, struct nlm_lock *lock)
{
	locks_copy_lock(&call->a_args.lock.fl, &lock->fl);
	memcpy(&call->a_args.lock.fh, &lock->fh, sizeof(call->a_args.lock.fh));
	call->a_args.lock.caller = system_utsname.nodename;
	call->a_args.lock.oh.len = lock->oh.len;

	/* set default data area */
	call->a_args.lock.oh.data = call->a_owner;

	if (lock->oh.len > NLMCLNT_OHSIZE) {
		void *data = kmalloc(lock->oh.len, GFP_KERNEL);
		if (!data) {
			nlmclnt_freegrantargs(call);
			return 0;
		}
		call->a_args.lock.oh.data = (u8 *) data;
	}

	memcpy(call->a_args.lock.oh.data, lock->oh.data, lock->oh.len);
	return 1;
}

void
nlmclnt_freegrantargs(struct nlm_rqst *call)
{
	struct file_lock *fl = &call->a_args.lock.fl;
	/*
	 * Check whether we allocated memory for the owner.
	 */
	if (call->a_args.lock.oh.data != (u8 *) call->a_owner) {
		kfree(call->a_args.lock.oh.data);
	}
	if (fl->fl_ops && fl->fl_ops->fl_release_private)
		fl->fl_ops->fl_release_private(fl);
}

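/*
 * Note: unlike most kernel functions, nlmclnt_setgrantargs() uses a
 * boolean convention (1 on success, 0 on failure) rather than a
 * negative errno, so a failed kmalloc() above surfaces as "return 0".
 */
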
/*
 * This is the main entry point for the NLM client.
 */
int
nlmclnt_proc(struct inode *inode, int cmd, struct file_lock *fl)
{
	struct nfs_server	*nfssrv = NFS_SERVER(inode);
	struct nlm_host		*host;
	struct nlm_rqst		reqst, *call = &reqst;
	sigset_t		oldset;
	unsigned long		flags;
	int			status, proto, vers;

	vers = (NFS_PROTO(inode)->version == 3) ? 4 : 1;
	if (NFS_PROTO(inode)->version > 3) {
		printk(KERN_NOTICE "NFSv4 file locking not implemented!\n");
		return -ENOLCK;
	}

	/* Retrieve transport protocol from NFS client */
	proto = NFS_CLIENT(inode)->cl_xprt->prot;

	if (!(host = nlmclnt_lookup_host(NFS_ADDR(inode), proto, vers)))
		return -ENOLCK;

	/* Create RPC client handle if not there, and copy soft
	 * and intr flags from NFS client. */
	if (host->h_rpcclnt == NULL) {
		struct rpc_clnt	*clnt;

		/* Bind an rpc client to this host handle (does not
		 * perform a portmapper lookup) */
		if (!(clnt = nlm_bind_host(host))) {
			status = -ENOLCK;
			goto done;
		}
		clnt->cl_softrtry = nfssrv->client->cl_softrtry;
		clnt->cl_intr     = nfssrv->client->cl_intr;
		clnt->cl_chatty   = nfssrv->client->cl_chatty;
	}

	/* Keep the old signal mask */
	spin_lock_irqsave(&current->sighand->siglock, flags);
	oldset = current->blocked;

	/* If we're cleaning up locks because the process is exiting,
	 * perform the RPC call asynchronously. */
	if ((IS_SETLK(cmd) || IS_SETLKW(cmd))
	    && fl->fl_type == F_UNLCK
	    && (current->flags & PF_EXITING)) {
		sigfillset(&current->blocked);	/* Mask all signals */
		recalc_sigpending();
		spin_unlock_irqrestore(&current->sighand->siglock, flags);

		call = nlmclnt_alloc_call();
		if (!call) {
			status = -ENOMEM;
			goto out_restore;
		}
		call->a_flags = RPC_TASK_ASYNC;
	} else {
		spin_unlock_irqrestore(&current->sighand->siglock, flags);
		memset(call, 0, sizeof(*call));
		locks_init_lock(&call->a_args.lock.fl);
		locks_init_lock(&call->a_res.lock.fl);
	}
	call->a_host = host;

	nlmclnt_locks_init_private(fl, host);

	/* Set up the argument struct */
	nlmclnt_setlockargs(call, fl);

	if (IS_SETLK(cmd) || IS_SETLKW(cmd)) {
		if (fl->fl_type != F_UNLCK) {
			call->a_args.block = IS_SETLKW(cmd) ? 1 : 0;
			status = nlmclnt_lock(call, fl);
		} else
			status = nlmclnt_unlock(call, fl);
	} else if (IS_GETLK(cmd))
		status = nlmclnt_test(call, fl);
	else
		status = -EINVAL;

 out_restore:
	spin_lock_irqsave(&current->sighand->siglock, flags);
	current->blocked = oldset;
	recalc_sigpending();
	spin_unlock_irqrestore(&current->sighand->siglock, flags);

done:
	dprintk("lockd: clnt proc returns %d\n", status);
	nlm_release_host(host);
	return status;
}
EXPORT_SYMBOL(nlmclnt_proc);

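/*
 * Hypothetical sketch (not part of this file): an NFS client lock
 * method would typically forward POSIX lock requests here.  The name
 * example_nfs_lock() is invented for illustration only:
 *
 *	static int example_nfs_lock(struct file *filp, int cmd,
 *				    struct file_lock *fl)
 *	{
 *		struct inode *inode = filp->f_dentry->d_inode;
 *
 *		return nlmclnt_proc(inode, cmd, fl);
 *	}
 *
 * The real call site lives in the NFS client, which also decides when
 * to bypass lockd (for example on "nolock" mounts).
 */
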
/*
 * Allocate an NLM RPC call struct
 */
struct nlm_rqst *
nlmclnt_alloc_call(void)
{
	struct nlm_rqst	*call;

	while (!signalled()) {
		call = kmalloc(sizeof(struct nlm_rqst), GFP_KERNEL);
		if (call) {
			memset(call, 0, sizeof(*call));
			locks_init_lock(&call->a_args.lock.fl);
			locks_init_lock(&call->a_res.lock.fl);
			return call;
		}
		printk(KERN_INFO "nlmclnt_alloc_call: failed, waiting for memory\n");
		schedule_timeout_interruptible(5*HZ);
	}
	return NULL;
}

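/*
 * Note: a NULL return from nlmclnt_alloc_call() means the allocation
 * loop above was interrupted by a signal, not merely that memory was
 * short; callers treat it as fatal (see the -ENOMEM paths elsewhere in
 * this file).
 */
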
static int nlm_wait_on_grace(wait_queue_head_t *queue)
{
	DEFINE_WAIT(wait);
	int status = -EINTR;

	prepare_to_wait(queue, &wait, TASK_INTERRUPTIBLE);
	if (!signalled()) {
		schedule_timeout(NLMCLNT_GRACE_WAIT);
		try_to_freeze();
		if (!signalled())
			status = 0;
	}
	finish_wait(queue, &wait);
	return status;
}

/*
 * Generic NLM call
 */
static int
nlmclnt_call(struct nlm_rqst *req, u32 proc)
{
	struct nlm_host	*host = req->a_host;
	struct rpc_clnt	*clnt;
	struct nlm_args	*argp = &req->a_args;
	struct nlm_res	*resp = &req->a_res;
	struct rpc_message msg = {
		.rpc_argp	= argp,
		.rpc_resp	= resp,
	};
	int		status;

	dprintk("lockd: call procedure %d on %s\n",
			(int)proc, host->h_name);

	do {
		if (host->h_reclaiming && !argp->reclaim)
			goto in_grace_period;

		/* If we have no RPC client yet, create one. */
		if ((clnt = nlm_bind_host(host)) == NULL)
			return -ENOLCK;
		msg.rpc_proc = &clnt->cl_procinfo[proc];

		/* Perform the RPC call. If an error occurs, try again */
		if ((status = rpc_call_sync(clnt, &msg, 0)) < 0) {
			dprintk("lockd: rpc_call returned error %d\n", -status);
			switch (status) {
			case -EPROTONOSUPPORT:
				status = -EINVAL;
				break;
			case -ECONNREFUSED:
			case -ETIMEDOUT:
			case -ENOTCONN:
				nlm_rebind_host(host);
				status = -EAGAIN;
				break;
			case -ERESTARTSYS:
				return signalled() ? -EINTR : status;
			default:
				break;
			}
			break;
		} else if (resp->status == NLM_LCK_DENIED_GRACE_PERIOD) {
			dprintk("lockd: server in grace period\n");
			if (argp->reclaim) {
				printk(KERN_WARNING
				     "lockd: spurious grace period reject?!\n");
				return -ENOLCK;
			}
		} else {
			if (!argp->reclaim) {
				/* We appear to be out of the grace period */
				wake_up_all(&host->h_gracewait);
			}
			dprintk("lockd: server returns status %d\n", resp->status);
			return 0;	/* Okay, call complete */
		}

in_grace_period:
		/*
		 * The server has rebooted and appears to be in the grace
		 * period during which locks are only allowed to be
		 * reclaimed.
		 * We can only back off and try again later.
		 */
		status = nlm_wait_on_grace(&host->h_gracewait);
	} while (status == 0);

	return status;
}

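/*
 * Note on the contract above: nlmclnt_call() returns a negative errno
 * only for local or transport failures.  On success it returns 0 with
 * the NLM status in req->a_res.status, which the caller still has to
 * inspect (usually via nlm_stat_to_errno()).
 */
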
/*
 * Generic NLM call, async version.
 */
int
nlmsvc_async_call(struct nlm_rqst *req, u32 proc, rpc_action callback)
{
	struct nlm_host	*host = req->a_host;
	struct rpc_clnt	*clnt;
	struct rpc_message msg = {
		.rpc_argp	= &req->a_args,
		.rpc_resp	= &req->a_res,
	};
	int		status;

	dprintk("lockd: call procedure %d on %s (async)\n",
			(int)proc, host->h_name);

	/* If we have no RPC client yet, create one. */
	if ((clnt = nlm_bind_host(host)) == NULL)
		return -ENOLCK;
	msg.rpc_proc = &clnt->cl_procinfo[proc];

	/* bootstrap and kick off the async RPC call */
	status = rpc_call_async(clnt, &msg, RPC_TASK_ASYNC, callback, req);

	return status;
}

static int
nlmclnt_async_call(struct nlm_rqst *req, u32 proc, rpc_action callback)
{
	struct nlm_host	*host = req->a_host;
	struct rpc_clnt	*clnt;
	struct nlm_args	*argp = &req->a_args;
	struct nlm_res	*resp = &req->a_res;
	struct rpc_message msg = {
		.rpc_argp	= argp,
		.rpc_resp	= resp,
	};
	int		status;

	dprintk("lockd: call procedure %d on %s (async)\n",
			(int)proc, host->h_name);

	/* If we have no RPC client yet, create one. */
	if ((clnt = nlm_bind_host(host)) == NULL)
		return -ENOLCK;
	msg.rpc_proc = &clnt->cl_procinfo[proc];

	/* Increment host refcount */
	nlm_get_host(host);
	/* bootstrap and kick off the async RPC call */
	status = rpc_call_async(clnt, &msg, RPC_TASK_ASYNC, callback, req);
	if (status < 0)
		nlm_release_host(host);
	return status;
}

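/*
 * Note: the two async helpers above differ only in reference counting.
 * nlmclnt_async_call() pins the host with nlm_get_host() and relies on
 * the RPC callback (nlmclnt_unlock_callback() or
 * nlmclnt_cancel_callback() below) to drop that reference through
 * nlm_release_host(); nlmsvc_async_call() leaves the host's lifetime
 * to its server-side caller.
 */
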
/*
 * TEST for the presence of a conflicting lock
 */
static int
nlmclnt_test(struct nlm_rqst *req, struct file_lock *fl)
{
	int	status;

	status = nlmclnt_call(req, NLMPROC_TEST);
	nlmclnt_release_lockargs(req);
	if (status < 0)
		return status;

	status = req->a_res.status;
	if (status == NLM_LCK_GRANTED) {
		fl->fl_type = F_UNLCK;
	} else if (status == NLM_LCK_DENIED) {
		/*
		 * Report the conflicting lock back to the application.
		 */
		locks_copy_lock(fl, &req->a_res.lock.fl);
		fl->fl_pid = 0;
	} else {
		return nlm_stat_to_errno(req->a_res.status);
	}

	return 0;
}

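/*
 * Hypothetical user-space sketch (not part of this file): this path is
 * reached through fcntl(F_GETLK) on an NFS file, e.g.
 *
 *	struct flock fl = {
 *		.l_type   = F_WRLCK,	// could we take a write lock?
 *		.l_whence = SEEK_SET,
 *		.l_start  = 0,
 *		.l_len    = 0,		// test the whole file
 *	};
 *	if (fcntl(fd, F_GETLK, &fl) == 0 && fl.l_type == F_UNLCK)
 *		printf("no conflicting lock on the server\n");
 *
 * A GRANTED reply means "no conflict", which is why nlmclnt_test()
 * rewrites fl->fl_type to F_UNLCK in that case.
 */
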
static void nlmclnt_locks_copy_lock(struct file_lock *new, struct file_lock *fl)
{
	memcpy(&new->fl_u.nfs_fl, &fl->fl_u.nfs_fl, sizeof(new->fl_u.nfs_fl));
	nlm_get_lockowner(new->fl_u.nfs_fl.owner);
}

static void nlmclnt_locks_release_private(struct file_lock *fl)
{
	nlm_put_lockowner(fl->fl_u.nfs_fl.owner);
	fl->fl_ops = NULL;
}

static struct file_lock_operations nlmclnt_lock_ops = {
	.fl_copy_lock = nlmclnt_locks_copy_lock,
	.fl_release_private = nlmclnt_locks_release_private,
};

static void nlmclnt_locks_init_private(struct file_lock *fl, struct nlm_host *host)
{
	BUG_ON(fl->fl_ops != NULL);
	fl->fl_u.nfs_fl.state = 0;
	fl->fl_u.nfs_fl.flags = 0;
	fl->fl_u.nfs_fl.owner = nlm_find_lockowner(host, fl->fl_owner);
	fl->fl_ops = &nlmclnt_lock_ops;
}

static void do_vfs_lock(struct file_lock *fl)
{
	int res = 0;
	switch (fl->fl_flags & (FL_POSIX|FL_FLOCK)) {
		case FL_POSIX:
			res = posix_lock_file_wait(fl->fl_file, fl);
			break;
		case FL_FLOCK:
			res = flock_lock_file_wait(fl->fl_file, fl);
			break;
		default:
			BUG();
	}
	if (res < 0)
		printk(KERN_WARNING "%s: VFS is out of sync with lock manager!\n",
				__FUNCTION__);
}

/*
 * LOCK: Try to create a lock
 *
 *			Programmer Harassment Alert
 *
 * When given a blocking lock request in a sync RPC call, the HPUX lockd
 * will faithfully return LCK_BLOCKED but never cares to notify us when
 * the lock could be granted. This way, our local process could hang
 * around forever waiting for the callback.
 *
 *  Solution A:	Implement busy-waiting
 *  Solution B: Use the async version of the call (NLM_LOCK_{MSG,RES})
 *
 * For now I am implementing solution A, because I hate the idea of
 * re-implementing lockd for a third time in two months. The async
 * calls shouldn't be too hard to do, however.
 *
 * This is one of the lovely things about standards in the NFS area:
 * they're so soft and squishy you can't really blame HP for doing this.
 */
static int
nlmclnt_lock(struct nlm_rqst *req, struct file_lock *fl)
{
	struct nlm_host	*host = req->a_host;
	struct nlm_res	*resp = &req->a_res;
	long timeout;
	int status;

	if (!host->h_monitored && nsm_monitor(host) < 0) {
		printk(KERN_NOTICE "lockd: failed to monitor %s\n",
					host->h_name);
		status = -ENOLCK;
		goto out;
	}

	if (req->a_args.block) {
		status = nlmclnt_prepare_block(req, host, fl);
		if (status < 0)
			goto out;
	}
	for (;;) {
		status = nlmclnt_call(req, NLMPROC_LOCK);
		if (status < 0)
			goto out_unblock;
		if (resp->status != NLM_LCK_BLOCKED)
			break;
		/* Wait on an NLM blocking lock */
		timeout = nlmclnt_block(req, NLMCLNT_POLL_TIMEOUT);
		/* Did a reclaimer thread notify us of a server reboot? */
		if (resp->status == NLM_LCK_DENIED_GRACE_PERIOD)
			continue;
		if (resp->status != NLM_LCK_BLOCKED)
			break;
		if (timeout >= 0)
			continue;
		/* We were interrupted. Send a CANCEL request to the server
		 * and exit
		 */
		status = (int)timeout;
		goto out_unblock;
	}

	if (resp->status == NLM_LCK_GRANTED) {
		fl->fl_u.nfs_fl.state = host->h_state;
		fl->fl_u.nfs_fl.flags |= NFS_LCK_GRANTED;
		fl->fl_flags |= FL_SLEEP;
		do_vfs_lock(fl);
	}
	status = nlm_stat_to_errno(resp->status);
out_unblock:
	nlmclnt_finish_block(req);
	/* Cancel the blocked request if it is still pending */
	if (resp->status == NLM_LCK_BLOCKED)
		nlmclnt_cancel(host, fl);
out:
	nlmclnt_release_lockargs(req);
	return status;
}

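/*
 * Note: the loop above is the busy-waiting "Solution A" described in
 * the comment preceding nlmclnt_lock().  Each pass waits up to
 * NLMCLNT_POLL_TIMEOUT (30s) for a GRANTED callback and then simply
 * resends NLMPROC_LOCK, so a server that never issues callbacks still
 * makes progress, just slowly.
 */
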
/*
 * RECLAIM: Try to reclaim a lock
 */
int
nlmclnt_reclaim(struct nlm_host *host, struct file_lock *fl)
{
	struct nlm_rqst reqst, *req;
	int		status;

	req = &reqst;
	memset(req, 0, sizeof(*req));
	locks_init_lock(&req->a_args.lock.fl);
	locks_init_lock(&req->a_res.lock.fl);
	req->a_host  = host;
	req->a_flags = 0;

	/* Set up the argument struct */
	nlmclnt_setlockargs(req, fl);
	req->a_args.reclaim = 1;

	if ((status = nlmclnt_call(req, NLMPROC_LOCK)) >= 0
	 && req->a_res.status == NLM_LCK_GRANTED)
		return 0;

	printk(KERN_WARNING "lockd: failed to reclaim lock for pid %d "
				"(errno %d, status %d)\n", fl->fl_pid,
				status, req->a_res.status);

	/*
	 * FIXME: This is a serious failure. We can
	 *
	 *  a.	Ignore the problem
	 *  b.	Send the owning process some signal (Linux doesn't have
	 *	SIGLOST, though...)
	 *  c.	Retry the operation
	 *
	 * Until someone comes up with a simple implementation
	 * for b or c, I'll choose option a.
	 */

	return -ENOLCK;
}

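/*
 * Note (based on the reclaim protocol, not on code in this file): a
 * reclaim is an ordinary NLMPROC_LOCK with a_args.reclaim set, sent
 * while the rebooted server is still in its grace period.  The
 * expected caller is lockd's reclaimer thread, which reissues each
 * lock it holds for the host this way.
 */
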
/*
 * UNLOCK: remove an existing lock
 */
static int
nlmclnt_unlock(struct nlm_rqst *req, struct file_lock *fl)
{
	struct nlm_res	*resp = &req->a_res;
	int		status;

	/* Clear the GRANTED flag now so the lock doesn't get
	 * reclaimed while we're stuck in the unlock call. */
	fl->fl_u.nfs_fl.flags &= ~NFS_LCK_GRANTED;

	if (req->a_flags & RPC_TASK_ASYNC) {
		status = nlmclnt_async_call(req, NLMPROC_UNLOCK,
					nlmclnt_unlock_callback);
		/* Hrmf... Do the unlock early since locks_remove_posix()
		 * really expects us to free the lock synchronously */
		do_vfs_lock(fl);
		if (status < 0) {
			nlmclnt_release_lockargs(req);
			kfree(req);
		}
		return status;
	}

	status = nlmclnt_call(req, NLMPROC_UNLOCK);
	nlmclnt_release_lockargs(req);
	if (status < 0)
		return status;

	do_vfs_lock(fl);
	if (resp->status == NLM_LCK_GRANTED)
		return 0;

	if (resp->status != NLM_LCK_DENIED_NOLOCKS)
		printk(KERN_WARNING "lockd: unexpected unlock status: %d\n",
				resp->status);

	/* What to do now? I'm out of my depth... */

	return -ENOLCK;
}

static void
nlmclnt_unlock_callback(struct rpc_task *task)
{
	struct nlm_rqst	*req = (struct nlm_rqst *) task->tk_calldata;
	int		status = req->a_res.status;

	if (RPC_ASSASSINATED(task))
		goto die;

	if (task->tk_status < 0) {
		dprintk("lockd: unlock failed (err = %d)\n", -task->tk_status);
		goto retry_rebind;
	}
	if (status == NLM_LCK_DENIED_GRACE_PERIOD) {
		rpc_delay(task, NLMCLNT_GRACE_WAIT);
		goto retry_unlock;
	}
	if (status != NLM_LCK_GRANTED)
		printk(KERN_WARNING "lockd: unexpected unlock status: %d\n", status);
die:
	nlm_release_host(req->a_host);
	nlmclnt_release_lockargs(req);
	kfree(req);
	return;
 retry_rebind:
	nlm_rebind_host(req->a_host);
 retry_unlock:
	rpc_restart_call(task);
}

/*
 * Cancel a blocked lock request.
 * We always use an async RPC call for this in order not to hang a
 * process that has been Ctrl-C'ed.
 */
int
nlmclnt_cancel(struct nlm_host *host, struct file_lock *fl)
{
	struct nlm_rqst	*req;
	unsigned long	flags;
	sigset_t	oldset;
	int		status;

	/* Block all signals while setting up call */
	spin_lock_irqsave(&current->sighand->siglock, flags);
	oldset = current->blocked;
	sigfillset(&current->blocked);
	recalc_sigpending();
	spin_unlock_irqrestore(&current->sighand->siglock, flags);

	req = nlmclnt_alloc_call();
	if (!req) {
		status = -ENOMEM;
		goto out_restore;	/* don't leak the blocked signal mask */
	}
	req->a_host  = host;
	req->a_flags = RPC_TASK_ASYNC;

	nlmclnt_setlockargs(req, fl);

	status = nlmclnt_async_call(req, NLMPROC_CANCEL,
					nlmclnt_cancel_callback);
	if (status < 0) {
		nlmclnt_release_lockargs(req);
		kfree(req);
	}

out_restore:
	spin_lock_irqsave(&current->sighand->siglock, flags);
	current->blocked = oldset;
	recalc_sigpending();
	spin_unlock_irqrestore(&current->sighand->siglock, flags);

	return status;
}

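/*
 * Note: the sigfillset()/restore dance above mirrors the one in
 * nlmclnt_proc() for exit-time unlocks.  A CANCEL is typically sent
 * precisely because the user hit Ctrl-C, so the request setup must not
 * itself be aborted by the very signal that triggered it.
 */
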
static void
nlmclnt_cancel_callback(struct rpc_task *task)
{
	struct nlm_rqst	*req = (struct nlm_rqst *) task->tk_calldata;

	if (RPC_ASSASSINATED(task))
		goto die;

	if (task->tk_status < 0) {
		dprintk("lockd: CANCEL call error %d, retrying.\n",
					task->tk_status);
		goto retry_cancel;
	}

	dprintk("lockd: cancel status %d (task %d)\n",
			req->a_res.status, task->tk_pid);

	switch (req->a_res.status) {
	case NLM_LCK_GRANTED:
	case NLM_LCK_DENIED_GRACE_PERIOD:
		/* Everything's good */
		break;
	case NLM_LCK_DENIED_NOLOCKS:
		dprintk("lockd: CANCEL failed (server has no locks)\n");
		goto retry_cancel;
	default:
		printk(KERN_NOTICE "lockd: weird return %d for CANCEL call\n",
			req->a_res.status);
	}

die:
	nlm_release_host(req->a_host);
	nlmclnt_release_lockargs(req);
	kfree(req);
	return;

retry_cancel:
	nlm_rebind_host(req->a_host);
	rpc_restart_call(task);
	rpc_delay(task, 30 * HZ);
}

/*
 * Convert an NLM status code to a generic kernel errno
 */
static int
nlm_stat_to_errno(u32 status)
{
	switch (status) {
	case NLM_LCK_GRANTED:
		return 0;
	case NLM_LCK_DENIED:
		return -EAGAIN;
	case NLM_LCK_DENIED_NOLOCKS:
	case NLM_LCK_DENIED_GRACE_PERIOD:
		return -ENOLCK;
	case NLM_LCK_BLOCKED:
		printk(KERN_NOTICE "lockd: unexpected status NLM_BLOCKED\n");
		return -ENOLCK;
#ifdef CONFIG_LOCKD_V4
	case NLM_DEADLCK:
		return -EDEADLK;
	case NLM_ROFS:
		return -EROFS;
	case NLM_STALE_FH:
		return -ESTALE;
	case NLM_FBIG:
		return -EOVERFLOW;
	case NLM_FAILED:
		return -ENOLCK;
#endif
	}
	printk(KERN_NOTICE "lockd: unexpected server status %d\n", status);
	return -ENOLCK;
}