// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/fs/lockd/clntproc.c
 *
 * RPC procedures for the client side NLM implementation
 *
 * Copyright (C) 1996, Olaf Kirch <okir@monad.swb.de>
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/filelock.h>
#include <linux/nfs_fs.h>
#include <linux/utsname.h>
#include <linux/freezer.h>
#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/svc.h>
#include <linux/lockd/lockd.h>

#include "trace.h"

#define NLMDBG_FACILITY		NLMDBG_CLIENT
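
/*
 * NLMCLNT_GRACE_WAIT: how long nlm_wait_on_grace() sleeps while the
 * server finishes its grace period.  NLMCLNT_POLL_TIMEOUT: how long a
 * blocked lock waits for a GRANTED callback before re-polling the
 * server.  NLMCLNT_MAX_RETRIES: retry cap for CANCEL calls (see
 * nlmclnt_cancel_callback()).
 */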
#define NLMCLNT_GRACE_WAIT	(5*HZ)
#define NLMCLNT_POLL_TIMEOUT	(30*HZ)
#define NLMCLNT_MAX_RETRIES	3

static int	nlmclnt_test(struct nlm_rqst *, struct file_lock *);
static int	nlmclnt_lock(struct nlm_rqst *, struct file_lock *);
static int	nlmclnt_unlock(struct nlm_rqst *, struct file_lock *);
static int	nlm_stat_to_errno(__be32 stat);
static void	nlmclnt_locks_init_private(struct file_lock *fl, struct nlm_host *host);
static int	nlmclnt_cancel(struct nlm_host *, int, struct file_lock *);

static const struct rpc_call_ops nlmclnt_unlock_ops;
static const struct rpc_call_ops nlmclnt_cancel_ops;

/*
 * Cookie counter for NLM requests
 */
static atomic_t	nlm_cookie = ATOMIC_INIT(0x1234);

void nlmclnt_next_cookie(struct nlm_cookie *c)
{
	u32	cookie = atomic_inc_return(&nlm_cookie);

	memcpy(c->data, &cookie, 4);
	c->len = 4;
}

static struct nlm_lockowner *
nlmclnt_get_lockowner(struct nlm_lockowner *lockowner)
{
	refcount_inc(&lockowner->count);
	return lockowner;
}

static void nlmclnt_put_lockowner(struct nlm_lockowner *lockowner)
{
	if (!refcount_dec_and_lock(&lockowner->count, &lockowner->host->h_lock))
		return;
	list_del(&lockowner->list);
	spin_unlock(&lockowner->host->h_lock);
	nlmclnt_release_host(lockowner->host);
	kfree(lockowner);
}

static inline int nlm_pidbusy(struct nlm_host *host, uint32_t pid)
{
	struct nlm_lockowner *lockowner;
	list_for_each_entry(lockowner, &host->h_lockowners, list) {
		if (lockowner->pid == pid)
			return -EBUSY;
	}
	return 0;
}

static inline uint32_t __nlm_alloc_pid(struct nlm_host *host)
{
	uint32_t res;
	do {
		res = host->h_pidcount++;
	} while (nlm_pidbusy(host, res) < 0);
	return res;
}

static struct nlm_lockowner *__nlmclnt_find_lockowner(struct nlm_host *host, fl_owner_t owner)
{
	struct nlm_lockowner *lockowner;
	list_for_each_entry(lockowner, &host->h_lockowners, list) {
		if (lockowner->owner != owner)
			continue;
		return nlmclnt_get_lockowner(lockowner);
	}
	return NULL;
}

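/*
 * Look up the lockowner for @owner on @host, lazily creating one if
 * none exists.  This follows the usual optimistic-allocation pattern:
 * drop h_lock, kmalloc(), retake h_lock and re-check for a racing
 * insertion before committing the new entry; a losing allocation is
 * simply freed.
 */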
static struct nlm_lockowner *nlmclnt_find_lockowner(struct nlm_host *host, fl_owner_t owner)
{
	struct nlm_lockowner *res, *new = NULL;

	spin_lock(&host->h_lock);
	res = __nlmclnt_find_lockowner(host, owner);
	if (res == NULL) {
		spin_unlock(&host->h_lock);
		new = kmalloc(sizeof(*new), GFP_KERNEL);
		spin_lock(&host->h_lock);
		res = __nlmclnt_find_lockowner(host, owner);
		if (res == NULL && new != NULL) {
			res = new;
			refcount_set(&new->count, 1);
			new->owner = owner;
			new->pid = __nlm_alloc_pid(host);
			new->host = nlm_get_host(host);
			list_add(&new->list, &host->h_lockowners);
			new = NULL;
		}
	}
	spin_unlock(&host->h_lock);
	kfree(new);
	return res;
}

/*
 * Initialize arguments for TEST/LOCK/UNLOCK/CANCEL calls
 */
static void nlmclnt_setlockargs(struct nlm_rqst *req, struct file_lock *fl)
{
	struct nlm_args	*argp = &req->a_args;
	struct nlm_lock	*lock = &argp->lock;
	char *nodename = req->a_host->h_rpcclnt->cl_nodename;

	nlmclnt_next_cookie(&argp->cookie);
	memcpy(&lock->fh, NFS_FH(file_inode(fl->fl_file)), sizeof(struct nfs_fh));
	lock->caller  = nodename;
	lock->oh.data = req->a_owner;
	lock->oh.len  = snprintf(req->a_owner, sizeof(req->a_owner), "%u@%s",
				(unsigned int)fl->fl_u.nfs_fl.owner->pid,
				nodename);
	lock->svid = fl->fl_u.nfs_fl.owner->pid;
	lock->fl.fl_start = fl->fl_start;
	lock->fl.fl_end = fl->fl_end;
	lock->fl.fl_type = fl->fl_type;
}

static void nlmclnt_release_lockargs(struct nlm_rqst *req)
{
	WARN_ON_ONCE(req->a_args.lock.fl.fl_ops != NULL);
}

/**
 * nlmclnt_proc - Perform a single client-side lock request
 * @host: address of a valid nlm_host context representing the NLM server
 * @cmd: fcntl-style file lock operation to perform
 * @fl: address of arguments for the lock operation
 * @data: address of data to be sent to callback operations
 *
 * Returns 0 on success, or a negative errno value.
 */
int nlmclnt_proc(struct nlm_host *host, int cmd, struct file_lock *fl, void *data)
{
	struct nlm_rqst		*call;
	int			status;
	const struct nlmclnt_operations *nlmclnt_ops = host->h_nlmclnt_ops;

	call = nlm_alloc_call(host);
	if (call == NULL)
		return -ENOMEM;

	if (nlmclnt_ops && nlmclnt_ops->nlmclnt_alloc_call)
		nlmclnt_ops->nlmclnt_alloc_call(data);

	nlmclnt_locks_init_private(fl, host);
	if (!fl->fl_u.nfs_fl.owner) {
		/* lockowner allocation has failed */
		nlmclnt_release_call(call);
		return -ENOMEM;
	}
	/* Set up the argument struct */
	nlmclnt_setlockargs(call, fl);
	call->a_callback_data = data;

	if (IS_SETLK(cmd) || IS_SETLKW(cmd)) {
		if (fl->fl_type != F_UNLCK) {
			call->a_args.block = IS_SETLKW(cmd) ? 1 : 0;
			status = nlmclnt_lock(call, fl);
		} else
			status = nlmclnt_unlock(call, fl);
	} else if (IS_GETLK(cmd))
		status = nlmclnt_test(call, fl);
	else
		status = -EINVAL;
	fl->fl_ops->fl_release_private(fl);
	fl->fl_ops = NULL;

	dprintk("lockd: clnt proc returns %d\n", status);
	return status;
}
EXPORT_SYMBOL_GPL(nlmclnt_proc);
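
/*
 * Illustrative sketch only, not part of this file: a filesystem-side
 * caller would look roughly like the function below, with the nlm_host
 * obtained at mount time (e.g. via nlmclnt_init()).  The names
 * example_file_lock() and example_get_nlm_host() are made up for this
 * sketch; the real wiring lives in fs/nfs.
 *
 *	static int example_file_lock(struct file *filp, int cmd,
 *				     struct file_lock *fl)
 *	{
 *		struct nlm_host *host = example_get_nlm_host(filp);
 *
 *		// Handles GETLK/SETLK/SETLKW; returns 0 or -errno
 *		return nlmclnt_proc(host, cmd, fl, NULL);
 *	}
 */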

/*
 * Allocate an NLM RPC call struct
 */
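/*
 * Note that on kzalloc() failure we retry every 5 seconds until either
 * the allocation succeeds or a signal is pending, so callers may block
 * here for a long time under memory pressure.
 */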
struct nlm_rqst *nlm_alloc_call(struct nlm_host *host)
{
	struct nlm_rqst	*call;

	for (;;) {
		call = kzalloc(sizeof(*call), GFP_KERNEL);
		if (call != NULL) {
			refcount_set(&call->a_count, 1);
			locks_init_lock(&call->a_args.lock.fl);
			locks_init_lock(&call->a_res.lock.fl);
			call->a_host = nlm_get_host(host);
			return call;
		}
		if (signalled())
			break;
		printk(KERN_WARNING "nlm_alloc_call: failed, waiting for memory\n");
		schedule_timeout_interruptible(5*HZ);
	}
	return NULL;
}

void nlmclnt_release_call(struct nlm_rqst *call)
{
	const struct nlmclnt_operations *nlmclnt_ops = call->a_host->h_nlmclnt_ops;

	if (!refcount_dec_and_test(&call->a_count))
		return;
	if (nlmclnt_ops && nlmclnt_ops->nlmclnt_release_call)
		nlmclnt_ops->nlmclnt_release_call(call->a_callback_data);
	nlmclnt_release_host(call->a_host);
	nlmclnt_release_lockargs(call);
	kfree(call);
}

static void nlmclnt_rpc_release(void *data)
{
	nlmclnt_release_call(data);
}

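/*
 * Sleep interruptibly for up to NLMCLNT_GRACE_WAIT while the server
 * finishes its grace period.  Returns 0 if the caller should retry the
 * call, or -EINTR if a signal arrived while we slept.
 */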
static int nlm_wait_on_grace(wait_queue_head_t *queue)
{
	DEFINE_WAIT(wait);
	int status = -EINTR;

	prepare_to_wait(queue, &wait, TASK_INTERRUPTIBLE);
	if (!signalled()) {
		schedule_timeout(NLMCLNT_GRACE_WAIT);
		try_to_freeze();
		if (!signalled())
			status = 0;
	}
	finish_wait(queue, &wait);
	return status;
}

/*
 * Generic NLM call
 */
static int
nlmclnt_call(const struct cred *cred, struct nlm_rqst *req, u32 proc)
{
	struct nlm_host	*host = req->a_host;
	struct rpc_clnt	*clnt;
	struct nlm_args	*argp = &req->a_args;
	struct nlm_res	*resp = &req->a_res;
	struct rpc_message msg = {
		.rpc_argp	= argp,
		.rpc_resp	= resp,
		.rpc_cred	= cred,
	};
	int		status;

	dprintk("lockd: call procedure %d on %s\n",
			(int)proc, host->h_name);

	do {
		if (host->h_reclaiming && !argp->reclaim)
			goto in_grace_period;

		/* If we have no RPC client yet, create one. */
		clnt = nlm_bind_host(host);
		if (clnt == NULL)
			return -ENOLCK;
		msg.rpc_proc = &clnt->cl_procinfo[proc];

		/* Perform the RPC call. If an error occurs, try again */
		status = rpc_call_sync(clnt, &msg, 0);
		if (status < 0) {
			dprintk("lockd: rpc_call returned error %d\n", -status);
			switch (status) {
			case -EPROTONOSUPPORT:
				status = -EINVAL;
				break;
			case -ECONNREFUSED:
			case -ETIMEDOUT:
			case -ENOTCONN:
				nlm_rebind_host(host);
				status = -EAGAIN;
				break;
			case -ERESTARTSYS:
				return signalled() ? -EINTR : status;
			default:
				break;
			}
			break;
		} else if (resp->status == nlm_lck_denied_grace_period) {
			dprintk("lockd: server in grace period\n");
			if (argp->reclaim) {
				printk(KERN_WARNING
				     "lockd: spurious grace period reject?!\n");
				return -ENOLCK;
			}
		} else {
			if (!argp->reclaim) {
				/* We appear to be out of the grace period */
				wake_up_all(&host->h_gracewait);
			}
			dprintk("lockd: server returns status %d\n",
				ntohl(resp->status));
			return 0;	/* Okay, call complete */
		}

in_grace_period:
		/*
		 * The server has rebooted and appears to be in the grace
		 * period during which locks are only allowed to be
		 * reclaimed.
		 * We can only back off and try again later.
		 */
		status = nlm_wait_on_grace(&host->h_gracewait);
	} while (status == 0);

	return status;
}

/*
 * Generic NLM call, async version.
 */
static struct rpc_task *__nlm_async_call(struct nlm_rqst *req, u32 proc, struct rpc_message *msg, const struct rpc_call_ops *tk_ops)
{
	struct nlm_host	*host = req->a_host;
	struct rpc_clnt	*clnt;
	struct rpc_task_setup task_setup_data = {
		.rpc_message = msg,
		.callback_ops = tk_ops,
		.callback_data = req,
		.flags = RPC_TASK_ASYNC,
	};

	dprintk("lockd: call procedure %d on %s (async)\n",
			(int)proc, host->h_name);

	/* If we have no RPC client yet, create one. */
	clnt = nlm_bind_host(host);
	if (clnt == NULL)
		goto out_err;
	msg->rpc_proc = &clnt->cl_procinfo[proc];
	task_setup_data.rpc_client = clnt;

	/* bootstrap and kick off the async RPC call */
	return rpc_run_task(&task_setup_data);
out_err:
	tk_ops->rpc_release(req);
	return ERR_PTR(-ENOLCK);
}

static int nlm_do_async_call(struct nlm_rqst *req, u32 proc, struct rpc_message *msg, const struct rpc_call_ops *tk_ops)
{
	struct rpc_task *task;

	task = __nlm_async_call(req, proc, msg, tk_ops);
	if (IS_ERR(task))
		return PTR_ERR(task);
	rpc_put_task(task);
	return 0;
}

/*
 * NLM asynchronous call.
 */
int nlm_async_call(struct nlm_rqst *req, u32 proc, const struct rpc_call_ops *tk_ops)
{
	struct rpc_message msg = {
		.rpc_argp	= &req->a_args,
		.rpc_resp	= &req->a_res,
	};
	return nlm_do_async_call(req, proc, &msg, tk_ops);
}

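/*
 * As nlm_async_call(), but used for the *_RES procedures of the
 * "message passing" flavour of the NLM protocol: the nlm_res we
 * already hold is sent as the argument of the call, and no reply body
 * is expected.
 */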
int nlm_async_reply(struct nlm_rqst *req, u32 proc, const struct rpc_call_ops *tk_ops)
{
	struct rpc_message msg = {
		.rpc_argp	= &req->a_res,
	};
	return nlm_do_async_call(req, proc, &msg, tk_ops);
}

/*
 * NLM client asynchronous call.
 *
 * Note that although the calls are asynchronous, and are therefore
 *      guaranteed to complete, we still always attempt to wait for
 *      completion in order to be able to correctly track the lock
 *      state.
 */
static int nlmclnt_async_call(const struct cred *cred, struct nlm_rqst *req, u32 proc, const struct rpc_call_ops *tk_ops)
{
	struct rpc_message msg = {
		.rpc_argp	= &req->a_args,
		.rpc_resp	= &req->a_res,
		.rpc_cred	= cred,
	};
	struct rpc_task *task;
	int err;

	task = __nlm_async_call(req, proc, &msg, tk_ops);
	if (IS_ERR(task))
		return PTR_ERR(task);
	err = rpc_wait_for_completion_task(task);
	rpc_put_task(task);
	return err;
}

/*
 * TEST for the presence of a conflicting lock
 */
static int
nlmclnt_test(struct nlm_rqst *req, struct file_lock *fl)
{
	int	status;

	status = nlmclnt_call(nfs_file_cred(fl->fl_file), req, NLMPROC_TEST);
	if (status < 0)
		goto out;

	switch (req->a_res.status) {
		case nlm_granted:
			fl->fl_type = F_UNLCK;
			break;
		case nlm_lck_denied:
			/*
			 * Report the conflicting lock back to the application.
			 */
			fl->fl_start = req->a_res.lock.fl.fl_start;
			fl->fl_end = req->a_res.lock.fl.fl_end;
			fl->fl_type = req->a_res.lock.fl.fl_type;
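			/*
			 * The conflicting lock is held by a process on
			 * the server, so report its pid negated to make
			 * clear it is not a local pid.
			 */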
			fl->fl_pid = -req->a_res.lock.fl.fl_pid;
			break;
		default:
			status = nlm_stat_to_errno(req->a_res.status);
	}
out:
	trace_nlmclnt_test(&req->a_args.lock,
			   (const struct sockaddr *)&req->a_host->h_addr,
			   req->a_host->h_addrlen, req->a_res.status);
	nlmclnt_release_call(req);
	return status;
}

static void nlmclnt_locks_copy_lock(struct file_lock *new, struct file_lock *fl)
{
	spin_lock(&fl->fl_u.nfs_fl.owner->host->h_lock);
	new->fl_u.nfs_fl.state = fl->fl_u.nfs_fl.state;
	new->fl_u.nfs_fl.owner = nlmclnt_get_lockowner(fl->fl_u.nfs_fl.owner);
	list_add_tail(&new->fl_u.nfs_fl.list, &fl->fl_u.nfs_fl.owner->host->h_granted);
	spin_unlock(&fl->fl_u.nfs_fl.owner->host->h_lock);
}

static void nlmclnt_locks_release_private(struct file_lock *fl)
{
	spin_lock(&fl->fl_u.nfs_fl.owner->host->h_lock);
	list_del(&fl->fl_u.nfs_fl.list);
	spin_unlock(&fl->fl_u.nfs_fl.owner->host->h_lock);
	nlmclnt_put_lockowner(fl->fl_u.nfs_fl.owner);
}

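/*
 * The VFS locks code invokes fl_copy_lock whenever it duplicates one of
 * our file_lock structs (e.g. when splitting a range) and
 * fl_release_private when freeing one, keeping the lockowner refcount
 * and the per-host h_granted list consistent.
 */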
static const struct file_lock_operations nlmclnt_lock_ops = {
	.fl_copy_lock = nlmclnt_locks_copy_lock,
	.fl_release_private = nlmclnt_locks_release_private,
};

static void nlmclnt_locks_init_private(struct file_lock *fl, struct nlm_host *host)
{
	fl->fl_u.nfs_fl.state = 0;
	fl->fl_u.nfs_fl.owner = nlmclnt_find_lockowner(host, fl->fl_owner);
	INIT_LIST_HEAD(&fl->fl_u.nfs_fl.list);
	fl->fl_ops = &nlmclnt_lock_ops;
}

static int do_vfs_lock(struct file_lock *fl)
{
	return locks_lock_file_wait(fl->fl_file, fl);
}

/*
 * LOCK: Try to create a lock
 *
 *			Programmer Harassment Alert
 *
 * When given a blocking lock request in a sync RPC call, the HPUX lockd
 * will faithfully return LCK_BLOCKED but never cares to notify us when
 * the lock could be granted. This way, our local process could hang
 * around forever waiting for the callback.
 *
 *  Solution A:	Implement busy-waiting
 *  Solution B: Use the async version of the call (NLM_LOCK_{MSG,RES})
 *
 * For now I am implementing solution A, because I hate the idea of
 * re-implementing lockd for a third time in two months. The async
 * calls shouldn't be too hard to do, however.
 *
 * This is one of the lovely things about standards in the NFS area:
 * they're so soft and squishy you can't really blame HP for doing this.
 */
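/*
 * Solution A in practice: the loop in nlmclnt_lock() re-sends NLM_LOCK,
 * and nlmclnt_wait() gives a GRANTED callback up to NLMCLNT_POLL_TIMEOUT
 * to arrive before we poll the server again.
 */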
static int
nlmclnt_lock(struct nlm_rqst *req, struct file_lock *fl)
{
	const struct cred *cred = nfs_file_cred(fl->fl_file);
	struct nlm_host	*host = req->a_host;
	struct nlm_res	*resp = &req->a_res;
	struct nlm_wait block;
	unsigned char fl_flags = fl->fl_flags;
	unsigned char fl_type;
	__be32 b_status;
	int status = -ENOLCK;

	if (nsm_monitor(host) < 0)
		goto out;
	req->a_args.state = nsm_local_state;

	fl->fl_flags |= FL_ACCESS;
	status = do_vfs_lock(fl);
	fl->fl_flags = fl_flags;
	if (status < 0)
		goto out;

	nlmclnt_prepare_block(&block, host, fl);
again:
	/*
	 * Initialise resp->status to a valid non-zero value,
	 * since 0 == nlm_lck_granted
	 */
	resp->status = nlm_lck_blocked;

	/*
	 * A GRANTED callback can come at any time -- even before the reply
	 * to the LOCK request arrives, so we queue the wait before
	 * requesting the lock.
	 */
	nlmclnt_queue_block(&block);
	for (;;) {
		/* Reboot protection */
		fl->fl_u.nfs_fl.state = host->h_state;
		status = nlmclnt_call(cred, req, NLMPROC_LOCK);
		if (status < 0)
			break;
		/* Did a reclaimer thread notify us of a server reboot? */
		if (resp->status == nlm_lck_denied_grace_period)
			continue;
		if (resp->status != nlm_lck_blocked)
			break;
		/* Wait on an NLM blocking lock */
		status = nlmclnt_wait(&block, req, NLMCLNT_POLL_TIMEOUT);
		if (status < 0)
			break;
		if (block.b_status != nlm_lck_blocked)
			break;
	}
	b_status = nlmclnt_dequeue_block(&block);
	if (resp->status == nlm_lck_blocked)
		resp->status = b_status;

	/* if we were interrupted while blocking, then cancel the lock request
	 * and exit
	 */
	if (resp->status == nlm_lck_blocked) {
		if (!req->a_args.block)
			goto out_unlock;
		if (nlmclnt_cancel(host, req->a_args.block, fl) == 0)
			goto out;
	}

	if (resp->status == nlm_granted) {
		down_read(&host->h_rwsem);
		/* Check whether or not the server has rebooted */
		if (fl->fl_u.nfs_fl.state != host->h_state) {
			up_read(&host->h_rwsem);
			goto again;
		}
		/* Ensure the resulting lock will get added to granted list */
		fl->fl_flags |= FL_SLEEP;
		if (do_vfs_lock(fl) < 0)
			printk(KERN_WARNING "%s: VFS is out of sync with lock manager!\n", __func__);
		up_read(&host->h_rwsem);
		fl->fl_flags = fl_flags;
		status = 0;
	}
	if (status < 0)
		goto out_unlock;
	/*
	 * EAGAIN doesn't make sense for sleeping locks, and in some
	 * cases NLM_LCK_DENIED is returned for a permanent error.  So
	 * turn it into an ENOLCK.
	 */
	if (resp->status == nlm_lck_denied && (fl_flags & FL_SLEEP))
		status = -ENOLCK;
	else
		status = nlm_stat_to_errno(resp->status);
out:
	trace_nlmclnt_lock(&req->a_args.lock,
			   (const struct sockaddr *)&req->a_host->h_addr,
			   req->a_host->h_addrlen, req->a_res.status);
	nlmclnt_release_call(req);
	return status;
out_unlock:
	/* Fatal error: ensure that we remove the lock altogether */
	trace_nlmclnt_lock(&req->a_args.lock,
			   (const struct sockaddr *)&req->a_host->h_addr,
			   req->a_host->h_addrlen, req->a_res.status);
	dprintk("lockd: lock attempt ended in fatal error.\n"
		"       Attempting to unlock.\n");
	fl_type = fl->fl_type;
	fl->fl_type = F_UNLCK;
	down_read(&host->h_rwsem);
	do_vfs_lock(fl);
	up_read(&host->h_rwsem);
	fl->fl_type = fl_type;
	fl->fl_flags = fl_flags;
	nlmclnt_async_call(cred, req, NLMPROC_UNLOCK, &nlmclnt_unlock_ops);
	return status;
}

/*
 * RECLAIM: Try to reclaim a lock
 */
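/*
 * Called from lockd's per-host reclaimer thread after a server reboot;
 * setting a_args.reclaim tells the server to accept the lock even
 * though it is still in its grace period.
 */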
int
nlmclnt_reclaim(struct nlm_host *host, struct file_lock *fl,
		struct nlm_rqst *req)
{
	int		status;

	memset(req, 0, sizeof(*req));
	locks_init_lock(&req->a_args.lock.fl);
	locks_init_lock(&req->a_res.lock.fl);
	req->a_host  = host;

	/* Set up the argument struct */
	nlmclnt_setlockargs(req, fl);
	req->a_args.reclaim = 1;

	status = nlmclnt_call(nfs_file_cred(fl->fl_file), req, NLMPROC_LOCK);
	if (status >= 0 && req->a_res.status == nlm_granted)
		return 0;

	printk(KERN_WARNING "lockd: failed to reclaim lock for pid %d "
				"(errno %d, status %d)\n", fl->fl_pid,
				status, ntohl(req->a_res.status));

	/*
	 * FIXME: This is a serious failure. We can
	 *
	 *  a.	Ignore the problem
	 *  b.	Send the owning process some signal (Linux doesn't have
	 *	SIGLOST, though...)
	 *  c.	Retry the operation
	 *
	 * Until someone comes up with a simple implementation
	 * for b or c, I'll choose option a.
	 */

	return -ENOLCK;
}

/*
 * UNLOCK: remove an existing lock
 */
static int
nlmclnt_unlock(struct nlm_rqst *req, struct file_lock *fl)
{
	struct nlm_host	*host = req->a_host;
	struct nlm_res	*resp = &req->a_res;
	int status;
	unsigned char fl_flags = fl->fl_flags;

	/*
	 * Note: the server is supposed to either grant us the unlock
	 * request, or to deny it with NLM_LCK_DENIED_GRACE_PERIOD. In either
	 * case, we want to unlock.
	 */
	fl->fl_flags |= FL_EXISTS;
	down_read(&host->h_rwsem);
	status = do_vfs_lock(fl);
	up_read(&host->h_rwsem);
	fl->fl_flags = fl_flags;
	if (status == -ENOENT) {
		status = 0;
		goto out;
	}

	refcount_inc(&req->a_count);
	status = nlmclnt_async_call(nfs_file_cred(fl->fl_file), req,
			NLMPROC_UNLOCK, &nlmclnt_unlock_ops);
	if (status < 0)
		goto out;

	if (resp->status == nlm_granted)
		goto out;

	if (resp->status != nlm_lck_denied_nolocks)
		printk(KERN_WARNING "lockd: unexpected unlock status: %d\n",
			ntohl(resp->status));
	/* What to do now? I'm out of my depth... */
	status = -ENOLCK;
out:
	trace_nlmclnt_unlock(&req->a_args.lock,
			     (const struct sockaddr *)&req->a_host->h_addr,
			     req->a_host->h_addrlen, req->a_res.status);
	nlmclnt_release_call(req);
	return status;
}

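/*
 * rpc_call_prepare hook: give the filesystem, via nlmclnt_operations, a
 * chance to defer sending the UNLOCK, e.g. until I/O under the lock has
 * completed; otherwise start the call immediately.
 */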
static void nlmclnt_unlock_prepare(struct rpc_task *task, void *data)
{
	struct nlm_rqst	*req = data;
	const struct nlmclnt_operations *nlmclnt_ops = req->a_host->h_nlmclnt_ops;
	bool defer_call = false;

	if (nlmclnt_ops && nlmclnt_ops->nlmclnt_unlock_prepare)
		defer_call = nlmclnt_ops->nlmclnt_unlock_prepare(task, req->a_callback_data);

	if (!defer_call)
		rpc_call_start(task);
}

static void nlmclnt_unlock_callback(struct rpc_task *task, void *data)
{
	struct nlm_rqst	*req = data;
	u32 status = ntohl(req->a_res.status);

	if (RPC_SIGNALLED(task))
		goto die;

	if (task->tk_status < 0) {
		dprintk("lockd: unlock failed (err = %d)\n", -task->tk_status);
		switch (task->tk_status) {
		case -EACCES:
		case -EIO:
			goto die;
		default:
			goto retry_rebind;
		}
	}
	if (status == NLM_LCK_DENIED_GRACE_PERIOD) {
		rpc_delay(task, NLMCLNT_GRACE_WAIT);
		goto retry_unlock;
	}
	if (status != NLM_LCK_GRANTED)
		printk(KERN_WARNING "lockd: unexpected unlock status: %d\n", status);
die:
	return;
 retry_rebind:
	nlm_rebind_host(req->a_host);
 retry_unlock:
	rpc_restart_call(task);
}

static const struct rpc_call_ops nlmclnt_unlock_ops = {
	.rpc_call_prepare = nlmclnt_unlock_prepare,
	.rpc_call_done = nlmclnt_unlock_callback,
	.rpc_release = nlmclnt_rpc_release,
};

/*
 * Cancel a blocked lock request.
 * We always use an async RPC call for this in order not to hang a
 * process that has been Ctrl-C'ed.
 */
static int nlmclnt_cancel(struct nlm_host *host, int block, struct file_lock *fl)
{
	struct nlm_rqst	*req;
	int status;

	dprintk("lockd: blocking lock attempt was interrupted by a signal.\n"
		"       Attempting to cancel lock.\n");

	req = nlm_alloc_call(host);
	if (!req)
		return -ENOMEM;
	req->a_flags = RPC_TASK_ASYNC;

	nlmclnt_setlockargs(req, fl);
	req->a_args.block = block;

	refcount_inc(&req->a_count);
	status = nlmclnt_async_call(nfs_file_cred(fl->fl_file), req,
			NLMPROC_CANCEL, &nlmclnt_cancel_ops);
	if (status == 0 && req->a_res.status == nlm_lck_denied)
		status = -ENOLCK;
	nlmclnt_release_call(req);
	return status;
}

static void nlmclnt_cancel_callback(struct rpc_task *task, void *data)
{
	struct nlm_rqst	*req = data;
	u32 status = ntohl(req->a_res.status);

	if (RPC_SIGNALLED(task))
		goto die;

	if (task->tk_status < 0) {
		dprintk("lockd: CANCEL call error %d, retrying.\n",
					task->tk_status);
		goto retry_cancel;
	}

	switch (status) {
	case NLM_LCK_GRANTED:
	case NLM_LCK_DENIED_GRACE_PERIOD:
	case NLM_LCK_DENIED:
		/* Everything's good */
		break;
	case NLM_LCK_DENIED_NOLOCKS:
		dprintk("lockd: CANCEL failed (server has no locks)\n");
		goto retry_cancel;
	default:
		printk(KERN_NOTICE "lockd: weird return %d for CANCEL call\n",
			status);
	}

die:
	return;

retry_cancel:
	/* Don't ever retry more than 3 times */
	if (req->a_retries++ >= NLMCLNT_MAX_RETRIES)
		goto die;
	nlm_rebind_host(req->a_host);
	rpc_restart_call(task);
	rpc_delay(task, 30 * HZ);
}

static const struct rpc_call_ops nlmclnt_cancel_ops = {
	.rpc_call_done = nlmclnt_cancel_callback,
	.rpc_release = nlmclnt_rpc_release,
};

/*
 * Convert an NLM status code to a generic kernel errno
 */
static int
nlm_stat_to_errno(__be32 status)
{
	switch (ntohl(status)) {
	case NLM_LCK_GRANTED:
		return 0;
	case NLM_LCK_DENIED:
		return -EAGAIN;
	case NLM_LCK_DENIED_NOLOCKS:
	case NLM_LCK_DENIED_GRACE_PERIOD:
		return -ENOLCK;
	case NLM_LCK_BLOCKED:
		printk(KERN_NOTICE "lockd: unexpected status NLM_BLOCKED\n");
		return -ENOLCK;
#ifdef CONFIG_LOCKD_V4
	case NLM_DEADLCK:
		return -EDEADLK;
	case NLM_ROFS:
		return -EROFS;
	case NLM_STALE_FH:
		return -ESTALE;
	case NLM_FBIG:
		return -EOVERFLOW;
	case NLM_FAILED:
		return -ENOLCK;
#endif
	}
	printk(KERN_NOTICE "lockd: unexpected server status %d\n",
		 ntohl(status));
	return -ENOLCK;
}