xref: /openbmc/linux/net/sunrpc/clnt.c (revision 600a711c)
1 /*
2  *  linux/net/sunrpc/clnt.c
3  *
4  *  This file contains the high-level RPC interface.
5  *  It is modeled as a finite state machine to support both synchronous
6  *  and asynchronous requests.
7  *
8  *  -	RPC header generation and argument serialization.
9  *  -	Credential refresh.
10  *  -	TCP connect handling.
11  *  -	Retry of operation when it is suspected the operation failed because
12  *	of uid squashing on the server, or when the credentials were stale
13  *	and need to be refreshed, or when a packet was damaged in transit.
14  *	This may be have to be moved to the VFS layer.
15  *
16  *  Copyright (C) 1992,1993 Rick Sladkey <jrs@world.std.com>
17  *  Copyright (C) 1995,1996 Olaf Kirch <okir@monad.swb.de>
18  */
19 
20 
21 #include <linux/module.h>
22 #include <linux/types.h>
23 #include <linux/kallsyms.h>
24 #include <linux/mm.h>
25 #include <linux/namei.h>
26 #include <linux/mount.h>
27 #include <linux/slab.h>
28 #include <linux/utsname.h>
29 #include <linux/workqueue.h>
30 #include <linux/in.h>
31 #include <linux/in6.h>
32 #include <linux/un.h>
33 #include <linux/rcupdate.h>
34 
35 #include <linux/sunrpc/clnt.h>
36 #include <linux/sunrpc/rpc_pipe_fs.h>
37 #include <linux/sunrpc/metrics.h>
38 #include <linux/sunrpc/bc_xprt.h>
39 #include <trace/events/sunrpc.h>
40 
41 #include "sunrpc.h"
42 #include "netns.h"
43 
44 #ifdef RPC_DEBUG
45 # define RPCDBG_FACILITY	RPCDBG_CALL
46 #endif
47 
48 #define dprint_status(t)					\
49 	dprintk("RPC: %5u %s (status %d)\n", t->tk_pid,		\
50 			__func__, t->tk_status)
51 
52 /*
53  * All RPC clients are linked into this list
54  */
55 
56 static DECLARE_WAIT_QUEUE_HEAD(destroy_wait);
57 
58 
59 static void	call_start(struct rpc_task *task);
60 static void	call_reserve(struct rpc_task *task);
61 static void	call_reserveresult(struct rpc_task *task);
62 static void	call_allocate(struct rpc_task *task);
63 static void	call_decode(struct rpc_task *task);
64 static void	call_bind(struct rpc_task *task);
65 static void	call_bind_status(struct rpc_task *task);
66 static void	call_transmit(struct rpc_task *task);
67 #if defined(CONFIG_SUNRPC_BACKCHANNEL)
68 static void	call_bc_transmit(struct rpc_task *task);
69 #endif /* CONFIG_SUNRPC_BACKCHANNEL */
70 static void	call_status(struct rpc_task *task);
71 static void	call_transmit_status(struct rpc_task *task);
72 static void	call_refresh(struct rpc_task *task);
73 static void	call_refreshresult(struct rpc_task *task);
74 static void	call_timeout(struct rpc_task *task);
75 static void	call_connect(struct rpc_task *task);
76 static void	call_connect_status(struct rpc_task *task);
77 
78 static __be32	*rpc_encode_header(struct rpc_task *task);
79 static __be32	*rpc_verify_header(struct rpc_task *task);
80 static int	rpc_ping(struct rpc_clnt *clnt);
81 
82 static void rpc_register_client(struct rpc_clnt *clnt)
83 {
84 	struct net *net = rpc_net_ns(clnt);
85 	struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
86 
87 	spin_lock(&sn->rpc_client_lock);
88 	list_add(&clnt->cl_clients, &sn->all_clients);
89 	spin_unlock(&sn->rpc_client_lock);
90 }
91 
92 static void rpc_unregister_client(struct rpc_clnt *clnt)
93 {
94 	struct net *net = rpc_net_ns(clnt);
95 	struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
96 
97 	spin_lock(&sn->rpc_client_lock);
98 	list_del(&clnt->cl_clients);
99 	spin_unlock(&sn->rpc_client_lock);
100 }
101 
/*
 * Tear down the client's rpc_pipefs directory, if one exists.
 * The auth flavour is given a chance to remove its own pipes first.
 * cl_dentry is always cleared so later pipefs events (and
 * rpc_clnt_skip_event()) see the directory as gone.
 */
static void __rpc_clnt_remove_pipedir(struct rpc_clnt *clnt)
{
	if (clnt->cl_dentry) {
		if (clnt->cl_auth && clnt->cl_auth->au_ops->pipes_destroy)
			clnt->cl_auth->au_ops->pipes_destroy(clnt->cl_auth);
		rpc_remove_client_dir(clnt->cl_dentry);
	}
	clnt->cl_dentry = NULL;
}
111 
112 static void rpc_clnt_remove_pipedir(struct rpc_clnt *clnt)
113 {
114 	struct net *net = rpc_net_ns(clnt);
115 	struct super_block *pipefs_sb;
116 
117 	pipefs_sb = rpc_get_sb_net(net);
118 	if (pipefs_sb) {
119 		__rpc_clnt_remove_pipedir(clnt);
120 		rpc_put_sb_net(net);
121 	}
122 }
123 
/*
 * Create a "clnt%x" directory for this client under the program's
 * pipe directory on the given pipefs superblock.
 *
 * Returns the new dentry on success, NULL when the parent directory
 * lookup fails, or an ERR_PTR on creation failure.  Callers must be
 * prepared for all three outcomes.
 */
static struct dentry *rpc_setup_pipedir_sb(struct super_block *sb,
				    struct rpc_clnt *clnt,
				    const char *dir_name)
{
	/* Global counter: each client directory gets a unique hex suffix. */
	static uint32_t clntid;
	char name[15];
	struct qstr q = { .name = name };
	struct dentry *dir, *dentry;
	int error;

	dir = rpc_d_lookup_sb(sb, dir_name);
	if (dir == NULL)
		return dir;
	for (;;) {
		q.len = snprintf(name, sizeof(name), "clnt%x", (unsigned int)clntid++);
		name[sizeof(name) - 1] = '\0';
		q.hash = full_name_hash(q.name, q.len);
		dentry = rpc_create_client_dir(dir, &q, clnt);
		if (!IS_ERR(dentry))
			break;
		error = PTR_ERR(dentry);
		/* On a name collision, retry with the next counter value. */
		if (error != -EEXIST) {
			printk(KERN_INFO "RPC: Couldn't create pipefs entry"
					" %s/%s, error %d\n",
					dir_name, name, error);
			break;
		}
	}
	dput(dir);
	return dentry;
}
155 
156 static int
157 rpc_setup_pipedir(struct rpc_clnt *clnt, const char *dir_name)
158 {
159 	struct net *net = rpc_net_ns(clnt);
160 	struct super_block *pipefs_sb;
161 	struct dentry *dentry;
162 
163 	clnt->cl_dentry = NULL;
164 	if (dir_name == NULL)
165 		return 0;
166 	pipefs_sb = rpc_get_sb_net(net);
167 	if (!pipefs_sb)
168 		return 0;
169 	dentry = rpc_setup_pipedir_sb(pipefs_sb, clnt, dir_name);
170 	rpc_put_sb_net(net);
171 	if (IS_ERR(dentry))
172 		return PTR_ERR(dentry);
173 	clnt->cl_dentry = dentry;
174 	return 0;
175 }
176 
177 static inline int rpc_clnt_skip_event(struct rpc_clnt *clnt, unsigned long event)
178 {
179 	if (((event == RPC_PIPEFS_MOUNT) && clnt->cl_dentry) ||
180 	    ((event == RPC_PIPEFS_UMOUNT) && !clnt->cl_dentry))
181 		return 1;
182 	return 0;
183 }
184 
/*
 * Apply a single pipefs mount/umount event to one client: (re)create
 * or tear down its pipe directory and the auth flavour's pipes.
 * Returns 0 on success or a negative errno.
 */
static int __rpc_clnt_handle_event(struct rpc_clnt *clnt, unsigned long event,
				   struct super_block *sb)
{
	struct dentry *dentry;
	int err = 0;

	switch (event) {
	case RPC_PIPEFS_MOUNT:
		dentry = rpc_setup_pipedir_sb(sb, clnt,
					      clnt->cl_program->pipe_dir_name);
		/*
		 * NOTE(review): rpc_setup_pipedir_sb() returns NULL when
		 * the parent directory lookup fails, which would trip
		 * this BUG_ON — confirm whether an error return would be
		 * more appropriate here.
		 */
		BUG_ON(dentry == NULL);
		if (IS_ERR(dentry))
			return PTR_ERR(dentry);
		clnt->cl_dentry = dentry;
		if (clnt->cl_auth->au_ops->pipes_create) {
			err = clnt->cl_auth->au_ops->pipes_create(clnt->cl_auth);
			if (err)
				__rpc_clnt_remove_pipedir(clnt);
		}
		break;
	case RPC_PIPEFS_UMOUNT:
		__rpc_clnt_remove_pipedir(clnt);
		break;
	default:
		printk(KERN_ERR "%s: unknown event: %ld\n", __func__, event);
		return -ENOTSUPP;
	}
	return err;
}
214 
/*
 * Deliver a pipefs event to @clnt and all of its ancestors.  Root
 * clients are their own parent (set in rpc_new_client()), which
 * terminates the walk.  Stops on the first error.
 */
static int __rpc_pipefs_event(struct rpc_clnt *clnt, unsigned long event,
				struct super_block *sb)
{
	int error = 0;

	for (;; clnt = clnt->cl_parent) {
		if (!rpc_clnt_skip_event(clnt, event))
			error = __rpc_clnt_handle_event(clnt, event, sb);
		if (error || clnt == clnt->cl_parent)
			break;
	}
	return error;
}
228 
/*
 * Find the next client in @net that still needs to process @event,
 * and return it with an extra reference held (caller must call
 * rpc_release_client()).  Returns NULL when no client remains.
 */
static struct rpc_clnt *rpc_get_client_for_event(struct net *net, int event)
{
	struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
	struct rpc_clnt *clnt;

	spin_lock(&sn->rpc_client_lock);
	list_for_each_entry(clnt, &sn->all_clients, cl_clients) {
		/*
		 * NOTE(review): this break assumes clients without a
		 * pipe_dir_name never precede ones that have one on the
		 * list — confirm against how the list is maintained.
		 */
		if (clnt->cl_program->pipe_dir_name == NULL)
			break;
		if (rpc_clnt_skip_event(clnt, event))
			continue;
		/* Skip clients already in the process of being freed. */
		if (atomic_inc_not_zero(&clnt->cl_count) == 0)
			continue;
		spin_unlock(&sn->rpc_client_lock);
		return clnt;
	}
	spin_unlock(&sn->rpc_client_lock);
	return NULL;
}
248 
249 static int rpc_pipefs_event(struct notifier_block *nb, unsigned long event,
250 			    void *ptr)
251 {
252 	struct super_block *sb = ptr;
253 	struct rpc_clnt *clnt;
254 	int error = 0;
255 
256 	while ((clnt = rpc_get_client_for_event(sb->s_fs_info, event))) {
257 		error = __rpc_pipefs_event(clnt, event, sb);
258 		rpc_release_client(clnt);
259 		if (error)
260 			break;
261 	}
262 	return error;
263 }
264 
/*
 * Notifier connecting RPC clients to rpc_pipefs mount/umount events,
 * so per-client pipe directories can be created or torn down lazily.
 */
static struct notifier_block rpc_clients_block = {
	.notifier_call	= rpc_pipefs_event,
	.priority	= SUNRPC_PIPEFS_RPC_PRIO,
};
269 
/* Start receiving rpc_pipefs mount/umount notifications. */
int rpc_clients_notifier_register(void)
{
	return rpc_pipefs_notifier_register(&rpc_clients_block);
}
274 
275 void rpc_clients_notifier_unregister(void)
276 {
277 	return rpc_pipefs_notifier_unregister(&rpc_clients_block);
278 }
279 
/*
 * Record the local node's name in the client, truncated to
 * UNX_MAXNODENAME bytes.
 *
 * NOTE(review): no NUL terminator is written; cl_nodename appears to
 * be consumed together with cl_nodelen rather than as a C string —
 * confirm before treating it as one.
 */
static void rpc_clnt_set_nodename(struct rpc_clnt *clnt, const char *nodename)
{
	clnt->cl_nodelen = strlen(nodename);
	if (clnt->cl_nodelen > UNX_MAXNODENAME)
		clnt->cl_nodelen = UNX_MAXNODENAME;
	memcpy(clnt->cl_nodename, nodename, clnt->cl_nodelen);
}
287 
/*
 * Allocate and initialise a new RPC client for @args over transport
 * @xprt.  Returns the client (holding one reference) or an ERR_PTR.
 *
 * Ownership: the caller's reference on @xprt is consumed on every
 * path — transferred to the client on success, dropped via xprt_put()
 * on failure (except when @xprt is NULL, in which case there is
 * nothing to drop).
 */
static struct rpc_clnt * rpc_new_client(const struct rpc_create_args *args, struct rpc_xprt *xprt)
{
	const struct rpc_program *program = args->program;
	const struct rpc_version *version;
	struct rpc_clnt		*clnt = NULL;
	struct rpc_auth		*auth;
	int err;

	/* sanity check the name before trying to print it */
	dprintk("RPC:       creating %s client for %s (xprt %p)\n",
			program->name, args->servername, xprt);

	err = rpciod_up();
	if (err)
		goto out_no_rpciod;
	err = -EINVAL;
	if (!xprt)
		goto out_no_xprt;

	if (args->version >= program->nrvers)
		goto out_err;
	version = program->version[args->version];
	if (version == NULL)
		goto out_err;

	err = -ENOMEM;
	clnt = kzalloc(sizeof(*clnt), GFP_KERNEL);
	if (!clnt)
		goto out_err;
	/* Root clients are their own parent; see __rpc_pipefs_event(). */
	clnt->cl_parent = clnt;

	rcu_assign_pointer(clnt->cl_xprt, xprt);
	clnt->cl_procinfo = version->procs;
	clnt->cl_maxproc  = version->nrprocs;
	clnt->cl_protname = program->name;
	/* A caller-supplied program number overrides the program default. */
	clnt->cl_prog     = args->prognumber ? : program->number;
	clnt->cl_vers     = version->number;
	clnt->cl_stats    = program->stats;
	clnt->cl_metrics  = rpc_alloc_iostats(clnt);
	err = -ENOMEM;
	if (clnt->cl_metrics == NULL)
		goto out_no_stats;
	clnt->cl_program  = program;
	INIT_LIST_HEAD(&clnt->cl_tasks);
	spin_lock_init(&clnt->cl_lock);

	/* Unbound transport: resolve the port via rpcbind on first use. */
	if (!xprt_bound(xprt))
		clnt->cl_autobind = 1;

	clnt->cl_timeout = xprt->timeout;
	if (args->timeout != NULL) {
		memcpy(&clnt->cl_timeout_default, args->timeout,
				sizeof(clnt->cl_timeout_default));
		clnt->cl_timeout = &clnt->cl_timeout_default;
	}

	clnt->cl_rtt = &clnt->cl_rtt_default;
	rpc_init_rtt(&clnt->cl_rtt_default, clnt->cl_timeout->to_initval);
	clnt->cl_principal = NULL;
	if (args->client_name) {
		clnt->cl_principal = kstrdup(args->client_name, GFP_KERNEL);
		if (!clnt->cl_principal)
			goto out_no_principal;
	}

	atomic_set(&clnt->cl_count, 1);

	err = rpc_setup_pipedir(clnt, program->pipe_dir_name);
	if (err < 0)
		goto out_no_path;

	auth = rpcauth_create(args->authflavor, clnt);
	if (IS_ERR(auth)) {
		printk(KERN_INFO "RPC: Couldn't create auth handle (flavor %u)\n",
				args->authflavor);
		err = PTR_ERR(auth);
		goto out_no_auth;
	}

	/* save the nodename */
	rpc_clnt_set_nodename(clnt, utsname()->nodename);
	rpc_register_client(clnt);
	return clnt;

	/* Unwind in reverse order of setup; each label undoes one step. */
out_no_auth:
	rpc_clnt_remove_pipedir(clnt);
out_no_path:
	kfree(clnt->cl_principal);
out_no_principal:
	rpc_free_iostats(clnt->cl_metrics);
out_no_stats:
	kfree(clnt);
out_err:
	xprt_put(xprt);
out_no_xprt:
	rpciod_down();
out_no_rpciod:
	return ERR_PTR(err);
}
387 
388 /**
389  * rpc_create - create an RPC client and transport with one call
390  * @args: rpc_clnt create argument structure
391  *
392  * Creates and initializes an RPC transport and an RPC client.
393  *
394  * It can ping the server in order to determine if it is up, and to see if
395  * it supports this program and version.  RPC_CLNT_CREATE_NOPING disables
396  * this behavior so asynchronous tasks can also use rpc_create.
397  */
398 struct rpc_clnt *rpc_create(struct rpc_create_args *args)
399 {
400 	struct rpc_xprt *xprt;
401 	struct rpc_clnt *clnt;
402 	struct xprt_create xprtargs = {
403 		.net = args->net,
404 		.ident = args->protocol,
405 		.srcaddr = args->saddress,
406 		.dstaddr = args->address,
407 		.addrlen = args->addrsize,
408 		.servername = args->servername,
409 		.bc_xprt = args->bc_xprt,
410 	};
411 	char servername[48];
412 
413 	/*
414 	 * If the caller chooses not to specify a hostname, whip
415 	 * up a string representation of the passed-in address.
416 	 */
417 	if (xprtargs.servername == NULL) {
418 		struct sockaddr_un *sun =
419 				(struct sockaddr_un *)args->address;
420 		struct sockaddr_in *sin =
421 				(struct sockaddr_in *)args->address;
422 		struct sockaddr_in6 *sin6 =
423 				(struct sockaddr_in6 *)args->address;
424 
425 		servername[0] = '\0';
426 		switch (args->address->sa_family) {
427 		case AF_LOCAL:
428 			snprintf(servername, sizeof(servername), "%s",
429 				 sun->sun_path);
430 			break;
431 		case AF_INET:
432 			snprintf(servername, sizeof(servername), "%pI4",
433 				 &sin->sin_addr.s_addr);
434 			break;
435 		case AF_INET6:
436 			snprintf(servername, sizeof(servername), "%pI6",
437 				 &sin6->sin6_addr);
438 			break;
439 		default:
440 			/* caller wants default server name, but
441 			 * address family isn't recognized. */
442 			return ERR_PTR(-EINVAL);
443 		}
444 		xprtargs.servername = servername;
445 	}
446 
447 	xprt = xprt_create_transport(&xprtargs);
448 	if (IS_ERR(xprt))
449 		return (struct rpc_clnt *)xprt;
450 
451 	/*
452 	 * By default, kernel RPC client connects from a reserved port.
453 	 * CAP_NET_BIND_SERVICE will not be set for unprivileged requesters,
454 	 * but it is always enabled for rpciod, which handles the connect
455 	 * operation.
456 	 */
457 	xprt->resvport = 1;
458 	if (args->flags & RPC_CLNT_CREATE_NONPRIVPORT)
459 		xprt->resvport = 0;
460 
461 	clnt = rpc_new_client(args, xprt);
462 	if (IS_ERR(clnt))
463 		return clnt;
464 
465 	if (!(args->flags & RPC_CLNT_CREATE_NOPING)) {
466 		int err = rpc_ping(clnt);
467 		if (err != 0) {
468 			rpc_shutdown_client(clnt);
469 			return ERR_PTR(err);
470 		}
471 	}
472 
473 	clnt->cl_softrtry = 1;
474 	if (args->flags & RPC_CLNT_CREATE_HARDRTRY)
475 		clnt->cl_softrtry = 0;
476 
477 	if (args->flags & RPC_CLNT_CREATE_AUTOBIND)
478 		clnt->cl_autobind = 1;
479 	if (args->flags & RPC_CLNT_CREATE_DISCRTRY)
480 		clnt->cl_discrtry = 1;
481 	if (!(args->flags & RPC_CLNT_CREATE_QUIET))
482 		clnt->cl_chatty = 1;
483 
484 	return clnt;
485 }
486 EXPORT_SYMBOL_GPL(rpc_create);
487 
488 /*
489  * This function clones the RPC client structure. It allows us to share the
490  * same transport while varying parameters such as the authentication
491  * flavour.
492  */
struct rpc_clnt *
rpc_clone_client(struct rpc_clnt *clnt)
{
	struct rpc_clnt *new;
	struct rpc_xprt *xprt;
	int err = -ENOMEM;

	/*
	 * kmemdup() copies every field of the parent, including live
	 * pointers (cl_auth, cl_xprt, ...); everything that must be
	 * per-clone is re-initialised or re-referenced below.
	 */
	new = kmemdup(clnt, sizeof(*new), GFP_KERNEL);
	if (!new)
		goto out_no_clnt;
	new->cl_parent = clnt;
	/* Turn off autobind on clones */
	new->cl_autobind = 0;
	INIT_LIST_HEAD(&new->cl_tasks);
	spin_lock_init(&new->cl_lock);
	rpc_init_rtt(&new->cl_rtt_default, clnt->cl_timeout->to_initval);
	new->cl_metrics = rpc_alloc_iostats(clnt);
	if (new->cl_metrics == NULL)
		goto out_no_stats;
	/* The clone needs its own copy of the principal string. */
	if (clnt->cl_principal) {
		new->cl_principal = kstrdup(clnt->cl_principal, GFP_KERNEL);
		if (new->cl_principal == NULL)
			goto out_no_principal;
	}
	/* Share the parent's transport, taking our own reference on it. */
	rcu_read_lock();
	xprt = xprt_get(rcu_dereference(clnt->cl_xprt));
	rcu_read_unlock();
	if (xprt == NULL)
		goto out_no_transport;
	rcu_assign_pointer(new->cl_xprt, xprt);
	atomic_set(&new->cl_count, 1);
	err = rpc_setup_pipedir(new, clnt->cl_program->pipe_dir_name);
	if (err != 0)
		goto out_no_path;
	rpc_clnt_set_nodename(new, utsname()->nodename);
	/* cl_auth was copied by kmemdup; pin it for the clone. */
	if (new->cl_auth)
		atomic_inc(&new->cl_auth->au_count);
	/* The clone holds a reference on its parent until freed. */
	atomic_inc(&clnt->cl_count);
	rpc_register_client(new);
	rpciod_up();
	return new;
out_no_path:
	xprt_put(xprt);
out_no_transport:
	kfree(new->cl_principal);
out_no_principal:
	rpc_free_iostats(new->cl_metrics);
out_no_stats:
	kfree(new);
out_no_clnt:
	dprintk("RPC:       %s: returned error %d\n", __func__, err);
	return ERR_PTR(err);
}
547 
548 /*
549  * Kill all tasks for the given client.
550  * XXX: kill their descendants as well?
551  */
void rpc_killall_tasks(struct rpc_clnt *clnt)
{
	struct rpc_task	*rovr;

	/*
	 * Lockless peek: racy, but only used to skip the work when the
	 * list is already empty; the authoritative walk below runs
	 * under cl_lock.
	 */
	if (list_empty(&clnt->cl_tasks))
		return;
	dprintk("RPC:       killing all tasks for client %p\n", clnt);
	/*
	 * Spin lock all_tasks to prevent changes...
	 */
	spin_lock(&clnt->cl_lock);
	list_for_each_entry(rovr, &clnt->cl_tasks, tk_task) {
		if (!RPC_IS_ACTIVATED(rovr))
			continue;
		/* Mark each task killed exactly once, then force -EIO. */
		if (!(rovr->tk_flags & RPC_TASK_KILLED)) {
			rovr->tk_flags |= RPC_TASK_KILLED;
			rpc_exit(rovr, -EIO);
			/* Sleeping tasks must be woken to notice the kill. */
			if (RPC_IS_QUEUED(rovr))
				rpc_wake_up_queued_task(rovr->tk_waitqueue,
							rovr);
		}
	}
	spin_unlock(&clnt->cl_lock);
}
578 
579 /*
580  * Properly shut down an RPC client, terminating all outstanding
581  * requests.
582  */
void rpc_shutdown_client(struct rpc_clnt *clnt)
{
	/* dprintk_rcu takes its own RCU read lock around the dereference */
	dprintk_rcu("RPC:       shutting down %s client for %s\n",
			clnt->cl_protname,
			rcu_dereference(clnt->cl_xprt)->servername);

	/*
	 * Keep killing tasks until the list drains; rpc_release_client()
	 * wakes destroy_wait when the last task goes away, and the 1s
	 * timeout guards against missed wakeups.
	 */
	while (!list_empty(&clnt->cl_tasks)) {
		rpc_killall_tasks(clnt);
		wait_event_timeout(destroy_wait,
			list_empty(&clnt->cl_tasks), 1*HZ);
	}

	rpc_release_client(clnt);
}
598 
599 /*
600  * Free an RPC client
601  */
/*
 * Final teardown of a client whose refcount has reached zero.
 * Releases the parent reference taken by rpc_clone_client(), then
 * unwinds everything rpc_new_client() set up.
 */
static void
rpc_free_client(struct rpc_clnt *clnt)
{
	dprintk_rcu("RPC:       destroying %s client for %s\n",
			clnt->cl_protname,
			rcu_dereference(clnt->cl_xprt)->servername);
	/* Clones hold a reference on their parent; drop it now. */
	if (clnt->cl_parent != clnt)
		rpc_release_client(clnt->cl_parent);
	rpc_unregister_client(clnt);
	rpc_clnt_remove_pipedir(clnt);
	rpc_free_iostats(clnt->cl_metrics);
	kfree(clnt->cl_principal);
	clnt->cl_metrics = NULL;
	/* No other references remain, so a raw dereference is safe here. */
	xprt_put(rcu_dereference_raw(clnt->cl_xprt));
	rpciod_down();
	kfree(clnt);
}
619 
620 /*
621  * Free an RPC client
622  */
static void
rpc_free_auth(struct rpc_clnt *clnt)
{
	/* No auth to release: free the client immediately. */
	if (clnt->cl_auth == NULL) {
		rpc_free_client(clnt);
		return;
	}

	/*
	 * Note: RPCSEC_GSS may need to send NULL RPC calls in order to
	 *       release remaining GSS contexts. This mechanism ensures
	 *       that it can do so safely.
	 */
	/* Temporarily resurrect the refcount so those calls can run. */
	atomic_inc(&clnt->cl_count);
	rpcauth_release(clnt->cl_auth);
	clnt->cl_auth = NULL;
	/* Drop the temporary reference; free if still the last one. */
	if (atomic_dec_and_test(&clnt->cl_count))
		rpc_free_client(clnt);
}
642 
643 /*
644  * Release reference to the RPC client
645  */
void
rpc_release_client(struct rpc_clnt *clnt)
{
	dprintk("RPC:       rpc_release_client(%p)\n", clnt);

	/* Let rpc_shutdown_client() know the task list may have drained. */
	if (list_empty(&clnt->cl_tasks))
		wake_up(&destroy_wait);
	/* On the final put, release the auth and then the client itself. */
	if (atomic_dec_and_test(&clnt->cl_count))
		rpc_free_auth(clnt);
}
656 
657 /**
658  * rpc_bind_new_program - bind a new RPC program to an existing client
659  * @old: old rpc_client
660  * @program: rpc program to set
661  * @vers: rpc program version
662  *
663  * Clones the rpc client and sets up a new RPC program. This is mainly
664  * of use for enabling different RPC programs to share the same transport.
665  * The Sun NFSv2/v3 ACL protocol can do this.
666  */
667 struct rpc_clnt *rpc_bind_new_program(struct rpc_clnt *old,
668 				      const struct rpc_program *program,
669 				      u32 vers)
670 {
671 	struct rpc_clnt *clnt;
672 	const struct rpc_version *version;
673 	int err;
674 
675 	BUG_ON(vers >= program->nrvers || !program->version[vers]);
676 	version = program->version[vers];
677 	clnt = rpc_clone_client(old);
678 	if (IS_ERR(clnt))
679 		goto out;
680 	clnt->cl_procinfo = version->procs;
681 	clnt->cl_maxproc  = version->nrprocs;
682 	clnt->cl_protname = program->name;
683 	clnt->cl_prog     = program->number;
684 	clnt->cl_vers     = version->number;
685 	clnt->cl_stats    = program->stats;
686 	err = rpc_ping(clnt);
687 	if (err != 0) {
688 		rpc_shutdown_client(clnt);
689 		clnt = ERR_PTR(err);
690 	}
691 out:
692 	return clnt;
693 }
694 EXPORT_SYMBOL_GPL(rpc_bind_new_program);
695 
696 void rpc_task_release_client(struct rpc_task *task)
697 {
698 	struct rpc_clnt *clnt = task->tk_client;
699 
700 	if (clnt != NULL) {
701 		/* Remove from client task list */
702 		spin_lock(&clnt->cl_lock);
703 		list_del(&task->tk_task);
704 		spin_unlock(&clnt->cl_lock);
705 		task->tk_client = NULL;
706 
707 		rpc_release_client(clnt);
708 	}
709 }
710 
/*
 * Attach @task to @clnt: release any previous client, take a
 * reference on the new one, inherit soft-retry/swapper behaviour,
 * and add the task to the client's task list.  No-op when @clnt is
 * NULL.
 */
static
void rpc_task_set_client(struct rpc_task *task, struct rpc_clnt *clnt)
{
	if (clnt != NULL) {
		rpc_task_release_client(task);
		task->tk_client = clnt;
		atomic_inc(&clnt->cl_count);
		if (clnt->cl_softrtry)
			task->tk_flags |= RPC_TASK_SOFT;
		/* Transports used for swap need memalloc-capable tasks. */
		if (sk_memalloc_socks()) {
			struct rpc_xprt *xprt;

			rcu_read_lock();
			xprt = rcu_dereference(clnt->cl_xprt);
			if (xprt->swapper)
				task->tk_flags |= RPC_TASK_SWAPPER;
			rcu_read_unlock();
		}
		/* Add to the client's list of all tasks */
		spin_lock(&clnt->cl_lock);
		list_add_tail(&task->tk_task, &clnt->cl_tasks);
		spin_unlock(&clnt->cl_lock);
	}
}
735 
/*
 * Detach @task from its current client (if any) and attach it to
 * @clnt.  The explicit release matters when @clnt is NULL, since
 * rpc_task_set_client() is a no-op in that case.
 */
void rpc_task_reset_client(struct rpc_task *task, struct rpc_clnt *clnt)
{
	rpc_task_release_client(task);
	rpc_task_set_client(task, clnt);
}
EXPORT_SYMBOL_GPL(rpc_task_reset_client);
742 
743 
744 static void
745 rpc_task_set_rpc_message(struct rpc_task *task, const struct rpc_message *msg)
746 {
747 	if (msg != NULL) {
748 		task->tk_msg.rpc_proc = msg->rpc_proc;
749 		task->tk_msg.rpc_argp = msg->rpc_argp;
750 		task->tk_msg.rpc_resp = msg->rpc_resp;
751 		if (msg->rpc_cred != NULL)
752 			task->tk_msg.rpc_cred = get_rpccred(msg->rpc_cred);
753 	}
754 }
755 
/*
 * Default callback for async RPC calls: intentionally does nothing,
 * so fire-and-forget callers need not supply their own handler.
 */
static void
rpc_default_callback(struct rpc_task *task, void *data)
{
}

/* Ops table used when the caller supplies none (e.g. rpc_call_sync()). */
static const struct rpc_call_ops rpc_default_ops = {
	.rpc_call_done = rpc_default_callback,
};
767 
768 /**
769  * rpc_run_task - Allocate a new RPC task, then run rpc_execute against it
770  * @task_setup_data: pointer to task initialisation data
771  */
struct rpc_task *rpc_run_task(const struct rpc_task_setup *task_setup_data)
{
	struct rpc_task *task;

	task = rpc_new_task(task_setup_data);
	if (IS_ERR(task))
		goto out;

	rpc_task_set_client(task, task_setup_data->rpc_client);
	rpc_task_set_rpc_message(task, task_setup_data->rpc_message);

	/* Callers that set no explicit first action start the FSM. */
	if (task->tk_action == NULL)
		rpc_call_start(task);

	/*
	 * Take an extra reference before running: rpc_execute() may
	 * complete (and drop) the task before we return it, and the
	 * caller must still receive a valid pointer to put.
	 */
	atomic_inc(&task->tk_count);
	rpc_execute(task);
out:
	return task;
}
792 
793 /**
794  * rpc_call_sync - Perform a synchronous RPC call
795  * @clnt: pointer to RPC client
796  * @msg: RPC call parameters
797  * @flags: RPC call flags
798  */
799 int rpc_call_sync(struct rpc_clnt *clnt, const struct rpc_message *msg, int flags)
800 {
801 	struct rpc_task	*task;
802 	struct rpc_task_setup task_setup_data = {
803 		.rpc_client = clnt,
804 		.rpc_message = msg,
805 		.callback_ops = &rpc_default_ops,
806 		.flags = flags,
807 	};
808 	int status;
809 
810 	BUG_ON(flags & RPC_TASK_ASYNC);
811 
812 	task = rpc_run_task(&task_setup_data);
813 	if (IS_ERR(task))
814 		return PTR_ERR(task);
815 	status = task->tk_status;
816 	rpc_put_task(task);
817 	return status;
818 }
819 EXPORT_SYMBOL_GPL(rpc_call_sync);
820 
821 /**
822  * rpc_call_async - Perform an asynchronous RPC call
823  * @clnt: pointer to RPC client
824  * @msg: RPC call parameters
825  * @flags: RPC call flags
826  * @tk_ops: RPC call ops
827  * @data: user call data
828  */
829 int
830 rpc_call_async(struct rpc_clnt *clnt, const struct rpc_message *msg, int flags,
831 	       const struct rpc_call_ops *tk_ops, void *data)
832 {
833 	struct rpc_task	*task;
834 	struct rpc_task_setup task_setup_data = {
835 		.rpc_client = clnt,
836 		.rpc_message = msg,
837 		.callback_ops = tk_ops,
838 		.callback_data = data,
839 		.flags = flags|RPC_TASK_ASYNC,
840 	};
841 
842 	task = rpc_run_task(&task_setup_data);
843 	if (IS_ERR(task))
844 		return PTR_ERR(task);
845 	rpc_put_task(task);
846 	return 0;
847 }
848 EXPORT_SYMBOL_GPL(rpc_call_async);
849 
850 #if defined(CONFIG_SUNRPC_BACKCHANNEL)
851 /**
852  * rpc_run_bc_task - Allocate a new RPC task for backchannel use, then run
853  * rpc_execute against it
854  * @req: RPC request
855  * @tk_ops: RPC call ops
856  */
struct rpc_task *rpc_run_bc_task(struct rpc_rqst *req,
				const struct rpc_call_ops *tk_ops)
{
	struct rpc_task *task;
	struct xdr_buf *xbufp = &req->rq_snd_buf;
	struct rpc_task_setup task_setup_data = {
		.callback_ops = tk_ops,
	};

	dprintk("RPC: rpc_run_bc_task req= %p\n", req);
	/*
	 * Create an rpc_task to send the data
	 */
	task = rpc_new_task(&task_setup_data);
	if (IS_ERR(task)) {
		/* On failure the preallocated backchannel request is ours
		 * to free. */
		xprt_free_bc_request(req);
		goto out;
	}
	task->tk_rqstp = req;

	/*
	 * Set up the xdr_buf length.
	 * This also indicates that the buffer is XDR encoded already.
	 */
	xbufp->len = xbufp->head[0].iov_len + xbufp->page_len +
			xbufp->tail[0].iov_len;

	/* Skip the normal call_start path: go straight to transmit. */
	task->tk_action = call_bc_transmit;
	/* One reference for the caller, one for the running task. */
	atomic_inc(&task->tk_count);
	BUG_ON(atomic_read(&task->tk_count) != 2);
	rpc_execute(task);

out:
	dprintk("RPC: rpc_run_bc_task: task= %p\n", task);
	return task;
}
893 #endif /* CONFIG_SUNRPC_BACKCHANNEL */
894 
/**
 * rpc_call_start - point a task at the first RPC state machine action
 * @task: RPC task to initialise
 */
void
rpc_call_start(struct rpc_task *task)
{
	task->tk_action = call_start;
}
EXPORT_SYMBOL_GPL(rpc_call_start);
901 
902 /**
903  * rpc_peeraddr - extract remote peer address from clnt's xprt
904  * @clnt: RPC client structure
905  * @buf: target buffer
906  * @bufsize: length of target buffer
907  *
908  * Returns the number of bytes that are actually in the stored address.
909  */
910 size_t rpc_peeraddr(struct rpc_clnt *clnt, struct sockaddr *buf, size_t bufsize)
911 {
912 	size_t bytes;
913 	struct rpc_xprt *xprt;
914 
915 	rcu_read_lock();
916 	xprt = rcu_dereference(clnt->cl_xprt);
917 
918 	bytes = xprt->addrlen;
919 	if (bytes > bufsize)
920 		bytes = bufsize;
921 	memcpy(buf, &xprt->addr, bytes);
922 	rcu_read_unlock();
923 
924 	return bytes;
925 }
926 EXPORT_SYMBOL_GPL(rpc_peeraddr);
927 
928 /**
929  * rpc_peeraddr2str - return remote peer address in printable format
930  * @clnt: RPC client structure
931  * @format: address format
932  *
933  * NB: the lifetime of the memory referenced by the returned pointer is
934  * the same as the rpc_xprt itself.  As long as the caller uses this
935  * pointer, it must hold the RCU read lock.
936  */
937 const char *rpc_peeraddr2str(struct rpc_clnt *clnt,
938 			     enum rpc_display_format_t format)
939 {
940 	struct rpc_xprt *xprt;
941 
942 	xprt = rcu_dereference(clnt->cl_xprt);
943 
944 	if (xprt->address_strings[format] != NULL)
945 		return xprt->address_strings[format];
946 	else
947 		return "unprintable";
948 }
949 EXPORT_SYMBOL_GPL(rpc_peeraddr2str);
950 
/*
 * Wildcard source addresses used by rpc_sockname()/rpc_anyaddr().
 * NOTE(review): despite the "_loopback" names, these hold the ANY
 * addresses (INADDR_ANY / IN6ADDR_ANY_INIT) — confirm the intent
 * before relying on the names.
 */
static const struct sockaddr_in rpc_inaddr_loopback = {
	.sin_family		= AF_INET,
	.sin_addr.s_addr	= htonl(INADDR_ANY),
};

static const struct sockaddr_in6 rpc_in6addr_loopback = {
	.sin6_family		= AF_INET6,
	.sin6_addr		= IN6ADDR_ANY_INIT,
};
960 
961 /*
962  * Try a getsockname() on a connected datagram socket.  Using a
963  * connected datagram socket prevents leaving a socket in TIME_WAIT.
964  * This conserves the ephemeral port number space.
965  *
966  * Returns zero and fills in "buf" if successful; otherwise, a
967  * negative errno is returned.
968  */
969 static int rpc_sockname(struct net *net, struct sockaddr *sap, size_t salen,
970 			struct sockaddr *buf, int buflen)
971 {
972 	struct socket *sock;
973 	int err;
974 
975 	err = __sock_create(net, sap->sa_family,
976 				SOCK_DGRAM, IPPROTO_UDP, &sock, 1);
977 	if (err < 0) {
978 		dprintk("RPC:       can't create UDP socket (%d)\n", err);
979 		goto out;
980 	}
981 
982 	switch (sap->sa_family) {
983 	case AF_INET:
984 		err = kernel_bind(sock,
985 				(struct sockaddr *)&rpc_inaddr_loopback,
986 				sizeof(rpc_inaddr_loopback));
987 		break;
988 	case AF_INET6:
989 		err = kernel_bind(sock,
990 				(struct sockaddr *)&rpc_in6addr_loopback,
991 				sizeof(rpc_in6addr_loopback));
992 		break;
993 	default:
994 		err = -EAFNOSUPPORT;
995 		goto out;
996 	}
997 	if (err < 0) {
998 		dprintk("RPC:       can't bind UDP socket (%d)\n", err);
999 		goto out_release;
1000 	}
1001 
1002 	err = kernel_connect(sock, sap, salen, 0);
1003 	if (err < 0) {
1004 		dprintk("RPC:       can't connect UDP socket (%d)\n", err);
1005 		goto out_release;
1006 	}
1007 
1008 	err = kernel_getsockname(sock, buf, &buflen);
1009 	if (err < 0) {
1010 		dprintk("RPC:       getsockname failed (%d)\n", err);
1011 		goto out_release;
1012 	}
1013 
1014 	err = 0;
1015 	if (buf->sa_family == AF_INET6) {
1016 		struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)buf;
1017 		sin6->sin6_scope_id = 0;
1018 	}
1019 	dprintk("RPC:       %s succeeded\n", __func__);
1020 
1021 out_release:
1022 	sock_release(sock);
1023 out:
1024 	return err;
1025 }
1026 
1027 /*
1028  * Scraping a connected socket failed, so we don't have a useable
1029  * local address.  Fallback: generate an address that will prevent
1030  * the server from calling us back.
1031  *
1032  * Returns zero and fills in "buf" if successful; otherwise, a
1033  * negative errno is returned.
1034  */
1035 static int rpc_anyaddr(int family, struct sockaddr *buf, size_t buflen)
1036 {
1037 	switch (family) {
1038 	case AF_INET:
1039 		if (buflen < sizeof(rpc_inaddr_loopback))
1040 			return -EINVAL;
1041 		memcpy(buf, &rpc_inaddr_loopback,
1042 				sizeof(rpc_inaddr_loopback));
1043 		break;
1044 	case AF_INET6:
1045 		if (buflen < sizeof(rpc_in6addr_loopback))
1046 			return -EINVAL;
1047 		memcpy(buf, &rpc_in6addr_loopback,
1048 				sizeof(rpc_in6addr_loopback));
1049 	default:
1050 		dprintk("RPC:       %s: address family not supported\n",
1051 			__func__);
1052 		return -EAFNOSUPPORT;
1053 	}
1054 	dprintk("RPC:       %s: succeeded\n", __func__);
1055 	return 0;
1056 }
1057 
1058 /**
1059  * rpc_localaddr - discover local endpoint address for an RPC client
1060  * @clnt: RPC client structure
1061  * @buf: target buffer
1062  * @buflen: size of target buffer, in bytes
1063  *
1064  * Returns zero and fills in "buf" and "buflen" if successful;
1065  * otherwise, a negative errno is returned.
1066  *
1067  * This works even if the underlying transport is not currently connected,
1068  * or if the upper layer never previously provided a source address.
1069  *
1070  * The result of this function call is transient: multiple calls in
1071  * succession may give different results, depending on how local
1072  * networking configuration changes over time.
1073  */
int rpc_localaddr(struct rpc_clnt *clnt, struct sockaddr *buf, size_t buflen)
{
	struct sockaddr_storage address;
	struct sockaddr *sap = (struct sockaddr *)&address;
	struct rpc_xprt *xprt;
	struct net *net;
	size_t salen;
	int err;

	/* Snapshot the server address and pin the namespace under RCU. */
	rcu_read_lock();
	xprt = rcu_dereference(clnt->cl_xprt);
	salen = xprt->addrlen;
	memcpy(sap, &xprt->addr, salen);
	net = get_net(xprt->xprt_net);
	rcu_read_unlock();

	/* Port is irrelevant for source-address discovery. */
	rpc_set_port(sap, 0);
	err = rpc_sockname(net, sap, salen, buf, buflen);
	put_net(net);
	if (err != 0)
		/* Couldn't discover local address, return ANYADDR */
		return rpc_anyaddr(sap->sa_family, buf, buflen);
	return 0;
}
1098 EXPORT_SYMBOL_GPL(rpc_localaddr);
1099 
1100 void
1101 rpc_setbufsize(struct rpc_clnt *clnt, unsigned int sndsize, unsigned int rcvsize)
1102 {
1103 	struct rpc_xprt *xprt;
1104 
1105 	rcu_read_lock();
1106 	xprt = rcu_dereference(clnt->cl_xprt);
1107 	if (xprt->ops->set_buffer_size)
1108 		xprt->ops->set_buffer_size(xprt, sndsize, rcvsize);
1109 	rcu_read_unlock();
1110 }
1111 EXPORT_SYMBOL_GPL(rpc_setbufsize);
1112 
1113 /**
1114  * rpc_protocol - Get transport protocol number for an RPC client
1115  * @clnt: RPC client to query
1116  *
1117  */
1118 int rpc_protocol(struct rpc_clnt *clnt)
1119 {
1120 	int protocol;
1121 
1122 	rcu_read_lock();
1123 	protocol = rcu_dereference(clnt->cl_xprt)->prot;
1124 	rcu_read_unlock();
1125 	return protocol;
1126 }
1127 EXPORT_SYMBOL_GPL(rpc_protocol);
1128 
1129 /**
1130  * rpc_net_ns - Get the network namespace for this RPC client
1131  * @clnt: RPC client to query
1132  *
1133  */
1134 struct net *rpc_net_ns(struct rpc_clnt *clnt)
1135 {
1136 	struct net *ret;
1137 
1138 	rcu_read_lock();
1139 	ret = rcu_dereference(clnt->cl_xprt)->xprt_net;
1140 	rcu_read_unlock();
1141 	return ret;
1142 }
1143 EXPORT_SYMBOL_GPL(rpc_net_ns);
1144 
1145 /**
1146  * rpc_max_payload - Get maximum payload size for a transport, in bytes
1147  * @clnt: RPC client to query
1148  *
1149  * For stream transports, this is one RPC record fragment (see RFC
1150  * 1831), as we don't support multi-record requests yet.  For datagram
1151  * transports, this is the size of an IP packet minus the IP, UDP, and
1152  * RPC header sizes.
1153  */
1154 size_t rpc_max_payload(struct rpc_clnt *clnt)
1155 {
1156 	size_t ret;
1157 
1158 	rcu_read_lock();
1159 	ret = rcu_dereference(clnt->cl_xprt)->max_payload;
1160 	rcu_read_unlock();
1161 	return ret;
1162 }
1163 EXPORT_SYMBOL_GPL(rpc_max_payload);
1164 
1165 /**
1166  * rpc_force_rebind - force transport to check that remote port is unchanged
1167  * @clnt: client to rebind
1168  *
1169  */
1170 void rpc_force_rebind(struct rpc_clnt *clnt)
1171 {
1172 	if (clnt->cl_autobind) {
1173 		rcu_read_lock();
1174 		xprt_clear_bound(rcu_dereference(clnt->cl_xprt));
1175 		rcu_read_unlock();
1176 	}
1177 }
1178 EXPORT_SYMBOL_GPL(rpc_force_rebind);
1179 
1180 /*
1181  * Restart an (async) RPC call from the call_prepare state.
1182  * Usually called from within the exit handler.
1183  */
1184 int
1185 rpc_restart_call_prepare(struct rpc_task *task)
1186 {
1187 	if (RPC_ASSASSINATED(task))
1188 		return 0;
1189 	task->tk_action = call_start;
1190 	if (task->tk_ops->rpc_call_prepare != NULL)
1191 		task->tk_action = rpc_prepare_task;
1192 	return 1;
1193 }
1194 EXPORT_SYMBOL_GPL(rpc_restart_call_prepare);
1195 
1196 /*
1197  * Restart an (async) RPC call. Usually called from within the
1198  * exit handler.
1199  */
1200 int
1201 rpc_restart_call(struct rpc_task *task)
1202 {
1203 	if (RPC_ASSASSINATED(task))
1204 		return 0;
1205 	task->tk_action = call_start;
1206 	return 1;
1207 }
1208 EXPORT_SYMBOL_GPL(rpc_restart_call);
1209 
1210 #ifdef RPC_DEBUG
1211 static const char *rpc_proc_name(const struct rpc_task *task)
1212 {
1213 	const struct rpc_procinfo *proc = task->tk_msg.rpc_proc;
1214 
1215 	if (proc) {
1216 		if (proc->p_name)
1217 			return proc->p_name;
1218 		else
1219 			return "NULL";
1220 	} else
1221 		return "no proc";
1222 }
1223 #endif
1224 
1225 /*
1226  * 0.  Initial state
1227  *
1228  *     Other FSM states can be visited zero or more times, but
1229  *     this state is visited exactly once for each RPC.
1230  */
1231 static void
1232 call_start(struct rpc_task *task)
1233 {
1234 	struct rpc_clnt	*clnt = task->tk_client;
1235 
1236 	dprintk("RPC: %5u call_start %s%d proc %s (%s)\n", task->tk_pid,
1237 			clnt->cl_protname, clnt->cl_vers,
1238 			rpc_proc_name(task),
1239 			(RPC_IS_ASYNC(task) ? "async" : "sync"));
1240 
1241 	/* Increment call count */
1242 	task->tk_msg.rpc_proc->p_count++;
1243 	clnt->cl_stats->rpccnt++;
1244 	task->tk_action = call_reserve;
1245 }
1246 
1247 /*
1248  * 1.	Reserve an RPC call slot
1249  */
1250 static void
1251 call_reserve(struct rpc_task *task)
1252 {
1253 	dprint_status(task);
1254 
1255 	task->tk_status  = 0;
1256 	task->tk_action  = call_reserveresult;
1257 	xprt_reserve(task);
1258 }
1259 
1260 /*
1261  * 1b.	Grok the result of xprt_reserve()
1262  */
1263 static void
1264 call_reserveresult(struct rpc_task *task)
1265 {
1266 	int status = task->tk_status;
1267 
1268 	dprint_status(task);
1269 
1270 	/*
1271 	 * After a call to xprt_reserve(), we must have either
1272 	 * a request slot or else an error status.
1273 	 */
1274 	task->tk_status = 0;
1275 	if (status >= 0) {
1276 		if (task->tk_rqstp) {
1277 			task->tk_action = call_refresh;
1278 			return;
1279 		}
1280 
1281 		printk(KERN_ERR "%s: status=%d, but no request slot, exiting\n",
1282 				__func__, status);
1283 		rpc_exit(task, -EIO);
1284 		return;
1285 	}
1286 
1287 	/*
1288 	 * Even though there was an error, we may have acquired
1289 	 * a request slot somehow.  Make sure not to leak it.
1290 	 */
1291 	if (task->tk_rqstp) {
1292 		printk(KERN_ERR "%s: status=%d, request allocated anyway\n",
1293 				__func__, status);
1294 		xprt_release(task);
1295 	}
1296 
1297 	switch (status) {
1298 	case -ENOMEM:
1299 		rpc_delay(task, HZ >> 2);
1300 	case -EAGAIN:	/* woken up; retry */
1301 		task->tk_action = call_reserve;
1302 		return;
1303 	case -EIO:	/* probably a shutdown */
1304 		break;
1305 	default:
1306 		printk(KERN_ERR "%s: unrecognized error %d, exiting\n",
1307 				__func__, status);
1308 		break;
1309 	}
1310 	rpc_exit(task, status);
1311 }
1312 
1313 /*
1314  * 2.	Bind and/or refresh the credentials
1315  */
1316 static void
1317 call_refresh(struct rpc_task *task)
1318 {
1319 	dprint_status(task);
1320 
1321 	task->tk_action = call_refreshresult;
1322 	task->tk_status = 0;
1323 	task->tk_client->cl_stats->rpcauthrefresh++;
1324 	rpcauth_refreshcred(task);
1325 }
1326 
1327 /*
1328  * 2a.	Process the results of a credential refresh
1329  */
1330 static void
1331 call_refreshresult(struct rpc_task *task)
1332 {
1333 	int status = task->tk_status;
1334 
1335 	dprint_status(task);
1336 
1337 	task->tk_status = 0;
1338 	task->tk_action = call_refresh;
1339 	switch (status) {
1340 	case 0:
1341 		if (rpcauth_uptodatecred(task))
1342 			task->tk_action = call_allocate;
1343 		return;
1344 	case -ETIMEDOUT:
1345 		rpc_delay(task, 3*HZ);
1346 	case -EAGAIN:
1347 		status = -EACCES;
1348 		if (!task->tk_cred_retry)
1349 			break;
1350 		task->tk_cred_retry--;
1351 		dprintk("RPC: %5u %s: retry refresh creds\n",
1352 				task->tk_pid, __func__);
1353 		return;
1354 	}
1355 	dprintk("RPC: %5u %s: refresh creds failed with error %d\n",
1356 				task->tk_pid, __func__, status);
1357 	rpc_exit(task, status);
1358 }
1359 
1360 /*
1361  * 2b.	Allocate the buffer. For details, see sched.c:rpc_malloc.
1362  *	(Note: buffer memory is freed in xprt_release).
1363  */
1364 static void
1365 call_allocate(struct rpc_task *task)
1366 {
1367 	unsigned int slack = task->tk_rqstp->rq_cred->cr_auth->au_cslack;
1368 	struct rpc_rqst *req = task->tk_rqstp;
1369 	struct rpc_xprt *xprt = task->tk_xprt;
1370 	struct rpc_procinfo *proc = task->tk_msg.rpc_proc;
1371 
1372 	dprint_status(task);
1373 
1374 	task->tk_status = 0;
1375 	task->tk_action = call_bind;
1376 
1377 	if (req->rq_buffer)
1378 		return;
1379 
1380 	if (proc->p_proc != 0) {
1381 		BUG_ON(proc->p_arglen == 0);
1382 		if (proc->p_decode != NULL)
1383 			BUG_ON(proc->p_replen == 0);
1384 	}
1385 
1386 	/*
1387 	 * Calculate the size (in quads) of the RPC call
1388 	 * and reply headers, and convert both values
1389 	 * to byte sizes.
1390 	 */
1391 	req->rq_callsize = RPC_CALLHDRSIZE + (slack << 1) + proc->p_arglen;
1392 	req->rq_callsize <<= 2;
1393 	req->rq_rcvsize = RPC_REPHDRSIZE + slack + proc->p_replen;
1394 	req->rq_rcvsize <<= 2;
1395 
1396 	req->rq_buffer = xprt->ops->buf_alloc(task,
1397 					req->rq_callsize + req->rq_rcvsize);
1398 	if (req->rq_buffer != NULL)
1399 		return;
1400 
1401 	dprintk("RPC: %5u rpc_buffer allocation failed\n", task->tk_pid);
1402 
1403 	if (RPC_IS_ASYNC(task) || !fatal_signal_pending(current)) {
1404 		task->tk_action = call_allocate;
1405 		rpc_delay(task, HZ>>4);
1406 		return;
1407 	}
1408 
1409 	rpc_exit(task, -ERESTARTSYS);
1410 }
1411 
1412 static inline int
1413 rpc_task_need_encode(struct rpc_task *task)
1414 {
1415 	return task->tk_rqstp->rq_snd_buf.len == 0;
1416 }
1417 
1418 static inline void
1419 rpc_task_force_reencode(struct rpc_task *task)
1420 {
1421 	task->tk_rqstp->rq_snd_buf.len = 0;
1422 	task->tk_rqstp->rq_bytes_sent = 0;
1423 }
1424 
1425 static inline void
1426 rpc_xdr_buf_init(struct xdr_buf *buf, void *start, size_t len)
1427 {
1428 	buf->head[0].iov_base = start;
1429 	buf->head[0].iov_len = len;
1430 	buf->tail[0].iov_len = 0;
1431 	buf->page_len = 0;
1432 	buf->flags = 0;
1433 	buf->len = 0;
1434 	buf->buflen = len;
1435 }
1436 
1437 /*
1438  * 3.	Encode arguments of an RPC call
1439  */
1440 static void
1441 rpc_xdr_encode(struct rpc_task *task)
1442 {
1443 	struct rpc_rqst	*req = task->tk_rqstp;
1444 	kxdreproc_t	encode;
1445 	__be32		*p;
1446 
1447 	dprint_status(task);
1448 
1449 	rpc_xdr_buf_init(&req->rq_snd_buf,
1450 			 req->rq_buffer,
1451 			 req->rq_callsize);
1452 	rpc_xdr_buf_init(&req->rq_rcv_buf,
1453 			 (char *)req->rq_buffer + req->rq_callsize,
1454 			 req->rq_rcvsize);
1455 
1456 	p = rpc_encode_header(task);
1457 	if (p == NULL) {
1458 		printk(KERN_INFO "RPC: couldn't encode RPC header, exit EIO\n");
1459 		rpc_exit(task, -EIO);
1460 		return;
1461 	}
1462 
1463 	encode = task->tk_msg.rpc_proc->p_encode;
1464 	if (encode == NULL)
1465 		return;
1466 
1467 	task->tk_status = rpcauth_wrap_req(task, encode, req, p,
1468 			task->tk_msg.rpc_argp);
1469 }
1470 
1471 /*
1472  * 4.	Get the server port number if not yet set
1473  */
1474 static void
1475 call_bind(struct rpc_task *task)
1476 {
1477 	struct rpc_xprt *xprt = task->tk_xprt;
1478 
1479 	dprint_status(task);
1480 
1481 	task->tk_action = call_connect;
1482 	if (!xprt_bound(xprt)) {
1483 		task->tk_action = call_bind_status;
1484 		task->tk_timeout = xprt->bind_timeout;
1485 		xprt->ops->rpcbind(task);
1486 	}
1487 }
1488 
1489 /*
1490  * 4a.	Sort out bind result
1491  */
1492 static void
1493 call_bind_status(struct rpc_task *task)
1494 {
1495 	int status = -EIO;
1496 
1497 	if (task->tk_status >= 0) {
1498 		dprint_status(task);
1499 		task->tk_status = 0;
1500 		task->tk_action = call_connect;
1501 		return;
1502 	}
1503 
1504 	trace_rpc_bind_status(task);
1505 	switch (task->tk_status) {
1506 	case -ENOMEM:
1507 		dprintk("RPC: %5u rpcbind out of memory\n", task->tk_pid);
1508 		rpc_delay(task, HZ >> 2);
1509 		goto retry_timeout;
1510 	case -EACCES:
1511 		dprintk("RPC: %5u remote rpcbind: RPC program/version "
1512 				"unavailable\n", task->tk_pid);
1513 		/* fail immediately if this is an RPC ping */
1514 		if (task->tk_msg.rpc_proc->p_proc == 0) {
1515 			status = -EOPNOTSUPP;
1516 			break;
1517 		}
1518 		if (task->tk_rebind_retry == 0)
1519 			break;
1520 		task->tk_rebind_retry--;
1521 		rpc_delay(task, 3*HZ);
1522 		goto retry_timeout;
1523 	case -ETIMEDOUT:
1524 		dprintk("RPC: %5u rpcbind request timed out\n",
1525 				task->tk_pid);
1526 		goto retry_timeout;
1527 	case -EPFNOSUPPORT:
1528 		/* server doesn't support any rpcbind version we know of */
1529 		dprintk("RPC: %5u unrecognized remote rpcbind service\n",
1530 				task->tk_pid);
1531 		break;
1532 	case -EPROTONOSUPPORT:
1533 		dprintk("RPC: %5u remote rpcbind version unavailable, retrying\n",
1534 				task->tk_pid);
1535 		task->tk_status = 0;
1536 		task->tk_action = call_bind;
1537 		return;
1538 	case -ECONNREFUSED:		/* connection problems */
1539 	case -ECONNRESET:
1540 	case -ENOTCONN:
1541 	case -EHOSTDOWN:
1542 	case -EHOSTUNREACH:
1543 	case -ENETUNREACH:
1544 	case -EPIPE:
1545 		dprintk("RPC: %5u remote rpcbind unreachable: %d\n",
1546 				task->tk_pid, task->tk_status);
1547 		if (!RPC_IS_SOFTCONN(task)) {
1548 			rpc_delay(task, 5*HZ);
1549 			goto retry_timeout;
1550 		}
1551 		status = task->tk_status;
1552 		break;
1553 	default:
1554 		dprintk("RPC: %5u unrecognized rpcbind error (%d)\n",
1555 				task->tk_pid, -task->tk_status);
1556 	}
1557 
1558 	rpc_exit(task, status);
1559 	return;
1560 
1561 retry_timeout:
1562 	task->tk_action = call_timeout;
1563 }
1564 
1565 /*
1566  * 4b.	Connect to the RPC server
1567  */
1568 static void
1569 call_connect(struct rpc_task *task)
1570 {
1571 	struct rpc_xprt *xprt = task->tk_xprt;
1572 
1573 	dprintk("RPC: %5u call_connect xprt %p %s connected\n",
1574 			task->tk_pid, xprt,
1575 			(xprt_connected(xprt) ? "is" : "is not"));
1576 
1577 	task->tk_action = call_transmit;
1578 	if (!xprt_connected(xprt)) {
1579 		task->tk_action = call_connect_status;
1580 		if (task->tk_status < 0)
1581 			return;
1582 		xprt_connect(task);
1583 	}
1584 }
1585 
1586 /*
1587  * 4c.	Sort out connect result
1588  */
1589 static void
1590 call_connect_status(struct rpc_task *task)
1591 {
1592 	struct rpc_clnt *clnt = task->tk_client;
1593 	int status = task->tk_status;
1594 
1595 	dprint_status(task);
1596 
1597 	task->tk_status = 0;
1598 	if (status >= 0 || status == -EAGAIN) {
1599 		clnt->cl_stats->netreconn++;
1600 		task->tk_action = call_transmit;
1601 		return;
1602 	}
1603 
1604 	trace_rpc_connect_status(task, status);
1605 	switch (status) {
1606 		/* if soft mounted, test if we've timed out */
1607 	case -ETIMEDOUT:
1608 		task->tk_action = call_timeout;
1609 		break;
1610 	default:
1611 		rpc_exit(task, -EIO);
1612 	}
1613 }
1614 
1615 /*
1616  * 5.	Transmit the RPC request, and wait for reply
1617  */
1618 static void
1619 call_transmit(struct rpc_task *task)
1620 {
1621 	dprint_status(task);
1622 
1623 	task->tk_action = call_status;
1624 	if (task->tk_status < 0)
1625 		return;
1626 	task->tk_status = xprt_prepare_transmit(task);
1627 	if (task->tk_status != 0)
1628 		return;
1629 	task->tk_action = call_transmit_status;
1630 	/* Encode here so that rpcsec_gss can use correct sequence number. */
1631 	if (rpc_task_need_encode(task)) {
1632 		BUG_ON(task->tk_rqstp->rq_bytes_sent != 0);
1633 		rpc_xdr_encode(task);
1634 		/* Did the encode result in an error condition? */
1635 		if (task->tk_status != 0) {
1636 			/* Was the error nonfatal? */
1637 			if (task->tk_status == -EAGAIN)
1638 				rpc_delay(task, HZ >> 4);
1639 			else
1640 				rpc_exit(task, task->tk_status);
1641 			return;
1642 		}
1643 	}
1644 	xprt_transmit(task);
1645 	if (task->tk_status < 0)
1646 		return;
1647 	/*
1648 	 * On success, ensure that we call xprt_end_transmit() before sleeping
1649 	 * in order to allow access to the socket to other RPC requests.
1650 	 */
1651 	call_transmit_status(task);
1652 	if (rpc_reply_expected(task))
1653 		return;
1654 	task->tk_action = rpc_exit_task;
1655 	rpc_wake_up_queued_task(&task->tk_xprt->pending, task);
1656 }
1657 
1658 /*
1659  * 5a.	Handle cleanup after a transmission
1660  */
1661 static void
1662 call_transmit_status(struct rpc_task *task)
1663 {
1664 	task->tk_action = call_status;
1665 
1666 	/*
1667 	 * Common case: success.  Force the compiler to put this
1668 	 * test first.
1669 	 */
1670 	if (task->tk_status == 0) {
1671 		xprt_end_transmit(task);
1672 		rpc_task_force_reencode(task);
1673 		return;
1674 	}
1675 
1676 	switch (task->tk_status) {
1677 	case -EAGAIN:
1678 		break;
1679 	default:
1680 		dprint_status(task);
1681 		xprt_end_transmit(task);
1682 		rpc_task_force_reencode(task);
1683 		break;
1684 		/*
1685 		 * Special cases: if we've been waiting on the
1686 		 * socket's write_space() callback, or if the
1687 		 * socket just returned a connection error,
1688 		 * then hold onto the transport lock.
1689 		 */
1690 	case -ECONNREFUSED:
1691 	case -EHOSTDOWN:
1692 	case -EHOSTUNREACH:
1693 	case -ENETUNREACH:
1694 		if (RPC_IS_SOFTCONN(task)) {
1695 			xprt_end_transmit(task);
1696 			rpc_exit(task, task->tk_status);
1697 			break;
1698 		}
1699 	case -ECONNRESET:
1700 	case -ENOTCONN:
1701 	case -EPIPE:
1702 		rpc_task_force_reencode(task);
1703 	}
1704 }
1705 
1706 #if defined(CONFIG_SUNRPC_BACKCHANNEL)
1707 /*
1708  * 5b.	Send the backchannel RPC reply.  On error, drop the reply.  In
1709  * addition, disconnect on connectivity errors.
1710  */
1711 static void
1712 call_bc_transmit(struct rpc_task *task)
1713 {
1714 	struct rpc_rqst *req = task->tk_rqstp;
1715 
1716 	BUG_ON(task->tk_status != 0);
1717 	task->tk_status = xprt_prepare_transmit(task);
1718 	if (task->tk_status == -EAGAIN) {
1719 		/*
1720 		 * Could not reserve the transport. Try again after the
1721 		 * transport is released.
1722 		 */
1723 		task->tk_status = 0;
1724 		task->tk_action = call_bc_transmit;
1725 		return;
1726 	}
1727 
1728 	task->tk_action = rpc_exit_task;
1729 	if (task->tk_status < 0) {
1730 		printk(KERN_NOTICE "RPC: Could not send backchannel reply "
1731 			"error: %d\n", task->tk_status);
1732 		return;
1733 	}
1734 
1735 	xprt_transmit(task);
1736 	xprt_end_transmit(task);
1737 	dprint_status(task);
1738 	switch (task->tk_status) {
1739 	case 0:
1740 		/* Success */
1741 		break;
1742 	case -EHOSTDOWN:
1743 	case -EHOSTUNREACH:
1744 	case -ENETUNREACH:
1745 	case -ETIMEDOUT:
1746 		/*
1747 		 * Problem reaching the server.  Disconnect and let the
1748 		 * forechannel reestablish the connection.  The server will
1749 		 * have to retransmit the backchannel request and we'll
1750 		 * reprocess it.  Since these ops are idempotent, there's no
1751 		 * need to cache our reply at this time.
1752 		 */
1753 		printk(KERN_NOTICE "RPC: Could not send backchannel reply "
1754 			"error: %d\n", task->tk_status);
1755 		xprt_conditional_disconnect(task->tk_xprt,
1756 			req->rq_connect_cookie);
1757 		break;
1758 	default:
1759 		/*
1760 		 * We were unable to reply and will have to drop the
1761 		 * request.  The server should reconnect and retransmit.
1762 		 */
1763 		BUG_ON(task->tk_status == -EAGAIN);
1764 		printk(KERN_NOTICE "RPC: Could not send backchannel reply "
1765 			"error: %d\n", task->tk_status);
1766 		break;
1767 	}
1768 	rpc_wake_up_queued_task(&req->rq_xprt->pending, task);
1769 }
1770 #endif /* CONFIG_SUNRPC_BACKCHANNEL */
1771 
1772 /*
1773  * 6.	Sort out the RPC call status
1774  */
1775 static void
1776 call_status(struct rpc_task *task)
1777 {
1778 	struct rpc_clnt	*clnt = task->tk_client;
1779 	struct rpc_rqst	*req = task->tk_rqstp;
1780 	int		status;
1781 
1782 	if (req->rq_reply_bytes_recvd > 0 && !req->rq_bytes_sent)
1783 		task->tk_status = req->rq_reply_bytes_recvd;
1784 
1785 	dprint_status(task);
1786 
1787 	status = task->tk_status;
1788 	if (status >= 0) {
1789 		task->tk_action = call_decode;
1790 		return;
1791 	}
1792 
1793 	trace_rpc_call_status(task);
1794 	task->tk_status = 0;
1795 	switch(status) {
1796 	case -EHOSTDOWN:
1797 	case -EHOSTUNREACH:
1798 	case -ENETUNREACH:
1799 		/*
1800 		 * Delay any retries for 3 seconds, then handle as if it
1801 		 * were a timeout.
1802 		 */
1803 		rpc_delay(task, 3*HZ);
1804 	case -ETIMEDOUT:
1805 		task->tk_action = call_timeout;
1806 		if (task->tk_client->cl_discrtry)
1807 			xprt_conditional_disconnect(task->tk_xprt,
1808 					req->rq_connect_cookie);
1809 		break;
1810 	case -ECONNRESET:
1811 	case -ECONNREFUSED:
1812 		rpc_force_rebind(clnt);
1813 		rpc_delay(task, 3*HZ);
1814 	case -EPIPE:
1815 	case -ENOTCONN:
1816 		task->tk_action = call_bind;
1817 		break;
1818 	case -EAGAIN:
1819 		task->tk_action = call_transmit;
1820 		break;
1821 	case -EIO:
1822 		/* shutdown or soft timeout */
1823 		rpc_exit(task, status);
1824 		break;
1825 	default:
1826 		if (clnt->cl_chatty)
1827 			printk("%s: RPC call returned error %d\n",
1828 			       clnt->cl_protname, -status);
1829 		rpc_exit(task, status);
1830 	}
1831 }
1832 
1833 /*
1834  * 6a.	Handle RPC timeout
1835  * 	We do not release the request slot, so we keep using the
1836  *	same XID for all retransmits.
1837  */
1838 static void
1839 call_timeout(struct rpc_task *task)
1840 {
1841 	struct rpc_clnt	*clnt = task->tk_client;
1842 
1843 	if (xprt_adjust_timeout(task->tk_rqstp) == 0) {
1844 		dprintk("RPC: %5u call_timeout (minor)\n", task->tk_pid);
1845 		goto retry;
1846 	}
1847 
1848 	dprintk("RPC: %5u call_timeout (major)\n", task->tk_pid);
1849 	task->tk_timeouts++;
1850 
1851 	if (RPC_IS_SOFTCONN(task)) {
1852 		rpc_exit(task, -ETIMEDOUT);
1853 		return;
1854 	}
1855 	if (RPC_IS_SOFT(task)) {
1856 		if (clnt->cl_chatty) {
1857 			rcu_read_lock();
1858 			printk(KERN_NOTICE "%s: server %s not responding, timed out\n",
1859 				clnt->cl_protname,
1860 				rcu_dereference(clnt->cl_xprt)->servername);
1861 			rcu_read_unlock();
1862 		}
1863 		if (task->tk_flags & RPC_TASK_TIMEOUT)
1864 			rpc_exit(task, -ETIMEDOUT);
1865 		else
1866 			rpc_exit(task, -EIO);
1867 		return;
1868 	}
1869 
1870 	if (!(task->tk_flags & RPC_CALL_MAJORSEEN)) {
1871 		task->tk_flags |= RPC_CALL_MAJORSEEN;
1872 		if (clnt->cl_chatty) {
1873 			rcu_read_lock();
1874 			printk(KERN_NOTICE "%s: server %s not responding, still trying\n",
1875 			clnt->cl_protname,
1876 			rcu_dereference(clnt->cl_xprt)->servername);
1877 			rcu_read_unlock();
1878 		}
1879 	}
1880 	rpc_force_rebind(clnt);
1881 	/*
1882 	 * Did our request time out due to an RPCSEC_GSS out-of-sequence
1883 	 * event? RFC2203 requires the server to drop all such requests.
1884 	 */
1885 	rpcauth_invalcred(task);
1886 
1887 retry:
1888 	clnt->cl_stats->rpcretrans++;
1889 	task->tk_action = call_bind;
1890 	task->tk_status = 0;
1891 }
1892 
1893 /*
1894  * 7.	Decode the RPC reply
1895  */
1896 static void
1897 call_decode(struct rpc_task *task)
1898 {
1899 	struct rpc_clnt	*clnt = task->tk_client;
1900 	struct rpc_rqst	*req = task->tk_rqstp;
1901 	kxdrdproc_t	decode = task->tk_msg.rpc_proc->p_decode;
1902 	__be32		*p;
1903 
1904 	dprint_status(task);
1905 
1906 	if (task->tk_flags & RPC_CALL_MAJORSEEN) {
1907 		if (clnt->cl_chatty) {
1908 			rcu_read_lock();
1909 			printk(KERN_NOTICE "%s: server %s OK\n",
1910 				clnt->cl_protname,
1911 				rcu_dereference(clnt->cl_xprt)->servername);
1912 			rcu_read_unlock();
1913 		}
1914 		task->tk_flags &= ~RPC_CALL_MAJORSEEN;
1915 	}
1916 
1917 	/*
1918 	 * Ensure that we see all writes made by xprt_complete_rqst()
1919 	 * before it changed req->rq_reply_bytes_recvd.
1920 	 */
1921 	smp_rmb();
1922 	req->rq_rcv_buf.len = req->rq_private_buf.len;
1923 
1924 	/* Check that the softirq receive buffer is valid */
1925 	WARN_ON(memcmp(&req->rq_rcv_buf, &req->rq_private_buf,
1926 				sizeof(req->rq_rcv_buf)) != 0);
1927 
1928 	if (req->rq_rcv_buf.len < 12) {
1929 		if (!RPC_IS_SOFT(task)) {
1930 			task->tk_action = call_bind;
1931 			clnt->cl_stats->rpcretrans++;
1932 			goto out_retry;
1933 		}
1934 		dprintk("RPC:       %s: too small RPC reply size (%d bytes)\n",
1935 				clnt->cl_protname, task->tk_status);
1936 		task->tk_action = call_timeout;
1937 		goto out_retry;
1938 	}
1939 
1940 	p = rpc_verify_header(task);
1941 	if (IS_ERR(p)) {
1942 		if (p == ERR_PTR(-EAGAIN))
1943 			goto out_retry;
1944 		return;
1945 	}
1946 
1947 	task->tk_action = rpc_exit_task;
1948 
1949 	if (decode) {
1950 		task->tk_status = rpcauth_unwrap_resp(task, decode, req, p,
1951 						      task->tk_msg.rpc_resp);
1952 	}
1953 	dprintk("RPC: %5u call_decode result %d\n", task->tk_pid,
1954 			task->tk_status);
1955 	return;
1956 out_retry:
1957 	task->tk_status = 0;
1958 	/* Note: rpc_verify_header() may have freed the RPC slot */
1959 	if (task->tk_rqstp == req) {
1960 		req->rq_reply_bytes_recvd = req->rq_rcv_buf.len = 0;
1961 		if (task->tk_client->cl_discrtry)
1962 			xprt_conditional_disconnect(task->tk_xprt,
1963 					req->rq_connect_cookie);
1964 	}
1965 }
1966 
/*
 * Marshal the fixed part of the RPC call header (RFC 5531 call body)
 * plus the credential, into the request's first send iovec.  Returns a
 * pointer just past the marshalled header, where the procedure's
 * arguments are to be encoded.
 */
static __be32 *
rpc_encode_header(struct rpc_task *task)
{
	struct rpc_clnt *clnt = task->tk_client;
	struct rpc_rqst	*req = task->tk_rqstp;
	__be32		*p = req->rq_svec[0].iov_base;

	/* FIXME: check buffer size? */

	p = xprt_skip_transport_header(task->tk_xprt, p);
	*p++ = req->rq_xid;		/* XID */
	*p++ = htonl(RPC_CALL);		/* CALL */
	*p++ = htonl(RPC_VERSION);	/* RPC version */
	*p++ = htonl(clnt->cl_prog);	/* program number */
	*p++ = htonl(clnt->cl_vers);	/* program version */
	*p++ = htonl(task->tk_msg.rpc_proc->p_proc);	/* procedure */
	p = rpcauth_marshcred(task, p);
	req->rq_slen = xdr_adjust_iovec(&req->rq_svec[0], p);
	return p;
}
1987 
/*
 * Parse and validate the RPC reply header (RFC 5531 reply body).
 * Returns a pointer just past the verified header on RPC_SUCCESS;
 * otherwise returns an ERR_PTR — ERR_PTR(-EAGAIN) when the caller
 * should retry, or another errno after the task has been terminated
 * with rpc_exit().
 */
static __be32 *
rpc_verify_header(struct rpc_task *task)
{
	struct rpc_clnt *clnt = task->tk_client;
	struct kvec *iov = &task->tk_rqstp->rq_rcv_buf.head[0];
	/* Remaining reply length, counted in 32-bit words. */
	int len = task->tk_rqstp->rq_rcv_buf.len >> 2;
	__be32	*p = iov->iov_base;
	u32 n;
	int error = -EACCES;

	if ((task->tk_rqstp->rq_rcv_buf.len & 3) != 0) {
		/* RFC-1014 says that the representation of XDR data must be a
		 * multiple of four bytes
		 * - if it isn't pointer subtraction in the NFS client may give
		 *   undefined results
		 */
		dprintk("RPC: %5u %s: XDR representation not a multiple of"
		       " 4 bytes: 0x%x\n", task->tk_pid, __func__,
		       task->tk_rqstp->rq_rcv_buf.len);
		goto out_eio;
	}
	/* Need at least XID, message direction and reply_stat. */
	if ((len -= 3) < 0)
		goto out_overflow;

	p += 1; /* skip XID */
	if ((n = ntohl(*p++)) != RPC_REPLY) {
		dprintk("RPC: %5u %s: not an RPC reply: %x\n",
			task->tk_pid, __func__, n);
		goto out_garbage;
	}

	/* MSG_DENIED: examine the rejection reason. */
	if ((n = ntohl(*p++)) != RPC_MSG_ACCEPTED) {
		if (--len < 0)
			goto out_overflow;
		switch ((n = ntohl(*p++))) {
		case RPC_AUTH_ERROR:
			break;
		case RPC_MISMATCH:
			dprintk("RPC: %5u %s: RPC call version mismatch!\n",
				task->tk_pid, __func__);
			error = -EPROTONOSUPPORT;
			goto out_err;
		default:
			dprintk("RPC: %5u %s: RPC call rejected, "
				"unknown error: %x\n",
				task->tk_pid, __func__, n);
			goto out_eio;
		}
		/* RPC_AUTH_ERROR: decode the auth_stat that follows. */
		if (--len < 0)
			goto out_overflow;
		switch ((n = ntohl(*p++))) {
		case RPC_AUTH_REJECTEDCRED:
		case RPC_AUTH_REJECTEDVERF:
		case RPCSEC_GSS_CREDPROBLEM:
		case RPCSEC_GSS_CTXPROBLEM:
			if (!task->tk_cred_retry)
				break;
			task->tk_cred_retry--;
			dprintk("RPC: %5u %s: retry stale creds\n",
					task->tk_pid, __func__);
			rpcauth_invalcred(task);
			/* Ensure we obtain a new XID! */
			xprt_release(task);
			task->tk_action = call_reserve;
			goto out_retry;
		case RPC_AUTH_BADCRED:
		case RPC_AUTH_BADVERF:
			/* possibly garbled cred/verf? */
			if (!task->tk_garb_retry)
				break;
			task->tk_garb_retry--;
			dprintk("RPC: %5u %s: retry garbled creds\n",
					task->tk_pid, __func__);
			task->tk_action = call_bind;
			goto out_retry;
		case RPC_AUTH_TOOWEAK:
			rcu_read_lock();
			printk(KERN_NOTICE "RPC: server %s requires stronger "
			       "authentication.\n",
			       rcu_dereference(clnt->cl_xprt)->servername);
			rcu_read_unlock();
			break;
		default:
			dprintk("RPC: %5u %s: unknown auth error: %x\n",
					task->tk_pid, __func__, n);
			error = -EIO;
		}
		dprintk("RPC: %5u %s: call rejected %d\n",
				task->tk_pid, __func__, n);
		goto out_err;
	}
	/* MSG_ACCEPTED: verify the server's verifier. */
	if (!(p = rpcauth_checkverf(task, p))) {
		dprintk("RPC: %5u %s: auth check failed\n",
				task->tk_pid, __func__);
		goto out_garbage;		/* bad verifier, retry */
	}
	len = p - (__be32 *)iov->iov_base - 1;
	if (len < 0)
		goto out_overflow;
	/* Finally, the accept_stat. */
	switch ((n = ntohl(*p++))) {
	case RPC_SUCCESS:
		return p;
	case RPC_PROG_UNAVAIL:
		dprintk_rcu("RPC: %5u %s: program %u is unsupported "
				"by server %s\n", task->tk_pid, __func__,
				(unsigned int)clnt->cl_prog,
				rcu_dereference(clnt->cl_xprt)->servername);
		error = -EPFNOSUPPORT;
		goto out_err;
	case RPC_PROG_MISMATCH:
		dprintk_rcu("RPC: %5u %s: program %u, version %u unsupported "
				"by server %s\n", task->tk_pid, __func__,
				(unsigned int)clnt->cl_prog,
				(unsigned int)clnt->cl_vers,
				rcu_dereference(clnt->cl_xprt)->servername);
		error = -EPROTONOSUPPORT;
		goto out_err;
	case RPC_PROC_UNAVAIL:
		dprintk_rcu("RPC: %5u %s: proc %s unsupported by program %u, "
				"version %u on server %s\n",
				task->tk_pid, __func__,
				rpc_proc_name(task),
				clnt->cl_prog, clnt->cl_vers,
				rcu_dereference(clnt->cl_xprt)->servername);
		error = -EOPNOTSUPP;
		goto out_err;
	case RPC_GARBAGE_ARGS:
		dprintk("RPC: %5u %s: server saw garbage\n",
				task->tk_pid, __func__);
		break;			/* retry */
	default:
		dprintk("RPC: %5u %s: server accept status: %x\n",
				task->tk_pid, __func__, n);
		/* Also retry */
	}

out_garbage:
	clnt->cl_stats->rpcgarbage++;
	if (task->tk_garb_retry) {
		task->tk_garb_retry--;
		dprintk("RPC: %5u %s: retrying\n",
				task->tk_pid, __func__);
		task->tk_action = call_bind;
out_retry:
		return ERR_PTR(-EAGAIN);
	}
out_eio:
	error = -EIO;
out_err:
	rpc_exit(task, error);
	dprintk("RPC: %5u %s: call failed with error %d\n", task->tk_pid,
			__func__, error);
	return ERR_PTR(error);
out_overflow:
	dprintk("RPC: %5u %s: server reply was truncated.\n", task->tk_pid,
			__func__);
	goto out_garbage;
}
2146 
/* Encoder for the NULL procedure: the call carries no arguments. */
static void rpcproc_encode_null(void *rqstp, struct xdr_stream *xdr, void *obj)
{
}
2150 
/* Decoder for the NULL procedure: the reply carries no results. */
static int rpcproc_decode_null(void *rqstp, struct xdr_stream *xdr, void *obj)
{
	return 0;
}
2155 
/* Procedure info for the NULL (procedure 0) call used by rpc_ping()
 * and rpc_call_null(). */
static struct rpc_procinfo rpcproc_null = {
	.p_encode = rpcproc_encode_null,
	.p_decode = rpcproc_decode_null,
};
2160 
/*
 * Issue a synchronous NULL procedure call over @clnt, with AUTH_NULL
 * credentials and the RPC_TASK_SOFT | RPC_TASK_SOFTCONN flags, to test
 * whether the server responds.  Returns 0 on success or a negative
 * errno from rpc_call_sync().
 */
static int rpc_ping(struct rpc_clnt *clnt)
{
	struct rpc_message msg = {
		.rpc_proc = &rpcproc_null,
	};
	int err;
	/* Use the null auth flavor so no real credential is required. */
	msg.rpc_cred = authnull_ops.lookup_cred(NULL, NULL, 0);
	err = rpc_call_sync(clnt, &msg, RPC_TASK_SOFT | RPC_TASK_SOFTCONN);
	put_rpccred(msg.rpc_cred);
	return err;
}
2172 
/*
 * Start an asynchronous NULL procedure call on @clnt using the given
 * credential and task flags.  Returns the rpc_task from rpc_run_task();
 * the caller owns the returned task reference.
 */
struct rpc_task *rpc_call_null(struct rpc_clnt *clnt, struct rpc_cred *cred, int flags)
{
	struct rpc_message msg = {
		.rpc_proc = &rpcproc_null,
		.rpc_cred = cred,
	};
	struct rpc_task_setup task_setup_data = {
		.rpc_client = clnt,
		.rpc_message = &msg,
		.callback_ops = &rpc_default_ops,
		.flags = flags,
	};
	return rpc_run_task(&task_setup_data);
}
EXPORT_SYMBOL_GPL(rpc_call_null);
2188 
2189 #ifdef RPC_DEBUG
/* Print the column header for the task list emitted by rpc_show_tasks(). */
static void rpc_show_header(void)
{
	printk(KERN_INFO "-pid- flgs status -client- --rqstp- "
		"-timeout ---ops--\n");
}
2195 
/* Print one line of debug state for @task (see rpc_show_header()). */
static void rpc_show_task(const struct rpc_clnt *clnt,
			  const struct rpc_task *task)
{
	const char *rpc_waitq = "none";

	/* Only queued tasks have a meaningful wait queue name. */
	if (RPC_IS_QUEUED(task))
		rpc_waitq = rpc_qname(task->tk_waitqueue);

	printk(KERN_INFO "%5u %04x %6d %8p %8p %8ld %8p %sv%u %s a:%ps q:%s\n",
		task->tk_pid, task->tk_flags, task->tk_status,
		clnt, task->tk_rqstp, task->tk_timeout, task->tk_ops,
		clnt->cl_protname, clnt->cl_vers, rpc_proc_name(task),
		task->tk_action, rpc_waitq);
}
2210 
/*
 * Dump every RPC task of every client in network namespace @net to the
 * kernel log (debug builds only).  The header line is printed lazily,
 * only once the first task is found.
 */
void rpc_show_tasks(struct net *net)
{
	struct rpc_clnt *clnt;
	struct rpc_task *task;
	int header = 0;
	struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);

	/* Hold the per-netns client list lock, and each client's lock
	 * while walking its task list. */
	spin_lock(&sn->rpc_client_lock);
	list_for_each_entry(clnt, &sn->all_clients, cl_clients) {
		spin_lock(&clnt->cl_lock);
		list_for_each_entry(task, &clnt->cl_tasks, tk_task) {
			if (!header) {
				rpc_show_header();
				header++;
			}
			rpc_show_task(clnt, task);
		}
		spin_unlock(&clnt->cl_lock);
	}
	spin_unlock(&sn->rpc_client_lock);
}
2232 #endif
2233