1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * linux/net/sunrpc/clnt.c
4 *
5 * This file contains the high-level RPC interface.
6 * It is modeled as a finite state machine to support both synchronous
7 * and asynchronous requests.
8 *
9 * - RPC header generation and argument serialization.
10 * - Credential refresh.
11 * - TCP connect handling.
12 * - Retry of operation when it is suspected the operation failed because
13 * of uid squashing on the server, or when the credentials were stale
14 * and need to be refreshed, or when a packet was damaged in transit.
 *	This may have to be moved to the VFS layer.
16 *
17 * Copyright (C) 1992,1993 Rick Sladkey <jrs@world.std.com>
18 * Copyright (C) 1995,1996 Olaf Kirch <okir@monad.swb.de>
19 */
20
21
22 #include <linux/module.h>
23 #include <linux/types.h>
24 #include <linux/kallsyms.h>
25 #include <linux/mm.h>
26 #include <linux/namei.h>
27 #include <linux/mount.h>
28 #include <linux/slab.h>
29 #include <linux/rcupdate.h>
30 #include <linux/utsname.h>
31 #include <linux/workqueue.h>
32 #include <linux/in.h>
33 #include <linux/in6.h>
34 #include <linux/un.h>
35
36 #include <linux/sunrpc/clnt.h>
37 #include <linux/sunrpc/addr.h>
38 #include <linux/sunrpc/rpc_pipe_fs.h>
39 #include <linux/sunrpc/metrics.h>
40 #include <linux/sunrpc/bc_xprt.h>
41 #include <trace/events/sunrpc.h>
42
43 #include "sunrpc.h"
44 #include "sysfs.h"
45 #include "netns.h"
46
47 #if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
48 # define RPCDBG_FACILITY RPCDBG_CALL
49 #endif
50
/*
 * All RPC clients are linked into this list
 */

/*
 * Woken by rpc_release_client() whenever a client's task list drains;
 * rpc_shutdown_client() sleeps on it while waiting for outstanding tasks.
 */
static DECLARE_WAIT_QUEUE_HEAD(destroy_wait);


/* Steps of the per-request RPC finite state machine (entry: call_start). */
static void	call_start(struct rpc_task *task);
static void	call_reserve(struct rpc_task *task);
static void	call_reserveresult(struct rpc_task *task);
static void	call_allocate(struct rpc_task *task);
static void	call_encode(struct rpc_task *task);
static void	call_decode(struct rpc_task *task);
static void	call_bind(struct rpc_task *task);
static void	call_bind_status(struct rpc_task *task);
static void	call_transmit(struct rpc_task *task);
static void	call_status(struct rpc_task *task);
static void	call_transmit_status(struct rpc_task *task);
static void	call_refresh(struct rpc_task *task);
static void	call_refreshresult(struct rpc_task *task);
static void	call_connect(struct rpc_task *task);
static void	call_connect_status(struct rpc_task *task);

/* RPC call header encode/decode and liveness-check helpers. */
static int	rpc_encode_header(struct rpc_task *task,
				  struct xdr_stream *xdr);
static int	rpc_decode_header(struct rpc_task *task,
				  struct xdr_stream *xdr);
static int	rpc_ping(struct rpc_clnt *clnt);
static int	rpc_ping_noreply(struct rpc_clnt *clnt);
static void	rpc_check_timeout(struct rpc_task *task);
81
/*
 * Add @clnt to the per-network-namespace list of all RPC clients,
 * under the namespace's rpc_client_lock.
 */
static void rpc_register_client(struct rpc_clnt *clnt)
{
	struct net *net = rpc_net_ns(clnt);
	struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);

	spin_lock(&sn->rpc_client_lock);
	list_add(&clnt->cl_clients, &sn->all_clients);
	spin_unlock(&sn->rpc_client_lock);
}

/*
 * Remove @clnt from the per-network-namespace list of all RPC clients.
 */
static void rpc_unregister_client(struct rpc_clnt *clnt)
{
	struct net *net = rpc_net_ns(clnt);
	struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);

	spin_lock(&sn->rpc_client_lock);
	list_del(&clnt->cl_clients);
	spin_unlock(&sn->rpc_client_lock);
}
101
/*
 * Tear down the client's rpc_pipefs directory. Caller must already hold
 * a reference on (or otherwise pin) the pipefs superblock.
 */
static void __rpc_clnt_remove_pipedir(struct rpc_clnt *clnt)
{
	rpc_remove_client_dir(clnt);
}

/*
 * As above, but safe when the pipefs superblock may be gone: only remove
 * the directory if pipefs is still mounted in this netns and the mounted
 * superblock is the one this client created its directory on.
 */
static void rpc_clnt_remove_pipedir(struct rpc_clnt *clnt)
{
	struct net *net = rpc_net_ns(clnt);
	struct super_block *pipefs_sb;

	pipefs_sb = rpc_get_sb_net(net);
	if (pipefs_sb) {
		if (pipefs_sb == clnt->pipefs_sb)
			__rpc_clnt_remove_pipedir(clnt);
		rpc_put_sb_net(net);
	}
}
119
/*
 * Create a uniquely named "clnt%x" directory for @clnt under the
 * program's pipe_dir_name on pipefs superblock @sb.
 *
 * Returns the new dentry, NULL if the parent directory does not exist,
 * or an ERR_PTR. On -EEXIST the clntid counter is simply advanced and
 * the create retried with the next id.
 */
static struct dentry *rpc_setup_pipedir_sb(struct super_block *sb,
		struct rpc_clnt *clnt)
{
	/* Monotonic id shared by all clients; uniqueness of the directory
	 * name is what matters, gaps are fine. */
	static uint32_t clntid;
	const char *dir_name = clnt->cl_program->pipe_dir_name;
	char name[15];
	struct dentry *dir, *dentry;

	dir = rpc_d_lookup_sb(sb, dir_name);
	if (dir == NULL) {
		pr_info("RPC: pipefs directory doesn't exist: %s\n", dir_name);
		return dir;
	}
	for (;;) {
		snprintf(name, sizeof(name), "clnt%x", (unsigned int)clntid++);
		name[sizeof(name) - 1] = '\0';
		dentry = rpc_create_client_dir(dir, name, clnt);
		if (!IS_ERR(dentry))
			break;
		if (dentry == ERR_PTR(-EEXIST))
			continue;
		printk(KERN_INFO "RPC: Couldn't create pipefs entry"
				" %s/%s, error %ld\n",
				dir_name, name, PTR_ERR(dentry));
		break;
	}
	dput(dir);
	return dentry;
}
149
150 static int
rpc_setup_pipedir(struct super_block * pipefs_sb,struct rpc_clnt * clnt)151 rpc_setup_pipedir(struct super_block *pipefs_sb, struct rpc_clnt *clnt)
152 {
153 struct dentry *dentry;
154
155 clnt->pipefs_sb = pipefs_sb;
156
157 if (clnt->cl_program->pipe_dir_name != NULL) {
158 dentry = rpc_setup_pipedir_sb(pipefs_sb, clnt);
159 if (IS_ERR(dentry))
160 return PTR_ERR(dentry);
161 }
162 return 0;
163 }
164
/*
 * Decide whether @clnt can ignore a pipefs mount/umount notification.
 *
 * Returns 1 (skip) when the client has no pipe directory at all, when a
 * MOUNT event finds the directory already created or the client already
 * torn down (cl_count == 0), or when a UMOUNT event finds no directory
 * to remove. Returns 0 when the event must be handled.
 */
static int rpc_clnt_skip_event(struct rpc_clnt *clnt, unsigned long event)
{
	if (clnt->cl_program->pipe_dir_name == NULL)
		return 1;

	switch (event) {
	case RPC_PIPEFS_MOUNT:
		if (clnt->cl_pipedir_objects.pdh_dentry != NULL)
			return 1;
		if (refcount_read(&clnt->cl_count) == 0)
			return 1;
		break;
	case RPC_PIPEFS_UMOUNT:
		if (clnt->cl_pipedir_objects.pdh_dentry == NULL)
			return 1;
		break;
	}
	return 0;
}
184
/*
 * Handle a single pipefs mount/umount event for @clnt on superblock @sb:
 * create the client directory on mount, remove it on umount.
 *
 * Returns 0 on success, -ENOENT/-EPTR errors from directory creation,
 * or -ENOTSUPP for an unrecognized event.
 */
static int __rpc_clnt_handle_event(struct rpc_clnt *clnt, unsigned long event,
				   struct super_block *sb)
{
	struct dentry *dentry;

	switch (event) {
	case RPC_PIPEFS_MOUNT:
		dentry = rpc_setup_pipedir_sb(sb, clnt);
		if (!dentry)
			return -ENOENT;
		if (IS_ERR(dentry))
			return PTR_ERR(dentry);
		break;
	case RPC_PIPEFS_UMOUNT:
		__rpc_clnt_remove_pipedir(clnt);
		break;
	default:
		printk(KERN_ERR "%s: unknown event: %ld\n", __func__, event);
		return -ENOTSUPP;
	}
	return 0;
}
207
/*
 * Propagate a pipefs event up the cl_parent chain, handling it for every
 * client in the chain that cannot skip it. Stops at the root client
 * (one whose parent is itself) or on the first error.
 */
static int __rpc_pipefs_event(struct rpc_clnt *clnt, unsigned long event,
			      struct super_block *sb)
{
	int error = 0;

	for (;; clnt = clnt->cl_parent) {
		if (!rpc_clnt_skip_event(clnt, event))
			error = __rpc_clnt_handle_event(clnt, event, sb);
		if (error || clnt == clnt->cl_parent)
			break;
	}
	return error;
}
221
/*
 * Find the first client in this netns that still needs to process
 * pipefs @event, or NULL when none remain.
 *
 * NOTE(review): no reference is taken on the returned client before the
 * list lock is dropped — presumably the pipefs notifier serializes this
 * against client destruction via rpc_clnt_skip_event()'s cl_count check;
 * confirm before relying on the returned pointer outside that context.
 */
static struct rpc_clnt *rpc_get_client_for_event(struct net *net, int event)
{
	struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
	struct rpc_clnt *clnt;

	spin_lock(&sn->rpc_client_lock);
	list_for_each_entry(clnt, &sn->all_clients, cl_clients) {
		if (rpc_clnt_skip_event(clnt, event))
			continue;
		spin_unlock(&sn->rpc_client_lock);
		return clnt;
	}
	spin_unlock(&sn->rpc_client_lock);
	return NULL;
}
237
/*
 * Pipefs notifier callback: on mount/umount of an rpc_pipefs superblock,
 * re-scan for clients that still need the event handled (handling one
 * client may make more become handleable) until none remain or an
 * error occurs.
 */
static int rpc_pipefs_event(struct notifier_block *nb, unsigned long event,
			    void *ptr)
{
	struct super_block *sb = ptr;
	struct rpc_clnt *clnt;
	int error = 0;

	while ((clnt = rpc_get_client_for_event(sb->s_fs_info, event))) {
		error = __rpc_pipefs_event(clnt, event, sb);
		if (error)
			break;
	}
	return error;
}
252
/* Notifier hooked into rpc_pipefs mount/umount events for all clients. */
static struct notifier_block rpc_clients_block = {
	.notifier_call	= rpc_pipefs_event,
	.priority	= SUNRPC_PIPEFS_RPC_PRIO,
};

/* Register the client-list pipefs notifier (called at module init). */
int rpc_clients_notifier_register(void)
{
	return rpc_pipefs_notifier_register(&rpc_clients_block);
}

/* Unregister the client-list pipefs notifier (called at module exit). */
void rpc_clients_notifier_unregister(void)
{
	return rpc_pipefs_notifier_unregister(&rpc_clients_block);
}
267
/*
 * Swap in a new transport and timeout for @clnt under cl_lock.
 *
 * Publishes @xprt to RCU readers of cl_xprt and returns the old
 * transport; the caller owns the returned reference and must release
 * it (after synchronize_rcu() if readers may still hold it).
 */
static struct rpc_xprt *rpc_clnt_set_transport(struct rpc_clnt *clnt,
		struct rpc_xprt *xprt,
		const struct rpc_timeout *timeout)
{
	struct rpc_xprt *old;

	spin_lock(&clnt->cl_lock);
	old = rcu_dereference_protected(clnt->cl_xprt,
			lockdep_is_held(&clnt->cl_lock));

	clnt->cl_timeout = timeout;
	rcu_assign_pointer(clnt->cl_xprt, xprt);
	spin_unlock(&clnt->cl_lock);

	return old;
}
284
rpc_clnt_set_nodename(struct rpc_clnt * clnt,const char * nodename)285 static void rpc_clnt_set_nodename(struct rpc_clnt *clnt, const char *nodename)
286 {
287 clnt->cl_nodelen = strlcpy(clnt->cl_nodename,
288 nodename, sizeof(clnt->cl_nodename));
289 }
290
/*
 * Hook a new client into the wider kernel: debugfs, pipefs directory,
 * the per-netns client list, and an rpc_auth instance of @pseudoflavor.
 *
 * On auth failure all of the above are unwound. Returns 0 or a
 * negative errno.
 */
static int rpc_client_register(struct rpc_clnt *clnt,
			       rpc_authflavor_t pseudoflavor,
			       const char *client_name)
{
	struct rpc_auth_create_args auth_args = {
		.pseudoflavor = pseudoflavor,
		.target_name = client_name,
	};
	struct rpc_auth *auth;
	struct net *net = rpc_net_ns(clnt);
	struct super_block *pipefs_sb;
	int err;

	rpc_clnt_debugfs_register(clnt);

	/* pipefs may not be mounted; only set up the directory if it is. */
	pipefs_sb = rpc_get_sb_net(net);
	if (pipefs_sb) {
		err = rpc_setup_pipedir(pipefs_sb, clnt);
		if (err)
			goto out;
	}

	rpc_register_client(clnt);
	if (pipefs_sb)
		rpc_put_sb_net(net);

	auth = rpcauth_create(&auth_args, clnt);
	if (IS_ERR(auth)) {
		dprintk("RPC: Couldn't create auth handle (flavor %u)\n",
				pseudoflavor);
		err = PTR_ERR(auth);
		goto err_auth;
	}
	return 0;
err_auth:
	/* Re-pin pipefs before undoing registration and directory. */
	pipefs_sb = rpc_get_sb_net(net);
	rpc_unregister_client(clnt);
	__rpc_clnt_remove_pipedir(clnt);
out:
	if (pipefs_sb)
		rpc_put_sb_net(net);
	rpc_sysfs_client_destroy(clnt);
	rpc_clnt_debugfs_unregister(clnt);
	return err;
}
336
/* Allocator for unique client ids (cl_clid), shared by all clients. */
static DEFINE_IDA(rpc_clids);

/* Release all client-id allocator state (module unload). */
void rpc_cleanup_clids(void)
{
	ida_destroy(&rpc_clids);
}

/* Assign a unique cl_clid to @clnt; returns 0 or a negative errno. */
static int rpc_alloc_clid(struct rpc_clnt *clnt)
{
	int clid;

	clid = ida_alloc(&rpc_clids, GFP_KERNEL);
	if (clid < 0)
		return clid;
	clnt->cl_clid = clid;
	return 0;
}

/* Return @clnt's cl_clid to the allocator. */
static void rpc_free_clid(struct rpc_clnt *clnt)
{
	ida_free(&rpc_clids, clnt->cl_clid);
}
359
/*
 * Allocate and initialize a struct rpc_clnt for @args over transport
 * @xprt / switch @xps, optionally as a child of @parent.
 *
 * Consumes the caller's references on @xps and @xprt in all cases:
 * on success ownership passes to the client (via cl_xpi / cl_xprt),
 * on failure both are released on the unwind path.
 *
 * Returns the new client or an ERR_PTR.
 */
static struct rpc_clnt * rpc_new_client(const struct rpc_create_args *args,
		struct rpc_xprt_switch *xps,
		struct rpc_xprt *xprt,
		struct rpc_clnt *parent)
{
	const struct rpc_program *program = args->program;
	const struct rpc_version *version;
	struct rpc_clnt *clnt = NULL;
	const struct rpc_timeout *timeout;
	const char *nodename = args->nodename;
	int err;

	err = rpciod_up();
	if (err)
		goto out_no_rpciod;

	/* Validate the requested program version before allocating. */
	err = -EINVAL;
	if (args->version >= program->nrvers)
		goto out_err;
	version = program->version[args->version];
	if (version == NULL)
		goto out_err;

	err = -ENOMEM;
	clnt = kzalloc(sizeof(*clnt), GFP_KERNEL);
	if (!clnt)
		goto out_err;
	/* A client with no parent is its own parent (root of the tree). */
	clnt->cl_parent = parent ? : clnt;
	clnt->cl_xprtsec = args->xprtsec;

	err = rpc_alloc_clid(clnt);
	if (err)
		goto out_no_clid;

	clnt->cl_cred	  = get_cred(args->cred);
	clnt->cl_procinfo = version->procs;
	clnt->cl_maxproc  = version->nrprocs;
	clnt->cl_prog     = args->prognumber ? : program->number;
	clnt->cl_vers     = version->number;
	clnt->cl_stats    = args->stats ? : program->stats;
	clnt->cl_metrics  = rpc_alloc_iostats(clnt);
	rpc_init_pipe_dir_head(&clnt->cl_pipedir_objects);
	err = -ENOMEM;
	if (clnt->cl_metrics == NULL)
		goto out_no_stats;
	clnt->cl_program  = program;
	INIT_LIST_HEAD(&clnt->cl_tasks);
	spin_lock_init(&clnt->cl_lock);

	/* Caller-supplied timeout overrides the transport default. */
	timeout = xprt->timeout;
	if (args->timeout != NULL) {
		memcpy(&clnt->cl_timeout_default, args->timeout,
				sizeof(clnt->cl_timeout_default));
		timeout = &clnt->cl_timeout_default;
	}

	rpc_clnt_set_transport(clnt, xprt, timeout);
	xprt->main = true;
	/* The iterator takes its own reference on xps; drop ours now. */
	xprt_iter_init(&clnt->cl_xpi, xps);
	xprt_switch_put(xps);

	clnt->cl_rtt = &clnt->cl_rtt_default;
	rpc_init_rtt(&clnt->cl_rtt_default, clnt->cl_timeout->to_initval);

	refcount_set(&clnt->cl_count, 1);

	if (nodename == NULL)
		nodename = utsname()->nodename;
	/* save the nodename */
	rpc_clnt_set_nodename(clnt, nodename);

	rpc_sysfs_client_setup(clnt, xps, rpc_net_ns(clnt));
	err = rpc_client_register(clnt, args->authflavor, args->client_name);
	if (err)
		goto out_no_path;
	if (parent)
		refcount_inc(&parent->cl_count);

	trace_rpc_clnt_new(clnt, xprt, args);
	return clnt;

out_no_path:
	rpc_free_iostats(clnt->cl_metrics);
out_no_stats:
	put_cred(clnt->cl_cred);
	rpc_free_clid(clnt);
out_no_clid:
	kfree(clnt);
out_err:
	rpciod_down();
out_no_rpciod:
	/* Balances the iterator's reference (or the caller's, if we
	 * failed before xprt_iter_init); the iterator itself is freed
	 * with clnt without xprt_iter_destroy(), so counts stay even. */
	xprt_switch_put(xps);
	xprt_put(xprt);
	trace_rpc_clnt_new_err(program->name, args->servername, err);
	return ERR_PTR(err);
}
456
/*
 * Build an rpc_clnt on an already-created transport @xprt, reusing the
 * back-channel's xprt switch when one exists, then apply the create
 * flags (ping behavior, retry semantics, autobind, etc.).
 *
 * Consumes the caller's reference on @xprt. Returns the client or an
 * ERR_PTR; on ping failure the client is shut down before returning.
 */
static struct rpc_clnt *rpc_create_xprt(struct rpc_create_args *args,
					struct rpc_xprt *xprt)
{
	struct rpc_clnt *clnt = NULL;
	struct rpc_xprt_switch *xps;

	if (args->bc_xprt && args->bc_xprt->xpt_bc_xps) {
		/* Share the back-channel's existing switch. */
		WARN_ON_ONCE(!(args->protocol & XPRT_TRANSPORT_BC));
		xps = args->bc_xprt->xpt_bc_xps;
		xprt_switch_get(xps);
	} else {
		xps = xprt_switch_alloc(xprt, GFP_KERNEL);
		if (xps == NULL) {
			xprt_put(xprt);
			return ERR_PTR(-ENOMEM);
		}
		if (xprt->bc_xprt) {
			/* Publish the new switch on the back-channel too. */
			xprt_switch_get(xps);
			xprt->bc_xprt->xpt_bc_xps = xps;
		}
	}
	clnt = rpc_new_client(args, xps, xprt, NULL);
	if (IS_ERR(clnt))
		return clnt;

	if (!(args->flags & RPC_CLNT_CREATE_NOPING)) {
		/* Verify the server answers a NULL procedure call. */
		int err = rpc_ping(clnt);
		if (err != 0) {
			rpc_shutdown_client(clnt);
			return ERR_PTR(err);
		}
	} else if (args->flags & RPC_CLNT_CREATE_CONNECTED) {
		/* Only require that a connection can be established. */
		int err = rpc_ping_noreply(clnt);
		if (err != 0) {
			rpc_shutdown_client(clnt);
			return ERR_PTR(err);
		}
	}

	clnt->cl_softrtry = 1;
	if (args->flags & (RPC_CLNT_CREATE_HARDRTRY|RPC_CLNT_CREATE_SOFTERR)) {
		clnt->cl_softrtry = 0;
		if (args->flags & RPC_CLNT_CREATE_SOFTERR)
			clnt->cl_softerr = 1;
	}

	if (args->flags & RPC_CLNT_CREATE_AUTOBIND)
		clnt->cl_autobind = 1;
	if (args->flags & RPC_CLNT_CREATE_NO_RETRANS_TIMEOUT)
		clnt->cl_noretranstimeo = 1;
	if (args->flags & RPC_CLNT_CREATE_DISCRTRY)
		clnt->cl_discrtry = 1;
	if (!(args->flags & RPC_CLNT_CREATE_QUIET))
		clnt->cl_chatty = 1;

	return clnt;
}
514
/**
 * rpc_create - create an RPC client and transport with one call
 * @args: rpc_clnt create argument structure
 *
 * Creates and initializes an RPC transport and an RPC client.
 *
 * It can ping the server in order to determine if it is up, and to see if
 * it supports this program and version.  RPC_CLNT_CREATE_NOPING disables
 * this behavior so asynchronous tasks can also use rpc_create.
 *
 * Returns the new client or an ERR_PTR.
 */
struct rpc_clnt *rpc_create(struct rpc_create_args *args)
{
	struct rpc_xprt *xprt;
	struct xprt_create xprtargs = {
		.net = args->net,
		.ident = args->protocol,
		.srcaddr = args->saddress,
		.dstaddr = args->address,
		.addrlen = args->addrsize,
		.servername = args->servername,
		.bc_xprt = args->bc_xprt,
		.xprtsec = args->xprtsec,
		.connect_timeout = args->connect_timeout,
		.reconnect_timeout = args->reconnect_timeout,
	};
	char servername[48];
	struct rpc_clnt *clnt;
	int i;

	if (args->bc_xprt) {
		/* Back-channel: reuse the existing transport if present. */
		WARN_ON_ONCE(!(args->protocol & XPRT_TRANSPORT_BC));
		xprt = args->bc_xprt->xpt_bc_xprt;
		if (xprt) {
			xprt_get(xprt);
			return rpc_create_xprt(args, xprt);
		}
	}

	if (args->flags & RPC_CLNT_CREATE_INFINITE_SLOTS)
		xprtargs.flags |= XPRT_CREATE_INFINITE_SLOTS;
	if (args->flags & RPC_CLNT_CREATE_NO_IDLE_TIMEOUT)
		xprtargs.flags |= XPRT_CREATE_NO_IDLE_TIMEOUT;
	/*
	 * If the caller chooses not to specify a hostname, whip
	 * up a string representation of the passed-in address.
	 */
	if (xprtargs.servername == NULL) {
		struct sockaddr_un *sun =
				(struct sockaddr_un *)args->address;
		struct sockaddr_in *sin =
				(struct sockaddr_in *)args->address;
		struct sockaddr_in6 *sin6 =
				(struct sockaddr_in6 *)args->address;

		servername[0] = '\0';
		switch (args->address->sa_family) {
		case AF_LOCAL:
			/* Abstract socket names start with a NUL byte;
			 * render them as "@name". */
			if (sun->sun_path[0])
				snprintf(servername, sizeof(servername), "%s",
					 sun->sun_path);
			else
				snprintf(servername, sizeof(servername), "@%s",
					 sun->sun_path+1);
			break;
		case AF_INET:
			snprintf(servername, sizeof(servername), "%pI4",
				 &sin->sin_addr.s_addr);
			break;
		case AF_INET6:
			snprintf(servername, sizeof(servername), "%pI6",
				 &sin6->sin6_addr);
			break;
		default:
			/* caller wants default server name, but
			 * address family isn't recognized. */
			return ERR_PTR(-EINVAL);
		}
		xprtargs.servername = servername;
	}

	xprt = xprt_create_transport(&xprtargs);
	if (IS_ERR(xprt))
		return (struct rpc_clnt *)xprt;

	/*
	 * By default, kernel RPC client connects from a reserved port.
	 * CAP_NET_BIND_SERVICE will not be set for unprivileged requesters,
	 * but it is always enabled for rpciod, which handles the connect
	 * operation.
	 */
	xprt->resvport = 1;
	if (args->flags & RPC_CLNT_CREATE_NONPRIVPORT)
		xprt->resvport = 0;
	xprt->reuseport = 0;
	if (args->flags & RPC_CLNT_CREATE_REUSEPORT)
		xprt->reuseport = 1;

	clnt = rpc_create_xprt(args, xprt);
	if (IS_ERR(clnt) || args->nconnect <= 1)
		return clnt;

	/* Bring up the remaining nconnect-1 transports; a failure here
	 * leaves the client usable with fewer connections. */
	for (i = 0; i < args->nconnect - 1; i++) {
		if (rpc_clnt_add_xprt(clnt, &xprtargs, NULL, NULL) < 0)
			break;
	}
	return clnt;
}
EXPORT_SYMBOL_GPL(rpc_create);
623
/*
 * This function clones the RPC client structure. It allows us to share the
 * same transport while varying parameters such as the authentication
 * flavour.
 */
static struct rpc_clnt *__rpc_clone_client(struct rpc_create_args *args,
					   struct rpc_clnt *clnt)
{
	struct rpc_xprt_switch *xps;
	struct rpc_xprt *xprt;
	struct rpc_clnt *new;
	int err;

	err = -ENOMEM;
	/* Pin the parent's transport and switch across the RCU section. */
	rcu_read_lock();
	xprt = xprt_get(rcu_dereference(clnt->cl_xprt));
	xps = xprt_switch_get(rcu_dereference(clnt->cl_xpi.xpi_xpswitch));
	rcu_read_unlock();
	if (xprt == NULL || xps == NULL) {
		xprt_put(xprt);
		xprt_switch_put(xps);
		goto out_err;
	}
	args->servername = xprt->servername;
	args->nodename = clnt->cl_nodename;

	/* rpc_new_client() consumes the xps and xprt references. */
	new = rpc_new_client(args, xps, xprt, clnt);
	if (IS_ERR(new))
		return new;

	/* Turn off autobind on clones */
	new->cl_autobind = 0;
	new->cl_softrtry = clnt->cl_softrtry;
	new->cl_softerr = clnt->cl_softerr;
	new->cl_noretranstimeo = clnt->cl_noretranstimeo;
	new->cl_discrtry = clnt->cl_discrtry;
	new->cl_chatty = clnt->cl_chatty;
	new->cl_principal = clnt->cl_principal;
	new->cl_max_connect = clnt->cl_max_connect;
	return new;

out_err:
	trace_rpc_clnt_clone_err(clnt, err);
	return ERR_PTR(err);
}
669
/**
 * rpc_clone_client - Clone an RPC client structure
 *
 * @clnt: RPC client whose parameters are copied
 *
 * Returns a fresh RPC client or an ERR_PTR.
 */
struct rpc_clnt *rpc_clone_client(struct rpc_clnt *clnt)
{
	struct rpc_create_args args = {
		.program	= clnt->cl_program,
		.prognumber	= clnt->cl_prog,
		.version	= clnt->cl_vers,
		.authflavor	= clnt->cl_auth->au_flavor,
		.cred		= clnt->cl_cred,
		.stats		= clnt->cl_stats,
	};
	return __rpc_clone_client(&args, clnt);
}
EXPORT_SYMBOL_GPL(rpc_clone_client);

/**
 * rpc_clone_client_set_auth - Clone an RPC client structure and set its auth
 *
 * @clnt: RPC client whose parameters are copied
 * @flavor: security flavor for new client
 *
 * Identical to rpc_clone_client() except the clone authenticates with
 * @flavor instead of inheriting the parent's flavor.
 *
 * Returns a fresh RPC client or an ERR_PTR.
 */
struct rpc_clnt *
rpc_clone_client_set_auth(struct rpc_clnt *clnt, rpc_authflavor_t flavor)
{
	struct rpc_create_args args = {
		.program	= clnt->cl_program,
		.prognumber	= clnt->cl_prog,
		.version	= clnt->cl_vers,
		.authflavor	= flavor,
		.cred		= clnt->cl_cred,
		.stats		= clnt->cl_stats,
	};
	return __rpc_clone_client(&args, clnt);
}
EXPORT_SYMBOL_GPL(rpc_clone_client_set_auth);
713
/**
 * rpc_switch_client_transport: switch the RPC transport on the fly
 * @clnt: pointer to a struct rpc_clnt
 * @args: pointer to the new transport arguments
 * @timeout: pointer to the new timeout parameters
 *
 * This function allows the caller to switch the RPC transport for the
 * rpc_clnt structure 'clnt' to allow it to connect to a mirrored NFS
 * server, for instance.  It assumes that the caller has ensured that
 * there are no active RPC tasks by using some form of locking.
 *
 * Returns zero if "clnt" is now using the new xprt.  Otherwise a
 * negative errno is returned, and "clnt" continues to use the old
 * xprt.
 */
int rpc_switch_client_transport(struct rpc_clnt *clnt,
		struct xprt_create *args,
		const struct rpc_timeout *timeout)
{
	const struct rpc_timeout *old_timeo;
	rpc_authflavor_t pseudoflavor;
	struct rpc_xprt_switch *xps, *oldxps;
	struct rpc_xprt *xprt, *old;
	struct rpc_clnt *parent;
	int err;

	/* New transport inherits the client's security policy. */
	args->xprtsec = clnt->cl_xprtsec;
	xprt = xprt_create_transport(args);
	if (IS_ERR(xprt))
		return PTR_ERR(xprt);

	xps = xprt_switch_alloc(xprt, GFP_KERNEL);
	if (xps == NULL) {
		xprt_put(xprt);
		return -ENOMEM;
	}

	pseudoflavor = clnt->cl_auth->au_flavor;

	/* Keep the old timeout/xprt/switch so we can revert on failure. */
	old_timeo = clnt->cl_timeout;
	old = rpc_clnt_set_transport(clnt, xprt, timeout);
	oldxps = xprt_iter_xchg_switch(&clnt->cl_xpi, xps);

	rpc_unregister_client(clnt);
	__rpc_clnt_remove_pipedir(clnt);
	rpc_sysfs_client_destroy(clnt);
	rpc_clnt_debugfs_unregister(clnt);

	/*
	 * A new transport was created.  "clnt" therefore
	 * becomes the root of a new cl_parent tree.  clnt's
	 * children, if it has any, still point to the old xprt.
	 */
	parent = clnt->cl_parent;
	clnt->cl_parent = clnt;

	/*
	 * The old rpc_auth cache cannot be re-used.  GSS
	 * contexts in particular are between a single
	 * client and server.
	 */
	err = rpc_client_register(clnt, pseudoflavor, NULL);
	if (err)
		goto out_revert;

	/* Wait out RCU readers of the old xprt before dropping it. */
	synchronize_rcu();
	if (parent != clnt)
		rpc_release_client(parent);
	xprt_switch_put(oldxps);
	xprt_put(old);
	trace_rpc_clnt_replace_xprt(clnt);
	return 0;

out_revert:
	/* Restore the old switch, transport, timeout and parentage. */
	xps = xprt_iter_xchg_switch(&clnt->cl_xpi, oldxps);
	rpc_clnt_set_transport(clnt, old, old_timeo);
	clnt->cl_parent = parent;
	rpc_client_register(clnt, pseudoflavor, NULL);
	xprt_switch_put(xps);
	xprt_put(xprt);
	trace_rpc_clnt_replace_xprt_err(clnt);
	return err;
}
EXPORT_SYMBOL_GPL(rpc_switch_client_transport);
798
/*
 * Initialize transport iterator @xpi over @clnt's xprt switch using
 * initializer @func. Returns -EAGAIN if the client currently has no
 * switch (e.g. mid-teardown), else 0.
 */
static
int _rpc_clnt_xprt_iter_init(struct rpc_clnt *clnt, struct rpc_xprt_iter *xpi,
			     void func(struct rpc_xprt_iter *xpi,
				       struct rpc_xprt_switch *xps))
{
	struct rpc_xprt_switch *xps;

	rcu_read_lock();
	xps = xprt_switch_get(rcu_dereference(clnt->cl_xpi.xpi_xpswitch));
	rcu_read_unlock();
	if (xps == NULL)
		return -EAGAIN;
	/* The initializer takes its own reference; drop ours. */
	func(xpi, xps);
	xprt_switch_put(xps);
	return 0;
}

/* Iterate over every transport attached to the client. */
static
int rpc_clnt_xprt_iter_init(struct rpc_clnt *clnt, struct rpc_xprt_iter *xpi)
{
	return _rpc_clnt_xprt_iter_init(clnt, xpi, xprt_iter_init_listall);
}

/* Iterate only over transports currently marked offline. */
static
int rpc_clnt_xprt_iter_offline_init(struct rpc_clnt *clnt,
				    struct rpc_xprt_iter *xpi)
{
	return _rpc_clnt_xprt_iter_init(clnt, xpi, xprt_iter_init_listoffline);
}
827
/**
 * rpc_clnt_iterate_for_each_xprt - Apply a function to all transports
 * @clnt: pointer to client
 * @fn: function to apply
 * @data: void pointer to function data
 *
 * Iterates through the list of RPC transports currently attached to the
 * client and applies the function fn(clnt, xprt, data).
 *
 * On error, the iteration stops, and the function returns the error value.
 */
int rpc_clnt_iterate_for_each_xprt(struct rpc_clnt *clnt,
			      int (*fn)(struct rpc_clnt *, struct rpc_xprt *, void *),
			      void *data)
{
	struct rpc_xprt_iter xpi;
	int ret;

	ret = rpc_clnt_xprt_iter_init(clnt, &xpi);
	if (ret)
		return ret;
	for (;;) {
		/* xprt_iter_get_next() returns a referenced xprt. */
		struct rpc_xprt *xprt = xprt_iter_get_next(&xpi);

		if (!xprt)
			break;
		ret = fn(clnt, xprt, data);
		xprt_put(xprt);
		if (ret < 0)
			break;
	}
	xprt_iter_destroy(&xpi);
	return ret;
}
EXPORT_SYMBOL_GPL(rpc_clnt_iterate_for_each_xprt);
863
/*
 * Kill all tasks for the given client.
 * XXX: kill their descendants as well?
 */
void rpc_killall_tasks(struct rpc_clnt *clnt)
{
	struct rpc_task *rovr;


	/* Unlocked peek: cheap early-out when no tasks exist. */
	if (list_empty(&clnt->cl_tasks))
		return;

	/*
	 * Spin lock all_tasks to prevent changes...
	 */
	trace_rpc_clnt_killall(clnt);
	spin_lock(&clnt->cl_lock);
	list_for_each_entry(rovr, &clnt->cl_tasks, tk_task)
		rpc_signal_task(rovr);
	spin_unlock(&clnt->cl_lock);
}
EXPORT_SYMBOL_GPL(rpc_killall_tasks);
886
/**
 * rpc_cancel_tasks - try to cancel a set of RPC tasks
 * @clnt: Pointer to RPC client
 * @error: RPC task error value to set
 * @fnmatch: Pointer to selector function
 * @data: User data
 *
 * Uses @fnmatch to define a set of RPC tasks that are to be cancelled.
 * The argument @error must be a negative error value.
 *
 * Returns the number of tasks for which cancellation was attempted.
 */
unsigned long rpc_cancel_tasks(struct rpc_clnt *clnt, int error,
			       bool (*fnmatch)(const struct rpc_task *,
					       const void *),
			       const void *data)
{
	struct rpc_task *task;
	unsigned long count = 0;

	/* Unlocked peek: cheap early-out when no tasks exist. */
	if (list_empty(&clnt->cl_tasks))
		return 0;
	/*
	 * Spin lock all_tasks to prevent changes...
	 */
	spin_lock(&clnt->cl_lock);
	list_for_each_entry(task, &clnt->cl_tasks, tk_task) {
		if (!RPC_IS_ACTIVATED(task))
			continue;
		if (!fnmatch(task, data))
			continue;
		rpc_task_try_cancel(task, error);
		count++;
	}
	spin_unlock(&clnt->cl_lock);
	return count;
}
EXPORT_SYMBOL_GPL(rpc_cancel_tasks);
923
/*
 * Per-transport callback for rpc_clnt_disconnect(): force-disconnect
 * @xprt if it is currently connected. Always returns 0 so iteration
 * continues over all transports.
 */
static int rpc_clnt_disconnect_xprt(struct rpc_clnt *clnt,
				    struct rpc_xprt *xprt, void *dummy)
{
	if (xprt_connected(xprt))
		xprt_force_disconnect(xprt);
	return 0;
}

/* Force-disconnect every transport attached to @clnt. */
void rpc_clnt_disconnect(struct rpc_clnt *clnt)
{
	rpc_clnt_iterate_for_each_xprt(clnt, rpc_clnt_disconnect_xprt, NULL);
}
EXPORT_SYMBOL_GPL(rpc_clnt_disconnect);
937
/*
 * Properly shut down an RPC client, terminating all outstanding
 * requests.  Repeatedly signals remaining tasks and waits (with a 1s
 * timeout per round) for the task list to drain, then drops the
 * caller's reference.  May sleep; not callable from rpciod context.
 */
void rpc_shutdown_client(struct rpc_clnt *clnt)
{
	might_sleep();

	trace_rpc_clnt_shutdown(clnt);

	while (!list_empty(&clnt->cl_tasks)) {
		rpc_killall_tasks(clnt);
		wait_event_timeout(destroy_wait,
			list_empty(&clnt->cl_tasks), 1*HZ);
	}

	rpc_release_client(clnt);
}
EXPORT_SYMBOL_GPL(rpc_shutdown_client);
957
/*
 * Free an RPC client
 */
static void rpc_free_client_work(struct work_struct *work)
{
	struct rpc_clnt *clnt = container_of(work, struct rpc_clnt, cl_work);

	trace_rpc_clnt_free(clnt);

	/* These might block on processes that might allocate memory,
	 * so they cannot be called in rpciod, so they are handled separately
	 * here.
	 */
	rpc_sysfs_client_destroy(clnt);
	rpc_clnt_debugfs_unregister(clnt);
	rpc_free_clid(clnt);
	rpc_clnt_remove_pipedir(clnt);
	xprt_put(rcu_dereference_raw(clnt->cl_xprt));

	kfree(clnt);
	rpciod_down();
}

/*
 * Drop the parts of @clnt that are safe to release in the current
 * context, then defer the blocking teardown to a workqueue (see
 * rpc_free_client_work).  Returns the parent client whose reference
 * should be released next, or NULL for a root client.
 */
static struct rpc_clnt *
rpc_free_client(struct rpc_clnt *clnt)
{
	struct rpc_clnt *parent = NULL;

	trace_rpc_clnt_release(clnt);
	if (clnt->cl_parent != clnt)
		parent = clnt->cl_parent;
	rpc_unregister_client(clnt);
	rpc_free_iostats(clnt->cl_metrics);
	clnt->cl_metrics = NULL;
	xprt_iter_destroy(&clnt->cl_xpi);
	put_cred(clnt->cl_cred);

	INIT_WORK(&clnt->cl_work, rpc_free_client_work);
	schedule_work(&clnt->cl_work);
	return parent;
}
998
/*
 * Release the client's rpc_auth, then free the client itself once the
 * last reference is gone.  Returns the parent to release next (from
 * rpc_free_client()) or NULL.
 */
static struct rpc_clnt *
rpc_free_auth(struct rpc_clnt *clnt)
{
	/*
	 * Note: RPCSEC_GSS may need to send NULL RPC calls in order to
	 * release remaining GSS contexts. This mechanism ensures
	 * that it can do so safely.
	 */
	if (clnt->cl_auth != NULL) {
		rpcauth_release(clnt->cl_auth);
		clnt->cl_auth = NULL;
	}
	if (refcount_dec_and_test(&clnt->cl_count))
		return rpc_free_client(clnt);
	return NULL;
}
1018
/*
 * Release reference to the RPC client.  Walks up the cl_parent chain:
 * freeing a child (via rpc_free_auth/rpc_free_client) yields its
 * parent, whose reference is then dropped in turn.  Also wakes
 * rpc_shutdown_client() waiters whenever the task list is empty.
 */
void
rpc_release_client(struct rpc_clnt *clnt)
{
	do {
		if (list_empty(&clnt->cl_tasks))
			wake_up(&destroy_wait);
		/* Fast path: drop one ref unless we hold the last one. */
		if (refcount_dec_not_one(&clnt->cl_count))
			break;
		clnt = rpc_free_auth(clnt);
	} while (clnt != NULL);
}
EXPORT_SYMBOL_GPL(rpc_release_client);
1034
/**
 * rpc_bind_new_program - bind a new RPC program to an existing client
 * @old: old rpc_client
 * @program: rpc program to set
 * @vers: rpc program version
 *
 * Clones the rpc client and sets up a new RPC program.  This is mainly
 * of use for enabling different RPC programs to share the same transport.
 * The Sun NFSv2/v3 ACL protocol can do this.
 *
 * Returns the new client or an ERR_PTR; the clone is pinged and torn
 * down again if the server does not answer for @program/@vers.
 */
struct rpc_clnt *rpc_bind_new_program(struct rpc_clnt *old,
				      const struct rpc_program *program,
				      u32 vers)
{
	struct rpc_create_args args = {
		.program	= program,
		.prognumber	= program->number,
		.version	= vers,
		.authflavor	= old->cl_auth->au_flavor,
		.cred		= old->cl_cred,
		.stats		= old->cl_stats,
		.timeout	= old->cl_timeout,
	};
	struct rpc_clnt *clnt;
	int err;

	clnt = __rpc_clone_client(&args, old);
	if (IS_ERR(clnt))
		goto out;
	err = rpc_ping(clnt);
	if (err != 0) {
		rpc_shutdown_client(clnt);
		clnt = ERR_PTR(err);
	}
out:
	return clnt;
}
EXPORT_SYMBOL_GPL(rpc_bind_new_program);
1073
/*
 * Account one queued task against @xprt and against the client's
 * transport switch, then return the (already referenced) transport.
 * The queue-length counters feed round-robin transport selection.
 */
struct rpc_xprt *
rpc_task_get_xprt(struct rpc_clnt *clnt, struct rpc_xprt *xprt)
{
	struct rpc_xprt_switch *xps;

	if (!xprt)
		return NULL;
	rcu_read_lock();
	/* The switch pointer is RCU-protected; bump it under the read lock */
	xps = rcu_dereference(clnt->cl_xpi.xpi_xpswitch);
	atomic_long_inc(&xps->xps_queuelen);
	rcu_read_unlock();
	atomic_long_inc(&xprt->queuelen);

	return xprt;
}
1089
/*
 * Undo the queue-length accounting done by rpc_task_get_xprt() and
 * drop the task's reference to @xprt.
 */
static void
rpc_task_release_xprt(struct rpc_clnt *clnt, struct rpc_xprt *xprt)
{
	struct rpc_xprt_switch *xps;

	atomic_long_dec(&xprt->queuelen);
	rcu_read_lock();
	xps = rcu_dereference(clnt->cl_xpi.xpi_xpswitch);
	atomic_long_dec(&xps->xps_queuelen);
	rcu_read_unlock();

	xprt_put(xprt);
}
1103
rpc_task_release_transport(struct rpc_task * task)1104 void rpc_task_release_transport(struct rpc_task *task)
1105 {
1106 struct rpc_xprt *xprt = task->tk_xprt;
1107
1108 if (xprt) {
1109 task->tk_xprt = NULL;
1110 if (task->tk_client)
1111 rpc_task_release_xprt(task->tk_client, xprt);
1112 else
1113 xprt_put(xprt);
1114 }
1115 }
1116 EXPORT_SYMBOL_GPL(rpc_task_release_transport);
1117
rpc_task_release_client(struct rpc_task * task)1118 void rpc_task_release_client(struct rpc_task *task)
1119 {
1120 struct rpc_clnt *clnt = task->tk_client;
1121
1122 rpc_task_release_transport(task);
1123 if (clnt != NULL) {
1124 /* Remove from client task list */
1125 spin_lock(&clnt->cl_lock);
1126 list_del(&task->tk_task);
1127 spin_unlock(&clnt->cl_lock);
1128 task->tk_client = NULL;
1129
1130 rpc_release_client(clnt);
1131 }
1132 }
1133
/*
 * Always select the client's main transport (cl_xprt), bypassing
 * round-robin; used for tasks flagged RPC_TASK_NO_ROUND_ROBIN.
 */
static struct rpc_xprt *
rpc_task_get_first_xprt(struct rpc_clnt *clnt)
{
	struct rpc_xprt *xprt;

	rcu_read_lock();
	xprt = xprt_get(rcu_dereference(clnt->cl_xprt));
	rcu_read_unlock();
	return rpc_task_get_xprt(clnt, xprt);
}
1144
1145 static struct rpc_xprt *
rpc_task_get_next_xprt(struct rpc_clnt * clnt)1146 rpc_task_get_next_xprt(struct rpc_clnt *clnt)
1147 {
1148 return rpc_task_get_xprt(clnt, xprt_iter_get_next(&clnt->cl_xpi));
1149 }
1150
/*
 * Assign a transport to the task. An already-assigned transport is
 * kept unless it has been taken offline AND the task is allowed to
 * migrate (RPC_TASK_MOVEABLE), in which case it is released and a
 * fresh one is chosen.
 */
static
void rpc_task_set_transport(struct rpc_task *task, struct rpc_clnt *clnt)
{
	if (task->tk_xprt) {
		if (!(test_bit(XPRT_OFFLINE, &task->tk_xprt->state) &&
		      (task->tk_flags & RPC_TASK_MOVEABLE)))
			return;
		xprt_release(task);
		xprt_put(task->tk_xprt);
	}
	/* Pinned tasks always use the main transport; others round-robin */
	if (task->tk_flags & RPC_TASK_NO_ROUND_ROBIN)
		task->tk_xprt = rpc_task_get_first_xprt(clnt);
	else
		task->tk_xprt = rpc_task_get_next_xprt(clnt);
}
1166
/*
 * Attach @task to @clnt: pick a transport, take a client reference,
 * propagate the client's soft/timeout policy into the task flags, and
 * add the task to the client's task list.
 */
static
void rpc_task_set_client(struct rpc_task *task, struct rpc_clnt *clnt)
{
	rpc_task_set_transport(task, clnt);
	task->tk_client = clnt;
	refcount_inc(&clnt->cl_count);
	if (clnt->cl_softrtry)
		task->tk_flags |= RPC_TASK_SOFT;
	if (clnt->cl_softerr)
		task->tk_flags |= RPC_TASK_TIMEOUT;
	if (clnt->cl_noretranstimeo)
		task->tk_flags |= RPC_TASK_NO_RETRANS_TIMEOUT;
	/* Add to the client's list of all tasks */
	spin_lock(&clnt->cl_lock);
	list_add_tail(&task->tk_task, &clnt->cl_tasks);
	spin_unlock(&clnt->cl_lock);
}
1184
1185 static void
rpc_task_set_rpc_message(struct rpc_task * task,const struct rpc_message * msg)1186 rpc_task_set_rpc_message(struct rpc_task *task, const struct rpc_message *msg)
1187 {
1188 if (msg != NULL) {
1189 task->tk_msg.rpc_proc = msg->rpc_proc;
1190 task->tk_msg.rpc_argp = msg->rpc_argp;
1191 task->tk_msg.rpc_resp = msg->rpc_resp;
1192 task->tk_msg.rpc_cred = msg->rpc_cred;
1193 if (!(task->tk_flags & RPC_TASK_CRED_NOREF))
1194 get_cred(task->tk_msg.rpc_cred);
1195 }
1196 }
1197
/*
 * Default callback for async RPC calls
 */
static void
rpc_default_callback(struct rpc_task *task, void *data)
{
	/* Intentionally empty: no completion processing is required */
}
1205
/* Minimal call ops for callers that need no completion hooks */
static const struct rpc_call_ops rpc_default_ops = {
	.rpc_call_done = rpc_default_callback,
};
1209
/**
 * rpc_run_task - Allocate a new RPC task, then run rpc_execute against it
 * @task_setup_data: pointer to task initialisation data
 *
 * Returns the new task with an extra reference held for the caller
 * (release with rpc_put_task()), or an ERR_PTR on failure.
 */
struct rpc_task *rpc_run_task(const struct rpc_task_setup *task_setup_data)
{
	struct rpc_task *task;

	task = rpc_new_task(task_setup_data);
	if (IS_ERR(task))
		return task;

	/* A synchronous caller keeps its cred reference alive for the whole
	 * call, so the task need not take one of its own. */
	if (!RPC_IS_ASYNC(task))
		task->tk_flags |= RPC_TASK_CRED_NOREF;

	rpc_task_set_client(task, task_setup_data->rpc_client);
	rpc_task_set_rpc_message(task, task_setup_data->rpc_message);

	if (task->tk_action == NULL)
		rpc_call_start(task);

	/* Extra reference is returned to the caller */
	atomic_inc(&task->tk_count);
	rpc_execute(task);
	return task;
}
EXPORT_SYMBOL_GPL(rpc_run_task);
1236
/**
 * rpc_call_sync - Perform a synchronous RPC call
 * @clnt: pointer to RPC client
 * @msg: RPC call parameters
 * @flags: RPC call flags (must not include RPC_TASK_ASYNC)
 *
 * Runs the call to completion and returns the task's final status.
 */
int rpc_call_sync(struct rpc_clnt *clnt, const struct rpc_message *msg, int flags)
{
	struct rpc_task *task;
	struct rpc_task_setup task_setup_data = {
		.rpc_client = clnt,
		.rpc_message = msg,
		.callback_ops = &rpc_default_ops,
		.flags = flags,
	};
	int status;

	/* Async callers must use rpc_call_async(); refuse and clean up */
	WARN_ON_ONCE(flags & RPC_TASK_ASYNC);
	if (flags & RPC_TASK_ASYNC) {
		rpc_release_calldata(task_setup_data.callback_ops,
			task_setup_data.callback_data);
		return -EINVAL;
	}

	task = rpc_run_task(&task_setup_data);
	if (IS_ERR(task))
		return PTR_ERR(task);
	status = task->tk_status;
	rpc_put_task(task);
	return status;
}
EXPORT_SYMBOL_GPL(rpc_call_sync);
1269
1270 /**
1271 * rpc_call_async - Perform an asynchronous RPC call
1272 * @clnt: pointer to RPC client
1273 * @msg: RPC call parameters
1274 * @flags: RPC call flags
1275 * @tk_ops: RPC call ops
1276 * @data: user call data
1277 */
1278 int
rpc_call_async(struct rpc_clnt * clnt,const struct rpc_message * msg,int flags,const struct rpc_call_ops * tk_ops,void * data)1279 rpc_call_async(struct rpc_clnt *clnt, const struct rpc_message *msg, int flags,
1280 const struct rpc_call_ops *tk_ops, void *data)
1281 {
1282 struct rpc_task *task;
1283 struct rpc_task_setup task_setup_data = {
1284 .rpc_client = clnt,
1285 .rpc_message = msg,
1286 .callback_ops = tk_ops,
1287 .callback_data = data,
1288 .flags = flags|RPC_TASK_ASYNC,
1289 };
1290
1291 task = rpc_run_task(&task_setup_data);
1292 if (IS_ERR(task))
1293 return PTR_ERR(task);
1294 rpc_put_task(task);
1295 return 0;
1296 }
1297 EXPORT_SYMBOL_GPL(rpc_call_async);
1298
#if defined(CONFIG_SUNRPC_BACKCHANNEL)
static void call_bc_encode(struct rpc_task *task);

/**
 * rpc_run_bc_task - Allocate a new RPC task for backchannel use, then run
 * rpc_execute against it
 * @req: RPC request
 *
 * Takes ownership of @req; on task-allocation failure the request is
 * returned to the transport's backchannel pool.
 */
struct rpc_task *rpc_run_bc_task(struct rpc_rqst *req)
{
	struct rpc_task *task;
	struct rpc_task_setup task_setup_data = {
		.callback_ops = &rpc_default_ops,
		.flags = RPC_TASK_SOFTCONN |
			RPC_TASK_NO_RETRANS_TIMEOUT,
	};

	dprintk("RPC: rpc_run_bc_task req= %p\n", req);
	/*
	 * Create an rpc_task to send the data
	 */
	task = rpc_new_task(&task_setup_data);
	if (IS_ERR(task)) {
		xprt_free_bc_request(req);
		return task;
	}

	xprt_init_bc_request(req, task);

	/* Backchannel replies bypass the normal call_start FSM entry */
	task->tk_action = call_bc_encode;
	atomic_inc(&task->tk_count);
	/* One ref for rpc_execute, one returned to the caller */
	WARN_ON_ONCE(atomic_read(&task->tk_count) != 2);
	rpc_execute(task);

	dprintk("RPC: rpc_run_bc_task: task= %p\n", task);
	return task;
}
#endif /* CONFIG_SUNRPC_BACKCHANNEL */
1337
/**
 * rpc_prepare_reply_pages - Prepare to receive a reply data payload into pages
 * @req: RPC request to prepare
 * @pages: vector of struct page pointers
 * @base: offset in first page where receive should start, in bytes
 * @len: expected size of the upper layer data payload, in bytes
 * @hdrsize: expected size of upper layer reply header, in XDR words
 *
 */
void rpc_prepare_reply_pages(struct rpc_rqst *req, struct page **pages,
			     unsigned int base, unsigned int len,
			     unsigned int hdrsize)
{
	/* Allow for the RPC reply header plus the auth flavor's residue */
	hdrsize += RPC_REPHDRSIZE + req->rq_cred->cr_auth->au_ralign;

	/* hdrsize is in XDR words; << 2 converts to bytes */
	xdr_inline_pages(&req->rq_rcv_buf, hdrsize << 2, pages, base, len);
	trace_rpc_xdr_reply_pages(req->rq_task, &req->rq_rcv_buf);
}
EXPORT_SYMBOL_GPL(rpc_prepare_reply_pages);
1357
/* Point the task's FSM at the start of the call sequence */
void
rpc_call_start(struct rpc_task *task)
{
	task->tk_action = call_start;
}
EXPORT_SYMBOL_GPL(rpc_call_start);
1364
/**
 * rpc_peeraddr - extract remote peer address from clnt's xprt
 * @clnt: RPC client structure
 * @buf: target buffer
 * @bufsize: length of target buffer
 *
 * Returns the number of bytes that are actually in the stored address.
 * The copy is truncated to @bufsize if the buffer is too small.
 */
size_t rpc_peeraddr(struct rpc_clnt *clnt, struct sockaddr *buf, size_t bufsize)
{
	size_t bytes;
	struct rpc_xprt *xprt;

	/* The transport pointer is RCU-managed; copy under the read lock */
	rcu_read_lock();
	xprt = rcu_dereference(clnt->cl_xprt);

	bytes = xprt->addrlen;
	if (bytes > bufsize)
		bytes = bufsize;
	memcpy(buf, &xprt->addr, bytes);
	rcu_read_unlock();

	return bytes;
}
EXPORT_SYMBOL_GPL(rpc_peeraddr);
1390
1391 /**
1392 * rpc_peeraddr2str - return remote peer address in printable format
1393 * @clnt: RPC client structure
1394 * @format: address format
1395 *
1396 * NB: the lifetime of the memory referenced by the returned pointer is
1397 * the same as the rpc_xprt itself. As long as the caller uses this
1398 * pointer, it must hold the RCU read lock.
1399 */
rpc_peeraddr2str(struct rpc_clnt * clnt,enum rpc_display_format_t format)1400 const char *rpc_peeraddr2str(struct rpc_clnt *clnt,
1401 enum rpc_display_format_t format)
1402 {
1403 struct rpc_xprt *xprt;
1404
1405 xprt = rcu_dereference(clnt->cl_xprt);
1406
1407 if (xprt->address_strings[format] != NULL)
1408 return xprt->address_strings[format];
1409 else
1410 return "unprintable";
1411 }
1412 EXPORT_SYMBOL_GPL(rpc_peeraddr2str);
1413
/* Wildcard addresses used by rpc_sockname() to bind its probe socket
 * and by rpc_anyaddr() as a fallback source address.
 * NOTE(review): despite the "_loopback" names these are the ANY
 * addresses (INADDR_ANY / IN6ADDR_ANY_INIT), not 127.0.0.1/::1. */
static const struct sockaddr_in rpc_inaddr_loopback = {
	.sin_family		= AF_INET,
	.sin_addr.s_addr	= htonl(INADDR_ANY),
};

static const struct sockaddr_in6 rpc_in6addr_loopback = {
	.sin6_family		= AF_INET6,
	.sin6_addr		= IN6ADDR_ANY_INIT,
};
1423
/*
 * Try a getsockname() on a connected datagram socket. Using a
 * connected datagram socket prevents leaving a socket in TIME_WAIT.
 * This conserves the ephemeral port number space.
 *
 * Returns zero and fills in "buf" if successful; otherwise, a
 * negative errno is returned.
 */
static int rpc_sockname(struct net *net, struct sockaddr *sap, size_t salen,
			struct sockaddr *buf)
{
	struct socket *sock;
	int err;

	err = __sock_create(net, sap->sa_family,
			SOCK_DGRAM, IPPROTO_UDP, &sock, 1);
	if (err < 0) {
		dprintk("RPC: can't create UDP socket (%d)\n", err);
		goto out;
	}

	/* Bind to the wildcard address of the matching family */
	switch (sap->sa_family) {
	case AF_INET:
		err = kernel_bind(sock,
				(struct sockaddr *)&rpc_inaddr_loopback,
				sizeof(rpc_inaddr_loopback));
		break;
	case AF_INET6:
		err = kernel_bind(sock,
				(struct sockaddr *)&rpc_in6addr_loopback,
				sizeof(rpc_in6addr_loopback));
		break;
	default:
		err = -EAFNOSUPPORT;
		goto out_release;
	}
	if (err < 0) {
		dprintk("RPC: can't bind UDP socket (%d)\n", err);
		goto out_release;
	}

	/* Connecting a UDP socket just fixes the destination; the kernel
	 * then selects the local source address we are after. */
	err = kernel_connect(sock, sap, salen, 0);
	if (err < 0) {
		dprintk("RPC: can't connect UDP socket (%d)\n", err);
		goto out_release;
	}

	err = kernel_getsockname(sock, buf);
	if (err < 0) {
		dprintk("RPC: getsockname failed (%d)\n", err);
		goto out_release;
	}

	err = 0;
	/* A link-local scope id is meaningless to other hosts; clear it */
	if (buf->sa_family == AF_INET6) {
		struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)buf;
		sin6->sin6_scope_id = 0;
	}
	dprintk("RPC: %s succeeded\n", __func__);

out_release:
	sock_release(sock);
out:
	return err;
}
1489
1490 /*
1491 * Scraping a connected socket failed, so we don't have a useable
1492 * local address. Fallback: generate an address that will prevent
1493 * the server from calling us back.
1494 *
1495 * Returns zero and fills in "buf" if successful; otherwise, a
1496 * negative errno is returned.
1497 */
rpc_anyaddr(int family,struct sockaddr * buf,size_t buflen)1498 static int rpc_anyaddr(int family, struct sockaddr *buf, size_t buflen)
1499 {
1500 switch (family) {
1501 case AF_INET:
1502 if (buflen < sizeof(rpc_inaddr_loopback))
1503 return -EINVAL;
1504 memcpy(buf, &rpc_inaddr_loopback,
1505 sizeof(rpc_inaddr_loopback));
1506 break;
1507 case AF_INET6:
1508 if (buflen < sizeof(rpc_in6addr_loopback))
1509 return -EINVAL;
1510 memcpy(buf, &rpc_in6addr_loopback,
1511 sizeof(rpc_in6addr_loopback));
1512 break;
1513 default:
1514 dprintk("RPC: %s: address family not supported\n",
1515 __func__);
1516 return -EAFNOSUPPORT;
1517 }
1518 dprintk("RPC: %s: succeeded\n", __func__);
1519 return 0;
1520 }
1521
/**
 * rpc_localaddr - discover local endpoint address for an RPC client
 * @clnt: RPC client structure
 * @buf: target buffer
 * @buflen: size of target buffer, in bytes
 *
 * Returns zero and fills in "buf" and "buflen" if successful;
 * otherwise, a negative errno is returned.
 *
 * This works even if the underlying transport is not currently connected,
 * or if the upper layer never previously provided a source address.
 *
 * The result of this function call is transient: multiple calls in
 * succession may give different results, depending on how local
 * networking configuration changes over time.
 */
int rpc_localaddr(struct rpc_clnt *clnt, struct sockaddr *buf, size_t buflen)
{
	struct sockaddr_storage address;
	struct sockaddr *sap = (struct sockaddr *)&address;
	struct rpc_xprt *xprt;
	struct net *net;
	size_t salen;
	int err;

	/* Snapshot the server address and pin the netns under RCU */
	rcu_read_lock();
	xprt = rcu_dereference(clnt->cl_xprt);
	salen = xprt->addrlen;
	memcpy(sap, &xprt->addr, salen);
	net = get_net(xprt->xprt_net);
	rcu_read_unlock();

	/* Port is irrelevant for the routing lookup */
	rpc_set_port(sap, 0);
	err = rpc_sockname(net, sap, salen, buf);
	put_net(net);
	if (err != 0)
		/* Couldn't discover local address, return ANYADDR */
		return rpc_anyaddr(sap->sa_family, buf, buflen);
	return 0;
}
EXPORT_SYMBOL_GPL(rpc_localaddr);
1563
/*
 * Set the transport's socket send/receive buffer sizes. A no-op for
 * transports that do not implement set_buffer_size.
 */
void
rpc_setbufsize(struct rpc_clnt *clnt, unsigned int sndsize, unsigned int rcvsize)
{
	struct rpc_xprt *xprt;

	rcu_read_lock();
	xprt = rcu_dereference(clnt->cl_xprt);
	if (xprt->ops->set_buffer_size)
		xprt->ops->set_buffer_size(xprt, sndsize, rcvsize);
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(rpc_setbufsize);
1576
1577 /**
1578 * rpc_net_ns - Get the network namespace for this RPC client
1579 * @clnt: RPC client to query
1580 *
1581 */
rpc_net_ns(struct rpc_clnt * clnt)1582 struct net *rpc_net_ns(struct rpc_clnt *clnt)
1583 {
1584 struct net *ret;
1585
1586 rcu_read_lock();
1587 ret = rcu_dereference(clnt->cl_xprt)->xprt_net;
1588 rcu_read_unlock();
1589 return ret;
1590 }
1591 EXPORT_SYMBOL_GPL(rpc_net_ns);
1592
/**
 * rpc_max_payload - Get maximum payload size for a transport, in bytes
 * @clnt: RPC client to query
 *
 * For stream transports, this is one RPC record fragment (see RFC
 * 1831), as we don't support multi-record requests yet. For datagram
 * transports, this is the size of an IP packet minus the IP, UDP, and
 * RPC header sizes.
 */
size_t rpc_max_payload(struct rpc_clnt *clnt)
{
	size_t ret;

	rcu_read_lock();
	ret = rcu_dereference(clnt->cl_xprt)->max_payload;
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(rpc_max_payload);
1612
1613 /**
1614 * rpc_max_bc_payload - Get maximum backchannel payload size, in bytes
1615 * @clnt: RPC client to query
1616 */
rpc_max_bc_payload(struct rpc_clnt * clnt)1617 size_t rpc_max_bc_payload(struct rpc_clnt *clnt)
1618 {
1619 struct rpc_xprt *xprt;
1620 size_t ret;
1621
1622 rcu_read_lock();
1623 xprt = rcu_dereference(clnt->cl_xprt);
1624 ret = xprt->ops->bc_maxpayload(xprt);
1625 rcu_read_unlock();
1626 return ret;
1627 }
1628 EXPORT_SYMBOL_GPL(rpc_max_bc_payload);
1629
rpc_num_bc_slots(struct rpc_clnt * clnt)1630 unsigned int rpc_num_bc_slots(struct rpc_clnt *clnt)
1631 {
1632 struct rpc_xprt *xprt;
1633 unsigned int ret;
1634
1635 rcu_read_lock();
1636 xprt = rcu_dereference(clnt->cl_xprt);
1637 ret = xprt->ops->bc_num_slots(xprt);
1638 rcu_read_unlock();
1639 return ret;
1640 }
1641 EXPORT_SYMBOL_GPL(rpc_num_bc_slots);
1642
/**
 * rpc_force_rebind - force transport to check that remote port is unchanged
 * @clnt: client to rebind
 *
 * Only acts when the client uses automatic rpcbind lookups (cl_autobind):
 * clearing the transport's bound state makes the next call redo the
 * portmapper query.
 */
void rpc_force_rebind(struct rpc_clnt *clnt)
{
	if (clnt->cl_autobind) {
		rcu_read_lock();
		xprt_clear_bound(rcu_dereference(clnt->cl_xprt));
		rcu_read_unlock();
	}
}
EXPORT_SYMBOL_GPL(rpc_force_rebind);
1657
/*
 * Clear the task's error state and restart its FSM at @action.
 * Always returns 1 so exit handlers can signal "call restarted".
 */
static int
__rpc_restart_call(struct rpc_task *task, void (*action)(struct rpc_task *))
{
	task->tk_status = 0;
	task->tk_rpc_status = 0;
	task->tk_action = action;
	return 1;
}
1666
/*
 * Restart an (async) RPC call from the beginning (call_start).
 * Usually called from within the exit handler.
 */
int
rpc_restart_call(struct rpc_task *task)
{
	return __rpc_restart_call(task, call_start);
}
EXPORT_SYMBOL_GPL(rpc_restart_call);
1677
1678 /*
1679 * Restart an (async) RPC call from the call_prepare state.
1680 * Usually called from within the exit handler.
1681 */
1682 int
rpc_restart_call_prepare(struct rpc_task * task)1683 rpc_restart_call_prepare(struct rpc_task *task)
1684 {
1685 if (task->tk_ops->rpc_call_prepare != NULL)
1686 return __rpc_restart_call(task, rpc_prepare_task);
1687 return rpc_restart_call(task);
1688 }
1689 EXPORT_SYMBOL_GPL(rpc_restart_call_prepare);
1690
1691 const char
rpc_proc_name(const struct rpc_task * task)1692 *rpc_proc_name(const struct rpc_task *task)
1693 {
1694 const struct rpc_procinfo *proc = task->tk_msg.rpc_proc;
1695
1696 if (proc) {
1697 if (proc->p_name)
1698 return proc->p_name;
1699 else
1700 return "NULL";
1701 } else
1702 return "no proc";
1703 }
1704
/*
 * Record an RPC-level error on the task and terminate it: tk_status
 * carries the exit status, tk_rpc_status the sticky RPC error.
 */
static void
__rpc_call_rpcerror(struct rpc_task *task, int tk_status, int rpc_status)
{
	trace_rpc_call_rpcerror(task, tk_status, rpc_status);
	rpc_task_set_rpc_status(task, rpc_status);
	rpc_exit(task, tk_status);
}
1712
/* Convenience wrapper: same value for both exit and RPC status */
static void
rpc_call_rpcerror(struct rpc_task *task, int status)
{
	__rpc_call_rpcerror(task, status, status);
}
1718
/*
 * 0. Initial state
 *
 * Other FSM states can be visited zero or more times, but
 * this state is visited exactly once for each RPC.
 */
static void
call_start(struct rpc_task *task)
{
	struct rpc_clnt *clnt = task->tk_client;
	int idx = task->tk_msg.rpc_proc->p_statidx;

	trace_rpc_request(task);

	/* A client being shut down accepts no new calls */
	if (task->tk_client->cl_shutdown) {
		rpc_call_rpcerror(task, -EIO);
		return;
	}

	/* Increment call count (version might not be valid for ping) */
	if (clnt->cl_program->version[clnt->cl_vers])
		clnt->cl_program->version[clnt->cl_vers]->counts[idx]++;
	clnt->cl_stats->rpccnt++;
	task->tk_action = call_reserve;
	/* (Re)select the transport in case it changed since setup */
	rpc_task_set_transport(task, clnt);
}
1745
/*
 * 1. Reserve an RPC call slot
 */
static void
call_reserve(struct rpc_task *task)
{
	task->tk_status = 0;
	task->tk_action = call_reserveresult;
	xprt_reserve(task);
}
1756
1757 static void call_retry_reserve(struct rpc_task *task);
1758
/*
 * 1b. Grok the result of xprt_reserve()
 */
static void
call_reserveresult(struct rpc_task *task)
{
	int status = task->tk_status;

	/*
	 * After a call to xprt_reserve(), we must have either
	 * a request slot or else an error status.
	 */
	task->tk_status = 0;
	if (status >= 0) {
		if (task->tk_rqstp) {
			task->tk_action = call_refresh;
			return;
		}

		/* Success without a slot should be impossible; bail out */
		rpc_call_rpcerror(task, -EIO);
		return;
	}

	switch (status) {
	case -ENOMEM:
		/* Back off briefly before retrying the reservation */
		rpc_delay(task, HZ >> 2);
		fallthrough;
	case -EAGAIN:	/* woken up; retry */
		task->tk_action = call_retry_reserve;
		return;
	default:
		rpc_call_rpcerror(task, status);
	}
}
1793
/*
 * 1c. Retry reserving an RPC call slot
 */
static void
call_retry_reserve(struct rpc_task *task)
{
	task->tk_status = 0;
	task->tk_action = call_reserveresult;
	xprt_retry_reserve(task);
}
1804
/*
 * 2. Bind and/or refresh the credentials
 */
static void
call_refresh(struct rpc_task *task)
{
	task->tk_action = call_refreshresult;
	task->tk_status = 0;
	task->tk_client->cl_stats->rpcauthrefresh++;
	rpcauth_refreshcred(task);
}
1816
/*
 * 2a. Process the results of a credential refresh
 *
 * Note the deliberate fallthrough cascade below: each failure mode
 * adds its own handling (delay, status rewrite) before sharing the
 * retry-budget check.
 */
static void
call_refreshresult(struct rpc_task *task)
{
	int status = task->tk_status;

	task->tk_status = 0;
	/* Default next state: try the refresh again */
	task->tk_action = call_refresh;
	switch (status) {
	case 0:
		if (rpcauth_uptodatecred(task)) {
			task->tk_action = call_allocate;
			return;
		}
		/* Use rate-limiting and a max number of retries if refresh
		 * had status 0 but failed to update the cred.
		 */
		fallthrough;
	case -ETIMEDOUT:
		rpc_delay(task, 3*HZ);
		fallthrough;
	case -EAGAIN:
		status = -EACCES;
		fallthrough;
	case -EKEYEXPIRED:
		if (!task->tk_cred_retry)
			break;
		task->tk_cred_retry--;
		trace_rpc_retry_refresh_status(task);
		return;
	case -ENOMEM:
		rpc_delay(task, HZ >> 4);
		return;
	}
	/* Retries exhausted or hard error: terminate the call */
	trace_rpc_refresh_status(task);
	rpc_call_rpcerror(task, status);
}
1856
/*
 * 2b. Allocate the buffer. For details, see sched.c:rpc_malloc.
 * (Note: buffer memory is freed in xprt_release).
 */
static void
call_allocate(struct rpc_task *task)
{
	const struct rpc_auth *auth = task->tk_rqstp->rq_cred->cr_auth;
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;
	const struct rpc_procinfo *proc = task->tk_msg.rpc_proc;
	int status;

	task->tk_status = 0;
	task->tk_action = call_encode;

	/* Buffer already allocated on a previous pass */
	if (req->rq_buffer)
		return;

	/* Sanity-check procedure metadata (proc 0 is the NULL ping) */
	if (proc->p_proc != 0) {
		BUG_ON(proc->p_arglen == 0);
		if (proc->p_decode != NULL)
			BUG_ON(proc->p_replen == 0);
	}

	/*
	 * Calculate the size (in quads) of the RPC call
	 * and reply headers, and convert both values
	 * to byte sizes.
	 */
	req->rq_callsize = RPC_CALLHDRSIZE + (auth->au_cslack << 1) +
			   proc->p_arglen;
	req->rq_callsize <<= 2;
	/*
	 * Note: the reply buffer must at minimum allocate enough space
	 * for the 'struct accepted_reply' from RFC5531.
	 */
	req->rq_rcvsize = RPC_REPHDRSIZE + auth->au_rslack + \
			max_t(size_t, proc->p_replen, 2);
	req->rq_rcvsize <<= 2;

	status = xprt->ops->buf_alloc(task);
	trace_rpc_buf_alloc(task, status);
	if (status == 0)
		return;
	if (status != -ENOMEM) {
		rpc_call_rpcerror(task, status);
		return;
	}

	/* ENOMEM: back off and retry, unless a fatal signal is pending */
	if (RPC_IS_ASYNC(task) || !fatal_signal_pending(current)) {
		task->tk_action = call_allocate;
		rpc_delay(task, HZ>>4);
		return;
	}

	rpc_call_rpcerror(task, -ERESTARTSYS);
}
1915
/*
 * An encode pass is needed when the request is not already queued for
 * transmission (RPC_TASK_NEED_XMIT clear) and either it was never
 * sent, retransmit timeouts apply, or the transport reports that a
 * retransmission is due.
 */
static int
rpc_task_need_encode(struct rpc_task *task)
{
	return test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate) == 0 &&
		(!(task->tk_flags & RPC_TASK_SENT) ||
		 !(task->tk_flags & RPC_TASK_NO_RETRANS_TIMEOUT) ||
		 xprt_request_need_retransmit(task));
}
1924
/*
 * (Re)initialize the request's send and receive buffers, then encode
 * the RPC header and wrap the arguments through the auth flavor.
 * Any failure is left in task->tk_status for call_encode() to handle.
 */
static void
rpc_xdr_encode(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct xdr_stream xdr;

	xdr_buf_init(&req->rq_snd_buf,
		     req->rq_buffer,
		     req->rq_callsize);
	xdr_buf_init(&req->rq_rcv_buf,
		     req->rq_rbuffer,
		     req->rq_rcvsize);

	/* Reset reply state in case this is a re-encode for retransmit */
	req->rq_reply_bytes_recvd = 0;
	req->rq_snd_buf.head[0].iov_len = 0;
	xdr_init_encode(&xdr, &req->rq_snd_buf,
			req->rq_snd_buf.head[0].iov_base, req);
	if (rpc_encode_header(task, &xdr))
		return;

	task->tk_status = rpcauth_wrap_req(task, &xdr);
}
1947
/*
 * 3. Encode arguments of an RPC call
 */
static void
call_encode(struct rpc_task *task)
{
	if (!rpc_task_need_encode(task))
		goto out;

	/* Dequeue task from the receive queue while we're encoding */
	xprt_request_dequeue_xprt(task);
	/* Encode here so that rpcsec_gss can use correct sequence number. */
	rpc_xdr_encode(task);
	/* Add task to reply queue before transmission to avoid races */
	if (task->tk_status == 0 && rpc_reply_expected(task))
		task->tk_status = xprt_request_enqueue_receive(task);
	/* Did the encode result in an error condition? */
	if (task->tk_status != 0) {
		/* Was the error nonfatal? */
		switch (task->tk_status) {
		case -EAGAIN:
		case -ENOMEM:
			/* Transient: retry this state after a short delay */
			rpc_delay(task, HZ >> 4);
			break;
		case -EKEYEXPIRED:
			if (!task->tk_cred_retry) {
				rpc_call_rpcerror(task, task->tk_status);
			} else {
				/* Stale cred: go back and refresh it */
				task->tk_action = call_refresh;
				task->tk_cred_retry--;
				trace_rpc_retry_refresh_status(task);
			}
			break;
		default:
			rpc_call_rpcerror(task, task->tk_status);
		}
		return;
	}

	xprt_request_enqueue_transmit(task);
out:
	task->tk_action = call_transmit;
	/* Check that the connection is OK */
	if (!xprt_bound(task->tk_xprt))
		task->tk_action = call_bind;
	else if (!xprt_connected(task->tk_xprt))
		task->tk_action = call_connect;
}
1996
/*
 * Helpers to check if the task was already transmitted, and
 * to take action when that is the case.
 */
static bool
rpc_task_transmitted(struct rpc_task *task)
{
	/* NEED_XMIT clear means the request has left the transmit queue */
	return !test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate);
}
2006
/* Already sent: skip straight to processing the transmit result */
static void
rpc_task_handle_transmitted(struct rpc_task *task)
{
	xprt_end_transmit(task);
	task->tk_action = call_transmit_status;
}
2013
/*
 * 4. Get the server port number if not yet set
 */
static void
call_bind(struct rpc_task *task)
{
	struct rpc_xprt *xprt = task->tk_rqstp->rq_xprt;

	if (rpc_task_transmitted(task)) {
		rpc_task_handle_transmitted(task);
		return;
	}

	/* Port already known: move straight on to connecting */
	if (xprt_bound(xprt)) {
		task->tk_action = call_connect;
		return;
	}

	task->tk_action = call_bind_status;
	if (!xprt_prepare_transmit(task))
		return;

	/* Ask the transport's rpcbind implementation for the port */
	xprt->ops->rpcbind(task);
}
2038
/*
 * 4a. Sort out bind result
 */
static void
call_bind_status(struct rpc_task *task)
{
	struct rpc_xprt *xprt = task->tk_rqstp->rq_xprt;
	int status = -EIO;

	if (rpc_task_transmitted(task)) {
		rpc_task_handle_transmitted(task);
		return;
	}

	/* Success, or a concurrent bind by another task, moves us on */
	if (task->tk_status >= 0)
		goto out_next;
	if (xprt_bound(xprt)) {
		task->tk_status = 0;
		goto out_next;
	}

	switch (task->tk_status) {
	case -ENOMEM:
		rpc_delay(task, HZ >> 2);
		goto retry_timeout;
	case -EACCES:
		trace_rpcb_prog_unavail_err(task);
		/* fail immediately if this is an RPC ping */
		if (task->tk_msg.rpc_proc->p_proc == 0) {
			status = -EOPNOTSUPP;
			break;
		}
		rpc_delay(task, 3*HZ);
		goto retry_timeout;
	case -ENOBUFS:
		rpc_delay(task, HZ >> 2);
		goto retry_timeout;
	case -EAGAIN:
		goto retry_timeout;
	case -ETIMEDOUT:
		trace_rpcb_timeout_err(task);
		goto retry_timeout;
	case -EPFNOSUPPORT:
		/* server doesn't support any rpcbind version we know of */
		trace_rpcb_bind_version_err(task);
		break;
	case -EPROTONOSUPPORT:
		trace_rpcb_bind_version_err(task);
		goto retry_timeout;
	case -ECONNREFUSED:		/* connection problems */
	case -ECONNRESET:
	case -ECONNABORTED:
	case -ENOTCONN:
	case -EHOSTDOWN:
	case -ENETDOWN:
	case -EHOSTUNREACH:
	case -ENETUNREACH:
	case -EPIPE:
		trace_rpcb_unreachable_err(task);
		/* Hard (non-SOFTCONN) tasks keep retrying after a delay */
		if (!RPC_IS_SOFTCONN(task)) {
			rpc_delay(task, 5*HZ);
			goto retry_timeout;
		}
		status = task->tk_status;
		break;
	default:
		trace_rpcb_unrecognized_err(task);
	}

	rpc_call_rpcerror(task, status);
	return;
out_next:
	task->tk_action = call_connect;
	return;
retry_timeout:
	/* Retry the bind, but honour the task's major timeout */
	task->tk_status = 0;
	task->tk_action = call_bind;
	rpc_check_timeout(task);
}
2118
/*
 * 4b. Connect to the RPC server
 */
static void
call_connect(struct rpc_task *task)
{
	struct rpc_xprt *xprt = task->tk_rqstp->rq_xprt;

	if (rpc_task_transmitted(task)) {
		rpc_task_handle_transmitted(task);
		return;
	}

	/* Already connected: go straight to transmission */
	if (xprt_connected(xprt)) {
		task->tk_action = call_transmit;
		return;
	}

	task->tk_action = call_connect_status;
	/* A pending error will be picked up by call_connect_status */
	if (task->tk_status < 0)
		return;
	if (task->tk_flags & RPC_TASK_NOCONNECT) {
		rpc_call_rpcerror(task, -ENOTCONN);
		return;
	}
	if (!xprt_prepare_transmit(task))
		return;
	xprt_connect(task);
}
2148
/*
 * 4c.	Sort out connect result
 */
static void
call_connect_status(struct rpc_task *task)
{
	struct rpc_xprt *xprt = task->tk_rqstp->rq_xprt;
	struct rpc_clnt *clnt = task->tk_client;
	int status = task->tk_status;

	/* The request may already have gone out on another thread */
	if (rpc_task_transmitted(task)) {
		rpc_task_handle_transmitted(task);
		return;
	}

	trace_rpc_connect_status(task);

	if (task->tk_status == 0) {
		clnt->cl_stats->netreconn++;
		goto out_next;
	}
	/* Connect raced with another connector: treat as success */
	if (xprt_connected(xprt)) {
		task->tk_status = 0;
		goto out_next;
	}

	task->tk_status = 0;
	switch (status) {
	case -ECONNREFUSED:
	case -ECONNRESET:
		/* A positive refusal suggests a rebind is needed. */
		if (RPC_IS_SOFTCONN(task))
			break;
		if (clnt->cl_autobind) {
			rpc_force_rebind(clnt);
			goto out_retry;
		}
		fallthrough;
	case -ECONNABORTED:
	case -ENETDOWN:
	case -ENETUNREACH:
	case -EHOSTUNREACH:
	case -EPIPE:
	case -EPROTO:
		/* Only disconnect if nothing was received on this cookie */
		xprt_conditional_disconnect(task->tk_rqstp->rq_xprt,
					    task->tk_rqstp->rq_connect_cookie);
		if (RPC_IS_SOFTCONN(task))
			break;
		/* retry with existing socket, after a delay */
		rpc_delay(task, 3*HZ);
		fallthrough;
	case -EADDRINUSE:
	case -ENOTCONN:
	case -EAGAIN:
	case -ETIMEDOUT:
		/*
		 * A moveable task on a transport marked for removal may
		 * migrate to another transport in the switch: drop our
		 * binding and restart from call_start so a new transport
		 * is picked.
		 */
		if (!(task->tk_flags & RPC_TASK_NO_ROUND_ROBIN) &&
		    (task->tk_flags & RPC_TASK_MOVEABLE) &&
		    test_bit(XPRT_REMOVE, &xprt->state)) {
			struct rpc_xprt *saved = task->tk_xprt;
			struct rpc_xprt_switch *xps;

			rcu_read_lock();
			xps = xprt_switch_get(rcu_dereference(clnt->cl_xpi.xpi_xpswitch));
			rcu_read_unlock();
			if (xps->xps_nxprts > 1) {
				long value;

				xprt_release(task);
				value = atomic_long_dec_return(&xprt->queuelen);
				/* Last queued task: retire the transport */
				if (value == 0)
					rpc_xprt_switch_remove_xprt(xps, saved,
								    true);
				xprt_put(saved);
				task->tk_xprt = NULL;
				task->tk_action = call_start;
			}
			xprt_switch_put(xps);
			if (!task->tk_xprt)
				return;
		}
		goto out_retry;
	case -ENOBUFS:
		rpc_delay(task, HZ >> 2);
		goto out_retry;
	}
	rpc_call_rpcerror(task, status);
	return;
out_next:
	task->tk_action = call_transmit;
	return;
out_retry:
	/* Check for timeouts before looping back to call_bind */
	task->tk_action = call_bind;
	rpc_check_timeout(task);
}
2244
/*
 * 5.	Transmit the RPC request, and wait for reply
 */
static void
call_transmit(struct rpc_task *task)
{
	/* Nothing to do if another thread already sent this request */
	if (rpc_task_transmitted(task)) {
		rpc_task_handle_transmitted(task);
		return;
	}

	task->tk_action = call_transmit_status;
	if (!xprt_prepare_transmit(task))
		return;
	task->tk_status = 0;
	if (test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate)) {
		/* Must not transmit on a disconnected transport */
		if (!xprt_connected(task->tk_xprt)) {
			task->tk_status = -ENOTCONN;
			return;
		}
		xprt_transmit(task);
	}
	/* Release the transport write lock taken by xprt_prepare_transmit() */
	xprt_end_transmit(task);
}
2269
/*
 * 5a.	Handle cleanup after a transmission
 */
static void
call_transmit_status(struct rpc_task *task)
{
	task->tk_action = call_status;

	/*
	 * Common case: success.  Force the compiler to put this
	 * test first.
	 */
	if (rpc_task_transmitted(task)) {
		task->tk_status = 0;
		xprt_request_wait_receive(task);
		return;
	}

	switch (task->tk_status) {
	default:
		break;
	case -EBADMSG:
		/* Encoding failed: re-encode and retransmit */
		task->tk_status = 0;
		task->tk_action = call_encode;
		break;
	/*
	 * Special cases: if we've been waiting on the
	 * socket's write_space() callback, or if the
	 * socket just returned a connection error,
	 * then hold onto the transport lock.
	 */
	case -ENOMEM:
	case -ENOBUFS:
		rpc_delay(task, HZ>>2);
		fallthrough;
	case -EBADSLT:
	case -EAGAIN:
		/* Transient transmit failure: simply retry the send */
		task->tk_action = call_transmit;
		task->tk_status = 0;
		break;
	case -EHOSTDOWN:
	case -ENETDOWN:
	case -EHOSTUNREACH:
	case -ENETUNREACH:
	case -EPERM:
		break;
	case -ECONNREFUSED:
		/* Soft-connect tasks fail immediately on refusal */
		if (RPC_IS_SOFTCONN(task)) {
			if (!task->tk_msg.rpc_proc->p_proc)
				trace_xprt_ping(task->tk_xprt,
						task->tk_status);
			rpc_call_rpcerror(task, task->tk_status);
			return;
		}
		fallthrough;
	case -ECONNRESET:
	case -ECONNABORTED:
	case -EADDRINUSE:
	case -ENOTCONN:
	case -EPIPE:
		/* Connection-level failure: rebind and reconnect */
		task->tk_action = call_bind;
		task->tk_status = 0;
		break;
	}
	rpc_check_timeout(task);
}
2336
#if defined(CONFIG_SUNRPC_BACKCHANNEL)
static void call_bc_transmit(struct rpc_task *task);
static void call_bc_transmit_status(struct rpc_task *task);

/*
 * Queue a backchannel reply for transmission and advance to the
 * transmit state.
 */
static void
call_bc_encode(struct rpc_task *task)
{
	xprt_request_enqueue_transmit(task);
	task->tk_action = call_bc_transmit;
}
2347
/*
 * 5b.	Send the backchannel RPC reply.  On error, drop the reply.  In
 * addition, disconnect on connectivity errors.
 */
static void
call_bc_transmit(struct rpc_task *task)
{
	task->tk_action = call_bc_transmit_status;
	if (test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate)) {
		if (!xprt_prepare_transmit(task))
			return;
		task->tk_status = 0;
		xprt_transmit(task);
	}
	/* Drop the transport write lock acquired above */
	xprt_end_transmit(task);
}
2364
/*
 * Sort out the result of a backchannel reply transmission.  Unlike the
 * forward channel, failed replies are simply dropped; the server is
 * expected to retransmit the backchannel request.
 */
static void
call_bc_transmit_status(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;

	if (rpc_task_transmitted(task))
		task->tk_status = 0;

	switch (task->tk_status) {
	case 0:
		/* Success */
	case -ENETDOWN:
	case -EHOSTDOWN:
	case -EHOSTUNREACH:
	case -ENETUNREACH:
	case -ECONNRESET:
	case -ECONNREFUSED:
	case -EADDRINUSE:
	case -ENOTCONN:
	case -EPIPE:
		break;
	case -ENOMEM:
	case -ENOBUFS:
		rpc_delay(task, HZ>>2);
		fallthrough;
	case -EBADSLT:
	case -EAGAIN:
		/* Transient failure: retry the transmission */
		task->tk_status = 0;
		task->tk_action = call_bc_transmit;
		return;
	case -ETIMEDOUT:
		/*
		 * Problem reaching the server.  Disconnect and let the
		 * forechannel reestablish the connection.  The server will
		 * have to retransmit the backchannel request and we'll
		 * reprocess it.  Since these ops are idempotent, there's no
		 * need to cache our reply at this time.
		 */
		printk(KERN_NOTICE "RPC: Could not send backchannel reply "
			"error: %d\n", task->tk_status);
		xprt_conditional_disconnect(req->rq_xprt,
			req->rq_connect_cookie);
		break;
	default:
		/*
		 * We were unable to reply and will have to drop the
		 * request.  The server should reconnect and retransmit.
		 */
		printk(KERN_NOTICE "RPC: Could not send backchannel reply "
			"error: %d\n", task->tk_status);
		break;
	}
	task->tk_action = rpc_exit_task;
}
#endif /* CONFIG_SUNRPC_BACKCHANNEL */
2420
/*
 * 6.	Sort out the RPC call status
 */
static void
call_status(struct rpc_task *task)
{
	struct rpc_clnt *clnt = task->tk_client;
	int status;

	/* Procedure 0 is the NULL ping; trace its outcome */
	if (!task->tk_msg.rpc_proc->p_proc)
		trace_xprt_ping(task->tk_xprt, task->tk_status);

	status = task->tk_status;
	if (status >= 0) {
		/* A reply was received: go decode it */
		task->tk_action = call_decode;
		return;
	}

	trace_rpc_call_status(task);
	task->tk_status = 0;
	switch(status) {
	case -EHOSTDOWN:
	case -ENETDOWN:
	case -EHOSTUNREACH:
	case -ENETUNREACH:
	case -EPERM:
		if (RPC_IS_SOFTCONN(task))
			goto out_exit;
		/*
		 * Delay any retries for 3 seconds, then handle as if it
		 * were a timeout.
		 */
		rpc_delay(task, 3*HZ);
		fallthrough;
	case -ETIMEDOUT:
		break;
	case -ECONNREFUSED:
	case -ECONNRESET:
	case -ECONNABORTED:
	case -ENOTCONN:
		/* Connection-level failure: ask rpcbind again next time */
		rpc_force_rebind(clnt);
		break;
	case -EADDRINUSE:
		rpc_delay(task, 3*HZ);
		fallthrough;
	case -EPIPE:
	case -EAGAIN:
		break;
	case -ENFILE:
	case -ENOBUFS:
	case -ENOMEM:
		/* Resource exhaustion: back off briefly */
		rpc_delay(task, HZ>>2);
		break;
	case -EIO:
		/* shutdown or soft timeout */
		goto out_exit;
	default:
		if (clnt->cl_chatty)
			printk("%s: RPC call returned error %d\n",
			       clnt->cl_program->name, -status);
		goto out_exit;
	}
	/* Retryable error: re-encode and resend, minding the timeout */
	task->tk_action = call_encode;
	rpc_check_timeout(task);
	return;
out_exit:
	rpc_call_rpcerror(task, status);
}
2489
2490 static bool
rpc_check_connected(const struct rpc_rqst * req)2491 rpc_check_connected(const struct rpc_rqst *req)
2492 {
2493 /* No allocated request or transport? return true */
2494 if (!req || !req->rq_xprt)
2495 return true;
2496 return xprt_connected(req->rq_xprt);
2497 }
2498
/*
 * Handle a major (retransmit) timeout on the current request.  Decides
 * between failing the task outright (soft / soft-connect semantics) and
 * letting it retry, with appropriate rate-limited console noise.
 */
static void
rpc_check_timeout(struct rpc_task *task)
{
	struct rpc_clnt *clnt = task->tk_client;

	if (RPC_SIGNALLED(task))
		return;

	/* Non-zero only when the major (retrans) timeout has expired */
	if (xprt_adjust_timeout(task->tk_rqstp) == 0)
		return;

	trace_rpc_timeout_status(task);
	task->tk_timeouts++;

	/* Soft-connect tasks fail as soon as the connection is gone */
	if (RPC_IS_SOFTCONN(task) && !rpc_check_connected(task->tk_rqstp)) {
		rpc_call_rpcerror(task, -ETIMEDOUT);
		return;
	}

	if (RPC_IS_SOFT(task)) {
		/*
		 * Once a "no retrans timeout" soft tasks (a.k.a NFSv4) has
		 * been sent, it should time out only if the transport
		 * connection gets terminally broken.
		 */
		if ((task->tk_flags & RPC_TASK_NO_RETRANS_TIMEOUT) &&
		    rpc_check_connected(task->tk_rqstp))
			return;

		if (clnt->cl_chatty) {
			pr_notice_ratelimited(
				"%s: server %s not responding, timed out\n",
				clnt->cl_program->name,
				task->tk_xprt->servername);
		}
		/* RPC_TASK_TIMEOUT selects the errno callers see */
		if (task->tk_flags & RPC_TASK_TIMEOUT)
			rpc_call_rpcerror(task, -ETIMEDOUT);
		else
			__rpc_call_rpcerror(task, -EIO, -ETIMEDOUT);
		return;
	}

	/* Hard task: warn once, then keep retrying */
	if (!(task->tk_flags & RPC_CALL_MAJORSEEN)) {
		task->tk_flags |= RPC_CALL_MAJORSEEN;
		if (clnt->cl_chatty) {
			pr_notice_ratelimited(
				"%s: server %s not responding, still trying\n",
				clnt->cl_program->name,
				task->tk_xprt->servername);
		}
	}
	rpc_force_rebind(clnt);
	/*
	 * Did our request time out due to an RPCSEC_GSS out-of-sequence
	 * event? RFC2203 requires the server to drop all such requests.
	 */
	rpcauth_invalcred(task);
}
2557
/*
 * 7.	Decode the RPC reply
 */
static void
call_decode(struct rpc_task *task)
{
	struct rpc_clnt *clnt = task->tk_client;
	struct rpc_rqst *req = task->tk_rqstp;
	struct xdr_stream xdr;
	int err;

	/* No decode callback: nothing to parse, the call is done */
	if (!task->tk_msg.rpc_proc->p_decode) {
		task->tk_action = rpc_exit_task;
		return;
	}

	/* Announce recovery if we previously warned about this server */
	if (task->tk_flags & RPC_CALL_MAJORSEEN) {
		if (clnt->cl_chatty) {
			pr_notice_ratelimited("%s: server %s OK\n",
				clnt->cl_program->name,
				task->tk_xprt->servername);
		}
		task->tk_flags &= ~RPC_CALL_MAJORSEEN;
	}

	/*
	 * Did we ever call xprt_complete_rqst()? If not, we should assume
	 * the message is incomplete.
	 */
	err = -EAGAIN;
	if (!req->rq_reply_bytes_recvd)
		goto out;

	/* Ensure that we see all writes made by xprt_complete_rqst()
	 * before it changed req->rq_reply_bytes_recvd.
	 */
	smp_rmb();

	req->rq_rcv_buf.len = req->rq_private_buf.len;
	trace_rpc_xdr_recvfrom(task, &req->rq_rcv_buf);

	/* Check that the softirq receive buffer is valid */
	WARN_ON(memcmp(&req->rq_rcv_buf, &req->rq_private_buf,
				sizeof(req->rq_rcv_buf)) != 0);

	xdr_init_decode(&xdr, &req->rq_rcv_buf,
			req->rq_rcv_buf.head[0].iov_base, req);
	err = rpc_decode_header(task, &xdr);
out:
	switch (err) {
	case 0:
		/* Header OK: unwrap the payload through the auth layer */
		task->tk_action = rpc_exit_task;
		task->tk_status = rpcauth_unwrap_resp(task, &xdr);
		xdr_finish_decode(&xdr);
		return;
	case -EAGAIN:
		/* Incomplete or garbled reply: re-encode and retransmit */
		task->tk_status = 0;
		if (task->tk_client->cl_discrtry)
			xprt_conditional_disconnect(req->rq_xprt,
						    req->rq_connect_cookie);
		task->tk_action = call_encode;
		rpc_check_timeout(task);
		break;
	case -EKEYREJECTED:
		/* Stale credential: restart from slot reservation */
		task->tk_action = call_reserve;
		rpc_check_timeout(task);
		rpcauth_invalcred(task);
		/* Ensure we obtain a new XID if we retry! */
		xprt_release(task);
	}
}
2629
/*
 * Encode the common RPC call header (xid, direction, RPC version,
 * program, version, procedure) followed by the credential.
 * Returns 0 on success or a negative errno, which is also reported
 * to the task via rpc_call_rpcerror().
 */
static int
rpc_encode_header(struct rpc_task *task, struct xdr_stream *xdr)
{
	struct rpc_clnt *clnt = task->tk_client;
	struct rpc_rqst *req = task->tk_rqstp;
	__be32 *p;
	int error;

	error = -EMSGSIZE;
	/* Reserve the fixed-size call header in one shot */
	p = xdr_reserve_space(xdr, RPC_CALLHDRSIZE << 2);
	if (!p)
		goto out_fail;
	*p++ = req->rq_xid;
	*p++ = rpc_call;
	*p++ = cpu_to_be32(RPC_VERSION);
	*p++ = cpu_to_be32(clnt->cl_prog);
	*p++ = cpu_to_be32(clnt->cl_vers);
	*p   = cpu_to_be32(task->tk_msg.rpc_proc->p_proc);

	/* Credential and verifier follow the header */
	error = rpcauth_marshcred(task, xdr);
	if (error < 0)
		goto out_fail;
	return 0;
out_fail:
	trace_rpc_bad_callhdr(task);
	rpc_call_rpcerror(task, error);
	return error;
}
2658
/*
 * Parse the RPC reply header: xid, direction, accept/deny status,
 * verifier, and accept status.  Returns 0 when the reply was accepted
 * with rpc_success, -EAGAIN to trigger a retransmission, or a final
 * negative errno (also reported via rpc_call_rpcerror()).
 */
static noinline int
rpc_decode_header(struct rpc_task *task, struct xdr_stream *xdr)
{
	struct rpc_clnt *clnt = task->tk_client;
	int error;
	__be32 *p;

	/* RFC-1014 says that the representation of XDR data must be a
	 * multiple of four bytes
	 * - if it isn't pointer subtraction in the NFS client may give
	 *   undefined results
	 */
	if (task->tk_rqstp->rq_rcv_buf.len & 3)
		goto out_unparsable;

	/* xid + msg direction + reply status */
	p = xdr_inline_decode(xdr, 3 * sizeof(*p));
	if (!p)
		goto out_unparsable;
	p++;	/* skip XID */
	if (*p++ != rpc_reply)
		goto out_unparsable;
	if (*p++ != rpc_msg_accepted)
		goto out_msg_denied;

	error = rpcauth_checkverf(task, xdr);
	if (error)
		goto out_verifier;

	/* accept status */
	p = xdr_inline_decode(xdr, sizeof(*p));
	if (!p)
		goto out_unparsable;
	switch (*p) {
	case rpc_success:
		return 0;
	case rpc_prog_unavail:
		trace_rpc__prog_unavail(task);
		error = -EPFNOSUPPORT;
		goto out_err;
	case rpc_prog_mismatch:
		trace_rpc__prog_mismatch(task);
		error = -EPROTONOSUPPORT;
		goto out_err;
	case rpc_proc_unavail:
		trace_rpc__proc_unavail(task);
		error = -EOPNOTSUPP;
		goto out_err;
	case rpc_garbage_args:
	case rpc_system_err:
		trace_rpc__garbage_args(task);
		error = -EIO;
		break;
	default:
		goto out_unparsable;
	}

out_garbage:
	/* Possibly garbled on the wire: re-encode and retry a few times */
	clnt->cl_stats->rpcgarbage++;
	if (task->tk_garb_retry) {
		task->tk_garb_retry--;
		task->tk_action = call_encode;
		return -EAGAIN;
	}
out_err:
	rpc_call_rpcerror(task, error);
	return error;

out_unparsable:
	trace_rpc__unparsable(task);
	error = -EIO;
	goto out_garbage;

out_verifier:
	trace_rpc_bad_verifier(task);
	switch (error) {
	case -EPROTONOSUPPORT:
		goto out_err;
	case -EACCES:
		/* Re-encode with a fresh cred */
		fallthrough;
	default:
		goto out_garbage;
	}

out_msg_denied:
	error = -EACCES;
	/* reject status: rpc_mismatch or rpc_auth_error */
	p = xdr_inline_decode(xdr, sizeof(*p));
	if (!p)
		goto out_unparsable;
	switch (*p++) {
	case rpc_auth_error:
		break;
	case rpc_mismatch:
		trace_rpc__mismatch(task);
		error = -EPROTONOSUPPORT;
		goto out_err;
	default:
		goto out_unparsable;
	}

	/* auth_stat: why the credential was rejected */
	p = xdr_inline_decode(xdr, sizeof(*p));
	if (!p)
		goto out_unparsable;
	switch (*p++) {
	case rpc_autherr_rejectedcred:
	case rpc_autherr_rejectedverf:
	case rpcsec_gsserr_credproblem:
	case rpcsec_gsserr_ctxproblem:
		/* Stale credential: refresh it and retry */
		rpcauth_invalcred(task);
		if (!task->tk_cred_retry)
			break;
		task->tk_cred_retry--;
		trace_rpc__stale_creds(task);
		return -EKEYREJECTED;
	case rpc_autherr_badcred:
	case rpc_autherr_badverf:
		/* possibly garbled cred/verf? */
		if (!task->tk_garb_retry)
			break;
		task->tk_garb_retry--;
		trace_rpc__bad_creds(task);
		task->tk_action = call_encode;
		return -EAGAIN;
	case rpc_autherr_tooweak:
		trace_rpc__auth_tooweak(task);
		pr_warn("RPC: server %s requires stronger authentication.\n",
			task->tk_xprt->servername);
		break;
	default:
		goto out_unparsable;
	}
	goto out_err;
}
2791
rpcproc_encode_null(struct rpc_rqst * rqstp,struct xdr_stream * xdr,const void * obj)2792 static void rpcproc_encode_null(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
2793 const void *obj)
2794 {
2795 }
2796
rpcproc_decode_null(struct rpc_rqst * rqstp,struct xdr_stream * xdr,void * obj)2797 static int rpcproc_decode_null(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
2798 void *obj)
2799 {
2800 return 0;
2801 }
2802
2803 static const struct rpc_procinfo rpcproc_null = {
2804 .p_encode = rpcproc_encode_null,
2805 .p_decode = rpcproc_decode_null,
2806 };
2807
2808 static const struct rpc_procinfo rpcproc_null_noreply = {
2809 .p_encode = rpcproc_encode_null,
2810 };
2811
/*
 * Prepare a NULL call: always allow retransmit timeouts so a ping can
 * time out even on NFSv4-style "no retrans timeout" clients.
 */
static void
rpc_null_call_prepare(struct rpc_task *task, void *data)
{
	task->tk_flags &= ~RPC_TASK_NO_RETRANS_TIMEOUT;
	rpc_call_start(task);
}

/* Default callbacks for NULL calls */
static const struct rpc_call_ops rpc_null_ops = {
	.rpc_call_prepare = rpc_null_call_prepare,
	.rpc_call_done = rpc_default_callback,
};
2823
/*
 * Start a NULL call on @clnt (optionally pinned to @xprt) with null
 * credentials and soft/soft-connect semantics.  Returns the running
 * task or an ERR_PTR; the caller owns the returned task reference.
 */
static
struct rpc_task *rpc_call_null_helper(struct rpc_clnt *clnt,
		struct rpc_xprt *xprt, struct rpc_cred *cred, int flags,
		const struct rpc_call_ops *ops, void *data)
{
	struct rpc_message msg = {
		.rpc_proc = &rpcproc_null,
	};
	struct rpc_task_setup task_setup_data = {
		.rpc_client = clnt,
		.rpc_xprt = xprt,
		.rpc_message = &msg,
		.rpc_op_cred = cred,
		.callback_ops = ops ?: &rpc_null_ops,
		.callback_data = data,
		.flags = flags | RPC_TASK_SOFT | RPC_TASK_SOFTCONN |
			 RPC_TASK_NULLCREDS,
	};

	return rpc_run_task(&task_setup_data);
}
2845
rpc_call_null(struct rpc_clnt * clnt,struct rpc_cred * cred,int flags)2846 struct rpc_task *rpc_call_null(struct rpc_clnt *clnt, struct rpc_cred *cred, int flags)
2847 {
2848 return rpc_call_null_helper(clnt, NULL, cred, flags, NULL, NULL);
2849 }
2850 EXPORT_SYMBOL_GPL(rpc_call_null);
2851
rpc_ping(struct rpc_clnt * clnt)2852 static int rpc_ping(struct rpc_clnt *clnt)
2853 {
2854 struct rpc_task *task;
2855 int status;
2856
2857 if (clnt->cl_auth->au_ops->ping)
2858 return clnt->cl_auth->au_ops->ping(clnt);
2859
2860 task = rpc_call_null_helper(clnt, NULL, NULL, 0, NULL, NULL);
2861 if (IS_ERR(task))
2862 return PTR_ERR(task);
2863 status = task->tk_status;
2864 rpc_put_task(task);
2865 return status;
2866 }
2867
rpc_ping_noreply(struct rpc_clnt * clnt)2868 static int rpc_ping_noreply(struct rpc_clnt *clnt)
2869 {
2870 struct rpc_message msg = {
2871 .rpc_proc = &rpcproc_null_noreply,
2872 };
2873 struct rpc_task_setup task_setup_data = {
2874 .rpc_client = clnt,
2875 .rpc_message = &msg,
2876 .callback_ops = &rpc_null_ops,
2877 .flags = RPC_TASK_SOFT | RPC_TASK_SOFTCONN | RPC_TASK_NULLCREDS,
2878 };
2879 struct rpc_task *task;
2880 int status;
2881
2882 task = rpc_run_task(&task_setup_data);
2883 if (IS_ERR(task))
2884 return PTR_ERR(task);
2885 status = task->tk_status;
2886 rpc_put_task(task);
2887 return status;
2888 }
2889
/* Calldata for the async "test then add transport" NULL call below. */
struct rpc_cb_add_xprt_calldata {
	struct rpc_xprt_switch *xps;	/* switch to add the transport to */
	struct rpc_xprt *xprt;		/* transport under test */
};

/* On a successful test ping, admit the transport into the switch. */
static void rpc_cb_add_xprt_done(struct rpc_task *task, void *calldata)
{
	struct rpc_cb_add_xprt_calldata *data = calldata;

	if (task->tk_status == 0)
		rpc_xprt_switch_add_xprt(data->xps, data->xprt);
}

/* Drop the references held by the calldata and free it. */
static void rpc_cb_add_xprt_release(void *calldata)
{
	struct rpc_cb_add_xprt_calldata *data = calldata;

	xprt_put(data->xprt);
	xprt_switch_put(data->xps);
	kfree(data);
}

static const struct rpc_call_ops rpc_cb_add_xprt_call_ops = {
	.rpc_call_prepare = rpc_null_call_prepare,
	.rpc_call_done = rpc_cb_add_xprt_done,
	.rpc_release = rpc_cb_add_xprt_release,
};
2917
/**
 * rpc_clnt_test_and_add_xprt - Test and add a new transport to a rpc_clnt
 * @clnt: pointer to struct rpc_clnt
 * @xps: pointer to struct rpc_xprt_switch,
 * @xprt: pointer struct rpc_xprt
 * @in_max_connect: pointer to the max_connect value for the passed in xprt transport
 *
 * Fires an async NULL ping at @xprt; the transport is added to @xps by
 * the completion callback only if the ping succeeds.  Returns 1 on
 * success (including the already-known-address case) or a negative errno.
 */
int rpc_clnt_test_and_add_xprt(struct rpc_clnt *clnt,
		struct rpc_xprt_switch *xps, struct rpc_xprt *xprt,
		void *in_max_connect)
{
	struct rpc_cb_add_xprt_calldata *data;
	struct rpc_task *task;
	int max_connect = clnt->cl_max_connect;

	if (in_max_connect)
		max_connect = *(int *)in_max_connect;
	if (xps->xps_nunique_destaddr_xprts + 1 > max_connect) {
		rcu_read_lock();
		pr_warn("SUNRPC: reached max allowed number (%d) did not add "
			"transport to server: %s\n", max_connect,
			rpc_peeraddr2str(clnt, RPC_DISPLAY_ADDR));
		rcu_read_unlock();
		return -EINVAL;
	}

	data = kmalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;
	/* References held by data are dropped by rpc_cb_add_xprt_release() */
	data->xps = xprt_switch_get(xps);
	data->xprt = xprt_get(xprt);
	if (rpc_xprt_switch_has_addr(data->xps, (struct sockaddr *)&xprt->addr)) {
		rpc_cb_add_xprt_release(data);
		goto success;
	}

	/*
	 * NOTE(review): on failure here, cleanup of @data presumably
	 * happens via the ops' rpc_release in the task setup error path —
	 * verify rpc_run_task() releases calldata when task creation fails.
	 */
	task = rpc_call_null_helper(clnt, xprt, NULL, RPC_TASK_ASYNC,
			&rpc_cb_add_xprt_call_ops, data);
	if (IS_ERR(task))
		return PTR_ERR(task);

	data->xps->xps_nunique_destaddr_xprts++;
	rpc_put_task(task);
success:
	return 1;
}
EXPORT_SYMBOL_GPL(rpc_clnt_test_and_add_xprt);
2965
rpc_clnt_add_xprt_helper(struct rpc_clnt * clnt,struct rpc_xprt * xprt,struct rpc_add_xprt_test * data)2966 static int rpc_clnt_add_xprt_helper(struct rpc_clnt *clnt,
2967 struct rpc_xprt *xprt,
2968 struct rpc_add_xprt_test *data)
2969 {
2970 struct rpc_task *task;
2971 int status = -EADDRINUSE;
2972
2973 /* Test the connection */
2974 task = rpc_call_null_helper(clnt, xprt, NULL, 0, NULL, NULL);
2975 if (IS_ERR(task))
2976 return PTR_ERR(task);
2977
2978 status = task->tk_status;
2979 rpc_put_task(task);
2980
2981 if (status < 0)
2982 return status;
2983
2984 /* rpc_xprt_switch and rpc_xprt are deferrenced by add_xprt_test() */
2985 data->add_xprt_test(clnt, xprt, data->data);
2986
2987 return 0;
2988 }
2989
2990 /**
2991 * rpc_clnt_setup_test_and_add_xprt()
2992 *
2993 * This is an rpc_clnt_add_xprt setup() function which returns 1 so:
2994 * 1) caller of the test function must dereference the rpc_xprt_switch
2995 * and the rpc_xprt.
2996 * 2) test function must call rpc_xprt_switch_add_xprt, usually in
2997 * the rpc_call_done routine.
2998 *
2999 * Upon success (return of 1), the test function adds the new
3000 * transport to the rpc_clnt xprt switch
3001 *
3002 * @clnt: struct rpc_clnt to get the new transport
3003 * @xps: the rpc_xprt_switch to hold the new transport
3004 * @xprt: the rpc_xprt to test
3005 * @data: a struct rpc_add_xprt_test pointer that holds the test function
3006 * and test function call data
3007 */
rpc_clnt_setup_test_and_add_xprt(struct rpc_clnt * clnt,struct rpc_xprt_switch * xps,struct rpc_xprt * xprt,void * data)3008 int rpc_clnt_setup_test_and_add_xprt(struct rpc_clnt *clnt,
3009 struct rpc_xprt_switch *xps,
3010 struct rpc_xprt *xprt,
3011 void *data)
3012 {
3013 int status = -EADDRINUSE;
3014
3015 xprt = xprt_get(xprt);
3016 xprt_switch_get(xps);
3017
3018 if (rpc_xprt_switch_has_addr(xps, (struct sockaddr *)&xprt->addr))
3019 goto out_err;
3020
3021 status = rpc_clnt_add_xprt_helper(clnt, xprt, data);
3022 if (status < 0)
3023 goto out_err;
3024
3025 status = 1;
3026 out_err:
3027 xprt_put(xprt);
3028 xprt_switch_put(xps);
3029 if (status < 0)
3030 pr_info("RPC: rpc_clnt_test_xprt failed: %d addr %s not "
3031 "added\n", status,
3032 xprt->address_strings[RPC_DISPLAY_ADDR]);
3033 /* so that rpc_clnt_add_xprt does not call rpc_xprt_switch_add_xprt */
3034 return status;
3035 }
3036 EXPORT_SYMBOL_GPL(rpc_clnt_setup_test_and_add_xprt);
3037
/**
 * rpc_clnt_add_xprt - Add a new transport to a rpc_clnt
 * @clnt: pointer to struct rpc_clnt
 * @xprtargs: pointer to struct xprt_create
 * @setup: callback to test and/or set up the connection
 * @data: pointer to setup function data
 *
 * Creates a new transport using the parameters set in args and
 * adds it to clnt.
 * If ping is set, then test that connectivity succeeds before
 * adding the new transport.
 *
 */
int rpc_clnt_add_xprt(struct rpc_clnt *clnt,
		struct xprt_create *xprtargs,
		int (*setup)(struct rpc_clnt *,
			struct rpc_xprt_switch *,
			struct rpc_xprt *,
			void *),
		void *data)
{
	struct rpc_xprt_switch *xps;
	struct rpc_xprt *xprt;
	unsigned long connect_timeout;
	unsigned long reconnect_timeout;
	unsigned char resvport, reuseport;
	int ret = 0, ident;

	/* Copy settings from an existing transport while under RCU */
	rcu_read_lock();
	xps = xprt_switch_get(rcu_dereference(clnt->cl_xpi.xpi_xpswitch));
	xprt = xprt_iter_xprt(&clnt->cl_xpi);
	if (xps == NULL || xprt == NULL) {
		rcu_read_unlock();
		xprt_switch_put(xps);
		return -EAGAIN;
	}
	resvport = xprt->resvport;
	reuseport = xprt->reuseport;
	connect_timeout = xprt->connect_timeout;
	reconnect_timeout = xprt->max_reconnect_timeout;
	ident = xprt->xprt_class->ident;
	rcu_read_unlock();

	/* Default to the existing transport's class if none requested */
	if (!xprtargs->ident)
		xprtargs->ident = ident;
	xprtargs->xprtsec = clnt->cl_xprtsec;
	xprt = xprt_create_transport(xprtargs);
	if (IS_ERR(xprt)) {
		ret = PTR_ERR(xprt);
		goto out_put_switch;
	}
	xprt->resvport = resvport;
	xprt->reuseport = reuseport;

	/* Caller-supplied timeouts override the inherited ones */
	if (xprtargs->connect_timeout)
		connect_timeout = xprtargs->connect_timeout;
	if (xprtargs->reconnect_timeout)
		reconnect_timeout = xprtargs->reconnect_timeout;
	if (xprt->ops->set_connect_timeout != NULL)
		xprt->ops->set_connect_timeout(xprt,
				connect_timeout,
				reconnect_timeout);

	rpc_xprt_switch_set_roundrobin(xps);
	if (setup) {
		/* A non-zero return means setup() already handled the add */
		ret = setup(clnt, xps, xprt, data);
		if (ret != 0)
			goto out_put_xprt;
	}
	rpc_xprt_switch_add_xprt(xps, xprt);
out_put_xprt:
	xprt_put(xprt);
out_put_switch:
	xprt_switch_put(xps);
	return ret;
}
EXPORT_SYMBOL_GPL(rpc_clnt_add_xprt);
3115
rpc_xprt_probe_trunked(struct rpc_clnt * clnt,struct rpc_xprt * xprt,struct rpc_add_xprt_test * data)3116 static int rpc_xprt_probe_trunked(struct rpc_clnt *clnt,
3117 struct rpc_xprt *xprt,
3118 struct rpc_add_xprt_test *data)
3119 {
3120 struct rpc_xprt_switch *xps;
3121 struct rpc_xprt *main_xprt;
3122 int status = 0;
3123
3124 xprt_get(xprt);
3125
3126 rcu_read_lock();
3127 main_xprt = xprt_get(rcu_dereference(clnt->cl_xprt));
3128 xps = xprt_switch_get(rcu_dereference(clnt->cl_xpi.xpi_xpswitch));
3129 status = rpc_cmp_addr_port((struct sockaddr *)&xprt->addr,
3130 (struct sockaddr *)&main_xprt->addr);
3131 rcu_read_unlock();
3132 xprt_put(main_xprt);
3133 if (status || !test_bit(XPRT_OFFLINE, &xprt->state))
3134 goto out;
3135
3136 status = rpc_clnt_add_xprt_helper(clnt, xprt, data);
3137 out:
3138 xprt_put(xprt);
3139 xprt_switch_put(xps);
3140 return status;
3141 }
3142
/* rpc_clnt_probe_trunked_xprt -- probe offlined transport for session trunking
 * @clnt rpc_clnt structure
 *
 * For each offlined transport found in the rpc_clnt structure call
 * the function rpc_xprt_probe_trunked() which will determine if this
 * transport still belongs to the trunking group.
 */
void rpc_clnt_probe_trunked_xprts(struct rpc_clnt *clnt,
				  struct rpc_add_xprt_test *data)
{
	struct rpc_xprt_iter xpi;
	int ret;

	ret = rpc_clnt_xprt_iter_offline_init(clnt, &xpi);
	if (ret)
		return;
	for (;;) {
		struct rpc_xprt *xprt = xprt_iter_get_next(&xpi);

		if (!xprt)
			break;
		ret = rpc_xprt_probe_trunked(clnt, xprt, data);
		xprt_put(xprt);
		if (ret < 0)
			break;
		/* Probing may change transport state; restart the scan */
		xprt_iter_rewind(&xpi);
	}
	xprt_iter_destroy(&xpi);
}
EXPORT_SYMBOL_GPL(rpc_clnt_probe_trunked_xprts);
3173
rpc_xprt_offline(struct rpc_clnt * clnt,struct rpc_xprt * xprt,void * data)3174 static int rpc_xprt_offline(struct rpc_clnt *clnt,
3175 struct rpc_xprt *xprt,
3176 void *data)
3177 {
3178 struct rpc_xprt *main_xprt;
3179 struct rpc_xprt_switch *xps;
3180 int err = 0;
3181
3182 xprt_get(xprt);
3183
3184 rcu_read_lock();
3185 main_xprt = xprt_get(rcu_dereference(clnt->cl_xprt));
3186 xps = xprt_switch_get(rcu_dereference(clnt->cl_xpi.xpi_xpswitch));
3187 err = rpc_cmp_addr_port((struct sockaddr *)&xprt->addr,
3188 (struct sockaddr *)&main_xprt->addr);
3189 rcu_read_unlock();
3190 xprt_put(main_xprt);
3191 if (err)
3192 goto out;
3193
3194 if (wait_on_bit_lock(&xprt->state, XPRT_LOCKED, TASK_KILLABLE)) {
3195 err = -EINTR;
3196 goto out;
3197 }
3198 xprt_set_offline_locked(xprt, xps);
3199
3200 xprt_release_write(xprt, NULL);
3201 out:
3202 xprt_put(xprt);
3203 xprt_switch_put(xps);
3204 return err;
3205 }
3206
/* rpc_clnt_manage_trunked_xprts -- offline trunked transports
 * @clnt rpc_clnt structure
 *
 * For each active transport found in the rpc_clnt structure call
 * the function rpc_xprt_offline() which will identify trunked transports
 * and will mark them offline.
 */
void rpc_clnt_manage_trunked_xprts(struct rpc_clnt *clnt)
{
	rpc_clnt_iterate_for_each_xprt(clnt, rpc_xprt_offline, NULL);
}
EXPORT_SYMBOL_GPL(rpc_clnt_manage_trunked_xprts);
3219
/* Timeout pair passed through rpc_clnt_iterate_for_each_xprt() below. */
struct connect_timeout_data {
	unsigned long connect_timeout;		/* per-connect timeout */
	unsigned long reconnect_timeout;	/* max reconnect backoff */
};
3224
3225 static int
rpc_xprt_set_connect_timeout(struct rpc_clnt * clnt,struct rpc_xprt * xprt,void * data)3226 rpc_xprt_set_connect_timeout(struct rpc_clnt *clnt,
3227 struct rpc_xprt *xprt,
3228 void *data)
3229 {
3230 struct connect_timeout_data *timeo = data;
3231
3232 if (xprt->ops->set_connect_timeout)
3233 xprt->ops->set_connect_timeout(xprt,
3234 timeo->connect_timeout,
3235 timeo->reconnect_timeout);
3236 return 0;
3237 }
3238
/*
 * Apply @connect_timeout and @reconnect_timeout to every transport
 * currently attached to @clnt.
 */
void
rpc_set_connect_timeout(struct rpc_clnt *clnt,
		unsigned long connect_timeout,
		unsigned long reconnect_timeout)
{
	struct connect_timeout_data timeout = {
		.connect_timeout = connect_timeout,
		.reconnect_timeout = reconnect_timeout,
	};
	rpc_clnt_iterate_for_each_xprt(clnt,
			rpc_xprt_set_connect_timeout,
			&timeout);
}
EXPORT_SYMBOL_GPL(rpc_set_connect_timeout);
3253
/* Release a reference on @clnt's current transport switch.
 * The rcu_read_lock() only stabilizes the rcu_dereference(); the
 * reference being dropped is presumably one the caller already owns --
 * TODO(review): confirm against callers.
 */
void rpc_clnt_xprt_switch_put(struct rpc_clnt *clnt)
{
	rcu_read_lock();
	xprt_switch_put(rcu_dereference(clnt->cl_xpi.xpi_xpswitch));
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(rpc_clnt_xprt_switch_put);
3261
/* Mark @xprt online within @clnt's transport switch.
 * NOTE(review): @xps is used after rcu_read_unlock() without taking an
 * extra reference; presumably the caller keeps the switch pinned (e.g.
 * via an active client) -- confirm against callers.
 */
void rpc_clnt_xprt_set_online(struct rpc_clnt *clnt, struct rpc_xprt *xprt)
{
	struct rpc_xprt_switch *xps;

	rcu_read_lock();
	xps = rcu_dereference(clnt->cl_xpi.xpi_xpswitch);
	rcu_read_unlock();
	xprt_set_online_locked(xprt, xps);
}
3271
rpc_clnt_xprt_switch_add_xprt(struct rpc_clnt * clnt,struct rpc_xprt * xprt)3272 void rpc_clnt_xprt_switch_add_xprt(struct rpc_clnt *clnt, struct rpc_xprt *xprt)
3273 {
3274 if (rpc_clnt_xprt_switch_has_addr(clnt,
3275 (const struct sockaddr *)&xprt->addr)) {
3276 return rpc_clnt_xprt_set_online(clnt, xprt);
3277 }
3278 rcu_read_lock();
3279 rpc_xprt_switch_add_xprt(rcu_dereference(clnt->cl_xpi.xpi_xpswitch),
3280 xprt);
3281 rcu_read_unlock();
3282 }
3283 EXPORT_SYMBOL_GPL(rpc_clnt_xprt_switch_add_xprt);
3284
rpc_clnt_xprt_switch_remove_xprt(struct rpc_clnt * clnt,struct rpc_xprt * xprt)3285 void rpc_clnt_xprt_switch_remove_xprt(struct rpc_clnt *clnt, struct rpc_xprt *xprt)
3286 {
3287 struct rpc_xprt_switch *xps;
3288
3289 rcu_read_lock();
3290 xps = rcu_dereference(clnt->cl_xpi.xpi_xpswitch);
3291 rpc_xprt_switch_remove_xprt(rcu_dereference(clnt->cl_xpi.xpi_xpswitch),
3292 xprt, 0);
3293 xps->xps_nunique_destaddr_xprts--;
3294 rcu_read_unlock();
3295 }
3296 EXPORT_SYMBOL_GPL(rpc_clnt_xprt_switch_remove_xprt);
3297
rpc_clnt_xprt_switch_has_addr(struct rpc_clnt * clnt,const struct sockaddr * sap)3298 bool rpc_clnt_xprt_switch_has_addr(struct rpc_clnt *clnt,
3299 const struct sockaddr *sap)
3300 {
3301 struct rpc_xprt_switch *xps;
3302 bool ret;
3303
3304 rcu_read_lock();
3305 xps = rcu_dereference(clnt->cl_xpi.xpi_xpswitch);
3306 ret = rpc_xprt_switch_has_addr(xps, sap);
3307 rcu_read_unlock();
3308 return ret;
3309 }
3310 EXPORT_SYMBOL_GPL(rpc_clnt_xprt_switch_has_addr);
3311
3312 #if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
/* Print the column legend for the per-task lines emitted by rpc_show_task() */
static void rpc_show_header(void)
{
	printk(KERN_INFO "-pid- flgs status -client- --rqstp- "
		"-timeout ---ops--\n");
}
3318
rpc_show_task(const struct rpc_clnt * clnt,const struct rpc_task * task)3319 static void rpc_show_task(const struct rpc_clnt *clnt,
3320 const struct rpc_task *task)
3321 {
3322 const char *rpc_waitq = "none";
3323
3324 if (RPC_IS_QUEUED(task))
3325 rpc_waitq = rpc_qname(task->tk_waitqueue);
3326
3327 printk(KERN_INFO "%5u %04x %6d %8p %8p %8ld %8p %sv%u %s a:%ps q:%s\n",
3328 task->tk_pid, task->tk_flags, task->tk_status,
3329 clnt, task->tk_rqstp, rpc_task_timeout(task), task->tk_ops,
3330 clnt->cl_program->name, clnt->cl_vers, rpc_proc_name(task),
3331 task->tk_action, rpc_waitq);
3332 }
3333
rpc_show_tasks(struct net * net)3334 void rpc_show_tasks(struct net *net)
3335 {
3336 struct rpc_clnt *clnt;
3337 struct rpc_task *task;
3338 int header = 0;
3339 struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
3340
3341 spin_lock(&sn->rpc_client_lock);
3342 list_for_each_entry(clnt, &sn->all_clients, cl_clients) {
3343 spin_lock(&clnt->cl_lock);
3344 list_for_each_entry(task, &clnt->cl_tasks, tk_task) {
3345 if (!header) {
3346 rpc_show_header();
3347 header++;
3348 }
3349 rpc_show_task(clnt, task);
3350 }
3351 spin_unlock(&clnt->cl_lock);
3352 }
3353 spin_unlock(&sn->rpc_client_lock);
3354 }
3355 #endif
3356
3357 #if IS_ENABLED(CONFIG_SUNRPC_SWAP)
/* Per-transport callback for rpc_clnt_swap_activate(): enable swap on
 * @xprt; returns the xprt_enable_swap() result so iteration can stop
 * on error. */
static int
rpc_clnt_swap_activate_callback(struct rpc_clnt *clnt,
		struct rpc_xprt *xprt,
		void *dummy)
{
	return xprt_enable_swap(xprt);
}
3365
3366 int
rpc_clnt_swap_activate(struct rpc_clnt * clnt)3367 rpc_clnt_swap_activate(struct rpc_clnt *clnt)
3368 {
3369 while (clnt != clnt->cl_parent)
3370 clnt = clnt->cl_parent;
3371 if (atomic_inc_return(&clnt->cl_swapper) == 1)
3372 return rpc_clnt_iterate_for_each_xprt(clnt,
3373 rpc_clnt_swap_activate_callback, NULL);
3374 return 0;
3375 }
3376 EXPORT_SYMBOL_GPL(rpc_clnt_swap_activate);
3377
/* Per-transport callback for rpc_clnt_swap_deactivate(): disable swap
 * on @xprt; always returns 0 so every transport is visited. */
static int
rpc_clnt_swap_deactivate_callback(struct rpc_clnt *clnt,
		struct rpc_xprt *xprt,
		void *dummy)
{
	xprt_disable_swap(xprt);
	return 0;
}
3386
3387 void
rpc_clnt_swap_deactivate(struct rpc_clnt * clnt)3388 rpc_clnt_swap_deactivate(struct rpc_clnt *clnt)
3389 {
3390 while (clnt != clnt->cl_parent)
3391 clnt = clnt->cl_parent;
3392 if (atomic_dec_if_positive(&clnt->cl_swapper) == 0)
3393 rpc_clnt_iterate_for_each_xprt(clnt,
3394 rpc_clnt_swap_deactivate_callback, NULL);
3395 }
3396 EXPORT_SYMBOL_GPL(rpc_clnt_swap_deactivate);
3397 #endif /* CONFIG_SUNRPC_SWAP */
3398