xref: /openbmc/linux/fs/nfsd/nfs4callback.c (revision 92ed1a76)
/*
 *  Copyright (c) 2001 The Regents of the University of Michigan.
 *  All rights reserved.
 *
 *  Kendrick Smith <kmsmith@umich.edu>
 *  Andy Adamson <andros@umich.edu>
 *
 *  Redistribution and use in source and binary forms, with or without
 *  modification, are permitted provided that the following conditions
 *  are met:
 *
 *  1. Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *  2. Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 *  3. Neither the name of the University nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 *  THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
 *  WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 *  MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 *  DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 *  FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 *  BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 *  LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 *  NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 *  SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/svc_xprt.h>
#include <linux/slab.h>
#include "nfsd.h"
#include "state.h"

#define NFSDDBG_FACILITY                NFSDDBG_PROC

#define NFSPROC4_CB_NULL 0
#define NFSPROC4_CB_COMPOUND 1

/* Index of predefined Linux callback client operations */

enum {
	NFSPROC4_CLNT_CB_NULL = 0,
	NFSPROC4_CLNT_CB_RECALL,
	NFSPROC4_CLNT_CB_SEQUENCE,
};

enum nfs_cb_opnum4 {
	OP_CB_RECALL            = 4,
	OP_CB_SEQUENCE          = 11,
};

#define NFS4_MAXTAGLEN		20

#define NFS4_enc_cb_null_sz		0
#define NFS4_dec_cb_null_sz		0
#define cb_compound_enc_hdr_sz		4
#define cb_compound_dec_hdr_sz		(3 + (NFS4_MAXTAGLEN >> 2))
#define sessionid_sz			(NFS4_MAX_SESSIONID_LEN >> 2)
#define cb_sequence_enc_sz		(sessionid_sz + 4 +             \
					1 /* no referring calls list yet */)
#define cb_sequence_dec_sz		(op_dec_sz + sessionid_sz + 4)

#define op_enc_sz			1
#define op_dec_sz			2
#define enc_nfs4_fh_sz			(1 + (NFS4_FHSIZE >> 2))
#define enc_stateid_sz			(NFS4_STATEID_SIZE >> 2)
#define NFS4_enc_cb_recall_sz		(cb_compound_enc_hdr_sz +       \
					cb_sequence_enc_sz +            \
					1 + enc_stateid_sz +            \
					enc_nfs4_fh_sz)

#define NFS4_dec_cb_recall_sz		(cb_compound_dec_hdr_sz +       \
					cb_sequence_dec_sz +            \
					op_dec_sz)
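
/*
 * Rough sanity check of the buffer estimates above (all figures in 4-byte
 * XDR words), assuming the usual constant values NFS4_MAX_SESSIONID_LEN = 16,
 * NFS4_STATEID_SIZE = 16 and NFS4_FHSIZE = 128:
 *
 *	sessionid_sz          = 4
 *	cb_sequence_enc_sz    = 4 + 4 + 1                 = 9
 *	enc_stateid_sz        = 4
 *	enc_nfs4_fh_sz        = 1 + 32                    = 33
 *	NFS4_enc_cb_recall_sz = 4 + 9 + 1 + 4 + 33        = 51 words (204 bytes)
 *	NFS4_dec_cb_recall_sz = (3 + 5) + (2 + 4 + 4) + 2 = 20 words
 *
 * These are sizing hints handed to the RPC layer via p_arglen/p_replen in
 * the procedure table below, not exact wire lengths.
 */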

/*
 * Generic encode routines from fs/nfs/nfs4xdr.c
 */
static inline __be32 *
xdr_writemem(__be32 *p, const void *ptr, int nbytes)
{
	int tmp = XDR_QUADLEN(nbytes);
	if (!tmp)
		return p;
	p[tmp-1] = 0;
	memcpy(p, ptr, nbytes);
	return p + tmp;
}

#define WRITE32(n)               *p++ = htonl(n)
#define WRITEMEM(ptr,nbytes)     do {                           \
	p = xdr_writemem(p, ptr, nbytes);                       \
} while (0)
#define RESERVE_SPACE(nbytes)   do {                            \
	p = xdr_reserve_space(xdr, nbytes);                     \
	if (!p) dprintk("NFSD: RESERVE_SPACE(%d) failed in function %s\n", (int) (nbytes), __func__); \
	BUG_ON(!p);                                             \
} while (0)

/*
 * Generic decode routines from fs/nfs/nfs4xdr.c
 */
#define DECODE_TAIL                             \
	status = 0;                             \
out:                                            \
	return status;                          \
xdr_error:                                      \
	dprintk("NFSD: xdr error! (%s:%d)\n", __FILE__, __LINE__); \
	status = -EIO;                          \
	goto out

#define READ32(x)         (x) = ntohl(*p++)
#define READ64(x)         do {                  \
	(x) = (u64)ntohl(*p++) << 32;           \
	(x) |= ntohl(*p++);                     \
} while (0)
#define READTIME(x)       do {                  \
	p++;                                    \
	(x.tv_sec) = ntohl(*p++);               \
	(x.tv_nsec) = ntohl(*p++);              \
} while (0)
#define READ_BUF(nbytes)  do { \
	p = xdr_inline_decode(xdr, nbytes); \
	if (!p) { \
		dprintk("NFSD: %s: reply buffer overflowed in line %d.\n", \
			__func__, __LINE__); \
		return -EIO; \
	} \
} while (0)
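
/*
 * Note: DECODE_TAIL is carried over from fs/nfs/nfs4xdr.c and is not used
 * by the decoders in this file, which return their errors directly.  A
 * caller of it would be expected to look roughly like the purely
 * illustrative sketch below (decode_foo, val and FOO_MAX are made-up names);
 * the macro itself supplies the out: and xdr_error: labels and expects a
 * local "status":
 *
 *	static int decode_foo(struct xdr_stream *xdr, u32 *val)
 *	{
 *		int status;
 *		__be32 *p;
 *
 *		READ_BUF(4);
 *		READ32(*val);
 *		if (*val > FOO_MAX)
 *			goto xdr_error;
 *		DECODE_TAIL;
 *	}
 */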

struct nfs4_cb_compound_hdr {
	/* args */
	u32		ident;	/* minorversion 0 only */
	u32		nops;
	__be32		*nops_p;
	u32		minorversion;
	/* res */
	int		status;
};

static struct {
	int stat;
	int errno;
} nfs_cb_errtbl[] = {
	{ NFS4_OK,		0               },
	{ NFS4ERR_PERM,		EPERM           },
	{ NFS4ERR_NOENT,	ENOENT          },
	{ NFS4ERR_IO,		EIO             },
	{ NFS4ERR_NXIO,		ENXIO           },
	{ NFS4ERR_ACCESS,	EACCES          },
	{ NFS4ERR_EXIST,	EEXIST          },
	{ NFS4ERR_XDEV,		EXDEV           },
	{ NFS4ERR_NOTDIR,	ENOTDIR         },
	{ NFS4ERR_ISDIR,	EISDIR          },
	{ NFS4ERR_INVAL,	EINVAL          },
	{ NFS4ERR_FBIG,		EFBIG           },
	{ NFS4ERR_NOSPC,	ENOSPC          },
	{ NFS4ERR_ROFS,		EROFS           },
	{ NFS4ERR_MLINK,	EMLINK          },
	{ NFS4ERR_NAMETOOLONG,	ENAMETOOLONG    },
	{ NFS4ERR_NOTEMPTY,	ENOTEMPTY       },
	{ NFS4ERR_DQUOT,	EDQUOT          },
	{ NFS4ERR_STALE,	ESTALE          },
	{ NFS4ERR_BADHANDLE,	EBADHANDLE      },
	{ NFS4ERR_BAD_COOKIE,	EBADCOOKIE      },
	{ NFS4ERR_NOTSUPP,	ENOTSUPP        },
	{ NFS4ERR_TOOSMALL,	ETOOSMALL       },
	{ NFS4ERR_SERVERFAULT,	ESERVERFAULT    },
	{ NFS4ERR_BADTYPE,	EBADTYPE        },
	{ NFS4ERR_LOCKED,	EAGAIN          },
	{ NFS4ERR_RESOURCE,	EREMOTEIO       },
	{ NFS4ERR_SYMLINK,	ELOOP           },
	{ NFS4ERR_OP_ILLEGAL,	EOPNOTSUPP      },
	{ NFS4ERR_DEADLOCK,	EDEADLK         },
	{ -1,			EIO             }
};

static int
nfs_cb_stat_to_errno(int stat)
{
	int i;
	for (i = 0; nfs_cb_errtbl[i].stat != -1; i++) {
		if (nfs_cb_errtbl[i].stat == stat)
			return nfs_cb_errtbl[i].errno;
	}
	/* If we cannot translate the error, the recovery routines should
	 * handle it.
	 * Note: remaining NFSv4 error codes have values > 10000, so should
	 * not conflict with native Linux error codes.
	 */
	return stat;
}

/*
 * XDR encode
 */

static void
encode_stateid(struct xdr_stream *xdr, stateid_t *sid)
{
	__be32 *p;

	RESERVE_SPACE(sizeof(stateid_t));
	WRITE32(sid->si_generation);
	WRITEMEM(&sid->si_opaque, sizeof(stateid_opaque_t));
}

static void
encode_cb_compound_hdr(struct xdr_stream *xdr, struct nfs4_cb_compound_hdr *hdr)
{
	__be32 *p;

	RESERVE_SPACE(16);
	WRITE32(0);            /* tag length is always 0 */
	WRITE32(hdr->minorversion);
	WRITE32(hdr->ident);
	hdr->nops_p = p;
	WRITE32(hdr->nops);
}

static void encode_cb_nops(struct nfs4_cb_compound_hdr *hdr)
{
	*hdr->nops_p = htonl(hdr->nops);
}

static void
encode_cb_recall(struct xdr_stream *xdr, struct nfs4_delegation *dp,
		struct nfs4_cb_compound_hdr *hdr)
{
	__be32 *p;
	int len = dp->dl_fh.fh_size;

	RESERVE_SPACE(4);
	WRITE32(OP_CB_RECALL);
	encode_stateid(xdr, &dp->dl_stateid);
	RESERVE_SPACE(8 + (XDR_QUADLEN(len) << 2));
	WRITE32(0); /* truncate optimization not implemented */
	WRITE32(len);
	WRITEMEM(&dp->dl_fh.fh_base, len);
	hdr->nops++;
}

static void
encode_cb_sequence(struct xdr_stream *xdr, struct nfsd4_callback *cb,
		   struct nfs4_cb_compound_hdr *hdr)
{
	__be32 *p;
	struct nfsd4_session *ses = cb->cb_clp->cl_cb_session;

	if (hdr->minorversion == 0)
		return;

	RESERVE_SPACE(1 + NFS4_MAX_SESSIONID_LEN + 20);

	WRITE32(OP_CB_SEQUENCE);
	WRITEMEM(ses->se_sessionid.data, NFS4_MAX_SESSIONID_LEN);
	WRITE32(ses->se_cb_seq_nr);
	WRITE32(0);		/* slotid, always 0 */
	WRITE32(0);		/* highest slotid always 0 */
	WRITE32(0);		/* cachethis always 0 */
	WRITE32(0); /* FIXME: support referring_call_lists */
	hdr->nops++;
}

static int
nfs4_xdr_enc_cb_null(struct rpc_rqst *req, __be32 *p)
{
	struct xdr_stream xdrs, *xdr = &xdrs;

	xdr_init_encode(&xdrs, &req->rq_snd_buf, p);
	RESERVE_SPACE(0);
	return 0;
}

static int
nfs4_xdr_enc_cb_recall(struct rpc_rqst *req, __be32 *p,
		struct nfsd4_callback *cb)
{
	struct xdr_stream xdr;
	struct nfs4_delegation *args = cb->cb_op;
	struct nfs4_cb_compound_hdr hdr = {
		.ident = cb->cb_clp->cl_cb_ident,
		.minorversion = cb->cb_minorversion,
	};

	xdr_init_encode(&xdr, &req->rq_snd_buf, p);
	encode_cb_compound_hdr(&xdr, &hdr);
	encode_cb_sequence(&xdr, cb, &hdr);
	encode_cb_recall(&xdr, args, &hdr);
	encode_cb_nops(&hdr);
	return 0;
}
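
/*
 * For reference, the CB_COMPOUND request built above is laid out on the
 * wire roughly as follows (one line per 4-byte XDR word unless noted; the
 * CB_SEQUENCE block is emitted only for minorversion 1):
 *
 *	tag length (always 0)
 *	minorversion
 *	callback_ident
 *	nops (patched in afterwards by encode_cb_nops())
 *	OP_CB_SEQUENCE (11)
 *	  sessionid (4 words)
 *	  sequence number (se_cb_seq_nr)
 *	  slotid (0)
 *	  highest slotid (0)
 *	  cachethis (0)
 *	  referring call lists (0)
 *	OP_CB_RECALL (4)
 *	  stateid (4 words)
 *	  truncate (0)
 *	  filehandle length + filehandle data
 */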

static int
decode_cb_compound_hdr(struct xdr_stream *xdr, struct nfs4_cb_compound_hdr *hdr)
{
	__be32 *p;
	u32 taglen;

	READ_BUF(8);
	READ32(hdr->status);
	/* We've got no use for the tag; ignore it: */
	READ32(taglen);
	READ_BUF(taglen + 4);
	p += XDR_QUADLEN(taglen);
	READ32(hdr->nops);
	return 0;
}

static int
decode_cb_op_hdr(struct xdr_stream *xdr, enum nfs_opnum4 expected)
{
	__be32 *p;
	u32 op;
	int32_t nfserr;

	READ_BUF(8);
	READ32(op);
	if (op != expected) {
		dprintk("NFSD: decode_cb_op_hdr: Callback server returned "
			"operation %d but we issued a request for %d\n",
			op, expected);
		return -EIO;
	}
	READ32(nfserr);
	if (nfserr != NFS_OK)
		return -nfs_cb_stat_to_errno(nfserr);
	return 0;
}

/*
 * Our current back channel implementation supports a single backchannel
 * with a single slot.
 */
static int
decode_cb_sequence(struct xdr_stream *xdr, struct nfsd4_callback *cb,
		   struct rpc_rqst *rqstp)
{
	struct nfsd4_session *ses = cb->cb_clp->cl_cb_session;
	struct nfs4_sessionid id;
	int status;
	u32 dummy;
	__be32 *p;

	if (cb->cb_minorversion == 0)
		return 0;

	status = decode_cb_op_hdr(xdr, OP_CB_SEQUENCE);
	if (status)
		return status;

	/*
	 * If the server returns different values for sessionID, slotID or
	 * sequence number, the server is looney tunes.
	 */
	status = -ESERVERFAULT;

	READ_BUF(NFS4_MAX_SESSIONID_LEN + 16);
	memcpy(id.data, p, NFS4_MAX_SESSIONID_LEN);
	p += XDR_QUADLEN(NFS4_MAX_SESSIONID_LEN);
	if (memcmp(id.data, ses->se_sessionid.data, NFS4_MAX_SESSIONID_LEN)) {
		dprintk("%s Invalid session id\n", __func__);
		goto out;
	}
	READ32(dummy);
	if (dummy != ses->se_cb_seq_nr) {
		dprintk("%s Invalid sequence number\n", __func__);
		goto out;
	}
	READ32(dummy);	/* slotid must be 0 */
	if (dummy != 0) {
		dprintk("%s Invalid slotid\n", __func__);
		goto out;
	}
	/* FIXME: process highest slotid and target highest slotid */
	status = 0;
out:
	return status;
}

static int
nfs4_xdr_dec_cb_null(struct rpc_rqst *req, __be32 *p)
{
	return 0;
}

static int
nfs4_xdr_dec_cb_recall(struct rpc_rqst *rqstp, __be32 *p,
		struct nfsd4_callback *cb)
{
	struct xdr_stream xdr;
	struct nfs4_cb_compound_hdr hdr;
	int status;

	xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
	status = decode_cb_compound_hdr(&xdr, &hdr);
	if (status)
		goto out;
	if (cb) {
		status = decode_cb_sequence(&xdr, cb, rqstp);
		if (status)
			goto out;
	}
	status = decode_cb_op_hdr(&xdr, OP_CB_RECALL);
out:
	return status;
}

/*
 * RPC procedure tables
 */
#define PROC(proc, call, argtype, restype)				\
[NFSPROC4_CLNT_##proc] = {						\
	.p_proc   = NFSPROC4_CB_##call,					\
	.p_encode = (kxdrproc_t) nfs4_xdr_##argtype,			\
	.p_decode = (kxdrproc_t) nfs4_xdr_##restype,			\
	.p_arglen = NFS4_##argtype##_sz,				\
	.p_replen = NFS4_##restype##_sz,				\
	.p_statidx = NFSPROC4_CB_##call,				\
	.p_name   = #proc,						\
}

static struct rpc_procinfo nfs4_cb_procedures[] = {
	PROC(CB_NULL,	NULL,		enc_cb_null,	dec_cb_null),
	PROC(CB_RECALL,	COMPOUND,	enc_cb_recall,	dec_cb_recall),
};
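
/*
 * For clarity: PROC(CB_RECALL, COMPOUND, enc_cb_recall, dec_cb_recall)
 * above expands to roughly the following initializer:
 *
 *	[NFSPROC4_CLNT_CB_RECALL] = {
 *		.p_proc    = NFSPROC4_CB_COMPOUND,
 *		.p_encode  = (kxdrproc_t) nfs4_xdr_enc_cb_recall,
 *		.p_decode  = (kxdrproc_t) nfs4_xdr_dec_cb_recall,
 *		.p_arglen  = NFS4_enc_cb_recall_sz,
 *		.p_replen  = NFS4_dec_cb_recall_sz,
 *		.p_statidx = NFSPROC4_CB_COMPOUND,
 *		.p_name    = "CB_RECALL",
 *	},
 */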

static struct rpc_version nfs_cb_version4 = {
/*
 * Note on the callback rpc program version number: despite language in rfc
 * 5661 section 18.36.3 requiring servers to use 4 in this field, the
 * official xdr descriptions for both 4.0 and 4.1 specify version 1, and
 * in practice that appears to be what implementations use.  The section
 * 18.36.3 language is expected to be fixed in an erratum.
 */
	.number			= 1,
	.nrprocs		= ARRAY_SIZE(nfs4_cb_procedures),
	.procs			= nfs4_cb_procedures
};

static struct rpc_version *nfs_cb_version[] = {
	&nfs_cb_version4,
};

static struct rpc_program cb_program;

static struct rpc_stat cb_stats = {
	.program	= &cb_program
};

#define NFS4_CALLBACK 0x40000000
static struct rpc_program cb_program = {
	.name		= "nfs4_cb",
	.number		= NFS4_CALLBACK,
	.nrvers		= ARRAY_SIZE(nfs_cb_version),
	.version	= nfs_cb_version,
	.stats		= &cb_stats,
	.pipe_dir_name	= "/nfsd4_cb",
};

static int max_cb_time(void)
{
	return max(nfsd4_lease/10, (time_t)1) * HZ;
}
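
/*
 * For example, with the usual 90-second default lease, max_cb_time() comes
 * to 9 seconds' worth of jiffies; since to_retries is 0 below, a callback
 * that gets no reply within that window simply times out.
 */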

/* Reference counting, callback cleanup, etc., all look racy as heck.
 * And why is cl_cb_set an atomic? */

int setup_callback_client(struct nfs4_client *clp, struct nfs4_cb_conn *conn)
{
	struct rpc_timeout	timeparms = {
		.to_initval	= max_cb_time(),
		.to_retries	= 0,
	};
	struct rpc_create_args args = {
		.net		= &init_net,
		.address	= (struct sockaddr *) &conn->cb_addr,
		.addrsize	= conn->cb_addrlen,
		.timeout	= &timeparms,
		.program	= &cb_program,
		.version	= 0,
		.authflavor	= clp->cl_flavor,
		.flags		= (RPC_CLNT_CREATE_NOPING | RPC_CLNT_CREATE_QUIET),
	};
	struct rpc_clnt *client;

	if (clp->cl_minorversion == 0) {
		if (!clp->cl_principal && (clp->cl_flavor >= RPC_AUTH_GSS_KRB5))
			return -EINVAL;
		args.client_name = clp->cl_principal;
		args.prognumber	= conn->cb_prog;
		args.protocol = XPRT_TRANSPORT_TCP;
		clp->cl_cb_ident = conn->cb_ident;
	} else {
		args.bc_xprt = conn->cb_xprt;
		args.prognumber = clp->cl_cb_session->se_cb_prog;
		args.protocol = XPRT_TRANSPORT_BC_TCP;
	}
	/* Create RPC client */
	client = rpc_create(&args);
	if (IS_ERR(client)) {
		dprintk("NFSD: couldn't create callback client: %ld\n",
			PTR_ERR(client));
		return PTR_ERR(client);
	}
	clp->cl_cb_client = client;
	return 0;
}

static void warn_no_callback_path(struct nfs4_client *clp, int reason)
{
	dprintk("NFSD: warning: no callback path to client %.*s: error %d\n",
		(int)clp->cl_name.len, clp->cl_name.data, reason);
}

static void nfsd4_cb_probe_done(struct rpc_task *task, void *calldata)
{
	struct nfs4_client *clp = container_of(calldata, struct nfs4_client, cl_cb_null);

	if (task->tk_status)
		warn_no_callback_path(clp, task->tk_status);
	else
		atomic_set(&clp->cl_cb_set, 1);
}

static const struct rpc_call_ops nfsd4_cb_probe_ops = {
	/* XXX: release method to ensure we set the cb channel down if
	 * necessary on early failure? */
	.rpc_call_done = nfsd4_cb_probe_done,
};

static struct rpc_cred *callback_cred;

int set_callback_cred(void)
{
	if (callback_cred)
		return 0;
	callback_cred = rpc_lookup_machine_cred();
	if (!callback_cred)
		return -ENOMEM;
	return 0;
}

static struct workqueue_struct *callback_wq;

static void do_probe_callback(struct nfs4_client *clp)
{
	struct nfsd4_callback *cb = &clp->cl_cb_null;

	cb->cb_op = NULL;
	cb->cb_clp = clp;

	cb->cb_msg.rpc_proc = &nfs4_cb_procedures[NFSPROC4_CLNT_CB_NULL];
	cb->cb_msg.rpc_argp = NULL;
	cb->cb_msg.rpc_resp = NULL;
	cb->cb_msg.rpc_cred = callback_cred;

	cb->cb_ops = &nfsd4_cb_probe_ops;

	queue_work(callback_wq, &cb->cb_work);
}

/*
 * Poke the callback thread to process any updates to the callback
 * parameters, and send a null probe.
 */
void nfsd4_probe_callback(struct nfs4_client *clp)
{
	set_bit(NFSD4_CLIENT_CB_UPDATE, &clp->cl_cb_flags);
	do_probe_callback(clp);
}

void nfsd4_change_callback(struct nfs4_client *clp, struct nfs4_cb_conn *conn)
{
	BUG_ON(atomic_read(&clp->cl_cb_set));

	spin_lock(&clp->cl_lock);
	memcpy(&clp->cl_cb_conn, conn, sizeof(struct nfs4_cb_conn));
	spin_unlock(&clp->cl_lock);
}

/*
 * There's currently a single callback channel slot.
 * If the slot is available, mark it busy.  Otherwise, put the
 * thread to sleep on the callback RPC wait queue.
 */
static int nfsd41_cb_setup_sequence(struct nfs4_client *clp,
		struct rpc_task *task)
{
	u32 *ptr = (u32 *)clp->cl_cb_session->se_sessionid.data;
	int status = 0;

	dprintk("%s: %u:%u:%u:%u\n", __func__,
		ptr[0], ptr[1], ptr[2], ptr[3]);

	if (test_and_set_bit(0, &clp->cl_cb_slot_busy) != 0) {
		rpc_sleep_on(&clp->cl_cb_waitq, task, NULL);
		dprintk("%s slot is busy\n", __func__);
		status = -EAGAIN;
		goto out;
	}
out:
	dprintk("%s status=%d\n", __func__, status);
	return status;
}

/*
 * TODO: cb_sequence should support referring call lists, cachethis, multiple
 * slots, and mark callback channel down on communication errors.
 */
static void nfsd4_cb_prepare(struct rpc_task *task, void *calldata)
{
	struct nfsd4_callback *cb = calldata;
	struct nfs4_delegation *dp = container_of(cb, struct nfs4_delegation, dl_recall);
	struct nfs4_client *clp = dp->dl_client;
	u32 minorversion = clp->cl_minorversion;
	int status = 0;

	cb->cb_minorversion = minorversion;
	if (minorversion) {
		status = nfsd41_cb_setup_sequence(clp, task);
		if (status) {
			if (status != -EAGAIN) {
				/* terminate rpc task */
				task->tk_status = status;
				task->tk_action = NULL;
			}
			return;
		}
	}
	rpc_call_start(task);
}

static void nfsd4_cb_done(struct rpc_task *task, void *calldata)
{
	struct nfsd4_callback *cb = calldata;
	struct nfs4_delegation *dp = container_of(cb, struct nfs4_delegation, dl_recall);
	struct nfs4_client *clp = dp->dl_client;

	dprintk("%s: minorversion=%d\n", __func__,
		clp->cl_minorversion);

	if (clp->cl_minorversion) {
		/* No need for lock, access serialized in nfsd4_cb_prepare */
		++clp->cl_cb_session->se_cb_seq_nr;
		clear_bit(0, &clp->cl_cb_slot_busy);
		rpc_wake_up_next(&clp->cl_cb_waitq);
		dprintk("%s: freed slot, new seqid=%d\n", __func__,
			clp->cl_cb_session->se_cb_seq_nr);

		/* We're done looking into the sequence information */
		task->tk_msg.rpc_resp = NULL;
	}
}

static void nfsd4_cb_recall_done(struct rpc_task *task, void *calldata)
{
	struct nfsd4_callback *cb = calldata;
	struct nfs4_delegation *dp = container_of(cb, struct nfs4_delegation, dl_recall);
	struct nfs4_client *clp = dp->dl_client;
	struct rpc_clnt *current_rpc_client = clp->cl_cb_client;

	nfsd4_cb_done(task, calldata);

	if (current_rpc_client == NULL) {
		/* We're shutting down; give up. */
		/* XXX: err, or is it ok just to fall through
		 * and rpc_restart_call? */
		return;
	}

	switch (task->tk_status) {
	case 0:
		return;
	case -EBADHANDLE:
	case -NFS4ERR_BAD_STATEID:
		/* Race: client probably got cb_recall
		 * before open reply granting delegation */
		break;
	default:
		/* Network partition? */
		atomic_set(&clp->cl_cb_set, 0);
		warn_no_callback_path(clp, task->tk_status);
		if (current_rpc_client != task->tk_client) {
			/* queue a callback on the new connection: */
			atomic_inc(&dp->dl_count);
			nfsd4_cb_recall(dp);
			return;
		}
	}
	if (dp->dl_retries--) {
		rpc_delay(task, 2*HZ);
		task->tk_status = 0;
		rpc_restart_call_prepare(task);
		return;
	} else {
		atomic_set(&clp->cl_cb_set, 0);
		warn_no_callback_path(clp, task->tk_status);
	}
}

static void nfsd4_cb_recall_release(void *calldata)
{
	struct nfsd4_callback *cb = calldata;
	struct nfs4_delegation *dp = container_of(cb, struct nfs4_delegation, dl_recall);

	nfs4_put_delegation(dp);
}

static const struct rpc_call_ops nfsd4_cb_recall_ops = {
	.rpc_call_prepare = nfsd4_cb_prepare,
	.rpc_call_done = nfsd4_cb_recall_done,
	.rpc_release = nfsd4_cb_recall_release,
};
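
/*
 * Rough lifecycle of a CB_RECALL rpc, as wired up above: nfsd4_cb_prepare
 * claims the single backchannel slot (NFSv4.1 only), or puts the task to
 * sleep on cl_cb_waitq if it is busy; nfsd4_cb_recall_done calls
 * nfsd4_cb_done to bump the slot sequence number and wake the next waiter,
 * then decides whether to retry, requeue on a new connection, or declare
 * the callback path down; nfsd4_cb_recall_release finally drops the
 * delegation reference held for the callback.
 */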

int nfsd4_create_callback_queue(void)
{
	callback_wq = create_singlethread_workqueue("nfsd4_callbacks");
	if (!callback_wq)
		return -ENOMEM;
	return 0;
}

void nfsd4_destroy_callback_queue(void)
{
	destroy_workqueue(callback_wq);
}

/* must be called under the state lock */
void nfsd4_shutdown_callback(struct nfs4_client *clp)
{
	set_bit(NFSD4_CLIENT_KILL, &clp->cl_cb_flags);
	/*
	 * Note this won't actually result in a null callback;
	 * instead, nfsd4_do_callback_rpc() will detect the killed
	 * client, destroy the rpc client, and stop:
	 */
	do_probe_callback(clp);
	flush_workqueue(callback_wq);
}

void nfsd4_release_cb(struct nfsd4_callback *cb)
{
	if (cb->cb_ops->rpc_release)
		cb->cb_ops->rpc_release(cb);
}

void nfsd4_process_cb_update(struct nfsd4_callback *cb)
{
	struct nfs4_cb_conn conn;
	struct nfs4_client *clp = cb->cb_clp;
	int err;

	/*
	 * This is either an update, or the client dying; in either case,
	 * kill the old client:
	 */
	if (clp->cl_cb_client) {
		rpc_shutdown_client(clp->cl_cb_client);
		clp->cl_cb_client = NULL;
	}
	if (test_bit(NFSD4_CLIENT_KILL, &clp->cl_cb_flags))
		return;
	spin_lock(&clp->cl_lock);
	/*
	 * Only serialized callback code is allowed to clear these
	 * flags; main nfsd code can only set them:
	 */
	BUG_ON(!clp->cl_cb_flags);
	clear_bit(NFSD4_CLIENT_CB_UPDATE, &clp->cl_cb_flags);
	memcpy(&conn, &cb->cb_clp->cl_cb_conn, sizeof(struct nfs4_cb_conn));
	spin_unlock(&clp->cl_lock);

	err = setup_callback_client(clp, &conn);
	if (err)
		warn_no_callback_path(clp, err);
}

void nfsd4_do_callback_rpc(struct work_struct *w)
{
	struct nfsd4_callback *cb = container_of(w, struct nfsd4_callback, cb_work);
	struct nfs4_client *clp = cb->cb_clp;
	struct rpc_clnt *clnt;

	if (clp->cl_cb_flags)
		nfsd4_process_cb_update(cb);

	clnt = clp->cl_cb_client;
	if (!clnt) {
		/* Callback channel broken, or client killed; give up: */
		nfsd4_release_cb(cb);
		return;
	}
	rpc_call_async(clnt, &cb->cb_msg, RPC_TASK_SOFT | RPC_TASK_SOFTCONN,
			cb->cb_ops, cb);
}

void nfsd4_cb_recall(struct nfs4_delegation *dp)
{
	struct nfsd4_callback *cb = &dp->dl_recall;

	dp->dl_retries = 1;
	cb->cb_op = dp;
	cb->cb_clp = dp->dl_client;
	cb->cb_msg.rpc_proc = &nfs4_cb_procedures[NFSPROC4_CLNT_CB_RECALL];
	cb->cb_msg.rpc_argp = cb;
	cb->cb_msg.rpc_resp = cb;
	cb->cb_msg.rpc_cred = callback_cred;

	cb->cb_ops = &nfsd4_cb_recall_ops;

	queue_work(callback_wq, &dp->dl_recall.cb_work);
}
824