xref: /openbmc/linux/fs/nfsd/nfs4callback.c (revision 5d331b7f)
1 /*
2  *  Copyright (c) 2001 The Regents of the University of Michigan.
3  *  All rights reserved.
4  *
5  *  Kendrick Smith <kmsmith@umich.edu>
6  *  Andy Adamson <andros@umich.edu>
7  *
8  *  Redistribution and use in source and binary forms, with or without
9  *  modification, are permitted provided that the following conditions
10  *  are met:
11  *
12  *  1. Redistributions of source code must retain the above copyright
13  *     notice, this list of conditions and the following disclaimer.
14  *  2. Redistributions in binary form must reproduce the above copyright
15  *     notice, this list of conditions and the following disclaimer in the
16  *     documentation and/or other materials provided with the distribution.
17  *  3. Neither the name of the University nor the names of its
18  *     contributors may be used to endorse or promote products derived
19  *     from this software without specific prior written permission.
20  *
21  *  THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
22  *  WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
23  *  MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
24  *  DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
25  *  FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
26  *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27  *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
28  *  BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
29  *  LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
30  *  NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
31  *  SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33 
34 #include <linux/sunrpc/clnt.h>
35 #include <linux/sunrpc/xprt.h>
36 #include <linux/sunrpc/svc_xprt.h>
37 #include <linux/slab.h>
38 #include "nfsd.h"
39 #include "state.h"
40 #include "netns.h"
41 #include "xdr4cb.h"
42 #include "xdr4.h"
43 
44 #define NFSDDBG_FACILITY                NFSDDBG_PROC
45 
46 static void nfsd4_mark_cb_fault(struct nfs4_client *, int reason);
47 
48 #define NFSPROC4_CB_NULL 0
49 #define NFSPROC4_CB_COMPOUND 1
50 
51 /* Index of predefined Linux callback client operations */
52 
53 struct nfs4_cb_compound_hdr {
54 	/* args */
55 	u32		ident;	/* minorversion 0 only */
56 	u32		nops;
57 	__be32		*nops_p;
58 	u32		minorversion;
59 	/* res */
60 	int		status;
61 };
62 
63 /*
64  * Handle decode buffer overflows out-of-line.
65  */
66 static void print_overflow_msg(const char *func, const struct xdr_stream *xdr)
67 {
68 	dprintk("NFS: %s prematurely hit the end of our receive buffer. "
69 		"Remaining buffer length is %tu words.\n",
70 		func, xdr->end - xdr->p);
71 }
72 
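/*
 * Encode a zero-length XDR array: a single zero count word and no elements.
 */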
73 static __be32 *xdr_encode_empty_array(__be32 *p)
74 {
75 	*p++ = xdr_zero;
76 	return p;
77 }
78 
79 /*
80  * Encode/decode NFSv4 CB basic data types
81  *
82  * Basic NFSv4 callback data types are defined in section 15 of RFC
83  * 3530: "Network File System (NFS) version 4 Protocol" and section
84  * 20 of RFC 5661: "Network File System (NFS) Version 4 Minor Version
85  * 1 Protocol"
86  */
87 
88 /*
89  *	nfs_cb_opnum4
90  *
91  *	enum nfs_cb_opnum4 {
92  *		OP_CB_GETATTR		= 3,
93  *		  ...
94  *	};
95  */
96 enum nfs_cb_opnum4 {
97 	OP_CB_GETATTR			= 3,
98 	OP_CB_RECALL			= 4,
99 	OP_CB_LAYOUTRECALL		= 5,
100 	OP_CB_NOTIFY			= 6,
101 	OP_CB_PUSH_DELEG		= 7,
102 	OP_CB_RECALL_ANY		= 8,
103 	OP_CB_RECALLABLE_OBJ_AVAIL	= 9,
104 	OP_CB_RECALL_SLOT		= 10,
105 	OP_CB_SEQUENCE			= 11,
106 	OP_CB_WANTS_CANCELLED		= 12,
107 	OP_CB_NOTIFY_LOCK		= 13,
108 	OP_CB_NOTIFY_DEVICEID		= 14,
109 	OP_CB_OFFLOAD			= 15,
110 	OP_CB_ILLEGAL			= 10044
111 };
112 
113 static void encode_nfs_cb_opnum4(struct xdr_stream *xdr, enum nfs_cb_opnum4 op)
114 {
115 	__be32 *p;
116 
117 	p = xdr_reserve_space(xdr, 4);
118 	*p = cpu_to_be32(op);
119 }
120 
121 /*
122  * nfs_fh4
123  *
124  *	typedef opaque nfs_fh4<NFS4_FHSIZE>;
125  */
126 static void encode_nfs_fh4(struct xdr_stream *xdr, const struct knfsd_fh *fh)
127 {
128 	u32 length = fh->fh_size;
129 	__be32 *p;
130 
131 	BUG_ON(length > NFS4_FHSIZE);
132 	p = xdr_reserve_space(xdr, 4 + length);
133 	xdr_encode_opaque(p, &fh->fh_base, length);
134 }
135 
136 /*
137  * stateid4
138  *
139  *	struct stateid4 {
140  *		uint32_t	seqid;
141  *		opaque		other[12];
142  *	};
143  */
144 static void encode_stateid4(struct xdr_stream *xdr, const stateid_t *sid)
145 {
146 	__be32 *p;
147 
148 	p = xdr_reserve_space(xdr, NFS4_STATEID_SIZE);
149 	*p++ = cpu_to_be32(sid->si_generation);
150 	xdr_encode_opaque_fixed(p, &sid->si_opaque, NFS4_STATEID_OTHER_SIZE);
151 }
152 
153 /*
154  * sessionid4
155  *
156  *	typedef opaque sessionid4[NFS4_SESSIONID_SIZE];
157  */
158 static void encode_sessionid4(struct xdr_stream *xdr,
159 			      const struct nfsd4_session *session)
160 {
161 	__be32 *p;
162 
163 	p = xdr_reserve_space(xdr, NFS4_MAX_SESSIONID_LEN);
164 	xdr_encode_opaque_fixed(p, session->se_sessionid.data,
165 					NFS4_MAX_SESSIONID_LEN);
166 }
167 
168 /*
169  * nfsstat4
170  */
171 static const struct {
172 	int stat;
173 	int errno;
174 } nfs_cb_errtbl[] = {
175 	{ NFS4_OK,		0		},
176 	{ NFS4ERR_PERM,		-EPERM		},
177 	{ NFS4ERR_NOENT,	-ENOENT		},
178 	{ NFS4ERR_IO,		-EIO		},
179 	{ NFS4ERR_NXIO,		-ENXIO		},
180 	{ NFS4ERR_ACCESS,	-EACCES		},
181 	{ NFS4ERR_EXIST,	-EEXIST		},
182 	{ NFS4ERR_XDEV,		-EXDEV		},
183 	{ NFS4ERR_NOTDIR,	-ENOTDIR	},
184 	{ NFS4ERR_ISDIR,	-EISDIR		},
185 	{ NFS4ERR_INVAL,	-EINVAL		},
186 	{ NFS4ERR_FBIG,		-EFBIG		},
187 	{ NFS4ERR_NOSPC,	-ENOSPC		},
188 	{ NFS4ERR_ROFS,		-EROFS		},
189 	{ NFS4ERR_MLINK,	-EMLINK		},
190 	{ NFS4ERR_NAMETOOLONG,	-ENAMETOOLONG	},
191 	{ NFS4ERR_NOTEMPTY,	-ENOTEMPTY	},
192 	{ NFS4ERR_DQUOT,	-EDQUOT		},
193 	{ NFS4ERR_STALE,	-ESTALE		},
194 	{ NFS4ERR_BADHANDLE,	-EBADHANDLE	},
195 	{ NFS4ERR_BAD_COOKIE,	-EBADCOOKIE	},
196 	{ NFS4ERR_NOTSUPP,	-ENOTSUPP	},
197 	{ NFS4ERR_TOOSMALL,	-ETOOSMALL	},
198 	{ NFS4ERR_SERVERFAULT,	-ESERVERFAULT	},
199 	{ NFS4ERR_BADTYPE,	-EBADTYPE	},
200 	{ NFS4ERR_LOCKED,	-EAGAIN		},
201 	{ NFS4ERR_RESOURCE,	-EREMOTEIO	},
202 	{ NFS4ERR_SYMLINK,	-ELOOP		},
203 	{ NFS4ERR_OP_ILLEGAL,	-EOPNOTSUPP	},
204 	{ NFS4ERR_DEADLOCK,	-EDEADLK	},
205 	{ -1,			-EIO		}
206 };
207 
208 /*
209  * If we cannot translate the error, the recovery routines should
210  * handle it.
211  *
212  * Note: remaining NFSv4 error codes have values > 10000, so should
213  * not conflict with native Linux error codes.
214  */
215 static int nfs_cb_stat_to_errno(int status)
216 {
217 	int i;
218 
219 	for (i = 0; nfs_cb_errtbl[i].stat != -1; i++) {
220 		if (nfs_cb_errtbl[i].stat == status)
221 			return nfs_cb_errtbl[i].errno;
222 	}
223 
224 	dprintk("NFSD: Unrecognized NFS CB status value: %u\n", status);
225 	return -status;
226 }
227 
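/*
 * Decode the header of a single callback result: check that the opnum
 * matches the operation we sent, and translate the nfsstat4 into a
 * local errno in *status.  Returns -EIO on a short buffer or an
 * unexpected opnum.
 */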
228 static int decode_cb_op_status(struct xdr_stream *xdr,
229 			       enum nfs_cb_opnum4 expected, int *status)
230 {
231 	__be32 *p;
232 	u32 op;
233 
234 	p = xdr_inline_decode(xdr, 4 + 4);
235 	if (unlikely(p == NULL))
236 		goto out_overflow;
237 	op = be32_to_cpup(p++);
238 	if (unlikely(op != expected))
239 		goto out_unexpected;
240 	*status = nfs_cb_stat_to_errno(be32_to_cpup(p));
241 	return 0;
242 out_overflow:
243 	print_overflow_msg(__func__, xdr);
244 	return -EIO;
245 out_unexpected:
246 	dprintk("NFSD: Callback server returned operation %d but "
247 		"we issued a request for %d\n", op, expected);
248 	return -EIO;
249 }
250 
251 /*
252  * CB_COMPOUND4args
253  *
254  *	struct CB_COMPOUND4args {
255  *		utf8str_cs	tag;
256  *		uint32_t	minorversion;
257  *		uint32_t	callback_ident;
258  *		nfs_cb_argop4	argarray<>;
259  *	};
260 */
261 static void encode_cb_compound4args(struct xdr_stream *xdr,
262 				    struct nfs4_cb_compound_hdr *hdr)
263 {
264 	__be32 * p;
265 
266 	p = xdr_reserve_space(xdr, 4 + 4 + 4 + 4);
267 	p = xdr_encode_empty_array(p);		/* empty tag */
268 	*p++ = cpu_to_be32(hdr->minorversion);
269 	*p++ = cpu_to_be32(hdr->ident);
270 
271 	hdr->nops_p = p;
272 	*p = cpu_to_be32(hdr->nops);		/* argarray element count */
273 }
274 
275 /*
276  * Update argarray element count
277  */
278 static void encode_cb_nops(struct nfs4_cb_compound_hdr *hdr)
279 {
280 	BUG_ON(hdr->nops > NFS4_MAX_BACK_CHANNEL_OPS);
281 	*hdr->nops_p = cpu_to_be32(hdr->nops);
282 }
283 
284 /*
285  * CB_COMPOUND4res
286  *
287  *	struct CB_COMPOUND4res {
288  *		nfsstat4	status;
289  *		utf8str_cs	tag;
290  *		nfs_cb_resop4	resarray<>;
291  *	};
292  */
293 static int decode_cb_compound4res(struct xdr_stream *xdr,
294 				  struct nfs4_cb_compound_hdr *hdr)
295 {
296 	u32 length;
297 	__be32 *p;
298 
299 	p = xdr_inline_decode(xdr, 4 + 4);
300 	if (unlikely(p == NULL))
301 		goto out_overflow;
302 	hdr->status = be32_to_cpup(p++);
303 	/* Ignore the tag */
304 	length = be32_to_cpup(p++);
305 	p = xdr_inline_decode(xdr, length + 4);
306 	if (unlikely(p == NULL))
307 		goto out_overflow;
308 	p += XDR_QUADLEN(length);
309 	hdr->nops = be32_to_cpup(p);
310 	return 0;
311 out_overflow:
312 	print_overflow_msg(__func__, xdr);
313 	return -EIO;
314 }
315 
316 /*
317  * CB_RECALL4args
318  *
319  *	struct CB_RECALL4args {
320  *		stateid4	stateid;
321  *		bool		truncate;
322  *		nfs_fh4		fh;
323  *	};
324  */
325 static void encode_cb_recall4args(struct xdr_stream *xdr,
326 				  const struct nfs4_delegation *dp,
327 				  struct nfs4_cb_compound_hdr *hdr)
328 {
329 	__be32 *p;
330 
331 	encode_nfs_cb_opnum4(xdr, OP_CB_RECALL);
332 	encode_stateid4(xdr, &dp->dl_stid.sc_stateid);
333 
334 	p = xdr_reserve_space(xdr, 4);
335 	*p++ = xdr_zero;			/* truncate */
336 
337 	encode_nfs_fh4(xdr, &dp->dl_stid.sc_file->fi_fhandle);
338 
339 	hdr->nops++;
340 }
341 
342 /*
343  * CB_SEQUENCE4args
344  *
345  *	struct CB_SEQUENCE4args {
346  *		sessionid4		csa_sessionid;
347  *		sequenceid4		csa_sequenceid;
348  *		slotid4			csa_slotid;
349  *		slotid4			csa_highest_slotid;
350  *		bool			csa_cachethis;
351  *		referring_call_list4	csa_referring_call_lists<>;
352  *	};
353  */
354 static void encode_cb_sequence4args(struct xdr_stream *xdr,
355 				    const struct nfsd4_callback *cb,
356 				    struct nfs4_cb_compound_hdr *hdr)
357 {
358 	struct nfsd4_session *session = cb->cb_clp->cl_cb_session;
359 	__be32 *p;
360 
361 	if (hdr->minorversion == 0)
362 		return;
363 
364 	encode_nfs_cb_opnum4(xdr, OP_CB_SEQUENCE);
365 	encode_sessionid4(xdr, session);
366 
367 	p = xdr_reserve_space(xdr, 4 + 4 + 4 + 4 + 4);
368 	*p++ = cpu_to_be32(session->se_cb_seq_nr);	/* csa_sequenceid */
369 	*p++ = xdr_zero;			/* csa_slotid */
370 	*p++ = xdr_zero;			/* csa_highest_slotid */
371 	*p++ = xdr_zero;			/* csa_cachethis */
372 	xdr_encode_empty_array(p);		/* csa_referring_call_lists */
373 
374 	hdr->nops++;
375 }
376 
377 /*
378  * CB_SEQUENCE4resok
379  *
380  *	struct CB_SEQUENCE4resok {
381  *		sessionid4	csr_sessionid;
382  *		sequenceid4	csr_sequenceid;
383  *		slotid4		csr_slotid;
384  *		slotid4		csr_highest_slotid;
385  *		slotid4		csr_target_highest_slotid;
386  *	};
387  *
388  *	union CB_SEQUENCE4res switch (nfsstat4 csr_status) {
389  *	case NFS4_OK:
390  *		CB_SEQUENCE4resok	csr_resok4;
391  *	default:
392  *		void;
393  *	};
394  *
395  * Our current back channel implementation supports a single backchannel
396  * with a single slot.
397  */
398 static int decode_cb_sequence4resok(struct xdr_stream *xdr,
399 				    struct nfsd4_callback *cb)
400 {
401 	struct nfsd4_session *session = cb->cb_clp->cl_cb_session;
402 	int status = -ESERVERFAULT;
403 	__be32 *p;
404 	u32 dummy;
405 
406 	/*
407 	 * If the server returns different values for sessionID, slotID or
408 	 * sequence number, the server is looney tunes.
409 	 */
410 	p = xdr_inline_decode(xdr, NFS4_MAX_SESSIONID_LEN + 4 + 4 + 4 + 4);
411 	if (unlikely(p == NULL))
412 		goto out_overflow;
413 
414 	if (memcmp(p, session->se_sessionid.data, NFS4_MAX_SESSIONID_LEN)) {
415 		dprintk("NFS: %s Invalid session id\n", __func__);
416 		goto out;
417 	}
418 	p += XDR_QUADLEN(NFS4_MAX_SESSIONID_LEN);
419 
420 	dummy = be32_to_cpup(p++);
421 	if (dummy != session->se_cb_seq_nr) {
422 		dprintk("NFS: %s Invalid sequence number\n", __func__);
423 		goto out;
424 	}
425 
426 	dummy = be32_to_cpup(p++);
427 	if (dummy != 0) {
428 		dprintk("NFS: %s Invalid slotid\n", __func__);
429 		goto out;
430 	}
431 
432 	/*
433 	 * FIXME: process highest slotid and target highest slotid
434 	 */
435 	status = 0;
436 out:
437 	cb->cb_seq_status = status;
438 	return status;
439 out_overflow:
440 	print_overflow_msg(__func__, xdr);
441 	status = -EIO;
442 	goto out;
443 }
444 
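/*
 * NFSv4.0 callbacks carry no CB_SEQUENCE, so there is nothing to decode
 * for minorversion 0.  Otherwise decode the op status and, on success,
 * the CB_SEQUENCE4resok body.
 */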
445 static int decode_cb_sequence4res(struct xdr_stream *xdr,
446 				  struct nfsd4_callback *cb)
447 {
448 	int status;
449 
450 	if (cb->cb_clp->cl_minorversion == 0)
451 		return 0;
452 
453 	status = decode_cb_op_status(xdr, OP_CB_SEQUENCE, &cb->cb_seq_status);
454 	if (unlikely(status || cb->cb_seq_status))
455 		return status;
456 
457 	return decode_cb_sequence4resok(xdr, cb);
458 }
459 
460 /*
461  * NFSv4.0 and NFSv4.1 XDR encode functions
462  *
463  * NFSv4.0 callback argument types are defined in section 15 of RFC
464  * 3530: "Network File System (NFS) version 4 Protocol" and section 20
465  * of RFC 5661:  "Network File System (NFS) Version 4 Minor Version 1
466  * Protocol".
467  */
468 
469 /*
470  * NB: Without this zero space reservation, callbacks over krb5p fail
471  */
472 static void nfs4_xdr_enc_cb_null(struct rpc_rqst *req, struct xdr_stream *xdr,
473 				 const void *__unused)
474 {
475 	xdr_reserve_space(xdr, 0);
476 }
477 
478 /*
479  * 20.2. Operation 4: CB_RECALL - Recall a Delegation
480  */
481 static void nfs4_xdr_enc_cb_recall(struct rpc_rqst *req, struct xdr_stream *xdr,
482 				   const void *data)
483 {
484 	const struct nfsd4_callback *cb = data;
485 	const struct nfs4_delegation *dp = cb_to_delegation(cb);
486 	struct nfs4_cb_compound_hdr hdr = {
487 		.ident = cb->cb_clp->cl_cb_ident,
488 		.minorversion = cb->cb_clp->cl_minorversion,
489 	};
490 
491 	encode_cb_compound4args(xdr, &hdr);
492 	encode_cb_sequence4args(xdr, cb, &hdr);
493 	encode_cb_recall4args(xdr, dp, &hdr);
494 	encode_cb_nops(&hdr);
495 }
496 
497 
498 /*
499  * NFSv4.0 and NFSv4.1 XDR decode functions
500  *
501  * NFSv4.0 callback result types are defined in section 15 of RFC
502  * 3530: "Network File System (NFS) version 4 Protocol" and section 20
503  * of RFC 5661:  "Network File System (NFS) Version 4 Minor Version 1
504  * Protocol".
505  */
506 
507 static int nfs4_xdr_dec_cb_null(struct rpc_rqst *req, struct xdr_stream *xdr,
508 				void *__unused)
509 {
510 	return 0;
511 }
512 
513 /*
514  * 20.2. Operation 4: CB_RECALL - Recall a Delegation
515  */
516 static int nfs4_xdr_dec_cb_recall(struct rpc_rqst *rqstp,
517 				  struct xdr_stream *xdr,
518 				  void *data)
519 {
520 	struct nfsd4_callback *cb = data;
521 	struct nfs4_cb_compound_hdr hdr;
522 	int status;
523 
524 	status = decode_cb_compound4res(xdr, &hdr);
525 	if (unlikely(status))
526 		return status;
527 
528 	if (cb != NULL) {
529 		status = decode_cb_sequence4res(xdr, cb);
530 		if (unlikely(status || cb->cb_seq_status))
531 			return status;
532 	}
533 
534 	return decode_cb_op_status(xdr, OP_CB_RECALL, &cb->cb_status);
535 }
536 
537 #ifdef CONFIG_NFSD_PNFS
538 /*
539  * CB_LAYOUTRECALL4args
540  *
541  *	struct layoutrecall_file4 {
542  *		nfs_fh4         lor_fh;
543  *		offset4         lor_offset;
544  *		length4         lor_length;
545  *		stateid4        lor_stateid;
546  *	};
547  *
548  *	union layoutrecall4 switch(layoutrecall_type4 lor_recalltype) {
549  *	case LAYOUTRECALL4_FILE:
550  *		layoutrecall_file4 lor_layout;
551  *	case LAYOUTRECALL4_FSID:
552  *		fsid4              lor_fsid;
553  *	case LAYOUTRECALL4_ALL:
554  *		void;
555  *	};
556  *
557  *	struct CB_LAYOUTRECALL4args {
558  *		layouttype4             clora_type;
559  *		layoutiomode4           clora_iomode;
560  *		bool                    clora_changed;
561  *		layoutrecall4           clora_recall;
562  *	};
563  */
564 static void encode_cb_layout4args(struct xdr_stream *xdr,
565 				  const struct nfs4_layout_stateid *ls,
566 				  struct nfs4_cb_compound_hdr *hdr)
567 {
568 	__be32 *p;
569 
570 	BUG_ON(hdr->minorversion == 0);
571 
572 	p = xdr_reserve_space(xdr, 5 * 4);
573 	*p++ = cpu_to_be32(OP_CB_LAYOUTRECALL);
574 	*p++ = cpu_to_be32(ls->ls_layout_type);
575 	*p++ = cpu_to_be32(IOMODE_ANY);
576 	*p++ = cpu_to_be32(1);
577 	*p = cpu_to_be32(RETURN_FILE);
578 
579 	encode_nfs_fh4(xdr, &ls->ls_stid.sc_file->fi_fhandle);
580 
581 	p = xdr_reserve_space(xdr, 2 * 8);
582 	p = xdr_encode_hyper(p, 0);
583 	xdr_encode_hyper(p, NFS4_MAX_UINT64);
584 
585 	encode_stateid4(xdr, &ls->ls_recall_sid);
586 
587 	hdr->nops++;
588 }
589 
590 static void nfs4_xdr_enc_cb_layout(struct rpc_rqst *req,
591 				   struct xdr_stream *xdr,
592 				   const void *data)
593 {
594 	const struct nfsd4_callback *cb = data;
595 	const struct nfs4_layout_stateid *ls =
596 		container_of(cb, struct nfs4_layout_stateid, ls_recall);
597 	struct nfs4_cb_compound_hdr hdr = {
598 		.ident = 0,
599 		.minorversion = cb->cb_clp->cl_minorversion,
600 	};
601 
602 	encode_cb_compound4args(xdr, &hdr);
603 	encode_cb_sequence4args(xdr, cb, &hdr);
604 	encode_cb_layout4args(xdr, ls, &hdr);
605 	encode_cb_nops(&hdr);
606 }
607 
608 static int nfs4_xdr_dec_cb_layout(struct rpc_rqst *rqstp,
609 				  struct xdr_stream *xdr,
610 				  void *data)
611 {
612 	struct nfsd4_callback *cb = data;
613 	struct nfs4_cb_compound_hdr hdr;
614 	int status;
615 
616 	status = decode_cb_compound4res(xdr, &hdr);
617 	if (unlikely(status))
618 		return status;
619 
620 	if (cb) {
621 		status = decode_cb_sequence4res(xdr, cb);
622 		if (unlikely(status || cb->cb_seq_status))
623 			return status;
624 	}
625 	return decode_cb_op_status(xdr, OP_CB_LAYOUTRECALL, &cb->cb_status);
626 }
627 #endif /* CONFIG_NFSD_PNFS */
628 
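/*
 * XDR encode a state owner: the 8-byte clientid followed by the
 * variable-length owner opaque.
 */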
629 static void encode_stateowner(struct xdr_stream *xdr, struct nfs4_stateowner *so)
630 {
631 	__be32	*p;
632 
633 	p = xdr_reserve_space(xdr, 8 + 4 + so->so_owner.len);
634 	p = xdr_encode_opaque_fixed(p, &so->so_client->cl_clientid, 8);
635 	xdr_encode_opaque(p, so->so_owner.data, so->so_owner.len);
636 }
637 
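/*
 * CB_NOTIFY_LOCK4args
 *
 *	struct CB_NOTIFY_LOCK4args {
 *		nfs_fh4		cnl_fh;
 *		lock_owner4	cnl_lock_owner;
 *	};
 */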
638 static void nfs4_xdr_enc_cb_notify_lock(struct rpc_rqst *req,
639 					struct xdr_stream *xdr,
640 					const void *data)
641 {
642 	const struct nfsd4_callback *cb = data;
643 	const struct nfsd4_blocked_lock *nbl =
644 		container_of(cb, struct nfsd4_blocked_lock, nbl_cb);
645 	struct nfs4_lockowner *lo = (struct nfs4_lockowner *)nbl->nbl_lock.fl_owner;
646 	struct nfs4_cb_compound_hdr hdr = {
647 		.ident = 0,
648 		.minorversion = cb->cb_clp->cl_minorversion,
649 	};
650 
651 	__be32 *p;
652 
653 	BUG_ON(hdr.minorversion == 0);
654 
655 	encode_cb_compound4args(xdr, &hdr);
656 	encode_cb_sequence4args(xdr, cb, &hdr);
657 
658 	p = xdr_reserve_space(xdr, 4);
659 	*p = cpu_to_be32(OP_CB_NOTIFY_LOCK);
660 	encode_nfs_fh4(xdr, &nbl->nbl_fh);
661 	encode_stateowner(xdr, &lo->lo_owner);
662 	hdr.nops++;
663 
664 	encode_cb_nops(&hdr);
665 }
666 
667 static int nfs4_xdr_dec_cb_notify_lock(struct rpc_rqst *rqstp,
668 					struct xdr_stream *xdr,
669 					void *data)
670 {
671 	struct nfsd4_callback *cb = data;
672 	struct nfs4_cb_compound_hdr hdr;
673 	int status;
674 
675 	status = decode_cb_compound4res(xdr, &hdr);
676 	if (unlikely(status))
677 		return status;
678 
679 	if (cb) {
680 		status = decode_cb_sequence4res(xdr, cb);
681 		if (unlikely(status || cb->cb_seq_status))
682 			return status;
683 	}
684 	return decode_cb_op_status(xdr, OP_CB_NOTIFY_LOCK, &cb->cb_status);
685 }
686 
687 /*
688  * struct write_response4 {
689  *	stateid4	wr_callback_id<1>;
690  *	length4		wr_count;
691  *	stable_how4	wr_committed;
692  *	verifier4	wr_writeverf;
693  * };
694  * union offload_info4 switch (nfsstat4 coa_status) {
695  *	case NFS4_OK:
696  *		write_response4	coa_resok4;
697  *	default:
698  *	length4		coa_bytes_copied;
699  * };
700  * struct CB_OFFLOAD4args {
701  *	nfs_fh4		coa_fh;
702  *	stateid4	coa_stateid;
703  *	offload_info4	coa_offload_info;
704  * };
705  */
706 static void encode_offload_info4(struct xdr_stream *xdr,
707 				 __be32 nfserr,
708 				 const struct nfsd4_copy *cp)
709 {
710 	__be32 *p;
711 
712 	p = xdr_reserve_space(xdr, 4);
713 	*p++ = nfserr;
714 	if (!nfserr) {
715 		p = xdr_reserve_space(xdr, 4 + 8 + 4 + NFS4_VERIFIER_SIZE);
716 		p = xdr_encode_empty_array(p);
717 		p = xdr_encode_hyper(p, cp->cp_res.wr_bytes_written);
718 		*p++ = cpu_to_be32(cp->cp_res.wr_stable_how);
719 		p = xdr_encode_opaque_fixed(p, cp->cp_res.wr_verifier.data,
720 					    NFS4_VERIFIER_SIZE);
721 	} else {
722 		p = xdr_reserve_space(xdr, 8);
723 		/* We always return success if bytes were written */
724 		p = xdr_encode_hyper(p, 0);
725 	}
726 }
727 
728 static void encode_cb_offload4args(struct xdr_stream *xdr,
729 				   __be32 nfserr,
730 				   const struct knfsd_fh *fh,
731 				   const struct nfsd4_copy *cp,
732 				   struct nfs4_cb_compound_hdr *hdr)
733 {
734 	__be32 *p;
735 
736 	p = xdr_reserve_space(xdr, 4);
737 	*p++ = cpu_to_be32(OP_CB_OFFLOAD);
738 	encode_nfs_fh4(xdr, fh);
739 	encode_stateid4(xdr, &cp->cp_res.cb_stateid);
740 	encode_offload_info4(xdr, nfserr, cp);
741 
742 	hdr->nops++;
743 }
744 
745 static void nfs4_xdr_enc_cb_offload(struct rpc_rqst *req,
746 				    struct xdr_stream *xdr,
747 				    const void *data)
748 {
749 	const struct nfsd4_callback *cb = data;
750 	const struct nfsd4_copy *cp =
751 		container_of(cb, struct nfsd4_copy, cp_cb);
752 	struct nfs4_cb_compound_hdr hdr = {
753 		.ident = 0,
754 		.minorversion = cb->cb_clp->cl_minorversion,
755 	};
756 
757 	encode_cb_compound4args(xdr, &hdr);
758 	encode_cb_sequence4args(xdr, cb, &hdr);
759 	encode_cb_offload4args(xdr, cp->nfserr, &cp->fh, cp, &hdr);
760 	encode_cb_nops(&hdr);
761 }
762 
763 static int nfs4_xdr_dec_cb_offload(struct rpc_rqst *rqstp,
764 				   struct xdr_stream *xdr,
765 				   void *data)
766 {
767 	struct nfsd4_callback *cb = data;
768 	struct nfs4_cb_compound_hdr hdr;
769 	int status;
770 
771 	status = decode_cb_compound4res(xdr, &hdr);
772 	if (unlikely(status))
773 		return status;
774 
775 	if (cb) {
776 		status = decode_cb_sequence4res(xdr, cb);
777 		if (unlikely(status || cb->cb_seq_status))
778 			return status;
779 	}
780 	return decode_cb_op_status(xdr, OP_CB_OFFLOAD, &cb->cb_status);
781 }
782 /*
783  * RPC procedure tables
784  */
785 #define PROC(proc, call, argtype, restype)				\
786 [NFSPROC4_CLNT_##proc] = {						\
787 	.p_proc    = NFSPROC4_CB_##call,				\
788 	.p_encode  = nfs4_xdr_enc_##argtype,		\
789 	.p_decode  = nfs4_xdr_dec_##restype,				\
790 	.p_arglen  = NFS4_enc_##argtype##_sz,				\
791 	.p_replen  = NFS4_dec_##restype##_sz,				\
792 	.p_statidx = NFSPROC4_CB_##call,				\
793 	.p_name    = #proc,						\
794 }
795 
796 static const struct rpc_procinfo nfs4_cb_procedures[] = {
797 	PROC(CB_NULL,	NULL,		cb_null,	cb_null),
798 	PROC(CB_RECALL,	COMPOUND,	cb_recall,	cb_recall),
799 #ifdef CONFIG_NFSD_PNFS
800 	PROC(CB_LAYOUT,	COMPOUND,	cb_layout,	cb_layout),
801 #endif
802 	PROC(CB_NOTIFY_LOCK,	COMPOUND,	cb_notify_lock,	cb_notify_lock),
803 	PROC(CB_OFFLOAD,	COMPOUND,	cb_offload,	cb_offload),
804 };
805 
806 static unsigned int nfs4_cb_counts[ARRAY_SIZE(nfs4_cb_procedures)];
807 static const struct rpc_version nfs_cb_version4 = {
808 /*
809  * Note on the callback rpc program version number: despite language in rfc
810  * 5661 section 18.36.3 requiring servers to use 4 in this field, the
811  * official xdr descriptions for both 4.0 and 4.1 specify version 1, and
812  * in practice that appears to be what implementations use.  The section
813  * 18.36.3 language is expected to be fixed in an erratum.
814  */
815 	.number			= 1,
816 	.nrprocs		= ARRAY_SIZE(nfs4_cb_procedures),
817 	.procs			= nfs4_cb_procedures,
818 	.counts			= nfs4_cb_counts,
819 };
820 
821 static const struct rpc_version *nfs_cb_version[2] = {
822 	[1] = &nfs_cb_version4,
823 };
824 
825 static const struct rpc_program cb_program;
826 
827 static struct rpc_stat cb_stats = {
828 	.program		= &cb_program
829 };
830 
831 #define NFS4_CALLBACK 0x40000000
832 static const struct rpc_program cb_program = {
833 	.name			= "nfs4_cb",
834 	.number			= NFS4_CALLBACK,
835 	.nrvers			= ARRAY_SIZE(nfs_cb_version),
836 	.version		= nfs_cb_version,
837 	.stats			= &cb_stats,
838 	.pipe_dir_name		= "nfsd4_cb",
839 };
840 
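/*
 * Allow a callback roughly a tenth of the lease period (but at least
 * one second) to complete.
 */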
841 static int max_cb_time(struct net *net)
842 {
843 	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
844 	return max(nn->nfsd4_lease/10, (time_t)1) * HZ;
845 }
846 
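/*
 * NFSv4.0 uses a machine credential for the client's target principal
 * (falling back to "nfs"); NFSv4.1+ looks up a credential matching the
 * uid/gid the client supplied as backchannel security parameters.
 */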
847 static struct rpc_cred *get_backchannel_cred(struct nfs4_client *clp, struct rpc_clnt *client, struct nfsd4_session *ses)
848 {
849 	if (clp->cl_minorversion == 0) {
850 		char *principal = clp->cl_cred.cr_targ_princ ?
851 					clp->cl_cred.cr_targ_princ : "nfs";
852 		struct rpc_cred *cred;
853 
854 		cred = rpc_lookup_machine_cred(principal);
855 		if (!IS_ERR(cred))
856 			get_rpccred(cred);
857 		return cred;
858 	} else {
859 		struct rpc_auth *auth = client->cl_auth;
860 		struct auth_cred acred = {};
861 
862 		acred.uid = ses->se_cb_sec.uid;
863 		acred.gid = ses->se_cb_sec.gid;
864 		return auth->au_ops->lookup_cred(client->cl_auth, &acred, 0);
865 	}
866 }
867 
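/*
 * Create the rpc client used to send callbacks.  For NFSv4.0 this is a
 * new TCP connection to the callback address the client supplied; for
 * NFSv4.1+ the backchannel runs over the client's existing connection
 * (conn->cb_xprt).
 */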
868 static int setup_callback_client(struct nfs4_client *clp, struct nfs4_cb_conn *conn, struct nfsd4_session *ses)
869 {
870 	int maxtime = max_cb_time(clp->net);
871 	struct rpc_timeout	timeparms = {
872 		.to_initval	= maxtime,
873 		.to_retries	= 0,
874 		.to_maxval	= maxtime,
875 	};
876 	struct rpc_create_args args = {
877 		.net		= clp->net,
878 		.address	= (struct sockaddr *) &conn->cb_addr,
879 		.addrsize	= conn->cb_addrlen,
880 		.saddress	= (struct sockaddr *) &conn->cb_saddr,
881 		.timeout	= &timeparms,
882 		.program	= &cb_program,
883 		.version	= 1,
884 		.flags		= (RPC_CLNT_CREATE_NOPING | RPC_CLNT_CREATE_QUIET),
885 	};
886 	struct rpc_clnt *client;
887 	struct rpc_cred *cred;
888 
889 	if (clp->cl_minorversion == 0) {
890 		if (!clp->cl_cred.cr_principal &&
891 				(clp->cl_cred.cr_flavor >= RPC_AUTH_GSS_KRB5))
892 			return -EINVAL;
893 		args.client_name = clp->cl_cred.cr_principal;
894 		args.prognumber	= conn->cb_prog;
895 		args.protocol = XPRT_TRANSPORT_TCP;
896 		args.authflavor = clp->cl_cred.cr_flavor;
897 		clp->cl_cb_ident = conn->cb_ident;
898 	} else {
899 		if (!conn->cb_xprt)
900 			return -EINVAL;
901 		clp->cl_cb_conn.cb_xprt = conn->cb_xprt;
902 		clp->cl_cb_session = ses;
903 		args.bc_xprt = conn->cb_xprt;
904 		args.prognumber = clp->cl_cb_session->se_cb_prog;
905 		args.protocol = conn->cb_xprt->xpt_class->xcl_ident |
906 				XPRT_TRANSPORT_BC;
907 		args.authflavor = ses->se_cb_sec.flavor;
908 	}
909 	/* Create RPC client */
910 	client = rpc_create(&args);
911 	if (IS_ERR(client)) {
912 		dprintk("NFSD: couldn't create callback client: %ld\n",
913 			PTR_ERR(client));
914 		return PTR_ERR(client);
915 	}
916 	cred = get_backchannel_cred(clp, client, ses);
917 	if (IS_ERR(cred)) {
918 		rpc_shutdown_client(client);
919 		return PTR_ERR(cred);
920 	}
921 	clp->cl_cb_client = client;
922 	clp->cl_cb_cred = cred;
923 	return 0;
924 }
925 
926 static void warn_no_callback_path(struct nfs4_client *clp, int reason)
927 {
928 	dprintk("NFSD: warning: no callback path to client %.*s: error %d\n",
929 		(int)clp->cl_name.len, clp->cl_name.data, reason);
930 }
931 
932 static void nfsd4_mark_cb_down(struct nfs4_client *clp, int reason)
933 {
934 	if (test_bit(NFSD4_CLIENT_CB_UPDATE, &clp->cl_flags))
935 		return;
936 	clp->cl_cb_state = NFSD4_CB_DOWN;
937 	warn_no_callback_path(clp, reason);
938 }
939 
940 static void nfsd4_mark_cb_fault(struct nfs4_client *clp, int reason)
941 {
942 	if (test_bit(NFSD4_CLIENT_CB_UPDATE, &clp->cl_flags))
943 		return;
944 	clp->cl_cb_state = NFSD4_CB_FAULT;
945 	warn_no_callback_path(clp, reason);
946 }
947 
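/* The NULL probe completed: record whether the callback channel is usable */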
948 static void nfsd4_cb_probe_done(struct rpc_task *task, void *calldata)
949 {
950 	struct nfs4_client *clp = container_of(calldata, struct nfs4_client, cl_cb_null);
951 
952 	if (task->tk_status)
953 		nfsd4_mark_cb_down(clp, task->tk_status);
954 	else
955 		clp->cl_cb_state = NFSD4_CB_UP;
956 }
957 
958 static const struct rpc_call_ops nfsd4_cb_probe_ops = {
959 	/* XXX: release method to ensure we set the cb channel down if
960 	 * necessary on early failure? */
961 	.rpc_call_done = nfsd4_cb_probe_done,
962 };
963 
964 static struct workqueue_struct *callback_wq;
965 
966 /*
967  * Poke the callback thread to process any updates to the callback
968  * parameters, and send a null probe.
969  */
970 void nfsd4_probe_callback(struct nfs4_client *clp)
971 {
972 	clp->cl_cb_state = NFSD4_CB_UNKNOWN;
973 	set_bit(NFSD4_CLIENT_CB_UPDATE, &clp->cl_flags);
974 	nfsd4_run_cb(&clp->cl_cb_null);
975 }
976 
977 void nfsd4_probe_callback_sync(struct nfs4_client *clp)
978 {
979 	nfsd4_probe_callback(clp);
980 	flush_workqueue(callback_wq);
981 }
982 
983 void nfsd4_change_callback(struct nfs4_client *clp, struct nfs4_cb_conn *conn)
984 {
985 	clp->cl_cb_state = NFSD4_CB_UNKNOWN;
986 	spin_lock(&clp->cl_lock);
987 	memcpy(&clp->cl_cb_conn, conn, sizeof(struct nfs4_cb_conn));
988 	spin_unlock(&clp->cl_lock);
989 }
990 
991 /*
992  * There's currently a single callback channel slot.
993  * If the slot is available, then mark it busy.  Otherwise, put the
994  * calling task to sleep on the callback RPC wait queue.
995  */
996 static bool nfsd41_cb_get_slot(struct nfs4_client *clp, struct rpc_task *task)
997 {
998 	if (test_and_set_bit(0, &clp->cl_cb_slot_busy) != 0) {
999 		rpc_sleep_on(&clp->cl_cb_waitq, task, NULL);
1000 		/* Race breaker */
1001 		if (test_and_set_bit(0, &clp->cl_cb_slot_busy) != 0) {
1002 			dprintk("%s slot is busy\n", __func__);
1003 			return false;
1004 		}
1005 		rpc_wake_up_queued_task(&clp->cl_cb_waitq, task);
1006 	}
1007 	return true;
1008 }
1009 
1010 /*
1011  * TODO: cb_sequence should support referring call lists, cachethis, multiple
1012  * slots, and mark callback channel down on communication errors.
1013  */
1014 static void nfsd4_cb_prepare(struct rpc_task *task, void *calldata)
1015 {
1016 	struct nfsd4_callback *cb = calldata;
1017 	struct nfs4_client *clp = cb->cb_clp;
1018 	u32 minorversion = clp->cl_minorversion;
1019 
1020 	/*
1021 	 * cb_seq_status is only set in decode_cb_sequence4res,
1022 	 * and so will remain 1 if an rpc level failure occurs.
1023 	 */
1024 	cb->cb_seq_status = 1;
1025 	cb->cb_status = 0;
1026 	if (minorversion) {
1027 		if (!nfsd41_cb_get_slot(clp, task))
1028 			return;
1029 	}
1030 	rpc_call_start(task);
1031 }
1032 
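/*
 * Process the result of the CB_SEQUENCE operation (if any): advance the
 * slot sequence number on success, free the callback slot when the
 * exchange is over, and decide whether the rpc needs to be restarted.
 * Returns false if the caller should skip further processing of this
 * reply.
 */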
1033 static bool nfsd4_cb_sequence_done(struct rpc_task *task, struct nfsd4_callback *cb)
1034 {
1035 	struct nfs4_client *clp = cb->cb_clp;
1036 	struct nfsd4_session *session = clp->cl_cb_session;
1037 	bool ret = true;
1038 
1039 	if (!clp->cl_minorversion) {
1040 		/*
1041 		 * If the backchannel connection was shut down while this
1042 		 * task was queued, we need to resubmit it after setting up
1043 		 * a new backchannel connection.
1044 		 *
1045 		 * Note that if we lost our callback connection permanently
1046 		 * the submission code will error out, so we don't need to
1047 		 * handle that case here.
1048 		 */
1049 		if (task->tk_flags & RPC_TASK_KILLED)
1050 			goto need_restart;
1051 
1052 		return true;
1053 	}
1054 
1055 	switch (cb->cb_seq_status) {
1056 	case 0:
1057 		/*
1058 		 * No need for lock, access serialized in nfsd4_cb_prepare
1059 		 *
1060 		 * RFC5661 20.9.3
1061 		 * If CB_SEQUENCE returns an error, then the state of the slot
1062 		 * (sequence ID, cached reply) MUST NOT change.
1063 		 */
1064 		++session->se_cb_seq_nr;
1065 		break;
1066 	case -ESERVERFAULT:
1067 		++session->se_cb_seq_nr;
1068 		/* Fall through */
1069 	case 1:
1070 	case -NFS4ERR_BADSESSION:
1071 		nfsd4_mark_cb_fault(cb->cb_clp, cb->cb_seq_status);
1072 		ret = false;
1073 		break;
1074 	case -NFS4ERR_DELAY:
1075 		if (!rpc_restart_call(task))
1076 			goto out;
1077 
1078 		rpc_delay(task, 2 * HZ);
1079 		return false;
1080 	case -NFS4ERR_BADSLOT:
1081 		goto retry_nowait;
1082 	case -NFS4ERR_SEQ_MISORDERED:
1083 		if (session->se_cb_seq_nr != 1) {
1084 			session->se_cb_seq_nr = 1;
1085 			goto retry_nowait;
1086 		}
1087 		break;
1088 	default:
1089 		dprintk("%s: unprocessed error %d\n", __func__,
1090 			cb->cb_seq_status);
1091 	}
1092 
1093 	clear_bit(0, &clp->cl_cb_slot_busy);
1094 	rpc_wake_up_next(&clp->cl_cb_waitq);
1095 	dprintk("%s: freed slot, new seqid=%d\n", __func__,
1096 		clp->cl_cb_session->se_cb_seq_nr);
1097 
1098 	if (task->tk_flags & RPC_TASK_KILLED)
1099 		goto need_restart;
1100 out:
1101 	return ret;
1102 retry_nowait:
1103 	if (rpc_restart_call_prepare(task))
1104 		ret = false;
1105 	goto out;
1106 need_restart:
1107 	task->tk_status = 0;
1108 	cb->cb_need_restart = true;
1109 	return false;
1110 }
1111 
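/*
 * Completion handler for callback rpcs: run the per-callback ->done
 * method and, depending on its return value, restart the call, finish,
 * or mark the callback channel down.
 */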
1112 static void nfsd4_cb_done(struct rpc_task *task, void *calldata)
1113 {
1114 	struct nfsd4_callback *cb = calldata;
1115 	struct nfs4_client *clp = cb->cb_clp;
1116 
1117 	dprintk("%s: minorversion=%d\n", __func__,
1118 		clp->cl_minorversion);
1119 
1120 	if (!nfsd4_cb_sequence_done(task, cb))
1121 		return;
1122 
1123 	if (cb->cb_status) {
1124 		WARN_ON_ONCE(task->tk_status);
1125 		task->tk_status = cb->cb_status;
1126 	}
1127 
1128 	switch (cb->cb_ops->done(cb, task)) {
1129 	case 0:
1130 		task->tk_status = 0;
1131 		rpc_restart_call_prepare(task);
1132 		return;
1133 	case 1:
1134 		break;
1135 	case -1:
1136 		/* Network partition? */
1137 		nfsd4_mark_cb_down(clp, task->tk_status);
1138 		break;
1139 	default:
1140 		BUG();
1141 	}
1142 }
1143 
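/*
 * rpc_release: requeue the callback if it needs to be restarted,
 * otherwise hand it back to its owner via ->release.
 */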
1144 static void nfsd4_cb_release(void *calldata)
1145 {
1146 	struct nfsd4_callback *cb = calldata;
1147 
1148 	if (cb->cb_need_restart)
1149 		nfsd4_run_cb(cb);
1150 	else
1151 		cb->cb_ops->release(cb);
1152 
1153 }
1154 
1155 static const struct rpc_call_ops nfsd4_cb_ops = {
1156 	.rpc_call_prepare = nfsd4_cb_prepare,
1157 	.rpc_call_done = nfsd4_cb_done,
1158 	.rpc_release = nfsd4_cb_release,
1159 };
1160 
1161 int nfsd4_create_callback_queue(void)
1162 {
1163 	callback_wq = alloc_ordered_workqueue("nfsd4_callbacks", 0);
1164 	if (!callback_wq)
1165 		return -ENOMEM;
1166 	return 0;
1167 }
1168 
1169 void nfsd4_destroy_callback_queue(void)
1170 {
1171 	destroy_workqueue(callback_wq);
1172 }
1173 
1174 /* must be called under the state lock */
1175 void nfsd4_shutdown_callback(struct nfs4_client *clp)
1176 {
1177 	set_bit(NFSD4_CLIENT_CB_KILL, &clp->cl_flags);
1178 	/*
1179 	 * Note this won't actually result in a null callback;
1180 	 * instead, nfsd4_run_cb_work() will detect the killed
1181 	 * client, destroy the rpc client, and stop:
1182 	 */
1183 	nfsd4_run_cb(&clp->cl_cb_null);
1184 	flush_workqueue(callback_wq);
1185 }
1186 
1187 /* requires cl_lock: */
1188 static struct nfsd4_conn * __nfsd4_find_backchannel(struct nfs4_client *clp)
1189 {
1190 	struct nfsd4_session *s;
1191 	struct nfsd4_conn *c;
1192 
1193 	list_for_each_entry(s, &clp->cl_sessions, se_perclnt) {
1194 		list_for_each_entry(c, &s->se_conns, cn_persession) {
1195 			if (c->cn_flags & NFS4_CDFC4_BACK)
1196 				return c;
1197 		}
1198 	}
1199 	return NULL;
1200 }
1201 
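/*
 * The callback parameters have changed (or the client is going away):
 * tear down the old rpc client and, unless the client has been killed,
 * build a new one from the current connection information.
 */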
1202 static void nfsd4_process_cb_update(struct nfsd4_callback *cb)
1203 {
1204 	struct nfs4_cb_conn conn;
1205 	struct nfs4_client *clp = cb->cb_clp;
1206 	struct nfsd4_session *ses = NULL;
1207 	struct nfsd4_conn *c;
1208 	int err;
1209 
1210 	/*
1211 	 * This is either an update, or the client dying; in either case,
1212 	 * kill the old client:
1213 	 */
1214 	if (clp->cl_cb_client) {
1215 		rpc_shutdown_client(clp->cl_cb_client);
1216 		clp->cl_cb_client = NULL;
1217 		put_rpccred(clp->cl_cb_cred);
1218 		clp->cl_cb_cred = NULL;
1219 	}
1220 	if (clp->cl_cb_conn.cb_xprt) {
1221 		svc_xprt_put(clp->cl_cb_conn.cb_xprt);
1222 		clp->cl_cb_conn.cb_xprt = NULL;
1223 	}
1224 	if (test_bit(NFSD4_CLIENT_CB_KILL, &clp->cl_flags))
1225 		return;
1226 	spin_lock(&clp->cl_lock);
1227 	/*
1228 	 * Only serialized callback code is allowed to clear these
1229 	 * flags; main nfsd code can only set them:
1230 	 */
1231 	BUG_ON(!(clp->cl_flags & NFSD4_CLIENT_CB_FLAG_MASK));
1232 	clear_bit(NFSD4_CLIENT_CB_UPDATE, &clp->cl_flags);
1233 	memcpy(&conn, &cb->cb_clp->cl_cb_conn, sizeof(struct nfs4_cb_conn));
1234 	c = __nfsd4_find_backchannel(clp);
1235 	if (c) {
1236 		svc_xprt_get(c->cn_xprt);
1237 		conn.cb_xprt = c->cn_xprt;
1238 		ses = c->cn_session;
1239 	}
1240 	spin_unlock(&clp->cl_lock);
1241 
1242 	err = setup_callback_client(clp, &conn, ses);
1243 	if (err) {
1244 		nfsd4_mark_cb_down(clp, err);
1245 		return;
1246 	}
1247 }
1248 
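/*
 * Workqueue function that actually transmits a callback: apply any
 * pending connection updates, then send the rpc asynchronously (a 4.1+
 * probe just marks the channel up without sending anything).
 */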
1249 static void
1250 nfsd4_run_cb_work(struct work_struct *work)
1251 {
1252 	struct nfsd4_callback *cb =
1253 		container_of(work, struct nfsd4_callback, cb_work);
1254 	struct nfs4_client *clp = cb->cb_clp;
1255 	struct rpc_clnt *clnt;
1256 
1257 	if (cb->cb_need_restart) {
1258 		cb->cb_need_restart = false;
1259 	} else {
1260 		if (cb->cb_ops && cb->cb_ops->prepare)
1261 			cb->cb_ops->prepare(cb);
1262 	}
1263 
1264 	if (clp->cl_flags & NFSD4_CLIENT_CB_FLAG_MASK)
1265 		nfsd4_process_cb_update(cb);
1266 
1267 	clnt = clp->cl_cb_client;
1268 	if (!clnt) {
1269 		/* Callback channel broken, or client killed; give up: */
1270 		if (cb->cb_ops && cb->cb_ops->release)
1271 			cb->cb_ops->release(cb);
1272 		return;
1273 	}
1274 
1275 	/*
1276 	 * Don't send probe messages for 4.1 or later.
1277 	 */
1278 	if (!cb->cb_ops && clp->cl_minorversion) {
1279 		clp->cl_cb_state = NFSD4_CB_UP;
1280 		return;
1281 	}
1282 
1283 	cb->cb_msg.rpc_cred = clp->cl_cb_cred;
1284 	rpc_call_async(clnt, &cb->cb_msg, RPC_TASK_SOFT | RPC_TASK_SOFTCONN,
1285 			cb->cb_ops ? &nfsd4_cb_ops : &nfsd4_cb_probe_ops, cb);
1286 }
1287 
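/* Initialize a callback before it is queued with nfsd4_run_cb() */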
1288 void nfsd4_init_cb(struct nfsd4_callback *cb, struct nfs4_client *clp,
1289 		const struct nfsd4_callback_ops *ops, enum nfsd4_cb_op op)
1290 {
1291 	cb->cb_clp = clp;
1292 	cb->cb_msg.rpc_proc = &nfs4_cb_procedures[op];
1293 	cb->cb_msg.rpc_argp = cb;
1294 	cb->cb_msg.rpc_resp = cb;
1295 	cb->cb_ops = ops;
1296 	INIT_WORK(&cb->cb_work, nfsd4_run_cb_work);
1297 	cb->cb_seq_status = 1;
1298 	cb->cb_status = 0;
1299 	cb->cb_need_restart = false;
1300 }
1301 
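/* Hand the callback off to the callback workqueue for transmission */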
1302 void nfsd4_run_cb(struct nfsd4_callback *cb)
1303 {
1304 	queue_work(callback_wq, &cb->cb_work);
1305 }
1306