xref: /openbmc/linux/fs/nfs/nfs4proc.c (revision 3a83e4e6)
1 /*
2  *  fs/nfs/nfs4proc.c
3  *
4  *  Client-side procedure declarations for NFSv4.
5  *
6  *  Copyright (c) 2002 The Regents of the University of Michigan.
7  *  All rights reserved.
8  *
9  *  Kendrick Smith <kmsmith@umich.edu>
10  *  Andy Adamson   <andros@umich.edu>
11  *
12  *  Redistribution and use in source and binary forms, with or without
13  *  modification, are permitted provided that the following conditions
14  *  are met:
15  *
16  *  1. Redistributions of source code must retain the above copyright
17  *     notice, this list of conditions and the following disclaimer.
18  *  2. Redistributions in binary form must reproduce the above copyright
19  *     notice, this list of conditions and the following disclaimer in the
20  *     documentation and/or other materials provided with the distribution.
21  *  3. Neither the name of the University nor the names of its
22  *     contributors may be used to endorse or promote products derived
23  *     from this software without specific prior written permission.
24  *
25  *  THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
26  *  WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
27  *  MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
28  *  DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
29  *  FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
30  *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
31  *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
32  *  BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
33  *  LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
34  *  NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
35  *  SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
36  */
37 
38 #include <linux/mm.h>
39 #include <linux/delay.h>
40 #include <linux/errno.h>
41 #include <linux/string.h>
42 #include <linux/ratelimit.h>
43 #include <linux/printk.h>
44 #include <linux/slab.h>
45 #include <linux/sunrpc/clnt.h>
46 #include <linux/nfs.h>
47 #include <linux/nfs4.h>
48 #include <linux/nfs_fs.h>
49 #include <linux/nfs_page.h>
50 #include <linux/nfs_mount.h>
51 #include <linux/namei.h>
52 #include <linux/mount.h>
53 #include <linux/module.h>
54 #include <linux/xattr.h>
55 #include <linux/utsname.h>
56 #include <linux/freezer.h>
57 #include <linux/iversion.h>
58 
59 #include "nfs4_fs.h"
60 #include "delegation.h"
61 #include "internal.h"
62 #include "iostat.h"
63 #include "callback.h"
64 #include "pnfs.h"
65 #include "netns.h"
66 #include "nfs4idmap.h"
67 #include "nfs4session.h"
68 #include "fscache.h"
69 #include "nfs42.h"
70 
71 #include "nfs4trace.h"
72 
73 #define NFSDBG_FACILITY		NFSDBG_PROC
74 
75 #define NFS4_BITMASK_SZ		3
76 
77 #define NFS4_POLL_RETRY_MIN	(HZ/10)
78 #define NFS4_POLL_RETRY_MAX	(15*HZ)
79 
80 /* file attributes which can be mapped to nfs attributes */
81 #define NFS4_VALID_ATTRS (ATTR_MODE \
82 	| ATTR_UID \
83 	| ATTR_GID \
84 	| ATTR_SIZE \
85 	| ATTR_ATIME \
86 	| ATTR_MTIME \
87 	| ATTR_CTIME \
88 	| ATTR_ATIME_SET \
89 	| ATTR_MTIME_SET)
90 
91 struct nfs4_opendata;
92 static int _nfs4_recover_proc_open(struct nfs4_opendata *data);
93 static int nfs4_do_fsinfo(struct nfs_server *, struct nfs_fh *, struct nfs_fsinfo *);
94 static void nfs_fixup_referral_attributes(struct nfs_fattr *fattr);
95 static int _nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fattr *fattr, struct nfs4_label *label, struct inode *inode);
96 static int nfs4_do_setattr(struct inode *inode, const struct cred *cred,
97 			    struct nfs_fattr *fattr, struct iattr *sattr,
98 			    struct nfs_open_context *ctx, struct nfs4_label *ilabel,
99 			    struct nfs4_label *olabel);
100 #ifdef CONFIG_NFS_V4_1
101 static struct rpc_task *_nfs41_proc_sequence(struct nfs_client *clp,
102 		const struct cred *cred,
103 		struct nfs4_slot *slot,
104 		bool is_privileged);
105 static int nfs41_test_stateid(struct nfs_server *, nfs4_stateid *,
106 		const struct cred *);
107 static int nfs41_free_stateid(struct nfs_server *, const nfs4_stateid *,
108 		const struct cred *, bool);
109 #endif
110 
111 #ifdef CONFIG_NFS_V4_SECURITY_LABEL
112 static inline struct nfs4_label *
113 nfs4_label_init_security(struct inode *dir, struct dentry *dentry,
114 	struct iattr *sattr, struct nfs4_label *label)
115 {
116 	int err;
117 
118 	if (label == NULL)
119 		return NULL;
120 
121 	if (nfs_server_capable(dir, NFS_CAP_SECURITY_LABEL) == 0)
122 		return NULL;
123 
124 	err = security_dentry_init_security(dentry, sattr->ia_mode,
125 				&dentry->d_name, (void **)&label->label, &label->len);
126 	if (err == 0)
127 		return label;
128 
129 	return NULL;
130 }
131 static inline void
132 nfs4_label_release_security(struct nfs4_label *label)
133 {
134 	if (label)
135 		security_release_secctx(label->label, label->len);
136 }
137 static inline u32 *nfs4_bitmask(struct nfs_server *server, struct nfs4_label *label)
138 {
139 	if (label)
140 		return server->attr_bitmask;
141 
142 	return server->attr_bitmask_nl;
143 }
144 #else
145 static inline struct nfs4_label *
146 nfs4_label_init_security(struct inode *dir, struct dentry *dentry,
147 	struct iattr *sattr, struct nfs4_label *l)
148 { return NULL; }
149 static inline void
150 nfs4_label_release_security(struct nfs4_label *label)
151 { return; }
152 static inline u32 *
153 nfs4_bitmask(struct nfs_server *server, struct nfs4_label *label)
154 { return server->attr_bitmask; }
155 #endif
156 
157 /* Prevent leaks of NFSv4 errors into userland */
158 static int nfs4_map_errors(int err)
159 {
160 	if (err >= -1000)
161 		return err;
162 	switch (err) {
163 	case -NFS4ERR_RESOURCE:
164 	case -NFS4ERR_LAYOUTTRYLATER:
165 	case -NFS4ERR_RECALLCONFLICT:
166 		return -EREMOTEIO;
167 	case -NFS4ERR_WRONGSEC:
168 	case -NFS4ERR_WRONG_CRED:
169 		return -EPERM;
170 	case -NFS4ERR_BADOWNER:
171 	case -NFS4ERR_BADNAME:
172 		return -EINVAL;
173 	case -NFS4ERR_SHARE_DENIED:
174 		return -EACCES;
175 	case -NFS4ERR_MINOR_VERS_MISMATCH:
176 		return -EPROTONOSUPPORT;
177 	case -NFS4ERR_FILE_OPEN:
178 		return -EBUSY;
179 	default:
180 		dprintk("%s could not handle NFSv4 error %d\n",
181 				__func__, -err);
182 		break;
183 	}
184 	return -EIO;
185 }
186 
187 /*
188  * This is our standard bitmap for GETATTR requests.
189  */
190 const u32 nfs4_fattr_bitmap[3] = {
191 	FATTR4_WORD0_TYPE
192 	| FATTR4_WORD0_CHANGE
193 	| FATTR4_WORD0_SIZE
194 	| FATTR4_WORD0_FSID
195 	| FATTR4_WORD0_FILEID,
196 	FATTR4_WORD1_MODE
197 	| FATTR4_WORD1_NUMLINKS
198 	| FATTR4_WORD1_OWNER
199 	| FATTR4_WORD1_OWNER_GROUP
200 	| FATTR4_WORD1_RAWDEV
201 	| FATTR4_WORD1_SPACE_USED
202 	| FATTR4_WORD1_TIME_ACCESS
203 	| FATTR4_WORD1_TIME_METADATA
204 	| FATTR4_WORD1_TIME_MODIFY
205 	| FATTR4_WORD1_MOUNTED_ON_FILEID,
206 #ifdef CONFIG_NFS_V4_SECURITY_LABEL
207 	FATTR4_WORD2_SECURITY_LABEL
208 #endif
209 };
210 
211 static const u32 nfs4_pnfs_open_bitmap[3] = {
212 	FATTR4_WORD0_TYPE
213 	| FATTR4_WORD0_CHANGE
214 	| FATTR4_WORD0_SIZE
215 	| FATTR4_WORD0_FSID
216 	| FATTR4_WORD0_FILEID,
217 	FATTR4_WORD1_MODE
218 	| FATTR4_WORD1_NUMLINKS
219 	| FATTR4_WORD1_OWNER
220 	| FATTR4_WORD1_OWNER_GROUP
221 	| FATTR4_WORD1_RAWDEV
222 	| FATTR4_WORD1_SPACE_USED
223 	| FATTR4_WORD1_TIME_ACCESS
224 	| FATTR4_WORD1_TIME_METADATA
225 	| FATTR4_WORD1_TIME_MODIFY,
226 	FATTR4_WORD2_MDSTHRESHOLD
227 #ifdef CONFIG_NFS_V4_SECURITY_LABEL
228 	| FATTR4_WORD2_SECURITY_LABEL
229 #endif
230 };
231 
232 static const u32 nfs4_open_noattr_bitmap[3] = {
233 	FATTR4_WORD0_TYPE
234 	| FATTR4_WORD0_FILEID,
235 };
236 
237 const u32 nfs4_statfs_bitmap[3] = {
238 	FATTR4_WORD0_FILES_AVAIL
239 	| FATTR4_WORD0_FILES_FREE
240 	| FATTR4_WORD0_FILES_TOTAL,
241 	FATTR4_WORD1_SPACE_AVAIL
242 	| FATTR4_WORD1_SPACE_FREE
243 	| FATTR4_WORD1_SPACE_TOTAL
244 };
245 
246 const u32 nfs4_pathconf_bitmap[3] = {
247 	FATTR4_WORD0_MAXLINK
248 	| FATTR4_WORD0_MAXNAME,
249 	0
250 };
251 
252 const u32 nfs4_fsinfo_bitmap[3] = { FATTR4_WORD0_MAXFILESIZE
253 			| FATTR4_WORD0_MAXREAD
254 			| FATTR4_WORD0_MAXWRITE
255 			| FATTR4_WORD0_LEASE_TIME,
256 			FATTR4_WORD1_TIME_DELTA
257 			| FATTR4_WORD1_FS_LAYOUT_TYPES,
258 			FATTR4_WORD2_LAYOUT_BLKSIZE
259 			| FATTR4_WORD2_CLONE_BLKSIZE
260 			| FATTR4_WORD2_XATTR_SUPPORT
261 };
262 
263 const u32 nfs4_fs_locations_bitmap[3] = {
264 	FATTR4_WORD0_CHANGE
265 	| FATTR4_WORD0_SIZE
266 	| FATTR4_WORD0_FSID
267 	| FATTR4_WORD0_FILEID
268 	| FATTR4_WORD0_FS_LOCATIONS,
269 	FATTR4_WORD1_OWNER
270 	| FATTR4_WORD1_OWNER_GROUP
271 	| FATTR4_WORD1_RAWDEV
272 	| FATTR4_WORD1_SPACE_USED
273 	| FATTR4_WORD1_TIME_ACCESS
274 	| FATTR4_WORD1_TIME_METADATA
275 	| FATTR4_WORD1_TIME_MODIFY
276 	| FATTR4_WORD1_MOUNTED_ON_FILEID,
277 };
278 
279 static void nfs4_bitmap_copy_adjust(__u32 *dst, const __u32 *src,
280 		struct inode *inode)
281 {
282 	unsigned long cache_validity;
283 
284 	memcpy(dst, src, NFS4_BITMASK_SZ*sizeof(*dst));
285 	if (!inode || !nfs4_have_delegation(inode, FMODE_READ))
286 		return;
287 
288 	cache_validity = READ_ONCE(NFS_I(inode)->cache_validity);
289 	if (!(cache_validity & NFS_INO_REVAL_FORCED))
290 		cache_validity &= ~(NFS_INO_INVALID_CHANGE
291 				| NFS_INO_INVALID_SIZE);
292 
293 	if (!(cache_validity & NFS_INO_INVALID_SIZE))
294 		dst[0] &= ~FATTR4_WORD0_SIZE;
295 
296 	if (!(cache_validity & NFS_INO_INVALID_CHANGE))
297 		dst[0] &= ~FATTR4_WORD0_CHANGE;
298 }
299 
300 static void nfs4_bitmap_copy_adjust_setattr(__u32 *dst,
301 		const __u32 *src, struct inode *inode)
302 {
303 	nfs4_bitmap_copy_adjust(dst, src, inode);
304 }
305 
306 static void nfs4_setup_readdir(u64 cookie, __be32 *verifier, struct dentry *dentry,
307 		struct nfs4_readdir_arg *readdir)
308 {
309 	unsigned int attrs = FATTR4_WORD0_FILEID | FATTR4_WORD0_TYPE;
310 	__be32 *start, *p;
311 
312 	if (cookie > 2) {
313 		readdir->cookie = cookie;
314 		memcpy(&readdir->verifier, verifier, sizeof(readdir->verifier));
315 		return;
316 	}
317 
318 	readdir->cookie = 0;
319 	memset(&readdir->verifier, 0, sizeof(readdir->verifier));
320 	if (cookie == 2)
321 		return;
322 
323 	/*
324 	 * NFSv4 servers do not return entries for '.' and '..'
325 	 * Therefore, we fake these entries here.  We let '.'
326 	 * have cookie 0 and '..' have cookie 1.  Note that
327 	 * when talking to the server, we always send cookie 0
328 	 * instead of 1 or 2.
329 	 */
330 	start = p = kmap_atomic(*readdir->pages);
331 
332 	if (cookie == 0) {
333 		*p++ = xdr_one;                                  /* next */
334 		*p++ = xdr_zero;                   /* cookie, first word */
335 		*p++ = xdr_one;                   /* cookie, second word */
336 		*p++ = xdr_one;                             /* entry len */
337 		memcpy(p, ".\0\0\0", 4);                        /* entry */
338 		p++;
339 		*p++ = xdr_one;                         /* bitmap length */
340 		*p++ = htonl(attrs);                           /* bitmap */
341 		*p++ = htonl(12);             /* attribute buffer length */
342 		*p++ = htonl(NF4DIR);
343 		p = xdr_encode_hyper(p, NFS_FILEID(d_inode(dentry)));
344 	}
345 
346 	*p++ = xdr_one;                                  /* next */
347 	*p++ = xdr_zero;                   /* cookie, first word */
348 	*p++ = xdr_two;                   /* cookie, second word */
349 	*p++ = xdr_two;                             /* entry len */
350 	memcpy(p, "..\0\0", 4);                         /* entry */
351 	p++;
352 	*p++ = xdr_one;                         /* bitmap length */
353 	*p++ = htonl(attrs);                           /* bitmap */
354 	*p++ = htonl(12);             /* attribute buffer length */
355 	*p++ = htonl(NF4DIR);
356 	p = xdr_encode_hyper(p, NFS_FILEID(d_inode(dentry->d_parent)));
357 
358 	readdir->pgbase = (char *)p - (char *)start;
359 	readdir->count -= readdir->pgbase;
360 	kunmap_atomic(start);
361 }
362 
363 static void nfs4_test_and_free_stateid(struct nfs_server *server,
364 		nfs4_stateid *stateid,
365 		const struct cred *cred)
366 {
367 	const struct nfs4_minor_version_ops *ops = server->nfs_client->cl_mvops;
368 
369 	ops->test_and_free_expired(server, stateid, cred);
370 }
371 
372 static void __nfs4_free_revoked_stateid(struct nfs_server *server,
373 		nfs4_stateid *stateid,
374 		const struct cred *cred)
375 {
376 	stateid->type = NFS4_REVOKED_STATEID_TYPE;
377 	nfs4_test_and_free_stateid(server, stateid, cred);
378 }
379 
380 static void nfs4_free_revoked_stateid(struct nfs_server *server,
381 		const nfs4_stateid *stateid,
382 		const struct cred *cred)
383 {
384 	nfs4_stateid tmp;
385 
386 	nfs4_stateid_copy(&tmp, stateid);
387 	__nfs4_free_revoked_stateid(server, &tmp, cred);
388 }
389 
390 static long nfs4_update_delay(long *timeout)
391 {
392 	long ret;
393 	if (!timeout)
394 		return NFS4_POLL_RETRY_MAX;
395 	if (*timeout <= 0)
396 		*timeout = NFS4_POLL_RETRY_MIN;
397 	if (*timeout > NFS4_POLL_RETRY_MAX)
398 		*timeout = NFS4_POLL_RETRY_MAX;
399 	ret = *timeout;
400 	*timeout <<= 1;
401 	return ret;
402 }
403 
404 static int nfs4_delay_killable(long *timeout)
405 {
406 	might_sleep();
407 
408 	freezable_schedule_timeout_killable_unsafe(
409 		nfs4_update_delay(timeout));
410 	if (!__fatal_signal_pending(current))
411 		return 0;
412 	return -EINTR;
413 }
414 
415 static int nfs4_delay_interruptible(long *timeout)
416 {
417 	might_sleep();
418 
419 	freezable_schedule_timeout_interruptible_unsafe(nfs4_update_delay(timeout));
420 	if (!signal_pending(current))
421 		return 0;
422 	return __fatal_signal_pending(current) ? -EINTR :-ERESTARTSYS;
423 }
424 
425 static int nfs4_delay(long *timeout, bool interruptible)
426 {
427 	if (interruptible)
428 		return nfs4_delay_interruptible(timeout);
429 	return nfs4_delay_killable(timeout);
430 }
431 
432 static const nfs4_stateid *
433 nfs4_recoverable_stateid(const nfs4_stateid *stateid)
434 {
435 	if (!stateid)
436 		return NULL;
437 	switch (stateid->type) {
438 	case NFS4_OPEN_STATEID_TYPE:
439 	case NFS4_LOCK_STATEID_TYPE:
440 	case NFS4_DELEGATION_STATEID_TYPE:
441 		return stateid;
442 	default:
443 		break;
444 	}
445 	return NULL;
446 }
447 
448 /* This is the error handling routine for processes that are allowed
449  * to sleep.
450  */
451 static int nfs4_do_handle_exception(struct nfs_server *server,
452 		int errorcode, struct nfs4_exception *exception)
453 {
454 	struct nfs_client *clp = server->nfs_client;
455 	struct nfs4_state *state = exception->state;
456 	const nfs4_stateid *stateid;
457 	struct inode *inode = exception->inode;
458 	int ret = errorcode;
459 
460 	exception->delay = 0;
461 	exception->recovering = 0;
462 	exception->retry = 0;
463 
464 	stateid = nfs4_recoverable_stateid(exception->stateid);
465 	if (stateid == NULL && state != NULL)
466 		stateid = nfs4_recoverable_stateid(&state->stateid);
467 
468 	switch(errorcode) {
469 		case 0:
470 			return 0;
471 		case -NFS4ERR_BADHANDLE:
472 		case -ESTALE:
473 			if (inode != NULL && S_ISREG(inode->i_mode))
474 				pnfs_destroy_layout(NFS_I(inode));
475 			break;
476 		case -NFS4ERR_DELEG_REVOKED:
477 		case -NFS4ERR_ADMIN_REVOKED:
478 		case -NFS4ERR_EXPIRED:
479 		case -NFS4ERR_BAD_STATEID:
480 		case -NFS4ERR_PARTNER_NO_AUTH:
481 			if (inode != NULL && stateid != NULL) {
482 				nfs_inode_find_state_and_recover(inode,
483 						stateid);
484 				goto wait_on_recovery;
485 			}
486 			fallthrough;
487 		case -NFS4ERR_OPENMODE:
488 			if (inode) {
489 				int err;
490 
491 				err = nfs_async_inode_return_delegation(inode,
492 						stateid);
493 				if (err == 0)
494 					goto wait_on_recovery;
495 				if (stateid != NULL && stateid->type == NFS4_DELEGATION_STATEID_TYPE) {
496 					exception->retry = 1;
497 					break;
498 				}
499 			}
500 			if (state == NULL)
501 				break;
502 			ret = nfs4_schedule_stateid_recovery(server, state);
503 			if (ret < 0)
504 				break;
505 			goto wait_on_recovery;
506 		case -NFS4ERR_STALE_STATEID:
507 		case -NFS4ERR_STALE_CLIENTID:
508 			nfs4_schedule_lease_recovery(clp);
509 			goto wait_on_recovery;
510 		case -NFS4ERR_MOVED:
511 			ret = nfs4_schedule_migration_recovery(server);
512 			if (ret < 0)
513 				break;
514 			goto wait_on_recovery;
515 		case -NFS4ERR_LEASE_MOVED:
516 			nfs4_schedule_lease_moved_recovery(clp);
517 			goto wait_on_recovery;
518 #if defined(CONFIG_NFS_V4_1)
519 		case -NFS4ERR_BADSESSION:
520 		case -NFS4ERR_BADSLOT:
521 		case -NFS4ERR_BAD_HIGH_SLOT:
522 		case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
523 		case -NFS4ERR_DEADSESSION:
524 		case -NFS4ERR_SEQ_FALSE_RETRY:
525 		case -NFS4ERR_SEQ_MISORDERED:
526 			/* Handled in nfs41_sequence_process() */
527 			goto wait_on_recovery;
528 #endif /* defined(CONFIG_NFS_V4_1) */
529 		case -NFS4ERR_FILE_OPEN:
530 			if (exception->timeout > HZ) {
531 				/* We have retried a decent amount, time to
532 				 * fail
533 				 */
534 				ret = -EBUSY;
535 				break;
536 			}
537 			fallthrough;
538 		case -NFS4ERR_DELAY:
539 			nfs_inc_server_stats(server, NFSIOS_DELAY);
540 			fallthrough;
541 		case -NFS4ERR_GRACE:
542 		case -NFS4ERR_LAYOUTTRYLATER:
543 		case -NFS4ERR_RECALLCONFLICT:
544 			exception->delay = 1;
545 			return 0;
546 
547 		case -NFS4ERR_RETRY_UNCACHED_REP:
548 		case -NFS4ERR_OLD_STATEID:
549 			exception->retry = 1;
550 			break;
551 		case -NFS4ERR_BADOWNER:
552 			/* The following works around a Linux server bug! */
553 		case -NFS4ERR_BADNAME:
554 			if (server->caps & NFS_CAP_UIDGID_NOMAP) {
555 				server->caps &= ~NFS_CAP_UIDGID_NOMAP;
556 				exception->retry = 1;
557 				printk(KERN_WARNING "NFS: v4 server %s "
558 						"does not accept raw "
559 						"uid/gids. "
560 						"Reenabling the idmapper.\n",
561 						server->nfs_client->cl_hostname);
562 			}
563 	}
564 	/* We failed to handle the error */
565 	return nfs4_map_errors(ret);
566 wait_on_recovery:
567 	exception->recovering = 1;
568 	return 0;
569 }
570 
571 /* This is the error handling routine for processes that are allowed
572  * to sleep.
573  */
574 int nfs4_handle_exception(struct nfs_server *server, int errorcode, struct nfs4_exception *exception)
575 {
576 	struct nfs_client *clp = server->nfs_client;
577 	int ret;
578 
579 	ret = nfs4_do_handle_exception(server, errorcode, exception);
580 	if (exception->delay) {
581 		ret = nfs4_delay(&exception->timeout,
582 				exception->interruptible);
583 		goto out_retry;
584 	}
585 	if (exception->recovering) {
586 		ret = nfs4_wait_clnt_recover(clp);
587 		if (test_bit(NFS_MIG_FAILED, &server->mig_status))
588 			return -EIO;
589 		goto out_retry;
590 	}
591 	return ret;
592 out_retry:
593 	if (ret == 0)
594 		exception->retry = 1;
595 	return ret;
596 }
597 
598 static int
599 nfs4_async_handle_exception(struct rpc_task *task, struct nfs_server *server,
600 		int errorcode, struct nfs4_exception *exception)
601 {
602 	struct nfs_client *clp = server->nfs_client;
603 	int ret;
604 
605 	ret = nfs4_do_handle_exception(server, errorcode, exception);
606 	if (exception->delay) {
607 		rpc_delay(task, nfs4_update_delay(&exception->timeout));
608 		goto out_retry;
609 	}
610 	if (exception->recovering) {
611 		rpc_sleep_on(&clp->cl_rpcwaitq, task, NULL);
612 		if (test_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state) == 0)
613 			rpc_wake_up_queued_task(&clp->cl_rpcwaitq, task);
614 		goto out_retry;
615 	}
616 	if (test_bit(NFS_MIG_FAILED, &server->mig_status))
617 		ret = -EIO;
618 	return ret;
619 out_retry:
620 	if (ret == 0) {
621 		exception->retry = 1;
622 		/*
623 		 * For NFS4ERR_MOVED, the client transport will need to
624 		 * be recomputed after migration recovery has completed.
625 		 */
626 		if (errorcode == -NFS4ERR_MOVED)
627 			rpc_task_release_transport(task);
628 	}
629 	return ret;
630 }
631 
632 int
633 nfs4_async_handle_error(struct rpc_task *task, struct nfs_server *server,
634 			struct nfs4_state *state, long *timeout)
635 {
636 	struct nfs4_exception exception = {
637 		.state = state,
638 	};
639 
640 	if (task->tk_status >= 0)
641 		return 0;
642 	if (timeout)
643 		exception.timeout = *timeout;
644 	task->tk_status = nfs4_async_handle_exception(task, server,
645 			task->tk_status,
646 			&exception);
647 	if (exception.delay && timeout)
648 		*timeout = exception.timeout;
649 	if (exception.retry)
650 		return -EAGAIN;
651 	return 0;
652 }
653 
654 /*
655  * Return 'true' if 'clp' is using an rpc_client that is integrity protected
656  * or 'false' otherwise.
657  */
658 static bool _nfs4_is_integrity_protected(struct nfs_client *clp)
659 {
660 	rpc_authflavor_t flavor = clp->cl_rpcclient->cl_auth->au_flavor;
661 	return (flavor == RPC_AUTH_GSS_KRB5I) || (flavor == RPC_AUTH_GSS_KRB5P);
662 }
663 
664 static void do_renew_lease(struct nfs_client *clp, unsigned long timestamp)
665 {
666 	spin_lock(&clp->cl_lock);
667 	if (time_before(clp->cl_last_renewal,timestamp))
668 		clp->cl_last_renewal = timestamp;
669 	spin_unlock(&clp->cl_lock);
670 }
671 
672 static void renew_lease(const struct nfs_server *server, unsigned long timestamp)
673 {
674 	struct nfs_client *clp = server->nfs_client;
675 
676 	if (!nfs4_has_session(clp))
677 		do_renew_lease(clp, timestamp);
678 }
679 
680 struct nfs4_call_sync_data {
681 	const struct nfs_server *seq_server;
682 	struct nfs4_sequence_args *seq_args;
683 	struct nfs4_sequence_res *seq_res;
684 };
685 
686 void nfs4_init_sequence(struct nfs4_sequence_args *args,
687 			struct nfs4_sequence_res *res, int cache_reply,
688 			int privileged)
689 {
690 	args->sa_slot = NULL;
691 	args->sa_cache_this = cache_reply;
692 	args->sa_privileged = privileged;
693 
694 	res->sr_slot = NULL;
695 }
696 
697 static void nfs40_sequence_free_slot(struct nfs4_sequence_res *res)
698 {
699 	struct nfs4_slot *slot = res->sr_slot;
700 	struct nfs4_slot_table *tbl;
701 
702 	tbl = slot->table;
703 	spin_lock(&tbl->slot_tbl_lock);
704 	if (!nfs41_wake_and_assign_slot(tbl, slot))
705 		nfs4_free_slot(tbl, slot);
706 	spin_unlock(&tbl->slot_tbl_lock);
707 
708 	res->sr_slot = NULL;
709 }
710 
711 static int nfs40_sequence_done(struct rpc_task *task,
712 			       struct nfs4_sequence_res *res)
713 {
714 	if (res->sr_slot != NULL)
715 		nfs40_sequence_free_slot(res);
716 	return 1;
717 }
718 
719 #if defined(CONFIG_NFS_V4_1)
720 
721 static void nfs41_release_slot(struct nfs4_slot *slot)
722 {
723 	struct nfs4_session *session;
724 	struct nfs4_slot_table *tbl;
725 	bool send_new_highest_used_slotid = false;
726 
727 	if (!slot)
728 		return;
729 	tbl = slot->table;
730 	session = tbl->session;
731 
732 	/* Bump the slot sequence number */
733 	if (slot->seq_done)
734 		slot->seq_nr++;
735 	slot->seq_done = 0;
736 
737 	spin_lock(&tbl->slot_tbl_lock);
738 	/* Be nice to the server: try to ensure that the last transmitted
739 	 * value for highest_user_slotid <= target_highest_slotid
740 	 */
741 	if (tbl->highest_used_slotid > tbl->target_highest_slotid)
742 		send_new_highest_used_slotid = true;
743 
744 	if (nfs41_wake_and_assign_slot(tbl, slot)) {
745 		send_new_highest_used_slotid = false;
746 		goto out_unlock;
747 	}
748 	nfs4_free_slot(tbl, slot);
749 
750 	if (tbl->highest_used_slotid != NFS4_NO_SLOT)
751 		send_new_highest_used_slotid = false;
752 out_unlock:
753 	spin_unlock(&tbl->slot_tbl_lock);
754 	if (send_new_highest_used_slotid)
755 		nfs41_notify_server(session->clp);
756 	if (waitqueue_active(&tbl->slot_waitq))
757 		wake_up_all(&tbl->slot_waitq);
758 }
759 
760 static void nfs41_sequence_free_slot(struct nfs4_sequence_res *res)
761 {
762 	nfs41_release_slot(res->sr_slot);
763 	res->sr_slot = NULL;
764 }
765 
766 static void nfs4_slot_sequence_record_sent(struct nfs4_slot *slot,
767 		u32 seqnr)
768 {
769 	if ((s32)(seqnr - slot->seq_nr_highest_sent) > 0)
770 		slot->seq_nr_highest_sent = seqnr;
771 }
772 static void nfs4_slot_sequence_acked(struct nfs4_slot *slot,
773 		u32 seqnr)
774 {
775 	slot->seq_nr_highest_sent = seqnr;
776 	slot->seq_nr_last_acked = seqnr;
777 }
778 
779 static void nfs4_probe_sequence(struct nfs_client *client, const struct cred *cred,
780 				struct nfs4_slot *slot)
781 {
782 	struct rpc_task *task = _nfs41_proc_sequence(client, cred, slot, true);
783 	if (!IS_ERR(task))
784 		rpc_put_task_async(task);
785 }
786 
787 static int nfs41_sequence_process(struct rpc_task *task,
788 		struct nfs4_sequence_res *res)
789 {
790 	struct nfs4_session *session;
791 	struct nfs4_slot *slot = res->sr_slot;
792 	struct nfs_client *clp;
793 	int status;
794 	int ret = 1;
795 
796 	if (slot == NULL)
797 		goto out_noaction;
798 	/* don't increment the sequence number if the task wasn't sent */
799 	if (!RPC_WAS_SENT(task) || slot->seq_done)
800 		goto out;
801 
802 	session = slot->table->session;
803 	clp = session->clp;
804 
805 	trace_nfs4_sequence_done(session, res);
806 
807 	status = res->sr_status;
808 	if (task->tk_status == -NFS4ERR_DEADSESSION)
809 		status = -NFS4ERR_DEADSESSION;
810 
811 	/* Check the SEQUENCE operation status */
812 	switch (status) {
813 	case 0:
814 		/* Mark this sequence number as having been acked */
815 		nfs4_slot_sequence_acked(slot, slot->seq_nr);
816 		/* Update the slot's sequence and clientid lease timer */
817 		slot->seq_done = 1;
818 		do_renew_lease(clp, res->sr_timestamp);
819 		/* Check sequence flags */
820 		nfs41_handle_sequence_flag_errors(clp, res->sr_status_flags,
821 				!!slot->privileged);
822 		nfs41_update_target_slotid(slot->table, slot, res);
823 		break;
824 	case 1:
825 		/*
826 		 * sr_status remains 1 if an RPC level error occurred.
827 		 * The server may or may not have processed the sequence
828 		 * operation..
829 		 */
830 		nfs4_slot_sequence_record_sent(slot, slot->seq_nr);
831 		slot->seq_done = 1;
832 		goto out;
833 	case -NFS4ERR_DELAY:
834 		/* The server detected a resend of the RPC call and
835 		 * returned NFS4ERR_DELAY as per Section 2.10.6.2
836 		 * of RFC5661.
837 		 */
838 		dprintk("%s: slot=%u seq=%u: Operation in progress\n",
839 			__func__,
840 			slot->slot_nr,
841 			slot->seq_nr);
842 		nfs4_slot_sequence_acked(slot, slot->seq_nr);
843 		goto out_retry;
844 	case -NFS4ERR_RETRY_UNCACHED_REP:
845 	case -NFS4ERR_SEQ_FALSE_RETRY:
846 		/*
847 		 * The server thinks we tried to replay a request.
848 		 * Retry the call after bumping the sequence ID.
849 		 */
850 		nfs4_slot_sequence_acked(slot, slot->seq_nr);
851 		goto retry_new_seq;
852 	case -NFS4ERR_BADSLOT:
853 		/*
854 		 * The slot id we used was probably retired. Try again
855 		 * using a different slot id.
856 		 */
857 		if (slot->slot_nr < slot->table->target_highest_slotid)
858 			goto session_recover;
859 		goto retry_nowait;
860 	case -NFS4ERR_SEQ_MISORDERED:
861 		nfs4_slot_sequence_record_sent(slot, slot->seq_nr);
862 		/*
863 		 * Were one or more calls using this slot interrupted?
864 		 * If the server never received the request, then our
865 		 * transmitted slot sequence number may be too high. However,
866 		 * if the server did receive the request then it might
867 		 * accidentally give us a reply with a mismatched operation.
868 		 * We can sort this out by sending a lone sequence operation
869 		 * to the server on the same slot.
870 		 */
871 		if ((s32)(slot->seq_nr - slot->seq_nr_last_acked) > 1) {
872 			slot->seq_nr--;
873 			if (task->tk_msg.rpc_proc != &nfs4_procedures[NFSPROC4_CLNT_SEQUENCE]) {
874 				nfs4_probe_sequence(clp, task->tk_msg.rpc_cred, slot);
875 				res->sr_slot = NULL;
876 			}
877 			goto retry_nowait;
878 		}
879 		/*
880 		 * RFC5661:
881 		 * A retry might be sent while the original request is
882 		 * still in progress on the replier. The replier SHOULD
883 		 * deal with the issue by returning NFS4ERR_DELAY as the
884 		 * reply to SEQUENCE or CB_SEQUENCE operation, but
885 		 * implementations MAY return NFS4ERR_SEQ_MISORDERED.
886 		 *
887 		 * Restart the search after a delay.
888 		 */
889 		slot->seq_nr = slot->seq_nr_highest_sent;
890 		goto out_retry;
891 	case -NFS4ERR_BADSESSION:
892 	case -NFS4ERR_DEADSESSION:
893 	case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
894 		goto session_recover;
895 	default:
896 		/* Just update the slot sequence no. */
897 		slot->seq_done = 1;
898 	}
899 out:
900 	/* The session may be reset by one of the error handlers. */
901 	dprintk("%s: Error %d free the slot \n", __func__, res->sr_status);
902 out_noaction:
903 	return ret;
904 session_recover:
905 	nfs4_schedule_session_recovery(session, status);
906 	dprintk("%s ERROR: %d Reset session\n", __func__, status);
907 	nfs41_sequence_free_slot(res);
908 	goto out;
909 retry_new_seq:
910 	++slot->seq_nr;
911 retry_nowait:
912 	if (rpc_restart_call_prepare(task)) {
913 		nfs41_sequence_free_slot(res);
914 		task->tk_status = 0;
915 		ret = 0;
916 	}
917 	goto out;
918 out_retry:
919 	if (!rpc_restart_call(task))
920 		goto out;
921 	rpc_delay(task, NFS4_POLL_RETRY_MAX);
922 	return 0;
923 }
924 
925 int nfs41_sequence_done(struct rpc_task *task, struct nfs4_sequence_res *res)
926 {
927 	if (!nfs41_sequence_process(task, res))
928 		return 0;
929 	if (res->sr_slot != NULL)
930 		nfs41_sequence_free_slot(res);
931 	return 1;
932 
933 }
934 EXPORT_SYMBOL_GPL(nfs41_sequence_done);
935 
936 static int nfs4_sequence_process(struct rpc_task *task, struct nfs4_sequence_res *res)
937 {
938 	if (res->sr_slot == NULL)
939 		return 1;
940 	if (res->sr_slot->table->session != NULL)
941 		return nfs41_sequence_process(task, res);
942 	return nfs40_sequence_done(task, res);
943 }
944 
945 static void nfs4_sequence_free_slot(struct nfs4_sequence_res *res)
946 {
947 	if (res->sr_slot != NULL) {
948 		if (res->sr_slot->table->session != NULL)
949 			nfs41_sequence_free_slot(res);
950 		else
951 			nfs40_sequence_free_slot(res);
952 	}
953 }
954 
955 int nfs4_sequence_done(struct rpc_task *task, struct nfs4_sequence_res *res)
956 {
957 	if (res->sr_slot == NULL)
958 		return 1;
959 	if (!res->sr_slot->table->session)
960 		return nfs40_sequence_done(task, res);
961 	return nfs41_sequence_done(task, res);
962 }
963 EXPORT_SYMBOL_GPL(nfs4_sequence_done);
964 
965 static void nfs41_call_sync_prepare(struct rpc_task *task, void *calldata)
966 {
967 	struct nfs4_call_sync_data *data = calldata;
968 
969 	dprintk("--> %s data->seq_server %p\n", __func__, data->seq_server);
970 
971 	nfs4_setup_sequence(data->seq_server->nfs_client,
972 			    data->seq_args, data->seq_res, task);
973 }
974 
975 static void nfs41_call_sync_done(struct rpc_task *task, void *calldata)
976 {
977 	struct nfs4_call_sync_data *data = calldata;
978 
979 	nfs41_sequence_done(task, data->seq_res);
980 }
981 
982 static const struct rpc_call_ops nfs41_call_sync_ops = {
983 	.rpc_call_prepare = nfs41_call_sync_prepare,
984 	.rpc_call_done = nfs41_call_sync_done,
985 };
986 
987 #else	/* !CONFIG_NFS_V4_1 */
988 
989 static int nfs4_sequence_process(struct rpc_task *task, struct nfs4_sequence_res *res)
990 {
991 	return nfs40_sequence_done(task, res);
992 }
993 
994 static void nfs4_sequence_free_slot(struct nfs4_sequence_res *res)
995 {
996 	if (res->sr_slot != NULL)
997 		nfs40_sequence_free_slot(res);
998 }
999 
1000 int nfs4_sequence_done(struct rpc_task *task,
1001 		       struct nfs4_sequence_res *res)
1002 {
1003 	return nfs40_sequence_done(task, res);
1004 }
1005 EXPORT_SYMBOL_GPL(nfs4_sequence_done);
1006 
1007 #endif	/* !CONFIG_NFS_V4_1 */
1008 
1009 static void nfs41_sequence_res_init(struct nfs4_sequence_res *res)
1010 {
1011 	res->sr_timestamp = jiffies;
1012 	res->sr_status_flags = 0;
1013 	res->sr_status = 1;
1014 }
1015 
1016 static
1017 void nfs4_sequence_attach_slot(struct nfs4_sequence_args *args,
1018 		struct nfs4_sequence_res *res,
1019 		struct nfs4_slot *slot)
1020 {
1021 	if (!slot)
1022 		return;
1023 	slot->privileged = args->sa_privileged ? 1 : 0;
1024 	args->sa_slot = slot;
1025 
1026 	res->sr_slot = slot;
1027 }
1028 
1029 int nfs4_setup_sequence(struct nfs_client *client,
1030 			struct nfs4_sequence_args *args,
1031 			struct nfs4_sequence_res *res,
1032 			struct rpc_task *task)
1033 {
1034 	struct nfs4_session *session = nfs4_get_session(client);
1035 	struct nfs4_slot_table *tbl  = client->cl_slot_tbl;
1036 	struct nfs4_slot *slot;
1037 
1038 	/* slot already allocated? */
1039 	if (res->sr_slot != NULL)
1040 		goto out_start;
1041 
1042 	if (session)
1043 		tbl = &session->fc_slot_table;
1044 
1045 	spin_lock(&tbl->slot_tbl_lock);
1046 	/* The state manager will wait until the slot table is empty */
1047 	if (nfs4_slot_tbl_draining(tbl) && !args->sa_privileged)
1048 		goto out_sleep;
1049 
1050 	slot = nfs4_alloc_slot(tbl);
1051 	if (IS_ERR(slot)) {
1052 		if (slot == ERR_PTR(-ENOMEM))
1053 			goto out_sleep_timeout;
1054 		goto out_sleep;
1055 	}
1056 	spin_unlock(&tbl->slot_tbl_lock);
1057 
1058 	nfs4_sequence_attach_slot(args, res, slot);
1059 
1060 	trace_nfs4_setup_sequence(session, args);
1061 out_start:
1062 	nfs41_sequence_res_init(res);
1063 	rpc_call_start(task);
1064 	return 0;
1065 out_sleep_timeout:
1066 	/* Try again in 1/4 second */
1067 	if (args->sa_privileged)
1068 		rpc_sleep_on_priority_timeout(&tbl->slot_tbl_waitq, task,
1069 				jiffies + (HZ >> 2), RPC_PRIORITY_PRIVILEGED);
1070 	else
1071 		rpc_sleep_on_timeout(&tbl->slot_tbl_waitq, task,
1072 				NULL, jiffies + (HZ >> 2));
1073 	spin_unlock(&tbl->slot_tbl_lock);
1074 	return -EAGAIN;
1075 out_sleep:
1076 	if (args->sa_privileged)
1077 		rpc_sleep_on_priority(&tbl->slot_tbl_waitq, task,
1078 				RPC_PRIORITY_PRIVILEGED);
1079 	else
1080 		rpc_sleep_on(&tbl->slot_tbl_waitq, task, NULL);
1081 	spin_unlock(&tbl->slot_tbl_lock);
1082 	return -EAGAIN;
1083 }
1084 EXPORT_SYMBOL_GPL(nfs4_setup_sequence);
1085 
1086 static void nfs40_call_sync_prepare(struct rpc_task *task, void *calldata)
1087 {
1088 	struct nfs4_call_sync_data *data = calldata;
1089 	nfs4_setup_sequence(data->seq_server->nfs_client,
1090 				data->seq_args, data->seq_res, task);
1091 }
1092 
1093 static void nfs40_call_sync_done(struct rpc_task *task, void *calldata)
1094 {
1095 	struct nfs4_call_sync_data *data = calldata;
1096 	nfs4_sequence_done(task, data->seq_res);
1097 }
1098 
1099 static const struct rpc_call_ops nfs40_call_sync_ops = {
1100 	.rpc_call_prepare = nfs40_call_sync_prepare,
1101 	.rpc_call_done = nfs40_call_sync_done,
1102 };
1103 
1104 static int nfs4_call_sync_custom(struct rpc_task_setup *task_setup)
1105 {
1106 	int ret;
1107 	struct rpc_task *task;
1108 
1109 	task = rpc_run_task(task_setup);
1110 	if (IS_ERR(task))
1111 		return PTR_ERR(task);
1112 
1113 	ret = task->tk_status;
1114 	rpc_put_task(task);
1115 	return ret;
1116 }
1117 
1118 static int nfs4_do_call_sync(struct rpc_clnt *clnt,
1119 			     struct nfs_server *server,
1120 			     struct rpc_message *msg,
1121 			     struct nfs4_sequence_args *args,
1122 			     struct nfs4_sequence_res *res,
1123 			     unsigned short task_flags)
1124 {
1125 	struct nfs_client *clp = server->nfs_client;
1126 	struct nfs4_call_sync_data data = {
1127 		.seq_server = server,
1128 		.seq_args = args,
1129 		.seq_res = res,
1130 	};
1131 	struct rpc_task_setup task_setup = {
1132 		.rpc_client = clnt,
1133 		.rpc_message = msg,
1134 		.callback_ops = clp->cl_mvops->call_sync_ops,
1135 		.callback_data = &data,
1136 		.flags = task_flags,
1137 	};
1138 
1139 	return nfs4_call_sync_custom(&task_setup);
1140 }
1141 
1142 static int nfs4_call_sync_sequence(struct rpc_clnt *clnt,
1143 				   struct nfs_server *server,
1144 				   struct rpc_message *msg,
1145 				   struct nfs4_sequence_args *args,
1146 				   struct nfs4_sequence_res *res)
1147 {
1148 	return nfs4_do_call_sync(clnt, server, msg, args, res, 0);
1149 }
1150 
1151 
1152 int nfs4_call_sync(struct rpc_clnt *clnt,
1153 		   struct nfs_server *server,
1154 		   struct rpc_message *msg,
1155 		   struct nfs4_sequence_args *args,
1156 		   struct nfs4_sequence_res *res,
1157 		   int cache_reply)
1158 {
1159 	nfs4_init_sequence(args, res, cache_reply, 0);
1160 	return nfs4_call_sync_sequence(clnt, server, msg, args, res);
1161 }
1162 
1163 static void
1164 nfs4_inc_nlink_locked(struct inode *inode)
1165 {
1166 	NFS_I(inode)->cache_validity |= NFS_INO_INVALID_OTHER;
1167 	inc_nlink(inode);
1168 }
1169 
1170 static void
1171 nfs4_dec_nlink_locked(struct inode *inode)
1172 {
1173 	NFS_I(inode)->cache_validity |= NFS_INO_INVALID_OTHER;
1174 	drop_nlink(inode);
1175 }
1176 
1177 static void
1178 nfs4_update_changeattr_locked(struct inode *inode,
1179 		struct nfs4_change_info *cinfo,
1180 		unsigned long timestamp, unsigned long cache_validity)
1181 {
1182 	struct nfs_inode *nfsi = NFS_I(inode);
1183 
1184 	nfsi->cache_validity |= NFS_INO_INVALID_CTIME
1185 		| NFS_INO_INVALID_MTIME
1186 		| cache_validity;
1187 
1188 	if (cinfo->atomic && cinfo->before == inode_peek_iversion_raw(inode)) {
1189 		nfsi->cache_validity &= ~NFS_INO_REVAL_PAGECACHE;
1190 		nfsi->attrtimeo_timestamp = jiffies;
1191 	} else {
1192 		if (S_ISDIR(inode->i_mode)) {
1193 			nfsi->cache_validity |= NFS_INO_INVALID_DATA;
1194 			nfs_force_lookup_revalidate(inode);
1195 		} else {
1196 			if (!NFS_PROTO(inode)->have_delegation(inode,
1197 							       FMODE_READ))
1198 				nfsi->cache_validity |= NFS_INO_REVAL_PAGECACHE;
1199 		}
1200 
1201 		if (cinfo->before != inode_peek_iversion_raw(inode))
1202 			nfsi->cache_validity |= NFS_INO_INVALID_ACCESS |
1203 						NFS_INO_INVALID_ACL |
1204 						NFS_INO_INVALID_XATTR;
1205 	}
1206 	inode_set_iversion_raw(inode, cinfo->after);
1207 	nfsi->read_cache_jiffies = timestamp;
1208 	nfsi->attr_gencount = nfs_inc_attr_generation_counter();
1209 	nfsi->cache_validity &= ~NFS_INO_INVALID_CHANGE;
1210 
1211 	if (nfsi->cache_validity & NFS_INO_INVALID_DATA)
1212 		nfs_fscache_invalidate(inode);
1213 }
1214 
1215 void
1216 nfs4_update_changeattr(struct inode *dir, struct nfs4_change_info *cinfo,
1217 		unsigned long timestamp, unsigned long cache_validity)
1218 {
1219 	spin_lock(&dir->i_lock);
1220 	nfs4_update_changeattr_locked(dir, cinfo, timestamp, cache_validity);
1221 	spin_unlock(&dir->i_lock);
1222 }
1223 
1224 struct nfs4_open_createattrs {
1225 	struct nfs4_label *label;
1226 	struct iattr *sattr;
1227 	const __u32 verf[2];
1228 };
1229 
1230 static bool nfs4_clear_cap_atomic_open_v1(struct nfs_server *server,
1231 		int err, struct nfs4_exception *exception)
1232 {
1233 	if (err != -EINVAL)
1234 		return false;
1235 	if (!(server->caps & NFS_CAP_ATOMIC_OPEN_V1))
1236 		return false;
1237 	server->caps &= ~NFS_CAP_ATOMIC_OPEN_V1;
1238 	exception->retry = 1;
1239 	return true;
1240 }
1241 
1242 static fmode_t _nfs4_ctx_to_accessmode(const struct nfs_open_context *ctx)
1243 {
1244 	 return ctx->mode & (FMODE_READ|FMODE_WRITE|FMODE_EXEC);
1245 }
1246 
1247 static fmode_t _nfs4_ctx_to_openmode(const struct nfs_open_context *ctx)
1248 {
1249 	fmode_t ret = ctx->mode & (FMODE_READ|FMODE_WRITE);
1250 
1251 	return (ctx->mode & FMODE_EXEC) ? FMODE_READ | ret : ret;
1252 }
1253 
1254 static u32
1255 nfs4_map_atomic_open_share(struct nfs_server *server,
1256 		fmode_t fmode, int openflags)
1257 {
1258 	u32 res = 0;
1259 
1260 	switch (fmode & (FMODE_READ | FMODE_WRITE)) {
1261 	case FMODE_READ:
1262 		res = NFS4_SHARE_ACCESS_READ;
1263 		break;
1264 	case FMODE_WRITE:
1265 		res = NFS4_SHARE_ACCESS_WRITE;
1266 		break;
1267 	case FMODE_READ|FMODE_WRITE:
1268 		res = NFS4_SHARE_ACCESS_BOTH;
1269 	}
1270 	if (!(server->caps & NFS_CAP_ATOMIC_OPEN_V1))
1271 		goto out;
1272 	/* Want no delegation if we're using O_DIRECT */
1273 	if (openflags & O_DIRECT)
1274 		res |= NFS4_SHARE_WANT_NO_DELEG;
1275 out:
1276 	return res;
1277 }
1278 
1279 static enum open_claim_type4
1280 nfs4_map_atomic_open_claim(struct nfs_server *server,
1281 		enum open_claim_type4 claim)
1282 {
1283 	if (server->caps & NFS_CAP_ATOMIC_OPEN_V1)
1284 		return claim;
1285 	switch (claim) {
1286 	default:
1287 		return claim;
1288 	case NFS4_OPEN_CLAIM_FH:
1289 		return NFS4_OPEN_CLAIM_NULL;
1290 	case NFS4_OPEN_CLAIM_DELEG_CUR_FH:
1291 		return NFS4_OPEN_CLAIM_DELEGATE_CUR;
1292 	case NFS4_OPEN_CLAIM_DELEG_PREV_FH:
1293 		return NFS4_OPEN_CLAIM_DELEGATE_PREV;
1294 	}
1295 }
1296 
1297 static void nfs4_init_opendata_res(struct nfs4_opendata *p)
1298 {
1299 	p->o_res.f_attr = &p->f_attr;
1300 	p->o_res.f_label = p->f_label;
1301 	p->o_res.seqid = p->o_arg.seqid;
1302 	p->c_res.seqid = p->c_arg.seqid;
1303 	p->o_res.server = p->o_arg.server;
1304 	p->o_res.access_request = p->o_arg.access;
1305 	nfs_fattr_init(&p->f_attr);
1306 	nfs_fattr_init_names(&p->f_attr, &p->owner_name, &p->group_name);
1307 }
1308 
1309 static struct nfs4_opendata *nfs4_opendata_alloc(struct dentry *dentry,
1310 		struct nfs4_state_owner *sp, fmode_t fmode, int flags,
1311 		const struct nfs4_open_createattrs *c,
1312 		enum open_claim_type4 claim,
1313 		gfp_t gfp_mask)
1314 {
1315 	struct dentry *parent = dget_parent(dentry);
1316 	struct inode *dir = d_inode(parent);
1317 	struct nfs_server *server = NFS_SERVER(dir);
1318 	struct nfs_seqid *(*alloc_seqid)(struct nfs_seqid_counter *, gfp_t);
1319 	struct nfs4_label *label = (c != NULL) ? c->label : NULL;
1320 	struct nfs4_opendata *p;
1321 
1322 	p = kzalloc(sizeof(*p), gfp_mask);
1323 	if (p == NULL)
1324 		goto err;
1325 
1326 	p->f_label = nfs4_label_alloc(server, gfp_mask);
1327 	if (IS_ERR(p->f_label))
1328 		goto err_free_p;
1329 
1330 	p->a_label = nfs4_label_alloc(server, gfp_mask);
1331 	if (IS_ERR(p->a_label))
1332 		goto err_free_f;
1333 
1334 	alloc_seqid = server->nfs_client->cl_mvops->alloc_seqid;
1335 	p->o_arg.seqid = alloc_seqid(&sp->so_seqid, gfp_mask);
1336 	if (IS_ERR(p->o_arg.seqid))
1337 		goto err_free_label;
1338 	nfs_sb_active(dentry->d_sb);
1339 	p->dentry = dget(dentry);
1340 	p->dir = parent;
1341 	p->owner = sp;
1342 	atomic_inc(&sp->so_count);
1343 	p->o_arg.open_flags = flags;
1344 	p->o_arg.fmode = fmode & (FMODE_READ|FMODE_WRITE);
1345 	p->o_arg.claim = nfs4_map_atomic_open_claim(server, claim);
1346 	p->o_arg.share_access = nfs4_map_atomic_open_share(server,
1347 			fmode, flags);
1348 	if (flags & O_CREAT) {
1349 		p->o_arg.umask = current_umask();
1350 		p->o_arg.label = nfs4_label_copy(p->a_label, label);
1351 		if (c->sattr != NULL && c->sattr->ia_valid != 0) {
1352 			p->o_arg.u.attrs = &p->attrs;
1353 			memcpy(&p->attrs, c->sattr, sizeof(p->attrs));
1354 
1355 			memcpy(p->o_arg.u.verifier.data, c->verf,
1356 					sizeof(p->o_arg.u.verifier.data));
1357 		}
1358 	}
1359 	/* don't put an ACCESS op in OPEN compound if O_EXCL, because ACCESS
1360 	 * will return permission denied for all bits until close */
1361 	if (!(flags & O_EXCL)) {
1362 		/* ask server to check for all possible rights as results
1363 		 * are cached */
1364 		switch (p->o_arg.claim) {
1365 		default:
1366 			break;
1367 		case NFS4_OPEN_CLAIM_NULL:
1368 		case NFS4_OPEN_CLAIM_FH:
1369 			p->o_arg.access = NFS4_ACCESS_READ |
1370 				NFS4_ACCESS_MODIFY |
1371 				NFS4_ACCESS_EXTEND |
1372 				NFS4_ACCESS_EXECUTE;
1373 #ifdef CONFIG_NFS_V4_2
1374 			if (server->caps & NFS_CAP_XATTR)
1375 				p->o_arg.access |= NFS4_ACCESS_XAREAD |
1376 				    NFS4_ACCESS_XAWRITE |
1377 				    NFS4_ACCESS_XALIST;
1378 #endif
1379 		}
1380 	}
1381 	p->o_arg.clientid = server->nfs_client->cl_clientid;
1382 	p->o_arg.id.create_time = ktime_to_ns(sp->so_seqid.create_time);
1383 	p->o_arg.id.uniquifier = sp->so_seqid.owner_id;
1384 	p->o_arg.name = &dentry->d_name;
1385 	p->o_arg.server = server;
1386 	p->o_arg.bitmask = nfs4_bitmask(server, label);
1387 	p->o_arg.open_bitmap = &nfs4_fattr_bitmap[0];
1388 	switch (p->o_arg.claim) {
1389 	case NFS4_OPEN_CLAIM_NULL:
1390 	case NFS4_OPEN_CLAIM_DELEGATE_CUR:
1391 	case NFS4_OPEN_CLAIM_DELEGATE_PREV:
1392 		p->o_arg.fh = NFS_FH(dir);
1393 		break;
1394 	case NFS4_OPEN_CLAIM_PREVIOUS:
1395 	case NFS4_OPEN_CLAIM_FH:
1396 	case NFS4_OPEN_CLAIM_DELEG_CUR_FH:
1397 	case NFS4_OPEN_CLAIM_DELEG_PREV_FH:
1398 		p->o_arg.fh = NFS_FH(d_inode(dentry));
1399 	}
1400 	p->c_arg.fh = &p->o_res.fh;
1401 	p->c_arg.stateid = &p->o_res.stateid;
1402 	p->c_arg.seqid = p->o_arg.seqid;
1403 	nfs4_init_opendata_res(p);
1404 	kref_init(&p->kref);
1405 	return p;
1406 
1407 err_free_label:
1408 	nfs4_label_free(p->a_label);
1409 err_free_f:
1410 	nfs4_label_free(p->f_label);
1411 err_free_p:
1412 	kfree(p);
1413 err:
1414 	dput(parent);
1415 	return NULL;
1416 }
1417 
1418 static void nfs4_opendata_free(struct kref *kref)
1419 {
1420 	struct nfs4_opendata *p = container_of(kref,
1421 			struct nfs4_opendata, kref);
1422 	struct super_block *sb = p->dentry->d_sb;
1423 
1424 	nfs4_lgopen_release(p->lgp);
1425 	nfs_free_seqid(p->o_arg.seqid);
1426 	nfs4_sequence_free_slot(&p->o_res.seq_res);
1427 	if (p->state != NULL)
1428 		nfs4_put_open_state(p->state);
1429 	nfs4_put_state_owner(p->owner);
1430 
1431 	nfs4_label_free(p->a_label);
1432 	nfs4_label_free(p->f_label);
1433 
1434 	dput(p->dir);
1435 	dput(p->dentry);
1436 	nfs_sb_deactive(sb);
1437 	nfs_fattr_free_names(&p->f_attr);
1438 	kfree(p->f_attr.mdsthreshold);
1439 	kfree(p);
1440 }
1441 
1442 static void nfs4_opendata_put(struct nfs4_opendata *p)
1443 {
1444 	if (p != NULL)
1445 		kref_put(&p->kref, nfs4_opendata_free);
1446 }
1447 
1448 static bool nfs4_mode_match_open_stateid(struct nfs4_state *state,
1449 		fmode_t fmode)
1450 {
1451 	switch(fmode & (FMODE_READ|FMODE_WRITE)) {
1452 	case FMODE_READ|FMODE_WRITE:
1453 		return state->n_rdwr != 0;
1454 	case FMODE_WRITE:
1455 		return state->n_wronly != 0;
1456 	case FMODE_READ:
1457 		return state->n_rdonly != 0;
1458 	}
1459 	WARN_ON_ONCE(1);
1460 	return false;
1461 }
1462 
1463 static int can_open_cached(struct nfs4_state *state, fmode_t mode,
1464 		int open_mode, enum open_claim_type4 claim)
1465 {
1466 	int ret = 0;
1467 
1468 	if (open_mode & (O_EXCL|O_TRUNC))
1469 		goto out;
1470 	switch (claim) {
1471 	case NFS4_OPEN_CLAIM_NULL:
1472 	case NFS4_OPEN_CLAIM_FH:
1473 		goto out;
1474 	default:
1475 		break;
1476 	}
1477 	switch (mode & (FMODE_READ|FMODE_WRITE)) {
1478 		case FMODE_READ:
1479 			ret |= test_bit(NFS_O_RDONLY_STATE, &state->flags) != 0
1480 				&& state->n_rdonly != 0;
1481 			break;
1482 		case FMODE_WRITE:
1483 			ret |= test_bit(NFS_O_WRONLY_STATE, &state->flags) != 0
1484 				&& state->n_wronly != 0;
1485 			break;
1486 		case FMODE_READ|FMODE_WRITE:
1487 			ret |= test_bit(NFS_O_RDWR_STATE, &state->flags) != 0
1488 				&& state->n_rdwr != 0;
1489 	}
1490 out:
1491 	return ret;
1492 }
1493 
1494 static int can_open_delegated(struct nfs_delegation *delegation, fmode_t fmode,
1495 		enum open_claim_type4 claim)
1496 {
1497 	if (delegation == NULL)
1498 		return 0;
1499 	if ((delegation->type & fmode) != fmode)
1500 		return 0;
1501 	switch (claim) {
1502 	case NFS4_OPEN_CLAIM_NULL:
1503 	case NFS4_OPEN_CLAIM_FH:
1504 		break;
1505 	case NFS4_OPEN_CLAIM_PREVIOUS:
1506 		if (!test_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags))
1507 			break;
1508 		fallthrough;
1509 	default:
1510 		return 0;
1511 	}
1512 	nfs_mark_delegation_referenced(delegation);
1513 	return 1;
1514 }
1515 
1516 static void update_open_stateflags(struct nfs4_state *state, fmode_t fmode)
1517 {
1518 	switch (fmode) {
1519 		case FMODE_WRITE:
1520 			state->n_wronly++;
1521 			break;
1522 		case FMODE_READ:
1523 			state->n_rdonly++;
1524 			break;
1525 		case FMODE_READ|FMODE_WRITE:
1526 			state->n_rdwr++;
1527 	}
1528 	nfs4_state_set_mode_locked(state, state->state | fmode);
1529 }
1530 
1531 #ifdef CONFIG_NFS_V4_1
1532 static bool nfs_open_stateid_recover_openmode(struct nfs4_state *state)
1533 {
1534 	if (state->n_rdonly && !test_bit(NFS_O_RDONLY_STATE, &state->flags))
1535 		return true;
1536 	if (state->n_wronly && !test_bit(NFS_O_WRONLY_STATE, &state->flags))
1537 		return true;
1538 	if (state->n_rdwr && !test_bit(NFS_O_RDWR_STATE, &state->flags))
1539 		return true;
1540 	return false;
1541 }
1542 #endif /* CONFIG_NFS_V4_1 */
1543 
1544 static void nfs_state_log_update_open_stateid(struct nfs4_state *state)
1545 {
1546 	if (test_and_clear_bit(NFS_STATE_CHANGE_WAIT, &state->flags))
1547 		wake_up_all(&state->waitq);
1548 }
1549 
1550 static void nfs_state_log_out_of_order_open_stateid(struct nfs4_state *state,
1551 		const nfs4_stateid *stateid)
1552 {
1553 	u32 state_seqid = be32_to_cpu(state->open_stateid.seqid);
1554 	u32 stateid_seqid = be32_to_cpu(stateid->seqid);
1555 
1556 	if (stateid_seqid == state_seqid + 1U ||
1557 	    (stateid_seqid == 1U && state_seqid == 0xffffffffU))
1558 		nfs_state_log_update_open_stateid(state);
1559 	else
1560 		set_bit(NFS_STATE_CHANGE_WAIT, &state->flags);
1561 }
1562 
1563 static void nfs_test_and_clear_all_open_stateid(struct nfs4_state *state)
1564 {
1565 	struct nfs_client *clp = state->owner->so_server->nfs_client;
1566 	bool need_recover = false;
1567 
1568 	if (test_and_clear_bit(NFS_O_RDONLY_STATE, &state->flags) && state->n_rdonly)
1569 		need_recover = true;
1570 	if (test_and_clear_bit(NFS_O_WRONLY_STATE, &state->flags) && state->n_wronly)
1571 		need_recover = true;
1572 	if (test_and_clear_bit(NFS_O_RDWR_STATE, &state->flags) && state->n_rdwr)
1573 		need_recover = true;
1574 	if (need_recover)
1575 		nfs4_state_mark_reclaim_nograce(clp, state);
1576 }
1577 
1578 /*
1579  * Check for whether or not the caller may update the open stateid
1580  * to the value passed in by stateid.
1581  *
1582  * Note: This function relies heavily on the server implementing
1583  * RFC7530 Section 9.1.4.2, and RFC5661 Section 8.2.2
1584  * correctly.
1585  * i.e. The stateid seqids have to be initialised to 1, and
1586  * are then incremented on every state transition.
1587  */
1588 static bool nfs_need_update_open_stateid(struct nfs4_state *state,
1589 		const nfs4_stateid *stateid)
1590 {
1591 	if (test_bit(NFS_OPEN_STATE, &state->flags) == 0 ||
1592 	    !nfs4_stateid_match_other(stateid, &state->open_stateid)) {
1593 		if (stateid->seqid == cpu_to_be32(1))
1594 			nfs_state_log_update_open_stateid(state);
1595 		else
1596 			set_bit(NFS_STATE_CHANGE_WAIT, &state->flags);
1597 		return true;
1598 	}
1599 
1600 	if (nfs4_stateid_is_newer(stateid, &state->open_stateid)) {
1601 		nfs_state_log_out_of_order_open_stateid(state, stateid);
1602 		return true;
1603 	}
1604 	return false;
1605 }
1606 
1607 static void nfs_resync_open_stateid_locked(struct nfs4_state *state)
1608 {
1609 	if (!(state->n_wronly || state->n_rdonly || state->n_rdwr))
1610 		return;
1611 	if (state->n_wronly)
1612 		set_bit(NFS_O_WRONLY_STATE, &state->flags);
1613 	if (state->n_rdonly)
1614 		set_bit(NFS_O_RDONLY_STATE, &state->flags);
1615 	if (state->n_rdwr)
1616 		set_bit(NFS_O_RDWR_STATE, &state->flags);
1617 	set_bit(NFS_OPEN_STATE, &state->flags);
1618 }
1619 
1620 static void nfs_clear_open_stateid_locked(struct nfs4_state *state,
1621 		nfs4_stateid *stateid, fmode_t fmode)
1622 {
1623 	clear_bit(NFS_O_RDWR_STATE, &state->flags);
1624 	switch (fmode & (FMODE_READ|FMODE_WRITE)) {
1625 	case FMODE_WRITE:
1626 		clear_bit(NFS_O_RDONLY_STATE, &state->flags);
1627 		break;
1628 	case FMODE_READ:
1629 		clear_bit(NFS_O_WRONLY_STATE, &state->flags);
1630 		break;
1631 	case 0:
1632 		clear_bit(NFS_O_RDONLY_STATE, &state->flags);
1633 		clear_bit(NFS_O_WRONLY_STATE, &state->flags);
1634 		clear_bit(NFS_OPEN_STATE, &state->flags);
1635 	}
1636 	if (stateid == NULL)
1637 		return;
1638 	/* Handle OPEN+OPEN_DOWNGRADE races */
1639 	if (nfs4_stateid_match_other(stateid, &state->open_stateid) &&
1640 	    !nfs4_stateid_is_newer(stateid, &state->open_stateid)) {
1641 		nfs_resync_open_stateid_locked(state);
1642 		goto out;
1643 	}
1644 	if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0)
1645 		nfs4_stateid_copy(&state->stateid, stateid);
1646 	nfs4_stateid_copy(&state->open_stateid, stateid);
1647 	trace_nfs4_open_stateid_update(state->inode, stateid, 0);
1648 out:
1649 	nfs_state_log_update_open_stateid(state);
1650 }
1651 
1652 static void nfs_clear_open_stateid(struct nfs4_state *state,
1653 	nfs4_stateid *arg_stateid,
1654 	nfs4_stateid *stateid, fmode_t fmode)
1655 {
1656 	write_seqlock(&state->seqlock);
1657 	/* Ignore, if the CLOSE argment doesn't match the current stateid */
1658 	if (nfs4_state_match_open_stateid_other(state, arg_stateid))
1659 		nfs_clear_open_stateid_locked(state, stateid, fmode);
1660 	write_sequnlock(&state->seqlock);
1661 	if (test_bit(NFS_STATE_RECLAIM_NOGRACE, &state->flags))
1662 		nfs4_schedule_state_manager(state->owner->so_server->nfs_client);
1663 }
1664 
1665 static void nfs_set_open_stateid_locked(struct nfs4_state *state,
1666 		const nfs4_stateid *stateid, nfs4_stateid *freeme)
1667 	__must_hold(&state->owner->so_lock)
1668 	__must_hold(&state->seqlock)
1669 	__must_hold(RCU)
1670 
1671 {
1672 	DEFINE_WAIT(wait);
1673 	int status = 0;
1674 	for (;;) {
1675 
1676 		if (!nfs_need_update_open_stateid(state, stateid))
1677 			return;
1678 		if (!test_bit(NFS_STATE_CHANGE_WAIT, &state->flags))
1679 			break;
1680 		if (status)
1681 			break;
1682 		/* Rely on seqids for serialisation with NFSv4.0 */
1683 		if (!nfs4_has_session(NFS_SERVER(state->inode)->nfs_client))
1684 			break;
1685 
1686 		prepare_to_wait(&state->waitq, &wait, TASK_KILLABLE);
1687 		/*
1688 		 * Ensure we process the state changes in the same order
1689 		 * in which the server processed them by delaying the
1690 		 * update of the stateid until we are in sequence.
1691 		 */
1692 		write_sequnlock(&state->seqlock);
1693 		spin_unlock(&state->owner->so_lock);
1694 		rcu_read_unlock();
1695 		trace_nfs4_open_stateid_update_wait(state->inode, stateid, 0);
1696 		if (!signal_pending(current)) {
1697 			if (schedule_timeout(5*HZ) == 0)
1698 				status = -EAGAIN;
1699 			else
1700 				status = 0;
1701 		} else
1702 			status = -EINTR;
1703 		finish_wait(&state->waitq, &wait);
1704 		rcu_read_lock();
1705 		spin_lock(&state->owner->so_lock);
1706 		write_seqlock(&state->seqlock);
1707 	}
1708 
1709 	if (test_bit(NFS_OPEN_STATE, &state->flags) &&
1710 	    !nfs4_stateid_match_other(stateid, &state->open_stateid)) {
1711 		nfs4_stateid_copy(freeme, &state->open_stateid);
1712 		nfs_test_and_clear_all_open_stateid(state);
1713 	}
1714 
1715 	if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0)
1716 		nfs4_stateid_copy(&state->stateid, stateid);
1717 	nfs4_stateid_copy(&state->open_stateid, stateid);
1718 	trace_nfs4_open_stateid_update(state->inode, stateid, status);
1719 	nfs_state_log_update_open_stateid(state);
1720 }
1721 
1722 static void nfs_state_set_open_stateid(struct nfs4_state *state,
1723 		const nfs4_stateid *open_stateid,
1724 		fmode_t fmode,
1725 		nfs4_stateid *freeme)
1726 {
1727 	/*
1728 	 * Protect the call to nfs4_state_set_mode_locked and
1729 	 * serialise the stateid update
1730 	 */
1731 	write_seqlock(&state->seqlock);
1732 	nfs_set_open_stateid_locked(state, open_stateid, freeme);
1733 	switch (fmode) {
1734 	case FMODE_READ:
1735 		set_bit(NFS_O_RDONLY_STATE, &state->flags);
1736 		break;
1737 	case FMODE_WRITE:
1738 		set_bit(NFS_O_WRONLY_STATE, &state->flags);
1739 		break;
1740 	case FMODE_READ|FMODE_WRITE:
1741 		set_bit(NFS_O_RDWR_STATE, &state->flags);
1742 	}
1743 	set_bit(NFS_OPEN_STATE, &state->flags);
1744 	write_sequnlock(&state->seqlock);
1745 }
1746 
1747 static void nfs_state_clear_open_state_flags(struct nfs4_state *state)
1748 {
1749 	clear_bit(NFS_O_RDWR_STATE, &state->flags);
1750 	clear_bit(NFS_O_WRONLY_STATE, &state->flags);
1751 	clear_bit(NFS_O_RDONLY_STATE, &state->flags);
1752 	clear_bit(NFS_OPEN_STATE, &state->flags);
1753 }
1754 
1755 static void nfs_state_set_delegation(struct nfs4_state *state,
1756 		const nfs4_stateid *deleg_stateid,
1757 		fmode_t fmode)
1758 {
1759 	/*
1760 	 * Protect the call to nfs4_state_set_mode_locked and
1761 	 * serialise the stateid update
1762 	 */
1763 	write_seqlock(&state->seqlock);
1764 	nfs4_stateid_copy(&state->stateid, deleg_stateid);
1765 	set_bit(NFS_DELEGATED_STATE, &state->flags);
1766 	write_sequnlock(&state->seqlock);
1767 }
1768 
1769 static void nfs_state_clear_delegation(struct nfs4_state *state)
1770 {
1771 	write_seqlock(&state->seqlock);
1772 	nfs4_stateid_copy(&state->stateid, &state->open_stateid);
1773 	clear_bit(NFS_DELEGATED_STATE, &state->flags);
1774 	write_sequnlock(&state->seqlock);
1775 }
1776 
1777 int update_open_stateid(struct nfs4_state *state,
1778 		const nfs4_stateid *open_stateid,
1779 		const nfs4_stateid *delegation,
1780 		fmode_t fmode)
1781 {
1782 	struct nfs_server *server = NFS_SERVER(state->inode);
1783 	struct nfs_client *clp = server->nfs_client;
1784 	struct nfs_inode *nfsi = NFS_I(state->inode);
1785 	struct nfs_delegation *deleg_cur;
1786 	nfs4_stateid freeme = { };
1787 	int ret = 0;
1788 
1789 	fmode &= (FMODE_READ|FMODE_WRITE);
1790 
1791 	rcu_read_lock();
1792 	spin_lock(&state->owner->so_lock);
1793 	if (open_stateid != NULL) {
1794 		nfs_state_set_open_stateid(state, open_stateid, fmode, &freeme);
1795 		ret = 1;
1796 	}
1797 
1798 	deleg_cur = nfs4_get_valid_delegation(state->inode);
1799 	if (deleg_cur == NULL)
1800 		goto no_delegation;
1801 
1802 	spin_lock(&deleg_cur->lock);
1803 	if (rcu_dereference(nfsi->delegation) != deleg_cur ||
1804 	   test_bit(NFS_DELEGATION_RETURNING, &deleg_cur->flags) ||
1805 	    (deleg_cur->type & fmode) != fmode)
1806 		goto no_delegation_unlock;
1807 
1808 	if (delegation == NULL)
1809 		delegation = &deleg_cur->stateid;
1810 	else if (!nfs4_stateid_match_other(&deleg_cur->stateid, delegation))
1811 		goto no_delegation_unlock;
1812 
1813 	nfs_mark_delegation_referenced(deleg_cur);
1814 	nfs_state_set_delegation(state, &deleg_cur->stateid, fmode);
1815 	ret = 1;
1816 no_delegation_unlock:
1817 	spin_unlock(&deleg_cur->lock);
1818 no_delegation:
1819 	if (ret)
1820 		update_open_stateflags(state, fmode);
1821 	spin_unlock(&state->owner->so_lock);
1822 	rcu_read_unlock();
1823 
1824 	if (test_bit(NFS_STATE_RECLAIM_NOGRACE, &state->flags))
1825 		nfs4_schedule_state_manager(clp);
1826 	if (freeme.type != 0)
1827 		nfs4_test_and_free_stateid(server, &freeme,
1828 				state->owner->so_cred);
1829 
1830 	return ret;
1831 }
1832 
1833 static bool nfs4_update_lock_stateid(struct nfs4_lock_state *lsp,
1834 		const nfs4_stateid *stateid)
1835 {
1836 	struct nfs4_state *state = lsp->ls_state;
1837 	bool ret = false;
1838 
1839 	spin_lock(&state->state_lock);
1840 	if (!nfs4_stateid_match_other(stateid, &lsp->ls_stateid))
1841 		goto out_noupdate;
1842 	if (!nfs4_stateid_is_newer(stateid, &lsp->ls_stateid))
1843 		goto out_noupdate;
1844 	nfs4_stateid_copy(&lsp->ls_stateid, stateid);
1845 	ret = true;
1846 out_noupdate:
1847 	spin_unlock(&state->state_lock);
1848 	return ret;
1849 }
1850 
1851 static void nfs4_return_incompatible_delegation(struct inode *inode, fmode_t fmode)
1852 {
1853 	struct nfs_delegation *delegation;
1854 
1855 	fmode &= FMODE_READ|FMODE_WRITE;
1856 	rcu_read_lock();
1857 	delegation = nfs4_get_valid_delegation(inode);
1858 	if (delegation == NULL || (delegation->type & fmode) == fmode) {
1859 		rcu_read_unlock();
1860 		return;
1861 	}
1862 	rcu_read_unlock();
1863 	nfs4_inode_return_delegation(inode);
1864 }
1865 
1866 static struct nfs4_state *nfs4_try_open_cached(struct nfs4_opendata *opendata)
1867 {
1868 	struct nfs4_state *state = opendata->state;
1869 	struct nfs_delegation *delegation;
1870 	int open_mode = opendata->o_arg.open_flags;
1871 	fmode_t fmode = opendata->o_arg.fmode;
1872 	enum open_claim_type4 claim = opendata->o_arg.claim;
1873 	nfs4_stateid stateid;
1874 	int ret = -EAGAIN;
1875 
1876 	for (;;) {
1877 		spin_lock(&state->owner->so_lock);
1878 		if (can_open_cached(state, fmode, open_mode, claim)) {
1879 			update_open_stateflags(state, fmode);
1880 			spin_unlock(&state->owner->so_lock);
1881 			goto out_return_state;
1882 		}
1883 		spin_unlock(&state->owner->so_lock);
1884 		rcu_read_lock();
1885 		delegation = nfs4_get_valid_delegation(state->inode);
1886 		if (!can_open_delegated(delegation, fmode, claim)) {
1887 			rcu_read_unlock();
1888 			break;
1889 		}
1890 		/* Save the delegation */
1891 		nfs4_stateid_copy(&stateid, &delegation->stateid);
1892 		rcu_read_unlock();
1893 		nfs_release_seqid(opendata->o_arg.seqid);
1894 		if (!opendata->is_recover) {
1895 			ret = nfs_may_open(state->inode, state->owner->so_cred, open_mode);
1896 			if (ret != 0)
1897 				goto out;
1898 		}
1899 		ret = -EAGAIN;
1900 
1901 		/* Try to update the stateid using the delegation */
1902 		if (update_open_stateid(state, NULL, &stateid, fmode))
1903 			goto out_return_state;
1904 	}
1905 out:
1906 	return ERR_PTR(ret);
1907 out_return_state:
1908 	refcount_inc(&state->count);
1909 	return state;
1910 }
1911 
1912 static void
1913 nfs4_opendata_check_deleg(struct nfs4_opendata *data, struct nfs4_state *state)
1914 {
1915 	struct nfs_client *clp = NFS_SERVER(state->inode)->nfs_client;
1916 	struct nfs_delegation *delegation;
1917 	int delegation_flags = 0;
1918 
1919 	rcu_read_lock();
1920 	delegation = rcu_dereference(NFS_I(state->inode)->delegation);
1921 	if (delegation)
1922 		delegation_flags = delegation->flags;
1923 	rcu_read_unlock();
1924 	switch (data->o_arg.claim) {
1925 	default:
1926 		break;
1927 	case NFS4_OPEN_CLAIM_DELEGATE_CUR:
1928 	case NFS4_OPEN_CLAIM_DELEG_CUR_FH:
1929 		pr_err_ratelimited("NFS: Broken NFSv4 server %s is "
1930 				   "returning a delegation for "
1931 				   "OPEN(CLAIM_DELEGATE_CUR)\n",
1932 				   clp->cl_hostname);
1933 		return;
1934 	}
1935 	if ((delegation_flags & 1UL<<NFS_DELEGATION_NEED_RECLAIM) == 0)
1936 		nfs_inode_set_delegation(state->inode,
1937 				data->owner->so_cred,
1938 				data->o_res.delegation_type,
1939 				&data->o_res.delegation,
1940 				data->o_res.pagemod_limit);
1941 	else
1942 		nfs_inode_reclaim_delegation(state->inode,
1943 				data->owner->so_cred,
1944 				data->o_res.delegation_type,
1945 				&data->o_res.delegation,
1946 				data->o_res.pagemod_limit);
1947 
1948 	if (data->o_res.do_recall)
1949 		nfs_async_inode_return_delegation(state->inode,
1950 						  &data->o_res.delegation);
1951 }
1952 
1953 /*
1954  * Check the inode attributes against the CLAIM_PREVIOUS returned attributes
1955  * and update the nfs4_state.
1956  */
1957 static struct nfs4_state *
1958 _nfs4_opendata_reclaim_to_nfs4_state(struct nfs4_opendata *data)
1959 {
1960 	struct inode *inode = data->state->inode;
1961 	struct nfs4_state *state = data->state;
1962 	int ret;
1963 
1964 	if (!data->rpc_done) {
1965 		if (data->rpc_status)
1966 			return ERR_PTR(data->rpc_status);
1967 		/* cached opens have already been processed */
1968 		goto update;
1969 	}
1970 
1971 	ret = nfs_refresh_inode(inode, &data->f_attr);
1972 	if (ret)
1973 		return ERR_PTR(ret);
1974 
1975 	if (data->o_res.delegation_type != 0)
1976 		nfs4_opendata_check_deleg(data, state);
1977 update:
1978 	if (!update_open_stateid(state, &data->o_res.stateid,
1979 				NULL, data->o_arg.fmode))
1980 		return ERR_PTR(-EAGAIN);
1981 	refcount_inc(&state->count);
1982 
1983 	return state;
1984 }
1985 
1986 static struct inode *
1987 nfs4_opendata_get_inode(struct nfs4_opendata *data)
1988 {
1989 	struct inode *inode;
1990 
1991 	switch (data->o_arg.claim) {
1992 	case NFS4_OPEN_CLAIM_NULL:
1993 	case NFS4_OPEN_CLAIM_DELEGATE_CUR:
1994 	case NFS4_OPEN_CLAIM_DELEGATE_PREV:
1995 		if (!(data->f_attr.valid & NFS_ATTR_FATTR))
1996 			return ERR_PTR(-EAGAIN);
1997 		inode = nfs_fhget(data->dir->d_sb, &data->o_res.fh,
1998 				&data->f_attr, data->f_label);
1999 		break;
2000 	default:
2001 		inode = d_inode(data->dentry);
2002 		ihold(inode);
2003 		nfs_refresh_inode(inode, &data->f_attr);
2004 	}
2005 	return inode;
2006 }
2007 
2008 static struct nfs4_state *
2009 nfs4_opendata_find_nfs4_state(struct nfs4_opendata *data)
2010 {
2011 	struct nfs4_state *state;
2012 	struct inode *inode;
2013 
2014 	inode = nfs4_opendata_get_inode(data);
2015 	if (IS_ERR(inode))
2016 		return ERR_CAST(inode);
2017 	if (data->state != NULL && data->state->inode == inode) {
2018 		state = data->state;
2019 		refcount_inc(&state->count);
2020 	} else
2021 		state = nfs4_get_open_state(inode, data->owner);
2022 	iput(inode);
2023 	if (state == NULL)
2024 		state = ERR_PTR(-ENOMEM);
2025 	return state;
2026 }
2027 
2028 static struct nfs4_state *
2029 _nfs4_opendata_to_nfs4_state(struct nfs4_opendata *data)
2030 {
2031 	struct nfs4_state *state;
2032 
2033 	if (!data->rpc_done) {
2034 		state = nfs4_try_open_cached(data);
2035 		trace_nfs4_cached_open(data->state);
2036 		goto out;
2037 	}
2038 
2039 	state = nfs4_opendata_find_nfs4_state(data);
2040 	if (IS_ERR(state))
2041 		goto out;
2042 
2043 	if (data->o_res.delegation_type != 0)
2044 		nfs4_opendata_check_deleg(data, state);
2045 	if (!update_open_stateid(state, &data->o_res.stateid,
2046 				NULL, data->o_arg.fmode)) {
2047 		nfs4_put_open_state(state);
2048 		state = ERR_PTR(-EAGAIN);
2049 	}
2050 out:
2051 	nfs_release_seqid(data->o_arg.seqid);
2052 	return state;
2053 }
2054 
2055 static struct nfs4_state *
2056 nfs4_opendata_to_nfs4_state(struct nfs4_opendata *data)
2057 {
2058 	struct nfs4_state *ret;
2059 
2060 	if (data->o_arg.claim == NFS4_OPEN_CLAIM_PREVIOUS)
2061 		ret =_nfs4_opendata_reclaim_to_nfs4_state(data);
2062 	else
2063 		ret = _nfs4_opendata_to_nfs4_state(data);
2064 	nfs4_sequence_free_slot(&data->o_res.seq_res);
2065 	return ret;
2066 }
2067 
2068 static struct nfs_open_context *
2069 nfs4_state_find_open_context_mode(struct nfs4_state *state, fmode_t mode)
2070 {
2071 	struct nfs_inode *nfsi = NFS_I(state->inode);
2072 	struct nfs_open_context *ctx;
2073 
2074 	rcu_read_lock();
2075 	list_for_each_entry_rcu(ctx, &nfsi->open_files, list) {
2076 		if (ctx->state != state)
2077 			continue;
2078 		if ((ctx->mode & mode) != mode)
2079 			continue;
2080 		if (!get_nfs_open_context(ctx))
2081 			continue;
2082 		rcu_read_unlock();
2083 		return ctx;
2084 	}
2085 	rcu_read_unlock();
2086 	return ERR_PTR(-ENOENT);
2087 }
2088 
2089 static struct nfs_open_context *
2090 nfs4_state_find_open_context(struct nfs4_state *state)
2091 {
2092 	struct nfs_open_context *ctx;
2093 
2094 	ctx = nfs4_state_find_open_context_mode(state, FMODE_READ|FMODE_WRITE);
2095 	if (!IS_ERR(ctx))
2096 		return ctx;
2097 	ctx = nfs4_state_find_open_context_mode(state, FMODE_WRITE);
2098 	if (!IS_ERR(ctx))
2099 		return ctx;
2100 	return nfs4_state_find_open_context_mode(state, FMODE_READ);
2101 }
2102 
2103 static struct nfs4_opendata *nfs4_open_recoverdata_alloc(struct nfs_open_context *ctx,
2104 		struct nfs4_state *state, enum open_claim_type4 claim)
2105 {
2106 	struct nfs4_opendata *opendata;
2107 
2108 	opendata = nfs4_opendata_alloc(ctx->dentry, state->owner, 0, 0,
2109 			NULL, claim, GFP_NOFS);
2110 	if (opendata == NULL)
2111 		return ERR_PTR(-ENOMEM);
2112 	opendata->state = state;
2113 	refcount_inc(&state->count);
2114 	return opendata;
2115 }
2116 
2117 static int nfs4_open_recover_helper(struct nfs4_opendata *opendata,
2118 		fmode_t fmode)
2119 {
2120 	struct nfs4_state *newstate;
2121 	int ret;
2122 
2123 	if (!nfs4_mode_match_open_stateid(opendata->state, fmode))
2124 		return 0;
2125 	opendata->o_arg.open_flags = 0;
2126 	opendata->o_arg.fmode = fmode;
2127 	opendata->o_arg.share_access = nfs4_map_atomic_open_share(
2128 			NFS_SB(opendata->dentry->d_sb),
2129 			fmode, 0);
2130 	memset(&opendata->o_res, 0, sizeof(opendata->o_res));
2131 	memset(&opendata->c_res, 0, sizeof(opendata->c_res));
2132 	nfs4_init_opendata_res(opendata);
2133 	ret = _nfs4_recover_proc_open(opendata);
2134 	if (ret != 0)
2135 		return ret;
2136 	newstate = nfs4_opendata_to_nfs4_state(opendata);
2137 	if (IS_ERR(newstate))
2138 		return PTR_ERR(newstate);
2139 	if (newstate != opendata->state)
2140 		ret = -ESTALE;
2141 	nfs4_close_state(newstate, fmode);
2142 	return ret;
2143 }
2144 
2145 static int nfs4_open_recover(struct nfs4_opendata *opendata, struct nfs4_state *state)
2146 {
2147 	int ret;
2148 
2149 	/* memory barrier prior to reading state->n_* */
2150 	smp_rmb();
2151 	ret = nfs4_open_recover_helper(opendata, FMODE_READ|FMODE_WRITE);
2152 	if (ret != 0)
2153 		return ret;
2154 	ret = nfs4_open_recover_helper(opendata, FMODE_WRITE);
2155 	if (ret != 0)
2156 		return ret;
2157 	ret = nfs4_open_recover_helper(opendata, FMODE_READ);
2158 	if (ret != 0)
2159 		return ret;
2160 	/*
2161 	 * We may have performed cached opens for all three recoveries.
2162 	 * Check if we need to update the current stateid.
2163 	 */
2164 	if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0 &&
2165 	    !nfs4_stateid_match(&state->stateid, &state->open_stateid)) {
2166 		write_seqlock(&state->seqlock);
2167 		if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0)
2168 			nfs4_stateid_copy(&state->stateid, &state->open_stateid);
2169 		write_sequnlock(&state->seqlock);
2170 	}
2171 	return 0;
2172 }
2173 
2174 /*
2175  * OPEN_RECLAIM:
2176  * 	reclaim state on the server after a reboot.
2177  */
2178 static int _nfs4_do_open_reclaim(struct nfs_open_context *ctx, struct nfs4_state *state)
2179 {
2180 	struct nfs_delegation *delegation;
2181 	struct nfs4_opendata *opendata;
2182 	fmode_t delegation_type = 0;
2183 	int status;
2184 
2185 	opendata = nfs4_open_recoverdata_alloc(ctx, state,
2186 			NFS4_OPEN_CLAIM_PREVIOUS);
2187 	if (IS_ERR(opendata))
2188 		return PTR_ERR(opendata);
2189 	rcu_read_lock();
2190 	delegation = rcu_dereference(NFS_I(state->inode)->delegation);
2191 	if (delegation != NULL && test_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags) != 0)
2192 		delegation_type = delegation->type;
2193 	rcu_read_unlock();
2194 	opendata->o_arg.u.delegation_type = delegation_type;
2195 	status = nfs4_open_recover(opendata, state);
2196 	nfs4_opendata_put(opendata);
2197 	return status;
2198 }
2199 
2200 static int nfs4_do_open_reclaim(struct nfs_open_context *ctx, struct nfs4_state *state)
2201 {
2202 	struct nfs_server *server = NFS_SERVER(state->inode);
2203 	struct nfs4_exception exception = { };
2204 	int err;
2205 	do {
2206 		err = _nfs4_do_open_reclaim(ctx, state);
2207 		trace_nfs4_open_reclaim(ctx, 0, err);
2208 		if (nfs4_clear_cap_atomic_open_v1(server, err, &exception))
2209 			continue;
2210 		if (err != -NFS4ERR_DELAY)
2211 			break;
2212 		nfs4_handle_exception(server, err, &exception);
2213 	} while (exception.retry);
2214 	return err;
2215 }
2216 
2217 static int nfs4_open_reclaim(struct nfs4_state_owner *sp, struct nfs4_state *state)
2218 {
2219 	struct nfs_open_context *ctx;
2220 	int ret;
2221 
2222 	ctx = nfs4_state_find_open_context(state);
2223 	if (IS_ERR(ctx))
2224 		return -EAGAIN;
2225 	clear_bit(NFS_DELEGATED_STATE, &state->flags);
2226 	nfs_state_clear_open_state_flags(state);
2227 	ret = nfs4_do_open_reclaim(ctx, state);
2228 	put_nfs_open_context(ctx);
2229 	return ret;
2230 }
2231 
2232 static int nfs4_handle_delegation_recall_error(struct nfs_server *server, struct nfs4_state *state, const nfs4_stateid *stateid, struct file_lock *fl, int err)
2233 {
2234 	switch (err) {
2235 		default:
2236 			printk(KERN_ERR "NFS: %s: unhandled error "
2237 					"%d.\n", __func__, err);
2238 		case 0:
2239 		case -ENOENT:
2240 		case -EAGAIN:
2241 		case -ESTALE:
2242 		case -ETIMEDOUT:
2243 			break;
2244 		case -NFS4ERR_BADSESSION:
2245 		case -NFS4ERR_BADSLOT:
2246 		case -NFS4ERR_BAD_HIGH_SLOT:
2247 		case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
2248 		case -NFS4ERR_DEADSESSION:
2249 			return -EAGAIN;
2250 		case -NFS4ERR_STALE_CLIENTID:
2251 		case -NFS4ERR_STALE_STATEID:
2252 			/* Don't recall a delegation if it was lost */
2253 			nfs4_schedule_lease_recovery(server->nfs_client);
2254 			return -EAGAIN;
2255 		case -NFS4ERR_MOVED:
2256 			nfs4_schedule_migration_recovery(server);
2257 			return -EAGAIN;
2258 		case -NFS4ERR_LEASE_MOVED:
2259 			nfs4_schedule_lease_moved_recovery(server->nfs_client);
2260 			return -EAGAIN;
2261 		case -NFS4ERR_DELEG_REVOKED:
2262 		case -NFS4ERR_ADMIN_REVOKED:
2263 		case -NFS4ERR_EXPIRED:
2264 		case -NFS4ERR_BAD_STATEID:
2265 		case -NFS4ERR_OPENMODE:
2266 			nfs_inode_find_state_and_recover(state->inode,
2267 					stateid);
2268 			nfs4_schedule_stateid_recovery(server, state);
2269 			return -EAGAIN;
2270 		case -NFS4ERR_DELAY:
2271 		case -NFS4ERR_GRACE:
2272 			ssleep(1);
2273 			return -EAGAIN;
2274 		case -ENOMEM:
2275 		case -NFS4ERR_DENIED:
2276 			if (fl) {
2277 				struct nfs4_lock_state *lsp = fl->fl_u.nfs4_fl.owner;
2278 				if (lsp)
2279 					set_bit(NFS_LOCK_LOST, &lsp->ls_flags);
2280 			}
2281 			return 0;
2282 	}
2283 	return err;
2284 }
2285 
2286 int nfs4_open_delegation_recall(struct nfs_open_context *ctx,
2287 		struct nfs4_state *state, const nfs4_stateid *stateid)
2288 {
2289 	struct nfs_server *server = NFS_SERVER(state->inode);
2290 	struct nfs4_opendata *opendata;
2291 	int err = 0;
2292 
2293 	opendata = nfs4_open_recoverdata_alloc(ctx, state,
2294 			NFS4_OPEN_CLAIM_DELEG_CUR_FH);
2295 	if (IS_ERR(opendata))
2296 		return PTR_ERR(opendata);
2297 	nfs4_stateid_copy(&opendata->o_arg.u.delegation, stateid);
2298 	if (!test_bit(NFS_O_RDWR_STATE, &state->flags)) {
2299 		err = nfs4_open_recover_helper(opendata, FMODE_READ|FMODE_WRITE);
2300 		if (err)
2301 			goto out;
2302 	}
2303 	if (!test_bit(NFS_O_WRONLY_STATE, &state->flags)) {
2304 		err = nfs4_open_recover_helper(opendata, FMODE_WRITE);
2305 		if (err)
2306 			goto out;
2307 	}
2308 	if (!test_bit(NFS_O_RDONLY_STATE, &state->flags)) {
2309 		err = nfs4_open_recover_helper(opendata, FMODE_READ);
2310 		if (err)
2311 			goto out;
2312 	}
2313 	nfs_state_clear_delegation(state);
2314 out:
2315 	nfs4_opendata_put(opendata);
2316 	return nfs4_handle_delegation_recall_error(server, state, stateid, NULL, err);
2317 }
2318 
2319 static void nfs4_open_confirm_prepare(struct rpc_task *task, void *calldata)
2320 {
2321 	struct nfs4_opendata *data = calldata;
2322 
2323 	nfs4_setup_sequence(data->o_arg.server->nfs_client,
2324 			   &data->c_arg.seq_args, &data->c_res.seq_res, task);
2325 }
2326 
2327 static void nfs4_open_confirm_done(struct rpc_task *task, void *calldata)
2328 {
2329 	struct nfs4_opendata *data = calldata;
2330 
2331 	nfs40_sequence_done(task, &data->c_res.seq_res);
2332 
2333 	data->rpc_status = task->tk_status;
2334 	if (data->rpc_status == 0) {
2335 		nfs4_stateid_copy(&data->o_res.stateid, &data->c_res.stateid);
2336 		nfs_confirm_seqid(&data->owner->so_seqid, 0);
2337 		renew_lease(data->o_res.server, data->timestamp);
2338 		data->rpc_done = true;
2339 	}
2340 }
2341 
2342 static void nfs4_open_confirm_release(void *calldata)
2343 {
2344 	struct nfs4_opendata *data = calldata;
2345 	struct nfs4_state *state = NULL;
2346 
2347 	/* If this request hasn't been cancelled, do nothing */
2348 	if (!data->cancelled)
2349 		goto out_free;
2350 	/* In case of error, no cleanup! */
2351 	if (!data->rpc_done)
2352 		goto out_free;
2353 	state = nfs4_opendata_to_nfs4_state(data);
2354 	if (!IS_ERR(state))
2355 		nfs4_close_state(state, data->o_arg.fmode);
2356 out_free:
2357 	nfs4_opendata_put(data);
2358 }
2359 
2360 static const struct rpc_call_ops nfs4_open_confirm_ops = {
2361 	.rpc_call_prepare = nfs4_open_confirm_prepare,
2362 	.rpc_call_done = nfs4_open_confirm_done,
2363 	.rpc_release = nfs4_open_confirm_release,
2364 };
2365 
2366 /*
2367  * Note: On error, nfs4_proc_open_confirm will free the struct nfs4_opendata
2368  */
2369 static int _nfs4_proc_open_confirm(struct nfs4_opendata *data)
2370 {
2371 	struct nfs_server *server = NFS_SERVER(d_inode(data->dir));
2372 	struct rpc_task *task;
2373 	struct  rpc_message msg = {
2374 		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_CONFIRM],
2375 		.rpc_argp = &data->c_arg,
2376 		.rpc_resp = &data->c_res,
2377 		.rpc_cred = data->owner->so_cred,
2378 	};
2379 	struct rpc_task_setup task_setup_data = {
2380 		.rpc_client = server->client,
2381 		.rpc_message = &msg,
2382 		.callback_ops = &nfs4_open_confirm_ops,
2383 		.callback_data = data,
2384 		.workqueue = nfsiod_workqueue,
2385 		.flags = RPC_TASK_ASYNC | RPC_TASK_CRED_NOREF,
2386 	};
2387 	int status;
2388 
2389 	nfs4_init_sequence(&data->c_arg.seq_args, &data->c_res.seq_res, 1,
2390 				data->is_recover);
2391 	kref_get(&data->kref);
2392 	data->rpc_done = false;
2393 	data->rpc_status = 0;
2394 	data->timestamp = jiffies;
2395 	task = rpc_run_task(&task_setup_data);
2396 	if (IS_ERR(task))
2397 		return PTR_ERR(task);
2398 	status = rpc_wait_for_completion_task(task);
2399 	if (status != 0) {
2400 		data->cancelled = true;
2401 		smp_wmb();
2402 	} else
2403 		status = data->rpc_status;
2404 	rpc_put_task(task);
2405 	return status;
2406 }
2407 
2408 static void nfs4_open_prepare(struct rpc_task *task, void *calldata)
2409 {
2410 	struct nfs4_opendata *data = calldata;
2411 	struct nfs4_state_owner *sp = data->owner;
2412 	struct nfs_client *clp = sp->so_server->nfs_client;
2413 	enum open_claim_type4 claim = data->o_arg.claim;
2414 
2415 	if (nfs_wait_on_sequence(data->o_arg.seqid, task) != 0)
2416 		goto out_wait;
2417 	/*
2418 	 * Check if we still need to send an OPEN call, or if we can use
2419 	 * a delegation instead.
2420 	 */
2421 	if (data->state != NULL) {
2422 		struct nfs_delegation *delegation;
2423 
2424 		if (can_open_cached(data->state, data->o_arg.fmode,
2425 					data->o_arg.open_flags, claim))
2426 			goto out_no_action;
2427 		rcu_read_lock();
2428 		delegation = nfs4_get_valid_delegation(data->state->inode);
2429 		if (can_open_delegated(delegation, data->o_arg.fmode, claim))
2430 			goto unlock_no_action;
2431 		rcu_read_unlock();
2432 	}
2433 	/* Update client id. */
2434 	data->o_arg.clientid = clp->cl_clientid;
2435 	switch (claim) {
2436 	default:
2437 		break;
2438 	case NFS4_OPEN_CLAIM_PREVIOUS:
2439 	case NFS4_OPEN_CLAIM_DELEG_CUR_FH:
2440 	case NFS4_OPEN_CLAIM_DELEG_PREV_FH:
2441 		data->o_arg.open_bitmap = &nfs4_open_noattr_bitmap[0];
2442 		fallthrough;
2443 	case NFS4_OPEN_CLAIM_FH:
2444 		task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_NOATTR];
2445 	}
2446 	data->timestamp = jiffies;
2447 	if (nfs4_setup_sequence(data->o_arg.server->nfs_client,
2448 				&data->o_arg.seq_args,
2449 				&data->o_res.seq_res,
2450 				task) != 0)
2451 		nfs_release_seqid(data->o_arg.seqid);
2452 
2453 	/* Set the create mode (note dependency on the session type) */
2454 	data->o_arg.createmode = NFS4_CREATE_UNCHECKED;
2455 	if (data->o_arg.open_flags & O_EXCL) {
2456 		data->o_arg.createmode = NFS4_CREATE_EXCLUSIVE;
2457 		if (nfs4_has_persistent_session(clp))
2458 			data->o_arg.createmode = NFS4_CREATE_GUARDED;
2459 		else if (clp->cl_mvops->minor_version > 0)
2460 			data->o_arg.createmode = NFS4_CREATE_EXCLUSIVE4_1;
2461 	}
2462 	return;
2463 unlock_no_action:
2464 	trace_nfs4_cached_open(data->state);
2465 	rcu_read_unlock();
2466 out_no_action:
2467 	task->tk_action = NULL;
2468 out_wait:
2469 	nfs4_sequence_done(task, &data->o_res.seq_res);
2470 }
2471 
2472 static void nfs4_open_done(struct rpc_task *task, void *calldata)
2473 {
2474 	struct nfs4_opendata *data = calldata;
2475 
2476 	data->rpc_status = task->tk_status;
2477 
2478 	if (!nfs4_sequence_process(task, &data->o_res.seq_res))
2479 		return;
2480 
2481 	if (task->tk_status == 0) {
2482 		if (data->o_res.f_attr->valid & NFS_ATTR_FATTR_TYPE) {
2483 			switch (data->o_res.f_attr->mode & S_IFMT) {
2484 			case S_IFREG:
2485 				break;
2486 			case S_IFLNK:
2487 				data->rpc_status = -ELOOP;
2488 				break;
2489 			case S_IFDIR:
2490 				data->rpc_status = -EISDIR;
2491 				break;
2492 			default:
2493 				data->rpc_status = -ENOTDIR;
2494 			}
2495 		}
2496 		renew_lease(data->o_res.server, data->timestamp);
2497 		if (!(data->o_res.rflags & NFS4_OPEN_RESULT_CONFIRM))
2498 			nfs_confirm_seqid(&data->owner->so_seqid, 0);
2499 	}
2500 	data->rpc_done = true;
2501 }
2502 
2503 static void nfs4_open_release(void *calldata)
2504 {
2505 	struct nfs4_opendata *data = calldata;
2506 	struct nfs4_state *state = NULL;
2507 
2508 	/* If this request hasn't been cancelled, do nothing */
2509 	if (!data->cancelled)
2510 		goto out_free;
2511 	/* In case of error, no cleanup! */
2512 	if (data->rpc_status != 0 || !data->rpc_done)
2513 		goto out_free;
2514 	/* In case we need an open_confirm, no cleanup! */
2515 	if (data->o_res.rflags & NFS4_OPEN_RESULT_CONFIRM)
2516 		goto out_free;
2517 	state = nfs4_opendata_to_nfs4_state(data);
2518 	if (!IS_ERR(state))
2519 		nfs4_close_state(state, data->o_arg.fmode);
2520 out_free:
2521 	nfs4_opendata_put(data);
2522 }
2523 
2524 static const struct rpc_call_ops nfs4_open_ops = {
2525 	.rpc_call_prepare = nfs4_open_prepare,
2526 	.rpc_call_done = nfs4_open_done,
2527 	.rpc_release = nfs4_open_release,
2528 };
2529 
2530 static int nfs4_run_open_task(struct nfs4_opendata *data,
2531 			      struct nfs_open_context *ctx)
2532 {
2533 	struct inode *dir = d_inode(data->dir);
2534 	struct nfs_server *server = NFS_SERVER(dir);
2535 	struct nfs_openargs *o_arg = &data->o_arg;
2536 	struct nfs_openres *o_res = &data->o_res;
2537 	struct rpc_task *task;
2538 	struct rpc_message msg = {
2539 		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN],
2540 		.rpc_argp = o_arg,
2541 		.rpc_resp = o_res,
2542 		.rpc_cred = data->owner->so_cred,
2543 	};
2544 	struct rpc_task_setup task_setup_data = {
2545 		.rpc_client = server->client,
2546 		.rpc_message = &msg,
2547 		.callback_ops = &nfs4_open_ops,
2548 		.callback_data = data,
2549 		.workqueue = nfsiod_workqueue,
2550 		.flags = RPC_TASK_ASYNC | RPC_TASK_CRED_NOREF,
2551 	};
2552 	int status;
2553 
2554 	kref_get(&data->kref);
2555 	data->rpc_done = false;
2556 	data->rpc_status = 0;
2557 	data->cancelled = false;
2558 	data->is_recover = false;
2559 	if (!ctx) {
2560 		nfs4_init_sequence(&o_arg->seq_args, &o_res->seq_res, 1, 1);
2561 		data->is_recover = true;
2562 		task_setup_data.flags |= RPC_TASK_TIMEOUT;
2563 	} else {
2564 		nfs4_init_sequence(&o_arg->seq_args, &o_res->seq_res, 1, 0);
2565 		pnfs_lgopen_prepare(data, ctx);
2566 	}
2567 	task = rpc_run_task(&task_setup_data);
2568 	if (IS_ERR(task))
2569 		return PTR_ERR(task);
2570 	status = rpc_wait_for_completion_task(task);
2571 	if (status != 0) {
2572 		data->cancelled = true;
2573 		smp_wmb();
2574 	} else
2575 		status = data->rpc_status;
2576 	rpc_put_task(task);
2577 
2578 	return status;
2579 }
2580 
2581 static int _nfs4_recover_proc_open(struct nfs4_opendata *data)
2582 {
2583 	struct inode *dir = d_inode(data->dir);
2584 	struct nfs_openres *o_res = &data->o_res;
2585 	int status;
2586 
2587 	status = nfs4_run_open_task(data, NULL);
2588 	if (status != 0 || !data->rpc_done)
2589 		return status;
2590 
2591 	nfs_fattr_map_and_free_names(NFS_SERVER(dir), &data->f_attr);
2592 
2593 	if (o_res->rflags & NFS4_OPEN_RESULT_CONFIRM)
2594 		status = _nfs4_proc_open_confirm(data);
2595 
2596 	return status;
2597 }
2598 
2599 /*
2600  * Additional permission checks in order to distinguish between an
2601  * open for read, and an open for execute. This works around the
2602  * fact that NFSv4 OPEN treats read and execute permissions as being
2603  * the same.
2604  * Note that in the non-execute case, we want to turn off permission
2605  * checking if we just created a new file (POSIX open() semantics).
2606  */
2607 static int nfs4_opendata_access(const struct cred *cred,
2608 				struct nfs4_opendata *opendata,
2609 				struct nfs4_state *state, fmode_t fmode,
2610 				int openflags)
2611 {
2612 	struct nfs_access_entry cache;
2613 	u32 mask, flags;
2614 
2615 	/* access call failed or for some reason the server doesn't
2616 	 * support any access modes -- defer access call until later */
2617 	if (opendata->o_res.access_supported == 0)
2618 		return 0;
2619 
2620 	mask = 0;
2621 	/*
2622 	 * Use openflags to check for exec, because fmode won't
2623 	 * always have FMODE_EXEC set when file open for exec.
2624 	 */
2625 	if (openflags & __FMODE_EXEC) {
2626 		/* ONLY check for exec rights */
2627 		if (S_ISDIR(state->inode->i_mode))
2628 			mask = NFS4_ACCESS_LOOKUP;
2629 		else
2630 			mask = NFS4_ACCESS_EXECUTE;
2631 	} else if ((fmode & FMODE_READ) && !opendata->file_created)
2632 		mask = NFS4_ACCESS_READ;
2633 
2634 	cache.cred = cred;
2635 	nfs_access_set_mask(&cache, opendata->o_res.access_result);
2636 	nfs_access_add_cache(state->inode, &cache);
2637 
2638 	flags = NFS4_ACCESS_READ | NFS4_ACCESS_EXECUTE | NFS4_ACCESS_LOOKUP;
2639 	if ((mask & ~cache.mask & flags) == 0)
2640 		return 0;
2641 
2642 	return -EACCES;
2643 }
2644 
2645 /*
2646  * Note: On error, nfs4_proc_open will free the struct nfs4_opendata
2647  */
2648 static int _nfs4_proc_open(struct nfs4_opendata *data,
2649 			   struct nfs_open_context *ctx)
2650 {
2651 	struct inode *dir = d_inode(data->dir);
2652 	struct nfs_server *server = NFS_SERVER(dir);
2653 	struct nfs_openargs *o_arg = &data->o_arg;
2654 	struct nfs_openres *o_res = &data->o_res;
2655 	int status;
2656 
2657 	status = nfs4_run_open_task(data, ctx);
2658 	if (!data->rpc_done)
2659 		return status;
2660 	if (status != 0) {
2661 		if (status == -NFS4ERR_BADNAME &&
2662 				!(o_arg->open_flags & O_CREAT))
2663 			return -ENOENT;
2664 		return status;
2665 	}
2666 
2667 	nfs_fattr_map_and_free_names(server, &data->f_attr);
2668 
2669 	if (o_arg->open_flags & O_CREAT) {
2670 		if (o_arg->open_flags & O_EXCL)
2671 			data->file_created = true;
2672 		else if (o_res->cinfo.before != o_res->cinfo.after)
2673 			data->file_created = true;
2674 		if (data->file_created ||
2675 		    inode_peek_iversion_raw(dir) != o_res->cinfo.after)
2676 			nfs4_update_changeattr(dir, &o_res->cinfo,
2677 					o_res->f_attr->time_start,
2678 					NFS_INO_INVALID_DATA);
2679 	}
2680 	if ((o_res->rflags & NFS4_OPEN_RESULT_LOCKTYPE_POSIX) == 0)
2681 		server->caps &= ~NFS_CAP_POSIX_LOCK;
2682 	if(o_res->rflags & NFS4_OPEN_RESULT_CONFIRM) {
2683 		status = _nfs4_proc_open_confirm(data);
2684 		if (status != 0)
2685 			return status;
2686 	}
2687 	if (!(o_res->f_attr->valid & NFS_ATTR_FATTR)) {
2688 		nfs4_sequence_free_slot(&o_res->seq_res);
2689 		nfs4_proc_getattr(server, &o_res->fh, o_res->f_attr,
2690 				o_res->f_label, NULL);
2691 	}
2692 	return 0;
2693 }
2694 
2695 /*
2696  * OPEN_EXPIRED:
2697  * 	reclaim state on the server after a network partition.
2698  * 	Assumes caller holds the appropriate lock
2699  */
2700 static int _nfs4_open_expired(struct nfs_open_context *ctx, struct nfs4_state *state)
2701 {
2702 	struct nfs4_opendata *opendata;
2703 	int ret;
2704 
2705 	opendata = nfs4_open_recoverdata_alloc(ctx, state,
2706 			NFS4_OPEN_CLAIM_FH);
2707 	if (IS_ERR(opendata))
2708 		return PTR_ERR(opendata);
2709 	ret = nfs4_open_recover(opendata, state);
2710 	if (ret == -ESTALE)
2711 		d_drop(ctx->dentry);
2712 	nfs4_opendata_put(opendata);
2713 	return ret;
2714 }
2715 
2716 static int nfs4_do_open_expired(struct nfs_open_context *ctx, struct nfs4_state *state)
2717 {
2718 	struct nfs_server *server = NFS_SERVER(state->inode);
2719 	struct nfs4_exception exception = { };
2720 	int err;
2721 
2722 	do {
2723 		err = _nfs4_open_expired(ctx, state);
2724 		trace_nfs4_open_expired(ctx, 0, err);
2725 		if (nfs4_clear_cap_atomic_open_v1(server, err, &exception))
2726 			continue;
2727 		switch (err) {
2728 		default:
2729 			goto out;
2730 		case -NFS4ERR_GRACE:
2731 		case -NFS4ERR_DELAY:
2732 			nfs4_handle_exception(server, err, &exception);
2733 			err = 0;
2734 		}
2735 	} while (exception.retry);
2736 out:
2737 	return err;
2738 }
2739 
2740 static int nfs4_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *state)
2741 {
2742 	struct nfs_open_context *ctx;
2743 	int ret;
2744 
2745 	ctx = nfs4_state_find_open_context(state);
2746 	if (IS_ERR(ctx))
2747 		return -EAGAIN;
2748 	ret = nfs4_do_open_expired(ctx, state);
2749 	put_nfs_open_context(ctx);
2750 	return ret;
2751 }
2752 
2753 static void nfs_finish_clear_delegation_stateid(struct nfs4_state *state,
2754 		const nfs4_stateid *stateid)
2755 {
2756 	nfs_remove_bad_delegation(state->inode, stateid);
2757 	nfs_state_clear_delegation(state);
2758 }
2759 
2760 static void nfs40_clear_delegation_stateid(struct nfs4_state *state)
2761 {
2762 	if (rcu_access_pointer(NFS_I(state->inode)->delegation) != NULL)
2763 		nfs_finish_clear_delegation_stateid(state, NULL);
2764 }
2765 
2766 static int nfs40_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *state)
2767 {
2768 	/* NFSv4.0 doesn't allow for delegation recovery on open expire */
2769 	nfs40_clear_delegation_stateid(state);
2770 	nfs_state_clear_open_state_flags(state);
2771 	return nfs4_open_expired(sp, state);
2772 }
2773 
2774 static int nfs40_test_and_free_expired_stateid(struct nfs_server *server,
2775 		nfs4_stateid *stateid,
2776 		const struct cred *cred)
2777 {
2778 	return -NFS4ERR_BAD_STATEID;
2779 }
2780 
2781 #if defined(CONFIG_NFS_V4_1)
2782 static int nfs41_test_and_free_expired_stateid(struct nfs_server *server,
2783 		nfs4_stateid *stateid,
2784 		const struct cred *cred)
2785 {
2786 	int status;
2787 
2788 	switch (stateid->type) {
2789 	default:
2790 		break;
2791 	case NFS4_INVALID_STATEID_TYPE:
2792 	case NFS4_SPECIAL_STATEID_TYPE:
2793 		return -NFS4ERR_BAD_STATEID;
2794 	case NFS4_REVOKED_STATEID_TYPE:
2795 		goto out_free;
2796 	}
2797 
2798 	status = nfs41_test_stateid(server, stateid, cred);
2799 	switch (status) {
2800 	case -NFS4ERR_EXPIRED:
2801 	case -NFS4ERR_ADMIN_REVOKED:
2802 	case -NFS4ERR_DELEG_REVOKED:
2803 		break;
2804 	default:
2805 		return status;
2806 	}
2807 out_free:
2808 	/* Ack the revoked state to the server */
2809 	nfs41_free_stateid(server, stateid, cred, true);
2810 	return -NFS4ERR_EXPIRED;
2811 }
2812 
2813 static int nfs41_check_delegation_stateid(struct nfs4_state *state)
2814 {
2815 	struct nfs_server *server = NFS_SERVER(state->inode);
2816 	nfs4_stateid stateid;
2817 	struct nfs_delegation *delegation;
2818 	const struct cred *cred = NULL;
2819 	int status, ret = NFS_OK;
2820 
2821 	/* Get the delegation credential for use by test/free_stateid */
2822 	rcu_read_lock();
2823 	delegation = rcu_dereference(NFS_I(state->inode)->delegation);
2824 	if (delegation == NULL) {
2825 		rcu_read_unlock();
2826 		nfs_state_clear_delegation(state);
2827 		return NFS_OK;
2828 	}
2829 
2830 	spin_lock(&delegation->lock);
2831 	nfs4_stateid_copy(&stateid, &delegation->stateid);
2832 
2833 	if (!test_and_clear_bit(NFS_DELEGATION_TEST_EXPIRED,
2834 				&delegation->flags)) {
2835 		spin_unlock(&delegation->lock);
2836 		rcu_read_unlock();
2837 		return NFS_OK;
2838 	}
2839 
2840 	if (delegation->cred)
2841 		cred = get_cred(delegation->cred);
2842 	spin_unlock(&delegation->lock);
2843 	rcu_read_unlock();
2844 	status = nfs41_test_and_free_expired_stateid(server, &stateid, cred);
2845 	trace_nfs4_test_delegation_stateid(state, NULL, status);
2846 	if (status == -NFS4ERR_EXPIRED || status == -NFS4ERR_BAD_STATEID)
2847 		nfs_finish_clear_delegation_stateid(state, &stateid);
2848 	else
2849 		ret = status;
2850 
2851 	put_cred(cred);
2852 	return ret;
2853 }
2854 
2855 static void nfs41_delegation_recover_stateid(struct nfs4_state *state)
2856 {
2857 	nfs4_stateid tmp;
2858 
2859 	if (test_bit(NFS_DELEGATED_STATE, &state->flags) &&
2860 	    nfs4_copy_delegation_stateid(state->inode, state->state,
2861 				&tmp, NULL) &&
2862 	    nfs4_stateid_match_other(&state->stateid, &tmp))
2863 		nfs_state_set_delegation(state, &tmp, state->state);
2864 	else
2865 		nfs_state_clear_delegation(state);
2866 }
2867 
2868 /**
2869  * nfs41_check_expired_locks - possibly free a lock stateid
2870  *
2871  * @state: NFSv4 state for an inode
2872  *
2873  * Returns NFS_OK if recovery for this stateid is now finished.
2874  * Otherwise a negative NFS4ERR value is returned.
2875  */
2876 static int nfs41_check_expired_locks(struct nfs4_state *state)
2877 {
2878 	int status, ret = NFS_OK;
2879 	struct nfs4_lock_state *lsp, *prev = NULL;
2880 	struct nfs_server *server = NFS_SERVER(state->inode);
2881 
2882 	if (!test_bit(LK_STATE_IN_USE, &state->flags))
2883 		goto out;
2884 
2885 	spin_lock(&state->state_lock);
2886 	list_for_each_entry(lsp, &state->lock_states, ls_locks) {
2887 		if (test_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags)) {
2888 			const struct cred *cred = lsp->ls_state->owner->so_cred;
2889 
2890 			refcount_inc(&lsp->ls_count);
2891 			spin_unlock(&state->state_lock);
2892 
2893 			nfs4_put_lock_state(prev);
2894 			prev = lsp;
2895 
2896 			status = nfs41_test_and_free_expired_stateid(server,
2897 					&lsp->ls_stateid,
2898 					cred);
2899 			trace_nfs4_test_lock_stateid(state, lsp, status);
2900 			if (status == -NFS4ERR_EXPIRED ||
2901 			    status == -NFS4ERR_BAD_STATEID) {
2902 				clear_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags);
2903 				lsp->ls_stateid.type = NFS4_INVALID_STATEID_TYPE;
2904 				if (!recover_lost_locks)
2905 					set_bit(NFS_LOCK_LOST, &lsp->ls_flags);
2906 			} else if (status != NFS_OK) {
2907 				ret = status;
2908 				nfs4_put_lock_state(prev);
2909 				goto out;
2910 			}
2911 			spin_lock(&state->state_lock);
2912 		}
2913 	}
2914 	spin_unlock(&state->state_lock);
2915 	nfs4_put_lock_state(prev);
2916 out:
2917 	return ret;
2918 }
2919 
2920 /**
2921  * nfs41_check_open_stateid - possibly free an open stateid
2922  *
2923  * @state: NFSv4 state for an inode
2924  *
2925  * Returns NFS_OK if recovery for this stateid is now finished.
2926  * Otherwise a negative NFS4ERR value is returned.
2927  */
2928 static int nfs41_check_open_stateid(struct nfs4_state *state)
2929 {
2930 	struct nfs_server *server = NFS_SERVER(state->inode);
2931 	nfs4_stateid *stateid = &state->open_stateid;
2932 	const struct cred *cred = state->owner->so_cred;
2933 	int status;
2934 
2935 	if (test_bit(NFS_OPEN_STATE, &state->flags) == 0)
2936 		return -NFS4ERR_BAD_STATEID;
2937 	status = nfs41_test_and_free_expired_stateid(server, stateid, cred);
2938 	trace_nfs4_test_open_stateid(state, NULL, status);
2939 	if (status == -NFS4ERR_EXPIRED || status == -NFS4ERR_BAD_STATEID) {
2940 		nfs_state_clear_open_state_flags(state);
2941 		stateid->type = NFS4_INVALID_STATEID_TYPE;
2942 		return status;
2943 	}
2944 	if (nfs_open_stateid_recover_openmode(state))
2945 		return -NFS4ERR_OPENMODE;
2946 	return NFS_OK;
2947 }
2948 
2949 static int nfs41_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *state)
2950 {
2951 	int status;
2952 
2953 	status = nfs41_check_delegation_stateid(state);
2954 	if (status != NFS_OK)
2955 		return status;
2956 	nfs41_delegation_recover_stateid(state);
2957 
2958 	status = nfs41_check_expired_locks(state);
2959 	if (status != NFS_OK)
2960 		return status;
2961 	status = nfs41_check_open_stateid(state);
2962 	if (status != NFS_OK)
2963 		status = nfs4_open_expired(sp, state);
2964 	return status;
2965 }
2966 #endif
2967 
2968 /*
2969  * on an EXCLUSIVE create, the server should send back a bitmask with FATTR4-*
2970  * fields corresponding to attributes that were used to store the verifier.
2971  * Make sure we clobber those fields in the later setattr call
2972  */
2973 static unsigned nfs4_exclusive_attrset(struct nfs4_opendata *opendata,
2974 				struct iattr *sattr, struct nfs4_label **label)
2975 {
2976 	const __u32 *bitmask = opendata->o_arg.server->exclcreat_bitmask;
2977 	__u32 attrset[3];
2978 	unsigned ret;
2979 	unsigned i;
2980 
2981 	for (i = 0; i < ARRAY_SIZE(attrset); i++) {
2982 		attrset[i] = opendata->o_res.attrset[i];
2983 		if (opendata->o_arg.createmode == NFS4_CREATE_EXCLUSIVE4_1)
2984 			attrset[i] &= ~bitmask[i];
2985 	}
2986 
2987 	ret = (opendata->o_arg.createmode == NFS4_CREATE_EXCLUSIVE) ?
2988 		sattr->ia_valid : 0;
2989 
2990 	if ((attrset[1] & (FATTR4_WORD1_TIME_ACCESS|FATTR4_WORD1_TIME_ACCESS_SET))) {
2991 		if (sattr->ia_valid & ATTR_ATIME_SET)
2992 			ret |= ATTR_ATIME_SET;
2993 		else
2994 			ret |= ATTR_ATIME;
2995 	}
2996 
2997 	if ((attrset[1] & (FATTR4_WORD1_TIME_MODIFY|FATTR4_WORD1_TIME_MODIFY_SET))) {
2998 		if (sattr->ia_valid & ATTR_MTIME_SET)
2999 			ret |= ATTR_MTIME_SET;
3000 		else
3001 			ret |= ATTR_MTIME;
3002 	}
3003 
3004 	if (!(attrset[2] & FATTR4_WORD2_SECURITY_LABEL))
3005 		*label = NULL;
3006 	return ret;
3007 }
3008 
3009 static int _nfs4_open_and_get_state(struct nfs4_opendata *opendata,
3010 		int flags, struct nfs_open_context *ctx)
3011 {
3012 	struct nfs4_state_owner *sp = opendata->owner;
3013 	struct nfs_server *server = sp->so_server;
3014 	struct dentry *dentry;
3015 	struct nfs4_state *state;
3016 	fmode_t acc_mode = _nfs4_ctx_to_accessmode(ctx);
3017 	struct inode *dir = d_inode(opendata->dir);
3018 	unsigned long dir_verifier;
3019 	unsigned int seq;
3020 	int ret;
3021 
3022 	seq = raw_seqcount_begin(&sp->so_reclaim_seqcount);
3023 	dir_verifier = nfs_save_change_attribute(dir);
3024 
3025 	ret = _nfs4_proc_open(opendata, ctx);
3026 	if (ret != 0)
3027 		goto out;
3028 
3029 	state = _nfs4_opendata_to_nfs4_state(opendata);
3030 	ret = PTR_ERR(state);
3031 	if (IS_ERR(state))
3032 		goto out;
3033 	ctx->state = state;
3034 	if (server->caps & NFS_CAP_POSIX_LOCK)
3035 		set_bit(NFS_STATE_POSIX_LOCKS, &state->flags);
3036 	if (opendata->o_res.rflags & NFS4_OPEN_RESULT_MAY_NOTIFY_LOCK)
3037 		set_bit(NFS_STATE_MAY_NOTIFY_LOCK, &state->flags);
3038 
3039 	dentry = opendata->dentry;
3040 	if (d_really_is_negative(dentry)) {
3041 		struct dentry *alias;
3042 		d_drop(dentry);
3043 		alias = d_exact_alias(dentry, state->inode);
3044 		if (!alias)
3045 			alias = d_splice_alias(igrab(state->inode), dentry);
3046 		/* d_splice_alias() can't fail here - it's a non-directory */
3047 		if (alias) {
3048 			dput(ctx->dentry);
3049 			ctx->dentry = dentry = alias;
3050 		}
3051 	}
3052 
3053 	switch(opendata->o_arg.claim) {
3054 	default:
3055 		break;
3056 	case NFS4_OPEN_CLAIM_NULL:
3057 	case NFS4_OPEN_CLAIM_DELEGATE_CUR:
3058 	case NFS4_OPEN_CLAIM_DELEGATE_PREV:
3059 		if (!opendata->rpc_done)
3060 			break;
3061 		if (opendata->o_res.delegation_type != 0)
3062 			dir_verifier = nfs_save_change_attribute(dir);
3063 		nfs_set_verifier(dentry, dir_verifier);
3064 	}
3065 
3066 	/* Parse layoutget results before we check for access */
3067 	pnfs_parse_lgopen(state->inode, opendata->lgp, ctx);
3068 
3069 	ret = nfs4_opendata_access(sp->so_cred, opendata, state,
3070 			acc_mode, flags);
3071 	if (ret != 0)
3072 		goto out;
3073 
3074 	if (d_inode(dentry) == state->inode) {
3075 		nfs_inode_attach_open_context(ctx);
3076 		if (read_seqcount_retry(&sp->so_reclaim_seqcount, seq))
3077 			nfs4_schedule_stateid_recovery(server, state);
3078 	}
3079 
3080 out:
3081 	if (!opendata->cancelled)
3082 		nfs4_sequence_free_slot(&opendata->o_res.seq_res);
3083 	return ret;
3084 }
3085 
3086 /*
3087  * Returns a referenced nfs4_state
3088  */
3089 static int _nfs4_do_open(struct inode *dir,
3090 			struct nfs_open_context *ctx,
3091 			int flags,
3092 			const struct nfs4_open_createattrs *c,
3093 			int *opened)
3094 {
3095 	struct nfs4_state_owner  *sp;
3096 	struct nfs4_state     *state = NULL;
3097 	struct nfs_server       *server = NFS_SERVER(dir);
3098 	struct nfs4_opendata *opendata;
3099 	struct dentry *dentry = ctx->dentry;
3100 	const struct cred *cred = ctx->cred;
3101 	struct nfs4_threshold **ctx_th = &ctx->mdsthreshold;
3102 	fmode_t fmode = _nfs4_ctx_to_openmode(ctx);
3103 	enum open_claim_type4 claim = NFS4_OPEN_CLAIM_NULL;
3104 	struct iattr *sattr = c->sattr;
3105 	struct nfs4_label *label = c->label;
3106 	struct nfs4_label *olabel = NULL;
3107 	int status;
3108 
3109 	/* Protect against reboot recovery conflicts */
3110 	status = -ENOMEM;
3111 	sp = nfs4_get_state_owner(server, cred, GFP_KERNEL);
3112 	if (sp == NULL) {
3113 		dprintk("nfs4_do_open: nfs4_get_state_owner failed!\n");
3114 		goto out_err;
3115 	}
3116 	status = nfs4_client_recover_expired_lease(server->nfs_client);
3117 	if (status != 0)
3118 		goto err_put_state_owner;
3119 	if (d_really_is_positive(dentry))
3120 		nfs4_return_incompatible_delegation(d_inode(dentry), fmode);
3121 	status = -ENOMEM;
3122 	if (d_really_is_positive(dentry))
3123 		claim = NFS4_OPEN_CLAIM_FH;
3124 	opendata = nfs4_opendata_alloc(dentry, sp, fmode, flags,
3125 			c, claim, GFP_KERNEL);
3126 	if (opendata == NULL)
3127 		goto err_put_state_owner;
3128 
3129 	if (label) {
3130 		olabel = nfs4_label_alloc(server, GFP_KERNEL);
3131 		if (IS_ERR(olabel)) {
3132 			status = PTR_ERR(olabel);
3133 			goto err_opendata_put;
3134 		}
3135 	}
3136 
3137 	if (server->attr_bitmask[2] & FATTR4_WORD2_MDSTHRESHOLD) {
3138 		if (!opendata->f_attr.mdsthreshold) {
3139 			opendata->f_attr.mdsthreshold = pnfs_mdsthreshold_alloc();
3140 			if (!opendata->f_attr.mdsthreshold)
3141 				goto err_free_label;
3142 		}
3143 		opendata->o_arg.open_bitmap = &nfs4_pnfs_open_bitmap[0];
3144 	}
3145 	if (d_really_is_positive(dentry))
3146 		opendata->state = nfs4_get_open_state(d_inode(dentry), sp);
3147 
3148 	status = _nfs4_open_and_get_state(opendata, flags, ctx);
3149 	if (status != 0)
3150 		goto err_free_label;
3151 	state = ctx->state;
3152 
3153 	if ((opendata->o_arg.open_flags & (O_CREAT|O_EXCL)) == (O_CREAT|O_EXCL) &&
3154 	    (opendata->o_arg.createmode != NFS4_CREATE_GUARDED)) {
3155 		unsigned attrs = nfs4_exclusive_attrset(opendata, sattr, &label);
3156 		/*
3157 		 * send create attributes which was not set by open
3158 		 * with an extra setattr.
3159 		 */
3160 		if (attrs || label) {
3161 			unsigned ia_old = sattr->ia_valid;
3162 
3163 			sattr->ia_valid = attrs;
3164 			nfs_fattr_init(opendata->o_res.f_attr);
3165 			status = nfs4_do_setattr(state->inode, cred,
3166 					opendata->o_res.f_attr, sattr,
3167 					ctx, label, olabel);
3168 			if (status == 0) {
3169 				nfs_setattr_update_inode(state->inode, sattr,
3170 						opendata->o_res.f_attr);
3171 				nfs_setsecurity(state->inode, opendata->o_res.f_attr, olabel);
3172 			}
3173 			sattr->ia_valid = ia_old;
3174 		}
3175 	}
3176 	if (opened && opendata->file_created)
3177 		*opened = 1;
3178 
3179 	if (pnfs_use_threshold(ctx_th, opendata->f_attr.mdsthreshold, server)) {
3180 		*ctx_th = opendata->f_attr.mdsthreshold;
3181 		opendata->f_attr.mdsthreshold = NULL;
3182 	}
3183 
3184 	nfs4_label_free(olabel);
3185 
3186 	nfs4_opendata_put(opendata);
3187 	nfs4_put_state_owner(sp);
3188 	return 0;
3189 err_free_label:
3190 	nfs4_label_free(olabel);
3191 err_opendata_put:
3192 	nfs4_opendata_put(opendata);
3193 err_put_state_owner:
3194 	nfs4_put_state_owner(sp);
3195 out_err:
3196 	return status;
3197 }
3198 
3199 
3200 static struct nfs4_state *nfs4_do_open(struct inode *dir,
3201 					struct nfs_open_context *ctx,
3202 					int flags,
3203 					struct iattr *sattr,
3204 					struct nfs4_label *label,
3205 					int *opened)
3206 {
3207 	struct nfs_server *server = NFS_SERVER(dir);
3208 	struct nfs4_exception exception = {
3209 		.interruptible = true,
3210 	};
3211 	struct nfs4_state *res;
3212 	struct nfs4_open_createattrs c = {
3213 		.label = label,
3214 		.sattr = sattr,
3215 		.verf = {
3216 			[0] = (__u32)jiffies,
3217 			[1] = (__u32)current->pid,
3218 		},
3219 	};
3220 	int status;
3221 
3222 	do {
3223 		status = _nfs4_do_open(dir, ctx, flags, &c, opened);
3224 		res = ctx->state;
3225 		trace_nfs4_open_file(ctx, flags, status);
3226 		if (status == 0)
3227 			break;
3228 		/* NOTE: BAD_SEQID means the server and client disagree about the
3229 		 * book-keeping w.r.t. state-changing operations
3230 		 * (OPEN/CLOSE/LOCK/LOCKU...)
3231 		 * It is actually a sign of a bug on the client or on the server.
3232 		 *
3233 		 * If we receive a BAD_SEQID error in the particular case of
3234 		 * doing an OPEN, we assume that nfs_increment_open_seqid() will
3235 		 * have unhashed the old state_owner for us, and that we can
3236 		 * therefore safely retry using a new one. We should still warn
3237 		 * the user though...
3238 		 */
3239 		if (status == -NFS4ERR_BAD_SEQID) {
3240 			pr_warn_ratelimited("NFS: v4 server %s "
3241 					" returned a bad sequence-id error!\n",
3242 					NFS_SERVER(dir)->nfs_client->cl_hostname);
3243 			exception.retry = 1;
3244 			continue;
3245 		}
3246 		/*
3247 		 * BAD_STATEID on OPEN means that the server cancelled our
3248 		 * state before it received the OPEN_CONFIRM.
3249 		 * Recover by retrying the request as per the discussion
3250 		 * on Page 181 of RFC3530.
3251 		 */
3252 		if (status == -NFS4ERR_BAD_STATEID) {
3253 			exception.retry = 1;
3254 			continue;
3255 		}
3256 		if (status == -NFS4ERR_EXPIRED) {
3257 			nfs4_schedule_lease_recovery(server->nfs_client);
3258 			exception.retry = 1;
3259 			continue;
3260 		}
3261 		if (status == -EAGAIN) {
3262 			/* We must have found a delegation */
3263 			exception.retry = 1;
3264 			continue;
3265 		}
3266 		if (nfs4_clear_cap_atomic_open_v1(server, status, &exception))
3267 			continue;
3268 		res = ERR_PTR(nfs4_handle_exception(server,
3269 					status, &exception));
3270 	} while (exception.retry);
3271 	return res;
3272 }
3273 
3274 static int _nfs4_do_setattr(struct inode *inode,
3275 			    struct nfs_setattrargs *arg,
3276 			    struct nfs_setattrres *res,
3277 			    const struct cred *cred,
3278 			    struct nfs_open_context *ctx)
3279 {
3280 	struct nfs_server *server = NFS_SERVER(inode);
3281 	struct rpc_message msg = {
3282 		.rpc_proc	= &nfs4_procedures[NFSPROC4_CLNT_SETATTR],
3283 		.rpc_argp	= arg,
3284 		.rpc_resp	= res,
3285 		.rpc_cred	= cred,
3286 	};
3287 	const struct cred *delegation_cred = NULL;
3288 	unsigned long timestamp = jiffies;
3289 	bool truncate;
3290 	int status;
3291 
3292 	nfs_fattr_init(res->fattr);
3293 
3294 	/* Servers should only apply open mode checks for file size changes */
3295 	truncate = (arg->iap->ia_valid & ATTR_SIZE) ? true : false;
3296 	if (!truncate) {
3297 		nfs4_inode_make_writeable(inode);
3298 		goto zero_stateid;
3299 	}
3300 
3301 	if (nfs4_copy_delegation_stateid(inode, FMODE_WRITE, &arg->stateid, &delegation_cred)) {
3302 		/* Use that stateid */
3303 	} else if (ctx != NULL && ctx->state) {
3304 		struct nfs_lock_context *l_ctx;
3305 		if (!nfs4_valid_open_stateid(ctx->state))
3306 			return -EBADF;
3307 		l_ctx = nfs_get_lock_context(ctx);
3308 		if (IS_ERR(l_ctx))
3309 			return PTR_ERR(l_ctx);
3310 		status = nfs4_select_rw_stateid(ctx->state, FMODE_WRITE, l_ctx,
3311 						&arg->stateid, &delegation_cred);
3312 		nfs_put_lock_context(l_ctx);
3313 		if (status == -EIO)
3314 			return -EBADF;
3315 		else if (status == -EAGAIN)
3316 			goto zero_stateid;
3317 	} else {
3318 zero_stateid:
3319 		nfs4_stateid_copy(&arg->stateid, &zero_stateid);
3320 	}
3321 	if (delegation_cred)
3322 		msg.rpc_cred = delegation_cred;
3323 
3324 	status = nfs4_call_sync(server->client, server, &msg, &arg->seq_args, &res->seq_res, 1);
3325 
3326 	put_cred(delegation_cred);
3327 	if (status == 0 && ctx != NULL)
3328 		renew_lease(server, timestamp);
3329 	trace_nfs4_setattr(inode, &arg->stateid, status);
3330 	return status;
3331 }
3332 
3333 static int nfs4_do_setattr(struct inode *inode, const struct cred *cred,
3334 			   struct nfs_fattr *fattr, struct iattr *sattr,
3335 			   struct nfs_open_context *ctx, struct nfs4_label *ilabel,
3336 			   struct nfs4_label *olabel)
3337 {
3338 	struct nfs_server *server = NFS_SERVER(inode);
3339 	__u32 bitmask[NFS4_BITMASK_SZ];
3340 	struct nfs4_state *state = ctx ? ctx->state : NULL;
3341 	struct nfs_setattrargs	arg = {
3342 		.fh		= NFS_FH(inode),
3343 		.iap		= sattr,
3344 		.server		= server,
3345 		.bitmask = bitmask,
3346 		.label		= ilabel,
3347 	};
3348 	struct nfs_setattrres  res = {
3349 		.fattr		= fattr,
3350 		.label		= olabel,
3351 		.server		= server,
3352 	};
3353 	struct nfs4_exception exception = {
3354 		.state = state,
3355 		.inode = inode,
3356 		.stateid = &arg.stateid,
3357 	};
3358 	int err;
3359 
3360 	do {
3361 		nfs4_bitmap_copy_adjust_setattr(bitmask,
3362 				nfs4_bitmask(server, olabel),
3363 				inode);
3364 
3365 		err = _nfs4_do_setattr(inode, &arg, &res, cred, ctx);
3366 		switch (err) {
3367 		case -NFS4ERR_OPENMODE:
3368 			if (!(sattr->ia_valid & ATTR_SIZE)) {
3369 				pr_warn_once("NFSv4: server %s is incorrectly "
3370 						"applying open mode checks to "
3371 						"a SETATTR that is not "
3372 						"changing file size.\n",
3373 						server->nfs_client->cl_hostname);
3374 			}
3375 			if (state && !(state->state & FMODE_WRITE)) {
3376 				err = -EBADF;
3377 				if (sattr->ia_valid & ATTR_OPEN)
3378 					err = -EACCES;
3379 				goto out;
3380 			}
3381 		}
3382 		err = nfs4_handle_exception(server, err, &exception);
3383 	} while (exception.retry);
3384 out:
3385 	return err;
3386 }
3387 
3388 static bool
3389 nfs4_wait_on_layoutreturn(struct inode *inode, struct rpc_task *task)
3390 {
3391 	if (inode == NULL || !nfs_have_layout(inode))
3392 		return false;
3393 
3394 	return pnfs_wait_on_layoutreturn(inode, task);
3395 }
3396 
3397 /*
3398  * Update the seqid of an open stateid
3399  */
3400 static void nfs4_sync_open_stateid(nfs4_stateid *dst,
3401 		struct nfs4_state *state)
3402 {
3403 	__be32 seqid_open;
3404 	u32 dst_seqid;
3405 	int seq;
3406 
3407 	for (;;) {
3408 		if (!nfs4_valid_open_stateid(state))
3409 			break;
3410 		seq = read_seqbegin(&state->seqlock);
3411 		if (!nfs4_state_match_open_stateid_other(state, dst)) {
3412 			nfs4_stateid_copy(dst, &state->open_stateid);
3413 			if (read_seqretry(&state->seqlock, seq))
3414 				continue;
3415 			break;
3416 		}
3417 		seqid_open = state->open_stateid.seqid;
3418 		if (read_seqretry(&state->seqlock, seq))
3419 			continue;
3420 
3421 		dst_seqid = be32_to_cpu(dst->seqid);
3422 		if ((s32)(dst_seqid - be32_to_cpu(seqid_open)) < 0)
3423 			dst->seqid = seqid_open;
3424 		break;
3425 	}
3426 }
3427 
3428 /*
3429  * Update the seqid of an open stateid after receiving
3430  * NFS4ERR_OLD_STATEID
3431  */
3432 static bool nfs4_refresh_open_old_stateid(nfs4_stateid *dst,
3433 		struct nfs4_state *state)
3434 {
3435 	__be32 seqid_open;
3436 	u32 dst_seqid;
3437 	bool ret;
3438 	int seq;
3439 
3440 	for (;;) {
3441 		ret = false;
3442 		if (!nfs4_valid_open_stateid(state))
3443 			break;
3444 		seq = read_seqbegin(&state->seqlock);
3445 		if (!nfs4_state_match_open_stateid_other(state, dst)) {
3446 			if (read_seqretry(&state->seqlock, seq))
3447 				continue;
3448 			break;
3449 		}
3450 		seqid_open = state->open_stateid.seqid;
3451 		if (read_seqretry(&state->seqlock, seq))
3452 			continue;
3453 
3454 		dst_seqid = be32_to_cpu(dst->seqid);
3455 		if ((s32)(dst_seqid - be32_to_cpu(seqid_open)) >= 0)
3456 			dst->seqid = cpu_to_be32(dst_seqid + 1);
3457 		else
3458 			dst->seqid = seqid_open;
3459 		ret = true;
3460 		break;
3461 	}
3462 
3463 	return ret;
3464 }
3465 
3466 struct nfs4_closedata {
3467 	struct inode *inode;
3468 	struct nfs4_state *state;
3469 	struct nfs_closeargs arg;
3470 	struct nfs_closeres res;
3471 	struct {
3472 		struct nfs4_layoutreturn_args arg;
3473 		struct nfs4_layoutreturn_res res;
3474 		struct nfs4_xdr_opaque_data ld_private;
3475 		u32 roc_barrier;
3476 		bool roc;
3477 	} lr;
3478 	struct nfs_fattr fattr;
3479 	unsigned long timestamp;
3480 };
3481 
3482 static void nfs4_free_closedata(void *data)
3483 {
3484 	struct nfs4_closedata *calldata = data;
3485 	struct nfs4_state_owner *sp = calldata->state->owner;
3486 	struct super_block *sb = calldata->state->inode->i_sb;
3487 
3488 	if (calldata->lr.roc)
3489 		pnfs_roc_release(&calldata->lr.arg, &calldata->lr.res,
3490 				calldata->res.lr_ret);
3491 	nfs4_put_open_state(calldata->state);
3492 	nfs_free_seqid(calldata->arg.seqid);
3493 	nfs4_put_state_owner(sp);
3494 	nfs_sb_deactive(sb);
3495 	kfree(calldata);
3496 }
3497 
3498 static void nfs4_close_done(struct rpc_task *task, void *data)
3499 {
3500 	struct nfs4_closedata *calldata = data;
3501 	struct nfs4_state *state = calldata->state;
3502 	struct nfs_server *server = NFS_SERVER(calldata->inode);
3503 	nfs4_stateid *res_stateid = NULL;
3504 	struct nfs4_exception exception = {
3505 		.state = state,
3506 		.inode = calldata->inode,
3507 		.stateid = &calldata->arg.stateid,
3508 	};
3509 
3510 	dprintk("%s: begin!\n", __func__);
3511 	if (!nfs4_sequence_done(task, &calldata->res.seq_res))
3512 		return;
3513 	trace_nfs4_close(state, &calldata->arg, &calldata->res, task->tk_status);
3514 
3515 	/* Handle Layoutreturn errors */
3516 	if (pnfs_roc_done(task, calldata->inode,
3517 				&calldata->arg.lr_args,
3518 				&calldata->res.lr_res,
3519 				&calldata->res.lr_ret) == -EAGAIN)
3520 		goto out_restart;
3521 
3522 	/* hmm. we are done with the inode, and in the process of freeing
3523 	 * the state_owner. we keep this around to process errors
3524 	 */
3525 	switch (task->tk_status) {
3526 		case 0:
3527 			res_stateid = &calldata->res.stateid;
3528 			renew_lease(server, calldata->timestamp);
3529 			break;
3530 		case -NFS4ERR_ACCESS:
3531 			if (calldata->arg.bitmask != NULL) {
3532 				calldata->arg.bitmask = NULL;
3533 				calldata->res.fattr = NULL;
3534 				goto out_restart;
3535 
3536 			}
3537 			break;
3538 		case -NFS4ERR_OLD_STATEID:
3539 			/* Did we race with OPEN? */
3540 			if (nfs4_refresh_open_old_stateid(&calldata->arg.stateid,
3541 						state))
3542 				goto out_restart;
3543 			goto out_release;
3544 		case -NFS4ERR_ADMIN_REVOKED:
3545 		case -NFS4ERR_STALE_STATEID:
3546 		case -NFS4ERR_EXPIRED:
3547 			nfs4_free_revoked_stateid(server,
3548 					&calldata->arg.stateid,
3549 					task->tk_msg.rpc_cred);
3550 			fallthrough;
3551 		case -NFS4ERR_BAD_STATEID:
3552 			if (calldata->arg.fmode == 0)
3553 				break;
3554 			fallthrough;
3555 		default:
3556 			task->tk_status = nfs4_async_handle_exception(task,
3557 					server, task->tk_status, &exception);
3558 			if (exception.retry)
3559 				goto out_restart;
3560 	}
3561 	nfs_clear_open_stateid(state, &calldata->arg.stateid,
3562 			res_stateid, calldata->arg.fmode);
3563 out_release:
3564 	task->tk_status = 0;
3565 	nfs_release_seqid(calldata->arg.seqid);
3566 	nfs_refresh_inode(calldata->inode, &calldata->fattr);
3567 	dprintk("%s: done, ret = %d!\n", __func__, task->tk_status);
3568 	return;
3569 out_restart:
3570 	task->tk_status = 0;
3571 	rpc_restart_call_prepare(task);
3572 	goto out_release;
3573 }
3574 
3575 static void nfs4_close_prepare(struct rpc_task *task, void *data)
3576 {
3577 	struct nfs4_closedata *calldata = data;
3578 	struct nfs4_state *state = calldata->state;
3579 	struct inode *inode = calldata->inode;
3580 	struct pnfs_layout_hdr *lo;
3581 	bool is_rdonly, is_wronly, is_rdwr;
3582 	int call_close = 0;
3583 
3584 	dprintk("%s: begin!\n", __func__);
3585 	if (nfs_wait_on_sequence(calldata->arg.seqid, task) != 0)
3586 		goto out_wait;
3587 
3588 	task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_DOWNGRADE];
3589 	spin_lock(&state->owner->so_lock);
3590 	is_rdwr = test_bit(NFS_O_RDWR_STATE, &state->flags);
3591 	is_rdonly = test_bit(NFS_O_RDONLY_STATE, &state->flags);
3592 	is_wronly = test_bit(NFS_O_WRONLY_STATE, &state->flags);
3593 	/* Calculate the change in open mode */
3594 	calldata->arg.fmode = 0;
3595 	if (state->n_rdwr == 0) {
3596 		if (state->n_rdonly == 0)
3597 			call_close |= is_rdonly;
3598 		else if (is_rdonly)
3599 			calldata->arg.fmode |= FMODE_READ;
3600 		if (state->n_wronly == 0)
3601 			call_close |= is_wronly;
3602 		else if (is_wronly)
3603 			calldata->arg.fmode |= FMODE_WRITE;
3604 		if (calldata->arg.fmode != (FMODE_READ|FMODE_WRITE))
3605 			call_close |= is_rdwr;
3606 	} else if (is_rdwr)
3607 		calldata->arg.fmode |= FMODE_READ|FMODE_WRITE;
3608 
3609 	nfs4_sync_open_stateid(&calldata->arg.stateid, state);
3610 	if (!nfs4_valid_open_stateid(state))
3611 		call_close = 0;
3612 	spin_unlock(&state->owner->so_lock);
3613 
3614 	if (!call_close) {
3615 		/* Note: exit _without_ calling nfs4_close_done */
3616 		goto out_no_action;
3617 	}
3618 
3619 	if (!calldata->lr.roc && nfs4_wait_on_layoutreturn(inode, task)) {
3620 		nfs_release_seqid(calldata->arg.seqid);
3621 		goto out_wait;
3622 	}
3623 
3624 	lo = calldata->arg.lr_args ? calldata->arg.lr_args->layout : NULL;
3625 	if (lo && !pnfs_layout_is_valid(lo)) {
3626 		calldata->arg.lr_args = NULL;
3627 		calldata->res.lr_res = NULL;
3628 	}
3629 
3630 	if (calldata->arg.fmode == 0)
3631 		task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CLOSE];
3632 
3633 	if (calldata->arg.fmode == 0 || calldata->arg.fmode == FMODE_READ) {
3634 		/* Close-to-open cache consistency revalidation */
3635 		if (!nfs4_have_delegation(inode, FMODE_READ))
3636 			calldata->arg.bitmask = NFS_SERVER(inode)->cache_consistency_bitmask;
3637 		else
3638 			calldata->arg.bitmask = NULL;
3639 	}
3640 
3641 	calldata->arg.share_access =
3642 		nfs4_map_atomic_open_share(NFS_SERVER(inode),
3643 				calldata->arg.fmode, 0);
3644 
3645 	if (calldata->res.fattr == NULL)
3646 		calldata->arg.bitmask = NULL;
3647 	else if (calldata->arg.bitmask == NULL)
3648 		calldata->res.fattr = NULL;
3649 	calldata->timestamp = jiffies;
3650 	if (nfs4_setup_sequence(NFS_SERVER(inode)->nfs_client,
3651 				&calldata->arg.seq_args,
3652 				&calldata->res.seq_res,
3653 				task) != 0)
3654 		nfs_release_seqid(calldata->arg.seqid);
3655 	dprintk("%s: done!\n", __func__);
3656 	return;
3657 out_no_action:
3658 	task->tk_action = NULL;
3659 out_wait:
3660 	nfs4_sequence_done(task, &calldata->res.seq_res);
3661 }
3662 
3663 static const struct rpc_call_ops nfs4_close_ops = {
3664 	.rpc_call_prepare = nfs4_close_prepare,
3665 	.rpc_call_done = nfs4_close_done,
3666 	.rpc_release = nfs4_free_closedata,
3667 };
3668 
3669 /*
3670  * It is possible for data to be read/written from a mem-mapped file
3671  * after the sys_close call (which hits the vfs layer as a flush).
3672  * This means that we can't safely call nfsv4 close on a file until
3673  * the inode is cleared. This in turn means that we are not good
3674  * NFSv4 citizens - we do not indicate to the server to update the file's
3675  * share state even when we are done with one of the three share
3676  * stateid's in the inode.
3677  *
3678  * NOTE: Caller must be holding the sp->so_owner semaphore!
3679  */
3680 int nfs4_do_close(struct nfs4_state *state, gfp_t gfp_mask, int wait)
3681 {
3682 	struct nfs_server *server = NFS_SERVER(state->inode);
3683 	struct nfs_seqid *(*alloc_seqid)(struct nfs_seqid_counter *, gfp_t);
3684 	struct nfs4_closedata *calldata;
3685 	struct nfs4_state_owner *sp = state->owner;
3686 	struct rpc_task *task;
3687 	struct rpc_message msg = {
3688 		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CLOSE],
3689 		.rpc_cred = state->owner->so_cred,
3690 	};
3691 	struct rpc_task_setup task_setup_data = {
3692 		.rpc_client = server->client,
3693 		.rpc_message = &msg,
3694 		.callback_ops = &nfs4_close_ops,
3695 		.workqueue = nfsiod_workqueue,
3696 		.flags = RPC_TASK_ASYNC | RPC_TASK_CRED_NOREF,
3697 	};
3698 	int status = -ENOMEM;
3699 
3700 	nfs4_state_protect(server->nfs_client, NFS_SP4_MACH_CRED_CLEANUP,
3701 		&task_setup_data.rpc_client, &msg);
3702 
3703 	calldata = kzalloc(sizeof(*calldata), gfp_mask);
3704 	if (calldata == NULL)
3705 		goto out;
3706 	nfs4_init_sequence(&calldata->arg.seq_args, &calldata->res.seq_res, 1, 0);
3707 	calldata->inode = state->inode;
3708 	calldata->state = state;
3709 	calldata->arg.fh = NFS_FH(state->inode);
3710 	if (!nfs4_copy_open_stateid(&calldata->arg.stateid, state))
3711 		goto out_free_calldata;
3712 	/* Serialization for the sequence id */
3713 	alloc_seqid = server->nfs_client->cl_mvops->alloc_seqid;
3714 	calldata->arg.seqid = alloc_seqid(&state->owner->so_seqid, gfp_mask);
3715 	if (IS_ERR(calldata->arg.seqid))
3716 		goto out_free_calldata;
3717 	nfs_fattr_init(&calldata->fattr);
3718 	calldata->arg.fmode = 0;
3719 	calldata->lr.arg.ld_private = &calldata->lr.ld_private;
3720 	calldata->res.fattr = &calldata->fattr;
3721 	calldata->res.seqid = calldata->arg.seqid;
3722 	calldata->res.server = server;
3723 	calldata->res.lr_ret = -NFS4ERR_NOMATCHING_LAYOUT;
3724 	calldata->lr.roc = pnfs_roc(state->inode,
3725 			&calldata->lr.arg, &calldata->lr.res, msg.rpc_cred);
3726 	if (calldata->lr.roc) {
3727 		calldata->arg.lr_args = &calldata->lr.arg;
3728 		calldata->res.lr_res = &calldata->lr.res;
3729 	}
3730 	nfs_sb_active(calldata->inode->i_sb);
3731 
3732 	msg.rpc_argp = &calldata->arg;
3733 	msg.rpc_resp = &calldata->res;
3734 	task_setup_data.callback_data = calldata;
3735 	task = rpc_run_task(&task_setup_data);
3736 	if (IS_ERR(task))
3737 		return PTR_ERR(task);
3738 	status = 0;
3739 	if (wait)
3740 		status = rpc_wait_for_completion_task(task);
3741 	rpc_put_task(task);
3742 	return status;
3743 out_free_calldata:
3744 	kfree(calldata);
3745 out:
3746 	nfs4_put_open_state(state);
3747 	nfs4_put_state_owner(sp);
3748 	return status;
3749 }
3750 
3751 static struct inode *
3752 nfs4_atomic_open(struct inode *dir, struct nfs_open_context *ctx,
3753 		int open_flags, struct iattr *attr, int *opened)
3754 {
3755 	struct nfs4_state *state;
3756 	struct nfs4_label l = {0, 0, 0, NULL}, *label = NULL;
3757 
3758 	label = nfs4_label_init_security(dir, ctx->dentry, attr, &l);
3759 
3760 	/* Protect against concurrent sillydeletes */
3761 	state = nfs4_do_open(dir, ctx, open_flags, attr, label, opened);
3762 
3763 	nfs4_label_release_security(label);
3764 
3765 	if (IS_ERR(state))
3766 		return ERR_CAST(state);
3767 	return state->inode;
3768 }
3769 
3770 static void nfs4_close_context(struct nfs_open_context *ctx, int is_sync)
3771 {
3772 	if (ctx->state == NULL)
3773 		return;
3774 	if (is_sync)
3775 		nfs4_close_sync(ctx->state, _nfs4_ctx_to_openmode(ctx));
3776 	else
3777 		nfs4_close_state(ctx->state, _nfs4_ctx_to_openmode(ctx));
3778 }
3779 
3780 #define FATTR4_WORD1_NFS40_MASK (2*FATTR4_WORD1_MOUNTED_ON_FILEID - 1UL)
3781 #define FATTR4_WORD2_NFS41_MASK (2*FATTR4_WORD2_SUPPATTR_EXCLCREAT - 1UL)
3782 #define FATTR4_WORD2_NFS42_MASK (2*FATTR4_WORD2_XATTR_SUPPORT - 1UL)
3783 
3784 static int _nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *fhandle)
3785 {
3786 	u32 bitmask[3] = {}, minorversion = server->nfs_client->cl_minorversion;
3787 	struct nfs4_server_caps_arg args = {
3788 		.fhandle = fhandle,
3789 		.bitmask = bitmask,
3790 	};
3791 	struct nfs4_server_caps_res res = {};
3792 	struct rpc_message msg = {
3793 		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SERVER_CAPS],
3794 		.rpc_argp = &args,
3795 		.rpc_resp = &res,
3796 	};
3797 	int status;
3798 	int i;
3799 
3800 	bitmask[0] = FATTR4_WORD0_SUPPORTED_ATTRS |
3801 		     FATTR4_WORD0_FH_EXPIRE_TYPE |
3802 		     FATTR4_WORD0_LINK_SUPPORT |
3803 		     FATTR4_WORD0_SYMLINK_SUPPORT |
3804 		     FATTR4_WORD0_ACLSUPPORT;
3805 	if (minorversion)
3806 		bitmask[2] = FATTR4_WORD2_SUPPATTR_EXCLCREAT;
3807 
3808 	status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
3809 	if (status == 0) {
3810 		/* Sanity check the server answers */
3811 		switch (minorversion) {
3812 		case 0:
3813 			res.attr_bitmask[1] &= FATTR4_WORD1_NFS40_MASK;
3814 			res.attr_bitmask[2] = 0;
3815 			break;
3816 		case 1:
3817 			res.attr_bitmask[2] &= FATTR4_WORD2_NFS41_MASK;
3818 			break;
3819 		case 2:
3820 			res.attr_bitmask[2] &= FATTR4_WORD2_NFS42_MASK;
3821 		}
3822 		memcpy(server->attr_bitmask, res.attr_bitmask, sizeof(server->attr_bitmask));
3823 		server->caps &= ~(NFS_CAP_ACLS|NFS_CAP_HARDLINKS|
3824 				NFS_CAP_SYMLINKS|NFS_CAP_FILEID|
3825 				NFS_CAP_MODE|NFS_CAP_NLINK|NFS_CAP_OWNER|
3826 				NFS_CAP_OWNER_GROUP|NFS_CAP_ATIME|
3827 				NFS_CAP_CTIME|NFS_CAP_MTIME|
3828 				NFS_CAP_SECURITY_LABEL);
3829 		if (res.attr_bitmask[0] & FATTR4_WORD0_ACL &&
3830 				res.acl_bitmask & ACL4_SUPPORT_ALLOW_ACL)
3831 			server->caps |= NFS_CAP_ACLS;
3832 		if (res.has_links != 0)
3833 			server->caps |= NFS_CAP_HARDLINKS;
3834 		if (res.has_symlinks != 0)
3835 			server->caps |= NFS_CAP_SYMLINKS;
3836 		if (res.attr_bitmask[0] & FATTR4_WORD0_FILEID)
3837 			server->caps |= NFS_CAP_FILEID;
3838 		if (res.attr_bitmask[1] & FATTR4_WORD1_MODE)
3839 			server->caps |= NFS_CAP_MODE;
3840 		if (res.attr_bitmask[1] & FATTR4_WORD1_NUMLINKS)
3841 			server->caps |= NFS_CAP_NLINK;
3842 		if (res.attr_bitmask[1] & FATTR4_WORD1_OWNER)
3843 			server->caps |= NFS_CAP_OWNER;
3844 		if (res.attr_bitmask[1] & FATTR4_WORD1_OWNER_GROUP)
3845 			server->caps |= NFS_CAP_OWNER_GROUP;
3846 		if (res.attr_bitmask[1] & FATTR4_WORD1_TIME_ACCESS)
3847 			server->caps |= NFS_CAP_ATIME;
3848 		if (res.attr_bitmask[1] & FATTR4_WORD1_TIME_METADATA)
3849 			server->caps |= NFS_CAP_CTIME;
3850 		if (res.attr_bitmask[1] & FATTR4_WORD1_TIME_MODIFY)
3851 			server->caps |= NFS_CAP_MTIME;
3852 #ifdef CONFIG_NFS_V4_SECURITY_LABEL
3853 		if (res.attr_bitmask[2] & FATTR4_WORD2_SECURITY_LABEL)
3854 			server->caps |= NFS_CAP_SECURITY_LABEL;
3855 #endif
3856 		memcpy(server->attr_bitmask_nl, res.attr_bitmask,
3857 				sizeof(server->attr_bitmask));
3858 		server->attr_bitmask_nl[2] &= ~FATTR4_WORD2_SECURITY_LABEL;
3859 
3860 		memcpy(server->cache_consistency_bitmask, res.attr_bitmask, sizeof(server->cache_consistency_bitmask));
3861 		server->cache_consistency_bitmask[0] &= FATTR4_WORD0_CHANGE|FATTR4_WORD0_SIZE;
3862 		server->cache_consistency_bitmask[1] &= FATTR4_WORD1_TIME_METADATA|FATTR4_WORD1_TIME_MODIFY;
3863 		server->cache_consistency_bitmask[2] = 0;
3864 
3865 		/* Avoid a regression due to buggy server */
3866 		for (i = 0; i < ARRAY_SIZE(res.exclcreat_bitmask); i++)
3867 			res.exclcreat_bitmask[i] &= res.attr_bitmask[i];
3868 		memcpy(server->exclcreat_bitmask, res.exclcreat_bitmask,
3869 			sizeof(server->exclcreat_bitmask));
3870 
3871 		server->acl_bitmask = res.acl_bitmask;
3872 		server->fh_expire_type = res.fh_expire_type;
3873 	}
3874 
3875 	return status;
3876 }
3877 
3878 int nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *fhandle)
3879 {
3880 	struct nfs4_exception exception = {
3881 		.interruptible = true,
3882 	};
3883 	int err;
3884 	do {
3885 		err = nfs4_handle_exception(server,
3886 				_nfs4_server_capabilities(server, fhandle),
3887 				&exception);
3888 	} while (exception.retry);
3889 	return err;
3890 }
3891 
3892 static int _nfs4_lookup_root(struct nfs_server *server, struct nfs_fh *fhandle,
3893 		struct nfs_fsinfo *info)
3894 {
3895 	u32 bitmask[3];
3896 	struct nfs4_lookup_root_arg args = {
3897 		.bitmask = bitmask,
3898 	};
3899 	struct nfs4_lookup_res res = {
3900 		.server = server,
3901 		.fattr = info->fattr,
3902 		.fh = fhandle,
3903 	};
3904 	struct rpc_message msg = {
3905 		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOOKUP_ROOT],
3906 		.rpc_argp = &args,
3907 		.rpc_resp = &res,
3908 	};
3909 
3910 	bitmask[0] = nfs4_fattr_bitmap[0];
3911 	bitmask[1] = nfs4_fattr_bitmap[1];
3912 	/*
3913 	 * Process the label in the upcoming getfattr
3914 	 */
3915 	bitmask[2] = nfs4_fattr_bitmap[2] & ~FATTR4_WORD2_SECURITY_LABEL;
3916 
3917 	nfs_fattr_init(info->fattr);
3918 	return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
3919 }
3920 
3921 static int nfs4_lookup_root(struct nfs_server *server, struct nfs_fh *fhandle,
3922 		struct nfs_fsinfo *info)
3923 {
3924 	struct nfs4_exception exception = {
3925 		.interruptible = true,
3926 	};
3927 	int err;
3928 	do {
3929 		err = _nfs4_lookup_root(server, fhandle, info);
3930 		trace_nfs4_lookup_root(server, fhandle, info->fattr, err);
3931 		switch (err) {
3932 		case 0:
3933 		case -NFS4ERR_WRONGSEC:
3934 			goto out;
3935 		default:
3936 			err = nfs4_handle_exception(server, err, &exception);
3937 		}
3938 	} while (exception.retry);
3939 out:
3940 	return err;
3941 }
3942 
3943 static int nfs4_lookup_root_sec(struct nfs_server *server, struct nfs_fh *fhandle,
3944 				struct nfs_fsinfo *info, rpc_authflavor_t flavor)
3945 {
3946 	struct rpc_auth_create_args auth_args = {
3947 		.pseudoflavor = flavor,
3948 	};
3949 	struct rpc_auth *auth;
3950 
3951 	auth = rpcauth_create(&auth_args, server->client);
3952 	if (IS_ERR(auth))
3953 		return -EACCES;
3954 	return nfs4_lookup_root(server, fhandle, info);
3955 }
3956 
3957 /*
3958  * Retry pseudoroot lookup with various security flavors.  We do this when:
3959  *
3960  *   NFSv4.0: the PUTROOTFH operation returns NFS4ERR_WRONGSEC
3961  *   NFSv4.1: the server does not support the SECINFO_NO_NAME operation
3962  *
3963  * Returns zero on success, or a negative NFS4ERR value, or a
3964  * negative errno value.
3965  */
3966 static int nfs4_find_root_sec(struct nfs_server *server, struct nfs_fh *fhandle,
3967 			      struct nfs_fsinfo *info)
3968 {
3969 	/* Per 3530bis 15.33.5 */
3970 	static const rpc_authflavor_t flav_array[] = {
3971 		RPC_AUTH_GSS_KRB5P,
3972 		RPC_AUTH_GSS_KRB5I,
3973 		RPC_AUTH_GSS_KRB5,
3974 		RPC_AUTH_UNIX,			/* courtesy */
3975 		RPC_AUTH_NULL,
3976 	};
3977 	int status = -EPERM;
3978 	size_t i;
3979 
3980 	if (server->auth_info.flavor_len > 0) {
3981 		/* try each flavor specified by user */
3982 		for (i = 0; i < server->auth_info.flavor_len; i++) {
3983 			status = nfs4_lookup_root_sec(server, fhandle, info,
3984 						server->auth_info.flavors[i]);
3985 			if (status == -NFS4ERR_WRONGSEC || status == -EACCES)
3986 				continue;
3987 			break;
3988 		}
3989 	} else {
3990 		/* no flavors specified by user, try default list */
3991 		for (i = 0; i < ARRAY_SIZE(flav_array); i++) {
3992 			status = nfs4_lookup_root_sec(server, fhandle, info,
3993 						      flav_array[i]);
3994 			if (status == -NFS4ERR_WRONGSEC || status == -EACCES)
3995 				continue;
3996 			break;
3997 		}
3998 	}
3999 
4000 	/*
4001 	 * -EACCES could mean that the user doesn't have correct permissions
4002 	 * to access the mount.  It could also mean that we tried to mount
4003 	 * with a gss auth flavor, but rpc.gssd isn't running.  Either way,
4004 	 * existing mount programs don't handle -EACCES very well so it should
4005 	 * be mapped to -EPERM instead.
4006 	 */
4007 	if (status == -EACCES)
4008 		status = -EPERM;
4009 	return status;
4010 }
4011 
4012 /**
4013  * nfs4_proc_get_rootfh - get file handle for server's pseudoroot
4014  * @server: initialized nfs_server handle
4015  * @fhandle: we fill in the pseudo-fs root file handle
4016  * @info: we fill in an FSINFO struct
4017  * @auth_probe: probe the auth flavours
4018  *
4019  * Returns zero on success, or a negative errno.
4020  */
4021 int nfs4_proc_get_rootfh(struct nfs_server *server, struct nfs_fh *fhandle,
4022 			 struct nfs_fsinfo *info,
4023 			 bool auth_probe)
4024 {
4025 	int status = 0;
4026 
4027 	if (!auth_probe)
4028 		status = nfs4_lookup_root(server, fhandle, info);
4029 
4030 	if (auth_probe || status == NFS4ERR_WRONGSEC)
4031 		status = server->nfs_client->cl_mvops->find_root_sec(server,
4032 				fhandle, info);
4033 
4034 	if (status == 0)
4035 		status = nfs4_server_capabilities(server, fhandle);
4036 	if (status == 0)
4037 		status = nfs4_do_fsinfo(server, fhandle, info);
4038 
4039 	return nfs4_map_errors(status);
4040 }
4041 
4042 static int nfs4_proc_get_root(struct nfs_server *server, struct nfs_fh *mntfh,
4043 			      struct nfs_fsinfo *info)
4044 {
4045 	int error;
4046 	struct nfs_fattr *fattr = info->fattr;
4047 	struct nfs4_label *label = fattr->label;
4048 
4049 	error = nfs4_server_capabilities(server, mntfh);
4050 	if (error < 0) {
4051 		dprintk("nfs4_get_root: getcaps error = %d\n", -error);
4052 		return error;
4053 	}
4054 
4055 	error = nfs4_proc_getattr(server, mntfh, fattr, label, NULL);
4056 	if (error < 0) {
4057 		dprintk("nfs4_get_root: getattr error = %d\n", -error);
4058 		goto out;
4059 	}
4060 
4061 	if (fattr->valid & NFS_ATTR_FATTR_FSID &&
4062 	    !nfs_fsid_equal(&server->fsid, &fattr->fsid))
4063 		memcpy(&server->fsid, &fattr->fsid, sizeof(server->fsid));
4064 
4065 out:
4066 	return error;
4067 }
4068 
4069 /*
4070  * Get locations and (maybe) other attributes of a referral.
4071  * Note that we'll actually follow the referral later when
4072  * we detect fsid mismatch in inode revalidation
4073  */
4074 static int nfs4_get_referral(struct rpc_clnt *client, struct inode *dir,
4075 			     const struct qstr *name, struct nfs_fattr *fattr,
4076 			     struct nfs_fh *fhandle)
4077 {
4078 	int status = -ENOMEM;
4079 	struct page *page = NULL;
4080 	struct nfs4_fs_locations *locations = NULL;
4081 
4082 	page = alloc_page(GFP_KERNEL);
4083 	if (page == NULL)
4084 		goto out;
4085 	locations = kmalloc(sizeof(struct nfs4_fs_locations), GFP_KERNEL);
4086 	if (locations == NULL)
4087 		goto out;
4088 
4089 	status = nfs4_proc_fs_locations(client, dir, name, locations, page);
4090 	if (status != 0)
4091 		goto out;
4092 
4093 	/*
4094 	 * If the fsid didn't change, this is a migration event, not a
4095 	 * referral.  Cause us to drop into the exception handler, which
4096 	 * will kick off migration recovery.
4097 	 */
4098 	if (nfs_fsid_equal(&NFS_SERVER(dir)->fsid, &locations->fattr.fsid)) {
4099 		dprintk("%s: server did not return a different fsid for"
4100 			" a referral at %s\n", __func__, name->name);
4101 		status = -NFS4ERR_MOVED;
4102 		goto out;
4103 	}
4104 	/* Fixup attributes for the nfs_lookup() call to nfs_fhget() */
4105 	nfs_fixup_referral_attributes(&locations->fattr);
4106 
4107 	/* replace the lookup nfs_fattr with the locations nfs_fattr */
4108 	memcpy(fattr, &locations->fattr, sizeof(struct nfs_fattr));
4109 	memset(fhandle, 0, sizeof(struct nfs_fh));
4110 out:
4111 	if (page)
4112 		__free_page(page);
4113 	kfree(locations);
4114 	return status;
4115 }
4116 
4117 static int _nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle,
4118 				struct nfs_fattr *fattr, struct nfs4_label *label,
4119 				struct inode *inode)
4120 {
4121 	__u32 bitmask[NFS4_BITMASK_SZ];
4122 	struct nfs4_getattr_arg args = {
4123 		.fh = fhandle,
4124 		.bitmask = bitmask,
4125 	};
4126 	struct nfs4_getattr_res res = {
4127 		.fattr = fattr,
4128 		.label = label,
4129 		.server = server,
4130 	};
4131 	struct rpc_message msg = {
4132 		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETATTR],
4133 		.rpc_argp = &args,
4134 		.rpc_resp = &res,
4135 	};
4136 	unsigned short task_flags = 0;
4137 
4138 	/* Is this is an attribute revalidation, subject to softreval? */
4139 	if (inode && (server->flags & NFS_MOUNT_SOFTREVAL))
4140 		task_flags |= RPC_TASK_TIMEOUT;
4141 
4142 	nfs4_bitmap_copy_adjust(bitmask, nfs4_bitmask(server, label), inode);
4143 
4144 	nfs_fattr_init(fattr);
4145 	nfs4_init_sequence(&args.seq_args, &res.seq_res, 0, 0);
4146 	return nfs4_do_call_sync(server->client, server, &msg,
4147 			&args.seq_args, &res.seq_res, task_flags);
4148 }
4149 
4150 int nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle,
4151 				struct nfs_fattr *fattr, struct nfs4_label *label,
4152 				struct inode *inode)
4153 {
4154 	struct nfs4_exception exception = {
4155 		.interruptible = true,
4156 	};
4157 	int err;
4158 	do {
4159 		err = _nfs4_proc_getattr(server, fhandle, fattr, label, inode);
4160 		trace_nfs4_getattr(server, fhandle, fattr, err);
4161 		err = nfs4_handle_exception(server, err,
4162 				&exception);
4163 	} while (exception.retry);
4164 	return err;
4165 }
4166 
4167 /*
4168  * The file is not closed if it is opened due to the a request to change
4169  * the size of the file. The open call will not be needed once the
4170  * VFS layer lookup-intents are implemented.
4171  *
4172  * Close is called when the inode is destroyed.
4173  * If we haven't opened the file for O_WRONLY, we
4174  * need to in the size_change case to obtain a stateid.
4175  *
4176  * Got race?
4177  * Because OPEN is always done by name in nfsv4, it is
4178  * possible that we opened a different file by the same
4179  * name.  We can recognize this race condition, but we
4180  * can't do anything about it besides returning an error.
4181  *
4182  * This will be fixed with VFS changes (lookup-intent).
4183  */
4184 static int
4185 nfs4_proc_setattr(struct dentry *dentry, struct nfs_fattr *fattr,
4186 		  struct iattr *sattr)
4187 {
4188 	struct inode *inode = d_inode(dentry);
4189 	const struct cred *cred = NULL;
4190 	struct nfs_open_context *ctx = NULL;
4191 	struct nfs4_label *label = NULL;
4192 	int status;
4193 
4194 	if (pnfs_ld_layoutret_on_setattr(inode) &&
4195 	    sattr->ia_valid & ATTR_SIZE &&
4196 	    sattr->ia_size < i_size_read(inode))
4197 		pnfs_commit_and_return_layout(inode);
4198 
4199 	nfs_fattr_init(fattr);
4200 
4201 	/* Deal with open(O_TRUNC) */
4202 	if (sattr->ia_valid & ATTR_OPEN)
4203 		sattr->ia_valid &= ~(ATTR_MTIME|ATTR_CTIME);
4204 
4205 	/* Optimization: if the end result is no change, don't RPC */
4206 	if ((sattr->ia_valid & ~(ATTR_FILE|ATTR_OPEN)) == 0)
4207 		return 0;
4208 
4209 	/* Search for an existing open(O_WRITE) file */
4210 	if (sattr->ia_valid & ATTR_FILE) {
4211 
4212 		ctx = nfs_file_open_context(sattr->ia_file);
4213 		if (ctx)
4214 			cred = ctx->cred;
4215 	}
4216 
4217 	label = nfs4_label_alloc(NFS_SERVER(inode), GFP_KERNEL);
4218 	if (IS_ERR(label))
4219 		return PTR_ERR(label);
4220 
4221 	/* Return any delegations if we're going to change ACLs */
4222 	if ((sattr->ia_valid & (ATTR_MODE|ATTR_UID|ATTR_GID)) != 0)
4223 		nfs4_inode_make_writeable(inode);
4224 
4225 	status = nfs4_do_setattr(inode, cred, fattr, sattr, ctx, NULL, label);
4226 	if (status == 0) {
4227 		nfs_setattr_update_inode(inode, sattr, fattr);
4228 		nfs_setsecurity(inode, fattr, label);
4229 	}
4230 	nfs4_label_free(label);
4231 	return status;
4232 }
4233 
4234 static int _nfs4_proc_lookup(struct rpc_clnt *clnt, struct inode *dir,
4235 		struct dentry *dentry, struct nfs_fh *fhandle,
4236 		struct nfs_fattr *fattr, struct nfs4_label *label)
4237 {
4238 	struct nfs_server *server = NFS_SERVER(dir);
4239 	int		       status;
4240 	struct nfs4_lookup_arg args = {
4241 		.bitmask = server->attr_bitmask,
4242 		.dir_fh = NFS_FH(dir),
4243 		.name = &dentry->d_name,
4244 	};
4245 	struct nfs4_lookup_res res = {
4246 		.server = server,
4247 		.fattr = fattr,
4248 		.label = label,
4249 		.fh = fhandle,
4250 	};
4251 	struct rpc_message msg = {
4252 		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOOKUP],
4253 		.rpc_argp = &args,
4254 		.rpc_resp = &res,
4255 	};
4256 	unsigned short task_flags = 0;
4257 
4258 	/* Is this is an attribute revalidation, subject to softreval? */
4259 	if (nfs_lookup_is_soft_revalidate(dentry))
4260 		task_flags |= RPC_TASK_TIMEOUT;
4261 
4262 	args.bitmask = nfs4_bitmask(server, label);
4263 
4264 	nfs_fattr_init(fattr);
4265 
4266 	dprintk("NFS call  lookup %pd2\n", dentry);
4267 	nfs4_init_sequence(&args.seq_args, &res.seq_res, 0, 0);
4268 	status = nfs4_do_call_sync(clnt, server, &msg,
4269 			&args.seq_args, &res.seq_res, task_flags);
4270 	dprintk("NFS reply lookup: %d\n", status);
4271 	return status;
4272 }
4273 
4274 static void nfs_fixup_secinfo_attributes(struct nfs_fattr *fattr)
4275 {
4276 	fattr->valid |= NFS_ATTR_FATTR_TYPE | NFS_ATTR_FATTR_MODE |
4277 		NFS_ATTR_FATTR_NLINK | NFS_ATTR_FATTR_MOUNTPOINT;
4278 	fattr->mode = S_IFDIR | S_IRUGO | S_IXUGO;
4279 	fattr->nlink = 2;
4280 }
4281 
4282 static int nfs4_proc_lookup_common(struct rpc_clnt **clnt, struct inode *dir,
4283 				   struct dentry *dentry, struct nfs_fh *fhandle,
4284 				   struct nfs_fattr *fattr, struct nfs4_label *label)
4285 {
4286 	struct nfs4_exception exception = {
4287 		.interruptible = true,
4288 	};
4289 	struct rpc_clnt *client = *clnt;
4290 	const struct qstr *name = &dentry->d_name;
4291 	int err;
4292 	do {
4293 		err = _nfs4_proc_lookup(client, dir, dentry, fhandle, fattr, label);
4294 		trace_nfs4_lookup(dir, name, err);
4295 		switch (err) {
4296 		case -NFS4ERR_BADNAME:
4297 			err = -ENOENT;
4298 			goto out;
4299 		case -NFS4ERR_MOVED:
4300 			err = nfs4_get_referral(client, dir, name, fattr, fhandle);
4301 			if (err == -NFS4ERR_MOVED)
4302 				err = nfs4_handle_exception(NFS_SERVER(dir), err, &exception);
4303 			goto out;
4304 		case -NFS4ERR_WRONGSEC:
4305 			err = -EPERM;
4306 			if (client != *clnt)
4307 				goto out;
4308 			client = nfs4_negotiate_security(client, dir, name);
4309 			if (IS_ERR(client))
4310 				return PTR_ERR(client);
4311 
4312 			exception.retry = 1;
4313 			break;
4314 		default:
4315 			err = nfs4_handle_exception(NFS_SERVER(dir), err, &exception);
4316 		}
4317 	} while (exception.retry);
4318 
4319 out:
4320 	if (err == 0)
4321 		*clnt = client;
4322 	else if (client != *clnt)
4323 		rpc_shutdown_client(client);
4324 
4325 	return err;
4326 }
4327 
4328 static int nfs4_proc_lookup(struct inode *dir, struct dentry *dentry,
4329 			    struct nfs_fh *fhandle, struct nfs_fattr *fattr,
4330 			    struct nfs4_label *label)
4331 {
4332 	int status;
4333 	struct rpc_clnt *client = NFS_CLIENT(dir);
4334 
4335 	status = nfs4_proc_lookup_common(&client, dir, dentry, fhandle, fattr, label);
4336 	if (client != NFS_CLIENT(dir)) {
4337 		rpc_shutdown_client(client);
4338 		nfs_fixup_secinfo_attributes(fattr);
4339 	}
4340 	return status;
4341 }
4342 
4343 struct rpc_clnt *
4344 nfs4_proc_lookup_mountpoint(struct inode *dir, struct dentry *dentry,
4345 			    struct nfs_fh *fhandle, struct nfs_fattr *fattr)
4346 {
4347 	struct rpc_clnt *client = NFS_CLIENT(dir);
4348 	int status;
4349 
4350 	status = nfs4_proc_lookup_common(&client, dir, dentry, fhandle, fattr, NULL);
4351 	if (status < 0)
4352 		return ERR_PTR(status);
4353 	return (client == NFS_CLIENT(dir)) ? rpc_clone_client(client) : client;
4354 }
4355 
4356 static int _nfs4_proc_lookupp(struct inode *inode,
4357 		struct nfs_fh *fhandle, struct nfs_fattr *fattr,
4358 		struct nfs4_label *label)
4359 {
4360 	struct rpc_clnt *clnt = NFS_CLIENT(inode);
4361 	struct nfs_server *server = NFS_SERVER(inode);
4362 	int		       status;
4363 	struct nfs4_lookupp_arg args = {
4364 		.bitmask = server->attr_bitmask,
4365 		.fh = NFS_FH(inode),
4366 	};
4367 	struct nfs4_lookupp_res res = {
4368 		.server = server,
4369 		.fattr = fattr,
4370 		.label = label,
4371 		.fh = fhandle,
4372 	};
4373 	struct rpc_message msg = {
4374 		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOOKUPP],
4375 		.rpc_argp = &args,
4376 		.rpc_resp = &res,
4377 	};
4378 
4379 	args.bitmask = nfs4_bitmask(server, label);
4380 
4381 	nfs_fattr_init(fattr);
4382 
4383 	dprintk("NFS call  lookupp ino=0x%lx\n", inode->i_ino);
4384 	status = nfs4_call_sync(clnt, server, &msg, &args.seq_args,
4385 				&res.seq_res, 0);
4386 	dprintk("NFS reply lookupp: %d\n", status);
4387 	return status;
4388 }
4389 
4390 static int nfs4_proc_lookupp(struct inode *inode, struct nfs_fh *fhandle,
4391 			     struct nfs_fattr *fattr, struct nfs4_label *label)
4392 {
4393 	struct nfs4_exception exception = {
4394 		.interruptible = true,
4395 	};
4396 	int err;
4397 	do {
4398 		err = _nfs4_proc_lookupp(inode, fhandle, fattr, label);
4399 		trace_nfs4_lookupp(inode, err);
4400 		err = nfs4_handle_exception(NFS_SERVER(inode), err,
4401 				&exception);
4402 	} while (exception.retry);
4403 	return err;
4404 }
4405 
4406 static int _nfs4_proc_access(struct inode *inode, struct nfs_access_entry *entry)
4407 {
4408 	struct nfs_server *server = NFS_SERVER(inode);
4409 	struct nfs4_accessargs args = {
4410 		.fh = NFS_FH(inode),
4411 		.access = entry->mask,
4412 	};
4413 	struct nfs4_accessres res = {
4414 		.server = server,
4415 	};
4416 	struct rpc_message msg = {
4417 		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_ACCESS],
4418 		.rpc_argp = &args,
4419 		.rpc_resp = &res,
4420 		.rpc_cred = entry->cred,
4421 	};
4422 	int status = 0;
4423 
4424 	if (!nfs4_have_delegation(inode, FMODE_READ)) {
4425 		res.fattr = nfs_alloc_fattr();
4426 		if (res.fattr == NULL)
4427 			return -ENOMEM;
4428 		args.bitmask = server->cache_consistency_bitmask;
4429 	}
4430 	status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
4431 	if (!status) {
4432 		nfs_access_set_mask(entry, res.access);
4433 		if (res.fattr)
4434 			nfs_refresh_inode(inode, res.fattr);
4435 	}
4436 	nfs_free_fattr(res.fattr);
4437 	return status;
4438 }
4439 
4440 static int nfs4_proc_access(struct inode *inode, struct nfs_access_entry *entry)
4441 {
4442 	struct nfs4_exception exception = {
4443 		.interruptible = true,
4444 	};
4445 	int err;
4446 	do {
4447 		err = _nfs4_proc_access(inode, entry);
4448 		trace_nfs4_access(inode, err);
4449 		err = nfs4_handle_exception(NFS_SERVER(inode), err,
4450 				&exception);
4451 	} while (exception.retry);
4452 	return err;
4453 }
4454 
4455 /*
4456  * TODO: For the time being, we don't try to get any attributes
4457  * along with any of the zero-copy operations READ, READDIR,
4458  * READLINK, WRITE.
4459  *
4460  * In the case of the first three, we want to put the GETATTR
4461  * after the read-type operation -- this is because it is hard
4462  * to predict the length of a GETATTR response in v4, and thus
4463  * align the READ data correctly.  This means that the GETATTR
4464  * may end up partially falling into the page cache, and we should
4465  * shift it into the 'tail' of the xdr_buf before processing.
4466  * To do this efficiently, we need to know the total length
4467  * of data received, which doesn't seem to be available outside
4468  * of the RPC layer.
4469  *
4470  * In the case of WRITE, we also want to put the GETATTR after
4471  * the operation -- in this case because we want to make sure
4472  * we get the post-operation mtime and size.
4473  *
4474  * Both of these changes to the XDR layer would in fact be quite
4475  * minor, but I decided to leave them for a subsequent patch.
4476  */
4477 static int _nfs4_proc_readlink(struct inode *inode, struct page *page,
4478 		unsigned int pgbase, unsigned int pglen)
4479 {
4480 	struct nfs4_readlink args = {
4481 		.fh       = NFS_FH(inode),
4482 		.pgbase	  = pgbase,
4483 		.pglen    = pglen,
4484 		.pages    = &page,
4485 	};
4486 	struct nfs4_readlink_res res;
4487 	struct rpc_message msg = {
4488 		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READLINK],
4489 		.rpc_argp = &args,
4490 		.rpc_resp = &res,
4491 	};
4492 
4493 	return nfs4_call_sync(NFS_SERVER(inode)->client, NFS_SERVER(inode), &msg, &args.seq_args, &res.seq_res, 0);
4494 }
4495 
4496 static int nfs4_proc_readlink(struct inode *inode, struct page *page,
4497 		unsigned int pgbase, unsigned int pglen)
4498 {
4499 	struct nfs4_exception exception = {
4500 		.interruptible = true,
4501 	};
4502 	int err;
4503 	do {
4504 		err = _nfs4_proc_readlink(inode, page, pgbase, pglen);
4505 		trace_nfs4_readlink(inode, err);
4506 		err = nfs4_handle_exception(NFS_SERVER(inode), err,
4507 				&exception);
4508 	} while (exception.retry);
4509 	return err;
4510 }
4511 
4512 /*
4513  * This is just for mknod.  open(O_CREAT) will always do ->open_context().
4514  */
4515 static int
4516 nfs4_proc_create(struct inode *dir, struct dentry *dentry, struct iattr *sattr,
4517 		 int flags)
4518 {
4519 	struct nfs_server *server = NFS_SERVER(dir);
4520 	struct nfs4_label l, *ilabel = NULL;
4521 	struct nfs_open_context *ctx;
4522 	struct nfs4_state *state;
4523 	int status = 0;
4524 
4525 	ctx = alloc_nfs_open_context(dentry, FMODE_READ, NULL);
4526 	if (IS_ERR(ctx))
4527 		return PTR_ERR(ctx);
4528 
4529 	ilabel = nfs4_label_init_security(dir, dentry, sattr, &l);
4530 
4531 	if (!(server->attr_bitmask[2] & FATTR4_WORD2_MODE_UMASK))
4532 		sattr->ia_mode &= ~current_umask();
4533 	state = nfs4_do_open(dir, ctx, flags, sattr, ilabel, NULL);
4534 	if (IS_ERR(state)) {
4535 		status = PTR_ERR(state);
4536 		goto out;
4537 	}
4538 out:
4539 	nfs4_label_release_security(ilabel);
4540 	put_nfs_open_context(ctx);
4541 	return status;
4542 }
4543 
4544 static int
4545 _nfs4_proc_remove(struct inode *dir, const struct qstr *name, u32 ftype)
4546 {
4547 	struct nfs_server *server = NFS_SERVER(dir);
4548 	struct nfs_removeargs args = {
4549 		.fh = NFS_FH(dir),
4550 		.name = *name,
4551 	};
4552 	struct nfs_removeres res = {
4553 		.server = server,
4554 	};
4555 	struct rpc_message msg = {
4556 		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_REMOVE],
4557 		.rpc_argp = &args,
4558 		.rpc_resp = &res,
4559 	};
4560 	unsigned long timestamp = jiffies;
4561 	int status;
4562 
4563 	status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 1);
4564 	if (status == 0) {
4565 		spin_lock(&dir->i_lock);
4566 		nfs4_update_changeattr_locked(dir, &res.cinfo, timestamp,
4567 					      NFS_INO_INVALID_DATA);
4568 		/* Removing a directory decrements nlink in the parent */
4569 		if (ftype == NF4DIR && dir->i_nlink > 2)
4570 			nfs4_dec_nlink_locked(dir);
4571 		spin_unlock(&dir->i_lock);
4572 	}
4573 	return status;
4574 }
4575 
4576 static int nfs4_proc_remove(struct inode *dir, struct dentry *dentry)
4577 {
4578 	struct nfs4_exception exception = {
4579 		.interruptible = true,
4580 	};
4581 	struct inode *inode = d_inode(dentry);
4582 	int err;
4583 
4584 	if (inode) {
4585 		if (inode->i_nlink == 1)
4586 			nfs4_inode_return_delegation(inode);
4587 		else
4588 			nfs4_inode_make_writeable(inode);
4589 	}
4590 	do {
4591 		err = _nfs4_proc_remove(dir, &dentry->d_name, NF4REG);
4592 		trace_nfs4_remove(dir, &dentry->d_name, err);
4593 		err = nfs4_handle_exception(NFS_SERVER(dir), err,
4594 				&exception);
4595 	} while (exception.retry);
4596 	return err;
4597 }
4598 
4599 static int nfs4_proc_rmdir(struct inode *dir, const struct qstr *name)
4600 {
4601 	struct nfs4_exception exception = {
4602 		.interruptible = true,
4603 	};
4604 	int err;
4605 
4606 	do {
4607 		err = _nfs4_proc_remove(dir, name, NF4DIR);
4608 		trace_nfs4_remove(dir, name, err);
4609 		err = nfs4_handle_exception(NFS_SERVER(dir), err,
4610 				&exception);
4611 	} while (exception.retry);
4612 	return err;
4613 }
4614 
4615 static void nfs4_proc_unlink_setup(struct rpc_message *msg,
4616 		struct dentry *dentry,
4617 		struct inode *inode)
4618 {
4619 	struct nfs_removeargs *args = msg->rpc_argp;
4620 	struct nfs_removeres *res = msg->rpc_resp;
4621 
4622 	res->server = NFS_SB(dentry->d_sb);
4623 	msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_REMOVE];
4624 	nfs4_init_sequence(&args->seq_args, &res->seq_res, 1, 0);
4625 
4626 	nfs_fattr_init(res->dir_attr);
4627 
4628 	if (inode)
4629 		nfs4_inode_return_delegation(inode);
4630 }
4631 
4632 static void nfs4_proc_unlink_rpc_prepare(struct rpc_task *task, struct nfs_unlinkdata *data)
4633 {
4634 	nfs4_setup_sequence(NFS_SB(data->dentry->d_sb)->nfs_client,
4635 			&data->args.seq_args,
4636 			&data->res.seq_res,
4637 			task);
4638 }
4639 
4640 static int nfs4_proc_unlink_done(struct rpc_task *task, struct inode *dir)
4641 {
4642 	struct nfs_unlinkdata *data = task->tk_calldata;
4643 	struct nfs_removeres *res = &data->res;
4644 
4645 	if (!nfs4_sequence_done(task, &res->seq_res))
4646 		return 0;
4647 	if (nfs4_async_handle_error(task, res->server, NULL,
4648 				    &data->timeout) == -EAGAIN)
4649 		return 0;
4650 	if (task->tk_status == 0)
4651 		nfs4_update_changeattr(dir, &res->cinfo,
4652 				res->dir_attr->time_start,
4653 				NFS_INO_INVALID_DATA);
4654 	return 1;
4655 }
4656 
4657 static void nfs4_proc_rename_setup(struct rpc_message *msg,
4658 		struct dentry *old_dentry,
4659 		struct dentry *new_dentry)
4660 {
4661 	struct nfs_renameargs *arg = msg->rpc_argp;
4662 	struct nfs_renameres *res = msg->rpc_resp;
4663 	struct inode *old_inode = d_inode(old_dentry);
4664 	struct inode *new_inode = d_inode(new_dentry);
4665 
4666 	if (old_inode)
4667 		nfs4_inode_make_writeable(old_inode);
4668 	if (new_inode)
4669 		nfs4_inode_return_delegation(new_inode);
4670 	msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENAME];
4671 	res->server = NFS_SB(old_dentry->d_sb);
4672 	nfs4_init_sequence(&arg->seq_args, &res->seq_res, 1, 0);
4673 }
4674 
4675 static void nfs4_proc_rename_rpc_prepare(struct rpc_task *task, struct nfs_renamedata *data)
4676 {
4677 	nfs4_setup_sequence(NFS_SERVER(data->old_dir)->nfs_client,
4678 			&data->args.seq_args,
4679 			&data->res.seq_res,
4680 			task);
4681 }
4682 
4683 static int nfs4_proc_rename_done(struct rpc_task *task, struct inode *old_dir,
4684 				 struct inode *new_dir)
4685 {
4686 	struct nfs_renamedata *data = task->tk_calldata;
4687 	struct nfs_renameres *res = &data->res;
4688 
4689 	if (!nfs4_sequence_done(task, &res->seq_res))
4690 		return 0;
4691 	if (nfs4_async_handle_error(task, res->server, NULL, &data->timeout) == -EAGAIN)
4692 		return 0;
4693 
4694 	if (task->tk_status == 0) {
4695 		if (new_dir != old_dir) {
4696 			/* Note: If we moved a directory, nlink will change */
4697 			nfs4_update_changeattr(old_dir, &res->old_cinfo,
4698 					res->old_fattr->time_start,
4699 					NFS_INO_INVALID_OTHER |
4700 					    NFS_INO_INVALID_DATA);
4701 			nfs4_update_changeattr(new_dir, &res->new_cinfo,
4702 					res->new_fattr->time_start,
4703 					NFS_INO_INVALID_OTHER |
4704 					    NFS_INO_INVALID_DATA);
4705 		} else
4706 			nfs4_update_changeattr(old_dir, &res->old_cinfo,
4707 					res->old_fattr->time_start,
4708 					NFS_INO_INVALID_DATA);
4709 	}
4710 	return 1;
4711 }
4712 
4713 static int _nfs4_proc_link(struct inode *inode, struct inode *dir, const struct qstr *name)
4714 {
4715 	struct nfs_server *server = NFS_SERVER(inode);
4716 	__u32 bitmask[NFS4_BITMASK_SZ];
4717 	struct nfs4_link_arg arg = {
4718 		.fh     = NFS_FH(inode),
4719 		.dir_fh = NFS_FH(dir),
4720 		.name   = name,
4721 		.bitmask = bitmask,
4722 	};
4723 	struct nfs4_link_res res = {
4724 		.server = server,
4725 		.label = NULL,
4726 	};
4727 	struct rpc_message msg = {
4728 		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LINK],
4729 		.rpc_argp = &arg,
4730 		.rpc_resp = &res,
4731 	};
4732 	int status = -ENOMEM;
4733 
4734 	res.fattr = nfs_alloc_fattr();
4735 	if (res.fattr == NULL)
4736 		goto out;
4737 
4738 	res.label = nfs4_label_alloc(server, GFP_KERNEL);
4739 	if (IS_ERR(res.label)) {
4740 		status = PTR_ERR(res.label);
4741 		goto out;
4742 	}
4743 
4744 	nfs4_inode_make_writeable(inode);
4745 	nfs4_bitmap_copy_adjust_setattr(bitmask, nfs4_bitmask(server, res.label), inode);
4746 
4747 	status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1);
4748 	if (!status) {
4749 		nfs4_update_changeattr(dir, &res.cinfo, res.fattr->time_start,
4750 				       NFS_INO_INVALID_DATA);
4751 		status = nfs_post_op_update_inode(inode, res.fattr);
4752 		if (!status)
4753 			nfs_setsecurity(inode, res.fattr, res.label);
4754 	}
4755 
4756 
4757 	nfs4_label_free(res.label);
4758 
4759 out:
4760 	nfs_free_fattr(res.fattr);
4761 	return status;
4762 }
4763 
4764 static int nfs4_proc_link(struct inode *inode, struct inode *dir, const struct qstr *name)
4765 {
4766 	struct nfs4_exception exception = {
4767 		.interruptible = true,
4768 	};
4769 	int err;
4770 	do {
4771 		err = nfs4_handle_exception(NFS_SERVER(inode),
4772 				_nfs4_proc_link(inode, dir, name),
4773 				&exception);
4774 	} while (exception.retry);
4775 	return err;
4776 }
4777 
4778 struct nfs4_createdata {
4779 	struct rpc_message msg;
4780 	struct nfs4_create_arg arg;
4781 	struct nfs4_create_res res;
4782 	struct nfs_fh fh;
4783 	struct nfs_fattr fattr;
4784 	struct nfs4_label *label;
4785 };
4786 
4787 static struct nfs4_createdata *nfs4_alloc_createdata(struct inode *dir,
4788 		const struct qstr *name, struct iattr *sattr, u32 ftype)
4789 {
4790 	struct nfs4_createdata *data;
4791 
4792 	data = kzalloc(sizeof(*data), GFP_KERNEL);
4793 	if (data != NULL) {
4794 		struct nfs_server *server = NFS_SERVER(dir);
4795 
4796 		data->label = nfs4_label_alloc(server, GFP_KERNEL);
4797 		if (IS_ERR(data->label))
4798 			goto out_free;
4799 
4800 		data->msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CREATE];
4801 		data->msg.rpc_argp = &data->arg;
4802 		data->msg.rpc_resp = &data->res;
4803 		data->arg.dir_fh = NFS_FH(dir);
4804 		data->arg.server = server;
4805 		data->arg.name = name;
4806 		data->arg.attrs = sattr;
4807 		data->arg.ftype = ftype;
4808 		data->arg.bitmask = nfs4_bitmask(server, data->label);
4809 		data->arg.umask = current_umask();
4810 		data->res.server = server;
4811 		data->res.fh = &data->fh;
4812 		data->res.fattr = &data->fattr;
4813 		data->res.label = data->label;
4814 		nfs_fattr_init(data->res.fattr);
4815 	}
4816 	return data;
4817 out_free:
4818 	kfree(data);
4819 	return NULL;
4820 }
4821 
4822 static int nfs4_do_create(struct inode *dir, struct dentry *dentry, struct nfs4_createdata *data)
4823 {
4824 	int status = nfs4_call_sync(NFS_SERVER(dir)->client, NFS_SERVER(dir), &data->msg,
4825 				    &data->arg.seq_args, &data->res.seq_res, 1);
4826 	if (status == 0) {
4827 		spin_lock(&dir->i_lock);
4828 		nfs4_update_changeattr_locked(dir, &data->res.dir_cinfo,
4829 				data->res.fattr->time_start,
4830 				NFS_INO_INVALID_DATA);
4831 		/* Creating a directory bumps nlink in the parent */
4832 		if (data->arg.ftype == NF4DIR)
4833 			nfs4_inc_nlink_locked(dir);
4834 		spin_unlock(&dir->i_lock);
4835 		status = nfs_instantiate(dentry, data->res.fh, data->res.fattr, data->res.label);
4836 	}
4837 	return status;
4838 }
4839 
4840 static void nfs4_free_createdata(struct nfs4_createdata *data)
4841 {
4842 	nfs4_label_free(data->label);
4843 	kfree(data);
4844 }
4845 
4846 static int _nfs4_proc_symlink(struct inode *dir, struct dentry *dentry,
4847 		struct page *page, unsigned int len, struct iattr *sattr,
4848 		struct nfs4_label *label)
4849 {
4850 	struct nfs4_createdata *data;
4851 	int status = -ENAMETOOLONG;
4852 
4853 	if (len > NFS4_MAXPATHLEN)
4854 		goto out;
4855 
4856 	status = -ENOMEM;
4857 	data = nfs4_alloc_createdata(dir, &dentry->d_name, sattr, NF4LNK);
4858 	if (data == NULL)
4859 		goto out;
4860 
4861 	data->msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SYMLINK];
4862 	data->arg.u.symlink.pages = &page;
4863 	data->arg.u.symlink.len = len;
4864 	data->arg.label = label;
4865 
4866 	status = nfs4_do_create(dir, dentry, data);
4867 
4868 	nfs4_free_createdata(data);
4869 out:
4870 	return status;
4871 }
4872 
4873 static int nfs4_proc_symlink(struct inode *dir, struct dentry *dentry,
4874 		struct page *page, unsigned int len, struct iattr *sattr)
4875 {
4876 	struct nfs4_exception exception = {
4877 		.interruptible = true,
4878 	};
4879 	struct nfs4_label l, *label = NULL;
4880 	int err;
4881 
4882 	label = nfs4_label_init_security(dir, dentry, sattr, &l);
4883 
4884 	do {
4885 		err = _nfs4_proc_symlink(dir, dentry, page, len, sattr, label);
4886 		trace_nfs4_symlink(dir, &dentry->d_name, err);
4887 		err = nfs4_handle_exception(NFS_SERVER(dir), err,
4888 				&exception);
4889 	} while (exception.retry);
4890 
4891 	nfs4_label_release_security(label);
4892 	return err;
4893 }
4894 
4895 static int _nfs4_proc_mkdir(struct inode *dir, struct dentry *dentry,
4896 		struct iattr *sattr, struct nfs4_label *label)
4897 {
4898 	struct nfs4_createdata *data;
4899 	int status = -ENOMEM;
4900 
4901 	data = nfs4_alloc_createdata(dir, &dentry->d_name, sattr, NF4DIR);
4902 	if (data == NULL)
4903 		goto out;
4904 
4905 	data->arg.label = label;
4906 	status = nfs4_do_create(dir, dentry, data);
4907 
4908 	nfs4_free_createdata(data);
4909 out:
4910 	return status;
4911 }
4912 
4913 static int nfs4_proc_mkdir(struct inode *dir, struct dentry *dentry,
4914 		struct iattr *sattr)
4915 {
4916 	struct nfs_server *server = NFS_SERVER(dir);
4917 	struct nfs4_exception exception = {
4918 		.interruptible = true,
4919 	};
4920 	struct nfs4_label l, *label = NULL;
4921 	int err;
4922 
4923 	label = nfs4_label_init_security(dir, dentry, sattr, &l);
4924 
4925 	if (!(server->attr_bitmask[2] & FATTR4_WORD2_MODE_UMASK))
4926 		sattr->ia_mode &= ~current_umask();
4927 	do {
4928 		err = _nfs4_proc_mkdir(dir, dentry, sattr, label);
4929 		trace_nfs4_mkdir(dir, &dentry->d_name, err);
4930 		err = nfs4_handle_exception(NFS_SERVER(dir), err,
4931 				&exception);
4932 	} while (exception.retry);
4933 	nfs4_label_release_security(label);
4934 
4935 	return err;
4936 }
4937 
4938 static int _nfs4_proc_readdir(struct dentry *dentry, const struct cred *cred,
4939 		u64 cookie, struct page **pages, unsigned int count, bool plus)
4940 {
4941 	struct inode		*dir = d_inode(dentry);
4942 	struct nfs4_readdir_arg args = {
4943 		.fh = NFS_FH(dir),
4944 		.pages = pages,
4945 		.pgbase = 0,
4946 		.count = count,
4947 		.bitmask = NFS_SERVER(d_inode(dentry))->attr_bitmask,
4948 		.plus = plus,
4949 	};
4950 	struct nfs4_readdir_res res;
4951 	struct rpc_message msg = {
4952 		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READDIR],
4953 		.rpc_argp = &args,
4954 		.rpc_resp = &res,
4955 		.rpc_cred = cred,
4956 	};
4957 	int			status;
4958 
4959 	dprintk("%s: dentry = %pd2, cookie = %Lu\n", __func__,
4960 			dentry,
4961 			(unsigned long long)cookie);
4962 	nfs4_setup_readdir(cookie, NFS_I(dir)->cookieverf, dentry, &args);
4963 	res.pgbase = args.pgbase;
4964 	status = nfs4_call_sync(NFS_SERVER(dir)->client, NFS_SERVER(dir), &msg, &args.seq_args, &res.seq_res, 0);
4965 	if (status >= 0) {
4966 		memcpy(NFS_I(dir)->cookieverf, res.verifier.data, NFS4_VERIFIER_SIZE);
4967 		status += args.pgbase;
4968 	}
4969 
4970 	nfs_invalidate_atime(dir);
4971 
4972 	dprintk("%s: returns %d\n", __func__, status);
4973 	return status;
4974 }
4975 
4976 static int nfs4_proc_readdir(struct dentry *dentry, const struct cred *cred,
4977 		u64 cookie, struct page **pages, unsigned int count, bool plus)
4978 {
4979 	struct nfs4_exception exception = {
4980 		.interruptible = true,
4981 	};
4982 	int err;
4983 	do {
4984 		err = _nfs4_proc_readdir(dentry, cred, cookie,
4985 				pages, count, plus);
4986 		trace_nfs4_readdir(d_inode(dentry), err);
4987 		err = nfs4_handle_exception(NFS_SERVER(d_inode(dentry)), err,
4988 				&exception);
4989 	} while (exception.retry);
4990 	return err;
4991 }
4992 
4993 static int _nfs4_proc_mknod(struct inode *dir, struct dentry *dentry,
4994 		struct iattr *sattr, struct nfs4_label *label, dev_t rdev)
4995 {
4996 	struct nfs4_createdata *data;
4997 	int mode = sattr->ia_mode;
4998 	int status = -ENOMEM;
4999 
5000 	data = nfs4_alloc_createdata(dir, &dentry->d_name, sattr, NF4SOCK);
5001 	if (data == NULL)
5002 		goto out;
5003 
5004 	if (S_ISFIFO(mode))
5005 		data->arg.ftype = NF4FIFO;
5006 	else if (S_ISBLK(mode)) {
5007 		data->arg.ftype = NF4BLK;
5008 		data->arg.u.device.specdata1 = MAJOR(rdev);
5009 		data->arg.u.device.specdata2 = MINOR(rdev);
5010 	}
5011 	else if (S_ISCHR(mode)) {
5012 		data->arg.ftype = NF4CHR;
5013 		data->arg.u.device.specdata1 = MAJOR(rdev);
5014 		data->arg.u.device.specdata2 = MINOR(rdev);
5015 	} else if (!S_ISSOCK(mode)) {
5016 		status = -EINVAL;
5017 		goto out_free;
5018 	}
5019 
5020 	data->arg.label = label;
5021 	status = nfs4_do_create(dir, dentry, data);
5022 out_free:
5023 	nfs4_free_createdata(data);
5024 out:
5025 	return status;
5026 }
5027 
5028 static int nfs4_proc_mknod(struct inode *dir, struct dentry *dentry,
5029 		struct iattr *sattr, dev_t rdev)
5030 {
5031 	struct nfs_server *server = NFS_SERVER(dir);
5032 	struct nfs4_exception exception = {
5033 		.interruptible = true,
5034 	};
5035 	struct nfs4_label l, *label = NULL;
5036 	int err;
5037 
5038 	label = nfs4_label_init_security(dir, dentry, sattr, &l);
5039 
5040 	if (!(server->attr_bitmask[2] & FATTR4_WORD2_MODE_UMASK))
5041 		sattr->ia_mode &= ~current_umask();
5042 	do {
5043 		err = _nfs4_proc_mknod(dir, dentry, sattr, label, rdev);
5044 		trace_nfs4_mknod(dir, &dentry->d_name, err);
5045 		err = nfs4_handle_exception(NFS_SERVER(dir), err,
5046 				&exception);
5047 	} while (exception.retry);
5048 
5049 	nfs4_label_release_security(label);
5050 
5051 	return err;
5052 }
5053 
5054 static int _nfs4_proc_statfs(struct nfs_server *server, struct nfs_fh *fhandle,
5055 		 struct nfs_fsstat *fsstat)
5056 {
5057 	struct nfs4_statfs_arg args = {
5058 		.fh = fhandle,
5059 		.bitmask = server->attr_bitmask,
5060 	};
5061 	struct nfs4_statfs_res res = {
5062 		.fsstat = fsstat,
5063 	};
5064 	struct rpc_message msg = {
5065 		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_STATFS],
5066 		.rpc_argp = &args,
5067 		.rpc_resp = &res,
5068 	};
5069 
5070 	nfs_fattr_init(fsstat->fattr);
5071 	return  nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
5072 }
5073 
5074 static int nfs4_proc_statfs(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsstat *fsstat)
5075 {
5076 	struct nfs4_exception exception = {
5077 		.interruptible = true,
5078 	};
5079 	int err;
5080 	do {
5081 		err = nfs4_handle_exception(server,
5082 				_nfs4_proc_statfs(server, fhandle, fsstat),
5083 				&exception);
5084 	} while (exception.retry);
5085 	return err;
5086 }
5087 
5088 static int _nfs4_do_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle,
5089 		struct nfs_fsinfo *fsinfo)
5090 {
5091 	struct nfs4_fsinfo_arg args = {
5092 		.fh = fhandle,
5093 		.bitmask = server->attr_bitmask,
5094 	};
5095 	struct nfs4_fsinfo_res res = {
5096 		.fsinfo = fsinfo,
5097 	};
5098 	struct rpc_message msg = {
5099 		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FSINFO],
5100 		.rpc_argp = &args,
5101 		.rpc_resp = &res,
5102 	};
5103 
5104 	return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
5105 }
5106 
5107 static int nfs4_do_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsinfo *fsinfo)
5108 {
5109 	struct nfs4_exception exception = {
5110 		.interruptible = true,
5111 	};
5112 	int err;
5113 
5114 	do {
5115 		err = _nfs4_do_fsinfo(server, fhandle, fsinfo);
5116 		trace_nfs4_fsinfo(server, fhandle, fsinfo->fattr, err);
5117 		if (err == 0) {
5118 			nfs4_set_lease_period(server->nfs_client, fsinfo->lease_time * HZ);
5119 			break;
5120 		}
5121 		err = nfs4_handle_exception(server, err, &exception);
5122 	} while (exception.retry);
5123 	return err;
5124 }
5125 
5126 static int nfs4_proc_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsinfo *fsinfo)
5127 {
5128 	int error;
5129 
5130 	nfs_fattr_init(fsinfo->fattr);
5131 	error = nfs4_do_fsinfo(server, fhandle, fsinfo);
5132 	if (error == 0) {
5133 		/* block layout checks this! */
5134 		server->pnfs_blksize = fsinfo->blksize;
5135 		set_pnfs_layoutdriver(server, fhandle, fsinfo);
5136 	}
5137 
5138 	return error;
5139 }
5140 
5141 static int _nfs4_proc_pathconf(struct nfs_server *server, struct nfs_fh *fhandle,
5142 		struct nfs_pathconf *pathconf)
5143 {
5144 	struct nfs4_pathconf_arg args = {
5145 		.fh = fhandle,
5146 		.bitmask = server->attr_bitmask,
5147 	};
5148 	struct nfs4_pathconf_res res = {
5149 		.pathconf = pathconf,
5150 	};
5151 	struct rpc_message msg = {
5152 		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_PATHCONF],
5153 		.rpc_argp = &args,
5154 		.rpc_resp = &res,
5155 	};
5156 
5157 	/* None of the pathconf attributes are mandatory to implement */
5158 	if ((args.bitmask[0] & nfs4_pathconf_bitmap[0]) == 0) {
5159 		memset(pathconf, 0, sizeof(*pathconf));
5160 		return 0;
5161 	}
5162 
5163 	nfs_fattr_init(pathconf->fattr);
5164 	return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
5165 }
5166 
5167 static int nfs4_proc_pathconf(struct nfs_server *server, struct nfs_fh *fhandle,
5168 		struct nfs_pathconf *pathconf)
5169 {
5170 	struct nfs4_exception exception = {
5171 		.interruptible = true,
5172 	};
5173 	int err;
5174 
5175 	do {
5176 		err = nfs4_handle_exception(server,
5177 				_nfs4_proc_pathconf(server, fhandle, pathconf),
5178 				&exception);
5179 	} while (exception.retry);
5180 	return err;
5181 }
5182 
5183 int nfs4_set_rw_stateid(nfs4_stateid *stateid,
5184 		const struct nfs_open_context *ctx,
5185 		const struct nfs_lock_context *l_ctx,
5186 		fmode_t fmode)
5187 {
5188 	return nfs4_select_rw_stateid(ctx->state, fmode, l_ctx, stateid, NULL);
5189 }
5190 EXPORT_SYMBOL_GPL(nfs4_set_rw_stateid);
5191 
5192 static bool nfs4_stateid_is_current(nfs4_stateid *stateid,
5193 		const struct nfs_open_context *ctx,
5194 		const struct nfs_lock_context *l_ctx,
5195 		fmode_t fmode)
5196 {
5197 	nfs4_stateid _current_stateid;
5198 
5199 	/* If the current stateid represents a lost lock, then exit */
5200 	if (nfs4_set_rw_stateid(&_current_stateid, ctx, l_ctx, fmode) == -EIO)
5201 		return true;
5202 	return nfs4_stateid_match(stateid, &_current_stateid);
5203 }
5204 
5205 static bool nfs4_error_stateid_expired(int err)
5206 {
5207 	switch (err) {
5208 	case -NFS4ERR_DELEG_REVOKED:
5209 	case -NFS4ERR_ADMIN_REVOKED:
5210 	case -NFS4ERR_BAD_STATEID:
5211 	case -NFS4ERR_STALE_STATEID:
5212 	case -NFS4ERR_OLD_STATEID:
5213 	case -NFS4ERR_OPENMODE:
5214 	case -NFS4ERR_EXPIRED:
5215 		return true;
5216 	}
5217 	return false;
5218 }
5219 
5220 static int nfs4_read_done_cb(struct rpc_task *task, struct nfs_pgio_header *hdr)
5221 {
5222 	struct nfs_server *server = NFS_SERVER(hdr->inode);
5223 
5224 	trace_nfs4_read(hdr, task->tk_status);
5225 	if (task->tk_status < 0) {
5226 		struct nfs4_exception exception = {
5227 			.inode = hdr->inode,
5228 			.state = hdr->args.context->state,
5229 			.stateid = &hdr->args.stateid,
5230 		};
5231 		task->tk_status = nfs4_async_handle_exception(task,
5232 				server, task->tk_status, &exception);
5233 		if (exception.retry) {
5234 			rpc_restart_call_prepare(task);
5235 			return -EAGAIN;
5236 		}
5237 	}
5238 
5239 	if (task->tk_status > 0)
5240 		renew_lease(server, hdr->timestamp);
5241 	return 0;
5242 }
5243 
5244 static bool nfs4_read_stateid_changed(struct rpc_task *task,
5245 		struct nfs_pgio_args *args)
5246 {
5247 
5248 	if (!nfs4_error_stateid_expired(task->tk_status) ||
5249 		nfs4_stateid_is_current(&args->stateid,
5250 				args->context,
5251 				args->lock_context,
5252 				FMODE_READ))
5253 		return false;
5254 	rpc_restart_call_prepare(task);
5255 	return true;
5256 }
5257 
5258 static int nfs4_read_done(struct rpc_task *task, struct nfs_pgio_header *hdr)
5259 {
5260 
5261 	dprintk("--> %s\n", __func__);
5262 
5263 	if (!nfs4_sequence_done(task, &hdr->res.seq_res))
5264 		return -EAGAIN;
5265 	if (nfs4_read_stateid_changed(task, &hdr->args))
5266 		return -EAGAIN;
5267 	if (task->tk_status > 0)
5268 		nfs_invalidate_atime(hdr->inode);
5269 	return hdr->pgio_done_cb ? hdr->pgio_done_cb(task, hdr) :
5270 				    nfs4_read_done_cb(task, hdr);
5271 }
5272 
5273 static void nfs4_proc_read_setup(struct nfs_pgio_header *hdr,
5274 				 struct rpc_message *msg)
5275 {
5276 	hdr->timestamp   = jiffies;
5277 	if (!hdr->pgio_done_cb)
5278 		hdr->pgio_done_cb = nfs4_read_done_cb;
5279 	msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READ];
5280 	nfs4_init_sequence(&hdr->args.seq_args, &hdr->res.seq_res, 0, 0);
5281 }
5282 
5283 static int nfs4_proc_pgio_rpc_prepare(struct rpc_task *task,
5284 				      struct nfs_pgio_header *hdr)
5285 {
5286 	if (nfs4_setup_sequence(NFS_SERVER(hdr->inode)->nfs_client,
5287 			&hdr->args.seq_args,
5288 			&hdr->res.seq_res,
5289 			task))
5290 		return 0;
5291 	if (nfs4_set_rw_stateid(&hdr->args.stateid, hdr->args.context,
5292 				hdr->args.lock_context,
5293 				hdr->rw_mode) == -EIO)
5294 		return -EIO;
5295 	if (unlikely(test_bit(NFS_CONTEXT_BAD, &hdr->args.context->flags)))
5296 		return -EIO;
5297 	return 0;
5298 }
5299 
5300 static int nfs4_write_done_cb(struct rpc_task *task,
5301 			      struct nfs_pgio_header *hdr)
5302 {
5303 	struct inode *inode = hdr->inode;
5304 
5305 	trace_nfs4_write(hdr, task->tk_status);
5306 	if (task->tk_status < 0) {
5307 		struct nfs4_exception exception = {
5308 			.inode = hdr->inode,
5309 			.state = hdr->args.context->state,
5310 			.stateid = &hdr->args.stateid,
5311 		};
5312 		task->tk_status = nfs4_async_handle_exception(task,
5313 				NFS_SERVER(inode), task->tk_status,
5314 				&exception);
5315 		if (exception.retry) {
5316 			rpc_restart_call_prepare(task);
5317 			return -EAGAIN;
5318 		}
5319 	}
5320 	if (task->tk_status >= 0) {
5321 		renew_lease(NFS_SERVER(inode), hdr->timestamp);
5322 		nfs_writeback_update_inode(hdr);
5323 	}
5324 	return 0;
5325 }
5326 
5327 static bool nfs4_write_stateid_changed(struct rpc_task *task,
5328 		struct nfs_pgio_args *args)
5329 {
5330 
5331 	if (!nfs4_error_stateid_expired(task->tk_status) ||
5332 		nfs4_stateid_is_current(&args->stateid,
5333 				args->context,
5334 				args->lock_context,
5335 				FMODE_WRITE))
5336 		return false;
5337 	rpc_restart_call_prepare(task);
5338 	return true;
5339 }
5340 
5341 static int nfs4_write_done(struct rpc_task *task, struct nfs_pgio_header *hdr)
5342 {
5343 	if (!nfs4_sequence_done(task, &hdr->res.seq_res))
5344 		return -EAGAIN;
5345 	if (nfs4_write_stateid_changed(task, &hdr->args))
5346 		return -EAGAIN;
5347 	return hdr->pgio_done_cb ? hdr->pgio_done_cb(task, hdr) :
5348 		nfs4_write_done_cb(task, hdr);
5349 }
5350 
5351 static
5352 bool nfs4_write_need_cache_consistency_data(struct nfs_pgio_header *hdr)
5353 {
5354 	/* Don't request attributes for pNFS or O_DIRECT writes */
5355 	if (hdr->ds_clp != NULL || hdr->dreq != NULL)
5356 		return false;
5357 	/* Otherwise, request attributes if and only if we don't hold
5358 	 * a delegation
5359 	 */
5360 	return nfs4_have_delegation(hdr->inode, FMODE_READ) == 0;
5361 }
5362 
5363 static void nfs4_proc_write_setup(struct nfs_pgio_header *hdr,
5364 				  struct rpc_message *msg,
5365 				  struct rpc_clnt **clnt)
5366 {
5367 	struct nfs_server *server = NFS_SERVER(hdr->inode);
5368 
5369 	if (!nfs4_write_need_cache_consistency_data(hdr)) {
5370 		hdr->args.bitmask = NULL;
5371 		hdr->res.fattr = NULL;
5372 	} else
5373 		hdr->args.bitmask = server->cache_consistency_bitmask;
5374 
5375 	if (!hdr->pgio_done_cb)
5376 		hdr->pgio_done_cb = nfs4_write_done_cb;
5377 	hdr->res.server = server;
5378 	hdr->timestamp   = jiffies;
5379 
5380 	msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_WRITE];
5381 	nfs4_init_sequence(&hdr->args.seq_args, &hdr->res.seq_res, 0, 0);
5382 	nfs4_state_protect_write(server->nfs_client, clnt, msg, hdr);
5383 }
5384 
5385 static void nfs4_proc_commit_rpc_prepare(struct rpc_task *task, struct nfs_commit_data *data)
5386 {
5387 	nfs4_setup_sequence(NFS_SERVER(data->inode)->nfs_client,
5388 			&data->args.seq_args,
5389 			&data->res.seq_res,
5390 			task);
5391 }
5392 
5393 static int nfs4_commit_done_cb(struct rpc_task *task, struct nfs_commit_data *data)
5394 {
5395 	struct inode *inode = data->inode;
5396 
5397 	trace_nfs4_commit(data, task->tk_status);
5398 	if (nfs4_async_handle_error(task, NFS_SERVER(inode),
5399 				    NULL, NULL) == -EAGAIN) {
5400 		rpc_restart_call_prepare(task);
5401 		return -EAGAIN;
5402 	}
5403 	return 0;
5404 }
5405 
5406 static int nfs4_commit_done(struct rpc_task *task, struct nfs_commit_data *data)
5407 {
5408 	if (!nfs4_sequence_done(task, &data->res.seq_res))
5409 		return -EAGAIN;
5410 	return data->commit_done_cb(task, data);
5411 }
5412 
5413 static void nfs4_proc_commit_setup(struct nfs_commit_data *data, struct rpc_message *msg,
5414 				   struct rpc_clnt **clnt)
5415 {
5416 	struct nfs_server *server = NFS_SERVER(data->inode);
5417 
5418 	if (data->commit_done_cb == NULL)
5419 		data->commit_done_cb = nfs4_commit_done_cb;
5420 	data->res.server = server;
5421 	msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_COMMIT];
5422 	nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 1, 0);
5423 	nfs4_state_protect(server->nfs_client, NFS_SP4_MACH_CRED_COMMIT, clnt, msg);
5424 }
5425 
5426 static int _nfs4_proc_commit(struct file *dst, struct nfs_commitargs *args,
5427 				struct nfs_commitres *res)
5428 {
5429 	struct inode *dst_inode = file_inode(dst);
5430 	struct nfs_server *server = NFS_SERVER(dst_inode);
5431 	struct rpc_message msg = {
5432 		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_COMMIT],
5433 		.rpc_argp = args,
5434 		.rpc_resp = res,
5435 	};
5436 
5437 	args->fh = NFS_FH(dst_inode);
5438 	return nfs4_call_sync(server->client, server, &msg,
5439 			&args->seq_args, &res->seq_res, 1);
5440 }
5441 
5442 int nfs4_proc_commit(struct file *dst, __u64 offset, __u32 count, struct nfs_commitres *res)
5443 {
5444 	struct nfs_commitargs args = {
5445 		.offset = offset,
5446 		.count = count,
5447 	};
5448 	struct nfs_server *dst_server = NFS_SERVER(file_inode(dst));
5449 	struct nfs4_exception exception = { };
5450 	int status;
5451 
5452 	do {
5453 		status = _nfs4_proc_commit(dst, &args, res);
5454 		status = nfs4_handle_exception(dst_server, status, &exception);
5455 	} while (exception.retry);
5456 
5457 	return status;
5458 }
5459 
5460 struct nfs4_renewdata {
5461 	struct nfs_client	*client;
5462 	unsigned long		timestamp;
5463 };
5464 
5465 /*
5466  * nfs4_proc_async_renew(): This is not one of the nfs_rpc_ops; it is a special
5467  * standalone procedure for queueing an asynchronous RENEW.
5468  */
5469 static void nfs4_renew_release(void *calldata)
5470 {
5471 	struct nfs4_renewdata *data = calldata;
5472 	struct nfs_client *clp = data->client;
5473 
5474 	if (refcount_read(&clp->cl_count) > 1)
5475 		nfs4_schedule_state_renewal(clp);
5476 	nfs_put_client(clp);
5477 	kfree(data);
5478 }
5479 
5480 static void nfs4_renew_done(struct rpc_task *task, void *calldata)
5481 {
5482 	struct nfs4_renewdata *data = calldata;
5483 	struct nfs_client *clp = data->client;
5484 	unsigned long timestamp = data->timestamp;
5485 
5486 	trace_nfs4_renew_async(clp, task->tk_status);
5487 	switch (task->tk_status) {
5488 	case 0:
5489 		break;
5490 	case -NFS4ERR_LEASE_MOVED:
5491 		nfs4_schedule_lease_moved_recovery(clp);
5492 		break;
5493 	default:
5494 		/* Unless we're shutting down, schedule state recovery! */
5495 		if (test_bit(NFS_CS_RENEWD, &clp->cl_res_state) == 0)
5496 			return;
5497 		if (task->tk_status != NFS4ERR_CB_PATH_DOWN) {
5498 			nfs4_schedule_lease_recovery(clp);
5499 			return;
5500 		}
5501 		nfs4_schedule_path_down_recovery(clp);
5502 	}
5503 	do_renew_lease(clp, timestamp);
5504 }
5505 
5506 static const struct rpc_call_ops nfs4_renew_ops = {
5507 	.rpc_call_done = nfs4_renew_done,
5508 	.rpc_release = nfs4_renew_release,
5509 };
5510 
5511 static int nfs4_proc_async_renew(struct nfs_client *clp, const struct cred *cred, unsigned renew_flags)
5512 {
5513 	struct rpc_message msg = {
5514 		.rpc_proc	= &nfs4_procedures[NFSPROC4_CLNT_RENEW],
5515 		.rpc_argp	= clp,
5516 		.rpc_cred	= cred,
5517 	};
5518 	struct nfs4_renewdata *data;
5519 
5520 	if (renew_flags == 0)
5521 		return 0;
5522 	if (!refcount_inc_not_zero(&clp->cl_count))
5523 		return -EIO;
5524 	data = kmalloc(sizeof(*data), GFP_NOFS);
5525 	if (data == NULL) {
5526 		nfs_put_client(clp);
5527 		return -ENOMEM;
5528 	}
5529 	data->client = clp;
5530 	data->timestamp = jiffies;
5531 	return rpc_call_async(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT,
5532 			&nfs4_renew_ops, data);
5533 }
5534 
5535 static int nfs4_proc_renew(struct nfs_client *clp, const struct cred *cred)
5536 {
5537 	struct rpc_message msg = {
5538 		.rpc_proc	= &nfs4_procedures[NFSPROC4_CLNT_RENEW],
5539 		.rpc_argp	= clp,
5540 		.rpc_cred	= cred,
5541 	};
5542 	unsigned long now = jiffies;
5543 	int status;
5544 
5545 	status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);
5546 	if (status < 0)
5547 		return status;
5548 	do_renew_lease(clp, now);
5549 	return 0;
5550 }
5551 
5552 static inline int nfs4_server_supports_acls(struct nfs_server *server)
5553 {
5554 	return server->caps & NFS_CAP_ACLS;
5555 }
5556 
5557 /* Assuming that XATTR_SIZE_MAX is a multiple of PAGE_SIZE, and that
5558  * it's OK to put sizeof(void) * (XATTR_SIZE_MAX/PAGE_SIZE) bytes on
5559  * the stack.
5560  */
5561 #define NFS4ACL_MAXPAGES DIV_ROUND_UP(XATTR_SIZE_MAX, PAGE_SIZE)
5562 
5563 int nfs4_buf_to_pages_noslab(const void *buf, size_t buflen,
5564 		struct page **pages)
5565 {
5566 	struct page *newpage, **spages;
5567 	int rc = 0;
5568 	size_t len;
5569 	spages = pages;
5570 
5571 	do {
5572 		len = min_t(size_t, PAGE_SIZE, buflen);
5573 		newpage = alloc_page(GFP_KERNEL);
5574 
5575 		if (newpage == NULL)
5576 			goto unwind;
5577 		memcpy(page_address(newpage), buf, len);
5578 		buf += len;
5579 		buflen -= len;
5580 		*pages++ = newpage;
5581 		rc++;
5582 	} while (buflen != 0);
5583 
5584 	return rc;
5585 
5586 unwind:
5587 	for(; rc > 0; rc--)
5588 		__free_page(spages[rc-1]);
5589 	return -ENOMEM;
5590 }
5591 
5592 struct nfs4_cached_acl {
5593 	int cached;
5594 	size_t len;
5595 	char data[];
5596 };
5597 
5598 static void nfs4_set_cached_acl(struct inode *inode, struct nfs4_cached_acl *acl)
5599 {
5600 	struct nfs_inode *nfsi = NFS_I(inode);
5601 
5602 	spin_lock(&inode->i_lock);
5603 	kfree(nfsi->nfs4_acl);
5604 	nfsi->nfs4_acl = acl;
5605 	spin_unlock(&inode->i_lock);
5606 }
5607 
5608 static void nfs4_zap_acl_attr(struct inode *inode)
5609 {
5610 	nfs4_set_cached_acl(inode, NULL);
5611 }
5612 
5613 static inline ssize_t nfs4_read_cached_acl(struct inode *inode, char *buf, size_t buflen)
5614 {
5615 	struct nfs_inode *nfsi = NFS_I(inode);
5616 	struct nfs4_cached_acl *acl;
5617 	int ret = -ENOENT;
5618 
5619 	spin_lock(&inode->i_lock);
5620 	acl = nfsi->nfs4_acl;
5621 	if (acl == NULL)
5622 		goto out;
5623 	if (buf == NULL) /* user is just asking for length */
5624 		goto out_len;
5625 	if (acl->cached == 0)
5626 		goto out;
5627 	ret = -ERANGE; /* see getxattr(2) man page */
5628 	if (acl->len > buflen)
5629 		goto out;
5630 	memcpy(buf, acl->data, acl->len);
5631 out_len:
5632 	ret = acl->len;
5633 out:
5634 	spin_unlock(&inode->i_lock);
5635 	return ret;
5636 }
5637 
5638 static void nfs4_write_cached_acl(struct inode *inode, struct page **pages, size_t pgbase, size_t acl_len)
5639 {
5640 	struct nfs4_cached_acl *acl;
5641 	size_t buflen = sizeof(*acl) + acl_len;
5642 
5643 	if (buflen <= PAGE_SIZE) {
5644 		acl = kmalloc(buflen, GFP_KERNEL);
5645 		if (acl == NULL)
5646 			goto out;
5647 		acl->cached = 1;
5648 		_copy_from_pages(acl->data, pages, pgbase, acl_len);
5649 	} else {
5650 		acl = kmalloc(sizeof(*acl), GFP_KERNEL);
5651 		if (acl == NULL)
5652 			goto out;
5653 		acl->cached = 0;
5654 	}
5655 	acl->len = acl_len;
5656 out:
5657 	nfs4_set_cached_acl(inode, acl);
5658 }
5659 
5660 /*
5661  * The getxattr API returns the required buffer length when called with a
5662  * NULL buf. The NFSv4 acl tool then calls getxattr again after allocating
5663  * the required buf.  On a NULL buf, we send a page of data to the server
5664  * guessing that the ACL request can be serviced by a page. If so, we cache
5665  * up to the page of ACL data, and the 2nd call to getxattr is serviced by
5666  * the cache. If not so, we throw away the page, and cache the required
5667  * length. The next getxattr call will then produce another round trip to
5668  * the server, this time with the input buf of the required size.
5669  */
5670 static ssize_t __nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t buflen)
5671 {
5672 	struct page **pages;
5673 	struct nfs_getaclargs args = {
5674 		.fh = NFS_FH(inode),
5675 		.acl_len = buflen,
5676 	};
5677 	struct nfs_getaclres res = {
5678 		.acl_len = buflen,
5679 	};
5680 	struct rpc_message msg = {
5681 		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETACL],
5682 		.rpc_argp = &args,
5683 		.rpc_resp = &res,
5684 	};
5685 	unsigned int npages;
5686 	int ret = -ENOMEM, i;
5687 	struct nfs_server *server = NFS_SERVER(inode);
5688 
5689 	if (buflen == 0)
5690 		buflen = server->rsize;
5691 
5692 	npages = DIV_ROUND_UP(buflen, PAGE_SIZE) + 1;
5693 	pages = kmalloc_array(npages, sizeof(struct page *), GFP_NOFS);
5694 	if (!pages)
5695 		return -ENOMEM;
5696 
5697 	args.acl_pages = pages;
5698 
5699 	for (i = 0; i < npages; i++) {
5700 		pages[i] = alloc_page(GFP_KERNEL);
5701 		if (!pages[i])
5702 			goto out_free;
5703 	}
5704 
5705 	/* for decoding across pages */
5706 	res.acl_scratch = alloc_page(GFP_KERNEL);
5707 	if (!res.acl_scratch)
5708 		goto out_free;
5709 
5710 	args.acl_len = npages * PAGE_SIZE;
5711 
5712 	dprintk("%s  buf %p buflen %zu npages %d args.acl_len %zu\n",
5713 		__func__, buf, buflen, npages, args.acl_len);
5714 	ret = nfs4_call_sync(NFS_SERVER(inode)->client, NFS_SERVER(inode),
5715 			     &msg, &args.seq_args, &res.seq_res, 0);
5716 	if (ret)
5717 		goto out_free;
5718 
5719 	/* Handle the case where the passed-in buffer is too short */
5720 	if (res.acl_flags & NFS4_ACL_TRUNC) {
5721 		/* Did the user only issue a request for the acl length? */
5722 		if (buf == NULL)
5723 			goto out_ok;
5724 		ret = -ERANGE;
5725 		goto out_free;
5726 	}
5727 	nfs4_write_cached_acl(inode, pages, res.acl_data_offset, res.acl_len);
5728 	if (buf) {
5729 		if (res.acl_len > buflen) {
5730 			ret = -ERANGE;
5731 			goto out_free;
5732 		}
5733 		_copy_from_pages(buf, pages, res.acl_data_offset, res.acl_len);
5734 	}
5735 out_ok:
5736 	ret = res.acl_len;
5737 out_free:
5738 	for (i = 0; i < npages; i++)
5739 		if (pages[i])
5740 			__free_page(pages[i]);
5741 	if (res.acl_scratch)
5742 		__free_page(res.acl_scratch);
5743 	kfree(pages);
5744 	return ret;
5745 }
5746 
5747 static ssize_t nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t buflen)
5748 {
5749 	struct nfs4_exception exception = {
5750 		.interruptible = true,
5751 	};
5752 	ssize_t ret;
5753 	do {
5754 		ret = __nfs4_get_acl_uncached(inode, buf, buflen);
5755 		trace_nfs4_get_acl(inode, ret);
5756 		if (ret >= 0)
5757 			break;
5758 		ret = nfs4_handle_exception(NFS_SERVER(inode), ret, &exception);
5759 	} while (exception.retry);
5760 	return ret;
5761 }
5762 
5763 static ssize_t nfs4_proc_get_acl(struct inode *inode, void *buf, size_t buflen)
5764 {
5765 	struct nfs_server *server = NFS_SERVER(inode);
5766 	int ret;
5767 
5768 	if (!nfs4_server_supports_acls(server))
5769 		return -EOPNOTSUPP;
5770 	ret = nfs_revalidate_inode(server, inode);
5771 	if (ret < 0)
5772 		return ret;
5773 	if (NFS_I(inode)->cache_validity & NFS_INO_INVALID_ACL)
5774 		nfs_zap_acl_cache(inode);
5775 	ret = nfs4_read_cached_acl(inode, buf, buflen);
5776 	if (ret != -ENOENT)
5777 		/* -ENOENT is returned if there is no ACL or if there is an ACL
5778 		 * but no cached acl data, just the acl length */
5779 		return ret;
5780 	return nfs4_get_acl_uncached(inode, buf, buflen);
5781 }
5782 
5783 static int __nfs4_proc_set_acl(struct inode *inode, const void *buf, size_t buflen)
5784 {
5785 	struct nfs_server *server = NFS_SERVER(inode);
5786 	struct page *pages[NFS4ACL_MAXPAGES];
5787 	struct nfs_setaclargs arg = {
5788 		.fh		= NFS_FH(inode),
5789 		.acl_pages	= pages,
5790 		.acl_len	= buflen,
5791 	};
5792 	struct nfs_setaclres res;
5793 	struct rpc_message msg = {
5794 		.rpc_proc	= &nfs4_procedures[NFSPROC4_CLNT_SETACL],
5795 		.rpc_argp	= &arg,
5796 		.rpc_resp	= &res,
5797 	};
5798 	unsigned int npages = DIV_ROUND_UP(buflen, PAGE_SIZE);
5799 	int ret, i;
5800 
5801 	if (!nfs4_server_supports_acls(server))
5802 		return -EOPNOTSUPP;
5803 	if (npages > ARRAY_SIZE(pages))
5804 		return -ERANGE;
5805 	i = nfs4_buf_to_pages_noslab(buf, buflen, arg.acl_pages);
5806 	if (i < 0)
5807 		return i;
5808 	nfs4_inode_make_writeable(inode);
5809 	ret = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1);
5810 
5811 	/*
5812 	 * Free each page after tx, so the only ref left is
5813 	 * held by the network stack
5814 	 */
5815 	for (; i > 0; i--)
5816 		put_page(pages[i-1]);
5817 
5818 	/*
5819 	 * Acl update can result in inode attribute update.
5820 	 * so mark the attribute cache invalid.
5821 	 */
5822 	spin_lock(&inode->i_lock);
5823 	NFS_I(inode)->cache_validity |= NFS_INO_INVALID_CHANGE
5824 		| NFS_INO_INVALID_CTIME
5825 		| NFS_INO_REVAL_FORCED;
5826 	spin_unlock(&inode->i_lock);
5827 	nfs_access_zap_cache(inode);
5828 	nfs_zap_acl_cache(inode);
5829 	return ret;
5830 }
5831 
5832 static int nfs4_proc_set_acl(struct inode *inode, const void *buf, size_t buflen)
5833 {
5834 	struct nfs4_exception exception = { };
5835 	int err;
5836 	do {
5837 		err = __nfs4_proc_set_acl(inode, buf, buflen);
5838 		trace_nfs4_set_acl(inode, err);
5839 		err = nfs4_handle_exception(NFS_SERVER(inode), err,
5840 				&exception);
5841 	} while (exception.retry);
5842 	return err;
5843 }
5844 
5845 #ifdef CONFIG_NFS_V4_SECURITY_LABEL
5846 static int _nfs4_get_security_label(struct inode *inode, void *buf,
5847 					size_t buflen)
5848 {
5849 	struct nfs_server *server = NFS_SERVER(inode);
5850 	struct nfs_fattr fattr;
5851 	struct nfs4_label label = {0, 0, buflen, buf};
5852 
5853 	u32 bitmask[3] = { 0, 0, FATTR4_WORD2_SECURITY_LABEL };
5854 	struct nfs4_getattr_arg arg = {
5855 		.fh		= NFS_FH(inode),
5856 		.bitmask	= bitmask,
5857 	};
5858 	struct nfs4_getattr_res res = {
5859 		.fattr		= &fattr,
5860 		.label		= &label,
5861 		.server		= server,
5862 	};
5863 	struct rpc_message msg = {
5864 		.rpc_proc	= &nfs4_procedures[NFSPROC4_CLNT_GETATTR],
5865 		.rpc_argp	= &arg,
5866 		.rpc_resp	= &res,
5867 	};
5868 	int ret;
5869 
5870 	nfs_fattr_init(&fattr);
5871 
5872 	ret = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 0);
5873 	if (ret)
5874 		return ret;
5875 	if (!(fattr.valid & NFS_ATTR_FATTR_V4_SECURITY_LABEL))
5876 		return -ENOENT;
5877 	return 0;
5878 }
5879 
5880 static int nfs4_get_security_label(struct inode *inode, void *buf,
5881 					size_t buflen)
5882 {
5883 	struct nfs4_exception exception = {
5884 		.interruptible = true,
5885 	};
5886 	int err;
5887 
5888 	if (!nfs_server_capable(inode, NFS_CAP_SECURITY_LABEL))
5889 		return -EOPNOTSUPP;
5890 
5891 	do {
5892 		err = _nfs4_get_security_label(inode, buf, buflen);
5893 		trace_nfs4_get_security_label(inode, err);
5894 		err = nfs4_handle_exception(NFS_SERVER(inode), err,
5895 				&exception);
5896 	} while (exception.retry);
5897 	return err;
5898 }
5899 
5900 static int _nfs4_do_set_security_label(struct inode *inode,
5901 		struct nfs4_label *ilabel,
5902 		struct nfs_fattr *fattr,
5903 		struct nfs4_label *olabel)
5904 {
5905 
5906 	struct iattr sattr = {0};
5907 	struct nfs_server *server = NFS_SERVER(inode);
5908 	const u32 bitmask[3] = { 0, 0, FATTR4_WORD2_SECURITY_LABEL };
5909 	struct nfs_setattrargs arg = {
5910 		.fh		= NFS_FH(inode),
5911 		.iap		= &sattr,
5912 		.server		= server,
5913 		.bitmask	= bitmask,
5914 		.label		= ilabel,
5915 	};
5916 	struct nfs_setattrres res = {
5917 		.fattr		= fattr,
5918 		.label		= olabel,
5919 		.server		= server,
5920 	};
5921 	struct rpc_message msg = {
5922 		.rpc_proc	= &nfs4_procedures[NFSPROC4_CLNT_SETATTR],
5923 		.rpc_argp	= &arg,
5924 		.rpc_resp	= &res,
5925 	};
5926 	int status;
5927 
5928 	nfs4_stateid_copy(&arg.stateid, &zero_stateid);
5929 
5930 	status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1);
5931 	if (status)
5932 		dprintk("%s failed: %d\n", __func__, status);
5933 
5934 	return status;
5935 }
5936 
5937 static int nfs4_do_set_security_label(struct inode *inode,
5938 		struct nfs4_label *ilabel,
5939 		struct nfs_fattr *fattr,
5940 		struct nfs4_label *olabel)
5941 {
5942 	struct nfs4_exception exception = { };
5943 	int err;
5944 
5945 	do {
5946 		err = _nfs4_do_set_security_label(inode, ilabel,
5947 				fattr, olabel);
5948 		trace_nfs4_set_security_label(inode, err);
5949 		err = nfs4_handle_exception(NFS_SERVER(inode), err,
5950 				&exception);
5951 	} while (exception.retry);
5952 	return err;
5953 }
5954 
5955 static int
5956 nfs4_set_security_label(struct inode *inode, const void *buf, size_t buflen)
5957 {
5958 	struct nfs4_label ilabel, *olabel = NULL;
5959 	struct nfs_fattr fattr;
5960 	int status;
5961 
5962 	if (!nfs_server_capable(inode, NFS_CAP_SECURITY_LABEL))
5963 		return -EOPNOTSUPP;
5964 
5965 	nfs_fattr_init(&fattr);
5966 
5967 	ilabel.pi = 0;
5968 	ilabel.lfs = 0;
5969 	ilabel.label = (char *)buf;
5970 	ilabel.len = buflen;
5971 
5972 	olabel = nfs4_label_alloc(NFS_SERVER(inode), GFP_KERNEL);
5973 	if (IS_ERR(olabel)) {
5974 		status = -PTR_ERR(olabel);
5975 		goto out;
5976 	}
5977 
5978 	status = nfs4_do_set_security_label(inode, &ilabel, &fattr, olabel);
5979 	if (status == 0)
5980 		nfs_setsecurity(inode, &fattr, olabel);
5981 
5982 	nfs4_label_free(olabel);
5983 out:
5984 	return status;
5985 }
5986 #endif	/* CONFIG_NFS_V4_SECURITY_LABEL */
5987 
5988 
5989 static void nfs4_init_boot_verifier(const struct nfs_client *clp,
5990 				    nfs4_verifier *bootverf)
5991 {
5992 	__be32 verf[2];
5993 
5994 	if (test_bit(NFS4CLNT_PURGE_STATE, &clp->cl_state)) {
5995 		/* An impossible timestamp guarantees this value
5996 		 * will never match a generated boot time. */
5997 		verf[0] = cpu_to_be32(U32_MAX);
5998 		verf[1] = cpu_to_be32(U32_MAX);
5999 	} else {
6000 		struct nfs_net *nn = net_generic(clp->cl_net, nfs_net_id);
6001 		u64 ns = ktime_to_ns(nn->boot_time);
6002 
6003 		verf[0] = cpu_to_be32(ns >> 32);
6004 		verf[1] = cpu_to_be32(ns);
6005 	}
6006 	memcpy(bootverf->data, verf, sizeof(bootverf->data));
6007 }
6008 
6009 static int
6010 nfs4_init_nonuniform_client_string(struct nfs_client *clp)
6011 {
6012 	size_t len;
6013 	char *str;
6014 
6015 	if (clp->cl_owner_id != NULL)
6016 		return 0;
6017 
6018 	rcu_read_lock();
6019 	len = 14 +
6020 		strlen(clp->cl_rpcclient->cl_nodename) +
6021 		1 +
6022 		strlen(rpc_peeraddr2str(clp->cl_rpcclient, RPC_DISPLAY_ADDR)) +
6023 		1;
6024 	rcu_read_unlock();
6025 	if (nfs4_client_id_uniquifier[0] != '\0')
6026 		len += strlen(nfs4_client_id_uniquifier) + 1;
6027 	if (len > NFS4_OPAQUE_LIMIT + 1)
6028 		return -EINVAL;
6029 
6030 	/*
6031 	 * Since this string is allocated at mount time, and held until the
6032 	 * nfs_client is destroyed, we can use GFP_KERNEL here w/o worrying
6033 	 * about a memory-reclaim deadlock.
6034 	 */
6035 	str = kmalloc(len, GFP_KERNEL);
6036 	if (!str)
6037 		return -ENOMEM;
6038 
6039 	rcu_read_lock();
6040 	if (nfs4_client_id_uniquifier[0] != '\0')
6041 		scnprintf(str, len, "Linux NFSv4.0 %s/%s/%s",
6042 			  clp->cl_rpcclient->cl_nodename,
6043 			  nfs4_client_id_uniquifier,
6044 			  rpc_peeraddr2str(clp->cl_rpcclient,
6045 					   RPC_DISPLAY_ADDR));
6046 	else
6047 		scnprintf(str, len, "Linux NFSv4.0 %s/%s",
6048 			  clp->cl_rpcclient->cl_nodename,
6049 			  rpc_peeraddr2str(clp->cl_rpcclient,
6050 					   RPC_DISPLAY_ADDR));
6051 	rcu_read_unlock();
6052 
6053 	clp->cl_owner_id = str;
6054 	return 0;
6055 }
6056 
6057 static int
6058 nfs4_init_uniquifier_client_string(struct nfs_client *clp)
6059 {
6060 	size_t len;
6061 	char *str;
6062 
6063 	len = 10 + 10 + 1 + 10 + 1 +
6064 		strlen(nfs4_client_id_uniquifier) + 1 +
6065 		strlen(clp->cl_rpcclient->cl_nodename) + 1;
6066 
6067 	if (len > NFS4_OPAQUE_LIMIT + 1)
6068 		return -EINVAL;
6069 
6070 	/*
6071 	 * Since this string is allocated at mount time, and held until the
6072 	 * nfs_client is destroyed, we can use GFP_KERNEL here w/o worrying
6073 	 * about a memory-reclaim deadlock.
6074 	 */
6075 	str = kmalloc(len, GFP_KERNEL);
6076 	if (!str)
6077 		return -ENOMEM;
6078 
6079 	scnprintf(str, len, "Linux NFSv%u.%u %s/%s",
6080 			clp->rpc_ops->version, clp->cl_minorversion,
6081 			nfs4_client_id_uniquifier,
6082 			clp->cl_rpcclient->cl_nodename);
6083 	clp->cl_owner_id = str;
6084 	return 0;
6085 }
6086 
6087 static int
6088 nfs4_init_uniform_client_string(struct nfs_client *clp)
6089 {
6090 	size_t len;
6091 	char *str;
6092 
6093 	if (clp->cl_owner_id != NULL)
6094 		return 0;
6095 
6096 	if (nfs4_client_id_uniquifier[0] != '\0')
6097 		return nfs4_init_uniquifier_client_string(clp);
6098 
6099 	len = 10 + 10 + 1 + 10 + 1 +
6100 		strlen(clp->cl_rpcclient->cl_nodename) + 1;
6101 
6102 	if (len > NFS4_OPAQUE_LIMIT + 1)
6103 		return -EINVAL;
6104 
6105 	/*
6106 	 * Since this string is allocated at mount time, and held until the
6107 	 * nfs_client is destroyed, we can use GFP_KERNEL here w/o worrying
6108 	 * about a memory-reclaim deadlock.
6109 	 */
6110 	str = kmalloc(len, GFP_KERNEL);
6111 	if (!str)
6112 		return -ENOMEM;
6113 
6114 	scnprintf(str, len, "Linux NFSv%u.%u %s",
6115 			clp->rpc_ops->version, clp->cl_minorversion,
6116 			clp->cl_rpcclient->cl_nodename);
6117 	clp->cl_owner_id = str;
6118 	return 0;
6119 }
6120 
6121 /*
6122  * nfs4_callback_up_net() starts only "tcp" and "tcp6" callback
6123  * services.  Advertise one based on the address family of the
6124  * clientaddr.
6125  */
6126 static unsigned int
6127 nfs4_init_callback_netid(const struct nfs_client *clp, char *buf, size_t len)
6128 {
6129 	if (strchr(clp->cl_ipaddr, ':') != NULL)
6130 		return scnprintf(buf, len, "tcp6");
6131 	else
6132 		return scnprintf(buf, len, "tcp");
6133 }
6134 
6135 static void nfs4_setclientid_done(struct rpc_task *task, void *calldata)
6136 {
6137 	struct nfs4_setclientid *sc = calldata;
6138 
6139 	if (task->tk_status == 0)
6140 		sc->sc_cred = get_rpccred(task->tk_rqstp->rq_cred);
6141 }
6142 
6143 static const struct rpc_call_ops nfs4_setclientid_ops = {
6144 	.rpc_call_done = nfs4_setclientid_done,
6145 };
6146 
6147 /**
6148  * nfs4_proc_setclientid - Negotiate client ID
6149  * @clp: state data structure
6150  * @program: RPC program for NFSv4 callback service
6151  * @port: IP port number for NFS4 callback service
6152  * @cred: credential to use for this call
6153  * @res: where to place the result
6154  *
6155  * Returns zero, a negative errno, or a negative NFS4ERR status code.
6156  */
6157 int nfs4_proc_setclientid(struct nfs_client *clp, u32 program,
6158 		unsigned short port, const struct cred *cred,
6159 		struct nfs4_setclientid_res *res)
6160 {
6161 	nfs4_verifier sc_verifier;
6162 	struct nfs4_setclientid setclientid = {
6163 		.sc_verifier = &sc_verifier,
6164 		.sc_prog = program,
6165 		.sc_clnt = clp,
6166 	};
6167 	struct rpc_message msg = {
6168 		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETCLIENTID],
6169 		.rpc_argp = &setclientid,
6170 		.rpc_resp = res,
6171 		.rpc_cred = cred,
6172 	};
6173 	struct rpc_task_setup task_setup_data = {
6174 		.rpc_client = clp->cl_rpcclient,
6175 		.rpc_message = &msg,
6176 		.callback_ops = &nfs4_setclientid_ops,
6177 		.callback_data = &setclientid,
6178 		.flags = RPC_TASK_TIMEOUT | RPC_TASK_NO_ROUND_ROBIN,
6179 	};
6180 	unsigned long now = jiffies;
6181 	int status;
6182 
6183 	/* nfs_client_id4 */
6184 	nfs4_init_boot_verifier(clp, &sc_verifier);
6185 
6186 	if (test_bit(NFS_CS_MIGRATION, &clp->cl_flags))
6187 		status = nfs4_init_uniform_client_string(clp);
6188 	else
6189 		status = nfs4_init_nonuniform_client_string(clp);
6190 
6191 	if (status)
6192 		goto out;
6193 
6194 	/* cb_client4 */
6195 	setclientid.sc_netid_len =
6196 				nfs4_init_callback_netid(clp,
6197 						setclientid.sc_netid,
6198 						sizeof(setclientid.sc_netid));
6199 	setclientid.sc_uaddr_len = scnprintf(setclientid.sc_uaddr,
6200 				sizeof(setclientid.sc_uaddr), "%s.%u.%u",
6201 				clp->cl_ipaddr, port >> 8, port & 255);
6202 
6203 	dprintk("NFS call  setclientid auth=%s, '%s'\n",
6204 		clp->cl_rpcclient->cl_auth->au_ops->au_name,
6205 		clp->cl_owner_id);
6206 
6207 	status = nfs4_call_sync_custom(&task_setup_data);
6208 	if (setclientid.sc_cred) {
6209 		kfree(clp->cl_acceptor);
6210 		clp->cl_acceptor = rpcauth_stringify_acceptor(setclientid.sc_cred);
6211 		put_rpccred(setclientid.sc_cred);
6212 	}
6213 
6214 	if (status == 0)
6215 		do_renew_lease(clp, now);
6216 out:
6217 	trace_nfs4_setclientid(clp, status);
6218 	dprintk("NFS reply setclientid: %d\n", status);
6219 	return status;
6220 }
6221 
6222 /**
6223  * nfs4_proc_setclientid_confirm - Confirm client ID
6224  * @clp: state data structure
6225  * @arg: result of a previous SETCLIENTID
6226  * @cred: credential to use for this call
6227  *
6228  * Returns zero, a negative errno, or a negative NFS4ERR status code.
6229  */
6230 int nfs4_proc_setclientid_confirm(struct nfs_client *clp,
6231 		struct nfs4_setclientid_res *arg,
6232 		const struct cred *cred)
6233 {
6234 	struct rpc_message msg = {
6235 		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETCLIENTID_CONFIRM],
6236 		.rpc_argp = arg,
6237 		.rpc_cred = cred,
6238 	};
6239 	int status;
6240 
6241 	dprintk("NFS call  setclientid_confirm auth=%s, (client ID %llx)\n",
6242 		clp->cl_rpcclient->cl_auth->au_ops->au_name,
6243 		clp->cl_clientid);
6244 	status = rpc_call_sync(clp->cl_rpcclient, &msg,
6245 			       RPC_TASK_TIMEOUT | RPC_TASK_NO_ROUND_ROBIN);
6246 	trace_nfs4_setclientid_confirm(clp, status);
6247 	dprintk("NFS reply setclientid_confirm: %d\n", status);
6248 	return status;
6249 }
6250 
6251 struct nfs4_delegreturndata {
6252 	struct nfs4_delegreturnargs args;
6253 	struct nfs4_delegreturnres res;
6254 	struct nfs_fh fh;
6255 	nfs4_stateid stateid;
6256 	unsigned long timestamp;
6257 	struct {
6258 		struct nfs4_layoutreturn_args arg;
6259 		struct nfs4_layoutreturn_res res;
6260 		struct nfs4_xdr_opaque_data ld_private;
6261 		u32 roc_barrier;
6262 		bool roc;
6263 	} lr;
6264 	struct nfs_fattr fattr;
6265 	int rpc_status;
6266 	struct inode *inode;
6267 };
6268 
6269 static void nfs4_delegreturn_done(struct rpc_task *task, void *calldata)
6270 {
6271 	struct nfs4_delegreturndata *data = calldata;
6272 	struct nfs4_exception exception = {
6273 		.inode = data->inode,
6274 		.stateid = &data->stateid,
6275 	};
6276 
6277 	if (!nfs4_sequence_done(task, &data->res.seq_res))
6278 		return;
6279 
6280 	trace_nfs4_delegreturn_exit(&data->args, &data->res, task->tk_status);
6281 
6282 	/* Handle Layoutreturn errors */
6283 	if (pnfs_roc_done(task, data->inode,
6284 				&data->args.lr_args,
6285 				&data->res.lr_res,
6286 				&data->res.lr_ret) == -EAGAIN)
6287 		goto out_restart;
6288 
6289 	switch (task->tk_status) {
6290 	case 0:
6291 		renew_lease(data->res.server, data->timestamp);
6292 		break;
6293 	case -NFS4ERR_ADMIN_REVOKED:
6294 	case -NFS4ERR_DELEG_REVOKED:
6295 	case -NFS4ERR_EXPIRED:
6296 		nfs4_free_revoked_stateid(data->res.server,
6297 				data->args.stateid,
6298 				task->tk_msg.rpc_cred);
6299 		fallthrough;
6300 	case -NFS4ERR_BAD_STATEID:
6301 	case -NFS4ERR_STALE_STATEID:
6302 	case -ETIMEDOUT:
6303 		task->tk_status = 0;
6304 		break;
6305 	case -NFS4ERR_OLD_STATEID:
6306 		if (!nfs4_refresh_delegation_stateid(&data->stateid, data->inode))
6307 			nfs4_stateid_seqid_inc(&data->stateid);
6308 		if (data->args.bitmask) {
6309 			data->args.bitmask = NULL;
6310 			data->res.fattr = NULL;
6311 		}
6312 		goto out_restart;
6313 	case -NFS4ERR_ACCESS:
6314 		if (data->args.bitmask) {
6315 			data->args.bitmask = NULL;
6316 			data->res.fattr = NULL;
6317 			goto out_restart;
6318 		}
6319 		fallthrough;
6320 	default:
6321 		task->tk_status = nfs4_async_handle_exception(task,
6322 				data->res.server, task->tk_status,
6323 				&exception);
6324 		if (exception.retry)
6325 			goto out_restart;
6326 	}
6327 	nfs_delegation_mark_returned(data->inode, data->args.stateid);
6328 	data->rpc_status = task->tk_status;
6329 	return;
6330 out_restart:
6331 	task->tk_status = 0;
6332 	rpc_restart_call_prepare(task);
6333 }
6334 
6335 static void nfs4_delegreturn_release(void *calldata)
6336 {
6337 	struct nfs4_delegreturndata *data = calldata;
6338 	struct inode *inode = data->inode;
6339 
6340 	if (inode) {
6341 		if (data->lr.roc)
6342 			pnfs_roc_release(&data->lr.arg, &data->lr.res,
6343 					data->res.lr_ret);
6344 		nfs_post_op_update_inode_force_wcc(inode, &data->fattr);
6345 		nfs_iput_and_deactive(inode);
6346 	}
6347 	kfree(calldata);
6348 }
6349 
6350 static void nfs4_delegreturn_prepare(struct rpc_task *task, void *data)
6351 {
6352 	struct nfs4_delegreturndata *d_data;
6353 	struct pnfs_layout_hdr *lo;
6354 
6355 	d_data = (struct nfs4_delegreturndata *)data;
6356 
6357 	if (!d_data->lr.roc && nfs4_wait_on_layoutreturn(d_data->inode, task)) {
6358 		nfs4_sequence_done(task, &d_data->res.seq_res);
6359 		return;
6360 	}
6361 
6362 	lo = d_data->args.lr_args ? d_data->args.lr_args->layout : NULL;
6363 	if (lo && !pnfs_layout_is_valid(lo)) {
6364 		d_data->args.lr_args = NULL;
6365 		d_data->res.lr_res = NULL;
6366 	}
6367 
6368 	nfs4_setup_sequence(d_data->res.server->nfs_client,
6369 			&d_data->args.seq_args,
6370 			&d_data->res.seq_res,
6371 			task);
6372 }
6373 
6374 static const struct rpc_call_ops nfs4_delegreturn_ops = {
6375 	.rpc_call_prepare = nfs4_delegreturn_prepare,
6376 	.rpc_call_done = nfs4_delegreturn_done,
6377 	.rpc_release = nfs4_delegreturn_release,
6378 };
6379 
6380 static int _nfs4_proc_delegreturn(struct inode *inode, const struct cred *cred, const nfs4_stateid *stateid, int issync)
6381 {
6382 	struct nfs4_delegreturndata *data;
6383 	struct nfs_server *server = NFS_SERVER(inode);
6384 	struct rpc_task *task;
6385 	struct rpc_message msg = {
6386 		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_DELEGRETURN],
6387 		.rpc_cred = cred,
6388 	};
6389 	struct rpc_task_setup task_setup_data = {
6390 		.rpc_client = server->client,
6391 		.rpc_message = &msg,
6392 		.callback_ops = &nfs4_delegreturn_ops,
6393 		.flags = RPC_TASK_ASYNC | RPC_TASK_TIMEOUT,
6394 	};
6395 	int status = 0;
6396 
6397 	data = kzalloc(sizeof(*data), GFP_NOFS);
6398 	if (data == NULL)
6399 		return -ENOMEM;
6400 	nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 1, 0);
6401 
6402 	nfs4_state_protect(server->nfs_client,
6403 			NFS_SP4_MACH_CRED_CLEANUP,
6404 			&task_setup_data.rpc_client, &msg);
6405 
6406 	data->args.fhandle = &data->fh;
6407 	data->args.stateid = &data->stateid;
6408 	data->args.bitmask = server->cache_consistency_bitmask;
6409 	nfs_copy_fh(&data->fh, NFS_FH(inode));
6410 	nfs4_stateid_copy(&data->stateid, stateid);
6411 	data->res.fattr = &data->fattr;
6412 	data->res.server = server;
6413 	data->res.lr_ret = -NFS4ERR_NOMATCHING_LAYOUT;
6414 	data->lr.arg.ld_private = &data->lr.ld_private;
6415 	nfs_fattr_init(data->res.fattr);
6416 	data->timestamp = jiffies;
6417 	data->rpc_status = 0;
6418 	data->lr.roc = pnfs_roc(inode, &data->lr.arg, &data->lr.res, cred);
6419 	data->inode = nfs_igrab_and_active(inode);
6420 	if (data->inode) {
6421 		if (data->lr.roc) {
6422 			data->args.lr_args = &data->lr.arg;
6423 			data->res.lr_res = &data->lr.res;
6424 		}
6425 	} else if (data->lr.roc) {
6426 		pnfs_roc_release(&data->lr.arg, &data->lr.res, 0);
6427 		data->lr.roc = false;
6428 	}
6429 
6430 	task_setup_data.callback_data = data;
6431 	msg.rpc_argp = &data->args;
6432 	msg.rpc_resp = &data->res;
6433 	task = rpc_run_task(&task_setup_data);
6434 	if (IS_ERR(task))
6435 		return PTR_ERR(task);
6436 	if (!issync)
6437 		goto out;
6438 	status = rpc_wait_for_completion_task(task);
6439 	if (status != 0)
6440 		goto out;
6441 	status = data->rpc_status;
6442 out:
6443 	rpc_put_task(task);
6444 	return status;
6445 }
6446 
6447 int nfs4_proc_delegreturn(struct inode *inode, const struct cred *cred, const nfs4_stateid *stateid, int issync)
6448 {
6449 	struct nfs_server *server = NFS_SERVER(inode);
6450 	struct nfs4_exception exception = { };
6451 	int err;
6452 	do {
6453 		err = _nfs4_proc_delegreturn(inode, cred, stateid, issync);
6454 		trace_nfs4_delegreturn(inode, stateid, err);
6455 		switch (err) {
6456 			case -NFS4ERR_STALE_STATEID:
6457 			case -NFS4ERR_EXPIRED:
6458 			case 0:
6459 				return 0;
6460 		}
6461 		err = nfs4_handle_exception(server, err, &exception);
6462 	} while (exception.retry);
6463 	return err;
6464 }
6465 
6466 static int _nfs4_proc_getlk(struct nfs4_state *state, int cmd, struct file_lock *request)
6467 {
6468 	struct inode *inode = state->inode;
6469 	struct nfs_server *server = NFS_SERVER(inode);
6470 	struct nfs_client *clp = server->nfs_client;
6471 	struct nfs_lockt_args arg = {
6472 		.fh = NFS_FH(inode),
6473 		.fl = request,
6474 	};
6475 	struct nfs_lockt_res res = {
6476 		.denied = request,
6477 	};
6478 	struct rpc_message msg = {
6479 		.rpc_proc	= &nfs4_procedures[NFSPROC4_CLNT_LOCKT],
6480 		.rpc_argp	= &arg,
6481 		.rpc_resp	= &res,
6482 		.rpc_cred	= state->owner->so_cred,
6483 	};
6484 	struct nfs4_lock_state *lsp;
6485 	int status;
6486 
6487 	arg.lock_owner.clientid = clp->cl_clientid;
6488 	status = nfs4_set_lock_state(state, request);
6489 	if (status != 0)
6490 		goto out;
6491 	lsp = request->fl_u.nfs4_fl.owner;
6492 	arg.lock_owner.id = lsp->ls_seqid.owner_id;
6493 	arg.lock_owner.s_dev = server->s_dev;
6494 	status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1);
6495 	switch (status) {
6496 		case 0:
6497 			request->fl_type = F_UNLCK;
6498 			break;
6499 		case -NFS4ERR_DENIED:
6500 			status = 0;
6501 	}
6502 	request->fl_ops->fl_release_private(request);
6503 	request->fl_ops = NULL;
6504 out:
6505 	return status;
6506 }
6507 
6508 static int nfs4_proc_getlk(struct nfs4_state *state, int cmd, struct file_lock *request)
6509 {
6510 	struct nfs4_exception exception = {
6511 		.interruptible = true,
6512 	};
6513 	int err;
6514 
6515 	do {
6516 		err = _nfs4_proc_getlk(state, cmd, request);
6517 		trace_nfs4_get_lock(request, state, cmd, err);
6518 		err = nfs4_handle_exception(NFS_SERVER(state->inode), err,
6519 				&exception);
6520 	} while (exception.retry);
6521 	return err;
6522 }
6523 
6524 /*
6525  * Update the seqid of a lock stateid after receiving
6526  * NFS4ERR_OLD_STATEID
6527  */
6528 static bool nfs4_refresh_lock_old_stateid(nfs4_stateid *dst,
6529 		struct nfs4_lock_state *lsp)
6530 {
6531 	struct nfs4_state *state = lsp->ls_state;
6532 	bool ret = false;
6533 
6534 	spin_lock(&state->state_lock);
6535 	if (!nfs4_stateid_match_other(dst, &lsp->ls_stateid))
6536 		goto out;
6537 	if (!nfs4_stateid_is_newer(&lsp->ls_stateid, dst))
6538 		nfs4_stateid_seqid_inc(dst);
6539 	else
6540 		dst->seqid = lsp->ls_stateid.seqid;
6541 	ret = true;
6542 out:
6543 	spin_unlock(&state->state_lock);
6544 	return ret;
6545 }
6546 
6547 static bool nfs4_sync_lock_stateid(nfs4_stateid *dst,
6548 		struct nfs4_lock_state *lsp)
6549 {
6550 	struct nfs4_state *state = lsp->ls_state;
6551 	bool ret;
6552 
6553 	spin_lock(&state->state_lock);
6554 	ret = !nfs4_stateid_match_other(dst, &lsp->ls_stateid);
6555 	nfs4_stateid_copy(dst, &lsp->ls_stateid);
6556 	spin_unlock(&state->state_lock);
6557 	return ret;
6558 }
6559 
6560 struct nfs4_unlockdata {
6561 	struct nfs_locku_args arg;
6562 	struct nfs_locku_res res;
6563 	struct nfs4_lock_state *lsp;
6564 	struct nfs_open_context *ctx;
6565 	struct nfs_lock_context *l_ctx;
6566 	struct file_lock fl;
6567 	struct nfs_server *server;
6568 	unsigned long timestamp;
6569 };
6570 
6571 static struct nfs4_unlockdata *nfs4_alloc_unlockdata(struct file_lock *fl,
6572 		struct nfs_open_context *ctx,
6573 		struct nfs4_lock_state *lsp,
6574 		struct nfs_seqid *seqid)
6575 {
6576 	struct nfs4_unlockdata *p;
6577 	struct nfs4_state *state = lsp->ls_state;
6578 	struct inode *inode = state->inode;
6579 
6580 	p = kzalloc(sizeof(*p), GFP_NOFS);
6581 	if (p == NULL)
6582 		return NULL;
6583 	p->arg.fh = NFS_FH(inode);
6584 	p->arg.fl = &p->fl;
6585 	p->arg.seqid = seqid;
6586 	p->res.seqid = seqid;
6587 	p->lsp = lsp;
6588 	/* Ensure we don't close file until we're done freeing locks! */
6589 	p->ctx = get_nfs_open_context(ctx);
6590 	p->l_ctx = nfs_get_lock_context(ctx);
6591 	locks_init_lock(&p->fl);
6592 	locks_copy_lock(&p->fl, fl);
6593 	p->server = NFS_SERVER(inode);
6594 	spin_lock(&state->state_lock);
6595 	nfs4_stateid_copy(&p->arg.stateid, &lsp->ls_stateid);
6596 	spin_unlock(&state->state_lock);
6597 	return p;
6598 }
6599 
6600 static void nfs4_locku_release_calldata(void *data)
6601 {
6602 	struct nfs4_unlockdata *calldata = data;
6603 	nfs_free_seqid(calldata->arg.seqid);
6604 	nfs4_put_lock_state(calldata->lsp);
6605 	nfs_put_lock_context(calldata->l_ctx);
6606 	put_nfs_open_context(calldata->ctx);
6607 	kfree(calldata);
6608 }
6609 
6610 static void nfs4_locku_done(struct rpc_task *task, void *data)
6611 {
6612 	struct nfs4_unlockdata *calldata = data;
6613 	struct nfs4_exception exception = {
6614 		.inode = calldata->lsp->ls_state->inode,
6615 		.stateid = &calldata->arg.stateid,
6616 	};
6617 
6618 	if (!nfs4_sequence_done(task, &calldata->res.seq_res))
6619 		return;
6620 	switch (task->tk_status) {
6621 		case 0:
6622 			renew_lease(calldata->server, calldata->timestamp);
6623 			locks_lock_inode_wait(calldata->lsp->ls_state->inode, &calldata->fl);
6624 			if (nfs4_update_lock_stateid(calldata->lsp,
6625 					&calldata->res.stateid))
6626 				break;
6627 			fallthrough;
6628 		case -NFS4ERR_ADMIN_REVOKED:
6629 		case -NFS4ERR_EXPIRED:
6630 			nfs4_free_revoked_stateid(calldata->server,
6631 					&calldata->arg.stateid,
6632 					task->tk_msg.rpc_cred);
6633 			fallthrough;
6634 		case -NFS4ERR_BAD_STATEID:
6635 		case -NFS4ERR_STALE_STATEID:
6636 			if (nfs4_sync_lock_stateid(&calldata->arg.stateid,
6637 						calldata->lsp))
6638 				rpc_restart_call_prepare(task);
6639 			break;
6640 		case -NFS4ERR_OLD_STATEID:
6641 			if (nfs4_refresh_lock_old_stateid(&calldata->arg.stateid,
6642 						calldata->lsp))
6643 				rpc_restart_call_prepare(task);
6644 			break;
6645 		default:
6646 			task->tk_status = nfs4_async_handle_exception(task,
6647 					calldata->server, task->tk_status,
6648 					&exception);
6649 			if (exception.retry)
6650 				rpc_restart_call_prepare(task);
6651 	}
6652 	nfs_release_seqid(calldata->arg.seqid);
6653 }
6654 
6655 static void nfs4_locku_prepare(struct rpc_task *task, void *data)
6656 {
6657 	struct nfs4_unlockdata *calldata = data;
6658 
6659 	if (test_bit(NFS_CONTEXT_UNLOCK, &calldata->l_ctx->open_context->flags) &&
6660 		nfs_async_iocounter_wait(task, calldata->l_ctx))
6661 		return;
6662 
6663 	if (nfs_wait_on_sequence(calldata->arg.seqid, task) != 0)
6664 		goto out_wait;
6665 	if (test_bit(NFS_LOCK_INITIALIZED, &calldata->lsp->ls_flags) == 0) {
6666 		/* Note: exit _without_ running nfs4_locku_done */
6667 		goto out_no_action;
6668 	}
6669 	calldata->timestamp = jiffies;
6670 	if (nfs4_setup_sequence(calldata->server->nfs_client,
6671 				&calldata->arg.seq_args,
6672 				&calldata->res.seq_res,
6673 				task) != 0)
6674 		nfs_release_seqid(calldata->arg.seqid);
6675 	return;
6676 out_no_action:
6677 	task->tk_action = NULL;
6678 out_wait:
6679 	nfs4_sequence_done(task, &calldata->res.seq_res);
6680 }
6681 
6682 static const struct rpc_call_ops nfs4_locku_ops = {
6683 	.rpc_call_prepare = nfs4_locku_prepare,
6684 	.rpc_call_done = nfs4_locku_done,
6685 	.rpc_release = nfs4_locku_release_calldata,
6686 };
6687 
6688 static struct rpc_task *nfs4_do_unlck(struct file_lock *fl,
6689 		struct nfs_open_context *ctx,
6690 		struct nfs4_lock_state *lsp,
6691 		struct nfs_seqid *seqid)
6692 {
6693 	struct nfs4_unlockdata *data;
6694 	struct rpc_message msg = {
6695 		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOCKU],
6696 		.rpc_cred = ctx->cred,
6697 	};
6698 	struct rpc_task_setup task_setup_data = {
6699 		.rpc_client = NFS_CLIENT(lsp->ls_state->inode),
6700 		.rpc_message = &msg,
6701 		.callback_ops = &nfs4_locku_ops,
6702 		.workqueue = nfsiod_workqueue,
6703 		.flags = RPC_TASK_ASYNC,
6704 	};
6705 
6706 	nfs4_state_protect(NFS_SERVER(lsp->ls_state->inode)->nfs_client,
6707 		NFS_SP4_MACH_CRED_CLEANUP, &task_setup_data.rpc_client, &msg);
6708 
6709 	/* Ensure this is an unlock - when canceling a lock, the
6710 	 * canceled lock is passed in, and it won't be an unlock.
6711 	 */
6712 	fl->fl_type = F_UNLCK;
6713 	if (fl->fl_flags & FL_CLOSE)
6714 		set_bit(NFS_CONTEXT_UNLOCK, &ctx->flags);
6715 
6716 	data = nfs4_alloc_unlockdata(fl, ctx, lsp, seqid);
6717 	if (data == NULL) {
6718 		nfs_free_seqid(seqid);
6719 		return ERR_PTR(-ENOMEM);
6720 	}
6721 
6722 	nfs4_init_sequence(&data->arg.seq_args, &data->res.seq_res, 1, 0);
6723 	msg.rpc_argp = &data->arg;
6724 	msg.rpc_resp = &data->res;
6725 	task_setup_data.callback_data = data;
6726 	return rpc_run_task(&task_setup_data);
6727 }
6728 
6729 static int nfs4_proc_unlck(struct nfs4_state *state, int cmd, struct file_lock *request)
6730 {
6731 	struct inode *inode = state->inode;
6732 	struct nfs4_state_owner *sp = state->owner;
6733 	struct nfs_inode *nfsi = NFS_I(inode);
6734 	struct nfs_seqid *seqid;
6735 	struct nfs4_lock_state *lsp;
6736 	struct rpc_task *task;
6737 	struct nfs_seqid *(*alloc_seqid)(struct nfs_seqid_counter *, gfp_t);
6738 	int status = 0;
6739 	unsigned char fl_flags = request->fl_flags;
6740 
6741 	status = nfs4_set_lock_state(state, request);
6742 	/* Unlock _before_ we do the RPC call */
6743 	request->fl_flags |= FL_EXISTS;
6744 	/* Exclude nfs_delegation_claim_locks() */
6745 	mutex_lock(&sp->so_delegreturn_mutex);
6746 	/* Exclude nfs4_reclaim_open_stateid() - note nesting! */
6747 	down_read(&nfsi->rwsem);
6748 	if (locks_lock_inode_wait(inode, request) == -ENOENT) {
6749 		up_read(&nfsi->rwsem);
6750 		mutex_unlock(&sp->so_delegreturn_mutex);
6751 		goto out;
6752 	}
6753 	up_read(&nfsi->rwsem);
6754 	mutex_unlock(&sp->so_delegreturn_mutex);
6755 	if (status != 0)
6756 		goto out;
6757 	/* Is this a delegated lock? */
6758 	lsp = request->fl_u.nfs4_fl.owner;
6759 	if (test_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags) == 0)
6760 		goto out;
6761 	alloc_seqid = NFS_SERVER(inode)->nfs_client->cl_mvops->alloc_seqid;
6762 	seqid = alloc_seqid(&lsp->ls_seqid, GFP_KERNEL);
6763 	status = -ENOMEM;
6764 	if (IS_ERR(seqid))
6765 		goto out;
6766 	task = nfs4_do_unlck(request, nfs_file_open_context(request->fl_file), lsp, seqid);
6767 	status = PTR_ERR(task);
6768 	if (IS_ERR(task))
6769 		goto out;
6770 	status = rpc_wait_for_completion_task(task);
6771 	rpc_put_task(task);
6772 out:
6773 	request->fl_flags = fl_flags;
6774 	trace_nfs4_unlock(request, state, F_SETLK, status);
6775 	return status;
6776 }
6777 
6778 struct nfs4_lockdata {
6779 	struct nfs_lock_args arg;
6780 	struct nfs_lock_res res;
6781 	struct nfs4_lock_state *lsp;
6782 	struct nfs_open_context *ctx;
6783 	struct file_lock fl;
6784 	unsigned long timestamp;
6785 	int rpc_status;
6786 	int cancelled;
6787 	struct nfs_server *server;
6788 };
6789 
6790 static struct nfs4_lockdata *nfs4_alloc_lockdata(struct file_lock *fl,
6791 		struct nfs_open_context *ctx, struct nfs4_lock_state *lsp,
6792 		gfp_t gfp_mask)
6793 {
6794 	struct nfs4_lockdata *p;
6795 	struct inode *inode = lsp->ls_state->inode;
6796 	struct nfs_server *server = NFS_SERVER(inode);
6797 	struct nfs_seqid *(*alloc_seqid)(struct nfs_seqid_counter *, gfp_t);
6798 
6799 	p = kzalloc(sizeof(*p), gfp_mask);
6800 	if (p == NULL)
6801 		return NULL;
6802 
6803 	p->arg.fh = NFS_FH(inode);
6804 	p->arg.fl = &p->fl;
6805 	p->arg.open_seqid = nfs_alloc_seqid(&lsp->ls_state->owner->so_seqid, gfp_mask);
6806 	if (IS_ERR(p->arg.open_seqid))
6807 		goto out_free;
6808 	alloc_seqid = server->nfs_client->cl_mvops->alloc_seqid;
6809 	p->arg.lock_seqid = alloc_seqid(&lsp->ls_seqid, gfp_mask);
6810 	if (IS_ERR(p->arg.lock_seqid))
6811 		goto out_free_seqid;
6812 	p->arg.lock_owner.clientid = server->nfs_client->cl_clientid;
6813 	p->arg.lock_owner.id = lsp->ls_seqid.owner_id;
6814 	p->arg.lock_owner.s_dev = server->s_dev;
6815 	p->res.lock_seqid = p->arg.lock_seqid;
6816 	p->lsp = lsp;
6817 	p->server = server;
6818 	p->ctx = get_nfs_open_context(ctx);
6819 	locks_init_lock(&p->fl);
6820 	locks_copy_lock(&p->fl, fl);
6821 	return p;
6822 out_free_seqid:
6823 	nfs_free_seqid(p->arg.open_seqid);
6824 out_free:
6825 	kfree(p);
6826 	return NULL;
6827 }
6828 
6829 static void nfs4_lock_prepare(struct rpc_task *task, void *calldata)
6830 {
6831 	struct nfs4_lockdata *data = calldata;
6832 	struct nfs4_state *state = data->lsp->ls_state;
6833 
6834 	dprintk("%s: begin!\n", __func__);
6835 	if (nfs_wait_on_sequence(data->arg.lock_seqid, task) != 0)
6836 		goto out_wait;
6837 	/* Do we need to do an open_to_lock_owner? */
6838 	if (!test_bit(NFS_LOCK_INITIALIZED, &data->lsp->ls_flags)) {
6839 		if (nfs_wait_on_sequence(data->arg.open_seqid, task) != 0) {
6840 			goto out_release_lock_seqid;
6841 		}
6842 		nfs4_stateid_copy(&data->arg.open_stateid,
6843 				&state->open_stateid);
6844 		data->arg.new_lock_owner = 1;
6845 		data->res.open_seqid = data->arg.open_seqid;
6846 	} else {
6847 		data->arg.new_lock_owner = 0;
6848 		nfs4_stateid_copy(&data->arg.lock_stateid,
6849 				&data->lsp->ls_stateid);
6850 	}
6851 	if (!nfs4_valid_open_stateid(state)) {
6852 		data->rpc_status = -EBADF;
6853 		task->tk_action = NULL;
6854 		goto out_release_open_seqid;
6855 	}
6856 	data->timestamp = jiffies;
6857 	if (nfs4_setup_sequence(data->server->nfs_client,
6858 				&data->arg.seq_args,
6859 				&data->res.seq_res,
6860 				task) == 0)
6861 		return;
6862 out_release_open_seqid:
6863 	nfs_release_seqid(data->arg.open_seqid);
6864 out_release_lock_seqid:
6865 	nfs_release_seqid(data->arg.lock_seqid);
6866 out_wait:
6867 	nfs4_sequence_done(task, &data->res.seq_res);
6868 	dprintk("%s: done!, ret = %d\n", __func__, data->rpc_status);
6869 }
6870 
6871 static void nfs4_lock_done(struct rpc_task *task, void *calldata)
6872 {
6873 	struct nfs4_lockdata *data = calldata;
6874 	struct nfs4_lock_state *lsp = data->lsp;
6875 
6876 	dprintk("%s: begin!\n", __func__);
6877 
6878 	if (!nfs4_sequence_done(task, &data->res.seq_res))
6879 		return;
6880 
6881 	data->rpc_status = task->tk_status;
6882 	switch (task->tk_status) {
6883 	case 0:
6884 		renew_lease(NFS_SERVER(d_inode(data->ctx->dentry)),
6885 				data->timestamp);
6886 		if (data->arg.new_lock && !data->cancelled) {
6887 			data->fl.fl_flags &= ~(FL_SLEEP | FL_ACCESS);
6888 			if (locks_lock_inode_wait(lsp->ls_state->inode, &data->fl) < 0)
6889 				goto out_restart;
6890 		}
6891 		if (data->arg.new_lock_owner != 0) {
6892 			nfs_confirm_seqid(&lsp->ls_seqid, 0);
6893 			nfs4_stateid_copy(&lsp->ls_stateid, &data->res.stateid);
6894 			set_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags);
6895 		} else if (!nfs4_update_lock_stateid(lsp, &data->res.stateid))
6896 			goto out_restart;
6897 		break;
6898 	case -NFS4ERR_BAD_STATEID:
6899 	case -NFS4ERR_OLD_STATEID:
6900 	case -NFS4ERR_STALE_STATEID:
6901 	case -NFS4ERR_EXPIRED:
6902 		if (data->arg.new_lock_owner != 0) {
6903 			if (!nfs4_stateid_match(&data->arg.open_stateid,
6904 						&lsp->ls_state->open_stateid))
6905 				goto out_restart;
6906 		} else if (!nfs4_stateid_match(&data->arg.lock_stateid,
6907 						&lsp->ls_stateid))
6908 				goto out_restart;
6909 	}
6910 out_done:
6911 	dprintk("%s: done, ret = %d!\n", __func__, data->rpc_status);
6912 	return;
6913 out_restart:
6914 	if (!data->cancelled)
6915 		rpc_restart_call_prepare(task);
6916 	goto out_done;
6917 }
6918 
6919 static void nfs4_lock_release(void *calldata)
6920 {
6921 	struct nfs4_lockdata *data = calldata;
6922 
6923 	dprintk("%s: begin!\n", __func__);
6924 	nfs_free_seqid(data->arg.open_seqid);
6925 	if (data->cancelled && data->rpc_status == 0) {
6926 		struct rpc_task *task;
6927 		task = nfs4_do_unlck(&data->fl, data->ctx, data->lsp,
6928 				data->arg.lock_seqid);
6929 		if (!IS_ERR(task))
6930 			rpc_put_task_async(task);
6931 		dprintk("%s: cancelling lock!\n", __func__);
6932 	} else
6933 		nfs_free_seqid(data->arg.lock_seqid);
6934 	nfs4_put_lock_state(data->lsp);
6935 	put_nfs_open_context(data->ctx);
6936 	kfree(data);
6937 	dprintk("%s: done!\n", __func__);
6938 }
6939 
6940 static const struct rpc_call_ops nfs4_lock_ops = {
6941 	.rpc_call_prepare = nfs4_lock_prepare,
6942 	.rpc_call_done = nfs4_lock_done,
6943 	.rpc_release = nfs4_lock_release,
6944 };
6945 
6946 static void nfs4_handle_setlk_error(struct nfs_server *server, struct nfs4_lock_state *lsp, int new_lock_owner, int error)
6947 {
6948 	switch (error) {
6949 	case -NFS4ERR_ADMIN_REVOKED:
6950 	case -NFS4ERR_EXPIRED:
6951 	case -NFS4ERR_BAD_STATEID:
6952 		lsp->ls_seqid.flags &= ~NFS_SEQID_CONFIRMED;
6953 		if (new_lock_owner != 0 ||
6954 		   test_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags) != 0)
6955 			nfs4_schedule_stateid_recovery(server, lsp->ls_state);
6956 		break;
6957 	case -NFS4ERR_STALE_STATEID:
6958 		lsp->ls_seqid.flags &= ~NFS_SEQID_CONFIRMED;
6959 		nfs4_schedule_lease_recovery(server->nfs_client);
6960 	}
6961 }
6962 
6963 static int _nfs4_do_setlk(struct nfs4_state *state, int cmd, struct file_lock *fl, int recovery_type)
6964 {
6965 	struct nfs4_lockdata *data;
6966 	struct rpc_task *task;
6967 	struct rpc_message msg = {
6968 		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOCK],
6969 		.rpc_cred = state->owner->so_cred,
6970 	};
6971 	struct rpc_task_setup task_setup_data = {
6972 		.rpc_client = NFS_CLIENT(state->inode),
6973 		.rpc_message = &msg,
6974 		.callback_ops = &nfs4_lock_ops,
6975 		.workqueue = nfsiod_workqueue,
6976 		.flags = RPC_TASK_ASYNC | RPC_TASK_CRED_NOREF,
6977 	};
6978 	int ret;
6979 
6980 	dprintk("%s: begin!\n", __func__);
6981 	data = nfs4_alloc_lockdata(fl, nfs_file_open_context(fl->fl_file),
6982 			fl->fl_u.nfs4_fl.owner,
6983 			recovery_type == NFS_LOCK_NEW ? GFP_KERNEL : GFP_NOFS);
6984 	if (data == NULL)
6985 		return -ENOMEM;
6986 	if (IS_SETLKW(cmd))
6987 		data->arg.block = 1;
6988 	nfs4_init_sequence(&data->arg.seq_args, &data->res.seq_res, 1,
6989 				recovery_type > NFS_LOCK_NEW);
6990 	msg.rpc_argp = &data->arg;
6991 	msg.rpc_resp = &data->res;
6992 	task_setup_data.callback_data = data;
6993 	if (recovery_type > NFS_LOCK_NEW) {
6994 		if (recovery_type == NFS_LOCK_RECLAIM)
6995 			data->arg.reclaim = NFS_LOCK_RECLAIM;
6996 	} else
6997 		data->arg.new_lock = 1;
6998 	task = rpc_run_task(&task_setup_data);
6999 	if (IS_ERR(task))
7000 		return PTR_ERR(task);
7001 	ret = rpc_wait_for_completion_task(task);
7002 	if (ret == 0) {
7003 		ret = data->rpc_status;
7004 		if (ret)
7005 			nfs4_handle_setlk_error(data->server, data->lsp,
7006 					data->arg.new_lock_owner, ret);
7007 	} else
7008 		data->cancelled = true;
7009 	rpc_put_task(task);
7010 	dprintk("%s: done, ret = %d!\n", __func__, ret);
7011 	trace_nfs4_set_lock(fl, state, &data->res.stateid, cmd, ret);
7012 	return ret;
7013 }
7014 
7015 static int nfs4_lock_reclaim(struct nfs4_state *state, struct file_lock *request)
7016 {
7017 	struct nfs_server *server = NFS_SERVER(state->inode);
7018 	struct nfs4_exception exception = {
7019 		.inode = state->inode,
7020 	};
7021 	int err;
7022 
7023 	do {
7024 		/* Cache the lock if possible... */
7025 		if (test_bit(NFS_DELEGATED_STATE, &state->flags) != 0)
7026 			return 0;
7027 		err = _nfs4_do_setlk(state, F_SETLK, request, NFS_LOCK_RECLAIM);
7028 		if (err != -NFS4ERR_DELAY)
7029 			break;
7030 		nfs4_handle_exception(server, err, &exception);
7031 	} while (exception.retry);
7032 	return err;
7033 }
7034 
7035 static int nfs4_lock_expired(struct nfs4_state *state, struct file_lock *request)
7036 {
7037 	struct nfs_server *server = NFS_SERVER(state->inode);
7038 	struct nfs4_exception exception = {
7039 		.inode = state->inode,
7040 	};
7041 	int err;
7042 
7043 	err = nfs4_set_lock_state(state, request);
7044 	if (err != 0)
7045 		return err;
7046 	if (!recover_lost_locks) {
7047 		set_bit(NFS_LOCK_LOST, &request->fl_u.nfs4_fl.owner->ls_flags);
7048 		return 0;
7049 	}
7050 	do {
7051 		if (test_bit(NFS_DELEGATED_STATE, &state->flags) != 0)
7052 			return 0;
7053 		err = _nfs4_do_setlk(state, F_SETLK, request, NFS_LOCK_EXPIRED);
7054 		switch (err) {
7055 		default:
7056 			goto out;
7057 		case -NFS4ERR_GRACE:
7058 		case -NFS4ERR_DELAY:
7059 			nfs4_handle_exception(server, err, &exception);
7060 			err = 0;
7061 		}
7062 	} while (exception.retry);
7063 out:
7064 	return err;
7065 }
7066 
7067 #if defined(CONFIG_NFS_V4_1)
7068 static int nfs41_lock_expired(struct nfs4_state *state, struct file_lock *request)
7069 {
7070 	struct nfs4_lock_state *lsp;
7071 	int status;
7072 
7073 	status = nfs4_set_lock_state(state, request);
7074 	if (status != 0)
7075 		return status;
7076 	lsp = request->fl_u.nfs4_fl.owner;
7077 	if (test_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags) ||
7078 	    test_bit(NFS_LOCK_LOST, &lsp->ls_flags))
7079 		return 0;
7080 	return nfs4_lock_expired(state, request);
7081 }
7082 #endif
7083 
7084 static int _nfs4_proc_setlk(struct nfs4_state *state, int cmd, struct file_lock *request)
7085 {
7086 	struct nfs_inode *nfsi = NFS_I(state->inode);
7087 	struct nfs4_state_owner *sp = state->owner;
7088 	unsigned char fl_flags = request->fl_flags;
7089 	int status;
7090 
7091 	request->fl_flags |= FL_ACCESS;
7092 	status = locks_lock_inode_wait(state->inode, request);
7093 	if (status < 0)
7094 		goto out;
7095 	mutex_lock(&sp->so_delegreturn_mutex);
7096 	down_read(&nfsi->rwsem);
7097 	if (test_bit(NFS_DELEGATED_STATE, &state->flags)) {
7098 		/* Yes: cache locks! */
7099 		/* ...but avoid races with delegation recall... */
7100 		request->fl_flags = fl_flags & ~FL_SLEEP;
7101 		status = locks_lock_inode_wait(state->inode, request);
7102 		up_read(&nfsi->rwsem);
7103 		mutex_unlock(&sp->so_delegreturn_mutex);
7104 		goto out;
7105 	}
7106 	up_read(&nfsi->rwsem);
7107 	mutex_unlock(&sp->so_delegreturn_mutex);
7108 	status = _nfs4_do_setlk(state, cmd, request, NFS_LOCK_NEW);
7109 out:
7110 	request->fl_flags = fl_flags;
7111 	return status;
7112 }
7113 
7114 static int nfs4_proc_setlk(struct nfs4_state *state, int cmd, struct file_lock *request)
7115 {
7116 	struct nfs4_exception exception = {
7117 		.state = state,
7118 		.inode = state->inode,
7119 		.interruptible = true,
7120 	};
7121 	int err;
7122 
7123 	do {
7124 		err = _nfs4_proc_setlk(state, cmd, request);
7125 		if (err == -NFS4ERR_DENIED)
7126 			err = -EAGAIN;
7127 		err = nfs4_handle_exception(NFS_SERVER(state->inode),
7128 				err, &exception);
7129 	} while (exception.retry);
7130 	return err;
7131 }
7132 
7133 #define NFS4_LOCK_MINTIMEOUT (1 * HZ)
7134 #define NFS4_LOCK_MAXTIMEOUT (30 * HZ)
7135 
7136 static int
7137 nfs4_retry_setlk_simple(struct nfs4_state *state, int cmd,
7138 			struct file_lock *request)
7139 {
7140 	int		status = -ERESTARTSYS;
7141 	unsigned long	timeout = NFS4_LOCK_MINTIMEOUT;
7142 
7143 	while(!signalled()) {
7144 		status = nfs4_proc_setlk(state, cmd, request);
7145 		if ((status != -EAGAIN) || IS_SETLK(cmd))
7146 			break;
7147 		freezable_schedule_timeout_interruptible(timeout);
7148 		timeout *= 2;
7149 		timeout = min_t(unsigned long, NFS4_LOCK_MAXTIMEOUT, timeout);
7150 		status = -ERESTARTSYS;
7151 	}
7152 	return status;
7153 }
7154 
7155 #ifdef CONFIG_NFS_V4_1
7156 struct nfs4_lock_waiter {
7157 	struct task_struct	*task;
7158 	struct inode		*inode;
7159 	struct nfs_lowner	*owner;
7160 };
7161 
7162 static int
7163 nfs4_wake_lock_waiter(wait_queue_entry_t *wait, unsigned int mode, int flags, void *key)
7164 {
7165 	int ret;
7166 	struct nfs4_lock_waiter	*waiter	= wait->private;
7167 
7168 	/* NULL key means to wake up everyone */
7169 	if (key) {
7170 		struct cb_notify_lock_args	*cbnl = key;
7171 		struct nfs_lowner		*lowner = &cbnl->cbnl_owner,
7172 						*wowner = waiter->owner;
7173 
7174 		/* Only wake if the callback was for the same owner. */
7175 		if (lowner->id != wowner->id || lowner->s_dev != wowner->s_dev)
7176 			return 0;
7177 
7178 		/* Make sure it's for the right inode */
7179 		if (nfs_compare_fh(NFS_FH(waiter->inode), &cbnl->cbnl_fh))
7180 			return 0;
7181 	}
7182 
7183 	/* override "private" so we can use default_wake_function */
7184 	wait->private = waiter->task;
7185 	ret = woken_wake_function(wait, mode, flags, key);
7186 	if (ret)
7187 		list_del_init(&wait->entry);
7188 	wait->private = waiter;
7189 	return ret;
7190 }
7191 
7192 static int
7193 nfs4_retry_setlk(struct nfs4_state *state, int cmd, struct file_lock *request)
7194 {
7195 	int status = -ERESTARTSYS;
7196 	struct nfs4_lock_state *lsp = request->fl_u.nfs4_fl.owner;
7197 	struct nfs_server *server = NFS_SERVER(state->inode);
7198 	struct nfs_client *clp = server->nfs_client;
7199 	wait_queue_head_t *q = &clp->cl_lock_waitq;
7200 	struct nfs_lowner owner = { .clientid = clp->cl_clientid,
7201 				    .id = lsp->ls_seqid.owner_id,
7202 				    .s_dev = server->s_dev };
7203 	struct nfs4_lock_waiter waiter = { .task  = current,
7204 					   .inode = state->inode,
7205 					   .owner = &owner};
7206 	wait_queue_entry_t wait;
7207 
7208 	/* Don't bother with waitqueue if we don't expect a callback */
7209 	if (!test_bit(NFS_STATE_MAY_NOTIFY_LOCK, &state->flags))
7210 		return nfs4_retry_setlk_simple(state, cmd, request);
7211 
7212 	init_wait(&wait);
7213 	wait.private = &waiter;
7214 	wait.func = nfs4_wake_lock_waiter;
7215 
7216 	while(!signalled()) {
7217 		add_wait_queue(q, &wait);
7218 		status = nfs4_proc_setlk(state, cmd, request);
7219 		if ((status != -EAGAIN) || IS_SETLK(cmd)) {
7220 			finish_wait(q, &wait);
7221 			break;
7222 		}
7223 
7224 		status = -ERESTARTSYS;
7225 		freezer_do_not_count();
7226 		wait_woken(&wait, TASK_INTERRUPTIBLE, NFS4_LOCK_MAXTIMEOUT);
7227 		freezer_count();
7228 		finish_wait(q, &wait);
7229 	}
7230 
7231 	return status;
7232 }
7233 #else /* !CONFIG_NFS_V4_1 */
7234 static inline int
7235 nfs4_retry_setlk(struct nfs4_state *state, int cmd, struct file_lock *request)
7236 {
7237 	return nfs4_retry_setlk_simple(state, cmd, request);
7238 }
7239 #endif
7240 
7241 static int
7242 nfs4_proc_lock(struct file *filp, int cmd, struct file_lock *request)
7243 {
7244 	struct nfs_open_context *ctx;
7245 	struct nfs4_state *state;
7246 	int status;
7247 
7248 	/* verify open state */
7249 	ctx = nfs_file_open_context(filp);
7250 	state = ctx->state;
7251 
7252 	if (IS_GETLK(cmd)) {
7253 		if (state != NULL)
7254 			return nfs4_proc_getlk(state, F_GETLK, request);
7255 		return 0;
7256 	}
7257 
7258 	if (!(IS_SETLK(cmd) || IS_SETLKW(cmd)))
7259 		return -EINVAL;
7260 
7261 	if (request->fl_type == F_UNLCK) {
7262 		if (state != NULL)
7263 			return nfs4_proc_unlck(state, cmd, request);
7264 		return 0;
7265 	}
7266 
7267 	if (state == NULL)
7268 		return -ENOLCK;
7269 
7270 	if ((request->fl_flags & FL_POSIX) &&
7271 	    !test_bit(NFS_STATE_POSIX_LOCKS, &state->flags))
7272 		return -ENOLCK;
7273 
7274 	/*
7275 	 * Don't rely on the VFS having checked the file open mode,
7276 	 * since it won't do this for flock() locks.
7277 	 */
7278 	switch (request->fl_type) {
7279 	case F_RDLCK:
7280 		if (!(filp->f_mode & FMODE_READ))
7281 			return -EBADF;
7282 		break;
7283 	case F_WRLCK:
7284 		if (!(filp->f_mode & FMODE_WRITE))
7285 			return -EBADF;
7286 	}
7287 
7288 	status = nfs4_set_lock_state(state, request);
7289 	if (status != 0)
7290 		return status;
7291 
7292 	return nfs4_retry_setlk(state, cmd, request);
7293 }
7294 
7295 int nfs4_lock_delegation_recall(struct file_lock *fl, struct nfs4_state *state, const nfs4_stateid *stateid)
7296 {
7297 	struct nfs_server *server = NFS_SERVER(state->inode);
7298 	int err;
7299 
7300 	err = nfs4_set_lock_state(state, fl);
7301 	if (err != 0)
7302 		return err;
7303 	do {
7304 		err = _nfs4_do_setlk(state, F_SETLK, fl, NFS_LOCK_NEW);
7305 		if (err != -NFS4ERR_DELAY)
7306 			break;
7307 		ssleep(1);
7308 	} while (err == -NFS4ERR_DELAY);
7309 	return nfs4_handle_delegation_recall_error(server, state, stateid, fl, err);
7310 }
7311 
7312 struct nfs_release_lockowner_data {
7313 	struct nfs4_lock_state *lsp;
7314 	struct nfs_server *server;
7315 	struct nfs_release_lockowner_args args;
7316 	struct nfs_release_lockowner_res res;
7317 	unsigned long timestamp;
7318 };
7319 
7320 static void nfs4_release_lockowner_prepare(struct rpc_task *task, void *calldata)
7321 {
7322 	struct nfs_release_lockowner_data *data = calldata;
7323 	struct nfs_server *server = data->server;
7324 	nfs4_setup_sequence(server->nfs_client, &data->args.seq_args,
7325 			   &data->res.seq_res, task);
7326 	data->args.lock_owner.clientid = server->nfs_client->cl_clientid;
7327 	data->timestamp = jiffies;
7328 }
7329 
7330 static void nfs4_release_lockowner_done(struct rpc_task *task, void *calldata)
7331 {
7332 	struct nfs_release_lockowner_data *data = calldata;
7333 	struct nfs_server *server = data->server;
7334 
7335 	nfs40_sequence_done(task, &data->res.seq_res);
7336 
7337 	switch (task->tk_status) {
7338 	case 0:
7339 		renew_lease(server, data->timestamp);
7340 		break;
7341 	case -NFS4ERR_STALE_CLIENTID:
7342 	case -NFS4ERR_EXPIRED:
7343 		nfs4_schedule_lease_recovery(server->nfs_client);
7344 		break;
7345 	case -NFS4ERR_LEASE_MOVED:
7346 	case -NFS4ERR_DELAY:
7347 		if (nfs4_async_handle_error(task, server,
7348 					    NULL, NULL) == -EAGAIN)
7349 			rpc_restart_call_prepare(task);
7350 	}
7351 }
7352 
7353 static void nfs4_release_lockowner_release(void *calldata)
7354 {
7355 	struct nfs_release_lockowner_data *data = calldata;
7356 	nfs4_free_lock_state(data->server, data->lsp);
7357 	kfree(calldata);
7358 }
7359 
7360 static const struct rpc_call_ops nfs4_release_lockowner_ops = {
7361 	.rpc_call_prepare = nfs4_release_lockowner_prepare,
7362 	.rpc_call_done = nfs4_release_lockowner_done,
7363 	.rpc_release = nfs4_release_lockowner_release,
7364 };
7365 
7366 static void
7367 nfs4_release_lockowner(struct nfs_server *server, struct nfs4_lock_state *lsp)
7368 {
7369 	struct nfs_release_lockowner_data *data;
7370 	struct rpc_message msg = {
7371 		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RELEASE_LOCKOWNER],
7372 	};
7373 
7374 	if (server->nfs_client->cl_mvops->minor_version != 0)
7375 		return;
7376 
7377 	data = kmalloc(sizeof(*data), GFP_NOFS);
7378 	if (!data)
7379 		return;
7380 	data->lsp = lsp;
7381 	data->server = server;
7382 	data->args.lock_owner.clientid = server->nfs_client->cl_clientid;
7383 	data->args.lock_owner.id = lsp->ls_seqid.owner_id;
7384 	data->args.lock_owner.s_dev = server->s_dev;
7385 
7386 	msg.rpc_argp = &data->args;
7387 	msg.rpc_resp = &data->res;
7388 	nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 0, 0);
7389 	rpc_call_async(server->client, &msg, 0, &nfs4_release_lockowner_ops, data);
7390 }
7391 
7392 #define XATTR_NAME_NFSV4_ACL "system.nfs4_acl"
7393 
7394 static int nfs4_xattr_set_nfs4_acl(const struct xattr_handler *handler,
7395 				   struct dentry *unused, struct inode *inode,
7396 				   const char *key, const void *buf,
7397 				   size_t buflen, int flags)
7398 {
7399 	return nfs4_proc_set_acl(inode, buf, buflen);
7400 }
7401 
7402 static int nfs4_xattr_get_nfs4_acl(const struct xattr_handler *handler,
7403 				   struct dentry *unused, struct inode *inode,
7404 				   const char *key, void *buf, size_t buflen)
7405 {
7406 	return nfs4_proc_get_acl(inode, buf, buflen);
7407 }
7408 
7409 static bool nfs4_xattr_list_nfs4_acl(struct dentry *dentry)
7410 {
7411 	return nfs4_server_supports_acls(NFS_SERVER(d_inode(dentry)));
7412 }
7413 
7414 #ifdef CONFIG_NFS_V4_SECURITY_LABEL
7415 
7416 static int nfs4_xattr_set_nfs4_label(const struct xattr_handler *handler,
7417 				     struct dentry *unused, struct inode *inode,
7418 				     const char *key, const void *buf,
7419 				     size_t buflen, int flags)
7420 {
7421 	if (security_ismaclabel(key))
7422 		return nfs4_set_security_label(inode, buf, buflen);
7423 
7424 	return -EOPNOTSUPP;
7425 }
7426 
7427 static int nfs4_xattr_get_nfs4_label(const struct xattr_handler *handler,
7428 				     struct dentry *unused, struct inode *inode,
7429 				     const char *key, void *buf, size_t buflen)
7430 {
7431 	if (security_ismaclabel(key))
7432 		return nfs4_get_security_label(inode, buf, buflen);
7433 	return -EOPNOTSUPP;
7434 }
7435 
7436 static ssize_t
7437 nfs4_listxattr_nfs4_label(struct inode *inode, char *list, size_t list_len)
7438 {
7439 	int len = 0;
7440 
7441 	if (nfs_server_capable(inode, NFS_CAP_SECURITY_LABEL)) {
7442 		len = security_inode_listsecurity(inode, list, list_len);
7443 		if (list_len && len > list_len)
7444 			return -ERANGE;
7445 	}
7446 	return len;
7447 }
7448 
7449 static const struct xattr_handler nfs4_xattr_nfs4_label_handler = {
7450 	.prefix = XATTR_SECURITY_PREFIX,
7451 	.get	= nfs4_xattr_get_nfs4_label,
7452 	.set	= nfs4_xattr_set_nfs4_label,
7453 };
7454 
7455 #else
7456 
7457 static ssize_t
7458 nfs4_listxattr_nfs4_label(struct inode *inode, char *list, size_t list_len)
7459 {
7460 	return 0;
7461 }
7462 
7463 #endif
7464 
7465 #ifdef CONFIG_NFS_V4_2
7466 static int nfs4_xattr_set_nfs4_user(const struct xattr_handler *handler,
7467 				    struct dentry *unused, struct inode *inode,
7468 				    const char *key, const void *buf,
7469 				    size_t buflen, int flags)
7470 {
7471 	struct nfs_access_entry cache;
7472 	int ret;
7473 
7474 	if (!nfs_server_capable(inode, NFS_CAP_XATTR))
7475 		return -EOPNOTSUPP;
7476 
7477 	/*
7478 	 * There is no mapping from the MAY_* flags to the NFS_ACCESS_XA*
7479 	 * flags right now. Handling of xattr operations use the normal
7480 	 * file read/write permissions.
7481 	 *
7482 	 * Just in case the server has other ideas (which RFC 8276 allows),
7483 	 * do a cached access check for the XA* flags to possibly avoid
7484 	 * doing an RPC and getting EACCES back.
7485 	 */
7486 	if (!nfs_access_get_cached(inode, current_cred(), &cache, true)) {
7487 		if (!(cache.mask & NFS_ACCESS_XAWRITE))
7488 			return -EACCES;
7489 	}
7490 
7491 	if (buf == NULL) {
7492 		ret = nfs42_proc_removexattr(inode, key);
7493 		if (!ret)
7494 			nfs4_xattr_cache_remove(inode, key);
7495 	} else {
7496 		ret = nfs42_proc_setxattr(inode, key, buf, buflen, flags);
7497 		if (!ret)
7498 			nfs4_xattr_cache_add(inode, key, buf, NULL, buflen);
7499 	}
7500 
7501 	return ret;
7502 }
7503 
7504 static int nfs4_xattr_get_nfs4_user(const struct xattr_handler *handler,
7505 				    struct dentry *unused, struct inode *inode,
7506 				    const char *key, void *buf, size_t buflen)
7507 {
7508 	struct nfs_access_entry cache;
7509 	ssize_t ret;
7510 
7511 	if (!nfs_server_capable(inode, NFS_CAP_XATTR))
7512 		return -EOPNOTSUPP;
7513 
7514 	if (!nfs_access_get_cached(inode, current_cred(), &cache, true)) {
7515 		if (!(cache.mask & NFS_ACCESS_XAREAD))
7516 			return -EACCES;
7517 	}
7518 
7519 	ret = nfs_revalidate_inode(NFS_SERVER(inode), inode);
7520 	if (ret)
7521 		return ret;
7522 
7523 	ret = nfs4_xattr_cache_get(inode, key, buf, buflen);
7524 	if (ret >= 0 || (ret < 0 && ret != -ENOENT))
7525 		return ret;
7526 
7527 	ret = nfs42_proc_getxattr(inode, key, buf, buflen);
7528 
7529 	return ret;
7530 }
7531 
7532 static ssize_t
7533 nfs4_listxattr_nfs4_user(struct inode *inode, char *list, size_t list_len)
7534 {
7535 	u64 cookie;
7536 	bool eof;
7537 	ssize_t ret, size;
7538 	char *buf;
7539 	size_t buflen;
7540 	struct nfs_access_entry cache;
7541 
7542 	if (!nfs_server_capable(inode, NFS_CAP_XATTR))
7543 		return 0;
7544 
7545 	if (!nfs_access_get_cached(inode, current_cred(), &cache, true)) {
7546 		if (!(cache.mask & NFS_ACCESS_XALIST))
7547 			return 0;
7548 	}
7549 
7550 	ret = nfs_revalidate_inode(NFS_SERVER(inode), inode);
7551 	if (ret)
7552 		return ret;
7553 
7554 	ret = nfs4_xattr_cache_list(inode, list, list_len);
7555 	if (ret >= 0 || (ret < 0 && ret != -ENOENT))
7556 		return ret;
7557 
7558 	cookie = 0;
7559 	eof = false;
7560 	buflen = list_len ? list_len : XATTR_LIST_MAX;
7561 	buf = list_len ? list : NULL;
7562 	size = 0;
7563 
7564 	while (!eof) {
7565 		ret = nfs42_proc_listxattrs(inode, buf, buflen,
7566 		    &cookie, &eof);
7567 		if (ret < 0)
7568 			return ret;
7569 
7570 		if (list_len) {
7571 			buf += ret;
7572 			buflen -= ret;
7573 		}
7574 		size += ret;
7575 	}
7576 
7577 	if (list_len)
7578 		nfs4_xattr_cache_set_list(inode, list, size);
7579 
7580 	return size;
7581 }
7582 
7583 #else
7584 
7585 static ssize_t
7586 nfs4_listxattr_nfs4_user(struct inode *inode, char *list, size_t list_len)
7587 {
7588 	return 0;
7589 }
7590 #endif /* CONFIG_NFS_V4_2 */
7591 
7592 /*
7593  * nfs_fhget will use either the mounted_on_fileid or the fileid
7594  */
7595 static void nfs_fixup_referral_attributes(struct nfs_fattr *fattr)
7596 {
7597 	if (!(((fattr->valid & NFS_ATTR_FATTR_MOUNTED_ON_FILEID) ||
7598 	       (fattr->valid & NFS_ATTR_FATTR_FILEID)) &&
7599 	      (fattr->valid & NFS_ATTR_FATTR_FSID) &&
7600 	      (fattr->valid & NFS_ATTR_FATTR_V4_LOCATIONS)))
7601 		return;
7602 
7603 	fattr->valid |= NFS_ATTR_FATTR_TYPE | NFS_ATTR_FATTR_MODE |
7604 		NFS_ATTR_FATTR_NLINK | NFS_ATTR_FATTR_V4_REFERRAL;
7605 	fattr->mode = S_IFDIR | S_IRUGO | S_IXUGO;
7606 	fattr->nlink = 2;
7607 }
7608 
7609 static int _nfs4_proc_fs_locations(struct rpc_clnt *client, struct inode *dir,
7610 				   const struct qstr *name,
7611 				   struct nfs4_fs_locations *fs_locations,
7612 				   struct page *page)
7613 {
7614 	struct nfs_server *server = NFS_SERVER(dir);
7615 	u32 bitmask[3];
7616 	struct nfs4_fs_locations_arg args = {
7617 		.dir_fh = NFS_FH(dir),
7618 		.name = name,
7619 		.page = page,
7620 		.bitmask = bitmask,
7621 	};
7622 	struct nfs4_fs_locations_res res = {
7623 		.fs_locations = fs_locations,
7624 	};
7625 	struct rpc_message msg = {
7626 		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FS_LOCATIONS],
7627 		.rpc_argp = &args,
7628 		.rpc_resp = &res,
7629 	};
7630 	int status;
7631 
7632 	dprintk("%s: start\n", __func__);
7633 
7634 	bitmask[0] = nfs4_fattr_bitmap[0] | FATTR4_WORD0_FS_LOCATIONS;
7635 	bitmask[1] = nfs4_fattr_bitmap[1];
7636 
7637 	/* Ask for the fileid of the absent filesystem if mounted_on_fileid
7638 	 * is not supported */
7639 	if (NFS_SERVER(dir)->attr_bitmask[1] & FATTR4_WORD1_MOUNTED_ON_FILEID)
7640 		bitmask[0] &= ~FATTR4_WORD0_FILEID;
7641 	else
7642 		bitmask[1] &= ~FATTR4_WORD1_MOUNTED_ON_FILEID;
7643 
7644 	nfs_fattr_init(&fs_locations->fattr);
7645 	fs_locations->server = server;
7646 	fs_locations->nlocations = 0;
7647 	status = nfs4_call_sync(client, server, &msg, &args.seq_args, &res.seq_res, 0);
7648 	dprintk("%s: returned status = %d\n", __func__, status);
7649 	return status;
7650 }
7651 
7652 int nfs4_proc_fs_locations(struct rpc_clnt *client, struct inode *dir,
7653 			   const struct qstr *name,
7654 			   struct nfs4_fs_locations *fs_locations,
7655 			   struct page *page)
7656 {
7657 	struct nfs4_exception exception = {
7658 		.interruptible = true,
7659 	};
7660 	int err;
7661 	do {
7662 		err = _nfs4_proc_fs_locations(client, dir, name,
7663 				fs_locations, page);
7664 		trace_nfs4_get_fs_locations(dir, name, err);
7665 		err = nfs4_handle_exception(NFS_SERVER(dir), err,
7666 				&exception);
7667 	} while (exception.retry);
7668 	return err;
7669 }
7670 
7671 /*
7672  * This operation also signals the server that this client is
7673  * performing migration recovery.  The server can stop returning
7674  * NFS4ERR_LEASE_MOVED to this client.  A RENEW operation is
7675  * appended to this compound to identify the client ID which is
7676  * performing recovery.
7677  */
7678 static int _nfs40_proc_get_locations(struct inode *inode,
7679 				     struct nfs4_fs_locations *locations,
7680 				     struct page *page, const struct cred *cred)
7681 {
7682 	struct nfs_server *server = NFS_SERVER(inode);
7683 	struct rpc_clnt *clnt = server->client;
7684 	u32 bitmask[2] = {
7685 		[0] = FATTR4_WORD0_FSID | FATTR4_WORD0_FS_LOCATIONS,
7686 	};
7687 	struct nfs4_fs_locations_arg args = {
7688 		.clientid	= server->nfs_client->cl_clientid,
7689 		.fh		= NFS_FH(inode),
7690 		.page		= page,
7691 		.bitmask	= bitmask,
7692 		.migration	= 1,		/* skip LOOKUP */
7693 		.renew		= 1,		/* append RENEW */
7694 	};
7695 	struct nfs4_fs_locations_res res = {
7696 		.fs_locations	= locations,
7697 		.migration	= 1,
7698 		.renew		= 1,
7699 	};
7700 	struct rpc_message msg = {
7701 		.rpc_proc	= &nfs4_procedures[NFSPROC4_CLNT_FS_LOCATIONS],
7702 		.rpc_argp	= &args,
7703 		.rpc_resp	= &res,
7704 		.rpc_cred	= cred,
7705 	};
7706 	unsigned long now = jiffies;
7707 	int status;
7708 
7709 	nfs_fattr_init(&locations->fattr);
7710 	locations->server = server;
7711 	locations->nlocations = 0;
7712 
7713 	nfs4_init_sequence(&args.seq_args, &res.seq_res, 0, 1);
7714 	status = nfs4_call_sync_sequence(clnt, server, &msg,
7715 					&args.seq_args, &res.seq_res);
7716 	if (status)
7717 		return status;
7718 
7719 	renew_lease(server, now);
7720 	return 0;
7721 }
7722 
7723 #ifdef CONFIG_NFS_V4_1
7724 
7725 /*
7726  * This operation also signals the server that this client is
7727  * performing migration recovery.  The server can stop asserting
7728  * SEQ4_STATUS_LEASE_MOVED for this client.  The client ID
7729  * performing this operation is identified in the SEQUENCE
7730  * operation in this compound.
7731  *
7732  * When the client supports GETATTR(fs_locations_info), it can
7733  * be plumbed in here.
7734  */
7735 static int _nfs41_proc_get_locations(struct inode *inode,
7736 				     struct nfs4_fs_locations *locations,
7737 				     struct page *page, const struct cred *cred)
7738 {
7739 	struct nfs_server *server = NFS_SERVER(inode);
7740 	struct rpc_clnt *clnt = server->client;
7741 	u32 bitmask[2] = {
7742 		[0] = FATTR4_WORD0_FSID | FATTR4_WORD0_FS_LOCATIONS,
7743 	};
7744 	struct nfs4_fs_locations_arg args = {
7745 		.fh		= NFS_FH(inode),
7746 		.page		= page,
7747 		.bitmask	= bitmask,
7748 		.migration	= 1,		/* skip LOOKUP */
7749 	};
7750 	struct nfs4_fs_locations_res res = {
7751 		.fs_locations	= locations,
7752 		.migration	= 1,
7753 	};
7754 	struct rpc_message msg = {
7755 		.rpc_proc	= &nfs4_procedures[NFSPROC4_CLNT_FS_LOCATIONS],
7756 		.rpc_argp	= &args,
7757 		.rpc_resp	= &res,
7758 		.rpc_cred	= cred,
7759 	};
7760 	int status;
7761 
7762 	nfs_fattr_init(&locations->fattr);
7763 	locations->server = server;
7764 	locations->nlocations = 0;
7765 
7766 	nfs4_init_sequence(&args.seq_args, &res.seq_res, 0, 1);
7767 	status = nfs4_call_sync_sequence(clnt, server, &msg,
7768 					&args.seq_args, &res.seq_res);
7769 	if (status == NFS4_OK &&
7770 	    res.seq_res.sr_status_flags & SEQ4_STATUS_LEASE_MOVED)
7771 		status = -NFS4ERR_LEASE_MOVED;
7772 	return status;
7773 }
7774 
7775 #endif	/* CONFIG_NFS_V4_1 */
7776 
7777 /**
7778  * nfs4_proc_get_locations - discover locations for a migrated FSID
7779  * @inode: inode on FSID that is migrating
7780  * @locations: result of query
7781  * @page: buffer
7782  * @cred: credential to use for this operation
7783  *
7784  * Returns NFS4_OK on success, a negative NFS4ERR status code if the
7785  * operation failed, or a negative errno if a local error occurred.
7786  *
7787  * On success, "locations" is filled in, but if the server has
7788  * no locations information, NFS_ATTR_FATTR_V4_LOCATIONS is not
7789  * asserted.
7790  *
7791  * -NFS4ERR_LEASE_MOVED is returned if the server still has leases
7792  * from this client that require migration recovery.
7793  */
7794 int nfs4_proc_get_locations(struct inode *inode,
7795 			    struct nfs4_fs_locations *locations,
7796 			    struct page *page, const struct cred *cred)
7797 {
7798 	struct nfs_server *server = NFS_SERVER(inode);
7799 	struct nfs_client *clp = server->nfs_client;
7800 	const struct nfs4_mig_recovery_ops *ops =
7801 					clp->cl_mvops->mig_recovery_ops;
7802 	struct nfs4_exception exception = {
7803 		.interruptible = true,
7804 	};
7805 	int status;
7806 
7807 	dprintk("%s: FSID %llx:%llx on \"%s\"\n", __func__,
7808 		(unsigned long long)server->fsid.major,
7809 		(unsigned long long)server->fsid.minor,
7810 		clp->cl_hostname);
7811 	nfs_display_fhandle(NFS_FH(inode), __func__);
7812 
7813 	do {
7814 		status = ops->get_locations(inode, locations, page, cred);
7815 		if (status != -NFS4ERR_DELAY)
7816 			break;
7817 		nfs4_handle_exception(server, status, &exception);
7818 	} while (exception.retry);
7819 	return status;
7820 }
7821 
7822 /*
7823  * This operation also signals the server that this client is
7824  * performing "lease moved" recovery.  The server can stop
7825  * returning NFS4ERR_LEASE_MOVED to this client.  A RENEW operation
7826  * is appended to this compound to identify the client ID which is
7827  * performing recovery.
7828  */
7829 static int _nfs40_proc_fsid_present(struct inode *inode, const struct cred *cred)
7830 {
7831 	struct nfs_server *server = NFS_SERVER(inode);
7832 	struct nfs_client *clp = NFS_SERVER(inode)->nfs_client;
7833 	struct rpc_clnt *clnt = server->client;
7834 	struct nfs4_fsid_present_arg args = {
7835 		.fh		= NFS_FH(inode),
7836 		.clientid	= clp->cl_clientid,
7837 		.renew		= 1,		/* append RENEW */
7838 	};
7839 	struct nfs4_fsid_present_res res = {
7840 		.renew		= 1,
7841 	};
7842 	struct rpc_message msg = {
7843 		.rpc_proc	= &nfs4_procedures[NFSPROC4_CLNT_FSID_PRESENT],
7844 		.rpc_argp	= &args,
7845 		.rpc_resp	= &res,
7846 		.rpc_cred	= cred,
7847 	};
7848 	unsigned long now = jiffies;
7849 	int status;
7850 
7851 	res.fh = nfs_alloc_fhandle();
7852 	if (res.fh == NULL)
7853 		return -ENOMEM;
7854 
7855 	nfs4_init_sequence(&args.seq_args, &res.seq_res, 0, 1);
7856 	status = nfs4_call_sync_sequence(clnt, server, &msg,
7857 						&args.seq_args, &res.seq_res);
7858 	nfs_free_fhandle(res.fh);
7859 	if (status)
7860 		return status;
7861 
7862 	do_renew_lease(clp, now);
7863 	return 0;
7864 }
7865 
7866 #ifdef CONFIG_NFS_V4_1
7867 
7868 /*
7869  * This operation also signals the server that this client is
7870  * performing "lease moved" recovery.  The server can stop asserting
7871  * SEQ4_STATUS_LEASE_MOVED for this client.  The client ID performing
7872  * this operation is identified in the SEQUENCE operation in this
7873  * compound.
7874  */
7875 static int _nfs41_proc_fsid_present(struct inode *inode, const struct cred *cred)
7876 {
7877 	struct nfs_server *server = NFS_SERVER(inode);
7878 	struct rpc_clnt *clnt = server->client;
7879 	struct nfs4_fsid_present_arg args = {
7880 		.fh		= NFS_FH(inode),
7881 	};
7882 	struct nfs4_fsid_present_res res = {
7883 	};
7884 	struct rpc_message msg = {
7885 		.rpc_proc	= &nfs4_procedures[NFSPROC4_CLNT_FSID_PRESENT],
7886 		.rpc_argp	= &args,
7887 		.rpc_resp	= &res,
7888 		.rpc_cred	= cred,
7889 	};
7890 	int status;
7891 
7892 	res.fh = nfs_alloc_fhandle();
7893 	if (res.fh == NULL)
7894 		return -ENOMEM;
7895 
7896 	nfs4_init_sequence(&args.seq_args, &res.seq_res, 0, 1);
7897 	status = nfs4_call_sync_sequence(clnt, server, &msg,
7898 						&args.seq_args, &res.seq_res);
7899 	nfs_free_fhandle(res.fh);
7900 	if (status == NFS4_OK &&
7901 	    res.seq_res.sr_status_flags & SEQ4_STATUS_LEASE_MOVED)
7902 		status = -NFS4ERR_LEASE_MOVED;
7903 	return status;
7904 }
7905 
7906 #endif	/* CONFIG_NFS_V4_1 */
7907 
7908 /**
7909  * nfs4_proc_fsid_present - Is this FSID present or absent on server?
7910  * @inode: inode on FSID to check
7911  * @cred: credential to use for this operation
7912  *
7913  * Server indicates whether the FSID is present, moved, or not
7914  * recognized.  This operation is necessary to clear a LEASE_MOVED
7915  * condition for this client ID.
7916  *
7917  * Returns NFS4_OK if the FSID is present on this server,
7918  * -NFS4ERR_MOVED if the FSID is no longer present, a negative
7919  *  NFS4ERR code if some error occurred on the server, or a
7920  *  negative errno if a local failure occurred.
7921  */
7922 int nfs4_proc_fsid_present(struct inode *inode, const struct cred *cred)
7923 {
7924 	struct nfs_server *server = NFS_SERVER(inode);
7925 	struct nfs_client *clp = server->nfs_client;
7926 	const struct nfs4_mig_recovery_ops *ops =
7927 					clp->cl_mvops->mig_recovery_ops;
7928 	struct nfs4_exception exception = {
7929 		.interruptible = true,
7930 	};
7931 	int status;
7932 
7933 	dprintk("%s: FSID %llx:%llx on \"%s\"\n", __func__,
7934 		(unsigned long long)server->fsid.major,
7935 		(unsigned long long)server->fsid.minor,
7936 		clp->cl_hostname);
7937 	nfs_display_fhandle(NFS_FH(inode), __func__);
7938 
7939 	do {
7940 		status = ops->fsid_present(inode, cred);
7941 		if (status != -NFS4ERR_DELAY)
7942 			break;
7943 		nfs4_handle_exception(server, status, &exception);
7944 	} while (exception.retry);
7945 	return status;
7946 }
7947 
7948 /*
7949  * If 'use_integrity' is true and the state managment nfs_client
7950  * cl_rpcclient is using krb5i/p, use the integrity protected cl_rpcclient
7951  * and the machine credential as per RFC3530bis and RFC5661 Security
7952  * Considerations sections. Otherwise, just use the user cred with the
7953  * filesystem's rpc_client.
7954  */
7955 static int _nfs4_proc_secinfo(struct inode *dir, const struct qstr *name, struct nfs4_secinfo_flavors *flavors, bool use_integrity)
7956 {
7957 	int status;
7958 	struct rpc_clnt *clnt = NFS_SERVER(dir)->client;
7959 	struct nfs_client *clp = NFS_SERVER(dir)->nfs_client;
7960 	struct nfs4_secinfo_arg args = {
7961 		.dir_fh = NFS_FH(dir),
7962 		.name   = name,
7963 	};
7964 	struct nfs4_secinfo_res res = {
7965 		.flavors     = flavors,
7966 	};
7967 	struct rpc_message msg = {
7968 		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SECINFO],
7969 		.rpc_argp = &args,
7970 		.rpc_resp = &res,
7971 	};
7972 	struct nfs4_call_sync_data data = {
7973 		.seq_server = NFS_SERVER(dir),
7974 		.seq_args = &args.seq_args,
7975 		.seq_res = &res.seq_res,
7976 	};
7977 	struct rpc_task_setup task_setup = {
7978 		.rpc_client = clnt,
7979 		.rpc_message = &msg,
7980 		.callback_ops = clp->cl_mvops->call_sync_ops,
7981 		.callback_data = &data,
7982 		.flags = RPC_TASK_NO_ROUND_ROBIN,
7983 	};
7984 	const struct cred *cred = NULL;
7985 
7986 	if (use_integrity) {
7987 		clnt = clp->cl_rpcclient;
7988 		task_setup.rpc_client = clnt;
7989 
7990 		cred = nfs4_get_clid_cred(clp);
7991 		msg.rpc_cred = cred;
7992 	}
7993 
7994 	dprintk("NFS call  secinfo %s\n", name->name);
7995 
7996 	nfs4_state_protect(clp, NFS_SP4_MACH_CRED_SECINFO, &clnt, &msg);
7997 	nfs4_init_sequence(&args.seq_args, &res.seq_res, 0, 0);
7998 	status = nfs4_call_sync_custom(&task_setup);
7999 
8000 	dprintk("NFS reply  secinfo: %d\n", status);
8001 
8002 	put_cred(cred);
8003 	return status;
8004 }
8005 
8006 int nfs4_proc_secinfo(struct inode *dir, const struct qstr *name,
8007 		      struct nfs4_secinfo_flavors *flavors)
8008 {
8009 	struct nfs4_exception exception = {
8010 		.interruptible = true,
8011 	};
8012 	int err;
8013 	do {
8014 		err = -NFS4ERR_WRONGSEC;
8015 
8016 		/* try to use integrity protection with machine cred */
8017 		if (_nfs4_is_integrity_protected(NFS_SERVER(dir)->nfs_client))
8018 			err = _nfs4_proc_secinfo(dir, name, flavors, true);
8019 
8020 		/*
8021 		 * if unable to use integrity protection, or SECINFO with
8022 		 * integrity protection returns NFS4ERR_WRONGSEC (which is
8023 		 * disallowed by spec, but exists in deployed servers) use
8024 		 * the current filesystem's rpc_client and the user cred.
8025 		 */
8026 		if (err == -NFS4ERR_WRONGSEC)
8027 			err = _nfs4_proc_secinfo(dir, name, flavors, false);
8028 
8029 		trace_nfs4_secinfo(dir, name, err);
8030 		err = nfs4_handle_exception(NFS_SERVER(dir), err,
8031 				&exception);
8032 	} while (exception.retry);
8033 	return err;
8034 }
8035 
8036 #ifdef CONFIG_NFS_V4_1
8037 /*
8038  * Check the exchange flags returned by the server for invalid flags, having
8039  * both PNFS and NON_PNFS flags set, and not having one of NON_PNFS, PNFS, or
8040  * DS flags set.
8041  */
8042 static int nfs4_check_cl_exchange_flags(u32 flags)
8043 {
8044 	if (flags & ~EXCHGID4_FLAG_MASK_R)
8045 		goto out_inval;
8046 	if ((flags & EXCHGID4_FLAG_USE_PNFS_MDS) &&
8047 	    (flags & EXCHGID4_FLAG_USE_NON_PNFS))
8048 		goto out_inval;
8049 	if (!(flags & (EXCHGID4_FLAG_MASK_PNFS)))
8050 		goto out_inval;
8051 	return NFS_OK;
8052 out_inval:
8053 	return -NFS4ERR_INVAL;
8054 }
8055 
8056 static bool
8057 nfs41_same_server_scope(struct nfs41_server_scope *a,
8058 			struct nfs41_server_scope *b)
8059 {
8060 	if (a->server_scope_sz != b->server_scope_sz)
8061 		return false;
8062 	return memcmp(a->server_scope, b->server_scope, a->server_scope_sz) == 0;
8063 }
8064 
8065 static void
8066 nfs4_bind_one_conn_to_session_done(struct rpc_task *task, void *calldata)
8067 {
8068 	struct nfs41_bind_conn_to_session_args *args = task->tk_msg.rpc_argp;
8069 	struct nfs41_bind_conn_to_session_res *res = task->tk_msg.rpc_resp;
8070 	struct nfs_client *clp = args->client;
8071 
8072 	switch (task->tk_status) {
8073 	case -NFS4ERR_BADSESSION:
8074 	case -NFS4ERR_DEADSESSION:
8075 		nfs4_schedule_session_recovery(clp->cl_session,
8076 				task->tk_status);
8077 	}
8078 	if (args->dir == NFS4_CDFC4_FORE_OR_BOTH &&
8079 			res->dir != NFS4_CDFS4_BOTH) {
8080 		rpc_task_close_connection(task);
8081 		if (args->retries++ < MAX_BIND_CONN_TO_SESSION_RETRIES)
8082 			rpc_restart_call(task);
8083 	}
8084 }
8085 
8086 static const struct rpc_call_ops nfs4_bind_one_conn_to_session_ops = {
8087 	.rpc_call_done =  nfs4_bind_one_conn_to_session_done,
8088 };
8089 
8090 /*
8091  * nfs4_proc_bind_one_conn_to_session()
8092  *
8093  * The 4.1 client currently uses the same TCP connection for the
8094  * fore and backchannel.
8095  */
8096 static
8097 int nfs4_proc_bind_one_conn_to_session(struct rpc_clnt *clnt,
8098 		struct rpc_xprt *xprt,
8099 		struct nfs_client *clp,
8100 		const struct cred *cred)
8101 {
8102 	int status;
8103 	struct nfs41_bind_conn_to_session_args args = {
8104 		.client = clp,
8105 		.dir = NFS4_CDFC4_FORE_OR_BOTH,
8106 		.retries = 0,
8107 	};
8108 	struct nfs41_bind_conn_to_session_res res;
8109 	struct rpc_message msg = {
8110 		.rpc_proc =
8111 			&nfs4_procedures[NFSPROC4_CLNT_BIND_CONN_TO_SESSION],
8112 		.rpc_argp = &args,
8113 		.rpc_resp = &res,
8114 		.rpc_cred = cred,
8115 	};
8116 	struct rpc_task_setup task_setup_data = {
8117 		.rpc_client = clnt,
8118 		.rpc_xprt = xprt,
8119 		.callback_ops = &nfs4_bind_one_conn_to_session_ops,
8120 		.rpc_message = &msg,
8121 		.flags = RPC_TASK_TIMEOUT,
8122 	};
8123 	struct rpc_task *task;
8124 
8125 	nfs4_copy_sessionid(&args.sessionid, &clp->cl_session->sess_id);
8126 	if (!(clp->cl_session->flags & SESSION4_BACK_CHAN))
8127 		args.dir = NFS4_CDFC4_FORE;
8128 
8129 	/* Do not set the backchannel flag unless this is clnt->cl_xprt */
8130 	if (xprt != rcu_access_pointer(clnt->cl_xprt))
8131 		args.dir = NFS4_CDFC4_FORE;
8132 
8133 	task = rpc_run_task(&task_setup_data);
8134 	if (!IS_ERR(task)) {
8135 		status = task->tk_status;
8136 		rpc_put_task(task);
8137 	} else
8138 		status = PTR_ERR(task);
8139 	trace_nfs4_bind_conn_to_session(clp, status);
8140 	if (status == 0) {
8141 		if (memcmp(res.sessionid.data,
8142 		    clp->cl_session->sess_id.data, NFS4_MAX_SESSIONID_LEN)) {
8143 			dprintk("NFS: %s: Session ID mismatch\n", __func__);
8144 			return -EIO;
8145 		}
8146 		if ((res.dir & args.dir) != res.dir || res.dir == 0) {
8147 			dprintk("NFS: %s: Unexpected direction from server\n",
8148 				__func__);
8149 			return -EIO;
8150 		}
8151 		if (res.use_conn_in_rdma_mode != args.use_conn_in_rdma_mode) {
8152 			dprintk("NFS: %s: Server returned RDMA mode = true\n",
8153 				__func__);
8154 			return -EIO;
8155 		}
8156 	}
8157 
8158 	return status;
8159 }
8160 
8161 struct rpc_bind_conn_calldata {
8162 	struct nfs_client *clp;
8163 	const struct cred *cred;
8164 };
8165 
8166 static int
8167 nfs4_proc_bind_conn_to_session_callback(struct rpc_clnt *clnt,
8168 		struct rpc_xprt *xprt,
8169 		void *calldata)
8170 {
8171 	struct rpc_bind_conn_calldata *p = calldata;
8172 
8173 	return nfs4_proc_bind_one_conn_to_session(clnt, xprt, p->clp, p->cred);
8174 }
8175 
8176 int nfs4_proc_bind_conn_to_session(struct nfs_client *clp, const struct cred *cred)
8177 {
8178 	struct rpc_bind_conn_calldata data = {
8179 		.clp = clp,
8180 		.cred = cred,
8181 	};
8182 	return rpc_clnt_iterate_for_each_xprt(clp->cl_rpcclient,
8183 			nfs4_proc_bind_conn_to_session_callback, &data);
8184 }
8185 
8186 /*
8187  * Minimum set of SP4_MACH_CRED operations from RFC 5661 in the enforce map
8188  * and operations we'd like to see to enable certain features in the allow map
8189  */
8190 static const struct nfs41_state_protection nfs4_sp4_mach_cred_request = {
8191 	.how = SP4_MACH_CRED,
8192 	.enforce.u.words = {
8193 		[1] = 1 << (OP_BIND_CONN_TO_SESSION - 32) |
8194 		      1 << (OP_EXCHANGE_ID - 32) |
8195 		      1 << (OP_CREATE_SESSION - 32) |
8196 		      1 << (OP_DESTROY_SESSION - 32) |
8197 		      1 << (OP_DESTROY_CLIENTID - 32)
8198 	},
8199 	.allow.u.words = {
8200 		[0] = 1 << (OP_CLOSE) |
8201 		      1 << (OP_OPEN_DOWNGRADE) |
8202 		      1 << (OP_LOCKU) |
8203 		      1 << (OP_DELEGRETURN) |
8204 		      1 << (OP_COMMIT),
8205 		[1] = 1 << (OP_SECINFO - 32) |
8206 		      1 << (OP_SECINFO_NO_NAME - 32) |
8207 		      1 << (OP_LAYOUTRETURN - 32) |
8208 		      1 << (OP_TEST_STATEID - 32) |
8209 		      1 << (OP_FREE_STATEID - 32) |
8210 		      1 << (OP_WRITE - 32)
8211 	}
8212 };
8213 
8214 /*
8215  * Select the state protection mode for client `clp' given the server results
8216  * from exchange_id in `sp'.
8217  *
8218  * Returns 0 on success, negative errno otherwise.
8219  */
8220 static int nfs4_sp4_select_mode(struct nfs_client *clp,
8221 				 struct nfs41_state_protection *sp)
8222 {
8223 	static const u32 supported_enforce[NFS4_OP_MAP_NUM_WORDS] = {
8224 		[1] = 1 << (OP_BIND_CONN_TO_SESSION - 32) |
8225 		      1 << (OP_EXCHANGE_ID - 32) |
8226 		      1 << (OP_CREATE_SESSION - 32) |
8227 		      1 << (OP_DESTROY_SESSION - 32) |
8228 		      1 << (OP_DESTROY_CLIENTID - 32)
8229 	};
8230 	unsigned long flags = 0;
8231 	unsigned int i;
8232 	int ret = 0;
8233 
8234 	if (sp->how == SP4_MACH_CRED) {
8235 		/* Print state protect result */
8236 		dfprintk(MOUNT, "Server SP4_MACH_CRED support:\n");
8237 		for (i = 0; i <= LAST_NFS4_OP; i++) {
8238 			if (test_bit(i, sp->enforce.u.longs))
8239 				dfprintk(MOUNT, "  enforce op %d\n", i);
8240 			if (test_bit(i, sp->allow.u.longs))
8241 				dfprintk(MOUNT, "  allow op %d\n", i);
8242 		}
8243 
8244 		/* make sure nothing is on enforce list that isn't supported */
8245 		for (i = 0; i < NFS4_OP_MAP_NUM_WORDS; i++) {
8246 			if (sp->enforce.u.words[i] & ~supported_enforce[i]) {
8247 				dfprintk(MOUNT, "sp4_mach_cred: disabled\n");
8248 				ret = -EINVAL;
8249 				goto out;
8250 			}
8251 		}
8252 
8253 		/*
8254 		 * Minimal mode - state operations are allowed to use machine
8255 		 * credential.  Note this already happens by default, so the
8256 		 * client doesn't have to do anything more than the negotiation.
8257 		 *
8258 		 * NOTE: we don't care if EXCHANGE_ID is in the list -
8259 		 *       we're already using the machine cred for exchange_id
8260 		 *       and will never use a different cred.
8261 		 */
8262 		if (test_bit(OP_BIND_CONN_TO_SESSION, sp->enforce.u.longs) &&
8263 		    test_bit(OP_CREATE_SESSION, sp->enforce.u.longs) &&
8264 		    test_bit(OP_DESTROY_SESSION, sp->enforce.u.longs) &&
8265 		    test_bit(OP_DESTROY_CLIENTID, sp->enforce.u.longs)) {
8266 			dfprintk(MOUNT, "sp4_mach_cred:\n");
8267 			dfprintk(MOUNT, "  minimal mode enabled\n");
8268 			__set_bit(NFS_SP4_MACH_CRED_MINIMAL, &flags);
8269 		} else {
8270 			dfprintk(MOUNT, "sp4_mach_cred: disabled\n");
8271 			ret = -EINVAL;
8272 			goto out;
8273 		}
8274 
8275 		if (test_bit(OP_CLOSE, sp->allow.u.longs) &&
8276 		    test_bit(OP_OPEN_DOWNGRADE, sp->allow.u.longs) &&
8277 		    test_bit(OP_DELEGRETURN, sp->allow.u.longs) &&
8278 		    test_bit(OP_LOCKU, sp->allow.u.longs)) {
8279 			dfprintk(MOUNT, "  cleanup mode enabled\n");
8280 			__set_bit(NFS_SP4_MACH_CRED_CLEANUP, &flags);
8281 		}
8282 
8283 		if (test_bit(OP_LAYOUTRETURN, sp->allow.u.longs)) {
8284 			dfprintk(MOUNT, "  pnfs cleanup mode enabled\n");
8285 			__set_bit(NFS_SP4_MACH_CRED_PNFS_CLEANUP, &flags);
8286 		}
8287 
8288 		if (test_bit(OP_SECINFO, sp->allow.u.longs) &&
8289 		    test_bit(OP_SECINFO_NO_NAME, sp->allow.u.longs)) {
8290 			dfprintk(MOUNT, "  secinfo mode enabled\n");
8291 			__set_bit(NFS_SP4_MACH_CRED_SECINFO, &flags);
8292 		}
8293 
8294 		if (test_bit(OP_TEST_STATEID, sp->allow.u.longs) &&
8295 		    test_bit(OP_FREE_STATEID, sp->allow.u.longs)) {
8296 			dfprintk(MOUNT, "  stateid mode enabled\n");
8297 			__set_bit(NFS_SP4_MACH_CRED_STATEID, &flags);
8298 		}
8299 
8300 		if (test_bit(OP_WRITE, sp->allow.u.longs)) {
8301 			dfprintk(MOUNT, "  write mode enabled\n");
8302 			__set_bit(NFS_SP4_MACH_CRED_WRITE, &flags);
8303 		}
8304 
8305 		if (test_bit(OP_COMMIT, sp->allow.u.longs)) {
8306 			dfprintk(MOUNT, "  commit mode enabled\n");
8307 			__set_bit(NFS_SP4_MACH_CRED_COMMIT, &flags);
8308 		}
8309 	}
8310 out:
8311 	clp->cl_sp4_flags = flags;
8312 	return ret;
8313 }
8314 
8315 struct nfs41_exchange_id_data {
8316 	struct nfs41_exchange_id_res res;
8317 	struct nfs41_exchange_id_args args;
8318 };
8319 
8320 static void nfs4_exchange_id_release(void *data)
8321 {
8322 	struct nfs41_exchange_id_data *cdata =
8323 					(struct nfs41_exchange_id_data *)data;
8324 
8325 	nfs_put_client(cdata->args.client);
8326 	kfree(cdata->res.impl_id);
8327 	kfree(cdata->res.server_scope);
8328 	kfree(cdata->res.server_owner);
8329 	kfree(cdata);
8330 }
8331 
8332 static const struct rpc_call_ops nfs4_exchange_id_call_ops = {
8333 	.rpc_release = nfs4_exchange_id_release,
8334 };
8335 
8336 /*
8337  * _nfs4_proc_exchange_id()
8338  *
8339  * Wrapper for EXCHANGE_ID operation.
8340  */
8341 static struct rpc_task *
8342 nfs4_run_exchange_id(struct nfs_client *clp, const struct cred *cred,
8343 			u32 sp4_how, struct rpc_xprt *xprt)
8344 {
8345 	struct rpc_message msg = {
8346 		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_EXCHANGE_ID],
8347 		.rpc_cred = cred,
8348 	};
8349 	struct rpc_task_setup task_setup_data = {
8350 		.rpc_client = clp->cl_rpcclient,
8351 		.callback_ops = &nfs4_exchange_id_call_ops,
8352 		.rpc_message = &msg,
8353 		.flags = RPC_TASK_TIMEOUT | RPC_TASK_NO_ROUND_ROBIN,
8354 	};
8355 	struct nfs41_exchange_id_data *calldata;
8356 	int status;
8357 
8358 	if (!refcount_inc_not_zero(&clp->cl_count))
8359 		return ERR_PTR(-EIO);
8360 
8361 	status = -ENOMEM;
8362 	calldata = kzalloc(sizeof(*calldata), GFP_NOFS);
8363 	if (!calldata)
8364 		goto out;
8365 
8366 	nfs4_init_boot_verifier(clp, &calldata->args.verifier);
8367 
8368 	status = nfs4_init_uniform_client_string(clp);
8369 	if (status)
8370 		goto out_calldata;
8371 
8372 	calldata->res.server_owner = kzalloc(sizeof(struct nfs41_server_owner),
8373 						GFP_NOFS);
8374 	status = -ENOMEM;
8375 	if (unlikely(calldata->res.server_owner == NULL))
8376 		goto out_calldata;
8377 
8378 	calldata->res.server_scope = kzalloc(sizeof(struct nfs41_server_scope),
8379 					GFP_NOFS);
8380 	if (unlikely(calldata->res.server_scope == NULL))
8381 		goto out_server_owner;
8382 
8383 	calldata->res.impl_id = kzalloc(sizeof(struct nfs41_impl_id), GFP_NOFS);
8384 	if (unlikely(calldata->res.impl_id == NULL))
8385 		goto out_server_scope;
8386 
8387 	switch (sp4_how) {
8388 	case SP4_NONE:
8389 		calldata->args.state_protect.how = SP4_NONE;
8390 		break;
8391 
8392 	case SP4_MACH_CRED:
8393 		calldata->args.state_protect = nfs4_sp4_mach_cred_request;
8394 		break;
8395 
8396 	default:
8397 		/* unsupported! */
8398 		WARN_ON_ONCE(1);
8399 		status = -EINVAL;
8400 		goto out_impl_id;
8401 	}
8402 	if (xprt) {
8403 		task_setup_data.rpc_xprt = xprt;
8404 		task_setup_data.flags |= RPC_TASK_SOFTCONN;
8405 		memcpy(calldata->args.verifier.data, clp->cl_confirm.data,
8406 				sizeof(calldata->args.verifier.data));
8407 	}
8408 	calldata->args.client = clp;
8409 	calldata->args.flags = EXCHGID4_FLAG_SUPP_MOVED_REFER |
8410 	EXCHGID4_FLAG_BIND_PRINC_STATEID;
8411 #ifdef CONFIG_NFS_V4_1_MIGRATION
8412 	calldata->args.flags |= EXCHGID4_FLAG_SUPP_MOVED_MIGR;
8413 #endif
8414 	msg.rpc_argp = &calldata->args;
8415 	msg.rpc_resp = &calldata->res;
8416 	task_setup_data.callback_data = calldata;
8417 
8418 	return rpc_run_task(&task_setup_data);
8419 
8420 out_impl_id:
8421 	kfree(calldata->res.impl_id);
8422 out_server_scope:
8423 	kfree(calldata->res.server_scope);
8424 out_server_owner:
8425 	kfree(calldata->res.server_owner);
8426 out_calldata:
8427 	kfree(calldata);
8428 out:
8429 	nfs_put_client(clp);
8430 	return ERR_PTR(status);
8431 }
8432 
8433 /*
8434  * _nfs4_proc_exchange_id()
8435  *
8436  * Wrapper for EXCHANGE_ID operation.
8437  */
8438 static int _nfs4_proc_exchange_id(struct nfs_client *clp, const struct cred *cred,
8439 			u32 sp4_how)
8440 {
8441 	struct rpc_task *task;
8442 	struct nfs41_exchange_id_args *argp;
8443 	struct nfs41_exchange_id_res *resp;
8444 	unsigned long now = jiffies;
8445 	int status;
8446 
8447 	task = nfs4_run_exchange_id(clp, cred, sp4_how, NULL);
8448 	if (IS_ERR(task))
8449 		return PTR_ERR(task);
8450 
8451 	argp = task->tk_msg.rpc_argp;
8452 	resp = task->tk_msg.rpc_resp;
8453 	status = task->tk_status;
8454 	if (status  != 0)
8455 		goto out;
8456 
8457 	status = nfs4_check_cl_exchange_flags(resp->flags);
8458 	if (status  != 0)
8459 		goto out;
8460 
8461 	status = nfs4_sp4_select_mode(clp, &resp->state_protect);
8462 	if (status != 0)
8463 		goto out;
8464 
8465 	do_renew_lease(clp, now);
8466 
8467 	clp->cl_clientid = resp->clientid;
8468 	clp->cl_exchange_flags = resp->flags;
8469 	clp->cl_seqid = resp->seqid;
8470 	/* Client ID is not confirmed */
8471 	if (!(resp->flags & EXCHGID4_FLAG_CONFIRMED_R))
8472 		clear_bit(NFS4_SESSION_ESTABLISHED,
8473 			  &clp->cl_session->session_state);
8474 
8475 	if (clp->cl_serverscope != NULL &&
8476 	    !nfs41_same_server_scope(clp->cl_serverscope,
8477 				resp->server_scope)) {
8478 		dprintk("%s: server_scope mismatch detected\n",
8479 			__func__);
8480 		set_bit(NFS4CLNT_SERVER_SCOPE_MISMATCH, &clp->cl_state);
8481 	}
8482 
8483 	swap(clp->cl_serverowner, resp->server_owner);
8484 	swap(clp->cl_serverscope, resp->server_scope);
8485 	swap(clp->cl_implid, resp->impl_id);
8486 
8487 	/* Save the EXCHANGE_ID verifier session trunk tests */
8488 	memcpy(clp->cl_confirm.data, argp->verifier.data,
8489 	       sizeof(clp->cl_confirm.data));
8490 out:
8491 	trace_nfs4_exchange_id(clp, status);
8492 	rpc_put_task(task);
8493 	return status;
8494 }
8495 
8496 /*
8497  * nfs4_proc_exchange_id()
8498  *
8499  * Returns zero, a negative errno, or a negative NFS4ERR status code.
8500  *
8501  * Since the clientid has expired, all compounds using sessions
8502  * associated with the stale clientid will be returning
8503  * NFS4ERR_BADSESSION in the sequence operation, and will therefore
8504  * be in some phase of session reset.
8505  *
8506  * Will attempt to negotiate SP4_MACH_CRED if krb5i / krb5p auth is used.
8507  */
8508 int nfs4_proc_exchange_id(struct nfs_client *clp, const struct cred *cred)
8509 {
8510 	rpc_authflavor_t authflavor = clp->cl_rpcclient->cl_auth->au_flavor;
8511 	int status;
8512 
8513 	/* try SP4_MACH_CRED if krb5i/p	*/
8514 	if (authflavor == RPC_AUTH_GSS_KRB5I ||
8515 	    authflavor == RPC_AUTH_GSS_KRB5P) {
8516 		status = _nfs4_proc_exchange_id(clp, cred, SP4_MACH_CRED);
8517 		if (!status)
8518 			return 0;
8519 	}
8520 
8521 	/* try SP4_NONE */
8522 	return _nfs4_proc_exchange_id(clp, cred, SP4_NONE);
8523 }
8524 
8525 /**
8526  * nfs4_test_session_trunk
8527  *
8528  * This is an add_xprt_test() test function called from
8529  * rpc_clnt_setup_test_and_add_xprt.
8530  *
8531  * The rpc_xprt_switch is referrenced by rpc_clnt_setup_test_and_add_xprt
8532  * and is dereferrenced in nfs4_exchange_id_release
8533  *
8534  * Upon success, add the new transport to the rpc_clnt
8535  *
8536  * @clnt: struct rpc_clnt to get new transport
8537  * @xprt: the rpc_xprt to test
8538  * @data: call data for _nfs4_proc_exchange_id.
8539  */
8540 void nfs4_test_session_trunk(struct rpc_clnt *clnt, struct rpc_xprt *xprt,
8541 			    void *data)
8542 {
8543 	struct nfs4_add_xprt_data *adata = (struct nfs4_add_xprt_data *)data;
8544 	struct rpc_task *task;
8545 	int status;
8546 
8547 	u32 sp4_how;
8548 
8549 	dprintk("--> %s try %s\n", __func__,
8550 		xprt->address_strings[RPC_DISPLAY_ADDR]);
8551 
8552 	sp4_how = (adata->clp->cl_sp4_flags == 0 ? SP4_NONE : SP4_MACH_CRED);
8553 
8554 	/* Test connection for session trunking. Async exchange_id call */
8555 	task = nfs4_run_exchange_id(adata->clp, adata->cred, sp4_how, xprt);
8556 	if (IS_ERR(task))
8557 		return;
8558 
8559 	status = task->tk_status;
8560 	if (status == 0)
8561 		status = nfs4_detect_session_trunking(adata->clp,
8562 				task->tk_msg.rpc_resp, xprt);
8563 
8564 	if (status == 0)
8565 		rpc_clnt_xprt_switch_add_xprt(clnt, xprt);
8566 
8567 	rpc_put_task(task);
8568 }
8569 EXPORT_SYMBOL_GPL(nfs4_test_session_trunk);
8570 
8571 static int _nfs4_proc_destroy_clientid(struct nfs_client *clp,
8572 		const struct cred *cred)
8573 {
8574 	struct rpc_message msg = {
8575 		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_DESTROY_CLIENTID],
8576 		.rpc_argp = clp,
8577 		.rpc_cred = cred,
8578 	};
8579 	int status;
8580 
8581 	status = rpc_call_sync(clp->cl_rpcclient, &msg,
8582 			       RPC_TASK_TIMEOUT | RPC_TASK_NO_ROUND_ROBIN);
8583 	trace_nfs4_destroy_clientid(clp, status);
8584 	if (status)
8585 		dprintk("NFS: Got error %d from the server %s on "
8586 			"DESTROY_CLIENTID.", status, clp->cl_hostname);
8587 	return status;
8588 }
8589 
8590 static int nfs4_proc_destroy_clientid(struct nfs_client *clp,
8591 		const struct cred *cred)
8592 {
8593 	unsigned int loop;
8594 	int ret;
8595 
8596 	for (loop = NFS4_MAX_LOOP_ON_RECOVER; loop != 0; loop--) {
8597 		ret = _nfs4_proc_destroy_clientid(clp, cred);
8598 		switch (ret) {
8599 		case -NFS4ERR_DELAY:
8600 		case -NFS4ERR_CLIENTID_BUSY:
8601 			ssleep(1);
8602 			break;
8603 		default:
8604 			return ret;
8605 		}
8606 	}
8607 	return 0;
8608 }
8609 
8610 int nfs4_destroy_clientid(struct nfs_client *clp)
8611 {
8612 	const struct cred *cred;
8613 	int ret = 0;
8614 
8615 	if (clp->cl_mvops->minor_version < 1)
8616 		goto out;
8617 	if (clp->cl_exchange_flags == 0)
8618 		goto out;
8619 	if (clp->cl_preserve_clid)
8620 		goto out;
8621 	cred = nfs4_get_clid_cred(clp);
8622 	ret = nfs4_proc_destroy_clientid(clp, cred);
8623 	put_cred(cred);
8624 	switch (ret) {
8625 	case 0:
8626 	case -NFS4ERR_STALE_CLIENTID:
8627 		clp->cl_exchange_flags = 0;
8628 	}
8629 out:
8630 	return ret;
8631 }
8632 
8633 #endif /* CONFIG_NFS_V4_1 */
8634 
8635 struct nfs4_get_lease_time_data {
8636 	struct nfs4_get_lease_time_args *args;
8637 	struct nfs4_get_lease_time_res *res;
8638 	struct nfs_client *clp;
8639 };
8640 
8641 static void nfs4_get_lease_time_prepare(struct rpc_task *task,
8642 					void *calldata)
8643 {
8644 	struct nfs4_get_lease_time_data *data =
8645 			(struct nfs4_get_lease_time_data *)calldata;
8646 
8647 	dprintk("--> %s\n", __func__);
8648 	/* just setup sequence, do not trigger session recovery
8649 	   since we're invoked within one */
8650 	nfs4_setup_sequence(data->clp,
8651 			&data->args->la_seq_args,
8652 			&data->res->lr_seq_res,
8653 			task);
8654 	dprintk("<-- %s\n", __func__);
8655 }
8656 
8657 /*
8658  * Called from nfs4_state_manager thread for session setup, so don't recover
8659  * from sequence operation or clientid errors.
8660  */
8661 static void nfs4_get_lease_time_done(struct rpc_task *task, void *calldata)
8662 {
8663 	struct nfs4_get_lease_time_data *data =
8664 			(struct nfs4_get_lease_time_data *)calldata;
8665 
8666 	dprintk("--> %s\n", __func__);
8667 	if (!nfs4_sequence_done(task, &data->res->lr_seq_res))
8668 		return;
8669 	switch (task->tk_status) {
8670 	case -NFS4ERR_DELAY:
8671 	case -NFS4ERR_GRACE:
8672 		dprintk("%s Retry: tk_status %d\n", __func__, task->tk_status);
8673 		rpc_delay(task, NFS4_POLL_RETRY_MIN);
8674 		task->tk_status = 0;
8675 		fallthrough;
8676 	case -NFS4ERR_RETRY_UNCACHED_REP:
8677 		rpc_restart_call_prepare(task);
8678 		return;
8679 	}
8680 	dprintk("<-- %s\n", __func__);
8681 }
8682 
8683 static const struct rpc_call_ops nfs4_get_lease_time_ops = {
8684 	.rpc_call_prepare = nfs4_get_lease_time_prepare,
8685 	.rpc_call_done = nfs4_get_lease_time_done,
8686 };
8687 
8688 int nfs4_proc_get_lease_time(struct nfs_client *clp, struct nfs_fsinfo *fsinfo)
8689 {
8690 	struct nfs4_get_lease_time_args args;
8691 	struct nfs4_get_lease_time_res res = {
8692 		.lr_fsinfo = fsinfo,
8693 	};
8694 	struct nfs4_get_lease_time_data data = {
8695 		.args = &args,
8696 		.res = &res,
8697 		.clp = clp,
8698 	};
8699 	struct rpc_message msg = {
8700 		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GET_LEASE_TIME],
8701 		.rpc_argp = &args,
8702 		.rpc_resp = &res,
8703 	};
8704 	struct rpc_task_setup task_setup = {
8705 		.rpc_client = clp->cl_rpcclient,
8706 		.rpc_message = &msg,
8707 		.callback_ops = &nfs4_get_lease_time_ops,
8708 		.callback_data = &data,
8709 		.flags = RPC_TASK_TIMEOUT,
8710 	};
8711 
8712 	nfs4_init_sequence(&args.la_seq_args, &res.lr_seq_res, 0, 1);
8713 	return nfs4_call_sync_custom(&task_setup);
8714 }
8715 
8716 #ifdef CONFIG_NFS_V4_1
8717 
8718 /*
8719  * Initialize the values to be used by the client in CREATE_SESSION
8720  * If nfs4_init_session set the fore channel request and response sizes,
8721  * use them.
8722  *
8723  * Set the back channel max_resp_sz_cached to zero to force the client to
8724  * always set csa_cachethis to FALSE because the current implementation
8725  * of the back channel DRC only supports caching the CB_SEQUENCE operation.
8726  */
8727 static void nfs4_init_channel_attrs(struct nfs41_create_session_args *args,
8728 				    struct rpc_clnt *clnt)
8729 {
8730 	unsigned int max_rqst_sz, max_resp_sz;
8731 	unsigned int max_bc_payload = rpc_max_bc_payload(clnt);
8732 	unsigned int max_bc_slots = rpc_num_bc_slots(clnt);
8733 
8734 	max_rqst_sz = NFS_MAX_FILE_IO_SIZE + nfs41_maxwrite_overhead;
8735 	max_resp_sz = NFS_MAX_FILE_IO_SIZE + nfs41_maxread_overhead;
8736 
8737 	/* Fore channel attributes */
8738 	args->fc_attrs.max_rqst_sz = max_rqst_sz;
8739 	args->fc_attrs.max_resp_sz = max_resp_sz;
8740 	args->fc_attrs.max_ops = NFS4_MAX_OPS;
8741 	args->fc_attrs.max_reqs = max_session_slots;
8742 
8743 	dprintk("%s: Fore Channel : max_rqst_sz=%u max_resp_sz=%u "
8744 		"max_ops=%u max_reqs=%u\n",
8745 		__func__,
8746 		args->fc_attrs.max_rqst_sz, args->fc_attrs.max_resp_sz,
8747 		args->fc_attrs.max_ops, args->fc_attrs.max_reqs);
8748 
8749 	/* Back channel attributes */
8750 	args->bc_attrs.max_rqst_sz = max_bc_payload;
8751 	args->bc_attrs.max_resp_sz = max_bc_payload;
8752 	args->bc_attrs.max_resp_sz_cached = 0;
8753 	args->bc_attrs.max_ops = NFS4_MAX_BACK_CHANNEL_OPS;
8754 	args->bc_attrs.max_reqs = max_t(unsigned short, max_session_cb_slots, 1);
8755 	if (args->bc_attrs.max_reqs > max_bc_slots)
8756 		args->bc_attrs.max_reqs = max_bc_slots;
8757 
8758 	dprintk("%s: Back Channel : max_rqst_sz=%u max_resp_sz=%u "
8759 		"max_resp_sz_cached=%u max_ops=%u max_reqs=%u\n",
8760 		__func__,
8761 		args->bc_attrs.max_rqst_sz, args->bc_attrs.max_resp_sz,
8762 		args->bc_attrs.max_resp_sz_cached, args->bc_attrs.max_ops,
8763 		args->bc_attrs.max_reqs);
8764 }
8765 
8766 static int nfs4_verify_fore_channel_attrs(struct nfs41_create_session_args *args,
8767 		struct nfs41_create_session_res *res)
8768 {
8769 	struct nfs4_channel_attrs *sent = &args->fc_attrs;
8770 	struct nfs4_channel_attrs *rcvd = &res->fc_attrs;
8771 
8772 	if (rcvd->max_resp_sz > sent->max_resp_sz)
8773 		return -EINVAL;
8774 	/*
8775 	 * Our requested max_ops is the minimum we need; we're not
8776 	 * prepared to break up compounds into smaller pieces than that.
8777 	 * So, no point even trying to continue if the server won't
8778 	 * cooperate:
8779 	 */
8780 	if (rcvd->max_ops < sent->max_ops)
8781 		return -EINVAL;
8782 	if (rcvd->max_reqs == 0)
8783 		return -EINVAL;
8784 	if (rcvd->max_reqs > NFS4_MAX_SLOT_TABLE)
8785 		rcvd->max_reqs = NFS4_MAX_SLOT_TABLE;
8786 	return 0;
8787 }
8788 
8789 static int nfs4_verify_back_channel_attrs(struct nfs41_create_session_args *args,
8790 		struct nfs41_create_session_res *res)
8791 {
8792 	struct nfs4_channel_attrs *sent = &args->bc_attrs;
8793 	struct nfs4_channel_attrs *rcvd = &res->bc_attrs;
8794 
8795 	if (!(res->flags & SESSION4_BACK_CHAN))
8796 		goto out;
8797 	if (rcvd->max_rqst_sz > sent->max_rqst_sz)
8798 		return -EINVAL;
8799 	if (rcvd->max_resp_sz < sent->max_resp_sz)
8800 		return -EINVAL;
8801 	if (rcvd->max_resp_sz_cached > sent->max_resp_sz_cached)
8802 		return -EINVAL;
8803 	if (rcvd->max_ops > sent->max_ops)
8804 		return -EINVAL;
8805 	if (rcvd->max_reqs > sent->max_reqs)
8806 		return -EINVAL;
8807 out:
8808 	return 0;
8809 }
8810 
8811 static int nfs4_verify_channel_attrs(struct nfs41_create_session_args *args,
8812 				     struct nfs41_create_session_res *res)
8813 {
8814 	int ret;
8815 
8816 	ret = nfs4_verify_fore_channel_attrs(args, res);
8817 	if (ret)
8818 		return ret;
8819 	return nfs4_verify_back_channel_attrs(args, res);
8820 }
8821 
8822 static void nfs4_update_session(struct nfs4_session *session,
8823 		struct nfs41_create_session_res *res)
8824 {
8825 	nfs4_copy_sessionid(&session->sess_id, &res->sessionid);
8826 	/* Mark client id and session as being confirmed */
8827 	session->clp->cl_exchange_flags |= EXCHGID4_FLAG_CONFIRMED_R;
8828 	set_bit(NFS4_SESSION_ESTABLISHED, &session->session_state);
8829 	session->flags = res->flags;
8830 	memcpy(&session->fc_attrs, &res->fc_attrs, sizeof(session->fc_attrs));
8831 	if (res->flags & SESSION4_BACK_CHAN)
8832 		memcpy(&session->bc_attrs, &res->bc_attrs,
8833 				sizeof(session->bc_attrs));
8834 }
8835 
8836 static int _nfs4_proc_create_session(struct nfs_client *clp,
8837 		const struct cred *cred)
8838 {
8839 	struct nfs4_session *session = clp->cl_session;
8840 	struct nfs41_create_session_args args = {
8841 		.client = clp,
8842 		.clientid = clp->cl_clientid,
8843 		.seqid = clp->cl_seqid,
8844 		.cb_program = NFS4_CALLBACK,
8845 	};
8846 	struct nfs41_create_session_res res;
8847 
8848 	struct rpc_message msg = {
8849 		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CREATE_SESSION],
8850 		.rpc_argp = &args,
8851 		.rpc_resp = &res,
8852 		.rpc_cred = cred,
8853 	};
8854 	int status;
8855 
8856 	nfs4_init_channel_attrs(&args, clp->cl_rpcclient);
8857 	args.flags = (SESSION4_PERSIST | SESSION4_BACK_CHAN);
8858 
8859 	status = rpc_call_sync(session->clp->cl_rpcclient, &msg,
8860 			       RPC_TASK_TIMEOUT | RPC_TASK_NO_ROUND_ROBIN);
8861 	trace_nfs4_create_session(clp, status);
8862 
8863 	switch (status) {
8864 	case -NFS4ERR_STALE_CLIENTID:
8865 	case -NFS4ERR_DELAY:
8866 	case -ETIMEDOUT:
8867 	case -EACCES:
8868 	case -EAGAIN:
8869 		goto out;
8870 	}
8871 
8872 	clp->cl_seqid++;
8873 	if (!status) {
8874 		/* Verify the session's negotiated channel_attrs values */
8875 		status = nfs4_verify_channel_attrs(&args, &res);
8876 		/* Increment the clientid slot sequence id */
8877 		if (status)
8878 			goto out;
8879 		nfs4_update_session(session, &res);
8880 	}
8881 out:
8882 	return status;
8883 }
8884 
8885 /*
8886  * Issues a CREATE_SESSION operation to the server.
8887  * It is the responsibility of the caller to verify the session is
8888  * expired before calling this routine.
8889  */
8890 int nfs4_proc_create_session(struct nfs_client *clp, const struct cred *cred)
8891 {
8892 	int status;
8893 	unsigned *ptr;
8894 	struct nfs4_session *session = clp->cl_session;
8895 
8896 	dprintk("--> %s clp=%p session=%p\n", __func__, clp, session);
8897 
8898 	status = _nfs4_proc_create_session(clp, cred);
8899 	if (status)
8900 		goto out;
8901 
8902 	/* Init or reset the session slot tables */
8903 	status = nfs4_setup_session_slot_tables(session);
8904 	dprintk("slot table setup returned %d\n", status);
8905 	if (status)
8906 		goto out;
8907 
8908 	ptr = (unsigned *)&session->sess_id.data[0];
8909 	dprintk("%s client>seqid %d sessionid %u:%u:%u:%u\n", __func__,
8910 		clp->cl_seqid, ptr[0], ptr[1], ptr[2], ptr[3]);
8911 out:
8912 	dprintk("<-- %s\n", __func__);
8913 	return status;
8914 }
8915 
8916 /*
8917  * Issue the over-the-wire RPC DESTROY_SESSION.
8918  * The caller must serialize access to this routine.
8919  */
8920 int nfs4_proc_destroy_session(struct nfs4_session *session,
8921 		const struct cred *cred)
8922 {
8923 	struct rpc_message msg = {
8924 		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_DESTROY_SESSION],
8925 		.rpc_argp = session,
8926 		.rpc_cred = cred,
8927 	};
8928 	int status = 0;
8929 
8930 	dprintk("--> nfs4_proc_destroy_session\n");
8931 
8932 	/* session is still being setup */
8933 	if (!test_and_clear_bit(NFS4_SESSION_ESTABLISHED, &session->session_state))
8934 		return 0;
8935 
8936 	status = rpc_call_sync(session->clp->cl_rpcclient, &msg,
8937 			       RPC_TASK_TIMEOUT | RPC_TASK_NO_ROUND_ROBIN);
8938 	trace_nfs4_destroy_session(session->clp, status);
8939 
8940 	if (status)
8941 		dprintk("NFS: Got error %d from the server on DESTROY_SESSION. "
8942 			"Session has been destroyed regardless...\n", status);
8943 
8944 	dprintk("<-- nfs4_proc_destroy_session\n");
8945 	return status;
8946 }
8947 
8948 /*
8949  * Renew the cl_session lease.
8950  */
8951 struct nfs4_sequence_data {
8952 	struct nfs_client *clp;
8953 	struct nfs4_sequence_args args;
8954 	struct nfs4_sequence_res res;
8955 };
8956 
8957 static void nfs41_sequence_release(void *data)
8958 {
8959 	struct nfs4_sequence_data *calldata = data;
8960 	struct nfs_client *clp = calldata->clp;
8961 
8962 	if (refcount_read(&clp->cl_count) > 1)
8963 		nfs4_schedule_state_renewal(clp);
8964 	nfs_put_client(clp);
8965 	kfree(calldata);
8966 }
8967 
8968 static int nfs41_sequence_handle_errors(struct rpc_task *task, struct nfs_client *clp)
8969 {
8970 	switch(task->tk_status) {
8971 	case -NFS4ERR_DELAY:
8972 		rpc_delay(task, NFS4_POLL_RETRY_MAX);
8973 		return -EAGAIN;
8974 	default:
8975 		nfs4_schedule_lease_recovery(clp);
8976 	}
8977 	return 0;
8978 }
8979 
8980 static void nfs41_sequence_call_done(struct rpc_task *task, void *data)
8981 {
8982 	struct nfs4_sequence_data *calldata = data;
8983 	struct nfs_client *clp = calldata->clp;
8984 
8985 	if (!nfs41_sequence_done(task, task->tk_msg.rpc_resp))
8986 		return;
8987 
8988 	trace_nfs4_sequence(clp, task->tk_status);
8989 	if (task->tk_status < 0) {
8990 		dprintk("%s ERROR %d\n", __func__, task->tk_status);
8991 		if (refcount_read(&clp->cl_count) == 1)
8992 			goto out;
8993 
8994 		if (nfs41_sequence_handle_errors(task, clp) == -EAGAIN) {
8995 			rpc_restart_call_prepare(task);
8996 			return;
8997 		}
8998 	}
8999 	dprintk("%s rpc_cred %p\n", __func__, task->tk_msg.rpc_cred);
9000 out:
9001 	dprintk("<-- %s\n", __func__);
9002 }
9003 
9004 static void nfs41_sequence_prepare(struct rpc_task *task, void *data)
9005 {
9006 	struct nfs4_sequence_data *calldata = data;
9007 	struct nfs_client *clp = calldata->clp;
9008 	struct nfs4_sequence_args *args;
9009 	struct nfs4_sequence_res *res;
9010 
9011 	args = task->tk_msg.rpc_argp;
9012 	res = task->tk_msg.rpc_resp;
9013 
9014 	nfs4_setup_sequence(clp, args, res, task);
9015 }
9016 
9017 static const struct rpc_call_ops nfs41_sequence_ops = {
9018 	.rpc_call_done = nfs41_sequence_call_done,
9019 	.rpc_call_prepare = nfs41_sequence_prepare,
9020 	.rpc_release = nfs41_sequence_release,
9021 };
9022 
9023 static struct rpc_task *_nfs41_proc_sequence(struct nfs_client *clp,
9024 		const struct cred *cred,
9025 		struct nfs4_slot *slot,
9026 		bool is_privileged)
9027 {
9028 	struct nfs4_sequence_data *calldata;
9029 	struct rpc_message msg = {
9030 		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SEQUENCE],
9031 		.rpc_cred = cred,
9032 	};
9033 	struct rpc_task_setup task_setup_data = {
9034 		.rpc_client = clp->cl_rpcclient,
9035 		.rpc_message = &msg,
9036 		.callback_ops = &nfs41_sequence_ops,
9037 		.flags = RPC_TASK_ASYNC | RPC_TASK_TIMEOUT,
9038 	};
9039 	struct rpc_task *ret;
9040 
9041 	ret = ERR_PTR(-EIO);
9042 	if (!refcount_inc_not_zero(&clp->cl_count))
9043 		goto out_err;
9044 
9045 	ret = ERR_PTR(-ENOMEM);
9046 	calldata = kzalloc(sizeof(*calldata), GFP_NOFS);
9047 	if (calldata == NULL)
9048 		goto out_put_clp;
9049 	nfs4_init_sequence(&calldata->args, &calldata->res, 0, is_privileged);
9050 	nfs4_sequence_attach_slot(&calldata->args, &calldata->res, slot);
9051 	msg.rpc_argp = &calldata->args;
9052 	msg.rpc_resp = &calldata->res;
9053 	calldata->clp = clp;
9054 	task_setup_data.callback_data = calldata;
9055 
9056 	ret = rpc_run_task(&task_setup_data);
9057 	if (IS_ERR(ret))
9058 		goto out_err;
9059 	return ret;
9060 out_put_clp:
9061 	nfs_put_client(clp);
9062 out_err:
9063 	nfs41_release_slot(slot);
9064 	return ret;
9065 }
9066 
9067 static int nfs41_proc_async_sequence(struct nfs_client *clp, const struct cred *cred, unsigned renew_flags)
9068 {
9069 	struct rpc_task *task;
9070 	int ret = 0;
9071 
9072 	if ((renew_flags & NFS4_RENEW_TIMEOUT) == 0)
9073 		return -EAGAIN;
9074 	task = _nfs41_proc_sequence(clp, cred, NULL, false);
9075 	if (IS_ERR(task))
9076 		ret = PTR_ERR(task);
9077 	else
9078 		rpc_put_task_async(task);
9079 	dprintk("<-- %s status=%d\n", __func__, ret);
9080 	return ret;
9081 }
9082 
9083 static int nfs4_proc_sequence(struct nfs_client *clp, const struct cred *cred)
9084 {
9085 	struct rpc_task *task;
9086 	int ret;
9087 
9088 	task = _nfs41_proc_sequence(clp, cred, NULL, true);
9089 	if (IS_ERR(task)) {
9090 		ret = PTR_ERR(task);
9091 		goto out;
9092 	}
9093 	ret = rpc_wait_for_completion_task(task);
9094 	if (!ret)
9095 		ret = task->tk_status;
9096 	rpc_put_task(task);
9097 out:
9098 	dprintk("<-- %s status=%d\n", __func__, ret);
9099 	return ret;
9100 }
9101 
9102 struct nfs4_reclaim_complete_data {
9103 	struct nfs_client *clp;
9104 	struct nfs41_reclaim_complete_args arg;
9105 	struct nfs41_reclaim_complete_res res;
9106 };
9107 
9108 static void nfs4_reclaim_complete_prepare(struct rpc_task *task, void *data)
9109 {
9110 	struct nfs4_reclaim_complete_data *calldata = data;
9111 
9112 	nfs4_setup_sequence(calldata->clp,
9113 			&calldata->arg.seq_args,
9114 			&calldata->res.seq_res,
9115 			task);
9116 }
9117 
9118 static int nfs41_reclaim_complete_handle_errors(struct rpc_task *task, struct nfs_client *clp)
9119 {
9120 	switch(task->tk_status) {
9121 	case 0:
9122 		wake_up_all(&clp->cl_lock_waitq);
9123 		fallthrough;
9124 	case -NFS4ERR_COMPLETE_ALREADY:
9125 	case -NFS4ERR_WRONG_CRED: /* What to do here? */
9126 		break;
9127 	case -NFS4ERR_DELAY:
9128 		rpc_delay(task, NFS4_POLL_RETRY_MAX);
9129 		fallthrough;
9130 	case -NFS4ERR_RETRY_UNCACHED_REP:
9131 		return -EAGAIN;
9132 	case -NFS4ERR_BADSESSION:
9133 	case -NFS4ERR_DEADSESSION:
9134 	case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
9135 		break;
9136 	default:
9137 		nfs4_schedule_lease_recovery(clp);
9138 	}
9139 	return 0;
9140 }
9141 
9142 static void nfs4_reclaim_complete_done(struct rpc_task *task, void *data)
9143 {
9144 	struct nfs4_reclaim_complete_data *calldata = data;
9145 	struct nfs_client *clp = calldata->clp;
9146 	struct nfs4_sequence_res *res = &calldata->res.seq_res;
9147 
9148 	dprintk("--> %s\n", __func__);
9149 	if (!nfs41_sequence_done(task, res))
9150 		return;
9151 
9152 	trace_nfs4_reclaim_complete(clp, task->tk_status);
9153 	if (nfs41_reclaim_complete_handle_errors(task, clp) == -EAGAIN) {
9154 		rpc_restart_call_prepare(task);
9155 		return;
9156 	}
9157 	dprintk("<-- %s\n", __func__);
9158 }
9159 
9160 static void nfs4_free_reclaim_complete_data(void *data)
9161 {
9162 	struct nfs4_reclaim_complete_data *calldata = data;
9163 
9164 	kfree(calldata);
9165 }
9166 
9167 static const struct rpc_call_ops nfs4_reclaim_complete_call_ops = {
9168 	.rpc_call_prepare = nfs4_reclaim_complete_prepare,
9169 	.rpc_call_done = nfs4_reclaim_complete_done,
9170 	.rpc_release = nfs4_free_reclaim_complete_data,
9171 };
9172 
9173 /*
9174  * Issue a global reclaim complete.
9175  */
9176 static int nfs41_proc_reclaim_complete(struct nfs_client *clp,
9177 		const struct cred *cred)
9178 {
9179 	struct nfs4_reclaim_complete_data *calldata;
9180 	struct rpc_message msg = {
9181 		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RECLAIM_COMPLETE],
9182 		.rpc_cred = cred,
9183 	};
9184 	struct rpc_task_setup task_setup_data = {
9185 		.rpc_client = clp->cl_rpcclient,
9186 		.rpc_message = &msg,
9187 		.callback_ops = &nfs4_reclaim_complete_call_ops,
9188 		.flags = RPC_TASK_NO_ROUND_ROBIN,
9189 	};
9190 	int status = -ENOMEM;
9191 
9192 	dprintk("--> %s\n", __func__);
9193 	calldata = kzalloc(sizeof(*calldata), GFP_NOFS);
9194 	if (calldata == NULL)
9195 		goto out;
9196 	calldata->clp = clp;
9197 	calldata->arg.one_fs = 0;
9198 
9199 	nfs4_init_sequence(&calldata->arg.seq_args, &calldata->res.seq_res, 0, 1);
9200 	msg.rpc_argp = &calldata->arg;
9201 	msg.rpc_resp = &calldata->res;
9202 	task_setup_data.callback_data = calldata;
9203 	status = nfs4_call_sync_custom(&task_setup_data);
9204 out:
9205 	dprintk("<-- %s status=%d\n", __func__, status);
9206 	return status;
9207 }
9208 
9209 static void
9210 nfs4_layoutget_prepare(struct rpc_task *task, void *calldata)
9211 {
9212 	struct nfs4_layoutget *lgp = calldata;
9213 	struct nfs_server *server = NFS_SERVER(lgp->args.inode);
9214 
9215 	dprintk("--> %s\n", __func__);
9216 	nfs4_setup_sequence(server->nfs_client, &lgp->args.seq_args,
9217 				&lgp->res.seq_res, task);
9218 	dprintk("<-- %s\n", __func__);
9219 }
9220 
9221 static void nfs4_layoutget_done(struct rpc_task *task, void *calldata)
9222 {
9223 	struct nfs4_layoutget *lgp = calldata;
9224 
9225 	dprintk("--> %s\n", __func__);
9226 	nfs41_sequence_process(task, &lgp->res.seq_res);
9227 	dprintk("<-- %s\n", __func__);
9228 }
9229 
9230 static int
9231 nfs4_layoutget_handle_exception(struct rpc_task *task,
9232 		struct nfs4_layoutget *lgp, struct nfs4_exception *exception)
9233 {
9234 	struct inode *inode = lgp->args.inode;
9235 	struct nfs_server *server = NFS_SERVER(inode);
9236 	struct pnfs_layout_hdr *lo;
9237 	int nfs4err = task->tk_status;
9238 	int err, status = 0;
9239 	LIST_HEAD(head);
9240 
9241 	dprintk("--> %s tk_status => %d\n", __func__, -task->tk_status);
9242 
9243 	nfs4_sequence_free_slot(&lgp->res.seq_res);
9244 
9245 	switch (nfs4err) {
9246 	case 0:
9247 		goto out;
9248 
9249 	/*
9250 	 * NFS4ERR_LAYOUTUNAVAILABLE means we are not supposed to use pnfs
9251 	 * on the file. set tk_status to -ENODATA to tell upper layer to
9252 	 * retry go inband.
9253 	 */
9254 	case -NFS4ERR_LAYOUTUNAVAILABLE:
9255 		status = -ENODATA;
9256 		goto out;
9257 	/*
9258 	 * NFS4ERR_BADLAYOUT means the MDS cannot return a layout of
9259 	 * length lgp->args.minlength != 0 (see RFC5661 section 18.43.3).
9260 	 */
9261 	case -NFS4ERR_BADLAYOUT:
9262 		status = -EOVERFLOW;
9263 		goto out;
9264 	/*
9265 	 * NFS4ERR_LAYOUTTRYLATER is a conflict with another client
9266 	 * (or clients) writing to the same RAID stripe except when
9267 	 * the minlength argument is 0 (see RFC5661 section 18.43.3).
9268 	 *
9269 	 * Treat it like we would RECALLCONFLICT -- we retry for a little
9270 	 * while, and then eventually give up.
9271 	 */
9272 	case -NFS4ERR_LAYOUTTRYLATER:
9273 		if (lgp->args.minlength == 0) {
9274 			status = -EOVERFLOW;
9275 			goto out;
9276 		}
9277 		status = -EBUSY;
9278 		break;
9279 	case -NFS4ERR_RECALLCONFLICT:
9280 		status = -ERECALLCONFLICT;
9281 		break;
9282 	case -NFS4ERR_DELEG_REVOKED:
9283 	case -NFS4ERR_ADMIN_REVOKED:
9284 	case -NFS4ERR_EXPIRED:
9285 	case -NFS4ERR_BAD_STATEID:
9286 		exception->timeout = 0;
9287 		spin_lock(&inode->i_lock);
9288 		lo = NFS_I(inode)->layout;
9289 		/* If the open stateid was bad, then recover it. */
9290 		if (!lo || test_bit(NFS_LAYOUT_INVALID_STID, &lo->plh_flags) ||
9291 		    !nfs4_stateid_match_other(&lgp->args.stateid, &lo->plh_stateid)) {
9292 			spin_unlock(&inode->i_lock);
9293 			exception->state = lgp->args.ctx->state;
9294 			exception->stateid = &lgp->args.stateid;
9295 			break;
9296 		}
9297 
9298 		/*
9299 		 * Mark the bad layout state as invalid, then retry
9300 		 */
9301 		pnfs_mark_layout_stateid_invalid(lo, &head);
9302 		spin_unlock(&inode->i_lock);
9303 		nfs_commit_inode(inode, 0);
9304 		pnfs_free_lseg_list(&head);
9305 		status = -EAGAIN;
9306 		goto out;
9307 	}
9308 
9309 	err = nfs4_handle_exception(server, nfs4err, exception);
9310 	if (!status) {
9311 		if (exception->retry)
9312 			status = -EAGAIN;
9313 		else
9314 			status = err;
9315 	}
9316 out:
9317 	dprintk("<-- %s\n", __func__);
9318 	return status;
9319 }
9320 
9321 size_t max_response_pages(struct nfs_server *server)
9322 {
9323 	u32 max_resp_sz = server->nfs_client->cl_session->fc_attrs.max_resp_sz;
9324 	return nfs_page_array_len(0, max_resp_sz);
9325 }
9326 
9327 static void nfs4_layoutget_release(void *calldata)
9328 {
9329 	struct nfs4_layoutget *lgp = calldata;
9330 
9331 	dprintk("--> %s\n", __func__);
9332 	nfs4_sequence_free_slot(&lgp->res.seq_res);
9333 	pnfs_layoutget_free(lgp);
9334 	dprintk("<-- %s\n", __func__);
9335 }
9336 
9337 static const struct rpc_call_ops nfs4_layoutget_call_ops = {
9338 	.rpc_call_prepare = nfs4_layoutget_prepare,
9339 	.rpc_call_done = nfs4_layoutget_done,
9340 	.rpc_release = nfs4_layoutget_release,
9341 };
9342 
9343 struct pnfs_layout_segment *
9344 nfs4_proc_layoutget(struct nfs4_layoutget *lgp, long *timeout)
9345 {
9346 	struct inode *inode = lgp->args.inode;
9347 	struct nfs_server *server = NFS_SERVER(inode);
9348 	struct rpc_task *task;
9349 	struct rpc_message msg = {
9350 		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LAYOUTGET],
9351 		.rpc_argp = &lgp->args,
9352 		.rpc_resp = &lgp->res,
9353 		.rpc_cred = lgp->cred,
9354 	};
9355 	struct rpc_task_setup task_setup_data = {
9356 		.rpc_client = server->client,
9357 		.rpc_message = &msg,
9358 		.callback_ops = &nfs4_layoutget_call_ops,
9359 		.callback_data = lgp,
9360 		.flags = RPC_TASK_ASYNC | RPC_TASK_CRED_NOREF,
9361 	};
9362 	struct pnfs_layout_segment *lseg = NULL;
9363 	struct nfs4_exception exception = {
9364 		.inode = inode,
9365 		.timeout = *timeout,
9366 	};
9367 	int status = 0;
9368 
9369 	dprintk("--> %s\n", __func__);
9370 
9371 	/* nfs4_layoutget_release calls pnfs_put_layout_hdr */
9372 	pnfs_get_layout_hdr(NFS_I(inode)->layout);
9373 
9374 	nfs4_init_sequence(&lgp->args.seq_args, &lgp->res.seq_res, 0, 0);
9375 
9376 	task = rpc_run_task(&task_setup_data);
9377 
9378 	status = rpc_wait_for_completion_task(task);
9379 	if (status != 0)
9380 		goto out;
9381 
9382 	if (task->tk_status < 0) {
9383 		status = nfs4_layoutget_handle_exception(task, lgp, &exception);
9384 		*timeout = exception.timeout;
9385 	} else if (lgp->res.layoutp->len == 0) {
9386 		status = -EAGAIN;
9387 		*timeout = nfs4_update_delay(&exception.timeout);
9388 	} else
9389 		lseg = pnfs_layout_process(lgp);
9390 out:
9391 	trace_nfs4_layoutget(lgp->args.ctx,
9392 			&lgp->args.range,
9393 			&lgp->res.range,
9394 			&lgp->res.stateid,
9395 			status);
9396 
9397 	rpc_put_task(task);
9398 	dprintk("<-- %s status=%d\n", __func__, status);
9399 	if (status)
9400 		return ERR_PTR(status);
9401 	return lseg;
9402 }
9403 
9404 static void
9405 nfs4_layoutreturn_prepare(struct rpc_task *task, void *calldata)
9406 {
9407 	struct nfs4_layoutreturn *lrp = calldata;
9408 
9409 	dprintk("--> %s\n", __func__);
9410 	nfs4_setup_sequence(lrp->clp,
9411 			&lrp->args.seq_args,
9412 			&lrp->res.seq_res,
9413 			task);
9414 	if (!pnfs_layout_is_valid(lrp->args.layout))
9415 		rpc_exit(task, 0);
9416 }
9417 
9418 static void nfs4_layoutreturn_done(struct rpc_task *task, void *calldata)
9419 {
9420 	struct nfs4_layoutreturn *lrp = calldata;
9421 	struct nfs_server *server;
9422 
9423 	dprintk("--> %s\n", __func__);
9424 
9425 	if (!nfs41_sequence_process(task, &lrp->res.seq_res))
9426 		return;
9427 
9428 	/*
9429 	 * Was there an RPC level error? Assume the call succeeded,
9430 	 * and that we need to release the layout
9431 	 */
9432 	if (task->tk_rpc_status != 0 && RPC_WAS_SENT(task)) {
9433 		lrp->res.lrs_present = 0;
9434 		return;
9435 	}
9436 
9437 	server = NFS_SERVER(lrp->args.inode);
9438 	switch (task->tk_status) {
9439 	case -NFS4ERR_OLD_STATEID:
9440 		if (nfs4_layout_refresh_old_stateid(&lrp->args.stateid,
9441 					&lrp->args.range,
9442 					lrp->args.inode))
9443 			goto out_restart;
9444 		fallthrough;
9445 	default:
9446 		task->tk_status = 0;
9447 		fallthrough;
9448 	case 0:
9449 		break;
9450 	case -NFS4ERR_DELAY:
9451 		if (nfs4_async_handle_error(task, server, NULL, NULL) != -EAGAIN)
9452 			break;
9453 		goto out_restart;
9454 	}
9455 	dprintk("<-- %s\n", __func__);
9456 	return;
9457 out_restart:
9458 	task->tk_status = 0;
9459 	nfs4_sequence_free_slot(&lrp->res.seq_res);
9460 	rpc_restart_call_prepare(task);
9461 }
9462 
9463 static void nfs4_layoutreturn_release(void *calldata)
9464 {
9465 	struct nfs4_layoutreturn *lrp = calldata;
9466 	struct pnfs_layout_hdr *lo = lrp->args.layout;
9467 
9468 	dprintk("--> %s\n", __func__);
9469 	pnfs_layoutreturn_free_lsegs(lo, &lrp->args.stateid, &lrp->args.range,
9470 			lrp->res.lrs_present ? &lrp->res.stateid : NULL);
9471 	nfs4_sequence_free_slot(&lrp->res.seq_res);
9472 	if (lrp->ld_private.ops && lrp->ld_private.ops->free)
9473 		lrp->ld_private.ops->free(&lrp->ld_private);
9474 	pnfs_put_layout_hdr(lrp->args.layout);
9475 	nfs_iput_and_deactive(lrp->inode);
9476 	put_cred(lrp->cred);
9477 	kfree(calldata);
9478 	dprintk("<-- %s\n", __func__);
9479 }
9480 
9481 static const struct rpc_call_ops nfs4_layoutreturn_call_ops = {
9482 	.rpc_call_prepare = nfs4_layoutreturn_prepare,
9483 	.rpc_call_done = nfs4_layoutreturn_done,
9484 	.rpc_release = nfs4_layoutreturn_release,
9485 };
9486 
9487 int nfs4_proc_layoutreturn(struct nfs4_layoutreturn *lrp, bool sync)
9488 {
9489 	struct rpc_task *task;
9490 	struct rpc_message msg = {
9491 		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LAYOUTRETURN],
9492 		.rpc_argp = &lrp->args,
9493 		.rpc_resp = &lrp->res,
9494 		.rpc_cred = lrp->cred,
9495 	};
9496 	struct rpc_task_setup task_setup_data = {
9497 		.rpc_client = NFS_SERVER(lrp->args.inode)->client,
9498 		.rpc_message = &msg,
9499 		.callback_ops = &nfs4_layoutreturn_call_ops,
9500 		.callback_data = lrp,
9501 	};
9502 	int status = 0;
9503 
9504 	nfs4_state_protect(NFS_SERVER(lrp->args.inode)->nfs_client,
9505 			NFS_SP4_MACH_CRED_PNFS_CLEANUP,
9506 			&task_setup_data.rpc_client, &msg);
9507 
9508 	dprintk("--> %s\n", __func__);
9509 	if (!sync) {
9510 		lrp->inode = nfs_igrab_and_active(lrp->args.inode);
9511 		if (!lrp->inode) {
9512 			nfs4_layoutreturn_release(lrp);
9513 			return -EAGAIN;
9514 		}
9515 		task_setup_data.flags |= RPC_TASK_ASYNC;
9516 	}
9517 	nfs4_init_sequence(&lrp->args.seq_args, &lrp->res.seq_res, 1, 0);
9518 	task = rpc_run_task(&task_setup_data);
9519 	if (IS_ERR(task))
9520 		return PTR_ERR(task);
9521 	if (sync)
9522 		status = task->tk_status;
9523 	trace_nfs4_layoutreturn(lrp->args.inode, &lrp->args.stateid, status);
9524 	dprintk("<-- %s status=%d\n", __func__, status);
9525 	rpc_put_task(task);
9526 	return status;
9527 }
9528 
9529 static int
9530 _nfs4_proc_getdeviceinfo(struct nfs_server *server,
9531 		struct pnfs_device *pdev,
9532 		const struct cred *cred)
9533 {
9534 	struct nfs4_getdeviceinfo_args args = {
9535 		.pdev = pdev,
9536 		.notify_types = NOTIFY_DEVICEID4_CHANGE |
9537 			NOTIFY_DEVICEID4_DELETE,
9538 	};
9539 	struct nfs4_getdeviceinfo_res res = {
9540 		.pdev = pdev,
9541 	};
9542 	struct rpc_message msg = {
9543 		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETDEVICEINFO],
9544 		.rpc_argp = &args,
9545 		.rpc_resp = &res,
9546 		.rpc_cred = cred,
9547 	};
9548 	int status;
9549 
9550 	dprintk("--> %s\n", __func__);
9551 	status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
9552 	if (res.notification & ~args.notify_types)
9553 		dprintk("%s: unsupported notification\n", __func__);
9554 	if (res.notification != args.notify_types)
9555 		pdev->nocache = 1;
9556 
9557 	dprintk("<-- %s status=%d\n", __func__, status);
9558 
9559 	return status;
9560 }
9561 
9562 int nfs4_proc_getdeviceinfo(struct nfs_server *server,
9563 		struct pnfs_device *pdev,
9564 		const struct cred *cred)
9565 {
9566 	struct nfs4_exception exception = { };
9567 	int err;
9568 
9569 	do {
9570 		err = nfs4_handle_exception(server,
9571 					_nfs4_proc_getdeviceinfo(server, pdev, cred),
9572 					&exception);
9573 	} while (exception.retry);
9574 	return err;
9575 }
9576 EXPORT_SYMBOL_GPL(nfs4_proc_getdeviceinfo);
9577 
9578 static void nfs4_layoutcommit_prepare(struct rpc_task *task, void *calldata)
9579 {
9580 	struct nfs4_layoutcommit_data *data = calldata;
9581 	struct nfs_server *server = NFS_SERVER(data->args.inode);
9582 
9583 	nfs4_setup_sequence(server->nfs_client,
9584 			&data->args.seq_args,
9585 			&data->res.seq_res,
9586 			task);
9587 }
9588 
9589 static void
9590 nfs4_layoutcommit_done(struct rpc_task *task, void *calldata)
9591 {
9592 	struct nfs4_layoutcommit_data *data = calldata;
9593 	struct nfs_server *server = NFS_SERVER(data->args.inode);
9594 
9595 	if (!nfs41_sequence_done(task, &data->res.seq_res))
9596 		return;
9597 
9598 	switch (task->tk_status) { /* Just ignore these failures */
9599 	case -NFS4ERR_DELEG_REVOKED: /* layout was recalled */
9600 	case -NFS4ERR_BADIOMODE:     /* no IOMODE_RW layout for range */
9601 	case -NFS4ERR_BADLAYOUT:     /* no layout */
9602 	case -NFS4ERR_GRACE:	    /* loca_recalim always false */
9603 		task->tk_status = 0;
9604 	case 0:
9605 		break;
9606 	default:
9607 		if (nfs4_async_handle_error(task, server, NULL, NULL) == -EAGAIN) {
9608 			rpc_restart_call_prepare(task);
9609 			return;
9610 		}
9611 	}
9612 }
9613 
9614 static void nfs4_layoutcommit_release(void *calldata)
9615 {
9616 	struct nfs4_layoutcommit_data *data = calldata;
9617 
9618 	pnfs_cleanup_layoutcommit(data);
9619 	nfs_post_op_update_inode_force_wcc(data->args.inode,
9620 					   data->res.fattr);
9621 	put_cred(data->cred);
9622 	nfs_iput_and_deactive(data->inode);
9623 	kfree(data);
9624 }
9625 
9626 static const struct rpc_call_ops nfs4_layoutcommit_ops = {
9627 	.rpc_call_prepare = nfs4_layoutcommit_prepare,
9628 	.rpc_call_done = nfs4_layoutcommit_done,
9629 	.rpc_release = nfs4_layoutcommit_release,
9630 };
9631 
9632 int
9633 nfs4_proc_layoutcommit(struct nfs4_layoutcommit_data *data, bool sync)
9634 {
9635 	struct rpc_message msg = {
9636 		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LAYOUTCOMMIT],
9637 		.rpc_argp = &data->args,
9638 		.rpc_resp = &data->res,
9639 		.rpc_cred = data->cred,
9640 	};
9641 	struct rpc_task_setup task_setup_data = {
9642 		.task = &data->task,
9643 		.rpc_client = NFS_CLIENT(data->args.inode),
9644 		.rpc_message = &msg,
9645 		.callback_ops = &nfs4_layoutcommit_ops,
9646 		.callback_data = data,
9647 	};
9648 	struct rpc_task *task;
9649 	int status = 0;
9650 
9651 	dprintk("NFS: initiating layoutcommit call. sync %d "
9652 		"lbw: %llu inode %lu\n", sync,
9653 		data->args.lastbytewritten,
9654 		data->args.inode->i_ino);
9655 
9656 	if (!sync) {
9657 		data->inode = nfs_igrab_and_active(data->args.inode);
9658 		if (data->inode == NULL) {
9659 			nfs4_layoutcommit_release(data);
9660 			return -EAGAIN;
9661 		}
9662 		task_setup_data.flags = RPC_TASK_ASYNC;
9663 	}
9664 	nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 1, 0);
9665 	task = rpc_run_task(&task_setup_data);
9666 	if (IS_ERR(task))
9667 		return PTR_ERR(task);
9668 	if (sync)
9669 		status = task->tk_status;
9670 	trace_nfs4_layoutcommit(data->args.inode, &data->args.stateid, status);
9671 	dprintk("%s: status %d\n", __func__, status);
9672 	rpc_put_task(task);
9673 	return status;
9674 }
9675 
9676 /*
9677  * Use the state managment nfs_client cl_rpcclient, which uses krb5i (if
9678  * possible) as per RFC3530bis and RFC5661 Security Considerations sections
9679  */
9680 static int
9681 _nfs41_proc_secinfo_no_name(struct nfs_server *server, struct nfs_fh *fhandle,
9682 		    struct nfs_fsinfo *info,
9683 		    struct nfs4_secinfo_flavors *flavors, bool use_integrity)
9684 {
9685 	struct nfs41_secinfo_no_name_args args = {
9686 		.style = SECINFO_STYLE_CURRENT_FH,
9687 	};
9688 	struct nfs4_secinfo_res res = {
9689 		.flavors = flavors,
9690 	};
9691 	struct rpc_message msg = {
9692 		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SECINFO_NO_NAME],
9693 		.rpc_argp = &args,
9694 		.rpc_resp = &res,
9695 	};
9696 	struct rpc_clnt *clnt = server->client;
9697 	struct nfs4_call_sync_data data = {
9698 		.seq_server = server,
9699 		.seq_args = &args.seq_args,
9700 		.seq_res = &res.seq_res,
9701 	};
9702 	struct rpc_task_setup task_setup = {
9703 		.rpc_client = server->client,
9704 		.rpc_message = &msg,
9705 		.callback_ops = server->nfs_client->cl_mvops->call_sync_ops,
9706 		.callback_data = &data,
9707 		.flags = RPC_TASK_NO_ROUND_ROBIN,
9708 	};
9709 	const struct cred *cred = NULL;
9710 	int status;
9711 
9712 	if (use_integrity) {
9713 		clnt = server->nfs_client->cl_rpcclient;
9714 		task_setup.rpc_client = clnt;
9715 
9716 		cred = nfs4_get_clid_cred(server->nfs_client);
9717 		msg.rpc_cred = cred;
9718 	}
9719 
9720 	dprintk("--> %s\n", __func__);
9721 	nfs4_init_sequence(&args.seq_args, &res.seq_res, 0, 0);
9722 	status = nfs4_call_sync_custom(&task_setup);
9723 	dprintk("<-- %s status=%d\n", __func__, status);
9724 
9725 	put_cred(cred);
9726 
9727 	return status;
9728 }
9729 
9730 static int
9731 nfs41_proc_secinfo_no_name(struct nfs_server *server, struct nfs_fh *fhandle,
9732 			   struct nfs_fsinfo *info, struct nfs4_secinfo_flavors *flavors)
9733 {
9734 	struct nfs4_exception exception = {
9735 		.interruptible = true,
9736 	};
9737 	int err;
9738 	do {
9739 		/* first try using integrity protection */
9740 		err = -NFS4ERR_WRONGSEC;
9741 
9742 		/* try to use integrity protection with machine cred */
9743 		if (_nfs4_is_integrity_protected(server->nfs_client))
9744 			err = _nfs41_proc_secinfo_no_name(server, fhandle, info,
9745 							  flavors, true);
9746 
9747 		/*
9748 		 * if unable to use integrity protection, or SECINFO with
9749 		 * integrity protection returns NFS4ERR_WRONGSEC (which is
9750 		 * disallowed by spec, but exists in deployed servers) use
9751 		 * the current filesystem's rpc_client and the user cred.
9752 		 */
9753 		if (err == -NFS4ERR_WRONGSEC)
9754 			err = _nfs41_proc_secinfo_no_name(server, fhandle, info,
9755 							  flavors, false);
9756 
9757 		switch (err) {
9758 		case 0:
9759 		case -NFS4ERR_WRONGSEC:
9760 		case -ENOTSUPP:
9761 			goto out;
9762 		default:
9763 			err = nfs4_handle_exception(server, err, &exception);
9764 		}
9765 	} while (exception.retry);
9766 out:
9767 	return err;
9768 }
9769 
9770 static int
9771 nfs41_find_root_sec(struct nfs_server *server, struct nfs_fh *fhandle,
9772 		    struct nfs_fsinfo *info)
9773 {
9774 	int err;
9775 	struct page *page;
9776 	rpc_authflavor_t flavor = RPC_AUTH_MAXFLAVOR;
9777 	struct nfs4_secinfo_flavors *flavors;
9778 	struct nfs4_secinfo4 *secinfo;
9779 	int i;
9780 
9781 	page = alloc_page(GFP_KERNEL);
9782 	if (!page) {
9783 		err = -ENOMEM;
9784 		goto out;
9785 	}
9786 
9787 	flavors = page_address(page);
9788 	err = nfs41_proc_secinfo_no_name(server, fhandle, info, flavors);
9789 
9790 	/*
9791 	 * Fall back on "guess and check" method if
9792 	 * the server doesn't support SECINFO_NO_NAME
9793 	 */
9794 	if (err == -NFS4ERR_WRONGSEC || err == -ENOTSUPP) {
9795 		err = nfs4_find_root_sec(server, fhandle, info);
9796 		goto out_freepage;
9797 	}
9798 	if (err)
9799 		goto out_freepage;
9800 
9801 	for (i = 0; i < flavors->num_flavors; i++) {
9802 		secinfo = &flavors->flavors[i];
9803 
9804 		switch (secinfo->flavor) {
9805 		case RPC_AUTH_NULL:
9806 		case RPC_AUTH_UNIX:
9807 		case RPC_AUTH_GSS:
9808 			flavor = rpcauth_get_pseudoflavor(secinfo->flavor,
9809 					&secinfo->flavor_info);
9810 			break;
9811 		default:
9812 			flavor = RPC_AUTH_MAXFLAVOR;
9813 			break;
9814 		}
9815 
9816 		if (!nfs_auth_info_match(&server->auth_info, flavor))
9817 			flavor = RPC_AUTH_MAXFLAVOR;
9818 
9819 		if (flavor != RPC_AUTH_MAXFLAVOR) {
9820 			err = nfs4_lookup_root_sec(server, fhandle,
9821 						   info, flavor);
9822 			if (!err)
9823 				break;
9824 		}
9825 	}
9826 
9827 	if (flavor == RPC_AUTH_MAXFLAVOR)
9828 		err = -EPERM;
9829 
9830 out_freepage:
9831 	put_page(page);
9832 	if (err == -EACCES)
9833 		return -EPERM;
9834 out:
9835 	return err;
9836 }
9837 
9838 static int _nfs41_test_stateid(struct nfs_server *server,
9839 		nfs4_stateid *stateid,
9840 		const struct cred *cred)
9841 {
9842 	int status;
9843 	struct nfs41_test_stateid_args args = {
9844 		.stateid = stateid,
9845 	};
9846 	struct nfs41_test_stateid_res res;
9847 	struct rpc_message msg = {
9848 		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_TEST_STATEID],
9849 		.rpc_argp = &args,
9850 		.rpc_resp = &res,
9851 		.rpc_cred = cred,
9852 	};
9853 	struct rpc_clnt *rpc_client = server->client;
9854 
9855 	nfs4_state_protect(server->nfs_client, NFS_SP4_MACH_CRED_STATEID,
9856 		&rpc_client, &msg);
9857 
9858 	dprintk("NFS call  test_stateid %p\n", stateid);
9859 	nfs4_init_sequence(&args.seq_args, &res.seq_res, 0, 1);
9860 	status = nfs4_call_sync_sequence(rpc_client, server, &msg,
9861 			&args.seq_args, &res.seq_res);
9862 	if (status != NFS_OK) {
9863 		dprintk("NFS reply test_stateid: failed, %d\n", status);
9864 		return status;
9865 	}
9866 	dprintk("NFS reply test_stateid: succeeded, %d\n", -res.status);
9867 	return -res.status;
9868 }
9869 
9870 static void nfs4_handle_delay_or_session_error(struct nfs_server *server,
9871 		int err, struct nfs4_exception *exception)
9872 {
9873 	exception->retry = 0;
9874 	switch(err) {
9875 	case -NFS4ERR_DELAY:
9876 	case -NFS4ERR_RETRY_UNCACHED_REP:
9877 		nfs4_handle_exception(server, err, exception);
9878 		break;
9879 	case -NFS4ERR_BADSESSION:
9880 	case -NFS4ERR_BADSLOT:
9881 	case -NFS4ERR_BAD_HIGH_SLOT:
9882 	case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
9883 	case -NFS4ERR_DEADSESSION:
9884 		nfs4_do_handle_exception(server, err, exception);
9885 	}
9886 }
9887 
9888 /**
9889  * nfs41_test_stateid - perform a TEST_STATEID operation
9890  *
9891  * @server: server / transport on which to perform the operation
9892  * @stateid: state ID to test
9893  * @cred: credential
9894  *
9895  * Returns NFS_OK if the server recognizes that "stateid" is valid.
9896  * Otherwise a negative NFS4ERR value is returned if the operation
9897  * failed or the state ID is not currently valid.
9898  */
9899 static int nfs41_test_stateid(struct nfs_server *server,
9900 		nfs4_stateid *stateid,
9901 		const struct cred *cred)
9902 {
9903 	struct nfs4_exception exception = {
9904 		.interruptible = true,
9905 	};
9906 	int err;
9907 	do {
9908 		err = _nfs41_test_stateid(server, stateid, cred);
9909 		nfs4_handle_delay_or_session_error(server, err, &exception);
9910 	} while (exception.retry);
9911 	return err;
9912 }
9913 
9914 struct nfs_free_stateid_data {
9915 	struct nfs_server *server;
9916 	struct nfs41_free_stateid_args args;
9917 	struct nfs41_free_stateid_res res;
9918 };
9919 
9920 static void nfs41_free_stateid_prepare(struct rpc_task *task, void *calldata)
9921 {
9922 	struct nfs_free_stateid_data *data = calldata;
9923 	nfs4_setup_sequence(data->server->nfs_client,
9924 			&data->args.seq_args,
9925 			&data->res.seq_res,
9926 			task);
9927 }
9928 
9929 static void nfs41_free_stateid_done(struct rpc_task *task, void *calldata)
9930 {
9931 	struct nfs_free_stateid_data *data = calldata;
9932 
9933 	nfs41_sequence_done(task, &data->res.seq_res);
9934 
9935 	switch (task->tk_status) {
9936 	case -NFS4ERR_DELAY:
9937 		if (nfs4_async_handle_error(task, data->server, NULL, NULL) == -EAGAIN)
9938 			rpc_restart_call_prepare(task);
9939 	}
9940 }
9941 
9942 static void nfs41_free_stateid_release(void *calldata)
9943 {
9944 	kfree(calldata);
9945 }
9946 
9947 static const struct rpc_call_ops nfs41_free_stateid_ops = {
9948 	.rpc_call_prepare = nfs41_free_stateid_prepare,
9949 	.rpc_call_done = nfs41_free_stateid_done,
9950 	.rpc_release = nfs41_free_stateid_release,
9951 };
9952 
9953 /**
9954  * nfs41_free_stateid - perform a FREE_STATEID operation
9955  *
9956  * @server: server / transport on which to perform the operation
9957  * @stateid: state ID to release
9958  * @cred: credential
9959  * @privileged: set to true if this call needs to be privileged
9960  *
9961  * Note: this function is always asynchronous.
9962  */
9963 static int nfs41_free_stateid(struct nfs_server *server,
9964 		const nfs4_stateid *stateid,
9965 		const struct cred *cred,
9966 		bool privileged)
9967 {
9968 	struct rpc_message msg = {
9969 		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FREE_STATEID],
9970 		.rpc_cred = cred,
9971 	};
9972 	struct rpc_task_setup task_setup = {
9973 		.rpc_client = server->client,
9974 		.rpc_message = &msg,
9975 		.callback_ops = &nfs41_free_stateid_ops,
9976 		.flags = RPC_TASK_ASYNC,
9977 	};
9978 	struct nfs_free_stateid_data *data;
9979 	struct rpc_task *task;
9980 
9981 	nfs4_state_protect(server->nfs_client, NFS_SP4_MACH_CRED_STATEID,
9982 		&task_setup.rpc_client, &msg);
9983 
9984 	dprintk("NFS call  free_stateid %p\n", stateid);
9985 	data = kmalloc(sizeof(*data), GFP_NOFS);
9986 	if (!data)
9987 		return -ENOMEM;
9988 	data->server = server;
9989 	nfs4_stateid_copy(&data->args.stateid, stateid);
9990 
9991 	task_setup.callback_data = data;
9992 
9993 	msg.rpc_argp = &data->args;
9994 	msg.rpc_resp = &data->res;
9995 	nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 1, privileged);
9996 	task = rpc_run_task(&task_setup);
9997 	if (IS_ERR(task))
9998 		return PTR_ERR(task);
9999 	rpc_put_task(task);
10000 	return 0;
10001 }
10002 
10003 static void
10004 nfs41_free_lock_state(struct nfs_server *server, struct nfs4_lock_state *lsp)
10005 {
10006 	const struct cred *cred = lsp->ls_state->owner->so_cred;
10007 
10008 	nfs41_free_stateid(server, &lsp->ls_stateid, cred, false);
10009 	nfs4_free_lock_state(server, lsp);
10010 }
10011 
10012 static bool nfs41_match_stateid(const nfs4_stateid *s1,
10013 		const nfs4_stateid *s2)
10014 {
10015 	if (s1->type != s2->type)
10016 		return false;
10017 
10018 	if (memcmp(s1->other, s2->other, sizeof(s1->other)) != 0)
10019 		return false;
10020 
10021 	if (s1->seqid == s2->seqid)
10022 		return true;
10023 
10024 	return s1->seqid == 0 || s2->seqid == 0;
10025 }
10026 
10027 #endif /* CONFIG_NFS_V4_1 */
10028 
10029 static bool nfs4_match_stateid(const nfs4_stateid *s1,
10030 		const nfs4_stateid *s2)
10031 {
10032 	return nfs4_stateid_match(s1, s2);
10033 }
10034 
10035 
10036 static const struct nfs4_state_recovery_ops nfs40_reboot_recovery_ops = {
10037 	.owner_flag_bit = NFS_OWNER_RECLAIM_REBOOT,
10038 	.state_flag_bit	= NFS_STATE_RECLAIM_REBOOT,
10039 	.recover_open	= nfs4_open_reclaim,
10040 	.recover_lock	= nfs4_lock_reclaim,
10041 	.establish_clid = nfs4_init_clientid,
10042 	.detect_trunking = nfs40_discover_server_trunking,
10043 };
10044 
10045 #if defined(CONFIG_NFS_V4_1)
10046 static const struct nfs4_state_recovery_ops nfs41_reboot_recovery_ops = {
10047 	.owner_flag_bit = NFS_OWNER_RECLAIM_REBOOT,
10048 	.state_flag_bit	= NFS_STATE_RECLAIM_REBOOT,
10049 	.recover_open	= nfs4_open_reclaim,
10050 	.recover_lock	= nfs4_lock_reclaim,
10051 	.establish_clid = nfs41_init_clientid,
10052 	.reclaim_complete = nfs41_proc_reclaim_complete,
10053 	.detect_trunking = nfs41_discover_server_trunking,
10054 };
10055 #endif /* CONFIG_NFS_V4_1 */
10056 
10057 static const struct nfs4_state_recovery_ops nfs40_nograce_recovery_ops = {
10058 	.owner_flag_bit = NFS_OWNER_RECLAIM_NOGRACE,
10059 	.state_flag_bit	= NFS_STATE_RECLAIM_NOGRACE,
10060 	.recover_open	= nfs40_open_expired,
10061 	.recover_lock	= nfs4_lock_expired,
10062 	.establish_clid = nfs4_init_clientid,
10063 };
10064 
10065 #if defined(CONFIG_NFS_V4_1)
10066 static const struct nfs4_state_recovery_ops nfs41_nograce_recovery_ops = {
10067 	.owner_flag_bit = NFS_OWNER_RECLAIM_NOGRACE,
10068 	.state_flag_bit	= NFS_STATE_RECLAIM_NOGRACE,
10069 	.recover_open	= nfs41_open_expired,
10070 	.recover_lock	= nfs41_lock_expired,
10071 	.establish_clid = nfs41_init_clientid,
10072 };
10073 #endif /* CONFIG_NFS_V4_1 */
10074 
10075 static const struct nfs4_state_maintenance_ops nfs40_state_renewal_ops = {
10076 	.sched_state_renewal = nfs4_proc_async_renew,
10077 	.get_state_renewal_cred = nfs4_get_renew_cred,
10078 	.renew_lease = nfs4_proc_renew,
10079 };
10080 
10081 #if defined(CONFIG_NFS_V4_1)
10082 static const struct nfs4_state_maintenance_ops nfs41_state_renewal_ops = {
10083 	.sched_state_renewal = nfs41_proc_async_sequence,
10084 	.get_state_renewal_cred = nfs4_get_machine_cred,
10085 	.renew_lease = nfs4_proc_sequence,
10086 };
10087 #endif
10088 
10089 static const struct nfs4_mig_recovery_ops nfs40_mig_recovery_ops = {
10090 	.get_locations = _nfs40_proc_get_locations,
10091 	.fsid_present = _nfs40_proc_fsid_present,
10092 };
10093 
10094 #if defined(CONFIG_NFS_V4_1)
10095 static const struct nfs4_mig_recovery_ops nfs41_mig_recovery_ops = {
10096 	.get_locations = _nfs41_proc_get_locations,
10097 	.fsid_present = _nfs41_proc_fsid_present,
10098 };
10099 #endif	/* CONFIG_NFS_V4_1 */
10100 
10101 static const struct nfs4_minor_version_ops nfs_v4_0_minor_ops = {
10102 	.minor_version = 0,
10103 	.init_caps = NFS_CAP_READDIRPLUS
10104 		| NFS_CAP_ATOMIC_OPEN
10105 		| NFS_CAP_POSIX_LOCK,
10106 	.init_client = nfs40_init_client,
10107 	.shutdown_client = nfs40_shutdown_client,
10108 	.match_stateid = nfs4_match_stateid,
10109 	.find_root_sec = nfs4_find_root_sec,
10110 	.free_lock_state = nfs4_release_lockowner,
10111 	.test_and_free_expired = nfs40_test_and_free_expired_stateid,
10112 	.alloc_seqid = nfs_alloc_seqid,
10113 	.call_sync_ops = &nfs40_call_sync_ops,
10114 	.reboot_recovery_ops = &nfs40_reboot_recovery_ops,
10115 	.nograce_recovery_ops = &nfs40_nograce_recovery_ops,
10116 	.state_renewal_ops = &nfs40_state_renewal_ops,
10117 	.mig_recovery_ops = &nfs40_mig_recovery_ops,
10118 };
10119 
10120 #if defined(CONFIG_NFS_V4_1)
10121 static struct nfs_seqid *
10122 nfs_alloc_no_seqid(struct nfs_seqid_counter *arg1, gfp_t arg2)
10123 {
10124 	return NULL;
10125 }
10126 
10127 static const struct nfs4_minor_version_ops nfs_v4_1_minor_ops = {
10128 	.minor_version = 1,
10129 	.init_caps = NFS_CAP_READDIRPLUS
10130 		| NFS_CAP_ATOMIC_OPEN
10131 		| NFS_CAP_POSIX_LOCK
10132 		| NFS_CAP_STATEID_NFSV41
10133 		| NFS_CAP_ATOMIC_OPEN_V1
10134 		| NFS_CAP_LGOPEN,
10135 	.init_client = nfs41_init_client,
10136 	.shutdown_client = nfs41_shutdown_client,
10137 	.match_stateid = nfs41_match_stateid,
10138 	.find_root_sec = nfs41_find_root_sec,
10139 	.free_lock_state = nfs41_free_lock_state,
10140 	.test_and_free_expired = nfs41_test_and_free_expired_stateid,
10141 	.alloc_seqid = nfs_alloc_no_seqid,
10142 	.session_trunk = nfs4_test_session_trunk,
10143 	.call_sync_ops = &nfs41_call_sync_ops,
10144 	.reboot_recovery_ops = &nfs41_reboot_recovery_ops,
10145 	.nograce_recovery_ops = &nfs41_nograce_recovery_ops,
10146 	.state_renewal_ops = &nfs41_state_renewal_ops,
10147 	.mig_recovery_ops = &nfs41_mig_recovery_ops,
10148 };
10149 #endif
10150 
10151 #if defined(CONFIG_NFS_V4_2)
10152 static const struct nfs4_minor_version_ops nfs_v4_2_minor_ops = {
10153 	.minor_version = 2,
10154 	.init_caps = NFS_CAP_READDIRPLUS
10155 		| NFS_CAP_ATOMIC_OPEN
10156 		| NFS_CAP_POSIX_LOCK
10157 		| NFS_CAP_STATEID_NFSV41
10158 		| NFS_CAP_ATOMIC_OPEN_V1
10159 		| NFS_CAP_LGOPEN
10160 		| NFS_CAP_ALLOCATE
10161 		| NFS_CAP_COPY
10162 		| NFS_CAP_OFFLOAD_CANCEL
10163 		| NFS_CAP_COPY_NOTIFY
10164 		| NFS_CAP_DEALLOCATE
10165 		| NFS_CAP_SEEK
10166 		| NFS_CAP_LAYOUTSTATS
10167 		| NFS_CAP_CLONE
10168 		| NFS_CAP_LAYOUTERROR,
10169 	.init_client = nfs41_init_client,
10170 	.shutdown_client = nfs41_shutdown_client,
10171 	.match_stateid = nfs41_match_stateid,
10172 	.find_root_sec = nfs41_find_root_sec,
10173 	.free_lock_state = nfs41_free_lock_state,
10174 	.call_sync_ops = &nfs41_call_sync_ops,
10175 	.test_and_free_expired = nfs41_test_and_free_expired_stateid,
10176 	.alloc_seqid = nfs_alloc_no_seqid,
10177 	.session_trunk = nfs4_test_session_trunk,
10178 	.reboot_recovery_ops = &nfs41_reboot_recovery_ops,
10179 	.nograce_recovery_ops = &nfs41_nograce_recovery_ops,
10180 	.state_renewal_ops = &nfs41_state_renewal_ops,
10181 	.mig_recovery_ops = &nfs41_mig_recovery_ops,
10182 };
10183 #endif
10184 
10185 const struct nfs4_minor_version_ops *nfs_v4_minor_ops[] = {
10186 	[0] = &nfs_v4_0_minor_ops,
10187 #if defined(CONFIG_NFS_V4_1)
10188 	[1] = &nfs_v4_1_minor_ops,
10189 #endif
10190 #if defined(CONFIG_NFS_V4_2)
10191 	[2] = &nfs_v4_2_minor_ops,
10192 #endif
10193 };
10194 
10195 static ssize_t nfs4_listxattr(struct dentry *dentry, char *list, size_t size)
10196 {
10197 	ssize_t error, error2, error3;
10198 
10199 	error = generic_listxattr(dentry, list, size);
10200 	if (error < 0)
10201 		return error;
10202 	if (list) {
10203 		list += error;
10204 		size -= error;
10205 	}
10206 
10207 	error2 = nfs4_listxattr_nfs4_label(d_inode(dentry), list, size);
10208 	if (error2 < 0)
10209 		return error2;
10210 
10211 	if (list) {
10212 		list += error2;
10213 		size -= error2;
10214 	}
10215 
10216 	error3 = nfs4_listxattr_nfs4_user(d_inode(dentry), list, size);
10217 	if (error3 < 0)
10218 		return error3;
10219 
10220 	return error + error2 + error3;
10221 }
10222 
10223 static const struct inode_operations nfs4_dir_inode_operations = {
10224 	.create		= nfs_create,
10225 	.lookup		= nfs_lookup,
10226 	.atomic_open	= nfs_atomic_open,
10227 	.link		= nfs_link,
10228 	.unlink		= nfs_unlink,
10229 	.symlink	= nfs_symlink,
10230 	.mkdir		= nfs_mkdir,
10231 	.rmdir		= nfs_rmdir,
10232 	.mknod		= nfs_mknod,
10233 	.rename		= nfs_rename,
10234 	.permission	= nfs_permission,
10235 	.getattr	= nfs_getattr,
10236 	.setattr	= nfs_setattr,
10237 	.listxattr	= nfs4_listxattr,
10238 };
10239 
10240 static const struct inode_operations nfs4_file_inode_operations = {
10241 	.permission	= nfs_permission,
10242 	.getattr	= nfs_getattr,
10243 	.setattr	= nfs_setattr,
10244 	.listxattr	= nfs4_listxattr,
10245 };
10246 
10247 const struct nfs_rpc_ops nfs_v4_clientops = {
10248 	.version	= 4,			/* protocol version */
10249 	.dentry_ops	= &nfs4_dentry_operations,
10250 	.dir_inode_ops	= &nfs4_dir_inode_operations,
10251 	.file_inode_ops	= &nfs4_file_inode_operations,
10252 	.file_ops	= &nfs4_file_operations,
10253 	.getroot	= nfs4_proc_get_root,
10254 	.submount	= nfs4_submount,
10255 	.try_get_tree	= nfs4_try_get_tree,
10256 	.getattr	= nfs4_proc_getattr,
10257 	.setattr	= nfs4_proc_setattr,
10258 	.lookup		= nfs4_proc_lookup,
10259 	.lookupp	= nfs4_proc_lookupp,
10260 	.access		= nfs4_proc_access,
10261 	.readlink	= nfs4_proc_readlink,
10262 	.create		= nfs4_proc_create,
10263 	.remove		= nfs4_proc_remove,
10264 	.unlink_setup	= nfs4_proc_unlink_setup,
10265 	.unlink_rpc_prepare = nfs4_proc_unlink_rpc_prepare,
10266 	.unlink_done	= nfs4_proc_unlink_done,
10267 	.rename_setup	= nfs4_proc_rename_setup,
10268 	.rename_rpc_prepare = nfs4_proc_rename_rpc_prepare,
10269 	.rename_done	= nfs4_proc_rename_done,
10270 	.link		= nfs4_proc_link,
10271 	.symlink	= nfs4_proc_symlink,
10272 	.mkdir		= nfs4_proc_mkdir,
10273 	.rmdir		= nfs4_proc_rmdir,
10274 	.readdir	= nfs4_proc_readdir,
10275 	.mknod		= nfs4_proc_mknod,
10276 	.statfs		= nfs4_proc_statfs,
10277 	.fsinfo		= nfs4_proc_fsinfo,
10278 	.pathconf	= nfs4_proc_pathconf,
10279 	.set_capabilities = nfs4_server_capabilities,
10280 	.decode_dirent	= nfs4_decode_dirent,
10281 	.pgio_rpc_prepare = nfs4_proc_pgio_rpc_prepare,
10282 	.read_setup	= nfs4_proc_read_setup,
10283 	.read_done	= nfs4_read_done,
10284 	.write_setup	= nfs4_proc_write_setup,
10285 	.write_done	= nfs4_write_done,
10286 	.commit_setup	= nfs4_proc_commit_setup,
10287 	.commit_rpc_prepare = nfs4_proc_commit_rpc_prepare,
10288 	.commit_done	= nfs4_commit_done,
10289 	.lock		= nfs4_proc_lock,
10290 	.clear_acl_cache = nfs4_zap_acl_attr,
10291 	.close_context  = nfs4_close_context,
10292 	.open_context	= nfs4_atomic_open,
10293 	.have_delegation = nfs4_have_delegation,
10294 	.alloc_client	= nfs4_alloc_client,
10295 	.init_client	= nfs4_init_client,
10296 	.free_client	= nfs4_free_client,
10297 	.create_server	= nfs4_create_server,
10298 	.clone_server	= nfs_clone_server,
10299 };
10300 
10301 static const struct xattr_handler nfs4_xattr_nfs4_acl_handler = {
10302 	.name	= XATTR_NAME_NFSV4_ACL,
10303 	.list	= nfs4_xattr_list_nfs4_acl,
10304 	.get	= nfs4_xattr_get_nfs4_acl,
10305 	.set	= nfs4_xattr_set_nfs4_acl,
10306 };
10307 
10308 #ifdef CONFIG_NFS_V4_2
10309 static const struct xattr_handler nfs4_xattr_nfs4_user_handler = {
10310 	.prefix	= XATTR_USER_PREFIX,
10311 	.get	= nfs4_xattr_get_nfs4_user,
10312 	.set	= nfs4_xattr_set_nfs4_user,
10313 };
10314 #endif
10315 
10316 const struct xattr_handler *nfs4_xattr_handlers[] = {
10317 	&nfs4_xattr_nfs4_acl_handler,
10318 #ifdef CONFIG_NFS_V4_SECURITY_LABEL
10319 	&nfs4_xattr_nfs4_label_handler,
10320 #endif
10321 #ifdef CONFIG_NFS_V4_2
10322 	&nfs4_xattr_nfs4_user_handler,
10323 #endif
10324 	NULL
10325 };
10326 
10327 /*
10328  * Local variables:
10329  *  c-basic-offset: 8
10330  * End:
10331  */
10332