xref: /openbmc/linux/fs/nfs/nfs4proc.c (revision e7065e20)
1 /*
2  *  fs/nfs/nfs4proc.c
3  *
4  *  Client-side procedure declarations for NFSv4.
5  *
6  *  Copyright (c) 2002 The Regents of the University of Michigan.
7  *  All rights reserved.
8  *
9  *  Kendrick Smith <kmsmith@umich.edu>
10  *  Andy Adamson   <andros@umich.edu>
11  *
12  *  Redistribution and use in source and binary forms, with or without
13  *  modification, are permitted provided that the following conditions
14  *  are met:
15  *
16  *  1. Redistributions of source code must retain the above copyright
17  *     notice, this list of conditions and the following disclaimer.
18  *  2. Redistributions in binary form must reproduce the above copyright
19  *     notice, this list of conditions and the following disclaimer in the
20  *     documentation and/or other materials provided with the distribution.
21  *  3. Neither the name of the University nor the names of its
22  *     contributors may be used to endorse or promote products derived
23  *     from this software without specific prior written permission.
24  *
25  *  THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
26  *  WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
27  *  MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
28  *  DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
29  *  FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
30  *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
31  *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
32  *  BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
33  *  LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
34  *  NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
35  *  SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
36  */
37 
38 #include <linux/mm.h>
39 #include <linux/delay.h>
40 #include <linux/errno.h>
41 #include <linux/string.h>
42 #include <linux/ratelimit.h>
43 #include <linux/printk.h>
44 #include <linux/slab.h>
45 #include <linux/sunrpc/clnt.h>
46 #include <linux/sunrpc/gss_api.h>
47 #include <linux/nfs.h>
48 #include <linux/nfs4.h>
49 #include <linux/nfs_fs.h>
50 #include <linux/nfs_page.h>
51 #include <linux/nfs_mount.h>
52 #include <linux/namei.h>
53 #include <linux/mount.h>
54 #include <linux/module.h>
55 #include <linux/nfs_idmap.h>
56 #include <linux/sunrpc/bc_xprt.h>
57 #include <linux/xattr.h>
58 #include <linux/utsname.h>
59 #include <linux/freezer.h>
60 
61 #include "nfs4_fs.h"
62 #include "delegation.h"
63 #include "internal.h"
64 #include "iostat.h"
65 #include "callback.h"
66 #include "pnfs.h"
67 
68 #define NFSDBG_FACILITY		NFSDBG_PROC
69 
70 #define NFS4_POLL_RETRY_MIN	(HZ/10)
71 #define NFS4_POLL_RETRY_MAX	(15*HZ)
72 
73 #define NFS4_MAX_LOOP_ON_RECOVER (10)
74 
75 static unsigned short max_session_slots = NFS4_DEF_SLOT_TABLE_SIZE;
76 
77 struct nfs4_opendata;
78 static int _nfs4_proc_open(struct nfs4_opendata *data);
79 static int _nfs4_recover_proc_open(struct nfs4_opendata *data);
80 static int nfs4_do_fsinfo(struct nfs_server *, struct nfs_fh *, struct nfs_fsinfo *);
81 static int nfs4_async_handle_error(struct rpc_task *, const struct nfs_server *, struct nfs4_state *);
82 static void nfs_fixup_referral_attributes(struct nfs_fattr *fattr);
83 static int _nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fattr *fattr);
84 static int nfs4_do_setattr(struct inode *inode, struct rpc_cred *cred,
85 			    struct nfs_fattr *fattr, struct iattr *sattr,
86 			    struct nfs4_state *state);
87 #ifdef CONFIG_NFS_V4_1
88 static int nfs41_test_stateid(struct nfs_server *, nfs4_stateid *);
89 static int nfs41_free_stateid(struct nfs_server *, nfs4_stateid *);
90 #endif
91 /* Prevent leaks of NFSv4 errors into userland */
92 static int nfs4_map_errors(int err)
93 {
94 	if (err >= -1000)
95 		return err;
96 	switch (err) {
97 	case -NFS4ERR_RESOURCE:
98 		return -EREMOTEIO;
99 	case -NFS4ERR_WRONGSEC:
100 		return -EPERM;
101 	case -NFS4ERR_BADOWNER:
102 	case -NFS4ERR_BADNAME:
103 		return -EINVAL;
104 	default:
105 		dprintk("%s could not handle NFSv4 error %d\n",
106 				__func__, -err);
107 		break;
108 	}
109 	return -EIO;
110 }
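/*
 * Worked example (annotation, not in the original source): -NFS4ERR_RESOURCE
 * (-10018) is mapped to -EREMOTEIO and -NFS4ERR_WRONGSEC (-10016) to -EPERM,
 * while an ordinary errno such as -EIO (-5) satisfies the "err >= -1000"
 * test above and is passed through to userland unchanged.
 */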
111 
112 /*
113  * This is our standard bitmap for GETATTR requests.
114  */
115 const u32 nfs4_fattr_bitmap[2] = {
116 	FATTR4_WORD0_TYPE
117 	| FATTR4_WORD0_CHANGE
118 	| FATTR4_WORD0_SIZE
119 	| FATTR4_WORD0_FSID
120 	| FATTR4_WORD0_FILEID,
121 	FATTR4_WORD1_MODE
122 	| FATTR4_WORD1_NUMLINKS
123 	| FATTR4_WORD1_OWNER
124 	| FATTR4_WORD1_OWNER_GROUP
125 	| FATTR4_WORD1_RAWDEV
126 	| FATTR4_WORD1_SPACE_USED
127 	| FATTR4_WORD1_TIME_ACCESS
128 	| FATTR4_WORD1_TIME_METADATA
129 	| FATTR4_WORD1_TIME_MODIFY
130 };
131 
132 const u32 nfs4_statfs_bitmap[2] = {
133 	FATTR4_WORD0_FILES_AVAIL
134 	| FATTR4_WORD0_FILES_FREE
135 	| FATTR4_WORD0_FILES_TOTAL,
136 	FATTR4_WORD1_SPACE_AVAIL
137 	| FATTR4_WORD1_SPACE_FREE
138 	| FATTR4_WORD1_SPACE_TOTAL
139 };
140 
141 const u32 nfs4_pathconf_bitmap[2] = {
142 	FATTR4_WORD0_MAXLINK
143 	| FATTR4_WORD0_MAXNAME,
144 	0
145 };
146 
147 const u32 nfs4_fsinfo_bitmap[3] = { FATTR4_WORD0_MAXFILESIZE
148 			| FATTR4_WORD0_MAXREAD
149 			| FATTR4_WORD0_MAXWRITE
150 			| FATTR4_WORD0_LEASE_TIME,
151 			FATTR4_WORD1_TIME_DELTA
152 			| FATTR4_WORD1_FS_LAYOUT_TYPES,
153 			FATTR4_WORD2_LAYOUT_BLKSIZE
154 };
155 
156 const u32 nfs4_fs_locations_bitmap[2] = {
157 	FATTR4_WORD0_TYPE
158 	| FATTR4_WORD0_CHANGE
159 	| FATTR4_WORD0_SIZE
160 	| FATTR4_WORD0_FSID
161 	| FATTR4_WORD0_FILEID
162 	| FATTR4_WORD0_FS_LOCATIONS,
163 	FATTR4_WORD1_MODE
164 	| FATTR4_WORD1_NUMLINKS
165 	| FATTR4_WORD1_OWNER
166 	| FATTR4_WORD1_OWNER_GROUP
167 	| FATTR4_WORD1_RAWDEV
168 	| FATTR4_WORD1_SPACE_USED
169 	| FATTR4_WORD1_TIME_ACCESS
170 	| FATTR4_WORD1_TIME_METADATA
171 	| FATTR4_WORD1_TIME_MODIFY
172 	| FATTR4_WORD1_MOUNTED_ON_FILEID
173 };
174 
175 static void nfs4_setup_readdir(u64 cookie, __be32 *verifier, struct dentry *dentry,
176 		struct nfs4_readdir_arg *readdir)
177 {
178 	__be32 *start, *p;
179 
180 	BUG_ON(readdir->count < 80);
181 	if (cookie > 2) {
182 		readdir->cookie = cookie;
183 		memcpy(&readdir->verifier, verifier, sizeof(readdir->verifier));
184 		return;
185 	}
186 
187 	readdir->cookie = 0;
188 	memset(&readdir->verifier, 0, sizeof(readdir->verifier));
189 	if (cookie == 2)
190 		return;
191 
192 	/*
193 	 * NFSv4 servers do not return entries for '.' and '..'
194 	 * Therefore, we fake these entries here.  We let '.'
195 	 * have cookie 0 and '..' have cookie 1.  Note that
196 	 * when talking to the server, we always send cookie 0
197 	 * instead of 1 or 2.
198 	 */
199 	start = p = kmap_atomic(*readdir->pages);
200 
201 	if (cookie == 0) {
202 		*p++ = xdr_one;                                  /* next */
203 		*p++ = xdr_zero;                   /* cookie, first word */
204 		*p++ = xdr_one;                   /* cookie, second word */
205 		*p++ = xdr_one;                             /* entry len */
206 		memcpy(p, ".\0\0\0", 4);                        /* entry */
207 		p++;
208 		*p++ = xdr_one;                         /* bitmap length */
209 		*p++ = htonl(FATTR4_WORD0_FILEID);             /* bitmap */
210 		*p++ = htonl(8);              /* attribute buffer length */
211 		p = xdr_encode_hyper(p, NFS_FILEID(dentry->d_inode));
212 	}
213 
214 	*p++ = xdr_one;                                  /* next */
215 	*p++ = xdr_zero;                   /* cookie, first word */
216 	*p++ = xdr_two;                   /* cookie, second word */
217 	*p++ = xdr_two;                             /* entry len */
218 	memcpy(p, "..\0\0", 4);                         /* entry */
219 	p++;
220 	*p++ = xdr_one;                         /* bitmap length */
221 	*p++ = htonl(FATTR4_WORD0_FILEID);             /* bitmap */
222 	*p++ = htonl(8);              /* attribute buffer length */
223 	p = xdr_encode_hyper(p, NFS_FILEID(dentry->d_parent->d_inode));
224 
225 	readdir->pgbase = (char *)p - (char *)start;
226 	readdir->count -= readdir->pgbase;
227 	kunmap_atomic(start);
228 }
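/*
 * Cookie handling summary (worked example, not in the original source):
 * cookie 0 -> fake both the '.' and '..' entries locally; cookie 1 -> fake
 * only '..'; cookie 2 -> fake neither, but still send cookie 0 to the
 * server; cookie > 2 -> pass the caller's cookie and verifier through to
 * the server unchanged.
 */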
229 
230 static int nfs4_wait_clnt_recover(struct nfs_client *clp)
231 {
232 	int res;
233 
234 	might_sleep();
235 
236 	res = wait_on_bit(&clp->cl_state, NFS4CLNT_MANAGER_RUNNING,
237 			nfs_wait_bit_killable, TASK_KILLABLE);
238 	return res;
239 }
240 
241 static int nfs4_delay(struct rpc_clnt *clnt, long *timeout)
242 {
243 	int res = 0;
244 
245 	might_sleep();
246 
247 	if (*timeout <= 0)
248 		*timeout = NFS4_POLL_RETRY_MIN;
249 	if (*timeout > NFS4_POLL_RETRY_MAX)
250 		*timeout = NFS4_POLL_RETRY_MAX;
251 	freezable_schedule_timeout_killable(*timeout);
252 	if (fatal_signal_pending(current))
253 		res = -ERESTARTSYS;
254 	*timeout <<= 1;
255 	return res;
256 }
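/*
 * Retry backoff (illustrative): the first call sleeps for NFS4_POLL_RETRY_MIN
 * (HZ/10) and *timeout doubles after every sleep, so successive calls sleep
 * HZ/10, HZ/5, 2*HZ/5, ... until the delay is clamped to NFS4_POLL_RETRY_MAX
 * (15*HZ) at the top of the next call.
 */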
257 
258 /* This is the error handling routine for processes that are allowed
259  * to sleep.
260  */
261 static int nfs4_handle_exception(struct nfs_server *server, int errorcode, struct nfs4_exception *exception)
262 {
263 	struct nfs_client *clp = server->nfs_client;
264 	struct nfs4_state *state = exception->state;
265 	struct inode *inode = exception->inode;
266 	int ret = errorcode;
267 
268 	exception->retry = 0;
269 	switch(errorcode) {
270 		case 0:
271 			return 0;
272 		case -NFS4ERR_OPENMODE:
273 			if (inode && nfs_have_delegation(inode, FMODE_READ)) {
274 				nfs_inode_return_delegation(inode);
275 				exception->retry = 1;
276 				return 0;
277 			}
278 			if (state == NULL)
279 				break;
280 			nfs4_schedule_stateid_recovery(server, state);
281 			goto wait_on_recovery;
282 		case -NFS4ERR_DELEG_REVOKED:
283 		case -NFS4ERR_ADMIN_REVOKED:
284 		case -NFS4ERR_BAD_STATEID:
285 			if (state == NULL)
286 				break;
287 			nfs_remove_bad_delegation(state->inode);
288 			nfs4_schedule_stateid_recovery(server, state);
289 			goto wait_on_recovery;
290 		case -NFS4ERR_EXPIRED:
291 			if (state != NULL)
292 				nfs4_schedule_stateid_recovery(server, state);
293 		case -NFS4ERR_STALE_STATEID:
294 		case -NFS4ERR_STALE_CLIENTID:
295 			nfs4_schedule_lease_recovery(clp);
296 			goto wait_on_recovery;
297 #if defined(CONFIG_NFS_V4_1)
298 		case -NFS4ERR_BADSESSION:
299 		case -NFS4ERR_BADSLOT:
300 		case -NFS4ERR_BAD_HIGH_SLOT:
301 		case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
302 		case -NFS4ERR_DEADSESSION:
303 		case -NFS4ERR_SEQ_FALSE_RETRY:
304 		case -NFS4ERR_SEQ_MISORDERED:
305 			dprintk("%s ERROR: %d Reset session\n", __func__,
306 				errorcode);
307 			nfs4_schedule_session_recovery(clp->cl_session);
308 			exception->retry = 1;
309 			break;
310 #endif /* defined(CONFIG_NFS_V4_1) */
311 		case -NFS4ERR_FILE_OPEN:
312 			if (exception->timeout > HZ) {
313 				/* We have retried a decent amount, time to
314 				 * fail
315 				 */
316 				ret = -EBUSY;
317 				break;
318 			}
319 		case -NFS4ERR_GRACE:
320 		case -NFS4ERR_DELAY:
321 		case -EKEYEXPIRED:
322 			ret = nfs4_delay(server->client, &exception->timeout);
323 			if (ret != 0)
324 				break;
325 		case -NFS4ERR_RETRY_UNCACHED_REP:
326 		case -NFS4ERR_OLD_STATEID:
327 			exception->retry = 1;
328 			break;
329 		case -NFS4ERR_BADOWNER:
330 			/* The following works around a Linux server bug! */
331 		case -NFS4ERR_BADNAME:
332 			if (server->caps & NFS_CAP_UIDGID_NOMAP) {
333 				server->caps &= ~NFS_CAP_UIDGID_NOMAP;
334 				exception->retry = 1;
335 				printk(KERN_WARNING "NFS: v4 server %s "
336 						"does not accept raw "
337 						"uid/gids. "
338 						"Reenabling the idmapper.\n",
339 						server->nfs_client->cl_hostname);
340 			}
341 	}
342 	/* We failed to handle the error */
343 	return nfs4_map_errors(ret);
344 wait_on_recovery:
345 	ret = nfs4_wait_clnt_recover(clp);
346 	if (ret == 0)
347 		exception->retry = 1;
348 	return ret;
349 }
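/*
 * Typical caller pattern (a sketch of the idiom used throughout this file,
 * e.g. by nfs4_do_open_reclaim() below; _nfs4_some_operation is only a
 * placeholder for the real call):
 *
 *	struct nfs4_exception exception = { };
 *	int err;
 *	do {
 *		err = nfs4_handle_exception(server,
 *				_nfs4_some_operation(...),
 *				&exception);
 *	} while (exception.retry);
 *
 * nfs4_handle_exception() sleeps, schedules or waits for state recovery as
 * needed, and sets exception.retry when the operation should be retried.
 */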
350 
351 
352 static void do_renew_lease(struct nfs_client *clp, unsigned long timestamp)
353 {
354 	spin_lock(&clp->cl_lock);
355 	if (time_before(clp->cl_last_renewal, timestamp))
356 		clp->cl_last_renewal = timestamp;
357 	spin_unlock(&clp->cl_lock);
358 }
359 
360 static void renew_lease(const struct nfs_server *server, unsigned long timestamp)
361 {
362 	do_renew_lease(server->nfs_client, timestamp);
363 }
364 
365 #if defined(CONFIG_NFS_V4_1)
366 
367 /*
368  * nfs4_free_slot - free a slot and efficiently update slot table.
369  *
370  * Freeing a slot is trivially done by clearing its respective bit
371  * in the bitmap.
372  * If the freed slotid equals highest_used_slotid, we want to update it
373  * so that the server is able to size down the slot table if needed;
374  * otherwise we know that highest_used_slotid is still in use.
375  * When updating highest_used_slotid there may be "holes" in the bitmap,
376  * so we need to scan down from highest_used_slotid to 0 looking for the
377  * now-highest slotid in use.
378  * If none is found, highest_used_slotid is set to NFS4_NO_SLOT.
379  *
380  * Must be called while holding tbl->slot_tbl_lock
381  */
382 static void
383 nfs4_free_slot(struct nfs4_slot_table *tbl, u32 slotid)
384 {
385 	BUG_ON(slotid >= NFS4_MAX_SLOT_TABLE);
386 	/* clear used bit in bitmap */
387 	__clear_bit(slotid, tbl->used_slots);
388 
389 	/* update highest_used_slotid when it is freed */
390 	if (slotid == tbl->highest_used_slotid) {
391 		slotid = find_last_bit(tbl->used_slots, tbl->max_slots);
392 		if (slotid < tbl->max_slots)
393 			tbl->highest_used_slotid = slotid;
394 		else
395 			tbl->highest_used_slotid = NFS4_NO_SLOT;
396 	}
397 	dprintk("%s: slotid %u highest_used_slotid %d\n", __func__,
398 		slotid, tbl->highest_used_slotid);
399 }
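/*
 * Worked example (annotation, not in the original source): with
 * used_slots = 0b1011 and highest_used_slotid == 3, freeing slot 3 clears
 * its bit and the find_last_bit() rescan lowers highest_used_slotid to 1;
 * freeing the last remaining slot empties the bitmap and sets
 * highest_used_slotid to NFS4_NO_SLOT.
 */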
400 
401 bool nfs4_set_task_privileged(struct rpc_task *task, void *dummy)
402 {
403 	rpc_task_set_priority(task, RPC_PRIORITY_PRIVILEGED);
404 	return true;
405 }
406 
407 /*
408  * Signal state manager thread if session fore channel is drained
409  */
410 static void nfs4_check_drain_fc_complete(struct nfs4_session *ses)
411 {
412 	if (!test_bit(NFS4_SESSION_DRAINING, &ses->session_state)) {
413 		rpc_wake_up_first(&ses->fc_slot_table.slot_tbl_waitq,
414 				nfs4_set_task_privileged, NULL);
415 		return;
416 	}
417 
418 	if (ses->fc_slot_table.highest_used_slotid != NFS4_NO_SLOT)
419 		return;
420 
421 	dprintk("%s COMPLETE: Session Fore Channel Drained\n", __func__);
422 	complete(&ses->fc_slot_table.complete);
423 }
424 
425 /*
426  * Signal state manager thread if session back channel is drained
427  */
428 void nfs4_check_drain_bc_complete(struct nfs4_session *ses)
429 {
430 	if (!test_bit(NFS4_SESSION_DRAINING, &ses->session_state) ||
431 	    ses->bc_slot_table.highest_used_slotid != NFS4_NO_SLOT)
432 		return;
433 	dprintk("%s COMPLETE: Session Back Channel Drained\n", __func__);
434 	complete(&ses->bc_slot_table.complete);
435 }
436 
437 static void nfs41_sequence_free_slot(struct nfs4_sequence_res *res)
438 {
439 	struct nfs4_slot_table *tbl;
440 
441 	tbl = &res->sr_session->fc_slot_table;
442 	if (!res->sr_slot) {
443 		/* just wake up the next task waiting, since
444 		 * we may not have consumed a slot after all */
445 		dprintk("%s: No slot\n", __func__);
446 		return;
447 	}
448 
449 	spin_lock(&tbl->slot_tbl_lock);
450 	nfs4_free_slot(tbl, res->sr_slot - tbl->slots);
451 	nfs4_check_drain_fc_complete(res->sr_session);
452 	spin_unlock(&tbl->slot_tbl_lock);
453 	res->sr_slot = NULL;
454 }
455 
456 static int nfs41_sequence_done(struct rpc_task *task, struct nfs4_sequence_res *res)
457 {
458 	unsigned long timestamp;
459 	struct nfs_client *clp;
460 
461 	/*
462 	 * sr_status remains 1 if an RPC level error occurred. The server
463 	 * may or may not have processed the sequence operation.
464 	 * Proceed as if the server received and processed the sequence
465 	 * operation.
466 	 */
467 	if (res->sr_status == 1)
468 		res->sr_status = NFS_OK;
469 
470 	/* don't increment the sequence number if the task wasn't sent */
471 	if (!RPC_WAS_SENT(task))
472 		goto out;
473 
474 	/* Check the SEQUENCE operation status */
475 	switch (res->sr_status) {
476 	case 0:
477 		/* Update the slot's sequence and clientid lease timer */
478 		++res->sr_slot->seq_nr;
479 		timestamp = res->sr_renewal_time;
480 		clp = res->sr_session->clp;
481 		do_renew_lease(clp, timestamp);
482 		/* Check sequence flags */
483 		if (res->sr_status_flags != 0)
484 			nfs4_schedule_lease_recovery(clp);
485 		break;
486 	case -NFS4ERR_DELAY:
487 		/* The server detected a resend of the RPC call and
488 		 * returned NFS4ERR_DELAY as per Section 2.10.6.2
489 		 * of RFC5661.
490 		 */
491 		dprintk("%s: slot=%td seq=%d: Operation in progress\n",
492 			__func__,
493 			res->sr_slot - res->sr_session->fc_slot_table.slots,
494 			res->sr_slot->seq_nr);
495 		goto out_retry;
496 	default:
497 		/* Just update the slot sequence no. */
498 		++res->sr_slot->seq_nr;
499 	}
500 out:
501 	/* The session may be reset by one of the error handlers. */
502 	dprintk("%s: status %d, freeing the slot\n", __func__, res->sr_status);
503 	nfs41_sequence_free_slot(res);
504 	return 1;
505 out_retry:
506 	if (!rpc_restart_call(task))
507 		goto out;
508 	rpc_delay(task, NFS4_POLL_RETRY_MAX);
509 	return 0;
510 }
511 
512 static int nfs4_sequence_done(struct rpc_task *task,
513 			       struct nfs4_sequence_res *res)
514 {
515 	if (res->sr_session == NULL)
516 		return 1;
517 	return nfs41_sequence_done(task, res);
518 }
519 
520 /*
521  * nfs4_find_slot - efficiently look for a free slot
522  *
523  * nfs4_find_slot looks for an unset bit in the used_slots bitmap.
524  * If found, we mark the slot as used, update the highest_used_slotid,
525  * and set up the sequence operation args accordingly.
526  * The slot number is returned if found, or NFS4_NO_SLOT otherwise.
527  *
528  * Note: must be called while holding the slot_tbl_lock.
529  */
530 static u32
531 nfs4_find_slot(struct nfs4_slot_table *tbl)
532 {
533 	u32 slotid;
534 	u32 ret_id = NFS4_NO_SLOT;
535 
536 	dprintk("--> %s used_slots=%04lx highest_used=%u max_slots=%u\n",
537 		__func__, tbl->used_slots[0], tbl->highest_used_slotid,
538 		tbl->max_slots);
539 	slotid = find_first_zero_bit(tbl->used_slots, tbl->max_slots);
540 	if (slotid >= tbl->max_slots)
541 		goto out;
542 	__set_bit(slotid, tbl->used_slots);
543 	if (slotid > tbl->highest_used_slotid ||
544 			tbl->highest_used_slotid == NFS4_NO_SLOT)
545 		tbl->highest_used_slotid = slotid;
546 	ret_id = slotid;
547 out:
548 	dprintk("<-- %s used_slots=%04lx highest_used=%d slotid=%d \n",
549 		__func__, tbl->used_slots[0], tbl->highest_used_slotid, ret_id);
550 	return ret_id;
551 }
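/*
 * Example (illustrative, not in the original source): with
 * used_slots = 0b0111 and max_slots = 8, find_first_zero_bit() returns 3;
 * the bit is set, highest_used_slotid is raised to 3 if it was lower (or
 * was NFS4_NO_SLOT), and 3 is returned.  With all max_slots bits already
 * set, NFS4_NO_SLOT is returned instead.
 */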
552 
553 static void nfs41_init_sequence(struct nfs4_sequence_args *args,
554 		struct nfs4_sequence_res *res, int cache_reply)
555 {
556 	args->sa_session = NULL;
557 	args->sa_cache_this = 0;
558 	if (cache_reply)
559 		args->sa_cache_this = 1;
560 	res->sr_session = NULL;
561 	res->sr_slot = NULL;
562 }
563 
564 int nfs41_setup_sequence(struct nfs4_session *session,
565 				struct nfs4_sequence_args *args,
566 				struct nfs4_sequence_res *res,
567 				struct rpc_task *task)
568 {
569 	struct nfs4_slot *slot;
570 	struct nfs4_slot_table *tbl;
571 	u32 slotid;
572 
573 	dprintk("--> %s\n", __func__);
574 	/* slot already allocated? */
575 	if (res->sr_slot != NULL)
576 		return 0;
577 
578 	tbl = &session->fc_slot_table;
579 
580 	spin_lock(&tbl->slot_tbl_lock);
581 	if (test_bit(NFS4_SESSION_DRAINING, &session->session_state) &&
582 	    !rpc_task_has_priority(task, RPC_PRIORITY_PRIVILEGED)) {
583 		/* The state manager will wait until the slot table is empty */
584 		rpc_sleep_on(&tbl->slot_tbl_waitq, task, NULL);
585 		spin_unlock(&tbl->slot_tbl_lock);
586 		dprintk("%s session is draining\n", __func__);
587 		return -EAGAIN;
588 	}
589 
590 	if (!rpc_queue_empty(&tbl->slot_tbl_waitq) &&
591 	    !rpc_task_has_priority(task, RPC_PRIORITY_PRIVILEGED)) {
592 		rpc_sleep_on(&tbl->slot_tbl_waitq, task, NULL);
593 		spin_unlock(&tbl->slot_tbl_lock);
594 		dprintk("%s enforce FIFO order\n", __func__);
595 		return -EAGAIN;
596 	}
597 
598 	slotid = nfs4_find_slot(tbl);
599 	if (slotid == NFS4_NO_SLOT) {
600 		rpc_sleep_on(&tbl->slot_tbl_waitq, task, NULL);
601 		spin_unlock(&tbl->slot_tbl_lock);
602 		dprintk("<-- %s: no free slots\n", __func__);
603 		return -EAGAIN;
604 	}
605 	spin_unlock(&tbl->slot_tbl_lock);
606 
607 	rpc_task_set_priority(task, RPC_PRIORITY_NORMAL);
608 	slot = tbl->slots + slotid;
609 	args->sa_session = session;
610 	args->sa_slotid = slotid;
611 
612 	dprintk("<-- %s slotid=%d seqid=%d\n", __func__, slotid, slot->seq_nr);
613 
614 	res->sr_session = session;
615 	res->sr_slot = slot;
616 	res->sr_renewal_time = jiffies;
617 	res->sr_status_flags = 0;
618 	/*
619 	 * sr_status is only set in decode_sequence, and so will remain
620 	 * set to 1 if an rpc level failure occurs.
621 	 */
622 	res->sr_status = 1;
623 	return 0;
624 }
625 EXPORT_SYMBOL_GPL(nfs41_setup_sequence);
626 
627 int nfs4_setup_sequence(const struct nfs_server *server,
628 			struct nfs4_sequence_args *args,
629 			struct nfs4_sequence_res *res,
630 			struct rpc_task *task)
631 {
632 	struct nfs4_session *session = nfs4_get_session(server);
633 	int ret = 0;
634 
635 	if (session == NULL)
636 		goto out;
637 
638 	dprintk("--> %s clp %p session %p sr_slot %td\n",
639 		__func__, session->clp, session, res->sr_slot ?
640 			res->sr_slot - session->fc_slot_table.slots : -1);
641 
642 	ret = nfs41_setup_sequence(session, args, res, task);
643 out:
644 	dprintk("<-- %s status=%d\n", __func__, ret);
645 	return ret;
646 }
647 
648 struct nfs41_call_sync_data {
649 	const struct nfs_server *seq_server;
650 	struct nfs4_sequence_args *seq_args;
651 	struct nfs4_sequence_res *seq_res;
652 };
653 
654 static void nfs41_call_sync_prepare(struct rpc_task *task, void *calldata)
655 {
656 	struct nfs41_call_sync_data *data = calldata;
657 
658 	dprintk("--> %s data->seq_server %p\n", __func__, data->seq_server);
659 
660 	if (nfs4_setup_sequence(data->seq_server, data->seq_args,
661 				data->seq_res, task))
662 		return;
663 	rpc_call_start(task);
664 }
665 
666 static void nfs41_call_priv_sync_prepare(struct rpc_task *task, void *calldata)
667 {
668 	rpc_task_set_priority(task, RPC_PRIORITY_PRIVILEGED);
669 	nfs41_call_sync_prepare(task, calldata);
670 }
671 
672 static void nfs41_call_sync_done(struct rpc_task *task, void *calldata)
673 {
674 	struct nfs41_call_sync_data *data = calldata;
675 
676 	nfs41_sequence_done(task, data->seq_res);
677 }
678 
679 static const struct rpc_call_ops nfs41_call_sync_ops = {
680 	.rpc_call_prepare = nfs41_call_sync_prepare,
681 	.rpc_call_done = nfs41_call_sync_done,
682 };
683 
684 static const struct rpc_call_ops nfs41_call_priv_sync_ops = {
685 	.rpc_call_prepare = nfs41_call_priv_sync_prepare,
686 	.rpc_call_done = nfs41_call_sync_done,
687 };
688 
689 static int nfs4_call_sync_sequence(struct rpc_clnt *clnt,
690 				   struct nfs_server *server,
691 				   struct rpc_message *msg,
692 				   struct nfs4_sequence_args *args,
693 				   struct nfs4_sequence_res *res,
694 				   int privileged)
695 {
696 	int ret;
697 	struct rpc_task *task;
698 	struct nfs41_call_sync_data data = {
699 		.seq_server = server,
700 		.seq_args = args,
701 		.seq_res = res,
702 	};
703 	struct rpc_task_setup task_setup = {
704 		.rpc_client = clnt,
705 		.rpc_message = msg,
706 		.callback_ops = &nfs41_call_sync_ops,
707 		.callback_data = &data
708 	};
709 
710 	if (privileged)
711 		task_setup.callback_ops = &nfs41_call_priv_sync_ops;
712 	task = rpc_run_task(&task_setup);
713 	if (IS_ERR(task))
714 		ret = PTR_ERR(task);
715 	else {
716 		ret = task->tk_status;
717 		rpc_put_task(task);
718 	}
719 	return ret;
720 }
721 
722 int _nfs4_call_sync_session(struct rpc_clnt *clnt,
723 			    struct nfs_server *server,
724 			    struct rpc_message *msg,
725 			    struct nfs4_sequence_args *args,
726 			    struct nfs4_sequence_res *res,
727 			    int cache_reply)
728 {
729 	nfs41_init_sequence(args, res, cache_reply);
730 	return nfs4_call_sync_sequence(clnt, server, msg, args, res, 0);
731 }
732 
733 #else
734 static inline
735 void nfs41_init_sequence(struct nfs4_sequence_args *args,
736 		struct nfs4_sequence_res *res, int cache_reply)
737 {
738 }
739 
740 static int nfs4_sequence_done(struct rpc_task *task,
741 			       struct nfs4_sequence_res *res)
742 {
743 	return 1;
744 }
745 #endif /* CONFIG_NFS_V4_1 */
746 
747 int _nfs4_call_sync(struct rpc_clnt *clnt,
748 		    struct nfs_server *server,
749 		    struct rpc_message *msg,
750 		    struct nfs4_sequence_args *args,
751 		    struct nfs4_sequence_res *res,
752 		    int cache_reply)
753 {
754 	nfs41_init_sequence(args, res, cache_reply);
755 	return rpc_call_sync(clnt, msg, 0);
756 }
757 
758 static inline
759 int nfs4_call_sync(struct rpc_clnt *clnt,
760 		   struct nfs_server *server,
761 		   struct rpc_message *msg,
762 		   struct nfs4_sequence_args *args,
763 		   struct nfs4_sequence_res *res,
764 		   int cache_reply)
765 {
766 	return server->nfs_client->cl_mvops->call_sync(clnt, server, msg,
767 						args, res, cache_reply);
768 }
769 
770 static void update_changeattr(struct inode *dir, struct nfs4_change_info *cinfo)
771 {
772 	struct nfs_inode *nfsi = NFS_I(dir);
773 
774 	spin_lock(&dir->i_lock);
775 	nfsi->cache_validity |= NFS_INO_INVALID_ATTR|NFS_INO_REVAL_PAGECACHE|NFS_INO_INVALID_DATA;
776 	if (!cinfo->atomic || cinfo->before != dir->i_version)
777 		nfs_force_lookup_revalidate(dir);
778 	dir->i_version = cinfo->after;
779 	spin_unlock(&dir->i_lock);
780 }
781 
782 struct nfs4_opendata {
783 	struct kref kref;
784 	struct nfs_openargs o_arg;
785 	struct nfs_openres o_res;
786 	struct nfs_open_confirmargs c_arg;
787 	struct nfs_open_confirmres c_res;
788 	struct nfs4_string owner_name;
789 	struct nfs4_string group_name;
790 	struct nfs_fattr f_attr;
791 	struct nfs_fattr dir_attr;
792 	struct dentry *dir;
793 	struct dentry *dentry;
794 	struct nfs4_state_owner *owner;
795 	struct nfs4_state *state;
796 	struct iattr attrs;
797 	unsigned long timestamp;
798 	unsigned int rpc_done : 1;
799 	int rpc_status;
800 	int cancelled;
801 };
802 
803 
804 static void nfs4_init_opendata_res(struct nfs4_opendata *p)
805 {
806 	p->o_res.f_attr = &p->f_attr;
807 	p->o_res.dir_attr = &p->dir_attr;
808 	p->o_res.seqid = p->o_arg.seqid;
809 	p->c_res.seqid = p->c_arg.seqid;
810 	p->o_res.server = p->o_arg.server;
811 	nfs_fattr_init(&p->f_attr);
812 	nfs_fattr_init(&p->dir_attr);
813 	nfs_fattr_init_names(&p->f_attr, &p->owner_name, &p->group_name);
814 }
815 
816 static struct nfs4_opendata *nfs4_opendata_alloc(struct dentry *dentry,
817 		struct nfs4_state_owner *sp, fmode_t fmode, int flags,
818 		const struct iattr *attrs,
819 		gfp_t gfp_mask)
820 {
821 	struct dentry *parent = dget_parent(dentry);
822 	struct inode *dir = parent->d_inode;
823 	struct nfs_server *server = NFS_SERVER(dir);
824 	struct nfs4_opendata *p;
825 
826 	p = kzalloc(sizeof(*p), gfp_mask);
827 	if (p == NULL)
828 		goto err;
829 	p->o_arg.seqid = nfs_alloc_seqid(&sp->so_seqid, gfp_mask);
830 	if (p->o_arg.seqid == NULL)
831 		goto err_free;
832 	nfs_sb_active(dentry->d_sb);
833 	p->dentry = dget(dentry);
834 	p->dir = parent;
835 	p->owner = sp;
836 	atomic_inc(&sp->so_count);
837 	p->o_arg.fh = NFS_FH(dir);
838 	p->o_arg.open_flags = flags;
839 	p->o_arg.fmode = fmode & (FMODE_READ|FMODE_WRITE);
840 	p->o_arg.clientid = server->nfs_client->cl_clientid;
841 	p->o_arg.id.create_time = ktime_to_ns(sp->so_seqid.create_time);
842 	p->o_arg.id.uniquifier = sp->so_seqid.owner_id;
843 	p->o_arg.name = &dentry->d_name;
844 	p->o_arg.server = server;
845 	p->o_arg.bitmask = server->attr_bitmask;
846 	p->o_arg.dir_bitmask = server->cache_consistency_bitmask;
847 	p->o_arg.claim = NFS4_OPEN_CLAIM_NULL;
848 	if (attrs != NULL && attrs->ia_valid != 0) {
849 		__be32 verf[2];
850 
851 		p->o_arg.u.attrs = &p->attrs;
852 		memcpy(&p->attrs, attrs, sizeof(p->attrs));
853 
854 		verf[0] = jiffies;
855 		verf[1] = current->pid;
856 		memcpy(p->o_arg.u.verifier.data, verf,
857 				sizeof(p->o_arg.u.verifier.data));
858 	}
859 	p->c_arg.fh = &p->o_res.fh;
860 	p->c_arg.stateid = &p->o_res.stateid;
861 	p->c_arg.seqid = p->o_arg.seqid;
862 	nfs4_init_opendata_res(p);
863 	kref_init(&p->kref);
864 	return p;
865 err_free:
866 	kfree(p);
867 err:
868 	dput(parent);
869 	return NULL;
870 }
871 
872 static void nfs4_opendata_free(struct kref *kref)
873 {
874 	struct nfs4_opendata *p = container_of(kref,
875 			struct nfs4_opendata, kref);
876 	struct super_block *sb = p->dentry->d_sb;
877 
878 	nfs_free_seqid(p->o_arg.seqid);
879 	if (p->state != NULL)
880 		nfs4_put_open_state(p->state);
881 	nfs4_put_state_owner(p->owner);
882 	dput(p->dir);
883 	dput(p->dentry);
884 	nfs_sb_deactive(sb);
885 	nfs_fattr_free_names(&p->f_attr);
886 	kfree(p);
887 }
888 
889 static void nfs4_opendata_put(struct nfs4_opendata *p)
890 {
891 	if (p != NULL)
892 		kref_put(&p->kref, nfs4_opendata_free);
893 }
894 
895 static int nfs4_wait_for_completion_rpc_task(struct rpc_task *task)
896 {
897 	int ret;
898 
899 	ret = rpc_wait_for_completion_task(task);
900 	return ret;
901 }
902 
903 static int can_open_cached(struct nfs4_state *state, fmode_t mode, int open_mode)
904 {
905 	int ret = 0;
906 
907 	if (open_mode & (O_EXCL|O_TRUNC))
908 		goto out;
909 	switch (mode & (FMODE_READ|FMODE_WRITE)) {
910 		case FMODE_READ:
911 			ret |= test_bit(NFS_O_RDONLY_STATE, &state->flags) != 0
912 				&& state->n_rdonly != 0;
913 			break;
914 		case FMODE_WRITE:
915 			ret |= test_bit(NFS_O_WRONLY_STATE, &state->flags) != 0
916 				&& state->n_wronly != 0;
917 			break;
918 		case FMODE_READ|FMODE_WRITE:
919 			ret |= test_bit(NFS_O_RDWR_STATE, &state->flags) != 0
920 				&& state->n_rdwr != 0;
921 	}
922 out:
923 	return ret;
924 }
925 
926 static int can_open_delegated(struct nfs_delegation *delegation, fmode_t fmode)
927 {
928 	if (delegation == NULL)
929 		return 0;
930 	if ((delegation->type & fmode) != fmode)
931 		return 0;
932 	if (test_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags))
933 		return 0;
934 	nfs_mark_delegation_referenced(delegation);
935 	return 1;
936 }
937 
938 static void update_open_stateflags(struct nfs4_state *state, fmode_t fmode)
939 {
940 	switch (fmode) {
941 		case FMODE_WRITE:
942 			state->n_wronly++;
943 			break;
944 		case FMODE_READ:
945 			state->n_rdonly++;
946 			break;
947 		case FMODE_READ|FMODE_WRITE:
948 			state->n_rdwr++;
949 	}
950 	nfs4_state_set_mode_locked(state, state->state | fmode);
951 }
952 
953 static void nfs_set_open_stateid_locked(struct nfs4_state *state, nfs4_stateid *stateid, fmode_t fmode)
954 {
955 	if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0)
956 		nfs4_stateid_copy(&state->stateid, stateid);
957 	nfs4_stateid_copy(&state->open_stateid, stateid);
958 	switch (fmode) {
959 		case FMODE_READ:
960 			set_bit(NFS_O_RDONLY_STATE, &state->flags);
961 			break;
962 		case FMODE_WRITE:
963 			set_bit(NFS_O_WRONLY_STATE, &state->flags);
964 			break;
965 		case FMODE_READ|FMODE_WRITE:
966 			set_bit(NFS_O_RDWR_STATE, &state->flags);
967 	}
968 }
969 
970 static void nfs_set_open_stateid(struct nfs4_state *state, nfs4_stateid *stateid, fmode_t fmode)
971 {
972 	write_seqlock(&state->seqlock);
973 	nfs_set_open_stateid_locked(state, stateid, fmode);
974 	write_sequnlock(&state->seqlock);
975 }
976 
977 static void __update_open_stateid(struct nfs4_state *state, nfs4_stateid *open_stateid, const nfs4_stateid *deleg_stateid, fmode_t fmode)
978 {
979 	/*
980 	 * Protect the call to nfs4_state_set_mode_locked and
981 	 * serialise the stateid update
982 	 */
983 	write_seqlock(&state->seqlock);
984 	if (deleg_stateid != NULL) {
985 		nfs4_stateid_copy(&state->stateid, deleg_stateid);
986 		set_bit(NFS_DELEGATED_STATE, &state->flags);
987 	}
988 	if (open_stateid != NULL)
989 		nfs_set_open_stateid_locked(state, open_stateid, fmode);
990 	write_sequnlock(&state->seqlock);
991 	spin_lock(&state->owner->so_lock);
992 	update_open_stateflags(state, fmode);
993 	spin_unlock(&state->owner->so_lock);
994 }
995 
996 static int update_open_stateid(struct nfs4_state *state, nfs4_stateid *open_stateid, nfs4_stateid *delegation, fmode_t fmode)
997 {
998 	struct nfs_inode *nfsi = NFS_I(state->inode);
999 	struct nfs_delegation *deleg_cur;
1000 	int ret = 0;
1001 
1002 	fmode &= (FMODE_READ|FMODE_WRITE);
1003 
1004 	rcu_read_lock();
1005 	deleg_cur = rcu_dereference(nfsi->delegation);
1006 	if (deleg_cur == NULL)
1007 		goto no_delegation;
1008 
1009 	spin_lock(&deleg_cur->lock);
1010 	if (nfsi->delegation != deleg_cur ||
1011 	    (deleg_cur->type & fmode) != fmode)
1012 		goto no_delegation_unlock;
1013 
1014 	if (delegation == NULL)
1015 		delegation = &deleg_cur->stateid;
1016 	else if (!nfs4_stateid_match(&deleg_cur->stateid, delegation))
1017 		goto no_delegation_unlock;
1018 
1019 	nfs_mark_delegation_referenced(deleg_cur);
1020 	__update_open_stateid(state, open_stateid, &deleg_cur->stateid, fmode);
1021 	ret = 1;
1022 no_delegation_unlock:
1023 	spin_unlock(&deleg_cur->lock);
1024 no_delegation:
1025 	rcu_read_unlock();
1026 
1027 	if (!ret && open_stateid != NULL) {
1028 		__update_open_stateid(state, open_stateid, NULL, fmode);
1029 		ret = 1;
1030 	}
1031 
1032 	return ret;
1033 }
1034 
1035 
1036 static void nfs4_return_incompatible_delegation(struct inode *inode, fmode_t fmode)
1037 {
1038 	struct nfs_delegation *delegation;
1039 
1040 	rcu_read_lock();
1041 	delegation = rcu_dereference(NFS_I(inode)->delegation);
1042 	if (delegation == NULL || (delegation->type & fmode) == fmode) {
1043 		rcu_read_unlock();
1044 		return;
1045 	}
1046 	rcu_read_unlock();
1047 	nfs_inode_return_delegation(inode);
1048 }
1049 
1050 static struct nfs4_state *nfs4_try_open_cached(struct nfs4_opendata *opendata)
1051 {
1052 	struct nfs4_state *state = opendata->state;
1053 	struct nfs_inode *nfsi = NFS_I(state->inode);
1054 	struct nfs_delegation *delegation;
1055 	int open_mode = opendata->o_arg.open_flags & (O_EXCL|O_TRUNC);
1056 	fmode_t fmode = opendata->o_arg.fmode;
1057 	nfs4_stateid stateid;
1058 	int ret = -EAGAIN;
1059 
1060 	for (;;) {
1061 		if (can_open_cached(state, fmode, open_mode)) {
1062 			spin_lock(&state->owner->so_lock);
1063 			if (can_open_cached(state, fmode, open_mode)) {
1064 				update_open_stateflags(state, fmode);
1065 				spin_unlock(&state->owner->so_lock);
1066 				goto out_return_state;
1067 			}
1068 			spin_unlock(&state->owner->so_lock);
1069 		}
1070 		rcu_read_lock();
1071 		delegation = rcu_dereference(nfsi->delegation);
1072 		if (!can_open_delegated(delegation, fmode)) {
1073 			rcu_read_unlock();
1074 			break;
1075 		}
1076 		/* Save the delegation */
1077 		nfs4_stateid_copy(&stateid, &delegation->stateid);
1078 		rcu_read_unlock();
1079 		ret = nfs_may_open(state->inode, state->owner->so_cred, open_mode);
1080 		if (ret != 0)
1081 			goto out;
1082 		ret = -EAGAIN;
1083 
1084 		/* Try to update the stateid using the delegation */
1085 		if (update_open_stateid(state, NULL, &stateid, fmode))
1086 			goto out_return_state;
1087 	}
1088 out:
1089 	return ERR_PTR(ret);
1090 out_return_state:
1091 	atomic_inc(&state->count);
1092 	return state;
1093 }
1094 
1095 static struct nfs4_state *nfs4_opendata_to_nfs4_state(struct nfs4_opendata *data)
1096 {
1097 	struct inode *inode;
1098 	struct nfs4_state *state = NULL;
1099 	struct nfs_delegation *delegation;
1100 	int ret;
1101 
1102 	if (!data->rpc_done) {
1103 		state = nfs4_try_open_cached(data);
1104 		goto out;
1105 	}
1106 
1107 	ret = -EAGAIN;
1108 	if (!(data->f_attr.valid & NFS_ATTR_FATTR))
1109 		goto err;
1110 	inode = nfs_fhget(data->dir->d_sb, &data->o_res.fh, &data->f_attr);
1111 	ret = PTR_ERR(inode);
1112 	if (IS_ERR(inode))
1113 		goto err;
1114 	ret = -ENOMEM;
1115 	state = nfs4_get_open_state(inode, data->owner);
1116 	if (state == NULL)
1117 		goto err_put_inode;
1118 	if (data->o_res.delegation_type != 0) {
1119 		struct nfs_client *clp = NFS_SERVER(inode)->nfs_client;
1120 		int delegation_flags = 0;
1121 
1122 		rcu_read_lock();
1123 		delegation = rcu_dereference(NFS_I(inode)->delegation);
1124 		if (delegation)
1125 			delegation_flags = delegation->flags;
1126 		rcu_read_unlock();
1127 		if (data->o_arg.claim == NFS4_OPEN_CLAIM_DELEGATE_CUR) {
1128 			pr_err_ratelimited("NFS: Broken NFSv4 server %s is "
1129 					"returning a delegation for "
1130 					"OPEN(CLAIM_DELEGATE_CUR)\n",
1131 					clp->cl_hostname);
1132 		} else if ((delegation_flags & 1UL<<NFS_DELEGATION_NEED_RECLAIM) == 0)
1133 			nfs_inode_set_delegation(state->inode,
1134 					data->owner->so_cred,
1135 					&data->o_res);
1136 		else
1137 			nfs_inode_reclaim_delegation(state->inode,
1138 					data->owner->so_cred,
1139 					&data->o_res);
1140 	}
1141 
1142 	update_open_stateid(state, &data->o_res.stateid, NULL,
1143 			data->o_arg.fmode);
1144 	iput(inode);
1145 out:
1146 	return state;
1147 err_put_inode:
1148 	iput(inode);
1149 err:
1150 	return ERR_PTR(ret);
1151 }
1152 
1153 static struct nfs_open_context *nfs4_state_find_open_context(struct nfs4_state *state)
1154 {
1155 	struct nfs_inode *nfsi = NFS_I(state->inode);
1156 	struct nfs_open_context *ctx;
1157 
1158 	spin_lock(&state->inode->i_lock);
1159 	list_for_each_entry(ctx, &nfsi->open_files, list) {
1160 		if (ctx->state != state)
1161 			continue;
1162 		get_nfs_open_context(ctx);
1163 		spin_unlock(&state->inode->i_lock);
1164 		return ctx;
1165 	}
1166 	spin_unlock(&state->inode->i_lock);
1167 	return ERR_PTR(-ENOENT);
1168 }
1169 
1170 static struct nfs4_opendata *nfs4_open_recoverdata_alloc(struct nfs_open_context *ctx, struct nfs4_state *state)
1171 {
1172 	struct nfs4_opendata *opendata;
1173 
1174 	opendata = nfs4_opendata_alloc(ctx->dentry, state->owner, 0, 0, NULL, GFP_NOFS);
1175 	if (opendata == NULL)
1176 		return ERR_PTR(-ENOMEM);
1177 	opendata->state = state;
1178 	atomic_inc(&state->count);
1179 	return opendata;
1180 }
1181 
1182 static int nfs4_open_recover_helper(struct nfs4_opendata *opendata, fmode_t fmode, struct nfs4_state **res)
1183 {
1184 	struct nfs4_state *newstate;
1185 	int ret;
1186 
1187 	opendata->o_arg.open_flags = 0;
1188 	opendata->o_arg.fmode = fmode;
1189 	memset(&opendata->o_res, 0, sizeof(opendata->o_res));
1190 	memset(&opendata->c_res, 0, sizeof(opendata->c_res));
1191 	nfs4_init_opendata_res(opendata);
1192 	ret = _nfs4_recover_proc_open(opendata);
1193 	if (ret != 0)
1194 		return ret;
1195 	newstate = nfs4_opendata_to_nfs4_state(opendata);
1196 	if (IS_ERR(newstate))
1197 		return PTR_ERR(newstate);
1198 	nfs4_close_state(newstate, fmode);
1199 	*res = newstate;
1200 	return 0;
1201 }
1202 
1203 static int nfs4_open_recover(struct nfs4_opendata *opendata, struct nfs4_state *state)
1204 {
1205 	struct nfs4_state *newstate;
1206 	int ret;
1207 
1208 	/* memory barrier prior to reading state->n_* */
1209 	clear_bit(NFS_DELEGATED_STATE, &state->flags);
1210 	smp_rmb();
1211 	if (state->n_rdwr != 0) {
1212 		clear_bit(NFS_O_RDWR_STATE, &state->flags);
1213 		ret = nfs4_open_recover_helper(opendata, FMODE_READ|FMODE_WRITE, &newstate);
1214 		if (ret != 0)
1215 			return ret;
1216 		if (newstate != state)
1217 			return -ESTALE;
1218 	}
1219 	if (state->n_wronly != 0) {
1220 		clear_bit(NFS_O_WRONLY_STATE, &state->flags);
1221 		ret = nfs4_open_recover_helper(opendata, FMODE_WRITE, &newstate);
1222 		if (ret != 0)
1223 			return ret;
1224 		if (newstate != state)
1225 			return -ESTALE;
1226 	}
1227 	if (state->n_rdonly != 0) {
1228 		clear_bit(NFS_O_RDONLY_STATE, &state->flags);
1229 		ret = nfs4_open_recover_helper(opendata, FMODE_READ, &newstate);
1230 		if (ret != 0)
1231 			return ret;
1232 		if (newstate != state)
1233 			return -ESTALE;
1234 	}
1235 	/*
1236 	 * We may have performed cached opens for all three recoveries.
1237 	 * Check if we need to update the current stateid.
1238 	 */
1239 	if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0 &&
1240 	    !nfs4_stateid_match(&state->stateid, &state->open_stateid)) {
1241 		write_seqlock(&state->seqlock);
1242 		if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0)
1243 			nfs4_stateid_copy(&state->stateid, &state->open_stateid);
1244 		write_sequnlock(&state->seqlock);
1245 	}
1246 	return 0;
1247 }
1248 
1249 /*
1250  * OPEN_RECLAIM:
1251  * 	reclaim state on the server after a reboot.
1252  */
1253 static int _nfs4_do_open_reclaim(struct nfs_open_context *ctx, struct nfs4_state *state)
1254 {
1255 	struct nfs_delegation *delegation;
1256 	struct nfs4_opendata *opendata;
1257 	fmode_t delegation_type = 0;
1258 	int status;
1259 
1260 	opendata = nfs4_open_recoverdata_alloc(ctx, state);
1261 	if (IS_ERR(opendata))
1262 		return PTR_ERR(opendata);
1263 	opendata->o_arg.claim = NFS4_OPEN_CLAIM_PREVIOUS;
1264 	opendata->o_arg.fh = NFS_FH(state->inode);
1265 	rcu_read_lock();
1266 	delegation = rcu_dereference(NFS_I(state->inode)->delegation);
1267 	if (delegation != NULL && test_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags) != 0)
1268 		delegation_type = delegation->type;
1269 	rcu_read_unlock();
1270 	opendata->o_arg.u.delegation_type = delegation_type;
1271 	status = nfs4_open_recover(opendata, state);
1272 	nfs4_opendata_put(opendata);
1273 	return status;
1274 }
1275 
1276 static int nfs4_do_open_reclaim(struct nfs_open_context *ctx, struct nfs4_state *state)
1277 {
1278 	struct nfs_server *server = NFS_SERVER(state->inode);
1279 	struct nfs4_exception exception = { };
1280 	int err;
1281 	do {
1282 		err = _nfs4_do_open_reclaim(ctx, state);
1283 		if (err != -NFS4ERR_DELAY)
1284 			break;
1285 		nfs4_handle_exception(server, err, &exception);
1286 	} while (exception.retry);
1287 	return err;
1288 }
1289 
1290 static int nfs4_open_reclaim(struct nfs4_state_owner *sp, struct nfs4_state *state)
1291 {
1292 	struct nfs_open_context *ctx;
1293 	int ret;
1294 
1295 	ctx = nfs4_state_find_open_context(state);
1296 	if (IS_ERR(ctx))
1297 		return PTR_ERR(ctx);
1298 	ret = nfs4_do_open_reclaim(ctx, state);
1299 	put_nfs_open_context(ctx);
1300 	return ret;
1301 }
1302 
1303 static int _nfs4_open_delegation_recall(struct nfs_open_context *ctx, struct nfs4_state *state, const nfs4_stateid *stateid)
1304 {
1305 	struct nfs4_opendata *opendata;
1306 	int ret;
1307 
1308 	opendata = nfs4_open_recoverdata_alloc(ctx, state);
1309 	if (IS_ERR(opendata))
1310 		return PTR_ERR(opendata);
1311 	opendata->o_arg.claim = NFS4_OPEN_CLAIM_DELEGATE_CUR;
1312 	nfs4_stateid_copy(&opendata->o_arg.u.delegation, stateid);
1313 	ret = nfs4_open_recover(opendata, state);
1314 	nfs4_opendata_put(opendata);
1315 	return ret;
1316 }
1317 
1318 int nfs4_open_delegation_recall(struct nfs_open_context *ctx, struct nfs4_state *state, const nfs4_stateid *stateid)
1319 {
1320 	struct nfs4_exception exception = { };
1321 	struct nfs_server *server = NFS_SERVER(state->inode);
1322 	int err;
1323 	do {
1324 		err = _nfs4_open_delegation_recall(ctx, state, stateid);
1325 		switch (err) {
1326 			case 0:
1327 			case -ENOENT:
1328 			case -ESTALE:
1329 				goto out;
1330 			case -NFS4ERR_BADSESSION:
1331 			case -NFS4ERR_BADSLOT:
1332 			case -NFS4ERR_BAD_HIGH_SLOT:
1333 			case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
1334 			case -NFS4ERR_DEADSESSION:
1335 				nfs4_schedule_session_recovery(server->nfs_client->cl_session);
1336 				goto out;
1337 			case -NFS4ERR_STALE_CLIENTID:
1338 			case -NFS4ERR_STALE_STATEID:
1339 			case -NFS4ERR_EXPIRED:
1340 				/* Don't recall a delegation if it was lost */
1341 				nfs4_schedule_lease_recovery(server->nfs_client);
1342 				goto out;
1343 			case -ERESTARTSYS:
1344 				/*
1345 				 * The show must go on: exit, but mark the
1346 				 * stateid as needing recovery.
1347 				 */
1348 			case -NFS4ERR_DELEG_REVOKED:
1349 			case -NFS4ERR_ADMIN_REVOKED:
1350 			case -NFS4ERR_BAD_STATEID:
1351 				nfs_inode_find_state_and_recover(state->inode,
1352 						stateid);
1353 				nfs4_schedule_stateid_recovery(server, state);
1354 			case -EKEYEXPIRED:
1355 				/*
1356 				 * User RPCSEC_GSS context has expired.
1357 				 * We cannot recover this stateid now, so
1358 				 * skip it and allow recovery thread to
1359 				 * proceed.
1360 				 */
1361 			case -ENOMEM:
1362 				err = 0;
1363 				goto out;
1364 		}
1365 		err = nfs4_handle_exception(server, err, &exception);
1366 	} while (exception.retry);
1367 out:
1368 	return err;
1369 }
1370 
1371 static void nfs4_open_confirm_done(struct rpc_task *task, void *calldata)
1372 {
1373 	struct nfs4_opendata *data = calldata;
1374 
1375 	data->rpc_status = task->tk_status;
1376 	if (data->rpc_status == 0) {
1377 		nfs4_stateid_copy(&data->o_res.stateid, &data->c_res.stateid);
1378 		nfs_confirm_seqid(&data->owner->so_seqid, 0);
1379 		renew_lease(data->o_res.server, data->timestamp);
1380 		data->rpc_done = 1;
1381 	}
1382 }
1383 
1384 static void nfs4_open_confirm_release(void *calldata)
1385 {
1386 	struct nfs4_opendata *data = calldata;
1387 	struct nfs4_state *state = NULL;
1388 
1389 	/* If this request hasn't been cancelled, do nothing */
1390 	if (data->cancelled == 0)
1391 		goto out_free;
1392 	/* In case of error, no cleanup! */
1393 	if (!data->rpc_done)
1394 		goto out_free;
1395 	state = nfs4_opendata_to_nfs4_state(data);
1396 	if (!IS_ERR(state))
1397 		nfs4_close_state(state, data->o_arg.fmode);
1398 out_free:
1399 	nfs4_opendata_put(data);
1400 }
1401 
1402 static const struct rpc_call_ops nfs4_open_confirm_ops = {
1403 	.rpc_call_done = nfs4_open_confirm_done,
1404 	.rpc_release = nfs4_open_confirm_release,
1405 };
1406 
1407 /*
1408  * Note: On error, nfs4_proc_open_confirm will free the struct nfs4_opendata
1409  */
1410 static int _nfs4_proc_open_confirm(struct nfs4_opendata *data)
1411 {
1412 	struct nfs_server *server = NFS_SERVER(data->dir->d_inode);
1413 	struct rpc_task *task;
1414 	struct  rpc_message msg = {
1415 		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_CONFIRM],
1416 		.rpc_argp = &data->c_arg,
1417 		.rpc_resp = &data->c_res,
1418 		.rpc_cred = data->owner->so_cred,
1419 	};
1420 	struct rpc_task_setup task_setup_data = {
1421 		.rpc_client = server->client,
1422 		.rpc_message = &msg,
1423 		.callback_ops = &nfs4_open_confirm_ops,
1424 		.callback_data = data,
1425 		.workqueue = nfsiod_workqueue,
1426 		.flags = RPC_TASK_ASYNC,
1427 	};
1428 	int status;
1429 
1430 	kref_get(&data->kref);
1431 	data->rpc_done = 0;
1432 	data->rpc_status = 0;
1433 	data->timestamp = jiffies;
1434 	task = rpc_run_task(&task_setup_data);
1435 	if (IS_ERR(task))
1436 		return PTR_ERR(task);
1437 	status = nfs4_wait_for_completion_rpc_task(task);
1438 	if (status != 0) {
1439 		data->cancelled = 1;
1440 		smp_wmb();
1441 	} else
1442 		status = data->rpc_status;
1443 	rpc_put_task(task);
1444 	return status;
1445 }
1446 
1447 static void nfs4_open_prepare(struct rpc_task *task, void *calldata)
1448 {
1449 	struct nfs4_opendata *data = calldata;
1450 	struct nfs4_state_owner *sp = data->owner;
1451 
1452 	if (nfs_wait_on_sequence(data->o_arg.seqid, task) != 0)
1453 		return;
1454 	/*
1455 	 * Check if we still need to send an OPEN call, or if we can use
1456 	 * a delegation instead.
1457 	 */
1458 	if (data->state != NULL) {
1459 		struct nfs_delegation *delegation;
1460 
1461 		if (can_open_cached(data->state, data->o_arg.fmode, data->o_arg.open_flags))
1462 			goto out_no_action;
1463 		rcu_read_lock();
1464 		delegation = rcu_dereference(NFS_I(data->state->inode)->delegation);
1465 		if (data->o_arg.claim != NFS4_OPEN_CLAIM_DELEGATE_CUR &&
1466 		    can_open_delegated(delegation, data->o_arg.fmode))
1467 			goto unlock_no_action;
1468 		rcu_read_unlock();
1469 	}
1470 	/* Update client id. */
1471 	data->o_arg.clientid = sp->so_server->nfs_client->cl_clientid;
1472 	if (data->o_arg.claim == NFS4_OPEN_CLAIM_PREVIOUS) {
1473 		task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_NOATTR];
1474 		nfs_copy_fh(&data->o_res.fh, data->o_arg.fh);
1475 	}
1476 	data->timestamp = jiffies;
1477 	if (nfs4_setup_sequence(data->o_arg.server,
1478 				&data->o_arg.seq_args,
1479 				&data->o_res.seq_res, task))
1480 		return;
1481 	rpc_call_start(task);
1482 	return;
1483 unlock_no_action:
1484 	rcu_read_unlock();
1485 out_no_action:
1486 	task->tk_action = NULL;
1487 
1488 }
1489 
1490 static void nfs4_recover_open_prepare(struct rpc_task *task, void *calldata)
1491 {
1492 	rpc_task_set_priority(task, RPC_PRIORITY_PRIVILEGED);
1493 	nfs4_open_prepare(task, calldata);
1494 }
1495 
1496 static void nfs4_open_done(struct rpc_task *task, void *calldata)
1497 {
1498 	struct nfs4_opendata *data = calldata;
1499 
1500 	data->rpc_status = task->tk_status;
1501 
1502 	if (!nfs4_sequence_done(task, &data->o_res.seq_res))
1503 		return;
1504 
1505 	if (task->tk_status == 0) {
1506 		switch (data->o_res.f_attr->mode & S_IFMT) {
1507 			case S_IFREG:
1508 				break;
1509 			case S_IFLNK:
1510 				data->rpc_status = -ELOOP;
1511 				break;
1512 			case S_IFDIR:
1513 				data->rpc_status = -EISDIR;
1514 				break;
1515 			default:
1516 				data->rpc_status = -ENOTDIR;
1517 		}
1518 		renew_lease(data->o_res.server, data->timestamp);
1519 		if (!(data->o_res.rflags & NFS4_OPEN_RESULT_CONFIRM))
1520 			nfs_confirm_seqid(&data->owner->so_seqid, 0);
1521 	}
1522 	data->rpc_done = 1;
1523 }
1524 
1525 static void nfs4_open_release(void *calldata)
1526 {
1527 	struct nfs4_opendata *data = calldata;
1528 	struct nfs4_state *state = NULL;
1529 
1530 	/* If this request hasn't been cancelled, do nothing */
1531 	if (data->cancelled == 0)
1532 		goto out_free;
1533 	/* In case of error, no cleanup! */
1534 	if (data->rpc_status != 0 || !data->rpc_done)
1535 		goto out_free;
1536 	/* In case we need an open_confirm, no cleanup! */
1537 	if (data->o_res.rflags & NFS4_OPEN_RESULT_CONFIRM)
1538 		goto out_free;
1539 	state = nfs4_opendata_to_nfs4_state(data);
1540 	if (!IS_ERR(state))
1541 		nfs4_close_state(state, data->o_arg.fmode);
1542 out_free:
1543 	nfs4_opendata_put(data);
1544 }
1545 
1546 static const struct rpc_call_ops nfs4_open_ops = {
1547 	.rpc_call_prepare = nfs4_open_prepare,
1548 	.rpc_call_done = nfs4_open_done,
1549 	.rpc_release = nfs4_open_release,
1550 };
1551 
1552 static const struct rpc_call_ops nfs4_recover_open_ops = {
1553 	.rpc_call_prepare = nfs4_recover_open_prepare,
1554 	.rpc_call_done = nfs4_open_done,
1555 	.rpc_release = nfs4_open_release,
1556 };
1557 
1558 static int nfs4_run_open_task(struct nfs4_opendata *data, int isrecover)
1559 {
1560 	struct inode *dir = data->dir->d_inode;
1561 	struct nfs_server *server = NFS_SERVER(dir);
1562 	struct nfs_openargs *o_arg = &data->o_arg;
1563 	struct nfs_openres *o_res = &data->o_res;
1564 	struct rpc_task *task;
1565 	struct rpc_message msg = {
1566 		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN],
1567 		.rpc_argp = o_arg,
1568 		.rpc_resp = o_res,
1569 		.rpc_cred = data->owner->so_cred,
1570 	};
1571 	struct rpc_task_setup task_setup_data = {
1572 		.rpc_client = server->client,
1573 		.rpc_message = &msg,
1574 		.callback_ops = &nfs4_open_ops,
1575 		.callback_data = data,
1576 		.workqueue = nfsiod_workqueue,
1577 		.flags = RPC_TASK_ASYNC,
1578 	};
1579 	int status;
1580 
1581 	nfs41_init_sequence(&o_arg->seq_args, &o_res->seq_res, 1);
1582 	kref_get(&data->kref);
1583 	data->rpc_done = 0;
1584 	data->rpc_status = 0;
1585 	data->cancelled = 0;
1586 	if (isrecover)
1587 		task_setup_data.callback_ops = &nfs4_recover_open_ops;
1588 	task = rpc_run_task(&task_setup_data);
1589 	if (IS_ERR(task))
1590 		return PTR_ERR(task);
1591 	status = nfs4_wait_for_completion_rpc_task(task);
1592 	if (status != 0) {
1593 		data->cancelled = 1;
1594 		smp_wmb();
1595 	} else
1596 		status = data->rpc_status;
1597 	rpc_put_task(task);
1598 
1599 	return status;
1600 }
1601 
1602 static int _nfs4_recover_proc_open(struct nfs4_opendata *data)
1603 {
1604 	struct inode *dir = data->dir->d_inode;
1605 	struct nfs_openres *o_res = &data->o_res;
1606 	int status;
1607 
1608 	status = nfs4_run_open_task(data, 1);
1609 	if (status != 0 || !data->rpc_done)
1610 		return status;
1611 
1612 	nfs_fattr_map_and_free_names(NFS_SERVER(dir), &data->f_attr);
1613 
1614 	nfs_refresh_inode(dir, o_res->dir_attr);
1615 
1616 	if (o_res->rflags & NFS4_OPEN_RESULT_CONFIRM) {
1617 		status = _nfs4_proc_open_confirm(data);
1618 		if (status != 0)
1619 			return status;
1620 	}
1621 
1622 	return status;
1623 }
1624 
1625 /*
1626  * Note: On error, nfs4_proc_open will free the struct nfs4_opendata
1627  */
1628 static int _nfs4_proc_open(struct nfs4_opendata *data)
1629 {
1630 	struct inode *dir = data->dir->d_inode;
1631 	struct nfs_server *server = NFS_SERVER(dir);
1632 	struct nfs_openargs *o_arg = &data->o_arg;
1633 	struct nfs_openres *o_res = &data->o_res;
1634 	int status;
1635 
1636 	status = nfs4_run_open_task(data, 0);
1637 	if (!data->rpc_done)
1638 		return status;
1639 	if (status != 0) {
1640 		if (status == -NFS4ERR_BADNAME &&
1641 				!(o_arg->open_flags & O_CREAT))
1642 			return -ENOENT;
1643 		return status;
1644 	}
1645 
1646 	nfs_fattr_map_and_free_names(server, &data->f_attr);
1647 
1648 	if (o_arg->open_flags & O_CREAT) {
1649 		update_changeattr(dir, &o_res->cinfo);
1650 		nfs_post_op_update_inode(dir, o_res->dir_attr);
1651 	} else
1652 		nfs_refresh_inode(dir, o_res->dir_attr);
1653 	if ((o_res->rflags & NFS4_OPEN_RESULT_LOCKTYPE_POSIX) == 0)
1654 		server->caps &= ~NFS_CAP_POSIX_LOCK;
1655 	if (o_res->rflags & NFS4_OPEN_RESULT_CONFIRM) {
1656 		status = _nfs4_proc_open_confirm(data);
1657 		if (status != 0)
1658 			return status;
1659 	}
1660 	if (!(o_res->f_attr->valid & NFS_ATTR_FATTR))
1661 		_nfs4_proc_getattr(server, &o_res->fh, o_res->f_attr);
1662 	return 0;
1663 }
1664 
1665 static int nfs4_client_recover_expired_lease(struct nfs_client *clp)
1666 {
1667 	unsigned int loop;
1668 	int ret;
1669 
1670 	for (loop = NFS4_MAX_LOOP_ON_RECOVER; loop != 0; loop--) {
1671 		ret = nfs4_wait_clnt_recover(clp);
1672 		if (ret != 0)
1673 			break;
1674 		if (!test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state) &&
1675 		    !test_bit(NFS4CLNT_CHECK_LEASE, &clp->cl_state))
1676 			break;
1677 		nfs4_schedule_state_manager(clp);
1678 		ret = -EIO;
1679 	}
1680 	return ret;
1681 }
1682 
1683 static int nfs4_recover_expired_lease(struct nfs_server *server)
1684 {
1685 	return nfs4_client_recover_expired_lease(server->nfs_client);
1686 }
1687 
1688 /*
1689  * OPEN_EXPIRED:
1690  * 	reclaim state on the server after a network partition.
1691  * 	Assumes caller holds the appropriate lock
1692  */
1693 static int _nfs4_open_expired(struct nfs_open_context *ctx, struct nfs4_state *state)
1694 {
1695 	struct nfs4_opendata *opendata;
1696 	int ret;
1697 
1698 	opendata = nfs4_open_recoverdata_alloc(ctx, state);
1699 	if (IS_ERR(opendata))
1700 		return PTR_ERR(opendata);
1701 	ret = nfs4_open_recover(opendata, state);
1702 	if (ret == -ESTALE)
1703 		d_drop(ctx->dentry);
1704 	nfs4_opendata_put(opendata);
1705 	return ret;
1706 }
1707 
1708 static int nfs4_do_open_expired(struct nfs_open_context *ctx, struct nfs4_state *state)
1709 {
1710 	struct nfs_server *server = NFS_SERVER(state->inode);
1711 	struct nfs4_exception exception = { };
1712 	int err;
1713 
1714 	do {
1715 		err = _nfs4_open_expired(ctx, state);
1716 		switch (err) {
1717 		default:
1718 			goto out;
1719 		case -NFS4ERR_GRACE:
1720 		case -NFS4ERR_DELAY:
1721 			nfs4_handle_exception(server, err, &exception);
1722 			err = 0;
1723 		}
1724 	} while (exception.retry);
1725 out:
1726 	return err;
1727 }
1728 
1729 static int nfs4_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *state)
1730 {
1731 	struct nfs_open_context *ctx;
1732 	int ret;
1733 
1734 	ctx = nfs4_state_find_open_context(state);
1735 	if (IS_ERR(ctx))
1736 		return PTR_ERR(ctx);
1737 	ret = nfs4_do_open_expired(ctx, state);
1738 	put_nfs_open_context(ctx);
1739 	return ret;
1740 }
1741 
1742 #if defined(CONFIG_NFS_V4_1)
1743 static int nfs41_check_expired_stateid(struct nfs4_state *state, nfs4_stateid *stateid, unsigned int flags)
1744 {
1745 	int status = NFS_OK;
1746 	struct nfs_server *server = NFS_SERVER(state->inode);
1747 
1748 	if (state->flags & flags) {
1749 		status = nfs41_test_stateid(server, stateid);
1750 		if (status != NFS_OK) {
1751 			nfs41_free_stateid(server, stateid);
1752 			state->flags &= ~flags;
1753 		}
1754 	}
1755 	return status;
1756 }
1757 
1758 static int nfs41_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *state)
1759 {
1760 	int deleg_status, open_status;
1761 	int deleg_flags = 1 << NFS_DELEGATED_STATE;
1762 	int open_flags = (1 << NFS_O_RDONLY_STATE) | (1 << NFS_O_WRONLY_STATE) | (1 << NFS_O_RDWR_STATE);
1763 
1764 	deleg_status = nfs41_check_expired_stateid(state, &state->stateid, deleg_flags);
1765 	open_status = nfs41_check_expired_stateid(state,  &state->open_stateid, open_flags);
1766 
1767 	if ((deleg_status == NFS_OK) && (open_status == NFS_OK))
1768 		return NFS_OK;
1769 	return nfs4_open_expired(sp, state);
1770 }
1771 #endif
1772 
1773 /*
1774  * On an EXCLUSIVE create, the server should send back a bitmask with FATTR4_*
1775  * fields corresponding to the attributes that were used to store the verifier.
1776  * Make sure we clobber those fields in the later setattr call.
1777  */
1778 static inline void nfs4_exclusive_attrset(struct nfs4_opendata *opendata, struct iattr *sattr)
1779 {
1780 	if ((opendata->o_res.attrset[1] & FATTR4_WORD1_TIME_ACCESS) &&
1781 	    !(sattr->ia_valid & ATTR_ATIME_SET))
1782 		sattr->ia_valid |= ATTR_ATIME;
1783 
1784 	if ((opendata->o_res.attrset[1] & FATTR4_WORD1_TIME_MODIFY) &&
1785 	    !(sattr->ia_valid & ATTR_MTIME_SET))
1786 		sattr->ia_valid |= ATTR_MTIME;
1787 }
1788 
1789 /*
1790  * Returns a referenced nfs4_state
1791  */
1792 static int _nfs4_do_open(struct inode *dir, struct dentry *dentry, fmode_t fmode, int flags, struct iattr *sattr, struct rpc_cred *cred, struct nfs4_state **res)
1793 {
1794 	struct nfs4_state_owner  *sp;
1795 	struct nfs4_state     *state = NULL;
1796 	struct nfs_server       *server = NFS_SERVER(dir);
1797 	struct nfs4_opendata *opendata;
1798 	int status;
1799 
1800 	/* Protect against reboot recovery conflicts */
1801 	status = -ENOMEM;
1802 	sp = nfs4_get_state_owner(server, cred, GFP_KERNEL);
1803 	if (sp == NULL) {
1804 		dprintk("nfs4_do_open: nfs4_get_state_owner failed!\n");
1805 		goto out_err;
1806 	}
1807 	status = nfs4_recover_expired_lease(server);
1808 	if (status != 0)
1809 		goto err_put_state_owner;
1810 	if (dentry->d_inode != NULL)
1811 		nfs4_return_incompatible_delegation(dentry->d_inode, fmode);
1812 	status = -ENOMEM;
1813 	opendata = nfs4_opendata_alloc(dentry, sp, fmode, flags, sattr, GFP_KERNEL);
1814 	if (opendata == NULL)
1815 		goto err_put_state_owner;
1816 
1817 	if (dentry->d_inode != NULL)
1818 		opendata->state = nfs4_get_open_state(dentry->d_inode, sp);
1819 
1820 	status = _nfs4_proc_open(opendata);
1821 	if (status != 0)
1822 		goto err_opendata_put;
1823 
1824 	state = nfs4_opendata_to_nfs4_state(opendata);
1825 	status = PTR_ERR(state);
1826 	if (IS_ERR(state))
1827 		goto err_opendata_put;
1828 	if (server->caps & NFS_CAP_POSIX_LOCK)
1829 		set_bit(NFS_STATE_POSIX_LOCKS, &state->flags);
1830 
1831 	if (opendata->o_arg.open_flags & O_EXCL) {
1832 		nfs4_exclusive_attrset(opendata, sattr);
1833 
1834 		nfs_fattr_init(opendata->o_res.f_attr);
1835 		status = nfs4_do_setattr(state->inode, cred,
1836 				opendata->o_res.f_attr, sattr,
1837 				state);
1838 		if (status == 0)
1839 			nfs_setattr_update_inode(state->inode, sattr);
1840 		nfs_post_op_update_inode(state->inode, opendata->o_res.f_attr);
1841 	}
1842 	nfs4_opendata_put(opendata);
1843 	nfs4_put_state_owner(sp);
1844 	*res = state;
1845 	return 0;
1846 err_opendata_put:
1847 	nfs4_opendata_put(opendata);
1848 err_put_state_owner:
1849 	nfs4_put_state_owner(sp);
1850 out_err:
1851 	*res = NULL;
1852 	return status;
1853 }
1854 
1855 
1856 static struct nfs4_state *nfs4_do_open(struct inode *dir, struct dentry *dentry, fmode_t fmode, int flags, struct iattr *sattr, struct rpc_cred *cred)
1857 {
1858 	struct nfs4_exception exception = { };
1859 	struct nfs4_state *res;
1860 	int status;
1861 
1862 	do {
1863 		status = _nfs4_do_open(dir, dentry, fmode, flags, sattr, cred, &res);
1864 		if (status == 0)
1865 			break;
1866 		/* NOTE: BAD_SEQID means the server and client disagree about the
1867 		 * book-keeping w.r.t. state-changing operations
1868 		 * (OPEN/CLOSE/LOCK/LOCKU...)
1869 		 * It is actually a sign of a bug on the client or on the server.
1870 		 *
1871 		 * If we receive a BAD_SEQID error in the particular case of
1872 		 * doing an OPEN, we assume that nfs_increment_open_seqid() will
1873 		 * have unhashed the old state_owner for us, and that we can
1874 		 * therefore safely retry using a new one. We should still warn
1875 		 * the user though...
1876 		 */
1877 		if (status == -NFS4ERR_BAD_SEQID) {
1878 			pr_warn_ratelimited("NFS: v4 server %s "
1879 					"returned a bad sequence-id error!\n",
1880 					NFS_SERVER(dir)->nfs_client->cl_hostname);
1881 			exception.retry = 1;
1882 			continue;
1883 		}
1884 		/*
1885 		 * BAD_STATEID on OPEN means that the server cancelled our
1886 		 * state before it received the OPEN_CONFIRM.
1887 		 * Recover by retrying the request as per the discussion
1888 		 * on Page 181 of RFC3530.
1889 		 */
1890 		if (status == -NFS4ERR_BAD_STATEID) {
1891 			exception.retry = 1;
1892 			continue;
1893 		}
1894 		if (status == -EAGAIN) {
1895 			/* We must have found a delegation */
1896 			exception.retry = 1;
1897 			continue;
1898 		}
1899 		res = ERR_PTR(nfs4_handle_exception(NFS_SERVER(dir),
1900 					status, &exception));
1901 	} while (exception.retry);
1902 	return res;
1903 }
1904 
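/*
 * Send a SETATTR.  The stateid is chosen in order of preference: the
 * open/lock stateid when a struct nfs4_state is supplied, otherwise a
 * write delegation stateid, otherwise the zero stateid.  On success
 * the lease is renewed if an open state was used.
 */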
1905 static int _nfs4_do_setattr(struct inode *inode, struct rpc_cred *cred,
1906 			    struct nfs_fattr *fattr, struct iattr *sattr,
1907 			    struct nfs4_state *state)
1908 {
1909 	struct nfs_server *server = NFS_SERVER(inode);
1910 	struct nfs_setattrargs  arg = {
1911 		.fh		= NFS_FH(inode),
1912 		.iap		= sattr,
1913 		.server		= server,
1914 		.bitmask	= server->attr_bitmask,
1915 	};
1916 	struct nfs_setattrres  res = {
1917 		.fattr		= fattr,
1918 		.server		= server,
1919 	};
1920 	struct rpc_message msg = {
1921 		.rpc_proc	= &nfs4_procedures[NFSPROC4_CLNT_SETATTR],
1922 		.rpc_argp	= &arg,
1923 		.rpc_resp	= &res,
1924 		.rpc_cred	= cred,
1925 	};
1926 	unsigned long timestamp = jiffies;
1927 	int status;
1928 
1929 	nfs_fattr_init(fattr);
1930 
1931 	if (state != NULL) {
1932 		nfs4_select_rw_stateid(&arg.stateid, state, FMODE_WRITE,
1933 				current->files, current->tgid);
1934 	} else if (nfs4_copy_delegation_stateid(&arg.stateid, inode,
1935 				FMODE_WRITE)) {
1936 		/* Use that stateid */
1937 	} else
1938 		nfs4_stateid_copy(&arg.stateid, &zero_stateid);
1939 
1940 	status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1);
1941 	if (status == 0 && state != NULL)
1942 		renew_lease(server, timestamp);
1943 	return status;
1944 }
1945 
1946 static int nfs4_do_setattr(struct inode *inode, struct rpc_cred *cred,
1947 			   struct nfs_fattr *fattr, struct iattr *sattr,
1948 			   struct nfs4_state *state)
1949 {
1950 	struct nfs_server *server = NFS_SERVER(inode);
1951 	struct nfs4_exception exception = {
1952 		.state = state,
1953 		.inode = inode,
1954 	};
1955 	int err;
1956 	do {
1957 		err = _nfs4_do_setattr(inode, cred, fattr, sattr, state);
1958 		switch (err) {
1959 		case -NFS4ERR_OPENMODE:
1960 			if (state && !(state->state & FMODE_WRITE)) {
1961 				err = -EBADF;
1962 				if (sattr->ia_valid & ATTR_OPEN)
1963 					err = -EACCES;
1964 				goto out;
1965 			}
1966 		}
1967 		err = nfs4_handle_exception(server, err, &exception);
1968 	} while (exception.retry);
1969 out:
1970 	return err;
1971 }
1972 
1973 struct nfs4_closedata {
1974 	struct inode *inode;
1975 	struct nfs4_state *state;
1976 	struct nfs_closeargs arg;
1977 	struct nfs_closeres res;
1978 	struct nfs_fattr fattr;
1979 	unsigned long timestamp;
1980 	bool roc;
1981 	u32 roc_barrier;
1982 };
1983 
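/*
 * Free an nfs4_closedata, dropping every reference it pins: the pNFS
 * return-on-close bookkeeping, the open state, the CLOSE seqid, the
 * state owner and the superblock reference.
 */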
1984 static void nfs4_free_closedata(void *data)
1985 {
1986 	struct nfs4_closedata *calldata = data;
1987 	struct nfs4_state_owner *sp = calldata->state->owner;
1988 	struct super_block *sb = calldata->state->inode->i_sb;
1989 
1990 	if (calldata->roc)
1991 		pnfs_roc_release(calldata->state->inode);
1992 	nfs4_put_open_state(calldata->state);
1993 	nfs_free_seqid(calldata->arg.seqid);
1994 	nfs4_put_state_owner(sp);
1995 	nfs_sb_deactive(sb);
1996 	kfree(calldata);
1997 }
1998 
1999 static void nfs4_close_clear_stateid_flags(struct nfs4_state *state,
2000 		fmode_t fmode)
2001 {
2002 	spin_lock(&state->owner->so_lock);
2003 	if (!(fmode & FMODE_READ))
2004 		clear_bit(NFS_O_RDONLY_STATE, &state->flags);
2005 	if (!(fmode & FMODE_WRITE))
2006 		clear_bit(NFS_O_WRONLY_STATE, &state->flags);
2007 	clear_bit(NFS_O_RDWR_STATE, &state->flags);
2008 	spin_unlock(&state->owner->so_lock);
2009 }
2010 
2011 static void nfs4_close_done(struct rpc_task *task, void *data)
2012 {
2013 	struct nfs4_closedata *calldata = data;
2014 	struct nfs4_state *state = calldata->state;
2015 	struct nfs_server *server = NFS_SERVER(calldata->inode);
2016 
2017 	dprintk("%s: begin!\n", __func__);
2018 	if (!nfs4_sequence_done(task, &calldata->res.seq_res))
2019 		return;
2020 	/* We are done with the inode, and in the process of freeing
2021 	 * the state_owner. We keep this around to process errors.
2022 	 */
2023 	switch (task->tk_status) {
2024 		case 0:
2025 			if (calldata->roc)
2026 				pnfs_roc_set_barrier(state->inode,
2027 						     calldata->roc_barrier);
2028 			nfs_set_open_stateid(state, &calldata->res.stateid, 0);
2029 			renew_lease(server, calldata->timestamp);
2030 			nfs4_close_clear_stateid_flags(state,
2031 					calldata->arg.fmode);
2032 			break;
2033 		case -NFS4ERR_STALE_STATEID:
2034 		case -NFS4ERR_OLD_STATEID:
2035 		case -NFS4ERR_BAD_STATEID:
2036 		case -NFS4ERR_EXPIRED:
2037 			if (calldata->arg.fmode == 0)
2038 				break;
2039 		default:
2040 			if (nfs4_async_handle_error(task, server, state) == -EAGAIN)
2041 				rpc_restart_call_prepare(task);
2042 	}
2043 	nfs_release_seqid(calldata->arg.seqid);
2044 	nfs_refresh_inode(calldata->inode, calldata->res.fattr);
2045 	dprintk("%s: done, ret = %d!\n", __func__, task->tk_status);
2046 }
2047 
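/*
 * Decide what this CLOSE request should actually send: an
 * OPEN_DOWNGRADE if some open modes are still in use, a full CLOSE if
 * none remain, or nothing at all if the matching stateid flags were
 * never set.
 */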
2048 static void nfs4_close_prepare(struct rpc_task *task, void *data)
2049 {
2050 	struct nfs4_closedata *calldata = data;
2051 	struct nfs4_state *state = calldata->state;
2052 	int call_close = 0;
2053 
2054 	dprintk("%s: begin!\n", __func__);
2055 	if (nfs_wait_on_sequence(calldata->arg.seqid, task) != 0)
2056 		return;
2057 
2058 	task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_DOWNGRADE];
2059 	calldata->arg.fmode = FMODE_READ|FMODE_WRITE;
2060 	spin_lock(&state->owner->so_lock);
2061 	/* Calculate the change in open mode */
2062 	if (state->n_rdwr == 0) {
2063 		if (state->n_rdonly == 0) {
2064 			call_close |= test_bit(NFS_O_RDONLY_STATE, &state->flags);
2065 			call_close |= test_bit(NFS_O_RDWR_STATE, &state->flags);
2066 			calldata->arg.fmode &= ~FMODE_READ;
2067 		}
2068 		if (state->n_wronly == 0) {
2069 			call_close |= test_bit(NFS_O_WRONLY_STATE, &state->flags);
2070 			call_close |= test_bit(NFS_O_RDWR_STATE, &state->flags);
2071 			calldata->arg.fmode &= ~FMODE_WRITE;
2072 		}
2073 	}
2074 	spin_unlock(&state->owner->so_lock);
2075 
2076 	if (!call_close) {
2077 		/* Note: exit _without_ calling nfs4_close_done */
2078 		task->tk_action = NULL;
2079 		goto out;
2080 	}
2081 
2082 	if (calldata->arg.fmode == 0) {
2083 		task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CLOSE];
2084 		if (calldata->roc &&
2085 		    pnfs_roc_drain(calldata->inode, &calldata->roc_barrier)) {
2086 			rpc_sleep_on(&NFS_SERVER(calldata->inode)->roc_rpcwaitq,
2087 				     task, NULL);
2088 			goto out;
2089 		}
2090 	}
2091 
2092 	nfs_fattr_init(calldata->res.fattr);
2093 	calldata->timestamp = jiffies;
2094 	if (nfs4_setup_sequence(NFS_SERVER(calldata->inode),
2095 				&calldata->arg.seq_args,
2096 				&calldata->res.seq_res,
2097 				task))
2098 		goto out;
2099 	rpc_call_start(task);
2100 out:
2101 	dprintk("%s: done!\n", __func__);
2102 }
2103 
2104 static const struct rpc_call_ops nfs4_close_ops = {
2105 	.rpc_call_prepare = nfs4_close_prepare,
2106 	.rpc_call_done = nfs4_close_done,
2107 	.rpc_release = nfs4_free_closedata,
2108 };
2109 
2110 /*
2111  * It is possible for data to be read from or written to a mem-mapped file
2112  * after the sys_close call (which hits the vfs layer as a flush).
2113  * This means that we can't safely call NFSv4 close on a file until
2114  * the inode is cleared. This in turn means that we are not good
2115  * NFSv4 citizens - we do not tell the server to update the file's
2116  * share state even when we are done with one of the three share
2117  * stateids in the inode.
2118  *
2119  * NOTE: Caller must be holding the sp->so_owner semaphore!
2120  */
2121 int nfs4_do_close(struct nfs4_state *state, gfp_t gfp_mask, int wait, bool roc)
2122 {
2123 	struct nfs_server *server = NFS_SERVER(state->inode);
2124 	struct nfs4_closedata *calldata;
2125 	struct nfs4_state_owner *sp = state->owner;
2126 	struct rpc_task *task;
2127 	struct rpc_message msg = {
2128 		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CLOSE],
2129 		.rpc_cred = state->owner->so_cred,
2130 	};
2131 	struct rpc_task_setup task_setup_data = {
2132 		.rpc_client = server->client,
2133 		.rpc_message = &msg,
2134 		.callback_ops = &nfs4_close_ops,
2135 		.workqueue = nfsiod_workqueue,
2136 		.flags = RPC_TASK_ASYNC,
2137 	};
2138 	int status = -ENOMEM;
2139 
2140 	calldata = kzalloc(sizeof(*calldata), gfp_mask);
2141 	if (calldata == NULL)
2142 		goto out;
2143 	nfs41_init_sequence(&calldata->arg.seq_args, &calldata->res.seq_res, 1);
2144 	calldata->inode = state->inode;
2145 	calldata->state = state;
2146 	calldata->arg.fh = NFS_FH(state->inode);
2147 	calldata->arg.stateid = &state->open_stateid;
2148 	/* Serialization for the sequence id */
2149 	calldata->arg.seqid = nfs_alloc_seqid(&state->owner->so_seqid, gfp_mask);
2150 	if (calldata->arg.seqid == NULL)
2151 		goto out_free_calldata;
2152 	calldata->arg.fmode = 0;
2153 	calldata->arg.bitmask = server->cache_consistency_bitmask;
2154 	calldata->res.fattr = &calldata->fattr;
2155 	calldata->res.seqid = calldata->arg.seqid;
2156 	calldata->res.server = server;
2157 	calldata->roc = roc;
2158 	nfs_sb_active(calldata->inode->i_sb);
2159 
2160 	msg.rpc_argp = &calldata->arg;
2161 	msg.rpc_resp = &calldata->res;
2162 	task_setup_data.callback_data = calldata;
2163 	task = rpc_run_task(&task_setup_data);
2164 	if (IS_ERR(task))
2165 		return PTR_ERR(task);
2166 	status = 0;
2167 	if (wait)
2168 		status = rpc_wait_for_completion_task(task);
2169 	rpc_put_task(task);
2170 	return status;
2171 out_free_calldata:
2172 	kfree(calldata);
2173 out:
2174 	if (roc)
2175 		pnfs_roc_release(state->inode);
2176 	nfs4_put_open_state(state);
2177 	nfs4_put_state_owner(sp);
2178 	return status;
2179 }
2180 
2181 static struct inode *
2182 nfs4_atomic_open(struct inode *dir, struct nfs_open_context *ctx, int open_flags, struct iattr *attr)
2183 {
2184 	struct nfs4_state *state;
2185 
2186 	/* Protect against concurrent sillydeletes */
2187 	state = nfs4_do_open(dir, ctx->dentry, ctx->mode, open_flags, attr, ctx->cred);
2188 	if (IS_ERR(state))
2189 		return ERR_CAST(state);
2190 	ctx->state = state;
2191 	return igrab(state->inode);
2192 }
2193 
2194 static void nfs4_close_context(struct nfs_open_context *ctx, int is_sync)
2195 {
2196 	if (ctx->state == NULL)
2197 		return;
2198 	if (is_sync)
2199 		nfs4_close_sync(ctx->state, ctx->mode);
2200 	else
2201 		nfs4_close_state(ctx->state, ctx->mode);
2202 }
2203 
2204 static int _nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *fhandle)
2205 {
2206 	struct nfs4_server_caps_arg args = {
2207 		.fhandle = fhandle,
2208 	};
2209 	struct nfs4_server_caps_res res = {};
2210 	struct rpc_message msg = {
2211 		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SERVER_CAPS],
2212 		.rpc_argp = &args,
2213 		.rpc_resp = &res,
2214 	};
2215 	int status;
2216 
2217 	status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
2218 	if (status == 0) {
2219 		memcpy(server->attr_bitmask, res.attr_bitmask, sizeof(server->attr_bitmask));
2220 		server->caps &= ~(NFS_CAP_ACLS|NFS_CAP_HARDLINKS|
2221 				NFS_CAP_SYMLINKS|NFS_CAP_FILEID|
2222 				NFS_CAP_MODE|NFS_CAP_NLINK|NFS_CAP_OWNER|
2223 				NFS_CAP_OWNER_GROUP|NFS_CAP_ATIME|
2224 				NFS_CAP_CTIME|NFS_CAP_MTIME);
2225 		if (res.attr_bitmask[0] & FATTR4_WORD0_ACL)
2226 			server->caps |= NFS_CAP_ACLS;
2227 		if (res.has_links != 0)
2228 			server->caps |= NFS_CAP_HARDLINKS;
2229 		if (res.has_symlinks != 0)
2230 			server->caps |= NFS_CAP_SYMLINKS;
2231 		if (res.attr_bitmask[0] & FATTR4_WORD0_FILEID)
2232 			server->caps |= NFS_CAP_FILEID;
2233 		if (res.attr_bitmask[1] & FATTR4_WORD1_MODE)
2234 			server->caps |= NFS_CAP_MODE;
2235 		if (res.attr_bitmask[1] & FATTR4_WORD1_NUMLINKS)
2236 			server->caps |= NFS_CAP_NLINK;
2237 		if (res.attr_bitmask[1] & FATTR4_WORD1_OWNER)
2238 			server->caps |= NFS_CAP_OWNER;
2239 		if (res.attr_bitmask[1] & FATTR4_WORD1_OWNER_GROUP)
2240 			server->caps |= NFS_CAP_OWNER_GROUP;
2241 		if (res.attr_bitmask[1] & FATTR4_WORD1_TIME_ACCESS)
2242 			server->caps |= NFS_CAP_ATIME;
2243 		if (res.attr_bitmask[1] & FATTR4_WORD1_TIME_METADATA)
2244 			server->caps |= NFS_CAP_CTIME;
2245 		if (res.attr_bitmask[1] & FATTR4_WORD1_TIME_MODIFY)
2246 			server->caps |= NFS_CAP_MTIME;
2247 
2248 		memcpy(server->cache_consistency_bitmask, res.attr_bitmask, sizeof(server->cache_consistency_bitmask));
2249 		server->cache_consistency_bitmask[0] &= FATTR4_WORD0_CHANGE|FATTR4_WORD0_SIZE;
2250 		server->cache_consistency_bitmask[1] &= FATTR4_WORD1_TIME_METADATA|FATTR4_WORD1_TIME_MODIFY;
2251 		server->acl_bitmask = res.acl_bitmask;
2252 		server->fh_expire_type = res.fh_expire_type;
2253 	}
2254 
2255 	return status;
2256 }
2257 
2258 int nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *fhandle)
2259 {
2260 	struct nfs4_exception exception = { };
2261 	int err;
2262 	do {
2263 		err = nfs4_handle_exception(server,
2264 				_nfs4_server_capabilities(server, fhandle),
2265 				&exception);
2266 	} while (exception.retry);
2267 	return err;
2268 }
2269 
2270 static int _nfs4_lookup_root(struct nfs_server *server, struct nfs_fh *fhandle,
2271 		struct nfs_fsinfo *info)
2272 {
2273 	struct nfs4_lookup_root_arg args = {
2274 		.bitmask = nfs4_fattr_bitmap,
2275 	};
2276 	struct nfs4_lookup_res res = {
2277 		.server = server,
2278 		.fattr = info->fattr,
2279 		.fh = fhandle,
2280 	};
2281 	struct rpc_message msg = {
2282 		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOOKUP_ROOT],
2283 		.rpc_argp = &args,
2284 		.rpc_resp = &res,
2285 	};
2286 
2287 	nfs_fattr_init(info->fattr);
2288 	return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
2289 }
2290 
2291 static int nfs4_lookup_root(struct nfs_server *server, struct nfs_fh *fhandle,
2292 		struct nfs_fsinfo *info)
2293 {
2294 	struct nfs4_exception exception = { };
2295 	int err;
2296 	do {
2297 		err = _nfs4_lookup_root(server, fhandle, info);
2298 		switch (err) {
2299 		case 0:
2300 		case -NFS4ERR_WRONGSEC:
2301 			goto out;
2302 		default:
2303 			err = nfs4_handle_exception(server, err, &exception);
2304 		}
2305 	} while (exception.retry);
2306 out:
2307 	return err;
2308 }
2309 
2310 static int nfs4_lookup_root_sec(struct nfs_server *server, struct nfs_fh *fhandle,
2311 				struct nfs_fsinfo *info, rpc_authflavor_t flavor)
2312 {
2313 	struct rpc_auth *auth;
2314 	int ret;
2315 
2316 	auth = rpcauth_create(flavor, server->client);
2317 	if (!auth) {
2318 		ret = -EIO;
2319 		goto out;
2320 	}
2321 	ret = nfs4_lookup_root(server, fhandle, info);
2322 out:
2323 	return ret;
2324 }
2325 
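/*
 * Negotiate a security flavor for the root lookup: try each GSS
 * pseudoflavor known to the kernel, then AUTH_NULL, stopping at the
 * first flavor the server does not reject with WRONGSEC or EACCES.
 */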
2326 static int nfs4_find_root_sec(struct nfs_server *server, struct nfs_fh *fhandle,
2327 			      struct nfs_fsinfo *info)
2328 {
2329 	int i, len, status = 0;
2330 	rpc_authflavor_t flav_array[NFS_MAX_SECFLAVORS];
2331 
2332 	len = gss_mech_list_pseudoflavors(&flav_array[0]);
2333 	flav_array[len] = RPC_AUTH_NULL;
2334 	len += 1;
2335 
2336 	for (i = 0; i < len; i++) {
2337 		status = nfs4_lookup_root_sec(server, fhandle, info, flav_array[i]);
2338 		if (status == -NFS4ERR_WRONGSEC || status == -EACCES)
2339 			continue;
2340 		break;
2341 	}
2342 	/*
2343 	 * -EACCES could mean that the user doesn't have correct permissions
2344 	 * to access the mount.  It could also mean that we tried to mount
2345 	 * with a gss auth flavor, but rpc.gssd isn't running.  Either way,
2346 	 * existing mount programs don't handle -EACCES very well so it should
2347 	 * be mapped to -EPERM instead.
2348 	 */
2349 	if (status == -EACCES)
2350 		status = -EPERM;
2351 	return status;
2352 }
2353 
2354 /*
2355  * get the file handle for the "/" directory on the server
2356  */
2357 static int nfs4_proc_get_root(struct nfs_server *server, struct nfs_fh *fhandle,
2358 			      struct nfs_fsinfo *info)
2359 {
2360 	int minor_version = server->nfs_client->cl_minorversion;
2361 	int status = nfs4_lookup_root(server, fhandle, info);
2362 	if ((status == -NFS4ERR_WRONGSEC) && !(server->flags & NFS_MOUNT_SECFLAVOUR))
2363 		/*
2364 		 * A status of -NFS4ERR_WRONGSEC will be mapped to -EPERM
2365 		 * by nfs4_map_errors() as this function exits.
2366 		 */
2367 		status = nfs_v4_minor_ops[minor_version]->find_root_sec(server, fhandle, info);
2368 	if (status == 0)
2369 		status = nfs4_server_capabilities(server, fhandle);
2370 	if (status == 0)
2371 		status = nfs4_do_fsinfo(server, fhandle, info);
2372 	return nfs4_map_errors(status);
2373 }
2374 
2375 /*
2376  * Get locations and (maybe) other attributes of a referral.
2377  * Note that we'll actually follow the referral later when
2378  * we detect fsid mismatch in inode revalidation
2379  */
2380 static int nfs4_get_referral(struct rpc_clnt *client, struct inode *dir,
2381 			     const struct qstr *name, struct nfs_fattr *fattr,
2382 			     struct nfs_fh *fhandle)
2383 {
2384 	int status = -ENOMEM;
2385 	struct page *page = NULL;
2386 	struct nfs4_fs_locations *locations = NULL;
2387 
2388 	page = alloc_page(GFP_KERNEL);
2389 	if (page == NULL)
2390 		goto out;
2391 	locations = kmalloc(sizeof(struct nfs4_fs_locations), GFP_KERNEL);
2392 	if (locations == NULL)
2393 		goto out;
2394 
2395 	status = nfs4_proc_fs_locations(client, dir, name, locations, page);
2396 	if (status != 0)
2397 		goto out;
2398 	/* Make sure server returned a different fsid for the referral */
2399 	if (nfs_fsid_equal(&NFS_SERVER(dir)->fsid, &locations->fattr.fsid)) {
2400 		dprintk("%s: server did not return a different fsid for"
2401 			" a referral at %s\n", __func__, name->name);
2402 		status = -EIO;
2403 		goto out;
2404 	}
2405 	/* Fixup attributes for the nfs_lookup() call to nfs_fhget() */
2406 	nfs_fixup_referral_attributes(&locations->fattr);
2407 
2408 	/* replace the lookup nfs_fattr with the locations nfs_fattr */
2409 	memcpy(fattr, &locations->fattr, sizeof(struct nfs_fattr));
2410 	memset(fhandle, 0, sizeof(struct nfs_fh));
2411 out:
2412 	if (page)
2413 		__free_page(page);
2414 	kfree(locations);
2415 	return status;
2416 }
2417 
2418 static int _nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fattr *fattr)
2419 {
2420 	struct nfs4_getattr_arg args = {
2421 		.fh = fhandle,
2422 		.bitmask = server->attr_bitmask,
2423 	};
2424 	struct nfs4_getattr_res res = {
2425 		.fattr = fattr,
2426 		.server = server,
2427 	};
2428 	struct rpc_message msg = {
2429 		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETATTR],
2430 		.rpc_argp = &args,
2431 		.rpc_resp = &res,
2432 	};
2433 
2434 	nfs_fattr_init(fattr);
2435 	return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
2436 }
2437 
2438 static int nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fattr *fattr)
2439 {
2440 	struct nfs4_exception exception = { };
2441 	int err;
2442 	do {
2443 		err = nfs4_handle_exception(server,
2444 				_nfs4_proc_getattr(server, fhandle, fattr),
2445 				&exception);
2446 	} while (exception.retry);
2447 	return err;
2448 }
2449 
2450 /*
2451  * The file is not closed if it is opened due to a request to change
2452  * the size of the file. The open call will not be needed once the
2453  * VFS layer lookup-intents are implemented.
2454  *
2455  * Close is called when the inode is destroyed.
2456  * If we haven't opened the file for O_WRONLY, we
2457  * need to in the size_change case to obtain a stateid.
2458  *
2459  * Got race?
2460  * Because OPEN is always done by name in nfsv4, it is
2461  * possible that we opened a different file by the same
2462  * name.  We can recognize this race condition, but we
2463  * can't do anything about it besides returning an error.
2464  *
2465  * This will be fixed with VFS changes (lookup-intent).
2466  */
2467 static int
2468 nfs4_proc_setattr(struct dentry *dentry, struct nfs_fattr *fattr,
2469 		  struct iattr *sattr)
2470 {
2471 	struct inode *inode = dentry->d_inode;
2472 	struct rpc_cred *cred = NULL;
2473 	struct nfs4_state *state = NULL;
2474 	int status;
2475 
2476 	if (pnfs_ld_layoutret_on_setattr(inode))
2477 		pnfs_return_layout(inode);
2478 
2479 	nfs_fattr_init(fattr);
2480 
2481 	/* Search for an existing open(O_WRITE) file */
2482 	if (sattr->ia_valid & ATTR_FILE) {
2483 		struct nfs_open_context *ctx;
2484 
2485 		ctx = nfs_file_open_context(sattr->ia_file);
2486 		if (ctx) {
2487 			cred = ctx->cred;
2488 			state = ctx->state;
2489 		}
2490 	}
2491 
2492 	/* Deal with open(O_TRUNC) */
2493 	if (sattr->ia_valid & ATTR_OPEN)
2494 		sattr->ia_valid &= ~(ATTR_MTIME|ATTR_CTIME|ATTR_OPEN);
2495 
2496 	status = nfs4_do_setattr(inode, cred, fattr, sattr, state);
2497 	if (status == 0)
2498 		nfs_setattr_update_inode(inode, sattr);
2499 	return status;
2500 }
2501 
2502 static int _nfs4_proc_lookup(struct rpc_clnt *clnt, struct inode *dir,
2503 		const struct qstr *name, struct nfs_fh *fhandle,
2504 		struct nfs_fattr *fattr)
2505 {
2506 	struct nfs_server *server = NFS_SERVER(dir);
2507 	int		       status;
2508 	struct nfs4_lookup_arg args = {
2509 		.bitmask = server->attr_bitmask,
2510 		.dir_fh = NFS_FH(dir),
2511 		.name = name,
2512 	};
2513 	struct nfs4_lookup_res res = {
2514 		.server = server,
2515 		.fattr = fattr,
2516 		.fh = fhandle,
2517 	};
2518 	struct rpc_message msg = {
2519 		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOOKUP],
2520 		.rpc_argp = &args,
2521 		.rpc_resp = &res,
2522 	};
2523 
2524 	nfs_fattr_init(fattr);
2525 
2526 	dprintk("NFS call  lookup %s\n", name->name);
2527 	status = nfs4_call_sync(clnt, server, &msg, &args.seq_args, &res.seq_res, 0);
2528 	dprintk("NFS reply lookup: %d\n", status);
2529 	return status;
2530 }
2531 
2532 static void nfs_fixup_secinfo_attributes(struct nfs_fattr *fattr)
2533 {
2534 	fattr->valid |= NFS_ATTR_FATTR_TYPE | NFS_ATTR_FATTR_MODE |
2535 		NFS_ATTR_FATTR_NLINK | NFS_ATTR_FATTR_MOUNTPOINT;
2536 	fattr->mode = S_IFDIR | S_IRUGO | S_IXUGO;
2537 	fattr->nlink = 2;
2538 }
2539 
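/*
 * Common LOOKUP handling: map NFS4ERR_BADNAME to -ENOENT, follow
 * NFS4ERR_MOVED via a referral lookup, and on NFS4ERR_WRONGSEC retry
 * once with a new security client from nfs4_create_sec_client().  On
 * success *clnt is updated to whichever client ended up being used.
 */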
2540 static int nfs4_proc_lookup_common(struct rpc_clnt **clnt, struct inode *dir,
2541 				   struct qstr *name, struct nfs_fh *fhandle,
2542 				   struct nfs_fattr *fattr)
2543 {
2544 	struct nfs4_exception exception = { };
2545 	struct rpc_clnt *client = *clnt;
2546 	int err;
2547 	do {
2548 		err = _nfs4_proc_lookup(client, dir, name, fhandle, fattr);
2549 		switch (err) {
2550 		case -NFS4ERR_BADNAME:
2551 			err = -ENOENT;
2552 			goto out;
2553 		case -NFS4ERR_MOVED:
2554 			err = nfs4_get_referral(client, dir, name, fattr, fhandle);
2555 			goto out;
2556 		case -NFS4ERR_WRONGSEC:
2557 			err = -EPERM;
2558 			if (client != *clnt)
2559 				goto out;
2560 
2561 			client = nfs4_create_sec_client(client, dir, name);
2562 			if (IS_ERR(client))
2563 				return PTR_ERR(client);
2564 
2565 			exception.retry = 1;
2566 			break;
2567 		default:
2568 			err = nfs4_handle_exception(NFS_SERVER(dir), err, &exception);
2569 		}
2570 	} while (exception.retry);
2571 
2572 out:
2573 	if (err == 0)
2574 		*clnt = client;
2575 	else if (client != *clnt)
2576 		rpc_shutdown_client(client);
2577 
2578 	return err;
2579 }
2580 
2581 static int nfs4_proc_lookup(struct rpc_clnt *clnt, struct inode *dir, struct qstr *name,
2582 			    struct nfs_fh *fhandle, struct nfs_fattr *fattr)
2583 {
2584 	int status;
2585 	struct rpc_clnt *client = NFS_CLIENT(dir);
2586 
2587 	status = nfs4_proc_lookup_common(&client, dir, name, fhandle, fattr);
2588 	if (client != NFS_CLIENT(dir)) {
2589 		rpc_shutdown_client(client);
2590 		nfs_fixup_secinfo_attributes(fattr);
2591 	}
2592 	return status;
2593 }
2594 
2595 struct rpc_clnt *
2596 nfs4_proc_lookup_mountpoint(struct inode *dir, struct qstr *name,
2597 			    struct nfs_fh *fhandle, struct nfs_fattr *fattr)
2598 {
2599 	int status;
2600 	struct rpc_clnt *client = rpc_clone_client(NFS_CLIENT(dir));
2601 
2602 	status = nfs4_proc_lookup_common(&client, dir, name, fhandle, fattr);
2603 	if (status < 0) {
2604 		rpc_shutdown_client(client);
2605 		return ERR_PTR(status);
2606 	}
2607 	return client;
2608 }
2609 
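/*
 * Translate the generic MAY_* mask into NFSv4 ACCESS bits, issue the
 * ACCESS call, and convert the server's reply back into MAY_* bits in
 * entry->mask.
 */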
2610 static int _nfs4_proc_access(struct inode *inode, struct nfs_access_entry *entry)
2611 {
2612 	struct nfs_server *server = NFS_SERVER(inode);
2613 	struct nfs4_accessargs args = {
2614 		.fh = NFS_FH(inode),
2615 		.bitmask = server->cache_consistency_bitmask,
2616 	};
2617 	struct nfs4_accessres res = {
2618 		.server = server,
2619 	};
2620 	struct rpc_message msg = {
2621 		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_ACCESS],
2622 		.rpc_argp = &args,
2623 		.rpc_resp = &res,
2624 		.rpc_cred = entry->cred,
2625 	};
2626 	int mode = entry->mask;
2627 	int status;
2628 
2629 	/*
2630 	 * Determine which access bits we want to ask for...
2631 	 */
2632 	if (mode & MAY_READ)
2633 		args.access |= NFS4_ACCESS_READ;
2634 	if (S_ISDIR(inode->i_mode)) {
2635 		if (mode & MAY_WRITE)
2636 			args.access |= NFS4_ACCESS_MODIFY | NFS4_ACCESS_EXTEND | NFS4_ACCESS_DELETE;
2637 		if (mode & MAY_EXEC)
2638 			args.access |= NFS4_ACCESS_LOOKUP;
2639 	} else {
2640 		if (mode & MAY_WRITE)
2641 			args.access |= NFS4_ACCESS_MODIFY | NFS4_ACCESS_EXTEND;
2642 		if (mode & MAY_EXEC)
2643 			args.access |= NFS4_ACCESS_EXECUTE;
2644 	}
2645 
2646 	res.fattr = nfs_alloc_fattr();
2647 	if (res.fattr == NULL)
2648 		return -ENOMEM;
2649 
2650 	status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
2651 	if (!status) {
2652 		entry->mask = 0;
2653 		if (res.access & NFS4_ACCESS_READ)
2654 			entry->mask |= MAY_READ;
2655 		if (res.access & (NFS4_ACCESS_MODIFY | NFS4_ACCESS_EXTEND | NFS4_ACCESS_DELETE))
2656 			entry->mask |= MAY_WRITE;
2657 		if (res.access & (NFS4_ACCESS_LOOKUP|NFS4_ACCESS_EXECUTE))
2658 			entry->mask |= MAY_EXEC;
2659 		nfs_refresh_inode(inode, res.fattr);
2660 	}
2661 	nfs_free_fattr(res.fattr);
2662 	return status;
2663 }
2664 
2665 static int nfs4_proc_access(struct inode *inode, struct nfs_access_entry *entry)
2666 {
2667 	struct nfs4_exception exception = { };
2668 	int err;
2669 	do {
2670 		err = nfs4_handle_exception(NFS_SERVER(inode),
2671 				_nfs4_proc_access(inode, entry),
2672 				&exception);
2673 	} while (exception.retry);
2674 	return err;
2675 }
2676 
2677 /*
2678  * TODO: For the time being, we don't try to get any attributes
2679  * along with any of the zero-copy operations READ, READDIR,
2680  * READLINK, WRITE.
2681  *
2682  * In the case of the first three, we want to put the GETATTR
2683  * after the read-type operation -- this is because it is hard
2684  * to predict the length of a GETATTR response in v4, and thus
2685  * align the READ data correctly.  This means that the GETATTR
2686  * may end up partially falling into the page cache, and we should
2687  * shift it into the 'tail' of the xdr_buf before processing.
2688  * To do this efficiently, we need to know the total length
2689  * of data received, which doesn't seem to be available outside
2690  * of the RPC layer.
2691  *
2692  * In the case of WRITE, we also want to put the GETATTR after
2693  * the operation -- in this case because we want to make sure
2694  * we get the post-operation mtime and size.  This means that
2695  * we can't use xdr_encode_pages() as written: we need a variant
2696  * of it which would leave room in the 'tail' iovec.
2697  *
2698  * Both of these changes to the XDR layer would in fact be quite
2699  * minor, but I decided to leave them for a subsequent patch.
2700  */
2701 static int _nfs4_proc_readlink(struct inode *inode, struct page *page,
2702 		unsigned int pgbase, unsigned int pglen)
2703 {
2704 	struct nfs4_readlink args = {
2705 		.fh       = NFS_FH(inode),
2706 		.pgbase	  = pgbase,
2707 		.pglen    = pglen,
2708 		.pages    = &page,
2709 	};
2710 	struct nfs4_readlink_res res;
2711 	struct rpc_message msg = {
2712 		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READLINK],
2713 		.rpc_argp = &args,
2714 		.rpc_resp = &res,
2715 	};
2716 
2717 	return nfs4_call_sync(NFS_SERVER(inode)->client, NFS_SERVER(inode), &msg, &args.seq_args, &res.seq_res, 0);
2718 }
2719 
2720 static int nfs4_proc_readlink(struct inode *inode, struct page *page,
2721 		unsigned int pgbase, unsigned int pglen)
2722 {
2723 	struct nfs4_exception exception = { };
2724 	int err;
2725 	do {
2726 		err = nfs4_handle_exception(NFS_SERVER(inode),
2727 				_nfs4_proc_readlink(inode, page, pgbase, pglen),
2728 				&exception);
2729 	} while (exception.retry);
2730 	return err;
2731 }
2732 
2733 /*
2734  * Got race?
2735  * We will need to arrange for the VFS layer to provide an atomic open.
2736  * Until then, this create/open method is prone to inefficiency and race
2737  * conditions due to the lookup, create, and open VFS calls from sys_open()
2738  * placed on the wire.
2739  *
2740  * Given the above sorry state of affairs, I'm simply sending an OPEN.
2741  * The file will be opened again in the subsequent VFS open call
2742  * (nfs4_proc_file_open).
2743  *
2744  * The open for read will just hang around to be used by any process that
2745  * opens the file O_RDONLY. This will all be resolved with the VFS changes.
2746  */
2747 
2748 static int
2749 nfs4_proc_create(struct inode *dir, struct dentry *dentry, struct iattr *sattr,
2750                  int flags, struct nfs_open_context *ctx)
2751 {
2752 	struct dentry *de = dentry;
2753 	struct nfs4_state *state;
2754 	struct rpc_cred *cred = NULL;
2755 	fmode_t fmode = 0;
2756 	int status = 0;
2757 
2758 	if (ctx != NULL) {
2759 		cred = ctx->cred;
2760 		de = ctx->dentry;
2761 		fmode = ctx->mode;
2762 	}
2763 	sattr->ia_mode &= ~current_umask();
2764 	state = nfs4_do_open(dir, de, fmode, flags, sattr, cred);
2765 	d_drop(dentry);
2766 	if (IS_ERR(state)) {
2767 		status = PTR_ERR(state);
2768 		goto out;
2769 	}
2770 	d_add(dentry, igrab(state->inode));
2771 	nfs_set_verifier(dentry, nfs_save_change_attribute(dir));
2772 	if (ctx != NULL)
2773 		ctx->state = state;
2774 	else
2775 		nfs4_close_sync(state, fmode);
2776 out:
2777 	return status;
2778 }
2779 
2780 static int _nfs4_proc_remove(struct inode *dir, struct qstr *name)
2781 {
2782 	struct nfs_server *server = NFS_SERVER(dir);
2783 	struct nfs_removeargs args = {
2784 		.fh = NFS_FH(dir),
2785 		.name.len = name->len,
2786 		.name.name = name->name,
2787 		.bitmask = server->attr_bitmask,
2788 	};
2789 	struct nfs_removeres res = {
2790 		.server = server,
2791 	};
2792 	struct rpc_message msg = {
2793 		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_REMOVE],
2794 		.rpc_argp = &args,
2795 		.rpc_resp = &res,
2796 	};
2797 	int status = -ENOMEM;
2798 
2799 	res.dir_attr = nfs_alloc_fattr();
2800 	if (res.dir_attr == NULL)
2801 		goto out;
2802 
2803 	status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 1);
2804 	if (status == 0) {
2805 		update_changeattr(dir, &res.cinfo);
2806 		nfs_post_op_update_inode(dir, res.dir_attr);
2807 	}
2808 	nfs_free_fattr(res.dir_attr);
2809 out:
2810 	return status;
2811 }
2812 
2813 static int nfs4_proc_remove(struct inode *dir, struct qstr *name)
2814 {
2815 	struct nfs4_exception exception = { };
2816 	int err;
2817 	do {
2818 		err = nfs4_handle_exception(NFS_SERVER(dir),
2819 				_nfs4_proc_remove(dir, name),
2820 				&exception);
2821 	} while (exception.retry);
2822 	return err;
2823 }
2824 
2825 static void nfs4_proc_unlink_setup(struct rpc_message *msg, struct inode *dir)
2826 {
2827 	struct nfs_server *server = NFS_SERVER(dir);
2828 	struct nfs_removeargs *args = msg->rpc_argp;
2829 	struct nfs_removeres *res = msg->rpc_resp;
2830 
2831 	args->bitmask = server->cache_consistency_bitmask;
2832 	res->server = server;
2833 	msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_REMOVE];
2834 	nfs41_init_sequence(&args->seq_args, &res->seq_res, 1);
2835 }
2836 
2837 static void nfs4_proc_unlink_rpc_prepare(struct rpc_task *task, struct nfs_unlinkdata *data)
2838 {
2839 	if (nfs4_setup_sequence(NFS_SERVER(data->dir),
2840 				&data->args.seq_args,
2841 				&data->res.seq_res,
2842 				task))
2843 		return;
2844 	rpc_call_start(task);
2845 }
2846 
2847 static int nfs4_proc_unlink_done(struct rpc_task *task, struct inode *dir)
2848 {
2849 	struct nfs_removeres *res = task->tk_msg.rpc_resp;
2850 
2851 	if (!nfs4_sequence_done(task, &res->seq_res))
2852 		return 0;
2853 	if (nfs4_async_handle_error(task, res->server, NULL) == -EAGAIN)
2854 		return 0;
2855 	update_changeattr(dir, &res->cinfo);
2856 	nfs_post_op_update_inode(dir, res->dir_attr);
2857 	return 1;
2858 }
2859 
2860 static void nfs4_proc_rename_setup(struct rpc_message *msg, struct inode *dir)
2861 {
2862 	struct nfs_server *server = NFS_SERVER(dir);
2863 	struct nfs_renameargs *arg = msg->rpc_argp;
2864 	struct nfs_renameres *res = msg->rpc_resp;
2865 
2866 	msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENAME];
2867 	arg->bitmask = server->attr_bitmask;
2868 	res->server = server;
2869 	nfs41_init_sequence(&arg->seq_args, &res->seq_res, 1);
2870 }
2871 
2872 static void nfs4_proc_rename_rpc_prepare(struct rpc_task *task, struct nfs_renamedata *data)
2873 {
2874 	if (nfs4_setup_sequence(NFS_SERVER(data->old_dir),
2875 				&data->args.seq_args,
2876 				&data->res.seq_res,
2877 				task))
2878 		return;
2879 	rpc_call_start(task);
2880 }
2881 
2882 static int nfs4_proc_rename_done(struct rpc_task *task, struct inode *old_dir,
2883 				 struct inode *new_dir)
2884 {
2885 	struct nfs_renameres *res = task->tk_msg.rpc_resp;
2886 
2887 	if (!nfs4_sequence_done(task, &res->seq_res))
2888 		return 0;
2889 	if (nfs4_async_handle_error(task, res->server, NULL) == -EAGAIN)
2890 		return 0;
2891 
2892 	update_changeattr(old_dir, &res->old_cinfo);
2893 	nfs_post_op_update_inode(old_dir, res->old_fattr);
2894 	update_changeattr(new_dir, &res->new_cinfo);
2895 	nfs_post_op_update_inode(new_dir, res->new_fattr);
2896 	return 1;
2897 }
2898 
2899 static int _nfs4_proc_rename(struct inode *old_dir, struct qstr *old_name,
2900 		struct inode *new_dir, struct qstr *new_name)
2901 {
2902 	struct nfs_server *server = NFS_SERVER(old_dir);
2903 	struct nfs_renameargs arg = {
2904 		.old_dir = NFS_FH(old_dir),
2905 		.new_dir = NFS_FH(new_dir),
2906 		.old_name = old_name,
2907 		.new_name = new_name,
2908 		.bitmask = server->attr_bitmask,
2909 	};
2910 	struct nfs_renameres res = {
2911 		.server = server,
2912 	};
2913 	struct rpc_message msg = {
2914 		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENAME],
2915 		.rpc_argp = &arg,
2916 		.rpc_resp = &res,
2917 	};
2918 	int status = -ENOMEM;
2919 
2920 	res.old_fattr = nfs_alloc_fattr();
2921 	res.new_fattr = nfs_alloc_fattr();
2922 	if (res.old_fattr == NULL || res.new_fattr == NULL)
2923 		goto out;
2924 
2925 	status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1);
2926 	if (!status) {
2927 		update_changeattr(old_dir, &res.old_cinfo);
2928 		nfs_post_op_update_inode(old_dir, res.old_fattr);
2929 		update_changeattr(new_dir, &res.new_cinfo);
2930 		nfs_post_op_update_inode(new_dir, res.new_fattr);
2931 	}
2932 out:
2933 	nfs_free_fattr(res.new_fattr);
2934 	nfs_free_fattr(res.old_fattr);
2935 	return status;
2936 }
2937 
2938 static int nfs4_proc_rename(struct inode *old_dir, struct qstr *old_name,
2939 		struct inode *new_dir, struct qstr *new_name)
2940 {
2941 	struct nfs4_exception exception = { };
2942 	int err;
2943 	do {
2944 		err = nfs4_handle_exception(NFS_SERVER(old_dir),
2945 				_nfs4_proc_rename(old_dir, old_name,
2946 					new_dir, new_name),
2947 				&exception);
2948 	} while (exception.retry);
2949 	return err;
2950 }
2951 
2952 static int _nfs4_proc_link(struct inode *inode, struct inode *dir, struct qstr *name)
2953 {
2954 	struct nfs_server *server = NFS_SERVER(inode);
2955 	struct nfs4_link_arg arg = {
2956 		.fh     = NFS_FH(inode),
2957 		.dir_fh = NFS_FH(dir),
2958 		.name   = name,
2959 		.bitmask = server->attr_bitmask,
2960 	};
2961 	struct nfs4_link_res res = {
2962 		.server = server,
2963 	};
2964 	struct rpc_message msg = {
2965 		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LINK],
2966 		.rpc_argp = &arg,
2967 		.rpc_resp = &res,
2968 	};
2969 	int status = -ENOMEM;
2970 
2971 	res.fattr = nfs_alloc_fattr();
2972 	res.dir_attr = nfs_alloc_fattr();
2973 	if (res.fattr == NULL || res.dir_attr == NULL)
2974 		goto out;
2975 
2976 	status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1);
2977 	if (!status) {
2978 		update_changeattr(dir, &res.cinfo);
2979 		nfs_post_op_update_inode(dir, res.dir_attr);
2980 		nfs_post_op_update_inode(inode, res.fattr);
2981 	}
2982 out:
2983 	nfs_free_fattr(res.dir_attr);
2984 	nfs_free_fattr(res.fattr);
2985 	return status;
2986 }
2987 
2988 static int nfs4_proc_link(struct inode *inode, struct inode *dir, struct qstr *name)
2989 {
2990 	struct nfs4_exception exception = { };
2991 	int err;
2992 	do {
2993 		err = nfs4_handle_exception(NFS_SERVER(inode),
2994 				_nfs4_proc_link(inode, dir, name),
2995 				&exception);
2996 	} while (exception.retry);
2997 	return err;
2998 }
2999 
3000 struct nfs4_createdata {
3001 	struct rpc_message msg;
3002 	struct nfs4_create_arg arg;
3003 	struct nfs4_create_res res;
3004 	struct nfs_fh fh;
3005 	struct nfs_fattr fattr;
3006 	struct nfs_fattr dir_fattr;
3007 };
3008 
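/*
 * Shared helpers for the CREATE-based operations (symlink, mkdir,
 * mknod): nfs4_alloc_createdata() bundles the rpc_message with its
 * argument and result structures, and nfs4_do_create() issues the
 * call, updating the parent directory and instantiating the dentry on
 * success.
 */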
3009 static struct nfs4_createdata *nfs4_alloc_createdata(struct inode *dir,
3010 		struct qstr *name, struct iattr *sattr, u32 ftype)
3011 {
3012 	struct nfs4_createdata *data;
3013 
3014 	data = kzalloc(sizeof(*data), GFP_KERNEL);
3015 	if (data != NULL) {
3016 		struct nfs_server *server = NFS_SERVER(dir);
3017 
3018 		data->msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CREATE];
3019 		data->msg.rpc_argp = &data->arg;
3020 		data->msg.rpc_resp = &data->res;
3021 		data->arg.dir_fh = NFS_FH(dir);
3022 		data->arg.server = server;
3023 		data->arg.name = name;
3024 		data->arg.attrs = sattr;
3025 		data->arg.ftype = ftype;
3026 		data->arg.bitmask = server->attr_bitmask;
3027 		data->res.server = server;
3028 		data->res.fh = &data->fh;
3029 		data->res.fattr = &data->fattr;
3030 		data->res.dir_fattr = &data->dir_fattr;
3031 		nfs_fattr_init(data->res.fattr);
3032 		nfs_fattr_init(data->res.dir_fattr);
3033 	}
3034 	return data;
3035 }
3036 
3037 static int nfs4_do_create(struct inode *dir, struct dentry *dentry, struct nfs4_createdata *data)
3038 {
3039 	int status = nfs4_call_sync(NFS_SERVER(dir)->client, NFS_SERVER(dir), &data->msg,
3040 				    &data->arg.seq_args, &data->res.seq_res, 1);
3041 	if (status == 0) {
3042 		update_changeattr(dir, &data->res.dir_cinfo);
3043 		nfs_post_op_update_inode(dir, data->res.dir_fattr);
3044 		status = nfs_instantiate(dentry, data->res.fh, data->res.fattr);
3045 	}
3046 	return status;
3047 }
3048 
3049 static void nfs4_free_createdata(struct nfs4_createdata *data)
3050 {
3051 	kfree(data);
3052 }
3053 
3054 static int _nfs4_proc_symlink(struct inode *dir, struct dentry *dentry,
3055 		struct page *page, unsigned int len, struct iattr *sattr)
3056 {
3057 	struct nfs4_createdata *data;
3058 	int status = -ENAMETOOLONG;
3059 
3060 	if (len > NFS4_MAXPATHLEN)
3061 		goto out;
3062 
3063 	status = -ENOMEM;
3064 	data = nfs4_alloc_createdata(dir, &dentry->d_name, sattr, NF4LNK);
3065 	if (data == NULL)
3066 		goto out;
3067 
3068 	data->msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SYMLINK];
3069 	data->arg.u.symlink.pages = &page;
3070 	data->arg.u.symlink.len = len;
3071 
3072 	status = nfs4_do_create(dir, dentry, data);
3073 
3074 	nfs4_free_createdata(data);
3075 out:
3076 	return status;
3077 }
3078 
3079 static int nfs4_proc_symlink(struct inode *dir, struct dentry *dentry,
3080 		struct page *page, unsigned int len, struct iattr *sattr)
3081 {
3082 	struct nfs4_exception exception = { };
3083 	int err;
3084 	do {
3085 		err = nfs4_handle_exception(NFS_SERVER(dir),
3086 				_nfs4_proc_symlink(dir, dentry, page,
3087 							len, sattr),
3088 				&exception);
3089 	} while (exception.retry);
3090 	return err;
3091 }
3092 
3093 static int _nfs4_proc_mkdir(struct inode *dir, struct dentry *dentry,
3094 		struct iattr *sattr)
3095 {
3096 	struct nfs4_createdata *data;
3097 	int status = -ENOMEM;
3098 
3099 	data = nfs4_alloc_createdata(dir, &dentry->d_name, sattr, NF4DIR);
3100 	if (data == NULL)
3101 		goto out;
3102 
3103 	status = nfs4_do_create(dir, dentry, data);
3104 
3105 	nfs4_free_createdata(data);
3106 out:
3107 	return status;
3108 }
3109 
3110 static int nfs4_proc_mkdir(struct inode *dir, struct dentry *dentry,
3111 		struct iattr *sattr)
3112 {
3113 	struct nfs4_exception exception = { };
3114 	int err;
3115 
3116 	sattr->ia_mode &= ~current_umask();
3117 	do {
3118 		err = nfs4_handle_exception(NFS_SERVER(dir),
3119 				_nfs4_proc_mkdir(dir, dentry, sattr),
3120 				&exception);
3121 	} while (exception.retry);
3122 	return err;
3123 }
3124 
3125 static int _nfs4_proc_readdir(struct dentry *dentry, struct rpc_cred *cred,
3126 		u64 cookie, struct page **pages, unsigned int count, int plus)
3127 {
3128 	struct inode		*dir = dentry->d_inode;
3129 	struct nfs4_readdir_arg args = {
3130 		.fh = NFS_FH(dir),
3131 		.pages = pages,
3132 		.pgbase = 0,
3133 		.count = count,
3134 		.bitmask = NFS_SERVER(dentry->d_inode)->attr_bitmask,
3135 		.plus = plus,
3136 	};
3137 	struct nfs4_readdir_res res;
3138 	struct rpc_message msg = {
3139 		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READDIR],
3140 		.rpc_argp = &args,
3141 		.rpc_resp = &res,
3142 		.rpc_cred = cred,
3143 	};
3144 	int			status;
3145 
3146 	dprintk("%s: dentry = %s/%s, cookie = %Lu\n", __func__,
3147 			dentry->d_parent->d_name.name,
3148 			dentry->d_name.name,
3149 			(unsigned long long)cookie);
3150 	nfs4_setup_readdir(cookie, NFS_COOKIEVERF(dir), dentry, &args);
3151 	res.pgbase = args.pgbase;
3152 	status = nfs4_call_sync(NFS_SERVER(dir)->client, NFS_SERVER(dir), &msg, &args.seq_args, &res.seq_res, 0);
3153 	if (status >= 0) {
3154 		memcpy(NFS_COOKIEVERF(dir), res.verifier.data, NFS4_VERIFIER_SIZE);
3155 		status += args.pgbase;
3156 	}
3157 
3158 	nfs_invalidate_atime(dir);
3159 
3160 	dprintk("%s: returns %d\n", __func__, status);
3161 	return status;
3162 }
3163 
3164 static int nfs4_proc_readdir(struct dentry *dentry, struct rpc_cred *cred,
3165 		u64 cookie, struct page **pages, unsigned int count, int plus)
3166 {
3167 	struct nfs4_exception exception = { };
3168 	int err;
3169 	do {
3170 		err = nfs4_handle_exception(NFS_SERVER(dentry->d_inode),
3171 				_nfs4_proc_readdir(dentry, cred, cookie,
3172 					pages, count, plus),
3173 				&exception);
3174 	} while (exception.retry);
3175 	return err;
3176 }
3177 
3178 static int _nfs4_proc_mknod(struct inode *dir, struct dentry *dentry,
3179 		struct iattr *sattr, dev_t rdev)
3180 {
3181 	struct nfs4_createdata *data;
3182 	int mode = sattr->ia_mode;
3183 	int status = -ENOMEM;
3184 
3185 	BUG_ON(!(sattr->ia_valid & ATTR_MODE));
3186 	BUG_ON(!S_ISFIFO(mode) && !S_ISBLK(mode) && !S_ISCHR(mode) && !S_ISSOCK(mode));
3187 
3188 	data = nfs4_alloc_createdata(dir, &dentry->d_name, sattr, NF4SOCK);
3189 	if (data == NULL)
3190 		goto out;
3191 
3192 	if (S_ISFIFO(mode))
3193 		data->arg.ftype = NF4FIFO;
3194 	else if (S_ISBLK(mode)) {
3195 		data->arg.ftype = NF4BLK;
3196 		data->arg.u.device.specdata1 = MAJOR(rdev);
3197 		data->arg.u.device.specdata2 = MINOR(rdev);
3198 	}
3199 	else if (S_ISCHR(mode)) {
3200 		data->arg.ftype = NF4CHR;
3201 		data->arg.u.device.specdata1 = MAJOR(rdev);
3202 		data->arg.u.device.specdata2 = MINOR(rdev);
3203 	}
3204 
3205 	status = nfs4_do_create(dir, dentry, data);
3206 
3207 	nfs4_free_createdata(data);
3208 out:
3209 	return status;
3210 }
3211 
3212 static int nfs4_proc_mknod(struct inode *dir, struct dentry *dentry,
3213 		struct iattr *sattr, dev_t rdev)
3214 {
3215 	struct nfs4_exception exception = { };
3216 	int err;
3217 
3218 	sattr->ia_mode &= ~current_umask();
3219 	do {
3220 		err = nfs4_handle_exception(NFS_SERVER(dir),
3221 				_nfs4_proc_mknod(dir, dentry, sattr, rdev),
3222 				&exception);
3223 	} while (exception.retry);
3224 	return err;
3225 }
3226 
3227 static int _nfs4_proc_statfs(struct nfs_server *server, struct nfs_fh *fhandle,
3228 		 struct nfs_fsstat *fsstat)
3229 {
3230 	struct nfs4_statfs_arg args = {
3231 		.fh = fhandle,
3232 		.bitmask = server->attr_bitmask,
3233 	};
3234 	struct nfs4_statfs_res res = {
3235 		.fsstat = fsstat,
3236 	};
3237 	struct rpc_message msg = {
3238 		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_STATFS],
3239 		.rpc_argp = &args,
3240 		.rpc_resp = &res,
3241 	};
3242 
3243 	nfs_fattr_init(fsstat->fattr);
3244 	return  nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
3245 }
3246 
3247 static int nfs4_proc_statfs(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsstat *fsstat)
3248 {
3249 	struct nfs4_exception exception = { };
3250 	int err;
3251 	do {
3252 		err = nfs4_handle_exception(server,
3253 				_nfs4_proc_statfs(server, fhandle, fsstat),
3254 				&exception);
3255 	} while (exception.retry);
3256 	return err;
3257 }
3258 
3259 static int _nfs4_do_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle,
3260 		struct nfs_fsinfo *fsinfo)
3261 {
3262 	struct nfs4_fsinfo_arg args = {
3263 		.fh = fhandle,
3264 		.bitmask = server->attr_bitmask,
3265 	};
3266 	struct nfs4_fsinfo_res res = {
3267 		.fsinfo = fsinfo,
3268 	};
3269 	struct rpc_message msg = {
3270 		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FSINFO],
3271 		.rpc_argp = &args,
3272 		.rpc_resp = &res,
3273 	};
3274 
3275 	return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
3276 }
3277 
3278 static int nfs4_do_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsinfo *fsinfo)
3279 {
3280 	struct nfs4_exception exception = { };
3281 	int err;
3282 
3283 	do {
3284 		err = nfs4_handle_exception(server,
3285 				_nfs4_do_fsinfo(server, fhandle, fsinfo),
3286 				&exception);
3287 	} while (exception.retry);
3288 	return err;
3289 }
3290 
3291 static int nfs4_proc_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsinfo *fsinfo)
3292 {
3293 	nfs_fattr_init(fsinfo->fattr);
3294 	return nfs4_do_fsinfo(server, fhandle, fsinfo);
3295 }
3296 
3297 static int _nfs4_proc_pathconf(struct nfs_server *server, struct nfs_fh *fhandle,
3298 		struct nfs_pathconf *pathconf)
3299 {
3300 	struct nfs4_pathconf_arg args = {
3301 		.fh = fhandle,
3302 		.bitmask = server->attr_bitmask,
3303 	};
3304 	struct nfs4_pathconf_res res = {
3305 		.pathconf = pathconf,
3306 	};
3307 	struct rpc_message msg = {
3308 		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_PATHCONF],
3309 		.rpc_argp = &args,
3310 		.rpc_resp = &res,
3311 	};
3312 
3313 	/* None of the pathconf attributes are mandatory to implement */
3314 	if ((args.bitmask[0] & nfs4_pathconf_bitmap[0]) == 0) {
3315 		memset(pathconf, 0, sizeof(*pathconf));
3316 		return 0;
3317 	}
3318 
3319 	nfs_fattr_init(pathconf->fattr);
3320 	return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
3321 }
3322 
3323 static int nfs4_proc_pathconf(struct nfs_server *server, struct nfs_fh *fhandle,
3324 		struct nfs_pathconf *pathconf)
3325 {
3326 	struct nfs4_exception exception = { };
3327 	int err;
3328 
3329 	do {
3330 		err = nfs4_handle_exception(server,
3331 				_nfs4_proc_pathconf(server, fhandle, pathconf),
3332 				&exception);
3333 	} while (exception.retry);
3334 	return err;
3335 }
3336 
3337 void __nfs4_read_done_cb(struct nfs_read_data *data)
3338 {
3339 	nfs_invalidate_atime(data->inode);
3340 }
3341 
3342 static int nfs4_read_done_cb(struct rpc_task *task, struct nfs_read_data *data)
3343 {
3344 	struct nfs_server *server = NFS_SERVER(data->inode);
3345 
3346 	if (nfs4_async_handle_error(task, server, data->args.context->state) == -EAGAIN) {
3347 		rpc_restart_call_prepare(task);
3348 		return -EAGAIN;
3349 	}
3350 
3351 	__nfs4_read_done_cb(data);
3352 	if (task->tk_status > 0)
3353 		renew_lease(server, data->timestamp);
3354 	return 0;
3355 }
3356 
3357 static int nfs4_read_done(struct rpc_task *task, struct nfs_read_data *data)
3358 {
3359 
3360 	dprintk("--> %s\n", __func__);
3361 
3362 	if (!nfs4_sequence_done(task, &data->res.seq_res))
3363 		return -EAGAIN;
3364 
3365 	return data->read_done_cb ? data->read_done_cb(task, data) :
3366 				    nfs4_read_done_cb(task, data);
3367 }
3368 
3369 static void nfs4_proc_read_setup(struct nfs_read_data *data, struct rpc_message *msg)
3370 {
3371 	data->timestamp   = jiffies;
3372 	data->read_done_cb = nfs4_read_done_cb;
3373 	msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READ];
3374 	nfs41_init_sequence(&data->args.seq_args, &data->res.seq_res, 0);
3375 }
3376 
3377 static void nfs4_proc_read_rpc_prepare(struct rpc_task *task, struct nfs_read_data *data)
3378 {
3379 	if (nfs4_setup_sequence(NFS_SERVER(data->inode),
3380 				&data->args.seq_args,
3381 				&data->res.seq_res,
3382 				task))
3383 		return;
3384 	rpc_call_start(task);
3385 }
3386 
3387 /* Reset the nfs_read_data to send the read to the MDS. */
3388 void nfs4_reset_read(struct rpc_task *task, struct nfs_read_data *data)
3389 {
3390 	dprintk("%s Reset task for i/o through\n", __func__);
3391 	put_lseg(data->lseg);
3392 	data->lseg = NULL;
3393 	/* offsets will differ in the dense stripe case */
3394 	data->args.offset = data->mds_offset;
3395 	data->ds_clp = NULL;
3396 	data->args.fh     = NFS_FH(data->inode);
3397 	data->read_done_cb = nfs4_read_done_cb;
3398 	task->tk_ops = data->mds_ops;
3399 	rpc_task_reset_client(task, NFS_CLIENT(data->inode));
3400 }
3401 EXPORT_SYMBOL_GPL(nfs4_reset_read);
3402 
3403 static int nfs4_write_done_cb(struct rpc_task *task, struct nfs_write_data *data)
3404 {
3405 	struct inode *inode = data->inode;
3406 
3407 	if (nfs4_async_handle_error(task, NFS_SERVER(inode), data->args.context->state) == -EAGAIN) {
3408 		rpc_restart_call_prepare(task);
3409 		return -EAGAIN;
3410 	}
3411 	if (task->tk_status >= 0) {
3412 		renew_lease(NFS_SERVER(inode), data->timestamp);
3413 		nfs_post_op_update_inode_force_wcc(inode, data->res.fattr);
3414 	}
3415 	return 0;
3416 }
3417 
3418 static int nfs4_write_done(struct rpc_task *task, struct nfs_write_data *data)
3419 {
3420 	if (!nfs4_sequence_done(task, &data->res.seq_res))
3421 		return -EAGAIN;
3422 	return data->write_done_cb ? data->write_done_cb(task, data) :
3423 		nfs4_write_done_cb(task, data);
3424 }
3425 
3426 /* Reset the nfs_write_data to send the write to the MDS. */
3427 void nfs4_reset_write(struct rpc_task *task, struct nfs_write_data *data)
3428 {
3429 	dprintk("%s Reset task for i/o through\n", __func__);
3430 	put_lseg(data->lseg);
3431 	data->lseg          = NULL;
3432 	data->ds_clp        = NULL;
3433 	data->write_done_cb = nfs4_write_done_cb;
3434 	data->args.fh       = NFS_FH(data->inode);
3435 	data->args.bitmask  = data->res.server->cache_consistency_bitmask;
3436 	data->args.offset   = data->mds_offset;
3437 	data->res.fattr     = &data->fattr;
3438 	task->tk_ops        = data->mds_ops;
3439 	rpc_task_reset_client(task, NFS_CLIENT(data->inode));
3440 }
3441 EXPORT_SYMBOL_GPL(nfs4_reset_write);
3442 
3443 static void nfs4_proc_write_setup(struct nfs_write_data *data, struct rpc_message *msg)
3444 {
3445 	struct nfs_server *server = NFS_SERVER(data->inode);
3446 
3447 	if (data->lseg) {
3448 		data->args.bitmask = NULL;
3449 		data->res.fattr = NULL;
3450 	} else
3451 		data->args.bitmask = server->cache_consistency_bitmask;
3452 	if (!data->write_done_cb)
3453 		data->write_done_cb = nfs4_write_done_cb;
3454 	data->res.server = server;
3455 	data->timestamp   = jiffies;
3456 
3457 	msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_WRITE];
3458 	nfs41_init_sequence(&data->args.seq_args, &data->res.seq_res, 1);
3459 }
3460 
3461 static void nfs4_proc_write_rpc_prepare(struct rpc_task *task, struct nfs_write_data *data)
3462 {
3463 	if (nfs4_setup_sequence(NFS_SERVER(data->inode),
3464 				&data->args.seq_args,
3465 				&data->res.seq_res,
3466 				task))
3467 		return;
3468 	rpc_call_start(task);
3469 }
3470 
3471 static int nfs4_commit_done_cb(struct rpc_task *task, struct nfs_write_data *data)
3472 {
3473 	struct inode *inode = data->inode;
3474 
3475 	if (nfs4_async_handle_error(task, NFS_SERVER(inode), NULL) == -EAGAIN) {
3476 		rpc_restart_call_prepare(task);
3477 		return -EAGAIN;
3478 	}
3479 	nfs_refresh_inode(inode, data->res.fattr);
3480 	return 0;
3481 }
3482 
3483 static int nfs4_commit_done(struct rpc_task *task, struct nfs_write_data *data)
3484 {
3485 	if (!nfs4_sequence_done(task, &data->res.seq_res))
3486 		return -EAGAIN;
3487 	return data->write_done_cb(task, data);
3488 }
3489 
3490 static void nfs4_proc_commit_setup(struct nfs_write_data *data, struct rpc_message *msg)
3491 {
3492 	struct nfs_server *server = NFS_SERVER(data->inode);
3493 
3494 	if (data->lseg) {
3495 		data->args.bitmask = NULL;
3496 		data->res.fattr = NULL;
3497 	} else
3498 		data->args.bitmask = server->cache_consistency_bitmask;
3499 	if (!data->write_done_cb)
3500 		data->write_done_cb = nfs4_commit_done_cb;
3501 	data->res.server = server;
3502 	msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_COMMIT];
3503 	nfs41_init_sequence(&data->args.seq_args, &data->res.seq_res, 1);
3504 }
3505 
3506 struct nfs4_renewdata {
3507 	struct nfs_client	*client;
3508 	unsigned long		timestamp;
3509 };
3510 
3511 /*
3512  * nfs4_proc_async_renew(): This is not one of the nfs_rpc_ops; it is a special
3513  * standalone procedure for queueing an asynchronous RENEW.
3514  */
3515 static void nfs4_renew_release(void *calldata)
3516 {
3517 	struct nfs4_renewdata *data = calldata;
3518 	struct nfs_client *clp = data->client;
3519 
3520 	if (atomic_read(&clp->cl_count) > 1)
3521 		nfs4_schedule_state_renewal(clp);
3522 	nfs_put_client(clp);
3523 	kfree(data);
3524 }
3525 
3526 static void nfs4_renew_done(struct rpc_task *task, void *calldata)
3527 {
3528 	struct nfs4_renewdata *data = calldata;
3529 	struct nfs_client *clp = data->client;
3530 	unsigned long timestamp = data->timestamp;
3531 
3532 	if (task->tk_status < 0) {
3533 		/* Unless we're shutting down, schedule state recovery! */
3534 		if (test_bit(NFS_CS_RENEWD, &clp->cl_res_state) == 0)
3535 			return;
3536 		if (task->tk_status != -NFS4ERR_CB_PATH_DOWN) {
3537 			nfs4_schedule_lease_recovery(clp);
3538 			return;
3539 		}
3540 		nfs4_schedule_path_down_recovery(clp);
3541 	}
3542 	do_renew_lease(clp, timestamp);
3543 }
3544 
3545 static const struct rpc_call_ops nfs4_renew_ops = {
3546 	.rpc_call_done = nfs4_renew_done,
3547 	.rpc_release = nfs4_renew_release,
3548 };
3549 
3550 static int nfs4_proc_async_renew(struct nfs_client *clp, struct rpc_cred *cred, unsigned renew_flags)
3551 {
3552 	struct rpc_message msg = {
3553 		.rpc_proc	= &nfs4_procedures[NFSPROC4_CLNT_RENEW],
3554 		.rpc_argp	= clp,
3555 		.rpc_cred	= cred,
3556 	};
3557 	struct nfs4_renewdata *data;
3558 
3559 	if (renew_flags == 0)
3560 		return 0;
3561 	if (!atomic_inc_not_zero(&clp->cl_count))
3562 		return -EIO;
3563 	data = kmalloc(sizeof(*data), GFP_NOFS);
3564 	if (data == NULL)
3565 		return -ENOMEM;
3566 	data->client = clp;
3567 	data->timestamp = jiffies;
3568 	return rpc_call_async(clp->cl_rpcclient, &msg, RPC_TASK_SOFT,
3569 			&nfs4_renew_ops, data);
3570 }
3571 
3572 static int nfs4_proc_renew(struct nfs_client *clp, struct rpc_cred *cred)
3573 {
3574 	struct rpc_message msg = {
3575 		.rpc_proc	= &nfs4_procedures[NFSPROC4_CLNT_RENEW],
3576 		.rpc_argp	= clp,
3577 		.rpc_cred	= cred,
3578 	};
3579 	unsigned long now = jiffies;
3580 	int status;
3581 
3582 	status = rpc_call_sync(clp->cl_rpcclient, &msg, 0);
3583 	if (status < 0)
3584 		return status;
3585 	do_renew_lease(clp, now);
3586 	return 0;
3587 }
3588 
3589 static inline int nfs4_server_supports_acls(struct nfs_server *server)
3590 {
3591 	return (server->caps & NFS_CAP_ACLS)
3592 		&& (server->acl_bitmask & ACL4_SUPPORT_ALLOW_ACL)
3593 		&& (server->acl_bitmask & ACL4_SUPPORT_DENY_ACL);
3594 }
3595 
3596 /* Assuming that XATTR_SIZE_MAX is a multiple of PAGE_CACHE_SIZE, and that
3597  * it's OK to put sizeof(void *) * (XATTR_SIZE_MAX/PAGE_CACHE_SIZE) bytes on
3598  * the stack.
3599  */
3600 #define NFS4ACL_MAXPAGES (XATTR_SIZE_MAX >> PAGE_CACHE_SHIFT)
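
/*
 * Sizing sketch for the array bound above (the numeric values assume a
 * typical configuration with XATTR_SIZE_MAX == 65536 and 4K pages, and are
 * illustrative rather than guaranteed everywhere):
 *
 *	NFS4ACL_MAXPAGES = 65536 >> 12 = 16
 *	stack cost of "struct page *pages[NFS4ACL_MAXPAGES]"
 *		= 16 * sizeof(struct page *) = 128 bytes on a 64-bit build
 *
 * which is why keeping the page pointer array on the stack is acceptable.
 */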
3601 
3602 static int buf_to_pages_noslab(const void *buf, size_t buflen,
3603 		struct page **pages, unsigned int *pgbase)
3604 {
3605 	struct page *newpage, **spages;
3606 	int rc = 0;
3607 	size_t len;
3608 	spages = pages;
3609 
3610 	do {
3611 		len = min_t(size_t, PAGE_CACHE_SIZE, buflen);
3612 		newpage = alloc_page(GFP_KERNEL);
3613 
3614 		if (newpage == NULL)
3615 			goto unwind;
3616 		memcpy(page_address(newpage), buf, len);
3617 		buf += len;
3618 		buflen -= len;
3619 		*pages++ = newpage;
3620 		rc++;
3621 	} while (buflen != 0);
3622 
3623 	return rc;
3624 
3625 unwind:
3626 	for (; rc > 0; rc--)
3627 		__free_page(spages[rc-1]);
3628 	return -ENOMEM;
3629 }
3630 
3631 struct nfs4_cached_acl {
3632 	int cached;
3633 	size_t len;
3634 	char data[0];
3635 };
3636 
3637 static void nfs4_set_cached_acl(struct inode *inode, struct nfs4_cached_acl *acl)
3638 {
3639 	struct nfs_inode *nfsi = NFS_I(inode);
3640 
3641 	spin_lock(&inode->i_lock);
3642 	kfree(nfsi->nfs4_acl);
3643 	nfsi->nfs4_acl = acl;
3644 	spin_unlock(&inode->i_lock);
3645 }
3646 
3647 static void nfs4_zap_acl_attr(struct inode *inode)
3648 {
3649 	nfs4_set_cached_acl(inode, NULL);
3650 }
3651 
3652 static inline ssize_t nfs4_read_cached_acl(struct inode *inode, char *buf, size_t buflen)
3653 {
3654 	struct nfs_inode *nfsi = NFS_I(inode);
3655 	struct nfs4_cached_acl *acl;
3656 	int ret = -ENOENT;
3657 
3658 	spin_lock(&inode->i_lock);
3659 	acl = nfsi->nfs4_acl;
3660 	if (acl == NULL)
3661 		goto out;
3662 	if (buf == NULL) /* user is just asking for length */
3663 		goto out_len;
3664 	if (acl->cached == 0)
3665 		goto out;
3666 	ret = -ERANGE; /* see getxattr(2) man page */
3667 	if (acl->len > buflen)
3668 		goto out;
3669 	memcpy(buf, acl->data, acl->len);
3670 out_len:
3671 	ret = acl->len;
3672 out:
3673 	spin_unlock(&inode->i_lock);
3674 	return ret;
3675 }
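
/*
 * Return value summary for nfs4_read_cached_acl(), derived from the code
 * above: -ENOENT when nothing usable is cached (no cache entry, or only the
 * length is cached while the caller wants data), -ERANGE when the caller's
 * buffer is smaller than the cached ACL, otherwise the ACL length, with the
 * data copied into @buf when it was supplied.
 */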
3676 
3677 static void nfs4_write_cached_acl(struct inode *inode, struct page **pages, size_t pgbase, size_t acl_len)
3678 {
3679 	struct nfs4_cached_acl *acl;
3680 
3681 	if (pages && acl_len <= PAGE_SIZE) {
3682 		acl = kmalloc(sizeof(*acl) + acl_len, GFP_KERNEL);
3683 		if (acl == NULL)
3684 			goto out;
3685 		acl->cached = 1;
3686 		_copy_from_pages(acl->data, pages, pgbase, acl_len);
3687 	} else {
3688 		acl = kmalloc(sizeof(*acl), GFP_KERNEL);
3689 		if (acl == NULL)
3690 			goto out;
3691 		acl->cached = 0;
3692 	}
3693 	acl->len = acl_len;
3694 out:
3695 	nfs4_set_cached_acl(inode, acl);
3696 }
3697 
3698 /*
3699  * The getxattr API returns the required buffer length when called with a
3700  * NULL buf. The NFSv4 acl tool then calls getxattr again after allocating
3701  * the required buf.  On a NULL buf, we send a page of data to the server
3702  * guessing that the ACL request can be serviced by a page. If so, we cache
3703  * up to the page of ACL data, and the 2nd call to getxattr is serviced by
3704  * the cache. If not so, we throw away the page, and cache the required
3705  * length. The next getxattr call will then produce another round trip to
3706  * the server, this time with the input buf of the required size.
3707  */
3708 static ssize_t __nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t buflen)
3709 {
3710 	struct page *pages[NFS4ACL_MAXPAGES] = {NULL, };
3711 	struct nfs_getaclargs args = {
3712 		.fh = NFS_FH(inode),
3713 		.acl_pages = pages,
3714 		.acl_len = buflen,
3715 	};
3716 	struct nfs_getaclres res = {
3717 		.acl_len = buflen,
3718 	};
3719 	struct rpc_message msg = {
3720 		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETACL],
3721 		.rpc_argp = &args,
3722 		.rpc_resp = &res,
3723 	};
3724 	int ret = -ENOMEM, npages, i, acl_len = 0;
3725 
3726 	npages = (buflen + PAGE_SIZE - 1) >> PAGE_SHIFT;
3727 	/* As long as we're doing a round trip to the server anyway,
3728 	 * let's be prepared for a page of acl data. */
3729 	if (npages == 0)
3730 		npages = 1;
3731 
3732 	/* Add an extra page to handle the bitmap returned */
3733 	npages++;
3734 
3735 	for (i = 0; i < npages; i++) {
3736 		pages[i] = alloc_page(GFP_KERNEL);
3737 		if (!pages[i])
3738 			goto out_free;
3739 	}
3740 
3741 	/* for decoding across pages */
3742 	res.acl_scratch = alloc_page(GFP_KERNEL);
3743 	if (!res.acl_scratch)
3744 		goto out_free;
3745 
3746 	args.acl_len = npages * PAGE_SIZE;
3747 	args.acl_pgbase = 0;
3748 
3749 	/* Let decode_getacl know not to fail if the ACL data is larger than
3750 	 * the page we send as a guess */
3751 	if (buf == NULL)
3752 		res.acl_flags |= NFS4_ACL_LEN_REQUEST;
3753 
3754 	dprintk("%s  buf %p buflen %zu npages %d args.acl_len %zu\n",
3755 		__func__, buf, buflen, npages, args.acl_len);
3756 	ret = nfs4_call_sync(NFS_SERVER(inode)->client, NFS_SERVER(inode),
3757 			     &msg, &args.seq_args, &res.seq_res, 0);
3758 	if (ret)
3759 		goto out_free;
3760 
3761 	acl_len = res.acl_len - res.acl_data_offset;
3762 	if (acl_len > args.acl_len)
3763 		nfs4_write_cached_acl(inode, NULL, 0, acl_len);
3764 	else
3765 		nfs4_write_cached_acl(inode, pages, res.acl_data_offset,
3766 				      acl_len);
3767 	if (buf) {
3768 		ret = -ERANGE;
3769 		if (acl_len > buflen)
3770 			goto out_free;
3771 		_copy_from_pages(buf, pages, res.acl_data_offset,
3772 				acl_len);
3773 	}
3774 	ret = acl_len;
3775 out_free:
3776 	for (i = 0; i < npages; i++)
3777 		if (pages[i])
3778 			__free_page(pages[i]);
3779 	if (res.acl_scratch)
3780 		__free_page(res.acl_scratch);
3781 	return ret;
3782 }
3783 
3784 static ssize_t nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t buflen)
3785 {
3786 	struct nfs4_exception exception = { };
3787 	ssize_t ret;
3788 	do {
3789 		ret = __nfs4_get_acl_uncached(inode, buf, buflen);
3790 		if (ret >= 0)
3791 			break;
3792 		ret = nfs4_handle_exception(NFS_SERVER(inode), ret, &exception);
3793 	} while (exception.retry);
3794 	return ret;
3795 }
3796 
3797 static ssize_t nfs4_proc_get_acl(struct inode *inode, void *buf, size_t buflen)
3798 {
3799 	struct nfs_server *server = NFS_SERVER(inode);
3800 	int ret;
3801 
3802 	if (!nfs4_server_supports_acls(server))
3803 		return -EOPNOTSUPP;
3804 	ret = nfs_revalidate_inode(server, inode);
3805 	if (ret < 0)
3806 		return ret;
3807 	if (NFS_I(inode)->cache_validity & NFS_INO_INVALID_ACL)
3808 		nfs_zap_acl_cache(inode);
3809 	ret = nfs4_read_cached_acl(inode, buf, buflen);
3810 	if (ret != -ENOENT)
3811 		/* -ENOENT is returned if there is no ACL or if there is an ACL
3812 		 * but no cached acl data, just the acl length */
3813 		return ret;
3814 	return nfs4_get_acl_uncached(inode, buf, buflen);
3815 }
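
/*
 * For reference, the two-pass getxattr(2) pattern that the caching above is
 * tuned for looks roughly like the sketch below (userspace code, shown only
 * as an illustration; error handling abbreviated):
 *
 *	ssize_t len = getxattr(path, "system.nfs4_acl", NULL, 0);
 *	if (len > 0) {
 *		char *acl = malloc(len);
 *		if (acl)
 *			len = getxattr(path, "system.nfs4_acl", acl, len);
 *		free(acl);
 *	}
 *
 * The first call is served by __nfs4_get_acl_uncached() and primes the
 * cache; the second is normally answered from nfs4_read_cached_acl() without
 * another round trip, unless the ACL turned out to be larger than the
 * guessed page.
 */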
3816 
3817 static int __nfs4_proc_set_acl(struct inode *inode, const void *buf, size_t buflen)
3818 {
3819 	struct nfs_server *server = NFS_SERVER(inode);
3820 	struct page *pages[NFS4ACL_MAXPAGES];
3821 	struct nfs_setaclargs arg = {
3822 		.fh		= NFS_FH(inode),
3823 		.acl_pages	= pages,
3824 		.acl_len	= buflen,
3825 	};
3826 	struct nfs_setaclres res;
3827 	struct rpc_message msg = {
3828 		.rpc_proc	= &nfs4_procedures[NFSPROC4_CLNT_SETACL],
3829 		.rpc_argp	= &arg,
3830 		.rpc_resp	= &res,
3831 	};
3832 	int ret, i;
3833 
3834 	if (!nfs4_server_supports_acls(server))
3835 		return -EOPNOTSUPP;
3836 	i = buf_to_pages_noslab(buf, buflen, arg.acl_pages, &arg.acl_pgbase);
3837 	if (i < 0)
3838 		return i;
3839 	nfs_inode_return_delegation(inode);
3840 	ret = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1);
3841 
3842 	/*
3843 	 * Free each page after tx, so the only ref left is
3844 	 * held by the network stack
3845 	 */
3846 	for (; i > 0; i--)
3847 		put_page(pages[i-1]);
3848 
3849 	/*
3850 	 * An ACL update can result in an inode attribute update,
3851 	 * so mark the attribute cache invalid.
3852 	 */
3853 	spin_lock(&inode->i_lock);
3854 	NFS_I(inode)->cache_validity |= NFS_INO_INVALID_ATTR;
3855 	spin_unlock(&inode->i_lock);
3856 	nfs_access_zap_cache(inode);
3857 	nfs_zap_acl_cache(inode);
3858 	return ret;
3859 }
3860 
3861 static int nfs4_proc_set_acl(struct inode *inode, const void *buf, size_t buflen)
3862 {
3863 	struct nfs4_exception exception = { };
3864 	int err;
3865 	do {
3866 		err = nfs4_handle_exception(NFS_SERVER(inode),
3867 				__nfs4_proc_set_acl(inode, buf, buflen),
3868 				&exception);
3869 	} while (exception.retry);
3870 	return err;
3871 }
3872 
3873 static int
3874 nfs4_async_handle_error(struct rpc_task *task, const struct nfs_server *server, struct nfs4_state *state)
3875 {
3876 	struct nfs_client *clp = server->nfs_client;
3877 
3878 	if (task->tk_status >= 0)
3879 		return 0;
3880 	switch (task->tk_status) {
3881 		case -NFS4ERR_DELEG_REVOKED:
3882 		case -NFS4ERR_ADMIN_REVOKED:
3883 		case -NFS4ERR_BAD_STATEID:
3884 			if (state == NULL)
3885 				break;
3886 			nfs_remove_bad_delegation(state->inode);
3887 		case -NFS4ERR_OPENMODE:
3888 			if (state == NULL)
3889 				break;
3890 			nfs4_schedule_stateid_recovery(server, state);
3891 			goto wait_on_recovery;
3892 		case -NFS4ERR_EXPIRED:
3893 			if (state != NULL)
3894 				nfs4_schedule_stateid_recovery(server, state);
3895 		case -NFS4ERR_STALE_STATEID:
3896 		case -NFS4ERR_STALE_CLIENTID:
3897 			nfs4_schedule_lease_recovery(clp);
3898 			goto wait_on_recovery;
3899 #if defined(CONFIG_NFS_V4_1)
3900 		case -NFS4ERR_BADSESSION:
3901 		case -NFS4ERR_BADSLOT:
3902 		case -NFS4ERR_BAD_HIGH_SLOT:
3903 		case -NFS4ERR_DEADSESSION:
3904 		case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
3905 		case -NFS4ERR_SEQ_FALSE_RETRY:
3906 		case -NFS4ERR_SEQ_MISORDERED:
3907 			dprintk("%s ERROR %d, Reset session\n", __func__,
3908 				task->tk_status);
3909 			nfs4_schedule_session_recovery(clp->cl_session);
3910 			task->tk_status = 0;
3911 			return -EAGAIN;
3912 #endif /* CONFIG_NFS_V4_1 */
3913 		case -NFS4ERR_DELAY:
3914 			nfs_inc_server_stats(server, NFSIOS_DELAY);
3915 		case -NFS4ERR_GRACE:
3916 		case -EKEYEXPIRED:
3917 			rpc_delay(task, NFS4_POLL_RETRY_MAX);
3918 			task->tk_status = 0;
3919 			return -EAGAIN;
3920 		case -NFS4ERR_RETRY_UNCACHED_REP:
3921 		case -NFS4ERR_OLD_STATEID:
3922 			task->tk_status = 0;
3923 			return -EAGAIN;
3924 	}
3925 	task->tk_status = nfs4_map_errors(task->tk_status);
3926 	return 0;
3927 wait_on_recovery:
3928 	rpc_sleep_on(&clp->cl_rpcwaitq, task, NULL);
3929 	if (test_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state) == 0)
3930 		rpc_wake_up_queued_task(&clp->cl_rpcwaitq, task);
3931 	task->tk_status = 0;
3932 	return -EAGAIN;
3933 }
3934 
3935 static void nfs4_construct_boot_verifier(struct nfs_client *clp,
3936 					 nfs4_verifier *bootverf)
3937 {
3938 	__be32 verf[2];
3939 
3940 	verf[0] = htonl((u32)clp->cl_boot_time.tv_sec);
3941 	verf[1] = htonl((u32)clp->cl_boot_time.tv_nsec);
3942 	memcpy(bootverf->data, verf, sizeof(bootverf->data));
3943 }
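
/*
 * Layout sketch for the verifier built above (numbers are illustrative):
 * with cl_boot_time = { .tv_sec = 0x4f5a3c21, .tv_nsec = 0x000186a0 } the
 * 8-byte verifier is the two 32-bit values in network byte order, i.e. the
 * byte stream 4f 5a 3c 21 00 01 86 a0.  A reboot changes the boot time and
 * hence the verifier, which is how the server detects a client restart in
 * SETCLIENTID and EXCHANGE_ID.
 */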
3944 
3945 int nfs4_proc_setclientid(struct nfs_client *clp, u32 program,
3946 		unsigned short port, struct rpc_cred *cred,
3947 		struct nfs4_setclientid_res *res)
3948 {
3949 	nfs4_verifier sc_verifier;
3950 	struct nfs4_setclientid setclientid = {
3951 		.sc_verifier = &sc_verifier,
3952 		.sc_prog = program,
3953 		.sc_cb_ident = clp->cl_cb_ident,
3954 	};
3955 	struct rpc_message msg = {
3956 		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETCLIENTID],
3957 		.rpc_argp = &setclientid,
3958 		.rpc_resp = res,
3959 		.rpc_cred = cred,
3960 	};
3961 	int loop = 0;
3962 	int status;
3963 
3964 	nfs4_construct_boot_verifier(clp, &sc_verifier);
3965 
3966 	for (;;) {
3967 		rcu_read_lock();
3968 		setclientid.sc_name_len = scnprintf(setclientid.sc_name,
3969 				sizeof(setclientid.sc_name), "%s/%s %s %s %u",
3970 				clp->cl_ipaddr,
3971 				rpc_peeraddr2str(clp->cl_rpcclient,
3972 							RPC_DISPLAY_ADDR),
3973 				rpc_peeraddr2str(clp->cl_rpcclient,
3974 							RPC_DISPLAY_PROTO),
3975 				clp->cl_rpcclient->cl_auth->au_ops->au_name,
3976 				clp->cl_id_uniquifier);
3977 		setclientid.sc_netid_len = scnprintf(setclientid.sc_netid,
3978 				sizeof(setclientid.sc_netid),
3979 				rpc_peeraddr2str(clp->cl_rpcclient,
3980 							RPC_DISPLAY_NETID));
3981 		setclientid.sc_uaddr_len = scnprintf(setclientid.sc_uaddr,
3982 				sizeof(setclientid.sc_uaddr), "%s.%u.%u",
3983 				clp->cl_ipaddr, port >> 8, port & 255);
3984 		rcu_read_unlock();
3985 
3986 		status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);
3987 		if (status != -NFS4ERR_CLID_INUSE)
3988 			break;
3989 		if (loop != 0) {
3990 			++clp->cl_id_uniquifier;
3991 			break;
3992 		}
3993 		++loop;
3994 		ssleep(clp->cl_lease_time / HZ + 1);
3995 	}
3996 	return status;
3997 }
3998 
3999 int nfs4_proc_setclientid_confirm(struct nfs_client *clp,
4000 		struct nfs4_setclientid_res *arg,
4001 		struct rpc_cred *cred)
4002 {
4003 	struct nfs_fsinfo fsinfo;
4004 	struct rpc_message msg = {
4005 		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETCLIENTID_CONFIRM],
4006 		.rpc_argp = arg,
4007 		.rpc_resp = &fsinfo,
4008 		.rpc_cred = cred,
4009 	};
4010 	unsigned long now;
4011 	int status;
4012 
4013 	now = jiffies;
4014 	status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);
4015 	if (status == 0) {
4016 		spin_lock(&clp->cl_lock);
4017 		clp->cl_lease_time = fsinfo.lease_time * HZ;
4018 		clp->cl_last_renewal = now;
4019 		spin_unlock(&clp->cl_lock);
4020 	}
4021 	return status;
4022 }
4023 
4024 struct nfs4_delegreturndata {
4025 	struct nfs4_delegreturnargs args;
4026 	struct nfs4_delegreturnres res;
4027 	struct nfs_fh fh;
4028 	nfs4_stateid stateid;
4029 	unsigned long timestamp;
4030 	struct nfs_fattr fattr;
4031 	int rpc_status;
4032 };
4033 
4034 static void nfs4_delegreturn_done(struct rpc_task *task, void *calldata)
4035 {
4036 	struct nfs4_delegreturndata *data = calldata;
4037 
4038 	if (!nfs4_sequence_done(task, &data->res.seq_res))
4039 		return;
4040 
4041 	switch (task->tk_status) {
4042 	case -NFS4ERR_STALE_STATEID:
4043 	case -NFS4ERR_EXPIRED:
4044 	case 0:
4045 		renew_lease(data->res.server, data->timestamp);
4046 		break;
4047 	default:
4048 		if (nfs4_async_handle_error(task, data->res.server, NULL) ==
4049 				-EAGAIN) {
4050 			rpc_restart_call_prepare(task);
4051 			return;
4052 		}
4053 	}
4054 	data->rpc_status = task->tk_status;
4055 }
4056 
4057 static void nfs4_delegreturn_release(void *calldata)
4058 {
4059 	kfree(calldata);
4060 }
4061 
4062 #if defined(CONFIG_NFS_V4_1)
4063 static void nfs4_delegreturn_prepare(struct rpc_task *task, void *data)
4064 {
4065 	struct nfs4_delegreturndata *d_data;
4066 
4067 	d_data = (struct nfs4_delegreturndata *)data;
4068 
4069 	if (nfs4_setup_sequence(d_data->res.server,
4070 				&d_data->args.seq_args,
4071 				&d_data->res.seq_res, task))
4072 		return;
4073 	rpc_call_start(task);
4074 }
4075 #endif /* CONFIG_NFS_V4_1 */
4076 
4077 static const struct rpc_call_ops nfs4_delegreturn_ops = {
4078 #if defined(CONFIG_NFS_V4_1)
4079 	.rpc_call_prepare = nfs4_delegreturn_prepare,
4080 #endif /* CONFIG_NFS_V4_1 */
4081 	.rpc_call_done = nfs4_delegreturn_done,
4082 	.rpc_release = nfs4_delegreturn_release,
4083 };
4084 
4085 static int _nfs4_proc_delegreturn(struct inode *inode, struct rpc_cred *cred, const nfs4_stateid *stateid, int issync)
4086 {
4087 	struct nfs4_delegreturndata *data;
4088 	struct nfs_server *server = NFS_SERVER(inode);
4089 	struct rpc_task *task;
4090 	struct rpc_message msg = {
4091 		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_DELEGRETURN],
4092 		.rpc_cred = cred,
4093 	};
4094 	struct rpc_task_setup task_setup_data = {
4095 		.rpc_client = server->client,
4096 		.rpc_message = &msg,
4097 		.callback_ops = &nfs4_delegreturn_ops,
4098 		.flags = RPC_TASK_ASYNC,
4099 	};
4100 	int status = 0;
4101 
4102 	data = kzalloc(sizeof(*data), GFP_NOFS);
4103 	if (data == NULL)
4104 		return -ENOMEM;
4105 	nfs41_init_sequence(&data->args.seq_args, &data->res.seq_res, 1);
4106 	data->args.fhandle = &data->fh;
4107 	data->args.stateid = &data->stateid;
4108 	data->args.bitmask = server->attr_bitmask;
4109 	nfs_copy_fh(&data->fh, NFS_FH(inode));
4110 	nfs4_stateid_copy(&data->stateid, stateid);
4111 	data->res.fattr = &data->fattr;
4112 	data->res.server = server;
4113 	nfs_fattr_init(data->res.fattr);
4114 	data->timestamp = jiffies;
4115 	data->rpc_status = 0;
4116 
4117 	task_setup_data.callback_data = data;
4118 	msg.rpc_argp = &data->args;
4119 	msg.rpc_resp = &data->res;
4120 	task = rpc_run_task(&task_setup_data);
4121 	if (IS_ERR(task))
4122 		return PTR_ERR(task);
4123 	if (!issync)
4124 		goto out;
4125 	status = nfs4_wait_for_completion_rpc_task(task);
4126 	if (status != 0)
4127 		goto out;
4128 	status = data->rpc_status;
4129 	if (status != 0)
4130 		goto out;
4131 	nfs_refresh_inode(inode, &data->fattr);
4132 out:
4133 	rpc_put_task(task);
4134 	return status;
4135 }
4136 
4137 int nfs4_proc_delegreturn(struct inode *inode, struct rpc_cred *cred, const nfs4_stateid *stateid, int issync)
4138 {
4139 	struct nfs_server *server = NFS_SERVER(inode);
4140 	struct nfs4_exception exception = { };
4141 	int err;
4142 	do {
4143 		err = _nfs4_proc_delegreturn(inode, cred, stateid, issync);
4144 		switch (err) {
4145 			case -NFS4ERR_STALE_STATEID:
4146 			case -NFS4ERR_EXPIRED:
4147 			case 0:
4148 				return 0;
4149 		}
4150 		err = nfs4_handle_exception(server, err, &exception);
4151 	} while (exception.retry);
4152 	return err;
4153 }
4154 
4155 #define NFS4_LOCK_MINTIMEOUT (1 * HZ)
4156 #define NFS4_LOCK_MAXTIMEOUT (30 * HZ)
4157 
4158 /*
4159  * Sleep, with exponential backoff, and retry the LOCK operation.
4160  */
4161 static unsigned long
4162 nfs4_set_lock_task_retry(unsigned long timeout)
4163 {
4164 	freezable_schedule_timeout_killable(timeout);
4165 	timeout <<= 1;
4166 	if (timeout > NFS4_LOCK_MAXTIMEOUT)
4167 		return NFS4_LOCK_MAXTIMEOUT;
4168 	return timeout;
4169 }
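
/*
 * Retry interval progression (a sketch; assumes the caller starts from
 * NFS4_LOCK_MINTIMEOUT and feeds each return value back in, as
 * nfs4_proc_lock() does):
 *
 *	sleep 1*HZ, 2*HZ, 4*HZ, 8*HZ, 16*HZ, then 30*HZ on every further retry
 *
 * i.e. the sleep doubles after each blocked attempt until it is capped at
 * NFS4_LOCK_MAXTIMEOUT.
 */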
4170 
4171 static int _nfs4_proc_getlk(struct nfs4_state *state, int cmd, struct file_lock *request)
4172 {
4173 	struct inode *inode = state->inode;
4174 	struct nfs_server *server = NFS_SERVER(inode);
4175 	struct nfs_client *clp = server->nfs_client;
4176 	struct nfs_lockt_args arg = {
4177 		.fh = NFS_FH(inode),
4178 		.fl = request,
4179 	};
4180 	struct nfs_lockt_res res = {
4181 		.denied = request,
4182 	};
4183 	struct rpc_message msg = {
4184 		.rpc_proc	= &nfs4_procedures[NFSPROC4_CLNT_LOCKT],
4185 		.rpc_argp       = &arg,
4186 		.rpc_resp       = &res,
4187 		.rpc_cred	= state->owner->so_cred,
4188 	};
4189 	struct nfs4_lock_state *lsp;
4190 	int status;
4191 
4192 	arg.lock_owner.clientid = clp->cl_clientid;
4193 	status = nfs4_set_lock_state(state, request);
4194 	if (status != 0)
4195 		goto out;
4196 	lsp = request->fl_u.nfs4_fl.owner;
4197 	arg.lock_owner.id = lsp->ls_seqid.owner_id;
4198 	arg.lock_owner.s_dev = server->s_dev;
4199 	status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1);
4200 	switch (status) {
4201 		case 0:
4202 			request->fl_type = F_UNLCK;
4203 			break;
4204 		case -NFS4ERR_DENIED:
4205 			status = 0;
4206 	}
4207 	request->fl_ops->fl_release_private(request);
4208 out:
4209 	return status;
4210 }
4211 
4212 static int nfs4_proc_getlk(struct nfs4_state *state, int cmd, struct file_lock *request)
4213 {
4214 	struct nfs4_exception exception = { };
4215 	int err;
4216 
4217 	do {
4218 		err = nfs4_handle_exception(NFS_SERVER(state->inode),
4219 				_nfs4_proc_getlk(state, cmd, request),
4220 				&exception);
4221 	} while (exception.retry);
4222 	return err;
4223 }
4224 
4225 static int do_vfs_lock(struct file *file, struct file_lock *fl)
4226 {
4227 	int res = 0;
4228 	switch (fl->fl_flags & (FL_POSIX|FL_FLOCK)) {
4229 		case FL_POSIX:
4230 			res = posix_lock_file_wait(file, fl);
4231 			break;
4232 		case FL_FLOCK:
4233 			res = flock_lock_file_wait(file, fl);
4234 			break;
4235 		default:
4236 			BUG();
4237 	}
4238 	return res;
4239 }
4240 
4241 struct nfs4_unlockdata {
4242 	struct nfs_locku_args arg;
4243 	struct nfs_locku_res res;
4244 	struct nfs4_lock_state *lsp;
4245 	struct nfs_open_context *ctx;
4246 	struct file_lock fl;
4247 	const struct nfs_server *server;
4248 	unsigned long timestamp;
4249 };
4250 
4251 static struct nfs4_unlockdata *nfs4_alloc_unlockdata(struct file_lock *fl,
4252 		struct nfs_open_context *ctx,
4253 		struct nfs4_lock_state *lsp,
4254 		struct nfs_seqid *seqid)
4255 {
4256 	struct nfs4_unlockdata *p;
4257 	struct inode *inode = lsp->ls_state->inode;
4258 
4259 	p = kzalloc(sizeof(*p), GFP_NOFS);
4260 	if (p == NULL)
4261 		return NULL;
4262 	p->arg.fh = NFS_FH(inode);
4263 	p->arg.fl = &p->fl;
4264 	p->arg.seqid = seqid;
4265 	p->res.seqid = seqid;
4266 	p->arg.stateid = &lsp->ls_stateid;
4267 	p->lsp = lsp;
4268 	atomic_inc(&lsp->ls_count);
4269 	/* Ensure we don't close the file until we're done freeing locks! */
4270 	p->ctx = get_nfs_open_context(ctx);
4271 	memcpy(&p->fl, fl, sizeof(p->fl));
4272 	p->server = NFS_SERVER(inode);
4273 	return p;
4274 }
4275 
4276 static void nfs4_locku_release_calldata(void *data)
4277 {
4278 	struct nfs4_unlockdata *calldata = data;
4279 	nfs_free_seqid(calldata->arg.seqid);
4280 	nfs4_put_lock_state(calldata->lsp);
4281 	put_nfs_open_context(calldata->ctx);
4282 	kfree(calldata);
4283 }
4284 
4285 static void nfs4_locku_done(struct rpc_task *task, void *data)
4286 {
4287 	struct nfs4_unlockdata *calldata = data;
4288 
4289 	if (!nfs4_sequence_done(task, &calldata->res.seq_res))
4290 		return;
4291 	switch (task->tk_status) {
4292 		case 0:
4293 			nfs4_stateid_copy(&calldata->lsp->ls_stateid,
4294 					&calldata->res.stateid);
4295 			renew_lease(calldata->server, calldata->timestamp);
4296 			break;
4297 		case -NFS4ERR_BAD_STATEID:
4298 		case -NFS4ERR_OLD_STATEID:
4299 		case -NFS4ERR_STALE_STATEID:
4300 		case -NFS4ERR_EXPIRED:
4301 			break;
4302 		default:
4303 			if (nfs4_async_handle_error(task, calldata->server, NULL) == -EAGAIN)
4304 				rpc_restart_call_prepare(task);
4305 	}
4306 }
4307 
4308 static void nfs4_locku_prepare(struct rpc_task *task, void *data)
4309 {
4310 	struct nfs4_unlockdata *calldata = data;
4311 
4312 	if (nfs_wait_on_sequence(calldata->arg.seqid, task) != 0)
4313 		return;
4314 	if ((calldata->lsp->ls_flags & NFS_LOCK_INITIALIZED) == 0) {
4315 		/* Note: exit _without_ running nfs4_locku_done */
4316 		task->tk_action = NULL;
4317 		return;
4318 	}
4319 	calldata->timestamp = jiffies;
4320 	if (nfs4_setup_sequence(calldata->server,
4321 				&calldata->arg.seq_args,
4322 				&calldata->res.seq_res, task))
4323 		return;
4324 	rpc_call_start(task);
4325 }
4326 
4327 static const struct rpc_call_ops nfs4_locku_ops = {
4328 	.rpc_call_prepare = nfs4_locku_prepare,
4329 	.rpc_call_done = nfs4_locku_done,
4330 	.rpc_release = nfs4_locku_release_calldata,
4331 };
4332 
4333 static struct rpc_task *nfs4_do_unlck(struct file_lock *fl,
4334 		struct nfs_open_context *ctx,
4335 		struct nfs4_lock_state *lsp,
4336 		struct nfs_seqid *seqid)
4337 {
4338 	struct nfs4_unlockdata *data;
4339 	struct rpc_message msg = {
4340 		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOCKU],
4341 		.rpc_cred = ctx->cred,
4342 	};
4343 	struct rpc_task_setup task_setup_data = {
4344 		.rpc_client = NFS_CLIENT(lsp->ls_state->inode),
4345 		.rpc_message = &msg,
4346 		.callback_ops = &nfs4_locku_ops,
4347 		.workqueue = nfsiod_workqueue,
4348 		.flags = RPC_TASK_ASYNC,
4349 	};
4350 
4351 	/* Ensure this is an unlock - when canceling a lock, the
4352 	 * canceled lock is passed in, and it won't be an unlock.
4353 	 */
4354 	fl->fl_type = F_UNLCK;
4355 
4356 	data = nfs4_alloc_unlockdata(fl, ctx, lsp, seqid);
4357 	if (data == NULL) {
4358 		nfs_free_seqid(seqid);
4359 		return ERR_PTR(-ENOMEM);
4360 	}
4361 
4362 	nfs41_init_sequence(&data->arg.seq_args, &data->res.seq_res, 1);
4363 	msg.rpc_argp = &data->arg;
4364 	msg.rpc_resp = &data->res;
4365 	task_setup_data.callback_data = data;
4366 	return rpc_run_task(&task_setup_data);
4367 }
4368 
4369 static int nfs4_proc_unlck(struct nfs4_state *state, int cmd, struct file_lock *request)
4370 {
4371 	struct nfs_inode *nfsi = NFS_I(state->inode);
4372 	struct nfs_seqid *seqid;
4373 	struct nfs4_lock_state *lsp;
4374 	struct rpc_task *task;
4375 	int status = 0;
4376 	unsigned char fl_flags = request->fl_flags;
4377 
4378 	status = nfs4_set_lock_state(state, request);
4379 	/* Unlock _before_ we do the RPC call */
4380 	request->fl_flags |= FL_EXISTS;
4381 	down_read(&nfsi->rwsem);
4382 	if (do_vfs_lock(request->fl_file, request) == -ENOENT) {
4383 		up_read(&nfsi->rwsem);
4384 		goto out;
4385 	}
4386 	up_read(&nfsi->rwsem);
4387 	if (status != 0)
4388 		goto out;
4389 	/* Is this a delegated lock? */
4390 	if (test_bit(NFS_DELEGATED_STATE, &state->flags))
4391 		goto out;
4392 	lsp = request->fl_u.nfs4_fl.owner;
4393 	seqid = nfs_alloc_seqid(&lsp->ls_seqid, GFP_KERNEL);
4394 	status = -ENOMEM;
4395 	if (seqid == NULL)
4396 		goto out;
4397 	task = nfs4_do_unlck(request, nfs_file_open_context(request->fl_file), lsp, seqid);
4398 	status = PTR_ERR(task);
4399 	if (IS_ERR(task))
4400 		goto out;
4401 	status = nfs4_wait_for_completion_rpc_task(task);
4402 	rpc_put_task(task);
4403 out:
4404 	request->fl_flags = fl_flags;
4405 	return status;
4406 }
4407 
4408 struct nfs4_lockdata {
4409 	struct nfs_lock_args arg;
4410 	struct nfs_lock_res res;
4411 	struct nfs4_lock_state *lsp;
4412 	struct nfs_open_context *ctx;
4413 	struct file_lock fl;
4414 	unsigned long timestamp;
4415 	int rpc_status;
4416 	int cancelled;
4417 	struct nfs_server *server;
4418 };
4419 
4420 static struct nfs4_lockdata *nfs4_alloc_lockdata(struct file_lock *fl,
4421 		struct nfs_open_context *ctx, struct nfs4_lock_state *lsp,
4422 		gfp_t gfp_mask)
4423 {
4424 	struct nfs4_lockdata *p;
4425 	struct inode *inode = lsp->ls_state->inode;
4426 	struct nfs_server *server = NFS_SERVER(inode);
4427 
4428 	p = kzalloc(sizeof(*p), gfp_mask);
4429 	if (p == NULL)
4430 		return NULL;
4431 
4432 	p->arg.fh = NFS_FH(inode);
4433 	p->arg.fl = &p->fl;
4434 	p->arg.open_seqid = nfs_alloc_seqid(&lsp->ls_state->owner->so_seqid, gfp_mask);
4435 	if (p->arg.open_seqid == NULL)
4436 		goto out_free;
4437 	p->arg.lock_seqid = nfs_alloc_seqid(&lsp->ls_seqid, gfp_mask);
4438 	if (p->arg.lock_seqid == NULL)
4439 		goto out_free_seqid;
4440 	p->arg.lock_stateid = &lsp->ls_stateid;
4441 	p->arg.lock_owner.clientid = server->nfs_client->cl_clientid;
4442 	p->arg.lock_owner.id = lsp->ls_seqid.owner_id;
4443 	p->arg.lock_owner.s_dev = server->s_dev;
4444 	p->res.lock_seqid = p->arg.lock_seqid;
4445 	p->lsp = lsp;
4446 	p->server = server;
4447 	atomic_inc(&lsp->ls_count);
4448 	p->ctx = get_nfs_open_context(ctx);
4449 	memcpy(&p->fl, fl, sizeof(p->fl));
4450 	return p;
4451 out_free_seqid:
4452 	nfs_free_seqid(p->arg.open_seqid);
4453 out_free:
4454 	kfree(p);
4455 	return NULL;
4456 }
4457 
4458 static void nfs4_lock_prepare(struct rpc_task *task, void *calldata)
4459 {
4460 	struct nfs4_lockdata *data = calldata;
4461 	struct nfs4_state *state = data->lsp->ls_state;
4462 
4463 	dprintk("%s: begin!\n", __func__);
4464 	if (nfs_wait_on_sequence(data->arg.lock_seqid, task) != 0)
4465 		return;
4466 	/* Do we need to do an open_to_lock_owner? */
4467 	if (!(data->arg.lock_seqid->sequence->flags & NFS_SEQID_CONFIRMED)) {
4468 		if (nfs_wait_on_sequence(data->arg.open_seqid, task) != 0)
4469 			return;
4470 		data->arg.open_stateid = &state->stateid;
4471 		data->arg.new_lock_owner = 1;
4472 		data->res.open_seqid = data->arg.open_seqid;
4473 	} else
4474 		data->arg.new_lock_owner = 0;
4475 	data->timestamp = jiffies;
4476 	if (nfs4_setup_sequence(data->server,
4477 				&data->arg.seq_args,
4478 				&data->res.seq_res, task))
4479 		return;
4480 	rpc_call_start(task);
4481 	dprintk("%s: done!, ret = %d\n", __func__, data->rpc_status);
4482 }
4483 
4484 static void nfs4_recover_lock_prepare(struct rpc_task *task, void *calldata)
4485 {
4486 	rpc_task_set_priority(task, RPC_PRIORITY_PRIVILEGED);
4487 	nfs4_lock_prepare(task, calldata);
4488 }
4489 
4490 static void nfs4_lock_done(struct rpc_task *task, void *calldata)
4491 {
4492 	struct nfs4_lockdata *data = calldata;
4493 
4494 	dprintk("%s: begin!\n", __func__);
4495 
4496 	if (!nfs4_sequence_done(task, &data->res.seq_res))
4497 		return;
4498 
4499 	data->rpc_status = task->tk_status;
4500 	if (data->arg.new_lock_owner != 0) {
4501 		if (data->rpc_status == 0)
4502 			nfs_confirm_seqid(&data->lsp->ls_seqid, 0);
4503 		else
4504 			goto out;
4505 	}
4506 	if (data->rpc_status == 0) {
4507 		nfs4_stateid_copy(&data->lsp->ls_stateid, &data->res.stateid);
4508 		data->lsp->ls_flags |= NFS_LOCK_INITIALIZED;
4509 		renew_lease(NFS_SERVER(data->ctx->dentry->d_inode), data->timestamp);
4510 	}
4511 out:
4512 	dprintk("%s: done, ret = %d!\n", __func__, data->rpc_status);
4513 }
4514 
4515 static void nfs4_lock_release(void *calldata)
4516 {
4517 	struct nfs4_lockdata *data = calldata;
4518 
4519 	dprintk("%s: begin!\n", __func__);
4520 	nfs_free_seqid(data->arg.open_seqid);
4521 	if (data->cancelled != 0) {
4522 		struct rpc_task *task;
4523 		task = nfs4_do_unlck(&data->fl, data->ctx, data->lsp,
4524 				data->arg.lock_seqid);
4525 		if (!IS_ERR(task))
4526 			rpc_put_task_async(task);
4527 		dprintk("%s: cancelling lock!\n", __func__);
4528 	} else
4529 		nfs_free_seqid(data->arg.lock_seqid);
4530 	nfs4_put_lock_state(data->lsp);
4531 	put_nfs_open_context(data->ctx);
4532 	kfree(data);
4533 	dprintk("%s: done!\n", __func__);
4534 }
4535 
4536 static const struct rpc_call_ops nfs4_lock_ops = {
4537 	.rpc_call_prepare = nfs4_lock_prepare,
4538 	.rpc_call_done = nfs4_lock_done,
4539 	.rpc_release = nfs4_lock_release,
4540 };
4541 
4542 static const struct rpc_call_ops nfs4_recover_lock_ops = {
4543 	.rpc_call_prepare = nfs4_recover_lock_prepare,
4544 	.rpc_call_done = nfs4_lock_done,
4545 	.rpc_release = nfs4_lock_release,
4546 };
4547 
4548 static void nfs4_handle_setlk_error(struct nfs_server *server, struct nfs4_lock_state *lsp, int new_lock_owner, int error)
4549 {
4550 	switch (error) {
4551 	case -NFS4ERR_ADMIN_REVOKED:
4552 	case -NFS4ERR_BAD_STATEID:
4553 		lsp->ls_seqid.flags &= ~NFS_SEQID_CONFIRMED;
4554 		if (new_lock_owner != 0 ||
4555 		   (lsp->ls_flags & NFS_LOCK_INITIALIZED) != 0)
4556 			nfs4_schedule_stateid_recovery(server, lsp->ls_state);
4557 		break;
4558 	case -NFS4ERR_STALE_STATEID:
4559 		lsp->ls_seqid.flags &= ~NFS_SEQID_CONFIRMED;
4560 	case -NFS4ERR_EXPIRED:
4561 		nfs4_schedule_lease_recovery(server->nfs_client);
4562 	}
4563 }
4564 
4565 static int _nfs4_do_setlk(struct nfs4_state *state, int cmd, struct file_lock *fl, int recovery_type)
4566 {
4567 	struct nfs4_lockdata *data;
4568 	struct rpc_task *task;
4569 	struct rpc_message msg = {
4570 		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOCK],
4571 		.rpc_cred = state->owner->so_cred,
4572 	};
4573 	struct rpc_task_setup task_setup_data = {
4574 		.rpc_client = NFS_CLIENT(state->inode),
4575 		.rpc_message = &msg,
4576 		.callback_ops = &nfs4_lock_ops,
4577 		.workqueue = nfsiod_workqueue,
4578 		.flags = RPC_TASK_ASYNC,
4579 	};
4580 	int ret;
4581 
4582 	dprintk("%s: begin!\n", __func__);
4583 	data = nfs4_alloc_lockdata(fl, nfs_file_open_context(fl->fl_file),
4584 			fl->fl_u.nfs4_fl.owner,
4585 			recovery_type == NFS_LOCK_NEW ? GFP_KERNEL : GFP_NOFS);
4586 	if (data == NULL)
4587 		return -ENOMEM;
4588 	if (IS_SETLKW(cmd))
4589 		data->arg.block = 1;
4590 	if (recovery_type > NFS_LOCK_NEW) {
4591 		if (recovery_type == NFS_LOCK_RECLAIM)
4592 			data->arg.reclaim = NFS_LOCK_RECLAIM;
4593 		task_setup_data.callback_ops = &nfs4_recover_lock_ops;
4594 	}
4595 	nfs41_init_sequence(&data->arg.seq_args, &data->res.seq_res, 1);
4596 	msg.rpc_argp = &data->arg;
4597 	msg.rpc_resp = &data->res;
4598 	task_setup_data.callback_data = data;
4599 	task = rpc_run_task(&task_setup_data);
4600 	if (IS_ERR(task))
4601 		return PTR_ERR(task);
4602 	ret = nfs4_wait_for_completion_rpc_task(task);
4603 	if (ret == 0) {
4604 		ret = data->rpc_status;
4605 		if (ret)
4606 			nfs4_handle_setlk_error(data->server, data->lsp,
4607 					data->arg.new_lock_owner, ret);
4608 	} else
4609 		data->cancelled = 1;
4610 	rpc_put_task(task);
4611 	dprintk("%s: done, ret = %d!\n", __func__, ret);
4612 	return ret;
4613 }
4614 
4615 static int nfs4_lock_reclaim(struct nfs4_state *state, struct file_lock *request)
4616 {
4617 	struct nfs_server *server = NFS_SERVER(state->inode);
4618 	struct nfs4_exception exception = {
4619 		.inode = state->inode,
4620 	};
4621 	int err;
4622 
4623 	do {
4624 		/* Cache the lock if possible... */
4625 		if (test_bit(NFS_DELEGATED_STATE, &state->flags) != 0)
4626 			return 0;
4627 		err = _nfs4_do_setlk(state, F_SETLK, request, NFS_LOCK_RECLAIM);
4628 		if (err != -NFS4ERR_DELAY)
4629 			break;
4630 		nfs4_handle_exception(server, err, &exception);
4631 	} while (exception.retry);
4632 	return err;
4633 }
4634 
4635 static int nfs4_lock_expired(struct nfs4_state *state, struct file_lock *request)
4636 {
4637 	struct nfs_server *server = NFS_SERVER(state->inode);
4638 	struct nfs4_exception exception = {
4639 		.inode = state->inode,
4640 	};
4641 	int err;
4642 
4643 	err = nfs4_set_lock_state(state, request);
4644 	if (err != 0)
4645 		return err;
4646 	do {
4647 		if (test_bit(NFS_DELEGATED_STATE, &state->flags) != 0)
4648 			return 0;
4649 		err = _nfs4_do_setlk(state, F_SETLK, request, NFS_LOCK_EXPIRED);
4650 		switch (err) {
4651 		default:
4652 			goto out;
4653 		case -NFS4ERR_GRACE:
4654 		case -NFS4ERR_DELAY:
4655 			nfs4_handle_exception(server, err, &exception);
4656 			err = 0;
4657 		}
4658 	} while (exception.retry);
4659 out:
4660 	return err;
4661 }
4662 
4663 #if defined(CONFIG_NFS_V4_1)
4664 static int nfs41_check_expired_locks(struct nfs4_state *state)
4665 {
4666 	int status, ret = NFS_OK;
4667 	struct nfs4_lock_state *lsp;
4668 	struct nfs_server *server = NFS_SERVER(state->inode);
4669 
4670 	list_for_each_entry(lsp, &state->lock_states, ls_locks) {
4671 		if (lsp->ls_flags & NFS_LOCK_INITIALIZED) {
4672 			status = nfs41_test_stateid(server, &lsp->ls_stateid);
4673 			if (status != NFS_OK) {
4674 				nfs41_free_stateid(server, &lsp->ls_stateid);
4675 				lsp->ls_flags &= ~NFS_LOCK_INITIALIZED;
4676 				ret = status;
4677 			}
4678 		}
4679 	}
4680 
4681 	return ret;
4682 }
4683 
4684 static int nfs41_lock_expired(struct nfs4_state *state, struct file_lock *request)
4685 {
4686 	int status = NFS_OK;
4687 
4688 	if (test_bit(LK_STATE_IN_USE, &state->flags))
4689 		status = nfs41_check_expired_locks(state);
4690 	if (status == NFS_OK)
4691 		return status;
4692 	return nfs4_lock_expired(state, request);
4693 }
4694 #endif
4695 
4696 static int _nfs4_proc_setlk(struct nfs4_state *state, int cmd, struct file_lock *request)
4697 {
4698 	struct nfs_inode *nfsi = NFS_I(state->inode);
4699 	unsigned char fl_flags = request->fl_flags;
4700 	int status = -ENOLCK;
4701 
4702 	if ((fl_flags & FL_POSIX) &&
4703 			!test_bit(NFS_STATE_POSIX_LOCKS, &state->flags))
4704 		goto out;
4705 	/* Is this a delegated open? */
4706 	status = nfs4_set_lock_state(state, request);
4707 	if (status != 0)
4708 		goto out;
4709 	request->fl_flags |= FL_ACCESS;
4710 	status = do_vfs_lock(request->fl_file, request);
4711 	if (status < 0)
4712 		goto out;
4713 	down_read(&nfsi->rwsem);
4714 	if (test_bit(NFS_DELEGATED_STATE, &state->flags)) {
4715 		/* Yes: cache locks! */
4716 		/* ...but avoid races with delegation recall... */
4717 		request->fl_flags = fl_flags & ~FL_SLEEP;
4718 		status = do_vfs_lock(request->fl_file, request);
4719 		goto out_unlock;
4720 	}
4721 	status = _nfs4_do_setlk(state, cmd, request, NFS_LOCK_NEW);
4722 	if (status != 0)
4723 		goto out_unlock;
4724 	/* Note: we always want to sleep here! */
4725 	request->fl_flags = fl_flags | FL_SLEEP;
4726 	if (do_vfs_lock(request->fl_file, request) < 0)
4727 		printk(KERN_WARNING "NFS: %s: VFS is out of sync with lock "
4728 			"manager!\n", __func__);
4729 out_unlock:
4730 	up_read(&nfsi->rwsem);
4731 out:
4732 	request->fl_flags = fl_flags;
4733 	return status;
4734 }
4735 
4736 static int nfs4_proc_setlk(struct nfs4_state *state, int cmd, struct file_lock *request)
4737 {
4738 	struct nfs4_exception exception = {
4739 		.state = state,
4740 		.inode = state->inode,
4741 	};
4742 	int err;
4743 
4744 	do {
4745 		err = _nfs4_proc_setlk(state, cmd, request);
4746 		if (err == -NFS4ERR_DENIED)
4747 			err = -EAGAIN;
4748 		err = nfs4_handle_exception(NFS_SERVER(state->inode),
4749 				err, &exception);
4750 	} while (exception.retry);
4751 	return err;
4752 }
4753 
4754 static int
4755 nfs4_proc_lock(struct file *filp, int cmd, struct file_lock *request)
4756 {
4757 	struct nfs_open_context *ctx;
4758 	struct nfs4_state *state;
4759 	unsigned long timeout = NFS4_LOCK_MINTIMEOUT;
4760 	int status;
4761 
4762 	/* verify open state */
4763 	ctx = nfs_file_open_context(filp);
4764 	state = ctx->state;
4765 
4766 	if (request->fl_start < 0 || request->fl_end < 0)
4767 		return -EINVAL;
4768 
4769 	if (IS_GETLK(cmd)) {
4770 		if (state != NULL)
4771 			return nfs4_proc_getlk(state, F_GETLK, request);
4772 		return 0;
4773 	}
4774 
4775 	if (!(IS_SETLK(cmd) || IS_SETLKW(cmd)))
4776 		return -EINVAL;
4777 
4778 	if (request->fl_type == F_UNLCK) {
4779 		if (state != NULL)
4780 			return nfs4_proc_unlck(state, cmd, request);
4781 		return 0;
4782 	}
4783 
4784 	if (state == NULL)
4785 		return -ENOLCK;
4786 	/*
4787 	 * Don't rely on the VFS having checked the file open mode,
4788 	 * since it won't do this for flock() locks.
4789 	 */
4790 	switch (request->fl_type & (F_RDLCK|F_WRLCK|F_UNLCK)) {
4791 	case F_RDLCK:
4792 		if (!(filp->f_mode & FMODE_READ))
4793 			return -EBADF;
4794 		break;
4795 	case F_WRLCK:
4796 		if (!(filp->f_mode & FMODE_WRITE))
4797 			return -EBADF;
4798 	}
4799 
4800 	do {
4801 		status = nfs4_proc_setlk(state, cmd, request);
4802 		if ((status != -EAGAIN) || IS_SETLK(cmd))
4803 			break;
4804 		timeout = nfs4_set_lock_task_retry(timeout);
4805 		status = -ERESTARTSYS;
4806 		if (signalled())
4807 			break;
4808 	} while(status < 0);
4809 	return status;
4810 }
4811 
4812 int nfs4_lock_delegation_recall(struct nfs4_state *state, struct file_lock *fl)
4813 {
4814 	struct nfs_server *server = NFS_SERVER(state->inode);
4815 	struct nfs4_exception exception = { };
4816 	int err;
4817 
4818 	err = nfs4_set_lock_state(state, fl);
4819 	if (err != 0)
4820 		goto out;
4821 	do {
4822 		err = _nfs4_do_setlk(state, F_SETLK, fl, NFS_LOCK_NEW);
4823 		switch (err) {
4824 			default:
4825 				printk(KERN_ERR "NFS: %s: unhandled error "
4826 					"%d.\n", __func__, err);
4827 			case 0:
4828 			case -ESTALE:
4829 				goto out;
4830 			case -NFS4ERR_EXPIRED:
4831 				nfs4_schedule_stateid_recovery(server, state);
4832 			case -NFS4ERR_STALE_CLIENTID:
4833 			case -NFS4ERR_STALE_STATEID:
4834 				nfs4_schedule_lease_recovery(server->nfs_client);
4835 				goto out;
4836 			case -NFS4ERR_BADSESSION:
4837 			case -NFS4ERR_BADSLOT:
4838 			case -NFS4ERR_BAD_HIGH_SLOT:
4839 			case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
4840 			case -NFS4ERR_DEADSESSION:
4841 				nfs4_schedule_session_recovery(server->nfs_client->cl_session);
4842 				goto out;
4843 			case -ERESTARTSYS:
4844 				/*
4845 				 * The show must go on: exit, but mark the
4846 				 * stateid as needing recovery.
4847 				 */
4848 			case -NFS4ERR_DELEG_REVOKED:
4849 			case -NFS4ERR_ADMIN_REVOKED:
4850 			case -NFS4ERR_BAD_STATEID:
4851 			case -NFS4ERR_OPENMODE:
4852 				nfs4_schedule_stateid_recovery(server, state);
4853 				err = 0;
4854 				goto out;
4855 			case -EKEYEXPIRED:
4856 				/*
4857 				 * User RPCSEC_GSS context has expired.
4858 				 * We cannot recover this stateid now, so
4859 				 * skip it and allow recovery thread to
4860 				 * proceed.
4861 				 */
4862 				err = 0;
4863 				goto out;
4864 			case -ENOMEM:
4865 			case -NFS4ERR_DENIED:
4866 				/* kill_proc(fl->fl_pid, SIGLOST, 1); */
4867 				err = 0;
4868 				goto out;
4869 			case -NFS4ERR_DELAY:
4870 				break;
4871 		}
4872 		err = nfs4_handle_exception(server, err, &exception);
4873 	} while (exception.retry);
4874 out:
4875 	return err;
4876 }
4877 
4878 struct nfs_release_lockowner_data {
4879 	struct nfs4_lock_state *lsp;
4880 	struct nfs_server *server;
4881 	struct nfs_release_lockowner_args args;
4882 };
4883 
4884 static void nfs4_release_lockowner_release(void *calldata)
4885 {
4886 	struct nfs_release_lockowner_data *data = calldata;
4887 	nfs4_free_lock_state(data->server, data->lsp);
4888 	kfree(calldata);
4889 }
4890 
4891 static const struct rpc_call_ops nfs4_release_lockowner_ops = {
4892 	.rpc_release = nfs4_release_lockowner_release,
4893 };
4894 
4895 int nfs4_release_lockowner(struct nfs4_lock_state *lsp)
4896 {
4897 	struct nfs_server *server = lsp->ls_state->owner->so_server;
4898 	struct nfs_release_lockowner_data *data;
4899 	struct rpc_message msg = {
4900 		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RELEASE_LOCKOWNER],
4901 	};
4902 
4903 	if (server->nfs_client->cl_mvops->minor_version != 0)
4904 		return -EINVAL;
4905 	data = kmalloc(sizeof(*data), GFP_NOFS);
4906 	if (!data)
4907 		return -ENOMEM;
4908 	data->lsp = lsp;
4909 	data->server = server;
4910 	data->args.lock_owner.clientid = server->nfs_client->cl_clientid;
4911 	data->args.lock_owner.id = lsp->ls_seqid.owner_id;
4912 	data->args.lock_owner.s_dev = server->s_dev;
4913 	msg.rpc_argp = &data->args;
4914 	rpc_call_async(server->client, &msg, 0, &nfs4_release_lockowner_ops, data);
4915 	return 0;
4916 }
4917 
4918 #define XATTR_NAME_NFSV4_ACL "system.nfs4_acl"
4919 
4920 static int nfs4_xattr_set_nfs4_acl(struct dentry *dentry, const char *key,
4921 				   const void *buf, size_t buflen,
4922 				   int flags, int type)
4923 {
4924 	if (strcmp(key, "") != 0)
4925 		return -EINVAL;
4926 
4927 	return nfs4_proc_set_acl(dentry->d_inode, buf, buflen);
4928 }
4929 
4930 static int nfs4_xattr_get_nfs4_acl(struct dentry *dentry, const char *key,
4931 				   void *buf, size_t buflen, int type)
4932 {
4933 	if (strcmp(key, "") != 0)
4934 		return -EINVAL;
4935 
4936 	return nfs4_proc_get_acl(dentry->d_inode, buf, buflen);
4937 }
4938 
4939 static size_t nfs4_xattr_list_nfs4_acl(struct dentry *dentry, char *list,
4940 				       size_t list_len, const char *name,
4941 				       size_t name_len, int type)
4942 {
4943 	size_t len = sizeof(XATTR_NAME_NFSV4_ACL);
4944 
4945 	if (!nfs4_server_supports_acls(NFS_SERVER(dentry->d_inode)))
4946 		return 0;
4947 
4948 	if (list && len <= list_len)
4949 		memcpy(list, XATTR_NAME_NFSV4_ACL, len);
4950 	return len;
4951 }
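
/*
 * Worked example for the listing above: XATTR_NAME_NFSV4_ACL is
 * "system.nfs4_acl", so len = sizeof(XATTR_NAME_NFSV4_ACL) = 16 bytes,
 * which includes the terminating NUL that separates names in the
 * listxattr(2) buffer.  When the server does not support ACLs the name is
 * omitted entirely and 0 is returned.
 */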
4952 
4953 /*
4954  * nfs_fhget will use either the mounted_on_fileid or the fileid
4955  */
4956 static void nfs_fixup_referral_attributes(struct nfs_fattr *fattr)
4957 {
4958 	if (!(((fattr->valid & NFS_ATTR_FATTR_MOUNTED_ON_FILEID) ||
4959 	       (fattr->valid & NFS_ATTR_FATTR_FILEID)) &&
4960 	      (fattr->valid & NFS_ATTR_FATTR_FSID) &&
4961 	      (fattr->valid & NFS_ATTR_FATTR_V4_LOCATIONS)))
4962 		return;
4963 
4964 	fattr->valid |= NFS_ATTR_FATTR_TYPE | NFS_ATTR_FATTR_MODE |
4965 		NFS_ATTR_FATTR_NLINK | NFS_ATTR_FATTR_V4_REFERRAL;
4966 	fattr->mode = S_IFDIR | S_IRUGO | S_IXUGO;
4967 	fattr->nlink = 2;
4968 }
4969 
4970 static int _nfs4_proc_fs_locations(struct rpc_clnt *client, struct inode *dir,
4971 				   const struct qstr *name,
4972 				   struct nfs4_fs_locations *fs_locations,
4973 				   struct page *page)
4974 {
4975 	struct nfs_server *server = NFS_SERVER(dir);
4976 	u32 bitmask[2] = {
4977 		[0] = FATTR4_WORD0_FSID | FATTR4_WORD0_FS_LOCATIONS,
4978 	};
4979 	struct nfs4_fs_locations_arg args = {
4980 		.dir_fh = NFS_FH(dir),
4981 		.name = name,
4982 		.page = page,
4983 		.bitmask = bitmask,
4984 	};
4985 	struct nfs4_fs_locations_res res = {
4986 		.fs_locations = fs_locations,
4987 	};
4988 	struct rpc_message msg = {
4989 		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FS_LOCATIONS],
4990 		.rpc_argp = &args,
4991 		.rpc_resp = &res,
4992 	};
4993 	int status;
4994 
4995 	dprintk("%s: start\n", __func__);
4996 
4997 	/* Ask for the fileid of the absent filesystem if mounted_on_fileid
4998 	 * is not supported */
4999 	if (NFS_SERVER(dir)->attr_bitmask[1] & FATTR4_WORD1_MOUNTED_ON_FILEID)
5000 		bitmask[1] |= FATTR4_WORD1_MOUNTED_ON_FILEID;
5001 	else
5002 		bitmask[0] |= FATTR4_WORD0_FILEID;
5003 
5004 	nfs_fattr_init(&fs_locations->fattr);
5005 	fs_locations->server = server;
5006 	fs_locations->nlocations = 0;
5007 	status = nfs4_call_sync(client, server, &msg, &args.seq_args, &res.seq_res, 0);
5008 	dprintk("%s: returned status = %d\n", __func__, status);
5009 	return status;
5010 }
5011 
5012 int nfs4_proc_fs_locations(struct rpc_clnt *client, struct inode *dir,
5013 			   const struct qstr *name,
5014 			   struct nfs4_fs_locations *fs_locations,
5015 			   struct page *page)
5016 {
5017 	struct nfs4_exception exception = { };
5018 	int err;
5019 	do {
5020 		err = nfs4_handle_exception(NFS_SERVER(dir),
5021 				_nfs4_proc_fs_locations(client, dir, name, fs_locations, page),
5022 				&exception);
5023 	} while (exception.retry);
5024 	return err;
5025 }
5026 
5027 static int _nfs4_proc_secinfo(struct inode *dir, const struct qstr *name, struct nfs4_secinfo_flavors *flavors)
5028 {
5029 	int status;
5030 	struct nfs4_secinfo_arg args = {
5031 		.dir_fh = NFS_FH(dir),
5032 		.name   = name,
5033 	};
5034 	struct nfs4_secinfo_res res = {
5035 		.flavors     = flavors,
5036 	};
5037 	struct rpc_message msg = {
5038 		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SECINFO],
5039 		.rpc_argp = &args,
5040 		.rpc_resp = &res,
5041 	};
5042 
5043 	dprintk("NFS call  secinfo %s\n", name->name);
5044 	status = nfs4_call_sync(NFS_SERVER(dir)->client, NFS_SERVER(dir), &msg, &args.seq_args, &res.seq_res, 0);
5045 	dprintk("NFS reply  secinfo: %d\n", status);
5046 	return status;
5047 }
5048 
5049 int nfs4_proc_secinfo(struct inode *dir, const struct qstr *name,
5050 		      struct nfs4_secinfo_flavors *flavors)
5051 {
5052 	struct nfs4_exception exception = { };
5053 	int err;
5054 	do {
5055 		err = nfs4_handle_exception(NFS_SERVER(dir),
5056 				_nfs4_proc_secinfo(dir, name, flavors),
5057 				&exception);
5058 	} while (exception.retry);
5059 	return err;
5060 }
5061 
5062 #ifdef CONFIG_NFS_V4_1
5063 /*
5064  * Check the exchange flags returned by the server: reject unknown flag
5065  * bits, flag sets that have both the PNFS and NON_PNFS flags set, and
5066  * flag sets that have none of the NON_PNFS, PNFS, or DS flags set.
5067  */
5068 static int nfs4_check_cl_exchange_flags(u32 flags)
5069 {
5070 	if (flags & ~EXCHGID4_FLAG_MASK_R)
5071 		goto out_inval;
5072 	if ((flags & EXCHGID4_FLAG_USE_PNFS_MDS) &&
5073 	    (flags & EXCHGID4_FLAG_USE_NON_PNFS))
5074 		goto out_inval;
5075 	if (!(flags & (EXCHGID4_FLAG_MASK_PNFS)))
5076 		goto out_inval;
5077 	return NFS_OK;
5078 out_inval:
5079 	return -NFS4ERR_INVAL;
5080 }
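
/*
 * Examples of how the checks above classify an EXCHANGE_ID reply (a sketch
 * using the EXCHGID4_FLAG_* names purely for illustration):
 *
 *	EXCHGID4_FLAG_USE_PNFS_MDS alone          -> NFS_OK
 *	USE_PNFS_MDS | USE_NON_PNFS               -> -NFS4ERR_INVAL (exclusive)
 *	no MDS/NON_PNFS/DS role flag at all       -> -NFS4ERR_INVAL
 *	any bit outside EXCHGID4_FLAG_MASK_R      -> -NFS4ERR_INVAL
 */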
5081 
5082 static bool
5083 nfs41_same_server_scope(struct server_scope *a, struct server_scope *b)
5084 {
5085 	if (a->server_scope_sz == b->server_scope_sz &&
5086 	    memcmp(a->server_scope, b->server_scope, a->server_scope_sz) == 0)
5087 		return true;
5088 
5089 	return false;
5090 }
5091 
5092 /*
5093  * nfs4_proc_exchange_id()
5094  *
5095  * Since the clientid has expired, all compounds using sessions
5096  * associated with the stale clientid will be returning
5097  * NFS4ERR_BADSESSION in the sequence operation, and will therefore
5098  * be in some phase of session reset.
5099  */
5100 int nfs4_proc_exchange_id(struct nfs_client *clp, struct rpc_cred *cred)
5101 {
5102 	nfs4_verifier verifier;
5103 	struct nfs41_exchange_id_args args = {
5104 		.verifier = &verifier,
5105 		.client = clp,
5106 		.flags = EXCHGID4_FLAG_SUPP_MOVED_REFER,
5107 	};
5108 	struct nfs41_exchange_id_res res = {
5109 		.client = clp,
5110 	};
5111 	int status;
5112 	struct rpc_message msg = {
5113 		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_EXCHANGE_ID],
5114 		.rpc_argp = &args,
5115 		.rpc_resp = &res,
5116 		.rpc_cred = cred,
5117 	};
5118 
5119 	dprintk("--> %s\n", __func__);
5120 	BUG_ON(clp == NULL);
5121 
5122 	nfs4_construct_boot_verifier(clp, &verifier);
5123 
5124 	args.id_len = scnprintf(args.id, sizeof(args.id),
5125 				"%s/%s/%u",
5126 				clp->cl_ipaddr,
5127 				clp->cl_rpcclient->cl_nodename,
5128 				clp->cl_rpcclient->cl_auth->au_flavor);
5129 
5130 	res.server_scope = kzalloc(sizeof(struct server_scope), GFP_KERNEL);
5131 	if (unlikely(!res.server_scope)) {
5132 		status = -ENOMEM;
5133 		goto out;
5134 	}
5135 
5136 	res.impl_id = kzalloc(sizeof(struct nfs41_impl_id), GFP_KERNEL);
5137 	if (unlikely(!res.impl_id)) {
5138 		status = -ENOMEM;
5139 		goto out_server_scope;
5140 	}
5141 
5142 	status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);
5143 	if (!status)
5144 		status = nfs4_check_cl_exchange_flags(clp->cl_exchange_flags);
5145 
5146 	if (!status) {
5147 		/* use the most recent implementation id */
5148 		kfree(clp->impl_id);
5149 		clp->impl_id = res.impl_id;
5150 	} else
5151 		kfree(res.impl_id);
5152 
5153 	if (!status) {
5154 		if (clp->server_scope &&
5155 		    !nfs41_same_server_scope(clp->server_scope,
5156 					     res.server_scope)) {
5157 			dprintk("%s: server_scope mismatch detected\n",
5158 				__func__);
5159 			set_bit(NFS4CLNT_SERVER_SCOPE_MISMATCH, &clp->cl_state);
5160 			kfree(clp->server_scope);
5161 			clp->server_scope = NULL;
5162 		}
5163 
5164 		if (!clp->server_scope) {
5165 			clp->server_scope = res.server_scope;
5166 			goto out;
5167 		}
5168 	}
5169 
5170 out_server_scope:
5171 	kfree(res.server_scope);
5172 out:
5173 	if (clp->impl_id)
5174 		dprintk("%s: Server Implementation ID: "
5175 			"domain: %s, name: %s, date: %llu,%u\n",
5176 			__func__, clp->impl_id->domain, clp->impl_id->name,
5177 			clp->impl_id->date.seconds,
5178 			clp->impl_id->date.nseconds);
5179 	dprintk("<-- %s status= %d\n", __func__, status);
5180 	return status;
5181 }
5182 
5183 struct nfs4_get_lease_time_data {
5184 	struct nfs4_get_lease_time_args *args;
5185 	struct nfs4_get_lease_time_res *res;
5186 	struct nfs_client *clp;
5187 };
5188 
5189 static void nfs4_get_lease_time_prepare(struct rpc_task *task,
5190 					void *calldata)
5191 {
5192 	int ret;
5193 	struct nfs4_get_lease_time_data *data =
5194 			(struct nfs4_get_lease_time_data *)calldata;
5195 
5196 	dprintk("--> %s\n", __func__);
5197 	rpc_task_set_priority(task, RPC_PRIORITY_PRIVILEGED);
5198 	/* Just set up the sequence; do not trigger session recovery,
5199 	   since we're invoked within one */
5200 	ret = nfs41_setup_sequence(data->clp->cl_session,
5201 				   &data->args->la_seq_args,
5202 				   &data->res->lr_seq_res, task);
5203 
5204 	BUG_ON(ret == -EAGAIN);
5205 	rpc_call_start(task);
5206 	dprintk("<-- %s\n", __func__);
5207 }
5208 
5209 /*
5210  * Called from nfs4_state_manager thread for session setup, so don't recover
5211  * from sequence operation or clientid errors.
5212  */
5213 static void nfs4_get_lease_time_done(struct rpc_task *task, void *calldata)
5214 {
5215 	struct nfs4_get_lease_time_data *data =
5216 			(struct nfs4_get_lease_time_data *)calldata;
5217 
5218 	dprintk("--> %s\n", __func__);
5219 	if (!nfs41_sequence_done(task, &data->res->lr_seq_res))
5220 		return;
5221 	switch (task->tk_status) {
5222 	case -NFS4ERR_DELAY:
5223 	case -NFS4ERR_GRACE:
5224 		dprintk("%s Retry: tk_status %d\n", __func__, task->tk_status);
5225 		rpc_delay(task, NFS4_POLL_RETRY_MIN);
5226 		task->tk_status = 0;
5227 		/* fall through */
5228 	case -NFS4ERR_RETRY_UNCACHED_REP:
5229 		rpc_restart_call_prepare(task);
5230 		return;
5231 	}
5232 	dprintk("<-- %s\n", __func__);
5233 }
5234 
5235 static const struct rpc_call_ops nfs4_get_lease_time_ops = {
5236 	.rpc_call_prepare = nfs4_get_lease_time_prepare,
5237 	.rpc_call_done = nfs4_get_lease_time_done,
5238 };
5239 
5240 int nfs4_proc_get_lease_time(struct nfs_client *clp, struct nfs_fsinfo *fsinfo)
5241 {
5242 	struct rpc_task *task;
5243 	struct nfs4_get_lease_time_args args;
5244 	struct nfs4_get_lease_time_res res = {
5245 		.lr_fsinfo = fsinfo,
5246 	};
5247 	struct nfs4_get_lease_time_data data = {
5248 		.args = &args,
5249 		.res = &res,
5250 		.clp = clp,
5251 	};
5252 	struct rpc_message msg = {
5253 		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GET_LEASE_TIME],
5254 		.rpc_argp = &args,
5255 		.rpc_resp = &res,
5256 	};
5257 	struct rpc_task_setup task_setup = {
5258 		.rpc_client = clp->cl_rpcclient,
5259 		.rpc_message = &msg,
5260 		.callback_ops = &nfs4_get_lease_time_ops,
5261 		.callback_data = &data,
5262 		.flags = RPC_TASK_TIMEOUT,
5263 	};
5264 	int status;
5265 
5266 	nfs41_init_sequence(&args.la_seq_args, &res.lr_seq_res, 0);
5267 	dprintk("--> %s\n", __func__);
5268 	task = rpc_run_task(&task_setup);
5269 
5270 	if (IS_ERR(task))
5271 		status = PTR_ERR(task);
5272 	else {
5273 		status = task->tk_status;
5274 		rpc_put_task(task);
5275 	}
5276 	dprintk("<-- %s return %d\n", __func__, status);
5277 
5278 	return status;
5279 }
5280 
5281 static struct nfs4_slot *nfs4_alloc_slots(u32 max_slots, gfp_t gfp_flags)
5282 {
5283 	return kcalloc(max_slots, sizeof(struct nfs4_slot), gfp_flags);
5284 }
5285 
5286 static void nfs4_add_and_init_slots(struct nfs4_slot_table *tbl,
5287 		struct nfs4_slot *new,
5288 		u32 max_slots,
5289 		u32 ivalue)
5290 {
5291 	struct nfs4_slot *old = NULL;
5292 	u32 i;
5293 
5294 	spin_lock(&tbl->slot_tbl_lock);
5295 	if (new) {
5296 		old = tbl->slots;
5297 		tbl->slots = new;
5298 		tbl->max_slots = max_slots;
5299 	}
5300 	tbl->highest_used_slotid = -1;	/* no slot is currently used */
5301 	for (i = 0; i < tbl->max_slots; i++)
5302 		tbl->slots[i].seq_nr = ivalue;
5303 	spin_unlock(&tbl->slot_tbl_lock);
5304 	kfree(old);
5305 }
5306 
5307 /*
5308  * (re)Initialise a slot table
5309  */
5310 static int nfs4_realloc_slot_table(struct nfs4_slot_table *tbl, u32 max_reqs,
5311 				 u32 ivalue)
5312 {
5313 	struct nfs4_slot *new = NULL;
5314 	int ret = -ENOMEM;
5315 
5316 	dprintk("--> %s: max_reqs=%u, tbl->max_slots %d\n", __func__,
5317 		max_reqs, tbl->max_slots);
5318 
5319 	/* Does the newly negotiated max_reqs match the existing slot table? */
5320 	if (max_reqs != tbl->max_slots) {
5321 		new = nfs4_alloc_slots(max_reqs, GFP_NOFS);
5322 		if (!new)
5323 			goto out;
5324 	}
5325 	ret = 0;
5326 
5327 	nfs4_add_and_init_slots(tbl, new, max_reqs, ivalue);
5328 	dprintk("%s: tbl=%p slots=%p max_slots=%d\n", __func__,
5329 		tbl, tbl->slots, tbl->max_slots);
5330 out:
5331 	dprintk("<-- %s: return %d\n", __func__, ret);
5332 	return ret;
5333 }
5334 
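/*
 * Illustrative sketch (hypothetical names, not part of this file):
 * nfs4_realloc_slot_table() only allocates a new slot array when the
 * newly negotiated max_reqs differs from the current table size, and in
 * either case resets every slot's sequence number to the given initial
 * value.  The locking and the highest_used_slotid reset performed by
 * nfs4_add_and_init_slots() are omitted here; a bare userspace analogue:
 *
 *	#include <stdint.h>
 *	#include <stdlib.h>
 *
 *	struct ex_slot { uint32_t seq_nr; };
 *	struct ex_slot_table {
 *		struct ex_slot *slots;
 *		uint32_t max_slots;
 *	};
 *
 *	static int ex_realloc_slot_table(struct ex_slot_table *tbl,
 *					 uint32_t max_reqs, uint32_t ivalue)
 *	{
 *		struct ex_slot *new = NULL, *old = NULL;
 *		uint32_t i;
 *
 *		if (max_reqs != tbl->max_slots) {
 *			new = calloc(max_reqs, sizeof(*new));
 *			if (!new)
 *				return -1;	// -ENOMEM in the kernel
 *			old = tbl->slots;
 *			tbl->slots = new;
 *			tbl->max_slots = max_reqs;
 *		}
 *		for (i = 0; i < tbl->max_slots; i++)
 *			tbl->slots[i].seq_nr = ivalue;	// restart sequencing
 *		free(old);
 *		return 0;
 *	}
 */
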
5335 /* Destroy the slot table */
5336 static void nfs4_destroy_slot_tables(struct nfs4_session *session)
5337 {
5338 	if (session->fc_slot_table.slots != NULL) {
5339 		kfree(session->fc_slot_table.slots);
5340 		session->fc_slot_table.slots = NULL;
5341 	}
5342 	if (session->bc_slot_table.slots != NULL) {
5343 		kfree(session->bc_slot_table.slots);
5344 		session->bc_slot_table.slots = NULL;
5345 	}
5346 	return;
5347 }
5348 
5349 /*
5350  * Initialize or reset the forechannel and backchannel tables
5351  */
5352 static int nfs4_setup_session_slot_tables(struct nfs4_session *ses)
5353 {
5354 	struct nfs4_slot_table *tbl;
5355 	int status;
5356 
5357 	dprintk("--> %s\n", __func__);
5358 	/* Fore channel */
5359 	tbl = &ses->fc_slot_table;
5360 	status = nfs4_realloc_slot_table(tbl, ses->fc_attrs.max_reqs, 1);
5361 	if (status) /* -ENOMEM */
5362 		return status;
5363 	/* Back channel */
5364 	tbl = &ses->bc_slot_table;
5365 	status = nfs4_realloc_slot_table(tbl, ses->bc_attrs.max_reqs, 0);
5366 	if (status && tbl->slots == NULL)
5367 		/* Fore and back channel share a connection so get
5368 		 * both slot tables or neither */
5369 		nfs4_destroy_slot_tables(ses);
5370 	return status;
5371 }
5372 
5373 struct nfs4_session *nfs4_alloc_session(struct nfs_client *clp)
5374 {
5375 	struct nfs4_session *session;
5376 	struct nfs4_slot_table *tbl;
5377 
5378 	session = kzalloc(sizeof(struct nfs4_session), GFP_NOFS);
5379 	if (!session)
5380 		return NULL;
5381 
5382 	tbl = &session->fc_slot_table;
5383 	tbl->highest_used_slotid = NFS4_NO_SLOT;
5384 	spin_lock_init(&tbl->slot_tbl_lock);
5385 	rpc_init_priority_wait_queue(&tbl->slot_tbl_waitq, "ForeChannel Slot table");
5386 	init_completion(&tbl->complete);
5387 
5388 	tbl = &session->bc_slot_table;
5389 	tbl->highest_used_slotid = NFS4_NO_SLOT;
5390 	spin_lock_init(&tbl->slot_tbl_lock);
5391 	rpc_init_wait_queue(&tbl->slot_tbl_waitq, "BackChannel Slot table");
5392 	init_completion(&tbl->complete);
5393 
5394 	session->session_state = 1<<NFS4_SESSION_INITING;
5395 
5396 	session->clp = clp;
5397 	return session;
5398 }
5399 
5400 void nfs4_destroy_session(struct nfs4_session *session)
5401 {
5402 	struct rpc_xprt *xprt;
5403 
5404 	nfs4_proc_destroy_session(session);
5405 
5406 	rcu_read_lock();
5407 	xprt = rcu_dereference(session->clp->cl_rpcclient->cl_xprt);
5408 	rcu_read_unlock();
5409 	dprintk("%s Destroy backchannel for xprt %p\n",
5410 		__func__, xprt);
5411 	xprt_destroy_backchannel(xprt, NFS41_BC_MIN_CALLBACKS);
5412 	nfs4_destroy_slot_tables(session);
5413 	kfree(session);
5414 }
5415 
5416 /*
5417  * Initialize the values to be used by the client in CREATE_SESSION
5418  * If nfs4_init_session set the fore channel request and response sizes,
5419  * use them.
5420  *
5421  * Set the back channel max_resp_sz_cached to zero to force the client to
5422  * always set csa_cachethis to FALSE because the current implementation
5423  * of the back channel DRC only supports caching the CB_SEQUENCE operation.
5424  */
5425 static void nfs4_init_channel_attrs(struct nfs41_create_session_args *args)
5426 {
5427 	struct nfs4_session *session = args->client->cl_session;
5428 	unsigned int mxrqst_sz = session->fc_attrs.max_rqst_sz,
5429 		     mxresp_sz = session->fc_attrs.max_resp_sz;
5430 
5431 	if (mxrqst_sz == 0)
5432 		mxrqst_sz = NFS_MAX_FILE_IO_SIZE;
5433 	if (mxresp_sz == 0)
5434 		mxresp_sz = NFS_MAX_FILE_IO_SIZE;
5435 	/* Fore channel attributes */
5436 	args->fc_attrs.max_rqst_sz = mxrqst_sz;
5437 	args->fc_attrs.max_resp_sz = mxresp_sz;
5438 	args->fc_attrs.max_ops = NFS4_MAX_OPS;
5439 	args->fc_attrs.max_reqs = max_session_slots;
5440 
5441 	dprintk("%s: Fore Channel : max_rqst_sz=%u max_resp_sz=%u "
5442 		"max_ops=%u max_reqs=%u\n",
5443 		__func__,
5444 		args->fc_attrs.max_rqst_sz, args->fc_attrs.max_resp_sz,
5445 		args->fc_attrs.max_ops, args->fc_attrs.max_reqs);
5446 
5447 	/* Back channel attributes */
5448 	args->bc_attrs.max_rqst_sz = PAGE_SIZE;
5449 	args->bc_attrs.max_resp_sz = PAGE_SIZE;
5450 	args->bc_attrs.max_resp_sz_cached = 0;
5451 	args->bc_attrs.max_ops = NFS4_MAX_BACK_CHANNEL_OPS;
5452 	args->bc_attrs.max_reqs = 1;
5453 
5454 	dprintk("%s: Back Channel : max_rqst_sz=%u max_resp_sz=%u "
5455 		"max_resp_sz_cached=%u max_ops=%u max_reqs=%u\n",
5456 		__func__,
5457 		args->bc_attrs.max_rqst_sz, args->bc_attrs.max_resp_sz,
5458 		args->bc_attrs.max_resp_sz_cached, args->bc_attrs.max_ops,
5459 		args->bc_attrs.max_reqs);
5460 }
5461 
5462 static int nfs4_verify_fore_channel_attrs(struct nfs41_create_session_args *args, struct nfs4_session *session)
5463 {
5464 	struct nfs4_channel_attrs *sent = &args->fc_attrs;
5465 	struct nfs4_channel_attrs *rcvd = &session->fc_attrs;
5466 
5467 	if (rcvd->max_resp_sz > sent->max_resp_sz)
5468 		return -EINVAL;
5469 	/*
5470 	 * Our requested max_ops is the minimum we need; we're not
5471 	 * prepared to break up compounds into smaller pieces than that.
5472 	 * So, no point even trying to continue if the server won't
5473 	 * cooperate:
5474 	 */
5475 	if (rcvd->max_ops < sent->max_ops)
5476 		return -EINVAL;
5477 	if (rcvd->max_reqs == 0)
5478 		return -EINVAL;
5479 	if (rcvd->max_reqs > NFS4_MAX_SLOT_TABLE)
5480 		rcvd->max_reqs = NFS4_MAX_SLOT_TABLE;
5481 	return 0;
5482 }
5483 
5484 static int nfs4_verify_back_channel_attrs(struct nfs41_create_session_args *args, struct nfs4_session *session)
5485 {
5486 	struct nfs4_channel_attrs *sent = &args->bc_attrs;
5487 	struct nfs4_channel_attrs *rcvd = &session->bc_attrs;
5488 
5489 	if (rcvd->max_rqst_sz > sent->max_rqst_sz)
5490 		return -EINVAL;
5491 	if (rcvd->max_resp_sz < sent->max_resp_sz)
5492 		return -EINVAL;
5493 	if (rcvd->max_resp_sz_cached > sent->max_resp_sz_cached)
5494 		return -EINVAL;
5495 	/* These would render the backchannel useless: */
5496 	if (rcvd->max_ops != sent->max_ops)
5497 		return -EINVAL;
5498 	if (rcvd->max_reqs != sent->max_reqs)
5499 		return -EINVAL;
5500 	return 0;
5501 }
5502 
5503 static int nfs4_verify_channel_attrs(struct nfs41_create_session_args *args,
5504 				     struct nfs4_session *session)
5505 {
5506 	int ret;
5507 
5508 	ret = nfs4_verify_fore_channel_attrs(args, session);
5509 	if (ret)
5510 		return ret;
5511 	return nfs4_verify_back_channel_attrs(args, session);
5512 }
5513 
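/*
 * Illustrative sketch (hypothetical names, not part of this file): the
 * two verification helpers above accept a CREATE_SESSION reply only
 * when the server's negotiated values stay within what the client
 * offered -- the fore channel reply size may shrink but not grow, the
 * compound depth must not drop below what was requested, the slot count
 * must be non-zero, and the back channel attributes must not be
 * loosened.  Condensed into a single standalone check:
 *
 *	#include <stdint.h>
 *
 *	struct ex_chan_attrs {
 *		uint32_t max_rqst_sz, max_resp_sz, max_resp_sz_cached;
 *		uint32_t max_ops, max_reqs;
 *	};
 *
 *	// Returns 0 when the reply is acceptable, -1 otherwise.
 *	static int ex_verify_session(const struct ex_chan_attrs *fc_sent,
 *				     struct ex_chan_attrs *fc_rcvd,
 *				     const struct ex_chan_attrs *bc_sent,
 *				     const struct ex_chan_attrs *bc_rcvd,
 *				     uint32_t max_slot_table)
 *	{
 *		// Fore channel checks.
 *		if (fc_rcvd->max_resp_sz > fc_sent->max_resp_sz)
 *			return -1;
 *		if (fc_rcvd->max_ops < fc_sent->max_ops)
 *			return -1;
 *		if (fc_rcvd->max_reqs == 0)
 *			return -1;
 *		if (fc_rcvd->max_reqs > max_slot_table)
 *			fc_rcvd->max_reqs = max_slot_table;	// clamp
 *
 *		// Back channel checks: anything else would make the
 *		// callback channel unusable.
 *		if (bc_rcvd->max_rqst_sz > bc_sent->max_rqst_sz ||
 *		    bc_rcvd->max_resp_sz < bc_sent->max_resp_sz ||
 *		    bc_rcvd->max_resp_sz_cached > bc_sent->max_resp_sz_cached ||
 *		    bc_rcvd->max_ops != bc_sent->max_ops ||
 *		    bc_rcvd->max_reqs != bc_sent->max_reqs)
 *			return -1;
 *		return 0;
 *	}
 */
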
5514 static int _nfs4_proc_create_session(struct nfs_client *clp)
5515 {
5516 	struct nfs4_session *session = clp->cl_session;
5517 	struct nfs41_create_session_args args = {
5518 		.client = clp,
5519 		.cb_program = NFS4_CALLBACK,
5520 	};
5521 	struct nfs41_create_session_res res = {
5522 		.client = clp,
5523 	};
5524 	struct rpc_message msg = {
5525 		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CREATE_SESSION],
5526 		.rpc_argp = &args,
5527 		.rpc_resp = &res,
5528 	};
5529 	int status;
5530 
5531 	nfs4_init_channel_attrs(&args);
5532 	args.flags = (SESSION4_PERSIST | SESSION4_BACK_CHAN);
5533 
5534 	status = rpc_call_sync(session->clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);
5535 
5536 	if (!status)
5537 		/* Verify the session's negotiated channel_attrs values */
5538 		status = nfs4_verify_channel_attrs(&args, session);
5539 	if (!status) {
5540 		/* Increment the clientid slot sequence id */
5541 		clp->cl_seqid++;
5542 	}
5543 
5544 	return status;
5545 }
5546 
5547 /*
5548  * Issues a CREATE_SESSION operation to the server.
5549  * It is the responsibility of the caller to verify the session is
5550  * expired before calling this routine.
5551  */
5552 int nfs4_proc_create_session(struct nfs_client *clp)
5553 {
5554 	int status;
5555 	unsigned *ptr;
5556 	struct nfs4_session *session = clp->cl_session;
5557 
5558 	dprintk("--> %s clp=%p session=%p\n", __func__, clp, session);
5559 
5560 	status = _nfs4_proc_create_session(clp);
5561 	if (status)
5562 		goto out;
5563 
5564 	/* Init or reset the session slot tables */
5565 	status = nfs4_setup_session_slot_tables(session);
5566 	dprintk("slot table setup returned %d\n", status);
5567 	if (status)
5568 		goto out;
5569 
5570 	ptr = (unsigned *)&session->sess_id.data[0];
5571 	dprintk("%s client>seqid %d sessionid %u:%u:%u:%u\n", __func__,
5572 		clp->cl_seqid, ptr[0], ptr[1], ptr[2], ptr[3]);
5573 out:
5574 	dprintk("<-- %s\n", __func__);
5575 	return status;
5576 }
5577 
5578 /*
5579  * Issue the over-the-wire RPC DESTROY_SESSION.
5580  * The caller must serialize access to this routine.
5581  */
5582 int nfs4_proc_destroy_session(struct nfs4_session *session)
5583 {
5584 	int status = 0;
5585 	struct rpc_message msg;
5586 
5587 	dprintk("--> nfs4_proc_destroy_session\n");
5588 
5589 	/* session is still being setup */
5590 	if (session->clp->cl_cons_state != NFS_CS_READY)
5591 		return status;
5592 
5593 	msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_DESTROY_SESSION];
5594 	msg.rpc_argp = session;
5595 	msg.rpc_resp = NULL;
5596 	msg.rpc_cred = NULL;
5597 	status = rpc_call_sync(session->clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);
5598 
5599 	if (status)
5600 		printk(KERN_WARNING
5601 			"NFS: Got error %d from the server on DESTROY_SESSION. "
5602 			"Session has been destroyed regardless...\n", status);
5603 
5604 	dprintk("<-- nfs4_proc_destroy_session\n");
5605 	return status;
5606 }
5607 
5608 int nfs4_init_session(struct nfs_server *server)
5609 {
5610 	struct nfs_client *clp = server->nfs_client;
5611 	struct nfs4_session *session;
5612 	unsigned int rsize, wsize;
5613 	int ret;
5614 
5615 	if (!nfs4_has_session(clp))
5616 		return 0;
5617 
5618 	session = clp->cl_session;
5619 	if (!test_and_clear_bit(NFS4_SESSION_INITING, &session->session_state))
5620 		return 0;
5621 
5622 	rsize = server->rsize;
5623 	if (rsize == 0)
5624 		rsize = NFS_MAX_FILE_IO_SIZE;
5625 	wsize = server->wsize;
5626 	if (wsize == 0)
5627 		wsize = NFS_MAX_FILE_IO_SIZE;
5628 
5629 	session->fc_attrs.max_rqst_sz = wsize + nfs41_maxwrite_overhead;
5630 	session->fc_attrs.max_resp_sz = rsize + nfs41_maxread_overhead;
5631 
5632 	ret = nfs4_recover_expired_lease(server);
5633 	if (!ret)
5634 		ret = nfs4_check_client_ready(clp);
5635 	return ret;
5636 }
5637 
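/*
 * Illustrative sketch (hypothetical constants, not part of this file):
 * nfs4_init_session() sizes the fore channel so that a maximally sized
 * WRITE request or READ reply, plus the fixed per-compound XDR
 * overhead, still fits in a single RPC.  The same arithmetic as a
 * standalone helper, with stand-in values for the overheads and the
 * default I/O size:
 *
 *	#include <stdint.h>
 *
 *	#define EX_MAX_FILE_IO_SIZE	1048576U	// stand-in default
 *	#define EX_MAXWRITE_OVERHEAD	512U		// hypothetical
 *	#define EX_MAXREAD_OVERHEAD	512U		// hypothetical
 *
 *	static void ex_size_fore_channel(uint32_t rsize, uint32_t wsize,
 *					 uint32_t *max_rqst_sz,
 *					 uint32_t *max_resp_sz)
 *	{
 *		if (rsize == 0)
 *			rsize = EX_MAX_FILE_IO_SIZE;
 *		if (wsize == 0)
 *			wsize = EX_MAX_FILE_IO_SIZE;
 *		*max_rqst_sz = wsize + EX_MAXWRITE_OVERHEAD;	// WRITE fits
 *		*max_resp_sz = rsize + EX_MAXREAD_OVERHEAD;	// READ fits
 *	}
 */
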
5638 int nfs4_init_ds_session(struct nfs_client *clp)
5639 {
5640 	struct nfs4_session *session = clp->cl_session;
5641 	int ret;
5642 
5643 	if (!test_and_clear_bit(NFS4_SESSION_INITING, &session->session_state))
5644 		return 0;
5645 
5646 	ret = nfs4_client_recover_expired_lease(clp);
5647 	if (!ret)
5648 		/* Test for the DS role */
5649 		if (!is_ds_client(clp))
5650 			ret = -ENODEV;
5651 	if (!ret)
5652 		ret = nfs4_check_client_ready(clp);
5653 	return ret;
5654 
5655 }
5656 EXPORT_SYMBOL_GPL(nfs4_init_ds_session);
5657 
5658 
5659 /*
5660  * Renew the cl_session lease.
5661  */
5662 struct nfs4_sequence_data {
5663 	struct nfs_client *clp;
5664 	struct nfs4_sequence_args args;
5665 	struct nfs4_sequence_res res;
5666 };
5667 
5668 static void nfs41_sequence_release(void *data)
5669 {
5670 	struct nfs4_sequence_data *calldata = data;
5671 	struct nfs_client *clp = calldata->clp;
5672 
5673 	if (atomic_read(&clp->cl_count) > 1)
5674 		nfs4_schedule_state_renewal(clp);
5675 	nfs_put_client(clp);
5676 	kfree(calldata);
5677 }
5678 
5679 static int nfs41_sequence_handle_errors(struct rpc_task *task, struct nfs_client *clp)
5680 {
5681 	switch (task->tk_status) {
5682 	case -NFS4ERR_DELAY:
5683 		rpc_delay(task, NFS4_POLL_RETRY_MAX);
5684 		return -EAGAIN;
5685 	default:
5686 		nfs4_schedule_lease_recovery(clp);
5687 	}
5688 	return 0;
5689 }
5690 
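/*
 * Illustrative sketch (hypothetical names, not part of this file):
 * nfs41_sequence_handle_errors() is only reached when the renewal
 * SEQUENCE call failed.  NFS4ERR_DELAY means "back off and reissue the
 * same call"; any other error is taken as a sign the lease may be lost,
 * so lease recovery is scheduled instead of a retry.  The same decision
 * as a standalone helper:
 *
 *	#define EX_NFS4ERR_DELAY	10008
 *	#define EX_EAGAIN		11	// stand-in for -EAGAIN
 *
 *	// schedule_recovery is assumed to be supplied by the caller.
 *	static int ex_classify_sequence_error(int status,
 *					      void (*schedule_recovery)(void))
 *	{
 *		switch (status) {
 *		case -EX_NFS4ERR_DELAY:
 *			return -EX_EAGAIN;	// delay, then restart the call
 *		default:
 *			schedule_recovery();	// lease may have expired
 *		}
 *		return 0;
 *	}
 */
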
5691 static void nfs41_sequence_call_done(struct rpc_task *task, void *data)
5692 {
5693 	struct nfs4_sequence_data *calldata = data;
5694 	struct nfs_client *clp = calldata->clp;
5695 
5696 	if (!nfs41_sequence_done(task, task->tk_msg.rpc_resp))
5697 		return;
5698 
5699 	if (task->tk_status < 0) {
5700 		dprintk("%s ERROR %d\n", __func__, task->tk_status);
5701 		if (atomic_read(&clp->cl_count) == 1)
5702 			goto out;
5703 
5704 		if (nfs41_sequence_handle_errors(task, clp) == -EAGAIN) {
5705 			rpc_restart_call_prepare(task);
5706 			return;
5707 		}
5708 	}
5709 	dprintk("%s rpc_cred %p\n", __func__, task->tk_msg.rpc_cred);
5710 out:
5711 	dprintk("<-- %s\n", __func__);
5712 }
5713 
5714 static void nfs41_sequence_prepare(struct rpc_task *task, void *data)
5715 {
5716 	struct nfs4_sequence_data *calldata = data;
5717 	struct nfs_client *clp = calldata->clp;
5718 	struct nfs4_sequence_args *args;
5719 	struct nfs4_sequence_res *res;
5720 
5721 	args = task->tk_msg.rpc_argp;
5722 	res = task->tk_msg.rpc_resp;
5723 
5724 	if (nfs41_setup_sequence(clp->cl_session, args, res, task))
5725 		return;
5726 	rpc_call_start(task);
5727 }
5728 
5729 static const struct rpc_call_ops nfs41_sequence_ops = {
5730 	.rpc_call_done = nfs41_sequence_call_done,
5731 	.rpc_call_prepare = nfs41_sequence_prepare,
5732 	.rpc_release = nfs41_sequence_release,
5733 };
5734 
5735 static struct rpc_task *_nfs41_proc_sequence(struct nfs_client *clp, struct rpc_cred *cred)
5736 {
5737 	struct nfs4_sequence_data *calldata;
5738 	struct rpc_message msg = {
5739 		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SEQUENCE],
5740 		.rpc_cred = cred,
5741 	};
5742 	struct rpc_task_setup task_setup_data = {
5743 		.rpc_client = clp->cl_rpcclient,
5744 		.rpc_message = &msg,
5745 		.callback_ops = &nfs41_sequence_ops,
5746 		.flags = RPC_TASK_ASYNC | RPC_TASK_SOFT,
5747 	};
5748 
5749 	if (!atomic_inc_not_zero(&clp->cl_count))
5750 		return ERR_PTR(-EIO);
5751 	calldata = kzalloc(sizeof(*calldata), GFP_NOFS);
5752 	if (calldata == NULL) {
5753 		nfs_put_client(clp);
5754 		return ERR_PTR(-ENOMEM);
5755 	}
5756 	nfs41_init_sequence(&calldata->args, &calldata->res, 0);
5757 	msg.rpc_argp = &calldata->args;
5758 	msg.rpc_resp = &calldata->res;
5759 	calldata->clp = clp;
5760 	task_setup_data.callback_data = calldata;
5761 
5762 	return rpc_run_task(&task_setup_data);
5763 }
5764 
5765 static int nfs41_proc_async_sequence(struct nfs_client *clp, struct rpc_cred *cred, unsigned renew_flags)
5766 {
5767 	struct rpc_task *task;
5768 	int ret = 0;
5769 
5770 	if ((renew_flags & NFS4_RENEW_TIMEOUT) == 0)
5771 		return 0;
5772 	task = _nfs41_proc_sequence(clp, cred);
5773 	if (IS_ERR(task))
5774 		ret = PTR_ERR(task);
5775 	else
5776 		rpc_put_task_async(task);
5777 	dprintk("<-- %s status=%d\n", __func__, ret);
5778 	return ret;
5779 }
5780 
5781 static int nfs4_proc_sequence(struct nfs_client *clp, struct rpc_cred *cred)
5782 {
5783 	struct rpc_task *task;
5784 	int ret;
5785 
5786 	task = _nfs41_proc_sequence(clp, cred);
5787 	if (IS_ERR(task)) {
5788 		ret = PTR_ERR(task);
5789 		goto out;
5790 	}
5791 	ret = rpc_wait_for_completion_task(task);
5792 	if (!ret) {
5793 		struct nfs4_sequence_res *res = task->tk_msg.rpc_resp;
5794 
5795 		if (task->tk_status == 0)
5796 			nfs41_handle_sequence_flag_errors(clp, res->sr_status_flags);
5797 		ret = task->tk_status;
5798 	}
5799 	rpc_put_task(task);
5800 out:
5801 	dprintk("<-- %s status=%d\n", __func__, ret);
5802 	return ret;
5803 }
5804 
5805 struct nfs4_reclaim_complete_data {
5806 	struct nfs_client *clp;
5807 	struct nfs41_reclaim_complete_args arg;
5808 	struct nfs41_reclaim_complete_res res;
5809 };
5810 
5811 static void nfs4_reclaim_complete_prepare(struct rpc_task *task, void *data)
5812 {
5813 	struct nfs4_reclaim_complete_data *calldata = data;
5814 
5815 	rpc_task_set_priority(task, RPC_PRIORITY_PRIVILEGED);
5816 	if (nfs41_setup_sequence(calldata->clp->cl_session,
5817 				&calldata->arg.seq_args,
5818 				&calldata->res.seq_res, task))
5819 		return;
5820 
5821 	rpc_call_start(task);
5822 }
5823 
5824 static int nfs41_reclaim_complete_handle_errors(struct rpc_task *task, struct nfs_client *clp)
5825 {
5826 	switch (task->tk_status) {
5827 	case 0:
5828 	case -NFS4ERR_COMPLETE_ALREADY:
5829 	case -NFS4ERR_WRONG_CRED: /* What to do here? */
5830 		break;
5831 	case -NFS4ERR_DELAY:
5832 		rpc_delay(task, NFS4_POLL_RETRY_MAX);
5833 		/* fall through */
5834 	case -NFS4ERR_RETRY_UNCACHED_REP:
5835 		return -EAGAIN;
5836 	default:
5837 		nfs4_schedule_lease_recovery(clp);
5838 	}
5839 	return 0;
5840 }
5841 
5842 static void nfs4_reclaim_complete_done(struct rpc_task *task, void *data)
5843 {
5844 	struct nfs4_reclaim_complete_data *calldata = data;
5845 	struct nfs_client *clp = calldata->clp;
5846 	struct nfs4_sequence_res *res = &calldata->res.seq_res;
5847 
5848 	dprintk("--> %s\n", __func__);
5849 	if (!nfs41_sequence_done(task, res))
5850 		return;
5851 
5852 	if (nfs41_reclaim_complete_handle_errors(task, clp) == -EAGAIN) {
5853 		rpc_restart_call_prepare(task);
5854 		return;
5855 	}
5856 	dprintk("<-- %s\n", __func__);
5857 }
5858 
5859 static void nfs4_free_reclaim_complete_data(void *data)
5860 {
5861 	struct nfs4_reclaim_complete_data *calldata = data;
5862 
5863 	kfree(calldata);
5864 }
5865 
5866 static const struct rpc_call_ops nfs4_reclaim_complete_call_ops = {
5867 	.rpc_call_prepare = nfs4_reclaim_complete_prepare,
5868 	.rpc_call_done = nfs4_reclaim_complete_done,
5869 	.rpc_release = nfs4_free_reclaim_complete_data,
5870 };
5871 
5872 /*
5873  * Issue a global reclaim complete.
5874  */
5875 static int nfs41_proc_reclaim_complete(struct nfs_client *clp)
5876 {
5877 	struct nfs4_reclaim_complete_data *calldata;
5878 	struct rpc_task *task;
5879 	struct rpc_message msg = {
5880 		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RECLAIM_COMPLETE],
5881 	};
5882 	struct rpc_task_setup task_setup_data = {
5883 		.rpc_client = clp->cl_rpcclient,
5884 		.rpc_message = &msg,
5885 		.callback_ops = &nfs4_reclaim_complete_call_ops,
5886 		.flags = RPC_TASK_ASYNC,
5887 	};
5888 	int status = -ENOMEM;
5889 
5890 	dprintk("--> %s\n", __func__);
5891 	calldata = kzalloc(sizeof(*calldata), GFP_NOFS);
5892 	if (calldata == NULL)
5893 		goto out;
5894 	calldata->clp = clp;
5895 	calldata->arg.one_fs = 0;
5896 
5897 	nfs41_init_sequence(&calldata->arg.seq_args, &calldata->res.seq_res, 0);
5898 	msg.rpc_argp = &calldata->arg;
5899 	msg.rpc_resp = &calldata->res;
5900 	task_setup_data.callback_data = calldata;
5901 	task = rpc_run_task(&task_setup_data);
5902 	if (IS_ERR(task)) {
5903 		status = PTR_ERR(task);
5904 		goto out;
5905 	}
5906 	status = nfs4_wait_for_completion_rpc_task(task);
5907 	if (status == 0)
5908 		status = task->tk_status;
5909 	rpc_put_task(task);
5910 	return status;
5911 out:
5912 	dprintk("<-- %s status=%d\n", __func__, status);
5913 	return status;
5914 }
5915 
5916 static void
5917 nfs4_layoutget_prepare(struct rpc_task *task, void *calldata)
5918 {
5919 	struct nfs4_layoutget *lgp = calldata;
5920 	struct nfs_server *server = NFS_SERVER(lgp->args.inode);
5921 
5922 	dprintk("--> %s\n", __func__);
5923 	/* Note there is a race here, where a CB_LAYOUTRECALL can come in
5924 	 * right now covering the LAYOUTGET we are about to send.
5925 	 * However, that is not so catastrophic, and there seems
5926 	 * to be no way to prevent it completely.
5927 	 */
5928 	if (nfs4_setup_sequence(server, &lgp->args.seq_args,
5929 				&lgp->res.seq_res, task))
5930 		return;
5931 	if (pnfs_choose_layoutget_stateid(&lgp->args.stateid,
5932 					  NFS_I(lgp->args.inode)->layout,
5933 					  lgp->args.ctx->state)) {
5934 		rpc_exit(task, NFS4_OK);
5935 		return;
5936 	}
5937 	rpc_call_start(task);
5938 }
5939 
5940 static void nfs4_layoutget_done(struct rpc_task *task, void *calldata)
5941 {
5942 	struct nfs4_layoutget *lgp = calldata;
5943 	struct nfs_server *server = NFS_SERVER(lgp->args.inode);
5944 
5945 	dprintk("--> %s\n", __func__);
5946 
5947 	if (!nfs4_sequence_done(task, &lgp->res.seq_res))
5948 		return;
5949 
5950 	switch (task->tk_status) {
5951 	case 0:
5952 		break;
5953 	case -NFS4ERR_LAYOUTTRYLATER:
5954 	case -NFS4ERR_RECALLCONFLICT:
5955 		task->tk_status = -NFS4ERR_DELAY;
5956 		/* Fall through */
5957 	default:
5958 		if (nfs4_async_handle_error(task, server, NULL) == -EAGAIN) {
5959 			rpc_restart_call_prepare(task);
5960 			return;
5961 		}
5962 	}
5963 	dprintk("<-- %s\n", __func__);
5964 }
5965 
5966 static void nfs4_layoutget_release(void *calldata)
5967 {
5968 	struct nfs4_layoutget *lgp = calldata;
5969 
5970 	dprintk("--> %s\n", __func__);
5971 	put_nfs_open_context(lgp->args.ctx);
5972 	kfree(calldata);
5973 	dprintk("<-- %s\n", __func__);
5974 }
5975 
5976 static const struct rpc_call_ops nfs4_layoutget_call_ops = {
5977 	.rpc_call_prepare = nfs4_layoutget_prepare,
5978 	.rpc_call_done = nfs4_layoutget_done,
5979 	.rpc_release = nfs4_layoutget_release,
5980 };
5981 
5982 int nfs4_proc_layoutget(struct nfs4_layoutget *lgp)
5983 {
5984 	struct nfs_server *server = NFS_SERVER(lgp->args.inode);
5985 	struct rpc_task *task;
5986 	struct rpc_message msg = {
5987 		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LAYOUTGET],
5988 		.rpc_argp = &lgp->args,
5989 		.rpc_resp = &lgp->res,
5990 	};
5991 	struct rpc_task_setup task_setup_data = {
5992 		.rpc_client = server->client,
5993 		.rpc_message = &msg,
5994 		.callback_ops = &nfs4_layoutget_call_ops,
5995 		.callback_data = lgp,
5996 		.flags = RPC_TASK_ASYNC,
5997 	};
5998 	int status = 0;
5999 
6000 	dprintk("--> %s\n", __func__);
6001 
6002 	lgp->res.layoutp = &lgp->args.layout;
6003 	lgp->res.seq_res.sr_slot = NULL;
6004 	nfs41_init_sequence(&lgp->args.seq_args, &lgp->res.seq_res, 0);
6005 	task = rpc_run_task(&task_setup_data);
6006 	if (IS_ERR(task))
6007 		return PTR_ERR(task);
6008 	status = nfs4_wait_for_completion_rpc_task(task);
6009 	if (status == 0)
6010 		status = task->tk_status;
6011 	if (status == 0)
6012 		status = pnfs_layout_process(lgp);
6013 	rpc_put_task(task);
6014 	dprintk("<-- %s status=%d\n", __func__, status);
6015 	return status;
6016 }
6017 
6018 static void
6019 nfs4_layoutreturn_prepare(struct rpc_task *task, void *calldata)
6020 {
6021 	struct nfs4_layoutreturn *lrp = calldata;
6022 
6023 	dprintk("--> %s\n", __func__);
6024 	if (nfs41_setup_sequence(lrp->clp->cl_session, &lrp->args.seq_args,
6025 				&lrp->res.seq_res, task))
6026 		return;
6027 	rpc_call_start(task);
6028 }
6029 
6030 static void nfs4_layoutreturn_done(struct rpc_task *task, void *calldata)
6031 {
6032 	struct nfs4_layoutreturn *lrp = calldata;
6033 	struct nfs_server *server;
6034 	struct pnfs_layout_hdr *lo = lrp->args.layout;
6035 
6036 	dprintk("--> %s\n", __func__);
6037 
6038 	if (!nfs4_sequence_done(task, &lrp->res.seq_res))
6039 		return;
6040 
6041 	server = NFS_SERVER(lrp->args.inode);
6042 	if (nfs4_async_handle_error(task, server, NULL) == -EAGAIN) {
6043 		rpc_restart_call_prepare(task);
6044 		return;
6045 	}
6046 	spin_lock(&lo->plh_inode->i_lock);
6047 	if (task->tk_status == 0) {
6048 		if (lrp->res.lrs_present) {
6049 			pnfs_set_layout_stateid(lo, &lrp->res.stateid, true);
6050 		} else
6051 			BUG_ON(!list_empty(&lo->plh_segs));
6052 	}
6053 	lo->plh_block_lgets--;
6054 	spin_unlock(&lo->plh_inode->i_lock);
6055 	dprintk("<-- %s\n", __func__);
6056 }
6057 
6058 static void nfs4_layoutreturn_release(void *calldata)
6059 {
6060 	struct nfs4_layoutreturn *lrp = calldata;
6061 
6062 	dprintk("--> %s\n", __func__);
6063 	put_layout_hdr(lrp->args.layout);
6064 	kfree(calldata);
6065 	dprintk("<-- %s\n", __func__);
6066 }
6067 
6068 static const struct rpc_call_ops nfs4_layoutreturn_call_ops = {
6069 	.rpc_call_prepare = nfs4_layoutreturn_prepare,
6070 	.rpc_call_done = nfs4_layoutreturn_done,
6071 	.rpc_release = nfs4_layoutreturn_release,
6072 };
6073 
6074 int nfs4_proc_layoutreturn(struct nfs4_layoutreturn *lrp)
6075 {
6076 	struct rpc_task *task;
6077 	struct rpc_message msg = {
6078 		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LAYOUTRETURN],
6079 		.rpc_argp = &lrp->args,
6080 		.rpc_resp = &lrp->res,
6081 	};
6082 	struct rpc_task_setup task_setup_data = {
6083 		.rpc_client = lrp->clp->cl_rpcclient,
6084 		.rpc_message = &msg,
6085 		.callback_ops = &nfs4_layoutreturn_call_ops,
6086 		.callback_data = lrp,
6087 	};
6088 	int status;
6089 
6090 	dprintk("--> %s\n", __func__);
6091 	nfs41_init_sequence(&lrp->args.seq_args, &lrp->res.seq_res, 1);
6092 	task = rpc_run_task(&task_setup_data);
6093 	if (IS_ERR(task))
6094 		return PTR_ERR(task);
6095 	status = task->tk_status;
6096 	dprintk("<-- %s status=%d\n", __func__, status);
6097 	rpc_put_task(task);
6098 	return status;
6099 }
6100 
6101 /*
6102  * Retrieve the list of Data Server devices from the MDS.
6103  */
6104 static int _nfs4_getdevicelist(struct nfs_server *server,
6105 				    const struct nfs_fh *fh,
6106 				    struct pnfs_devicelist *devlist)
6107 {
6108 	struct nfs4_getdevicelist_args args = {
6109 		.fh = fh,
6110 		.layoutclass = server->pnfs_curr_ld->id,
6111 	};
6112 	struct nfs4_getdevicelist_res res = {
6113 		.devlist = devlist,
6114 	};
6115 	struct rpc_message msg = {
6116 		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETDEVICELIST],
6117 		.rpc_argp = &args,
6118 		.rpc_resp = &res,
6119 	};
6120 	int status;
6121 
6122 	dprintk("--> %s\n", __func__);
6123 	status = nfs4_call_sync(server->client, server, &msg, &args.seq_args,
6124 				&res.seq_res, 0);
6125 	dprintk("<-- %s status=%d\n", __func__, status);
6126 	return status;
6127 }
6128 
6129 int nfs4_proc_getdevicelist(struct nfs_server *server,
6130 			    const struct nfs_fh *fh,
6131 			    struct pnfs_devicelist *devlist)
6132 {
6133 	struct nfs4_exception exception = { };
6134 	int err;
6135 
6136 	do {
6137 		err = nfs4_handle_exception(server,
6138 				_nfs4_getdevicelist(server, fh, devlist),
6139 				&exception);
6140 	} while (exception.retry);
6141 
6142 	dprintk("%s: err=%d, num_devs=%u\n", __func__,
6143 		err, devlist->num_devs);
6144 
6145 	return err;
6146 }
6147 EXPORT_SYMBOL_GPL(nfs4_proc_getdevicelist);
6148 
6149 static int
6150 _nfs4_proc_getdeviceinfo(struct nfs_server *server, struct pnfs_device *pdev)
6151 {
6152 	struct nfs4_getdeviceinfo_args args = {
6153 		.pdev = pdev,
6154 	};
6155 	struct nfs4_getdeviceinfo_res res = {
6156 		.pdev = pdev,
6157 	};
6158 	struct rpc_message msg = {
6159 		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETDEVICEINFO],
6160 		.rpc_argp = &args,
6161 		.rpc_resp = &res,
6162 	};
6163 	int status;
6164 
6165 	dprintk("--> %s\n", __func__);
6166 	status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
6167 	dprintk("<-- %s status=%d\n", __func__, status);
6168 
6169 	return status;
6170 }
6171 
6172 int nfs4_proc_getdeviceinfo(struct nfs_server *server, struct pnfs_device *pdev)
6173 {
6174 	struct nfs4_exception exception = { };
6175 	int err;
6176 
6177 	do {
6178 		err = nfs4_handle_exception(server,
6179 					_nfs4_proc_getdeviceinfo(server, pdev),
6180 					&exception);
6181 	} while (exception.retry);
6182 	return err;
6183 }
6184 EXPORT_SYMBOL_GPL(nfs4_proc_getdeviceinfo);
6185 
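/*
 * Illustrative sketch (hypothetical names, not part of this file):
 * nfs4_proc_getdevicelist(), nfs4_proc_getdeviceinfo() and most other
 * public entry points in this file share one idiom: call the raw
 * "_nfs4_..." helper, feed its result through nfs4_handle_exception()
 * (which may sleep, recover state, or mark the call for retry), and
 * loop while the exception handler requests a retry.  The bare shape
 * of that idiom:
 *
 *	struct ex_exception { int retry; };
 *
 *	// handle_exception() is assumed to clear or set exc->retry and
 *	// to translate NFS4ERR_* values into errno-style results.
 *	static int ex_with_retries(int (*op)(void *), void *arg,
 *				   int (*handle_exception)(int status,
 *						struct ex_exception *exc))
 *	{
 *		struct ex_exception exc = { 0 };
 *		int err;
 *
 *		do {
 *			err = handle_exception(op(arg), &exc);
 *		} while (exc.retry);
 *		return err;
 *	}
 */
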
6186 static void nfs4_layoutcommit_prepare(struct rpc_task *task, void *calldata)
6187 {
6188 	struct nfs4_layoutcommit_data *data = calldata;
6189 	struct nfs_server *server = NFS_SERVER(data->args.inode);
6190 
6191 	if (nfs4_setup_sequence(server, &data->args.seq_args,
6192 				&data->res.seq_res, task))
6193 		return;
6194 	rpc_call_start(task);
6195 }
6196 
6197 static void
6198 nfs4_layoutcommit_done(struct rpc_task *task, void *calldata)
6199 {
6200 	struct nfs4_layoutcommit_data *data = calldata;
6201 	struct nfs_server *server = NFS_SERVER(data->args.inode);
6202 
6203 	if (!nfs4_sequence_done(task, &data->res.seq_res))
6204 		return;
6205 
6206 	switch (task->tk_status) { /* Just ignore these failures */
6207 	case -NFS4ERR_DELEG_REVOKED: /* layout was recalled */
6208 	case -NFS4ERR_BADIOMODE:     /* no IOMODE_RW layout for range */
6209 	case -NFS4ERR_BADLAYOUT:     /* no layout */
6210 	case -NFS4ERR_GRACE:	    /* loca_reclaim always false */
6211 		task->tk_status = 0;
6212 		break;
6213 	case 0:
6214 		nfs_post_op_update_inode_force_wcc(data->args.inode,
6215 						   data->res.fattr);
6216 		break;
6217 	default:
6218 		if (nfs4_async_handle_error(task, server, NULL) == -EAGAIN) {
6219 			rpc_restart_call_prepare(task);
6220 			return;
6221 		}
6222 	}
6223 }
6224 
6225 static void nfs4_layoutcommit_release(void *calldata)
6226 {
6227 	struct nfs4_layoutcommit_data *data = calldata;
6228 	struct pnfs_layout_segment *lseg, *tmp;
6229 	unsigned long *bitlock = &NFS_I(data->args.inode)->flags;
6230 
6231 	pnfs_cleanup_layoutcommit(data);
6232 	/* Matched by references in pnfs_set_layoutcommit */
6233 	list_for_each_entry_safe(lseg, tmp, &data->lseg_list, pls_lc_list) {
6234 		list_del_init(&lseg->pls_lc_list);
6235 		if (test_and_clear_bit(NFS_LSEG_LAYOUTCOMMIT,
6236 				       &lseg->pls_flags))
6237 			put_lseg(lseg);
6238 	}
6239 
6240 	clear_bit_unlock(NFS_INO_LAYOUTCOMMITTING, bitlock);
6241 	smp_mb__after_clear_bit();
6242 	wake_up_bit(bitlock, NFS_INO_LAYOUTCOMMITTING);
6243 
6244 	put_rpccred(data->cred);
6245 	kfree(data);
6246 }
6247 
6248 static const struct rpc_call_ops nfs4_layoutcommit_ops = {
6249 	.rpc_call_prepare = nfs4_layoutcommit_prepare,
6250 	.rpc_call_done = nfs4_layoutcommit_done,
6251 	.rpc_release = nfs4_layoutcommit_release,
6252 };
6253 
6254 int
6255 nfs4_proc_layoutcommit(struct nfs4_layoutcommit_data *data, bool sync)
6256 {
6257 	struct rpc_message msg = {
6258 		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LAYOUTCOMMIT],
6259 		.rpc_argp = &data->args,
6260 		.rpc_resp = &data->res,
6261 		.rpc_cred = data->cred,
6262 	};
6263 	struct rpc_task_setup task_setup_data = {
6264 		.task = &data->task,
6265 		.rpc_client = NFS_CLIENT(data->args.inode),
6266 		.rpc_message = &msg,
6267 		.callback_ops = &nfs4_layoutcommit_ops,
6268 		.callback_data = data,
6269 		.flags = RPC_TASK_ASYNC,
6270 	};
6271 	struct rpc_task *task;
6272 	int status = 0;
6273 
6274 	dprintk("NFS: %4d initiating layoutcommit call. sync %d "
6275 		"lbw: %llu inode %lu\n",
6276 		data->task.tk_pid, sync,
6277 		data->args.lastbytewritten,
6278 		data->args.inode->i_ino);
6279 
6280 	nfs41_init_sequence(&data->args.seq_args, &data->res.seq_res, 1);
6281 	task = rpc_run_task(&task_setup_data);
6282 	if (IS_ERR(task))
6283 		return PTR_ERR(task);
6284 	if (!sync)
6285 		goto out;
6286 	status = nfs4_wait_for_completion_rpc_task(task);
6287 	if (status != 0)
6288 		goto out;
6289 	status = task->tk_status;
6290 out:
6291 	dprintk("%s: status %d\n", __func__, status);
6292 	rpc_put_task(task);
6293 	return status;
6294 }
6295 
6296 static int
6297 _nfs41_proc_secinfo_no_name(struct nfs_server *server, struct nfs_fh *fhandle,
6298 		    struct nfs_fsinfo *info, struct nfs4_secinfo_flavors *flavors)
6299 {
6300 	struct nfs41_secinfo_no_name_args args = {
6301 		.style = SECINFO_STYLE_CURRENT_FH,
6302 	};
6303 	struct nfs4_secinfo_res res = {
6304 		.flavors = flavors,
6305 	};
6306 	struct rpc_message msg = {
6307 		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SECINFO_NO_NAME],
6308 		.rpc_argp = &args,
6309 		.rpc_resp = &res,
6310 	};
6311 	return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
6312 }
6313 
6314 static int
6315 nfs41_proc_secinfo_no_name(struct nfs_server *server, struct nfs_fh *fhandle,
6316 			   struct nfs_fsinfo *info, struct nfs4_secinfo_flavors *flavors)
6317 {
6318 	struct nfs4_exception exception = { };
6319 	int err;
6320 	do {
6321 		err = _nfs41_proc_secinfo_no_name(server, fhandle, info, flavors);
6322 		switch (err) {
6323 		case 0:
6324 		case -NFS4ERR_WRONGSEC:
6325 		case -NFS4ERR_NOTSUPP:
6326 			goto out;
6327 		default:
6328 			err = nfs4_handle_exception(server, err, &exception);
6329 		}
6330 	} while (exception.retry);
6331 out:
6332 	return err;
6333 }
6334 
6335 static int
6336 nfs41_find_root_sec(struct nfs_server *server, struct nfs_fh *fhandle,
6337 		    struct nfs_fsinfo *info)
6338 {
6339 	int err;
6340 	struct page *page;
6341 	rpc_authflavor_t flavor;
6342 	struct nfs4_secinfo_flavors *flavors;
6343 
6344 	page = alloc_page(GFP_KERNEL);
6345 	if (!page) {
6346 		err = -ENOMEM;
6347 		goto out;
6348 	}
6349 
6350 	flavors = page_address(page);
6351 	err = nfs41_proc_secinfo_no_name(server, fhandle, info, flavors);
6352 
6353 	/*
6354 	 * Fall back on "guess and check" method if
6355 	 * the server doesn't support SECINFO_NO_NAME
6356 	 */
6357 	if (err == -NFS4ERR_WRONGSEC || err == -NFS4ERR_NOTSUPP) {
6358 		err = nfs4_find_root_sec(server, fhandle, info);
6359 		goto out_freepage;
6360 	}
6361 	if (err)
6362 		goto out_freepage;
6363 
6364 	flavor = nfs_find_best_sec(flavors);
6365 	if (err == 0)
6366 		err = nfs4_lookup_root_sec(server, fhandle, info, flavor);
6367 
6368 out_freepage:
6369 	put_page(page);
6370 	if (err == -EACCES)
6371 		return -EPERM;
6372 out:
6373 	return err;
6374 }
6375 
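/*
 * Illustrative sketch (hypothetical names, not part of this file):
 * nfs41_find_root_sec() first asks the server which security flavors it
 * will accept for the root file handle (SECINFO_NO_NAME) and uses the
 * best of them; only when the server does not support that operation
 * (WRONGSEC/NOTSUPP) does it fall back to the v4.0-style "guess and
 * check" probe.  With flavor selection folded into the first callback,
 * the control flow reduces to:
 *
 *	#define EX_NFS4ERR_WRONGSEC	10016
 *	#define EX_NFS4ERR_NOTSUPP	10004
 *
 *	static int ex_find_root_sec(int (*secinfo_no_name)(unsigned int *flavor),
 *				    int (*lookup_root_with)(unsigned int flavor),
 *				    int (*guess_and_check)(void))
 *	{
 *		unsigned int flavor;
 *		int err = secinfo_no_name(&flavor);
 *
 *		if (err == -EX_NFS4ERR_WRONGSEC || err == -EX_NFS4ERR_NOTSUPP)
 *			return guess_and_check();	// no SECINFO_NO_NAME
 *		if (err)
 *			return err;
 *		return lookup_root_with(flavor);	// best advertised flavor
 *	}
 */
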
6376 static int _nfs41_test_stateid(struct nfs_server *server, nfs4_stateid *stateid)
6377 {
6378 	int status;
6379 	struct nfs41_test_stateid_args args = {
6380 		.stateid = stateid,
6381 	};
6382 	struct nfs41_test_stateid_res res;
6383 	struct rpc_message msg = {
6384 		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_TEST_STATEID],
6385 		.rpc_argp = &args,
6386 		.rpc_resp = &res,
6387 	};
6388 
6389 	nfs41_init_sequence(&args.seq_args, &res.seq_res, 0);
6390 	status = nfs4_call_sync_sequence(server->client, server, &msg, &args.seq_args, &res.seq_res, 1);
6391 
6392 	if (status == NFS_OK)
6393 		return res.status;
6394 	return status;
6395 }
6396 
6397 static int nfs41_test_stateid(struct nfs_server *server, nfs4_stateid *stateid)
6398 {
6399 	struct nfs4_exception exception = { };
6400 	int err;
6401 	do {
6402 		err = nfs4_handle_exception(server,
6403 				_nfs41_test_stateid(server, stateid),
6404 				&exception);
6405 	} while (exception.retry);
6406 	return err;
6407 }
6408 
6409 static int _nfs4_free_stateid(struct nfs_server *server, nfs4_stateid *stateid)
6410 {
6411 	struct nfs41_free_stateid_args args = {
6412 		.stateid = stateid,
6413 	};
6414 	struct nfs41_free_stateid_res res;
6415 	struct rpc_message msg = {
6416 		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FREE_STATEID],
6417 		.rpc_argp = &args,
6418 		.rpc_resp = &res,
6419 	};
6420 
6421 	nfs41_init_sequence(&args.seq_args, &res.seq_res, 0);
6422 	return nfs4_call_sync_sequence(server->client, server, &msg, &args.seq_args, &res.seq_res, 1);
6423 }
6424 
6425 static int nfs41_free_stateid(struct nfs_server *server, nfs4_stateid *stateid)
6426 {
6427 	struct nfs4_exception exception = { };
6428 	int err;
6429 	do {
6430 		err = nfs4_handle_exception(server,
6431 				_nfs4_free_stateid(server, stateid),
6432 				&exception);
6433 	} while (exception.retry);
6434 	return err;
6435 }
6436 
6437 static bool nfs41_match_stateid(const nfs4_stateid *s1,
6438 		const nfs4_stateid *s2)
6439 {
6440 	if (memcmp(s1->other, s2->other, sizeof(s1->other)) != 0)
6441 		return false;
6442 
6443 	if (s1->seqid == s2->seqid)
6444 		return true;
6445 	if (s1->seqid == 0 || s2->seqid == 0)
6446 		return true;
6447 
6448 	return false;
6449 }
6450 
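/*
 * Illustrative sketch (hypothetical names, not part of this file): two
 * v4.1 stateids match when their 12-byte "other" fields are identical
 * and their seqids are either equal or one of them is zero -- zero acts
 * as a wildcard meaning "whatever the current seqid is".  A
 * self-contained test of that rule:
 *
 *	#include <assert.h>
 *	#include <stdbool.h>
 *	#include <stdint.h>
 *	#include <string.h>
 *
 *	struct ex_stateid {
 *		uint32_t	seqid;
 *		char		other[12];
 *	};
 *
 *	static bool ex_match(const struct ex_stateid *a,
 *			     const struct ex_stateid *b)
 *	{
 *		if (memcmp(a->other, b->other, sizeof(a->other)) != 0)
 *			return false;
 *		return a->seqid == b->seqid || a->seqid == 0 || b->seqid == 0;
 *	}
 *
 *	int main(void)
 *	{
 *		struct ex_stateid s1 = { .seqid = 3, .other = "abcdefghijk" };
 *		struct ex_stateid s2 = s1;
 *
 *		assert(ex_match(&s1, &s2));	// identical stateids match
 *		s2.seqid = 0;
 *		assert(ex_match(&s1, &s2));	// zero seqid is a wildcard
 *		s2.seqid = 4;
 *		assert(!ex_match(&s1, &s2));	// different non-zero seqids
 *		return 0;
 *	}
 */
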
6451 #endif /* CONFIG_NFS_V4_1 */
6452 
6453 static bool nfs4_match_stateid(const nfs4_stateid *s1,
6454 		const nfs4_stateid *s2)
6455 {
6456 	return nfs4_stateid_match(s1, s2);
6457 }
6458 
6459 
6460 static const struct nfs4_state_recovery_ops nfs40_reboot_recovery_ops = {
6461 	.owner_flag_bit = NFS_OWNER_RECLAIM_REBOOT,
6462 	.state_flag_bit	= NFS_STATE_RECLAIM_REBOOT,
6463 	.recover_open	= nfs4_open_reclaim,
6464 	.recover_lock	= nfs4_lock_reclaim,
6465 	.establish_clid = nfs4_init_clientid,
6466 	.get_clid_cred	= nfs4_get_setclientid_cred,
6467 };
6468 
6469 #if defined(CONFIG_NFS_V4_1)
6470 static const struct nfs4_state_recovery_ops nfs41_reboot_recovery_ops = {
6471 	.owner_flag_bit = NFS_OWNER_RECLAIM_REBOOT,
6472 	.state_flag_bit	= NFS_STATE_RECLAIM_REBOOT,
6473 	.recover_open	= nfs4_open_reclaim,
6474 	.recover_lock	= nfs4_lock_reclaim,
6475 	.establish_clid = nfs41_init_clientid,
6476 	.get_clid_cred	= nfs4_get_exchange_id_cred,
6477 	.reclaim_complete = nfs41_proc_reclaim_complete,
6478 };
6479 #endif /* CONFIG_NFS_V4_1 */
6480 
6481 static const struct nfs4_state_recovery_ops nfs40_nograce_recovery_ops = {
6482 	.owner_flag_bit = NFS_OWNER_RECLAIM_NOGRACE,
6483 	.state_flag_bit	= NFS_STATE_RECLAIM_NOGRACE,
6484 	.recover_open	= nfs4_open_expired,
6485 	.recover_lock	= nfs4_lock_expired,
6486 	.establish_clid = nfs4_init_clientid,
6487 	.get_clid_cred	= nfs4_get_setclientid_cred,
6488 };
6489 
6490 #if defined(CONFIG_NFS_V4_1)
6491 static const struct nfs4_state_recovery_ops nfs41_nograce_recovery_ops = {
6492 	.owner_flag_bit = NFS_OWNER_RECLAIM_NOGRACE,
6493 	.state_flag_bit	= NFS_STATE_RECLAIM_NOGRACE,
6494 	.recover_open	= nfs41_open_expired,
6495 	.recover_lock	= nfs41_lock_expired,
6496 	.establish_clid = nfs41_init_clientid,
6497 	.get_clid_cred	= nfs4_get_exchange_id_cred,
6498 };
6499 #endif /* CONFIG_NFS_V4_1 */
6500 
6501 static const struct nfs4_state_maintenance_ops nfs40_state_renewal_ops = {
6502 	.sched_state_renewal = nfs4_proc_async_renew,
6503 	.get_state_renewal_cred_locked = nfs4_get_renew_cred_locked,
6504 	.renew_lease = nfs4_proc_renew,
6505 };
6506 
6507 #if defined(CONFIG_NFS_V4_1)
6508 static const struct nfs4_state_maintenance_ops nfs41_state_renewal_ops = {
6509 	.sched_state_renewal = nfs41_proc_async_sequence,
6510 	.get_state_renewal_cred_locked = nfs4_get_machine_cred_locked,
6511 	.renew_lease = nfs4_proc_sequence,
6512 };
6513 #endif
6514 
6515 static const struct nfs4_minor_version_ops nfs_v4_0_minor_ops = {
6516 	.minor_version = 0,
6517 	.call_sync = _nfs4_call_sync,
6518 	.match_stateid = nfs4_match_stateid,
6519 	.find_root_sec = nfs4_find_root_sec,
6520 	.reboot_recovery_ops = &nfs40_reboot_recovery_ops,
6521 	.nograce_recovery_ops = &nfs40_nograce_recovery_ops,
6522 	.state_renewal_ops = &nfs40_state_renewal_ops,
6523 };
6524 
6525 #if defined(CONFIG_NFS_V4_1)
6526 static const struct nfs4_minor_version_ops nfs_v4_1_minor_ops = {
6527 	.minor_version = 1,
6528 	.call_sync = _nfs4_call_sync_session,
6529 	.match_stateid = nfs41_match_stateid,
6530 	.find_root_sec = nfs41_find_root_sec,
6531 	.reboot_recovery_ops = &nfs41_reboot_recovery_ops,
6532 	.nograce_recovery_ops = &nfs41_nograce_recovery_ops,
6533 	.state_renewal_ops = &nfs41_state_renewal_ops,
6534 };
6535 #endif
6536 
6537 const struct nfs4_minor_version_ops *nfs_v4_minor_ops[] = {
6538 	[0] = &nfs_v4_0_minor_ops,
6539 #if defined(CONFIG_NFS_V4_1)
6540 	[1] = &nfs_v4_1_minor_ops,
6541 #endif
6542 };
6543 
6544 static const struct inode_operations nfs4_file_inode_operations = {
6545 	.permission	= nfs_permission,
6546 	.getattr	= nfs_getattr,
6547 	.setattr	= nfs_setattr,
6548 	.getxattr	= generic_getxattr,
6549 	.setxattr	= generic_setxattr,
6550 	.listxattr	= generic_listxattr,
6551 	.removexattr	= generic_removexattr,
6552 };
6553 
6554 const struct nfs_rpc_ops nfs_v4_clientops = {
6555 	.version	= 4,			/* protocol version */
6556 	.dentry_ops	= &nfs4_dentry_operations,
6557 	.dir_inode_ops	= &nfs4_dir_inode_operations,
6558 	.file_inode_ops	= &nfs4_file_inode_operations,
6559 	.file_ops	= &nfs4_file_operations,
6560 	.getroot	= nfs4_proc_get_root,
6561 	.getattr	= nfs4_proc_getattr,
6562 	.setattr	= nfs4_proc_setattr,
6563 	.lookup		= nfs4_proc_lookup,
6564 	.access		= nfs4_proc_access,
6565 	.readlink	= nfs4_proc_readlink,
6566 	.create		= nfs4_proc_create,
6567 	.remove		= nfs4_proc_remove,
6568 	.unlink_setup	= nfs4_proc_unlink_setup,
6569 	.unlink_rpc_prepare = nfs4_proc_unlink_rpc_prepare,
6570 	.unlink_done	= nfs4_proc_unlink_done,
6571 	.rename		= nfs4_proc_rename,
6572 	.rename_setup	= nfs4_proc_rename_setup,
6573 	.rename_rpc_prepare = nfs4_proc_rename_rpc_prepare,
6574 	.rename_done	= nfs4_proc_rename_done,
6575 	.link		= nfs4_proc_link,
6576 	.symlink	= nfs4_proc_symlink,
6577 	.mkdir		= nfs4_proc_mkdir,
6578 	.rmdir		= nfs4_proc_remove,
6579 	.readdir	= nfs4_proc_readdir,
6580 	.mknod		= nfs4_proc_mknod,
6581 	.statfs		= nfs4_proc_statfs,
6582 	.fsinfo		= nfs4_proc_fsinfo,
6583 	.pathconf	= nfs4_proc_pathconf,
6584 	.set_capabilities = nfs4_server_capabilities,
6585 	.decode_dirent	= nfs4_decode_dirent,
6586 	.read_setup	= nfs4_proc_read_setup,
6587 	.read_rpc_prepare = nfs4_proc_read_rpc_prepare,
6588 	.read_done	= nfs4_read_done,
6589 	.write_setup	= nfs4_proc_write_setup,
6590 	.write_rpc_prepare = nfs4_proc_write_rpc_prepare,
6591 	.write_done	= nfs4_write_done,
6592 	.commit_setup	= nfs4_proc_commit_setup,
6593 	.commit_done	= nfs4_commit_done,
6594 	.lock		= nfs4_proc_lock,
6595 	.clear_acl_cache = nfs4_zap_acl_attr,
6596 	.close_context  = nfs4_close_context,
6597 	.open_context	= nfs4_atomic_open,
6598 	.init_client	= nfs4_init_client,
6599 	.secinfo	= nfs4_proc_secinfo,
6600 };
6601 
6602 static const struct xattr_handler nfs4_xattr_nfs4_acl_handler = {
6603 	.prefix	= XATTR_NAME_NFSV4_ACL,
6604 	.list	= nfs4_xattr_list_nfs4_acl,
6605 	.get	= nfs4_xattr_get_nfs4_acl,
6606 	.set	= nfs4_xattr_set_nfs4_acl,
6607 };
6608 
6609 const struct xattr_handler *nfs4_xattr_handlers[] = {
6610 	&nfs4_xattr_nfs4_acl_handler,
6611 	NULL
6612 };
6613 
6614 module_param(max_session_slots, ushort, 0644);
6615 MODULE_PARM_DESC(max_session_slots, "Maximum number of outstanding NFSv4.1 "
6616 		"requests the client will negotiate");
6617 
6618 /*
6619  * Local variables:
6620  *  c-basic-offset: 8
6621  * End:
6622  */
6623